blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aacc2ade3e3b635267e0895250241964852a07f3 | 51b20543e5ed606636bcde9fba329e5fa948de2e | /communityprofiles/census/remote_file.py | 2bab92682c5bab5ea49d416570d119d3a9aaa99d | [
"MIT"
] | permissive | 216software/Profiles | b821112225e8522b7b558cab87ae1c12c68c653b | 651da880a3d4295243205bdae4de88504edc91de | refs/heads/dev | 2023-03-16T04:49:01.389186 | 2023-03-09T17:04:04 | 2023-03-09T17:04:04 | 59,139,518 | 3 | 0 | null | 2016-05-18T18:02:53 | 2016-05-18T18:02:53 | null | UTF-8 | Python | false | false | 3,612 | py | # for RemoteFileObject
from os import SEEK_SET, SEEK_CUR, SEEK_END
from time import time
from datetime import timedelta
from urlparse import urlparse, urljoin
from cStringIO import StringIO
from httplib import HTTPConnection
from urllib import urlopen
from zipfile import ZipFile
from itertools import izip
from sys import stderr
from os.path import basename
class RemoteFileObject:
""" Implement enough of this to be useful:
http://docs.python.org/release/2.5.2/lib/bltin-file-objects.html
Pull data from a remote URL with HTTP range headers.
"""
def __init__(self, url, verbose=False, block_size=(16 * 1024)):
self.verbose = verbose
# scheme://host/path;parameters?query#fragment
(scheme, host, path, parameters, query, fragment) = urlparse(url)
self.host = host
self.rest = path + (query and ('?' + query) or '')
self.offset = 0
self.length = self.get_length()
self.chunks = {}
self.block_size = block_size
self.start_time = time()
    def get_length(self):
        """ Return the total byte length of the remote resource,
            read from the Content-Length response header.
        """
conn = HTTPConnection(self.host)
conn.request('GET', self.rest, headers={'Range': '0-1'})
length = int(conn.getresponse().getheader('content-length'))
if self.verbose:
print >> stderr, length, 'bytes in', basename(self.rest)
return length
    def get_range(self, start, end):
        """ Return the bytes from start to end (inclusive) of the
            remote resource, requested via an HTTP Range header.
        """
headers = {'Range': 'bytes=%(start)d-%(end)d' % locals()}
conn = HTTPConnection(self.host)
conn.request('GET', self.rest, headers=headers)
return conn.getresponse().read()
def read(self, count=None):
""" Read /count/ bytes from the resource at the current offset.
"""
if count is None:
# to the end
count = self.length - self.offset
out = StringIO()
while count:
chunk_offset = self.block_size * (self.offset / self.block_size)
if chunk_offset not in self.chunks:
range = chunk_offset, min(self.length, self.offset + self.block_size) - 1
self.chunks[chunk_offset] = StringIO(self.get_range(*range))
if self.verbose:
loaded = float(self.block_size) * len(self.chunks) / self.length
expect = (time() - self.start_time) / loaded
remain = max(0, int(expect * (1 - loaded)))
print >> stderr, '%.1f%%' % min(100, 100 * loaded),
print >> stderr, 'of', basename(self.rest),
print >> stderr, 'with', timedelta(seconds=remain), 'to go'
chunk = self.chunks[chunk_offset]
in_chunk_offset = self.offset % self.block_size
in_chunk_count = min(count, self.block_size - in_chunk_offset)
chunk.seek(in_chunk_offset, SEEK_SET)
out.write(chunk.read(in_chunk_count))
count -= in_chunk_count
self.offset += in_chunk_count
out.seek(0)
return out.read()
def seek(self, offset, whence=SEEK_SET):
""" Seek to the specified offset.
/whence/ behaves as with other file-like objects:
http://docs.python.org/lib/bltin-file-objects.html
"""
if whence == SEEK_SET:
self.offset = offset
elif whence == SEEK_CUR:
self.offset += offset
elif whence == SEEK_END:
self.offset = self.length + offset
def tell(self):
return self.offset
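# A minimal usage sketch (the URL is a placeholder, not a real endpoint):
# the object implements enough of the file protocol for ZipFile to read a
# remote archive without downloading all of it.
#
#   remote = RemoteFileObject('http://example.com/data/tracts.zip')
#   archive = ZipFile(remote)        # reads the zip directory via Range requests
#   print archive.namelist()
#   remote.seek(0, SEEK_SET)         # rewind for a sequential read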
| [
"asmedrano@gmail.com"
] | asmedrano@gmail.com |
186adaf55008fdd7e7913aaa6ec5e5ff32402a20 | 70d5ac89833250f642737cfd52c2e5977ada94f7 | /Servidor/Flaskr/src/flaskr.py | 9c6b424ef1d63676629cb7a1f8ca86594fcb90e1 | [] | no_license | Plozano94/CupulaCiclope | 7b41d3fd0fe3e938a6aba104dcb9b64475d39229 | 4e22ee70210916881950bc7010e75819a21a31f8 | refs/heads/master | 2020-04-15T15:21:39.600058 | 2017-03-28T09:20:14 | 2017-03-28T09:20:14 | 46,656,430 | 0 | 1 | null | 2017-03-28T08:26:34 | 2015-11-22T10:53:50 | Python | UTF-8 | Python | false | false | 8,292 | py | #! /usr/bin/env python
# all the imports
import servidorConf as sc
import threading
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, Response
from contextlib import closing
import serial
import subprocess
import os,sys
from functions import system
import json
import time
from tinydb import TinyDB, where,Query
import pika
#Import the configuration file
sy=system()
def cola(sy):
    #Set up the credentials
credentials = pika.PlainCredentials(sc.RABBIT_USER, sc.RABBIT_PASSWD)
    #Open the connection to the server through port 5672
connection = pika.BlockingConnection(pika.ConnectionParameters(sc.urlServer, 5672,'/',credentials))
channel = connection.channel()
    #Declare the queue we will receive on (in case it has not been created yet)
channel.queue_declare(queue=sc.me, durable=True)
    #Declare the queues we will send to with emitter.py, in case they are not declared yet
for x in sc.list:
channel.queue_declare(queue=x, durable=True)
    #Bind the exchanges to the queues, taking the routing key into account
for x,y in zip(sc.list,sc.severity):
channel.exchange_declare(exchange=x,
type='fanout')
channel.queue_bind(exchange=x,
queue=sc.me,
routing_key=y)
def callback(ch, method, properties, body):
print(" [x] %r" % (body))
if sy.follow:
if 'D' in body:
t = threading.Thread(target=sy.goto, args=(body,ser,))
t.start()
return
channel.basic_consume(callback,
queue=sc.me,
no_ack=True)
channel.start_consuming()
print 'Queue receiver started'
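# Sketch of the matching publisher side (as in emitter.py; the exchange and
# routing key below are taken from servidorConf and are placeholders):
#
#   channel.basic_publish(exchange=sc.list[0], routing_key=sc.severity[0],
#                         body='D90')   # ask the dome to rotate to azimuth 90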
# configuration
DEBUG = False
SECRET_KEY = 'development key'
# Starting our application
app = Flask(__name__)
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
#Debug var
app.config['DEBUG'] = False
app.config['SECRET_KEY'] = 'some_really_long_random_string_here'
task_id=len(sy.table)+1
logged= None
os.chdir('/home/cupula/CupulaCiclope/Servidor/Flaskr')
sy.empaquetar()
#Launching Camera server if sc.camera variable is active
if sc.camera==1:
try:
newPid2=os.fork()
if newPid2==0:
print "Launched camera server"
sy.cameraServer()
sys.exit()
else:
print newPid2
except:
print "Error launching the camera"
sy.log("Error launching the camera")
#Launching Comunication with board if sc.boardPort is active
if sc.board==1:
try:
ser = serial.Serial(sc.boardPort, 9600,timeout=3)
sy.ser=ser
'''t=threading.Timer(2,sy.checkRoutine,args=(ser,))
t.daemon=True
t.start()'''
#sy.checkRoutine(ser)
print "Launched comunication with board"
except:
print "Error launching the board"
sy.log("Error launching the board")
else:
print 'Controller board variable is not activated'
sy.log('Controller board variable is not activated')
t = threading.Thread(target=cola, args=(sy,))
t.start()
@app.route('/api/cupula/montegancedo/task', methods=['POST'])
def task():
error=None
global logged
print logged
if ((logged != 'admin') and (logged !='guest')):
abort(401)
if request.method == 'POST':
#cur = get_db().cursor()
message=request.get_json()
global task_id
task_id=task_id+1
sy.table.all()
sy.table.insert({'id':task_id,'command':message,'time':time.strftime("%H:%M:%S"),'status':'non-completed'})
sy.task_json=json.dumps({'id':task_id,'command':message['command'],'time':time.strftime("%H:%M:%S"), 'status':"non-completed"})
#print sy.task_json
message=str(message['command'])
#print message
        if 'SZ' in message:
            sy.offset = sy.azimut  # was 'sy.offset==sy.azimut', a no-op comparison
elif 'followOn' in message:
sy.follow=True
elif 'followOff' in message:
sy.follow=False
elif 'H' in message:
message='D'+str(sc.home)
t = threading.Thread(target=sy.goto, args=(message,ser,))
t.start()
elif 'D' in message:
t = threading.Thread(target=sy.goto, args=(message,ser,))
t.start()
elif 'ON' in message:
sy.on=True
sy._threadStopper_=threading.Event()
sy._thread_=threading.Timer(2,sy.checkRoutine,args=(ser,))
sy._thread_.daemon=True
sy._thread_.start()
elif 'OFF' in message:
sy.on=False
sy._threadStopper_.set()
else:
            if logged == 'admin':
try:
t = threading.Thread(target=sy.send, args=(message,ser,))
t.start()
except:
print "Error launching thread"
return sy.task_json
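# Example client call for this endpoint (illustrative; the host is an
# assumption, and a prior login is required):
#
#   import requests
#   r = requests.post('http://localhost/api/cupula/montegancedo/task',
#                     json={'command': 'D90'})
#   print r.text   # -> {"id": ..., "command": "D90", "status": "non-completed"}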
@app.route('/api/cupula/montegancedo/', methods=['GET'])
def status():
response_json=json.dumps({'lat':"40 24 22 N" ,'long':"3 50 19 O" , 'name':"Observatorio Montegancedo",'status':{'Azimut':sy.azimut,'Laps':sy.vueltas, 'Voltage': sy.voltage, 'Direction':sy.direction}})
return response_json
@app.route('/api/cupula/montegancedo/tasks/<int:iden>', methods=['GET'])
def returnTask(iden):
s=Query()
x=sy.table.get(s.id==iden)
print x
    if x is None:
abort(404)
print x['id']
sy.task_json=json.dumps({'id':x['id'],'command':x['command']['command'],'time':x['time'],'status':x['status']})
return sy.task_json
@app.route('/login', methods=['GET', 'POST'])
def login():
"""global sc.USERNAME_ADMIN
global sc.USERNAME_GUEST
global sc.PASSWORD_GUEST
global sc.PASSWORD_ADMIN"""
error = None
global logged
if request.method == 'POST':
"""if (request.form['username'] not in sc.USERNAME_ADMIN)and (request.form['username'] not in sc.USERNAME_GUEST):
logged = ''
sy._logged_=""
error = 'Invalid username'
print error
if (request.form['password'] != sc.PASSWORD_GUEST)and(request.form['password'] != sc.PASSWORD_ADMIN):
logged = ''
sy._logged_=""
error = 'Invalid password'
print error
else:
global logged
if request.form['username'] in sc.USERNAME_ADMIN:
logged = 'admin'
sy._logged_="admin"
if request.form['username'] in sc.USERNAME_GUEST:
logged = 'guest'
sy._logged_='guest'
#session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('command'))"""
        if (request.form['username'] in sc.USERNAME_ADMIN):
            if (request.form['password'] in sc.PASSWORD_ADMIN):
                logged = 'admin'
                sy._logged_ = 'admin'
            else:
                error = "Invalid username or password"
                print error
                logged = ''
                sy._logged_ = ''
        elif (request.form['username'] in sc.USERNAME_GUEST):
            if (request.form['password'] in sc.PASSWORD_GUEST):
                logged = 'guest'
                sy._logged_ = 'guest'
            else:
                error = "Invalid username or password"
                print error
                logged = ''
                sy._logged_ = ''
        else:
            error = "Invalid username or password"
            print error
            logged = ''
            sy._logged_ = ''
print sy._logged_
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
global logged
logged = None
#session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
if __name__ == '__main__':
print "////////////// Starting Cupula Ciclope's server//////////////"
app.run(host='0.0.0.0',port=80)
| [
"plozano94@gmail.com"
] | plozano94@gmail.com |
d5829bec6fa9c5cee52ea9965d6461ac7b09ae03 | b9aabb13870b3707609fd2ea117870c2ad40c14b | /src/train/candle_accl_trn_02_from_scratch.py | ba6158c445418add3d902997fb9e7c37e495eea4 | [] | no_license | adpartin/pilot1 | d88d2af7d15df68780ab2f82169897a9c388a2fd | c99f32052fab0de210fd200b43194b19088dc3a7 | refs/heads/master | 2023-04-14T22:30:33.339975 | 2023-04-10T23:36:34 | 2023-04-10T23:36:34 | 160,101,290 | 1 | 0 | null | 2022-12-08T02:59:53 | 2018-12-02T22:10:10 | Jupyter Notebook | UTF-8 | Python | false | false | 7,571 | py | from __future__ import print_function, division
import warnings
warnings.filterwarnings('ignore')
# from comet_ml import Experiment
import os
import sys
from pathlib import Path
import argparse
import datetime
from time import time
from pprint import pprint
from glob import glob
import sklearn
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import keras as ke
from keras import backend as K
from keras.models import Sequential, Model, model_from_json, model_from_yaml, load_model
from keras.optimizers import SGD, Adam, RMSprop, Adadelta
from keras.utils import np_utils, multi_gpu_model
from keras.callbacks import Callback, ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.model_selection import ShuffleSplit, KFold
from sklearn.model_selection import GroupShuffleSplit, GroupKFold
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
SEED = None
t_start = time()
# Utils
import ml_models
import classlogger
import utils
# Import custom callbacks
keras_contrib = '/vol/ml/apartin/projects/keras-contrib/keras_contrib'
sys.path.append(keras_contrib)
from callbacks import *
# File path
# file_path = os.path.dirname(os.path.realpath(__file__))
file_path = Path(__file__).resolve().parent
# Path
PRJ_NAME = 'candle_accl_trn'
PRJ_DIR = file_path / '../../models' / PRJ_NAME
DATADIR = PRJ_DIR / 'data'
# Arg parser
psr = argparse.ArgumentParser(description='input agg csv file')
psr.add_argument('--ep', type=int, default=350, help='Total number of epochs.')
psr.add_argument('--tr_phase', type=str, choices=['wrm', 'ref'], default='wrm')
psr.add_argument('--attn', action='store_true', default=False, help='whether to use attention layer.')
psr.add_argument('--split_by', type=str, choices=['cell', 'drug', 'both', 'none'], default='cell',
help='Specify how to disjointly partition the dataset: \
`cell` (disjoint on cell), `drug` (disjoint on drug), \
`both` (disjoint on cell and drug), `none` (random split).')
psr.add_argument('-ml', '--model_name', type=str, default='nn_reg')
psr.add_argument('--batch', type=int, default=32)
psr.add_argument('--dr_rate', type=float, default=0.2)
psr.add_argument('--skp_ep', type=int, default=10, help='Number of epochs to skip when plotting training curves.')
psr.add_argument('--base_clr', type=float, default=1e-4, help='Base learning rate for cyclical learning rate.')
psr.add_argument('--max_clr', type=float, default=1e-3, help='Max learning rate for cyclical learning rate.')
args = vars(psr.parse_args())
pprint(args)
# Args
EPOCH = args['ep']
BATCH = args['batch']
DR = args['dr_rate']
attn = args['attn']
model_name = args['model_name']
split_by = args['split_by']
skp_ep = args['skp_ep']
base_clr = args['base_clr']
max_clr = args['max_clr']
tr_phase = args['tr_phase']
if attn is True:
nn_type = 'attn'
else:
nn_type = 'fc'
# Path and outdir
# data_path = DATADIR / ('split_by_' + split_by) / f'df_{tr_phase}.parquet'
data_path = DATADIR / ('split_by_' + split_by)
outdir = PRJ_DIR / (tr_phase + '_' + nn_type) / ('split_by_' + split_by)
os.makedirs(outdir, exist_ok=True)
# Dump args
utils.dump_args(args, outdir=outdir)
# Logger
logfilename = outdir/'logfile.log'
lg = classlogger.Logger(logfilename=logfilename)
# ---------
# Load data
# ---------
def load_data(datapath):
data = pd.read_parquet(datapath, engine='auto', columns=None)
data = data.sample(frac=1.0, axis=0, random_state=SEED).reset_index(drop=True)
return data
lg.logger.info(f'Loading data ... {data_path}')
t0 = time()
# df = pd.read_parquet(data_path, engine='auto', columns=None)
# df = df.sample(frac=1.0, axis=0, random_state=SEED).reset_index(drop=True)
df_tr = load_data(data_path/f'df_{tr_phase}_tr.parquet')
df_te = load_data(data_path/f'df_{tr_phase}_te.parquet')
lg.logger.info('Done ({:.2f} mins)\n'.format( (time()-t0)/60) )
# -----
# Comet
# -----
# comet_api_key = os.environ.get('COMET_API_KEY')
# comet_prg_name = PRJ_NAME
# comet_set_name = TR_PHASE
# experiment = Experiment(api_key=comet_api_key, project_name=comet_prg_name)
# experiment.set_name(comet_set_name)
# experiment.add_tag(TR_PHASE)
# --------------------
# Split data and scale
# --------------------
# df_tr, df_te = train_test_split(df); del df
lg.logger.info('df_tr.shape: {}'.format(df_tr.shape))
lg.logger.info('df_te.shape: {}'.format(df_te.shape))
# if tr_phase == 'ref':
# lg.logger.info('\nDump ref dfs ...')
# df_tr.to_parquet(outdir/'df_tr.parquet', engine='auto', compression='snappy')
# df_te.to_parquet(outdir/'df_te.parquet', engine='auto', compression='snappy')
ytr, xtr = df_tr.iloc[:, 0], df_tr.iloc[:, 1:]; del df_tr
yte, xte = df_te.iloc[:, 0], df_te.iloc[:, 1:]; del df_te
# Scale
scaler = StandardScaler()
xtr = pd.DataFrame( scaler.fit_transform(xtr) ).astype(np.float32)
xte = pd.DataFrame( scaler.transform(xte) ).astype(np.float32)
joblib.dump(scaler, outdir/'scaler.pkl')
# -----------------
# Define callbackls
# -----------------
# Callbacks (custom)
tr_iters = xtr.shape[0] / BATCH
step_size = int(4 * tr_iters)  # num of training iterations per half cycle; Smith suggests step_size = (2-8) x (training iterations in epoch)
clr = CyclicLR(base_lr=base_clr, max_lr=max_clr, step_size=step_size, mode='triangular')
# Keras callbacks
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.75, patience=15, verbose=1, mode='auto',
min_delta=0.0001, cooldown=3, min_lr=1e-9)
early_stop = EarlyStopping(monitor='val_loss', patience=50, verbose=1, mode='auto')
csv_logger = CSVLogger(outdir/'krs_logger.log')
model_checkpoint_dir = outdir/'models'
os.makedirs(model_checkpoint_dir, exist_ok=True)
# checkpointer = ModelCheckpoint(str(outdir/'model.ep_{epoch:d}-val_loss_{val_loss:.5f}.h5'),
# verbose=0, save_weights_only=False, save_best_only=False)
checkpointer = ModelCheckpoint(str(model_checkpoint_dir/'model.ep_{epoch:d}-val_loss_{val_loss:.5f}.h5'),
verbose=0, save_weights_only=False, save_best_only=False)
# Callbacks list
callback_list = [checkpointer, csv_logger, early_stop, reduce_lr, # keras callbacks
clr] # custom callbacks
# -----
# Train
# -----
# fit_params
fit_params = {'batch_size': BATCH, 'epochs': EPOCH, 'verbose': 1}
fit_params['validation_data'] = (xte, yte)
fit_params['callbacks'] = callback_list
# Get the estimator
init_params = {'input_dim': xtr.shape[1], 'dr_rate': DR, 'attn': attn}
model = ml_models.get_model(model_name=model_name, init_params=init_params)
# Train model
t0 = time()
history = model.model.fit(xtr, ytr, **fit_params)
fit_runtime = time() - t0
lg.logger.info('fit_runtime: {:.1f} mins'.format(fit_runtime/60))
# Print score
score = model.model.evaluate(xte, yte, verbose=0)
lg.logger.info('val_loss: {:.5f}'.format(score[0]))
# -----------------
# Summarize results
# -----------------
# Plots
plts_path = outdir/'plts'
os.makedirs(plts_path, exist_ok=True)
ml_models.plot_prfrm_metrics(history=history, title=f'{tr_phase} training',
skp_ep=skp_ep, add_lr=True, outdir=plts_path)
# Save keras history
ml_models.save_krs_history(history, outdir)
lg.logger.info('\nProgram runtime: {:.2f} mins'.format( (time() - t_start)/60 ))
lg.logger.info('Done.')
| [
"apartin@lambda-quad.cels.anl.gov"
] | apartin@lambda-quad.cels.anl.gov |
603a486b8c2ae434c0dd625d965053eee0423d48 | ba02e8e461d47069ce6ae016b605cf65d32e19d3 | /chat.py | b7da85cce7904e33ce4ec81e3bde1e581dca99da | [
"MIT"
] | permissive | Heroadn/AwesomeChat | 6c1508b4bd71019e543553ecec0315f107c5ffc8 | 8215913eb2131494806ab5df5b2af17b0e2ab04d | refs/heads/master | 2020-07-05T08:45:55.011399 | 2019-07-21T00:53:08 | 2019-07-21T00:53:08 | 202,595,168 | 1 | 0 | null | 2019-08-15T18:52:42 | 2019-08-15T18:52:41 | null | UTF-8 | Python | false | false | 118 | py | import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from app import app
if __name__ == "__main__":
app.run()
| [
"thiago@desenvolvedor.tech"
] | thiago@desenvolvedor.tech |
169519835ac47d886105ad315fc37451bd6ff3d2 | c226af28b382c8b37ea2f00ce87fa3ed467fb5d9 | /022 hackerrank - .discard, .remove, ,pop/main.py | 33d337df25c14133788c45deb0de20000fcc5909 | [] | no_license | JoeSeff/hackerrank-python | 07a3ed8be65069eb323dd15c2e605f9ac7a68f37 | f304804cad95828898bd48db5e582ba8a21e0530 | refs/heads/master | 2022-07-19T06:21:48.743411 | 2020-05-23T00:44:00 | 2020-05-23T00:44:00 | 266,234,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | set_size = int(input()) #useless
main_set = set(list(map(int, input().split(" "))))
op_count = int(input())
for x in range(op_count):
try:
        # read one operation line and apply it to the set
op_string = input()
if 'pop' in op_string:
main_set.pop()
elif 'discard' in op_string:
index = int(op_string.split(" ")[1])
main_set.discard(index)
elif 'remove' in op_string:
index = int(op_string.split(" ")[1])
main_set.remove(index)
    except KeyError:
        # remove() of a missing element or pop() on an empty set raises KeyError
        pass

print(sum(main_set))
| [
"maxwellandlynx@gmail.com"
] | maxwellandlynx@gmail.com |
667c8b4b904d7c226e66d67f7273f4a22805337a | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_27051.py | 21e1599a8f2a20678ec565a2b8443285bc4a4d16 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | # Matplotlib legend relative to figure in multiplot
matplotlib
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
e726984d4019bc6974ee4b2702b243d18c0669f7 | 73758dde83d1a1823c103e1a4ba71e7c95168f71 | /nsd2006/devops/day02/local_mail.py | 3533d97f07484696810b548f9f0931ac688dde15 | [] | no_license | tonggh220/md_5_nsd_notes | 07ffdee7c23963a7a461f2a2340143b0e97bd9e1 | a58a021ad4c7fbdf7df327424dc518f4044c5116 | refs/heads/master | 2023-07-02T01:34:38.798929 | 2021-05-12T08:48:40 | 2021-05-12T08:48:40 | 393,885,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | from email.mime.text import MIMEText
from email.header import Header
import smtplib
# Prepare the message; 'plain' means plain text
message = MIMEText("python local email test.\n", 'plain', 'utf8')  # message body
message['From'] = Header('root', 'utf8')
message['To'] = Header('zhangsan', 'utf8')
message['Subject'] = Header('py test', 'utf8')
# Send the mail
smtp = smtplib.SMTP()
smtp.connect('localhost')
smtp.sendmail('root', ['root', 'zhangsan'], message.as_bytes())
smtp.quit()
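# For a remote relay that requires authentication, the flow would look like
# this (host and credentials below are placeholders):
#
#   smtp = smtplib.SMTP('mail.example.com', 25)
#   smtp.login('user', 'secret')
#   smtp.sendmail('user@example.com', ['rcpt@example.com'], message.as_bytes())
#   smtp.quit()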
| [
"zhangzg@tedu.cn"
] | zhangzg@tedu.cn |
8b20ab0b23ca75e102d3f7c1bd8017bf3ac1b485 | 22dcbf9595c28279b681caac26e43113ce75de5c | /automl/cloud-client/import_dataset_test.py | 35d23edc7e8fc745ed598a895c037a49b9cc7f90 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jerry-enebeli/python-docs-samples | 2e61672e9819405733c94ef824ba0b0f92b3e930 | 0d78724126ce25dd6203cfd2ee3467b88e5c27b9 | refs/heads/master | 2022-12-12T18:05:16.899492 | 2020-09-01T22:35:40 | 2020-09-01T22:35:40 | 292,189,370 | 1 | 0 | Apache-2.0 | 2020-09-02T05:39:23 | 2020-09-02T05:39:22 | null | UTF-8 | Python | false | false | 1,497 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import import_dataset
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
BUCKET_ID = "{}-lcm".format(PROJECT_ID)
DATASET_ID = "TEN0000000000000000000"
def test_import_dataset(capsys):
# As importing a dataset can take a long time and only four operations can
# be run on a dataset at once. Try to import into a nonexistent dataset and
# confirm that the dataset was not found, but other elements of the request
# were valid.
try:
data = "gs://{}/sentiment-analysis/dataset.csv".format(BUCKET_ID)
import_dataset.import_dataset(PROJECT_ID, DATASET_ID, data)
out, _ = capsys.readouterr()
assert (
"The Dataset doesn't exist or is inaccessible for use with AutoMl."
in out
)
except Exception as e:
assert (
"The Dataset doesn't exist or is inaccessible for use with AutoMl."
in e.message
)
| [
"noreply@github.com"
] | noreply@github.com |
d00743937cab44b9afa3346ef34b5568ac1d46f9 | cc37219d1f9f30ba67bbaee3f394a308c5698f51 | /brewery/ds/elasticsearch_streams.py | e7182300a32a0603489c4008baf4d3ae98a024ad | [
"LicenseRef-scancode-saas-mit",
"MIT"
] | permissive | smoothdeveloper/brewery | 5afecb8bd97780255b51d96432bd7f085db2c9a7 | 2f602fc7db22f53ff7ac1a8143981aac46d2c254 | refs/heads/master | 2021-01-18T07:53:47.091013 | 2012-04-14T22:01:11 | 2012-04-14T22:01:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,242 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import base
from brewery import dq
import time
from brewery.metadata import expand_record
try:
from pyes.es import ES
except ImportError:
from brewery.utils import MissingPackage
    ES = MissingPackage("pyes", "ElasticSearch streams", "http://www.elasticsearch.org/")  # bind the name actually used below
class ESDataSource(base.DataSource):
"""docstring for ClassName
"""
def __init__(self, document_type, database=None, host=None, port=None,
expand=False, **elasticsearch_args):
"""Creates a ElasticSearch data source stream.
:Attributes:
* document_type: elasticsearch document_type name
* database: database name
* host: elasticsearch database server host, default is ``localhost``
* port: elasticsearch port, default is ``27017``
* expand: expand dictionary values and treat children as top-level keys with dot '.'
separated key path to the child..
"""
self.document_type = document_type
self.database_name = database
self.host = host
self.port = port
self.elasticsearch_args = elasticsearch_args
self.expand = expand
self.connection = None
self._fields = None
def initialize(self):
"""Initialize ElasticSearch source stream:
"""
args = self.elasticsearch_args.copy()
server = ""
if self.host:
server = self.host
if self.port:
server += ":" + self.port
self.connection = ES(server, **args)
self.connection.default_indices = self.database_name
self.connection.default_types = self.document_type
def read_fields(self, limit=0):
keys = []
probes = {}
def probe_record(record, parent=None):
for key, value in record.items():
if parent:
full_key = parent + "." + key
else:
full_key = key
if self.expand and type(value) == dict:
probe_record(value, full_key)
continue
if not full_key in probes:
probe = dq.FieldTypeProbe(full_key)
probes[full_key] = probe
keys.append(full_key)
else:
probe = probes[full_key]
probe.probe(value)
        # self.document_type is only a type name string; scan documents with a
        # match-all search (the same query used by rows()/records() below)
        # instead of the previous .find() call, which does not exist on it.
        from pyes.query import MatchAllQuery
        results = self.connection.search(MatchAllQuery(), search_type="scan",
                                         timeout="5m", size="200")
        for i, record in enumerate(results):
            if limit and i >= limit:
                break
            probe_record(record)
fields = []
for key in keys:
probe = probes[key]
field = base.Field(probe.field)
storage_type = probe.unique_storage_type
if not storage_type:
field.storage_type = "unknown"
elif storage_type == "unicode":
field.storage_type = "string"
else:
field.storage_type = "unknown"
field.concrete_storage_type = storage_type
# FIXME: Set analytical type
fields.append(field)
self._fields = list(fields)
return self._fields
def rows(self):
if not self.connection:
raise RuntimeError("Stream is not initialized")
from pyes.query import MatchAllQuery
fields = self.field_names
results = self.connection.search(MatchAllQuery(), search_type="scan", timeout="5m", size="200")
return ESRowIterator(results, fields)
def records(self):
if not self.connection:
raise RuntimeError("Stream is not initialized")
from pyes.query import MatchAllQuery
results = self.connection.search(MatchAllQuery(), search_type="scan", timeout="5m", size="200")
return ESRecordIterator(results, self.expand)
class ESRowIterator(object):
"""Wrapper for ElasticSearch ResultSet to be able to return rows() as tuples and records() as
dictionaries"""
def __init__(self, resultset, field_names):
self.resultset = resultset
self.field_names = field_names
def __getitem__(self, index):
record = self.resultset.__getitem__(index)
array = []
for field in self.field_names:
value = record
for key in field.split('.'):
if key in value:
value = value[key]
                else:
                    value = None  # field missing from this document
                    break
array.append(value)
return tuple(array)
class ESRecordIterator(object):
"""Wrapper for ElasticSearch ResultSet to be able to return rows() as tuples and records() as
dictionaries"""
def __init__(self, resultset, expand=False):
self.resultset = resultset
self.expand = expand
def __getitem__(self, index):
def expand_record(record, parent=None):
ret = {}
for key, value in record.items():
if parent:
full_key = parent + "." + key
else:
full_key = key
if type(value) == dict:
expanded = expand_record(value, full_key)
ret.update(expanded)
else:
ret[full_key] = value
return ret
record = self.resultset.__getitem__(index)
if not self.expand:
return record
else:
return expand_record(record)
class ESDataTarget(base.DataTarget):
"""docstring for ClassName
"""
def __init__(self, document_type, database="test", host="127.0.0.1", port="9200",
truncate=False, expand=False, **elasticsearch_args):
"""Creates a ElasticSearch data target stream.
:Attributes:
        * document_type: elasticsearch document_type name
* database: database name
* host: ElasticSearch database server host, default is ``localhost``
* port: ElasticSearch port, default is ``9200``
* expand: expand dictionary values and treat children as top-level keys with dot '.'
separated key path to the child..
* truncate: delete existing data in the document_type. Default: False
"""
self.document_type = document_type
self.database_name = database
self.host = host
self.port = port
self.elasticsearch_args = elasticsearch_args
self.expand = expand
self.truncate = truncate
self._fields = None
def initialize(self):
"""Initialize ElasticSearch source stream:
"""
from pyes.es import ES
from pyes.exceptions import IndexAlreadyExistsException
args = self.elasticsearch_args.copy()
server = ""
if self.host:
server = self.host
if self.port:
server += ":" + self.port
create = args.pop("create", False)
replace = args.pop("replace", False)
self.connection = ES(server, **args)
self.connection.default_indices = self.database_name
self.connection.default_types = self.document_type
created = False
if create:
try:
self.connection.create_index(self.database_name)
self.connection.refresh(self.database_name)
created = True
except IndexAlreadyExistsException:
pass
if replace and not created:
self.connection.delete_index_if_exists(self.database_name)
time.sleep(2)
self.connection.create_index(self.database_name)
self.connection.refresh(self.database_name)
if self.truncate:
self.connection.delete_mapping(self.database_name, self.document_type)
self.connection.refresh(self.database_name)
def append(self, obj):
record = obj
if not isinstance(obj, dict):
record = dict(zip(self.field_names, obj))
if self.expand:
record = expand_record(record)
id = record.get('id') or record.get('_id')
self.connection.index(record, self.database_name, self.document_type, id, bulk=True)
def finalize(self):
self.connection.flush_bulk(forced=True)
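# Usage sketch (index and document type names are placeholders; both classes
# follow the brewery stream protocol of initialize()/rows()/append()):
#
#   src = ESDataSource("product", database="catalog", host="localhost")
#   src.initialize()
#   src.read_fields(limit=100)    # probe field names and types first
#   for row in src.rows():
#       print row
#
#   target = ESDataTarget("product", database="catalog_copy", create=True)
#   target.initialize()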
| [
"alberto@ingparo.it"
] | alberto@ingparo.it |
5954efaab0e577db693e12d13b5b277dff8f5fa3 | 836d1dc6545e60caffc7bb557e1584311a53d131 | /dice_ml/dice.py | 144523439fc7f8be305c5c0825ef4ce5d504c9c1 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | shyamalschandra/DiCE | 223f6be0c6435f9b08a4522501a0b2a6b47bd3ed | 1fcc1c927bcafb10ce1d68a11a1b070daeeacd1e | refs/heads/master | 2021-05-20T10:51:03.646302 | 2020-03-28T18:27:15 | 2020-03-28T18:27:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,694 | py | """Module pointing to different implementations of DiCE based on different frameworks such as Tensorflow or PyTorch."""
import tensorflow as tf
class Dice:
"""An interface class to different DiCE implementations."""
def __init__(self, data_interface, model_interface):
"""Init method
:param data_interface: an interface to access data related params.
:param model_interface: an interface to access the output or gradients of a trained ML model.
"""
self.decide_implementation_type(data_interface, model_interface)
def decide_implementation_type(self, data_interface, model_interface):
"""Decides DiCE implementation type."""
self.__class__ = decide(data_interface, model_interface)
self.__init__(data_interface, model_interface)
# To add new implementations of DiCE, add the class in dice_interfaces subpackage and import-and-return the class in an elif loop as shown in the below method.
def decide(data_interface, model_interface):
"""Decides DiCE implementation type."""
if model_interface.backend == 'TF1': # pretrained Keras Sequential model with Tensorflow 1.x backend
from dice_ml.dice_interfaces.dice_tensorflow1 import DiceTensorFlow1
return DiceTensorFlow1
elif model_interface.backend == 'TF2': # pretrained Keras Sequential model with Tensorflow 2.x backend
from dice_ml.dice_interfaces.dice_tensorflow2 import DiceTensorFlow2
return DiceTensorFlow2
elif model_interface.backend == 'PYT': # PyTorch backend
from dice_ml.dice_interfaces.dice_pytorch import DicePyTorch
return DicePyTorch
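# Typical wiring (sketch; Data and Model are the dice_ml interface classes
# this module expects, and the constructor arguments are placeholders):
#
#   import dice_ml
#   d = dice_ml.Data(dataframe=df, continuous_features=['age'],
#                    outcome_name='income')
#   m = dice_ml.Model(model_path='my_model.h5', backend='TF2')
#   exp = dice_ml.Dice(d, m)   # decide() resolves this to DiceTensorFlow2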
| [
"raam.arvind93@gmail.com"
] | raam.arvind93@gmail.com |
7210089d7186b085a99cad32a2ccc8dd3638694f | 03fd908d77eb4f02316ba7fb2f851f7317bba1cd | /upload/models.py | 374f87a62b5e061ab3e8bcd6a9f30d5d039abdc9 | [] | no_license | amalmajeed/miniproject-Unfinished | 110406297104766d96c421a2d5846839187bc451 | 826ea030f06fb302c62ef1fac751fff3464b5c7d | refs/heads/master | 2021-01-18T03:24:58.624529 | 2016-03-25T16:25:53 | 2016-03-25T16:25:53 | 54,554,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | from __future__ import unicode_literals
from django.db import models
class stud(models.Model):
name=models.CharField(max_length=200)
course=models.CharField(max_length=200)
branch=models.CharField(max_length=200)
admno=models.CharField(max_length=200,unique=True)
validtill=models.DateField()
dateofbirth=models.DateField()
bloodgroup=models.CharField(max_length=200)
address=models.CharField(max_length=200)
contact1=models.CharField(max_length=10)
contact2=models.CharField(max_length=10)
photo=models.FileField()
clss=models.CharField(max_length=5,blank=True,null=True)
rollno=models.CharField(max_length=5,blank=True,null=True)
def __str__(self):
return self.name
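# Shell sketch of creating a record (all field values are placeholders):
#
#   from datetime import date
#   stud.objects.create(name='Jane Doe', course='BTech', branch='CSE',
#                       admno='A1234', validtill=date(2021, 5, 31),
#                       dateofbirth=date(2000, 1, 1), bloodgroup='O+',
#                       address='...', contact1='9999999999',
#                       contact2='8888888888', photo='photos/jane.jpg')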
class faculty(models.Model):
name=models.CharField(max_length=200)
designation=models.CharField(max_length=200)
dateofbirth=models.DateField()
bloodgroup=models.CharField(max_length=200)
address=models.CharField(max_length=200)
contact=models.CharField(max_length=10)
photo=models.FileField()
def __str__(self):
return self.name
class SDesign(models.Model):
college = models.CharField(max_length=120)
cfont = models.CharField(max_length=120)
cfontsize = models.IntegerField()
addline1 = models.CharField(max_length=120)
addline2 = models.CharField(max_length=120)
addline3 = models.CharField(max_length=120)
addline4 = models.CharField(max_length=120)
addline5 = models.CharField(max_length=120)
addline1to5font = models.CharField(max_length=120)
addline1to5fontsize = models.IntegerField()
detfont = models.CharField(max_length=120)
detfontsize = models.IntegerField()
clogo = models.ImageField(null=True, blank=True)
ilogo = models.ImageField(null=True, blank=True)
psign = models.ImageField(null=True, blank=True)
bdesign = models.ImageField(null=True, blank=True)
def __unicode__(self):
return self.college
def __str__(self):
return self.college
class FDesign(models.Model):
college = models.CharField(max_length=120)
cfont = models.CharField(max_length=120)
cfontsize = models.IntegerField()
addline1 = models.CharField(max_length=120)
addline1font = models.CharField(max_length=120)
addline1fontsize = models.IntegerField()
addline2 = models.CharField(max_length=120)
addline3 = models.CharField(max_length=120)
addline4 = models.CharField(max_length=120)
addline5 = models.CharField(max_length=120)
addline2to5font = models.CharField(max_length=120)
addline2to5fontsize = models.IntegerField()
detfont = models.CharField(max_length=120)
detfontsize = models.IntegerField()
ilogo = models.ImageField(null=True, blank=True)
psign = models.ImageField(null=True, blank=True)
bdesign = models.ImageField(null=True, blank=True)
def __unicode__(self):
return self.college
def __str__(self):
return self.college
| [
"amalmajeed7@gmail.com"
] | amalmajeed7@gmail.com |
267e6ab9cbcd602600329dc5b581c85af97604e9 | 0043ff2759ea31711c80483ed2ed87f2bef013b0 | /magic/migrations/0014_auto_20190512_2007.py | 7d5cae395b49e3054cad476b1906586950573462 | [] | no_license | ialame/djangoEbay7 | 65ca49bffe2abc9c7731e1420ea822c81baf758c | 303d9d627fb1a75ef659d4fa4f7e1103b58632a0 | refs/heads/master | 2020-05-22T13:59:07.836151 | 2019-05-13T07:57:54 | 2019-05-13T07:57:54 | 186,371,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | # Generated by Django 2.2.1 on 2019-05-12 20:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('magic', '0013_auto_20190512_2006'),
]
operations = [
migrations.AddField(
model_name='extension',
name='baseSetSize',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='extension',
name='nbCartes',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='extension',
name='nom',
field=models.TextField(default=0, max_length=120),
),
migrations.AddField(
model_name='extension',
name='nomDossier',
field=models.TextField(default=0, max_length=120),
),
migrations.AddField(
model_name='extension',
name='nomFR',
field=models.TextField(default=0, max_length=120),
),
migrations.AddField(
model_name='extension',
name='nomRaccourci',
field=models.TextField(default=0, max_length=120),
),
migrations.AddField(
model_name='extension',
name='nomUS',
field=models.TextField(default=0, max_length=120),
),
]
| [
"ibrahimalame@iMac-de-Ibrahim.local"
] | ibrahimalame@iMac-de-Ibrahim.local |
7058f2f37989ff337436d6ecf89c51ed574d82ee | 7f33c02743fbfd18726ffef08924f528354372dd | /Python_Projects/python3_selfstudy/priklady_z_knihy/k04/digit_names.py | 1167be7a4ec6d9440194cb8be9928866a345010e | [] | no_license | zabojnikp/study | a524eb9c2265a73e1db0b5f0e76b359c123a397b | 43424bfc6641cd8fa13ab119ce283fb460b4ffc1 | refs/heads/master | 2020-04-06T14:21:55.786353 | 2018-11-27T22:10:48 | 2018-11-27T22:10:48 | 157,538,244 | 0 | 0 | null | 2018-11-27T22:10:49 | 2018-11-14T11:24:20 | Python | UTF-8 | Python | false | false | 1,390 | py | #!/usr/bin/env python3
# Copyright (c) 2008-9 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import sys
Language = "cs"
ENGLISH = {0: "zero", 1: "one", 2: "two", 3: "three", 4: "four",
5: "five", 6: "six", 7: "seven", 8: "eight", 9: "nine"}
CZECH = {0: "nula", 1: "jedna", 2: "dvě", 3: "tři", 4: "čtyři",
5: "pět", 6: "šest", 7: "sedm", 8: "osm", 9: "devět"}
def main():
if len(sys.argv) == 1 or sys.argv[1] in {"-h", "--help"}:
print("použití: {0} [en|cs] číslo".format(sys.argv[0]))
sys.exit()
args = sys.argv[1:]
if args[0] in {"en", "cs"}:
global Language
Language = args.pop(0)
print_digits(args.pop(0))
def print_digits(digits):
dictionary = ENGLISH if Language == "en" else CZECH
for digit in digits:
print(dictionary[int(digit)], end=" ")
print()
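# Example run (language defaults to Czech):
#   $ python3 digit_names.py cs 307
#   tři nula sedm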
main()
| [
"zabojnikova.petra@gmail.com"
] | zabojnikova.petra@gmail.com |
ca6c8528b0f01a18a40ee1adcd9ade945136db6d | 4ff4d0a84eb3a63e5a7b685119c87aed4ac2382f | /lab/lab01/lab01.py | 43a167abf3504d62e27f1212a1a0107367dbf523 | [
"Apache-2.0"
] | permissive | MessiahChen/CS61A | 7743b99ceee98ea10c93d84ac47cbb46b39e7951 | 2552a08086323561dbb0d13a9550b7ccbebb6fc7 | refs/heads/master | 2020-08-23T00:17:47.863013 | 2020-07-25T13:56:56 | 2020-07-25T13:56:56 | 216,504,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | """Lab 1: Expressions and Control Structures"""
def both_positive(x, y):
"""Returns True if both x and y are positive.
>>> both_positive(-1, 1)
False
>>> both_positive(1, 1)
True
"""
return x>0 and y > 0 # You can replace this line!
def sum_digits(n):
"""Sum all the digits of n.
>>> sum_digits(10) # 1 + 0 = 1
1
>>> sum_digits(4224) # 4 + 2 + 2 + 4 = 12
12
>>> sum_digits(1234567890)
45
>>> x = sum_digits(123) # make sure that you are using return rather than print
>>> x
6
"""
"*** YOUR CODE HERE ***"
sum = 0
while n:
sum += n%10
n //= 10
return sum | [
"361733357@qq.com"
] | 361733357@qq.com |
4045d9c0e8eb149d14cb7dfaee23209a400ff120 | ea2abf208442ef99603c80ce3fd3b479606f9547 | /cm/register_cm.py | 0ff6d759f18b9c66ca78a48ee960ea93eae00798 | [
"Apache-2.0"
] | permissive | HotMaps/renovation_scenarios | e8aabddd72313121926c8551237a4e55047fe825 | 6860897227ee74f3e76c81ab7932937a259b8044 | refs/heads/master | 2022-05-02T03:27:51.207929 | 2019-01-16T14:00:36 | 2019-01-16T14:00:36 | 175,600,348 | 0 | 0 | NOASSERTION | 2021-03-29T18:59:17 | 2019-03-14T10:33:05 | Python | UTF-8 | Python | false | false | 1,001 | py | #!/usr/bin/env python
import logging
import json
import time
from app.api_v1.transactions import register
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
def start_runner():
def start_loop():
not_started = True
i = 0
while not_started:
LOGGER.info('In start loop')
response = register()
LOGGER.info("[HTAPI] register response: %s ", response)
LOGGER.info('Server not yet started')
i=i+1
LOGGER.info('count = %s',str(i))
try:
json.loads(response)
LOGGER.info('Server started, quiting start_loop')
not_started = False
            except (TypeError, ValueError):
LOGGER.info('Server not yet started')
time.sleep(2)
print('Started runner')
start_loop()
if __name__ == '__main__':
start_runner()
| [
"lesly.houndole@crem.ch"
] | lesly.houndole@crem.ch |
7d5a843b7ccd6ec857e04b021fd7d03a8f19f117 | aa51504503fc039f504b3421003e36ac974cacea | /3. HTTP Protocol Exploit/http_system_inspection.py | ae5b4524ecde7f00528577ebe25170ec095cb4e7 | [] | no_license | gasbugs/PySchool01-PythonHackingProgramming | 25701e6e4dc2d8ca8ca67d982cbaffc85dc95869 | 8bd71ce0701e8c01bd03f76a6ffae41dac849427 | refs/heads/master | 2021-08-24T06:30:55.026890 | 2017-12-08T11:44:17 | 2017-12-08T11:44:17 | 113,568,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | import requests, copy
host = "http://172.30.1.8"
uri = "/changeuser.ghp"
org_headers = {
"User-Agent" : "Mozilla/4.0",
"Host" : host.split("://")[1],
"Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language" : "en-us",
"Accept-Encoding" : "gzip, deflate",
"Referer": host,
"Conection": "Keep-Alive"
}
org_cookies = {
"SESSIONID":"6771",
"UserID":"id",
"PassWD":"password"
}
payload = "A" * 4528
for key in list(org_headers.keys()):
print("Header",key, end=": ")
try:
headers = copy.deepcopy(org_headers)
headers[key] = payload
res = requests.get(host + uri, headers=headers, cookies=org_cookies)
print(": Good!")
except Exception as e:
print(e[:10])
for key in list(org_cookies.keys()):
print("Cookie",key, end=": ")
try:
cookies = copy.deepcopy(org_cookies)
cookies[key] = payload
res = requests.get(host + uri, headers=org_headers, cookies=cookies)
print(": Good!")
except Exception as e:
print(e[:10])
| [
"isc0304@naver.com"
] | isc0304@naver.com |
e658bf448865024182e1f4fcc107da7498d979d6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_sponging.py | 3002cc59058a485eaef0fe654fc8482c1eaff6ca | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
#calss header
class _SPONGING():
def __init__(self,):
self.name = "SPONGING"
self.definitions = sponge
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['sponge']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
1202d3042e2eaac961b709774bc2bb25241c57a9 | 8b5950752c10ae00c90a0aab50190640ab4bd946 | /pyramid_restful/routers.py | 2f5f7f191ec3afc32437bdba26e690f6d7387a56 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | sjschmidt44/pyramid-restful-framework | 18aae207e56db4baca13c2ffdb88b01e66a3949a | d0098a947474223a99b595c4002993cc9c12c837 | refs/heads/master | 2020-03-18T13:10:21.350297 | 2018-05-24T20:29:06 | 2018-05-24T20:29:06 | 134,765,135 | 0 | 0 | null | 2018-05-24T20:26:31 | 2018-05-24T20:26:30 | null | UTF-8 | Python | false | false | 7,602 | py | import itertools
from collections import namedtuple
from .exceptions import ImproperlyConfigured
__all__ = ['ViewSetRouter']
Route = namedtuple('Route', ['url', 'mapping', 'name', 'initkwargs'])
DynamicDetailRoute = namedtuple('DynamicDetailRoute', ['url', 'name', 'initkwargs'])
DynamicListRoute = namedtuple('DynamicListRoute', ['url', 'name', 'initkwargs'])
def replace_methodname(format_string, methodname):
"""
Partially format a format_string, swapping out any '{methodname}' or '{methodnamehyphen}' components.
"""
methodnamehyphen = methodname.replace('_', '-')
ret = format_string
ret = ret.replace('{methodname}', methodname)
ret = ret.replace('{methodnamehyphen}', methodnamehyphen)
return ret
def flatten(list_of_lists):
"""
Takes an iterable of iterables, returns a single iterable containing all items
"""
return itertools.chain(*list_of_lists)
class ViewSetRouter:
"""
Automatically adds routes and associates views to the Pyramid ``Configurator`` for ViewSets, including
any decorated ``list_routes`` and ``detail_routes``.
"""
routes = [
# List route.
Route(
url=r'/{prefix}{trailing_slash}',
mapping={
'get': 'list',
'post': 'create'
},
name='{basename}-list',
initkwargs=dict()
),
# Dynamically generated list routes. Generated using @list_route decorator on methods of the viewset.
DynamicListRoute(
url=r'/{prefix}/{methodname}{trailing_slash}',
name='{basename}-{methodnamehyphen}',
initkwargs={}
),
# Detail route.
Route(
url=r'/{prefix}/{lookup}{trailing_slash}',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
initkwargs=dict()
),
# Dynamically generated detail routes. Generated using @detail_route decorator on methods of the viewset.
DynamicDetailRoute(
url=r'/{prefix}/{lookup}/{methodname}{trailing_slash}',
name='{basename}-{methodnamehyphen}',
initkwargs={}
),
]
def __init__(self, configurator, trailing_slash=True):
"""
:param configurator: pyramid Configurator
:return: void
"""
self.configurator = configurator
self.trailing_slash = trailing_slash and '/' or ''
self.registry = list()
def register(self, prefix, viewset, basename, factory=None, permission=None):
"""
Factory and permission are likely only going to exist until I have enough time
to write a permissions module for PRF.
:param prefix: the uri route prefix.
:param viewset: The ViewSet class to route.
:param basename: Used to name the route in pyramid.
:param factory: Optional, root factory to be used as the context to the route.
:param permission: Optional, permission to assign the route.
"""
lookup = self.get_lookup(viewset)
routes = self.get_routes(viewset)
for route in routes:
# Only actions which actually exist on the viewset will be bound
mapping = self.get_method_map(viewset, route.mapping)
if not mapping:
continue # empty viewset
url = route.url.format(
prefix=prefix,
lookup=lookup,
trailing_slash=self.trailing_slash
)
view = viewset.as_view(mapping, **route.initkwargs)
name = route.name.format(basename=basename)
if factory:
self.configurator.add_route(name, url, factory=factory)
else:
self.configurator.add_route(name, url)
self.configurator.add_view(view, route_name=name, permission=permission)
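    # Typical wiring inside a Pyramid app (UserViewSet is a hypothetical
    # ViewSet subclass):
    #
    #   config = Configurator()
    #   router = ViewSetRouter(config)
    #   router.register('users', UserViewSet, basename='user')
    #
    # This mounts /users/ (list, create) and /users/{id}/ (retrieve, update,
    # partial_update, destroy), plus any @detail_route/@list_route methods.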
def get_routes(self, viewset):
"""
Augment `self.routes` with any dynamically generated routes.
Returns a list of the Route namedtuple.
"""
known_actions = list(flatten([route.mapping.values() for route in self.routes if isinstance(route, Route)]))
# Determine any `@detail_route` or `@list_route` decorated methods on the viewset
detail_routes = []
list_routes = []
for methodname in dir(viewset):
attr = getattr(viewset, methodname)
httpmethods = getattr(attr, 'bind_to_methods', None)
detail = getattr(attr, 'detail', True)
if httpmethods:
# check against know actions list
if methodname in known_actions:
                    raise ImproperlyConfigured('Cannot use @detail_route or @list_route '
                                               'decorators on method "{0}" '
                                               'as it is an existing route'.format(methodname))
httpmethods = [method.lower() for method in httpmethods]
if detail:
detail_routes.append((httpmethods, methodname))
else:
list_routes.append((httpmethods, methodname))
def _get_dynamic_routes(route, dynamic_routes):
ret = []
for httpmethods, methodname in dynamic_routes:
method_kwargs = getattr(viewset, methodname).kwargs
initkwargs = route.initkwargs.copy()
initkwargs.update(method_kwargs)
url_path = initkwargs.pop("url_path", None) or methodname
ret.append(Route(
url=replace_methodname(route.url, url_path),
mapping={httpmethod: methodname for httpmethod in httpmethods},
name=replace_methodname(route.name, url_path),
initkwargs=initkwargs,
))
return ret
ret = []
for route in self.routes:
if isinstance(route, DynamicDetailRoute):
# Dynamic detail routes (@detail_route decorator)
ret += _get_dynamic_routes(route, detail_routes)
elif isinstance(route, DynamicListRoute):
# Dynamic list routes (@list_route decorator)
ret += _get_dynamic_routes(route, list_routes)
else:
# Standard route
ret.append(route)
return ret
def get_lookup(self, viewset):
base_regex = '{%s}'
lookup_field = getattr(viewset, 'lookup_field', 'id')
lookup_url_kwargs = getattr(viewset, 'lookup_url_kwargs', None)
if lookup_url_kwargs:
lookup_url_keys = list(lookup_url_kwargs)
if len(lookup_url_keys) > 1:
raise ImproperlyConfigured('ViewSetRouter does not support nested routes.')
lookup_url_kwarg = lookup_url_keys[0]
else:
lookup_url_kwarg = lookup_field
return base_regex % lookup_url_kwarg
def get_method_map(self, viewset, method_map):
"""
Given a viewset, and a mapping of http methods to actions, return a new mapping which only
includes any mappings that are actually implemented by the viewset.
"""
bound_methods = {}
for method, action in method_map.items():
if hasattr(viewset, action):
bound_methods[method] = action
return bound_methods
| [
"danpoland84@gmail.com"
] | danpoland84@gmail.com |
4b64459424dc5911815eba2ab4a4c279f47f53b4 | 5d8414fcdc20acb352077d081bfddfe00b0e0829 | /Implementation/command_line/insert_new_deliveryStock.py | ec1b6bfbb4518a1491b0332205d9cd06fbbe8e3c | [] | no_license | 31062/COMP4Coursework | 652c32b34cc22d965261cbe58ef7e549f3ac6537 | 7b974f717a57b2e66eec0fd59e0c0ff940f9ab86 | refs/heads/master | 2021-01-22T13:31:47.558926 | 2015-04-29T09:28:18 | 2015-04-29T09:28:18 | 24,451,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,579 | py | import sqlite3
def insert_deliverystock_data(values):
#open/create new database
with sqlite3.connect("pub_stock.db") as db:
#make the cursor
cursor = db.cursor()
#create sql
sql = """insert into DeliveryStock(DeliveryID,StockID) values(?,?)"""
cursor.execute(sql,values)
db.commit()
def insert_deliverystock_main():
with sqlite3.connect("pub_stock.db") as db:
cursor = db.cursor()
cursor.execute("select DeliveryID,DeliveryTimeDate from Delivery")
delivery = cursor.fetchall()
cursor.execute("select StockID from Stock")
stock = cursor.fetchall()
check = False
while not check:
try:
print("{0:<6}".format("ID"))
for each in delivery:
print("{0:<6}".format(each[0]))
print()
deliveryID = int(input("deliveryID: "))
check = True
except ValueError:
print("datatype error")
check = False
check = False
while not check:
try:
print()
print("{0:<6}".format("ID"))
for each_1 in stock:
print("{0:<6}".format(each_1[0]))
print()
stockID = int(input("stockID: "))
check = True
except ValueError:
print("datatype error")
check = False
d_stock = (deliveryID,stockID)
insert_deliverystock_data(d_stock)
if __name__ == "__main__":
insert_deliverystock_main()
| [
"31062@longroad.ac.uk"
] | 31062@longroad.ac.uk |
c3184ea5650eb177afb8206df2c615c1d7a52517 | 99e88a8c8b7f252ccbc5a694392cd0f1220e6b8c | /make_random.py | 27fd517556d7ef9c5382a551cef12975b4c83ea5 | [] | no_license | nitishjain2007/Author_Verification | 6a143a7dff11b8b78d22c82cd5c8f095bb54c23f | c13d40c10488e6c99df5423beb155fad9f792b4e | refs/heads/master | 2021-01-10T05:44:04.312269 | 2016-03-01T22:55:17 | 2016-03-01T22:55:17 | 52,915,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | import os, sys
import random
import shutil
import sys
reload(sys)
path = os.getcwd()
data_file = path + "/Data/Dataset/data.txt"
label_file = path + "/Data/Dataset/lables.txt"
data = [line.rstrip('\n') for line in open(data_file)]
labels = [line.rstrip('\n') for line in open(label_file)]
os.mkdir(path + "/Data/Dataset/RandomDataset")
for i in range(0,len(data)):
data[i] = data[i] + str(i+1)
for j in range(0,200):
os.mkdir(path + "/Data/Dataset/RandomDataset/set" + str(j+1))
os.chdir(path + "/Data/Dataset/RandomDataset/set" + str(j+1))
random.shuffle(data)
fo = open("data.txt", "wb")
for line in data:
data_p = line.split(" ")
for i in data_p[0:-1]:
fo.write(i + " ")
fo.write("\n")
fo.close()
fo = open("lables.txt", "wb")
for line in data:
data_p = line.split(" ")
fo.write(labels[int(data_p[-1])-1] + "\n")
fo.close()
fo = open("mapping.txt", "wb")
for line in data:
data_p = line.split(" ")
fo.write(data_p[-1] + "\n")
fo.close()
| [
"nitishjain2007@gmail.com"
] | nitishjain2007@gmail.com |
886d0997531024402470f77c621befe7e97b1e63 | c64d6d1fce81212965b1df7d4f4d4e72f218243e | /dot.py | 4b4b054e57b178f594ca931bb299f515610ea5a3 | [] | no_license | saipoojavr/saipoojacodekata | 384b0aa19f29c8b66f9498ebfbdf162cda4ddc97 | 3e38f1f06c4348f4262f654526c86d64e6893029 | refs/heads/master | 2020-05-23T01:01:54.615085 | 2019-12-06T15:29:54 | 2019-12-06T15:29:54 | 186,580,757 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | astr=str(input())
print(astr+'.') | [
"noreply@github.com"
] | noreply@github.com |
99d2820ce15d04ac923852d6a481ebf37cd75680 | 145991b6ba5ff52d3f519ad262233804a61068f3 | /transformer/pre_warm.py | 8cd2a82936d7864edf65cb568652bd4d10245653 | [] | no_license | FuPeiJiang/summarization | 21a99703d3a89589863d297ffb6f748deb862d25 | d3afd14bf257ccbff2fae2077e8df87e706de4c1 | refs/heads/master | 2023-02-19T20:24:29.701764 | 2021-01-17T14:52:20 | 2021-01-17T14:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,525 | py | import torch
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForTokenClassification
tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
summarization_model = AutoModelForSeq2SeqLM.from_pretrained("sshleifer/distilbart-cnn-12-6")
ner_model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
ner_tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
ARTICLE = """
Fisheries and Oceans Canada conducted an ecosystem-based survey from October 6-16,
2020 on the CCGS Sir John Franklin. This study targeted juvenile Pacific Salmon from Queen
Charlotte Sound to Dixon Entrance.
There were 26 species sampled in 1741 kg of catch, with 25% Pacific Salmon caught by weight.
Overall, Opalescent Inshore Squid (45%) and Moon Jellyfish (26%) were the most abundant
species by weight. Lengths and weights were recorded for 17 species, including all 5 Pacific
Salmon species (Oncorhynchus spp.). Juvenile Chum Salmon were the most abundant Pacific
Salmon species with large catches, particularly in Hecate Strait, and only 2% containing empty
stomachs. Pink Salmon were the most widespread species, whereas juvenile Sockeye Salmon
were localized in northern Dixon Entrance. Both juvenile Pink Salmon and Sockeye Salmon were
primarily feeding on euphausiids and amphipods. Juvenile Coho Salmon were less abundant
and were caught in Dixon Entrance and Hecate Strait, to a lesser amount. Juvenile Coho Salmon
had 47% empty stomachs and the widest variety of prey in their stomach contents. Only three
Chinook Salmon were caught. The only Chinook Salmon with stomach contents contained
Squid.
Biological samples for genetic stock composition and energy density are at the Pacific Biological
Station, Fisheries and Oceans Canada (Nanaimo, BC) for laboratory analysis. Associated
information on the physical oceanography and zooplankton composition was collected from
21 stations, and will be analysed at the the Institute of Ocean Sciences, Fisheries and Oceans
Canada (Sidney, BC).
In addition, gear optimization occurred for the 2022 Pan-Pacific High Seas Expedition. The
protocols for the offshore LFS 1142 trawl net, MOCNESS (Multiple Opening and Closing Net
with Environmental Sensing System), oblique zooplankton tows, and CTD rosette deployment
using the Launch and Recovery System (LARS) were tested.
"""
summarizer = pipeline("summarization", model=summarization_model, tokenizer=tokenizer)
print(summarizer(ARTICLE, max_length=4096, min_length=30, do_sample=False))
| [
"j.zhao2k19@gmail.com"
] | j.zhao2k19@gmail.com |
231dea914b42c030f614204b1d325eac9b984313 | 9271eb221f162963b2898474b002141b69146f7c | /tor/base/DieRollResult.py | 78b607258328856df8a148bdf9d4d25779e2f7e1 | [] | no_license | MathiasGartner/TOR | 15215d487ba39da9f0add7281dfe6e32ee296e2a | 040b3a5280d59965d98b97fee1719e519cbef2fd | refs/heads/master | 2023-06-24T00:38:49.649247 | 2023-06-19T20:17:43 | 2023-06-19T20:17:43 | 254,355,284 | 0 | 0 | null | 2020-04-15T08:42:21 | 2020-04-09T11:41:22 | Python | UTF-8 | Python | false | false | 381 | py |
class DieRollResult:
def __init__(self, found=False, result=-1, position=None):
self.found = found
self.result = result
self.position = position
def __str__(self):
return "<DieRollResult: (found={}, result={}, position={})>".format(self.found, self.result, self.position)
def __repr__(self):
return self.__str__() | [
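# Illustrative usage (assumed, not part of the original module):
#   roll = DieRollResult(found=True, result=4, position=(120, 80))
#   print(roll)  # <DieRollResult: (found=True, result=4, position=(120, 80))>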
"mathiasgartner@gmx.at"
] | mathiasgartner@gmx.at |
3cbcf3f2726f69789da88a8cad54a19b8a78d701 | 5dd95ea351025267628c17ec11429e7e5b92d431 | /ksp_mission_logger/settings.py | 68f63c458512e4d98414d1ff46241bdb30bdf0d7 | [
"MIT"
] | permissive | DeepSpace2/ksp-mission-logger | d2e35d7d4f951f55f84f45ecfd2dbeef50a46f23 | ab5ce4fc4fd81d7fb69b9a1c3a3cad23949387df | refs/heads/master | 2020-12-31T04:42:30.653607 | 2016-05-13T11:20:53 | 2016-05-13T11:20:53 | 58,260,951 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,438 | py | """
Django settings for ksp_mission_logger project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Insert your root directory between the ''. Make sure you don't have a trailing slash
ROOT_KSP_PATH = r'D:\Program Files\Steam\SteamApps\common\Kerbal Space Program'
assert ROOT_KSP_PATH
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'we(f_c3d%)^tdcc!b@aoj*l=$h-%xp=!$5ra+@90u#rl(3xeks'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'missionlogger.apps.MissionloggerConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ksp_mission_logger.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ksp_mission_logger.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"deepspace2@users.noreply.github.com"
] | deepspace2@users.noreply.github.com |
4e20a61a6a684ff2060af7de95bbd5e9ab90b192 | f7a3a997b18a4d96497cdbd4a95c15e6d14fb1c8 | /htc/__init__.py | c1b18b627082cea851e7c596c61459183ef27f75 | [
"MIT"
] | permissive | IMSY-DKFZ/htc | 758f6bde332326aeca0e95b3aa1835c29a0b1962 | c73eeaf785c5cd74ec62022e70c94cece7d1eb80 | refs/heads/main | 2023-08-17T08:18:34.837275 | 2023-08-02T08:56:37 | 2023-08-02T08:56:37 | 495,827,768 | 14 | 4 | NOASSERTION | 2023-09-11T05:14:03 | 2022-05-24T13:08:29 | Jupyter Notebook | UTF-8 | Python | false | false | 12,901 | py | # SPDX-FileCopyrightText: 2022 Division of Intelligent Medical Systems, DKFZ
# SPDX-License-Identifier: MIT
import importlib
import sys
from typing import TYPE_CHECKING
from lazy_imports import LazyImporter
# Cannot be imported lazily because they would be misinterpreted as a module by LazyImporter
from htc.settings import settings
from htc.settings_seg import settings_seg
# If you want to add new imports to this file, please add them to both the _import_structure dict and the TYPE_CHECKING block below
# We lazy-load all htc functionality into the htc namespace (e.g. so that users can write from htc import DataPath)
# A direct import (like the one guarded by TYPE_CHECKING below) would be slow since we have many dependencies, and it would load all of them even if not needed
# This way, the import cost is only paid when the functionality is actually needed
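# e.g. `from htc import DataPath` only triggers the import of htc.tivita.DataPath (and its
# dependencies) at first access, not the import of every module listed below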
_import_structure = {
"cpp": [
"hierarchical_bootstrapping",
"kfold_combinations",
"map_label_image",
"nunique",
"segmentation_mask",
"spxs_predictions",
"tensor_mapping",
],
"evaluation": ["evaluation"], # Also make modules itself importable
"evaluation.analyze_tfevents": ["read_tfevent_losses"],
"evaluation.evaluate_images": ["calc_dice_metric", "calc_surface_dice", "calc_surface_distance", "evaluate_images"],
"evaluation.evaluate_superpixels": ["EvaluateSuperpixelImage"],
"evaluation.metrics.ECELoss": ["ECELoss"],
"evaluation.metrics.scores": ["normalize_grouped_cm"],
"fonts": ["fonts"],
"fonts.set_font": ["set_font"],
"model_processing": ["model_processing"],
"model_processing.ImageConsumer": ["ImageConsumer"],
"model_processing.Runner": ["Runner"],
"model_processing.TestLeaveOneOutPredictor": ["TestLeaveOneOutPredictor"],
"model_processing.TestPredictor": ["TestPredictor"],
"model_processing.ValidationPredictor": ["ValidationPredictor"],
"models": ["models"],
"models.common.class_weights": ["calculate_class_weights"],
"models.common.distance_correlation": ["distance_correlation", "distance_correlation_features"],
"models.common.ForwardHookPromise": ["ForwardHookPromise"],
"models.common.HierarchicalSampler": ["HierarchicalSampler"],
"models.common.HTCDataset": ["HTCDataset"],
"models.common.HTCDatasetStream": ["HTCDatasetStream"],
"models.common.HTCLightning": ["HTCLightning"],
"models.common.HTCModel": ["HTCModel"],
"models.common.MetricAggregation": ["MetricAggregation"],
"models.common.StreamDataLoader": ["StreamDataLoader"],
"models.common.torch_helpers": [
"FlexibleIdentity",
"copy_sample",
"cpu_only_tensor",
"move_batch_gpu",
"pad_tensors",
"smooth_one_hot",
"str_to_dtype",
],
"models.common.utils": ["get_n_classes", "infer_swa_lr", "samples_equal"],
"models.data.DataSpecification": ["DataSpecification"],
"models.data.SpecsGeneration": ["SpecsGeneration"],
"models.image.DatasetImage": ["DatasetImage"],
"models.image.DatasetImageBatch": ["DatasetImageBatch"],
"models.image.DatasetImageStream": ["DatasetImageStream"],
"models.image.LightningImage": ["LightningImage"],
"models.image.ModelImage": ["ModelImage"],
"models.patch.DatasetPatchImage": ["DatasetPatchImage"],
"models.patch.DatasetPatchStream": ["DatasetPatchStream"],
"models.patch.LightningPatch": ["LightningPatch"],
"models.pixel.DatasetPixelStream": ["DatasetPixelStream"],
"models.pixel.LightningPixel": ["LightningPixel"],
"models.pixel.ModelPixel": ["ModelPixel"],
"models.pixel.ModelPixelRGB": ["ModelPixelRGB"],
"models.superpixel_classification.DatasetSuperpixelImage": ["DatasetSuperpixelImage"],
"models.superpixel_classification.DatasetSuperpixelStream": ["DatasetSuperpixelStream"],
"models.superpixel_classification.LightningSuperpixelClassification": ["LightningSuperpixelClassification"],
"models.superpixel_classification.ModelSuperpixelClassification": ["ModelSuperpixelClassification"],
"tissue_atlas": ["tissue_atlas"],
"tissue_atlas.median_pixel.DatasetMedianPixel": ["DatasetMedianPixel"],
"tissue_atlas.median_pixel.LightningMedianPixel": ["LightningMedianPixel"],
"tissue_atlas.MetricAggregationClassification": ["MetricAggregationClassification"],
"tissue_atlas.settings_atlas": ["settings_atlas"],
"tivita": ["tivita"],
"tivita.colorscale": ["tivita_colorscale"],
"tivita.DataPath": ["DataPath"],
"tivita.DatasetSettings": ["DatasetSettings"],
"tivita.hsi": ["read_tivita_dark", "read_tivita_hsi", "tivita_wavelengths"],
"tivita.metadata": ["generate_metadata_table", "read_meta_file"],
"tivita.rgb": ["hsi_to_rgb", "read_tivita_rgb"],
"utils": ["utils"],
"utils.AdvancedJSONEncoder": ["AdvancedJSONEncoder"],
"utils.blosc_compression": ["compress_file", "decompress_file"],
"utils.ColorcheckerReader": ["ColorcheckerReader"],
"utils.ColoredFileLog": ["ColoredFileLog"],
"utils.colors": ["generate_distinct_colors"],
"utils.Config": ["Config"],
"utils.DatasetDir": ["DatasetDir"],
"utils.DelayedFileHandler": ["DelayedFileHandler"],
"utils.DomainMapper": ["DomainMapper"],
"utils.DuplicateFilter": ["DuplicateFilter"],
"utils.general": [
"apply_recursive",
"clear_directory",
"merge_dicts_deep",
"safe_copy",
"sha256_file",
"subprocess_run",
],
"utils.helper_functions": [
"basic_statistics",
"group_median_spectra",
"median_table",
"sort_labels",
"sort_labels_cm",
"utilization_table",
],
"utils.LabelMapping": ["LabelMapping"],
"utils.LDA": ["LDA"],
"utils.MeasureTime": ["MeasureTime"],
"utils.MultiPath": ["MultiPath"],
"utils.parallel": ["p_imap", "p_map"],
"utils.SpectrometerReader": ["SpectrometerReader"],
"utils.sqldf": ["sqldf"],
"utils.type_from_string": ["type_from_string"],
"utils.unify_path": ["unify_path"],
"utils.visualization": [
"add_std_fill",
"compress_html",
"create_class_scores_figure",
"create_confusion_figure",
"create_confusion_figure_comparison",
"create_ece_figure",
"create_overview_document",
"create_running_metric_plot",
"create_segmentation_overlay",
"create_surface_dice_plot",
"create_training_stats_figure",
"prediction_figure_html",
"show_loss_chart",
"visualize_dict",
],
}
if TYPE_CHECKING:
from htc.cpp import (
hierarchical_bootstrapping,
kfold_combinations,
map_label_image,
nunique,
segmentation_mask,
spxs_predictions,
tensor_mapping,
)
from htc.evaluation.analyze_tfevents import read_tfevent_losses
from htc.evaluation.evaluate_images import (
calc_dice_metric,
calc_surface_dice,
calc_surface_distance,
evaluate_images,
)
from htc.evaluation.evaluate_superpixels import EvaluateSuperpixelImage
from htc.evaluation.metrics.ECELoss import ECELoss
from htc.evaluation.metrics.scores import normalize_grouped_cm
from htc.fonts.set_font import set_font
from htc.model_processing.ImageConsumer import ImageConsumer
from htc.model_processing.Runner import Runner
from htc.model_processing.TestLeaveOneOutPredictor import TestLeaveOneOutPredictor
from htc.model_processing.TestPredictor import TestPredictor
from htc.model_processing.ValidationPredictor import ValidationPredictor
from htc.models.common.class_weights import calculate_class_weights
from htc.models.common.distance_correlation import distance_correlation, distance_correlation_features
from htc.models.common.ForwardHookPromise import ForwardHookPromise
from htc.models.common.HierarchicalSampler import HierarchicalSampler
from htc.models.common.HTCDataset import HTCDataset
from htc.models.common.HTCDatasetStream import HTCDatasetStream
from htc.models.common.HTCLightning import HTCLightning
from htc.models.common.HTCModel import HTCModel
from htc.models.common.MetricAggregation import MetricAggregation
from htc.models.common.StreamDataLoader import StreamDataLoader
from htc.models.common.torch_helpers import (
FlexibleIdentity,
copy_sample,
cpu_only_tensor,
move_batch_gpu,
pad_tensors,
smooth_one_hot,
str_to_dtype,
)
from htc.models.common.utils import get_n_classes, infer_swa_lr, samples_equal
from htc.models.data.DataSpecification import DataSpecification
from htc.models.data.SpecsGeneration import SpecsGeneration
from htc.models.image.DatasetImage import DatasetImage
from htc.models.image.DatasetImageBatch import DatasetImageBatch
from htc.models.image.DatasetImageStream import DatasetImageStream
from htc.models.image.LightningImage import LightningImage
from htc.models.image.ModelImage import ModelImage
from htc.models.patch.DatasetPatchImage import DatasetPatchImage
from htc.models.patch.DatasetPatchStream import DatasetPatchStream
from htc.models.patch.LightningPatch import LightningPatch
from htc.models.pixel.DatasetPixelStream import DatasetPixelStream
from htc.models.pixel.LightningPixel import LightningPixel
from htc.models.pixel.ModelPixel import ModelPixel
from htc.models.pixel.ModelPixelRGB import ModelPixelRGB
from htc.models.superpixel_classification.DatasetSuperpixelImage import DatasetSuperpixelImage
from htc.models.superpixel_classification.DatasetSuperpixelStream import DatasetSuperpixelStream
from htc.models.superpixel_classification.LightningSuperpixelClassification import LightningSuperpixelClassification
from htc.models.superpixel_classification.ModelSuperpixelClassification import ModelSuperpixelClassification
from htc.tissue_atlas.median_pixel.DatasetMedianPixel import DatasetMedianPixel
from htc.tissue_atlas.median_pixel.LightningMedianPixel import LightningMedianPixel
from htc.tissue_atlas.MetricAggregationClassification import MetricAggregationClassification
from htc.tissue_atlas.settings_atlas import settings_atlas
from htc.tivita.colorscale import tivita_colorscale
from htc.tivita.DataPath import DataPath
from htc.tivita.DatasetSettings import DatasetSettings
from htc.tivita.hsi import read_tivita_dark, read_tivita_hsi, tivita_wavelengths
from htc.tivita.metadata import generate_metadata_table, read_meta_file
from htc.tivita.rgb import hsi_to_rgb, read_tivita_rgb
from htc.utils.AdvancedJSONEncoder import AdvancedJSONEncoder
from htc.utils.blosc_compression import compress_file, decompress_file
from htc.utils.ColorcheckerReader import ColorcheckerReader
from htc.utils.ColoredFileLog import ColoredFileLog
from htc.utils.colors import generate_distinct_colors
from htc.utils.Config import Config
from htc.utils.DatasetDir import DatasetDir
from htc.utils.DelayedFileHandler import DelayedFileHandler
from htc.utils.DomainMapper import DomainMapper
from htc.utils.DuplicateFilter import DuplicateFilter
from htc.utils.general import (
apply_recursive,
clear_directory,
merge_dicts_deep,
safe_copy,
sha256_file,
subprocess_run,
)
from htc.utils.helper_functions import (
basic_statistics,
group_median_spectra,
median_table,
sort_labels,
sort_labels_cm,
utilization_table,
)
from htc.utils.LabelMapping import LabelMapping
from htc.utils.LDA import LDA
from htc.utils.MeasureTime import MeasureTime
from htc.utils.MultiPath import MultiPath
from htc.utils.parallel import p_imap, p_map
from htc.utils.SpectrometerReader import SpectrometerReader
from htc.utils.sqldf import sqldf
from htc.utils.type_from_string import type_from_string
from htc.utils.unify_path import unify_path
from htc.utils.visualization import (
add_std_fill,
compress_html,
create_class_scores_figure,
create_confusion_figure,
create_confusion_figure_comparison,
create_ece_figure,
create_overview_document,
create_running_metric_plot,
create_segmentation_overlay,
create_surface_dice_plot,
create_training_stats_figure,
prediction_figure_html,
show_loss_chart,
visualize_dict,
)
else:
spec = importlib.util.find_spec("htc")
sys.modules[__name__] = LazyImporter(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"settings": settings, "settings_seg": settings_seg},
)
sys.modules[__name__].__spec__ = spec
| [
"j.sellner@dkfz-heidelberg.de"
] | j.sellner@dkfz-heidelberg.de |
eb5007714d3e169fc67ff7de612d97edbdde15ae | ca805265bbc8d9b3f5ccb8dd343524843fc0f776 | /scaling/commands/bench_results_processer.py | d94add1e14ef3dfdd535d410b4a504c6f1e7200a | [] | no_license | alenzhao/QIIME-Scaling | 8dc7b4b99da404c016e59e48197b8f938df1cf14 | 29408a3a0ff2a74039f78a04fff831dabb23fa1a | refs/heads/master | 2021-01-12T10:46:22.961035 | 2016-06-18T16:56:48 | 2016-06-18T16:56:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | #!/usr/bin/env python
from __future__ import division
__author__ = "Jose Antonio Navas Molina"
__copyright__ = "Copyright 2013, The QIIME Scaling Project"
__credits__ = ["Jose Antonio Navas Molina"]
__license__ = "BSD"
__version__ = "0.0.2-dev"
__maintainer__ = "Jose Antonio Navas Molina"
__email__ = "josenavasmolina@gmail.com"
__status__ = "Development"
from pyqi.core.command import (Command, CommandIn, CommandOut,
ParameterCollection)
from scaling.process_results import (process_benchmark_results, CompData)
from scaling.cluster_util import wait_on
class BenchResultsProcesser(Command):
"""Subclassing the pyqi.core.command.Command class"""
BriefDescription = "Processes the benchmark suite results"
LongDescription = ("Takes the benchmark suite output directory and "
"processes the benchmark measurements, creating plots "
"and collapsing results in a usable form.")
CommandIns = ParameterCollection([
CommandIn(Name='bench_results', DataType=list,
Description='List with the benchmark results',
Required=True),
CommandIn(Name='job_ids', DataType=list,
Description='List of job ids to wait for if running in a '
'pbs cluster', Required=False)
])
CommandOuts = ParameterCollection([
CommandOut(Name="bench_data", DataType=CompData,
Description="Dictionary with the benchmark results"),
])
def run(self, **kwargs):
bench_results = kwargs['bench_results']
job_ids = kwargs['job_ids']
if job_ids:
wait_on(job_ids)
data = process_benchmark_results(bench_results)
return {'bench_data': data}
CommandConstructor = BenchResultsProcesser
| [
"josenavasmolina@gmail.com"
] | josenavasmolina@gmail.com |
93fb0eda0e72cecb65b7c8b4f7243a07cc3e3fdd | 639ed2d085bb4e7545ee5b87292205e6539a5e4b | /patient/models.py | db751a3a5090aa50d9f950e0a9929ea4288fab81 | [] | no_license | Ntonsite/crud-django | 71d5adf329f4ce71bfea8fc3c649527db74e1b3f | 01edeca5858cb76e6445e7d6561cf0d4906e1c95 | refs/heads/master | 2022-12-13T02:57:25.972807 | 2020-09-09T07:12:38 | 2020-09-09T07:12:38 | 293,169,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Patient(models.Model):
patientName = models.CharField(max_length=100, default='patientName')
    address = models.CharField(max_length=100, default='sample address')
    age = models.CharField(max_length=100, default='0')
phoneNumber = models.CharField(max_length=100, default='phoneNumber')
email = models.EmailField(max_length=100, null=True)
occupation = models.CharField(max_length=100, default='occupation')
date_registered = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.patientName
class ClinicalInfo(models.Model):
history = models.CharField(max_length=300, default='someHistory')
visualAcuity = models.CharField(max_length=300, default='some')
ExternalEyeExamination = models.CharField(max_length=300, default='Examination')
functionalTest = models.CharField(max_length=300, default='Test')
refractionDetails = models.CharField(max_length=300, default='Details')
diagnosis = models.CharField(max_length=300, default='Diagnosis')
patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
class Item(models.Model):
itemName = models.CharField(max_length=100)
count = models.CharField(max_length=100)
price = models.CharField(max_length=200)
| [
"mwamlimantonsite@gmail.com"
] | mwamlimantonsite@gmail.com |
fa1ad882fd1595df3715ec3227356ed30c4c6017 | fc212767c6c838360b62a3dcd8030a1dfcbf62fc | /muddery/utils/quest_handler.py | 7d19069f731f83b3001d9318edf55756332a4a5f | [
"BSD-3-Clause"
] | permissive | caibingcheng/muddery | 24d6eba76358621736e6a3d66333361239c35472 | dcbf55f4e1c18a2c69576fd0edcec4699c1519b9 | refs/heads/master | 2021-05-19T09:49:19.319735 | 2020-03-29T03:55:51 | 2020-03-29T03:55:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,984 | py | """
QuestHandler handles a character's quests.
"""
from django.conf import settings
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from evennia.utils import logger
from muddery.utils.builder import build_object
from muddery.statements.statement_handler import STATEMENT_HANDLER
from muddery.utils.localized_strings_handler import _
from muddery.utils.exception import MudderyError
from muddery.utils.game_settings import GAME_SETTINGS
from muddery.worlddata.dao.quest_dependencies_mapper import QUEST_DEPENDENCIES
from muddery.mappings.quest_status_set import QUEST_STATUS_SET
from muddery.mappings.typeclass_set import TYPECLASS
class QuestHandler(object):
"""
Handles a character's quests.
"""
def __init__(self, owner):
"""
Initialize handler
"""
self.owner = owner
self.current_quests = owner.db.current_quests
self.finished_quests = owner.db.finished_quests
def accept(self, quest_key):
"""
Accept a quest.
Args:
quest_key: (string) quest's key
Returns:
None
"""
if quest_key in self.current_quests:
return
# Create quest object.
new_quest = build_object(quest_key)
if not new_quest:
return
new_quest.set_owner(self.owner)
self.current_quests[quest_key] = new_quest
self.owner.msg({"msg": _("Accepted quest {C%s{n.") % new_quest.get_name()})
self.show_quests()
self.owner.show_location()
def remove_all(self):
"""
Remove all quests.
        Called when the quests' owner is about to be deleted.
        """
        for quest in self.current_quests.values():
            quest.delete()
        self.current_quests.clear()
def give_up(self, quest_key):
"""
        Give up a quest.
Args:
quest_key: (string) quest's key
Returns:
None
"""
if not GAME_SETTINGS.get("can_give_up_quests"):
logger.log_tracemsg("Can not give up quests.")
raise MudderyError(_("Can not give up this quest."))
if quest_key not in self.current_quests:
raise MudderyError(_("Can not find this quest."))
self.current_quests[quest_key].delete()
del(self.current_quests[quest_key])
if quest_key in self.finished_quests:
self.finished_quests.remove(quest_key)
self.show_quests()
def turn_in(self, quest_key):
"""
Turn in a quest.
Args:
quest_key: (string) quest's key
Returns:
None
"""
if quest_key not in self.current_quests:
return
        if not self.current_quests[quest_key].is_accomplished():
return
# Get quest's name.
name = self.current_quests[quest_key].get_name()
# Call turn in function in the quest.
self.current_quests[quest_key].turn_in()
# Delete the quest.
self.current_quests[quest_key].delete()
del (self.current_quests[quest_key])
self.finished_quests.add(quest_key)
self.owner.msg({"msg": _("Turned in quest {C%s{n.") % name})
self.show_quests()
self.owner.show_location()
def get_accomplished_quests(self):
"""
Get all quests that their objectives are accomplished.
"""
quests = set()
for quest in self.current_quests:
if self.current_quests[quest].is_accomplished():
quests.add(quest)
return quests
def is_accomplished(self, quest_key):
"""
Whether the character accomplished this quest or not.
Args:
quest_key: (string) quest's key
Returns:
            (boolean) result
"""
if quest_key not in self.current_quests:
return False
return self.current_quests[quest_key].is_accomplished()
def is_not_accomplished(self, quest_key):
"""
        Whether the character has not yet accomplished this quest.
        Args:
            quest_key: (string) quest's key
        Returns:
            (boolean) result
"""
if quest_key not in self.current_quests:
return False
return not self.current_quests[quest_key].is_accomplished()
def is_finished(self, quest_key):
"""
Whether the character finished this quest or not.
Args:
quest_key: (string) quest's key
Returns:
            (boolean) result
"""
return quest_key in self.finished_quests
def is_in_progress(self, quest_key):
"""
        Whether the character is currently doing this quest.
        Args:
            quest_key: (string) quest's key
        Returns:
            (boolean) result
"""
return quest_key in self.current_quests
def can_provide(self, quest_key):
"""
        Whether this quest can be provided to the owner.
        Args:
            quest_key: (string) quest's key
        Returns:
            (boolean) result
"""
if self.is_finished(quest_key):
return False
if self.is_in_progress(quest_key):
return False
if not self.match_dependencies(quest_key):
return False
if not self.match_condition(quest_key):
return False
return True
def match_dependencies(self, quest_key):
"""
Check quest's dependencies
Args:
quest_key: (string) quest's key
Returns:
(boolean) result
"""
for dep in QUEST_DEPENDENCIES.filter(quest_key):
status = QUEST_STATUS_SET.get(dep.type)
if not status.match(self.owner, dep.dependency):
return False
return True
def match_condition(self, quest_key):
"""
Check if the quest matches its condition.
Args:
quest_key: (string) quest's key
Returns:
(boolean) result
"""
# Get quest's record.
model_name = TYPECLASS("QUEST").model_name
if not model_name:
return False
model_quest = apps.get_model(settings.WORLD_DATA_APP, model_name)
try:
record = model_quest.objects.get(key=quest_key)
return STATEMENT_HANDLER.match_condition(record.condition, self.owner, None)
except Exception as e:
logger.log_errmsg("Can't get quest %s's condition: %s" % (quest_key, e))
return False
def show_quests(self):
"""
Send quests to player.
"""
quests = self.return_quests()
self.owner.msg({"quests": quests})
def return_quests(self):
"""
Get quests' data.
"""
quests = []
for quest in self.current_quests.values():
info = {"dbref": quest.dbref,
"name": quest.name,
"desc": quest.db.desc,
"objectives": quest.return_objectives(),
"accomplished": quest.is_accomplished()}
quests.append(info)
return quests
def at_objective(self, object_type, object_key, number=1):
"""
Called when the owner may complete some objectives.
Call relative hooks.
Args:
object_type: (type) objective's type
object_key: (string) object's key
number: (int) objective's number
Returns:
None
"""
status_changed = False
for quest in self.current_quests.values():
if quest.at_objective(object_type, object_key, number):
status_changed = True
if quest.is_accomplished():
self.owner.msg({"msg":
_("Quest {C%s{n's goals are accomplished.") % quest.name})
if status_changed:
self.show_quests()
| [
"luyijun999@gmail.com"
] | luyijun999@gmail.com |
a2c934ac353581957bb68e20d41fd2e838ccd25e | f6fe194c520fcfe8f99d1e452988e47a1ec30525 | /venv/bin/pip | 200de81d5f7a1ab0fa2053a6f72ce38a3df66257 | [] | no_license | zupeiza/flask-blog | ce2056a0e0d4026b56e458cbfa12025cf951a3d4 | 9735c5caae438ddaa2715bd34b074eb179da7919 | refs/heads/master | 2016-08-08T17:38:51.907346 | 2016-03-21T12:47:14 | 2016-03-21T12:47:14 | 54,382,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | #!/home/zupe/ZuPy/flask-blog/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"zupeiza@gmail.com"
] | zupeiza@gmail.com | |
0337df5b789db070f38e4d2cd0d34058d82bcc49 | bfd61850148f08700f7b1b11e054603256c73d78 | /quick/__init__.py | a32a659853fcfc32e8b50140b8d11f5be89c923a | [] | no_license | zero-miao/alg_sort | d1e41aa39bda34356405032d400427315220e630 | 21a721f89f63822afd08b40a1af77a7e10dc0599 | refs/heads/master | 2020-07-13T15:32:10.286540 | 2019-08-29T07:34:31 | 2019-08-29T07:34:31 | 205,106,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,579 | py | def quick_sort_recursive(lst, start=0, end=None):
""" 快速排序
时间复杂度:
最优: n*log(n), 假设其每次一分为 2(j 在正中间), T(n) = 2*T(n/2)+n => T(n) = n + 2*n/2 +...+ log(n)*n/log(n) = n*log(n)
最差: n^2, 假设每次仅减少一个(j 在最边上), T(n) = T(n-1) + n => T(n) = n + n-1 +...+ 1 = n^2
比较次数:
最优: n*log(n), 每次循环的时候, 循环长度就是比较次数.
最差: n^2, 同上.
空间复杂度:
最优: 同时间复杂度中最优, 需要递归 log(n) 次, 即栈空间 log(n)
最差: 同时间复杂度中最差, 需要递归 n 次, 即栈空间 n.
完全正序或逆序: 即为最差情况, (n^2, n)
不稳定
>>> lst = [7,6,5,4,3,2,1]
>>> quick_sort_recursive(lst)
[1, 2, 3, 4, 5, 6, 7]
>>> lst
[1, 2, 3, 4, 5, 6, 7]
>>> quick_sort_recursive([6,8,4,2,4,6,8])
[2, 4, 4, 6, 6, 8, 8]
"""
if end is None:
end = len(lst)
if start >= end:
return lst
pivot = lst[start]
    j = start  # j: index of the last element smaller than the pivot; "index" below walks the rest of the range
for index, item in enumerate(lst[start + 1:end], start + 1):
if item < pivot:
j += 1
            # this swap is what makes the sort unstable: equal elements may lose their original relative order
lst[j], lst[index] = lst[index], lst[j]
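    # move the pivot into its final position, between the smaller-than-pivot block and the rest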
lst[j], lst[start] = lst[start], lst[j]
quick_sort_recursive(lst, start=start, end=j)
quick_sort_recursive(lst, start=j + 1, end=end)
return lst
| [
"ao.mei@gaeamobile.com"
] | ao.mei@gaeamobile.com |
05fd07e5476139a62cfe20d31ba439fe918bee15 | 157e8683eaf13aa904f84243292f5d3dc4ba4360 | /Python_by_Example_Challenges/challenges_80to87.py | 6fb749914f7e1028b1bb9f49b79f471e74708754 | [] | no_license | MaryBobs/python | 0b5b45ec9ab21e786eba897f5a1b887adbb7f404 | 4cfaa6871ec87478c85cd105029282cb056c28c3 | refs/heads/master | 2020-12-02T23:26:45.926119 | 2020-01-06T09:30:44 | 2020-01-06T09:30:44 | 231,151,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | ###080
##name = input("Enter your first name: ")
##print("Your first name is:",len(name),"characters long")
##surname = input("Enter your surname: ")
##print("Your surname is",len(surname),"characters long")
##fullname = name + " " + surname
##print("Your name:",fullname,"is",len(fullname),"characters long")
##
###081
##subject = input("Enter your favourite school subject: ")
##for letter in subject:
## print(letter,end="-")
##
###082
##poem = ("I've nearly reached breaking point, she snapped")
##print(poem)
##start = int(input("A number between 0 and 10: "))
##end = int(input("A number between 20 and 30: "))
##print(poem[start:end])
##
###083
##word = input("Type a word in uppercase: ")
##while word.isupper() == False:
## word = input("Try again: ")
##else:
## print("Thank you")
##
###084
##postcode = input("Enter your postcode: ")
##print(postcode[0:2].upper())
##
###085
##name = input("Enter your name: ").lower()
##count = 0
##for i in name:
## if i == "a" or i == "e" or i == "i" or i == "o" or i == "u":
## count = count + 1
##print("There are",count,"vowels in your name")
##
###086
##password = input("Enter your new password: ")
##passwordcheck = input("Enter it again: ")
##if password == passwordcheck:
## print("Thank you")
##elif password.upper() == passwordcheck.upper():
## print("They must be in the same case")
##else:
## print("Incorrect")
#087
word = input("Enter a word: ")
word = word[::-1]
for i in word:
print(i)
| [
"55357115+MaryBobs@users.noreply.github.com"
] | 55357115+MaryBobs@users.noreply.github.com |
9b09e3f842d2de8df5974a6f1edd7a94e2d5200f | 460405d5666b79c088c9a473b3be4d85d1950fe1 | /accounts/migrations/0003_auto_20201101_2302.py | bebd60e8388330614edd6adfbd97578f548a338e | [] | no_license | TaPPa45/fillbooks | 236d08d1caf08adf02116166acf41ac3967c7a36 | 68f6af413e4569ec57ea625aa8af0d07d067f9b0 | refs/heads/main | 2023-01-11T02:49:31.134859 | 2020-11-15T16:07:40 | 2020-11-15T16:07:40 | 381,718,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # Generated by Django 2.2.12 on 2020-11-01 20:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20201011_1633'),
]
operations = [
migrations.AlterField(
model_name='branch',
name='balance',
field=models.CharField(max_length=50, verbose_name='Баланс филиала'),
),
]
| [
"dimadev@dimankab1.fvds.ru"
] | dimadev@dimankab1.fvds.ru |
0d3e7da7d35dc0e85ff5002ba1b008ceca4e07f2 | d489eb7998aa09e17ce8d8aef085a65f799e6a02 | /lib/modules/powershell/situational_awareness/network/powerview/share_finder.py | b7eb7430bb04ecd40ca90099bbefd0f114040073 | [
"MIT"
] | permissive | fengjixuchui/invader | d36078bbef3d740f95930d9896b2d7dd7227474c | 68153dafbe25e7bb821c8545952d0cc15ae35a3e | refs/heads/master | 2020-07-21T19:45:10.479388 | 2019-09-26T11:32:38 | 2019-09-26T11:32:38 | 206,958,809 | 2 | 1 | MIT | 2019-09-26T11:32:39 | 2019-09-07T11:32:17 | PowerShell | UTF-8 | Python | false | false | 6,300 | py | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Find-DomainShare',
'Author': ['@harmj0y'],
'Description': ('Finds shares on machines in the domain. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Hosts to enumerate.',
'Required' : False,
'Value' : ''
},
'ComputerLDAPFilter' : {
'Description' : 'Host filter name to query AD for, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'ComputerSearchBase' : {
'Description' : 'Specifies the LDAP source to search through for computers',
'Required' : False,
'Value' : ''
},
'ComputerOperatingSystem' : {
'Description' : 'Return computers with a specific operating system, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'ComputerServicePack' : {
'Description' : 'Return computers with the specified service pack, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'ComputerSiteName' : {
'Description' : 'Return computers in the specific AD Site name, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'CheckShareAccess' : {
'Description' : 'Switch. Only display found shares that the local user has access to.',
'Required' : False,
'Value' : ''
},
'Server' : {
'Description' : 'Specifies an active directory server (domain controller) to bind to',
'Required' : False,
'Value' : ''
},
'SearchScope' : {
'Description' : 'Specifies the scope to search under, Base/OneLevel/Subtree (default of Subtree)',
'Required' : False,
'Value' : ''
},
'ResultPageSize' : {
'Description' : 'Specifies the PageSize to set for the LDAP searcher object.',
'Required' : False,
'Value' : ''
},
'ServerTimeLimit' : {
'Description' : 'Specifies the maximum amount of time the server spends searching. Default of 120 seconds.',
'Required' : False,
'Value' : ''
},
'Tombstone' : {
'Description' : 'Switch. Specifies that the search should also return deleted/tombstoned objects.',
'Required' : False,
'Value' : ''
},
'Delay' : {
'Description' : 'Delay between enumerating hosts, defaults to 0.',
'Required' : False,
'Value' : ''
},
'Jitter' : {
'Description' : 'Specifies the jitter (0-1.0) to apply to any specified -Delay, defaults to +/- 0.3.',
'Required' : False,
'Value' : ''
},
'Threads' : {
'Description' : 'The maximum concurrent threads to execute.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.strip_powershell_comments(moduleCode)
script += "\n" + moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
| [
"noreply@github.com"
] | noreply@github.com |
b950cea9bfca52f532a55cad63ebec8f1f7c442b | fed7697f8be852f836240215ae732fdeb85bde6f | /poems/info_origin.py | 562203198a21feaebd23f137799aeaa8d13571cc | [] | no_license | luckyBoy93/bar_poem | 1243c45f4448e94746f5d14717ae10a37d94dfa5 | 0dc533383a1c45771a6016d2235c9afc387be37a | refs/heads/master | 2020-01-23T21:18:03.661921 | 2016-11-23T10:50:54 | 2016-11-23T10:50:54 | 74,567,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,606 | py | # -*- encoding: utf-8 -*-
import pymysql
import re
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='bar_poem3', charset='utf8', autocommit=True)
curDB = conn.cursor()
cursor = conn.cursor(pymysql.cursors.DictCursor)
curDB.execute("SET NAMES utf8")
def get_stop(tt):
    m = re.search(r"(\d+)", tt)
    if m:
        return m.group(1)
return '0'
def get_razmer(tt):
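    # tt is a Russian metre label: 'я' = iamb, 'х' = trochee, 'дол' = dolnik,
    # 'д' = dactyl, 'ам' = amphibrach, 'ан' = anapest ('дол' must be checked before 'д')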
if u'я' in tt:
return 'yamb'
if u'х' in tt:
return 'horey'
if u'дол' in tt:
return 'dolnik'
if u'д' in tt:
return 'daktil'
    if u'ам' in tt:
        return 'amfibrahii'
    if u'ан' in tt:
        return 'anapest'
return 'other'
curDB.execute("TRUNCATE TABLE poems_origin_generated")
query = "SELECT * " \
"FROM `academ16` " \
"LEFT JOIN konkordans ON konkordans.id = academ16.konk_id " \
"LEFT JOIN poems_info ON poems_info.id = academ16.metr_id " \
"WHERE academ16.konk_id > 0 AND academ16.metr_id > 0"
curDB.execute(query)
#col_names = [i[0] for i in curDB.description]
for row in curDB.fetchall():
title = row[2]
poem_body = row[4]
year = str( row[1] )
stop = str( get_stop( row[25] ) )
lines_num = str( row[27] )
razmer = str( get_razmer( row[25] ) )
print(row[12], row[11])
m_end = str( row[12] )
g_end = str( row[13] )
d_end = str( row[14] )
strofika = str( row[32] )
partial_line = str( row[18] )
m_no = str( row[15] )
g_no = str( row[16] )
d_no = str( row[17] )
strofika_type = row[19]
query = "INSERT INTO `poems_origin_generated` (" \
"`title`, " \
"`poem_body`, " \
"`year`, " \
"`stop`, " \
"`lines_num`, " \
"`razmer`, " \
"`m_end`, " \
"`g_end`, " \
"`d_end`, " \
"`strofika`, " \
"`partial_line`, " \
"`m_no`, " \
"`g_no`, " \
"`d_no`, " \
"`strofika_type`) " \
"VALUES (" \
"'"+title+"', " \
"'"+poem_body.replace("'", '`')+"', " \
"'"+year+"', " \
"'"+stop+"', " \
"'"+lines_num+"', " \
"'"+razmer+"', " \
"'"+m_end+"', " \
"'"+g_end+"', " \
"'"+d_end+"', " \
"'"+strofika+"', " \
"'"+partial_line+"', " \
"'"+m_no+"', " \
"'"+g_no+"', " \
"'"+d_no+"', " \
"'"+strofika_type+"');"
#print(query)
print(title)
    curDB.execute(query, params) | [
"matveysodboev@gmal.com"
] | matveysodboev@gmal.com |
73fccea44018f9ce0bbec825b01759800db7f0c4 | 8d5f3ec2d50f1cb7d694a1016105bcf37b3dc829 | /bearladder.py | a9f2c165e12fddeff8595b7d3a007f4bb6a52ca0 | [] | no_license | SushantSriv/CODECHEF_python-codes | acbbabb33d1481d32f3b70b517927631703fa43f | a4524356e8d19ba1206e1688f9e307c7d462c213 | refs/heads/master | 2021-09-21T15:32:42.777321 | 2018-08-28T15:33:49 | 2018-08-28T15:33:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | for i in range(int(input())):
a,b=list(map(int,input().split(" ")))
if(a%2==1 and a<b):
if(abs(b-a)==1 or abs(b-a)==2):
print("YES")
else:
print("NO")
elif(a%2==0):
if(abs(b-a)==2 or b==a-1):
print("YES")
else:
print("NO")
elif(a%2==1 and a>b):
if(abs(a-b)==2):
print("YES")
else:
print("NO")
| [
"noreply@github.com"
] | noreply@github.com |
f0bd5de1ea650b98a69f891dccd5526b09bff57e | bd47051178c9971321dd223ffdac84847db1bda7 | /PrisDilem/PrisDilemmaupdated.py | fb76c381818d1f876defe41f0c70883d816c6fc9 | [] | no_license | AriFleischer13/CSC15-Python | 184973f09ae26c1ec7e28f04488874728aaabff4 | 0c744da91d429b3010e10d738a373c5fb3e78562 | refs/heads/master | 2020-04-16T21:58:03.663602 | 2019-01-20T02:56:18 | 2019-01-20T02:56:18 | 165,947,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,980 | py | from random import choice
import itertools
Defect = 0
Cooperate = 1
class Agent:
def __init__(self, name, strategy):
self.name = name
self.strategy = strategy
self.history = {} # a dict, key is another agent, value is a list of events where self interacted with another agent
self.totalscore = 0
def choice(self,opponent):
if opponent in self.history:
hist = self.history[opponent]
else: hist = []
pick = self.strategy(hist)
return pick
def recordhist(self,opponent,oppPlay,reward):
if opponent not in self.history:
self.history[opponent] = []
self.history[opponent].append(oppPlay)
self.totalscore += reward
def gettotalscore(self):
return self.totalscore
def confront(a1,a2):
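    # payoff matrix: mutual defection -> 1 point each, mutual cooperation -> 3 each,
    # a lone defector scores 5 while the exploited cooperator scores 0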
a1choice = a1.choice(a2)
a2choice = a2.choice(a1)
if a1choice == a2choice:
if a1choice == Defect:
a1reward = 1
a2reward = 1
else:
a1reward = 3
a2reward = 3
else:
if a1choice == Defect:
a1reward = 5
a2reward = 0
else:
a1reward = 0
a2reward = 5
a1.recordhist(a2,a2choice,a1reward)
a2.recordhist(a1,a1choice,a2reward)
def AllD(history):
return Defect
def AllC(history):
return Cooperate
def TFT(history):
if len(history) == 0:
return Cooperate
else:
return history[-1]
#Cooperate Unitl Other Defects
def Grudge(history):
for x in history:
if x == Defect:
return Defect
return Cooperate
#Alternate between Cooperating and Defecting
def CthenD(history):
if len(history)%2==0:
return Cooperate
else: return Defect
#Alternate between Defecting and Cooperating
def DthenC(history):
if len(history)%2==0:
return Defect
else: return Cooperate
#Cooperates 50% of the time until opponent defects then defects all the time
def fiftyfiftyC(history):
    if Defect in history:
        return Defect
    return choice([Defect,Cooperate])
#Cooperates all of the time until opponent defects then defects 50% of the time
def fiftyfiftyD(history):
    if Defect in history:
        return choice([Defect,Cooperate])
    return Cooperate
#Cooperates all of the time until opponent defects then defects 2/3 of the time
def TwoThirdD(history):
    if Defect in history:
        return choice([Defect,Defect,Cooperate])
    return Cooperate
#Cooperates 2/3 of the time until opponent defects then defects all the time
def TwoThirdC(history):
    if Defect in history:
        return Defect
    return choice([Defect,Cooperate,Cooperate])
#Always Cooperates turn one then after that has a 66% chance of defecting
def Defect66(history):
if len(history) == 0:
return Cooperate
else:
return choice([Defect,Defect,Cooperate])
def Random(history):
return choice([Cooperate,Defect])
#new code that creates my contenders list by going through a list and appending the items to the list of contenders instead of appending each one one by one
strategies = [AllD,AllC,TFT,Grudge,CthenD,fiftyfiftyD,TwoThirdD,Defect66,Random]
strategydict = {AllD:"AllD",AllC:"AllC",TFT:"TFT",Grudge:"Grudge",CthenD:"CthenD",fiftyfiftyD:"50-50D",TwoThirdD:"2/3D",Defect66:"Defect66%",Random:"Random"}
contenders = []
for strategy in strategies:
contenders.append(Agent(strategydict[strategy],strategy))
def printscores():
for x in contenders:
print("The score of " + x.name + " = " + str(x.gettotalscore()))
rounds = int(input("Enter how many rounds you would like to play. Max rounds is 300. "))
rounds = min(rounds, 300)  # enforce the cap promised in the prompt
for round in range(rounds):
#new code to delete one strategy after every 50 rounds and then add a copy of each of the strategies remaining and reset scores to zero
if round>0 and round%50==0:
loser = contenders[0].strategy
minscore = contenders[0].gettotalscore()
for c in contenders:
score = c.gettotalscore()
if score < minscore:
loser = c.strategy
minscore = c.gettotalscore()
for i in range(len(contenders)):
if contenders[i].strategy == loser:
deleteidx = i
del(contenders[deleteidx])
print(strategydict[loser])
        strategies.remove(loser)
contenders = []
for event in range(int(round/50)+1):
for strategy in strategies:
contenders.append(Agent(strategydict[strategy],strategy))
for x in itertools.combinations(contenders,2):
confront(x[0],x[1])
printscores()
| [
"AriFleischer@Aris-MBP.fios-router.home"
] | AriFleischer@Aris-MBP.fios-router.home |
f771c890fb42345b62e32a574e8cb81ebefa1c40 | 75d921f808ef53a5f1c6165ee6298001cbc36c68 | /src/settings.py | c85d82ac097c53ed08b10d8e2f2940a6ee2ccecf | [] | no_license | shiv-konar/NeoTicketinSystem | 7181694de0cb49add1f18605c4b96c1e6ec7600a | 8b5b748f457cfaf104b78adc48bcead544f81369 | refs/heads/master | 2021-01-19T03:35:32.099376 | 2016-07-11T22:20:55 | 2016-07-11T22:20:55 | 63,106,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py | """
Django settings for src project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tnbx*%+=&()3ihh$q@v$4d)=*(7+z%2^n9l4ynip#gect&_%(w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'issueLog',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'src.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'src.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static", "static_root")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static", "our_static"),
]
| [
"Balajipadmavati1"
] | Balajipadmavati1 |
f98a8e3d28124d57f3295b6d5e9ff8b8bded75ed | a7c02348f6312eda84b8100b17a136279dcb5246 | /ecommerce/migrations/0009_auto_20201117_0616.py | ec00d61f4929cb5ad8a2198bc177e9d7724f5f1b | [] | no_license | Emmanuel-Aggrey/ecom | 87538d61d99b2db0a55d62cd108b57dc0d514154 | ffbc9436763f1c4f271a75bfb983507dfd6564ee | refs/heads/master | 2023-02-02T01:15:21.266305 | 2020-12-21T14:02:21 | 2020-12-21T14:02:21 | 297,808,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | # Generated by Django 2.2.10 on 2020-11-17 06:16
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ecommerce', '0008_auto_20200929_0055'),
]
operations = [
migrations.AlterField(
model_name='product',
name='description',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
]
| [
"aggrey.en@gmail.com"
] | aggrey.en@gmail.com |
a0c8019736191cb3518c16db5aba3dd3c2efbe96 | f5ff90dbd999c52d2885efc2fa6d1c35a7380d28 | /Python Modules/Palindrome.py | 89eb5d49d6881477a9d823539daa35f44e6b3105 | [
"MIT"
] | permissive | MysticSaiyan/MITx-6.00.1x | 5b36249a4962b43539e04d0b2bcb55928a238e9c | 5b8e972399670f43fe25994cac7fa6a35a403b4f | refs/heads/master | 2021-08-20T08:47:20.733248 | 2017-11-28T16:45:40 | 2017-11-28T16:45:40 | 111,674,624 | 0 | 0 | null | 2017-11-22T12:03:11 | 2017-11-22T11:15:50 | Python | UTF-8 | Python | false | false | 369 | py | def isPalindrome(s):
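    # Strips non-letter characters, lowercases, then checks the palindrome property recursively.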
def toChars(s):
s = s.lower()
ans = ''
for c in s:
if c in 'abcdefghijklmnopqrstuvwxyz':
ans = ans + c
return ans
def isPal(s):
if len(s) <= 1:
return True
else:
return s[0] == s[-1] and isPal(s[1:-1])
return isPal(toChars(s)) | [
"pragz111@gmail.com"
] | pragz111@gmail.com |
35647717123a5bdd87ee9cecc764d41973a71528 | d04126f6572398d715c048800b1ad4b0e0c35d32 | /classifier.py | 221300947a7c1a4f1bd56c41ec7f76e05345cc7a | [] | no_license | ArisPapangelis/Food-intake-toolbox | 1fc63aac3dc2ac881ddd38eb9eeab746dcd8021a | 8ccfa90b1f2096b27491318332583f7ce0212889 | refs/heads/main | 2023-03-16T08:47:24.517738 | 2021-03-05T01:56:10 | 2021-03-05T01:56:10 | 335,093,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,240 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 17:38:11 2021
@author: Aris Papangelis
Script to train a supervised learning classifier
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score, KFold, LeaveOneOut, LeavePOut, LeaveOneGroupOut
from sklearn.naive_bayes import GaussianNB
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, f_classif, RFE
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, roc_auc_score, roc_curve
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
#Clemson cafeteria dataset
dataset = pd.read_csv("csv/clemson_indicators.csv", delimiter = ';')
cols_to_use = ['TP', 'FP', 'FN', 'Precision','Recall', 'F1']
dataset = pd.concat([dataset, pd.read_csv("csv/bite_metrics.csv", usecols=cols_to_use, delimiter = ';')], axis=1)
dataset['BMI'] = [0.0]* len(dataset)
cols_to_use = ["Participant","Gender","Age","BMI"]
demographics = pd.read_csv("csv/clemson_demographics.csv", usecols = cols_to_use, delimiter = ';')
for i in range(len(dataset)):
name = dataset.at[i,'Participant'].split('_')[0]
    bmi = demographics.loc[demographics['Participant'] == name, 'BMI'].iloc[0]
    dataset.at[i,'BMI'] = bmi
dataset = dataset[dataset['F1'] > 0.6]
#Dataset katerinas
dataset2 = pd.read_csv("csv/katerina_indicators.csv", delimiter = ';')
dataset2 = pd.concat([dataset2, pd.read_csv("csv/katerina_demographics.csv", usecols=['BMI'], delimiter = ';')], axis=1)
cols_to_use = ['Participant', 'a', 'b', 'Total food intake','Average food intake rate',
'Average bite size', 'Bites per minute', 'BMI']
#Final data
data = pd.concat([dataset[cols_to_use], dataset2[cols_to_use]], ignore_index=True)
#data = dataset[cols_to_use]
X = data.iloc[:,1:-1]
Y = data.iloc[:,-1]
#Rescale data to range (0,1)
data_scaler = preprocessing.MinMaxScaler(feature_range=(0,1))
#data_scaler = preprocessing.RobustScaler()
X_rescaled = data_scaler.fit_transform(X)
#0 for normal, 1 for overweight or underweight
Y = np.where((Y < 18.5) | (Y > 25), 1, 0)
#Data visualisation
df = data.iloc[:,1:].copy()  # explicit copy: the frame is mutated below
df['BMI'] = Y
df.iloc[:,:-1] = X_rescaled
cor = df.corr()
plt.figure(figsize=(19.2, 10.8))
sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
plt.figure(figsize=(19.2, 10.8))
plt.suptitle('Features')
i=1
for col in df.columns[:-1]:
plt.subplot(3, 2, i)
#plt.title(col)
row = sns.boxplot( x='BMI', y=col, data= df)
i+=1
"""
#Select best features through SelectKBest
features = SelectKBest(score_func=f_classif, k=3)
fit = features.fit(X_rescaled, Y)
print(fit.scores_)
X_rescaled = fit.transform(X_rescaled)
"""
#Select best features through RFE
model = ExtraTreesClassifier(random_state=0)
rfe = RFE(model, 3)
fit = rfe.fit(X_rescaled, Y)
print("Num Features: %d" % fit.n_features_)
print("Selected Features: %s" % fit.support_)
print("Feature Ranking: %s" % fit.ranking_)
X_rescaled = X_rescaled[:,fit.support_]
"""
#Do PCA on X_rescaled
pca = PCA()
fit = pca.fit(X_rescaled)
print("Explained Variance: %s" % fit.explained_variance_ratio_)
print(fit.components_)
"""
#Classification
x_train, x_test, y_train, y_test = train_test_split(X_rescaled, Y, test_size=0.2, random_state=0)
#model = RandomForestClassifier(random_state=0).fit(x_train, y_train)
model = ExtraTreesClassifier(random_state=0).fit(x_train, y_train)
#model = KNeighborsClassifier(n_neighbors=3).fit(x_train, y_train)
#model = DecisionTreeClassifier(random_state=0).fit(x_train, y_train)
#model = GaussianNB().fit(x_train, y_train)
y_pred = model.predict(x_test)
acc = accuracy_score(y_test, y_pred)
report = classification_report(y_test,y_pred)
cm = confusion_matrix(y_test,y_pred)
ROC = roc_auc_score(y_test, y_pred)
#Check Algorithms
#Decision tree has high recall and f1, random forest has balance between recall and accuracy, as well as best roc_auc
#If we prefer FP to FN (normal classified as overweight) decision tree is better
models = []
models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier(n_neighbors=3)))
models.append(('CART', DecisionTreeClassifier()))
models.append(('RF', RandomForestClassifier()))
models.append(('ET', ExtraTreesClassifier()))
models.append(('ADA', AdaBoostClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma='auto')))
# evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = StratifiedKFold(n_splits=10, random_state=42, shuffle=True)
#kfold = LeavePOut(100)
cv_results = cross_val_score(model, X_rescaled, Y, cv=kfold, scoring='roc_auc')
results.append(cv_results)
names.append(name)
print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
| [
"36777816+ArisPapangelis@users.noreply.github.com"
] | 36777816+ArisPapangelis@users.noreply.github.com |
96c76ae94d06dfc58e6363603425d800499d1a75 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/ml/azure-ai-ml/azure/ai/ml/identity/_internal/pipeline.py | 66a31affbd4140ddaf931c75a2cb7069bbb5a312 | [
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 2,585 | py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from azure.ai.ml._user_agent import USER_AGENT
from azure.core.configuration import Configuration
from azure.core.pipeline import Pipeline
from azure.core.pipeline.policies import (
ContentDecodePolicy,
CustomHookPolicy,
DistributedTracingPolicy,
HeadersPolicy,
HttpLoggingPolicy,
NetworkTraceLoggingPolicy,
ProxyPolicy,
RetryPolicy,
UserAgentPolicy,
)
from azure.core.pipeline.transport import RequestsTransport
def _get_config(**kwargs):
"""Configuration common to a/sync pipelines."""
config = Configuration(**kwargs)
config.custom_hook_policy = CustomHookPolicy(**kwargs)
config.headers_policy = HeadersPolicy(**kwargs)
config.http_logging_policy = HttpLoggingPolicy(**kwargs)
config.logging_policy = NetworkTraceLoggingPolicy(**kwargs)
config.proxy_policy = ProxyPolicy(**kwargs)
config.user_agent_policy = UserAgentPolicy(base_user_agent=USER_AGENT, **kwargs)
return config
def _get_policies(config, _per_retry_policies=None, **kwargs):
policies = [
config.headers_policy,
config.user_agent_policy,
config.proxy_policy,
ContentDecodePolicy(**kwargs),
config.retry_policy,
]
if _per_retry_policies:
policies.extend(_per_retry_policies)
policies.extend(
[
config.custom_hook_policy,
config.logging_policy,
DistributedTracingPolicy(**kwargs),
config.http_logging_policy,
]
)
return policies
def build_pipeline(transport=None, policies=None, **kwargs):
if not policies:
config = _get_config(**kwargs)
config.retry_policy = RetryPolicy(**kwargs)
policies = _get_policies(config, **kwargs)
if not transport:
transport = RequestsTransport(**kwargs)
return Pipeline(transport, policies=policies)
def build_async_pipeline(transport=None, policies=None, **kwargs):
from azure.core.pipeline import AsyncPipeline
if not policies:
from azure.core.pipeline.policies import AsyncRetryPolicy
config = _get_config(**kwargs)
config.retry_policy = AsyncRetryPolicy(**kwargs)
policies = _get_policies(config, **kwargs)
if not transport:
from azure.core.pipeline.transport import AioHttpTransport
transport = AioHttpTransport(**kwargs)
return AsyncPipeline(transport, policies=policies)
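# A minimal usage sketch (assumption, not part of this module): build a default synchronous
# pipeline and send a request through it. The URL is a placeholder.
#
# from azure.core.pipeline.transport import HttpRequest
#
# pipeline = build_pipeline(retry_total=3, logging_enable=True)
# request = HttpRequest("GET", "https://management.azure.com/")
# response = pipeline.run(request)
# print(response.http_response.status_code)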
| [
"noreply@github.com"
] | noreply@github.com |
dd228ee5b5c345d7ead8c12e93e2c65210cece10 | da1465fa5b0dd06d2674a50b908ac6eb65ac07b4 | /formapi/tests/calls.py | aa5bd9f8038c336ac7e3621ba133fa94c0290934 | [
"MIT"
] | permissive | hannseman/django-formapi | b135577547d315a6530608397a74b60a2c7c1aa1 | 610ea55f4525f47a89ffb5114bb98fa740546da0 | refs/heads/master | 2021-01-17T22:30:21.113740 | 2015-04-29T15:02:22 | 2015-04-29T15:02:22 | 11,033,872 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,867 | py | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from formapi import calls
from formapi.api import API
class AuthenticateUserCall(calls.APICall):
"""
Authenticate a user
"""
username = forms.CharField(required=True)
password = forms.CharField(required=True, widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive.")
}
def __init__(self, *args, **kwargs):
self.user_cache = None
super(AuthenticateUserCall, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
self.username_field = User._meta.get_field('username')
def action(self, test):
return self.get_user_id()
def clean(self):
super(AuthenticateUserCall, self).clean()
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = authenticate(username=username, password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'] % {
'username': self.username_field.verbose_name
})
elif not self.user_cache.is_active:
raise forms.ValidationError(self.error_messages['inactive'])
return self.cleaned_data
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class DivisionCall(calls.APICall):
"""
Returns the quotient of two integers
"""
dividend = forms.IntegerField()
divisor = forms.IntegerField()
signed_requests = False
def action(self, test):
dividend = self.cleaned_data.get('dividend')
divisor = self.cleaned_data.get('divisor')
try:
return float(dividend) / float(divisor)
except ZeroDivisionError:
self.add_error("DIVISION BY ZERO, OH SHIIIIII")
class ProgrammingLanguages(calls.APICall):
RUBY = 'ruby'
PYTHON = 'python'
JAVA = 'java'
LANGUAGES = (
(RUBY, 'Freshman'),
(PYTHON, 'Sophomore'),
(JAVA, 'Junior')
)
languages = forms.MultipleChoiceField(choices=LANGUAGES)
def action(self, test):
return u'Good for you'
API.register(AuthenticateUserCall, 'user', 'authenticate', version='v1.0.0')
API.register(DivisionCall, 'math', 'divide', version='v1.0.0')
API.register(ProgrammingLanguages, 'comp', 'lang', version='v1.0.0')
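# A minimal usage sketch (assumption, not part of this module): exercising the registered
# DivisionCall through Django's test client. The URL layout below follows formapi's
# "/api/<version>/<namespace>/<call name>/" convention and is an assumption about the
# project's urlconf.
#
# from django.test import Client
#
# response = Client().post('/api/v1.0.0/math/divide/', {'dividend': 10, 'divisor': 4})
# # On success the JSON body carries the call's return value, e.g. {"success": true, "data": 2.5, "errors": {}}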
| [
"hannes@5monkeys.se"
] | hannes@5monkeys.se |
b88b3654f353b709ae058fa2b3a9941510b100dd | edbf1231cd8f6dded5c405a63711b9645b8031f2 | /venv/Scripts/pdf2txt.py | 2b1804fa48c4e4fa18cbd60cb246625962f8d0c7 | [] | no_license | c0mmand3r3/DataScience | 45e24138aaf23cb77176a9f91044f23798dcf4da | cda4d5aabff038a9f288a7f18e4302ee6b8356ab | refs/heads/master | 2020-04-10T00:39:05.637051 | 2019-02-15T10:00:26 | 2019-02-15T10:00:26 | 160,691,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,060 | py | #!C:\Users\DELL\PycharmProjects\DataScience\venv\Scripts\python.exe
"""
Converts PDF text content (though not images containing text) to plain text, html, xml or "tags".
"""
import argparse
import logging
import six
import sys
import pdfminer.settings
pdfminer.settings.STRICT = False
import pdfminer.high_level
import pdfminer.layout
from pdfminer.image import ImageWriter
def extract_text(files=[], outfile='-',
_py2_no_more_posargs=None, # Bloody Python2 needs a shim
no_laparams=False, all_texts=None, detect_vertical=None, # LAParams
word_margin=None, char_margin=None, line_margin=None, boxes_flow=None, # LAParams
output_type='text', codec='utf-8', strip_control=False,
maxpages=0, page_numbers=None, password="", scale=1.0, rotation=0,
layoutmode='normal', output_dir=None, debug=False,
disable_caching=False, **other):
if _py2_no_more_posargs is not None:
raise ValueError("Too many positional arguments passed.")
if not files:
raise ValueError("Must provide files to work upon!")
# If any LAParams group arguments were passed, create an LAParams object and
# populate with given args. Otherwise, set it to None.
if not no_laparams:
laparams = pdfminer.layout.LAParams()
for param in ("all_texts", "detect_vertical", "word_margin", "char_margin", "line_margin", "boxes_flow"):
paramv = locals().get(param, None)
if paramv is not None:
setattr(laparams, param, paramv)
else:
laparams = None
imagewriter = None
if output_dir:
imagewriter = ImageWriter(output_dir)
if output_type == "text" and outfile != "-":
for override, alttype in ( (".htm", "html"),
(".html", "html"),
(".xml", "xml"),
(".tag", "tag") ):
if outfile.endswith(override):
output_type = alttype
if outfile == "-":
outfp = sys.stdout
if outfp.encoding is not None:
codec = 'utf-8'
else:
outfp = open(outfile, "wb")
for fname in files:
with open(fname, "rb") as fp:
pdfminer.high_level.extract_text_to_fp(fp, **locals())
return outfp
def maketheparser():
parser = argparse.ArgumentParser(description=__doc__, add_help=True)
parser.add_argument("files", type=str, default=None, nargs="+", help="File to process.")
parser.add_argument("-d", "--debug", default=False, action="store_true", help="Debug output.")
parser.add_argument("-p", "--pagenos", type=str, help="Comma-separated list of page numbers to parse. Included for legacy applications, use --page-numbers for more idiomatic argument entry.")
parser.add_argument("--page-numbers", type=int, default=None, nargs="+", help="Alternative to --pagenos with space-separated numbers; supercedes --pagenos where it is used.")
parser.add_argument("-m", "--maxpages", type=int, default=0, help="Maximum pages to parse")
parser.add_argument("-P", "--password", type=str, default="", help="Decryption password for PDF")
parser.add_argument("-o", "--outfile", type=str, default="-", help="Output file (default \"-\" is stdout)")
parser.add_argument("-t", "--output_type", type=str, default="text", help="Output type: text|html|xml|tag (default is text)")
parser.add_argument("-c", "--codec", type=str, default="utf-8", help="Text encoding")
parser.add_argument("-s", "--scale", type=float, default=1.0, help="Scale")
parser.add_argument("-A", "--all-texts", default=None, action="store_true", help="LAParams all texts")
parser.add_argument("-V", "--detect-vertical", default=None, action="store_true", help="LAParams detect vertical")
parser.add_argument("-W", "--word-margin", type=float, default=None, help="LAParams word margin")
parser.add_argument("-M", "--char-margin", type=float, default=None, help="LAParams char margin")
parser.add_argument("-L", "--line-margin", type=float, default=None, help="LAParams line margin")
parser.add_argument("-F", "--boxes-flow", type=float, default=None, help="LAParams boxes flow")
parser.add_argument("-Y", "--layoutmode", default="normal", type=str, help="HTML Layout Mode")
parser.add_argument("-n", "--no-laparams", default=False, action="store_true", help="Pass None as LAParams")
parser.add_argument("-R", "--rotation", default=0, type=int, help="Rotation")
parser.add_argument("-O", "--output-dir", default=None, help="Output directory for images")
parser.add_argument("-C", "--disable-caching", default=False, action="store_true", help="Disable caching")
parser.add_argument("-S", "--strip-control", default=False, action="store_true", help="Strip control in XML mode")
return parser
# main
def main(args=None):
P = maketheparser()
A = P.parse_args(args=args)
if A.page_numbers:
A.page_numbers = set([x-1 for x in A.page_numbers])
if A.pagenos:
A.page_numbers = set([int(x)-1 for x in A.pagenos.split(",")])
imagewriter = None
if A.output_dir:
imagewriter = ImageWriter(A.output_dir)
if six.PY2 and sys.stdin.encoding:
A.password = A.password.decode(sys.stdin.encoding)
if A.output_type == "text" and A.outfile != "-":
for override, alttype in ( (".htm", "html"),
(".html", "html"),
(".xml", "xml" ),
(".tag", "tag" ) ):
if A.outfile.endswith(override):
A.output_type = alttype
if A.outfile == "-":
outfp = sys.stdout
if outfp.encoding is not None:
            # Force utf-8 output even when stdout reports its own encoding.
A.codec = 'utf-8'
else:
outfp = open(A.outfile, "wb")
    # Run the extraction with the parsed arguments.
outfp = extract_text(**vars(A))
outfp.close()
return 0
if __name__ == '__main__': sys.exit(main())
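# A minimal usage sketch (assumption, not part of this script): calling the converter
# programmatically instead of through the CLI. The file names are placeholders.
#
# outfp = extract_text(files=['report.pdf'], outfile='report.txt', output_type='text')
# outfp.close()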
| [
"anishbasnetworld@gmail.com"
] | anishbasnetworld@gmail.com |
e1a369dc2579d3d7f7b2687df356ca92d408e5ca | 6699b8944b71e86725fdc17bb5f9cd69e254b4eb | /leetcode/1448.count-good-nodes-in-binary-tree/solution.py | 93c087524ccb57bbc8d3bb206aa6f474859f9b57 | [] | no_license | jadesym/interview | 6099e663090408f548b4f4b0b17ae90bb60a7d46 | 5b6eecedfa1c7e496bcfe852e2d3896e993ff16e | refs/heads/main | 2023-01-07T21:56:59.063542 | 2022-12-30T20:13:34 | 2022-12-30T20:13:34 | 41,118,644 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def goodNodes(self, root: TreeNode) -> int:
        # Sentinel smaller than any node value allowed by the constraints (-10^4 <= Node.val <= 10^4).
        startMax = -(10 ** 5)
        return self.dfs(root, startMax)
def dfs(self, node: TreeNode, maxSoFar: int) -> int:
good_nodes = 0
cur_node_max = max(maxSoFar, node.val)
        # A node is "good" when no value on the root-to-node path exceeds its own.
        if maxSoFar <= node.val:
            good_nodes += 1
if node.left is not None:
good_nodes += self.dfs(node.left, cur_node_max)
if node.right is not None:
good_nodes += self.dfs(node.right, cur_node_max)
return good_nodes
| [
"kfu@kfu-mn1.linkedin.biz"
] | kfu@kfu-mn1.linkedin.biz |
6e8855e96569e6e5c38f9d5ad1ce4910d477b9c4 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2021_06_01_preview/models/_models_py3.py | 4007eb90a0326a819655f715f93c6d9bbbc685cf | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 206,121 | py | # coding=utf-8
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from ... import _serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
class ActivationProperties(_serialization.Model):
"""The activation properties of the connected registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar status: The activation status of the connected registry. Known values are: "Active" and
"Inactive".
:vartype status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ActivationStatus
"""
_validation = {
"status": {"readonly": True},
}
_attribute_map = {
"status": {"key": "status", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.status = None
class ActiveDirectoryObject(_serialization.Model):
"""The Active Directory Object that will be used for authenticating the token of a container
registry.
:ivar object_id: The user/group/application object ID for Active Directory Object that will be
used for authenticating the token of a container registry.
:vartype object_id: str
:ivar tenant_id: The tenant ID of user/group/application object Active Directory Object that
will be used for authenticating the token of a container registry.
:vartype tenant_id: str
"""
_attribute_map = {
"object_id": {"key": "objectId", "type": "str"},
"tenant_id": {"key": "tenantId", "type": "str"},
}
def __init__(self, *, object_id: Optional[str] = None, tenant_id: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword object_id: The user/group/application object ID for Active Directory Object that will
be used for authenticating the token of a container registry.
:paramtype object_id: str
:keyword tenant_id: The tenant ID of user/group/application object Active Directory Object that
will be used for authenticating the token of a container registry.
:paramtype tenant_id: str
"""
super().__init__(**kwargs)
self.object_id = object_id
self.tenant_id = tenant_id
class Actor(_serialization.Model):
"""The agent that initiated the event. For most situations, this could be from the authorization
context of the request.
:ivar name: The subject or username associated with the request context that generated the
event.
:vartype name: str
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
}
def __init__(self, *, name: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword name: The subject or username associated with the request context that generated the
event.
:paramtype name: str
"""
super().__init__(**kwargs)
self.name = name
class CallbackConfig(_serialization.Model):
"""The configuration of service URI and custom headers for the webhook.
All required parameters must be populated in order to send to Azure.
:ivar service_uri: The service URI for the webhook to post notifications. Required.
:vartype service_uri: str
:ivar custom_headers: Custom headers that will be added to the webhook notifications.
:vartype custom_headers: dict[str, str]
"""
_validation = {
"service_uri": {"required": True},
}
_attribute_map = {
"service_uri": {"key": "serviceUri", "type": "str"},
"custom_headers": {"key": "customHeaders", "type": "{str}"},
}
def __init__(self, *, service_uri: str, custom_headers: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
"""
:keyword service_uri: The service URI for the webhook to post notifications. Required.
:paramtype service_uri: str
:keyword custom_headers: Custom headers that will be added to the webhook notifications.
:paramtype custom_headers: dict[str, str]
"""
super().__init__(**kwargs)
self.service_uri = service_uri
self.custom_headers = custom_headers
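# A minimal usage sketch (assumption, not part of the generated model): constructing a
# CallbackConfig and serializing it with the base Model helper. The endpoint is a placeholder.
#
# config = CallbackConfig(
#     service_uri="https://example.com/acr/webhook",
#     custom_headers={"X-Custom-Auth": "token"},
# )
# print(config.serialize())  # {'serviceUri': '...', 'customHeaders': {...}}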
class ProxyResource(_serialization.Model):
"""The resource model definition for a ARM proxy resource. It will have everything other than
required location and tags.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class ConnectedRegistry(ProxyResource): # pylint: disable=too-many-instance-attributes
"""An object that represents a connected registry for a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
:ivar provisioning_state: Provisioning state of the resource. Known values are: "Creating",
"Updating", "Deleting", "Succeeded", "Failed", and "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProvisioningState
:ivar mode: The mode of the connected registry resource that indicates the permissions of the
registry. Known values are: "Registry" and "Mirror".
:vartype mode: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ConnectedRegistryMode
:ivar version: The current version of ACR runtime on the connected registry.
:vartype version: str
:ivar connection_state: The current connection state of the connected registry. Known values
are: "Online", "Offline", "Syncing", and "Unhealthy".
:vartype connection_state: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ConnectionState
:ivar last_activity_time: The last activity time of the connected registry.
:vartype last_activity_time: ~datetime.datetime
:ivar activation: The activation properties of the connected registry.
:vartype activation:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ActivationProperties
:ivar parent: The parent of the connected registry.
:vartype parent: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ParentProperties
:ivar client_token_ids: The list of the ACR token resource IDs used to authenticate clients to
the connected registry.
:vartype client_token_ids: list[str]
:ivar login_server: The login server properties of the connected registry.
:vartype login_server:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.LoginServerProperties
:ivar logging: The logging properties of the connected registry.
:vartype logging: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.LoggingProperties
:ivar status_details: The list of current statuses of the connected registry.
:vartype status_details:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.StatusDetailProperties]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"provisioning_state": {"readonly": True},
"version": {"readonly": True},
"connection_state": {"readonly": True},
"last_activity_time": {"readonly": True},
"activation": {"readonly": True},
"status_details": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"mode": {"key": "properties.mode", "type": "str"},
"version": {"key": "properties.version", "type": "str"},
"connection_state": {"key": "properties.connectionState", "type": "str"},
"last_activity_time": {"key": "properties.lastActivityTime", "type": "iso-8601"},
"activation": {"key": "properties.activation", "type": "ActivationProperties"},
"parent": {"key": "properties.parent", "type": "ParentProperties"},
"client_token_ids": {"key": "properties.clientTokenIds", "type": "[str]"},
"login_server": {"key": "properties.loginServer", "type": "LoginServerProperties"},
"logging": {"key": "properties.logging", "type": "LoggingProperties"},
"status_details": {"key": "properties.statusDetails", "type": "[StatusDetailProperties]"},
}
def __init__(
self,
*,
mode: Optional[Union[str, "_models.ConnectedRegistryMode"]] = None,
parent: Optional["_models.ParentProperties"] = None,
client_token_ids: Optional[List[str]] = None,
login_server: Optional["_models.LoginServerProperties"] = None,
logging: Optional["_models.LoggingProperties"] = None,
**kwargs: Any
) -> None:
"""
:keyword mode: The mode of the connected registry resource that indicates the permissions of
the registry. Known values are: "Registry" and "Mirror".
:paramtype mode: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ConnectedRegistryMode
:keyword parent: The parent of the connected registry.
:paramtype parent: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ParentProperties
:keyword client_token_ids: The list of the ACR token resource IDs used to authenticate clients
to the connected registry.
:paramtype client_token_ids: list[str]
:keyword login_server: The login server properties of the connected registry.
:paramtype login_server:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.LoginServerProperties
:keyword logging: The logging properties of the connected registry.
:paramtype logging: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.LoggingProperties
"""
super().__init__(**kwargs)
self.provisioning_state = None
self.mode = mode
self.version = None
self.connection_state = None
self.last_activity_time = None
self.activation = None
self.parent = parent
self.client_token_ids = client_token_ids
self.login_server = login_server
self.logging = logging
self.status_details = None
class ConnectedRegistryListResult(_serialization.Model):
"""The result of a request to list connected registries for a container registry.
:ivar value: The list of connected registries. Since this list may be incomplete, the nextLink
field should be used to request the next list of connected registries.
:vartype value:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.ConnectedRegistry]
:ivar next_link: The URI that can be used to request the next list of connected registries.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[ConnectedRegistry]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.ConnectedRegistry"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: The list of connected registries. Since this list may be incomplete, the
nextLink field should be used to request the next list of connected registries.
:paramtype value:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.ConnectedRegistry]
:keyword next_link: The URI that can be used to request the next list of connected registries.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class ConnectedRegistryUpdateParameters(_serialization.Model):
"""The parameters for updating a connected registry.
:ivar sync_properties: The sync properties of the connected registry with its parent.
:vartype sync_properties:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.SyncUpdateProperties
:ivar logging: The logging properties of the connected registry.
:vartype logging: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.LoggingProperties
:ivar client_token_ids: The list of the ACR token resource IDs used to authenticate clients to
the connected registry.
:vartype client_token_ids: list[str]
"""
_attribute_map = {
"sync_properties": {"key": "properties.syncProperties", "type": "SyncUpdateProperties"},
"logging": {"key": "properties.logging", "type": "LoggingProperties"},
"client_token_ids": {"key": "properties.clientTokenIds", "type": "[str]"},
}
def __init__(
self,
*,
sync_properties: Optional["_models.SyncUpdateProperties"] = None,
logging: Optional["_models.LoggingProperties"] = None,
client_token_ids: Optional[List[str]] = None,
**kwargs: Any
) -> None:
"""
:keyword sync_properties: The sync properties of the connected registry with its parent.
:paramtype sync_properties:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.SyncUpdateProperties
:keyword logging: The logging properties of the connected registry.
:paramtype logging: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.LoggingProperties
:keyword client_token_ids: The list of the ACR token resource IDs used to authenticate clients
to the connected registry.
:paramtype client_token_ids: list[str]
"""
super().__init__(**kwargs)
self.sync_properties = sync_properties
self.logging = logging
self.client_token_ids = client_token_ids
class EncryptionProperty(_serialization.Model):
"""EncryptionProperty.
:ivar status: Indicates whether or not the encryption is enabled for container registry. Known
values are: "enabled" and "disabled".
:vartype status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.EncryptionStatus
:ivar key_vault_properties: Key vault properties.
:vartype key_vault_properties:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.KeyVaultProperties
"""
_attribute_map = {
"status": {"key": "status", "type": "str"},
"key_vault_properties": {"key": "keyVaultProperties", "type": "KeyVaultProperties"},
}
def __init__(
self,
*,
status: Optional[Union[str, "_models.EncryptionStatus"]] = None,
key_vault_properties: Optional["_models.KeyVaultProperties"] = None,
**kwargs: Any
) -> None:
"""
:keyword status: Indicates whether or not the encryption is enabled for container registry.
Known values are: "enabled" and "disabled".
:paramtype status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.EncryptionStatus
:keyword key_vault_properties: Key vault properties.
:paramtype key_vault_properties:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.KeyVaultProperties
"""
super().__init__(**kwargs)
self.status = status
self.key_vault_properties = key_vault_properties
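# A minimal usage sketch (assumption, not part of the generated model): enabling
# customer-managed-key encryption. KeyVaultProperties is defined elsewhere in this module;
# the key identifier is a hypothetical placeholder.
#
# encryption = EncryptionProperty(
#     status="enabled",
#     key_vault_properties=KeyVaultProperties(
#         key_identifier="https://myvault.vault.azure.net/keys/acrkey",
#     ),
# )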
class ErrorResponse(_serialization.Model):
"""An error response from the Azure Container Registry service.
:ivar error: Azure container registry build API error body.
:vartype error: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ErrorResponseBody
"""
_attribute_map = {
"error": {"key": "error", "type": "ErrorResponseBody"},
}
def __init__(self, *, error: Optional["_models.ErrorResponseBody"] = None, **kwargs: Any) -> None:
"""
:keyword error: Azure container registry build API error body.
:paramtype error: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ErrorResponseBody
"""
super().__init__(**kwargs)
self.error = error
class ErrorResponseBody(_serialization.Model):
"""An error response from the Azure Container Registry service.
All required parameters must be populated in order to send to Azure.
:ivar code: error code. Required.
:vartype code: str
:ivar message: error message. Required.
:vartype message: str
:ivar target: target of the particular error.
:vartype target: str
:ivar details: an array of additional nested error response info objects, as described by this
contract.
:vartype details:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.InnerErrorDescription]
"""
_validation = {
"code": {"required": True},
"message": {"required": True},
}
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
"target": {"key": "target", "type": "str"},
"details": {"key": "details", "type": "[InnerErrorDescription]"},
}
def __init__(
self,
*,
code: str,
message: str,
target: Optional[str] = None,
details: Optional[List["_models.InnerErrorDescription"]] = None,
**kwargs: Any
) -> None:
"""
:keyword code: error code. Required.
:paramtype code: str
:keyword message: error message. Required.
:paramtype message: str
:keyword target: target of the particular error.
:paramtype target: str
:keyword details: an array of additional nested error response info objects, as described by
this contract.
:paramtype details:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.InnerErrorDescription]
"""
super().__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details
class EventInfo(_serialization.Model):
"""The basic information of an event.
:ivar id: The event ID.
:vartype id: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
}
def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None: # pylint: disable=redefined-builtin
"""
:keyword id: The event ID.
:paramtype id: str
"""
super().__init__(**kwargs)
self.id = id
class Event(EventInfo):
"""The event for a webhook.
:ivar id: The event ID.
:vartype id: str
:ivar event_request_message: The event request message sent to the service URI.
:vartype event_request_message:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.EventRequestMessage
:ivar event_response_message: The event response message received from the service URI.
:vartype event_response_message:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.EventResponseMessage
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"event_request_message": {"key": "eventRequestMessage", "type": "EventRequestMessage"},
"event_response_message": {"key": "eventResponseMessage", "type": "EventResponseMessage"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
event_request_message: Optional["_models.EventRequestMessage"] = None,
event_response_message: Optional["_models.EventResponseMessage"] = None,
**kwargs: Any
) -> None:
"""
:keyword id: The event ID.
:paramtype id: str
:keyword event_request_message: The event request message sent to the service URI.
:paramtype event_request_message:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.EventRequestMessage
:keyword event_response_message: The event response message received from the service URI.
:paramtype event_response_message:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.EventResponseMessage
"""
super().__init__(id=id, **kwargs)
self.event_request_message = event_request_message
self.event_response_message = event_response_message
class EventContent(_serialization.Model):
"""The content of the event request message.
:ivar id: The event ID.
:vartype id: str
:ivar timestamp: The time at which the event occurred.
:vartype timestamp: ~datetime.datetime
:ivar action: The action that encompasses the provided event.
:vartype action: str
:ivar target: The target of the event.
:vartype target: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Target
:ivar request: The request that generated the event.
:vartype request: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Request
:ivar actor: The agent that initiated the event. For most situations, this could be from the
authorization context of the request.
:vartype actor: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Actor
:ivar source: The registry node that generated the event. Put differently, while the actor
initiates the event, the source generates it.
:vartype source: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Source
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"timestamp": {"key": "timestamp", "type": "iso-8601"},
"action": {"key": "action", "type": "str"},
"target": {"key": "target", "type": "Target"},
"request": {"key": "request", "type": "Request"},
"actor": {"key": "actor", "type": "Actor"},
"source": {"key": "source", "type": "Source"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
timestamp: Optional[datetime.datetime] = None,
action: Optional[str] = None,
target: Optional["_models.Target"] = None,
request: Optional["_models.Request"] = None,
actor: Optional["_models.Actor"] = None,
source: Optional["_models.Source"] = None,
**kwargs: Any
) -> None:
"""
:keyword id: The event ID.
:paramtype id: str
:keyword timestamp: The time at which the event occurred.
:paramtype timestamp: ~datetime.datetime
:keyword action: The action that encompasses the provided event.
:paramtype action: str
:keyword target: The target of the event.
:paramtype target: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Target
:keyword request: The request that generated the event.
:paramtype request: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Request
:keyword actor: The agent that initiated the event. For most situations, this could be from the
authorization context of the request.
:paramtype actor: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Actor
:keyword source: The registry node that generated the event. Put differently, while the actor
initiates the event, the source generates it.
:paramtype source: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Source
"""
super().__init__(**kwargs)
self.id = id
self.timestamp = timestamp
self.action = action
self.target = target
self.request = request
self.actor = actor
self.source = source
class EventListResult(_serialization.Model):
"""The result of a request to list events for a webhook.
:ivar value: The list of events. Since this list may be incomplete, the nextLink field should
be used to request the next list of events.
:vartype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Event]
:ivar next_link: The URI that can be used to request the next list of events.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[Event]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.Event"]] = None, next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword value: The list of events. Since this list may be incomplete, the nextLink field
should be used to request the next list of events.
:paramtype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Event]
:keyword next_link: The URI that can be used to request the next list of events.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class EventRequestMessage(_serialization.Model):
"""The event request message sent to the service URI.
:ivar content: The content of the event request message.
:vartype content: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.EventContent
:ivar headers: The headers of the event request message.
:vartype headers: dict[str, str]
:ivar method: The HTTP method used to send the event request message.
:vartype method: str
:ivar request_uri: The URI used to send the event request message.
:vartype request_uri: str
:ivar version: The HTTP message version.
:vartype version: str
"""
_attribute_map = {
"content": {"key": "content", "type": "EventContent"},
"headers": {"key": "headers", "type": "{str}"},
"method": {"key": "method", "type": "str"},
"request_uri": {"key": "requestUri", "type": "str"},
"version": {"key": "version", "type": "str"},
}
def __init__(
self,
*,
content: Optional["_models.EventContent"] = None,
headers: Optional[Dict[str, str]] = None,
method: Optional[str] = None,
request_uri: Optional[str] = None,
version: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword content: The content of the event request message.
:paramtype content: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.EventContent
:keyword headers: The headers of the event request message.
:paramtype headers: dict[str, str]
:keyword method: The HTTP method used to send the event request message.
:paramtype method: str
:keyword request_uri: The URI used to send the event request message.
:paramtype request_uri: str
:keyword version: The HTTP message version.
:paramtype version: str
"""
super().__init__(**kwargs)
self.content = content
self.headers = headers
self.method = method
self.request_uri = request_uri
self.version = version
class EventResponseMessage(_serialization.Model):
"""The event response message received from the service URI.
:ivar content: The content of the event response message.
:vartype content: str
:ivar headers: The headers of the event response message.
:vartype headers: dict[str, str]
:ivar reason_phrase: The reason phrase of the event response message.
:vartype reason_phrase: str
:ivar status_code: The status code of the event response message.
:vartype status_code: str
:ivar version: The HTTP message version.
:vartype version: str
"""
_attribute_map = {
"content": {"key": "content", "type": "str"},
"headers": {"key": "headers", "type": "{str}"},
"reason_phrase": {"key": "reasonPhrase", "type": "str"},
"status_code": {"key": "statusCode", "type": "str"},
"version": {"key": "version", "type": "str"},
}
def __init__(
self,
*,
content: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
reason_phrase: Optional[str] = None,
status_code: Optional[str] = None,
version: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword content: The content of the event response message.
:paramtype content: str
:keyword headers: The headers of the event response message.
:paramtype headers: dict[str, str]
:keyword reason_phrase: The reason phrase of the event response message.
:paramtype reason_phrase: str
:keyword status_code: The status code of the event response message.
:paramtype status_code: str
:keyword version: The HTTP message version.
:paramtype version: str
"""
super().__init__(**kwargs)
self.content = content
self.headers = headers
self.reason_phrase = reason_phrase
self.status_code = status_code
self.version = version
class ExportPipeline(ProxyResource):
"""An object that represents an export pipeline for a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
:ivar location: The location of the export pipeline.
:vartype location: str
:ivar identity: The identity of the export pipeline.
:vartype identity: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.IdentityProperties
:ivar target: The target properties of the export pipeline.
:vartype target:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ExportPipelineTargetProperties
:ivar options: The list of all options configured for the pipeline.
:vartype options: list[str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineOptions]
:ivar provisioning_state: The provisioning state of the pipeline at the time the operation was
called. Known values are: "Creating", "Updating", "Deleting", "Succeeded", "Failed", and
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProvisioningState
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"provisioning_state": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"location": {"key": "location", "type": "str"},
"identity": {"key": "identity", "type": "IdentityProperties"},
"target": {"key": "properties.target", "type": "ExportPipelineTargetProperties"},
"options": {"key": "properties.options", "type": "[str]"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
}
def __init__(
self,
*,
location: Optional[str] = None,
identity: Optional["_models.IdentityProperties"] = None,
target: Optional["_models.ExportPipelineTargetProperties"] = None,
options: Optional[List[Union[str, "_models.PipelineOptions"]]] = None,
**kwargs: Any
) -> None:
"""
:keyword location: The location of the export pipeline.
:paramtype location: str
:keyword identity: The identity of the export pipeline.
:paramtype identity:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.IdentityProperties
:keyword target: The target properties of the export pipeline.
:paramtype target:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ExportPipelineTargetProperties
:keyword options: The list of all options configured for the pipeline.
:paramtype options: list[str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineOptions]
"""
super().__init__(**kwargs)
self.location = location
self.identity = identity
self.target = target
self.options = options
self.provisioning_state = None
class ExportPipelineListResult(_serialization.Model):
"""The result of a request to list export pipelines for a container registry.
:ivar value: The list of export pipelines. Since this list may be incomplete, the nextLink
field should be used to request the next list of export pipelines.
:vartype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.ExportPipeline]
:ivar next_link: The URI that can be used to request the next list of pipeline runs.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[ExportPipeline]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.ExportPipeline"]] = None, next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword value: The list of export pipelines. Since this list may be incomplete, the nextLink
field should be used to request the next list of export pipelines.
:paramtype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.ExportPipeline]
:keyword next_link: The URI that can be used to request the next list of pipeline runs.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class ExportPipelineTargetProperties(_serialization.Model):
"""The properties of the export pipeline target.
All required parameters must be populated in order to send to Azure.
:ivar type: The type of target for the export pipeline.
:vartype type: str
:ivar uri: The target uri of the export pipeline.
When 'AzureStorageBlob': "https://accountName.blob.core.windows.net/containerName/blobName"
When 'AzureStorageBlobContainer': "https://accountName.blob.core.windows.net/containerName".
:vartype uri: str
    :ivar key_vault_uri: The key vault secret uri to obtain the target storage SAS token.
Required.
:vartype key_vault_uri: str
"""
_validation = {
"key_vault_uri": {"required": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"uri": {"key": "uri", "type": "str"},
"key_vault_uri": {"key": "keyVaultUri", "type": "str"},
}
def __init__(
self, *, key_vault_uri: str, type: Optional[str] = None, uri: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword type: The type of target for the export pipeline.
:paramtype type: str
:keyword uri: The target uri of the export pipeline.
When 'AzureStorageBlob': "https://accountName.blob.core.windows.net/containerName/blobName"
When 'AzureStorageBlobContainer': "https://accountName.blob.core.windows.net/containerName".
:paramtype uri: str
        :keyword key_vault_uri: The key vault secret uri to obtain the target storage SAS token.
Required.
:paramtype key_vault_uri: str
"""
super().__init__(**kwargs)
self.type = type
self.uri = uri
self.key_vault_uri = key_vault_uri
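# A minimal usage sketch (assumption, not part of the generated model): an export target
# pointing at a blob container, with the SAS token kept in Key Vault. Both URIs are
# hypothetical placeholders.
#
# target = ExportPipelineTargetProperties(
#     type="AzureStorageBlobContainer",
#     uri="https://accountName.blob.core.windows.net/containerName",
#     key_vault_uri="https://myvault.vault.azure.net/secrets/acrexportsas",
# )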
class ExportPolicy(_serialization.Model):
"""The export policy for a container registry.
:ivar status: The value that indicates whether the policy is enabled or not. Known values are:
"enabled" and "disabled".
:vartype status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ExportPolicyStatus
"""
_attribute_map = {
"status": {"key": "status", "type": "str"},
}
def __init__(self, *, status: Union[str, "_models.ExportPolicyStatus"] = "enabled", **kwargs: Any) -> None:
"""
:keyword status: The value that indicates whether the policy is enabled or not. Known values
are: "enabled" and "disabled".
:paramtype status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ExportPolicyStatus
"""
super().__init__(**kwargs)
self.status = status
class GenerateCredentialsParameters(_serialization.Model):
"""The parameters used to generate credentials for a specified token or user of a container
registry.
:ivar token_id: The resource ID of the token for which credentials have to be generated.
:vartype token_id: str
:ivar expiry: The expiry date of the generated credentials after which the credentials become
invalid.
:vartype expiry: ~datetime.datetime
:ivar name: Specifies name of the password which should be regenerated if any -- password1 or
password2. Known values are: "password1" and "password2".
:vartype name: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenPasswordName
"""
_attribute_map = {
"token_id": {"key": "tokenId", "type": "str"},
"expiry": {"key": "expiry", "type": "iso-8601"},
"name": {"key": "name", "type": "str"},
}
def __init__(
self,
*,
token_id: Optional[str] = None,
expiry: Optional[datetime.datetime] = None,
name: Optional[Union[str, "_models.TokenPasswordName"]] = None,
**kwargs: Any
) -> None:
"""
:keyword token_id: The resource ID of the token for which credentials have to be generated.
:paramtype token_id: str
:keyword expiry: The expiry date of the generated credentials after which the credentials
become invalid.
:paramtype expiry: ~datetime.datetime
:keyword name: Specifies name of the password which should be regenerated if any -- password1
or password2. Known values are: "password1" and "password2".
:paramtype name: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenPasswordName
"""
super().__init__(**kwargs)
self.token_id = token_id
self.expiry = expiry
self.name = name
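# A minimal usage sketch (assumption, not part of the generated model): regenerate password1
# for a token, valid for 30 days; reuses the module-level `datetime` import. The token
# resource ID is a hypothetical placeholder.
#
# params = GenerateCredentialsParameters(
#     token_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.ContainerRegistry/registries/myacr/tokens/mytoken",
#     expiry=datetime.datetime.utcnow() + datetime.timedelta(days=30),
#     name="password1",
# )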
class GenerateCredentialsResult(_serialization.Model):
"""The response from the GenerateCredentials operation.
:ivar username: The username for a container registry.
:vartype username: str
:ivar passwords: The list of passwords for a container registry.
:vartype passwords:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenPassword]
"""
_attribute_map = {
"username": {"key": "username", "type": "str"},
"passwords": {"key": "passwords", "type": "[TokenPassword]"},
}
def __init__(
self,
*,
username: Optional[str] = None,
passwords: Optional[List["_models.TokenPassword"]] = None,
**kwargs: Any
) -> None:
"""
:keyword username: The username for a container registry.
:paramtype username: str
:keyword passwords: The list of passwords for a container registry.
:paramtype passwords:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenPassword]
"""
super().__init__(**kwargs)
self.username = username
self.passwords = passwords
class IdentityProperties(_serialization.Model):
"""Managed identity for the resource.
:ivar principal_id: The principal ID of resource identity.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of resource.
:vartype tenant_id: str
:ivar type: The identity type. Known values are: "SystemAssigned", "UserAssigned",
"SystemAssigned, UserAssigned", and "None".
:vartype type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ResourceIdentityType
:ivar user_assigned_identities: The list of user identities associated with the resource. The
user identity
dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/
providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:vartype user_assigned_identities: dict[str,
~azure.mgmt.containerregistry.v2021_06_01_preview.models.UserIdentityProperties]
"""
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"tenant_id": {"key": "tenantId", "type": "str"},
"type": {"key": "type", "type": "str"},
"user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserIdentityProperties}"},
}
def __init__(
self,
*,
principal_id: Optional[str] = None,
tenant_id: Optional[str] = None,
type: Optional[Union[str, "_models.ResourceIdentityType"]] = None,
user_assigned_identities: Optional[Dict[str, "_models.UserIdentityProperties"]] = None,
**kwargs: Any
) -> None:
"""
:keyword principal_id: The principal ID of resource identity.
:paramtype principal_id: str
:keyword tenant_id: The tenant ID of resource.
:paramtype tenant_id: str
:keyword type: The identity type. Known values are: "SystemAssigned", "UserAssigned",
"SystemAssigned, UserAssigned", and "None".
:paramtype type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ResourceIdentityType
:keyword user_assigned_identities: The list of user identities associated with the resource.
The user identity
dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/
providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:paramtype user_assigned_identities: dict[str,
~azure.mgmt.containerregistry.v2021_06_01_preview.models.UserIdentityProperties]
"""
super().__init__(**kwargs)
self.principal_id = principal_id
self.tenant_id = tenant_id
self.type = type
self.user_assigned_identities = user_assigned_identities
class ImportImageParameters(_serialization.Model):
"""ImportImageParameters.
All required parameters must be populated in order to send to Azure.
:ivar source: The source of the image. Required.
:vartype source: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportSource
:ivar target_tags: List of strings of the form repo[:tag]. When tag is omitted the source will
be used (or 'latest' if source tag is also omitted).
:vartype target_tags: list[str]
:ivar untagged_target_repositories: List of strings of repository names to do a manifest only
copy. No tag will be created.
:vartype untagged_target_repositories: list[str]
:ivar mode: When Force, any existing target tags will be overwritten. When NoForce, any
existing target tags will fail the operation before any copying begins. Known values are:
"NoForce" and "Force".
:vartype mode: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportMode
"""
_validation = {
"source": {"required": True},
}
_attribute_map = {
"source": {"key": "source", "type": "ImportSource"},
"target_tags": {"key": "targetTags", "type": "[str]"},
"untagged_target_repositories": {"key": "untaggedTargetRepositories", "type": "[str]"},
"mode": {"key": "mode", "type": "str"},
}
def __init__(
self,
*,
source: "_models.ImportSource",
target_tags: Optional[List[str]] = None,
untagged_target_repositories: Optional[List[str]] = None,
mode: Union[str, "_models.ImportMode"] = "NoForce",
**kwargs: Any
) -> None:
"""
:keyword source: The source of the image. Required.
:paramtype source: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportSource
:keyword target_tags: List of strings of the form repo[:tag]. When tag is omitted the source
will be used (or 'latest' if source tag is also omitted).
:paramtype target_tags: list[str]
:keyword untagged_target_repositories: List of strings of repository names to do a manifest
only copy. No tag will be created.
:paramtype untagged_target_repositories: list[str]
:keyword mode: When Force, any existing target tags will be overwritten. When NoForce, any
existing target tags will fail the operation before any copying begins. Known values are:
"NoForce" and "Force".
:paramtype mode: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportMode
"""
super().__init__(**kwargs)
self.source = source
self.target_tags = target_tags
self.untagged_target_repositories = untagged_target_repositories
self.mode = mode
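# A minimal usage sketch (assumption, not part of the generated model): import a public image
# into the registry, overwriting any existing target tag. ImportSource is defined elsewhere in
# this module; the image and tag names are placeholders.
#
# params = ImportImageParameters(
#     source=ImportSource(registry_uri="docker.io", source_image="library/hello-world:latest"),
#     target_tags=["hello-world:latest"],
#     mode="Force",
# )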
class ImportPipeline(ProxyResource):
"""An object that represents an import pipeline for a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
:ivar location: The location of the import pipeline.
:vartype location: str
:ivar identity: The identity of the import pipeline.
:vartype identity: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.IdentityProperties
:ivar source: The source properties of the import pipeline.
:vartype source:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportPipelineSourceProperties
:ivar trigger: The properties that describe the trigger of the import pipeline.
:vartype trigger:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineTriggerProperties
:ivar options: The list of all options configured for the pipeline.
:vartype options: list[str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineOptions]
:ivar provisioning_state: The provisioning state of the pipeline at the time the operation was
called. Known values are: "Creating", "Updating", "Deleting", "Succeeded", "Failed", and
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProvisioningState
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"provisioning_state": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"location": {"key": "location", "type": "str"},
"identity": {"key": "identity", "type": "IdentityProperties"},
"source": {"key": "properties.source", "type": "ImportPipelineSourceProperties"},
"trigger": {"key": "properties.trigger", "type": "PipelineTriggerProperties"},
"options": {"key": "properties.options", "type": "[str]"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
}
def __init__(
self,
*,
location: Optional[str] = None,
identity: Optional["_models.IdentityProperties"] = None,
source: Optional["_models.ImportPipelineSourceProperties"] = None,
trigger: Optional["_models.PipelineTriggerProperties"] = None,
options: Optional[List[Union[str, "_models.PipelineOptions"]]] = None,
**kwargs: Any
) -> None:
"""
:keyword location: The location of the import pipeline.
:paramtype location: str
:keyword identity: The identity of the import pipeline.
:paramtype identity:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.IdentityProperties
:keyword source: The source properties of the import pipeline.
:paramtype source:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportPipelineSourceProperties
:keyword trigger: The properties that describe the trigger of the import pipeline.
:paramtype trigger:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineTriggerProperties
:keyword options: The list of all options configured for the pipeline.
:paramtype options: list[str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineOptions]
"""
super().__init__(**kwargs)
self.location = location
self.identity = identity
self.source = source
self.trigger = trigger
self.options = options
self.provisioning_state = None
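# Illustrative sketch: assembling an ImportPipeline payload. The vault, storage
# account, and option strings are placeholders chosen for the example, not live
# resources; the option values are assumed members of PipelineOptions.
def _example_import_pipeline() -> "ImportPipeline":
    """Build an import pipeline that pulls artifacts from a blob container."""
    return ImportPipeline(
        location="westus",
        source=ImportPipelineSourceProperties(
            # The SAS token for the source storage account is read from this secret.
            key_vault_uri="https://myvault.vault.azure.net/secrets/acrimportsas",
            uri="https://myaccount.blob.core.windows.net/importcontainer",
        ),
        trigger=PipelineTriggerProperties(
            source_trigger=PipelineSourceTriggerProperties(status="Enabled")
        ),
        # PipelineOptions values serialize as plain strings.
        options=["OverwriteTags", "DeleteSourceBlobOnSuccess"],
    )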
class ImportPipelineListResult(_serialization.Model):
"""The result of a request to list import pipelines for a container registry.
:ivar value: The list of import pipelines. Since this list may be incomplete, the nextLink
field should be used to request the next list of import pipelines.
:vartype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportPipeline]
:ivar next_link: The URI that can be used to request the next list of pipeline runs.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[ImportPipeline]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.ImportPipeline"]] = None, next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword value: The list of import pipelines. Since this list may be incomplete, the nextLink
field should be used to request the next list of import pipelines.
:paramtype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportPipeline]
:keyword next_link: The URI that can be used to request the next list of pipeline runs.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class ImportPipelineSourceProperties(_serialization.Model):
"""The properties of the import pipeline source.
All required parameters must be populated in order to send to Azure.
:ivar type: The type of source for the import pipeline. "AzureStorageBlobContainer"
:vartype type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineSourceType
:ivar uri: The source uri of the import pipeline.
When 'AzureStorageBlob': "https://accountName.blob.core.windows.net/containerName/blobName"
When 'AzureStorageBlobContainer': "https://accountName.blob.core.windows.net/containerName".
:vartype uri: str
    :ivar key_vault_uri: The key vault secret uri to obtain the source storage SAS token.
Required.
:vartype key_vault_uri: str
"""
_validation = {
"key_vault_uri": {"required": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"uri": {"key": "uri", "type": "str"},
"key_vault_uri": {"key": "keyVaultUri", "type": "str"},
}
def __init__(
self,
*,
key_vault_uri: str,
type: Union[str, "_models.PipelineSourceType"] = "AzureStorageBlobContainer",
uri: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword type: The type of source for the import pipeline. "AzureStorageBlobContainer"
:paramtype type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineSourceType
:keyword uri: The source uri of the import pipeline.
When 'AzureStorageBlob': "https://accountName.blob.core.windows.net/containerName/blobName"
When 'AzureStorageBlobContainer': "https://accountName.blob.core.windows.net/containerName".
:paramtype uri: str
        :keyword key_vault_uri: The key vault secret uri to obtain the source storage SAS token.
Required.
:paramtype key_vault_uri: str
"""
super().__init__(**kwargs)
self.type = type
self.uri = uri
self.key_vault_uri = key_vault_uri
class ImportSource(_serialization.Model):
"""ImportSource.
All required parameters must be populated in order to send to Azure.
:ivar resource_id: The resource identifier of the source Azure Container Registry.
:vartype resource_id: str
:ivar registry_uri: The address of the source registry (e.g. 'mcr.microsoft.com').
:vartype registry_uri: str
:ivar credentials: Credentials used when importing from a registry uri.
:vartype credentials:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportSourceCredentials
:ivar source_image: Repository name of the source image.
Specify an image by repository ('hello-world'). This will use the 'latest' tag.
Specify an image by tag ('hello-world:latest').
Specify an image by sha256-based manifest digest ('hello-world@sha256:abc123'). Required.
:vartype source_image: str
"""
_validation = {
"source_image": {"required": True},
}
_attribute_map = {
"resource_id": {"key": "resourceId", "type": "str"},
"registry_uri": {"key": "registryUri", "type": "str"},
"credentials": {"key": "credentials", "type": "ImportSourceCredentials"},
"source_image": {"key": "sourceImage", "type": "str"},
}
def __init__(
self,
*,
source_image: str,
resource_id: Optional[str] = None,
registry_uri: Optional[str] = None,
credentials: Optional["_models.ImportSourceCredentials"] = None,
**kwargs: Any
) -> None:
"""
:keyword resource_id: The resource identifier of the source Azure Container Registry.
:paramtype resource_id: str
:keyword registry_uri: The address of the source registry (e.g. 'mcr.microsoft.com').
:paramtype registry_uri: str
:keyword credentials: Credentials used when importing from a registry uri.
:paramtype credentials:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportSourceCredentials
:keyword source_image: Repository name of the source image.
Specify an image by repository ('hello-world'). This will use the 'latest' tag.
Specify an image by tag ('hello-world:latest').
Specify an image by sha256-based manifest digest ('hello-world@sha256:abc123'). Required.
:paramtype source_image: str
"""
super().__init__(**kwargs)
self.resource_id = resource_id
self.registry_uri = registry_uri
self.credentials = credentials
self.source_image = source_image
class ImportSourceCredentials(_serialization.Model):
"""ImportSourceCredentials.
All required parameters must be populated in order to send to Azure.
:ivar username: The username to authenticate with the source registry.
:vartype username: str
:ivar password: The password used to authenticate with the source registry. Required.
:vartype password: str
"""
_validation = {
"password": {"required": True},
}
_attribute_map = {
"username": {"key": "username", "type": "str"},
"password": {"key": "password", "type": "str"},
}
def __init__(self, *, password: str, username: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword username: The username to authenticate with the source registry.
:paramtype username: str
:keyword password: The password used to authenticate with the source registry. Required.
:paramtype password: str
"""
super().__init__(**kwargs)
self.username = username
self.password = password
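# Illustrative sketch: an ImportSource for a private external registry, with
# basic credentials. Endpoint, repository, and secret values are placeholders.
def _example_authenticated_import_source() -> "ImportSource":
    """Describe a digest-pinned source image on a registry that needs a login."""
    return ImportSource(
        # Pin the exact manifest rather than a mutable tag.
        source_image="team/app@sha256:abc123",
        registry_uri="registry.example.com",
        credentials=ImportSourceCredentials(username="importer", password="<secret>"),
    )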
class InnerErrorDescription(_serialization.Model):
"""inner error.
All required parameters must be populated in order to send to Azure.
:ivar code: error code. Required.
:vartype code: str
:ivar message: error message. Required.
:vartype message: str
:ivar target: target of the particular error.
:vartype target: str
"""
_validation = {
"code": {"required": True},
"message": {"required": True},
}
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
"target": {"key": "target", "type": "str"},
}
def __init__(self, *, code: str, message: str, target: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword code: error code. Required.
:paramtype code: str
:keyword message: error message. Required.
:paramtype message: str
:keyword target: target of the particular error.
:paramtype target: str
"""
super().__init__(**kwargs)
self.code = code
self.message = message
self.target = target
class IPRule(_serialization.Model):
"""IP rule with specific IP or IP range in CIDR format.
All required parameters must be populated in order to send to Azure.
:ivar action: The action of IP ACL rule. "Allow"
:vartype action: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Action
    :ivar ip_address_or_range: Specifies the IP or IP range in CIDR format. Only IPv4 addresses
     are allowed. Required.
:vartype ip_address_or_range: str
"""
_validation = {
"ip_address_or_range": {"required": True},
}
_attribute_map = {
"action": {"key": "action", "type": "str"},
"ip_address_or_range": {"key": "value", "type": "str"},
}
def __init__(
self, *, ip_address_or_range: str, action: Optional[Union[str, "_models.Action"]] = None, **kwargs: Any
) -> None:
"""
:keyword action: The action of IP ACL rule. "Allow"
:paramtype action: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Action
        :keyword ip_address_or_range: Specifies the IP or IP range in CIDR format. Only IPv4
         addresses are allowed. Required.
:paramtype ip_address_or_range: str
"""
super().__init__(**kwargs)
self.action = action
self.ip_address_or_range = ip_address_or_range
class KeyVaultProperties(_serialization.Model):
"""KeyVaultProperties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar key_identifier: Key vault uri to access the encryption key.
:vartype key_identifier: str
:ivar versioned_key_identifier: The fully qualified key identifier that includes the version of
the key that is actually used for encryption.
:vartype versioned_key_identifier: str
    :ivar identity: The client id of the identity which will be used to access the key vault.
:vartype identity: str
:ivar key_rotation_enabled: Auto key rotation status for a CMK enabled registry.
:vartype key_rotation_enabled: bool
:ivar last_key_rotation_timestamp: Timestamp of the last successful key rotation.
:vartype last_key_rotation_timestamp: ~datetime.datetime
"""
_validation = {
"versioned_key_identifier": {"readonly": True},
"key_rotation_enabled": {"readonly": True},
"last_key_rotation_timestamp": {"readonly": True},
}
_attribute_map = {
"key_identifier": {"key": "keyIdentifier", "type": "str"},
"versioned_key_identifier": {"key": "versionedKeyIdentifier", "type": "str"},
"identity": {"key": "identity", "type": "str"},
"key_rotation_enabled": {"key": "keyRotationEnabled", "type": "bool"},
"last_key_rotation_timestamp": {"key": "lastKeyRotationTimestamp", "type": "iso-8601"},
}
def __init__(self, *, key_identifier: Optional[str] = None, identity: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword key_identifier: Key vault uri to access the encryption key.
:paramtype key_identifier: str
        :keyword identity: The client id of the identity which will be used to access the key
         vault.
:paramtype identity: str
"""
super().__init__(**kwargs)
self.key_identifier = key_identifier
self.versioned_key_identifier = None
self.identity = identity
self.key_rotation_enabled = None
self.last_key_rotation_timestamp = None
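# Illustrative sketch: the client-settable customer-managed-key fields. The
# versioned identifier and rotation fields above are server-populated and are
# therefore not passed here.
def _example_key_vault_properties() -> "KeyVaultProperties":
    """Point encryption at a key vault key via a user-assigned identity."""
    return KeyVaultProperties(
        key_identifier="https://myvault.vault.azure.net/keys/acr-cmk",
        # Client id of the identity allowed to access the vault (placeholder GUID).
        identity="00000000-0000-0000-0000-000000000000",
    )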
class LoggingProperties(_serialization.Model):
"""The logging properties of the connected registry.
:ivar log_level: The verbosity of logs persisted on the connected registry. Known values are:
"Debug", "Information", "Warning", "Error", and "None".
:vartype log_level: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.LogLevel
:ivar audit_log_status: Indicates whether audit logs are enabled on the connected registry.
Known values are: "Enabled" and "Disabled".
:vartype audit_log_status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.AuditLogStatus
"""
_attribute_map = {
"log_level": {"key": "logLevel", "type": "str"},
"audit_log_status": {"key": "auditLogStatus", "type": "str"},
}
def __init__(
self,
*,
log_level: Union[str, "_models.LogLevel"] = "Information",
audit_log_status: Union[str, "_models.AuditLogStatus"] = "Disabled",
**kwargs: Any
) -> None:
"""
:keyword log_level: The verbosity of logs persisted on the connected registry. Known values
are: "Debug", "Information", "Warning", "Error", and "None".
:paramtype log_level: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.LogLevel
:keyword audit_log_status: Indicates whether audit logs are enabled on the connected registry.
Known values are: "Enabled" and "Disabled".
:paramtype audit_log_status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.AuditLogStatus
"""
super().__init__(**kwargs)
self.log_level = log_level
self.audit_log_status = audit_log_status
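# Illustrative sketch: overriding both logging defaults of the connected
# registry ('Information' log level, audit logs 'Disabled').
def _example_logging_properties() -> "LoggingProperties":
    """Request debug-level logs with audit logging turned on."""
    return LoggingProperties(log_level="Debug", audit_log_status="Enabled")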
class LoginServerProperties(_serialization.Model):
"""The login server properties of the connected registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar host: The host of the connected registry. Can be FQDN or IP.
:vartype host: str
:ivar tls: The TLS properties of the connected registry login server.
:vartype tls: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.TlsProperties
"""
_validation = {
"host": {"readonly": True},
"tls": {"readonly": True},
}
_attribute_map = {
"host": {"key": "host", "type": "str"},
"tls": {"key": "tls", "type": "TlsProperties"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.host = None
self.tls = None
class NetworkRuleSet(_serialization.Model):
"""The network rule set for a container registry.
All required parameters must be populated in order to send to Azure.
:ivar default_action: The default action of allow or deny when no other rules match. Known
values are: "Allow" and "Deny".
:vartype default_action: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.DefaultAction
:ivar virtual_network_rules: The virtual network rules.
:vartype virtual_network_rules:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.VirtualNetworkRule]
:ivar ip_rules: The IP ACL rules.
:vartype ip_rules: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.IPRule]
"""
_validation = {
"default_action": {"required": True},
}
_attribute_map = {
"default_action": {"key": "defaultAction", "type": "str"},
"virtual_network_rules": {"key": "virtualNetworkRules", "type": "[VirtualNetworkRule]"},
"ip_rules": {"key": "ipRules", "type": "[IPRule]"},
}
def __init__(
self,
*,
default_action: Union[str, "_models.DefaultAction"] = "Allow",
virtual_network_rules: Optional[List["_models.VirtualNetworkRule"]] = None,
ip_rules: Optional[List["_models.IPRule"]] = None,
**kwargs: Any
) -> None:
"""
:keyword default_action: The default action of allow or deny when no other rules match. Known
values are: "Allow" and "Deny".
:paramtype default_action: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.DefaultAction
:keyword virtual_network_rules: The virtual network rules.
:paramtype virtual_network_rules:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.VirtualNetworkRule]
:keyword ip_rules: The IP ACL rules.
:paramtype ip_rules: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.IPRule]
"""
super().__init__(**kwargs)
self.default_action = default_action
self.virtual_network_rules = virtual_network_rules
self.ip_rules = ip_rules
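# Illustrative sketch: a deny-by-default network rule set that allows a single
# IPv4 CIDR block (203.0.113.0/24 is a documentation-only address range).
def _example_network_rule_set() -> "NetworkRuleSet":
    """Deny all traffic except the listed address range."""
    return NetworkRuleSet(
        default_action="Deny",
        ip_rules=[IPRule(ip_address_or_range="203.0.113.0/24", action="Allow")],
    )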
class OperationDefinition(_serialization.Model):
"""The definition of a container registry operation.
:ivar origin: The origin information of the container registry operation.
:vartype origin: str
:ivar name: Operation name: {provider}/{resource}/{operation}.
:vartype name: str
:ivar display: The display information for the container registry operation.
:vartype display:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.OperationDisplayDefinition
    :ivar is_data_action: This property indicates whether the operation is an action or a data
     action. See
     https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#management-and-data-operations.
:vartype is_data_action: bool
:ivar service_specification: The definition of Azure Monitoring service.
:vartype service_specification:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.OperationServiceSpecificationDefinition
"""
_attribute_map = {
"origin": {"key": "origin", "type": "str"},
"name": {"key": "name", "type": "str"},
"display": {"key": "display", "type": "OperationDisplayDefinition"},
"is_data_action": {"key": "isDataAction", "type": "bool"},
"service_specification": {
"key": "properties.serviceSpecification",
"type": "OperationServiceSpecificationDefinition",
},
}
def __init__(
self,
*,
origin: Optional[str] = None,
name: Optional[str] = None,
display: Optional["_models.OperationDisplayDefinition"] = None,
is_data_action: Optional[bool] = None,
service_specification: Optional["_models.OperationServiceSpecificationDefinition"] = None,
**kwargs: Any
) -> None:
"""
:keyword origin: The origin information of the container registry operation.
:paramtype origin: str
:keyword name: Operation name: {provider}/{resource}/{operation}.
:paramtype name: str
:keyword display: The display information for the container registry operation.
:paramtype display:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.OperationDisplayDefinition
        :keyword is_data_action: This property indicates whether the operation is an action or a
         data action. See
         https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#management-and-data-operations.
:paramtype is_data_action: bool
:keyword service_specification: The definition of Azure Monitoring service.
:paramtype service_specification:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.OperationServiceSpecificationDefinition
"""
super().__init__(**kwargs)
self.origin = origin
self.name = name
self.display = display
self.is_data_action = is_data_action
self.service_specification = service_specification
class OperationDisplayDefinition(_serialization.Model):
"""The display information for a container registry operation.
:ivar provider: The resource provider name: Microsoft.ContainerRegistry.
:vartype provider: str
:ivar resource: The resource on which the operation is performed.
:vartype resource: str
:ivar operation: The operation that users can perform.
:vartype operation: str
:ivar description: The description for the operation.
:vartype description: str
"""
_attribute_map = {
"provider": {"key": "provider", "type": "str"},
"resource": {"key": "resource", "type": "str"},
"operation": {"key": "operation", "type": "str"},
"description": {"key": "description", "type": "str"},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword provider: The resource provider name: Microsoft.ContainerRegistry.
:paramtype provider: str
:keyword resource: The resource on which the operation is performed.
:paramtype resource: str
:keyword operation: The operation that users can perform.
:paramtype operation: str
:keyword description: The description for the operation.
:paramtype description: str
"""
super().__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationListResult(_serialization.Model):
"""The result of a request to list container registry operations.
:ivar value: The list of container registry operations. Since this list may be incomplete, the
nextLink field should be used to request the next list of operations.
:vartype value:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.OperationDefinition]
:ivar next_link: The URI that can be used to request the next list of container registry
operations.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[OperationDefinition]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.OperationDefinition"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: The list of container registry operations. Since this list may be incomplete,
the nextLink field should be used to request the next list of operations.
:paramtype value:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.OperationDefinition]
:keyword next_link: The URI that can be used to request the next list of container registry
operations.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class OperationLogSpecificationDefinition(_serialization.Model):
"""The definition of Azure Monitoring log.
:ivar name: Log name.
:vartype name: str
:ivar display_name: Log display name.
:vartype display_name: str
:ivar blob_duration: Log blob duration.
:vartype blob_duration: str
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"blob_duration": {"key": "blobDuration", "type": "str"},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
blob_duration: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword name: Log name.
:paramtype name: str
:keyword display_name: Log display name.
:paramtype display_name: str
:keyword blob_duration: Log blob duration.
:paramtype blob_duration: str
"""
super().__init__(**kwargs)
self.name = name
self.display_name = display_name
self.blob_duration = blob_duration
class OperationMetricSpecificationDefinition(_serialization.Model):
"""The definition of Azure Monitoring metric.
:ivar name: Metric name.
:vartype name: str
:ivar display_name: Metric display name.
:vartype display_name: str
:ivar display_description: Metric description.
:vartype display_description: str
:ivar unit: Metric unit.
:vartype unit: str
:ivar aggregation_type: Metric aggregation type.
:vartype aggregation_type: str
:ivar internal_metric_name: Internal metric name.
:vartype internal_metric_name: str
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"display_description": {"key": "displayDescription", "type": "str"},
"unit": {"key": "unit", "type": "str"},
"aggregation_type": {"key": "aggregationType", "type": "str"},
"internal_metric_name": {"key": "internalMetricName", "type": "str"},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
display_description: Optional[str] = None,
unit: Optional[str] = None,
aggregation_type: Optional[str] = None,
internal_metric_name: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword name: Metric name.
:paramtype name: str
:keyword display_name: Metric display name.
:paramtype display_name: str
:keyword display_description: Metric description.
:paramtype display_description: str
:keyword unit: Metric unit.
:paramtype unit: str
:keyword aggregation_type: Metric aggregation type.
:paramtype aggregation_type: str
:keyword internal_metric_name: Internal metric name.
:paramtype internal_metric_name: str
"""
super().__init__(**kwargs)
self.name = name
self.display_name = display_name
self.display_description = display_description
self.unit = unit
self.aggregation_type = aggregation_type
self.internal_metric_name = internal_metric_name
class OperationServiceSpecificationDefinition(_serialization.Model):
"""The definition of Azure Monitoring list.
:ivar metric_specifications: A list of Azure Monitoring metrics definition.
:vartype metric_specifications:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.OperationMetricSpecificationDefinition]
:ivar log_specifications: A list of Azure Monitoring log definitions.
:vartype log_specifications:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.OperationLogSpecificationDefinition]
"""
_attribute_map = {
"metric_specifications": {"key": "metricSpecifications", "type": "[OperationMetricSpecificationDefinition]"},
"log_specifications": {"key": "logSpecifications", "type": "[OperationLogSpecificationDefinition]"},
}
def __init__(
self,
*,
metric_specifications: Optional[List["_models.OperationMetricSpecificationDefinition"]] = None,
log_specifications: Optional[List["_models.OperationLogSpecificationDefinition"]] = None,
**kwargs: Any
) -> None:
"""
:keyword metric_specifications: A list of Azure Monitoring metrics definition.
:paramtype metric_specifications:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.OperationMetricSpecificationDefinition]
:keyword log_specifications: A list of Azure Monitoring log definitions.
:paramtype log_specifications:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.OperationLogSpecificationDefinition]
"""
super().__init__(**kwargs)
self.metric_specifications = metric_specifications
self.log_specifications = log_specifications
class ParentProperties(_serialization.Model):
"""The properties of the connected registry parent.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource ID of the parent to which the connected registry will be associated.
:vartype id: str
:ivar sync_properties: The sync properties of the connected registry with its parent. Required.
:vartype sync_properties:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.SyncProperties
"""
_validation = {
"sync_properties": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"sync_properties": {"key": "syncProperties", "type": "SyncProperties"},
}
def __init__(
self,
*,
sync_properties: "_models.SyncProperties",
id: Optional[str] = None, # pylint: disable=redefined-builtin
**kwargs: Any
) -> None:
"""
:keyword id: The resource ID of the parent to which the connected registry will be associated.
:paramtype id: str
:keyword sync_properties: The sync properties of the connected registry with its parent.
Required.
:paramtype sync_properties:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.SyncProperties
"""
super().__init__(**kwargs)
self.id = id
self.sync_properties = sync_properties
class PipelineRun(ProxyResource):
"""An object that represents a pipeline run for a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
:ivar provisioning_state: The provisioning state of a pipeline run. Known values are:
"Creating", "Updating", "Deleting", "Succeeded", "Failed", and "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProvisioningState
:ivar request: The request parameters for a pipeline run.
:vartype request: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunRequest
:ivar response: The response of a pipeline run.
:vartype response: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunResponse
    :ivar force_update_tag: A value that, when changed, forces the pipeline run to be recreated
     even if the pipeline run configuration has not changed.
:vartype force_update_tag: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"provisioning_state": {"readonly": True},
"response": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"request": {"key": "properties.request", "type": "PipelineRunRequest"},
"response": {"key": "properties.response", "type": "PipelineRunResponse"},
"force_update_tag": {"key": "properties.forceUpdateTag", "type": "str"},
}
def __init__(
self,
*,
request: Optional["_models.PipelineRunRequest"] = None,
force_update_tag: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword request: The request parameters for a pipeline run.
:paramtype request: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunRequest
        :keyword force_update_tag: A value that, when changed, forces the pipeline run to be
         recreated even if the pipeline run configuration has not changed.
:paramtype force_update_tag: str
"""
super().__init__(**kwargs)
self.provisioning_state = None
self.request = request
self.response = None
self.force_update_tag = force_update_tag
class PipelineRunListResult(_serialization.Model):
"""The result of a request to list pipeline runs for a container registry.
:ivar value: The list of pipeline runs. Since this list may be incomplete, the nextLink field
should be used to request the next list of pipeline runs.
:vartype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRun]
:ivar next_link: The URI that can be used to request the next list of pipeline runs.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[PipelineRun]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.PipelineRun"]] = None, next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword value: The list of pipeline runs. Since this list may be incomplete, the nextLink
field should be used to request the next list of pipeline runs.
:paramtype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRun]
:keyword next_link: The URI that can be used to request the next list of pipeline runs.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class PipelineRunRequest(_serialization.Model):
"""The request properties provided for a pipeline run.
:ivar pipeline_resource_id: The resource ID of the pipeline to run.
:vartype pipeline_resource_id: str
:ivar artifacts: List of source artifacts to be transferred by the pipeline.
Specify an image by repository ('hello-world'). This will use the 'latest' tag.
Specify an image by tag ('hello-world:latest').
Specify an image by sha256-based manifest digest ('hello-world@sha256:abc123').
:vartype artifacts: list[str]
:ivar source: The source properties of the pipeline run.
:vartype source:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunSourceProperties
:ivar target: The target properties of the pipeline run.
:vartype target:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunTargetProperties
:ivar catalog_digest: The digest of the tar used to transfer the artifacts.
:vartype catalog_digest: str
"""
_attribute_map = {
"pipeline_resource_id": {"key": "pipelineResourceId", "type": "str"},
"artifacts": {"key": "artifacts", "type": "[str]"},
"source": {"key": "source", "type": "PipelineRunSourceProperties"},
"target": {"key": "target", "type": "PipelineRunTargetProperties"},
"catalog_digest": {"key": "catalogDigest", "type": "str"},
}
def __init__(
self,
*,
pipeline_resource_id: Optional[str] = None,
artifacts: Optional[List[str]] = None,
source: Optional["_models.PipelineRunSourceProperties"] = None,
target: Optional["_models.PipelineRunTargetProperties"] = None,
catalog_digest: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword pipeline_resource_id: The resource ID of the pipeline to run.
:paramtype pipeline_resource_id: str
:keyword artifacts: List of source artifacts to be transferred by the pipeline.
Specify an image by repository ('hello-world'). This will use the 'latest' tag.
Specify an image by tag ('hello-world:latest').
Specify an image by sha256-based manifest digest ('hello-world@sha256:abc123').
:paramtype artifacts: list[str]
:keyword source: The source properties of the pipeline run.
:paramtype source:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunSourceProperties
:keyword target: The target properties of the pipeline run.
:paramtype target:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunTargetProperties
:keyword catalog_digest: The digest of the tar used to transfer the artifacts.
:paramtype catalog_digest: str
"""
super().__init__(**kwargs)
self.pipeline_resource_id = pipeline_resource_id
self.artifacts = artifacts
self.source = source
self.target = target
self.catalog_digest = catalog_digest
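# Illustrative sketch: requesting a run of an existing import pipeline. The
# resource ID segments in angle brackets are placeholders to fill in.
def _example_pipeline_run_request() -> "PipelineRunRequest":
    """Transfer one tagged image and one digest-addressed image."""
    return PipelineRunRequest(
        pipeline_resource_id=(
            "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
            "Microsoft.ContainerRegistry/registries/<registry>/importPipelines/<pipeline>"
        ),
        artifacts=["hello-world:latest", "team/app@sha256:abc123"],
    )
def _example_pipeline_run() -> "PipelineRun":
    """Wrap the request; changing force_update_tag re-triggers an identical run."""
    return PipelineRun(request=_example_pipeline_run_request(), force_update_tag="run-001")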
class PipelineRunResponse(_serialization.Model):
"""The response properties returned for a pipeline run.
:ivar status: The current status of the pipeline run.
:vartype status: str
:ivar imported_artifacts: The artifacts imported in the pipeline run.
:vartype imported_artifacts: list[str]
:ivar progress: The current progress of the copy operation.
:vartype progress: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProgressProperties
:ivar start_time: The time the pipeline run started.
:vartype start_time: ~datetime.datetime
:ivar finish_time: The time the pipeline run finished.
:vartype finish_time: ~datetime.datetime
:ivar source: The source of the pipeline run.
:vartype source:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportPipelineSourceProperties
:ivar target: The target of the pipeline run.
:vartype target:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ExportPipelineTargetProperties
:ivar catalog_digest: The digest of the tar used to transfer the artifacts.
:vartype catalog_digest: str
:ivar trigger: The trigger that caused the pipeline run.
:vartype trigger:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineTriggerDescriptor
:ivar pipeline_run_error_message: The detailed error message for the pipeline run in the case
of failure.
:vartype pipeline_run_error_message: str
"""
_attribute_map = {
"status": {"key": "status", "type": "str"},
"imported_artifacts": {"key": "importedArtifacts", "type": "[str]"},
"progress": {"key": "progress", "type": "ProgressProperties"},
"start_time": {"key": "startTime", "type": "iso-8601"},
"finish_time": {"key": "finishTime", "type": "iso-8601"},
"source": {"key": "source", "type": "ImportPipelineSourceProperties"},
"target": {"key": "target", "type": "ExportPipelineTargetProperties"},
"catalog_digest": {"key": "catalogDigest", "type": "str"},
"trigger": {"key": "trigger", "type": "PipelineTriggerDescriptor"},
"pipeline_run_error_message": {"key": "pipelineRunErrorMessage", "type": "str"},
}
def __init__(
self,
*,
status: Optional[str] = None,
imported_artifacts: Optional[List[str]] = None,
progress: Optional["_models.ProgressProperties"] = None,
start_time: Optional[datetime.datetime] = None,
finish_time: Optional[datetime.datetime] = None,
source: Optional["_models.ImportPipelineSourceProperties"] = None,
target: Optional["_models.ExportPipelineTargetProperties"] = None,
catalog_digest: Optional[str] = None,
trigger: Optional["_models.PipelineTriggerDescriptor"] = None,
pipeline_run_error_message: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword status: The current status of the pipeline run.
:paramtype status: str
:keyword imported_artifacts: The artifacts imported in the pipeline run.
:paramtype imported_artifacts: list[str]
:keyword progress: The current progress of the copy operation.
:paramtype progress:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProgressProperties
:keyword start_time: The time the pipeline run started.
:paramtype start_time: ~datetime.datetime
:keyword finish_time: The time the pipeline run finished.
:paramtype finish_time: ~datetime.datetime
:keyword source: The source of the pipeline run.
:paramtype source:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ImportPipelineSourceProperties
:keyword target: The target of the pipeline run.
:paramtype target:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ExportPipelineTargetProperties
:keyword catalog_digest: The digest of the tar used to transfer the artifacts.
:paramtype catalog_digest: str
:keyword trigger: The trigger that caused the pipeline run.
:paramtype trigger:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineTriggerDescriptor
:keyword pipeline_run_error_message: The detailed error message for the pipeline run in the
case of failure.
:paramtype pipeline_run_error_message: str
"""
super().__init__(**kwargs)
self.status = status
self.imported_artifacts = imported_artifacts
self.progress = progress
self.start_time = start_time
self.finish_time = finish_time
self.source = source
self.target = target
self.catalog_digest = catalog_digest
self.trigger = trigger
self.pipeline_run_error_message = pipeline_run_error_message
class PipelineRunSourceProperties(_serialization.Model):
"""PipelineRunSourceProperties.
:ivar type: The type of the source. "AzureStorageBlob"
:vartype type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunSourceType
:ivar name: The name of the source.
:vartype name: str
"""
_attribute_map = {
"type": {"key": "type", "type": "str"},
"name": {"key": "name", "type": "str"},
}
def __init__(
self,
*,
type: Union[str, "_models.PipelineRunSourceType"] = "AzureStorageBlob",
name: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword type: The type of the source. "AzureStorageBlob"
:paramtype type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunSourceType
:keyword name: The name of the source.
:paramtype name: str
"""
super().__init__(**kwargs)
self.type = type
self.name = name
class PipelineRunTargetProperties(_serialization.Model):
"""PipelineRunTargetProperties.
:ivar type: The type of the target. "AzureStorageBlob"
:vartype type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunTargetType
:ivar name: The name of the target.
:vartype name: str
"""
_attribute_map = {
"type": {"key": "type", "type": "str"},
"name": {"key": "name", "type": "str"},
}
def __init__(
self,
*,
type: Union[str, "_models.PipelineRunTargetType"] = "AzureStorageBlob",
name: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword type: The type of the target. "AzureStorageBlob"
:paramtype type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunTargetType
:keyword name: The name of the target.
:paramtype name: str
"""
super().__init__(**kwargs)
self.type = type
self.name = name
class PipelineSourceTriggerDescriptor(_serialization.Model):
"""PipelineSourceTriggerDescriptor.
:ivar timestamp: The timestamp when the source update happened.
:vartype timestamp: ~datetime.datetime
"""
_attribute_map = {
"timestamp": {"key": "timestamp", "type": "iso-8601"},
}
def __init__(self, *, timestamp: Optional[datetime.datetime] = None, **kwargs: Any) -> None:
"""
:keyword timestamp: The timestamp when the source update happened.
:paramtype timestamp: ~datetime.datetime
"""
super().__init__(**kwargs)
self.timestamp = timestamp
class PipelineSourceTriggerProperties(_serialization.Model):
"""PipelineSourceTriggerProperties.
All required parameters must be populated in order to send to Azure.
:ivar status: The current status of the source trigger. Known values are: "Enabled" and
"Disabled".
:vartype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.TriggerStatus
"""
_validation = {
"status": {"required": True},
}
_attribute_map = {
"status": {"key": "status", "type": "str"},
}
def __init__(self, *, status: Union[str, "_models.TriggerStatus"] = "Enabled", **kwargs: Any) -> None:
"""
:keyword status: The current status of the source trigger. Known values are: "Enabled" and
"Disabled".
:paramtype status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TriggerStatus
"""
super().__init__(**kwargs)
self.status = status
class PipelineTriggerDescriptor(_serialization.Model):
"""PipelineTriggerDescriptor.
:ivar source_trigger: The source trigger that caused the pipeline run.
:vartype source_trigger:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineSourceTriggerDescriptor
"""
_attribute_map = {
"source_trigger": {"key": "sourceTrigger", "type": "PipelineSourceTriggerDescriptor"},
}
def __init__(
self, *, source_trigger: Optional["_models.PipelineSourceTriggerDescriptor"] = None, **kwargs: Any
) -> None:
"""
:keyword source_trigger: The source trigger that caused the pipeline run.
:paramtype source_trigger:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineSourceTriggerDescriptor
"""
super().__init__(**kwargs)
self.source_trigger = source_trigger
class PipelineTriggerProperties(_serialization.Model):
"""PipelineTriggerProperties.
:ivar source_trigger: The source trigger properties of the pipeline.
:vartype source_trigger:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineSourceTriggerProperties
"""
_attribute_map = {
"source_trigger": {"key": "sourceTrigger", "type": "PipelineSourceTriggerProperties"},
}
def __init__(
self, *, source_trigger: Optional["_models.PipelineSourceTriggerProperties"] = None, **kwargs: Any
) -> None:
"""
:keyword source_trigger: The source trigger properties of the pipeline.
:paramtype source_trigger:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineSourceTriggerProperties
"""
super().__init__(**kwargs)
self.source_trigger = source_trigger
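# Illustrative sketch: a trigger block that suppresses automatic runs; the
# nested status field otherwise defaults to "Enabled".
def _example_disabled_trigger() -> "PipelineTriggerProperties":
    """Disable the source trigger so the pipeline only runs on demand."""
    return PipelineTriggerProperties(
        source_trigger=PipelineSourceTriggerProperties(status="Disabled")
    )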
class Policies(_serialization.Model):
"""The policies for a container registry.
:ivar quarantine_policy: The quarantine policy for a container registry.
:vartype quarantine_policy:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.QuarantinePolicy
:ivar trust_policy: The content trust policy for a container registry.
:vartype trust_policy: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.TrustPolicy
:ivar retention_policy: The retention policy for a container registry.
:vartype retention_policy:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.RetentionPolicy
:ivar export_policy: The export policy for a container registry.
:vartype export_policy: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ExportPolicy
"""
_attribute_map = {
"quarantine_policy": {"key": "quarantinePolicy", "type": "QuarantinePolicy"},
"trust_policy": {"key": "trustPolicy", "type": "TrustPolicy"},
"retention_policy": {"key": "retentionPolicy", "type": "RetentionPolicy"},
"export_policy": {"key": "exportPolicy", "type": "ExportPolicy"},
}
def __init__(
self,
*,
quarantine_policy: Optional["_models.QuarantinePolicy"] = None,
trust_policy: Optional["_models.TrustPolicy"] = None,
retention_policy: Optional["_models.RetentionPolicy"] = None,
export_policy: Optional["_models.ExportPolicy"] = None,
**kwargs: Any
) -> None:
"""
:keyword quarantine_policy: The quarantine policy for a container registry.
:paramtype quarantine_policy:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.QuarantinePolicy
:keyword trust_policy: The content trust policy for a container registry.
:paramtype trust_policy: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.TrustPolicy
:keyword retention_policy: The retention policy for a container registry.
:paramtype retention_policy:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.RetentionPolicy
:keyword export_policy: The export policy for a container registry.
:paramtype export_policy: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.ExportPolicy
"""
super().__init__(**kwargs)
self.quarantine_policy = quarantine_policy
self.trust_policy = trust_policy
self.retention_policy = retention_policy
self.export_policy = export_policy
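# Illustrative sketch: enabling just the quarantine policy and leaving the
# other registry policies at their server-side defaults.
def _example_policies() -> "Policies":
    """Quarantine newly pushed images until they are explicitly released."""
    return Policies(quarantine_policy=QuarantinePolicy(status="enabled"))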
class PrivateEndpoint(_serialization.Model):
"""The Private Endpoint resource.
    :ivar id: The private endpoint resource created by the Microsoft.Network resource provider.
:vartype id: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
}
def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None: # pylint: disable=redefined-builtin
"""
        :keyword id: The private endpoint resource created by the Microsoft.Network resource
         provider.
:paramtype id: str
"""
super().__init__(**kwargs)
self.id = id
class PrivateEndpointConnection(ProxyResource):
"""An object that represents a private endpoint connection for a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
:ivar private_endpoint: The resource of private endpoint.
:vartype private_endpoint:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PrivateEndpoint
:ivar private_link_service_connection_state: A collection of information about the state of the
connection between service consumer and provider.
:vartype private_link_service_connection_state:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of private endpoint connection resource. Known
values are: "Creating", "Updating", "Deleting", "Succeeded", "Failed", and "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProvisioningState
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"provisioning_state": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpoint"},
"private_link_service_connection_state": {
"key": "properties.privateLinkServiceConnectionState",
"type": "PrivateLinkServiceConnectionState",
},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
}
def __init__(
self,
*,
private_endpoint: Optional["_models.PrivateEndpoint"] = None,
private_link_service_connection_state: Optional["_models.PrivateLinkServiceConnectionState"] = None,
**kwargs: Any
) -> None:
"""
:keyword private_endpoint: The resource of private endpoint.
:paramtype private_endpoint:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PrivateEndpoint
:keyword private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:paramtype private_link_service_connection_state:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PrivateLinkServiceConnectionState
"""
super().__init__(**kwargs)
self.private_endpoint = private_endpoint
self.private_link_service_connection_state = private_link_service_connection_state
self.provisioning_state = None
class PrivateEndpointConnectionListResult(_serialization.Model):
"""The result of a request to list private endpoint connections for a container registry.
:ivar value: The list of private endpoint connections. Since this list may be incomplete, the
nextLink field should be used to request the next list of private endpoint connections.
:vartype value:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.PrivateEndpointConnection]
:ivar next_link: The URI that can be used to request the next list of private endpoint
connections.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[PrivateEndpointConnection]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.PrivateEndpointConnection"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: The list of private endpoint connections. Since this list may be incomplete,
the nextLink field should be used to request the next list of private endpoint connections.
:paramtype value:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.PrivateEndpointConnection]
:keyword next_link: The URI that can be used to request the next list of private endpoint
connections.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class PrivateLinkResource(_serialization.Model):
"""A resource that supports private link capabilities.
Variables are only populated by the server, and will be ignored when sending a request.
    :ivar type: The resource type of the private link resource.
:vartype type: str
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
    :ivar required_zone_names: The private link DNS zone names required by the private link
     resource.
:vartype required_zone_names: list[str]
"""
_validation = {
"type": {"readonly": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"group_id": {"key": "properties.groupId", "type": "str"},
"required_members": {"key": "properties.requiredMembers", "type": "[str]"},
"required_zone_names": {"key": "properties.requiredZoneNames", "type": "[str]"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
name: Optional[str] = None,
group_id: Optional[str] = None,
required_members: Optional[List[str]] = None,
required_zone_names: Optional[List[str]] = None,
**kwargs: Any
) -> None:
"""
:keyword id: The resource ID.
:paramtype id: str
:keyword name: The name of the resource.
:paramtype name: str
:keyword group_id: The private link resource group id.
:paramtype group_id: str
:keyword required_members: The private link resource required member names.
:paramtype required_members: list[str]
        :keyword required_zone_names: The private link DNS zone names required by the private link
         resource.
:paramtype required_zone_names: list[str]
"""
super().__init__(**kwargs)
self.type = None
self.id = id
self.name = name
self.group_id = group_id
self.required_members = required_members
self.required_zone_names = required_zone_names
class PrivateLinkResourceListResult(_serialization.Model):
"""The result of a request to list private link resources for a container registry.
:ivar value: The list of private link resources. Since this list may be incomplete, the
nextLink field should be used to request the next list of private link resources.
:vartype value:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.PrivateLinkResource]
:ivar next_link: The URI that can be used to request the next list of private link resources.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[PrivateLinkResource]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.PrivateLinkResource"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: The list of private link resources. Since this list may be incomplete, the
nextLink field should be used to request the next list of private link resources.
:paramtype value:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.PrivateLinkResource]
:keyword next_link: The URI that can be used to request the next list of private link
resources.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class PrivateLinkServiceConnectionState(_serialization.Model):
"""The state of a private link service connection.
:ivar status: The private link service connection status. Known values are: "Approved",
"Pending", "Rejected", and "Disconnected".
:vartype status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ConnectionStatus
    :ivar description: The description for the connection status. For example, if the connection
     is rejected, it can indicate the reason for rejection.
:vartype description: str
:ivar actions_required: A message indicating if changes on the service provider require any
updates on the consumer. Known values are: "None" and "Recreate".
:vartype actions_required: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ActionsRequired
"""
_attribute_map = {
"status": {"key": "status", "type": "str"},
"description": {"key": "description", "type": "str"},
"actions_required": {"key": "actionsRequired", "type": "str"},
}
def __init__(
self,
*,
status: Optional[Union[str, "_models.ConnectionStatus"]] = None,
description: Optional[str] = None,
actions_required: Optional[Union[str, "_models.ActionsRequired"]] = None,
**kwargs: Any
) -> None:
"""
:keyword status: The private link service connection status. Known values are: "Approved",
"Pending", "Rejected", and "Disconnected".
:paramtype status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ConnectionStatus
        :keyword description: The description for the connection status. For example, if the
         connection is rejected, it can indicate the reason for rejection.
:paramtype description: str
:keyword actions_required: A message indicating if changes on the service provider require any
updates on the consumer. Known values are: "None" and "Recreate".
:paramtype actions_required: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ActionsRequired
"""
super().__init__(**kwargs)
self.status = status
self.description = description
self.actions_required = actions_required
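# Illustrative sketch: the state payload a registry owner might send when
# approving a pending private endpoint connection.
def _example_approved_connection_state() -> "PrivateLinkServiceConnectionState":
    """Mark a connection as approved with a human-readable note."""
    return PrivateLinkServiceConnectionState(
        status="Approved",
        description="Approved by the registry owner.",
    )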
class ProgressProperties(_serialization.Model):
"""ProgressProperties.
:ivar percentage: The percentage complete of the copy operation.
:vartype percentage: str
"""
_attribute_map = {
"percentage": {"key": "percentage", "type": "str"},
}
def __init__(self, *, percentage: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword percentage: The percentage complete of the copy operation.
:paramtype percentage: str
"""
super().__init__(**kwargs)
self.percentage = percentage
class QuarantinePolicy(_serialization.Model):
"""The quarantine policy for a container registry.
:ivar status: The value that indicates whether the policy is enabled or not. Known values are:
"enabled" and "disabled".
:vartype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PolicyStatus
"""
_attribute_map = {
"status": {"key": "status", "type": "str"},
}
def __init__(self, *, status: Optional[Union[str, "_models.PolicyStatus"]] = None, **kwargs: Any) -> None:
"""
:keyword status: The value that indicates whether the policy is enabled or not. Known values
are: "enabled" and "disabled".
:paramtype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PolicyStatus
"""
super().__init__(**kwargs)
self.status = status
class RegenerateCredentialParameters(_serialization.Model):
"""The parameters used to regenerate the login credential.
All required parameters must be populated in order to send to Azure.
    :ivar name: Specifies the name of the password which should be regenerated -- password or
password2. Required. Known values are: "password" and "password2".
:vartype name: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PasswordName
"""
_validation = {
"name": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
}
def __init__(self, *, name: Union[str, "_models.PasswordName"], **kwargs: Any) -> None:
"""
        :keyword name: Specifies the name of the password which should be regenerated -- password
         or
password2. Required. Known values are: "password" and "password2".
:paramtype name: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PasswordName
"""
super().__init__(**kwargs)
self.name = name
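# Illustrative sketch: rotating the secondary admin credential so the primary
# one keeps working during the rollover.
def _example_regenerate_password2() -> "RegenerateCredentialParameters":
    """Request regeneration of 'password2' only."""
    return RegenerateCredentialParameters(name="password2")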
class Resource(_serialization.Model):
"""An Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar location: The location of the resource. This cannot be changed after the resource is
created. Required.
:vartype location: str
:ivar tags: The tags of the resource.
:vartype tags: dict[str, str]
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"location": {"required": True},
"system_data": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"system_data": {"key": "systemData", "type": "SystemData"},
}
def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
"""
:keyword location: The location of the resource. This cannot be changed after the resource is
created. Required.
:paramtype location: str
:keyword tags: The tags of the resource.
:paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
self.system_data = None
class Registry(Resource): # pylint: disable=too-many-instance-attributes
"""An object that represents a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar location: The location of the resource. This cannot be changed after the resource is
created. Required.
:vartype location: str
:ivar tags: The tags of the resource.
:vartype tags: dict[str, str]
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
:ivar sku: The SKU of the container registry. Required.
:vartype sku: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Sku
:ivar identity: The identity of the container registry.
:vartype identity: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.IdentityProperties
:ivar login_server: The URL that can be used to log into the container registry.
:vartype login_server: str
:ivar creation_date: The creation date of the container registry in ISO8601 format.
:vartype creation_date: ~datetime.datetime
:ivar provisioning_state: The provisioning state of the container registry at the time the
operation was called. Known values are: "Creating", "Updating", "Deleting", "Succeeded",
"Failed", and "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProvisioningState
:ivar status: The status of the container registry at the time the operation was called.
:vartype status: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Status
:ivar admin_user_enabled: The value that indicates whether the admin user is enabled.
:vartype admin_user_enabled: bool
:ivar network_rule_set: The network rule set for a container registry.
:vartype network_rule_set:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.NetworkRuleSet
:ivar policies: The policies for a container registry.
:vartype policies: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Policies
:ivar encryption: The encryption settings of the container registry.
:vartype encryption:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.EncryptionProperty
:ivar data_endpoint_enabled: Enable a single data endpoint per region for serving data.
:vartype data_endpoint_enabled: bool
:ivar data_endpoint_host_names: List of host names that will serve data when
dataEndpointEnabled is true.
:vartype data_endpoint_host_names: list[str]
:ivar private_endpoint_connections: List of private endpoint connections for a container
registry.
:vartype private_endpoint_connections:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.PrivateEndpointConnection]
:ivar public_network_access: Whether or not public network access is allowed for the container
registry. Known values are: "Enabled" and "Disabled".
:vartype public_network_access: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PublicNetworkAccess
:ivar network_rule_bypass_options: Whether to allow trusted Azure services to access a network
restricted registry. Known values are: "AzureServices" and "None".
:vartype network_rule_bypass_options: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.NetworkRuleBypassOptions
:ivar zone_redundancy: Whether or not zone redundancy is enabled for this container registry.
Known values are: "Enabled" and "Disabled".
:vartype zone_redundancy: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ZoneRedundancy
:ivar anonymous_pull_enabled: Enables registry-wide pull from unauthenticated clients.
:vartype anonymous_pull_enabled: bool
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"location": {"required": True},
"system_data": {"readonly": True},
"sku": {"required": True},
"login_server": {"readonly": True},
"creation_date": {"readonly": True},
"provisioning_state": {"readonly": True},
"status": {"readonly": True},
"data_endpoint_host_names": {"readonly": True},
"private_endpoint_connections": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"system_data": {"key": "systemData", "type": "SystemData"},
"sku": {"key": "sku", "type": "Sku"},
"identity": {"key": "identity", "type": "IdentityProperties"},
"login_server": {"key": "properties.loginServer", "type": "str"},
"creation_date": {"key": "properties.creationDate", "type": "iso-8601"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"status": {"key": "properties.status", "type": "Status"},
"admin_user_enabled": {"key": "properties.adminUserEnabled", "type": "bool"},
"network_rule_set": {"key": "properties.networkRuleSet", "type": "NetworkRuleSet"},
"policies": {"key": "properties.policies", "type": "Policies"},
"encryption": {"key": "properties.encryption", "type": "EncryptionProperty"},
"data_endpoint_enabled": {"key": "properties.dataEndpointEnabled", "type": "bool"},
"data_endpoint_host_names": {"key": "properties.dataEndpointHostNames", "type": "[str]"},
"private_endpoint_connections": {
"key": "properties.privateEndpointConnections",
"type": "[PrivateEndpointConnection]",
},
"public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
"network_rule_bypass_options": {"key": "properties.networkRuleBypassOptions", "type": "str"},
"zone_redundancy": {"key": "properties.zoneRedundancy", "type": "str"},
"anonymous_pull_enabled": {"key": "properties.anonymousPullEnabled", "type": "bool"},
}
def __init__(
self,
*,
location: str,
sku: "_models.Sku",
tags: Optional[Dict[str, str]] = None,
identity: Optional["_models.IdentityProperties"] = None,
admin_user_enabled: bool = False,
network_rule_set: Optional["_models.NetworkRuleSet"] = None,
policies: Optional["_models.Policies"] = None,
encryption: Optional["_models.EncryptionProperty"] = None,
data_endpoint_enabled: Optional[bool] = None,
public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
network_rule_bypass_options: Optional[Union[str, "_models.NetworkRuleBypassOptions"]] = None,
zone_redundancy: Optional[Union[str, "_models.ZoneRedundancy"]] = None,
anonymous_pull_enabled: bool = False,
**kwargs: Any
) -> None:
"""
:keyword location: The location of the resource. This cannot be changed after the resource is
created. Required.
:paramtype location: str
:keyword tags: The tags of the resource.
:paramtype tags: dict[str, str]
:keyword sku: The SKU of the container registry. Required.
:paramtype sku: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Sku
:keyword identity: The identity of the container registry.
:paramtype identity:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.IdentityProperties
:keyword admin_user_enabled: The value that indicates whether the admin user is enabled.
:paramtype admin_user_enabled: bool
:keyword network_rule_set: The network rule set for a container registry.
:paramtype network_rule_set:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.NetworkRuleSet
:keyword policies: The policies for a container registry.
:paramtype policies: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Policies
:keyword encryption: The encryption settings of the container registry.
:paramtype encryption:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.EncryptionProperty
:keyword data_endpoint_enabled: Enable a single data endpoint per region for serving data.
:paramtype data_endpoint_enabled: bool
:keyword public_network_access: Whether or not public network access is allowed for the
container registry. Known values are: "Enabled" and "Disabled".
:paramtype public_network_access: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PublicNetworkAccess
:keyword network_rule_bypass_options: Whether to allow trusted Azure services to access a
network restricted registry. Known values are: "AzureServices" and "None".
:paramtype network_rule_bypass_options: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.NetworkRuleBypassOptions
:keyword zone_redundancy: Whether or not zone redundancy is enabled for this container
registry. Known values are: "Enabled" and "Disabled".
:paramtype zone_redundancy: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ZoneRedundancy
:keyword anonymous_pull_enabled: Enables registry-wide pull from unauthenticated clients.
:paramtype anonymous_pull_enabled: bool
"""
super().__init__(location=location, tags=tags, **kwargs)
self.sku = sku
self.identity = identity
self.login_server = None
self.creation_date = None
self.provisioning_state = None
self.status = None
self.admin_user_enabled = admin_user_enabled
self.network_rule_set = network_rule_set
self.policies = policies
self.encryption = encryption
self.data_endpoint_enabled = data_endpoint_enabled
self.data_endpoint_host_names = None
self.private_endpoint_connections = None
self.public_network_access = public_network_access
self.network_rule_bypass_options = network_rule_bypass_options
self.zone_redundancy = zone_redundancy
self.anonymous_pull_enabled = anonymous_pull_enabled
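# Illustrative sketch (not part of the generated module): a minimal Registry
# payload for a create call. ``Sku`` is defined later in this module, which is
# fine because the name is resolved when the function is called.
def _example_build_registry() -> "Registry":
    return Registry(
        location="eastus",
        sku=Sku(name="Premium"),    # SkuName values may be passed as plain strings
        admin_user_enabled=False,
        zone_redundancy="Enabled",  # ZoneRedundancy value as a string
        tags={"env": "dev"},
    )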
class RegistryListCredentialsResult(_serialization.Model):
"""The response from the ListCredentials operation.
:ivar username: The username for a container registry.
:vartype username: str
:ivar passwords: The list of passwords for a container registry.
:vartype passwords:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.RegistryPassword]
"""
_attribute_map = {
"username": {"key": "username", "type": "str"},
"passwords": {"key": "passwords", "type": "[RegistryPassword]"},
}
def __init__(
self,
*,
username: Optional[str] = None,
passwords: Optional[List["_models.RegistryPassword"]] = None,
**kwargs: Any
) -> None:
"""
:keyword username: The username for a container registry.
:paramtype username: str
:keyword passwords: The list of passwords for a container registry.
:paramtype passwords:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.RegistryPassword]
"""
super().__init__(**kwargs)
self.username = username
self.passwords = passwords
class RegistryListResult(_serialization.Model):
"""The result of a request to list container registries.
:ivar value: The list of container registries. Since this list may be incomplete, the nextLink
field should be used to request the next list of container registries.
:vartype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Registry]
:ivar next_link: The URI that can be used to request the next list of container registries.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[Registry]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.Registry"]] = None, next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword value: The list of container registries. Since this list may be incomplete, the
nextLink field should be used to request the next list of container registries.
:paramtype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Registry]
:keyword next_link: The URI that can be used to request the next list of container registries.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class RegistryNameCheckRequest(_serialization.Model):
"""A request to check whether a container registry name is available.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the container registry. Required.
:vartype name: str
:ivar type: The resource type of the container registry. This field must be set to
'Microsoft.ContainerRegistry/registries'. Required. Default value is
"Microsoft.ContainerRegistry/registries".
:vartype type: str
"""
_validation = {
"name": {"required": True, "max_length": 50, "min_length": 5, "pattern": r"^[a-zA-Z0-9]*$"},
"type": {"required": True, "constant": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
}
type = "Microsoft.ContainerRegistry/registries"
def __init__(self, *, name: str, **kwargs: Any) -> None:
"""
:keyword name: The name of the container registry. Required.
:paramtype name: str
"""
super().__init__(**kwargs)
self.name = name
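# Illustrative sketch (not part of the generated module): the ``type`` field is
# a class-level constant, so only the name has to be supplied. The ``client``
# variable is hypothetical.
def _example_check_name(client):
    request = RegistryNameCheckRequest(name="myregistry123")  # 5-50 alphanumeric chars
    return client.registries.check_name_availability(request)  # -> RegistryNameStatus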
class RegistryNameStatus(_serialization.Model):
"""The result of a request to check the availability of a container registry name.
:ivar name_available: The value that indicates whether the name is available.
:vartype name_available: bool
:ivar reason: If any, the reason that the name is not available.
:vartype reason: str
:ivar message: If any, the error message that provides more detail for the reason that the name
is not available.
:vartype message: str
"""
_attribute_map = {
"name_available": {"key": "nameAvailable", "type": "bool"},
"reason": {"key": "reason", "type": "str"},
"message": {"key": "message", "type": "str"},
}
def __init__(
self,
*,
name_available: Optional[bool] = None,
reason: Optional[str] = None,
message: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword name_available: The value that indicates whether the name is available.
:paramtype name_available: bool
:keyword reason: If any, the reason that the name is not available.
:paramtype reason: str
:keyword message: If any, the error message that provides more detail for the reason that the
name is not available.
:paramtype message: str
"""
super().__init__(**kwargs)
self.name_available = name_available
self.reason = reason
self.message = message
class RegistryPassword(_serialization.Model):
"""The login password for the container registry.
:ivar name: The password name. Known values are: "password" and "password2".
:vartype name: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PasswordName
:ivar value: The password value.
:vartype value: str
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"value": {"key": "value", "type": "str"},
}
def __init__(
self, *, name: Optional[Union[str, "_models.PasswordName"]] = None, value: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword name: The password name. Known values are: "password" and "password2".
:paramtype name: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PasswordName
:keyword value: The password value.
:paramtype value: str
"""
super().__init__(**kwargs)
self.name = name
self.value = value
class RegistryUpdateParameters(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""The parameters for updating a container registry.
:ivar identity: The identity of the container registry.
:vartype identity: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.IdentityProperties
:ivar tags: The tags for the container registry.
:vartype tags: dict[str, str]
:ivar sku: The SKU of the container registry.
:vartype sku: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Sku
:ivar admin_user_enabled: The value that indicates whether the admin user is enabled.
:vartype admin_user_enabled: bool
:ivar network_rule_set: The network rule set for a container registry.
:vartype network_rule_set:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.NetworkRuleSet
:ivar policies: The policies for a container registry.
:vartype policies: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Policies
:ivar encryption: The encryption settings of the container registry.
:vartype encryption:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.EncryptionProperty
:ivar data_endpoint_enabled: Enable a single data endpoint per region for serving data.
:vartype data_endpoint_enabled: bool
:ivar public_network_access: Whether or not public network access is allowed for the container
registry. Known values are: "Enabled" and "Disabled".
:vartype public_network_access: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PublicNetworkAccess
:ivar network_rule_bypass_options: Whether to allow trusted Azure services to access a network
restricted registry. Known values are: "AzureServices" and "None".
:vartype network_rule_bypass_options: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.NetworkRuleBypassOptions
:ivar anonymous_pull_enabled: Enables registry-wide pull from unauthenticated clients.
:vartype anonymous_pull_enabled: bool
"""
_attribute_map = {
"identity": {"key": "identity", "type": "IdentityProperties"},
"tags": {"key": "tags", "type": "{str}"},
"sku": {"key": "sku", "type": "Sku"},
"admin_user_enabled": {"key": "properties.adminUserEnabled", "type": "bool"},
"network_rule_set": {"key": "properties.networkRuleSet", "type": "NetworkRuleSet"},
"policies": {"key": "properties.policies", "type": "Policies"},
"encryption": {"key": "properties.encryption", "type": "EncryptionProperty"},
"data_endpoint_enabled": {"key": "properties.dataEndpointEnabled", "type": "bool"},
"public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
"network_rule_bypass_options": {"key": "properties.networkRuleBypassOptions", "type": "str"},
"anonymous_pull_enabled": {"key": "properties.anonymousPullEnabled", "type": "bool"},
}
def __init__(
self,
*,
identity: Optional["_models.IdentityProperties"] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["_models.Sku"] = None,
admin_user_enabled: Optional[bool] = None,
network_rule_set: Optional["_models.NetworkRuleSet"] = None,
policies: Optional["_models.Policies"] = None,
encryption: Optional["_models.EncryptionProperty"] = None,
data_endpoint_enabled: Optional[bool] = None,
public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
network_rule_bypass_options: Optional[Union[str, "_models.NetworkRuleBypassOptions"]] = None,
anonymous_pull_enabled: Optional[bool] = None,
**kwargs: Any
) -> None:
"""
:keyword identity: The identity of the container registry.
:paramtype identity:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.IdentityProperties
:keyword tags: The tags for the container registry.
:paramtype tags: dict[str, str]
:keyword sku: The SKU of the container registry.
:paramtype sku: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Sku
:keyword admin_user_enabled: The value that indicates whether the admin user is enabled.
:paramtype admin_user_enabled: bool
:keyword network_rule_set: The network rule set for a container registry.
:paramtype network_rule_set:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.NetworkRuleSet
:keyword policies: The policies for a container registry.
:paramtype policies: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Policies
:keyword encryption: The encryption settings of the container registry.
:paramtype encryption:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.EncryptionProperty
:keyword data_endpoint_enabled: Enable a single data endpoint per region for serving data.
:paramtype data_endpoint_enabled: bool
:keyword public_network_access: Whether or not public network access is allowed for the
container registry. Known values are: "Enabled" and "Disabled".
:paramtype public_network_access: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.PublicNetworkAccess
:keyword network_rule_bypass_options: Whether to allow trusted Azure services to access a
network restricted registry. Known values are: "AzureServices" and "None".
:paramtype network_rule_bypass_options: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.NetworkRuleBypassOptions
:keyword anonymous_pull_enabled: Enables registry-wide pull from unauthenticated clients.
:paramtype anonymous_pull_enabled: bool
"""
super().__init__(**kwargs)
self.identity = identity
self.tags = tags
self.sku = sku
self.admin_user_enabled = admin_user_enabled
self.network_rule_set = network_rule_set
self.policies = policies
self.encryption = encryption
self.data_endpoint_enabled = data_endpoint_enabled
self.public_network_access = public_network_access
self.network_rule_bypass_options = network_rule_bypass_options
self.anonymous_pull_enabled = anonymous_pull_enabled
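# Illustrative sketch (not part of the generated module): an update payload
# only carries the fields being changed; attributes left as None are omitted
# from the serialized request.
def _example_lock_down_registry() -> "RegistryUpdateParameters":
    return RegistryUpdateParameters(
        public_network_access="Disabled",             # PublicNetworkAccess value
        network_rule_bypass_options="AzureServices",  # still allow trusted services
    )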
class RegistryUsage(_serialization.Model):
"""The quota usage for a container registry.
:ivar name: The name of the usage.
:vartype name: str
:ivar limit: The limit of the usage.
:vartype limit: int
:ivar current_value: The current value of the usage.
:vartype current_value: int
:ivar unit: The unit of measurement. Known values are: "Count" and "Bytes".
:vartype unit: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.RegistryUsageUnit
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"limit": {"key": "limit", "type": "int"},
"current_value": {"key": "currentValue", "type": "int"},
"unit": {"key": "unit", "type": "str"},
}
def __init__(
self,
*,
name: Optional[str] = None,
limit: Optional[int] = None,
current_value: Optional[int] = None,
unit: Optional[Union[str, "_models.RegistryUsageUnit"]] = None,
**kwargs: Any
) -> None:
"""
:keyword name: The name of the usage.
:paramtype name: str
:keyword limit: The limit of the usage.
:paramtype limit: int
:keyword current_value: The current value of the usage.
:paramtype current_value: int
:keyword unit: The unit of measurement. Known values are: "Count" and "Bytes".
:paramtype unit: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.RegistryUsageUnit
"""
super().__init__(**kwargs)
self.name = name
self.limit = limit
self.current_value = current_value
self.unit = unit
class RegistryUsageListResult(_serialization.Model):
"""The result of a request to get container registry quota usages.
:ivar value: The list of container registry quota usages.
:vartype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.RegistryUsage]
"""
_attribute_map = {
"value": {"key": "value", "type": "[RegistryUsage]"},
}
def __init__(self, *, value: Optional[List["_models.RegistryUsage"]] = None, **kwargs: Any) -> None:
"""
:keyword value: The list of container registry quota usages.
:paramtype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.RegistryUsage]
"""
super().__init__(**kwargs)
self.value = value
class Replication(Resource):
"""An object that represents a replication for a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar location: The location of the resource. This cannot be changed after the resource is
created. Required.
:vartype location: str
:ivar tags: The tags of the resource.
:vartype tags: dict[str, str]
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
:ivar provisioning_state: The provisioning state of the replication at the time the operation
was called. Known values are: "Creating", "Updating", "Deleting", "Succeeded", "Failed", and
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProvisioningState
:ivar status: The status of the replication at the time the operation was called.
:vartype status: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Status
:ivar region_endpoint_enabled: Specifies whether the replication's regional endpoint is
enabled. Requests will not be routed to a replication whose regional endpoint is disabled;
however, its data will continue to be synced with other replications.
:vartype region_endpoint_enabled: bool
:ivar zone_redundancy: Whether or not zone redundancy is enabled for this container registry
replication. Known values are: "Enabled" and "Disabled".
:vartype zone_redundancy: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ZoneRedundancy
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"location": {"required": True},
"system_data": {"readonly": True},
"provisioning_state": {"readonly": True},
"status": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"system_data": {"key": "systemData", "type": "SystemData"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"status": {"key": "properties.status", "type": "Status"},
"region_endpoint_enabled": {"key": "properties.regionEndpointEnabled", "type": "bool"},
"zone_redundancy": {"key": "properties.zoneRedundancy", "type": "str"},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
region_endpoint_enabled: bool = True,
zone_redundancy: Optional[Union[str, "_models.ZoneRedundancy"]] = None,
**kwargs: Any
) -> None:
"""
:keyword location: The location of the resource. This cannot be changed after the resource is
created. Required.
:paramtype location: str
:keyword tags: The tags of the resource.
:paramtype tags: dict[str, str]
:keyword region_endpoint_enabled: Specifies whether the replication's regional endpoint is
enabled. Requests will not be routed to a replication whose regional endpoint is disabled;
however, its data will continue to be synced with other replications.
:paramtype region_endpoint_enabled: bool
:keyword zone_redundancy: Whether or not zone redundancy is enabled for this container registry
replication. Known values are: "Enabled" and "Disabled".
:paramtype zone_redundancy: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ZoneRedundancy
"""
super().__init__(location=location, tags=tags, **kwargs)
self.provisioning_state = None
self.status = None
self.region_endpoint_enabled = region_endpoint_enabled
self.zone_redundancy = zone_redundancy
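# Illustrative sketch (not part of the generated module): a replication is a
# tracked resource, so only its target location is required.
def _example_build_replication() -> "Replication":
    return Replication(
        location="westus2",
        region_endpoint_enabled=True,  # the default; requests may be routed here
        tags={"purpose": "geo-redundancy"},
    )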
class ReplicationListResult(_serialization.Model):
"""The result of a request to list replications for a container registry.
:ivar value: The list of replications. Since this list may be incomplete, the nextLink field
should be used to request the next list of replications.
:vartype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Replication]
:ivar next_link: The URI that can be used to request the next list of replications.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[Replication]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.Replication"]] = None, next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword value: The list of replications. Since this list may be incomplete, the nextLink field
should be used to request the next list of replications.
:paramtype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Replication]
:keyword next_link: The URI that can be used to request the next list of replications.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class ReplicationUpdateParameters(_serialization.Model):
"""The parameters for updating a replication.
:ivar tags: The tags for the replication.
:vartype tags: dict[str, str]
:ivar region_endpoint_enabled: Specifies whether the replication's regional endpoint is
enabled. Requests will not be routed to a replication whose regional endpoint is disabled;
however, its data will continue to be synced with other replications.
:vartype region_endpoint_enabled: bool
"""
_attribute_map = {
"tags": {"key": "tags", "type": "{str}"},
"region_endpoint_enabled": {"key": "properties.regionEndpointEnabled", "type": "bool"},
}
def __init__(
self, *, tags: Optional[Dict[str, str]] = None, region_endpoint_enabled: Optional[bool] = None, **kwargs: Any
) -> None:
"""
:keyword tags: The tags for the replication.
:paramtype tags: dict[str, str]
:keyword region_endpoint_enabled: Specifies whether the replication's regional endpoint is
enabled. Requests will not be routed to a replication whose regional endpoint is disabled;
however, its data will continue to be synced with other replications.
:paramtype region_endpoint_enabled: bool
"""
super().__init__(**kwargs)
self.tags = tags
self.region_endpoint_enabled = region_endpoint_enabled
class Request(_serialization.Model):
"""The request that generated the event.
:ivar id: The ID of the request that initiated the event.
:vartype id: str
:ivar addr: The IP or hostname and possibly port of the client connection that initiated the
event. This is the RemoteAddr from the standard HTTP request.
:vartype addr: str
:ivar host: The externally accessible hostname of the registry instance, as specified by the
HTTP host header on incoming requests.
:vartype host: str
:ivar method: The request method that generated the event.
:vartype method: str
:ivar useragent: The user agent header of the request.
:vartype useragent: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"addr": {"key": "addr", "type": "str"},
"host": {"key": "host", "type": "str"},
"method": {"key": "method", "type": "str"},
"useragent": {"key": "useragent", "type": "str"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
addr: Optional[str] = None,
host: Optional[str] = None,
method: Optional[str] = None,
useragent: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword id: The ID of the request that initiated the event.
:paramtype id: str
:keyword addr: The IP or hostname and possibly port of the client connection that initiated the
event. This is the RemoteAddr from the standard HTTP request.
:paramtype addr: str
:keyword host: The externally accessible hostname of the registry instance, as specified by the
HTTP host header on incoming requests.
:paramtype host: str
:keyword method: The request method that generated the event.
:paramtype method: str
:keyword useragent: The user agent header of the request.
:paramtype useragent: str
"""
super().__init__(**kwargs)
self.id = id
self.addr = addr
self.host = host
self.method = method
self.useragent = useragent
class RetentionPolicy(_serialization.Model):
"""The retention policy for a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar days: The number of days to retain an untagged manifest after which it gets purged.
:vartype days: int
:ivar last_updated_time: The timestamp when the policy was last updated.
:vartype last_updated_time: ~datetime.datetime
:ivar status: The value that indicates whether the policy is enabled or not. Known values are:
"enabled" and "disabled".
:vartype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PolicyStatus
"""
_validation = {
"last_updated_time": {"readonly": True},
}
_attribute_map = {
"days": {"key": "days", "type": "int"},
"last_updated_time": {"key": "lastUpdatedTime", "type": "iso-8601"},
"status": {"key": "status", "type": "str"},
}
def __init__(
self, *, days: int = 7, status: Optional[Union[str, "_models.PolicyStatus"]] = None, **kwargs: Any
) -> None:
"""
:keyword days: The number of days to retain an untagged manifest after which it gets purged.
:paramtype days: int
:keyword status: The value that indicates whether the policy is enabled or not. Known values
are: "enabled" and "disabled".
:paramtype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PolicyStatus
"""
super().__init__(**kwargs)
self.days = days
self.last_updated_time = None
self.status = status
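# Illustrative sketch (not part of the generated module): retention applies to
# untagged manifests only, and ``days`` defaults to 7 when omitted.
def _example_retention_policy() -> "RetentionPolicy":
    return RetentionPolicy(days=30, status="enabled")  # PolicyStatus value as a string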
class ScopeMap(ProxyResource):
"""An object that represents a scope map for a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
:ivar description: The user-friendly description of the scope map.
:vartype description: str
:ivar type_properties_type: The type of the scope map. E.g. BuiltIn scope map.
:vartype type_properties_type: str
:ivar creation_date: The creation date of the scope map.
:vartype creation_date: ~datetime.datetime
:ivar provisioning_state: Provisioning state of the resource. Known values are: "Creating",
"Updating", "Deleting", "Succeeded", "Failed", and "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProvisioningState
:ivar actions: The list of scoped permissions for registry artifacts.
E.g. repositories/repository-name/content/read,
repositories/repository-name/metadata/write.
:vartype actions: list[str]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"type_properties_type": {"readonly": True},
"creation_date": {"readonly": True},
"provisioning_state": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"description": {"key": "properties.description", "type": "str"},
"type_properties_type": {"key": "properties.type", "type": "str"},
"creation_date": {"key": "properties.creationDate", "type": "iso-8601"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"actions": {"key": "properties.actions", "type": "[str]"},
}
def __init__(
self, *, description: Optional[str] = None, actions: Optional[List[str]] = None, **kwargs: Any
) -> None:
"""
:keyword description: The user-friendly description of the scope map.
:paramtype description: str
:keyword actions: The list of scoped permissions for registry artifacts.
E.g. repositories/repository-name/content/read,
repositories/repository-name/metadata/write.
:paramtype actions: list[str]
"""
super().__init__(**kwargs)
self.description = description
self.type_properties_type = None
self.creation_date = None
self.provisioning_state = None
self.actions = actions
class ScopeMapListResult(_serialization.Model):
"""The result of a request to list scope maps for a container registry.
:ivar value: The list of scope maps. Since this list may be incomplete, the nextLink field
should be used to request the next list of scope maps.
:vartype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.ScopeMap]
:ivar next_link: The URI that can be used to request the next list of scope maps.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[ScopeMap]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.ScopeMap"]] = None, next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword value: The list of scope maps. Since this list may be incomplete, the nextLink field
should be used to request the next list of scope maps.
:paramtype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.ScopeMap]
:keyword next_link: The URI that can be used to request the next list of scope maps.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class ScopeMapUpdateParameters(_serialization.Model):
"""The properties for updating the scope map.
:ivar description: The user-friendly description of the scope map.
:vartype description: str
:ivar actions: The list of scope permissions for registry artifacts.
E.g. repositories/repository-name/pull,
repositories/repository-name/delete.
:vartype actions: list[str]
"""
_attribute_map = {
"description": {"key": "properties.description", "type": "str"},
"actions": {"key": "properties.actions", "type": "[str]"},
}
def __init__(
self, *, description: Optional[str] = None, actions: Optional[List[str]] = None, **kwargs: Any
) -> None:
"""
:keyword description: The user-friendly description of the scope map.
:paramtype description: str
:keyword actions: The list of scope permissions for registry artifacts.
E.g. repositories/repository-name/pull,
repositories/repository-name/delete.
:paramtype actions: list[str]
"""
super().__init__(**kwargs)
self.description = description
self.actions = actions
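# Illustrative sketch (not part of the generated module): actions follow the
# repositories/<repository-name>/<action> patterns shown in the docstrings
# above; the repository name here is hypothetical.
def _example_scope_map_update() -> "ScopeMapUpdateParameters":
    return ScopeMapUpdateParameters(
        description="Read-only access to the samples repository",
        actions=["repositories/samples/content/read"],
    )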
class Sku(_serialization.Model):
"""The SKU of a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: The SKU name of the container registry. Required for registry creation.
Known values are: "Classic", "Basic", "Standard", and "Premium".
:vartype name: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SkuName
:ivar tier: The SKU tier based on the SKU name. Known values are: "Classic", "Basic",
"Standard", and "Premium".
:vartype tier: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SkuTier
"""
_validation = {
"name": {"required": True},
"tier": {"readonly": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"tier": {"key": "tier", "type": "str"},
}
def __init__(self, *, name: Union[str, "_models.SkuName"], **kwargs: Any) -> None:
"""
:keyword name: The SKU name of the container registry. Required for registry creation.
Known values are: "Classic", "Basic", "Standard", and "Premium".
:paramtype name: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SkuName
"""
super().__init__(**kwargs)
self.name = name
self.tier = None
class Source(_serialization.Model):
"""The registry node that generated the event. Put differently, while the actor initiates the
event, the source generates it.
:ivar addr: The IP or hostname and the port of the registry node that generated the event.
Generally, this will be resolved by os.Hostname() along with the running port.
:vartype addr: str
:ivar instance_id: The running instance of an application. Changes after each restart.
:vartype instance_id: str
"""
_attribute_map = {
"addr": {"key": "addr", "type": "str"},
"instance_id": {"key": "instanceID", "type": "str"},
}
def __init__(self, *, addr: Optional[str] = None, instance_id: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword addr: The IP or hostname and the port of the registry node that generated the event.
Generally, this will be resolved by os.Hostname() along with the running port.
:paramtype addr: str
:keyword instance_id: The running instance of an application. Changes after each restart.
:paramtype instance_id: str
"""
super().__init__(**kwargs)
self.addr = addr
self.instance_id = instance_id
class Status(_serialization.Model):
"""The status of an Azure resource at the time the operation was called.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar display_status: The short label for the status.
:vartype display_status: str
:ivar message: The detailed message for the status, including alerts and error messages.
:vartype message: str
:ivar timestamp: The timestamp when the status was changed to the current value.
:vartype timestamp: ~datetime.datetime
"""
_validation = {
"display_status": {"readonly": True},
"message": {"readonly": True},
"timestamp": {"readonly": True},
}
_attribute_map = {
"display_status": {"key": "displayStatus", "type": "str"},
"message": {"key": "message", "type": "str"},
"timestamp": {"key": "timestamp", "type": "iso-8601"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.display_status = None
self.message = None
self.timestamp = None
class StatusDetailProperties(_serialization.Model):
"""The status detail properties of the connected registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The component of the connected registry corresponding to the status.
:vartype type: str
:ivar code: The code of the status.
:vartype code: str
:ivar description: The description of the status.
:vartype description: str
:ivar timestamp: The timestamp of the status.
:vartype timestamp: ~datetime.datetime
:ivar correlation_id: The correlation ID of the status.
:vartype correlation_id: str
"""
_validation = {
"type": {"readonly": True},
"code": {"readonly": True},
"description": {"readonly": True},
"timestamp": {"readonly": True},
"correlation_id": {"readonly": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"code": {"key": "code", "type": "str"},
"description": {"key": "description", "type": "str"},
"timestamp": {"key": "timestamp", "type": "iso-8601"},
"correlation_id": {"key": "correlationId", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.type = None
self.code = None
self.description = None
self.timestamp = None
self.correlation_id = None
class SyncProperties(_serialization.Model):
"""The sync properties of the connected registry with its parent.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar token_id: The resource ID of the ACR token used to authenticate the connected registry to
its parent during sync. Required.
:vartype token_id: str
:ivar schedule: The cron expression indicating the schedule on which the connected registry
syncs with its parent.
:vartype schedule: str
:ivar sync_window: The time window during which sync is enabled for each schedule occurrence.
Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S as per ISO8601.
:vartype sync_window: ~datetime.timedelta
:ivar message_ttl: The period of time for which a message is available to sync before it
expires. Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S as per ISO8601.
Required.
:vartype message_ttl: ~datetime.timedelta
:ivar last_sync_time: The last time a sync occurred between the connected registry and its
parent.
:vartype last_sync_time: ~datetime.datetime
:ivar gateway_endpoint: The gateway endpoint used by the connected registry to communicate with
its parent.
:vartype gateway_endpoint: str
"""
_validation = {
"token_id": {"required": True},
"message_ttl": {"required": True},
"last_sync_time": {"readonly": True},
"gateway_endpoint": {"readonly": True},
}
_attribute_map = {
"token_id": {"key": "tokenId", "type": "str"},
"schedule": {"key": "schedule", "type": "str"},
"sync_window": {"key": "syncWindow", "type": "duration"},
"message_ttl": {"key": "messageTtl", "type": "duration"},
"last_sync_time": {"key": "lastSyncTime", "type": "iso-8601"},
"gateway_endpoint": {"key": "gatewayEndpoint", "type": "str"},
}
def __init__(
self,
*,
token_id: str,
message_ttl: datetime.timedelta,
schedule: Optional[str] = None,
sync_window: Optional[datetime.timedelta] = None,
**kwargs: Any
) -> None:
"""
:keyword token_id: The resource ID of the ACR token used to authenticate the connected registry
to its parent during sync. Required.
:paramtype token_id: str
:keyword schedule: The cron expression indicating the schedule on which the connected registry
syncs with its parent.
:paramtype schedule: str
:keyword sync_window: The time window during which sync is enabled for each schedule
occurrence. Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S as per ISO8601.
:paramtype sync_window: ~datetime.timedelta
:keyword message_ttl: The period of time for which a message is available to sync before it
expires. Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S as per ISO8601.
Required.
:paramtype message_ttl: ~datetime.timedelta
"""
super().__init__(**kwargs)
self.token_id = token_id
self.schedule = schedule
self.sync_window = sync_window
self.message_ttl = message_ttl
self.last_sync_time = None
self.gateway_endpoint = None
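# Illustrative sketch (not part of the generated module): durations are Python
# timedeltas and serialize to ISO8601 durations (e.g. P2D) on the wire; the
# token resource ID below is hypothetical and elided for brevity.
def _example_sync_properties() -> "SyncProperties":
    return SyncProperties(
        token_id="/subscriptions/.../registries/myregistry/tokens/sync-token",
        message_ttl=datetime.timedelta(days=2),
        schedule="0 9 * * *",  # cron expression: daily at 09:00
        sync_window=datetime.timedelta(hours=4),
    )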
class SyncUpdateProperties(_serialization.Model):
"""The parameters for updating the sync properties of the connected registry with its parent.
:ivar schedule: The cron expression indicating the schedule on which the connected registry
syncs with its parent.
:vartype schedule: str
:ivar sync_window: The time window during which sync is enabled for each schedule occurrence.
Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S as per ISO8601.
:vartype sync_window: ~datetime.timedelta
:ivar message_ttl: The period of time for which a message is available to sync before it
expires. Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S as per ISO8601.
:vartype message_ttl: ~datetime.timedelta
"""
_attribute_map = {
"schedule": {"key": "schedule", "type": "str"},
"sync_window": {"key": "syncWindow", "type": "duration"},
"message_ttl": {"key": "messageTtl", "type": "duration"},
}
def __init__(
self,
*,
schedule: Optional[str] = None,
sync_window: Optional[datetime.timedelta] = None,
message_ttl: Optional[datetime.timedelta] = None,
**kwargs: Any
) -> None:
"""
:keyword schedule: The cron expression indicating the schedule on which the connected registry
syncs with its parent.
:paramtype schedule: str
:keyword sync_window: The time window during which sync is enabled for each schedule
occurrence. Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S as per ISO8601.
:paramtype sync_window: ~datetime.timedelta
:keyword message_ttl: The period of time for which a message is available to sync before it
expires. Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S as per ISO8601.
:paramtype message_ttl: ~datetime.timedelta
"""
super().__init__(**kwargs)
self.schedule = schedule
self.sync_window = sync_window
self.message_ttl = message_ttl
class SystemData(_serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:ivar created_by: The identity that created the resource.
:vartype created_by: str
:ivar created_by_type: The type of identity that created the resource. Known values are:
"User", "Application", "ManagedIdentity", and "Key".
:vartype created_by_type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.CreatedByType
:ivar created_at: The timestamp of resource creation (UTC).
:vartype created_at: ~datetime.datetime
:ivar last_modified_by: The identity that last modified the resource.
:vartype last_modified_by: str
:ivar last_modified_by_type: The type of identity that last modified the resource. Known values
are: "User", "Application", "ManagedIdentity", and "Key".
:vartype last_modified_by_type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.LastModifiedByType
:ivar last_modified_at: The timestamp of resource modification (UTC).
:vartype last_modified_at: ~datetime.datetime
"""
_attribute_map = {
"created_by": {"key": "createdBy", "type": "str"},
"created_by_type": {"key": "createdByType", "type": "str"},
"created_at": {"key": "createdAt", "type": "iso-8601"},
"last_modified_by": {"key": "lastModifiedBy", "type": "str"},
"last_modified_by_type": {"key": "lastModifiedByType", "type": "str"},
"last_modified_at": {"key": "lastModifiedAt", "type": "iso-8601"},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "_models.CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "_models.LastModifiedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs: Any
) -> None:
"""
:keyword created_by: The identity that created the resource.
:paramtype created_by: str
:keyword created_by_type: The type of identity that created the resource. Known values are:
"User", "Application", "ManagedIdentity", and "Key".
:paramtype created_by_type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.CreatedByType
:keyword created_at: The timestamp of resource creation (UTC).
:paramtype created_at: ~datetime.datetime
:keyword last_modified_by: The identity that last modified the resource.
:paramtype last_modified_by: str
:keyword last_modified_by_type: The type of identity that last modified the resource. Known
values are: "User", "Application", "ManagedIdentity", and "Key".
:paramtype last_modified_by_type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.LastModifiedByType
:keyword last_modified_at: The timestamp of resource modification (UTC).
:paramtype last_modified_at: ~datetime.datetime
"""
super().__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
class Target(_serialization.Model):
"""The target of the event.
:ivar media_type: The MIME type of the referenced object.
:vartype media_type: str
:ivar size: The number of bytes of the content. Same as Length field.
:vartype size: int
:ivar digest: The digest of the content, as defined by the Registry V2 HTTP API Specification.
:vartype digest: str
:ivar length: The number of bytes of the content. Same as Size field.
:vartype length: int
:ivar repository: The repository name.
:vartype repository: str
:ivar url: The direct URL to the content.
:vartype url: str
:ivar tag: The tag name.
:vartype tag: str
:ivar name: The name of the artifact.
:vartype name: str
:ivar version: The version of the artifact.
:vartype version: str
"""
_attribute_map = {
"media_type": {"key": "mediaType", "type": "str"},
"size": {"key": "size", "type": "int"},
"digest": {"key": "digest", "type": "str"},
"length": {"key": "length", "type": "int"},
"repository": {"key": "repository", "type": "str"},
"url": {"key": "url", "type": "str"},
"tag": {"key": "tag", "type": "str"},
"name": {"key": "name", "type": "str"},
"version": {"key": "version", "type": "str"},
}
def __init__(
self,
*,
media_type: Optional[str] = None,
size: Optional[int] = None,
digest: Optional[str] = None,
length: Optional[int] = None,
repository: Optional[str] = None,
url: Optional[str] = None,
tag: Optional[str] = None,
name: Optional[str] = None,
version: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword media_type: The MIME type of the referenced object.
:paramtype media_type: str
:keyword size: The number of bytes of the content. Same as Length field.
:paramtype size: int
:keyword digest: The digest of the content, as defined by the Registry V2 HTTP API
Specification.
:paramtype digest: str
:keyword length: The number of bytes of the content. Same as Size field.
:paramtype length: int
:keyword repository: The repository name.
:paramtype repository: str
:keyword url: The direct URL to the content.
:paramtype url: str
:keyword tag: The tag name.
:paramtype tag: str
:keyword name: The name of the artifact.
:paramtype name: str
:keyword version: The version of the artifact.
:paramtype version: str
"""
super().__init__(**kwargs)
self.media_type = media_type
self.size = size
self.digest = digest
self.length = length
self.repository = repository
self.url = url
self.tag = tag
self.name = name
self.version = version
class TlsCertificateProperties(_serialization.Model):
"""The TLS certificate properties of the connected registry login server.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The type of certificate location. "LocalDirectory"
:vartype type: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.CertificateType
:ivar location: Indicates the location of the certificates.
:vartype location: str
"""
_validation = {
"type": {"readonly": True},
"location": {"readonly": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.type = None
self.location = None
class TlsProperties(_serialization.Model):
"""The TLS properties of the connected registry login server.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar status: Indicates whether HTTPS is enabled for the login server. Known values are:
"Enabled" and "Disabled".
:vartype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.TlsStatus
:ivar certificate: The certificate used to configure HTTPS for the login server.
:vartype certificate:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TlsCertificateProperties
"""
_validation = {
"status": {"readonly": True},
"certificate": {"readonly": True},
}
_attribute_map = {
"status": {"key": "status", "type": "str"},
"certificate": {"key": "certificate", "type": "TlsCertificateProperties"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.status = None
self.certificate = None
class Token(ProxyResource):
"""An object that represents a token for a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
:ivar creation_date: The creation date of the token.
:vartype creation_date: ~datetime.datetime
:ivar provisioning_state: Provisioning state of the resource. Known values are: "Creating",
"Updating", "Deleting", "Succeeded", "Failed", and "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProvisioningState
:ivar scope_map_id: The resource ID of the scope map with which the token will be associated.
:vartype scope_map_id: str
:ivar credentials: The credentials that can be used for authenticating the token.
:vartype credentials:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenCredentialsProperties
:ivar status: The status of the token, for example enabled or disabled. Known values are: "enabled"
and "disabled".
:vartype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenStatus
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"creation_date": {"readonly": True},
"provisioning_state": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"creation_date": {"key": "properties.creationDate", "type": "iso-8601"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"scope_map_id": {"key": "properties.scopeMapId", "type": "str"},
"credentials": {"key": "properties.credentials", "type": "TokenCredentialsProperties"},
"status": {"key": "properties.status", "type": "str"},
}
def __init__(
self,
*,
scope_map_id: Optional[str] = None,
credentials: Optional["_models.TokenCredentialsProperties"] = None,
status: Optional[Union[str, "_models.TokenStatus"]] = None,
**kwargs: Any
) -> None:
"""
:keyword scope_map_id: The resource ID of the scope map with which the token will be associated.
:paramtype scope_map_id: str
:keyword credentials: The credentials that can be used for authenticating the token.
:paramtype credentials:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenCredentialsProperties
:keyword status: The status of the token, for example enabled or disabled. Known values are:
"enabled" and "disabled".
:paramtype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenStatus
"""
super().__init__(**kwargs)
self.creation_date = None
self.provisioning_state = None
self.scope_map_id = scope_map_id
self.credentials = credentials
self.status = status
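# Illustrative sketch (not part of the generated module): associating a token
# with a scope map by resource ID; the scope map ID below is hypothetical and
# elided for brevity.
def _example_build_token() -> "Token":
    return Token(
        scope_map_id="/subscriptions/.../registries/myregistry/scopeMaps/read-only",
        status="enabled",  # TokenStatus value as a string
    )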
class TokenCertificate(_serialization.Model):
"""The properties of a certificate used for authenticating a token.
:ivar name: Known values are: "certificate1" and "certificate2".
:vartype name: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenCertificateName
:ivar expiry: The expiry datetime of the certificate.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The thumbprint of the certificate.
:vartype thumbprint: str
:ivar encoded_pem_certificate: Base 64 encoded string of the public certificate1 in PEM format
that will be used for authenticating the token.
:vartype encoded_pem_certificate: str
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"expiry": {"key": "expiry", "type": "iso-8601"},
"thumbprint": {"key": "thumbprint", "type": "str"},
"encoded_pem_certificate": {"key": "encodedPemCertificate", "type": "str"},
}
def __init__(
self,
*,
name: Optional[Union[str, "_models.TokenCertificateName"]] = None,
expiry: Optional[datetime.datetime] = None,
thumbprint: Optional[str] = None,
encoded_pem_certificate: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword name: Known values are: "certificate1" and "certificate2".
:paramtype name: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenCertificateName
:keyword expiry: The expiry datetime of the certificate.
:paramtype expiry: ~datetime.datetime
:keyword thumbprint: The thumbprint of the certificate.
:paramtype thumbprint: str
:keyword encoded_pem_certificate: Base 64 encoded string of the public certificate1 in PEM
format that will be used for authenticating the token.
:paramtype encoded_pem_certificate: str
"""
super().__init__(**kwargs)
self.name = name
self.expiry = expiry
self.thumbprint = thumbprint
self.encoded_pem_certificate = encoded_pem_certificate
class TokenCredentialsProperties(_serialization.Model):
"""The properties of the credentials that can be used for authenticating the token.
:ivar certificates:
:vartype certificates:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenCertificate]
:ivar passwords:
:vartype passwords:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenPassword]
"""
_attribute_map = {
"certificates": {"key": "certificates", "type": "[TokenCertificate]"},
"passwords": {"key": "passwords", "type": "[TokenPassword]"},
}
def __init__(
self,
*,
certificates: Optional[List["_models.TokenCertificate"]] = None,
passwords: Optional[List["_models.TokenPassword"]] = None,
**kwargs: Any
) -> None:
"""
:keyword certificates:
:paramtype certificates:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenCertificate]
:keyword passwords:
:paramtype passwords:
list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenPassword]
"""
super().__init__(**kwargs)
self.certificates = certificates
self.passwords = passwords
class TokenListResult(_serialization.Model):
"""The result of a request to list tokens for a container registry.
:ivar value: The list of tokens. Since this list may be incomplete, the nextLink field should
be used to request the next list of tokens.
:vartype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Token]
:ivar next_link: The URI that can be used to request the next list of tokens.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[Token]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.Token"]] = None, next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword value: The list of tokens. Since this list may be incomplete, the nextLink field
should be used to request the next list of tokens.
:paramtype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Token]
:keyword next_link: The URI that can be used to request the next list of tokens.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class TokenPassword(_serialization.Model):
"""The password that will be used for authenticating the token of a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar creation_time: The creation datetime of the password.
:vartype creation_time: ~datetime.datetime
:ivar expiry: The expiry datetime of the password.
:vartype expiry: ~datetime.datetime
:ivar name: The password name "password1" or "password2". Known values are: "password1" and
"password2".
:vartype name: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenPasswordName
:ivar value: The password value.
:vartype value: str
"""
_validation = {
"value": {"readonly": True},
}
_attribute_map = {
"creation_time": {"key": "creationTime", "type": "iso-8601"},
"expiry": {"key": "expiry", "type": "iso-8601"},
"name": {"key": "name", "type": "str"},
"value": {"key": "value", "type": "str"},
}
def __init__(
self,
*,
creation_time: Optional[datetime.datetime] = None,
expiry: Optional[datetime.datetime] = None,
name: Optional[Union[str, "_models.TokenPasswordName"]] = None,
**kwargs: Any
) -> None:
"""
:keyword creation_time: The creation datetime of the password.
:paramtype creation_time: ~datetime.datetime
:keyword expiry: The expiry datetime of the password.
:paramtype expiry: ~datetime.datetime
:keyword name: The password name "password1" or "password2". Known values are: "password1" and
"password2".
:paramtype name: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenPasswordName
"""
super().__init__(**kwargs)
self.creation_time = creation_time
self.expiry = expiry
self.name = name
self.value = None
class TokenUpdateParameters(_serialization.Model):
"""The parameters for updating a token.
    :ivar scope_map_id: The resource ID of the scope map to which the token will be
     associated.
:vartype scope_map_id: str
    :ivar status: The status of the token, for example enabled or disabled. Known values are:
     "enabled" and "disabled".
:vartype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenStatus
:ivar credentials: The credentials that can be used for authenticating the token.
:vartype credentials:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenCredentialsProperties
"""
_attribute_map = {
"scope_map_id": {"key": "properties.scopeMapId", "type": "str"},
"status": {"key": "properties.status", "type": "str"},
"credentials": {"key": "properties.credentials", "type": "TokenCredentialsProperties"},
}
def __init__(
self,
*,
scope_map_id: Optional[str] = None,
status: Optional[Union[str, "_models.TokenStatus"]] = None,
credentials: Optional["_models.TokenCredentialsProperties"] = None,
**kwargs: Any
) -> None:
"""
        :keyword scope_map_id: The resource ID of the scope map to which the token will be
         associated.
:paramtype scope_map_id: str
        :keyword status: The status of the token, for example enabled or disabled. Known values
         are: "enabled" and "disabled".
:paramtype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenStatus
:keyword credentials: The credentials that can be used for authenticating the token.
:paramtype credentials:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TokenCredentialsProperties
"""
super().__init__(**kwargs)
self.scope_map_id = scope_map_id
self.status = status
self.credentials = credentials
class TrustPolicy(_serialization.Model):
"""The content trust policy for a container registry.
:ivar type: The type of trust policy. "Notary"
:vartype type: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.TrustPolicyType
:ivar status: The value that indicates whether the policy is enabled or not. Known values are:
"enabled" and "disabled".
:vartype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PolicyStatus
"""
_attribute_map = {
"type": {"key": "type", "type": "str"},
"status": {"key": "status", "type": "str"},
}
def __init__(
self,
*,
type: Union[str, "_models.TrustPolicyType"] = "Notary",
status: Optional[Union[str, "_models.PolicyStatus"]] = None,
**kwargs: Any
) -> None:
"""
:keyword type: The type of trust policy. "Notary"
:paramtype type: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.TrustPolicyType
:keyword status: The value that indicates whether the policy is enabled or not. Known values
are: "enabled" and "disabled".
:paramtype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PolicyStatus
"""
super().__init__(**kwargs)
self.type = type
self.status = status
class UserIdentityProperties(_serialization.Model):
"""UserIdentityProperties.
:ivar principal_id: The principal id of user assigned identity.
:vartype principal_id: str
:ivar client_id: The client id of user assigned identity.
:vartype client_id: str
"""
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"client_id": {"key": "clientId", "type": "str"},
}
def __init__(self, *, principal_id: Optional[str] = None, client_id: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword principal_id: The principal id of user assigned identity.
:paramtype principal_id: str
:keyword client_id: The client id of user assigned identity.
:paramtype client_id: str
"""
super().__init__(**kwargs)
self.principal_id = principal_id
self.client_id = client_id
class VirtualNetworkRule(_serialization.Model):
"""Virtual network rule.
All required parameters must be populated in order to send to Azure.
:ivar action: The action of virtual network rule. "Allow"
:vartype action: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Action
:ivar virtual_network_resource_id: Resource ID of a subnet, for example:
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}.
Required.
:vartype virtual_network_resource_id: str
"""
_validation = {
"virtual_network_resource_id": {"required": True},
}
_attribute_map = {
"action": {"key": "action", "type": "str"},
"virtual_network_resource_id": {"key": "id", "type": "str"},
}
def __init__(
self, *, virtual_network_resource_id: str, action: Optional[Union[str, "_models.Action"]] = None, **kwargs: Any
) -> None:
"""
:keyword action: The action of virtual network rule. "Allow"
:paramtype action: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Action
:keyword virtual_network_resource_id: Resource ID of a subnet, for example:
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}.
Required.
:paramtype virtual_network_resource_id: str
"""
super().__init__(**kwargs)
self.action = action
self.virtual_network_resource_id = virtual_network_resource_id
class Webhook(Resource):
"""An object that represents a webhook for a container registry.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar location: The location of the resource. This cannot be changed after the resource is
created. Required.
:vartype location: str
:ivar tags: The tags of the resource.
:vartype tags: dict[str, str]
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.SystemData
:ivar status: The status of the webhook at the time the operation was called. Known values are:
"enabled" and "disabled".
:vartype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookStatus
:ivar scope: The scope of repositories where the event can be triggered. For example, 'foo:*'
means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only.
'foo' is equivalent to 'foo:latest'. Empty means all events.
:vartype scope: str
:ivar actions: The list of actions that trigger the webhook to post notifications.
:vartype actions: list[str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookAction]
:ivar provisioning_state: The provisioning state of the webhook at the time the operation was
called. Known values are: "Creating", "Updating", "Deleting", "Succeeded", "Failed", and
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ProvisioningState
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"location": {"required": True},
"system_data": {"readonly": True},
"provisioning_state": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"system_data": {"key": "systemData", "type": "SystemData"},
"status": {"key": "properties.status", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"actions": {"key": "properties.actions", "type": "[str]"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
status: Optional[Union[str, "_models.WebhookStatus"]] = None,
scope: Optional[str] = None,
actions: Optional[List[Union[str, "_models.WebhookAction"]]] = None,
**kwargs: Any
) -> None:
"""
:keyword location: The location of the resource. This cannot be changed after the resource is
created. Required.
:paramtype location: str
:keyword tags: The tags of the resource.
:paramtype tags: dict[str, str]
:keyword status: The status of the webhook at the time the operation was called. Known values
are: "enabled" and "disabled".
:paramtype status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookStatus
:keyword scope: The scope of repositories where the event can be triggered. For example,
'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar'
only. 'foo' is equivalent to 'foo:latest'. Empty means all events.
:paramtype scope: str
:keyword actions: The list of actions that trigger the webhook to post notifications.
:paramtype actions: list[str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookAction]
"""
super().__init__(location=location, tags=tags, **kwargs)
self.status = status
self.scope = scope
self.actions = actions
self.provisioning_state = None
class WebhookCreateParameters(_serialization.Model):
"""The parameters for creating a webhook.
All required parameters must be populated in order to send to Azure.
:ivar tags: The tags for the webhook.
:vartype tags: dict[str, str]
:ivar location: The location of the webhook. This cannot be changed after the resource is
created. Required.
:vartype location: str
:ivar service_uri: The service URI for the webhook to post notifications.
:vartype service_uri: str
:ivar custom_headers: Custom headers that will be added to the webhook notifications.
:vartype custom_headers: dict[str, str]
:ivar status: The status of the webhook at the time the operation was called. Known values are:
"enabled" and "disabled".
:vartype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookStatus
:ivar scope: The scope of repositories where the event can be triggered. For example, 'foo:*'
means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only.
'foo' is equivalent to 'foo:latest'. Empty means all events.
:vartype scope: str
:ivar actions: The list of actions that trigger the webhook to post notifications.
:vartype actions: list[str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookAction]
"""
_validation = {
"location": {"required": True},
}
_attribute_map = {
"tags": {"key": "tags", "type": "{str}"},
"location": {"key": "location", "type": "str"},
"service_uri": {"key": "properties.serviceUri", "type": "str"},
"custom_headers": {"key": "properties.customHeaders", "type": "{str}"},
"status": {"key": "properties.status", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"actions": {"key": "properties.actions", "type": "[str]"},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
service_uri: Optional[str] = None,
custom_headers: Optional[Dict[str, str]] = None,
status: Optional[Union[str, "_models.WebhookStatus"]] = None,
scope: Optional[str] = None,
actions: Optional[List[Union[str, "_models.WebhookAction"]]] = None,
**kwargs: Any
) -> None:
"""
:keyword tags: The tags for the webhook.
:paramtype tags: dict[str, str]
:keyword location: The location of the webhook. This cannot be changed after the resource is
created. Required.
:paramtype location: str
:keyword service_uri: The service URI for the webhook to post notifications.
:paramtype service_uri: str
:keyword custom_headers: Custom headers that will be added to the webhook notifications.
:paramtype custom_headers: dict[str, str]
:keyword status: The status of the webhook at the time the operation was called. Known values
are: "enabled" and "disabled".
:paramtype status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookStatus
:keyword scope: The scope of repositories where the event can be triggered. For example,
'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar'
only. 'foo' is equivalent to 'foo:latest'. Empty means all events.
:paramtype scope: str
:keyword actions: The list of actions that trigger the webhook to post notifications.
:paramtype actions: list[str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookAction]
"""
super().__init__(**kwargs)
self.tags = tags
self.location = location
self.service_uri = service_uri
self.custom_headers = custom_headers
self.status = status
self.scope = scope
self.actions = actions
class WebhookListResult(_serialization.Model):
"""The result of a request to list webhooks for a container registry.
:ivar value: The list of webhooks. Since this list may be incomplete, the nextLink field should
be used to request the next list of webhooks.
:vartype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Webhook]
:ivar next_link: The URI that can be used to request the next list of webhooks.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[Webhook]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.Webhook"]] = None, next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword value: The list of webhooks. Since this list may be incomplete, the nextLink field
should be used to request the next list of webhooks.
:paramtype value: list[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Webhook]
:keyword next_link: The URI that can be used to request the next list of webhooks.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class WebhookUpdateParameters(_serialization.Model):
"""The parameters for updating a webhook.
:ivar tags: The tags for the webhook.
:vartype tags: dict[str, str]
:ivar service_uri: The service URI for the webhook to post notifications.
:vartype service_uri: str
:ivar custom_headers: Custom headers that will be added to the webhook notifications.
:vartype custom_headers: dict[str, str]
:ivar status: The status of the webhook at the time the operation was called. Known values are:
"enabled" and "disabled".
:vartype status: str or ~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookStatus
:ivar scope: The scope of repositories where the event can be triggered. For example, 'foo:*'
means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only.
'foo' is equivalent to 'foo:latest'. Empty means all events.
:vartype scope: str
:ivar actions: The list of actions that trigger the webhook to post notifications.
:vartype actions: list[str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookAction]
"""
_attribute_map = {
"tags": {"key": "tags", "type": "{str}"},
"service_uri": {"key": "properties.serviceUri", "type": "str"},
"custom_headers": {"key": "properties.customHeaders", "type": "{str}"},
"status": {"key": "properties.status", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"actions": {"key": "properties.actions", "type": "[str]"},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
service_uri: Optional[str] = None,
custom_headers: Optional[Dict[str, str]] = None,
status: Optional[Union[str, "_models.WebhookStatus"]] = None,
scope: Optional[str] = None,
actions: Optional[List[Union[str, "_models.WebhookAction"]]] = None,
**kwargs: Any
) -> None:
"""
:keyword tags: The tags for the webhook.
:paramtype tags: dict[str, str]
:keyword service_uri: The service URI for the webhook to post notifications.
:paramtype service_uri: str
:keyword custom_headers: Custom headers that will be added to the webhook notifications.
:paramtype custom_headers: dict[str, str]
:keyword status: The status of the webhook at the time the operation was called. Known values
are: "enabled" and "disabled".
:paramtype status: str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookStatus
:keyword scope: The scope of repositories where the event can be triggered. For example,
'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar'
only. 'foo' is equivalent to 'foo:latest'. Empty means all events.
:paramtype scope: str
:keyword actions: The list of actions that trigger the webhook to post notifications.
:paramtype actions: list[str or
~azure.mgmt.containerregistry.v2021_06_01_preview.models.WebhookAction]
"""
super().__init__(**kwargs)
self.tags = tags
self.service_uri = service_uri
self.custom_headers = custom_headers
self.status = status
self.scope = scope
self.actions = actions
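# Illustrative usage sketch of the request models defined above. The values below
# are hypothetical placeholders; "push" is assumed to be a valid WebhookAction.
if __name__ == "__main__":
    token_update = TokenUpdateParameters(
        scope_map_id="/subscriptions/xxx/resourceGroups/rg/providers/"
        "Microsoft.ContainerRegistry/registries/acr/scopeMaps/read-only",
        status="disabled",
    )
    webhook_create = WebhookCreateParameters(
        location="eastus",
        service_uri="https://example.com/hook",
        actions=["push"],
        scope="foo:*",
    )
    print(token_update.status, webhook_create.location)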
| [
"noreply@github.com"
] | noreply@github.com |
a653ca06c4441385d27dfd1f9ef0fa271a74121a | 0c06a49909784c060db3522d3ab26e18429ae685 | /003-discrete_sine_transform.py | 20f7bf697f17e3ce6f312f48bfce2392223e36d5 | [] | no_license | TheBlueChameleon/Py_Advanced_Codes | 5ba97cc24de409a4ad6c80df6949cab7b4eb9df3 | 98247be007cbfcb9f6ea2a7e2d905cf4e0aeb3ec | refs/heads/main | 2023-06-09T16:15:52.852921 | 2021-06-19T08:49:14 | 2021-06-19T08:49:14 | 338,657,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | import scipy
import scipy.fft  # imported explicitly: "import scipy" alone may not expose scipy.fft
import numpy as np
import matplotlib.pyplot as plt
N = 6280
P = 4
X = np.linspace(0, P * np.pi, N)
Y = np.sin(np.pi * X) + .3 * np.sin(5 * np.pi * X)
A = scipy.fft.dst(Y, norm="ortho")
W = [k / (P * np.pi) for k in range(len(A))]
spectrumLimit = 30
fig = plt.figure( figsize=(5, 10) )
drw = fig.add_subplot(411)
drw.set_xlabel("time t")
drw.set_ylabel("signal intensity")
drw.plot(X, Y)
drw = fig.add_subplot(412)
drw.set_xlabel("angular frequency $\omega$")
drw.set_ylabel("signal amplitude")
drw.plot(W[:spectrumLimit], A[:spectrumLimit])
A[spectrumLimit:] = 0   # low-pass filter: zero every coefficient above the kept band
reconstruction = scipy.fft.idst(A, norm="ortho")
drw = fig.add_subplot(413)
drw.set_xlabel("angular frequency $\omega$")
drw.set_ylabel("signal amplitude")
drw.plot(W[:spectrumLimit], A[:spectrumLimit])
drw = fig.add_subplot(414)
drw.set_xlabel("time t")
drw.set_ylabel("signal intensity")
drw.plot(X, reconstruction)
plt.show()
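# Sanity check (illustrative): with norm="ortho" the DST/IDST pair is an exact
# inverse, so a reconstruction from the unmodified spectrum recovers the signal.
roundtrip = scipy.fft.idst(scipy.fft.dst(Y, norm="ortho"), norm="ortho")
assert np.allclose(roundtrip, Y)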
| [
"stefan_hartinger@gmx.de"
] | stefan_hartinger@gmx.de |
89d3186396baa0f5e7a3782aa285ac0dbd56d027 | 9a1a0b47b59e55e3f2043ad32d5d58455e69425d | /0708/repeticiones/ej113.py | 896abdd0ecd8dce9430c5332c13b4b92e6b06d17 | [] | no_license | piranna/asi-iesenlaces | dcabc0213791c04a8b6b4ccb850d5bda78292ae1 | cf35cbea732065e09a58604a93538a9b9dca875f | refs/heads/master | 2016-08-06T05:09:44.270637 | 2008-06-15T09:00:27 | 2008-06-15T09:00:27 | 32,416,588 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 397 | py | # -*- coding: cp1252 -*-
"""
$Id$
Ej 113
Pide un texto que no tenga mayúsculas. Si hay una letra mayúscula,
vuelve a pedir el texto
"""
texto = raw_input("Introduce una frase toda en minúsculas: ")
while not texto.islower():
# vuelve a pedir el texto si hay alguna mayúscula
texto = raw_input("Introduce una frase toda en minúsculas: ")
print "Texto introducido", texto
| [
"morillas@f86dea77-7e2e-0410-97ea-a74e350978e6"
] | morillas@f86dea77-7e2e-0410-97ea-a74e350978e6 |
9146fd25e657112193413c9c94cfc2f46fec9414 | 110d75e6936c551c38668856656bcae7e9af5d08 | /evstats.py | 3c8abd781f935d6167796c3a82cbbf8c979640b7 | [
"MIT"
] | permissive | claireshort4/OEvent | fe463f60114ba44788dfa8d838e69bbdd3b0561a | 51cc9b4dab7e96160a31b9b451cf9c32bd014271 | refs/heads/master | 2023-03-22T15:40:41.228570 | 2021-03-12T21:07:25 | 2021-03-12T21:07:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | import numpy as np
# get coefficient of variation squared; (< 1 means rhythmic; 1=Poisson, > 1 for bursty)
def getCV2 (isi): return (np.std(isi)/np.mean(isi))**2
# get local variation
# < 1 for regular/rhythmic, == 1 for Poisson, > 1 for bursty
# based on Shinomoto et al 2005
def getLV (isi):
s = 0.0
if len(isi) < 2: return 0.0
  for i in range(len(isi)-1): # all consecutive ISI pairs (i, i+1)
n = (isi[i]-isi[i+1])**2
d = (isi[i]+isi[i+1])**2
if d > 0.:
s += 3.0*n/d
return s / (len(isi)-1)
# get the fano factor; lcount is a list of event counts (e.g. per trial or time window)
def getFF (lcount):
avg = np.mean(lcount)
if avg > 0. and len(lcount) > 1:
return np.std(lcount)**2 / avg
return 0.0
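# Illustrative usage sketch on synthetic data (assumption: nothing here comes
# from a real recording):
if __name__ == '__main__':
  rng = np.random.default_rng(0)
  isi = rng.exponential(scale=0.1, size=1000) # Poisson-like inter-event intervals
  counts = rng.poisson(lam=5.0, size=200) # event counts per window
  # for a Poisson process, CV^2, LV and FF should all be close to 1
  print('CV2:', getCV2(isi), 'LV:', getLV(isi), 'FF:', getFF(counts))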
| [
"samnemo@gmail.com"
] | samnemo@gmail.com |
cecdcb7982425da46c4c32df796eb8ef8076c3f2 | e098e0498f61d0d9345b8ef711de7a43c423f2d9 | /app.py | 505ae03f82981ebceb9083341cae7fafd8ea6959 | [] | no_license | byronvickers/tapioca | e24fc07ea908a9b7fef2f04e59d71e2b304bd933 | e73f9d64e9cd5982b80ae20d64df6f53b3ab4662 | refs/heads/master | 2020-07-30T10:16:56.967964 | 2016-11-13T17:26:50 | 2016-11-13T17:26:50 | 73,630,993 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,531 | py | # tmux attach
# pip install flask
# export FLASK_APP=app.py
# flask run --host=0.0.0.0
from flask import Flask, request, redirect, url_for, render_template, send_from_directory, flash
import shutil, subprocess, logging
from itertools import product
from werkzeug.utils import secure_filename
import os, urllib
import requests
STYPATH = "/home/ubuntu/fast-neural-style/models/instance_norm"
FNSPATH = "/home/ubuntu/fast-neural-style"
IMGPATH = "/home/ubuntu/fast-neural-style/out.png"
STATICPATH = "static/out.png"
TESTPATH = "/home/ubuntu/fast-neural-style/test.out.png"
TESTSTATICPATH = "static/test.out.png"
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
UPLOAD_FOLDER = '/home/ubuntu/flask-tmp/uploads'
APPLY_FILTERS_PATH = '/home/ubuntu/run_all_filters.py'
EMILY_FOLDER = '/home/ubuntu/flask-tmp/templates/emily'
### App
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['EMILY_FOLDER'] = EMILY_FOLDER
print ("UPSTYLE")
### Logging
logger = logging.getLogger('werkzeug')
handler = logging.FileHandler('access.log')
logger.addHandler(handler)
# Also add the handler to Flask's logger for cases
# where Werkzeug isn't used as the underlying WSGI server.
app.logger.addHandler(handler)
### Cache control
@app.after_request
def add_header(response):
response.cache_control.no_store = True
response.cache_control.no_cache = True
return response
### App routes
@app.route("/")
def index():
with open("templates/index.html") as f:
content = f.read()
return content
@app.route("/combine/")
def combine():
src = request.args.get('img', 'chicago.jpg')
sty = request.args.get('sty', 'mosaic.t7')
shellargs = ["th", "fast_neural_style.lua",
"-model", os.path.join(STYPATH,sty),
"-input_image", os.path.join(app.config['UPLOAD_FOLDER'], src),
"-output_image", "out.png",
"-gpu", "0"]
ret = subprocess.call(shellargs, cwd = FNSPATH)
shutil.copy(IMGPATH, STATICPATH)
return redirect(url_for('index'))
@app.route('/uploads/<path:filename>')
def download_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename, as_attachment=False)
@app.route('/emily/<path:path>')
def serve_emily(path):
return send_from_directory(app.config['EMILY_FOLDER'],
path, as_attachment=False)
@app.route("/test/")
def test():
shutil.copy(TESTPATH, TESTSTATICPATH)
return "<img src='/static/test.out.png'/>"
@app.route("/ui/")
def emily():
with open("templates/emily/index.html") as f:
content = f.read()
return content
@app.route("/fbtest/")
def fbtest():
with open("templates/fbtest.html") as f:
content = f.read()
return content
### Uploads
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit a empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('index'))
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form action="/upload" method=post enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
'''
@app.route('/urlload')
def load_url():
url = request.args.get('url', None)
uid = request.args.get('userid', None)
if url is None or uid is None:
return "Pass a url and uid parameter"
resp = requests.get(url)
img = resp.content
filename = "raw.png"
freldir = os.path.join(uid, filename)
try:
os.makedirs(os.path.join(app.config['UPLOAD_FOLDER'], uid))
except OSError:
return "Already exists"
with open(os.path.join(app.config['UPLOAD_FOLDER'], freldir), 'wb') as f:
f.write(img)
command = '{} {}'.format(APPLY_FILTERS_PATH, uid)
    subprocess.getstatusoutput(command)  # Python 3 home of the old commands.getstatusoutput
return "Success"
if __name__ == "__main__":
app.run()
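# Illustrative client calls (assumption: the app is served on localhost:5000):
#   import requests
#   requests.get("http://localhost:5000/urlload",
#                params={"url": "http://example.com/pic.png", "userid": "u123"})
#   requests.post("http://localhost:5000/upload", files={"file": open("photo.jpg", "rb")})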
| [
"ubuntu@ip-172-31-45-111.ec2.internal"
] | ubuntu@ip-172-31-45-111.ec2.internal |
f118e5471d0b71f954e6bd4e04a5883209a3e328 | 2466d0c361562f51169dd2b101cf2abd4fb4197f | /210127_test.py | 1ebc6541e933f1e8210068cb455d27b02f2d7546 | [] | no_license | SayaTakeuchi1010/refl1d | 360cff173eae87ae635e105cadc0b4ab05a618bb | f10762ce2d16ac6d2baa4a31b6e64505cbadf28c | refs/heads/master | 2023-05-01T13:19:58.707811 | 2021-05-21T22:46:19 | 2021-05-21T22:46:19 | 363,268,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,917 | py | from refl1d.names import*
from numpy import mod, exp, arange, linspace
import math
Probe.view = 'log' #log plot
data_file_n ='C:/Users/saya6/Documents/NCNR/test/TiOx_2.3_trunc.refl'
instrument = NCNR.XRay(wavelength=1.54, Tlo = 1.4975, slits_at_Tlo=0.01, slits_above = 0.01)
probe = instrument.load(data_file_n, back_reflectivity=False)
Si=SLD('Silicon', rho=20.071, irho= 0.458)
SiO2=SLD('SiO2', rho=18.831, irho=0.2440)
TiO2=SLD('TiO2', rho=30.799, irho=1.557)
TiO2_1=SLD('TiO2_1', rho=30.799, irho=1.557)
gas=SLD('Ambient',rho=0,irho=0)
Si_Thickness = Parameter(name='Si_Thickness', value= 50.0)
#value has to be float?
SiO2_Thickness=Parameter(name='SiO2_Thickness', value = 0.10)
TiO2_Thickness=Parameter(name='TiO2_Thickness', value=650.0)
TiO2_1_Thickness=Parameter(name='TiO2_1_Thickness', value=100.0)
Si_FracWidth=Parameter(name='Si:SiO2(W/T)', value=0.10)
SiO2_FracWidth = Parameter(name='SiO2:TiO2(W/T)', value=0.10)
TiO2_FracWidth=Parameter(name='TiO2:TiO2_1(W/T)', value = 0.10)
TiO2_1_FracWidth=Parameter(name='TiO2_1:gas(W/T)', value=0.10)
Si_FracWidth.range(0.0, 0.55) # Prints 'Parameter(Si:SiO2(W/T))'
# use fraction 55% thickness
SiO2_FracWidth.range(0.0, 0.55) # Prints Parameter(SiO2:TiO2(W/T))
TiO2_FracWidth.range(0.0, 0.55) # Prints Parameter(TiO2:TiO2_1(W/T))
TiO2_1_FracWidth.range(0.0, 0.55) # Prints Parameter(TiO2_1:gas(W/T))
T_eff0 = Parameter(name='Effective Thickness Si:SiO2', value = 50.0)
T_eff0=pow((pow(Si_Thickness, -4.0)+pow(SiO2_Thickness, -4.0)), -0.25)
T_eff1=Parameter(name='Effective Thickness SiO2:TiO2', value=50.0)
T_eff1=pow((pow(SiO2_Thickness, -4.0)+pow(TiO2_Thickness, -4.0)), -0.25)
T_eff2=Parameter(name='Effective Thickness TiO2:TiO2_1', value=50.0)
T_eff2=pow((pow(TiO2_Thickness, -4.0)+pow(TiO2_1_Thickness, -4.0)), -0.25) # same p-norm combination as T_eff0 and T_eff1
Si_SiO2 = (Si_FracWidth)*(T_eff0)
SiO2_TiO2 = (SiO2_FracWidth)*(T_eff1)
TiO2_TiO2_1=(TiO2_FracWidth)*(T_eff2)
TiO2_1_gas=(TiO2_1_FracWidth)*(TiO2_1_Thickness)
sample=Si(50.0, Si_SiO2)|SiO2(SiO2_Thickness, SiO2_TiO2)|TiO2(TiO2_Thickness,TiO2_TiO2_1)|TiO2_1(TiO2_1_Thickness, TiO2_1_gas)|gas(0.0, 0.0)
# 50A semi infinite,
TiO2.rho.range(20, 38) # prints Parameter(TiO2 rho)
TiO2_1.rho.range(20, 38) # prints Parameter(TiO2_1 rho) SLD 10-6 A^-2
SiO2_Thickness.range(0, 100) # prints Parameter(SiO2_Thickness) A
TiO2_Thickness.range(0, 650) # prints Parameter(TiO2_Thickness)
TiO2_1_Thickness.range(0, 200) # prints Parameter(TiO2_1_Thickness)
theta_offset=Parameter(name='Theta_Offset', value=0.0) # keep, fixed at 0
step = False
intensity=Parameter(name='Intensity', value = 0.9509)
intensity.range(0.9, 1.5)
probe.intensity= intensity
background=Parameter(name='Background', value=1e-10)
background.range(0, 1e-7)
probe.background=background
M=Experiment(probe=probe, sample=sample)
problem=FitProblem(M)
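# Illustrative command-line run (assumption: the standard refl1d CLI; flag names
# can vary between refl1d versions):
#   refl1d 210127_test.py --fit=dream --store=T1 --burn=1000 --steps=500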
print("end") | [
"saya.takeuchi@nist.gov"
] | saya.takeuchi@nist.gov |
4c2902a924d3a9a14d643a543c10bb7afec70217 | e043f008aaec14e006051c7609748729a78bef90 | /tests/test_falconparser.py | e56ce14595d4071e3f0f9eb1c42050f17872770b | [
"MIT"
] | permissive | DamianHeard/webargs | 723f38789ae4be61247da2a94ab590e11c808da7 | 4bba0bb4ca7bef3d0c53fab8f9af632e9653b2ed | refs/heads/dev | 2021-01-18T00:25:58.792302 | 2015-11-09T02:07:52 | 2015-11-09T02:07:52 | 45,103,859 | 0 | 0 | null | 2015-10-28T10:05:39 | 2015-10-28T10:05:39 | null | UTF-8 | Python | false | false | 4,147 | py | # -*- coding: utf-8 -*-
import json
import pytest
import falcon
import webtest
from webargs import fields
from webargs.falconparser import parser, use_args, use_kwargs
def use_args_hook(args, context_key='args', **kwargs):
def hook(req, resp, params):
parsed_args = parser.parse(args, req=req, **kwargs)
req.context[context_key] = parsed_args
return hook
@pytest.fixture()
def api():
api_ = falcon.API()
hello_args = {
'name': fields.Str(required=True)
}
class ParseResource(object):
def on_get(self, req, resp):
args = parser.parse(hello_args, req=req, locations=('query', 'headers', 'cookies'))
resp.body = json.dumps(args)
def on_post(self, req, resp):
args = parser.parse(hello_args, req=req, locations=('form', ))
resp.body = json.dumps(args)
def on_put(self, req, resp):
args = parser.parse(hello_args, req=req, locations=('json', ))
resp.body = json.dumps(args)
class UseArgsResource(object):
@use_args(hello_args)
def on_get(self, req, resp, args):
resp.body = json.dumps(args)
class UseArgsWithParamResource(object):
@use_args(hello_args)
def on_get(self, req, resp, args, _id):
args['_id'] = int(_id)
resp.body = json.dumps(args)
class UseKwargsResource(object):
@use_kwargs(hello_args)
def on_get(self, req, resp, name):
resp.body = json.dumps({'name': name})
class AlwaysErrorResource(object):
args = {'bad': fields.Field(validate=lambda x: False)}
def on_get(self, req, resp):
parser.parse(self.args, req=req)
@falcon.before(use_args_hook(hello_args))
class HookResource(object):
def on_get(self, req, resp):
            resp.body = json.dumps(req.context['args'])
api_.add_route('/parse', ParseResource())
api_.add_route('/use_args', UseArgsResource())
api_.add_route('/use_args_with_param/{_id}', UseArgsWithParamResource())
api_.add_route('/use_kwargs', UseKwargsResource())
    api_.add_route('/hook', HookResource())
api_.add_route('/error', AlwaysErrorResource())
return api_
@pytest.fixture()
def testapp(api):
return webtest.TestApp(api)
class TestParseResource:
url = '/parse'
def test_parse_querystring(self, testapp):
assert testapp.get(self.url + '?name=Fred').json == {'name': 'Fred'}
def test_parse_form(self, testapp):
res = testapp.post(self.url, {'name': 'Fred'})
assert res.json == {'name': 'Fred'}
def test_parse_json(self, testapp):
res = testapp.put_json(self.url, {'name': 'Fred'})
assert res.json == {'name': 'Fred'}
def test_parse_headers(self, testapp):
res = testapp.get(self.url, headers={'name': 'Fred'})
assert res.json == {'name': 'Fred'}
def test_parsing_cookies(self, testapp):
testapp.set_cookie('name', 'Fred')
assert testapp.get(self.url).json == {'name': 'Fred'}
class TestErrorHandler:
url = '/error'
def test_error_handler_returns_422_response(self, testapp):
res = testapp.get(self.url + '?bad=42', expect_errors=True)
assert res.status_code == 422
assert 'errors' in res.json
assert 'bad' in res.json['errors']
assert res.json['errors']['bad'] == ['Invalid value.']
class TestUseArgsResource:
url = '/use_args'
def test_parse_querystring(self, testapp):
assert testapp.get(self.url + '?name=Fred').json == {'name': 'Fred'}
class TestUseArgsWithParamResource:
url = '/use_args_with_param/42'
def test_parse_querystring(self, testapp):
assert testapp.get(self.url + '?name=Fred').json == {'name': 'Fred', '_id': 42}
class TestUseKwargsResource:
url = '/use_kwargs'
def test_parse_querystring(self, testapp):
assert testapp.get(self.url + '?name=Fred').json == {'name': 'Fred'}
class TestHookResource:
url = '/hook'
def test_parse_querystring(self, testapp):
assert testapp.get(self.url + '?name=Fred').json == {'name': 'Fred'}
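# Running this module directly with pytest (assumption: falcon, webtest and
# pytest are installed in the environment):
#   pytest tests/test_falconparser.py -q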
| [
"sloria1@gmail.com"
] | sloria1@gmail.com |
a98c79f72d9b23ba1ca4450b0123d1e96e092bd4 | 70d7d4d8168233a0ab12fb5708412dd01c4487fe | /__str___ and __repr__/__str__test.py | 89ba75a1d73d11ab927a3a2c234f20b853da4b5a | [] | no_license | spetum/python_practice | 312134eadeb18ae8ffa5b592f1f7e9781c613efb | bccfbd2123dc866719f00603ccdbadc2b1920b67 | refs/heads/master | 2020-07-01T18:41:29.900282 | 2020-01-18T16:27:49 | 2020-01-18T16:27:49 | 201,259,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | class Car:
def __init__(self, color, mileage):
self.color = color
self.mileage = mileage
    def __str__(self):
        return f'Color of car is {self.color} and has {self.mileage} miles.'
if __name__ == '__main__':
my_car = Car('red', 3213)
print(my_car)
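# A matching __repr__ sketch (hypothetical addition to the Car class above):
#     def __repr__(self):
#         return f'Car({self.color!r}, {self.mileage!r})'
# str() targets a readable, user-facing form; repr() targets an unambiguous,
# developer-facing form and is what containers use when printing their items.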
| [
"aesthetictask@hotmail.com"
] | aesthetictask@hotmail.com |
1ec0b0fa3ca62cec709674e27863288278930c91 | 2cc18381baf1cb13cd7cb2b20f9241b81775cf66 | /join.py | 91276675dcb26a09bb1c3aef11ed4f65d22baaa0 | [] | no_license | liza3641/PokemonGameClient | 226dafe51f5621770eb3ed2d7b4a6b3a4d5be0be | 0567c4ae469f5d2290374d1c300ab52bf6cd4ac0 | refs/heads/main | 2023-02-01T05:00:52.242579 | 2020-12-17T16:26:51 | 2020-12-17T16:26:51 | 318,370,820 | 0 | 0 | null | 2020-12-04T01:44:22 | 2020-12-04T01:44:21 | null | UTF-8 | Python | false | false | 3,856 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
from tkinter import *
import client_server
import random
import os
def join_main():
root = Toplevel()
root.title("JOIN") # 제목
root.geometry("640x480") # 가로 x 세로
root.resizable(0, 0) # 크기 변경 불가
currunt_path = os.path.dirname(__file__) # 현재 파일의 위치 반환
image_path = os.path.join(currunt_path, "image") # inages 폴더 위치 반환
# 배경화면 지정
image = PhotoImage(file= image_path + "\\map\\po.PNG")
label110 = Label(root, image=image)
label110.pack()
    # hint text for the sign-up page
    label1 = Label(root, text="ID: 5-20 chars, password: 8-16 chars")
    label1.pack()
    label1.place(x=236, y=450 )# set position
    # ID :
    label11 = Label(root, text = "ID : ") # label widget
    label11.pack()
    label11.place(x=200, y=350) # set position
    # PW :
    label12 = Label(root, text = "PW : ") # label widget
    label12.pack()
    label12.place(x=193, y=370) # set position
    # PW OK :
    label13 = Label(root, text = "PW OK : ") # label widget
    label13.pack()
    label13.place(x=172, y=390) # set position
    # entry box for typing the ID
    txt1 = Entry(root, width=30)
    txt1.pack()
    txt1.place(x=230, y=350) # set position
    txt1.insert(0, "")
    # entry box for typing the password
    e2 = Entry(root, width=30) # single-line entry
    e2.pack(pady=3)
    e2.place(x=230, y=370) # set position
    e2.insert(0, "")
    # entry box for confirming the password
    e3 = Entry(root, width=30) # single-line entry
    e3.pack(pady=3)
    e3.place(x=230, y=390) # set position
    e3.insert(0, "")
    # handler for the sign-up window
    def a_join():
        id = txt1.get() # store the value typed in the ID entry box in id
        pwd = e2.get() # store the value typed in the password entry box in pwd
        pwd_ok = e3.get() # store the value typed in the password-confirmation entry box in pwd_ok
        if len(id) < 5 or len(id) > 20:
            # if the ID breaks the rule, change the hint text below
            print("The ID must be 5-20 characters.")
            label1.config(text="The ID must be 5-20 characters.")
        elif len(pwd) < 8 or len(pwd) > 16:
            # if the password breaks the rule, change the hint text below
            print("The password must be 8-16 characters.")
            label1.config(text="The password must be 8-16 characters.")
        elif pwd != pwd_ok:
            # if the password confirmation does not match, change the hint text below
            print("The passwords do not match.")
            label1.config(text="The passwords do not match.")
        else:
            # send the values to the server
            print("sending the values to the server")
            join_value = client_server.join(id, pwd) # join_value holds client_server's return value
            # join_value = "ok" or "no"
            # if join_value is "ok", change the hint text to "sign-up complete"
            if join_value == "ok":
                finish = "Sign-up complete."
                label1.config(text=finish)
                root.destroy() # close the window
            # if join_value is "no", change the hint text to "this ID already exists"
            else:
                finish = "This ID already exists."
                label1.config(text=finish)
            print("button value:", join_value)
    # sign-up button
    btn = Button(root, text="Sign up", command=a_join) # run a_join when the button is clicked
    btn.pack()
    btn.place(x=285, y=415) # set position
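    # Note: root.mainloop() below blocks until the window closes. For a standalone
    # test (assumption: this window is normally opened from a login window), a
    # hypothetical launcher could do: root = Tk(); root.withdraw(); join_main()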
root.mainloop() | [
"dhtkdals1972@naver.com"
] | dhtkdals1972@naver.com |
dfdcee5ef26549aad4dbd730b609c6310f235a99 | a3f3bcec45c6d27922aeb2f53b3d971be6da0aff | /list4.py | 27d29e38320f0bb6c7b1c4fdfb151a7912f224b8 | [] | no_license | arunshenoy99/python-assignments | 2a323b3d00a33b7bf8f7d859e1b26c749b2bd740 | 490da07a19ea233206d75e12a4b7ce31fe53a018 | refs/heads/master | 2022-04-17T09:48:58.514781 | 2020-04-07T19:48:37 | 2020-04-07T19:48:37 | 252,694,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # Python | Ways to find length of list
def length_normal(my_list):
count = 0
for el in my_list:
count += 1
return count
def length_fun(my_list):
return len(my_list)
my_list = ['1', '2', '3', '4']
print('Using normal method:{}'.format(length_normal(my_list)))
print('Using len function:{}'.format(length_fun(my_list)))
#output
#Using normal method:4
#Using len function:4 | [
"devarunshenoy99@gmail.com"
] | devarunshenoy99@gmail.com |
c336eb20b02286f93156c8da834136555aee0286 | 60eeca91a261eedb35f866645fe037e40f9a0ee7 | /test_case/pb2/equity/equityservice_pb2_grpc.py | 74ee7571beebe88b1cbc809b45dea4a14b69e557 | [] | no_license | jiaheqi/python-api-test | ca09b31bc92fbd1df97850c53700a461f50909e1 | 6c4204579f59dbfaa2659dc76a302d9e286112d5 | refs/heads/master | 2020-06-21T04:40:22.482546 | 2019-07-24T10:03:26 | 2019-07-24T10:03:26 | 197,346,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,424 | py | # -*- coding: UTF-8 -*-
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import equityservice_pb2 as equityservice__pb2
class EquityServiceStub(object):
"""
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.open = channel.unary_unary(
'/EquityService/open',
request_serializer=equityservice__pb2.AccountRequest.SerializeToString,
response_deserializer=equityservice__pb2.AccountResponse.FromString,
)
self.consume = channel.unary_unary(
'/EquityService/consume',
request_serializer=equityservice__pb2.ConsumeRequest.SerializeToString,
response_deserializer=equityservice__pb2.ConsumeResponse.FromString,
)
self.correct = channel.unary_unary(
'/EquityService/correct',
request_serializer=equityservice__pb2.CorrectRequest.SerializeToString,
response_deserializer=equityservice__pb2.CorrectResponse.FromString,
)
self.refund = channel.unary_unary(
'/EquityService/refund',
request_serializer=equityservice__pb2.RefundRequest.SerializeToString,
response_deserializer=equityservice__pb2.RefundResponse.FromString,
)
self.recharge = channel.unary_unary(
'/EquityService/recharge',
request_serializer=equityservice__pb2.RechargeRequest.SerializeToString,
response_deserializer=equityservice__pb2.RechargeResponse.FromString,
)
self.callBackInvoice = channel.unary_unary(
'/EquityService/callBackInvoice',
request_serializer=equityservice__pb2.CallBackInvoiceRequest.SerializeToString,
response_deserializer=equityservice__pb2.CallBackInvoiceResponse.FromString,
)
self.updateAccount = channel.unary_unary(
'/EquityService/updateAccount',
request_serializer=equityservice__pb2.UpdateAccountRequest.SerializeToString,
response_deserializer=equityservice__pb2.UpdateAccountResponse.FromString,
)
self.updateAlarm = channel.unary_unary(
'/EquityService/updateAlarm',
request_serializer=equityservice__pb2.AlarmRequest.SerializeToString,
response_deserializer=equityservice__pb2.AlarmResponse.FromString,
)
self.transferAccount = channel.unary_unary(
'/EquityService/transferAccount',
request_serializer=equityservice__pb2.TransferAccountRequest.SerializeToString,
response_deserializer=equityservice__pb2.AccountResponse.FromString,
)
self.queryRechargeDetail = channel.unary_unary(
'/EquityService/queryRechargeDetail',
request_serializer=equityservice__pb2.RechargeQueryRequest.SerializeToString,
response_deserializer=equityservice__pb2.RechargeResponse.FromString,
)
self.queryRechargePageInfo = channel.unary_unary(
'/EquityService/queryRechargePageInfo',
request_serializer=equityservice__pb2.RechargePageInfoRequest.SerializeToString,
response_deserializer=equityservice__pb2.RechargePageInfoResponse.FromString,
)
self.queryConsumeDetail = channel.unary_unary(
'/EquityService/queryConsumeDetail',
request_serializer=equityservice__pb2.ConsumeQueryRequest.SerializeToString,
response_deserializer=equityservice__pb2.ConsumeResponse.FromString,
)
self.queryConsumePageInfo = channel.unary_unary(
'/EquityService/queryConsumePageInfo',
request_serializer=equityservice__pb2.ConsumePageInfoRequest.SerializeToString,
response_deserializer=equityservice__pb2.ConsumePageInfoResponse.FromString,
)
self.queryCorrectDetail = channel.unary_unary(
'/EquityService/queryCorrectDetail',
request_serializer=equityservice__pb2.CorrectQueryRequest.SerializeToString,
response_deserializer=equityservice__pb2.CorrectResponse.FromString,
)
self.queryCorrectPageInfo = channel.unary_unary(
'/EquityService/queryCorrectPageInfo',
request_serializer=equityservice__pb2.CorrectPageInfoRequest.SerializeToString,
response_deserializer=equityservice__pb2.CorrectPageInfoResponse.FromString,
)
self.queryAccountPageInfo = channel.unary_unary(
'/EquityService/queryAccountPageInfo',
request_serializer=equityservice__pb2.AccountPageInfoRequest.SerializeToString,
response_deserializer=equityservice__pb2.AccountPageInfoResponse.FromString,
)
self.queryParamAccountPageInfo = channel.unary_unary(
'/EquityService/queryParamAccountPageInfo',
request_serializer=equityservice__pb2.AccountPageParamInfoRequest.SerializeToString,
response_deserializer=equityservice__pb2.AccountPageInfoResponse.FromString,
)
self.queryAccountDetail = channel.unary_unary(
'/EquityService/queryAccountDetail',
request_serializer=equityservice__pb2.AccountQueryRequest.SerializeToString,
response_deserializer=equityservice__pb2.AccountResponse.FromString,
)
self.queryAccountAndAlarmDetail = channel.unary_unary(
'/EquityService/queryAccountAndAlarmDetail',
request_serializer=equityservice__pb2.AccountAndAlarmQueryRequest.SerializeToString,
response_deserializer=equityservice__pb2.AccountAndAlarmPageInfoResponse.FromString,
)
self.updateAccountOpen = channel.unary_unary(
'/EquityService/updateAccountOpen',
request_serializer=equityservice__pb2.UpdateAccountOpenRequest.SerializeToString,
response_deserializer=equityservice__pb2.UpdateAccountOpenResponse.FromString,
)
class EquityServiceServicer(object):
"""
"""
def open(self, request, context):
"""开户
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def consume(self, request, context):
"""资产消费接口
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def correct(self, request, context):
"""资产消费冲正接口
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def refund(self, request, context):
"""资产充值退款接口
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def recharge(self, request, context):
"""资产充值接口
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def callBackInvoice(self, request, context):
"""开票返回接口
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def updateAccount(self, request, context):
"""修改权益账户
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def updateAlarm(self, request, context):
"""修改权益账户报警阀值
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def transferAccount(self, request, context):
"""权益账户转账
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def queryRechargeDetail(self, request, context):
"""查询充值明细
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def queryRechargePageInfo(self, request, context):
"""查询充值列表
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def queryConsumeDetail(self, request, context):
"""查询消费明细
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def queryConsumePageInfo(self, request, context):
"""查询消费列表
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def queryCorrectDetail(self, request, context):
"""查询消费冲正明细
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def queryCorrectPageInfo(self, request, context):
"""查询消费冲正列表
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def queryAccountPageInfo(self, request, context):
"""获取权益账户列表
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def queryParamAccountPageInfo(self, request, context):
"""获取权益账户列表
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def queryAccountDetail(self, request, context):
"""获取账户明细
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def queryAccountAndAlarmDetail(self, request, context):
"""批量获取账户和余额报警明细
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def updateAccountOpen(self, request, context):
"""修改权益账户状态
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_EquityServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'open': grpc.unary_unary_rpc_method_handler(
servicer.open,
request_deserializer=equityservice__pb2.AccountRequest.FromString,
response_serializer=equityservice__pb2.AccountResponse.SerializeToString,
),
'consume': grpc.unary_unary_rpc_method_handler(
servicer.consume,
request_deserializer=equityservice__pb2.ConsumeRequest.FromString,
response_serializer=equityservice__pb2.ConsumeResponse.SerializeToString,
),
'correct': grpc.unary_unary_rpc_method_handler(
servicer.correct,
request_deserializer=equityservice__pb2.CorrectRequest.FromString,
response_serializer=equityservice__pb2.CorrectResponse.SerializeToString,
),
'refund': grpc.unary_unary_rpc_method_handler(
servicer.refund,
request_deserializer=equityservice__pb2.RefundRequest.FromString,
response_serializer=equityservice__pb2.RefundResponse.SerializeToString,
),
'recharge': grpc.unary_unary_rpc_method_handler(
servicer.recharge,
request_deserializer=equityservice__pb2.RechargeRequest.FromString,
response_serializer=equityservice__pb2.RechargeResponse.SerializeToString,
),
'callBackInvoice': grpc.unary_unary_rpc_method_handler(
servicer.callBackInvoice,
request_deserializer=equityservice__pb2.CallBackInvoiceRequest.FromString,
response_serializer=equityservice__pb2.CallBackInvoiceResponse.SerializeToString,
),
'updateAccount': grpc.unary_unary_rpc_method_handler(
servicer.updateAccount,
request_deserializer=equityservice__pb2.UpdateAccountRequest.FromString,
response_serializer=equityservice__pb2.UpdateAccountResponse.SerializeToString,
),
'updateAlarm': grpc.unary_unary_rpc_method_handler(
servicer.updateAlarm,
request_deserializer=equityservice__pb2.AlarmRequest.FromString,
response_serializer=equityservice__pb2.AlarmResponse.SerializeToString,
),
'transferAccount': grpc.unary_unary_rpc_method_handler(
servicer.transferAccount,
request_deserializer=equityservice__pb2.TransferAccountRequest.FromString,
response_serializer=equityservice__pb2.AccountResponse.SerializeToString,
),
'queryRechargeDetail': grpc.unary_unary_rpc_method_handler(
servicer.queryRechargeDetail,
request_deserializer=equityservice__pb2.RechargeQueryRequest.FromString,
response_serializer=equityservice__pb2.RechargeResponse.SerializeToString,
),
'queryRechargePageInfo': grpc.unary_unary_rpc_method_handler(
servicer.queryRechargePageInfo,
request_deserializer=equityservice__pb2.RechargePageInfoRequest.FromString,
response_serializer=equityservice__pb2.RechargePageInfoResponse.SerializeToString,
),
'queryConsumeDetail': grpc.unary_unary_rpc_method_handler(
servicer.queryConsumeDetail,
request_deserializer=equityservice__pb2.ConsumeQueryRequest.FromString,
response_serializer=equityservice__pb2.ConsumeResponse.SerializeToString,
),
'queryConsumePageInfo': grpc.unary_unary_rpc_method_handler(
servicer.queryConsumePageInfo,
request_deserializer=equityservice__pb2.ConsumePageInfoRequest.FromString,
response_serializer=equityservice__pb2.ConsumePageInfoResponse.SerializeToString,
),
'queryCorrectDetail': grpc.unary_unary_rpc_method_handler(
servicer.queryCorrectDetail,
request_deserializer=equityservice__pb2.CorrectQueryRequest.FromString,
response_serializer=equityservice__pb2.CorrectResponse.SerializeToString,
),
'queryCorrectPageInfo': grpc.unary_unary_rpc_method_handler(
servicer.queryCorrectPageInfo,
request_deserializer=equityservice__pb2.CorrectPageInfoRequest.FromString,
response_serializer=equityservice__pb2.CorrectPageInfoResponse.SerializeToString,
),
'queryAccountPageInfo': grpc.unary_unary_rpc_method_handler(
servicer.queryAccountPageInfo,
request_deserializer=equityservice__pb2.AccountPageInfoRequest.FromString,
response_serializer=equityservice__pb2.AccountPageInfoResponse.SerializeToString,
),
'queryParamAccountPageInfo': grpc.unary_unary_rpc_method_handler(
servicer.queryParamAccountPageInfo,
request_deserializer=equityservice__pb2.AccountPageParamInfoRequest.FromString,
response_serializer=equityservice__pb2.AccountPageInfoResponse.SerializeToString,
),
'queryAccountDetail': grpc.unary_unary_rpc_method_handler(
servicer.queryAccountDetail,
request_deserializer=equityservice__pb2.AccountQueryRequest.FromString,
response_serializer=equityservice__pb2.AccountResponse.SerializeToString,
),
'queryAccountAndAlarmDetail': grpc.unary_unary_rpc_method_handler(
servicer.queryAccountAndAlarmDetail,
request_deserializer=equityservice__pb2.AccountAndAlarmQueryRequest.FromString,
response_serializer=equityservice__pb2.AccountAndAlarmPageInfoResponse.SerializeToString,
),
'updateAccountOpen': grpc.unary_unary_rpc_method_handler(
servicer.updateAccountOpen,
request_deserializer=equityservice__pb2.UpdateAccountOpenRequest.FromString,
response_serializer=equityservice__pb2.UpdateAccountOpenResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'EquityService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| [
"jiaheqi@hualala.com"
] | jiaheqi@hualala.com |
d3482bafbf83b25f3fc0815b98edbcb28c6babe8 | 1b13710d7b3e6ba59cd8fd52e8ad5a3d1432f497 | /test_case.py | fa1b60812a11abe5c45a0f803437f35008871e9a | [] | no_license | tvandort/tdd-p2 | dca59a4db5d5962bd9f9ee6915fc787996a505e1 | 6df5edf76a293ce5ae7911e84dc317997a3f929d | refs/heads/master | 2020-03-09T14:22:59.655166 | 2018-04-12T21:56:28 | 2018-04-12T21:56:28 | 128,833,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | from test_result import TestResult
class TestCase:
def __init__(self, name):
        self.name = name
def setUp(self):
pass
def tearDown(self):
pass
def run(self, result):
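        # record the start, look up the test method by name with getattr,
        # invoke it, and report a failure if it raises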
result.testStarted()
self.setUp()
try:
method = getattr(self, self.name)
method()
except:
result.testFailed()
self.tearDown()
return result
| [
"tvandort1@gmail.com"
] | tvandort1@gmail.com |
38590fcb54bc8781c91f196e3e5517bfe2b08f66 | 8039fd72bc4d7a7fc89ded9d63b47f020e9d95a5 | /app/core/management/commands/wait_for_db.py | 7add791e22ad05de1029347de5eb7839ecafc151 | [
"MIT"
] | permissive | vturbin/recipe-app-api | 543b5439f73299c3db12638c18df51adcc740108 | edec6371863ea7bd3aec95f6472fa66e0db5386b | refs/heads/main | 2023-01-16T06:55:29.750405 | 2020-11-22T17:06:21 | 2020-11-22T17:06:21 | 307,793,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until database is available"""
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!')) | [
"v.turbins@smart-iot.solutions"
] | v.turbins@smart-iot.solutions |
e0da5bd6190656653922935822c614334e58cb44 | 24f693c58ed674bc2bade68ccfc5b77ffecbb566 | /django/api/desarrollo/selenium/prueba.py | 7165f5dc838b626135c64093e8857710f5b0323b | [] | no_license | vazquezjav/Tesis_Final | e63c1a6ca72e3702ce4f4d32831540e83ff960a7 | b8e90e9177a01af90ca84d9599a80915f403d219 | refs/heads/main | 2023-06-28T17:22:55.691632 | 2021-07-29T16:25:22 | 2021-07-29T16:25:22 | 390,783,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from selenium import webdriver
from selenium.webdriver.firefox.options import Options
firefoxOptions = Options()
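# run Firefox without a visible window so the script also works on machines with no display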
firefoxOptions.add_argument("-headless")
browser = webdriver.Firefox(executable_path="./geckodriver", options=firefoxOptions)
browser.get('https://www.linuxhint.com')
print('Title: %s' % browser.title)
browser.quit()
| [
"vazquezjavie079@gmail.com"
] | vazquezjavie079@gmail.com |
b88c2230726043ad25102eb261b604d25df27d52 | dddf1118b82661df9e7d68ce041279ab45586532 | /net_api.py | 0645e0bea4304512a5e67c80acfee4003e7807d3 | [] | no_license | lemonsina/thirteenwater | 9bfad638f92a57fdd44127afb0b5ca013f75656e | 1d52e20d3c72c7dca2166d4a85d2e1a40c52b84c | refs/heads/master | 2022-02-28T04:20:28.425214 | 2019-10-14T19:20:26 | 2019-10-14T19:20:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,483 | py | import requests
import cards_division
url='https://api.shisanshui.rtxux.xyz'
cards_list = []
max_list = []
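# module-level state for the current hand: the 13 dealt cards and the
# arrangement chosen by cards_division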
def sign_in(username,password):
global url
payload={"username":username,"password":password}
headers={'content-type': "application/json"}
r=requests.post(url+'/auth/login',json=payload,headers=headers)
if r.status_code==200:
data = r.json()
status = data['status']
if status==0:
token = data.get('data').get('token')
f = open('token.txt', 'w')
f.write(token)
f.close()
f=open('user_id.txt','w')
user_id=data.get('data').get('user_id')
f.write(str(user_id))
            f.close()
return status
else:
return r.status_code
def register(username,password):
global url
payload = {"username": username, "password": password}
headers = {'content-type': "application/json"}
r = requests.post(url + '/auth/register', json=payload,headers=headers)
if r.status_code==200:
data = r.json()
status = data['status']
return status
else:
return r.status_code
def logout():
global url
f = open('token.txt')
token = f.readline()
    f.close()
headers = {"X-Auth-Token": token}
r = requests.post(url + '/auth/logout', headers=headers)
data = r.json()
status = data['status']
return status
def new_game_and_play():
global url
f=open('token.txt')
token=f.readline()
    f.close()
headers={"X-Auth-Token":token}
r=requests.post(url+'/game/open',headers=headers)
data=r.json()
print(data)
status = data['status']
game_id=data.get('data').get('id')
cards_string=data.get('data').get('card')
global cards_list
global max_list
cards_list=cards_string.split(' ')
max_list=cards_division.divide_cards(cards_list)
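    # thirteen-water split: back hand = cards 0-4, middle = cards 5-9, front = the last 3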
front = ' '.join(max_list[10:13])
middle = ' '.join(max_list[5:10])
rear = ' '.join(max_list[0:5])
payload={
"id":game_id,
"card":[
front,
middle,
rear
]
}
print(payload)
headers = {
'content-type': "application/json",
"X-Auth-Token": token
}
r2=requests.post(url+'/game/submit',json=payload,headers=headers)
data=r2.json()
status2=data.get("status")
print(status2)
for i in range(0,13):
if max_list[i][0]=='*':
max_list[i]='^'+max_list[i][1:]
return max_list
def get_rank():
global url
r=requests.get(url+'/game/rank')
data=r.json()
return data
def get_history_list(limit,page):
global url
f = open('token.txt')
token = f.readline()
    f.close()
    f = open('user_id.txt')
    user_id = int(f.readline())
    f.close()
params={
"player_id":user_id,
"limit":limit,
"page":page
}
headers = {
'content-type': "application/json",
"X-Auth-Token": token
}
r=requests.get(url+'/history',params=params,headers=headers)
data=r.json()
return data
def get_history_details(game_id):
global url
f = open('token.txt')
token = f.readline()
    f.close()
headers = {
'content-type': "application/json",
"X-Auth-Token": token
}
r = requests.get(url + '/history/'+str(game_id),headers=headers)
data = r.json()
return data
if __name__=='__main__':
print(sign_in('111666','111666'))
print(get_history_list(20,1))
| [
"747265828@qq.com"
] | 747265828@qq.com |
761272cafba7280e0e789a461deda19ce8598416 | f088119d9068c00fa66cacb1be844481239dc451 | /p13.py | aa2291cb82f75d5f7de07b90cc3d3200797d0422 | [] | no_license | thamilarasi43/thamilarasi | 7c9ba952d418ea0d2ccdcd70b10d3eacf46899b5 | 027bcd6f91164e3368f6633b4c79cd1a635a5300 | refs/heads/master | 2020-03-09T12:20:57.263416 | 2018-04-28T14:08:03 | 2018-04-28T14:08:03 | 128,783,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | n1=int(input("enter n"))
q=int(input("enter q"))
l=[]
for i in range(n1):
    a=int(input("enter n val"))
    l.append(a)
for j in range(q):
    u=int(input())
    v=int(input())
    # collect the elements of the queried range [u, v) and print their minimum
    l1=[]
    for i in range(u,v):
        l1.append(l[i])
    print(min(l1))
| [
"noreply@github.com"
] | noreply@github.com |
8e55286d2adba619b99dc413e3201836767bb789 | a88a99fb3f754649db06ad86d22b5cb0d2d1e19c | /scholariumat/users/migrations/0005_auto_20181125_1759.py | 37e390c09b8e19e06e7d8ed6a47ff1bf93ab1a89 | [
"MIT"
] | permissive | valuehack/scholariumat | 91ec59647948759d917ce7077d06b0aa9618c807 | 47c13f3429b95b9ad5ca59b45cf971895260bb5c | refs/heads/master | 2022-12-07T22:20:23.967854 | 2020-04-09T22:05:52 | 2020-04-09T22:05:52 | 135,466,121 | 0 | 3 | MIT | 2022-12-06T18:38:22 | 2018-05-30T15:55:14 | JavaScript | UTF-8 | Python | false | false | 540 | py | # Generated by Django 2.0.9 on 2018-11-25 16:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20181120_1929'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='name',
new_name='last_name',
),
migrations.AddField(
model_name='profile',
name='first_name',
field=models.CharField(blank=True, max_length=200),
),
]
| [
"merlin.buczek@gmail.com"
] | merlin.buczek@gmail.com |
a2156bc789a1d722ae16fc02c3016a476e85d470 | 41a4ef26cf3b4710dfa6fe3f1e88a935bb909654 | /utils/logger.py | 5a8cecd51025f9a4557d25d6b86232b0ad7b72a8 | [] | no_license | little-alexandra/attention_ocr | c6c0846342f947bbb8697f99e02cdd5ce2c276c2 | 475273573ae02efe1c7c1ba3905939580d26876e | refs/heads/master | 2020-11-30T10:01:52.232714 | 2019-12-18T09:35:08 | 2019-12-18T09:35:08 | 230,371,558 | 1 | 0 | null | 2019-12-27T04:13:46 | 2019-12-27T04:13:45 | null | UTF-8 | Python | false | false | 1,463 | py | import logging
import time
import os
from logging import handlers
import datetime
import tensorflow as tf
debug=True
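# master switch: when False, _p and _p_shape return the tensor untouched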
def _p(tensor,msg):
if (debug):
dt = datetime.datetime.now().strftime('TF_DEBUG: %m-%d %H:%M:%S: ')
msg = dt + msg
return tf.Print(tensor, [tensor], msg,summarize= 100)
else:
return tensor
def _p_shape(tensor,msg):
if (debug):
dt = datetime.datetime.now().strftime('TF_DEBUG: %m-%d %H:%M:%S: ')
msg = dt + msg
return tf.Print(tensor, [tf.shape(tensor)], msg,summarize= 100)
else:
return tensor
def init(level=logging.DEBUG,when="D",backup=7,_format="%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d行 %(message)s"):
train_start_time = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
filename = 'logs/ocr-attention-'+train_start_time + '.log'
_dir = os.path.dirname(filename)
if not os.path.isdir(_dir):os.makedirs(_dir)
logger = logging.getLogger()
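    # attach handlers only once so repeated init() calls don't duplicate log lines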
if not logger.handlers:
formatter = logging.Formatter(_format)
logger.setLevel(level)
handler = handlers.TimedRotatingFileHandler(filename, when=when, backupCount=backup,encoding="utf-8")
handler.setLevel(level)
handler.setFormatter(formatter)
logger.addHandler(handler)
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
logger.addHandler(handler)
| [
"piginzoo@gmail.com"
] | piginzoo@gmail.com |
18e134dac00b481624b17bd208a6bde059b3e038 | ed6df70a3b07e2e584e62adaf8efc60760d8d5c0 | /main.py | 64e7935c99ff55dc2673bd74e6ee6cf1ed4a398a | [] | no_license | madeo17/tic-tac-toe | 92221206ad8d01e20bafc678b3399a9e34b8df6a | 04d269544f5c8f0fe50c99ed424933b232c8eb16 | refs/heads/master | 2020-03-27T18:31:10.326697 | 2018-08-31T19:17:15 | 2018-08-31T19:17:15 | 146,927,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,450 | py | #! python3
# Tic Tac Toe game - my first project
import os
import random
def display_board(board):
os.system('cls')
print('| | | |')
print(f'| {board[7]} | {board[8]} | {board[9]} |')
print('| | | |')
print('-------------')
print('| | | |')
print(f'| {board[4]} | {board[5]} | {board[6]} |')
print('| | | |')
print('-------------')
print('| | | |')
print(f'| {board[1]} | {board[2]} | {board[3]} |')
print('| | | |\n')
def player_input():
choice = input('your move:')
while not choice.isdigit() or not (0 < int(choice) < 10):
choice = input('Invalid input. Try again:')
return int(choice)
def place_marker(board, mark, position, three_in_line):
board[position] = mark
for i in three_in_line[mark]:
if position in i:
i.remove(position)
def win_check(mark, three_in_line):
return [] in three_in_line[mark]
def choose_first():
return random.choice(['X', 'O'])
def space_check(board, position):
return ' ' == board[position]
def full_board_check(board):
return ' ' not in board
def player_choice(board):
choice = player_input()
while not space_check(board, choice):
print("This place isn't available.")
choice = player_input()
return choice
def winner(three_in_line):
if [] in three_in_line['X']:
return 'X'
elif [] in three_in_line['O']:
return 'O'
else:
return False
def replay():
while True:
choice = input('Do you want to play again? [Y/N]:')
if choice.lower() == 'y':
return True
if choice.lower() == 'n':
return False
def main():
print('\nTIC TAC TOE\n')
wanna_play = True
while wanna_play:
player_name = {'X': '', 'O': ''}
print('Enter your names:')
player_name['X'] = input('Player X:')
player_name['O'] = input('Player O:')
first_player = choose_first()
second_player = 'X' if 'X' != first_player else 'O'
print(f'\n{player_name[first_player]} will start.')
input('Press Enter when ready...')
board = ['#', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ]
three_in_line = {'X': [[7, 8, 9], [4, 5, 6], [1, 2, 3], [1, 4, 7], [2, 5, 8], [3, 6, 9], [3, 5, 7], [1, 5, 9]],
'O': [[7, 8, 9], [4, 5, 6], [1, 2, 3], [1, 4, 7], [2, 5, 8], [3, 6, 9], [3, 5, 7], [1, 5, 9]]}
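        # each player keeps a copy of all 8 winning lines; place_marker removes the
        # played square from every line, so a line shrinking to [] means three in a row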
display_board(board)
while not full_board_check(board):
print(f'{player_name[first_player]}, ', end='')
place_marker(board, first_player,
player_choice(board), three_in_line)
display_board(board)
if win_check(first_player, three_in_line):
break
if full_board_check(board):
break
print(f'{player_name[second_player]}, ', end='')
place_marker(board, second_player,
player_choice(board), three_in_line)
display_board(board)
            if win_check(second_player, three_in_line):
break
if winner(three_in_line):
print(
f'Congratulations {player_name[winner(three_in_line)]}! You win!\n')
else:
print('TIE!')
wanna_play = replay()
print('Bye bye! Have a nice day!')
if __name__ == '__main__':
main()
| [
"matemalec@gmail.com"
] | matemalec@gmail.com |
e42024f8c39e22cdf45b7337bb35c1eb1b5507ec | c8d494d3d47660500537d0214050772e975befda | /tools/get_item_img.py | 243c35e23d47b6e95d656806ea672ae1ce77933e | [] | no_license | ragnarok-online-japan/itemsearch-nextjs | 76f91a8b81d9395877295078f2f45cf5df3037b2 | 0025a33226a15dbbbf038291bd3541506b3e6452 | refs/heads/main | 2023-05-11T03:40:26.624590 | 2021-05-25T03:44:19 | 2021-05-25T03:44:19 | 323,914,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,760 | py | #!/usr/bin/env python3
# pip3 install pyquery aiohttp Pillow
import argparse
import json
import os
import io
import traceback
import urllib.request, urllib.parse, urllib.error
from PIL import Image, ImageDraw, ImageFont
parser = argparse.ArgumentParser(description='')
parser.add_argument('--import-items',
action='store',
nargs='?',
default='./items.json',
type=str,
help='import items.json')
parser.add_argument('--export-path',
action='store',
nargs='?',
default='./images',
type=str,
help='export path')
parser.add_argument('--icon-url',
action='store',
nargs='?',
default='https://rotool.gungho.jp/icon/',
type=str,
help='icon url')
parser.add_argument('--font',
action='store',
nargs='?',
default='./SourceCodePro-Light.ttf',
type=str,
help='Font(TTF) file path')
args = parser.parse_args()
def download(args: dict, key: str, font: ImageFont):
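    # skip icons already on disk; otherwise fetch the PNG, stamp the copyright
    # notice near the bottom edge, and save it at full quality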
image_file_path = "{:s}/{:s}.png".format(args.export_path, key)
    if os.path.isfile(image_file_path):
print(key, "downloaded")
return
try:
with urllib.request.urlopen("{:s}/{:s}.png".format(args.icon_url, key)) as response:
data = response.read()
            # decode the downloaded bytes into a Pillow image
image = Image.open(io.BytesIO(data))
            # width, height
_, height = image.size
draw = ImageDraw.Draw(image)
draw.text((4,height-36), "(c)Gravity Co., Ltd. & LeeMyoungJin(studio DTDS) All rights reserved.\n(c)GungHo Online Entertainment, Inc. All Rights Reserved.", font=font)
            # save
image.save(image_file_path, format="PNG", quality=100, optimize=True)
except urllib.error.URLError as ex:
print(key, ex)
except:
print(traceback.format_exc())
raise
def main(args: dict):
items = {}
with open(args.import_items, "r", encoding="utf-8") as fp:
items = json.load(fp)
if os.path.isdir(args.export_path) == False:
os.mkdir(args.export_path)
font = ImageFont.truetype(args.font, size=10)
for key in items:
item = items[key]
if "is_card" in item and item["is_card"] == True:
#print(key, "card")
download(args, key, font)
elif "is_enchant" in item and item["is_enchant"] == True:
#print(key, "enchant")
download(args, key, font)
if __name__ == '__main__':
main(args)
| [
"h-mineta@0nyx.net"
] | h-mineta@0nyx.net |
a1bc023ef9ad5e453512141f2085d35f378c1e41 | 8d598fa67ea31b067f61df7553e5588e05ea1102 | /test2.py | 7f31567030e01354db450eab20b6bb62894654c7 | [] | no_license | pin034/olimpiada2020.py | 5f40dab54be0610edd14af0ea398d21abe76d1d7 | 81bee86b9142ff50fa188f5a693ea486ee2cb18d | refs/heads/master | 2023-03-04T11:41:41.548540 | 2021-02-17T08:39:25 | 2021-02-17T08:39:25 | 294,321,816 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | f = int(input())
e = int(input())
e = e+1
a = 0
b = 0
for x in range(f,e):
    if x % 2 == 0:
        a = a + x
    else:
        b = b + x
a = a - b
print (a)
| [
"noreply@github.com"
] | noreply@github.com |
cb012bebcabc0aebf3e733ef8ecfb058ae9b1ceb | fbe58a32edf598edb66afb1af7a54a174f590d5c | /Intro_Problems/fibonacci_last_digit.py | ab9eb977bf6767204ecf5f2188cf6ae6ba481316 | [] | no_license | viritaromero/Algo-and-DS | f70a8e6a02109a722f03c75208819bbd73693cb7 | be2edefc179811b1fa62395797a4447e7df3a70d | refs/heads/master | 2020-04-19T08:45:01.694996 | 2017-01-22T02:41:01 | 2017-01-22T02:41:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # Uses python3
import sys
def get_fibonacci_last_digit_naive(n):
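    # only the last digit is needed, so work with digits and reduce mod 10
    # instead of computing the huge Fibonacci numbers themselves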
fibs = [0,1]
for i in range(2,n+1):
        fibs.append((fibs[-1] + fibs[-2]) % 10)
# if n <= 1:
# return n
#
# previous = 0
# current = 1
#
# for _ in range(n - 1):
# previous, current = current, previous + current
#
return fibs[n]%10
if __name__ == '__main__':
input = sys.stdin.read()
n = int(input)
print(get_fibonacci_last_digit_naive(n))
| [
"chaitanyadeva96@gmail.com"
] | chaitanyadeva96@gmail.com |
de3b15f73a895866ee623c91bf321c778e86a9cd | 50a2e22c8af5bc9adc9320c754177341e223e996 | /findTree.py | 7acd9206deb80796bfbee0390c536af590d711d7 | [] | no_license | cuibjut/binary_tree | d7572026bcc3cd3890ce2d1534d929c0019b6de7 | 8f21e350314b7b1f9ef585836a73091e92b2d55d | refs/heads/master | 2020-04-10T16:39:53.209424 | 2018-12-10T09:49:56 | 2018-12-10T09:49:56 | 161,151,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,789 | py | """
1. Given the preorder and inorder traversals of a binary tree, derive its postorder traversal.
2. Given the postorder and inorder traversals of a binary tree, derive its preorder traversal.
"""
def find_last_tree(prelist, midlist, lastlist):
"""
    Start from the recursion's termination and boundary conditions.
    :param prelist: preorder traversal of the tree
    :param midlist: inorder traversal of the tree
    :param lastlist: accumulator for the postorder traversal
    :return: the postorder traversal reconstructed from the preorder and inorder lists
"""
    if len(prelist) == 0:  # boundary: empty subtree
return
    if len(prelist) == 1:  # base case: single node
lastlist.append(prelist[0])
return
value = prelist[0]
n = midlist.index(value)
find_last_tree(prelist[1:n+1], midlist[0:n], lastlist)
find_last_tree(prelist[n+1:], midlist[n+1:], lastlist)
    lastlist.append(value)  # postorder: the root (first preorder element) goes after both subtrees
return lastlist
def find_pre_tree(lastList, midList, preList):
    if len(lastList) == 0:  # boundary: empty subtree
return
    if len(lastList) == 1:  # base case: single node
preList.append(lastList[0])
return
value = lastList[-1]
preList.append(value)
n = midList.index(value)
    # print(lastList[0:n], midList[0:n], preList)  # debug trace, disabled
find_pre_tree(lastList[0:n], midList[0:n], preList)
find_pre_tree(lastList[n:-1], midList[n+1:], preList)
return preList
if __name__ == "__main__":
# preList = list('12473568')
# midList = list('47215386')
# lastList = []
# lastList = find_last_tree(preList, midList, lastList)
# print(lastList)
lastList = list('74258631')
midList = list('47215386')
preList = []
preList = find_pre_tree(lastList, midList, preList)
print(preList)
| [
"tizi@ibantang.com"
] | tizi@ibantang.com |
95ab244bbb47dafcd216738b20b3b9c82b00e6b3 | 5c33148acbdaeeae2e0570519e3009e96bca117c | /CAPANXIN/wsgi.py | 0c39949c23235cff917b85096da8c2decfc60865 | [] | no_license | Raisony/CAPAN | 875e04ed930fd74edd56173da2d424e3d80eed03 | bae76d3cd9907c6c57e6458d258b8aea2094295f | refs/heads/master | 2022-12-08T04:33:11.324775 | 2020-08-20T03:54:11 | 2020-08-20T03:54:11 | 288,899,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for CAPANXIN project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CAPANXIN.settings')
application = get_wsgi_application()
| [
"s753242357@gmail.com"
] | s753242357@gmail.com |
0d7f2e1ee9666fac5950797ec00593729a0849b8 | d3190984762e07cfab6a0fef38bc2755f7832114 | /functions/dshop_processing_func.py | 86a1e5fff172560da46923644aab1d3b0a7c2bf5 | [] | no_license | Rommagcom/final_project | 47be6891b02677b12068873b0da77c5873c17a0f | c248108dec1ced3dbd74c65d32ec47d8fdd38cd7 | refs/heads/master | 2023-07-15T03:08:50.396055 | 2021-08-24T19:28:34 | 2021-08-24T19:28:34 | 399,523,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,488 | py | import psycopg2
import logging
import os
from datetime import datetime
import requests
import json
from hdfs import InsecureClient
from airflow.hooks.base_hook import BaseHook
import pyspark
from pyspark.sql import SparkSession
def load_from_db(table, **context):
execution_date = context['execution_date']
for_date = execution_date.strftime("%Y-%m-%d")
hdfs_conn = BaseHook.get_connection('datalake_hdfs')
pg_conn = BaseHook.get_connection('oltp_postgres')
logging.info(f"Writing table {table} for date {for_date} from {pg_conn.host} to Bronze")
client = InsecureClient("http://"+hdfs_conn.host, user=hdfs_conn.login)
with psycopg2.connect(dbname='dshop_bu', user=pg_conn.login, password=pg_conn.password, host=pg_conn.host) as pg_connection:
cursor = pg_connection.cursor()
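        # stream the table from Postgres straight into HDFS as CSV via COPY,
        # without materialising the data on the worker's local disk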
with client.write(os.path.join('new_datalake', 'bronze','dshop',table,for_date, table)+".csv", overwrite=True) as csv_file:
cursor.copy_expert(f"COPY {table} TO STDOUT WITH HEADER CSV", csv_file)
logging.info("Successfully loaded")
def load_from_api(load_for_date, **context):
hdfs_conn = BaseHook.get_connection('datalake_hdfs')
api_conn_auth = BaseHook.get_connection('https_outofstock')
api_conn_get_data = BaseHook.get_connection('https_outofstock_get')
logging.info(f"Getting OAuth token from API {api_conn_auth.host} with cred {api_conn_auth.extra}")
client = InsecureClient("http://"+hdfs_conn.host, user=hdfs_conn.login)
my_headers = {'Content-Type' : 'application/json'}
response = requests.post(api_conn_auth.host, data=api_conn_auth.extra,headers=my_headers)
response.raise_for_status()
resp_auth = response.json()
logging.info(f"OAuth token from API {resp_auth['access_token']}")
if response.status_code == 200:
logging.info(f"Successfully authorized to API")
logging.info(f"Getting data from out of stock API for date {load_for_date}")
dir_to_save = os.path.join('new_datalake', 'bronze','dshop','out_of_stock_api',load_for_date)
#client.makedirs(dir_to_save) # Directory creation
my_headers = {'Authorization' :'JWT ' + resp_auth['access_token']}
params= {'date': load_for_date }
response = requests.get(api_conn_get_data.host, params=params, headers = my_headers)
if response.status_code==404:
if 'message' in response.json():
logging.info(response.json()['message'])
response.raise_for_status()
product_ids = list(response.json())
for product_id in product_ids:
client.write(os.path.join(dir_to_save, str(product_id['product_id'])+'.json'), data=json.dumps(product_id), encoding='utf-8',overwrite=True)
logging.info("Uploading task finished")
def load_to_silver(table,**context):
this_folder = os.path.dirname(os.path.abspath(__file__))
spark = SparkSession.builder\
.config('spark.driver.extraClassPath',os.path.join(this_folder,'postgresql-42.2.23.jar'))\
.master('local')\
.appName('transform_stage')\
.getOrCreate()
execution_date = context['execution_date']
for_date = execution_date.strftime("%Y-%m-%d")
logging.info(f"Loading table {table} from Bronze to process")
if table=='out_of_stock_api':
table='out_of_stock'
tableDf = spark.read.load(os.path.join('new_datalake','bronze','dshop','out_of_stock',for_date)
,header = "true"
,inferSchema = "true"
, format = "json")
else:
tableDf = spark.read.load(os.path.join('new_datalake','bronze','dshop',table,for_date)
,header = "true"
,inferSchema = "true"
,format = "csv")
logging.info(f"Cleaning up table {table}")
tableDf = tableDf.dropDuplicates().na.drop("all")
if "department" in tableDf.schema.fieldNames():
tableDf = tableDf.na.drop(subset=["department"])
if "area" in tableDf.schema.fieldNames():
tableDf = tableDf.na.drop(subset=["area",])
if "product_name" in tableDf.schema.fieldNames():
tableDf = tableDf.na.drop(subset=["product_name"])
if "aisle_id" in tableDf.schema.fieldNames():
tableDf = tableDf.na.drop(subset=["aisle_id"])
logging.info(f"Writing data {table} for date {for_date} from Bronze to Silver")
tableDf.write\
.parquet(os.path.join('new_datalake','silver','dshop',table), mode='overwrite')
logging.info("Successfully loaded to Silver")
def load_to_dwh(table,**context):
this_folder = os.path.dirname(os.path.abspath(__file__))
spark = SparkSession.builder\
.config('spark.driver.extraClassPath',os.path.join(this_folder,'postgresql-42.2.23.jar'))\
.appName('move_to_dwh_stage')\
.getOrCreate()
execution_date = context['execution_date']
for_date = execution_date.strftime("%Y-%m-%d")
logging.info(f"Loading table {table} from Silver to process for date {for_date}")
gp_conn = BaseHook.get_connection('gp_dshop')
gp_url = f"jdbc:postgresql://{gp_conn.host}:{gp_conn.port}/{gp_conn.schema}"
gp_properties = {"user": gp_conn.login, "password": gp_conn.password}
tableDf = spark.read.parquet(os.path.join('new_datalake','silver','dshop',table))
tableDf.write.jdbc(gp_url,table=table,properties=gp_properties,mode='append')
logging.info("Successfully loaded to dwh")
| [
"roman.ivanitskiy@airastana.com"
] | roman.ivanitskiy@airastana.com |
4c8bbdc6300f6fd0f97b2e3c2f51225dda51a45d | 172f123d63f2472568cf7989ba136e548f4da5e4 | /AppofML_ITP449/Code_repo/hw1_files/HW1_Q4_Jhaveri_Shantanu.py | d93ad651775315358b06dc417481f9e2249ddceb | [] | no_license | ShantanuJhaveri/LM-Intro_ML | 1a14b78b4afee3653910247c66b645ccf3742c14 | 6edc57d8e5af426cf0d8e7796afb06569084c691 | refs/heads/master | 2023-03-14T03:41:02.176288 | 2021-03-10T03:35:44 | 2021-03-10T03:35:44 | 314,376,420 | 1 | 0 | null | 2020-12-02T09:52:46 | 2020-11-19T21:40:09 | Python | UTF-8 | Python | false | false | 810 | py | # Shantanu Jhaveri
# ITP 449 Fall 2020
# HW1
# Question 4
# Write a program that prompts the user to enter a loan amount, annual interest rate, and number of
# years for a car loan. Then it prints the monthly payment amount.
loanAmount = float(input("ENTER A LOAN AMOUNT: "))
i = float(input("ENTER AN ANNUAL INTEREST RATE: ")) / 12 * 0.01
t = float(input("ENTER NUMBER OF YEARS FOR THE LOAN: ")) * 12
# based on the equation given in the document, but for some reason has the wrong
# answer
def pmt(loanAmount, i, t):
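    # standard amortization formula: M = P * i * (1 + i)**t / ((1 + i)**t - 1)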
monthlyPayments = float((loanAmount * i * ((1 + i) ** t))
/
(((1 + i) ** t) - 1))
# monthlyPayments = loanAmount * (i/(1-(1+i)**(-t))) * 1/(1+i)
return monthlyPayments
print(i)
print(t)
print(pmt(loanAmount, i, t))
| [
"sj06434@usc.edu"
] | sj06434@usc.edu |
ca03e41429a459b4cc726e31ae18f1e3d4294e70 | dbdcc72fcf99fc2f47fd8d30742bf758a62d784f | /FULLERTON_FR_BILLING.py | 66b9db088df7b7a0ff789c314e18f47d2e7fe1f0 | [] | no_license | mohaksehgal/CODES | 35ae14ec9ea772cfc96dad7826435db57eb71b47 | 7674649bf8421871a69d411ecfd1f3a35eed96a9 | refs/heads/main | 2023-06-05T22:47:54.903498 | 2021-06-23T04:45:37 | 2021-06-23T04:45:37 | 379,480,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,247 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 9 11:27:47 2021
@author: rishi
"""
import pandas as pd
import numpy as np
A=pd.read_excel(r"/Users/mohaksehgal/Documents/Work/Billing/FULLERTON/JUN 21/MASTER_FILE_FULLERTON_FR.xlsx")
P=pd.read_excel(r'/Users/mohaksehgal/Documents/Work/MIS/FULLERTON/JUN 21/MIS_FULLERTON_FR.xlsx')
PAID_FILE=pd.read_excel(r"/Users/mohaksehgal/Documents/Work/MIS/FULLERTON/JUN 21/FULLERTON_FR_PAID FILE_22JUN21.xlsx")
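# slab logic below: the payout percentage applied to the paid amount scales with the
# pool's POS resolution %, from 4% (below 75) up to 6.25% (92 and above) in 0.25-point steps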
for i in range(0,len(A['COMPANY'])):
if A.loc[i,'COMPANY']=='FULLERTON':
for j in range(0,len(P['COMPANY'])):
if (A.loc[i,'COMPANY']==P.loc[j,'COMPANY']):
if P.loc[j,'POS_RES%']<75:
if A.loc[i,'STATUS']=='SB':
a=A.loc[i,'Billing PAID AMT.']*4/100
A.loc[i,'PERCENTAGE']=str(4)+'%'
A.loc[i,'MOHAK']=a
print(A.loc[i,'AGREEMENTID'],A.loc[i,'COMPANY'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(4),a,P.loc[j,'COMPANY'])
elif (P.loc[j,'POS_RES%']>=75) and (P.loc[j,'POS_RES%']<78):
if A.loc[i,'STATUS']=='SB':
a=A.loc[i,'Billing PAID AMT.']*4.25/100
A.loc[i,'PERCENTAGE']=str(4.25)+'%'
A.loc[i,'MOHAK']=a
print(A.loc[i,'AGREEMENTID'],A.loc[i,'COMPANY'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(4.25),a,P.loc[j,'COMPANY'])
elif (P.loc[j,'POS_RES%']>=78) and (P.loc[j,'POS_RES%']<80):
if A.loc[i,'STATUS']=='SB':
a=A.loc[i,'Billing PAID AMT.']*4.5/100
A.loc[i,'PERCENTAGE']=str(4.5)+'%'
A.loc[i,'MOHAK']=a
print(A.loc[i,'AGREEMENTID'],A.loc[i,'COMPANY'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(4.5),a,P.loc[j,'COMPANY'])
elif (P.loc[j,'POS_RES%']>=80) and (P.loc[j,'POS_RES%']<82):
if A.loc[i,'STATUS']=='SB':
a=A.loc[i,'Billing PAID AMT.']*4.75/100
A.loc[i,'PERCENTAGE']=str(4.75)+'%'
A.loc[i,'MOHAK']=a
print(A.loc[i,'AGREEMENTID'],A.loc[i,'COMPANY'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(4.75),a,P.loc[j,'COMPANY'])
elif (P.loc[j,'POS_RES%']>=82) and (P.loc[j,'POS_RES%']<84):
if A.loc[i,'STATUS']=='SB':
a=A.loc[i,'Billing PAID AMT.']*5/100
A.loc[i,'PERCENTAGE']=str(5)+'%'
A.loc[i,'MOHAK']=a
                        print(A.loc[i,'AGREEMENTID'],A.loc[i,'COMPANY'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(5),a,P.loc[j,'COMPANY'])
elif (P.loc[j,'POS_RES%']>=84) and (P.loc[j,'POS_RES%']<86):
if A.loc[i,'STATUS']=='SB':
a=A.loc[i,'Billing PAID AMT.']*5.25/100
A.loc[i,'PERCENTAGE']=str(5.25)+'%'
A.loc[i,'MOHAK']=a
                        print(A.loc[i,'AGREEMENTID'],A.loc[i,'COMPANY'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(5.25),a,P.loc[j,'COMPANY'])
elif (P.loc[j,'POS_RES%']>=86) and (P.loc[j,'POS_RES%']<88):
if A.loc[i,'STATUS']=='SB':
a=A.loc[i,'Billing PAID AMT.']*5.5/100
A.loc[i,'PERCENTAGE']=str(5.5)+'%'
A.loc[i,'MOHAK']=a
                        print(A.loc[i,'AGREEMENTID'],A.loc[i,'COMPANY'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(5.5),a,P.loc[j,'COMPANY'])
elif (P.loc[j,'POS_RES%']>=88) and (P.loc[j,'POS_RES%']<90):
if A.loc[i,'STATUS']=='SB':
a=A.loc[i,'Billing PAID AMT.']*5.75/100
A.loc[i,'PERCENTAGE']=str(5.75)+'%'
A.loc[i,'MOHAK']=a
                        print(A.loc[i,'AGREEMENTID'],A.loc[i,'COMPANY'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(5.75),a,P.loc[j,'COMPANY'])
elif (P.loc[j,'POS_RES%']>=90) and (P.loc[j,'POS_RES%']<92):
if A.loc[i,'STATUS']=='SB':
a=A.loc[i,'Billing PAID AMT.']*6/100
A.loc[i,'PERCENTAGE']=str(6)+'%'
A.loc[i,'MOHAK']=a
                        print(A.loc[i,'AGREEMENTID'],A.loc[i,'COMPANY'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(6),a,P.loc[j,'COMPANY'])
elif (P.loc[j,'POS_RES%']>=92):
if A.loc[i,'STATUS']=='SB':
a=A.loc[i,'Billing PAID AMT.']*6.25/100
A.loc[i,'PERCENTAGE']=str(6.25)+'%'
A.loc[i,'MOHAK']=a
print(A.loc[i,'AGREEMENTID'],A.loc[i,'COMPANY'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(6.25),a,P.loc[j,'COMPANY'])
# PL-SELF
# elif A.loc[i,'PRODUCT']=='PL Self':
# for j in range(0,len(P['PRODUCT'])):
# if (A.loc[i,'PRODUCT']==P.loc[j,'PRODUCT']):
# if P.loc[j,'POS_RES%']<55:
# if A.loc[i,'STATUS']=='SB':
# a=A.loc[i,'Billing PAID AMT.']*6/100
# A.loc[i,'PERCENTAGE']=str(6)+'%'
# A.loc[i,'MOHAK']=a
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'PRODUCT'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(6),a,P.loc[j,'PRODUCT'])
# elif (P.loc[j,'POS_RES%']>=55) and (P.loc[j,'POS_RES%']<65):
# if A.loc[i,'STATUS']=='SB':
# a=A.loc[i,'Billing PAID AMT.']*8/100
# A.loc[i,'PERCENTAGE']=str(8)+'%'
# A.loc[i,'MOHAK']=a
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'PRODUCT'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(8),a,P.loc[j,'PRODUCT'])
# elif (P.loc[j,'POS_RES%']>=65) and (P.loc[j,'POS_RES%']<70):
# if A.loc[i,'STATUS']=='SB':
# a=A.loc[i,'Billing PAID AMT.']*10/100
# A.loc[i,'PERCENTAGE']=str(10)+'%'
# A.loc[i,'MOHAK']=a
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'PRODUCT'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(10),a,P.loc[j,'PRODUCT'])
# elif (P.loc[j,'POS_RES%']>=70) and (P.loc[j,'POS_RES%']<75):
# if A.loc[i,'STATUS']=='SB':
# a=A.loc[i,'Billing PAID AMT.']*12/100
# A.loc[i,'PERCENTAGE']=str(12)+'%'
# A.loc[i,'MOHAK']=a
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'PRODUCT'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(12),a,P.loc[j,'PRODUCT'])
# elif (P.loc[j,'POS_RES%']>=75) and (P.loc[j,'POS_RES%']<80):
# if A.loc[i,'STATUS']=='SB':
# a=A.loc[i,'Billing PAID AMT.']*14/100
# A.loc[i,'PERCENTAGE']=str(14)+'%'
# A.loc[i,'MOHAK']=a
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'PRODUCT'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(14),a,P.loc[j,'PRODUCT'])
# elif (P.loc[j,'POS_RES%']>=80) and (P.loc[j,'POS_RES%']<85):
# if A.loc[i,'STATUS']=='SB':
# a=A.loc[i,'Billing PAID AMT.']*16/100
# A.loc[i,'PERCENTAGE']=str(16)+'%'
# A.loc[i,'MOHAK']=a
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'PRODUCT'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(16),a,P.loc[j,'PRODUCT'])
# elif (P.loc[j,'POS_RES%']>=85) and (P.loc[j,'POS_RES%']<90):
# if A.loc[i,'STATUS']=='SB':
# a=A.loc[i,'Billing PAID AMT.']*18/100
# A.loc[i,'PERCENTAGE']=str(18)+'%'
# A.loc[i,'MOHAK']=a
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'PRODUCT'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(18),a,P.loc[j,'PRODUCT'])
# elif (P.loc[j,'POS_RES%']>=90):
# if A.loc[i,'STATUS']=='SB':
# a=A.loc[i,'Billing PAID AMT.']*20/100
# A.loc[i,'PERCENTAGE']=str(20)+'%'
# A.loc[i,'MOHAK']=a
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'PRODUCT'],P.loc[j,'POS_RES%'],A.loc[i,'Billing PAID AMT.'],str(20),a,P.loc[j,'PRODUCT'])
FLOWLIST=A[A['STATUS']=='FLOW'].index
for i in range(0,len(FLOWLIST)):
A.loc[FLOWLIST[i],'MOHAK']=0
A[A['MOHAK'].isnull()]['STATUS'].value_counts()
# for i in range(0,len(A['AGREEMENTID'])):
# if A.loc[i,'STATUS']=='SETTLEMENT':
# for j in range(0,len(PAID_FILE['AGREEMENTID'])):
# if A.loc[i,'AGREEMENTID']==PAID_FILE.loc[j,'AGREEMENTID']:
# wavier=100-round(PAID_FILE.loc[j,'SETTLEMENT_TOTAL_AMOUNT']/A.loc[i,'POS']*100)
# if (A.loc[i,'POS']<100000):
# if wavier==0:
# a=A.loc[i,'Billing PAID AMT.']*18/100
# if a>=20000:
# A.loc[i,'MOHAK']=20000
# elif a<20000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(18)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(18),A.loc[i,'MOHAK'],wavier)
# elif (wavier>0) and (wavier<=25):
# a=A.loc[i,'Billing PAID AMT.']*15/100
# if a>=15000:
# A.loc[i,'MOHAK']=15000
# elif a<15000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(15)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(15),A.loc[i,'MOHAK'],wavier)
# elif (wavier>25) and (wavier<=50):
# a=A.loc[i,'Billing PAID AMT.']*12/100
# if a>=10000:
# A.loc[i,'MOHAK']=10000
# elif a<10000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(12)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(12),A.loc[i,'MOHAK'],wavier)
# elif (A.loc[i,'POS']>=100000) and (A.loc[i,'POS']<200000):
# if wavier==0:
# a=A.loc[i,'Billing PAID AMT.']*21/100
# if a>=30000:
# A.loc[i,'MOHAK']=30000
# elif a<30000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(21)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(21),A.loc[i,'MOHAK'],wavier)
# elif (wavier>0) and (wavier<=25):
# a=A.loc[i,'Billing PAID AMT.']*18/100
# if a>=25000:
# A.loc[i,'MOHAK']=25000
# elif a<25000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(18)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(18),A.loc[i,'MOHAK'],wavier)
# elif (wavier>25) and (wavier<=50):
# a=A.loc[i,'Billing PAID AMT.']*15/100
# if a>=20000:
# A.loc[i,'MOHAK']=20000
# elif a<20000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(15)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(15),A.loc[i,'MOHAK'],wavier)
# elif (A.loc[i,'POS']>=200000) and (A.loc[i,'POS']<300000):
# if wavier==0:
# a=A.loc[i,'Billing PAID AMT.']*25/100
# if a>=35000:
# A.loc[i,'MOHAK']=35000
# elif a<35000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(25)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(25),A.loc[i,'MOHAK'],wavier)
# elif (wavier>0) and (wavier<=25):
# a=A.loc[i,'Billing PAID AMT.']*22/100
# if a>=30000:
# A.loc[i,'MOHAK']=30000
# elif a<30000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(22)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(22),A.loc[i,'MOHAK'],wavier)
# elif (wavier>25) and (wavier<=50):
# a=A.loc[i,'Billing PAID AMT.']*19/100
# if a>=25000:
# A.loc[i,'MOHAK']=25000
# elif a<25000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(19)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(19),A.loc[i,'MOHAK'],wavier)
# elif (A.loc[i,'POS']>=300000):
# if wavier==0:
# a=A.loc[i,'Billing PAID AMT.']*30/100
# if a>=40000:
# A.loc[i,'MOHAK']=40000
# elif a<40000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(30)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(30),A.loc[i,'MOHAK'],wavier)
# elif (wavier>0) and (wavier<=25):
# a=A.loc[i,'Billing PAID AMT.']*27/100
# if a>=35000:
# A.loc[i,'MOHAK']=35000
# elif a<35000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(27)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(27),A.loc[i,'MOHAK'],wavier)
# elif (wavier>25) and (wavier<=50):
# a=A.loc[i,'Billing PAID AMT.']*24/100
# if a>=30000:
# A.loc[i,'MOHAK']=30000
# elif a<30000:
# A.loc[i,'MOHAK']=a
# A.loc[i,'PERCENTAGE']=str(24)+'%'
# print(A.loc[i,'AGREEMENTID'],A.loc[i,'Billing PAID AMT.'],str(24),A.loc[i,'MOHAK'],wavier)
A.rename({'MOHAK':'PAYOUT'},axis=1,inplace=True)
A.to_excel(r'/Users/mohaksehgal/Documents/Work/Billing/FULLERTON/JUN 21/PAYOUT_FULLERTON_FR.xlsx',index=False) | [
"noreply@github.com"
] | noreply@github.com |
399b47e046b7c3c4f711e023be033fbbff5c8369 | b7b35320158115f1aeb4c86ab69fca0d6081c5fa | /ThePosts/posts/migrations/0001_initial.py | 7ab3ef0c9576274bf8de77ee3c63be9b012e0507 | [] | no_license | osmarsalesjr/PPI | 0c7d1d1d79ca00c739fb6877b317f8837d5a6b1d | 57e939adeaf66d849c72c7e4f48501d9e9e3db66 | refs/heads/master | 2020-04-17T16:18:52.175841 | 2019-03-03T17:30:53 | 2019-03-03T17:30:53 | 166,735,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | # Generated by Django 2.1.5 on 2019-02-10 02:15
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('text', models.CharField(max_length=255)),
],
),
]
| [
"noreply@github.com"
] | noreply@github.com |
221d5cafd65cefa2f79d23099f66c2284256273a | 443a42433948d7003e200705cd27271f4b3e1029 | /posts/models.py | 30b08868f5572c61a1e3e4577dea12ddcf244f6a | [] | no_license | alejo5248/platzigram | fc86902b4f908bc54176df5a6fa4cf931c6c8730 | efd8239c675d3aedc286cdead7a1e4acd0ea1b03 | refs/heads/master | 2020-04-14T18:41:02.350774 | 2019-01-10T16:49:11 | 2019-01-10T16:49:11 | 164,029,174 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py |
from django.db import models
from django.contrib.auth.models import User
class Post(models.Model):
user=models.ForeignKey(User, on_delete=models.CASCADE)
perfil=models.ForeignKey('usuarios.Perfil', on_delete=models.CASCADE)
titulo=models.CharField(max_length=50)
foto=models.ImageField(upload_to='posts/images')
creado=models.DateTimeField(auto_now_add=True)
modificado=models.DateTimeField(auto_now=True)
def __str__(self):
        return '{} by @{}'.format(self.titulo, self.user.username) | [
"alejandro_hurtado@utp.edu.co"
] | alejandro_hurtado@utp.edu.co |
4e2ef96ae7a28d25546c82eb854eb6d1494f7b18 | 99c3315d8d5fe7df02cee16c8dcac049d707aba2 | /server.py | a68702fc985e324fd4e9a341c5ab745bdb2b178c | [] | no_license | trianasuanjaya/utsrekayasaprotokol | db77851d452a6553966a40b890390d4489279911 | 0f36df8e301faffa9896a2618885c49277419b0f | refs/heads/master | 2020-07-29T04:23:23.823261 | 2016-11-14T08:37:34 | 2016-11-14T08:37:34 | 73,682,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | import socket
import time
import datetime
s=socket.socket()
host = socket.gethostname()
port = 12345
s.bind ((host,port))
s.listen(5)
while True:
    c, addr = s.accept()
    print 'command list:', c.recv(1024)
    print 'from', addr
    localtime = time.localtime(time.time())
    dt = datetime.datetime(*localtime[:6])
    print "Local current time :", localtime
    c.send(dt.strftime('Time : %H : %M'))
    c.close()
| [
"noreply@github.com"
] | noreply@github.com |
c31079b9c2e363ef5429989d70f5beee59451d4c | f4e9a1950e629f1de65842c2522b856417ea10cf | /Python/addNumbers.py | 32adb196801ef1e6e757384340e2ccfc590d43ef | [] | no_license | TimmyYung/TheRightCodes | 687d972658b993e620b4d9fe255d459d886a47d7 | 322eb0542c176c4178f3a240e422461b94e6f2ad | refs/heads/gh-pages | 2023-02-04T10:47:08.635052 | 2020-12-22T20:21:43 | 2020-12-22T20:21:43 | 149,155,435 | 0 | 0 | null | 2020-03-25T05:03:36 | 2018-09-17T16:31:36 | HTML | UTF-8 | Python | false | false | 118 | py | def sum_to(number):
total = 0
for n in range(number+1):
total += n
return total
print(sum_to(10)) | [
"592619@pdsb.net"
] | 592619@pdsb.net |
046b6856d4a0ebc36013df65d5ad55797e818c35 | ba9394bd4f5845ab8e3fcdb7e7dfe18c41c4c8d1 | /core/main/run_group_test.py | ae0c696888b7f2e07330c844cd3933bf5975ad8e | [] | no_license | ZongweiBai/rest-test-tools | b050992aaffa2431bcee06c82e405fdfe50ab5f7 | dd096b5a06c2cc2962c5f6be9f85867b2623293c | refs/heads/master | 2022-11-28T04:36:10.836360 | 2020-08-04T09:16:20 | 2020-08-04T09:16:20 | 282,186,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,375 | py | # coding:utf-8
import os
import sys
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from core.util.request_http import RequestHttp
from core.data.get_data import GetData
from core.util.common_assert import CommonUtil
import json
from core.data.dependent_data import DependentData
from core.util.send_mail import SendEmail
from core.util.log_printer import Logger
from core.core_config import CoreConfig
class RunGroupTest:
"""运行组测试"""
def __init__(self, file_name=None, sheet_id=0):
self.core_config = CoreConfig()
self.logger = Logger(use_console=False).logger
self.request_http = RequestHttp()
self.data = GetData(file_name=file_name, sheet_id=sheet_id)
self.com_util = CommonUtil()
self.send_mail = SendEmail()
self.dependent_data = DependentData()
    # execute all test cases
def go_on_run(self):
res = None
pass_count = []
fail_count = []
no_run_count = []
excel_data_list = self.data.parse_excel_file()
index = 1
for excel_data in iter(excel_data_list):
try:
if excel_data.run_test_case:
request_info = self.data.refresh_request_info(excel_data, self.dependent_data)
res = self.request_http.execute(excel_data.request_method, excel_data.request_url,
request_info[0], request_info[1], request_info[2])
                    if excel_data.expect_http_code != res.status_code:
                        fail_count.append(index)
                        self.logger.error(("case %s actual HTTP status code: %s" % (index, res.status_code)))
                        raise Exception("case %s HTTP status code does not match the expected value" % index)
                    # the expected value read from Excel is a str while res.json() is a dict,
                    # so serialize the response to JSON before comparing
                    if self.com_util.is_contain(excel_data.expect_http_response, json.dumps(res.json())):
                        pass_count.append(index)
                        self.logger.info("case %s passed" % index)
                        # cache the response so dependent cases can reference it
                        self.dependent_data.put_cache(excel_data.test_case_id, res.json())
                    else:
                        fail_count.append(index)
                        self.logger.error(("case %s actual response: %s" % (index, res.json())))
                        raise Exception("case %s result does not match the expected value" % index)
                else:
                    no_run_count.append(index)
            except Exception as e:
                # write the failure to the configured log file
                self.logger.error(("case %s raised an error:" % index))
                self.logger.exception(e)
                fail_count.append(index)
                # re-raise so the run stops on a failed case
                raise Exception(e)
index = index + 1
# self.send_mail.send_main(pass_count,fail_count,no_run_count)
# this block runs only when the module is executed directly, not when it is imported
if __name__ == '__main__':
run = RunGroupTest()
run.go_on_run()
| [
"jonezw@163.com"
] | jonezw@163.com |
41d5b59f249d61ce0e7c3a1265c87df046d697a5 | 4a940f0b685540f7013a2a3ce2893fd51aafdd2c | /Week3/suffix_array_long/suffix_array_long.py | d098cceec74ee0914ece2d6e31458c9ea1fcb97c | [] | no_license | QilinGu/String | 11028223847f962cc0ba7a630dcb5c87c4fa4eb4 | 46efcdc35e5ebe47025bb1f9743ad80b41b360ca | refs/heads/master | 2021-01-11T17:43:22.171353 | 2016-09-10T10:25:56 | 2016-09-10T10:25:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,420 | py | # python3
import sys
def sort_characters(text):
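    # counting sort over the distinct characters: order[i] ends up holding the
    # index in text of the i-th smallest character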
order = [None for i in range(len(text))]
alpha = len(set(text))
my_hash = {}
for i, each in enumerate(sorted(set(text))):
my_hash[each] = i
count = [0 for i in range(alpha)]
for i in range(0, len(text)):
count[my_hash[text[i]]] += 1
for j in range(1, alpha):
count[j] += count[j - 1]
i = len(text) - 1
while i >= 0:
c = my_hash[text[i]]
count[c] -= 1
order[count[c]] = i
i -= 1
return order
def compute_char_classes(text, order):
clas = [None for _ in range(len(text))]
clas[order[0]] = 0
for i in range(1, len(text)):
clas[order[i]] = clas[order[i - 1]]
if text[order[i]] != text[order[i-1]]:
clas[order[i]] += 1
return clas
def sort_doubled(text, L, order, clas):
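    # stable counting sort of the cyclic shifts of length 2L, keyed on the class of
    # their first half; order/clas describe the already-sorted length-L shifts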
count = [0 for i in range(len(text))]
newOrder = [None for i in range(len(text))]
for i in range(len(text)):
count[clas[i]] += 1
for j in range(1, len(text)):
count[j] += count[j - 1]
i = len(text) - 1
while i >= 0:
start = (order[i] - L + len(text)) % len(text)
cl = clas[start]
count[cl] -= 1
newOrder[count[cl]] = start
i -= 1
return newOrder
def updated_classes(order, clas, L):
n = len(order)
new_class = [None for i in range(n)]
new_class[order[0]] = 0
for i in range(1, n):
cur = order[i]
prev = order[i - 1]
mid = (cur + L) % n
mid_prev = (prev + L) % n
new_class[cur] = new_class[prev]
if clas[cur] != clas[prev] or clas[mid] != clas[mid_prev]:
new_class[cur] += 1
return new_class
def lcp_of_suffixes(text, i, j, equal):
    # print(equal, text[i:], text[j:])  # debug trace, disabled
lcp = equal
while i + lcp < len(text) and j + lcp < len(text):
if text[i + lcp] == text[j + lcp]:
lcp += 1
else:
break
return lcp
def invert_suffix_array(order):
pos = [None for _ in range(len(order))]
for i in range(len(pos)):
pos[order[i]] = i
return pos
def compute_lcp_array(text, order):
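    # Kasai-style construction: walk the suffixes in text order, reusing the previous
    # LCP value minus one as a lower bound for the next comparison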
lcp_array = [None for i in range(len(text) - 1)]
lcp = 0
pos_in_order = invert_suffix_array(order)
suffix = order[0]
for i in range(len(text)):
order_index = pos_in_order[suffix]
if order_index == len(text) - 1:
lcp = 0
suffix = (suffix + 1) % len(text)
continue
next_suffix = order[order_index + 1]
if lcp > 0:
lcp = lcp_of_suffixes(text, suffix, next_suffix, lcp - 1)
else:
lcp = lcp_of_suffixes(text, suffix, next_suffix, 0)
lcp_array[order_index] = lcp
suffix = (suffix + 1) % len(text)
return lcp_array
def build_suffix_array(text):
"""
Build suffix array of the string text and
return a list result of the same length as the text
such that the value result[i] is the index (0-based)
in text where the i-th lexicographically smallest
suffix of text starts.
"""
result = []
# Implement this function yourself
order = sort_characters(text)
clas = compute_char_classes(text, order)
L = 1
while L < len(text):
order = sort_doubled(text, L, order, clas)
clas = updated_classes(order, clas, L)
L *= 2
result = order
    # debug output, disabled so stdout carries only the final suffix array:
    # print(compute_lcp_array(text, result))
    # for each in result:
    #     print(text[each:])
return result
if __name__ == '__main__':
text = sys.stdin.readline().strip()
print(" ".join(map(str, build_suffix_array(text))))
| [
"arsen.khadikov@gmail.com"
] | arsen.khadikov@gmail.com |
2495228312f4bfdd2e648407cb25f51a82d29143 | 812a1c54d03c0542e05f3a55d6caaf2b5723cf5c | /rango/wrango/templatetags/rango_template_tags.py | 2500599ca11ff18088fd9e653f89fa4335a34de0 | [] | no_license | tabithalee/rango2 | 4b407ec8453ef398c96a95c4a9ec66d4ecb83214 | 42542351b6cf0381fe9b6e4c56f4634a1fa6f2dc | refs/heads/master | 2021-01-11T16:39:22.087221 | 2017-02-10T17:12:25 | 2017-02-10T17:12:25 | 80,132,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | from django import template
from wrango.models import Category
register = template.Library()
@register.inclusion_tag('rango/cats.html')
def get_category_list(cat=None):
return {'cats': Category.objects.all(), 'act_cat': cat}
| [
"2295373l@student.gla.ac.uk"
] | 2295373l@student.gla.ac.uk |
3d70ab16d2c568cd1c582f59087249c3ee44045e | 509c3aeded2b8a5abf3e536cd3283d97c67a93d9 | /Web/WebProgramming/server.py | 122568483a763ba300ab6f90882dfdb926fb5463 | [] | no_license | Kimmirikwa/bc-13-Inventory_Management | 6849bfdbbbcb857bcf895e2fb9fa89674d52e3b7 | 1dd100ce1a5c4f09b509487f74d82cd5c90d50e7 | refs/heads/master | 2020-06-13T16:19:53.838733 | 2016-12-06T14:45:40 | 2016-12-06T14:45:40 | 75,712,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | from functools import wraps
from flask import Flask, url_for, request, render_template, Response
# TODO: authentication decorator goes here (functools.wraps is imported for it)
app = Flask(__name__)
@app.route("/")
def sign_in():
pass
@app.route("/addasset", methods=["GET","POST"])
def add_asset():
pass
@app.route("/addusers")
def add_users():
pass
@app.route("/assignasset")
def assign_asset():
pass
@app.route("/unassignasset")
def unassign_asset():
pass
@app.route("/viewassigned")
def view_assigned():
pass
@app.route("/repportlost")
def report_lost():
pass
@app.route("/reportfound")
def report_found():
pass | [
"kimrodrikwa@gmail.com"
] | kimrodrikwa@gmail.com |
499384826def0b608af69a4bac123283fadb6546 | b6a160bc968e2c90fd4623312044a1a3093e3659 | /google-python-exercises/basic/string2.py | cb5b0147fc1e4cc051c9e088b67f2e44c3775af8 | [
"Apache-2.0"
] | permissive | bminor21/PythonProjects | 82a2a4ccef64598c81514bcc5c27ce44b5a55531 | b8c3ed637e0b239e2bb942ba461a625a14eb8755 | refs/heads/master | 2022-01-06T17:15:04.430726 | 2019-06-14T00:02:58 | 2019-06-14T00:02:58 | 109,908,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,795 | py | #!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
# +++your code here+++
  if len(s) >= 3:
if s[-3:] == "ing":
s = s + "ly"
else:
s = s + "ing"
return s
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
# +++your code here+++
_str = ""
l1 = s.find("not")
l2 = s.find("bad")
if l1 == -1 or l2 == -1 or l2 < l1:
return s
_str = s[0:l1] + "good" + s[l2+3:]
return _str
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
# +++your code here+++
aMid = len(a) / 2
bMid = len(b) / 2
if len(a) % 2 == 1:
aMid += 1
if len(b) % 2 == 1:
bMid += 1
s = a[0:aMid] + b[0:bMid] + a[aMid:] + b[bMid:]
return s
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| [
"brett.minor21@gmail.com"
] | brett.minor21@gmail.com |
1d8305e331e32d08f428a20307d75df6b5b6bb80 | 3ff6932fb2b356bca95c4ae6794ac85d838c958c | /source_pytorch/predict.py | 48c0844421b8cf0622cd438308e5e7bd7584de5b | [] | no_license | gkuzivam/plagiarism-detector | 9f9a9da2f4ca10fe4072f9f6a44c012b8e399a4a | 23091368788aebff549ffa23d002b4d2e044db4c | refs/heads/master | 2020-07-03T06:04:10.592992 | 2019-08-11T20:55:59 | 2019-08-11T20:55:59 | 201,812,915 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,574 | py | # import libraries
import os
import numpy as np
import torch
from six import BytesIO
# import model from model.py, by name
#from source_pytorch import model
from source_pytorch.model import BinaryClassifier
# default content type is numpy array
NP_CONTENT_TYPE = 'application/x-npy'
# Provided model load function
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = BinaryClassifier(model_info['input_features'], model_info['hidden_dim'], model_info['output_dim'])
# Load the store model parameters.
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# Prep for testing
model.to(device).eval()
print("Done loading model.")
return model
# Provided input data loading
def input_fn(serialized_input_data, content_type):
print('Deserializing the input data.')
if content_type == NP_CONTENT_TYPE:
stream = BytesIO(serialized_input_data)
return np.load(stream)
raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
# Provided output data handling
def output_fn(prediction_output, accept):
print('Serializing the generated output.')
if accept == NP_CONTENT_TYPE:
stream = BytesIO()
np.save(stream, prediction_output)
return stream.getvalue(), accept
raise Exception('Requested unsupported ContentType in Accept: ' + accept)
# Provided predict function
def predict_fn(input_data, model):
print('Predicting class labels for the input data...')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Process input_data so that it is ready to be sent to our model.
data = torch.from_numpy(input_data.astype('float32'))
data = data.to(device)
# Put the model into evaluation mode
model.eval()
# Compute the result of applying the model to the input data
# The variable `out_label` should be a rounded value, either 1 or 0
out = model(data)
out_np = out.cpu().detach().numpy()
out_label = out_np.round()
return out_label | [
"noreply@github.com"
] | noreply@github.com |
f6dd0869441d5a356d50e7e10e961dee504deb0e | 46734ec336f502dc3d69e31428bacb9cef222a70 | /examples/bq_file_load_benchmark/load_benchmark_tools/benchmark_load_table.py | cee84591f2ea51e813d6e1c94db979c177d7a5b4 | [
"Apache-2.0"
] | permissive | tims/professional-services | 93ef3d3083d73991d4faba2c40a8ab4ea550cca8 | 8c610e259217ad83dbbceeb388aa1da828d1343b | refs/heads/master | 2020-12-07T20:01:07.550995 | 2020-02-03T07:43:31 | 2020-02-03T07:43:31 | 232,788,171 | 0 | 0 | Apache-2.0 | 2020-01-09T10:58:05 | 2020-01-09T10:58:04 | null | UTF-8 | Python | false | false | 10,178 | py | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import re
import time
from google.api_core import exceptions
from google.cloud import bigquery
from google.cloud import storage
from generic_benchmark_tools import benchmark_parameters
from generic_benchmark_tools import benchmark_result_util
from generic_benchmark_tools import table_util
from generic_benchmark_tools import file_constants
class BenchmarkLoadTable(object):
"""Represents a BigQuery load table.
Holds methods for creating a table in BigQuery and loading data from GCS
into the table.
Attributes:
benchmark_name(str): The name of the benchmark test.
bq_project(str): ID of the project that holds the BigQuery dataset
and table that the data is loaded into.
bq_client(google.cloud.bigquery.client.Client): Client to hold
configurations needed for BigQuery API requests.
gcs_project(str): ID of the project that holds the GCS bucket
where the files to be loaded are stored.
gcs_client(google.cloud.storage.client.Client): Client to hold
configurations needed for GCS API requests.
staging_project(str): ID of the project that holds the staging tables
used to create the file combinations.
staging_dataset_id(str): ID of the dataset that contains the staging
table that the files loaded into the benchmark table were
generated from.
dataset_id(str): ID of the dataset that holds the benchmark table.
dataset_ref(google.cloud.bigquery.dataset.DatasetReference): Pointer
to the dataset that holds the benchmark table.
bucket_name(str): Name of the bucket that holds the files to be loaded
into the benchmark table.
path(str): Path of the files in GCS to be loaded into the benchmark
table. Path does not include the full GCS URI.
uri(str): Full GCS URI of the files to be loaded into the benchmark
table. Includes the 'gs://' prefix, the bucket name, and path
above.
results_table_name(str): Name of the BigQuery table that the
benchmark table's load results will be inserted into.
results_table_dataset_id(str): Name of the BigQuery dataset that the
benchmark table's load results will be inserted into.
results_table(google.cloud.bigquery.table.Table): BigQuery table that
the benchmark table's load results will be inserted into.
bq_logs_dataset(str): Name of dataset hold BQ logs table.
file_type(str): Type of files that will be loaded from GCS into
the benchmark table (i.e. csv, avro, parquet, etc).
compression_format(bigquery.job.Compression): Object representing the
compression of the file.
        benchmark_table_util(generic_benchmark_tools.table_util.TableUtil): Object to
assist with the handling of the benchmark table's creation
and properties.
num_columns(int): Number of columns in the benchmark table.
column_types(str): Representation of the types of columns in the
benchmark table(50_STRING_50_NUMERIC, 100_STRING, etc)
bq_schema(List[google.cloud.bigquery.schema.SchemaField]): Schema of
the benchmark table.
load_job(google.cloud.bigquery.job.LoadJob): Object for loading data
from GCS to BigQuery tables.
job_destination_table(str): Name of the destination table. Generated
using the current timestamp converted to a string.
"""
def __init__(
self,
benchmark_name,
bq_project,
gcs_project,
staging_project,
staging_dataset_id,
dataset_id,
bucket_name,
path,
results_table_name,
results_table_dataset_id,
bq_logs_dataset,
):
self.benchmark_name = benchmark_name
self.bq_project = bq_project
self.bq_client = bigquery.Client(
project=self.bq_project
)
self.gcs_project = gcs_project
self.gcs_client = storage.Client(
project=self.gcs_project
)
self.staging_project = staging_project
self.staging_dataset_id = staging_dataset_id
self.dataset_id = dataset_id
self.dataset_ref = self.bq_client.dataset(self.dataset_id)
self.bucket_name = bucket_name
self.path = path
self.uri = 'gs://{0:s}/{1:s}'.format(self.bucket_name, path)
self.results_table_name = results_table_name
self.results_table_dataset_id = results_table_dataset_id
self.results_table_dataset_ref = self.bq_client.dataset(
results_table_dataset_id
)
results_table_ref = self.results_table_dataset_ref.table(
self.results_table_name
)
self.results_table = self.bq_client.get_table(results_table_ref)
self.bq_logs_dataset = bq_logs_dataset
self.file_type = None
self.compression_format = None
self.benchmark_table_util = None
self.num_columns = None
self.column_types = None
self.bq_schema = None
self.load_job = None
self.job_destination_table = None
self.gather_file_properties()
def gather_file_properties(self):
"""Gathers properties of the files loaded into the benchmark table.
"""
# gather file properties from the files' path
# pylint: disable=line-too-long
benchmark_details_pattern = \
r'fileType=(\w+)/compression=(\w+)/numColumns=(\d+)/columnTypes=(\w+)/numFiles=(\d+)/tableSize=(\w+)'
self.file_type, compression, self.num_columns, self.column_types, \
num_files, table_size = \
re.findall(benchmark_details_pattern, self.path)[0]
self.compression_format = (file_constants.FILE_CONSTANTS
['compressionFormats'][compression])
# get schema from the staging table that the file was generated from
source_staging_table_name = '{0:s}_{1:s}'.format(
self.column_types,
self.num_columns
)
source_staging_table_util = table_util.TableUtil(
source_staging_table_name,
self.staging_dataset_id,
project=self.staging_project,
)
if self.file_type == 'parquet' or self.file_type == 'avro':
self.bq_schema = None
else:
self.bq_schema = source_staging_table_util.table.schema
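        # A sketch of what benchmark_details_pattern extracts from a
        # hypothetical GCS path (the concrete values are made up):
        #
        #   path = ('fileType=csv/compression=none/numColumns=10/'
        #           'columnTypes=100_STRING/numFiles=100/tableSize=2GB')
        #   re.findall(benchmark_details_pattern, path)[0]
        #   # -> ('csv', 'none', '10', '100_STRING', '100', '2GB')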
def create_table(self):
"""Creates the bencmark table in BigQuery.
The method creates an empty table using the schema from the staging
table that the files were generated from. It uses the current
timestamp to name the benchmark table to create a random, unique name.
"""
self.job_destination_table = '{0:d}'.format(int(time.time()))
self.benchmark_table_util = table_util.TableUtil(
self.job_destination_table,
self.dataset_id,
bq_schema=self.bq_schema,
)
self.benchmark_table_util.create_table()
def load_from_gcs(self):
"""Loads GCS files into the benchmark table and stores results.
        Creates and runs a load job to load files from the GCS URI into the
benchmark table. Then uses benchmark_result_util.BenchmarkResultUtil
to gather results and generate a results row, which it then inserts
into the BigQuery results table.
Raises:
google.api_core.exceptions.BadRequest: 400 Error while reading data,
error message: Total data size exceeds max allowed size
"""
job_type = benchmark_parameters.BENCHMARK_PARAMETERS[
'benchmark_names'][self.benchmark_name]['type']
source_formats = file_constants.FILE_CONSTANTS['sourceFormats']
job_config = bigquery.LoadJobConfig()
job_config.source_format = source_formats[self.file_type]
if self.file_type == 'csv':
job_config.skip_leading_rows = 1
self.load_job = self.bq_client.load_table_from_uri(
source_uris='{0:s}/*'.format(self.uri),
destination=self.dataset_ref.table(self.job_destination_table),
job_config=job_config,
)
logging.info('Started load job {0:s} for table {1:s}.'.format(
self.load_job.job_id,
self.job_destination_table
))
try:
self.load_job.result()
result = benchmark_result_util.LoadBenchmarkResultUtil(
job=self.load_job,
job_type=job_type,
benchmark_name=self.benchmark_name,
project_id=self.bq_project,
result_table_name=self.results_table_name,
result_dataset_id=self.results_table_dataset_id,
bq_logs_dataset=self.bq_logs_dataset,
job_source_uri='{0:s}/*'.format(self.uri),
load_table_id=self.job_destination_table,
load_dataset_id=self.dataset_id
)
result.insert_results_row()
except exceptions.BadRequest as e:
logging.error(e.message)
self.bq_client.delete_table(self.benchmark_table_util.table_ref)
logging.info('Deleting table {0:s}'.format(
self.job_destination_table
))
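# A minimal usage sketch, assuming the referenced projects, datasets, bucket
# and results table already exist (every identifier below is hypothetical):
#
#   table = BenchmarkLoadTable(
#       benchmark_name='FILE_LOAD',
#       bq_project='my-bq-project',
#       gcs_project='my-gcs-project',
#       staging_project='my-staging-project',
#       staging_dataset_id='staging',
#       dataset_id='load_benchmark',
#       bucket_name='benchmark-files',
#       path='fileType=csv/compression=none/numColumns=10/'
#            'columnTypes=100_STRING/numFiles=100/tableSize=2GB',
#       results_table_name='load_results',
#       results_table_dataset_id='results_dataset',
#       bq_logs_dataset='bq_logs',
#   )
#   table.create_table()
#   table.load_from_gcs()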
| [
"jferriero@google.com"
] | jferriero@google.com |
03f2beccf2929eaf7844b8fcfdfdbb117ecae76c | 957ff175ffedc1cfafd43ebe20f64f83cc55325f | /heritagesites/SELECT ca.country_area_name, hs.site_name.py | 56d3d62ccfbd1f2743aead41ee3e32fa96811754 | [] | no_license | chenyipeng1/heritagesites | d12621a17d1a337e7beee0546065006246d78ed6 | 740262646c193c42eb6612e17843ac534798426f | refs/heads/master | 2020-04-06T08:31:35.066478 | 2018-11-20T01:48:51 | 2018-11-20T01:48:51 | 157,307,055 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | SELECT rg.region_name, srg.sub_region_name,ca.country_area_name, hs.site_name, hsc.category_name
FROM heritage_site hs
LEFT JOIN heritage_site_jurisdiction hsj
ON hs.heritage_site_id = hsj.heritage_site_id
LEFT JOIN country_area ca
ON hsj.country_area_id = ca.country_area_id
LEFT JOIN region rg
ON ca.region_id = rg.region_id
LEFT JOIN sub_region srg
ON ca.sub_region_id = srg.sub_region_id
LEFT JOIN heritage_site_category hsc
ON hs.heritage_site_category_id = hsc.category_id
WHERE ca.country_area_id = 47
OR ca.country_area_id = 48
OR ca.country_area_id = 49
ORDER BY rg.region_name, srg.sub_region_name, ca.country_area_name, hs.site_name; | [
"chenyipeng@chendeMacBook-Pro.local"
] | chenyipeng@chendeMacBook-Pro.local |
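The file above stores raw SQL under a .py extension, so it is meant to be read as text rather than imported. A minimal sketch of running it through Django's database connection (assuming this executes inside the configured heritagesites project):

from django.db import connection

with open('heritagesites/SELECT ca.country_area_name, hs.site_name.py') as f:
    sql = f.read()

with connection.cursor() as cursor:
    cursor.execute(sql)
    rows = cursor.fetchall()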
9e3769ed23384bf504e6dc9a8a92c51ee8651186 | d5ad13232e3f1ced55f6956bc4cbda87925c8085 | /cc_mcc_seq/coverage/coverage_stat/1_coverage_stat.py | 495a50838e4e7cb74a40295b588b592c6c6f5ef4 | [] | no_license | arvin580/SIBS | c0ba9a8a41f59cb333517c286f7d80300b9501a2 | 0cc2378bf62359ec068336ea4de16d081d0f58a4 | refs/heads/master | 2021-01-23T21:57:35.658443 | 2015-04-09T23:11:34 | 2015-04-09T23:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | chr=['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY']
for sample in range(3,13) :
for ch in chr :
inFile=open('../fudan1.coverage')
list1=list()
for line in inFile :
line=line.strip()
fields=line.split('\t')
            # Exact match: .find() would also put 'chr10'-'chr19' rows into the
            # 'chr1' file (and 'chr20'-'chr22' into 'chr2').
            if fields[0]==ch :
list1.append(fields[sample])
ouFile=open('fudan1.coverage.'+ch+'.'+str(sample-3),'w')
for item in list1 :
ouFile.write(item+'\n')
ouFile.close()
inFile.close()
| [
"sunhanice@gmail.com"
] | sunhanice@gmail.com |
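The splitter above re-opens and re-scans ../fudan1.coverage once per (sample, chromosome) pair, i.e. 240 passes. A single-pass sketch producing the same per-chromosome, per-sample files with the original naming scheme (note it emits every chromosome present in the file, where the original restricts itself to the chr list):

from collections import defaultdict

rows_by_chrom = defaultdict(list)
with open('../fudan1.coverage') as in_file:
    for line in in_file:
        fields = line.strip().split('\t')
        rows_by_chrom[fields[0]].append(fields)

for ch, rows in rows_by_chrom.items():
    for sample in range(3, 13):
        with open('fudan1.coverage.' + ch + '.' + str(sample - 3), 'w') as ou_file:
            for fields in rows:
                ou_file.write(fields[sample] + '\n')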
5b1d5b593511fa873e79c2f510b7c882a1ce7bc0 | f0b5f7e7a1737b79210300085d366ec8870a361f | /dynamic_porgramming/dynamic_programming_8-2_fibo_recursive.py | d10253229f7843ae5f69bdfa70aba50e9de038c2 | [] | no_license | cksrb1616/Algorithm_past_papers | 135f2143f466bd38f1b770de16fe88eeb086c1df | 193b3e41165c577d944b6e60f0c255141882170f | refs/heads/master | 2023-04-02T21:31:28.194694 | 2021-03-29T16:26:40 | 2021-03-29T16:26:40 | 201,218,237 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | #Using memoization
#Top_down method : the big problem is solved once all of its smaller subproblems have been solved
#Bottom_up method : solve the small problems one by one, reusing already-solved ones to solve the next in turn
#Dynamic -> Bottom Up (note: the memoized recursion below is the top-down form; an iterative bottom-up sketch follows this file)
d = [0]*100
def fibo(x):
if x == 1 or x == 2:
return 1
if d[x] != 0:
return d[x]
d[x] = fibo(x-1) + fibo(x-2)
return d[x]
print(fibo(99)) | [
"53897355+cksrb1616@users.noreply.github.com"
] | 53897355+cksrb1616@users.noreply.github.com |
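The header comments of the file above describe the bottom-up approach, but fibo() itself is the memoized top-down version. An equivalent bottom-up sketch, filling the table iteratively so no recursion (and no recursion-depth limit) is involved:

d = [0] * 100
d[1] = d[2] = 1
for i in range(3, 100):
    d[i] = d[i - 1] + d[i - 2]
print(d[99])  # same value as fibo(99)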
aa586b92e81d84b242d0d337f70ca4cbca0dcb8a | 1e1ae51914c9338f33096d85e4d2edfb4fbf835d | /make_check_list.py | 6989ad6fdc7f385acd78d9b3e5ef8638216531a9 | [
"Apache-2.0"
] | permissive | clinfo/2021_Patients_Transport | b4c98ebb1d292acf116b01c77a602a3a1e80c5f9 | 4f14cd0b1350eca98dfbe9d4ae530fda34759811 | refs/heads/main | 2023-02-27T23:04:54.906983 | 2021-02-12T07:40:30 | 2021-02-12T07:40:30 | 336,447,946 | 0 | 1 | Apache-2.0 | 2021-02-10T09:04:43 | 2021-02-06T03:37:24 | Python | UTF-8 | Python | false | false | 6,074 | py | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import glob
filnames = glob.glob('data_transport/u_*')
filnames.sort()
dates = [filname.split('.')[-2].split('_')[-1] for filname in filnames]
gammas = [filname.split('.')[-2].split('_')[-2] for filname in filnames]
opt_types = [filname.split('.')[-2].split('_')[-3] for filname in filnames]
df_severe_beds = pd.read_csv('data_Koro/severe_beds.csv',index_col=0)
df_u = pd.DataFrame(df_severe_beds['japan_prefecture_code'])
df_u['都道府県名'] = df_severe_beds['都道府県名']
df_i = df_u.copy()
for i in range(len(filnames)):
uv = np.load(filnames[i])
T = uv.shape[0]
N = int(np.sqrt(uv.shape[1]))
U = uv.reshape(T,N,N)
df_u[opt_types[i] + '_' + gammas[i] + '_' + dates[i]] = U[:14,:,:].sum(axis = 0).sum(1)
df_i[opt_types[i] + '_' + gammas[i] + '_' + dates[i]] = U[:14,:,:].sum(axis = 0).sum(0)
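# Axis convention assumed by the two sums above (inferred from the output file
# names, not documented in the repo): U[t, i, j] is the flow from prefecture i
# to prefecture j on day t, so U[:14].sum(axis=0).sum(1) totals each
# prefecture's exports and .sum(0) its imports over the first 14 days.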
df_u.to_csv('resultC_transport_strategy/summary/export_num.csv')
df_i.to_csv('resultC_transport_strategy/summary/import_num.csv')
dirC = 'resultC_transport_strategy/'  # matches the summary output directory above ('tranport' was a typo)
dirnames = (df_severe_beds['japan_prefecture_code'] + df_severe_beds['都道府県名']).values
for i in range(len(filnames)):
uv = np.load(filnames[i])
T = uv.shape[0]
N = int(np.sqrt(uv.shape[1]))
U = uv.reshape(T,N,N)
png_links = [dirC + dirnames[j] + '/transport_{0}_{1}_{2}.png\n'.format(opt_types[i],gammas[i],dates[i]) for j in range(df_u.shape[0]) if U[:14,:,:].sum(axis = 0).sum(1)[j]>0]
path = 'slide_png_list/transport_{0}_{1}_{2}.txt'.format(opt_types[i],gammas[i],dates[i])
with open(path, mode='w') as f:
f.writelines(png_links)
if dates[i] == max(dates) and opt_types[i] == 'mean':
path = 'slide_png_list/transport_{0}.txt'.format(gammas[i])
with open(path, mode='w') as f:
f.writelines(png_links)
# ################################### Hospital ######################################
filnames = glob.glob('data_hospital_transport/u_*')
filnames.sort()
dates = [filname.split('.')[-2].split('_')[-1] for filname in filnames]
gammas = [filname.split('.')[-2].split('_')[-2] for filname in filnames]
opt_types = [filname.split('.')[-2].split('_')[-3] for filname in filnames]
df_severe_beds = pd.read_csv('data_Koro/hospital_beds.csv',index_col=0)
df_u = pd.DataFrame(df_severe_beds['japan_prefecture_code'])
df_u['都道府県名'] = df_severe_beds['都道府県名']
df_i = df_u.copy()
for i in range(len(filnames)):
uv = np.load(filnames[i])
T = uv.shape[0]
N = int(np.sqrt(uv.shape[1]))
U = uv.reshape(T,N,N)
df_u[opt_types[i] + '_' + gammas[i] + '_' + dates[i]] = U[:14,:,:].sum(axis = 0).sum(1)
df_i[opt_types[i] + '_' + gammas[i] + '_' + dates[i]] = U[:14,:,:].sum(axis = 0).sum(0)
df_u.to_csv('resultD_transport_strategy_hospital/summary/export_num.csv')
df_i.to_csv('resultD_transport_strategy_hospital/summary/import_num.csv')
dirD = 'resultD_transport_strategy_hospital/'
dirnames = (df_severe_beds['japan_prefecture_code'] + df_severe_beds['都道府県名']).values
for i in range(len(filnames)):
uv = np.load(filnames[i])
T = uv.shape[0]
N = int(np.sqrt(uv.shape[1]))
U = uv.reshape(T,N,N)
png_links = [dirD + dirnames[j] + '/transport_{0}_{1}_{2}.png\n'.format(opt_types[i],gammas[i],dates[i]) for j in range(df_u.shape[0]) if U[:14,:,:].sum(axis = 0).sum(1)[j]>0]
path = 'slide_png_hospital/transport_{0}_{1}_{2}.txt'.format(opt_types[i],gammas[i],dates[i])
with open(path, mode='w') as f:
f.writelines(png_links)
if dates[i] == max(dates) and opt_types[i] == 'mean':
path = 'slide_png_hospital/transport_{0}.txt'.format(gammas[i])
with open(path, mode='w') as f:
f.writelines(png_links)
# ################################### over_beds_date ######################################
filenames = glob.glob('data_severe/x_*')
filenames.sort()
forecast_dates = [filename.split('_')[-1].split('.')[0] for filename in filenames]
df_severe = pd.read_csv("data_severe/x_" +max(forecast_dates) + ".csv",index_col=0)
df_hospital = pd.read_csv("data_hospital/x_" +max(forecast_dates) + ".csv",index_col=0)
df_hospital_beds = pd.read_csv('data_Koro/hospital_beds.csv',index_col=0)
df_severe_beds = pd.read_csv('data_Koro/severe_beds.csv',index_col=0)
new_time_google = max(forecast_dates)
new_time_Koro = max(df_hospital_beds.columns[2:])
N = df_severe.shape[1]
severe_beds = df_severe_beds[new_time_Koro].values
hospital_beds = df_hospital_beds[new_time_Koro].values
gamma = 1
over_severe_beds100 = []
over_hospital_beds100 = []
for i in range(N):
if sum(df_severe[df_severe.columns[i]] > gamma * severe_beds[i]) !=0:
over_severe_beds100.append( min(df_severe.index[df_severe[df_severe.columns[i]] > gamma * severe_beds[i]]))
else:
over_severe_beds100.append(None)
if sum(df_hospital[df_hospital.columns[i]] > gamma * hospital_beds[i]) !=0:
over_hospital_beds100.append( min(df_hospital.index[df_hospital[df_hospital.columns[i]] > gamma * hospital_beds[i]]))
else:
over_hospital_beds100.append(None)
gamma = 0.8
over_severe_beds080 = []
over_hospital_beds080 = []
for i in range(N):
if sum(df_severe[df_severe.columns[i]] > gamma * severe_beds[i]) !=0:
over_severe_beds080.append( min(df_severe.index[df_severe[df_severe.columns[i]] > gamma * severe_beds[i]]))
else:
over_severe_beds080.append(None)
if sum(df_hospital[df_hospital.columns[i]] > gamma * hospital_beds[i]) !=0:
over_hospital_beds080.append( min(df_hospital.index[df_hospital[df_hospital.columns[i]] > gamma * hospital_beds[i]]))
else:
over_hospital_beds080.append(None)
df = df_hospital_beds[['japan_prefecture_code','都道府県名']].copy()
df['over_severe_beds080'] = over_severe_beds080
df['over_severe_beds100'] = over_severe_beds100
df['over_hospital_beds080'] = over_hospital_beds080
df['over_hospital_beds100'] = over_hospital_beds100
df.to_csv('slide_over_beds/over_list_'+new_time_google +'.csv')
| [
"rokitaniplus1@gmail.com"
] | rokitaniplus1@gmail.com |
a4aa0917081417465a81c6bdb13eed8cd38f6553 | ffba2f65351e14e89b7c195c0767587367f22fda | /venv/bin/easy_install-2.7 | 71b48de2aa2bb3f2791931a97955761f306a9858 | [] | no_license | vitor-araujo/FlaskSimpleBlog | 8e4eba054017dbf4f909047165562a1a07d152c5 | d6f9b7b73efe54b4d62c104ae14a675a2502ff39 | refs/heads/master | 2021-09-01T04:13:04.278745 | 2017-12-24T17:40:03 | 2017-12-24T17:40:03 | 115,278,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | 7 | #!/home/vitor/flaskblog/venv/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
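    # Strip the Windows launcher suffix ('-script.pyw' or '.exe'), if any, from
    # argv[0] so setuptools sees the command under its plain script name.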
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"araujo.vitorgabriel@gmail.com"
] | araujo.vitorgabriel@gmail.com |
4830116877968ecd4ebc6659f852aff6af73f637 | 69d05e7788a6facf4e5777c4b3819fa43d0f1df0 | /gui/brain_labeling_gui_thalamus_v2.py | e8041a84ea17c4b97c081da7271935a7fb7af6a9 | [] | no_license | xl1393/MouseBrainAtlas | e238d06a5b51ca037a25e3f79fb422f7a7367222 | 1e875f9f872d38c916de3e460c1e88f2ef07fa32 | refs/heads/master | 2021-05-09T05:14:40.721154 | 2018-01-27T09:24:46 | 2018-01-27T09:24:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55,987 | py | #! /usr/bin/env python
import sys
import os
from datetime import datetime
import time
import json
from collections import defaultdict, OrderedDict
import numpy as np
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from shapely.geometry import Polygon as ShapelyPolygon
from shapely.geometry import Point as ShapelyPoint
from shapely.geometry import LineString as ShapelyLineString
from shapely.geometry import LinearRing as ShapelyLineRing
from skimage.color import label2rgb
from pandas import DataFrame
sys.path.append(os.environ['REPO_DIR'] + '/utilities')
from utilities2015 import *
from data_manager import DataManager
from metadata import *
from annotation_utilities import *
from gui_utilities import *
from ui.ui_BrainLabelingGui_v15 import Ui_BrainLabelingGui
from widgets.custom_widgets import *
from widgets.SignalEmittingItems import *
from widgets.DrawableZoomableBrowsableGraphicsScene_ForLabeling_v2 import DrawableZoomableBrowsableGraphicsScene_ForLabeling
from DataFeeder import ImageDataFeeder_v2, VolumeResectionDataFeeder
######################################################################
MARKER_COLOR_CHAR = 'w'
#######################################################################
class ReadRGBComponentImagesThread(QThread):
def __init__(self, stack, sections):
QThread.__init__(self)
self.stack = stack
self.sections = sections
def __del__(self):
self.wait()
def run(self):
        # Bug fix: this worker has no `gscenes` attribute, so iterate over the
        # section list handed to the constructor; skipping sections that are
        # already cached has to happen in the slot that receives
        # `component_image_loaded`.
        for sec in self.sections:
try:
fp = DataManager.get_image_filepath_v2(stack=self.stack, section=sec, prep_id=2, resol='lossless', version='contrastStretchedBlue')
except Exception as e:
sys.stderr.write('Section %d is invalid: %s\n' % (sec, str(e)))
continue
if not os.path.exists(fp):
sys.stderr.write('Image %s does not exist.\n' % fp)
continue
qimage = QImage(fp)
self.emit(SIGNAL('component_image_loaded(QImage, int)'), qimage, sec)
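# The worker communicates with the GUI through PyQt4's old-style string-based
# signals: the emit above pairs with a connect such as
#
#   self.connect(thread, SIGNAL('component_image_loaded(QImage, int)'),
#                self.component_image_loaded)
#
# which BrainLabelingGUI.__init__ below contains (currently commented out).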
class BrainLabelingGUI(QMainWindow, Ui_BrainLabelingGui):
# class BrainLabelingGUI(QMainWindow, Ui_RectificationGUI):
def __init__(self, parent=None, stack=None, first_sec=None, last_sec=None, downsample=None, img_version=None, prep_id=None):
"""
Initialization of BrainLabelingGUI.
"""
# t0 = time.time()
# self.app = QApplication(sys.argv)
QMainWindow.__init__(self, parent)
self.stack = stack
self.sagittal_downsample = downsample
self.prep_id = prep_id
self.setupUi(self)
self.button_save.clicked.connect(self.save_contours)
self.button_saveMarkers.clicked.connect(self.save_markers)
self.button_saveStructures.clicked.connect(self.save_structures)
self.button_load.clicked.connect(self.load_contours)
self.button_loadMarkers.clicked.connect(self.load_markers)
self.button_loadStructures.clicked.connect(self.load_structures)
self.button_loadWarpedAtlas.clicked.connect(self.load_warped_atlas_volume)
self.button_inferSide.clicked.connect(self.infer_side)
self.button_displayOptions.clicked.connect(self.select_display_options)
self.button_displayStructures.clicked.connect(self.select_display_structures)
self.lineEdit_username.returnPressed.connect(self.username_changed)
self.structure_volumes = defaultdict(dict)
# self.structure_adjustments_3d = defaultdict(list)
self.volume_cache = {}
# for ds in [8, 32]:
for ds in [32]:
try:
# self.volume_cache[ds] = DataManager.load_intensity_volume_v2(self.stack, downscale=ds, prep_id=1)
self.volume_cache[ds] = DataManager.load_intensity_volume_v2(self.stack, downscale=ds, prep_id=4)
print self.volume_cache[ds].shape
except:
sys.stderr.write('Intensity volume of downsample %d does not exist.\n' % ds)
self.splitter.setSizes([500, 500, 500])
self.splitter_2.setSizes([1000, 500])
self.sagittal_tb_gscene = DrawableZoomableBrowsableGraphicsScene_ForLabeling(id='sagittal_tb', gui=self, gview=self.sagittal_tb_gview)
self.sagittal_tb_gview.setScene(self.sagittal_tb_gscene)
self.coronal_gscene = DrawableZoomableBrowsableGraphicsScene_ForLabeling(id='coronal', gui=self, gview=self.coronal_gview)
self.coronal_gview.setScene(self.coronal_gscene)
self.horizontal_gscene = DrawableZoomableBrowsableGraphicsScene_ForLabeling(id='horizontal', gui=self, gview=self.horizontal_gview)
self.horizontal_gview.setScene(self.horizontal_gscene)
self.sagittal_gscene = DrawableZoomableBrowsableGraphicsScene_ForLabeling(id='sagittal', gui=self, gview=self.sagittal_gview)
self.sagittal_gview.setScene(self.sagittal_gscene)
self.sagittal_gscene.set_default_line_width(5)
self.sagittal_gscene.set_default_line_color('b')
self.sagittal_gscene.set_default_vertex_radius(10)
self.sagittal_gscene.set_default_vertex_color('r')
self.gscenes = {'coronal': self.coronal_gscene, 'sagittal': self.sagittal_gscene, 'horizontal': self.horizontal_gscene,
'sagittal_tb': self.sagittal_tb_gscene}
for gscene in self.gscenes.itervalues():
gscene.drawings_updated.connect(self.drawings_updated)
gscene.crossline_updated.connect(self.crossline_updated)
gscene.active_image_updated.connect(self.active_image_updated)
gscene.structure_volume_updated.connect(self.update_structure_volume)
gscene.set_structure_volumes(self.structure_volumes)
# gscene.set_drawings(self.drawings)
# from functools import partial
# self.gscenes['sagittal'].set_conversion_func_section_to_z(partial(DataManager.convert_section_to_z, stack=self.stack, z_begin=))
# self.gscenes['sagittal'].set_conversion_func_z_to_section(partial(DataManager.convert_z_to_section, stack=self.stack))
##################
# self.slider_downsample.valueChanged.connect(self.downsample_factor_changed)
###################
self.contextMenu_set = True
self.recent_labels = []
self.structure_names = load_structure_names(os.environ['REPO_DIR']+'/gui/structure_names.txt')
self.new_labelnames = load_structure_names(os.environ['REPO_DIR']+'/gui/newStructureNames.txt')
self.structure_names = OrderedDict(sorted(self.new_labelnames.items()) + sorted(self.structure_names.items()))
self.installEventFilter(self)
# first_sec0, last_sec0 = DataManager.load_cropbox(self.stack)[4:]
if self.prep_id == 3:
first_sec0, last_sec0 = DataManager.load_cropbox_thalamus(self.stack)[4:]
elif self.prep_id == 2:
first_sec0, last_sec0 = DataManager.load_cropbox(self.stack)[4:]
else:
            raise Exception('Unknown prep_id: %s' % self.prep_id)
self.sections = range(first_sec0, last_sec0 + 1)
image_feeder = ImageDataFeeder_v2('image feeder', stack=self.stack, sections=self.sections,
prep_id=self.prep_id, use_data_manager=False,
downscale=self.sagittal_downsample,
version=img_version)
image_feeder.set_orientation('sagittal')
self.gscenes['sagittal'].set_data_feeder(image_feeder)
self.connect(self.gscenes['sagittal'], SIGNAL("image_loaded(int)"), self.image_loaded)
# self.button_stop.clicked.connect(self.read_images_thread.terminate)
volume_resection_feeder = VolumeResectionDataFeeder('volume resection feeder', self.stack)
if hasattr(self, 'volume_cache') and self.volume_cache is not None:
coronal_volume_resection_feeder = VolumeResectionDataFeeder('coronal resection feeder', self.stack)
coronal_volume_resection_feeder.set_volume_cache(self.volume_cache)
coronal_volume_resection_feeder.set_orientation('coronal')
coronal_volume_resection_feeder.set_downsample_factor(32)
# coronal_volume_resection_feeder.set_downsample_factor(8)
print coronal_volume_resection_feeder.x_dim, coronal_volume_resection_feeder.y_dim, coronal_volume_resection_feeder.z_dim
self.gscenes['coronal'].set_data_feeder(coronal_volume_resection_feeder)
self.gscenes['coronal'].set_active_i(50)
horizontal_volume_resection_feeder = VolumeResectionDataFeeder('horizontal resection feeder', self.stack)
horizontal_volume_resection_feeder.set_volume_cache(self.volume_cache)
horizontal_volume_resection_feeder.set_orientation('horizontal')
horizontal_volume_resection_feeder.set_downsample_factor(32)
# horizontal_volume_resection_feeder.set_downsample_factor(8)
self.gscenes['horizontal'].set_data_feeder(horizontal_volume_resection_feeder)
self.gscenes['horizontal'].set_active_i(150)
sagittal_volume_resection_feeder = VolumeResectionDataFeeder('sagittal resection feeder', self.stack)
sagittal_volume_resection_feeder.set_volume_cache(self.volume_cache)
sagittal_volume_resection_feeder.set_orientation('sagittal')
sagittal_volume_resection_feeder.set_downsample_factor(32)
# sagittal_volume_resection_feeder.set_downsample_factor(8)
self.gscenes['sagittal_tb'].set_data_feeder(sagittal_volume_resection_feeder)
self.gscenes['sagittal_tb'].set_active_i(150)
try:
self.gscenes['sagittal'].set_active_section(first_sec)
except Exception as e:
# sys.stderr.write(e.message + '\n')
pass
##############################
# Internal structure volumes #
##############################
# Set the downsample factor for the structure volumes.
        # Use the finest resolution among all gviews, but no finer than 1/8 (downsample factor at least 8).
self.volume_downsample_factor = max(8, np.min([gscene.data_feeder.downsample for gscene in self.gscenes.itervalues()]))
for gscene in self.gscenes.values():
gscene.set_structure_volumes_downscale_factor(self.volume_downsample_factor)
#####################
# Load R/G/B images #
#####################
# self.read_component_images_thread = ReadRGBComponentImagesThread(stack=self.stack, sections=range(first_sec, last_sec+1))
# self.connect(self.read_component_images_thread, SIGNAL("component_image_loaded(QImage, int)"), self.component_image_loaded)
# self.read_component_images_thread.start()
#####################################
# Set global origins of each gscene #
#####################################
if self.prep_id == 3: # thalamus only
lossless_image_cropboxXY_wrt_WholebrainAlignedPadded_tbResol = DataManager.load_cropbox_thalamus(stack=self.stack)[:4]
elif self.prep_id == 2:
lossless_image_cropboxXY_wrt_WholebrainAlignedPadded_tbResol = DataManager.load_cropbox(stack=self.stack)[:4]
else:
            raise Exception('Unknown prep_id: %s' % self.prep_id)
thumbnail_image_cropbox_wrt_WholebrainAlignedPadded_tbResol = np.loadtxt(DataManager.get_intensity_volume_bbox_filepath_v2(stack=self.stack, prep_id=4))
# Record the appropriate coordinate origin for this gscene.
# The coordinate is wrt to origin of "whole brain aligned and padded volume", in thumbnail resolution (1/32 of raw).
self.image_origin_wrt_WholebrainAlignedPadded_tbResol = {}
self.image_origin_wrt_WholebrainAlignedPadded_tbResol['sagittal'] = \
np.array((lossless_image_cropboxXY_wrt_WholebrainAlignedPadded_tbResol[0],
lossless_image_cropboxXY_wrt_WholebrainAlignedPadded_tbResol[2],
0))
for gid in ['coronal', 'horizontal', 'sagittal_tb']:
self.image_origin_wrt_WholebrainAlignedPadded_tbResol[gid] = \
np.array((thumbnail_image_cropbox_wrt_WholebrainAlignedPadded_tbResol[0],
thumbnail_image_cropbox_wrt_WholebrainAlignedPadded_tbResol[2],
thumbnail_image_cropbox_wrt_WholebrainAlignedPadded_tbResol[4]))
@pyqtSlot(int)
def image_loaded(self, sec):
"""
"""
gscene_id = self.sender().id
gscene = self.gscenes[gscene_id]
if gscene.active_section == sec:
gscene.update_image()
self.statusBar().showMessage('Image %d loaded.\n' % sec)
print 'Image', sec, 'received.'
@pyqtSlot(object, int)
def component_image_loaded(self, qimage_blue, sec):
"""
Callback for when R/G/B images are loaded.
Args:
qimage (QImage): the image
sec (int): section
"""
self.gscenes['sagittal'].per_channel_pixmap_cached[sec] = qimage_blue
self.statusBar().showMessage('R/G/B images %d loaded.\n' % sec)
print 'R/G/B images', sec, 'received.'
@pyqtSlot()
def username_changed(self):
self.username = str(self.sender().text())
print 'username changed to', self.username
def get_username(self):
if not hasattr(self, 'username') or self.username is None:
username, okay = QInputDialog.getText(self, "Username", "Please enter your username:", QLineEdit.Normal, 'anon')
if not okay: return
self.username = str(username)
self.lineEdit_username.setText(self.username)
return self.username
def structure_tree_changed(self, item, column):
tree_widget = self.sender()
complete_name = str(item.text(column))
abbr = re.findall('^.*?(\((.*)\))?$', complete_name)[0][1]
check_state = item.checkState(column)
if check_state == Qt.Unchecked:
for gscene in self.gscenes.values():
for section_index, polygons in gscene.drawings.iteritems():
for polygon in polygons:
if polygon.label == abbr:
polygon.setVisible(False)
elif check_state == Qt.PartiallyChecked:
pass
elif check_state == Qt.Checked:
for gscene in self.gscenes.values():
for section_index, polygons in gscene.drawings.iteritems():
for polygon in polygons:
if polygon.label == abbr:
polygon.setVisible(True)
else:
raise Exception('Unknown check state.')
# selected_items = tree_widget.selectedItems()
# print [str(it.text(0)) for it in selected_items]
@pyqtSlot()
def select_display_structures(self):
loaded_structure_abbrs = set([convert_name_to_unsided(name_s) for name_s in self.gscenes['sagittal'].get_label_section_lookup().keys()])
structure_tree_dict = json.load(open('structure_tree.json'))
# structure_tree_dict = {name: d for name, d in structure_tree_dict_all.iteritems() if d['abbr'] in loaded_structure_names}
# structure_tree_names = {'brainstem': {}}
def structure_entry_to_str(node):
if 'abbr' in node and len(node['abbr']) > 0:
key = node['fullname'] + ' (' + node['abbr'] + ')'
else:
key = node['fullname']
return key
def get_children_names(name):
node = structure_tree_dict[name]
key = structure_entry_to_str(node)
return (key, dict([get_children_names(child_name) for child_name in node['children']]))
structure_name_tree = dict([get_children_names('brainstem')])
# extract_names(structure_tree_names['brainstem'], structure_tree_dict['brainstem'], structure_tree_dict)
# structure_tree_names = {'midbrain': ['IC', 'SC'], 'hindbrain': {'pons': ['7N', '5N'], 'medulla': ['7n', 'SCP']}}
display_structures_widget = QDialog(self)
tree_widget = QTreeWidget(display_structures_widget)
tree_widget.setHeaderLabels(['Structures'])
fill_tree_widget(tree_widget, structure_name_tree, loaded_structure_abbrs)
# tree_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
tree_widget.setMinimumHeight(1000)
tree_widget.setMinimumWidth(500)
# http://stackoverflow.com/questions/27521391/signal-a-qtreewidgetitem-toggled-checkbox
tree_widget.itemChanged.connect(self.structure_tree_changed)
dialog_layout = QVBoxLayout(display_structures_widget)
dialog_layout.addWidget(tree_widget)
display_structures_widget.setLayout(dialog_layout)
# display_structures_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
display_structures_widget.setWindowTitle("Select structures to show")
# display_structures_widget.exec_()
display_structures_widget.show()
# @pyqtSlot()
# def select_display_structures(self):
#
# display_structures_widget = QDialog(self)
#
# scroll = QScrollArea(display_structures_widget)
#
# viewport = QWidget(display_structures_widget)
# scroll.setWidget(viewport)
# scroll.setWidgetResizable(True)
#
# viewport_layout = QVBoxLayout(viewport)
#
# structure_names = set([convert_name_to_unsided(name_s) for name_s in self.gscenes['sagittal'].get_label_section_lookup().keys()])
#
# if not hasattr(self, 'show_structure'):
# self.show_structure = {}
#
# for name in sorted(structure_names):
# if name not in self.show_structure:
# self.show_structure[name] = True
#
# checkbox_showStructure = QCheckBox(name)
# checkbox_showStructure.setChecked(self.show_structure[name])
# checkbox_showStructure.stateChanged.connect(self.checkbox_showStructure_callback)
# viewport_layout.addWidget(checkbox_showStructure)
#
# viewport.setLayout(viewport_layout)
#
# dialog_layout = QVBoxLayout(display_structures_widget)
# dialog_layout.addWidget(scroll)
# display_structures_widget.setLayout(dialog_layout)
#
# display_structures_widget.setWindowTitle("Select structures to show")
# # display_structures_widget.exec_()
# display_structures_widget.show()
# def checkbox_showStructure_callback(self, checked):
# name = str(self.sender().text())
# self.show_structure[name] = bool(checked)
#
# for gscene in self.gscenes.values():
# for section_index, polygons in gscene.drawings.iteritems():
# for polygon in polygons:
# if polygon.label == name:
# polygon.setVisible(bool(checked))
@pyqtSlot()
def select_display_options(self):
if not hasattr(self, 'show_polygons'):
self.show_polygons = True
self.show_vertices = True
self.show_labels = True
self.hide_interpolated = False
display_option_widget = QDialog(self)
layout = QVBoxLayout()
checkbox_showPolygons = QCheckBox("Polygon")
checkbox_showPolygons.setChecked(self.show_polygons)
checkbox_showPolygons.stateChanged.connect(self.checkbox_showPolygons_callback)
layout.addWidget(checkbox_showPolygons)
checkbox_showVertices = QCheckBox("Vertices")
checkbox_showVertices.setChecked(self.show_vertices)
checkbox_showVertices.stateChanged.connect(self.checkbox_showVertices_callback)
layout.addWidget(checkbox_showVertices)
checkbox_showLabels = QCheckBox("Labels")
checkbox_showLabels.setChecked(self.show_labels)
checkbox_showLabels.stateChanged.connect(self.checkbox_showLabels_callback)
layout.addWidget(checkbox_showLabels)
checkbox_hideInterpolated = QCheckBox("Hide interpolated")
checkbox_hideInterpolated.setChecked(self.hide_interpolated)
checkbox_hideInterpolated.stateChanged.connect(self.checkbox_hideInterpolated_callback)
layout.addWidget(checkbox_hideInterpolated)
display_option_widget.setLayout(layout)
display_option_widget.setWindowTitle("Select display options")
display_option_widget.exec_()
@pyqtSlot(int)
def checkbox_showLabels_callback(self, checked):
self.show_labels = checked
for gscene in self.gscenes.itervalues():
for section_index, polygons in gscene.drawings.iteritems():
for polygon in polygons:
polygon.properties['label_textItem'].setVisible(checked)
@pyqtSlot(int)
def checkbox_showVertices_callback(self, checked):
self.show_vertices = checked
for gscene in self.gscenes.itervalues():
for section_index, polygons in gscene.drawings.iteritems():
for polygon in polygons:
for v in polygon.vertex_circles:
v.setVisible(checked)
@pyqtSlot(int)
def checkbox_showPolygons_callback(self, checked):
self.show_polygons = checked
for gscene in self.gscenes.itervalues():
for section_index, polygons in gscene.drawings.iteritems():
for polygon in polygons:
polygon.setVisible(checked)
@pyqtSlot(int)
def checkbox_hideInterpolated_callback(self, checked):
self.hide_interpolated = checked
for gscene in self.gscenes.itervalues():
for section_index, polygons in gscene.drawings.iteritems():
for polygon in polygons:
if polygon.type != 'confirmed':
polygon.setVisible(not bool(checked))
@pyqtSlot()
def infer_side(self):
self.gscenes['sagittal'].infer_side()
self.gscenes['sagittal_tb'].infer_side()
self.gscenes['coronal'].infer_side()
self.gscenes['horizontal'].infer_side()
#
# def merge_contour_entries(self, new_entries_df):
# """
# Merge new entries into loaded entries.
    # new_entries: dict. {polygon_id: entry}
# Return: new dict.
# """
#
# self.contour_df_loaded.update(new_entries_df)
@pyqtSlot()
def save_markers(self):
"""
Save markers.
"""
timestamp = datetime.now().strftime("%m%d%Y%H%M%S")
sagittal_markers_curr_session = self.gscenes['sagittal'].convert_drawings_to_entries(timestamp=timestamp, username=self.username, classes=['neuron'])
sagittal_markers_original = convert_annotation_v3_aligned_cropped_to_original(DataFrame(sagittal_markers_curr_session).T, stack=self.stack,
prep_id=self.prep_id)
sagittal_markers_fp = DataManager.get_annotation_thalamus_filepath(stack=self.stack, by_human=True, suffix='neurons', timestamp=timestamp)
save_hdf_v2(sagittal_markers_original, sagittal_markers_fp)
upload_to_s3(sagittal_markers_fp)
print 'Sagittal markers saved to %s.' % sagittal_markers_fp
self.statusBar().showMessage('Sagittal markers saved to %s.' % sagittal_markers_fp)
@pyqtSlot()
def save_structures(self):
"""
Save 3D structure volumes.
"""
timestamp = datetime.now().strftime("%m%d%Y%H%M%S")
import uuid
entries = {}
for (name, side), v in self.structure_volumes.iteritems():
entry = {}
# entry['volume_in_bbox'] = v['volume_in_bbox']
entry['volume_in_bbox'] = bp.pack_ndarray_str(v['volume_in_bbox'])
entry['bbox'] = v['bbox']
entry['name'] = name
entry['side'] = side
if 'edits' not in v or v['edits'] is None or len(v['edits']) == 0:
entry['edits'] = []
else:
entry['edits'] = v['edits']
# entry['edits'] = [{'type': 'creation', 'username':self.username, 'timestamp':timestamp}]
# else:
if hasattr(v, 'structure_id') and v.properties['structure_id'] is not None:
structure_id = v.properties['structure_id']
else:
structure_id = str(uuid.uuid4().fields[-1])
entries[structure_id] = entry
structure_df = DataFrame(entries).T
structure_df_fp = DataManager.get_annotation_thalamus_filepath(stack=self.stack, by_human=True, suffix='structures', timestamp=timestamp)
save_hdf_v2(structure_df, structure_df_fp)
upload_to_s3(structure_df_fp)
print '3D structures saved to %s.' % structure_df_fp
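    # The volume arrays are serialized with bloscpack before entering the
    # DataFrame, and load_structures() below reverses it. The round trip on
    # its own, with an illustrative array:
    #
    #   packed = bp.pack_ndarray_str(np.zeros((4, 4, 4), dtype=np.bool))
    #   restored = bp.unpack_ndarray_str(packed)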
@pyqtSlot()
def save_contours(self):
"""
Save structure boundaries.
"""
timestamp = datetime.now().strftime("%m%d%Y%H%M%S")
# Save sagittal
sagittal_contour_entries_curr_session = self.gscenes['sagittal'].convert_drawings_to_entries(timestamp=timestamp, username=self.username)
sagittal_contours_df_original = convert_annotation_v3_aligned_cropped_to_original(DataFrame(sagittal_contour_entries_curr_session).T,
stack=self.stack, in_downsample=self.gscenes['sagittal'].data_feeder.downsample, prep_id=self.prep_id)
sagittal_contours_df_fp = DataManager.get_annotation_thalamus_filepath(stack=self.stack, by_human=True, suffix='contours', timestamp=timestamp)
# sagittal_contours_df_fp = DataManager.get_annotation_thalamus_filepath(stack=self.stack, by_human=False, stack_m=stack_m,
# classifier_setting_m=classifier_setting_m,
# classifier_setting_f=classifier_setting_f,
# warp_setting=warp_setting, suffix='contours')
save_hdf_v2(sagittal_contours_df_original, sagittal_contours_df_fp)
upload_to_s3(sagittal_contours_df_fp)
self.statusBar().showMessage('Sagittal boundaries saved to %s.' % sagittal_contours_df_fp)
print 'Sagittal boundaries saved to %s.' % sagittal_contours_df_fp
# Save coronal
# coronal_contour_entries_curr_session = self.gscenes['coronal'].convert_drawings_to_entries(timestamp=timestamp, username=self.username)
# # print coronal_contour_entries_curr_session
# if len(coronal_contour_entries_curr_session) > 0:
# # coronal_contours_df_fp = DataManager.get_annotation_thalamus_filepath(stack=self.stack, by_human=False, stack_m=stack_m,
# # classifier_setting_m=classifier_setting_m,
# # classifier_setting_f=classifier_setting_f,
# # warp_setting=warp_setting, suffix='contours_coronal')
# coronal_contours_df_fp = DataManager.get_annotation_thalamus_filepath(stack=self.stack, by_human=True, suffix='contours_coronal', timestamp=timestamp)
# save_hdf_v2(coronal_contour_entries_curr_session, coronal_contours_df_fp)
# upload_to_s3(coronal_contours_df_fp)
# self.statusBar().showMessage('Coronal boundaries saved to %s.' % coronal_contours_df_fp)
# print 'Coronal boundaries saved to %s.' % coronal_contours_df_fp
# Save horizontal
# horizontal_contour_entries_curr_session = self.gscenes['horizontal'].convert_drawings_to_entries(timestamp=timestamp, username=self.username)
# if len(horizontal_contour_entries_curr_session) > 0:
# # horizontal_contours_df_fp = DataManager.get_annotation_thalamus_filepath(stack=self.stack, by_human=False, stack_m=stack_m,
# # classifier_setting_m=classifier_setting_m,
# # classifier_setting_f=classifier_setting_f,
# # warp_setting=warp_setting, suffix='contours_horizontal')
# horizontal_contours_df_fp = DataManager.get_annotation_thalamus_filepath(stack=self.stack, by_human=True, suffix='contours_horizontal', timestamp=timestamp)
# save_hdf_v2(horizontal_contour_entries_curr_session, horizontal_contours_df_fp)
# upload_to_s3(horizontal_contours_df_fp)
# self.statusBar().showMessage('Horizontal boundaries saved to %s.' % horizontal_contours_df_fp)
# print 'Horizontal boundaries saved to %s.' % horizontal_contours_df_fp
@pyqtSlot()
def load_markers(self):
"""
"""
markers_df_fp = str(QFileDialog.getOpenFileName(self, "Choose marker annotation file", os.path.join(ANNOTATION_THALAMUS_ROOTDIR, self.stack)))
# download_from_s3(markers_df_fp)
markers_df = load_hdf_v2(markers_df_fp)
markers_df_cropped = convert_annotation_v3_original_to_aligned_cropped(markers_df, stack=self.stack, prep_id=self.prep_id)
markers_df_cropped_sagittal = markers_df_cropped[(markers_df_cropped['orientation'] == 'sagittal') & (markers_df_cropped['downsample'] == self.gscenes['sagittal'].data_feeder.downsample)]
# for i, marker_entry in markers_df_cropped_sagittal.iterrows():
# if 'label' not in marker_entry:
# print marker_entry
self.gscenes['sagittal'].load_drawings(markers_df_cropped_sagittal, append=False, vertex_color=MARKER_COLOR_CHAR)
@pyqtSlot()
def load_warped_atlas_volume(self):
"""
Load warped atlas volumes.
        This populates the graphics scenes with contours. Note that no volumes are reconstructed from them yet.
"""
warped_atlas_volumes = DataManager.load_transformed_volume_all_known_structures(stack_m='atlasV5', stack_f=self.stack, warp_setting=17, prep_id_f=2, detector_id_f=15,
return_label_mappings=False,
name_or_index_as_key='name',
# structures=['VLL_R'])
structures=['SNC_R'])
# structures=['PBG_R'])
# structures=['6N_R'])
# structures=['3N_L', '3N_R', '4N_L', '4N_R'])
# warped_atlas_volumes = {'PBG_L': warped_atlas_volumes['PBG_L']}
# warped_atlas_volumes = {k: warped_atlas_volumes[k] for k in ['PBG_R']}
# warped_atlas_volumes = {k: warped_atlas_volumes[k] for k in ['3N_L', '3N_R', '4N_L', '4N_R']}
# warped_atlas_volumes = {k: warped_atlas_volumes[k] for k in ['SNC_R']}
# warped_atlas_volumes = {k: warped_atlas_volumes[k] for k in ['6N_R']}
from registration_utilities import get_structure_contours_from_aligned_atlas
# warped_atlas_volumes = {'6N_R': warped_atlas_volumes}
warped_atlas_contours_by_section = get_structure_contours_from_aligned_atlas(warped_atlas_volumes, volume_origin=(0,0,0),
sections=metadata_cache['valid_sections'][self.stack],
downsample_factor=32, level=.5, sample_every=1, first_sec=metadata_cache['section_limits'][self.stack][0])
import uuid
contour_entries = {}
for sec, contours_by_sided_name in warped_atlas_contours_by_section.iteritems():
for sided_name, contour in contours_by_sided_name.iteritems():
unsided_name, side, _, _ = parse_label(sided_name)
# If already loaded as edited volume, skip.
if (unsided_name, side) in self.structure_volumes:
sys.stderr.write('Structure %s,%s already loaded as edited volume. Skipped.\n' % (unsided_name, side))
continue
if len(contour) < 3:
sys.stderr.write("On sec %d, %s has only %d vertices. Skipped.\n" % (sec, sided_name, len(contour)))
continue
polygon_id = str(uuid.uuid4().fields[-1])
contour_entry = {'name': unsided_name,
'label_position': np.mean(contour, axis=0),
'side': side,
'creator': 'hector',
'time_created': datetime.now().strftime("%m%d%Y%H%M%S"),
'edits': [],
'vertices': contour,
'downsample': 32,
'type': 'intersected',
'orientation': 'sagittal',
'parent_structure': [],
'side_manually_assigned': True,
'id': polygon_id,
'class': 'contour',
'section': sec}
contour_entries[polygon_id] = contour_entry
warped_atlas_contours_df = pd.DataFrame(contour_entries).T
self.gscenes['sagittal'].load_drawings(warped_atlas_contours_df, append=False)
@pyqtSlot()
def load_structures(self):
"""
Load a 3D structure annotation file.
"""
structures_df_fp = str(QFileDialog.getOpenFileName(self, "Choose the structure annotation file", os.path.join(ANNOTATION_THALAMUS_ROOTDIR, self.stack)))
# print structures_df_fp
structure_df = load_hdf_v2(structures_df_fp)
self.structure_df_loaded = structure_df
for structure_id, structure_entry in structure_df.iterrows():
if structure_entry['side'] is None:
t = (structure_entry['name'], 'S')
else:
t = (structure_entry['name'], structure_entry['side'])
if 'edits' in structure_entry:
edits = structure_entry['edits']
else:
edits = []
self.structure_volumes[t] = {'volume_in_bbox': bp.unpack_ndarray_str(structure_entry['volume_in_bbox']).astype(np.bool),
'bbox': structure_entry['bbox'],
'edits': edits,
'structure_id': structure_id}
sys.stderr.write("Updating gscene contours for structure %s...\n" % str(t))
# for gscene_id in self.gscenes:
# self.update_structure_volume(structure_entry['name'], structure_entry['side'], use_confirmed_only=False, recompute_from_contours=False)
t = time.time()
self.update_structure_volume(structure_entry['name'], structure_entry['side'], \
use_confirmed_only=False, recompute_from_contours=False, \
affected_gscenes=['sagittal', 'sagittal_tb', 'horizontal', 'coronal'])
sys.stderr.write("Update gscene contours: %.2f seconds.\n" % (time.time()-t))
@pyqtSlot()
def load_contours(self):
"""
Load contours.
"""
sagittal_contours_df_fp = str(QFileDialog.getOpenFileName(self, "Choose sagittal contour annotation file", os.path.join(ANNOTATION_THALAMUS_ROOTDIR, self.stack)))
sagittal_contours_df = load_hdf_v2(sagittal_contours_df_fp)
sagittal_contours_df_cropped = convert_annotation_v3_original_to_aligned_cropped(sagittal_contours_df, stack=self.stack,\
out_downsample=self.gscenes['sagittal'].data_feeder.downsample,
prep_id=self.prep_id)
sagittal_contours_df_cropped_sagittal = sagittal_contours_df_cropped[(sagittal_contours_df_cropped['orientation'] == 'sagittal') & (sagittal_contours_df_cropped['downsample'] == self.gscenes['sagittal'].data_feeder.downsample)]
self.gscenes['sagittal'].load_drawings(sagittal_contours_df_cropped_sagittal, append=False)
@pyqtSlot()
def active_image_updated(self):
self.setWindowTitle('BrainLabelingGUI, stack %(stack)s, fn %(fn)s, section %(sec)d, z=%(z).2f, x=%(x).2f, y=%(y).2f' % \
dict(stack=self.stack,
sec=self.gscenes['sagittal'].active_section
if self.gscenes['sagittal'].active_section is not None else -1,
fn=metadata_cache['sections_to_filenames'][self.stack][self.gscenes['sagittal'].active_section] \
if self.gscenes['sagittal'].active_section is not None else '',
z=self.gscenes['sagittal'].active_i,
x=self.gscenes['coronal'].active_i if self.gscenes['coronal'].active_i is not None else 0,
y=self.gscenes['horizontal'].active_i if self.gscenes['horizontal'].active_i is not None else 0))
@pyqtSlot(int, int, int, str)
def crossline_updated(self, cross_x_lossless, cross_y_lossless, cross_z_lossless, source_gscene_id):
print 'GUI: update all crosses to', cross_x_lossless, cross_y_lossless, cross_z_lossless, 'from', source_gscene_id
for gscene_id, gscene in self.gscenes.iteritems():
# if gscene_id == source_gscene_id: # Skip updating the crossline if the update is triggered from this gscene
# continue
if gscene.mode == 'crossline':
try:
gscene.update_cross(cross_x_lossless, cross_y_lossless, cross_z_lossless,
origin=self.image_origin_wrt_WholebrainAlignedPadded_tbResol[gscene_id]*32.)
except Exception as e:
sys.stderr.write(str(e) + '\n')
@pyqtSlot(object)
def drawings_updated(self, polygon):
print 'Drawings updated.'
# self.save()
# sagittal_label_section_lookup = self.gscenes['sagittal'].get_label_section_lookup()
# labels = sagittal_label_section_lookup.keys()
# self.gscenes['coronal'].get_label_section_lookup()
# self.gscenes['horizontal'].get_label_section_lookup()
@pyqtSlot(str, str, bool, bool)
def update_structure_volume(self, name_u, side, use_confirmed_only, recompute_from_contours, from_gscene_id=None, affected_gscenes=None):
"""
This function is triggered by `structure_volume_updated` signal from a gscene.
- Retrieve the volumes stored internally for each view.
The volumes in different views are potentially different.
- Compute the average volume across all views.
- Use this average volume to update the stored version in each view.
Args:
use_confirmed_only (bool): If True, when reconstructing the volume, only use confirmed contours.
            recompute_from_contours (bool): If True, re-compute the volume from the contours,
            replacing the volume in `self.structure_volumes` if it already exists.
            If False, `self.structure_volumes` already stores the new volume, and
            calling this function only updates the contours.
from_gscene_id (str):
affected_gscenes (list of str):
"""
# Arguments passed in are Qt Strings. This guarantees they are python str.
name_u = str(name_u)
side = str(side)
# Reconstruct the volume for each gview.
# Only interpolate between confirmed contours.
# volumes_3view = {}
# bboxes_3view = {}
# for gscene_id, gscene in self.gscenes.iteritems():
if (name_u, side) not in self.structure_volumes or recompute_from_contours:
print 'Re-computing volume of %s from contours.' % str((name_u, side))
if from_gscene_id is None:
assert self.sender() is not None, Exception("Cannot infer the interpolation direction. Must provide from_gscene_id or call as a slot.")
from_gscene_id = self.sender().id
gscene = self.gscenes[from_gscene_id]
if use_confirmed_only:
matched_confirmed_polygons = [p for i, polygons in gscene.drawings.iteritems() for p in polygons \
if p.properties['label'] == name_u and \
p.properties['side'] == side and \
p.properties['type'] == 'confirmed']
else:
matched_confirmed_polygons = [p for i, polygons in gscene.drawings.iteritems() for p in polygons \
if p.properties['label'] == name_u and p.properties['side'] == side]
if len(matched_confirmed_polygons) < 2:
sys.stderr.write('%s: Cannot interpolate because there are fewer than two confirmed polygons for structure %s.\n' % (from_gscene_id, (name_u, side)))
return
factor_dataResol_to_volResol = float(gscene.data_feeder.downsample) / self.volume_downsample_factor
if from_gscene_id == 'sagittal' or from_gscene_id == 'sagittal_tb':
# keys are depth coordinates, wrt the origin of "whole brain aligned and padded volume", in internal structure resolution.
# values are 2D contour vertex coordinates, wrt the origin of "whole brain aligned and padded volume", in internal structure resolution.
contour_points_grouped_by_pos_wrt_WholebrainAlignedPadded_volResol = {p.properties['position_um'] / (XY_PIXEL_DISTANCE_LOSSLESS * self.volume_downsample_factor): \
[((c.scenePos().x() * gscene.data_feeder.downsample + \
self.image_origin_wrt_WholebrainAlignedPadded_tbResol[from_gscene_id][0] * 32.) / float(self.volume_downsample_factor),
(c.scenePos().y() * gscene.data_feeder.downsample + \
self.image_origin_wrt_WholebrainAlignedPadded_tbResol[from_gscene_id][1] * 32.) / float(self.volume_downsample_factor))
for c in p.vertex_circles]
for p in matched_confirmed_polygons}
for p in matched_confirmed_polygons:
print 'z =', p.properties['position_um'] / (XY_PIXEL_DISTANCE_LOSSLESS * self.volume_downsample_factor)
for c in p.vertex_circles:
print c.scenePos().x() * gscene.data_feeder.downsample, self.image_origin_wrt_WholebrainAlignedPadded_tbResol[from_gscene_id][0] * 32., self.volume_downsample_factor
print c.scenePos().y() * gscene.data_feeder.downsample, self.image_origin_wrt_WholebrainAlignedPadded_tbResol[from_gscene_id][1] * 32., self.volume_downsample_factor
volume_volResol, bbox_wrt_WholebrainAlignedPadded_volResol = interpolate_contours_to_volume(contour_points_grouped_by_pos_wrt_WholebrainAlignedPadded_volResol, 'z')
elif from_gscene_id == 'coronal':
contour_points_grouped_by_pos = {p.properties['position_um'] / (XY_PIXEL_DISTANCE_LOSSLESS * self.volume_downsample_factor): \
[(c.scenePos().y() * factor_dataResol_to_volResol,
(gscene.data_feeder.z_dim - 1 - c.scenePos().x()) * factor_dataResol_to_volResol)
for c in p.vertex_circles]
for p in matched_confirmed_polygons}
volume, bbox = interpolate_contours_to_volume(contour_points_grouped_by_pos, 'x')
# self.gscenes[gscene_id].structure_volumes[(name_u, side)] = volume, bbox
# self.structure_volumes[(name_u, side)] = interpolate_contours_to_volume(contour_points_grouped_by_pos, 'x')
elif from_gscene_id == 'horizontal':
contour_points_grouped_by_pos = {p.properties['position_um'] / (XY_PIXEL_DISTANCE_LOSSLESS * self.volume_downsample_factor): \
[(c.scenePos().x() * factor_dataResol_to_volResol,
(gscene.data_feeder.z_dim - 1 - c.scenePos().y()) * factor_dataResol_to_volResol)
for c in p.vertex_circles]
for p in matched_confirmed_polygons}
volume, bbox = interpolate_contours_to_volume(contour_points_grouped_by_pos, 'y')
# self.gscenes[gscene_id].structure_volumes[(name_u, side)] = volume, bbox
# self.gscenes[gscene_id].structure_volumes[(name_u, side)] = interpolate_contours_to_volume(contour_points_grouped_by_pos, 'y')
# self.structure_volumes[(name_u, side)] = interpolate_contours_to_volume(contour_points_grouped_by_pos, 'y')
self.structure_volumes[(name_u, side)]['volume_in_bbox'] = volume_volResol
self.structure_volumes[(name_u, side)]['bbox'] = bbox_wrt_WholebrainAlignedPadded_volResol
print 'Internal structures:', (name_u, side), self.structure_volumes[(name_u, side)]['bbox']
# volumes_3view[gscene_id] = volume
# bboxes_3view[gscene_id] = bbox
# self.structure_volumes[(name_u, side)] = \
# average_multiple_volumes(volumes_3view.values(), bboxes_3view.values())
# self.structure_volumes[(name_u, side)] = self.gscenes['sagittal'].structure_volumes[(name_u, side)]
if affected_gscenes is None:
affected_gscenes = self.gscenes.keys()
for gscene_id in affected_gscenes:
self.gscenes[gscene_id].update_drawings_from_structure_volume(name_u, side)
print '3D structure updated.'
self.statusBar().showMessage('3D structure updated.')
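    # A worked instance of the scene-to-volume conversion above, with made-up
    # numbers: a sagittal vertex at scene x = 100 px, data-feeder downsample 1,
    # image origin x = 50 (thumbnail resolution, i.e. 50 * 32 raw pixels), and
    # volume_downsample_factor = 8 gives
    #
    #   x_vol = (100 * 1 + 50 * 32.) / 8.   # = 212.5, wrt WholebrainAlignedPadded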
def eventFilter(self, obj, event):
# print obj.metaObject().className(), event.type()
if event.type() == QEvent.KeyPress:
key = event.key()
if key == Qt.Key_1:
self.gscenes['sagittal'].show_previous()
elif key == Qt.Key_2:
self.gscenes['sagittal'].show_next()
elif key == Qt.Key_3:
self.gscenes['coronal'].show_previous()
elif key == Qt.Key_4:
self.gscenes['coronal'].show_next()
elif key == Qt.Key_5:
self.gscenes['horizontal'].show_previous()
elif key == Qt.Key_6:
self.gscenes['horizontal'].show_next()
elif key == Qt.Key_7:
self.gscenes['sagittal_tb'].show_previous()
elif key == Qt.Key_8:
self.gscenes['sagittal_tb'].show_next()
elif key == Qt.Key_Space:
if not event.isAutoRepeat():
for gscene in self.gscenes.itervalues():
gscene.set_mode('crossline')
elif key == Qt.Key_F:
##################### Save structure ######################
# username = self.get_username()
timestamp = datetime.now().strftime("%m%d%Y%H%M%S")
# {(name, side): (vol, bbox, edits, id)}
new_structure_df = self.structure_df_loaded.copy()
for (name, side), structure_entry in self.structure_volumes.iteritems():
struct_id = structure_entry['structure_id']
new_structure_df.loc[struct_id]['volume_in_bbox'] = structure_entry['volume_in_bbox']
new_structure_df.loc[struct_id]['bbox'] = structure_entry['bbox']
new_structure_df.loc[struct_id]['edits'] = structure_entry['edits']
new_structure_df_fp = DataManager.get_annotation_thalamus_filepath(stack=self.stack, by_human=False, stack_m='atlasV3',
classifier_setting_m=37,
classifier_setting_f=37,
warp_setting=8, suffix='structures', timestamp=timestamp)
save_hdf_v2(new_structure_df, new_structure_df_fp)
self.statusBar().showMessage('3D structure labelings are saved to %s.\n' % new_structure_df_fp)
# ##################### Save contours #####################
# self.save()
elif key == Qt.Key_A:
print "Reconstructing selected structure volumes..."
curr_structure_label = self.gscenes['sagittal'].active_polygon.properties['label']
curr_structure_side = self.gscenes['sagittal'].active_polygon.properties['side']
self.update_structure_volume(name_u=curr_structure_label, side=curr_structure_side,
use_confirmed_only=False, recompute_from_contours=False, from_gscene_id='sagittal')
elif key == Qt.Key_P:
print "Reconstructing all structure volumes..."
structures_curr_section = [(p.properties['label'], p.properties['side'])
for p in self.gscenes['sagittal'].drawings[self.gscenes['sagittal'].active_i]]
for curr_structure_label, curr_structure_side in structures_curr_section:
self.update_structure_volume(name_u=curr_structure_label, side=curr_structure_side, use_confirmed_only=False, recompute_from_contours=False)
elif key == Qt.Key_U:
# For all structures on the current section
# structures_curr_section = [(p.properties['label'], p.properties['side'])
# for p in self.gscenes['sagittal'].drawings[self.gscenes['sagittal'].active_i]]
# for curr_structure_label, curr_structure_side in structures_curr_section:
curr_structure_label = self.gscenes['sagittal'].active_polygon.properties['label']
curr_structure_side = self.gscenes['sagittal'].active_polygon.properties['side']
name_side_tuple = (curr_structure_label, curr_structure_side)
assert name_side_tuple in self.structure_volumes, \
"structure_volumes does not have %s. Need to reconstruct this structure first." % str(name_side_tuple)
if name_side_tuple in self.gscenes['sagittal'].uncertainty_lines:
print "Remove uncertainty line"
for gscene in self.gscenes.itervalues():
gscene.hide_uncertainty_line(name_side_tuple)
else:
print "Add uncertainty line"
if curr_structure_side == 'S':
name = curr_structure_label
else:
name = curr_structure_label + '_' + curr_structure_side
current_structure_hessians = DataManager.load_confidence(stack_m='atlasV3', stack_f=self.stack, classifier_setting_m=37, classifier_setting_f=37, warp_setting=8,
param_suffix=name, what='hessians')
H, fmax = current_structure_hessians[84.64]
U, S, UT = np.linalg.svd(H)
flattest_dir = U[:,-1]
current_structure_peakwidth = DataManager.load_confidence(stack_m='atlasV3', stack_f=self.stack, classifier_setting_m=37, classifier_setting_f=37, warp_setting=8,
param_suffix=name, what='peak_radius')
pw_max_um, _, _ = current_structure_peakwidth[118.75][84.64]
len_lossless_res = pw_max_um / XY_PIXEL_DISTANCE_LOSSLESS
vol = self.structure_volumes[name_side_tuple]['volume_in_bbox']
bbox = self.structure_volumes[name_side_tuple]['bbox']
c_vol_res_gl = np.mean(np.where(vol), axis=1)[[1,0,2]] + (bbox[0], bbox[2], bbox[4])
e1 = c_vol_res_gl * self.volume_downsample_factor - len_lossless_res * flattest_dir
e2 = c_vol_res_gl * self.volume_downsample_factor + len_lossless_res * flattest_dir
for gscene in self.gscenes.itervalues():
e1_gscene = point3d_to_point2d(e1, gscene)
e2_gscene = point3d_to_point2d(e2, gscene)
print gscene.id, e1_gscene, e2_gscene
gscene.set_uncertainty_line(name_side_tuple, e1_gscene, e2_gscene)
if name_side_tuple in self.gscenes['sagittal'].structure_onscreen_messages:
self.gscenes['sagittal'].hide_structure_onscreen_message(name_side_tuple)
else:
current_structure_zscores = DataManager.load_confidence(stack_m='atlasV3', stack_f=self.stack, classifier_setting_m=37, classifier_setting_f=37, warp_setting=8,
param_suffix=name, what='zscores')
zscore, fmax, mean, std = current_structure_zscores[118.75]
print str((zscore, fmax, mean, std)), np.array(e1_gscene + e2_gscene)/2
self.gscenes['sagittal'].set_structure_onscreen_message(name_side_tuple, "zscore = %.2f" % zscore, (e1_gscene + e2_gscene)/2)
elif event.type() == QEvent.KeyRelease:
key = event.key()
if key == Qt.Key_Space:
if not event.isAutoRepeat():
for gscene in self.gscenes.itervalues():
gscene.set_mode('idle')
return False
def point3d_to_point2d(pt3d, gscene):
"""
Convert a 3D point to 2D point on a gscene.
Args:
pt3d ((3,)-ndarray): a point coordinate in lossless-resolution coordinate.
        gscene (QGraphicsScene): the graphics scene that defines the 2D view.
"""
pt3d_gscene_res = pt3d / gscene.data_feeder.downsample
if gscene.id == 'sagittal' or gscene.id == 'sagittal_tb':
pt2d = (pt3d_gscene_res[0], pt3d_gscene_res[1])
elif gscene.id == 'coronal':
pt2d = (gscene.data_feeder.z_dim - 1 - pt3d_gscene_res[2], pt3d_gscene_res[1])
elif gscene.id == 'horizontal':
pt2d = (pt3d_gscene_res[0], gscene.data_feeder.z_dim - 1 - pt3d_gscene_res[2])
return np.array(pt2d)
def load_structure_names(fn):
"""
Load structure names from a file.
Args:
fn (str): a file containing rows of structure names.
Returns:
        (dict of str: str): mapping from abbreviation to full structure name.
"""
names = {}
with open(fn, 'r') as f:
for ln in f.readlines():
abbr, fullname = ln.split('\t')
names[abbr] = fullname.strip()
return names
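# Illustrative input line for load_structure_names (tab-separated; the
# abbreviation/full-name pair below is a made-up example):
#   SNR<TAB>Substantia nigra, reticular part
# which yields {'SNR': 'Substantia nigra, reticular part'}.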
if __name__ == "__main__":
import argparse
import sys
import time
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Launch brain labeling GUI.')
parser.add_argument("stack_name", type=str, help="stack name")
parser.add_argument("-f", "--first_sec", type=int, help="first section")
parser.add_argument("-l", "--last_sec", type=int, help="last section")
parser.add_argument("-v", "--img_version", type=str, help="image version", default='jpeg')
parser.add_argument("-d", "--downsample", type=float, help="downsample", default=1)
parser.add_argument("-p", "--prep", type=int, help="preprocessing id", default=3)
args = parser.parse_args()
from sys import argv, exit
appl = QApplication(argv)
stack = args.stack_name
downsample = args.downsample
img_version = args.img_version
prep_id = args.prep
if prep_id == 3:
default_first_sec, default_last_sec = DataManager.load_cropbox_thalamus(stack)[4:]
elif prep_id == 2:
default_first_sec, default_last_sec = DataManager.load_cropbox(stack)[4:]
else:
        raise Exception('Unsupported prep_id: %d' % prep_id)
first_sec = default_first_sec if args.first_sec is None else args.first_sec
last_sec = default_last_sec if args.last_sec is None else args.last_sec
m = BrainLabelingGUI(stack=stack, first_sec=first_sec, last_sec=last_sec, downsample=downsample, img_version=img_version, prep_id=prep_id)
m.showMaximized()
m.raise_()
exit(appl.exec_())
| [
"cyc3700@gmail.com"
] | cyc3700@gmail.com |
b0378dcfcc5c4214352f38c63a94baff4b9218a4 | f88c89842dc78ac228d4ca800f655ea88032ea07 | /week12/db.py | c97c265a56d95625e6935256fad749a5119fc645 | [] | no_license | hpmalinova/Hack-Bulgaria | a210ae717c83503053eef4762aedef57891fb4b5 | 8e1ee0247f2411f4729435a117fde417d474fffc | refs/heads/master | 2021-09-28T06:08:29.820826 | 2020-07-31T12:10:05 | 2020-07-31T12:10:05 | 248,812,505 | 0 | 0 | null | 2021-09-22T19:06:33 | 2020-03-20T17:19:10 | Python | UTF-8 | Python | false | false | 940 | py | from sqlalchemy import (Column, Integer, String, CheckConstraint)
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
DB_NAME = 'urls.db'
engine = create_engine(f"sqlite:///{DB_NAME}")
Base = declarative_base()
Session = sessionmaker(bind=engine, expire_on_commit=False)
@contextmanager
def session_scope():
session = Session()
try:
yield session
session.commit()
except Exception:
session.rollback()
raise
finally:
session.close()
def create_tables():
Base.metadata.create_all(engine)
class Url(Base):
__tablename__ = "urls"
url_id = Column(Integer, primary_key=True)
url = Column(String(300), nullable=False, unique=True)
add_all_children = Column(String(5), CheckConstraint('add_all_children = "True" or add_all_children = "False"'))
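# Minimal usage sketch showing how session_scope and Url are meant to be
# combined. The URL value is a made-up example; the helper is defined here
# for illustration only and is never called by this module.
def _example_usage():
    create_tables()
    with session_scope() as session:
        session.add(Url(url='https://example.com', add_all_children='False'))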
| [
"hpmalinova@github.com"
] | hpmalinova@github.com |
0d559cf6aa2322ca323012ec31353caa7d42bdbe | 24e0d8e58b5b8ec000e8e9069d57508fffae9687 | /mihirfybsc1234.py | fc018795ec68af435289de073aa33b228371dfc7 | [] | no_license | black-organisation/python | 7eb368b0a4b4ede09aac27392d65f75236aebd8e | 0979a18fab5209b7a47efd49a1a850c6ee380f62 | refs/heads/master | 2020-07-11T21:28:31.104714 | 2019-08-27T07:59:24 | 2019-08-27T07:59:24 | 204,647,368 | 0 | 2 | null | 2019-08-27T14:18:25 | 2019-08-27T07:37:54 | Python | UTF-8 | Python | false | false | 19 | py | x=5
print(type(x))
| [
"noreply@github.com"
] | noreply@github.com |
5756879bd7614530d66100e9e2ff74d43a089301 | 850d12238e47b1acebaf826eae70ac718409db9e | /web/articles/tests/factories.py | a58eacbdb6f012926236a735120046bc7fe25385 | [] | no_license | ictkovalenko/django-market-search-pytest | 3dd2cb50902d645054e99cffad4f57cdbec6ffad | ed005a506db644f5d6b4c777ed3d0c01c9aedba7 | refs/heads/master | 2020-04-13T12:25:07.682770 | 2018-12-26T17:12:30 | 2018-12-26T17:12:30 | 163,201,294 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | import factory.faker
from django.utils.text import slugify
from web.articles.models import Article, Category
class CategoryFactory(factory.django.DjangoModelFactory):
class Meta:
model = Category
title = factory.Faker('word')
slug = factory.LazyAttribute(lambda x: slugify(x.title))
class ArticleFactory(factory.django.DjangoModelFactory):
class Meta:
model = Article
title = factory.Faker('name')
slug = factory.LazyAttribute(lambda x: slugify(x.title))
text = factory.Faker('word')
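# Minimal usage sketch (hypothetical pytest-django test; assumes the host
# project exposes the `db` fixture):
# def test_article_slug_follows_title(db):
#     article = ArticleFactory(title='Hello World')
#     assert article.slug == 'hello-world'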
| [
"ict.kovalenko@gmail.com"
] | ict.kovalenko@gmail.com |
f7ca419508798f1929999e5cb30894c192fb6861 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/70/usersdata/164/36211/submittedfiles/impedimento.py | d16709d5193363df8e0a3b6b23963d9cbe92b2b9 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | # -*- coding: utf-8 -*-
import math
# START YOUR CODE HERE
L=int(input('Enter the position of L: '))
R=int(input('Enter the position of R: '))
D=int(input('Enter the position of D: '))
if (R>50) and (L<R) and (R>D):
print('S')
else:
print('N') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
2f0c21da46fc0a27a43c211905c51a9b98e78cad | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/resources/types/campaign_criterion_simulation.py | b270fa50fac0d5f5919e4dcac9d75a76b8179a43 | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,103 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v4.common.types import simulation
from google.ads.googleads.v4.enums.types import simulation_modification_method
from google.ads.googleads.v4.enums.types import simulation_type
from google.protobuf import wrappers_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v4.resources',
marshal='google.ads.googleads.v4',
manifest={
'CampaignCriterionSimulation',
},
)
class CampaignCriterionSimulation(proto.Message):
r"""A campaign criterion simulation. Supported combinations of
advertising channel type, criterion ids, simulation type and
simulation modification method is detailed below respectively.
1. SEARCH - 30000,30001,30002 - BID_MODIFIER - UNIFORM
2. SHOPPING - 30000,30001,30002 - BID_MODIFIER - UNIFORM
3. DISPLAY - 30001 - BID_MODIFIER - UNIFORM
Attributes:
resource_name (str):
Output only. The resource name of the campaign criterion
simulation. Campaign criterion simulation resource names
have the form:
``customers/{customer_id}/campaignCriterionSimulations/{campaign_id}~{criterion_id}~{type}~{modification_method}~{start_date}~{end_date}``
campaign_id (google.protobuf.wrappers_pb2.Int64Value):
Output only. Campaign ID of the simulation.
criterion_id (google.protobuf.wrappers_pb2.Int64Value):
Output only. Criterion ID of the simulation.
type_ (google.ads.googleads.v4.enums.types.SimulationTypeEnum.SimulationType):
Output only. The field that the simulation
modifies.
modification_method (google.ads.googleads.v4.enums.types.SimulationModificationMethodEnum.SimulationModificationMethod):
Output only. How the simulation modifies the
field.
start_date (google.protobuf.wrappers_pb2.StringValue):
Output only. First day on which the
simulation is based, in YYYY-MM-DD format.
end_date (google.protobuf.wrappers_pb2.StringValue):
Output only. Last day on which the simulation
is based, in YYYY-MM-DD format.
bid_modifier_point_list (google.ads.googleads.v4.common.types.BidModifierSimulationPointList):
Output only. Simulation points if the simulation type is
BID_MODIFIER.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
campaign_id = proto.Field(
proto.MESSAGE,
number=2,
message=wrappers_pb2.Int64Value,
)
criterion_id = proto.Field(
proto.MESSAGE,
number=3,
message=wrappers_pb2.Int64Value,
)
type_ = proto.Field(
proto.ENUM,
number=4,
enum=simulation_type.SimulationTypeEnum.SimulationType,
)
modification_method = proto.Field(
proto.ENUM,
number=5,
enum=simulation_modification_method.SimulationModificationMethodEnum.SimulationModificationMethod,
)
start_date = proto.Field(
proto.MESSAGE,
number=6,
message=wrappers_pb2.StringValue,
)
end_date = proto.Field(
proto.MESSAGE,
number=7,
message=wrappers_pb2.StringValue,
)
bid_modifier_point_list = proto.Field(
proto.MESSAGE,
number=8,
oneof='point_list',
message=simulation.BidModifierSimulationPointList,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
e424b7999c84e1af9ea13099de164c0a69ff51db | 062e82e410b2e7fa7c7beca2e3f1961c56976990 | /widrow-Hoff.py | 35ca66bb0d952d452f40ce277a6b59ced17874e7 | [] | no_license | SilentFlame/Machine-Learning | 1483b780fb7d5c0c1bc6d3a2ddabafc242b8e680 | 6296f66935a5b162a1c1d9a3581b97edb790fb81 | refs/heads/master | 2021-01-12T13:31:59.195427 | 2017-10-31T08:50:24 | 2017-10-31T08:50:24 | 69,379,120 | 12 | 19 | null | 2017-10-31T15:28:40 | 2016-09-27T16:58:30 | Python | UTF-8 | Python | false | false | 3,052 | py | import matplotlib.pyplot as plt
from math import sqrt
d1=[(1,2,7), (1,8,1), (1,7,5), (1,6,3), (1,7,8), (1,5,9), (1,4,5)]
d2=[(-1,-4,-2), (-1,1,1), (-1,-1,-3), (-1,-3,2), (-1,-5,-3.25), (-1,-2,-4), (-1,-7,-1)]
dataset = [(1,2,7), (1,8,1), (1,7,5), (1,6,3), (1,7,8), (1,5,9), (1,4,5), (-1,-4,-2), (-1,1,1), (-1,-1,-3), (-1,-3,2), (-1,-5,-3.25), (-1,-2,-4), (-1,-7,-1)]
def mid_eval(a,b):
rslt = 0
for i in xrange(3):
rslt += a[i]*b[i]
return rslt
# def mod(x):
# if x < 0:
# return x*(-1)
# else:
# return x
# nk = 0
# def widrow_hoff():
# weight = [1,1,1]
# eta = 0.9
# global nk
# theta = 0.5
# iterations = 0
# while(1):
# iterations += 1
# classi = 0
# print weight
# for i in xrange(len(dataset)):
# semi = mid_eval(weight, dataset[i])
# correct = 0
# for j in xrange(3):
# if (mod(eta*(semi-1)*dataset[i][j])) < theta:
# correct+=1
# if correct == 3:
# classi += 1
# if classi != len(dataset): # if not all the points are correctly classified.
# for i in xrange(len(dataset)):
# nk += 1
# eta = eta/nk #using anneling for learning rate. n(k)=n(1)/k
# semi = mid_eval(weight, dataset[i])
# correct = 0
# for j in xrange(3):
# if mod((eta*(semi-1)*dataset[i][j])) < theta:
# correct+=1
# if correct == 3: #whatif the classification becomes true only due to change in the eta value.
# break
# for j in xrange(3):
# weight[j] = weight[j] + float(eta*(1-semi)*dataset[i][j])
# if classi == len(dataset):
# print "weights: ",
# print weight
# print "no. of Iterations: ",
# print iterations
# break
# if correct == 3:
# print "weights: ",
# print weight
# print "no. of Iterations: ",
# print iterations
# break
# print "value of K: ",
# print nk
# return weight
def comp_funcn(a):
result = 0
for i in xrange(3):
result += pow(a[i], 2)
return sqrt(result)
def widrow_hoff():
iterations = 0
    eta0 = 0.2  # initial learning rate
theta = 0.9
weight = [1,1,1]
nk = 0
while(1):
iterations += 1
nk += 1
        eta = eta0/nk  # annealed learning rate: eta(k) = eta(1)/k, not eta(k-1)/k
for i in xrange(len(dataset)):
ans = [0,0,0]
semi = mid_eval(weight, dataset[i])
for j in xrange(3):
ans[j] = float(eta*(1-semi)*dataset[i][j])
for k in xrange(3):
weight[k] += ans[k]
comp = comp_funcn(ans)
print "Updated weights: ",
print ans
print "comaparision value: ",
print comp
            # Standard LMS stopping criterion: stop once the update magnitude
            # drops below the threshold theta.
            if comp < theta:
                break
        if comp < theta:
            break
print "No. of Iterations: ",
print iterations
print "weights: ",
print weight
return weight
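# One Widrow-Hoff (LMS) step in isolation, to make the update rule explicit:
# w <- w + eta * (b - w.x) * x with target margin b = 1, matching the training
# loop above. Illustrative helper; it is not called by the code in this file.
def lms_step(weight, sample, eta):
    residual = 1 - mid_eval(weight, sample)  # b - w.x with b = 1
    return [w + eta * residual * x for w, x in zip(weight, sample)]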
def main():
a=widrow_hoff()
print a
x1 = []
x2 = []
y1 = []
y2 = []
for j in xrange(len(d1)):
x1.append(d1[j][1])
y1.append(d1[j][2])
for j in xrange(len(d2)):
x2.append((-1)*d2[j][1])
y2.append((-1)*d2[j][2])
plt.plot(x1,y1,'ro')
plt.plot(x2,y2,'bo')
m1 = a[2]/a[1]
m2 = (-1)/(m1)
c = (-1)*a[0]/a[2]
ya = m2*100+c
yb = m2*(-100)+c
plt.plot([100,-100],[ya,yb],'r')
plt.axis([-10,10,-10,10])
plt.show()
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
024f84553a9236e5be76e600a7b4b5b301bf7e7d | 131c0b883c53430fa57042f59469c7a032704dd8 | /venv/Scripts/jp.py | a22dfb2455b6fb43d79b84c9e97c15c8ea5961df | [] | no_license | gilchristc/flask_weather_app | 7f4379e35f4967e6dadd98ad7c30b245e67ffb7a | e009b5469f7a44b8cabb2f39d6967b806694ed9c | refs/heads/main | 2023-02-09T23:05:51.145819 | 2020-12-21T19:46:36 | 2020-12-21T19:46:36 | 322,373,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,757 | py | #!c:\users\chesmere\documents\projects\flask_weather_app\venv\scripts\python.exe
import sys
import json
import argparse
from pprint import pformat
import jmespath
from jmespath import exceptions
def main():
parser = argparse.ArgumentParser()
parser.add_argument('expression')
parser.add_argument('-f', '--filename',
help=('The filename containing the input data. '
'If a filename is not given then data is '
'read from stdin.'))
parser.add_argument('--ast', action='store_true',
help=('Pretty print the AST, do not search the data.'))
args = parser.parse_args()
expression = args.expression
if args.ast:
# Only print the AST
expression = jmespath.compile(args.expression)
sys.stdout.write(pformat(expression.parsed))
sys.stdout.write('\n')
return 0
if args.filename:
with open(args.filename, 'r') as f:
data = json.load(f)
else:
data = sys.stdin.read()
data = json.loads(data)
try:
sys.stdout.write(json.dumps(
jmespath.search(expression, data), indent=4, ensure_ascii=False))
sys.stdout.write('\n')
except exceptions.ArityError as e:
sys.stderr.write("invalid-arity: %s\n" % e)
return 1
except exceptions.JMESPathTypeError as e:
sys.stderr.write("invalid-type: %s\n" % e)
return 1
except exceptions.UnknownFunctionError as e:
sys.stderr.write("unknown-function: %s\n" % e)
return 1
except exceptions.ParseError as e:
sys.stderr.write("syntax-error: %s\n" % e)
return 1
if __name__ == '__main__':
sys.exit(main())
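# Illustrative invocation (made-up JSON document):
#   echo '{"foo": {"bar": 1}}' | jp.py 'foo.bar'
#   1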
| [
"57827322+gilchristc@users.noreply.github.com"
] | 57827322+gilchristc@users.noreply.github.com |
729eee46f82bfd3f50b8d900822724398481b416 | 4fe3be1c30b2af5077cda4721d697c5c366bf3c9 | /tuples_are_immutable.py | 8f5253fb5d400abbb711dfe76043e90af9502a15 | [] | no_license | boiidae/py4me | 4ae3cd9dfd6e38fe34673dabfa43af2e6cff8a82 | 7012d954cb1fa713b49f6d07cd9a1133443eb410 | refs/heads/master | 2023-03-18T20:29:47.158557 | 2021-03-04T16:19:03 | 2021-03-04T16:19:03 | 344,109,190 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | # Unlike a list, once you create a tuple, you cannot alter its contents - similar to a string
x = [9, 8, 7]
x[2] = 6
print(x)
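
# The same item assignment on a tuple raises TypeError, which is the
# immutability the note above describes.
y = (9, 8, 7)
try:
    y[2] = 6
except TypeError as err:
    print(err)  # 'tuple' object does not support item assignment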
| [
"jamesbenjaminhanson@gmail.com"
] | jamesbenjaminhanson@gmail.com |
c9edfa92fd006a044a73cfaee57dbbd855d2ee83 | a35cc0e669a6a19307640b04ce6354e758dc83b6 | /src/test/resources/integration_tests/python/basic_integration_test.py | 4a4f8578475d63cf8f37cddf9ee9482793693d24 | [
"Apache-2.0"
] | permissive | weisong44/vertx-device-mediation | 10929f42cf9f276ba9f92047e04c0be856ac4ff9 | 23a7605f875309a40c34b86fe09445569e80725b | refs/heads/master | 2021-01-20T09:01:21.747160 | 2014-06-01T06:29:02 | 2014-06-01T06:29:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,725 | py | # Simple integration test which shows tests deploying other verticles, using the Vert.x API etc
from org.vertx.testtools import VertxAssert
import vertx_tests
from core.event_bus import EventBus
import vertx
# The test methods must begin with "test"
def test_http():
# Create an HTTP server which just sends back OK response immediately
def req_handler(req):
req.response.end()
def resp_handler(resp):
VertxAssert.assertTrue(200 == resp.status_code)
# If we get here, the test is complete
# You must always call `testComplete()` at the end. Remember that testing is *asynchronous* so
# we cannot assume the test is complete by the time the test method has finished executing like
# in standard synchronous tests
VertxAssert.testComplete()
def listen_handler(err, server):
VertxAssert.assertNull(err)
# The server is listening so send an HTTP request
vertx.create_http_client().set_port(8181).get_now("/", resp_handler)
vertx.create_http_server().request_handler(req_handler).listen(8181, "0.0.0.0", listen_handler)
# This test deploys some arbitrary verticle - note that the call to testComplete() is inside the Verticle `SomeVerticle`
def test_deploy_arbitrary_verticle():
vertx.deploy_verticle('com.weisong.test.integration.java.SomeVerticle')
# This demonstrates how tests are asynchronous - the timer does not fire until 1 second later -
# which is almost certainly after the test method has completed.
def test_complete_on_timer():
def handler(timer_id):
VertxAssert.assertNotNull(timer_id)
VertxAssert.testComplete()
vertx.set_timer(1000, handler)
vertx_tests.start_tests(locals())
| [
"weisong44@gmail.com"
] | weisong44@gmail.com |
cd75e6a6f0237942d457b0bc7d92531f8d7e6677 | 6f57bd46d35bf3c1e834afb1adb695b6b2db2f1a | /venv/bin/pip3 | a5689898a7192795d87cb7f2feec0a66a261fad8 | [] | no_license | shwethabm/Disaster-management-app | c8da45ae59a72940fb31888c5789a6f6cbbf93aa | f317bd51b566ba01568c12d4db914c8944cea895 | refs/heads/master | 2020-08-05T18:16:19.362955 | 2019-10-23T14:34:35 | 2019-10-23T14:34:35 | 212,649,496 | 0 | 0 | null | 2019-10-03T18:31:31 | 2019-10-03T18:24:17 | null | UTF-8 | Python | false | false | 399 | #!/home/acer/PycharmProject/disapp/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"shwethabmenon23@gmail.com"
] | shwethabmenon23@gmail.com | |
14702de94ce9640e0715dfc9cb602737d182fc45 | 2d0585b6f2a5d832d71f0a19d2839671491da1d2 | /uniques.py | 98c42563c70ba5f354505d4c1f938072946e01cc | [] | no_license | oguzhanozdmr/uniquesTxtFile | 2237918e3f4323d303133fca5eb0f5a0b31a7796 | ca217de3b7e336982b19cf1c892f1a44bb241958 | refs/heads/master | 2023-03-20T06:08:52.444896 | 2021-03-13T07:44:00 | 2021-03-13T07:44:00 | 347,272,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,870 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: ademoguzhanozdemir
"""
import sys
import os
import re
__params = {"-infile": None,
"-outfile": None,
"-sort": False,
"-deleteblank": False,
"-casesensitive": True}
__required_params = {"-infile": True,
"-outfile": False,
"-sort": False,
"-deleteblank": False,
"-casesensitive": False}
__params_description = {"-infile": "infile path",
"-outfile": "outfile path, default name_out.txt",
"-sort": "Default False",
"-deleteblank": "Default False",
"-casesensitive": "Default True"}
__params_have = {"-infile": True,
"-outfile": True,
"-sort": False,
"-deleteblank": False,
"-casesensitive": True}
__params_type = {"-infile": "(.*?)[.]txt$",
"-outfile": "(.*?)[.]txt$",
"-casesensitive": "(^False$|^True$)"}
def __read_txt(filepath: str = None) -> list:
"""
read txt
"""
if filepath is None:
filepath = __params["-infile"]
try:
assert os.path.isfile(filepath), f"Not found {filepath}"
file = open(filepath, "r", encoding="utf-8")
txt_list = file.readlines()
file.close()
except IOError:
assert False, "Error: File does not appear to exist."
txt_list = None
finally:
assert filepath is not None, "Error"
assert filepath.strip(), "Error"
return txt_list
def __build_output_file_name(infile: str = None) -> str:
"""
import os
data1.txt
data1.txt.out.txt
0: data1
1: .txt
data1.out.txt
"""
if infile is None:
infile = __params["-infile"]
parts = os.path.splitext(infile)
# ('file1', '.txt')
only_file_name = parts[0]
only_file_ext = parts[1]
new_file_name = only_file_name + "_out" + only_file_ext
# file1 .out .txt
return new_file_name
def __write_txt(txt_list: list, out_path: str = None) -> None:
"""
listeki değerleri txt dosyasına yazıyor
Parameters
----------
txt_list : list
DESCRIPTION.
out_path : str, optional
DESCRIPTION. The default is None.
Returns
-------
None
DESCRIPTION.
"""
    if out_path is None:
        out_path = __params["-outfile"]
    if out_path is None:
        out_path = __build_output_file_name()
    assert isinstance(txt_list, list), "txt_list must be a list"
txt = "".join(txt_list)
file = open(out_path, "w", encoding="utf-8")
file.write(txt)
file.close()
def __process():
"""
    Performs the deduplication according to the parsed parameters.
Returns
-------
None.
"""
read_txt_line = __read_txt()
    if read_txt_line and not read_txt_line[-1].endswith("\n"):
        read_txt_line[-1] = read_txt_line[-1] + "\n"
new_txt_line = []
unique_txt = []
for row in read_txt_line:
if __params["-deleteblank"]:
if not row.strip():
row = row.strip()
if __params["-casesensitive"]:
if row not in new_txt_line:
new_txt_line.append(row)
else:
if row.lower() not in unique_txt:
new_txt_line.append(row)
unique_txt.append(row.lower())
if __params["-sort"]:
new_txt_line = __sort(new_txt_line)
__write_txt(new_txt_line)
print("successful")
def __partition(c_list: list, low_index: int, high_index: int):
pivot = c_list[high_index]
s_j = low_index - 1
for s_i in range(low_index, high_index):
if c_list[s_i] < pivot:
s_j += 1
c_list[s_i], c_list[s_j] = c_list[s_j], c_list[s_i]
c_list[s_j + 1], c_list[high_index] = pivot, c_list[s_j + 1]
return s_j + 1, c_list
def __quicksort(c_list: list, low_index: int, high_index: int):
if low_index < high_index:
pivot, c_list = __partition(c_list, low_index, high_index)
__quicksort(c_list, low_index, pivot - 1)
__quicksort(c_list, pivot + 1, high_index)
return c_list
def __sort(txt_lst: list) -> list:
"""
    Sorts the list.
Parameters
----------
txt_lst : list
DESCRIPTION.
Returns
-------
list
        the sorted list.
"""
if len(txt_lst) < 100:
        # TODO: split the list into 10 chunks and dispatch them
txt_lst = __quicksort(txt_lst, 0, len(txt_lst)-1)
else:
        # built-in sort (Python 3.8.1)
txt_lst = sorted(txt_lst)
return txt_lst
def __error_params(parameter_name: str) -> None:
"""
Exit system and message
Parameters
----------
parameter_name : str
DESCRIPTION.
Returns
-------
None
DESCRIPTION.
"""
print(f"{parameter_name} error value")
sys.exit(0)
def __type_change() -> None:
"""
    Converts the raw parameter values to their proper Python types.
"""
__params["-casesensitive"] = __bool_type(__params["-casesensitive"])
def __bool_type(input_value: str) -> bool:
"""
    Returns the "True"/"False" string value as a bool.
Parameters
----------
input_value : str
Returns
-------
bool
        the parsed bool value.
"""
if isinstance(input_value, bool):
return input_value
result = False
if input_value.strip().lower() == "true":
result = True
return result
def main():
"""
main define
Returns
-------
None.
"""
if len(sys.argv) == 1:
print("\n uniques params \n")
for txt in __params_description:
print(f"{txt} ->{__params_description[txt]}")
sys.exit()
for parameter in sys.argv:
if parameter[0] != "-":
continue
if parameter in __params:
if __params_have[parameter]:
index_parameter = sys.argv.index(parameter) + 1
if index_parameter < len(sys.argv):
parameter_value = sys.argv[index_parameter]
if re.search(__params_type[parameter], parameter_value):
__params[parameter] = parameter_value
else:
__error_params(parameter)
else:
__error_params(parameter)
else:
__params[parameter] = not __params[parameter]
else:
print(f"Error: {parameter} unknow")
sys.exit()
for parameter in __required_params:
if __required_params[parameter]:
if __params[parameter] is None:
print(f"{parameter} is required")
sys.exit(0)
__type_change()
__process()
if __name__ == "__main__":
main()
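# Illustrative invocation (made-up file names):
#   python uniques.py -infile data.txt -outfile data_out.txt -sort -deleteblank -casesensitive False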
| [
"ademoguzhanozdmr@gmail.com"
] | ademoguzhanozdmr@gmail.com |
1db7145016e10f8994357be4b75ac8b745805ec2 | bdaf214a6fc12d426f4edea860d12ca8222d3872 | /grafica/basic_shapes.py | 62abb1bfa797e4d875a505da45ef2ed8c6396fa9 | [] | no_license | Frocoa/PoolParty | 79c5dfca2836996e17652acd0cc53a06750f75cf | 34cc4d197c4c48f355ed18668b0d58d402f60a2f | refs/heads/main | 2023-06-24T01:29:35.191776 | 2021-07-27T00:43:32 | 2021-07-27T00:43:32 | 387,316,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,871 | py |
# coding=utf-8
"""Vertices and indices for a variety of simple shapes"""
import math
__author__ = "Daniel Calderon"
__license__ = "MIT"
# A simple class container to store vertices and indices that define a shape
class Shape:
def __init__(self, vertices, indices, textureFileName=None):
self.vertices = vertices
self.indices = indices
self.textureFileName = textureFileName
def merge(destinationShape, strideSize, sourceShape):
# current vertices are an offset for indices refering to vertices of the new shape
offset = len(destinationShape.vertices)
destinationShape.vertices += sourceShape.vertices
    destinationShape.indices += [(offset // strideSize) + index for index in sourceShape.indices]  # integer division keeps indices integral
def applyOffset(shape, stride, offset):
numberOfVertices = len(shape.vertices)//stride
for i in range(numberOfVertices):
index = i * stride
shape.vertices[index] += offset[0]
shape.vertices[index + 1] += offset[1]
shape.vertices[index + 2] += offset[2]
def scaleVertices(shape, stride, scaleFactor):
numberOfVertices = len(shape.vertices) // stride
for i in range(numberOfVertices):
index = i * stride
shape.vertices[index] *= scaleFactor[0]
shape.vertices[index + 1] *= scaleFactor[1]
shape.vertices[index + 2] *= scaleFactor[2]
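
# Minimal sketch of composing shapes with merge/applyOffset: two color quads
# (stride 6: xyz + rgb) packed into a single Shape. Illustrative helper; the
# colors and the offset are arbitrary.
def _mergeExample():
    left = createColorQuad(1.0, 0.0, 0.0)
    right = createColorQuad(0.0, 0.0, 1.0)
    applyOffset(right, 6, [1.0, 0.0, 0.0])  # shift the second quad along +x
    combined = Shape([], [])
    merge(combined, 6, left)
    merge(combined, 6, right)
    return combined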
def createAxis(length=1.0):
# Defining the location and colors of each vertex of the shape
vertices = [
# positions colors
-length, 0.0, 0.0, 0.0, 0.0, 0.0,
length, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, -length, 0.0, 0.0, 0.0, 0.0,
0.0, length, 0.0, 0.0, 1.0, 0.0,
0.0, 0.0, -length, 0.0, 0.0, 0.0,
0.0, 0.0, length, 0.0, 0.0, 1.0]
# This shape is meant to be drawn with GL_LINES,
# i.e. every 2 indices, we have 1 line.
indices = [
0, 1,
2, 3,
4, 5]
return Shape(vertices, indices)
def createRainbowTriangle():
# Defining the location and colors of each vertex of the shape
vertices = [
# positions colors
-0.5, -0.5, 0.0, 1.0, 0.0, 0.0,
0.5, -0.5, 0.0, 0.0, 1.0, 0.0,
0.0, 0.5, 0.0, 0.0, 0.0, 1.0]
# Defining connections among vertices
# We have a triangle every 3 indices specified
indices = [0, 1, 2]
return Shape(vertices, indices)
def createRainbowQuad():
# Defining the location and colors of each vertex of the shape
vertices = [
# positions colors
-0.5, -0.5, 0.0, 1.0, 0.0, 0.0,
0.5, -0.5, 0.0, 0.0, 1.0, 0.0,
0.5, 0.5, 0.0, 0.0, 0.0, 1.0,
-0.5, 0.5, 0.0, 1.0, 1.0, 1.0]
# Defining connections among vertices
# We have a triangle every 3 indices specified
indices = [
0, 1, 2,
2, 3, 0]
return Shape(vertices, indices)
def createColorQuad(r, g, b):
# Defining locations and colors for each vertex of the shape
vertices = [
# positions colors
-0.5, -0.5, 0.0, r, g, b,
0.5, -0.5, 0.0, r, g, b,
0.5, 0.5, 0.0, r, g, b,
-0.5, 0.5, 0.0, r, g, b]
# Defining connections among vertices
# We have a triangle every 3 indices specified
indices = [
0, 1, 2,
2, 3, 0]
return Shape(vertices, indices)
def createTextureQuad(nx, ny):
# Defining locations and texture coordinates for each vertex of the shape
vertices = [
# positions texture
-0.5, -0.5, 0.0, 0, ny,
0.5, -0.5, 0.0, nx, ny,
0.5, 0.5, 0.0, nx, 0,
-0.5, 0.5, 0.0, 0, 0]
# Defining connections among vertices
# We have a triangle every 3 indices specified
indices = [
0, 1, 2,
2, 3, 0]
return Shape(vertices, indices)
def createBlackCircle(N):
# First vertex at the center, white color
vertices = [0, 0, 0, 1.0, 1.0, 1.0]
indices = []
dtheta = 2 * math.pi / N
for i in range(N):
theta = i * dtheta
vertices += [
# vertex coordinates
0.5 * math.cos(theta), 0.5 * math.sin(theta), 0,
# negro
0, 0, 0]
# A triangle is created using the center, this and the next vertex
indices += [0, i, i+1]
# The final triangle connects back to the second vertex
indices += [0, N, 1]
return Shape(vertices, indices)
def createRainbowCube():
# Defining the location and colors of each vertex of the shape
vertices = [
# positions colors
-0.5, -0.5, 0.5, 1.0, 0.0, 0.0,
0.5, -0.5, 0.5, 0.0, 1.0, 0.0,
0.5, 0.5, 0.5, 0.0, 0.0, 1.0,
-0.5, 0.5, 0.5, 1.0, 1.0, 1.0,
-0.5, -0.5, -0.5, 1.0, 1.0, 0.0,
0.5, -0.5, -0.5, 0.0, 1.0, 1.0,
0.5, 0.5, -0.5, 1.0, 0.0, 1.0,
-0.5, 0.5, -0.5, 1.0, 1.0, 1.0]
# Defining connections among vertices
# We have a triangle every 3 indices specified
indices = [
0, 1, 2, 2, 3, 0,
4, 5, 6, 6, 7, 4,
4, 5, 1, 1, 0, 4,
6, 7, 3, 3, 2, 6,
5, 6, 2, 2, 1, 5,
7, 4, 0, 0, 3, 7]
return Shape(vertices, indices)
def createColorCube(r, g, b):
# Defining the location and colors of each vertex of the shape
vertices = [
# positions colors
-0.5, -0.5, 0.5, r, g, b,
0.5, -0.5, 0.5, r, g, b,
0.5, 0.5, 0.5, r, g, b,
-0.5, 0.5, 0.5, r, g, b,
-0.5, -0.5, -0.5, r, g, b,
0.5, -0.5, -0.5, r, g, b,
0.5, 0.5, -0.5, r, g, b,
-0.5, 0.5, -0.5, r, g, b]
# Defining connections among vertices
# We have a triangle every 3 indices specified
indices = [
0, 1, 2, 2, 3, 0,
4, 5, 6, 6, 7, 4,
4, 5, 1, 1, 0, 4,
6, 7, 3, 3, 2, 6,
5, 6, 2, 2, 1, 5,
7, 4, 0, 0, 3, 7]
return Shape(vertices, indices)
def createTextureCube(image_filename):
# Defining locations and texture coordinates for each vertex of the shape
vertices = [
# positions texture coordinates
# Z+
-0.5, -0.5, 0.5, 0, 1,
0.5, -0.5, 0.5, 1, 1,
0.5, 0.5, 0.5, 1, 0,
-0.5, 0.5, 0.5, 0, 0,
# Z-
-0.5, -0.5, -0.5, 0, 1,
0.5, -0.5, -0.5, 1, 1,
0.5, 0.5, -0.5, 1, 0,
-0.5, 0.5, -0.5, 0, 0,
# X+
0.5, -0.5, -0.5, 0, 1,
0.5, 0.5, -0.5, 1, 1,
0.5, 0.5, 0.5, 1, 0,
0.5, -0.5, 0.5, 0, 0
,
# X-
-0.5, -0.5, -0.5, 0, 1,
-0.5, 0.5, -0.5, 1, 1,
-0.5, 0.5, 0.5, 1, 0,
-0.5, -0.5, 0.5, 0, 0,
# Y+
-0.5, 0.5, -0.5, 0, 1,
0.5, 0.5, -0.5, 1, 1,
0.5, 0.5, 0.5, 1, 0,
-0.5, 0.5, 0.5, 0, 0,
# Y-
-0.5, -0.5, -0.5, 0, 1,
0.5, -0.5, -0.5, 1, 1,
0.5, -0.5, 0.5, 1, 0,
-0.5, -0.5, 0.5, 0, 0
]
# Defining connections among vertices
# We have a triangle every 3 indices specified
indices = [
0, 1, 2, 2, 3, 0, # Z+
7, 6, 5, 5, 4, 7, # Z-
8, 9,10,10,11, 8, # X+
15,14,13,13,12,15, # X-
19,18,17,17,16,19, # Y+
20,21,22,22,23,20] # Y-
return Shape(vertices, indices, image_filename)
def createRainbowNormalsCube():
sq3 = 0.57735027
# Defining the location and colors of each vertex of the shape
vertices = [
# positions colors normals
-0.5, -0.5, 0.5, 1.0, 0.0, 0.0, -sq3, -sq3, sq3,
0.5, -0.5, 0.5, 0.0, 1.0, 0.0, sq3, -sq3, sq3,
0.5, 0.5, 0.5, 0.0, 0.0, 1.0, sq3, sq3, sq3,
-0.5, 0.5, 0.5, 1.0, 1.0, 1.0, -sq3, sq3, sq3,
-0.5, -0.5, -0.5, 1.0, 1.0, 0.0, -sq3, -sq3, -sq3,
0.5, -0.5, -0.5, 0.0, 1.0, 1.0, sq3, -sq3, -sq3,
0.5, 0.5, -0.5, 1.0, 0.0, 1.0, sq3, sq3, -sq3,
-0.5, 0.5, -0.5, 1.0, 1.0, 1.0, -sq3, sq3, -sq3]
# Defining connections among vertices
# We have a triangle every 3 indices specified
indices = [0, 1, 2, 2, 3, 0,
4, 5, 6, 6, 7, 4,
4, 5, 1, 1, 0, 4,
6, 7, 3, 3, 2, 6,
5, 6, 2, 2, 1, 5,
7, 4, 0, 0, 3, 7]
return Shape(vertices, indices)
def createColorNormalsCube(r, g, b):
# Defining the location and colors of each vertex of the shape
vertices = [
# positions colors normals
# Z+
-0.5, -0.5, 0.5, r, g, b, 0,0,1,
0.5, -0.5, 0.5, r, g, b, 0,0,1,
0.5, 0.5, 0.5, r, g, b, 0,0,1,
-0.5, 0.5, 0.5, r, g, b, 0,0,1,
# Z-
-0.5, -0.5, -0.5, r, g, b, 0,0,-1,
0.5, -0.5, -0.5, r, g, b, 0,0,-1,
0.5, 0.5, -0.5, r, g, b, 0,0,-1,
-0.5, 0.5, -0.5, r, g, b, 0,0,-1,
# X+
0.5, -0.5, -0.5, r, g, b, 1,0,0,
0.5, 0.5, -0.5, r, g, b, 1,0,0,
0.5, 0.5, 0.5, r, g, b, 1,0,0,
0.5, -0.5, 0.5, r, g, b, 1,0,0,
# X-
-0.5, -0.5, -0.5, r, g, b, -1,0,0,
-0.5, 0.5, -0.5, r, g, b, -1,0,0,
-0.5, 0.5, 0.5, r, g, b, -1,0,0,
-0.5, -0.5, 0.5, r, g, b, -1,0,0,
# Y+
-0.5, 0.5, -0.5, r, g, b, 0,1,0,
0.5, 0.5, -0.5, r, g, b, 0,1,0,
0.5, 0.5, 0.5, r, g, b, 0,1,0,
-0.5, 0.5, 0.5, r, g, b, 0,1,0,
# Y-
-0.5, -0.5, -0.5, r, g, b, 0,-1,0,
0.5, -0.5, -0.5, r, g, b, 0,-1,0,
0.5, -0.5, 0.5, r, g, b, 0,-1,0,
-0.5, -0.5, 0.5, r, g, b, 0,-1,0
]
# Defining connections among vertices
# We have a triangle every 3 indices specified
indices = [
0, 1, 2, 2, 3, 0, # Z+
7, 6, 5, 5, 4, 7, # Z-
8, 9,10,10,11, 8, # X+
15,14,13,13,12,15, # X-
19,18,17,17,16,19, # Y+
20,21,22,22,23,20] # Y-
return Shape(vertices, indices)
def createTextureNormalsCube():
# Defining locations,texture coordinates and normals for each vertex of the shape
vertices = [
# positions tex coords normals
# Z+
-0.5, -0.5, 0.5, 0, 1, 0,0,1,
0.5, -0.5, 0.5, 1, 1, 0,0,1,
0.5, 0.5, 0.5, 1, 0, 0,0,1,
-0.5, 0.5, 0.5, 0, 0, 0,0,1,
# Z-
-0.5, -0.5, -0.5, 0, 1, 0,0,-1,
0.5, -0.5, -0.5, 1, 1, 0,0,-1,
0.5, 0.5, -0.5, 1, 0, 0,0,-1,
-0.5, 0.5, -0.5, 0, 0, 0,0,-1,
# X+
0.5, -0.5, -0.5, 0, 1, 1,0,0,
0.5, 0.5, -0.5, 1, 1, 1,0,0,
0.5, 0.5, 0.5, 1, 0, 1,0,0,
0.5, -0.5, 0.5, 0, 0, 1,0,0,
# X-
-0.5, -0.5, -0.5, 0, 1, -1,0,0,
-0.5, 0.5, -0.5, 1, 1, -1,0,0,
-0.5, 0.5, 0.5, 1, 0, -1,0,0,
-0.5, -0.5, 0.5, 0, 0, -1,0,0,
# Y+
-0.5, 0.5, -0.5, 0, 1, 0,1,0,
0.5, 0.5, -0.5, 1, 1, 0,1,0,
0.5, 0.5, 0.5, 1, 0, 0,1,0,
-0.5, 0.5, 0.5, 0, 0, 0,1,0,
# Y-
-0.5, -0.5, -0.5, 0, 1, 0,-1,0,
0.5, -0.5, -0.5, 1, 1, 0,-1,0,
0.5, -0.5, 0.5, 1, 0, 0,-1,0,
-0.5, -0.5, 0.5, 0, 0, 0,-1,0
]
# Defining connections among vertices
# We have a triangle every 3 indices specified
indices = [
0, 1, 2, 2, 3, 0, # Z+
7, 6, 5, 5, 4, 7, # Z-
8, 9,10,10,11, 8, # X+
15,14,13,13,12,15, # X-
19,18,17,17,16,19, # Y+
20,21,22,22,23,20] # Y-
return Shape(vertices, indices) | [
"Frocoa@gmail.com"
] | Frocoa@gmail.com |