| hexsha (string, length 40) | size (int64, 3 to 1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, length 3 to 972) | max_stars_repo_name (string, length 6 to 130) | max_stars_repo_head_hexsha (string, length 40 to 78) | max_stars_repo_licenses (list, length 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, length 24, nullable) | max_stars_repo_stars_event_max_datetime (string, length 24, nullable) | max_issues_repo_path (string, length 3 to 972) | max_issues_repo_name (string, length 6 to 130) | max_issues_repo_head_hexsha (string, length 40 to 78) | max_issues_repo_licenses (list, length 1 to 10) | max_issues_count (int64, 1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime (string, length 24, nullable) | max_issues_repo_issues_event_max_datetime (string, length 24, nullable) | max_forks_repo_path (string, length 3 to 972) | max_forks_repo_name (string, length 6 to 130) | max_forks_repo_head_hexsha (string, length 40 to 78) | max_forks_repo_licenses (list, length 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, length 24, nullable) | max_forks_repo_forks_event_max_datetime (string, length 24, nullable) | content (string, length 3 to 1.03M) | avg_line_length (float64, 1.13 to 941k) | max_line_length (int64, 2 to 941k) | alphanum_fraction (float64, 0 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| ad8b2d0ea0221f96e6e009e09186a0d67f3d1e7e | 2,303 | py | Python | setup.py | HaaLeo/vague-requirements-scripts | e08b66aa6c0d17718bec1deb8c694d6b8237259b | ["BSD-3-Clause"] | null | null | null | setup.py | HaaLeo/vague-requirements-scripts | e08b66aa6c0d17718bec1deb8c694d6b8237259b | ["BSD-3-Clause"] | null | null | null | setup.py | HaaLeo/vague-requirements-scripts | e08b66aa6c0d17718bec1deb8c694d6b8237259b | ["BSD-3-Clause"] | null | null | null |
# ------------------------------------------------------------------------------------------------------
# Copyright (c) Leo Hanisch. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
from os import path
from setuptools import find_packages, setup
# pylint: disable=exec-used,undefined-variable
with open(path.join(path.abspath(path.dirname(__file__)), './README.md'), 'r', encoding='utf8') as rf:
LONG_DESCRIPTION = rf.read()
# with open(path.join(path.abspath(path.dirname(__file__)), 'vague-requirements-scripts/_version.py'), 'r', encoding='utf8') as f:
# exec(f.read())
setup(
name='vaguerequirementslib', # PEP8: Packages should also have short, all-lowercase names, the use of underscores is discouraged
version='0.0.1',
packages=find_packages('scripts'),
package_dir={"": "scripts"},
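    # src-style layout note: packages are discovered under scripts/ and the root
    # package namespace is mapped there, so the installed import names do not
    # carry a scripts. prefix.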
# Include files specified in MANIFEST.in
# include_package_data=True,
description='Some helper for the vague requirements thesis.',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/HaaLeo/vague-requirements-scripts',
author='Leo Hanisch',
license='BSD 3-Clause License',
install_requires=[
'pandas',
'numpy'
],
project_urls={
'Issue Tracker': 'https://github.com/HaaLeo/vague-requirements-scripts/issues',
# 'Changelog': 'https://github.com/HaaLeo/vague-requirements-scripts/blob/master/CHANGELOG.md#changelog'
},
python_requires='>=3.6',
keywords=[
'vague',
'requirements'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Education',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
]
)
| 40.403509 | 133 | 0.605297 |
| 3c47a1b6f2794a78ae32d8f676d043a2d03d1fd2 | 5,683 | py | Python | IDS_iot_run_v1.py | mikelhpdatke/KDD99-spark-ml | cbf2d5c89bc4f19336e45e0300041a2b8a792c91 | ["Apache-2.0"] | null | null | null | IDS_iot_run_v1.py | mikelhpdatke/KDD99-spark-ml | cbf2d5c89bc4f19336e45e0300041a2b8a792c91 | ["Apache-2.0"] | null | null | null | IDS_iot_run_v1.py | mikelhpdatke/KDD99-spark-ml | cbf2d5c89bc4f19336e45e0300041a2b8a792c91 | ["Apache-2.0"] | null | null | null |
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KNeighborsClassifier
import pandas
import argparse
import json
from time import time
# from kafka import KafkaProducer
col_names = ["duration", "protocol_type", "service", "flag", "src_bytes", "dst_bytes", "land", "wrong_fragment", "urgent", "hot", "num_failed_logins", "logged_in", "num_compromised", "root_shell", "su_attempted", "num_root", "num_file_creations", "num_shells", "num_access_files", "num_outbound_cmds",
"is_host_login", "is_guest_login", "count", "srv_count", "serror_rate",
"srv_serror_rate", "rerror_rate", "srv_rerror_rate", "same_srv_rate",
"diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count",
"dst_host_same_srv_rate", "dst_host_diff_srv_rate", "dst_host_same_src_port_rate",
"dst_host_srv_diff_host_rate", "dst_host_serror_rate", "dst_host_srv_serror_rate",
"dst_host_rerror_rate", "dst_host_srv_rerror_rate", "label"]
kdd_data_10percent = pandas.read_csv(
"datasetKDD99", header=None, names=col_names)
#print kdd_data_10percent.describe()
#print kdd_data_10percent['label'].value_counts()
num_features = [
"duration", "src_bytes",
"dst_bytes", "land", "wrong_fragment", "urgent", "hot", "num_failed_logins",
"logged_in", "num_compromised", "root_shell", "su_attempted", "num_root",
"num_file_creations", "num_shells", "num_access_files", "num_outbound_cmds",
"is_host_login", "is_guest_login", "count", "srv_count", "serror_rate",
"srv_serror_rate", "rerror_rate", "srv_rerror_rate", "same_srv_rate",
"diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count",
"dst_host_same_srv_rate", "dst_host_diff_srv_rate", "dst_host_same_src_port_rate",
"dst_host_srv_diff_host_rate", "dst_host_serror_rate", "dst_host_srv_serror_rate",
"dst_host_rerror_rate", "dst_host_srv_rerror_rate"
]
features = kdd_data_10percent[num_features].astype(float)
#print features.describe()
labels = kdd_data_10percent['label'].copy()
#labels[labels!='normal.'] = 'attack.'
#print labels.value_counts()
df = features
scaled_features = MinMaxScaler().fit_transform(df.values)
scaled_features_df = pandas.DataFrame(
scaled_features, index=df.index, columns=df.columns)
# print(scaled_features_df.describe())
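# NOTE: scaled_features_df above is only built for inspection; the classifier
# below is fit on the unscaled feature columns.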
clf = KNeighborsClassifier(n_neighbors=5, algorithm='ball_tree', leaf_size=500)
#t0 = time()
clf.fit(features, labels)
#tt = time()-t0
#print "Classifier trained in {} seconds".format(round(tt,3))
""" #save model to disk
filename='knn_model.sav'
import pickle
pickle.dump(clf, open(filename,'wb'))
#load the model from disk
loaded_model = pickle.load(open(filename,'rb'))
"""
#print "nhan nhan"
#nhan = clf.predict(features)
#print "ghi nhan: "
# print(nhan[1:2])
#print "\n"
kdd_data_corrected = pandas.read_csv("kqout", header=None, names=col_names)
#print kdd_data_corrected['label'].value_counts()
#kdd_data_corrected['label'][kdd_data_corrected['label']!='normal.'] = 'attack.'
#print kdd_data_corrected['label'].value_counts()
kdd_data_corrected[num_features] = kdd_data_corrected[num_features].astype(
float)
df = kdd_data_corrected[num_features]
scaled_features = MinMaxScaler().fit_transform(df.values)
scaled_features_df = pandas.DataFrame(
scaled_features, index=df.index, columns=df.columns)
# print(scaled_features_df.describe())
features_train, features_test, labels_train, labels_test = train_test_split(
kdd_data_corrected[num_features],
kdd_data_corrected['label'],
test_size=0.8,
random_state=10)
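# Only features_test (80% of the corrected file, per test_size=0.8) is scored
# below; features_train, labels_train and labels_test are not used again.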
t0 = time()
pred = clf.predict(features_test)
tt = time() - t0
data = {}
# print "Predicted in {} seconds".format(round(tt,3))
data['predicted_time'] = round(tt, 3)
# print "nhan du doan:"
n = len(pred)
# print n
data['predicted_label_num'] = n
#print (pred[0:n])
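# Tally the predictions per attack class; i[13] collects every label that matches
# none of the 13 attack names and is reported as the "normal" count.
# A more compact per-label tally (just a sketch, not used by this script):
# from collections import Counter
# label_counts = Counter(pred)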
i = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for index in range(n):
if pred[index] == "smurf.":
i[0] += 1
elif pred[index] == "back.":
i[1] += 1
elif pred[index] == "teardrop.":
i[2] += 1
elif pred[index] == "ipsweep.":
i[3] += 1
elif pred[index] == "guess_passwd.":
i[4] += 1
elif pred[index] == "pod.":
i[5] += 1
elif pred[index] == "portsweep.":
i[6] += 1
elif pred[index] == "buffer_overflow.":
i[7] += 1
elif pred[index] == "ftp_write.":
i[8] += 1
elif pred[index] == "neptune.":
i[9] += 1
elif pred[index] == "land.":
i[10] += 1
elif pred[index] == "perl.":
i[11] += 1
elif pred[index] == "loadmodule.":
i[12] += 1
else:
i[13] += 1
# print "so luong nhan normal: %i" %(i[13])
data['normal_label_num'] = i[13]
# print "so luong nhan attack: %i" %(n-i[13])
data['attack_label_num'] = n-i[13]
data['attack_type'] = {}
# print "thong ke so luong tan cong:"
nhan = ['smurf', 'back', 'teardrop', 'ipsweep', 'guess_passwd', 'pod', 'portsweep',
'buffer_overflow', 'ftp_write', 'neptune', 'land', 'perl', 'loadmodule']
for x in range(len(i)-1):
# print nhan[x]+': '+str(i[x])
data['attack_type'][nhan[x]] = i[x]
# f = open("attack_info.txt", "w")
# for x in range(len(i)-1):
# f.write(nhan[x]+' '+str(i[x])+'\n')
# with open('log/result.txt', 'w') as outfile:
# json.dump(data, outfile)
print(data)
# producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
# value_serializer=lambda x:
# json.dumps(x).encode('utf-8'))
# producer.send('tcpdump', value=data)
# producer.flush()
print("analysis completed\n")
| 35.968354 | 301 | 0.672532 |
| ceef212817587d2d2d2ee61c9056efc67ee8ad3a | 26,670 | py | Python | two_stream2.py | EddieMG/LateTemporalModeling3DCNN | 94c87dc1d31d09bc310d0e735a2e55453976cb0d | ["MIT"] | 144 | 2020-08-06T02:18:49.000Z | 2022-03-16T23:03:56.000Z | two_stream2.py | EddieMG/LateTemporalModeling3DCNN | 94c87dc1d31d09bc310d0e735a2e55453976cb0d | ["MIT"] | 26 | 2020-08-12T01:07:48.000Z | 2022-01-11T16:28:08.000Z | two_stream2.py | EddieMG/LateTemporalModeling3DCNN | 94c87dc1d31d09bc310d0e735a2e55453976cb0d | ["MIT"] | 55 | 2020-08-13T03:00:17.000Z | 2022-03-28T06:38:08.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 3 13:15:15 2019
@author: esat
"""
import os
import time
import argparse
import shutil
import numpy as np
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from tensorboardX import SummaryWriter
from torch.optim import lr_scheduler
import video_transforms
import models
import datasets
import swats
from opt.AdamW import AdamW
from utils.model_path import rgb_3d_model_path_selection
model_names = sorted(name for name in models.__dict__
if not name.startswith("__")
and callable(models.__dict__[name]))
dataset_names = sorted(name for name in datasets.__all__)
parser = argparse.ArgumentParser(description='PyTorch Two-Stream2')
parser.add_argument('--settings', metavar='DIR', default='./datasets/settings',
help='path to dataset setting files')
parser.add_argument('--dataset', '-d', default='hmdb51',
                    choices=["ucf101", "hmdb51", "smtV2"],
                    help='dataset: ucf101 | hmdb51 | smtV2')
parser.add_argument('--arch', '-a', metavar='ARCH', default='rgb_resneXt3D64f101',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: rgb_resneXt3D64f101)')
parser.add_argument('-s', '--split', default=1, type=int, metavar='S',
help='which split of data to work on (default: 1)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
                    help='number of data loading workers (default: 2)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=4, type=int,
                    metavar='N', help='mini-batch size (default: 4)')
parser.add_argument('--iter-size', default=6, type=int,
                    metavar='I', help='iter size as in Caffe to reduce memory usage (default: 6)')
parser.add_argument('--lr', '--learning-rate', default=1e-2, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-3, type=float,
                    metavar='W', help='weight decay (default: 1e-3)')
parser.add_argument('--print-freq', default=200, type=int,
                    metavar='N', help='print frequency (default: 200)')
parser.add_argument('--save-freq', default=1, type=int,
                    metavar='N', help='save frequency (default: 1)')
parser.add_argument('--num-seg', default=1, type=int,
                    metavar='N', help='Number of segments for temporal LSTM (default: 1)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('-c', '--continue', dest='contine', action='store_true',
                    help='continue training from the best saved checkpoint')
best_prec1 = 0
best_loss = 30
HALF = False
select_according_to_best_classsification_lost = False #Otherwise select according to top1 default: False
training_continue = False
def main():
global args, best_prec1,model,writer,best_loss, length, width, height, input_size
args = parser.parse_args()
training_continue = args.contine
if '3D' in args.arch:
if 'I3D' in args.arch or 'MFNET3D' in args.arch:
if '112' in args.arch:
scale = 0.5
else:
scale = 1
else:
if '224' in args.arch:
scale = 1
else:
scale = 0.5
elif 'r2plus1d' in args.arch:
scale = 0.5
else:
scale = 1
print('scale: %.1f' %(scale))
input_size = int(224 * scale)
width = int(340 * scale)
height = int(256 * scale)
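    # scale 1.0 -> crop 224, width 340, height 256; scale 0.5 -> crop 112, width 170, height 128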
saveLocation="./checkpoint/"+args.dataset+"_"+args.arch+"_split"+str(args.split)
if not os.path.exists(saveLocation):
os.makedirs(saveLocation)
writer = SummaryWriter(saveLocation)
# create model
if args.evaluate:
print("Building validation model ... ")
model = build_model_validate()
#This line is not important, only dummy
optimizer = AdamW(model.parameters(), lr= args.lr, weight_decay=args.weight_decay)
elif training_continue:
model, start_epoch, optimizer, best_prec1 = build_model_continue()
#lr = args.lr
for param_group in optimizer.param_groups:
lr = param_group['lr']
#param_group['lr'] = lr
print("Continuing with best precision: %.3f and start epoch %d and lr: %f" %(best_prec1,start_epoch,lr))
else:
print("Building model with SGD optimizer... ")
model = build_model()
optimizer = torch.optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
dampening=0.9,
weight_decay=args.weight_decay)
start_epoch = 0
if HALF:
model.half() # convert to half precision
for layer in model.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.float()
print("Model %s is loaded. " % (args.arch))
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
# optimizer = AdamW(model.parameters(),
# lr=args.lr,
# weight_decay=args.weight_decay)
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer, 'min', patience=5, verbose=True)
#optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)
#optimizer = swats.SWATS(model.parameters(), args.lr)
print("Saving everything to directory %s." % (saveLocation))
if args.dataset=='ucf101':
dataset='./datasets/ucf101_frames'
elif args.dataset=='hmdb51':
dataset='./datasets/hmdb51_frames'
elif args.dataset=='window':
dataset='./datasets/window_frames'
else:
print("No convenient dataset entered, exiting....")
return 0
cudnn.benchmark = True
modality=args.arch.split('_')[0]
if '64f' in args.arch:
length=64
elif '32f' in args.arch:
length=32
elif '8f' in args.arch:
length=8
else:
length=16
print('length %d' %(length))
# Data transforming
if modality == "rgb" or modality == "pose":
is_color = True
scale_ratios = [1.0, 0.875, 0.75, 0.66]
if 'I3D' in args.arch:
if 'resnet' in args.arch:
clip_mean = [0.45, 0.45, 0.45] * args.num_seg * length
clip_std = [0.225, 0.225, 0.225] * args.num_seg * length
else:
clip_mean = [0.5, 0.5, 0.5] * args.num_seg * length
clip_std = [0.5, 0.5, 0.5] * args.num_seg * length
#clip_std = [0.25, 0.25, 0.25] * args.num_seg * length
elif 'MFNET3D' in args.arch:
clip_mean = [0.48627451, 0.45882353, 0.40784314] * args.num_seg * length
clip_std = [0.234, 0.234, 0.234] * args.num_seg * length
elif "3D" in args.arch:
clip_mean = [114.7748, 107.7354, 99.4750] * args.num_seg * length
clip_std = [1, 1, 1] * args.num_seg * length
elif "r2plus1d" in args.arch:
clip_mean = [0.43216, 0.394666, 0.37645] * args.num_seg * length
clip_std = [0.22803, 0.22145, 0.216989] * args.num_seg * length
elif "rep_flow" in args.arch:
clip_mean = [0.5, 0.5, 0.5] * args.num_seg * length
clip_std = [0.5, 0.5, 0.5] * args.num_seg * length
elif "slowfast" in args.arch:
clip_mean = [0.45, 0.45, 0.45] * args.num_seg * length
clip_std = [0.225, 0.225, 0.225] * args.num_seg * length
else:
clip_mean = [0.485, 0.456, 0.406] * args.num_seg * length
clip_std = [0.229, 0.224, 0.225] * args.num_seg * length
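    # Note: the "pose" branch below is unreachable; pose is already handled together with rgb above.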
elif modality == "pose":
is_color = True
scale_ratios = [1.0, 0.875, 0.75, 0.66]
clip_mean = [0.485, 0.456, 0.406] * args.num_seg
clip_std = [0.229, 0.224, 0.225] * args.num_seg
elif modality == "flow":
is_color = False
scale_ratios = [1.0, 0.875, 0.75, 0.66]
if 'I3D' in args.arch:
clip_mean = [0.5, 0.5] * args.num_seg * length
clip_std = [0.5, 0.5] * args.num_seg * length
elif "3D" in args.arch:
clip_mean = [127.5, 127.5] * args.num_seg * length
clip_std = [1, 1] * args.num_seg * length
else:
clip_mean = [0.5, 0.5] * args.num_seg * length
clip_std = [0.226, 0.226] * args.num_seg * length
elif modality == "both":
is_color = True
scale_ratios = [1.0, 0.875, 0.75, 0.66]
clip_mean = [0.485, 0.456, 0.406, 0.5, 0.5] * args.num_seg * length
clip_std = [0.229, 0.224, 0.225, 0.226, 0.226] * args.num_seg * length
else:
print("No such modality. Only rgb and flow supported.")
normalize = video_transforms.Normalize(mean=clip_mean,
std=clip_std)
if "3D" in args.arch and not ('I3D' in args.arch or 'MFNET3D' in args.arch):
train_transform = video_transforms.Compose([
video_transforms.MultiScaleCrop((input_size, input_size), scale_ratios),
video_transforms.RandomHorizontalFlip(),
video_transforms.ToTensor2(),
normalize,
])
val_transform = video_transforms.Compose([
video_transforms.CenterCrop((input_size)),
video_transforms.ToTensor2(),
normalize,
])
else:
train_transform = video_transforms.Compose([
video_transforms.MultiScaleCrop((input_size, input_size), scale_ratios),
video_transforms.RandomHorizontalFlip(),
video_transforms.ToTensor(),
normalize,
])
val_transform = video_transforms.Compose([
video_transforms.CenterCrop((input_size)),
video_transforms.ToTensor(),
normalize,
])
# data loading
train_setting_file = "train_%s_split%d.txt" % (modality, args.split)
train_split_file = os.path.join(args.settings, args.dataset, train_setting_file)
val_setting_file = "val_%s_split%d.txt" % (modality, args.split)
val_split_file = os.path.join(args.settings, args.dataset, val_setting_file)
if not os.path.exists(train_split_file) or not os.path.exists(val_split_file):
print("No split file exists in %s directory. Preprocess the dataset first" % (args.settings))
train_dataset = datasets.__dict__[args.dataset](root=dataset,
source=train_split_file,
phase="train",
modality=modality,
is_color=is_color,
new_length=length,
new_width=width,
new_height=height,
video_transform=train_transform,
num_segments=args.num_seg)
val_dataset = datasets.__dict__[args.dataset](root=dataset,
source=val_split_file,
phase="val",
modality=modality,
is_color=is_color,
new_length=length,
new_width=width,
new_height=height,
video_transform=val_transform,
num_segments=args.num_seg)
print('{} samples found, {} train samples and {} test samples.'.format(len(val_dataset)+len(train_dataset),
len(train_dataset),
len(val_dataset)))
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
prec1,prec3,lossClassification = validate(val_loader, model, criterion,modality)
return
for epoch in range(start_epoch, args.epochs):
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch,modality)
# evaluate on validation set
prec1 = 0.0
lossClassification = 0
if (epoch + 1) % args.save_freq == 0:
prec1,prec3,lossClassification = validate(val_loader, model, criterion,modality)
writer.add_scalar('data/top1_validation', prec1, epoch)
writer.add_scalar('data/top3_validation', prec3, epoch)
writer.add_scalar('data/classification_loss_validation', lossClassification, epoch)
scheduler.step(lossClassification)
# remember best prec@1 and save checkpoint
if select_according_to_best_classsification_lost:
is_best = lossClassification < best_loss
best_loss = min(lossClassification, best_loss)
else:
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if (epoch + 1) % args.save_freq == 0:
checkpoint_name = "%03d_%s" % (epoch + 1, "checkpoint.pth.tar")
if is_best:
print("Model son iyi olarak kaydedildi")
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'best_loss': best_loss,
'optimizer' : optimizer.state_dict(),
}, is_best, checkpoint_name, saveLocation)
checkpoint_name = "%03d_%s" % (epoch + 1, "checkpoint.pth.tar")
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'best_loss': best_loss,
'optimizer' : optimizer.state_dict(),
}, is_best, checkpoint_name, saveLocation)
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
def build_model():
modelLocation="./checkpoint/"+args.dataset+"_"+'_'.join(args.arch.split('_')[:-1])+"_split"+str(args.split)
modality=args.arch.split('_')[0]
if modality == "rgb":
model_path = rgb_3d_model_path_selection(args.arch)
elif modality == "pose":
model_path = rgb_3d_model_path_selection(args.arch)
elif modality == "flow":
model_path=''
if "3D" in args.arch:
if 'I3D' in args.arch:
model_path='./weights/flow_imagenet.pth'
elif '3D' in args.arch:
model_path='./weights/Flow_Kinetics_64f.pth'
elif modality == "both":
model_path=''
if args.dataset=='ucf101':
print('model path is: %s' %(model_path))
model = models.__dict__[args.arch](modelPath=model_path, num_classes=101,length=args.num_seg)
elif args.dataset=='hmdb51':
print('model path is: %s' %(model_path))
model = models.__dict__[args.arch](modelPath=model_path, num_classes=51, length=args.num_seg)
elif args.dataset=='window':
print('model path is: %s' %(model_path))
model = models.__dict__[args.arch](modelPath=model_path, num_classes=3, length=args.num_seg)
if torch.cuda.device_count() > 1:
model=torch.nn.DataParallel(model)
model = model.cuda()
return model
def build_model_validate():
modelLocation="./checkpoint/"+args.dataset+"_"+args.arch+"_split"+str(args.split)
model_path = os.path.join(modelLocation,'model_best.pth.tar')
params = torch.load(model_path)
print(modelLocation)
if args.dataset=='ucf101':
model=models.__dict__[args.arch](modelPath='', num_classes=101,length=args.num_seg)
elif args.dataset=='hmdb51':
model=models.__dict__[args.arch](modelPath='', num_classes=51,length=args.num_seg)
if torch.cuda.device_count() > 1:
model=torch.nn.DataParallel(model)
model.load_state_dict(params['state_dict'])
model.cuda()
model.eval()
return model
def build_model_continue():
modelLocation="./checkpoint/"+args.dataset+"_"+args.arch+"_split"+str(args.split)
model_path = os.path.join(modelLocation,'model_best.pth.tar')
params = torch.load(model_path)
print(modelLocation)
if args.dataset=='ucf101':
model=models.__dict__[args.arch](modelPath='', num_classes=101,length=args.num_seg)
elif args.dataset=='hmdb51':
model=models.__dict__[args.arch](modelPath='', num_classes=51,length=args.num_seg)
model.load_state_dict(params['state_dict'])
model = model.cuda()
optimizer = torch.optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
dampening=0.9,
weight_decay=args.weight_decay)
optimizer.load_state_dict(params['optimizer'])
startEpoch = params['epoch']
best_prec = params['best_prec1']
return model, startEpoch, optimizer, best_prec
def train(train_loader, model, criterion, optimizer, epoch,modality):
batch_time = AverageMeter()
lossesClassification = AverageMeter()
top1 = AverageMeter()
top3 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
optimizer.zero_grad()
loss_mini_batch_classification = 0.0
acc_mini_batch = 0.0
acc_mini_batch_top3 = 0.0
totalSamplePerIter=0
for i, (inputs, targets) in enumerate(train_loader):
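        # Reshape the flattened clip for the chosen backbone: 3D/r2plus1d/rep_flow/slowfast
        # nets take N x C x T x H x W, while 2D nets get the frames stacked along the
        # channel axis (tsm keeps the loader's layout unchanged).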
if modality == "rgb" or modality == "pose":
if "3D" in args.arch or 'r2plus1d' in args.arch or 'rep_flow' in args.arch or 'slowfast' in args.arch:
inputs=inputs.view(-1,length,3,input_size,input_size).transpose(1,2)
elif "tsm" in args.arch:
inputs=inputs
else:
inputs=inputs.view(-1,3*length,input_size,input_size)
elif modality == "flow":
if "3D" in args.arch:
inputs=inputs.view(-1,length,2,input_size,input_size).transpose(1,2)
else:
inputs=inputs.view(-1,2*length,input_size,input_size)
elif modality == "both":
inputs=inputs.view(-1,5*length,input_size,input_size)
if HALF:
inputs = inputs.cuda().half()
else:
inputs = inputs.cuda()
targets = targets.cuda()
output = model(inputs)
prec1, prec3 = accuracy(output.data, targets, topk=(1, 3))
acc_mini_batch += prec1.item()
acc_mini_batch_top3 += prec3.item()
#lossRanking = criterion(out_rank, targetRank)
lossClassification = criterion(output, targets)
lossClassification = lossClassification / args.iter_size
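        # Dividing by iter_size makes the gradients accumulated over iter_size
        # mini-batches equivalent to a single optimizer step on a larger batch.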
#totalLoss=lossMSE
totalLoss=lossClassification
#totalLoss = lossMSE + lossClassification
loss_mini_batch_classification += lossClassification.data.item()
totalLoss.backward()
totalSamplePerIter += output.size(0)
if (i+1) % args.iter_size == 0:
# compute gradient and do SGD step
optimizer.step()
optimizer.zero_grad()
lossesClassification.update(loss_mini_batch_classification, totalSamplePerIter)
top1.update(acc_mini_batch/args.iter_size, totalSamplePerIter)
top3.update(acc_mini_batch_top3/args.iter_size, totalSamplePerIter)
batch_time.update(time.time() - end)
end = time.time()
loss_mini_batch_classification = 0
acc_mini_batch = 0
acc_mini_batch_top3 = 0.0
totalSamplePerIter = 0.0
if (i+1) % args.print_freq == 0:
print('[%d] time: %.3f loss: %.4f' %(i,batch_time.avg,lossesClassification.avg))
# if (i+1) % args.print_freq == 0:
#
# print('Epoch: [{0}][{1}/{2}]\t'
# 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
# 'Classification Loss {lossClassification.val:.4f} ({lossClassification.avg:.4f})\t'
# 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
# 'Prec@3 {top3.val:.3f} ({top3.avg:.3f})\n'
# 'MSE Loss {lossMSE.val:.4f} ({lossMSE.avg:.4f})\t'
# 'Batch Similarity Loss {lossBatchSimilarity.val:.4f} ({lossBatchSimilarity.avg:.4f})\t'
# 'Sequence Similarity Loss {lossSequenceSimilarity.val:.4f} ({lossSequenceSimilarity.avg:.4f})'.format(
# epoch, i+1, len(train_loader)+1, batch_time=batch_time, lossClassification=lossesClassification,lossMSE=lossesMSE,
# lossBatchSimilarity = lossesBatchSimilarity , lossSequenceSimilarity=lossesSequenceSimilarity,
# top1=top1, top3=top3))
# print(' * Epoch: {epoch} Prec@1 {top1.avg:.3f} Prec@3 {top3.avg:.3f} Classification Loss {lossClassification.avg:.4f} MSE Loss {lossMSE.avg:.4f} '
# 'Batch Similarity Loss {lossBatchSimilarity.avg:.4f} Sequence Similarity Loss {lossSequenceSimilarity.avg:.4f} Ranking Loss {lossRanking.avg:.4f}\n'
# .format(epoch = epoch, top1=top1, top3=top3, lossClassification=lossesClassification,lossMSE=lossesMSE,
# lossBatchSimilarity = lossesBatchSimilarity ,
# lossSequenceSimilarity=lossesSequenceSimilarity),
# lossRanking = lossesRanking)
print(' * Epoch: {epoch} Prec@1 {top1.avg:.3f} Prec@3 {top3.avg:.3f} Classification Loss {lossClassification.avg:.4f}\n'
.format(epoch = epoch, top1=top1, top3=top3, lossClassification=lossesClassification))
writer.add_scalar('data/classification_loss_training', lossesClassification.avg, epoch)
writer.add_scalar('data/top1_training', top1.avg, epoch)
writer.add_scalar('data/top3_training', top3.avg, epoch)
def validate(val_loader, model, criterion,modality):
batch_time = AverageMeter()
lossesClassification = AverageMeter()
top1 = AverageMeter()
top3 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for i, (inputs, targets) in enumerate(val_loader):
if modality == "rgb" or modality == "pose":
if "3D" in args.arch or 'r2plus1d' in args.arch or 'rep_flow' in args.arch or 'slowfast' in args.arch:
inputs=inputs.view(-1,length,3,input_size,input_size).transpose(1,2)
elif "tsm" in args.arch:
inputs = inputs
else:
inputs=inputs.view(-1,3*length,input_size,input_size)
elif modality == "flow":
if "3D" in args.arch:
inputs=inputs.view(-1,length,2,input_size,input_size).transpose(1,2)
else:
inputs=inputs.view(-1,2*length,input_size,input_size)
elif modality == "both":
inputs=inputs.view(-1,5*length,input_size,input_size)
if HALF:
inputs = inputs.cuda().half()
else:
inputs = inputs.cuda()
targets = targets.cuda()
# compute output
output= model(inputs)
lossClassification = criterion(output, targets)
# measure accuracy and record loss
prec1, prec3 = accuracy(output.data, targets, topk=(1, 3))
lossesClassification.update(lossClassification.data.item(), output.size(0))
top1.update(prec1.item(), output.size(0))
top3.update(prec3.item(), output.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print(' * * Prec@1 {top1.avg:.3f} Prec@3 {top3.avg:.3f} Classification Loss {lossClassification.avg:.4f}\n'
.format(top1=top1, top3=top3, lossClassification=lossesClassification))
return top1.avg, top3.avg, lossesClassification.avg
def save_checkpoint(state, is_best, filename, resume_path):
cur_path = os.path.join(resume_path, filename)
torch.save(state, cur_path)
best_path = os.path.join(resume_path, 'model_best.pth.tar')
if is_best:
shutil.copyfile(cur_path, best_path)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| 40.717557 | 159 | 0.582377 |
| 197be7c1293a37aac440c43fdbdf5ca232800c43 | 36,526 | py | Python | Scripts/simulation/relationships/relationship_data.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | ["Apache-2.0"] | null | null | null | Scripts/simulation/relationships/relationship_data.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | ["Apache-2.0"] | null | null | null | Scripts/simulation/relationships/relationship_data.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | ["Apache-2.0"] | null | null | null |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\relationships\relationship_data.py
# Compiled at: 2020-10-20 22:12:25
# Size of source mod 2**32: 47550 bytes
from collections import defaultdict
from date_and_time import DateAndTime
from distributor.rollback import ProtocolBufferRollback
from relationships.bit_timout import BitTimeoutData
from relationships.global_relationship_tuning import RelationshipGlobalTuning
from relationships.object_relationship_track_tracker import ObjectRelationshipTrackTracker
from relationships.relationship_enums import RelationshipBitCullingPrevention, RelationshipTrackType, SentimentDurationType
from relationships.relationship_track_tracker import RelationshipTrackTracker
from relationships.sentiment_track_tracker import SentimentTrackTracker
from relationships.sim_knowledge import SimKnowledge
import event_testing, services, sims4, telemetry_helper
from sims.sim_info_lod import SimInfoLODLevel
logger = sims4.log.Logger('Relationship', default_owner='jjacobson')
TELEMETRY_GROUP_RELATIONSHIPS = 'RSHP'
TELEMETRY_HOOK_ADD_BIT = 'BADD'
TELEMETRY_HOOK_REMOVE_BIT = 'BREM'
TELEMETRY_HOOK_CHANGE_LEVEL = 'RLVL'
TELEMETRY_FIELD_TARGET_ID = 'taid'
TELEMETRY_FIELD_REL_ID = 'rlid'
TELEMETRY_FIELD_BIT_ID = 'btid'
writer = sims4.telemetry.TelemetryWriter(TELEMETRY_GROUP_RELATIONSHIPS)
class RelationshipData:
__slots__ = ('relationship', '_bits', '_bit_timeouts', '_cached_depth', 'cached_depth_dirty',
'_relationship_bit_locks', '__weakref__', '_track_tracker')
def __init__(self, relationship):
self.relationship = relationship
self._bits = {}
self._bit_timeouts = None
self._cached_depth = 0
self.cached_depth_dirty = True
self._relationship_bit_locks = None
self._track_tracker = None
@property
def bit_types(self):
return self._bits.keys()
@property
def bit_instances(self):
return self._bits.values()
@property
def depth(self):
if self.cached_depth_dirty:
self._refresh_depth_cache()
return self._cached_depth
@property
def track_tracker(self):
return self._track_tracker
def _refresh_depth_cache(self):
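        # Depth is the sum of the depths of all bits currently on this relationship;
        # it is recomputed lazily whenever cached_depth_dirty is set.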
self._cached_depth = 0
for bit in self._bits.keys():
self._cached_depth += bit.depth
self.cached_depth_dirty = False
def _sim_ids(self):
raise NotImplementedError
def track_reached_convergence(self, track_instance):
if track_instance.track_type == RelationshipTrackType.SENTIMENT:
self.remove_track(track_instance.stat_type)
else:
if track_instance.causes_delayed_removal_on_convergence:
if self.relationship is not None:
if self.relationship.can_cull_relationship():
logger.debug('{} has been marked for culling.', self)
self.relationship._create_culling_alarm()
if track_instance.is_visible:
logger.debug('Notifying client that {} has reached convergence.', self)
if self.relationship is not None:
self.relationship._notify_client()
def can_cull_relationship(self, consider_convergence, is_played_relationship):
for bit in self._bits.values():
if bit.relationship_culling_prevention == RelationshipBitCullingPrevention.ALLOW_ALL:
continue
if bit.relationship_culling_prevention == RelationshipBitCullingPrevention.PLAYED_AND_UNPLAYED:
return False
if is_played_relationship and bit.relationship_culling_prevention == RelationshipBitCullingPrevention.PLAYED_ONLY:
return False
return True
def _find_timeout_data_by_bit(self, bit):
if self._bit_timeouts:
return self._bit_timeouts.get(bit, None)
def _find_timeout_data_by_bit_instance(self, bit_instance):
bit_manager = services.get_instance_manager(sims4.resources.Types.RELATIONSHIP_BIT)
bit = bit_manager.get(bit_instance.guid64)
return self._find_timeout_data_by_bit(bit)
def _find_timeout_data_by_handle(self, alarm_handle):
if self._bit_timeouts:
for data in self._bit_timeouts.values():
if alarm_handle is data.alarm_handle:
return data
def _timeout_alarm_callback(self, alarm_handle):
timeout_data = self._find_timeout_data_by_handle(alarm_handle)
if timeout_data is not None:
self.remove_bit(timeout_data.bit)
else:
logger.error('Failed to find alarm handle in _bit_timeouts list')
def _send_telemetry_event_for_bit_change(self, telemetry_hook, bit, sim_info, target_sim_info):
with telemetry_helper.begin_hook(writer, telemetry_hook, sim_info=sim_info) as (hook):
hook.write_int(TELEMETRY_FIELD_TARGET_ID, target_sim_info.sim_id)
hook.write_int(TELEMETRY_FIELD_REL_ID, target_sim_info.sim_id)
hook.write_int(TELEMETRY_FIELD_BIT_ID, bit.guid64)
def _update_client_for_sim_info_for_bit_add(self, bit_to_add, bit_instance, sim_info, target_sim_info, from_load):
if not sim_info is None:
if self.relationship is None:
return
sim = sim_info.get_sim_instance()
if sim is not None:
bit_instance.on_add_to_relationship(sim, target_sim_info, self.relationship, from_load)
self.show_bit_added_dialog(bit_instance, sim, target_sim_info)
elif not self.relationship._is_object_rel:
self._send_telemetry_event_for_bit_change(TELEMETRY_HOOK_ADD_BIT, bit_to_add, sim_info, target_sim_info)
services.get_event_manager().process_event((event_testing.test_events.TestEvent.AddRelationshipBit), sim_info=sim_info,
relationship_bit=bit_to_add,
sim_id=(sim_info.sim_id),
target_sim_id=(target_sim_info.sim_id))
if bit_to_add is RelationshipGlobalTuning.MARRIAGE_RELATIONSHIP_BIT:
if sim_info is not None:
sim_info.update_spouse_sim_id(target_sim_info.sim_id)
def _update_client_from_bit_add(self, bit_type, bit_instance, from_load):
raise NotImplementedError
def _update_client_for_sim_info_for_bit_remove(self, bit_to_remove, bit_instance, sim_info, target_sim_info):
if sim_info is None:
return
if target_sim_info is not None:
self._send_telemetry_event_for_bit_change(TELEMETRY_HOOK_REMOVE_BIT, bit_to_remove, sim_info, target_sim_info)
services.get_event_manager().process_event((event_testing.test_events.TestEvent.RemoveRelationshipBit), sim_info=sim_info,
relationship_bit=bit_to_remove,
sim_id=(sim_info.sim_id),
target_sim_id=(target_sim_info.sim_id))
sim = sim_info.get_sim_instance()
if sim is not None:
bit_instance.on_remove_from_relationship(sim, target_sim_info)
self.show_bit_removed_dialog(bit_instance, sim, target_sim_info)
if bit_to_remove is RelationshipGlobalTuning.MARRIAGE_RELATIONSHIP_BIT:
sim_info.update_spouse_sim_id(None)
def _update_client_from_bit_remove(self, bit_type, bit_instance):
raise NotImplementedError
def add_bit(self, bit_type, bit_instance, from_load=False):
self.cached_depth_dirty = True
self._bits[bit_type] = bit_instance
if self.relationship is None:
return
if not self.relationship.suppress_client_updates:
self._update_client_from_bit_add(bit_type, bit_instance, from_load)
if bit_type.timeout > 0:
timeout_data = self._find_timeout_data_by_bit(bit_type)
if timeout_data is None:
timeout_data = BitTimeoutData(bit_type, self._timeout_alarm_callback)
if self._bit_timeouts is None:
self._bit_timeouts = {}
self._bit_timeouts[bit_type] = timeout_data
timeout_data.reset_alarm()
remove_on_threshold = bit_type.remove_on_threshold
if remove_on_threshold is not None:
track_type = remove_on_threshold.track
if track_type is not None:
if track_type.track_type == RelationshipTrackType.SENTIMENT:
logger.error('remove_on_threshold should not be set for bits of sentiments. Sentiment bits are not added/removed based on sentiment track valuesbit:{} should be updated'.format(bit_type))
listener = self.relationship.relationship_track_tracker.create_and_add_listener(track_type, remove_on_threshold.threshold, self._on_remove_bit_threshold_satisfied)
bit_instance.add_conditional_removal_listener(listener)
def _on_remove_bit_threshold_satisfied(self, track):
for bit in self._bits.keys():
if bit.remove_on_threshold is None:
continue
if bit.remove_on_threshold.track is type(track):
self.remove_bit(bit)
return
logger.error("Got a callback to remove a bit for track {}, but one doesn't exist.", track)
def remove_bit(self, bit):
bit_instance = self._bits.get(bit)
if bit_instance is None:
logger.warn("Attempting to remove bit of type {} that doesn't exist.", bit)
if self.relationship is None:
logger.warn('Attempting to remove bit on a relationship that has been deleted')
return
if not self.relationship.suppress_client_updates:
self._update_client_from_bit_remove(bit, bit_instance)
else:
self.cached_depth_dirty = True
del self._bits[bit]
logger.debug('Removed bit {} for {}', bit, self)
timeout_data = self._find_timeout_data_by_bit(bit)
if timeout_data is not None:
timeout_data.cancel_alarm()
del self._bit_timeouts[bit]
if not self._bit_timeouts:
self._bit_timeouts = None
remove_on_threshold = bit.remove_on_threshold
if remove_on_threshold is not None:
listener = bit_instance.remove_conditional_removal_listener()
if listener is not None:
track_type = remove_on_threshold.track
if track_type is not None:
if track_type.track_type == RelationshipTrackType.SENTIMENT:
logger.error('remove_on_threshold should not be set for bits of sentiments. Sentiment bits are not added/removed based on sentiment track valuesbit:{} should be updated'.format(bit))
self.relationship.relationship_track_tracker.remove_listener(listener)
else:
logger.error("Bit {} is meant to have a listener on track {} but it doesn't for {}.".format(bit, remove_on_threshold.track, self))
def save_relationship_data(self, relationship_data_msg):
for bit in self._bits:
if bit.persisted:
relationship_data_msg.bits.append(bit.guid64)
if self._bit_timeouts is not None:
for timeout in self._bit_timeouts.values():
with ProtocolBufferRollback(relationship_data_msg.timeouts) as (timeout_proto_buffer):
timeout_proto_buffer.timeout_bit_id_hash = timeout.bit.guid64
timeout_proto_buffer.elapsed_time = timeout.get_elapsed_time()
if self._relationship_bit_locks is not None:
for relationship_bit_lock in self._relationship_bit_locks.values():
with ProtocolBufferRollback(relationship_data_msg.relationship_bit_locks) as (relationship_bit_lock_proto_buffer):
relationship_bit_lock.save(relationship_bit_lock_proto_buffer)
for track in self._track_tracker:
if not track.persisted:
continue
if not track.persist_at_convergence:
if track.is_at_convergence():
continue
with ProtocolBufferRollback(relationship_data_msg.tracks) as (track_proto_buffer):
track_proto_buffer.track_id = track.type_id()
track_proto_buffer.value = track.get_value()
track_proto_buffer.visible = track.visible_to_client
track_proto_buffer.ticks_until_decay_begins = track.get_saved_ticks_until_decay_begins()
def load_relationship_data(self, relationship_data_msg):
track_manager = services.get_instance_manager(sims4.resources.Types.STATISTIC)
track_to_bit_list_map = defaultdict(list)
try:
self._track_tracker.suppress_callback_setup_during_load = True
self._track_tracker.load_in_progress = True
for track_data in relationship_data_msg.tracks:
track_type = track_manager.get(track_data.track_id)
if track_type is None:
continue
if not track_type.persist_at_convergence:
if track_data.value == track_type.default_value:
continue
else:
track_inst = self._track_tracker.add_statistic(track_type)
if track_inst is not None:
track_inst.set_value(track_data.value)
track_inst.visible_to_client = track_data.visible
track_inst.set_time_until_decay_begins(track_data.ticks_until_decay_begins)
track_inst.fixup_callbacks_during_load()
track_to_bit_list_map[track_inst] = []
logger.warn('Failed to load track {}. This is valid if the tuning has changed.', track_type)
finally:
self._track_tracker.suppress_callback_setup_during_load = False
self._track_tracker.load_in_progress = False
bit_manager = services.get_instance_manager(sims4.resources.Types.RELATIONSHIP_BIT)
logger.assert_raise(bit_manager, 'Unable to retrieve relationship bit manager.')
if track_to_bit_list_map is None:
track_to_bit_list_map = defaultdict(list)
bit_list = []
for bit_guid in relationship_data_msg.bits:
bit = bit_manager.get(bit_guid)
if bit is None:
logger.info('Trying to load unavailable RELATIONSHIP_BIT resource: {}', bit_guid)
continue
if bit.triggered_track is not None:
track_inst = self._track_tracker.get_statistic(bit.triggered_track)
if track_inst is not None:
bit_data_set = track_inst.get_bit_data_set()
if bit_data_set:
if bit in bit_data_set:
track_to_bit_list_map[track_inst].append(bit)
continue
bit_list.append(bit)
for track_inst, track_bit_list in track_to_bit_list_map.items():
if len(track_bit_list) > 1:
active_bit = track_inst.get_active_bit_by_value()
logger.warn('{} has bad persisted Rel Bit value on Rel Track {}. Fix it by adding bit {} and removing bits {}.', self,
track_inst, active_bit, track_bit_list, owner='mkartika')
bit_list.append(active_bit)
else:
bit_list.extend(track_bit_list)
sim_id_a, sim_id_b = self._sim_ids()
while bit_list:
bit = bit_list.pop()
if bit in self._bits:
continue
self.relationship.add_relationship_bit(sim_id_a, sim_id_b,
bit,
notify_client=False,
pending_bits=bit_list,
from_load=True,
send_rel_change_event=False) or logger.warn('Failed to load relationship bit {}. This is valid if tuning has changed.', bit)
if relationship_data_msg.timeouts is not None:
for timeout_save in relationship_data_msg.timeouts:
bit = bit_manager.get(timeout_save.timeout_bit_id_hash)
timeout_data = self._find_timeout_data_by_bit(bit)
if timeout_data is not None:
if not timeout_data.load_bit_timeout(timeout_save.elapsed_time):
self.remove_bit(bit)
else:
logger.warn('Attempting to load timeout value on bit {} with no timeout. This is valid if tuning has changed.', bit)
relationship_bit_lock_manager = services.get_instance_manager(sims4.resources.Types.RELATIONSHIP_LOCK)
for relationship_bit_lock_data in relationship_data_msg.relationship_bit_locks:
lock_type = relationship_bit_lock_manager.get(relationship_bit_lock_data.relationship_bit_lock_type)
if lock_type is None:
continue
new_lock = self.add_lock(lock_type)
new_lock.load(relationship_bit_lock_data)
for track in self._track_tracker:
track.update_track_index(self.relationship)
def notify_relationship_on_lod_change(self, old_lod, new_lod):
pass
def destroy(self):
self.relationship = None
self._bits.clear()
self._bit_timeouts = None
self._cached_depth = 0
self.cached_depth_dirty = False
self._relationship_bit_locks = None
def show_bit_added_dialog(self, relationship_bit, sim, target_sim_info):
raise NotImplementedError
def show_bit_removed_dialog(self, relationship_bit, sim, target_sim_info):
raise NotImplementedError
def add_lock(self, lock_type):
if self._relationship_bit_locks is None:
self._relationship_bit_locks = {}
current_lock = self._relationship_bit_locks.get(lock_type, None)
if current_lock is not None:
return current_lock
new_lock = lock_type()
self._relationship_bit_locks[lock_type] = new_lock
return new_lock
def get_lock(self, lock_type):
if self._relationship_bit_locks is None:
return
return self._relationship_bit_locks.get(lock_type, None)
def get_all_locks(self):
locks = []
if self._relationship_bit_locks is None:
return locks
locks.extend(self._relationship_bit_locks.values())
return locks
def on_sim_creation(self, sim):
for bit_instance in self._bits.values():
bit_instance.add_buffs_for_bit_add(sim, self.relationship, True)
self._track_tracker.on_sim_creation(sim)
def set_track_to_max(self, track):
self._track_tracker.set_max(track)
def get_track(self, track, add=False):
return self._track_tracker.get_statistic(track, add)
def has_track(self, track):
return self._track_tracker.has_statistic(track)
def remove_track(self, track, on_destroy=False):
return self._track_tracker.remove_statistic(track, on_destroy)
def get_highest_priority_track_bit(self):
highest_priority_bit = None
for track in self._track_tracker:
bit = track.get_active_bit()
if not bit:
continue
if highest_priority_bit is None or bit.priority > highest_priority_bit.priority:
highest_priority_bit = bit
return highest_priority_bit
def apply_social_group_decay(self):
for track in self._track_tracker:
track.apply_social_group_decay()
def get_track_score(self, track):
return self._track_tracker.get_user_value(track)
def set_track_score(self, value, track, **kwargs):
(self._track_tracker.set_value)(track, value, **kwargs)
def add_track_score(self, increment, track, **kwargs):
(self._track_tracker.add_value)(track, increment, **kwargs)
def enable_player_sim_track_decay(self, to_enable=True):
self._track_tracker.enable_player_sim_track_decay(to_enable)
def get_track_utility_score(self, track):
track_inst = self._track_tracker.get_statistic(track)
if track_inst is not None:
return track_inst.autonomous_desire
return track.autonomous_desire
def remove_social_group_decay(self):
for track in self._track_tracker:
track.remove_social_group_decay()
def is_object_rel(self):
return self.relationship and self.relationship.is_object_rel()
class UnidirectionalRelationshipData(RelationshipData):
__slots__ = ('_knowledge', '_actor_sim_id', 'bit_added_buffs')
def __init__(self, relationship, actor_sim_id):
super().__init__(relationship)
self._knowledge = None
self._actor_sim_id = actor_sim_id
self.bit_added_buffs = None
self._track_tracker = SentimentTrackTracker(self)
def __repr__(self):
return 'UnidirectionalRelationshipData between: {} and {}'.format(self._actor_sim_id, self._target_sim_id)
@property
def sim_id_a(self):
return self._actor_sim_id
@property
def sim_id_b(self):
return self._target_sim_id
@property
def _target_sim_id(self):
return self.relationship.get_other_sim_id(self._actor_sim_id)
@property
def knowledge(self):
return self._knowledge
def initialize_knowledge(self):
self._knowledge = SimKnowledge(self)
def find_target_sim_info(self):
return services.sim_info_manager().get(self._target_sim_id)
def _sim_ids(self):
return (
self._actor_sim_id, self._target_sim_id)
def _update_client_from_bit_add(self, bit_type, bit_instance, from_load):
sim_info_manager = services.sim_info_manager()
actor_sim_info = sim_info_manager.get(self._actor_sim_id)
target_sim_info = sim_info_manager.get(self._target_sim_id)
self._update_client_for_sim_info_for_bit_add(bit_type, bit_instance, actor_sim_info, target_sim_info, from_load)
def add_bit(self, bit_type, bit_instance, from_load=False):
super().add_bit(bit_type, bit_instance, from_load=from_load)
sim_info = services.sim_info_manager().get(self._actor_sim_id)
if sim_info:
bit_instance.add_appropriateness_buffs(sim_info)
def _update_client_from_bit_remove(self, bit_type, bit_instance):
sim_info_manager = services.sim_info_manager()
actor_sim_info = sim_info_manager.get(self._actor_sim_id)
target_sim_info = sim_info_manager.get(self._target_sim_id)
self._update_client_for_sim_info_for_bit_remove(bit_type, bit_instance, actor_sim_info, target_sim_info)
def remove_bit(self, bit):
if bit not in self._bits:
logger.debug("Attempting to remove bit for {} that doesn't exist: {}", self, bit)
return
bit_instance = self._bits[bit]
super().remove_bit(bit)
sim_info = services.sim_info_manager().get(self._actor_sim_id)
if sim_info is not None:
bit_instance.remove_appropriateness_buffs(sim_info)
def remove_track(self, track, on_destroy=False):
if self.relationship is not None:
self.relationship.get_track_relationship_data(self._target_sim_id, track).remove_bit(track._neutral_bit)
return super().remove_track(track, on_destroy=on_destroy)
def save_relationship_data(self, relationship_data_msg):
super().save_relationship_data(relationship_data_msg)
if self.bit_added_buffs is not None:
for buff_id in self.bit_added_buffs:
relationship_data_msg.bit_added_buffs.append(buff_id)
if self._knowledge is not None:
relationship_data_msg.knowledge = self._knowledge.get_save_data()
sentiment_proximity_cooldown_end = self._track_tracker.proximity_cooldown_end_time
if sentiment_proximity_cooldown_end is not None:
relationship_data_msg.sentiment_proximity_cooldown_end = sentiment_proximity_cooldown_end.absolute_ticks()
def load_relationship_data(self, relationship_data_msg):
if relationship_data_msg.bit_added_buffs:
if self.bit_added_buffs is None:
self.bit_added_buffs = set()
for bit_added_buff in relationship_data_msg.bit_added_buffs:
self.bit_added_buffs.add(bit_added_buff)
super().load_relationship_data(relationship_data_msg)
if relationship_data_msg.HasField('knowledge'):
self._knowledge = SimKnowledge(self)
self._knowledge.load_knowledge(relationship_data_msg.knowledge)
if relationship_data_msg.HasField('sentiment_proximity_cooldown_end'):
self._track_tracker.proximity_cooldown_end_time = DateAndTime(relationship_data_msg.sentiment_proximity_cooldown_end)
def remove_sentiments_by_duration(self, duration_type=None, preserve_active_household_infos=False):
if preserve_active_household_infos:
active_household = services.active_household()
sim_info_manager = services.sim_info_manager()
actor_sim_info = sim_info_manager.get(self._actor_sim_id)
target_sim_info = sim_info_manager.get(self._target_sim_id)
if not actor_sim_info in active_household:
if target_sim_info in active_household:
return
tracks_to_remove = []
for sentiment_track in self._track_tracker:
if duration_type is None or sentiment_track.duration == duration_type:
tracks_to_remove.append(sentiment_track)
for sentiment_track in tracks_to_remove:
self.remove_track(sentiment_track.stat_type)
def notify_relationship_on_lod_change(self, old_lod, new_lod):
if new_lod < SimInfoLODLevel.INTERACTED:
self.remove_sentiments_by_duration()
return
if new_lod == SimInfoLODLevel.INTERACTED:
self.remove_sentiments_by_duration(duration_type=(SentimentDurationType.SHORT), preserve_active_household_infos=True)
return
def destroy(self):
self._track_tracker.destroy()
self._track_tracker = None
super().destroy()
self.bit_added_buffs = None
self._knowledge = None
self._actor_sim_id = 0
    def show_bit_added_dialog(self, relationship_bit, sim, target_sim_info):
        # Reconstructed by hand from the bytecode listing that uncompyle6 failed to
        # decompile (parse error at the JUMP_ABSOLUTE at offset 68); the branches
        # below follow that listing.
        if relationship_bit.bit_added_notification is None:
            return
        target_sim = target_sim_info.get_sim_instance()
        if sim.is_selectable and sim.is_human:
            if target_sim is None or not target_sim.is_selectable or target_sim.is_pet:
                relationship_bit.show_bit_added_dialog(sim, sim, target_sim_info)
            elif not target_sim_info.relationship_tracker.has_bit(sim.id, type(relationship_bit)):
                relationship_bit.show_bit_added_dialog(sim, sim, target_sim_info)
        elif relationship_bit.bit_added_notification.show_if_unselectable and target_sim_info.is_selectable and target_sim_info.is_human:
            relationship_bit.show_bit_added_dialog(target_sim_info, sim, target_sim_info)
def show_bit_removed_dialog(self, relationship_bit, sim, target_sim_info):
if relationship_bit.bit_removed_notification is None:
return
if not sim.is_selectable or sim.is_pet:
return
target_sim = target_sim_info.get_sim_instance()
        # Condition repaired from garbled decompiler output.
        if target_sim is None or not target_sim.is_selectable or target_sim.is_pet:
relationship_bit.show_bit_removed_dialog(sim, target_sim_info)
elif not target_sim_info.relationship_tracker.has_bit(sim.id, type(self)):
relationship_bit.show_bit_removed_dialog(sim, target_sim_info)
class BidirectionalRelationshipData(RelationshipData):
__slots__ = '_level_change_watcher_id'
def __init__(self, relationship):
super().__init__(relationship)
if relationship._is_object_rel == False:
self._track_tracker = RelationshipTrackTracker(self)
else:
self._track_tracker = ObjectRelationshipTrackTracker(self)
self._level_change_watcher_id = self._track_tracker.add_watcher(self._value_changed)
def __repr__(self):
return 'BidirectionalRelationshipData between: {} and {}'.format(self.sim_id_a, self.sim_id_b)
@property
def sim_id_a(self):
return self.relationship.sim_id_a
@property
def sim_id_b(self):
return self.relationship.sim_id_b
def _sim_ids(self):
return (
self.sim_id_a, self.sim_id_b)
def _value_changed(self, stat_type, old_value, new_value):
if stat_type.causes_delayed_removal_on_convergence:
self.relationship._destroy_culling_alarm()
self.relationship._last_update_time = services.time_service().sim_now
def get_prevailing_short_term_context_track(self):
tracks = [track for track in self._track_tracker if track.is_short_term_context]
if tracks:
return max(tracks, key=(lambda t: abs(t.get_value())))
return self.get_track((RelationshipGlobalTuning.DEFAULT_SHORT_TERM_CONTEXT_TRACK), add=True)
def _update_client_from_bit_add(self, bit_type, bit_instance, from_load):
sim_info_manager = services.sim_info_manager()
sim_info_a = sim_info_manager.get(self.sim_id_a)
sim_info_b = sim_info_manager.get(self.sim_id_b)
self._update_client_for_sim_info_for_bit_add(bit_type, bit_instance, sim_info_a, sim_info_b, from_load)
if sim_info_b is not None:
self._update_client_for_sim_info_for_bit_add(bit_type, bit_instance, sim_info_b, sim_info_a, from_load)
def add_bit(self, bit_type, bit_instance, from_load=False):
super().add_bit(bit_type, bit_instance, from_load=from_load)
sim_info_manager = services.sim_info_manager()
sim_info_a = sim_info_manager.get(self.sim_id_a)
sim_info_b = sim_info_manager.get(self.sim_id_b)
if sim_info_a is not None:
bit_instance.add_appropriateness_buffs(sim_info_a)
if sim_info_b is not None:
bit_instance.add_appropriateness_buffs(sim_info_b)
def _update_client_from_bit_remove(self, bit_type, bit_instance):
sim_info_manager = services.sim_info_manager()
sim_info_a = sim_info_manager.get(self.sim_id_a)
sim_info_b = sim_info_manager.get(self.sim_id_b)
self._update_client_for_sim_info_for_bit_remove(bit_type, bit_instance, sim_info_a, sim_info_b)
self._update_client_for_sim_info_for_bit_remove(bit_type, bit_instance, sim_info_b, sim_info_a)
def remove_bit(self, bit):
if bit not in self._bits:
logger.debug("Attempting to remove bit for {} that doesn't exist: {}", self, bit)
return
bit_instance = self._bits[bit]
super().remove_bit(bit)
sim_info_manager = services.sim_info_manager()
sim_info_a = sim_info_manager.get(self.sim_id_a)
sim_info_b = sim_info_manager.get(self.sim_id_b)
if sim_info_a:
bit_instance.remove_appropriateness_buffs(sim_info_a)
if sim_info_b:
bit_instance.remove_appropriateness_buffs(sim_info_b)
def destroy(self):
super().destroy()
self._track_tracker.remove_watcher(self._level_change_watcher_id)
self._level_change_watcher_id = None
self._track_tracker.destroy()
self._track_tracker = None
def can_cull_relationship(self, consider_convergence, is_played_relationship):
if consider_convergence:
if not self._track_tracker.are_all_tracks_that_cause_culling_at_convergence():
return False
return super().can_cull_relationship(consider_convergence, is_played_relationship)
def show_bit_added_dialog(self, relationship_bit, sim, target_sim_info):
if relationship_bit.bit_added_notification is None:
return
if sim.is_selectable:
if sim.is_human:
relationship_bit.show_bit_added_dialog(sim, sim, target_sim_info)
def show_bit_removed_dialog(self, relationship_bit, sim, target_sim_info):
if relationship_bit.bit_removed_notification is None:
return
if sim.is_selectable:
if sim.is_human:
relationship_bit.show_bit_removed_dialog(sim, target_sim_info)
| 46.70844
| 210
| 0.656163
|
81b937bf0050a4751cd63c24a4552505c86e6837
| 5,551
|
py
|
Python
|
tests/bindings/test_dictionary.py
|
blueflamegames/godot-python
|
b4ae8ecda7f698e1d64eb0a9acc133094fb40843
|
[
"CC-BY-3.0"
] | 1,323
|
2016-11-17T21:28:18.000Z
|
2022-03-31T17:42:37.000Z
|
tests/bindings/test_dictionary.py
|
blueflamegames/godot-python
|
b4ae8ecda7f698e1d64eb0a9acc133094fb40843
|
[
"CC-BY-3.0"
] | 301
|
2017-01-02T17:49:13.000Z
|
2022-03-14T13:17:42.000Z
|
tests/bindings/test_dictionary.py
|
blueflamegames/godot-python
|
b4ae8ecda7f698e1d64eb0a9acc133094fb40843
|
[
"CC-BY-3.0"
] | 131
|
2017-02-09T08:05:03.000Z
|
2022-03-15T06:44:34.000Z
|
import pytest
import json
from godot import Dictionary, Vector2, Array, GDString, Node, Resource, OS
def test_base():
v = Dictionary()
assert type(v) == Dictionary
def test_equal():
arr = Dictionary()
other = Dictionary()
for key, value in [("a", 1), ("b", "foo"), ("c", OS), ("d", Vector2())]:
other[key] = arr[key] = value
assert arr == other
bad = Dictionary({"a": 1})
assert not arr == bad # Force use of __eq__
assert not arr == None # Force use of __eq__
@pytest.mark.parametrize("arg", [None, 0, "foo", Vector2(), {"a": 1}, Dictionary({"b": 2})])
def test_bad_equal(arg):
arr = Dictionary({"a": 1})
assert arr != arg
def test_repr():
v = Dictionary()
assert repr(v) == "<Dictionary({})>"
v = Dictionary({"a": 1, 2: "foo", 0.5: Vector2()})
assert repr(v).startswith("<Dictionary({")
for item in ["'a': 1", "2: 'foo'", "0.5: <Vector2(x=0.0, y=0.0)>"]:
assert item in repr(v)
@pytest.mark.parametrize("arg", [42, "dummy", Vector2(), [object()], {object(): 1}, {1: object()}])
def test_bad_instantiate(arg):
with pytest.raises((TypeError, ValueError)):
Dictionary(arg)
@pytest.mark.parametrize(
"arg",
[
Dictionary(),
{},
{"a": 1, 2: "foo", 0.5: Vector2()},
Dictionary({"a": 1, 2: "foo", 0.5: Vector2()}),
],
)
def test_instantiate_from_copy(arg):
arr = Dictionary(arg)
if hasattr(arg, "_gd_ptr"):
assert arr._gd_ptr != arg._gd_ptr
def test_len():
v = Dictionary()
assert len(v) == 0
v["foo"] = "bar"
assert len(v) == 1
def test_getitem():
v = Dictionary({"a": 1, 2: "foo", 0.5: Vector2()})
assert v["a"] == 1
assert v[0.5] == Vector2()
# Missing items are stored as None
assert v["dummy"] is None
# Cannot store non Godot types
with pytest.raises(TypeError):
v[object()]
def test_setitem():
v = Dictionary({"a": 1, 2: "foo", 0.5: Vector2()})
v[0] = GDString("bar")
assert len(v) == 4
assert v[0] == GDString("bar")
v["a"] = 4
assert len(v) == 4
assert v["a"] == 4
# Cannot store non Godot types
with pytest.raises(TypeError):
v[object()] = 4
with pytest.raises(TypeError):
v[4] = object()
def test_delitem():
v = Dictionary({"a": 1, 2: "foo", 0.5: Vector2()})
del v["a"]
assert len(v) == 2
del v[0.5]
assert len(v) == 1
    assert v[2] == GDString("foo")
# Delete on missing items should raise error
with pytest.raises(KeyError):
del v["missing"]
# Cannot store non Godot types
with pytest.raises(TypeError):
del v[object()]
def test_update():
v = Dictionary({"a": 1, "b": 2, "c": 3})
v.update({"a": "one", "d": "four"})
v.update(Dictionary({"b": "two", "e": "five"}))
assert list(v.keys()) == [
GDString("a"),
GDString("b"),
GDString("c"),
GDString("d"),
GDString("e"),
]
assert list(v.values()) == [
GDString("one"),
GDString("two"),
3,
GDString("four"),
GDString("five"),
]
def test_iter():
v = Dictionary({"a": 1, 2: "foo", 0.5: Vector2()})
items = [GDString("a"), 2, 0.5]
items_from_v = [x for x in v]
assert items_from_v == items
def test_keys():
v = Dictionary({"a": 1, 2: "foo", 0.5: Vector2()})
keys = v.keys()
assert list(keys) == [GDString("a"), 2, 0.5]
def test_values():
v = Dictionary({"a": 1, 2: "foo"})
values = v.values()
assert list(values) == [1, GDString("foo")]
def test_items():
v = Dictionary({"a": 1, 2: "foo"})
items = v.items()
assert list(items) == [(GDString("a"), 1), (2, GDString("foo"))]
def test_empty_and_clear():
v = Dictionary({"a": 1, 2: "foo"})
assert not v.empty()
v.clear()
assert len(v) == 0
assert v.empty()
def test_in():
v = Dictionary({"a": 1, 2: "foo"})
assert "a" in v
assert 2 in v
assert "dummy" not in v
assert None not in v
def test_hash():
v = Dictionary({"a": 1, 2: "foo"})
h1 = v.hash()
h2 = v.hash()
assert h1 == h2
v["b"] = 42
h3 = v.hash()
assert h3 != h2
def test_has_all():
v = Dictionary({"a": 1, 2: "foo", None: None})
elems = Array(["a", None])
assert v.has_all(elems)
bad_elems = Array(["a", 42])
assert not v.has_all(bad_elems)
def test_to_json():
v = Dictionary({"a": 1, "b": "foo"})
jsoned = v.to_json()
v2 = json.loads(str(jsoned))
assert v2 == {"a": 1, "b": "foo"}
assert json
def test_update_does_not_modify_source():
v1 = Dictionary({"a": 1, "b": 2})
v2 = Dictionary({"b": 3, "c": 4})
v1.update(v2)
assert v1 == Dictionary({"a": 1, "b": 3, "c": 4})
assert v2 == Dictionary({"b": 3, "c": 4})
v2.update({"d": 5, "e": 6})
assert v1 == Dictionary({"a": 1, "b": 3, "c": 4})
assert v2 == Dictionary({"b": 3, "c": 4, "d": 5, "e": 6})
@pytest.mark.parametrize("arg", [None, 0, Vector2(), OS, Array([1, 2])])
def test_bad_update(arg):
v = Dictionary()
with pytest.raises(TypeError):
v.update(arg)
@pytest.mark.parametrize("deep", [False, True])
def test_duplicate(deep):
inner = Dictionary({0: 0})
d1 = Dictionary({0: inner})
d2 = d1.duplicate(deep)
d1[0][1] = 1
d2[0][2] = 2
if deep:
assert d1 == Dictionary({0: Dictionary({0: 0, 1: 1})})
assert d2 == Dictionary({0: Dictionary({0: 0, 2: 2})})
else:
assert d1 == Dictionary({0: Dictionary({0: 0, 1: 1, 2: 2})})
assert d2 == d1
| 24.453744
| 99
| 0.533778
|
1a31ddbac48a1d99174e8eb35f3c24440ccd213f
| 7,100
|
py
|
Python
|
src/sdi_pandas/sample/sample.py
|
thhapke/DI_Pandas
|
7a9108007459260a30ea7ee404a76b42861c81c5
|
[
"MIT"
] | 2
|
2020-01-02T19:54:46.000Z
|
2020-03-09T08:49:33.000Z
|
src/sdi_pandas/sample/sample.py
|
thhapke/DI_Pandas
|
7a9108007459260a30ea7ee404a76b42861c81c5
|
[
"MIT"
] | null | null | null |
src/sdi_pandas/sample/sample.py
|
thhapke/DI_Pandas
|
7a9108007459260a30ea7ee404a76b42861c81c5
|
[
"MIT"
] | 1
|
2020-03-28T22:53:16.000Z
|
2020-03-28T22:53:16.000Z
|
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
import pandas as pd
EXAMPLE_ROWS = 5
try:
api
except NameError:
class api:
class Message:
def __init__(self,body = None,attributes = ""):
self.body = body
self.attributes = attributes
def send(port,msg) :
if isinstance(msg,api.Message) :
print('Port: ', port)
print('Attributes: ', msg.attributes)
print('Body: ', str(msg.body))
else :
print(str(msg))
return msg
def call(config,msg):
api.config = config
return process(msg)
def set_port_callback(port, callback) :
df = pd.DataFrame(
{'icol': [1, 2, 3, 4, 5], 'xcol2': ['A', 'A', 'B', 'B', 'C'], \
'xcol3': ['K', 'L', 'M', 'N', 'O'], 'xcol4': ['a1', 'a1', 'b1', 'b1', 'b1']})
default_msg = api.Message(attributes = {'format': 'pandas', 'name': 'test','process_list':[]}, body=df)
callback(default_msg)
class config:
## Meta data
config_params = dict()
version = '0.0.17'
tags = {'pandas': '','sdi_utils':''}
operator_description = "Sample from Dataframe"
operator_description_long = "Sampling over a DataFrame but keeps datasets with the same value of the \
defined column as set and not splitting them, e.g. sampling with the invariant_column='date' samples \
but ensures that all datasets of a certain date are taken or none. This leads to the fact that the \
sample_size is only a guiding target. Depending on the size of the datasets with the same value of \
the *invariant_column* compared to the *sample_size* this could deviate a lot. "
add_readme = dict()
add_readme["References"] = "[pandas doc: sample](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sample.html)"
debug_mode = True
config_params['debug_mode'] = {'title': 'Debug mode',
'description': 'Sending debug level information to log port',
'type': 'boolean'}
sample_size = 0.1
config_params['sample_size'] = {'title': 'Sample size', 'description': 'Sample size', 'type': 'number'}
random_state = 1
config_params['random_state'] = {'title': 'Random state', 'description': 'Random state', 'type': 'integer'}
invariant_column = ''
config_params['invariant_column'] = {'title': 'Invariant column', 'description': 'Column where all the same value records should be kept as a whole in a sample', 'type': 'string'}
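# Illustrative arithmetic for the invariant_column behaviour described above (the
# numbers are made up): with sample_size = 0.1 on a DataFrame of 1000 rows the target
# is 100 rows; if invariant_column = 'date' and each date has roughly 20 rows, the
# operator samples about 100 / 20 = 5 dates and keeps every row of those dates, so the
# realized sample size is only approximately 100.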
def process(msg) :
att_dict = msg.attributes
att_dict['operator'] = 'sample'
if api.config.debug_mode == True:
logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='DEBUG')
else:
logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='INFO')
logger.info("Process started")
time_monitor = tp.progress()
# start custom process definition
# test if body refers to a DataFrame type
prev_att = msg.attributes
df = msg.body
if not isinstance(df, pd.DataFrame):
logger.error('Message body does not contain a pandas DataFrame')
raise TypeError('Message body does not contain a pandas DataFrame')
###### start calculation
sample_size = api.config.sample_size
if sample_size < 1 :
sample_size = int(sample_size * df.shape[0])
if sample_size < 1 :
sample_size = 1
logger.warning("Fraction of sample size too small. Set sample size to 1.")
elif sample_size > df.shape[0]:
logger.warning("Sample size larger than number of rows")
logger.debug("Samples_size: {}/() ({})".format(sample_size,df.shape[0],sample_size/df.shape[0]))
random_state = api.config.random_state
invariant_column = tfp.read_value(api.config.invariant_column)
if invariant_column and sample_size < df.shape[0]:
# get the average number of records for each value of invariant
sc_df = df.groupby(invariant_column)[invariant_column].count()
sample_size_invariant = int(sample_size / sc_df.mean())
sample_size_invariant = 1 if sample_size_invariant == 0 else sample_size_invariant # ensure minimum
sc_df = sc_df.sample(n=sample_size_invariant, random_state=random_state).to_frame()
sc_df.rename(columns={invariant_column: 'sum'}, inplace=True)
# sample the df by merge 2 df
df = pd.merge(df, sc_df, how='inner', right_index=True, left_on=invariant_column)
df.drop(columns=['sum'], inplace=True)
else:
df = df.sample(n=sample_size, random_state=random_state)
# end custom process definition
if df.empty :
raise ValueError('DataFrame is empty')
logger.debug('Columns: {}'.format(str(df.columns)))
logger.debug('Shape (#rows - #columns): {} - {}'.format(df.shape[0],df.shape[1]))
    logger.debug('Memory: {} MB'.format(df.memory_usage(deep=True).sum() / 1024 ** 2))
example_rows = EXAMPLE_ROWS if df.shape[0] > EXAMPLE_ROWS else df.shape[0]
for i in range(0, example_rows):
logger.debug('Row {}: {}'.format(i,str([str(i)[:10].ljust(10) for i in df.iloc[i, :].tolist()])))
progress_str = '<BATCH ENDED><1>'
if 'storage.fileIndex' in att_dict and 'storage.fileCount' in att_dict and 'storage.endOfSequence' in att_dict:
if att_dict['storage.fileIndex'] + 1 == att_dict['storage.fileCount']:
progress_str = '<BATCH ENDED><{}>'.format(att_dict['storage.fileCount'])
else:
progress_str = '<BATCH IN-PROCESS><{}/{}>'.format(att_dict['storage.fileIndex'] + 1,
att_dict['storage.fileCount'])
att_dict['process_list'].append(att_dict['operator'])
logger.debug('Process ended: {} - {} '.format(progress_str, time_monitor.elapsed_time()))
logger.debug('Past process steps: {}'.format(att_dict['process_list']))
return log_stream.getvalue(), api.Message(attributes=att_dict,body=df)
inports = [{'name': 'data', 'type': 'message.DataFrame',"description":"Input data"}]
outports = [{'name': 'log', 'type': 'string',"description":"Logging data"}, \
{'name': 'data', 'type': 'message.DataFrame',"description":"Output data"}]
def call_on_input(msg) :
log, msg = process(msg)
api.send(outports[0]['name'], log)
api.send(outports[1]['name'], msg)
#api.set_port_callback([inports[0]['name']], call_on_input)
def main() :
print('Test: Default')
api.set_port_callback([inports[0]['name']], call_on_input)
if __name__ == '__main__':
main()
#gs.gensolution(os.path.realpath(__file__), config, inports, outports)
| 46.103896
| 191
| 0.614225
|
90bdce5ac514e3464417ec233b3f598612eeaa2d
| 1,144
|
py
|
Python
|
algorithms/bitonic_sort.py
|
marcpinet/sorting-visualizer
|
98cc6c07557d2fbf092c04e0de7ee2ffed224a11
|
[
"MIT"
] | null | null | null |
algorithms/bitonic_sort.py
|
marcpinet/sorting-visualizer
|
98cc6c07557d2fbf092c04e0de7ee2ffed224a11
|
[
"MIT"
] | null | null | null |
algorithms/bitonic_sort.py
|
marcpinet/sorting-visualizer
|
98cc6c07557d2fbf092c04e0de7ee2ffed224a11
|
[
"MIT"
] | null | null | null |
def sort(array: list[int], low: int = 0, length: int = 0, direction: int = 1):
if length == 0:
length = len(array)
def _comp_and_swap(array: list[int], index1: int, index2: int, direction: int):
if (direction == 1 and array[index1] > array[index2]) or (
direction == 0 and array[index1] < array[index2]
):
array[index1], array[index2] = array[index2], array[index1]
yield array, [array[index1], array[index2]]
def _bitonic_merge(array: list[int], low: int, length: int, direction: int):
if length > 1:
middle = int(length / 2)
for i in range(low, low + middle):
yield from _comp_and_swap(array, i, i + middle, direction)
yield from _bitonic_merge(array, low, middle, direction)
yield from _bitonic_merge(array, low + middle, middle, direction)
if length > 1:
middle = int(length / 2)
yield from sort(array, low, middle, 1)
yield from sort(array, low + middle, middle, 0)
yield from _bitonic_merge(array, low, length, direction)
yield array, []
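# Illustrative usage sketch (not part of the original module): sort() is a generator
# that yields (array, highlighted_values) after each comparison/swap so a visualizer
# can animate the steps; draining it leaves the list sorted in place. Bitonic sort
# assumes the input length is a power of two.
if __name__ == "__main__":
    data = [7, 3, 8, 1, 6, 2, 5, 4]
    for _snapshot, _highlights in sort(data):
        pass  # a visualizer would redraw here
    print(data)  # -> [1, 2, 3, 4, 5, 6, 7, 8]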
| 42.37037
| 83
| 0.586538
|
8d19c874c6b49f7c1903a41aef3f737581b001c4
| 1,587
|
py
|
Python
|
app/reddit.py
|
huang363/CashMonkey
|
b31628360bdd8929cc69fd554f503684230fc1cd
|
[
"MIT"
] | null | null | null |
app/reddit.py
|
huang363/CashMonkey
|
b31628360bdd8929cc69fd554f503684230fc1cd
|
[
"MIT"
] | null | null | null |
app/reddit.py
|
huang363/CashMonkey
|
b31628360bdd8929cc69fd554f503684230fc1cd
|
[
"MIT"
] | null | null | null |
import json
import praw
from app.postsStruc import Submission
class Reddit():
def __init__(self, authFile, rdonly):
self.authInfo = self.getAuthDataFromFile(authFile)
if rdonly:
self.initRedditRDONLY()
else:
self.initReddit()
    def getAuthDataFromFile(self, fileName):
        with open(fileName, 'r') as readFile:
return json.load(readFile)
def initRedditRDONLY(self):
self.praw = praw.Reddit(
client_id = self.authInfo["client_id"],
client_secret = self.authInfo["client_secret"],
user_agent = self.authInfo["user_agent"]
)
print(self.praw)
def initReddit(self):
self.praw = praw.Reddit(
client_id = self.authInfo["client_id"],
client_secret = self.authInfo["client_secret"],
user_agent = self.authInfo["user_agent"],
username = self.authInfo["username"],
password = self.authInfo["password"]
)
def getSubreddit(self,name):
return self.praw.subreddit(name)
class Subreddit():
def __init__(self, reddit, name):
self.reddit = reddit
self.name = name
self.subreddit = reddit.getSubreddit(name)
self.submissions = []
def printHotSubmissions(self, NumPosts):
print("Hottest post on " + self.name)
for subID in self.subreddit.hot(limit=NumPosts):
sub = Submission(self.reddit, subID)
sub.printSubmission()
self.submissions.append(sub)
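# Illustrative usage sketch (file name and subreddit name are placeholders, not part
# of this module): a read-only client only needs client_id/client_secret/user_agent
# in the JSON auth file, while a full client also needs username/password.
#
#   reddit = Reddit('auth.json', rdonly=True)
#   stocks = Subreddit(reddit, 'stocks')
#   stocks.printHotSubmissions(5)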
| 31.117647
| 63
| 0.597353
|
75a2df6761f5d8f7498bd7a47d0380b2794691ba
| 1,724
|
py
|
Python
|
rn/numbers.py
|
vikneswaran20/rn
|
33e0bfaf58bb8a5ec54c6d010035693b35e9909d
|
[
"BSD-3-Clause"
] | null | null | null |
rn/numbers.py
|
vikneswaran20/rn
|
33e0bfaf58bb8a5ec54c6d010035693b35e9909d
|
[
"BSD-3-Clause"
] | null | null | null |
rn/numbers.py
|
vikneswaran20/rn
|
33e0bfaf58bb8a5ec54c6d010035693b35e9909d
|
[
"BSD-3-Clause"
] | null | null | null |
import random
import click
class Numbers:
def __init__(self, range, iteration, delimiter, unique, sort, precision):
self.min = range[0]
self.max = range[1]
self.iteration = iteration
self.delimiter = delimiter
self.unique = unique
self.sort = sort
self.precision = precision
self.precision_str = '{:.%sf}' % self.precision
def generate_random(self):
nums = self.get_numbers()
self.apply_sort(nums)
nums = map(str, nums)
return self.delimiter.join(nums)
def get_numbers(self):
if self.unique:
            return self.get_unique_numbers()
return [self.precision_str.format(round(random.uniform(self.min, self.max), self.precision))
if self.precision is not None else random.randint(int(self.min), int(self.max))
for i in range(self.iteration)]
def get_unique_numbers(self):
try:
if self.precision is not None:
                factor = (10 ** self.precision)
local_min = int(self.min * factor)
local_max = int((self.max + 1) * factor)
return [self.precision_str.format(round(num/factor, self.precision))
for num in random.sample(range(local_min, local_max), self.iteration)]
return random.sample(
range(int(self.min), int(self.max) + 1), self.iteration)
except ValueError:
raise click.BadParameter(
"No of iterations are greater than the given range"
+ " with uniq enabled")
def apply_sort(self, nums):
if self.sort is not None:
nums.sort(reverse=(self.sort == 'desc'))
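# Illustrative usage (argument order follows __init__ above; the printed output is an
# example, actual values depend on the random state):
#
#   nums = Numbers(range=(1, 100), iteration=5, delimiter=',', unique=True,
#                  sort='asc', precision=None)
#   print(nums.generate_random())  # e.g. "3,17,42,58,99"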
| 34.48
| 100
| 0.585267
|
0363381cb7e835baeb83bea5d9e522c4406f57ca
| 4,147
|
py
|
Python
|
chb/mips/opcodes/MIPSStoreWord.py
|
sipma/CodeHawk-Binary
|
3d4a0c2958266571f8f660cb48f29339b253c07f
|
[
"MIT"
] | null | null | null |
chb/mips/opcodes/MIPSStoreWord.py
|
sipma/CodeHawk-Binary
|
3d4a0c2958266571f8f660cb48f29339b253c07f
|
[
"MIT"
] | null | null | null |
chb/mips/opcodes/MIPSStoreWord.py
|
sipma/CodeHawk-Binary
|
3d4a0c2958266571f8f660cb48f29339b253c07f
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020-2021 Henny Sipma
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import cast, Dict, List, Mapping, Sequence, TYPE_CHECKING
from chb.app.InstrXData import InstrXData
from chb.invariants.XVariable import XVariable
from chb.invariants.XXpr import XXpr
from chb.mips.MIPSDictionaryRecord import mipsregistry
from chb.mips.MIPSOpcode import MIPSOpcode, simplify_result, derefstr
from chb.mips.MIPSOperand import MIPSOperand
import chb.simulation.SimSymbolicValue as SSV
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
if TYPE_CHECKING:
from chb.mips.MIPSDictionary import MIPSDictionary
from chb.simulation.SimulationState import SimulationState
@mipsregistry.register_tag("sw", MIPSOpcode)
class MIPSStoreWord(MIPSOpcode):
"""SW rt, offset(base)
Store a word to memory.
args[0]: index of rt in mips dictionary
args[1]: index of offset(base) in mips dictionary
"""
def __init__(
self,
mipsd: "MIPSDictionary",
ixval: IndexedTableValue) -> None:
MIPSOpcode.__init__(self, mipsd, ixval)
@property
def operands(self) -> Sequence[MIPSOperand]:
return [self.mipsd.mips_operand(i) for i in self.args]
def global_variables(self, xdata: InstrXData) -> Mapping[str, int]:
return xdata.xprs[0].global_variables()
def annotation(self, xdata: InstrXData) -> str:
"""data format a:vxxa
vars[0]: lhs
xprs[0]: rhs
xprs[1]: rhs (simplified)
xprs[2]: address of memory location
"""
lhs = str(xdata.vars[0])
rhs = xdata.xprs[0]
rrhs = xdata.xprs[1]
xrhs = simplify_result(xdata.args[1], xdata.args[2], rhs, rrhs)
if lhs == '?' and len(xdata.xprs) == 3:
lhs = derefstr(xdata.xprs[2])
return lhs + ' := ' + xrhs
@property
def dst_operand(self) -> MIPSOperand:
return self.mipsd.mips_operand(self.args[0])
@property
def src_operand(self) -> MIPSOperand:
return self.mipsd.mips_operand(self.args[1])
# --------------------------------------------------------------------------
# Operation:
# memory[base+offset] <- rt
# --------------------------------------------------------------------------
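    # Illustrative encoding (standard MIPS assembly, not tied to a specific binary):
    # "sw $t0, 8($sp)" stores the 32-bit word held in register t0 at address sp + 8.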
def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
dstop = self.dst_operand
srcop = self.src_operand
srcval = simstate.rhs(iaddr, srcop)
lhs = simstate.set(iaddr, dstop, srcval)
simstate.increment_programcounter()
return SU.simassign(iaddr, simstate, lhs, srcval)
| 36.377193
| 80
| 0.636122
|
aeded9110a7fbe151042a44fe4bb6fb70de9ee70
| 1,031
|
py
|
Python
|
tests/test_contrib_boolean.py
|
shuternay/django-mutant
|
cf26ef94fc22c6cd2987880befc139c755dc9f3a
|
[
"MIT"
] | 1
|
2021-04-15T02:58:09.000Z
|
2021-04-15T02:58:09.000Z
|
tests/test_contrib_boolean.py
|
shuternay/django-mutant
|
cf26ef94fc22c6cd2987880befc139c755dc9f3a
|
[
"MIT"
] | null | null | null |
tests/test_contrib_boolean.py
|
shuternay/django-mutant
|
cf26ef94fc22c6cd2987880befc139c755dc9f3a
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.utils.translation import gettext_lazy as _
from mutant.contrib.boolean.models import (
BooleanFieldDefinition, NullBooleanFieldDefinition,
)
from mutant.test.testcases import FieldDefinitionTestMixin
from .utils import BaseModelDefinitionTestCase
class BooleanFieldDefinitionTestMixin(FieldDefinitionTestMixin):
field_definition_category = _('Boolean')
def test_create_with_default(self):
super(BooleanFieldDefinitionTestMixin, self).test_create_with_default()
class BooleanFieldDefinitionTest(BooleanFieldDefinitionTestMixin,
BaseModelDefinitionTestCase):
field_definition_cls = BooleanFieldDefinition
field_definition_init_kwargs = {'default': True}
field_values = (False, True)
class NullBooleanFieldDefinitionTest(BooleanFieldDefinitionTestMixin,
BaseModelDefinitionTestCase):
field_definition_cls = NullBooleanFieldDefinition
field_values = (True, None)
| 33.258065
| 79
| 0.775946
|
58cf9336c8eba0da9e03f6623aa2a3ed793d2f19
| 7,188
|
py
|
Python
|
intersight/models/feedback_feedback_data.py
|
gumpcraca/intersight-python
|
780e6703c739f329084beacbbf2ad7a6a2e59b2b
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/feedback_feedback_data.py
|
gumpcraca/intersight-python
|
780e6703c739f329084beacbbf2ad7a6a2e59b2b
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/feedback_feedback_data.py
|
gumpcraca/intersight-python
|
780e6703c739f329084beacbbf2ad7a6a2e59b2b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-255
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FeedbackFeedbackData(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'comment': 'str',
'email': 'str',
'evaluation': 'str',
'follow_up': 'bool',
'trace_ids': 'object',
'type': 'str'
}
attribute_map = {
'comment': 'Comment',
'email': 'Email',
'evaluation': 'Evaluation',
'follow_up': 'FollowUp',
'trace_ids': 'TraceIds',
'type': 'Type'
}
def __init__(self, comment=None, email=None, evaluation='Excellent', follow_up=None, trace_ids=None, type='Evaluation'):
"""
FeedbackFeedbackData - a model defined in Swagger
"""
self._comment = None
self._email = None
self._evaluation = None
self._follow_up = None
self._trace_ids = None
self._type = None
if comment is not None:
self.comment = comment
if email is not None:
self.email = email
if evaluation is not None:
self.evaluation = evaluation
if follow_up is not None:
self.follow_up = follow_up
if trace_ids is not None:
self.trace_ids = trace_ids
if type is not None:
self.type = type
@property
def comment(self):
"""
Gets the comment of this FeedbackFeedbackData.
Text of the feedback
:return: The comment of this FeedbackFeedbackData.
:rtype: str
"""
return self._comment
@comment.setter
def comment(self, comment):
"""
Sets the comment of this FeedbackFeedbackData.
Text of the feedback
:param comment: The comment of this FeedbackFeedbackData.
:type: str
"""
self._comment = comment
@property
def email(self):
"""
Gets the email of this FeedbackFeedbackData.
user email
:return: The email of this FeedbackFeedbackData.
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""
Sets the email of this FeedbackFeedbackData.
user email
:param email: The email of this FeedbackFeedbackData.
:type: str
"""
self._email = email
@property
def evaluation(self):
"""
Gets the evaluation of this FeedbackFeedbackData.
evaluation type
:return: The evaluation of this FeedbackFeedbackData.
:rtype: str
"""
return self._evaluation
@evaluation.setter
def evaluation(self, evaluation):
"""
Sets the evaluation of this FeedbackFeedbackData.
evaluation type
:param evaluation: The evaluation of this FeedbackFeedbackData.
:type: str
"""
allowed_values = ["Excellent", "Poor", "Fair", "Good"]
if evaluation not in allowed_values:
raise ValueError(
"Invalid value for `evaluation` ({0}), must be one of {1}"
.format(evaluation, allowed_values)
)
self._evaluation = evaluation
@property
def follow_up(self):
"""
Gets the follow_up of this FeedbackFeedbackData.
if user open for follow-up or not
:return: The follow_up of this FeedbackFeedbackData.
:rtype: bool
"""
return self._follow_up
@follow_up.setter
def follow_up(self, follow_up):
"""
Sets the follow_up of this FeedbackFeedbackData.
if user open for follow-up or not
:param follow_up: The follow_up of this FeedbackFeedbackData.
:type: bool
"""
self._follow_up = follow_up
@property
def trace_ids(self):
"""
Gets the trace_ids of this FeedbackFeedbackData.
Bunch of last traceId for reproducing user last activity
:return: The trace_ids of this FeedbackFeedbackData.
:rtype: object
"""
return self._trace_ids
@trace_ids.setter
def trace_ids(self, trace_ids):
"""
Sets the trace_ids of this FeedbackFeedbackData.
Bunch of last traceId for reproducing user last activity
:param trace_ids: The trace_ids of this FeedbackFeedbackData.
:type: object
"""
self._trace_ids = trace_ids
@property
def type(self):
"""
Gets the type of this FeedbackFeedbackData.
Type from user
:return: The type of this FeedbackFeedbackData.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this FeedbackFeedbackData.
Type from user
:param type: The type of this FeedbackFeedbackData.
:type: str
"""
allowed_values = ["Evaluation", "Bug"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, FeedbackFeedbackData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 25.856115
| 124
| 0.555787
|
0ace4aec127febad373a27588d62540537ca616d
| 5,433
|
py
|
Python
|
interactive_sdk.py
|
greenkeytech/discovery-sdk
|
3c0357ef98a723f2eaa3f190435d230917d82eea
|
[
"Apache-2.0"
] | 12
|
2019-08-13T14:08:17.000Z
|
2022-02-11T16:56:05.000Z
|
interactive_sdk.py
|
finos/greenkey-discovery-sdk
|
3c0357ef98a723f2eaa3f190435d230917d82eea
|
[
"Apache-2.0"
] | 26
|
2019-08-01T14:06:21.000Z
|
2021-03-11T17:10:57.000Z
|
interactive_sdk.py
|
greenkeytech/discovery-sdk
|
3c0357ef98a723f2eaa3f190435d230917d82eea
|
[
"Apache-2.0"
] | 5
|
2019-09-23T16:09:35.000Z
|
2021-03-31T23:24:31.000Z
|
#!/usr/bin/env python3
import os
from collections import namedtuple
import dotenv
import pandas as pd
import requests
import streamlit as st
from PIL import Image
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from launch import wait_on_service
from testing.discovery_interface import (
reload_discovery_config,
submit_discovery_transcript,
)
env = dotenv.dotenv_values("client.env")
class RetryRequest(Retry):
"""
Custom retry class with max backoff set to TIMEOUT from client.env
"""
BACKOFF_MAX = float(env["TIMEOUT"])
retries = RetryRequest(
total=int(env["RETRIES"]), backoff_factor=1.0, status_forcelist=[500, 502, 503, 504]
)
request_session = requests.Session()
request_session.mount("http://", HTTPAdapter(max_retries=retries))
ENCODING = "utf-8"
# None here prints all dataframe rows
display_rows = int(os.environ.get("DISCOVERY_DISPLAY_ROWS", 0))
pd.set_option("display.max_rows", display_rows if display_rows > 0 else None)
# TODO: add component_list
Entity = namedtuple("Entity", ["entity", "value", "tokens", "indices"])
def get_entities_from_discovery(payload):
entities = []
for intent in payload["intents"]:
entities += intent["entities"]
return entities
def format_component_list(component_list):
# Takes a component list, like
# [
# [
# "entity label"
# "entity value",
# some other values
# ],
# ]
# and formats it into a dictionary
# in comment instead of docstring due to https://github.com/streamlit/streamlit/issues/533
formatted_values = {}
for component in component_list:
formatted_values[component[0]] = component[1]
return formatted_values
def tidy_lattice_path(lattice_path):
# Returns just first index of lattice path
# in comment instead of docstring due to https://github.com/streamlit/streamlit/issues/533
return [_[0] for _ in lattice_path]
def format_entity_match(label, match):
# TODO: add component_list
# format_component_list(match["component_list"] if "component_list" in match else []),
return Entity(
label,
match["value"],
match["interpreted_transcript"],
tidy_lattice_path(match["lattice_path"]),
)
def get_entity_matches(label, entity):
entity_list = []
for matches in entity["matches"]:
# Remove if removing GROUP_ENTITIES=True
entity_list += [
format_entity_match(label, match)
for match in (matches if isinstance(matches, (list, tuple)) else [matches])
]
return entity_list
def format_entities(entities, config):
# TODO: add sidebar glossary
entity_list = []
for entity in entities:
label = entity["label"]
if label not in config["entities"]:
continue
entity_list += get_entity_matches(label, entity)
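    # Sort by the index of the first matched token; the small len(indices)/100 offset
    # breaks ties so that longer matches come first (a reading of the heuristic, not
    # documented upstream).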
entity_list = sorted(entity_list, key=lambda e: e.indices[0] - len(e.indices) / 100)
return pd.DataFrame.from_records(entity_list, columns=Entity._fields)
def show_matched_entities(payload, config, show_ents):
if show_ents:
entities = get_entities_from_discovery(payload)
entity_df = format_entities(entities, config)
st.write(entity_df)
def reload_discovery():
with st.spinner("Reloading config"):
if reload_discovery_config():
st.success("Successfully reloaded config")
else:
st.error("Failed to reload config")
def set_intent(config):
intent = st.selectbox("Intent", config["intents"])
return intent
def test_an_interpreter(config):
intent = set_intent(config)
transcript = st.text_area("Input transcript", "eur$ 5y 3x6 5mio")
payload = submit_discovery_transcript(transcript, [intent])
# Sidebar
show_everything = st.sidebar.checkbox("Verbose discovery output", False)
show_ents = st.sidebar.checkbox("Show matched entities", True)
if show_everything:
st.write(payload)
if (
"intents" in payload
and payload["intents"]
and "entities" in payload["intents"][0]
):
show_matched_entities(payload, config, show_ents)
schema_key = st.sidebar.text_input("Schema key:", "interpreted_quote")
if schema_key and schema_key in payload.keys():
st.write(payload[schema_key])
def entity_library(config):
st.write("Available entities")
st.write(
{
k: v
if not k.startswith("_")
else {"[built-in]": {"samples": v.get("samples", [])}}
for k, v in config["entities"].items()
}
)
def main():
with st.spinner("Waiting for discovery"):
wait_on_service(":".join([env["DISCOVERY_HOST"], env["DISCOVERY_PORT"]]))
# UI FOR DEV TOOL
st.image(Image.open("logo.jpg"), width=150)
st.markdown("## Discovery Interactive SDK")
reload_button = st.sidebar.button("Reload Discovery Config")
if reload_button:
reload_discovery()
option = st.sidebar.selectbox("Mode", ["Test an interpreter", "Entity library"])
# Domain / Intent Config
config = request_session.get(
f"{env['DISCOVERY_HOST']}:{env['DISCOVERY_PORT']}/developer"
).json()
if option == "Test an interpreter":
test_an_interpreter(config)
elif option == "Entity library":
entity_library(config)
if __name__ == "__main__":
main()
| 26.89604
| 94
| 0.670164
|
ab6d33fac095864c4a20c21a0067ca9219317bd4
| 826
|
py
|
Python
|
tests/test_eml.py
|
EDIorg/pasta-upload-tweeter
|
c4c4c92bce9a63cc36d483353fd01fb99b69ed6b
|
[
"MIT"
] | 1
|
2017-11-18T04:18:34.000Z
|
2017-11-18T04:18:34.000Z
|
tests/test_eml.py
|
servilla/pasta-upload-tweeter
|
c4c4c92bce9a63cc36d483353fd01fb99b69ed6b
|
[
"MIT"
] | null | null | null |
tests/test_eml.py
|
servilla/pasta-upload-tweeter
|
c4c4c92bce9a63cc36d483353fd01fb99b69ed6b
|
[
"MIT"
] | 1
|
2019-03-17T16:09:28.000Z
|
2019-03-17T16:09:28.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""":Mod: test_eml
:Synopsis:
:Author:
servilla
:Created:
12/30/17
"""
import unittest
import daiquiri
from eml import Eml
logger = daiquiri.getLogger(__name__)
class TestEml(unittest.TestCase):
eml = None
@classmethod
def setUpClass(cls):
with open('./knb-lter-nin.1.1.xml', 'r') as f:
xml = f.read().encode()
TestEml.eml = Eml(eml=xml)
def setUp(self):
pass
def tearDown(self):
pass
def test_get_title(self):
title = 'Daily Water Sample Nutrient Data for North Inlet Estuary, ' \
'South Carolina, from 1978 to 1992, North Inlet LTER'
eml_title = TestEml.eml.title
self.assertEqual(title, eml_title)
if __name__ == '__main__':
unittest.main()
| 17.574468
| 78
| 0.606538
|
488744f5417bbea23fd7c12f22333b7d2f3cf597
| 2,531
|
py
|
Python
|
lib/config.py
|
paulus99/sentinel
|
e2729d9563009c6901ae0807ddb2888b93542281
|
[
"MIT"
] | 1
|
2019-04-08T01:45:23.000Z
|
2019-04-08T01:45:23.000Z
|
lib/config.py
|
paulus99/sentinel
|
e2729d9563009c6901ae0807ddb2888b93542281
|
[
"MIT"
] | null | null | null |
lib/config.py
|
paulus99/sentinel
|
e2729d9563009c6901ae0807ddb2888b93542281
|
[
"MIT"
] | 1
|
2019-04-15T03:48:50.000Z
|
2019-04-15T03:48:50.000Z
|
"""
Set up defaults and read sentinel.conf
"""
import sys
import os
from tincoin_config import TincoinConfig
default_sentinel_config = os.path.normpath(
os.path.join(os.path.dirname(__file__), '../sentinel.conf')
)
sentinel_config_file = os.environ.get('SENTINEL_CONFIG', default_sentinel_config)
sentinel_cfg = TincoinConfig.tokenize(sentinel_config_file)
sentinel_version = "1.1.0"
min_tincoind_proto_version_with_sentinel_ping = 70207
def get_tincoin_conf():
home = os.environ.get('HOME')
if sys.platform == 'darwin':
tincoin_conf = os.path.join(home, "Library/Application Support/TincoinCore/tincoin.conf")
elif sys.platform == 'win32':
tincoin_conf = os.path.join(os.environ['APPDATA'], "TincoinCore/Tincoin.conf")
else:
tincoin_conf = os.path.join(home, ".tincoincore/tincoin.conf")
tincoin_conf = sentinel_cfg.get('tincoin_conf', tincoin_conf)
return tincoin_conf
def get_network():
return sentinel_cfg.get('network', 'mainnet')
def sqlite_test_db_name(sqlite_file_path):
(root, ext) = os.path.splitext(sqlite_file_path)
test_sqlite_file_path = root + '_test' + ext
return test_sqlite_file_path
def get_db_conn():
import peewee
env = os.environ.get('SENTINEL_ENV', 'production')
# default values should be used unless you need a different config for development
db_host = sentinel_cfg.get('db_host', '127.0.0.1')
db_port = sentinel_cfg.get('db_port', None)
db_name = sentinel_cfg.get('db_name', 'sentinel')
db_user = sentinel_cfg.get('db_user', 'sentinel')
db_password = sentinel_cfg.get('db_password', 'sentinel')
db_charset = sentinel_cfg.get('db_charset', 'utf8mb4')
db_driver = sentinel_cfg.get('db_driver', 'sqlite')
if (env == 'test'):
if db_driver == 'sqlite':
db_name = sqlite_test_db_name(db_name)
else:
db_name = "%s_test" % db_name
peewee_drivers = {
'mysql': peewee.MySQLDatabase,
'postgres': peewee.PostgresqlDatabase,
'sqlite': peewee.SqliteDatabase,
}
driver = peewee_drivers.get(db_driver)
dbpfn = 'passwd' if db_driver == 'mysql' else 'password'
db_conn = {
'host': db_host,
'user': db_user,
dbpfn: db_password,
}
if db_port:
db_conn['port'] = int(db_port)
if driver == peewee.SqliteDatabase:
db_conn = {}
db = driver(db_name, **db_conn)
return db
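# Illustrative sentinel.conf entries read by this module (values are placeholders
# except where they mirror the defaults visible above):
#
#   tincoin_conf=/home/user/.tincoincore/tincoin.conf
#   network=mainnet
#   db_driver=sqlite
#   db_name=database/sentinel.db
#   db_host=127.0.0.1
#   db_user=sentinel
#   db_password=sentinel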
tincoin_conf = get_tincoin_conf()
network = get_network()
db = get_db_conn()
| 29.776471
| 97
| 0.680363
|
8bc27f4c920ffe6fb759846c18c151f62b9f476b
| 2,552
|
py
|
Python
|
extras/apps/ngs_roi/tool_shed/ngs_roi/app.py
|
weese/seqan
|
1acb1688969c7b61497f2328af54b4d11228a484
|
[
"BSD-3-Clause"
] | 2
|
2016-10-28T07:43:39.000Z
|
2018-01-02T17:54:33.000Z
|
extras/apps/ngs_roi/tool_shed/ngs_roi/app.py
|
weese/seqan
|
1acb1688969c7b61497f2328af54b4d11228a484
|
[
"BSD-3-Clause"
] | 10
|
2015-03-02T16:45:39.000Z
|
2015-06-23T14:02:13.000Z
|
extras/apps/ngs_roi/tool_shed/ngs_roi/app.py
|
weese/seqan
|
1acb1688969c7b61497f2328af54b4d11228a484
|
[
"BSD-3-Clause"
] | 2
|
2015-06-17T11:44:36.000Z
|
2015-11-20T13:09:16.000Z
|
#!/usr/bin/env python
"""Support code for writing the NGS ROI report generation apps."""
__author__ = 'Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>'
__copyright__ = 'Copyright 2013, Freie Universitaet Berlin'
__license__ = 'BSD 3-clause'
import os
import os.path
import subprocess
class App(object):
"""Base class with helper functionality for NGS ROI report generators."""
def __init__(self, args):
self.args = args
def prepareOutDir(self):
"""Create output directory if necessary."""
if not os.path.exists(self.args.out_dir):
os.makedirs(self.args.out_dir)
def buildHref(self, ref, begin_pos, end_pos):
vals = {'ref': ref, 'start_pos': begin_pos + 1, 'end_pos': end_pos}
if self.args.link_type == 'local_igv':
vals['host'] = self.args.igv_host
vals['port'] = self.args.igv_port
return 'http://%(host)s:%(port)d/goto?locus=%(ref)s:%(start_pos)d-%(end_pos)d' % vals
else: # self.args.link_type == 'ucsc'
vals['org'] = self.args.ucsc_org
vals['db'] = self.args.ucsc_db
vals['ref'] = self.args.ucsc_chr_prefix + vals['ref']
return 'http://genome.ucsc.edu/cgi-bin/hgTracks?org=%(org)s&db=%(db)s&position=%(ref)s:%(start_pos)d-%(end_pos)d' % vals
class PlotThumbnailsRunner(object):
"""Helper class for running the roi_plot_thumbnails program.
:ivar args: Arguments as returned by argparse.
"""
def __init__(self, args):
self.args = args
def run(self):
cmd_args = ['-if', self.args.in_file,
'--input-file-file-ext', 'roi',
'-o', os.path.join(self.args.out_dir, 'thumbnail_'),
'--max-rois', self.args.max_rois,
'--max-value', self.args.max_value,
'--num-cols', self.args.num_cols,
'--num-rows', self.args.num_rows,
'--plot-height', self.args.plot_height,
'--plot-width', self.args.plot_width,
'--border-width', self.args.border_width,
'--spacing', self.args.spacing]
cmd_args = ['roi_plot_thumbnails'] + map(str, cmd_args)
#import pdb; pdb.set_trace()
import sys
print >>sys.stderr, 'Running %s' % ' '.join(cmd_args)
p = subprocess.Popen(cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
res = p.wait()
if res:
print 'ERROR', p.stdin, p.stderr
return res
| 37.529412
| 132
| 0.581897
|
c817c124851fb452a0339eb71c07ceef02d49957
| 17,536
|
py
|
Python
|
app.py
|
Abarnakumar28/crop_recommendation
|
8ce0cd07d8c032814d06f2bb96f8b39bf66d5fa2
|
[
"MIT"
] | null | null | null |
app.py
|
Abarnakumar28/crop_recommendation
|
8ce0cd07d8c032814d06f2bb96f8b39bf66d5fa2
|
[
"MIT"
] | null | null | null |
app.py
|
Abarnakumar28/crop_recommendation
|
8ce0cd07d8c032814d06f2bb96f8b39bf66d5fa2
|
[
"MIT"
] | null | null | null |
import os
import dash
import pickle
import base64
import numpy as np
import pandas as pd
from dash import dcc
from dash import html
import plotly.graph_objects as go
from dash.dependencies import Input, Output, State
from scipy.spatial.distance import cdist
CROP_IMG_PATH = 'crops'
DATA_PATH = 'Crop_recommendation.csv'
TRAINED_MODEL_PATH = 'KNN_model_crop_prediction.pkl'
crop_img_files = [os.path.join(CROP_IMG_PATH, f) for f in os.listdir(CROP_IMG_PATH)]
def crop_id_col(df = None):
mapping = {c : i for i, c in enumerate(list(df['label'].unique()), 1)}
df['crop_id'] = [mapping[c] for c in df['label']]
return df
# def get_sample_data(df = None, col_name = None, No_of_samples = 20):
# mapping = {c : i for i, c in enumerate(list(df['label'].unique()), 1)}
# frames = []
# for k, v in mapping.items():
# samp = df.loc[(df[col_name] ==v)].iloc[:No_of_samples]
# frames.append(samp)
# return pd.concat(frames)
def data_grouping(df):
dummy = pd.DataFrame()
dummy['Nitrogen'] = pd.cut(df["N"], [-1, 70, 170], labels = ['N(0-70)', 'N(70-140)'])
dummy['Phosphorous'] = pd.cut(df["P"], [-1, 80, 170], labels = ['P(0-80)', 'P(80-150)'])
dummy['Potassium'] = pd.cut(df["K"], [-1, 50, 100, 150, 220], labels = ['K(0-50)', 'K(50-100)', 'K(100-150)', 'K(150-210)'])
dummy['Temp(°C)'] = pd.cut(df["temperature"], [-1, 15, 30, 50], labels = ['T(0-15°C)', 'T(15-30°C)', 'T(30-50°C)'])
dummy['Humidity(%)'] = pd.cut(df["humidity"], [-2, 30, 60, 110], labels = ['H(0-30%)', 'H(30-60%)', 'H(60-100%)'])
dummy['PH'] = pd.cut(df["ph"], [0, 5, 12], labels = ['ph(0-5)', 'ph(5-10)'])
    dummy['Rainfall(mm)'] = pd.cut(df["rainfall"], [-1, 100, 200, 350], labels = ['rain(0-100mm)', 'rain(100-200mm)', 'rain(200-350mm)'])
dummy['Crop_name'] = df['label']
dummy['Crop_id'] = df['crop_id']
return dummy
def model_inference(feature_arr):
with open(TRAINED_MODEL_PATH, 'rb') as pickle_file:
model = pickle.load(pickle_file)
pred = model.predict(feature_arr)
return pred
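# Illustrative call (the feature order matches the prediction callback below: N, P, K,
# temperature, humidity, ph, rainfall; the numbers are placeholders):
#
#   model_inference(np.array([[90, 42, 43, 20.8, 82.0, 6.5, 202.9]]))
#   # -> a length-1 array containing the predicted crop label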
def get_img_file(pred, crops_list = crop_img_files):
crop_name = pred[0]
img_file = [f for f in crops_list if crop_name in f][0]
return img_file
def get_image(img_path):
encoded_image = base64.b64encode(open(img_path, 'rb').read())
img_src = 'data:image/png;base64,{}'.format(encoded_image.decode())
return img_src
def table_fig(data_df):
drop_table = go.Figure(
data=[go.Table(
header=dict(values=list(data_df.columns),
fill_color='paleturquoise',
align='left'),
cells=dict(values=[data_df.N, data_df.P, data_df.K, data_df.temperature,
data_df.humidity, data_df.ph, data_df.rainfall, data_df.label],
fill_color='lavender',
align='left'))
])
drop_table.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',
'paper_bgcolor': 'rgba(0, 0, 0, 0)'
})
return drop_table
def build_fig(df):
layout = go.Layout(
margin={'l': 33, 'r': 40, 't': 20, 'b': 10},
)
fig = go.Figure(
go.Parcats(
dimensions=[
{'label': 'Nitrogen',
'values': list(df['Nitrogen'])},
{'label': 'Phosphorous',
'values': list(df['Phosphorous'])},
{'label': 'Potassium',
'values': list(df['Potassium'])},
{'label': 'Temperature',
'values': list(df['Temp(°C)'])},
{'label': 'Humidity',
'values': list(df['Humidity(%)'])},
{'label': 'PH',
'values': list(df['PH'])},
{'label': 'Rainfall',
'values': list(df['Rainfall(mm)'])},
{'label': 'Crop_name',
'values': list(df['Crop_name'])},
],
labelfont={'size': 16, 'family': 'Times', 'color':'yellow'},
tickfont={'size': 16, 'family': 'Times', 'color':'yellow'},
hoveron = 'category',
hoverinfo = 'count+probability',
# line = go.parcats.Line(color='#00FA9A', shape= 'hspline'),
line={'color': df.Crop_id}
),
layout=layout)
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',
'paper_bgcolor': 'rgba(0, 0, 0, 0)'
})
return fig
# data_df = pd.read_csv(DATA_PATH)
# mod_df = crop_id_col(df = data_df)
# vis_df = data_grouping(mod_df)
# Initialise the app
app = dash.Dash(__name__)
server = app.server
# Define the app
app.layout = html.Div(children=[
html.Div(className='row', # Define the row element
children=[
html.Div(className='four columns div-user-controls',
children = [
html.P('Crop recommendation application', style = {'font-size': '35px'}),
html.H2('1. Precision agriculture is currently popular. It helps farmers to develop intelligent agricultural strategies.', style = {'font-size': '20px'}),
                                html.H2('2. Based on seven characteristics, this application will recommend the ideal crop for farmers to grow on their fields.', style = {'font-size': '20px'}),
html.Br(),
html.Div([
html.H2('Nitrogen content in soil:', style = {'font-size': '15px'}),
dcc.Input(id="N",
placeholder='Enter Nitrogen content in soil...',
type='text',
persistence = False,
style={'width': '400px'}
)]),
html.Div([
html.H2('Phosphorous content in soil:', style = {'font-size': '15px'}),
dcc.Input(id="P",
placeholder='Enter Phosphorous content in soil...',
type='text',
persistence = False,
style={'width': '400px'}
)]),
html.Div([
html.H2('Potassium content in soil:', style = {'font-size': '15px'}),
dcc.Input(id="K",
placeholder='Enter Potassium content in soil...',
type='text',
persistence = False,
style={'width': '400px'}
)]),
html.Div([
html.H2('Temparature in °C:', style = {'font-size': '15px'}),
dcc.Input(id="temp",
placeholder='Enter Temp in °C...',
type='text',
persistence = False,
style={'width': '400px'}
)]),
html.Div([
html.H2('Humidity in %:', style = {'font-size': '15px'}),
dcc.Input(id="hum",
placeholder='Enter Humidity in %...',
type='text',
persistence = False,
style={'width': '400px'}
)]),
html.Div([
html.H2('PH value of the soil (between 2-9):', style = {'font-size': '15px'}),
dcc.Input(id="ph",
placeholder='Enter PH value of the soil...',
type='text',
persistence = False,
style={'width': '400px'}
)]),
html.Div([
html.H2('Rainfall in mm:', style = {'font-size': '15px'}),
dcc.Input(id="rain",
placeholder='Enter rainfall in mm...',
type='text',
persistence = False,
style={'width': '400px'}
)]),
html.Br(), html.Br(),
dcc.Store(id = 'store_inputs'),
html.Button('Submit', id='submit_button', n_clicks=0, disabled=False,
style = {'font-size': '15px',
'cursor': 'pointer',
'text-align': 'center',
'color': 'white',
}
),
]), # four column Div
html.Div(className='eight columns div-for-charts bg-grey', # Define the right element
style={'background-image':'url("/assets/agriculture.png")','height':150,'width':1300},
children = [
html.H2('Precision Agriculture', style = {'text-align':'center', "padding-top": "10px",
'font-size': '35px', 'color': 'red'}),
html.H2('Data visualization:', style = {"padding-top": "80px",
"padding-left": "0",'font-size': '25px'
}),
html.Div([
dcc.Dropdown(
id="drop_down",
options=[
{'label': 'Categorical graph', 'value': 'graph'},
{'label': 'Data table', 'value': 'table'},
],
style={'height':30, 'width':600},
value='graph',
clearable=False)
]),
html.Br(),
html.Div([
dcc.Graph(id='data_visualization',
config={'displaylogo': False},
style={'height':550,'width':1200},
#animate=True,
#figure = build_fig(vis_df)
)
]),
html.Div([
html.Div([
html.H2('Prediction will be displayed here:', style = {"padding-top": "0px", 'font-size': '25px'}),
html.Img(id = "prediction_image")
], className="six columns"),
html.Div(id='crop_name', className="six columns"),
], className="row"),
]), # eight column Div
html.Br(),html.Br(),html.Br()
]) # row Div
]) # main Div
@app.callback(Output("data_visualization", "figure"),
Input('drop_down', 'value'),
)
def dropdown_options(drop_value):
data_df = pd.read_csv(DATA_PATH)
fig_table = table_fig(data_df)
mod_df = crop_id_col(df = data_df)
vis_df = data_grouping(mod_df)
fig_graph = build_fig(vis_df)
if drop_value == 'table':
return fig_table
if drop_value == 'graph':
return fig_graph
else:
        return dash.no_update
@app.callback(Output("store_inputs", "data"),
[Input('N', 'value'),
Input("P", "value"),
Input("K", "value"),
Input("temp", "value"),
Input("hum", "value"),
Input("ph", "value"),
Input("rain", "value")])
def store_inputs(N, P, K, temp, hum, ph, rain):
features_str = [N, P, K, temp, hum, ph, rain]
if len(features_str) == 7 and None not in features_str and '' not in features_str:
return {'N':N, 'P':P, 'K': K, 'temp':temp, 'hum':hum, 'ph':ph, 'rain':rain}
@app.callback([Output("prediction_image", "src"),
Output('crop_name', 'children')],
Input('submit_button', 'n_clicks'),
State('store_inputs', 'data'))
def update_crop_name(click, stored_inputs):
trigger = [p['prop_id'] for p in dash.callback_context.triggered][0]
if stored_inputs is not None:
N = float(stored_inputs['N'])
P =float(stored_inputs['P'])
K =float(stored_inputs['K'])
temp =float(stored_inputs['temp'])
hum =float(stored_inputs['hum'])
ph =float(stored_inputs['ph'])
rain =float(stored_inputs['rain'])
pred = model_inference(np.array([[N, P, K, temp, hum, ph, rain]]))
print(pred)
pred_crop_name = pred[0]
pred_img_file = get_img_file(pred)
print(pred_crop_name)
fig = get_image(pred_img_file)
if 'submit_button' in trigger:
return [fig,
html.P([f'Recommended crop: {pred_crop_name.capitalize()}',
html.Br(),
                            'Our AI-based decision-making system is suggesting ',
f'{pred_crop_name.capitalize()}',
' depending on the parameters entered.'
],
style = {"padding-top": "20px",
'display': 'list',
'font-size': '25px',}),
]
else:
return dash.no_update
else:
return dash.no_update
@app.callback([Output('N', 'value'),
Output("P", "value"),
Output("K", "value"),
Output("temp", "value"),
Output("hum", "value"),
Output("ph", "value"),
Output("rain", "value")],
Input('submit_button', 'n_clicks'))
def reset_inputs(click):
trigger = [p['prop_id'] for p in dash.callback_context.triggered][0]
if 'submit_button' in trigger:
return ['']*7
else:
return dash.no_update
# Run the app
if __name__ == '__main__':
app.run_server(debug=False)
| 50.682081
| 202
| 0.369469
|
6e801f37ce8a3508a5a618a8209c91ae236aa915
| 27,347
|
py
|
Python
|
xclim/sdba/utils.py
|
fossabot/xclim
|
31fbdce6545d29e8a762b64b880e04eeb601f9eb
|
[
"Apache-2.0"
] | null | null | null |
xclim/sdba/utils.py
|
fossabot/xclim
|
31fbdce6545d29e8a762b64b880e04eeb601f9eb
|
[
"Apache-2.0"
] | null | null | null |
xclim/sdba/utils.py
|
fossabot/xclim
|
31fbdce6545d29e8a762b64b880e04eeb601f9eb
|
[
"Apache-2.0"
] | null | null | null |
"""
sdba utilities
--------------
"""
from typing import Callable, List, Mapping, Optional, Tuple, Union
from warnings import warn
import numpy as np
import xarray as xr
from boltons.funcutils import wraps
from dask import array as dsk
from scipy.interpolate import griddata, interp1d
from xclim.core.calendar import _interpolate_doy_calendar # noqa
from xclim.core.utils import ensure_chunk_size
from .base import Grouper, parse_group
from .nbutils import _extrapolate_on_quantiles
MULTIPLICATIVE = "*"
ADDITIVE = "+"
loffsets = {"MS": "14d", "M": "15d", "YS": "181d", "Y": "182d", "QS": "45d", "Q": "46d"}
def _ecdf_1d(x, value):
sx = np.r_[-np.inf, np.sort(x, axis=None)]
return np.searchsorted(sx, value, side="right") / np.sum(~np.isnan(sx))
def map_cdf_1d(x, y, y_value):
"""Return the value in `x` with the same CDF as `y_value` in `y`."""
q = _ecdf_1d(y, y_value)
_func = np.nanquantile
return _func(x, q=q)
def map_cdf(
ds: xr.Dataset,
*,
y_value: xr.DataArray,
dim,
):
"""Return the value in `x` with the same CDF as `y_value` in `y`.
This function is meant to be wrapped in a `Grouper.apply`.
Parameters
----------
ds : xr.Dataset
Variables: x, Values from which to pick,
y, Reference values giving the ranking
y_value : float, array
Value within the support of `y`.
dim : str
Dimension along which to compute quantile.
Returns
-------
array
Quantile of `x` with the same CDF as `y_value` in `y`.
"""
return xr.apply_ufunc(
map_cdf_1d,
ds.x,
ds.y,
input_core_dims=[dim] * 2,
output_core_dims=[["x"]],
vectorize=True,
keep_attrs=True,
kwargs={"y_value": np.atleast_1d(y_value)},
output_dtypes=[ds.x.dtype],
)
def ecdf(x: xr.DataArray, value: float, dim: str = "time") -> xr.DataArray:
"""Return the empirical CDF of a sample at a given value.
Parameters
----------
x : array
Sample.
value : float
The value within the support of `x` for which to compute the CDF value.
dim : str
Dimension name.
Returns
-------
xr.DataArray
Empirical CDF.
"""
return (x <= value).sum(dim) / x.notnull().sum(dim)
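# Illustrative example (assumes a 1-D sample along "time"):
#
#   da = xr.DataArray([1.0, 2.0, 3.0, 4.0], dims="time")
#   ecdf(da, 2.5)  # -> 0.5, since half of the sample is <= 2.5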
def ensure_longest_doy(func: Callable) -> Callable:
"""Ensure that selected day is the longest day of year for x and y dims."""
@wraps(func)
def _ensure_longest_doy(x, y, *args, **kwargs):
if (
hasattr(x, "dims")
and hasattr(y, "dims")
and "dayofyear" in x.dims
and "dayofyear" in y.dims
and x.dayofyear.max() != y.dayofyear.max()
):
warn(
(
"get_correction received inputs defined on different dayofyear ranges. "
"Interpolating to the longest range. Results could be strange."
),
stacklevel=4,
)
if x.dayofyear.max() < y.dayofyear.max():
x = _interpolate_doy_calendar(x, int(y.dayofyear.max()))
else:
y = _interpolate_doy_calendar(y, int(x.dayofyear.max()))
return func(x, y, *args, **kwargs)
return _ensure_longest_doy
@ensure_longest_doy
def get_correction(x: xr.DataArray, y: xr.DataArray, kind: str) -> xr.DataArray:
"""Return the additive or multiplicative correction/adjustment factors."""
with xr.set_options(keep_attrs=True):
if kind == ADDITIVE:
out = y - x
elif kind == MULTIPLICATIVE:
out = y / x
else:
raise ValueError("kind must be + or *.")
if isinstance(out, xr.DataArray):
out.attrs["kind"] = kind
return out
@ensure_longest_doy
def apply_correction(
x: xr.DataArray, factor: xr.DataArray, kind: Optional[str] = None
) -> xr.DataArray:
"""Apply the additive or multiplicative correction/adjustment factors.
If kind is not given, default to the one stored in the "kind" attribute of factor.
"""
kind = kind or factor.get("kind", None)
with xr.set_options(keep_attrs=True):
if kind == ADDITIVE:
out = x + factor
elif kind == MULTIPLICATIVE:
out = x * factor
else:
raise ValueError
return out
def invert(x: xr.DataArray, kind: Optional[str] = None) -> xr.DataArray:
"""Invert a DataArray either additively (-x) or multiplicatively (1/x).
If kind is not given, default to the one stored in the "kind" attribute of x.
"""
kind = kind or x.get("kind", None)
with xr.set_options(keep_attrs=True):
if kind == ADDITIVE:
return -x
if kind == MULTIPLICATIVE:
return 1 / x # type: ignore
raise ValueError
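# Illustrative check (hypothetical helper, using the module-level imports):
# applying the factors returned by get_correction to the reference recovers the
# target, and applying the inverted factors undoes the adjustment, for both the
# additive and the multiplicative kinds.
def _demo_correction_roundtrip():
    x = xr.DataArray(np.array([1.0, 2.0, 4.0]), dims=("time",))
    y = xr.DataArray(np.array([2.0, 4.0, 8.0]), dims=("time",))
    for kind in (ADDITIVE, MULTIPLICATIVE):
        af = get_correction(x, y, kind)  # y - x or y / x
        np.testing.assert_allclose(apply_correction(x, af, kind), y)
        np.testing.assert_allclose(apply_correction(y, invert(af, kind), kind), x)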
@parse_group
def broadcast(
grouped: xr.DataArray,
x: xr.DataArray,
*,
group: Union[str, Grouper] = "time",
interp: str = "nearest",
sel: Optional[Mapping[str, xr.DataArray]] = None,
) -> xr.DataArray:
"""Broadcast a grouped array back to the same shape as a given array.
Parameters
----------
grouped : xr.DataArray
The grouped array to broadcast like `x`.
x : xr.DataArray
The array to broadcast grouped to.
group : Union[str, Grouper]
Grouping information. See :py:class:`xclim.sdba.base.Grouper` for details.
interp : {'nearest', 'linear', 'cubic'}
        The interpolation method to use.
sel : Mapping[str, xr.DataArray]
Mapping of grouped coordinates to x coordinates (other than the grouping one).
Returns
-------
xr.DataArray
"""
if sel is None:
sel = {}
if group.prop != "group" and group.prop not in sel:
sel.update({group.prop: group.get_index(x, interp=interp != "nearest")})
if sel:
# Extract the correct mean factor for each time step.
        if interp == "nearest":  # Select the nearest time group and quantile.
            grouped = grouped.sel(sel, method="nearest")
        else:  # Interpolate along both the time group and the quantile.
# For `.interp` we need to explicitly pass the shared dims
# (see pydata/xarray#4463 and Ouranosinc/xclim#449,567)
sel.update(
{dim: x[dim] for dim in set(grouped.dims).intersection(set(x.dims))}
)
if group.prop != "group":
grouped = add_cyclic_bounds(grouped, group.prop, cyclic_coords=False)
if interp == "cubic" and len(sel.keys()) > 1:
interp = "linear"
warn(
"Broadcasting operations in multiple dimensions can only be done with linear and nearest-neighbor"
" interpolation, not cubic. Using linear."
)
grouped = grouped.interp(sel, method=interp).astype(grouped.dtype)
for var in sel.keys():
if var in grouped.coords and var not in grouped.dims:
grouped = grouped.drop_vars(var)
if group.prop == "group" and "group" in grouped.dims:
grouped = grouped.squeeze("group", drop=True)
return grouped
def equally_spaced_nodes(n: int, eps: Union[float, None] = 1e-4) -> np.array:
"""Return nodes with `n` equally spaced points within [0, 1] plus two end-points.
Parameters
----------
n : int
Number of equally spaced nodes.
eps : float, None
Distance from 0 and 1 of end nodes. If None, do not add endpoints.
Returns
-------
np.array
Nodes between 0 and 1.
Notes
-----
For n=4, eps=0 : 0---x------x------x------x---1
"""
dq = 1 / n / 2
q = np.linspace(dq, 1 - dq, n)
if eps is None:
return q
return np.insert(np.append(q, 1 - eps), 0, eps)
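# Illustrative check (hypothetical helper): for n=4 the inner nodes are the
# mid-bin quantiles of the docstring diagram above, and the default eps adds the
# two near-end points.
def _demo_equally_spaced_nodes():
    np.testing.assert_allclose(equally_spaced_nodes(4, eps=None), [0.125, 0.375, 0.625, 0.875])
    q = equally_spaced_nodes(4, eps=1e-4)
    assert len(q) == 6 and q[0] == 1e-4 and q[-1] == 1 - 1e-4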
def add_cyclic_bounds(
da: xr.DataArray, att: str, cyclic_coords: bool = True
) -> Union[xr.DataArray, xr.Dataset]:
"""Reindex an array to include the last slice at the beginning and the first at the end.
This is done to allow interpolation near the end-points.
Parameters
----------
da : Union[xr.DataArray, xr.Dataset]
An array
att : str
The name of the coordinate to make cyclic
cyclic_coords : bool
If True, the coordinates are made cyclic as well,
if False, the new values are guessed using the same step as their neighbour.
Returns
-------
Union[xr.DataArray, xr.Dataset]
        da, but with the last element along att prepended and the first one appended.
"""
qmf = da.pad({att: (1, 1)}, mode="wrap")
if not cyclic_coords:
vals = qmf.coords[att].values
diff = da.coords[att].diff(att)
vals[0] = vals[1] - diff[0]
vals[-1] = vals[-2] + diff[-1]
qmf = qmf.assign_coords({att: vals})
qmf[att].attrs.update(da.coords[att].attrs)
return ensure_chunk_size(qmf, **{att: -1})
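# Illustrative check (hypothetical helper): one wrapped slice is added on each
# side, and with cyclic_coords=False the new coordinate values extend the
# original ones by one step on each side.
def _demo_add_cyclic_bounds():
    da = xr.DataArray([10.0, 20.0, 30.0], dims=("month",), coords={"month": [1, 2, 3]})
    padded = add_cyclic_bounds(da, "month", cyclic_coords=False)
    np.testing.assert_allclose(padded, [30.0, 10.0, 20.0, 30.0, 10.0])
    np.testing.assert_allclose(padded["month"], [0, 1, 2, 3, 4])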
def extrapolate_qm(
qf: xr.DataArray,
xq: xr.DataArray,
method: str = "constant",
abs_bounds: Optional[tuple] = (-np.inf, np.inf),
) -> Tuple[xr.DataArray, xr.DataArray]:
"""Extrapolate quantile adjustment factors beyond the computed quantiles.
Parameters
----------
qf : xr.DataArray
Adjustment factors over `quantile` coordinates.
xq : xr.DataArray
Coordinates of the adjustment factors.
        For example, in EQM, these are the values at each `quantile`.
method : {"constant", "nan"}
Extrapolation method. See notes below.
abs_bounds : 2-tuple
The absolute bounds of `xq`. Defaults to (-inf, inf).
Returns
-------
qf: xr.Dataset or xr.DataArray
Extrapolated adjustment factors.
xq: xr.Dataset or xr.DataArray
Extrapolated x-values.
Notes
-----
Valid values for `method` are:
- 'nan'
Estimating values above or below the computed values will return a NaN.
- 'constant'
        The adjustment factors above and below the computed values are equal to the last
        and first computed values, respectively.
"""
warn(
(
"`extrapolate_qm` is deprecated and will be removed in xclim 0.33. "
"Extrapolation is now handled directly in `interp_on_quantiles`."
),
DeprecationWarning,
)
# constant_iqr
# Same as `constant`, but values are set to NaN if farther than one interquartile range from the min and max.
q_l, q_r = [0], [1]
x_l, x_r = [abs_bounds[0]], [abs_bounds[1]]
if method == "nan":
qf_l, qf_r = np.NaN, np.NaN
elif method == "constant":
qf_l = qf.bfill("quantiles").isel(quantiles=0)
qf_r = qf.ffill("quantiles").isel(quantiles=-1)
elif (
method == "constant_iqr"
): # This won't work because add_endpoints does not support mixed y (float and DA)
raise NotImplementedError
# iqr = np.diff(xq.interp(quantile=[0.25, 0.75]))[0]
# ql, qr = [0, 0], [1, 1]
# xl, xr = [-np.inf, xq.isel(quantile=0) - iqr], [xq.isel(quantile=-1) + iqr, np.inf]
# qml, qmr = [np.nan, qm.isel(quantile=0)], [qm.isel(quantile=-1), np.nan]
else:
raise ValueError
qf = add_endpoints(qf, left=[q_l, qf_l], right=[q_r, qf_r])
xq = add_endpoints(xq, left=[q_l, x_l], right=[q_r, x_r])
return qf, xq
def add_endpoints(
da: xr.DataArray,
left: List[Union[int, float, xr.DataArray, List[int], List[float]]],
right: List[Union[int, float, xr.DataArray, List[int], List[float]]],
dim: str = "quantiles",
) -> xr.DataArray:
"""Add left and right endpoints to a DataArray.
Parameters
----------
da : DataArray
Source array.
left : [x, y]
Values to prepend
right : [x, y]
Values to append.
dim : str
Dimension along which to add endpoints.
"""
elems = []
for (x, y) in (left, right):
if isinstance(y, xr.DataArray):
if "quantiles" not in y.dims:
y = y.expand_dims("quantiles")
y = y.assign_coords(quantiles=x)
else:
y = xr.DataArray(y, coords={dim: x}, dims=(dim,))
elems.append(y)
l, r = elems # pylint: disable=unbalanced-tuple-unpacking
out = xr.concat((l, da, r), dim=dim)
return ensure_chunk_size(out, **{dim: -1})
def _interp_on_quantiles_1D(newx, oldx, oldy, method, extrap):
mask_new = np.isnan(newx)
mask_old = np.isnan(oldy) | np.isnan(oldx)
out = np.full_like(newx, np.NaN, dtype=f"float{oldy.dtype.itemsize * 8}")
if np.all(mask_new) or np.all(mask_old):
warn(
"All-NaN slice encountered in interp_on_quantiles",
category=RuntimeWarning,
)
return out
if extrap == "constant":
fill_value = (oldy[0], oldy[-1])
else: # extrap == 'nan'
fill_value = np.NaN
out[~mask_new] = interp1d(
oldx[~mask_old],
oldy[~mask_old],
kind=method,
bounds_error=False,
fill_value=fill_value,
)(newx[~mask_new])
return out
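# Illustrative check (hypothetical helper): inside [oldx.min(), oldx.max()] the
# 1D kernel interpolates linearly; outside, extrap="constant" clamps to the
# first and last oldy values while extrap="nan" returns NaN.
def _demo_interp_on_quantiles_1d():
    oldx = np.array([0.0, 1.0, 2.0])
    oldy = np.array([10.0, 20.0, 30.0])
    newx = np.array([-1.0, 0.5, 3.0])
    out = _interp_on_quantiles_1D(newx, oldx, oldy, method="linear", extrap="constant")
    np.testing.assert_allclose(out, [10.0, 15.0, 30.0])
    out = _interp_on_quantiles_1D(newx, oldx, oldy, method="linear", extrap="nan")
    assert np.isnan(out[0]) and out[1] == 15.0 and np.isnan(out[2])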
def _interp_on_quantiles_2D(newx, newg, oldx, oldy, oldg, method, extrap): # noqa
mask_new = np.isnan(newx) | np.isnan(newg)
mask_old = np.isnan(oldy) | np.isnan(oldx) | np.isnan(oldg)
out = np.full_like(newx, np.NaN, dtype=f"float{oldy.dtype.itemsize * 8}")
if np.all(mask_new) or np.all(mask_old):
warn(
"All-NaN slice encountered in interp_on_quantiles",
category=RuntimeWarning,
)
return out
out[~mask_new] = griddata(
(oldx[~mask_old], oldg[~mask_old]),
oldy[~mask_old],
(newx[~mask_new], newg[~mask_new]),
method=method,
)
if method == "nearest" or extrap != "nan":
# 'nan' extrapolation implicit for cubic and linear interpolation.
out = _extrapolate_on_quantiles(out, oldx, oldg, oldy, newx, newg, extrap)
return out
@parse_group
def interp_on_quantiles(
newx: xr.DataArray,
xq: xr.DataArray,
yq: xr.DataArray,
*,
group: Union[str, Grouper] = "time",
method: str = "linear",
extrapolation: str = "constant",
):
"""Interpolate values of yq on new values of x.
Interpolate in 2D with :py:func:`~scipy.interpolate.griddata` if grouping is used,
in 1D otherwise, with :py:class:`~scipy.interpolate.interp1d`. Any NaNs in xq
or yq are removed from the input map. Similarly, NaNs in newx are left NaNs.
Parameters
----------
newx : xr.DataArray
        The values at which to evaluate `yq`. If `group` has group information,
        `newx` should have a coordinate with the same name as the group name.
        In that case, 2D interpolation is used.
xq, yq : xr.DataArray
Coordinates and values on which to interpolate. The interpolation is done
along the "quantiles" dimension if `group` has no group information.
If it does, interpolation is done in 2D on "quantiles" and on the group dimension.
group : Union[str, Grouper]
The dimension and grouping information. (ex: "time" or "time.month").
Defaults to "time".
method : {'nearest', 'linear', 'cubic'}
The interpolation method.
extrapolation : {'constant', 'nan'}
The extrapolation method used for values of `newx` outside the range of `xq`.
See notes.
Notes
-----
Extrapolation methods:
- 'nan' : Any value of `newx` outside the range of `xq` is set to NaN.
- 'constant' : Values of `newx` smaller than the minimum of `xq` are set to the first
value of `yq` and those larger than the maximum, set to the last one (first and
last values along the "quantiles" dimension).
"""
dim = group.dim
prop = group.prop
if prop == "group":
if "group" in xq.dims:
xq = xq.squeeze("group", drop=True)
if "group" in yq.dims:
yq = yq.squeeze("group", drop=True)
out = xr.apply_ufunc(
_interp_on_quantiles_1D,
newx,
xq,
yq,
kwargs={"method": method, "extrap": extrapolation},
input_core_dims=[[dim], ["quantiles"], ["quantiles"]],
output_core_dims=[[dim]],
vectorize=True,
dask="parallelized",
output_dtypes=[yq.dtype],
)
return out
# else:
if prop not in xq.dims:
xq = xq.expand_dims({prop: group.get_coordinate()})
if prop not in yq.dims:
yq = yq.expand_dims({prop: group.get_coordinate()})
xq = add_cyclic_bounds(xq, prop, cyclic_coords=False)
yq = add_cyclic_bounds(yq, prop, cyclic_coords=False)
newg = group.get_index(newx, interp=method != "nearest")
oldg = xq[prop].expand_dims(quantiles=xq.coords["quantiles"])
return xr.apply_ufunc(
_interp_on_quantiles_2D,
newx,
newg,
xq,
yq,
oldg,
kwargs={"method": method, "extrap": extrapolation},
input_core_dims=[
[dim],
[dim],
[prop, "quantiles"],
[prop, "quantiles"],
[prop, "quantiles"],
],
output_core_dims=[[dim]],
vectorize=True,
dask="parallelized",
output_dtypes=[yq.dtype],
)
# TODO is this useless?
def rank(da: xr.DataArray, dim: str = "time", pct: bool = False) -> xr.DataArray:
"""Ranks data along a dimension.
Replicates `xr.DataArray.rank` but as a function usable in a Grouper.apply().
Xarray's docstring is below:
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If pct, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
da: xr.DataArray
dim : str, hashable
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
Returns
-------
DataArray
DataArray with the same coordinates and dtype 'float64'.
"""
return da.rank(dim, pct=pct)
def pc_matrix(arr: Union[np.ndarray, dsk.Array]) -> Union[np.ndarray, dsk.Array]:
"""Construct a Principal Component matrix.
This matrix can be used to transform points in arr to principal components
coordinates. Note that this function does not manage NaNs; if a single observation is null, all elements
of the transformation matrix involving that variable will be NaN.
Parameters
----------
arr : numpy.ndarray or dask.array.Array
2D array (M, N) of the M coordinates of N points.
Returns
-------
numpy.ndarray or dask.array.Array
MxM Array of the same type as arr.
"""
# Get appropriate math module
mod = dsk if isinstance(arr, dsk.Array) else np
# Covariance matrix
cov = mod.cov(arr)
# Get eigenvalues and eigenvectors
    # There is no such method in dask yet, but we are lucky:
    # the SVD of a symmetric positive semi-definite matrix gives its eigenvalues and eigenvectors,
    # and covariance matrices are symmetric positive semi-definite by construction.
    # Numpy has a hermitian=True option to accelerate this, but dask does not.
kwargs = {} if mod is dsk else {"hermitian": True}
eig_vec, eig_vals, _ = mod.linalg.svd(cov, **kwargs)
# The PC matrix is the eigen vectors matrix scaled by the square root of the eigen values
return eig_vec * mod.sqrt(eig_vals)
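# Illustrative check (hypothetical helper): the returned matrix A reproduces the
# covariance of the input (A @ A.T == cov(arr)), which is what allows it to map
# principal-component coordinates back to the original space.
def _demo_pc_matrix():
    rng = np.random.default_rng(0)
    arr = rng.normal(size=(3, 500))  # 3 variables, 500 points
    A = pc_matrix(arr)
    np.testing.assert_allclose(A @ A.T, np.cov(arr), atol=1e-12)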
def best_pc_orientation(
A: np.ndarray, Binv: np.ndarray, val: float = 1000
) -> np.ndarray:
"""Return best orientation vector for A.
Eigenvectors returned by `pc_matrix` do not have a defined orientation.
Given an inverse transform Binv and a transform A, this returns the orientation
minimizing the projected distance for a test point far from the origin.
This trick is explained in [hnilica2017]_. See documentation of
`sdba.adjustment.PrincipalComponentAdjustment`.
Parameters
----------
A : np.ndarray
MxM Matrix defining the final transformation.
Binv : np.ndarray
MxM Matrix defining the (inverse) first transformation.
val : float
        The coordinate of the test point (same for all axes). It should be much
        greater than the coordinates of the farthest point in the array used to define B.
Returns
-------
np.ndarray
Mx1 vector of orientation correction (1 or -1).
"""
m = A.shape[0]
orient = np.ones(m)
P = np.diag(val * np.ones(m))
# Compute first reference error
err = np.linalg.norm(P - A @ Binv @ P)
for i in range(m):
# Switch the ith axis orientation
orient[i] = -1
# Compute new error
new_err = np.linalg.norm(P - (A * orient) @ Binv @ P)
if new_err > err:
# Previous error was lower, switch back
orient[i] = 1
else:
# New orientation is better, keep and remember error.
err = new_err
return orient
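# Illustrative check (hypothetical helper): if the final transform has the sign
# of its first column flipped relative to the first transform, the returned
# orientation flips it back and leaves the other axis untouched.
def _demo_best_pc_orientation():
    B = np.array([[2.0, 0.0], [1.0, 1.0]])
    A = B * np.array([-1.0, 1.0])  # flip the sign of the first column
    np.testing.assert_array_equal(best_pc_orientation(A, np.linalg.inv(B)), [-1.0, 1.0])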
def get_clusters_1d(
data: np.ndarray, u1: float, u2: float
) -> Tuple[np.array, np.array, np.array, np.array]:
"""Get clusters of a 1D array.
A cluster is defined as a sequence of values larger than u2 with at least one value larger than u1.
Parameters
----------
data: 1D ndarray
Values to get clusters from.
u1 : float
Extreme value threshold, at least one value in the cluster must exceed this.
u2 : float
Cluster threshold, values above this can be part of a cluster.
Returns
-------
    (np.array, np.array, np.array, np.array)
        Start indices, end indices (inclusive), maximum positions and maximum values of each cluster.
References
----------
`getcluster` of Extremes.jl (read on 2021-04-20) https://github.com/jojal5/Extremes.jl
"""
# Boolean array, True where data is over u2
# We pad with values under u2, so that clusters never start or end at boundaries.
exce = np.concatenate(([u2 - 1], data, [u2 - 1])) > u2
    # +1 marks the transition into a cluster, -1 the transition out of it.
    bounds = np.diff(exce.astype(np.int32))
    # Thanks to the one-element padding, the +1 transition falls on the index of
    # the first cluster element, expressed in `data` indexing.
    starts = np.where(bounds == 1)[0]
    # Likewise, the -1 transition falls on the index right after the last cluster
    # element, which is exactly the exclusive end needed for python slicing.
    ends = np.where(bounds == -1)[0]
cl_maxpos = []
cl_maxval = []
cl_start = []
cl_end = []
for start, end in zip(starts, ends):
cluster_max = data[start:end].max()
if cluster_max > u1:
cl_maxval.append(cluster_max)
cl_maxpos.append(start + np.argmax(data[start:end]))
cl_start.append(start)
cl_end.append(end - 1)
return (
np.array(cl_start),
np.array(cl_end),
np.array(cl_maxpos),
np.array(cl_maxval),
)
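# Illustrative check (hypothetical helper): with u1=5 and u2=2, the series below
# has two runs above u2 but only the first one reaches above u1, so a single
# cluster (indices 1 to 3, maximum 6 at index 2) is returned.
def _demo_get_clusters_1d():
    data = np.array([0.0, 3.0, 6.0, 3.0, 0.0, 3.0, 4.0, 0.0])
    starts, ends, maxpos, maxval = get_clusters_1d(data, u1=5.0, u2=2.0)
    np.testing.assert_array_equal(starts, [1])
    np.testing.assert_array_equal(ends, [3])
    np.testing.assert_array_equal(maxpos, [2])
    np.testing.assert_allclose(maxval, [6.0])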
def get_clusters(data: xr.DataArray, u1, u2, dim: str = "time") -> xr.Dataset:
"""Get cluster count, maximum and position along a given dim.
See `get_clusters_1d`. Used by `adjustment.ExtremeValues`.
Parameters
----------
data: 1D ndarray
Values to get clusters from.
u1 : float
Extreme value threshold, at least one value in the cluster must exceed this.
u2 : float
Cluster threshold, values above this can be part of a cluster.
dim : str
Dimension name.
Returns
-------
xr.Dataset
With variables,
- `nclusters` : Number of clusters for each point (with `dim` reduced), int
- `start` : First index in the cluster (`dim` reduced, new `cluster`), int
- `end` : Last index in the cluster, inclusive (`dim` reduced, new `cluster`), int
- `maxpos` : Index of the maximal value within the cluster (`dim` reduced, new `cluster`), int
- `maximum` : Maximal value within the cluster (`dim` reduced, new `cluster`), same dtype as data.
For `start`, `end` and `maxpos`, -1 means NaN and should always correspond to a `NaN` in `maximum`.
        The length along `cluster` is half the size of "dim", the theoretical maximum number of clusters.
"""
def _get_clusters(arr, u1, u2, N):
st, ed, mp, mv = get_clusters_1d(arr, u1, u2)
count = len(st)
pad = [-1] * (N - count)
return (
np.append(st, pad),
np.append(ed, pad),
np.append(mp, pad),
np.append(mv, [np.NaN] * (N - count)),
count,
)
# The largest possible number of clusters. Ex: odd positions are < u2, even positions are > u1.
N = data[dim].size // 2
starts, ends, maxpos, maxval, nclusters = xr.apply_ufunc(
_get_clusters,
data,
u1,
u2,
input_core_dims=[[dim], [], []],
output_core_dims=[["cluster"], ["cluster"], ["cluster"], ["cluster"], []],
kwargs={"N": N},
dask="parallelized",
vectorize=True,
dask_gufunc_kwargs={
"meta": (
np.array((), dtype=int),
np.array((), dtype=int),
np.array((), dtype=int),
np.array((), dtype=data.dtype),
np.array((), dtype=int),
),
"output_sizes": {"cluster": N},
},
)
ds = xr.Dataset(
{
"start": starts,
"end": ends,
"maxpos": maxpos,
"maximum": maxval,
"nclusters": nclusters,
}
)
return ds
def rand_rot_matrix(
crd: xr.DataArray, num: int = 1, new_dim: Optional[str] = None
) -> xr.DataArray:
r"""Generate random rotation matrices.
Rotation matrices are members of the SO(n) group, where n is the matrix size (`crd.size`).
They can be characterized as orthogonal matrices with determinant 1. A square matrix :math:`R`
    is a rotation matrix if and only if :math:`R^t = R^{-1}` and :math:`\mathrm{det} R = 1`.
Parameters
----------
crd: xr.DataArray
1D coordinate DataArray along which the rotation occurs.
The output will be square with the same coordinate replicated,
the second renamed to `new_dim`.
    num : int
        The number of matrices to generate (default: 1). If larger than 1, they are stacked along a new "matrices" dimension.
new_dim : str
Name of the new "prime" dimension, defaults to the same name as `crd` + "_prime".
Returns
-------
xr.DataArray
float, NxN if num = 1, numxNxN otherwise, where N is the length of crd.
References
----------
Mezzadri, F. (2006). How to generate random matrices from the classical compact groups. arXiv preprint math-ph/0609050.
"""
if num > 1:
return xr.concat([rand_rot_matrix(crd, num=1) for i in range(num)], "matrices")
N = crd.size
dim = crd.dims[0]
# Rename and rebuild second coordinate : "prime" axis.
if new_dim is None:
new_dim = dim + "_prime"
crd2 = xr.DataArray(crd.values, dims=new_dim, name=new_dim, attrs=crd.attrs)
# Random floats from the standardized normal distribution
Z = np.random.standard_normal((N, N))
# QR decomposition and manipulation from Mezzadri 2006
Q, R = np.linalg.qr(Z)
num = np.diag(R)
denum = np.abs(num)
lam = np.diag(num / denum) # "lambda"
return xr.DataArray(
Q @ lam, dims=(dim, new_dim), coords={dim: crd, new_dim: crd2}
).astype("float32")
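# Illustrative check (hypothetical helper): the generated matrix is orthogonal
# along the (crd, crd_prime) pair of dimensions, up to float32 precision.
def _demo_rand_rot_matrix():
    crd = xr.DataArray([10.0, 20.0, 30.0], dims=("pt",), name="pt")
    R = rand_rot_matrix(crd)
    np.testing.assert_allclose(R.values @ R.values.T, np.eye(3), atol=1e-5)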
| 32.135135
| 123
| 0.604929
|
32f56a796468a9d72dd3d75c68eecbd2b3367505
| 1,164
|
py
|
Python
|
pcdet/models/detectors/caddn.py
|
MartinHahner/OpenPCDet
|
9375908d30ee5023355ebdd77041d7f2cbfd7ec8
|
[
"Apache-2.0"
] | 1,984
|
2020-07-01T05:13:02.000Z
|
2022-03-31T20:34:00.000Z
|
pcdet/models/detectors/caddn.py
|
lzypoiuy/OpenPCDet
|
c9d31d393acaae34d74cb03bd6ccff9976d3d1f3
|
[
"Apache-2.0"
] | 748
|
2020-07-01T07:04:58.000Z
|
2022-03-31T07:38:51.000Z
|
pcdet/models/detectors/caddn.py
|
lzypoiuy/OpenPCDet
|
c9d31d393acaae34d74cb03bd6ccff9976d3d1f3
|
[
"Apache-2.0"
] | 764
|
2020-07-01T12:19:13.000Z
|
2022-03-31T11:19:17.000Z
|
from .detector3d_template import Detector3DTemplate
class CaDDN(Detector3DTemplate):
def __init__(self, model_cfg, num_class, dataset):
super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
self.module_list = self.build_networks()
def forward(self, batch_dict):
for cur_module in self.module_list:
batch_dict = cur_module(batch_dict)
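        # In training mode, return the summed RPN + depth losses for optimization;
        # otherwise run post-processing to produce prediction and recall dicts.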
if self.training:
loss, tb_dict, disp_dict = self.get_training_loss()
ret_dict = {
'loss': loss
}
return ret_dict, tb_dict, disp_dict
else:
pred_dicts, recall_dicts = self.post_processing(batch_dict)
return pred_dicts, recall_dicts
def get_training_loss(self):
disp_dict = {}
loss_rpn, tb_dict_rpn = self.dense_head.get_loss()
loss_depth, tb_dict_depth = self.vfe.get_loss()
tb_dict = {
'loss_rpn': loss_rpn.item(),
'loss_depth': loss_depth.item(),
**tb_dict_rpn,
**tb_dict_depth
}
loss = loss_rpn + loss_depth
return loss, tb_dict, disp_dict
| 29.846154
| 83
| 0.613402
|
5b9f1ef356112311cba372e74b234645d54a80d8
| 994
|
py
|
Python
|
openstack_dashboard/dashboards/admin/overview/urls.py
|
maofutian/horizon
|
dab92e7d2f576caea8f81c8e22a516fb45633794
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/admin/overview/urls.py
|
maofutian/horizon
|
dab92e7d2f576caea8f81c8e22a516fb45633794
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/admin/overview/urls.py
|
maofutian/horizon
|
dab92e7d2f576caea8f81c8e22a516fb45633794
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.overview import views
urlpatterns = patterns('',
url(r'^$', views.GlobalOverview.as_view(), name='index'),
)
| 34.275862
| 78
| 0.745473
|
13225afa79bb35ebbf8df34e33dd62852f513da9
| 39,385
|
py
|
Python
|
geotrek/core/tests/test_topology.py
|
claudep/Geotrek
|
620799cc2667c3b203ef92de6ec35008111fb592
|
[
"BSD-2-Clause"
] | 50
|
2016-10-19T23:01:21.000Z
|
2022-03-28T08:28:34.000Z
|
geotrek/core/tests/test_topology.py
|
claudep/Geotrek
|
620799cc2667c3b203ef92de6ec35008111fb592
|
[
"BSD-2-Clause"
] | 1,422
|
2016-10-27T10:39:40.000Z
|
2022-03-31T13:37:10.000Z
|
geotrek/core/tests/test_topology.py
|
claudep/Geotrek
|
620799cc2667c3b203ef92de6ec35008111fb592
|
[
"BSD-2-Clause"
] | 46
|
2016-10-27T10:59:10.000Z
|
2022-03-22T15:55:56.000Z
|
import json
import math
from unittest import skipIf
from django.test import TestCase
from django.conf import settings
from django.db import connections, DEFAULT_DB_ALIAS
from django.contrib.gis.geos import Point, LineString
from geotrek.common.utils import dbnow
from geotrek.core.factories import (PathFactory, PathAggregationFactory,
TopologyFactory)
from geotrek.core.models import Path, Topology, PathAggregation
def dictfetchall(cursor):
"Return all rows from a cursor as a dict"
columns = [col[0] for col in cursor.description]
return [
dict(zip(columns, row))
for row in cursor.fetchall()
]
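# For illustration, dictfetchall() turns cursor rows into a list of dicts keyed by
# column name, e.g. [{"id_path": 1, "id_topology": 7, "visible": True}, ...]
# (values invented for this example).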
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class TopologyTest(TestCase):
def test_geom_null_is_safe(self):
t = TopologyFactory.create()
t.geom = None
t.save()
self.assertNotEqual(t.geom, None)
def test_dates(self):
t1 = dbnow()
e = TopologyFactory.build()
e.save()
t2 = dbnow()
self.assertTrue(t1 < e.date_insert < t2)
e.delete()
t3 = dbnow()
self.assertTrue(t2 < e.date_update < t3)
def test_latestupdate_delete(self):
for i in range(10):
TopologyFactory.create()
t1 = dbnow()
self.assertTrue(t1 > Topology.objects.latest("date_update").date_update)
(Topology.objects.all()[0]).delete(force=True)
self.assertFalse(t1 > Topology.objects.latest("date_update").date_update)
def test_length(self):
e = TopologyFactory.build()
self.assertEqual(e.length, 0)
e.save()
self.assertEqual(e.length, 0)
PathAggregationFactory.create(topo_object=e)
e.save()
self.assertNotEqual(e.length, 0)
def test_length_2d(self):
e = TopologyFactory.build()
e.save()
self.assertEqual(e.length_2d, None)
def test_kind(self):
from geotrek.land.models import LandEdge
from geotrek.land.factories import LandEdgeFactory
# Test with a concrete inheritance of Topology : LandEdge
self.assertEqual('TOPOLOGY', Topology.KIND)
self.assertEqual(0, len(Topology.objects.filter(kind='LANDEDGE')))
self.assertEqual('LANDEDGE', LandEdge.KIND)
# Kind of instances
e = LandEdgeFactory.create()
self.assertEqual(e.kind, LandEdge.KIND)
self.assertEqual(1, len(Topology.objects.filter(kind='LANDEDGE')))
def test_link_closest_visible_path(self):
"""
Topology must be linked to the closest visible path only
"""
path_visible = Path(name="visible",
geom='LINESTRING(0 0, 1 0, 2 0)',
visible=True)
path_visible.save()
path_unvisible = Path(name="unvisible",
geom='LINESTRING(0 3, 1 3, 2 3)',
visible=False)
path_unvisible.save()
        # the default manager sees 1 path
self.assertEqual(Path.objects.count(), 1)
        # the custom manager sees 2 paths
self.assertEqual(Path.include_invisible.count(), 2)
# create topo on visible path
topology = Topology._topologypoint(0, 0, None)
topology.save()
        # because FK and M2M use the default manager only, the other tests are done in SQL
conn = connections[DEFAULT_DB_ALIAS]
cur = conn.cursor()
cur.execute(
"""
SELECT t.id as id_path,
et.topo_object_id as id_topology,
t.visible as visible
FROM core_pathaggregation et
JOIN core_path t ON et.path_id=t.id
WHERE et.topo_object_id={topo_id}
""".format(topo_id=topology.pk))
datas = dictfetchall(cur)
# topo must be linked to visible path
self.assertIn(topology.pk, [ele['id_topology'] for ele in datas], "{}".format(datas))
self.assertIn(path_visible.pk, [ele['id_path'] for ele in datas], "{}".format(datas))
self.assertNotIn(path_unvisible.pk, [ele['id_path'] for ele in datas], "{}".format(datas))
        # new topo on invisible path
topology = Topology._topologypoint(0, 3, None)
topology.save()
cur.execute(
"""
SELECT t.id as id_path,
et.topo_object_id as id_topology,
t.visible as visible
FROM core_pathaggregation et
JOIN core_path t ON et.path_id=t.id
WHERE et.topo_object_id={topo_id}
""".format(topo_id=topology.pk))
datas = dictfetchall(cur)
self.assertIn(topology.pk, [ele['id_topology'] for ele in datas], "{}".format(datas))
self.assertIn(path_visible.pk, [ele['id_path'] for ele in datas], "{}".format(datas))
self.assertNotIn(path_unvisible.pk, [ele['id_path'] for ele in datas], "{}".format(datas))
cur.close()
def test_topology_linked_to_not_draft(self):
path_draft = PathFactory.create(name="draft",
geom='LINESTRING(0 0, 1 0, 2 0)',
draft=True)
path_draft.save()
path_normal = PathFactory.create(name="normal",
geom='LINESTRING(0 3, 1 3, 2 3)',
draft=False)
path_normal.save()
point = Point(0, 0, srid=settings.SRID)
closest = Path.closest(point)
self.assertEqual(point.wkt, 'POINT (0 0)')
self.assertEqual(closest, path_normal)
def test_topology_deserialize(self):
p1 = PathFactory.create(geom=LineString((0, 0), (2, 2)))
p2 = PathFactory.create(geom=LineString((2, 2), (2, 0)))
p3 = PathFactory.create(geom=LineString((2, 0), (4, 0)))
pks = [p.pk for p in [p1, p2, p3]]
Topology.deserialize('{"paths": %s, "positions": {"0": [0.3, 1.0], "2": [0.0, 0.7]}, "offset": 1}' % pks)
self.assertEqual(Path.objects.count(), 3)
def test_topology_deserialize_point(self):
PathFactory.create(geom=LineString((699999, 6600001), (700001, 6600001)))
topology = Topology.deserialize('{"lat": 46.5, "lng": 3}')
self.assertEqual(topology.offset, -1)
self.assertEqual(topology.aggregations.count(), 1)
self.assertEqual(topology.aggregations.get().start_position, .5)
self.assertEqual(topology.aggregations.get().end_position, .5)
def test_topology_deserialize_point_with_snap(self):
path = PathFactory.create(geom=LineString((699999, 6600001), (700001, 6600001)))
topology = Topology.deserialize('{"lat": 46.5, "lng": 3, "snap": %s}' % path.pk)
self.assertEqual(topology.offset, 0)
self.assertEqual(topology.aggregations.count(), 1)
self.assertEqual(topology.aggregations.get().start_position, .5)
self.assertEqual(topology.aggregations.get().end_position, .5)
def test_topology_deserialize_inexistant(self):
with self.assertRaises(Topology.DoesNotExist):
Topology.deserialize('4012999999')
def test_topology_deserialize_inexistant_point(self):
PathFactory.create(geom=LineString((699999, 6600001), (700001, 6600001)))
Topology.deserialize('{"lat": 46.5, "lng": 3, "pk": 4012999999}')
def test_topology_deserialize_inexistant_line(self):
path = PathFactory.create(geom=LineString((699999, 6600001), (700001, 6600001)))
Topology.deserialize(
'[{"pk": 4012999999, "paths": [%s], "positions": {"0": [0.0, 1.0]}, "offset": 1}]' % path.pk
)
def test_topology_deserialize_invalid(self):
with self.assertRaises(ValueError):
Topology.deserialize('[{"paths": [4012999999], "positions": {}, "offset": 1}]')
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class TopologyDeletionTest(TestCase):
def test_deleted_is_hidden_but_still_exists(self):
topology = TopologyFactory.create(offset=1)
path = topology.paths.get()
self.assertEqual(len(PathAggregation.objects.filter(topo_object=topology)), 1)
self.assertEqual(len(path.topology_set.all()), 1)
topology.delete()
# Make sure object remains in database with deleted status
self.assertEqual(len(PathAggregation.objects.filter(topo_object=topology)), 1)
# Make sure object has deleted status
self.assertTrue(topology.deleted)
# Make sure object still exists
self.assertEqual(len(path.topology_set.all()), 1)
self.assertIn(topology, Topology.objects.all())
# Make sure object can be hidden from managers
self.assertNotIn(topology, Topology.objects.existing())
self.assertEqual(len(path.topology_set.existing()), 0)
def test_deleted_when_all_path_are_deleted(self):
topology = TopologyFactory.create()
self.assertFalse(topology.deleted)
        paths = topology.paths.all()
for path in paths:
path.delete()
topology.reload()
self.assertTrue(topology.deleted)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class TopologyMutateTest(TestCase):
def test_mutate(self):
topology1 = TopologyFactory.create(paths=[])
self.assertEqual(len(topology1.paths.all()), 0)
topology2 = TopologyFactory.create(offset=14.5)
self.assertEqual(len(topology2.paths.all()), 1)
# Normal usecase
topology1.mutate(topology2)
self.assertEqual(topology1.offset, 14.5)
self.assertEqual(len(topology1.paths.all()), 1)
def test_mutate_intersection(self):
# Mutate a Point topology at an intersection, and make sure its aggregations
# are not duplicated (c.f. SQL triggers)
# Create a 3 paths intersection
PathFactory.create(geom=LineString((0, 0), (1, 0)))
p2 = PathFactory.create(geom=LineString((1, 0), (2, 0)))
PathFactory.create(geom=LineString((1, 0), (1, 1)))
# Create a topology point at this intersection
topology = TopologyFactory.create(paths=[(p2, 0, 0)])
self.assertTrue(topology.ispoint())
        # Make sure the trigger worked and linked it to 3 paths
self.assertEqual(len(topology.paths.all()), 3)
# Mutate it to another one !
topology2 = TopologyFactory.create(paths=[])
self.assertEqual(len(topology2.paths.all()), 0)
self.assertTrue(topology2.ispoint())
topology2.mutate(topology)
self.assertEqual(len(topology2.paths.all()), 3)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class TopologyPointTest(TestCase):
def test_point_geom_3d(self):
"""
+
/ \
/ X \
+ +
"""
p1 = PathFactory.create(geom=LineString((0, 0), (4, 4)))
PathFactory.create(geom=LineString((4, 4), (8, 0)))
poi = Point(3, 1, srid=settings.SRID)
position, distance = Path.interpolate(p1, poi)
self.assertAlmostEqual(0.5, position, places=6)
self.assertAlmostEqual(-1.414, distance, places=2)
# Verify that deserializing this, we obtain the same original coordinates
# (use lat/lng as in forms)
poi.transform(settings.API_SRID)
poitopo = Topology.deserialize({'lat': poi.y, 'lng': poi.x})
# Computed topology properties match original interpolation
self.assertAlmostEqual(0.5, poitopo.aggregations.all()[0].start_position, places=6)
self.assertAlmostEqual(-1.414, poitopo.offset, places=2)
# Resulting geometry
self.assertAlmostEqual(3, poitopo.geom.x, places=6)
self.assertAlmostEqual(1, poitopo.geom.y, places=6)
def test_point_geom_not_moving(self):
r"""
Modify path, point not moving
+ +
| |
\ X / X
/ \
| |
+ +
"""
p1 = PathFactory.create(geom=LineString((0, 0),
(0, 5),
(5, 10),
(0, 15),
(0, 20)))
poi = Point(10, 10, srid=settings.SRID)
poi.transform(settings.API_SRID)
poitopo = Topology.deserialize({'lat': poi.y, 'lng': poi.x})
self.assertEqual(0.5, poitopo.aggregations.all()[0].start_position)
self.assertAlmostEqual(-5, poitopo.offset, places=6)
# It should have kept its position !
self.assertAlmostEqual(10, poitopo.geom.x, places=6)
self.assertAlmostEqual(10, poitopo.geom.y, places=6)
# Change path, it should still be in the same position
p1.geom = LineString((0, 0),
(0, 5),
(-5, 10),
(0, 15),
(0, 20))
p1.save()
poitopo.reload()
self.assertAlmostEqual(10, poitopo.geom.x, places=6)
self.assertAlmostEqual(10, poitopo.geom.y, places=6)
def test_point_offset_kept(self):
"""
Shorten path, offset kept.
X X
+-----------+ +------+
"""
p1 = PathFactory.create(geom=LineString((0, 0), (20, 0)))
poi = Point(5, 10, srid=settings.SRID)
poi.transform(settings.API_SRID)
poitopo = Topology.deserialize({'lat': poi.y, 'lng': poi.x})
self.assertAlmostEqual(0.25, poitopo.aggregations.all()[0].start_position, places=6)
self.assertAlmostEqual(10, poitopo.offset, places=6)
p1.geom = LineString((0, 0), (10, 0))
p1.save()
poitopo.reload()
self.assertAlmostEqual(10, poitopo.offset, places=6)
# Not moved:
self.assertAlmostEqual(5, poitopo.geom.x, places=6)
self.assertAlmostEqual(10, poitopo.geom.y, places=6)
def test_point_offset_updated(self):
"""
Shorten path, offset updated.
X X
+-----------+ +------+
"""
p1 = PathFactory.create(geom=LineString((0, 0), (20, 0)))
poi = Point(10, 10, srid=settings.SRID)
poi.transform(settings.API_SRID)
poitopo = Topology.deserialize({'lat': poi.y, 'lng': poi.x})
poitopo.save()
self.assertAlmostEqual(0.5, poitopo.aggregations.all()[0].start_position, places=6)
self.assertAlmostEqual(10, poitopo.offset, places=6)
p1.geom = LineString((0, 0), (0, 5))
p1.save()
poitopo.reload()
self.assertAlmostEqual(11.180339887, poitopo.offset, places=6)
# Not moved:
self.assertAlmostEqual(10, poitopo.geom.x, places=6)
self.assertAlmostEqual(10, poitopo.geom.y, places=6)
def test_point_geom_moving(self):
p1 = PathFactory.create(geom=LineString((0, 0),
(0, 5)))
poi = Point(0, 2.5, srid=settings.SRID)
poi.transform(settings.API_SRID)
poitopo = Topology.deserialize({'lat': poi.y, 'lng': poi.x})
poitopo.save()
self.assertAlmostEqual(0.5, poitopo.aggregations.all()[0].start_position, places=6)
self.assertAlmostEqual(0, poitopo.offset, places=6)
self.assertAlmostEqual(0, poitopo.geom.x, places=6)
self.assertAlmostEqual(2.5, poitopo.geom.y, places=6)
p1.geom = LineString((10, 0),
(10, 5))
p1.save()
poitopo.reload()
self.assertAlmostEqual(10, poitopo.geom.x, places=6)
self.assertAlmostEqual(2.5, poitopo.geom.y, places=6)
def test_junction_point(self):
p1 = PathFactory.create(geom=LineString((0, 0), (2, 2)))
p2 = PathFactory.create(geom=LineString((0, 0), (2, 0)))
p3 = PathFactory.create(geom=LineString((0, 2), (0, 0)))
# Create a junction point topology
t = TopologyFactory.create(paths=[])
self.assertEqual(len(t.paths.all()), 0)
pa = PathAggregationFactory.create(topo_object=t, path=p1,
start_position=0.0, end_position=0.0)
self.assertCountEqual(t.paths.all(), [p1, p2, p3])
# Update to a non junction point topology
pa.end_position = 0.4
pa.save()
self.assertCountEqual(t.paths.all(), [p1])
# Update to a junction point topology
pa.end_position = 0.0
pa.save()
self.assertCountEqual(t.paths.all(), [p1, p2, p3])
def test_point_at_end_of_path_not_moving_after_mutate(self):
PathFactory.create(geom=LineString((400, 400), (410, 400),
srid=settings.SRID))
self.assertEqual(1, len(Path.objects.all()))
father = Topology.deserialize({'lat': -1, 'lng': -1})
father.save()
poi = Point(500, 600, srid=settings.SRID)
poi.transform(settings.API_SRID)
son = Topology.deserialize({'lat': poi.y, 'lng': poi.x})
father.mutate(son)
self.assertAlmostEqual(father.geom.x, 500, places=6)
self.assertAlmostEqual(father.geom.y, 600, places=6)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class TopologyLineTest(TestCase):
def test_topology_geom(self):
p1 = PathFactory.create(geom=LineString((0, 0), (2, 2)))
p2 = PathFactory.create(geom=LineString((2, 2), (2, 0)))
p3 = PathFactory.create(geom=LineString((2, 0), (4, 0)))
# Type Point
t = TopologyFactory.create(paths=[(p1, 0.5, 0.5)])
self.assertEqual(t.geom, Point((1, 1), srid=settings.SRID))
# 50% of path p1, 100% of path p2
t = TopologyFactory.create(paths=[(p1, 0.5, 1), p2])
self.assertEqual(t.geom, LineString((1, 1), (2, 2), (2, 0), srid=settings.SRID))
# 100% of path p2 and p3, with offset of 1
t = TopologyFactory.create(offset=1, paths=[p2, p3])
self.assertEqual(t.geom, LineString((3, 2), (3, 1), (4, 1), srid=settings.SRID))
# Change offset, geometry is computed again
t.offset = 0.5
t.save()
self.assertEqual(t.geom, LineString((2.5, 2), (2.5, 0.5), (4, 0.5), srid=settings.SRID))
def test_topology_geom_should_not_be_sampled(self):
coords = [(x, math.sin(x)) for x in range(100)]
sampled_3d = [(x, math.sin(x), math.cos(x)) for x in range(0, 100, 5)]
p1 = PathFactory.create(geom=LineString(*coords))
p1.geom_3d = LineString(*sampled_3d)
p1.save(update_fields=['geom_3d'])
t = TopologyFactory.create(paths=[p1])
self.assertEqual(len(t.geom.coords), 100)
def test_topology_geom_with_intermediate_markers(self):
# Intermediate (forced passage) markers for topologies
# Use a bifurcation, make sure computed geometry is correct
# +--p2---+
# +---+-------+---+
# p1 p3 p4
p1 = PathFactory.create(geom=LineString((0, 0), (2, 0)))
p2 = PathFactory.create(geom=LineString((2, 0), (2, 1), (4, 1), (4, 0)))
p3 = PathFactory.create(geom=LineString((2, 0), (4, 0)))
p4 = PathFactory.create(geom=LineString((4, 0), (6, 0)))
"""
From p1 to p4, with point in the middle of p3
"""
t = TopologyFactory.create(paths=[p1, p3, (p3, 0.5, 0.5), p4])
self.assertEqual(t.geom, LineString((0, 0), (2, 0), (4, 0), (6, 0), srid=settings.SRID))
"""
From p1 to p4, through p2
"""
t = TopologyFactory.create(paths=[p1, p2, (p2, 0.5, 0.5), p4])
self.assertEqual(t.geom, LineString((0, 0), (2, 0), (2, 1), (4, 1), (4, 0), (6, 0), srid=settings.SRID))
"""
        From p1 to p4, through p2, but **with start/end at 0.0**
"""
t2 = TopologyFactory.create(paths=[p1, p2, (p2, 0, 0), p4])
self.assertEqual(t2.geom, t.geom)
def test_path_geom_update(self):
# Create a path
p = PathFactory.create(geom=LineString((0, 0), (4, 0)))
# Create a linear topology
t1 = TopologyFactory.create(offset=1, paths=[(p, 0, 0.5)])
t1_agg = t1.aggregations.get()
# Create a point topology
t2 = TopologyFactory.create(offset=-1, paths=[(p, 0.5, 0.5)])
t2_agg = t2.aggregations.get()
# Ensure linear topology is correct before path modification
self.assertEqual(t1.offset, 1)
self.assertEqual(t1.geom.coords, ((0, 1), (2, 1)))
self.assertEqual(t1_agg.start_position, 0.0)
self.assertEqual(t1_agg.end_position, 0.5)
# Ensure point topology is correct before path modification
self.assertEqual(t2.offset, -1)
self.assertEqual(t2.geom.coords, (2, -1))
self.assertEqual(t2_agg.start_position, 0.5)
self.assertEqual(t2_agg.end_position, 0.5)
# Modify path geometry and refresh computed data
p.geom = LineString((0, 2), (8, 2))
p.save()
t1.reload()
t1_agg = t1.aggregations.get()
t2.reload()
t2_agg = t2.aggregations.get()
# Ensure linear topology is correct after path modification
self.assertEqual(t1.offset, 1)
self.assertEqual(t1.geom.coords, ((0, 3), (4, 3)))
self.assertEqual(t1_agg.start_position, 0.0)
self.assertEqual(t1_agg.end_position, 0.5)
# Ensure point topology is correct before path modification
self.assertEqual(t2.offset, -3)
self.assertEqual(t2.geom.coords, (2, -1))
self.assertEqual(t2_agg.start_position, 0.25)
self.assertEqual(t2_agg.end_position, 0.25)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class TopologyCornerCases(TestCase):
def test_opposite_paths(self):
"""
A C
B +-------+-------+ D
"""
ab = PathFactory.create(geom=LineString((5, 0), (0, 0)))
cd = PathFactory.create(geom=LineString((5, 0), (10, 0)))
topo = TopologyFactory.create(paths=[(ab, 0.2, 0), (cd, 0, 0.2)])
expected = LineString((4, 0), (5, 0), (6, 0), srid=settings.SRID)
self.assertEqual(topo.geom, expected)
# Now let's have some fun, reverse BA :)
ab.reverse()
ab.save()
topo.reload()
self.assertEqual(topo.geom, expected)
def test_opposite_paths_with_middle(self):
"""
A C
B +-------+--------+-------+ D
"""
ab = PathFactory.create(geom=LineString((5, 0), (0, 0)))
ac = PathFactory.create(geom=LineString((5, 0), (10, 0)))
cd = PathFactory.create(geom=LineString((10, 0), (15, 0)))
topo = TopologyFactory.create(paths=[(ab, 0.2, 0), ac, (cd, 0, 0.2)])
expected = LineString((4, 0), (5, 0), (10, 0), (11, 0), srid=settings.SRID)
self.assertEqual(topo.geom, expected)
# Reverse AC ! OMG this is hell !
ac.reverse()
ac.save()
topo.reload()
self.assertEqual(topo.geom, expected)
def test_return_path(self):
"""
A
----+
|
B +------+------+ C
"""
p1 = PathFactory.create(geom=LineString((0, 0), (10, 0)))
p2 = PathFactory.create(geom=LineString((5, 0), (5, 10), (10, 10)))
        p3 = Path.objects.filter(name=p1.name).exclude(pk=p1.pk)[0]  # Was split :)
# Now create a topology B-A-C
topo = TopologyFactory.create(paths=[(p1, 0.5, 1), (p2, 0, 0.8), (p2, 0.8, 0.8), (p2, 0.8, 0), (p3, 0, 0.5)])
self.assertEqual(topo.geom, LineString((2.5, 0), (5, 0), (5, 10),
(7, 10), (5, 10), (5, 0),
(7.5, 0), srid=settings.SRID))
def test_return_path_serialized(self):
"""
Same as test_return_path() but from deserialization.
"""
p1 = PathFactory.create(geom=LineString((0, 0), (10, 0)))
p2 = PathFactory.create(geom=LineString((5, 0), (5, 10), (10, 10)))
        p3 = Path.objects.filter(name=p1.name).exclude(pk=p1.pk)[0]  # Was split :)
topo = Topology.deserialize("""
[{"offset":0,
"positions":{"0":[0.5,1],
"1":[0.0, 0.8]},
"paths":[%(p1)s,%(p2)s]
},
{"offset":0,
"positions":{"0":[0.8,0.0],
"1":[0.0, 0.5]},
"paths":[%(p2)s,%(p3)s]
}
]
""" % {'p1': p1.pk, 'p2': p2.pk, 'p3': p3.pk})
topo.kind = 'TOPOLOGY'
topo.save()
self.assertEqual(topo.geom, LineString((2.5, 0), (5, 0), (5, 10),
(7, 10), (5, 10), (5, 0),
(7.5, 0), srid=settings.SRID))
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class TopologyLoopTests(TestCase):
def test_simple_loop(self):
"""
==========
|| ||
A +==------==+ B
"""
p1 = PathFactory.create(geom=LineString((10, 0), (0, 0)))
p2 = PathFactory.create(geom=LineString((0, 0), (0, 5), (10, 5), (10, 0)))
# Full loop
topo = TopologyFactory.create(paths=[p1, p2])
self.assertEqual(topo.geom, LineString((10, 0), (0, 0), (0, 5), (10, 5), (10, 0), srid=settings.SRID))
# Subpart, like in diagram
topo = TopologyFactory.create(paths=[(p1, 0.8, 1), p2, (p1, 0, 0.2)])
self.assertEqual(topo.geom, LineString((2, 0), (0, 0), (0, 5),
(10, 5), (10, 0), (8, 0), srid=settings.SRID))
def test_trek_loop(self):
"""
=========
|| ||
+-------===========+=========+----------+
"""
p1 = PathFactory.create(geom=LineString((0, 0), (10, 0)))
p2 = PathFactory.create(geom=LineString((10, 0), (30, 0)))
p3 = PathFactory.create(geom=LineString((10, 0), (10, 5),
(20, 5), (20, 0)))
topo = TopologyFactory.create(paths=[(p1, 0.3, 1), p3, (p2, 1, 0), (p1, 1, 0.3)])
self.assertEqual(topo.geom, LineString((3, 0), (10, 0), (10, 5), (20, 5), (20, 0),
(10, 0), (3, 0), srid=settings.SRID))
def test_spoon_loop(self):
"""
=====<====
|| ||
+-------===<===>===+=====>===
"""
p1 = PathFactory.create(geom=LineString((0, 0), (10, 0)))
p2 = PathFactory.create(geom=LineString((10, 0), (10, 5),
(20, 5), (20, 0),
(10, 0)))
topo = TopologyFactory.create(paths=[(p1, 0.3, 1), (p2, 1, 0.4), (p2, 0.4, 0.4), (p2, 0.4, 0.2),
(p2, 0.2, 0.2), (p2, 0.2, 0), (p1, 1, 0.3)])
self.assertEqual(topo.geom, LineString((3, 0), (10, 0), (20, 0), (20, 5),
(17, 5), (11, 5), # extra point due middle aggregation
(10, 5), (10, 0), (3, 0), srid=settings.SRID))
# Deserializing should work too
topod = Topology.deserialize("""
[{"positions":{"0":[0.3,1],"1":[1, 0.4]},"paths":[%(pk1)s,%(pk2)s]},
{"positions":{"0":[0.4, 0.2]},"paths":[%(pk2)s]},
{"positions":{"0":[0.2,0],"1":[1,0.3]},"paths":[%(pk2)s,%(pk1)s]}]""" % {'pk1': p1.pk, 'pk2': p2.pk})
topod.kind = 'TOPOLOGY'
topod.save()
self.assertEqual(topo.geom, topod.geom)
self.assertEqual(len(topod.aggregations.all()), 7)
def test_spoon_loop_2(self):
"""
=====>====
|| ||
+-------===<===>===+=====<===
"""
p1 = PathFactory.create(geom=LineString((0, 0), (10, 0)))
p2 = PathFactory.create(geom=LineString((10, 0), (10, 5),
(20, 5), (20, 0),
(10, 0)))
topo = TopologyFactory.create(paths=[(p1, 0.3, 1), (p2, 0, 0.4), (p2, 0.4, 0.4), (p2, 0.4, 0.8), (p2, 0.8, 0.8),
(p2, 0.8, 1), (p1, 1, 0.3)])
self.assertEqual(topo.geom, LineString((3, 0), (10, 0), (10, 5),
(17, 5), (20, 5), # extra point due middle aggregation
(20, 0), (16, 0), (10, 0), (3, 0), srid=settings.SRID))
# De/Serializing should work too
serialized = """
[{"kind": "TOPOLOGY","positions":{"0":[0.3,1],"1":[0, 0.4]},"paths":[%(pk1)s,%(pk2)s],"offset": 0.0,"pk": %(topo)s},
{"kind": "TOPOLOGY","positions":{"0":[0.4, 0.8]},"paths":[%(pk2)s],"offset": 0.0,"pk": %(topo)s},
{"kind": "TOPOLOGY","positions":{"0":[0.8,1],"1":[1,0.3]},"paths":[%(pk2)s,%(pk1)s],"offset": 0.0,"pk": %(topo)s}]""" % {
'topo': topo.pk,
'pk1': p1.pk,
'pk2': p2.pk
}
self.assertEqual(json.loads(serialized), json.loads(topo.serialize()))
topod = Topology.deserialize(serialized)
self.assertEqual(topo.geom, topod.geom)
self.assertEqual(len(topod.aggregations.all()), 7)
def test_trek_all_reverse(self):
"""
+----<===+=======+====|----->
"""
p1 = PathFactory.create(geom=LineString((0, 0), (10, 0)))
p2 = PathFactory.create(geom=LineString((10, 0), (20, 0)))
p3 = PathFactory.create(geom=LineString((20, 0), (30, 0)))
topo = TopologyFactory.create(paths=[(p3, 0.2, 0), (p2, 1, 0), (p1, 1, 0.9)])
self.assertEqual(topo.geom, LineString((22.0, 0.0), (20.0, 0.0), (10.0, 0.0), (9.0, 0.0), srid=settings.SRID))
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class TopologySerialization(TestCase):
def test_serialize_line(self):
path = PathFactory.create()
test_objdict = {'kind': Topology.KIND,
'offset': 1.0,
'positions': {},
'paths': [path.pk]}
# +|========>+
topo = TopologyFactory.create(offset=1.0, paths=[path])
test_objdict['pk'] = topo.pk
test_objdict['positions']['0'] = [0.0, 1.0]
objdict = json.loads(topo.serialize())
self.assertDictEqual(objdict[0], test_objdict)
# +<========|+
topo = TopologyFactory.create(offset=1.0, paths=[(path, 1, 0)])
test_objdict['pk'] = topo.pk
test_objdict['positions']['0'] = [1.0, 0.0]
objdict = json.loads(topo.serialize())
self.assertDictEqual(objdict[0], test_objdict)
# +|========>+<========|+
path2 = PathFactory.create()
topo = TopologyFactory.create(offset=1.0, paths=[(path, 0, 1), (path2, 1, 0)])
test_objdict['pk'] = topo.pk
test_objdict['paths'] = [path.pk, path2.pk]
test_objdict['positions'] = {'0': [0.0, 1.0], '1': [1.0, 0.0]}
objdict = json.loads(topo.serialize())
self.assertDictEqual(objdict[0], test_objdict)
# +<========|+|========>+
topo = TopologyFactory.create(offset=1.0, paths=[(path, 1, 0), (path2, 0, 1)])
test_objdict['pk'] = topo.pk
test_objdict['paths'] = [path.pk, path2.pk]
test_objdict['positions'] = {'0': [1.0, 0.0], '1': [0.0, 1.0]}
objdict = json.loads(topo.serialize())
self.assertDictEqual(objdict[0], test_objdict)
def test_serialize_point(self):
path = PathFactory.create()
topology = TopologyFactory.create(offset=1, paths=[(path, 0.5, 0.5)])
fieldvalue = topology.serialize()
# fieldvalue is like '{"lat": -5.983842291017086, "lng": -1.3630770374505987, "kind": "TOPOLOGY"}'
field = json.loads(fieldvalue)
self.assertEqual(field['pk'], topology.pk)
self.assertAlmostEqual(field['lat'], 46.5004566, places=6)
self.assertAlmostEqual(field['lng'], 3.0006428, places=6)
self.assertEqual(field['kind'], "TOPOLOGY")
def test_serialize_two_consecutive_forced(self):
path1 = PathFactory.create()
path2 = PathFactory.create()
path3 = PathFactory.create()
topology = TopologyFactory.create(paths=[path1, (path2, 0.2, 0.2), (path2, 0.4, 0.4), path3])
fieldvalue = topology.serialize()
field = json.loads(fieldvalue)
self.assertEqual(len(field), 2)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class TopologyDeserialization(TestCase):
def test_deserialize_foreignkey(self):
topology = TopologyFactory.create(offset=1)
deserialized = Topology.deserialize(topology.pk)
self.assertEqual(topology, deserialized)
def test_deserialize_unedited_point_topology(self):
topology = TopologyFactory.create(offset=1)
deserialized = Topology.deserialize({'pk': topology.pk})
self.assertEqual(topology, deserialized)
def test_deserialize_unedited_line_topology(self):
topology = TopologyFactory.create(offset=1)
deserialized = Topology.deserialize([{'pk': topology.pk}, {}])
self.assertEqual(topology, deserialized)
def test_deserialize_line(self):
path = PathFactory.create()
topology = Topology.deserialize('[{"paths": [%s], "positions": {"0": [0.0, 1.0]}, "offset": 1}]' % (path.pk))
self.assertEqual(topology.offset, 1)
self.assertEqual(topology.kind, 'TMP')
self.assertEqual(len(topology.paths.all()), 1)
self.assertEqual(topology.aggregations.all()[0].path, path)
self.assertEqual(topology.aggregations.all()[0].start_position, 0.0)
self.assertEqual(topology.aggregations.all()[0].end_position, 1.0)
def test_deserialize_multiple_lines(self):
# Multiple paths
p1 = PathFactory.create(geom=LineString((0, 0), (2, 2)))
p2 = PathFactory.create(geom=LineString((2, 2), (2, 0)))
p3 = PathFactory.create(geom=LineString((2, 0), (4, 0)))
pks = [p.pk for p in [p1, p2, p3]]
topology = Topology.deserialize('{"paths": %s, "positions": {"0": [0.0, 1.0], "2": [0.0, 1.0]}, "offset": 1}' % (pks))
for i in range(3):
self.assertEqual(topology.aggregations.all()[i].start_position, 0.0)
self.assertEqual(topology.aggregations.all()[i].end_position, 1.0)
topology = Topology.deserialize('{"paths": %s, "positions": {"0": [0.3, 1.0], "2": [0.0, 0.7]}, "offset": 1}' % (pks))
self.assertEqual(topology.aggregations.all()[0].start_position, 0.3)
self.assertEqual(topology.aggregations.all()[0].end_position, 1.0)
self.assertEqual(topology.aggregations.all()[1].start_position, 0.0)
self.assertEqual(topology.aggregations.all()[1].end_position, 1.0)
self.assertEqual(topology.aggregations.all()[2].start_position, 0.0)
self.assertEqual(topology.aggregations.all()[2].end_position, 0.7)
def test_deserialize_point(self):
PathFactory.create()
# Take a point
p = Point(700100, 6600000, 0, srid=settings.SRID)
p.transform(settings.API_SRID)
closest = Path.closest(p)
# Check closest path
self.assertEqual(closest.geom.coords, ((700000, 6600000), (700100, 6600100)))
# The point has same x as first point of path, and y to 0 :
topology = Topology.deserialize('{"lng": %s, "lat": %s}' % (p.x, p.y))
self.assertAlmostEqual(topology.offset, -70.7106781, places=6)
self.assertEqual(len(topology.paths.all()), 1)
pagg = topology.aggregations.get()
self.assertAlmostEqual(pagg.start_position, 0.5, places=6)
self.assertAlmostEqual(pagg.end_position, 0.5, places=6)
def test_deserialize_serialize(self):
path = PathFactory.create(geom=LineString((1, 1), (2, 2), (2, 0)))
before = TopologyFactory.create(offset=1, paths=[(path, 0.5, 0.5)])
# Deserialize its serialized version !
after = Topology.deserialize(before.serialize())
self.assertEqual(len(before.paths.all()), len(after.paths.all()))
start_before = before.aggregations.all()[0].start_position
end_before = before.aggregations.all()[0].end_position
start_after = after.aggregations.all()[0].start_position
end_after = after.aggregations.all()[0].end_position
self.assertAlmostEqual(start_before, start_after, places=6)
self.assertAlmostEqual(end_before, end_after, places=6)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class TopologyOverlappingTest(TestCase):
def setUp(self):
self.path1 = PathFactory.create(geom=LineString((0, 0), (0, 10)))
self.path2 = PathFactory.create(geom=LineString((0, 20), (0, 10)))
self.path3 = PathFactory.create(geom=LineString((0, 20), (0, 30)))
self.path4 = PathFactory.create(geom=LineString((0, 30), (0, 40)))
self.topo1 = TopologyFactory.create(paths=[(self.path1, 0.5, 1), (self.path2, 1, 0), self.path3,
(self.path4, 0, 0.5)])
self.topo2 = TopologyFactory.create(paths=[self.path2])
self.point1 = TopologyFactory.create(paths=[(self.path2, 0.4, 0.4)])
self.point2 = TopologyFactory.create(paths=[(self.path2, 0.8, 0.8)])
self.point3 = TopologyFactory.create(paths=[(self.path2, 0.6, 0.6)])
def test_overlapping_returned_can_be_filtered(self):
overlaps = Topology.overlapping(self.topo1)
overlaps = overlaps.exclude(pk=self.topo1.pk)
self.assertEqual(len(overlaps), 4)
overlaps = Topology.overlapping(self.topo1)
overlaps = overlaps.filter(pk__in=[self.point1.pk, self.point2.pk])
self.assertEqual(len(overlaps), 2)
def test_overlapping_return_sharing_path(self):
overlaps = Topology.overlapping(self.topo1)
self.assertTrue(self.topo1 in overlaps)
self.assertTrue(self.topo2 in overlaps)
def test_overlapping_sorts_by_order_of_progression(self):
overlaps = Topology.overlapping(self.topo2)
self.assertEqual(list(overlaps), [self.topo2,
self.point1, self.point3, self.point2, self.topo1])
def test_overlapping_sorts_when_path_is_reversed(self):
overlaps = Topology.overlapping(self.topo1)
self.assertEqual(list(overlaps), [self.topo1,
self.point2, self.point3, self.point1, self.topo2])
def test_overlapping_does_not_fail_if_no_records(self):
from geotrek.trekking.models import Trek
overlaps = Topology.overlapping(Trek.objects.all())
self.assertEqual(list(overlaps), [])
| 43.09081
| 133
| 0.568643
|
f9cf6fc7a41fc9fa7bd3705301a306a7ea0c2395
| 1,844
|
py
|
Python
|
py_pdf_term/endtoend/_endtoend/caches/method/base.py
|
kumachan-mis/py-pdf-term
|
282505826ce8c626003e753068d15738d772ce46
|
[
"MIT"
] | null | null | null |
py_pdf_term/endtoend/_endtoend/caches/method/base.py
|
kumachan-mis/py-pdf-term
|
282505826ce8c626003e753068d15738d772ce46
|
[
"MIT"
] | 1
|
2021-08-02T13:02:12.000Z
|
2021-08-02T13:02:12.000Z
|
py_pdf_term/endtoend/_endtoend/caches/method/base.py
|
kumachan-mis/py-pdf-term
|
282505826ce8c626003e753068d15738d772ce46
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
from typing import Any, Callable, Dict, Generic, List, Union
from py_pdf_term.methods import MethodTermRanking
from py_pdf_term.methods._methods.rankingdata import RankingData
from ...configs import MethodLayerConfig
class BaseMethodLayerRankingCache(metaclass=ABCMeta):
def __init__(self, cache_dir: str) -> None:
pass
@abstractmethod
def load(
self,
pdf_paths: List[str],
config: MethodLayerConfig,
) -> Union[MethodTermRanking, None]:
raise NotImplementedError(f"{self.__class__.__name__}.load()")
@abstractmethod
def store(
self,
pdf_paths: List[str],
term_ranking: MethodTermRanking,
config: MethodLayerConfig,
) -> None:
raise NotImplementedError(f"{self.__class__.__name__}.store()")
@abstractmethod
def remove(self, pdf_paths: List[str], config: MethodLayerConfig) -> None:
raise NotImplementedError(f"{self.__class__.__name__}.remove()")
class BaseMethodLayerDataCache(Generic[RankingData], metaclass=ABCMeta):
def __init__(self, cache_dir: str) -> None:
pass
@abstractmethod
def load(
self,
pdf_paths: List[str],
config: MethodLayerConfig,
from_dict: Callable[[Dict[str, Any]], RankingData],
) -> Union[RankingData, None]:
raise NotImplementedError(f"{self.__class__.__name__}.load()")
@abstractmethod
def store(
self,
pdf_paths: List[str],
ranking_data: RankingData,
config: MethodLayerConfig,
) -> None:
raise NotImplementedError(f"{self.__class__.__name__}.store()")
@abstractmethod
def remove(self, pdf_paths: List[str], config: MethodLayerConfig) -> None:
raise NotImplementedError(f"{self.__class__.__name__}.remove()")
| 30.229508
| 78
| 0.676247
|
b38ddf0503913168d0ac4d46b50bdc2a6a95ad32
| 2,663
|
py
|
Python
|
contrib/play/esnames.py
|
Rosencrantz/aleph
|
47ac45fa72607e1ab16c7c30690013a7d00be116
|
[
"MIT"
] | 1,213
|
2017-03-15T08:10:52.000Z
|
2022-03-29T13:57:44.000Z
|
contrib/play/esnames.py
|
Rosencrantz/aleph
|
47ac45fa72607e1ab16c7c30690013a7d00be116
|
[
"MIT"
] | 1,374
|
2017-03-14T18:23:10.000Z
|
2022-03-31T18:42:20.000Z
|
contrib/play/esnames.py
|
Rosencrantz/aleph
|
47ac45fa72607e1ab16c7c30690013a7d00be116
|
[
"MIT"
] | 217
|
2017-03-17T12:04:22.000Z
|
2022-03-20T11:17:16.000Z
|
import os
import json
import redis
import requests
import normality
from collections import defaultdict
conn = redis.Redis(decode_responses=True)
def name_tokens(name):
name = normality.normalize(name, latinize=True)
# if len(name) > 2:
# return [name]
# return []
return [n for n in name.split(" ") if len(n)]
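# Example of the tokenization above (illustrative): name_tokens("Siemens AG")
# returns ["siemens", "ag"] -- normalized, latinized, lower-cased tokens.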
def get_entities():
params = {
"include": ["names", "countries", "schema"],
"schema": ["Thing"],
"api_key": os.environ.get("ALEPH_API_KEY"),
}
url = "http://localhost:5000/api/2/entities/_stream"
res = requests.get(url, params=params, stream=True)
for line in res.iter_lines():
entity = json.loads(line)
yield entity
def flush():
conn.delete("_total")
for key in conn.scan_iter("names:*"):
conn.delete(key)
def name_stats():
# flush()
name_count = 0
name_lengths = []
for idx, entity in enumerate(get_entities()):
tokens = defaultdict(int)
for name in entity.get("names", []):
name_count += 1
name = name_tokens(name)
name_lengths.append(len(name))
for token in name:
tokens[token] += 1
for key, count in tokens.items():
conn.incrby("names:" + key, count)
conn.incrby("_total", sum(tokens.values()))
if idx % 1000 == 0:
print(idx)
if idx > 100000:
break
# print('unique tokens', len(tokens))
print("name count", name_count)
name_avg = sum(name_lengths) / float(name_count)
print("name avg", name_avg)
# counter = Counter(tokens)
# print(counter.most_common(50))
def name_score(name):
max_value = 0
count_value = 0
total_value = 0
for key in conn.scan_iter("names:*"):
value = int(conn.get(key))
max_value = max(value, max_value)
count_value += 1
total_value += value
# total = total_value / count_value
total = max_value
# total = int(conn.get('_total'))
tokens = name_tokens(name)
keys = ["names:" + t for t in tokens]
values = conn.mget(keys)
total_score = 1
for value in values:
value = int(value or 1)
score = (max_value - value) / value
# score = 1 - (total / max(1, value))
total_score += score
total_score = total_score / max_value
print(name, total_score, total, values)
# values = [int(v or 0) for v in conn.mget(keys)]
# print(total, values)
if __name__ == "__main__":
# name_stats()
name_score("the bank")
name_score("zen koan")
name_score("nazarbayev")
name_score("friedrich lindenberg")
| 26.366337
| 56
| 0.59294
|
43d923207aacd51ca895dc209815396ca1361d22
| 7,664
|
py
|
Python
|
contrib/devtools/update-translations.py
|
pugmagician/primestone
|
423ffe90abaafb7660a71c1c6d77a8a961624d8c
|
[
"MIT"
] | null | null | null |
contrib/devtools/update-translations.py
|
pugmagician/primestone
|
423ffe90abaafb7660a71c1c6d77a8a961624d8c
|
[
"MIT"
] | null | null | null |
contrib/devtools/update-translations.py
|
pugmagician/primestone
|
423ffe90abaafb7660a71c1c6d77a8a961624d8c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'primestone_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
        print('No .git directory found', file=sys.stderr)
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
        except IndexError:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
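# Example (illustrative): find_format_specifiers("%1 of %n files") yields
# ['1', 'n'], which split_format_specifiers turns into ({'1'}, ['n']),
# i.e. Qt-style numeric specifiers versus strprintf-style ones.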
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
| 37.568627
| 124
| 0.629567
|
2418899fdcc7282df838a35f1455b3bd97cc8447
| 1,982
|
py
|
Python
|
gym_sudoku/envs/solver.py
|
vrundha/sudoku
|
adf1a8cf347cf58be3620e6a520cbe25036c2a55
|
[
"MIT"
] | null | null | null |
gym_sudoku/envs/solver.py
|
vrundha/sudoku
|
adf1a8cf347cf58be3620e6a520cbe25036c2a55
|
[
"MIT"
] | null | null | null |
gym_sudoku/envs/solver.py
|
vrundha/sudoku
|
adf1a8cf347cf58be3620e6a520cbe25036c2a55
|
[
"MIT"
] | null | null | null |
from gym_sudoku.envs.sudoku import Sudoku
class Solver:
"""
Class to solve sudoku with backtracking
"""
def __init__(self, sudoku: Sudoku):
"""
:param sudoku:
"""
self.sudoku = sudoku
def solve(self, state: Sudoku, i=0, j=0):
"""
:param state: object of type Sudoku
:param i: row index
:param j: column index
:return: tuple representing a boolean value to indicate if it is solved and the solved state
"""
number_found = False
for number in range(1, 10):
if state.puzzle[i][j] == 0:
if self.sudoku.check_if_number_ok(i, j, number):
number_found = True
state.puzzle[i][j] = number
if i == 8 and j == 8:
return True, state
elif j == 8:
solvable, ret_state = self.solve(state, i+1, 0)
else:
solvable, ret_state = self.solve(state, i, j+1)
if not solvable:
number_found = False
state.puzzle[i][j] = 0
continue
else:
return True, ret_state
else:
number_found = True
if i == 8 and j == 8:
return True, state
elif j == 8:
return self.solve(state, i + 1, 0)
else:
return self.solve(state, i, j + 1)
if not number_found:
return False, state
if __name__ == "__main__":
sud = Sudoku()
sud.generate(20)
sud.visualize()
print("\n\n")
solv = Solver(sud)
solvable, solution = solv.solve(sud)
if solvable:
solution.visualize()
else:
print("Not solvable")
assert len(sud.puzzle) == 9
assert len(sud.puzzle[0]) == 9
| 28.314286
| 100
| 0.462664
|
097287cdf9aaa75eb6346597b215bcb35a91b499
| 534
|
py
|
Python
|
recipes/paho-mqtt-c/all/test_package/conanfile.py
|
flexferrum/conan-center-index
|
5d0fb36e1e0b757e6deebc330c34d07dc1a5a7a5
|
[
"MIT"
] | 3
|
2020-04-16T15:01:33.000Z
|
2022-01-13T08:05:47.000Z
|
recipes/paho-mqtt-c/all/test_package/conanfile.py
|
flexferrum/conan-center-index
|
5d0fb36e1e0b757e6deebc330c34d07dc1a5a7a5
|
[
"MIT"
] | 33
|
2020-02-18T15:54:50.000Z
|
2022-03-28T08:54:10.000Z
|
recipes/paho-mqtt-c/all/test_package/conanfile.py
|
GavinNL/conan-center-index
|
0ae829a362c1cc6a20d97e023ca0aafc805797c3
|
[
"MIT"
] | 8
|
2020-03-06T14:38:18.000Z
|
2022-03-28T08:41:15.000Z
|
from conans import ConanFile, CMake, tools
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.definitions["BUILD_ASYNC"] = self.options["paho-mqtt-c"].asynchronous
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
bin_path = os.path.join("bin", "test_package")
self.run(bin_path, run_environment=True)
| 31.411765
| 83
| 0.642322
|
08307ac8ed67efcebeaf758901f92cddfaf49ab6
| 9,583
|
py
|
Python
|
rnnmorph/batch_generator.py
|
AliceCyber/rnnmorph
|
adf687c7590f8d2bc3abba56850878e33fc4c3c0
|
[
"Apache-2.0"
] | 124
|
2017-10-06T17:29:39.000Z
|
2022-03-02T09:06:35.000Z
|
rnnmorph/batch_generator.py
|
AliceCyber/rnnmorph
|
adf687c7590f8d2bc3abba56850878e33fc4c3c0
|
[
"Apache-2.0"
] | 4
|
2019-02-12T16:51:39.000Z
|
2021-10-14T12:48:00.000Z
|
rnnmorph/batch_generator.py
|
AliceCyber/rnnmorph
|
adf687c7590f8d2bc3abba56850878e33fc4c3c0
|
[
"Apache-2.0"
] | 21
|
2017-10-14T18:46:05.000Z
|
2022-03-08T18:16:43.000Z
|
# -*- coding: utf-8 -*-
# Author: Ilya Gusev
# Description: Batch generator with configurable parameters.
from typing import List, Tuple
from collections import namedtuple
import nltk
import numpy as np
from pymorphy2 import MorphAnalyzer
from russian_tagsets import converters
from rnnmorph.data_preparation.word_vocabulary import WordVocabulary
from rnnmorph.data_preparation.grammeme_vectorizer import GrammemeVectorizer
from rnnmorph.data_preparation.process_tag import convert_from_opencorpora_tag, process_gram_tag
from rnnmorph.util.tqdm_open import tqdm_open
from rnnmorph.config import TrainConfig, BuildModelConfig
WordForm = namedtuple("WordForm", "text gram_vector_index")
class BatchGenerator:
"""
    Generates batches of training examples.
"""
def __init__(self, language: str,
file_names: List[str],
config: TrainConfig,
grammeme_vectorizer_input: GrammemeVectorizer,
grammeme_vectorizer_output: GrammemeVectorizer,
indices: np.array,
word_vocabulary: WordVocabulary,
char_set: str,
build_config: BuildModelConfig):
self.language = language
self.file_names = file_names # type: List[str]
        # Batch parameters.
self.batch_size = config.external_batch_size # type: int
self.bucket_borders = config.sentence_len_groups # type: List[Tuple[int]]
self.buckets = [list() for _ in range(len(self.bucket_borders))]
self.build_config = build_config
self.word_vocabulary = word_vocabulary
self.char_set = char_set
        # Dataset split.
self.indices = indices # type: np.array
        # Prepared vocabularies.
self.grammeme_vectorizer_input = grammeme_vectorizer_input # type: GrammemeVectorizer
self.grammeme_vectorizer_output = grammeme_vectorizer_output # type: GrammemeVectorizer
self.morph = MorphAnalyzer() if self.language == "ru" else None # type: MorphAnalyzer
self.converter = converters.converter('opencorpora-int', 'ud14') if self.language == "ru" else None
def __to_tensor(self, sentences: List[List[WordForm]]) -> Tuple[List, List]:
"""
        Convert sentences into features and targets.
        :param sentences: sentences (with parsed word forms).
        :return: word indices, grammeme vectors, char indices and targets for all sentences.
"""
n = len(sentences)
grammemes_count = self.grammeme_vectorizer_input.grammemes_count()
sentence_max_len = max([len(sentence) for sentence in sentences])
data = []
target = []
words = np.zeros((n, sentence_max_len), dtype=np.int)
grammemes = np.zeros((n, sentence_max_len, grammemes_count), dtype=np.float)
chars = np.zeros((n, sentence_max_len, self.build_config.char_max_word_length), dtype=np.int)
y = np.zeros((n, sentence_max_len), dtype=np.int)
for i, sentence in enumerate(sentences):
word_indices, gram_vectors, char_vectors = self.get_sample(
[x.text for x in sentence],
language=self.language,
converter=self.converter,
morph=self.morph,
grammeme_vectorizer=self.grammeme_vectorizer_input,
max_word_len=self.build_config.char_max_word_length,
word_vocabulary=self.word_vocabulary,
word_count=self.build_config.word_max_count,
char_set=self.char_set)
assert len(word_indices) == len(sentence) and \
len(gram_vectors) == len(sentence) and \
len(char_vectors) == len(sentence)
words[i, -len(sentence):] = word_indices
grammemes[i, -len(sentence):] = gram_vectors
chars[i, -len(sentence):] = char_vectors
y[i, -len(sentence):] = [word.gram_vector_index + 1 for word in sentence]
if self.build_config.use_word_embeddings:
data.append(words)
if self.build_config.use_gram:
data.append(grammemes)
if self.build_config.use_chars:
data.append(chars)
y = y.reshape(y.shape[0], y.shape[1], 1)
target.append(y)
if self.build_config.use_pos_lm:
y_prev = np.zeros_like(y)
y_prev[:, 1:] = y[:, :-1]
target.append(y_prev.reshape(y.shape[0], y.shape[1], 1))
y_next = np.zeros_like(y)
y_next[:, :-1] = y[:, 1:]
target.append(y_next.reshape(y.shape[0], y.shape[1], 1))
if self.build_config.use_word_lm:
words_prev = np.zeros_like(words)
words_prev[:, 1:] = words[:, :-1]
target.append(words_prev.reshape(words.shape[0], words.shape[1], 1))
words_next = np.zeros_like(words)
words_next[:, :-1] = words[:, 1:]
target.append(words_next.reshape(words.shape[0], words.shape[1], 1))
return data, target
@staticmethod
def get_sample(sentence: List[str],
language: str,
converter,
morph: MorphAnalyzer,
grammeme_vectorizer: GrammemeVectorizer,
max_word_len: int,
word_vocabulary: WordVocabulary,
word_count: int,
char_set: str):
"""
        Extract features for a single sentence.
        :param language: language.
        :param sentence: sentence.
        :param morph: morphological analyzer.
        :param converter: converter of tags to UD.
        :param grammeme_vectorizer: grammeme vocabulary.
        :param max_word_len: number of characters per word to process.
        :param word_vocabulary: word vocabulary.
        :param word_count: maximum word index.
        :param char_set: set of characters for which embeddings exist.
        :return: word indices, grammeme vectors, character indices.
"""
word_char_vectors = []
word_gram_vectors = []
word_indices = []
for word in sentence:
char_indices = np.zeros(max_word_len)
gram_value_indices = np.zeros(grammeme_vectorizer.grammemes_count())
            # Character indices of the word.
word_char_indices = [char_set.index(ch) if ch in char_set else len(char_set) for ch in word][-max_word_len:]
char_indices[-min(len(word), max_word_len):] = word_char_indices
word_char_vectors.append(char_indices)
            # Word indices.
word_index = word_vocabulary.word_to_index[word.lower()] if word_vocabulary.has_word(word) else word_count
word_index = min(word_index, word_count)
word_indices.append(word_index)
            # Grammeme vector of the word.
if language == "ru":
                # Sum all possible parses element-wise.
for parse in morph.parse(word):
pos, gram = convert_from_opencorpora_tag(converter, parse.tag, word)
gram = process_gram_tag(gram)
gram_value_indices += np.array(grammeme_vectorizer.get_vector(pos + "#" + gram))
elif language == "en":
_, tags = zip(*nltk.pos_tag([word], tagset='universal'))
pos = tags[0]
gram_value_indices += np.array(grammeme_vectorizer.get_vector(pos + "#_"))
            # Normalize each grammatical category separately.
sorted_grammemes = sorted(grammeme_vectorizer.all_grammemes.items(), key=lambda x: x[0])
index = 0
for category, values in sorted_grammemes:
mask = gram_value_indices[index:index + len(values)]
s = sum(mask)
gram_value_indices[index:index + len(values)] = mask / s if s != 0 else 0.0
index += len(values)
word_gram_vectors.append(gram_value_indices)
return word_indices, word_gram_vectors, word_char_vectors
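    # Illustrative note (added): for a 3-word sentence, get_sample returns three
    # parallel lists of length 3: integer word indices, grammeme vectors of
    # grammemes_count floats each (normalized per grammatical category), and
    # character index arrays of length max_word_len.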
def __iter__(self):
"""
        Yield the next batch.
        :return: word form indices, grammeme vectors, target indices.
"""
last_sentence = []
i = 0
for filename in self.file_names:
with tqdm_open(filename, encoding='utf-8') as f:
for line in f:
line = line.strip()
if len(line) == 0:
if i not in self.indices:
last_sentence = []
i += 1
continue
for index, bucket in enumerate(self.buckets):
if self.bucket_borders[index][0] <= len(last_sentence) < self.bucket_borders[index][1]:
bucket.append(last_sentence)
if len(bucket) >= self.batch_size:
yield self.__to_tensor(bucket)
self.buckets[index] = []
last_sentence = []
i += 1
else:
word, _, pos, tags = line.split('\t')[0:4]
gram_vector_index = self.grammeme_vectorizer_output.get_index_by_name(pos + "#" + tags)
last_sentence.append(WordForm(text=word, gram_vector_index=gram_vector_index))
for index, bucket in enumerate(self.buckets):
yield self.__to_tensor(bucket)
| 45.20283
| 120
| 0.600856
|
ac638053101c0a2037745a2a6ce42a23b072b505
| 10,400
|
py
|
Python
|
gqnlib/slim_generator.py
|
rnagumo/gqnlib
|
96bd8499f90c00b29817f71e6380bc622ce78479
|
[
"MIT"
] | 1
|
2020-08-13T01:54:52.000Z
|
2020-08-13T01:54:52.000Z
|
gqnlib/slim_generator.py
|
rnagumo/gqnlib
|
96bd8499f90c00b29817f71e6380bc622ce78479
|
[
"MIT"
] | null | null | null |
gqnlib/slim_generator.py
|
rnagumo/gqnlib
|
96bd8499f90c00b29817f71e6380bc622ce78479
|
[
"MIT"
] | 1
|
2021-01-03T16:02:55.000Z
|
2021-01-03T16:02:55.000Z
|
"""Generator for SLIM (Spatial Language Integrating Model)."""
from typing import Tuple
import torch
from torch import nn, Tensor
from .utils import kl_divergence_normal
from .generation import Conv2dLSTMCell
class LatentEncoder(nn.Module):
"""Latent encoder p(z|x).
Args:
x_channel (int): Number of channels in input observation.
e_channel (int): Number of channels in the conv. layer mapping input
images to LSTM input.
h_channel (int): Number of channels in LSTM layer.
z_channel (int): Number of channels in the stochastic latent in each
DRAW step.
"""
def __init__(self, x_channel: int, e_channel: int, h_channel: int,
z_channel: int, stride: int) -> None:
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(x_channel, 32, kernel_size=2, stride=2),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=2, stride=2),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=2, stride=2),
nn.ReLU(),
nn.Conv2d(128, e_channel, kernel_size=1, stride=1),
nn.ReLU(),
)
self.lstm_cell = Conv2dLSTMCell(e_channel + z_channel, h_channel,
kernel_size=5, stride=1, padding=2)
self.conv2 = nn.Conv2d(h_channel, z_channel * 2, kernel_size=5,
stride=1, padding=2)
def forward(self, x: Tensor, z: Tensor, h: Tensor, c: Tensor
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Forward p(z|x, z_prev, h_prev, c_prev).
Args:
x (torch.Tensor): Representations, size `(b, r, 64, 64)`.
z (torch.Tensor): Previous latents, size `(b, z, 8, 8)`.
h (torch.Tensor): Previous hidden states, size `(b, h, 8, 8)`.
c (torch.Tensor): Previous cell states, size `(b, h, 8, 8)`.
Returns:
mu (torch.Tensor): Mean of `z` distribution, size `(b, z, 8, 8)`.
logvar (torch.Tensor): Log variance of `z` distribution, size
`(b, z, 8, 8)`.
h (torch.Tensor): Current hidden states, size `(b, h, 8, 8)`.
c (torch.Tensor): Current cell states, size `(b, h, 8, 8)`.
"""
lstm_input = self.conv(x)
h, c = self.lstm_cell(torch.cat([lstm_input, z], dim=1), (h, c))
mu, logvar = torch.chunk(self.conv2(h), 2, dim=1)
return mu, logvar, h, c
class VisualDecoder(nn.Module):
"""Visual decoder u = f(z, v, r).
Args:
h_channel (int): Number of channels in LSTM layer.
d_channel (int): Number of channels in the conv. layer mapping the
canvas state to the LSTM input.
z_channel (int): Number of channels in the latents.
r_dim (int): Dimension size of representations.
u_channel (int): Number of channels in the hidden layer between
LSTM states and the canvas.
v_dim (int): Dimension size of viewpoints.
stride (int): Kernel size of transposed conv. layer.
"""
def __init__(self, h_channel: int, d_channel: int, z_channel: int,
r_dim: int, u_channel: int, v_dim: int, stride: int) -> None:
super().__init__()
self.conv = nn.Conv2d(
u_channel, d_channel, kernel_size=stride, stride=stride)
self.lstm_cell = Conv2dLSTMCell(
z_channel + v_dim + r_dim + d_channel, h_channel,
kernel_size=5, stride=1, padding=2)
self.deconv = nn.ConvTranspose2d(
h_channel, u_channel, kernel_size=stride, stride=stride)
def forward(self, z: Tensor, v: Tensor, r: Tensor, u: Tensor, h: Tensor,
c: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
"""Render u = M(z, v)
Args:
z (torch.Tensor): Latent states, size `(b, z, 8, 8)`.
v (torch.Tensor): Query viewpoints, size `(b, v)`.
r (torch.Tensor): Context representations, size `(b, r, 1, 1)`.
u (torch.Tensor): Canvas for images, size `(b, u, h*st, w*st)`.
h (torch.Tensor): Previous hidden states, size `(b, h, 8, 8)`.
c (torch.Tensor): Previous cell states, size `(b, h, 8, 8)`.
Returns:
u (torch.Tensor): Aggregated canvas, size `(b, u, h*st, w*st)`.
h (torch.Tensor): Current hidden states, size `(b, h, 8, 8)`.
c (torch.Tensor): Current cell states, size `(b, h, 8, 8)`.
"""
# Resize viewpoints and representations
batch, _, height, width = z.size()
v = v.contiguous().view(batch, -1, 1, 1).repeat(1, 1, height, width)
r = r.contiguous().view(batch, -1, 1, 1).repeat(1, 1, height, width)
lstm_input = self.conv(u)
h, c = self.lstm_cell(torch.cat([z, v, r, lstm_input], dim=1), (h, c))
u = u + self.deconv(h)
return u, h, c
class SlimGenerator(nn.Module):
"""Generator class for SLIM.
Args:
x_channel (int, optional): Number of channels in input observation.
u_channel (int, optional): Number of channels in the hidden layer
between LSTM states and the canvas.
r_dim (int, optional): Dimension size of representations.
e_channel (int, optional): Number of channels in the conv. layer
mapping input images to LSTM input.
d_channel (int, optional): Number of channels in the conv. layer
mapping the canvas state to the LSTM input.
h_channel (int, optional): Number of channels in LSTM layer.
z_channel (int, optional): Number of channels in the stochastic latent
in each DRAW step.
stride (int, optional): Kernel size of transposed conv. layer.
v_dim (int, optional): Dimension size of viewpoints.
n_layer (int, optional): Number of recurrent layers in DRAW.
scale (int, optional): Scale of image in generation process.
"""
def __init__(self, x_channel: int = 3, u_channel: int = 128,
r_dim: int = 256, e_channel: int = 128, d_channel: int = 128,
h_channel: int = 128, z_channel: int = 3, stride: int = 2,
v_dim: int = 4, n_layer: int = 8, scale: int = 4) -> None:
super().__init__()
self.u_channel = u_channel
self.h_channel = h_channel
self.z_channel = z_channel
self.stride = stride
self.n_layer = n_layer
self.scale = scale
self.encoder = LatentEncoder(
x_channel, e_channel, h_channel, z_channel, stride)
self.decoder = VisualDecoder(
h_channel, d_channel, z_channel, r_dim, u_channel, v_dim, stride)
self.translation = nn.ConvTranspose2d(u_channel, x_channel,
kernel_size=4, stride=4)
def forward(self, x_q: Tensor, v_q: Tensor, r_c: Tensor
) -> Tuple[Tensor, Tensor]:
"""Inferences with query pair `(x_q, v_q)` and context representation
`r_c`.
Args:
x_q (torch.Tensor): True queried iamges, size `(b, c, h, w)`.
v_q (torch.Tensor): Query of viewpoints, size `(b, v)`.
r_c (torch.Tensor): Representation of context, size `(b, r, x, y)`.
Returns:
canvas (torch.Tensor): Reconstructed images, size `(b, c, h, w)`.
kl_loss (torch.Tensor): Calculated KL loss, size `(b,)`.
"""
# Data size
batch_size, _, h, w = x_q.size()
h_scale = h // (self.scale * self.stride)
w_scale = w // (self.scale * self.stride)
# Encoder initial state
h_enc = x_q.new_zeros((batch_size, self.h_channel, h_scale, w_scale))
c_enc = x_q.new_zeros((batch_size, self.h_channel, h_scale, w_scale))
# Decoder initial state
h_dec = x_q.new_zeros((batch_size, self.h_channel, h_scale, w_scale))
c_dec = x_q.new_zeros((batch_size, self.h_channel, h_scale, w_scale))
# Canvas that data is drawn on
u = x_q.new_zeros((batch_size, self.u_channel, h_scale * self.stride,
w_scale * self.stride))
# Latent state
z = x_q.new_zeros((batch_size, self.z_channel, h_scale, w_scale))
# KL loss
kl_loss = x_q.new_zeros((batch_size,))
for _ in range(self.n_layer):
# Posterior sample
q_mu, q_logvar, h_enc, c_enc = self.encoder(x_q, z, h_enc, c_enc)
z = q_mu + (0.5 * q_logvar).exp() * torch.randn_like(q_logvar)
# Generator state update
u, h_dec, c_dec = self.decoder(z, v_q, r_c, u, h_dec, c_dec)
# Calculate KL loss (prior: N(0, I))
p_mu = torch.zeros_like(q_mu)
p_logvar = torch.zeros_like(q_logvar)
_kl_tmp = kl_divergence_normal(
q_mu, q_logvar.exp(), p_mu, p_logvar.exp(), reduce=False)
kl_loss += _kl_tmp.sum([1, 2, 3])
canvas = self.translation(u)
return canvas, kl_loss
def sample(self, v_q: Tensor, r_c: Tensor,
x_shape: Tuple[int, int] = (64, 64)) -> Tensor:
"""Samples images from the prior given viewpoint and representations.
Args:
v_q (torch.Tensor): Query of viewpoints, size `(b, v)`.
r_c (torch.Tensor): Representation of context, size `(b, r, x, y)`.
x_shape (tuple of int, optional): Sampled x shape.
Returns:
canvas (torch.Tensor): Sampled data, size `(b, c, h, w)`.
"""
# Data size
batch_size = v_q.size(0)
h, w = x_shape
h_scale = h // (self.scale * self.stride)
w_scale = w // (self.scale * self.stride)
# Decoder initial state
h_dec = v_q.new_zeros((batch_size, self.h_channel, h_scale, w_scale))
c_dec = v_q.new_zeros((batch_size, self.h_channel, h_scale, w_scale))
# Canvas that data is drawn on
u = v_q.new_zeros((batch_size, self.u_channel, h_scale * self.stride,
w_scale * self.stride))
for _ in range(self.n_layer):
# Sample prior z ~ N(0, I)
            z = torch.randn(batch_size, self.z_channel, h_scale, w_scale,
                            device=v_q.device)
# Generator state update
u, h_dec, c_dec = self.decoder(z, v_q, r_c, u, h_dec, c_dec)
canvas = self.translation(u)
return canvas
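# Illustrative smoke test added for clarity (not part of the original module);
# tensor shapes follow the constructor defaults documented above.
if __name__ == "__main__":
    generator = SlimGenerator()
    x_q = torch.randn(2, 3, 64, 64)   # queried images
    v_q = torch.randn(2, 4)           # query viewpoints
    r_c = torch.randn(2, 256, 1, 1)   # context representations
    canvas, kl_loss = generator(x_q, v_q, r_c)
    print(canvas.shape, kl_loss.shape)  # expected: (2, 3, 64, 64) and (2,)
    sampled = generator.sample(v_q, r_c)
    print(sampled.shape)  # expected: (2, 3, 64, 64)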
| 39.694656
| 79
| 0.575
|
6b9001e01313f90e7834d5711e2ed640ac875b8e
| 14,791
|
py
|
Python
|
build_tools/testing/gen_test_matrix.py
|
smit-hinsu/iree
|
a385d311b701cdc06cb825000ddb34c8a11c6eef
|
[
"Apache-2.0"
] | 1,163
|
2019-09-13T19:10:42.000Z
|
2022-03-31T05:44:10.000Z
|
build_tools/testing/gen_test_matrix.py
|
okkwon/iree
|
e32cc76952d37a14f73c8a7da889edf47fcc2fce
|
[
"Apache-2.0"
] | 3,346
|
2019-09-23T04:16:36.000Z
|
2022-03-31T23:56:57.000Z
|
build_tools/testing/gen_test_matrix.py
|
okkwon/iree
|
e32cc76952d37a14f73c8a7da889edf47fcc2fce
|
[
"Apache-2.0"
] | 311
|
2019-09-19T02:34:23.000Z
|
2022-03-28T16:06:57.000Z
|
#!/usr/bin/env python
# Copyright 2021 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Generates runners for a matrix of test configurations.
This tool is used for sources under test which should be run in a matrix of
different real-world configurations. It is aimed at integration tests which
typically need a greater degree of flexibility and less "framework-ness"
compared to (for example) lit-tests or more classic unit tests.
As with all modern things, it starts with directories that contain
test_matrix.yaml files, which describe tests.
Conceptually, the test_matrix.yaml file declares a list of test_groups
at the top-level. Each test group has an interpolated "id" which is unique
in the scope of all tests. Example:
test_groups:
- id: "math_real_{backend}_vs_{ref_backend}_{function}_dyndims_{dyndims}"
Interpolation is done relative to placeholders declared in a "matrix", where
one concrete test is instantiated for the cartesian product of all combinations
in the matrix. Here is an example matrix:
matrix:
backend: *BACKENDS
ref_backend: *REF_BACKENDS
function: *MATH_FUNCTIONS
dyndims: ["true", "false"]
Each key in the matrix expands to a list (or list of lists that are flattened)
of string values. Note in the above that we have used YAML references to
include fragments defined elsewhere in the file.
Each concrete test is then generated according to one or more runners, which
are responsible for creating actual generated test sources. Example:
runner:
- type: tfhost
main: math_test.py
args:
- "--functions={function}"
- "--target_backends={backend}"
- "--reference_backend={ref_backend}"
- "--dynamic_dims={dyndims}"
The type of runner is a hard-coded feature of this script. See the
implementations at the end.
By default, every expanded combination is included in the test suite and is
expected to pass. This can be changed by including 'xfail' and/or 'xpass'
lists. A test will be marked expected-failing if it matches any of the
predicates in xfail and none of the predicates in xpass.
Example:
xfail:
# Vulkan is currently structurally broken with dyndims.
- matrix.backend: iree_vulkan
matrix.dyndims: "true"
xpass:
# Some dynamic dim functions pass on vulkan regardless of overall
# broken support.
- matrix.backend: iree_vulkan
matrix.function: *VULKAN_DYNDIM_PASS_MATH_FUNCTIONS
matrix.dyndims: "true"
Note that each entry in xfail/xpass is a mapping of key/value pairs. Any keys
that start with 'matrix.' are taken to match an expanded matrix placeholder.
A mapping predicate is evaluated as a product-of-sums where the overall
predicate is true if all of the listed keys have a match for any of their
values.
TODO: Add support for skipping combinations.
TODO: Add support for annotating combinations with 'requires' for env feature
checking.
"""
from typing import Set, Tuple
import argparse
from contextlib import contextmanager
import os
import shutil
try:
import yaml
except ModuleNotFoundError as e:
raise RuntimeError(
f"PyYAML is not installed. Typically: 'python -m pip install PyYAML"
) from e
################################################################################
# Base classes and types
################################################################################
class Environment:
"""Runtime environment for processing a directory."""
def __init__(self, args, root_dir: str, output_dir: str):
self.args = args
self.root_dir = root_dir
self.output_dir = output_dir
# Set of directories containing purely generated files.
self.gen_dirs = set() # type: Set[str]
# Set of (gen_dir, file_name) for all files in a given directory that have
# been generated.
self.gen_files = set() # type: Set[Tuple[str, str]]
def remember_gen_file(self, gen_file_path: str):
gen_dir = os.path.dirname(gen_file_path)
gen_file = os.path.basename(gen_file_path)
self.gen_dirs.add(gen_dir)
self.gen_files.add((gen_dir, gen_file))
def prune_gen_files(self):
found_gen_files = set()
for gen_dir in self.gen_dirs:
dir_listing = os.listdir(gen_dir)
for fname in dir_listing:
found_gen_files.add((gen_dir, fname))
obsolete_gen_files = found_gen_files - self.gen_files
if obsolete_gen_files:
for gen_dir, fname in obsolete_gen_files:
obsolete_path = os.path.join(gen_dir, fname)
log(f"Removing obsolete file {obsolete_path}")
if os.path.isdir(obsolete_path):
shutil.rmtree(obsolete_path)
else:
os.remove(obsolete_path)
class Runner:
"""Base class for a runner."""
RUNNER_IDENT = None
def __init__(self, env: Environment, test_id: str):
self.env = env
self.test_id = test_id
self.gen_dir = os.path.join(self.env.output_dir, "generated")
self.xfail = False
@property
def runner_ident(self) -> str:
assert self.RUNNER_IDENT, "Must define RUNNER_IDENT"
return self.RUNNER_IDENT
def create_gen_file(self, file_name: str, mode: str = "wt"):
os.makedirs(self.gen_dir, exist_ok=True)
full_path = os.path.join(self.gen_dir, file_name)
handle = open(full_path, mode)
self.env.remember_gen_file(full_path)
return handle
def link_file(self, from_path: str, to_path: str):
if from_path == to_path:
return
from_path = os.path.realpath(from_path)
os.makedirs(os.path.dirname(to_path), exist_ok=True)
if os.path.exists(to_path):
os.remove(to_path)
os.symlink(from_path, to_path)
def generate(self):
raise NotImplementedError(f"Generate not implemented for {self.__class__}")
################################################################################
# Main logic
################################################################################
def parse_arguments():
parser = argparse.ArgumentParser(description="Test matrix generator")
parser.add_argument("--dir",
required=True,
type=str,
help="Directory to process")
parser.add_argument("--output_dir",
required=True,
type=str,
help="Output directory")
args = parser.parse_args()
return args
def main(args):
env = Environment(args, args.dir, args.output_dir)
process_directory(env)
def process_directory(env: Environment):
dir = os.path.realpath(env.root_dir)
try:
config_sections = read_directory_config(dir)
except Exception as e:
raise RuntimeError(f"Could not read configuration from {dir}") from e
for section in config_sections:
require_mapping(section)
for config_key, config_value in section.items():
if config_key == "lists":
# Ignore: a place to stash anchors and references.
pass
elif config_key == "test_groups":
require_list(config_value)
for test_group in config_value:
require_mapping(test_group)
process_test_group(env, test_group)
else:
raise ValueError(f"Unexpected top-level section {config_key}")
env.prune_gen_files()
def process_test_group(env: Environment, test_group):
group_id = get_mapping_key(test_group, "id", require_str)
matrix = generate_matrix(
get_mapping_key(test_group, "matrix", require_mapping))
matrix_id_map = {group_id.format(**m): m for m in matrix}
for runner_map in get_mapping_key(test_group, "runner", require_list):
for matrix_id, matrix_map in matrix_id_map.items():
runner = create_runner(env, matrix_id, runner_map, matrix_map)
runner.xfail = (evaluate_xfail(test_group, matrix_map) and
not evaluate_xpass(test_group, matrix_map))
runner.generate()
def evaluate_xfail(test_group, matrix_map) -> bool:
try:
xfail_list = flatten_lists(require_list(test_group["xfail"]))
except KeyError:
return False
for xfail_group in xfail_list:
if evaluate_matrix_map_predicate(matrix_map, xfail_group):
return True
return False
def evaluate_xpass(test_group, matrix_map) -> bool:
try:
xpass_list = flatten_lists(require_list(test_group["xpass"]))
except KeyError:
return False
for xpass_group in xpass_list:
if evaluate_matrix_map_predicate(matrix_map, xpass_group):
return True
return False
def evaluate_matrix_map_predicate(matrix_map, predicate_group) -> bool:
# Each key is something like 'matrix.<key>' which are and'ed
# together. Each value is either a literal or a list that is
# or'd together.
for pred_key, pred_value in predicate_group.items():
match_value = None
if pred_key.startswith("matrix."):
try:
match_value = matrix_map[pred_key[len("matrix."):]]
except KeyError:
raise ValueError(
f"Could not match matrix predicate to matrix value: {pred_key}")
else:
raise ValueError(
f"Expected a matrix predicate (i.e. matrix.) but got {pred_key}")
# Match list (OR) or literal (==)
if isinstance(pred_value, list):
if match_value not in flatten_lists(pred_value):
return False
else:
if pred_value != match_value:
return False
return True
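# Example (illustrative): with matrix_map {"backend": "iree_vulkan", "dyndims": "true"},
# the predicate {"matrix.backend": "iree_vulkan", "matrix.dyndims": "true"} evaluates to
# True, while {"matrix.backend": "iree_llvmaot"} evaluates to False.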
################################################################################
# Utilities
################################################################################
def generate_matrix(matrix_map):
# List of (key, [value, value, ...])
matrix_entries = [(k, flatten_lists(v)) for k, v in matrix_map.items()]
# Permute.
permuted = []
def accumulate(prior: dict, i: int):
if i == len(matrix_entries):
permuted.append(prior)
return
next_key, next_values = matrix_entries[i]
for next_value in next_values:
current = dict(prior)
current[next_key] = next_value
accumulate(current, i + 1)
accumulate({}, 0)
return permuted
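# Example (illustrative):
#   generate_matrix({"backend": ["cpu", "vulkan"], "dyndims": ["true"]})
# returns [{"backend": "cpu", "dyndims": "true"},
#          {"backend": "vulkan", "dyndims": "true"}].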
def read_directory_config(dir: str) -> list:
sections = []
matrix_path = os.path.join(dir, "test_matrix.yaml")
with open(matrix_path, "r") as stream:
for section in yaml.safe_load_all(stream):
sections.append(section)
return sections
INDENT = 0
def log(msg: str):
print(" " * INDENT + msg)
@contextmanager
def indent():
global INDENT
INDENT += 1
yield
INDENT -= 1
def flatten_lists(l):
results = list()
for item in l:
if isinstance(item, list):
results.extend(flatten_lists(item))
else:
results.append(item)
return results
def require_mapping(v):
if isinstance(v, dict):
return v
raise ValueError(f"Expected a YAML mapping for {v}")
def require_list(v):
if isinstance(v, list):
return v
raise ValueError(f"Expected YAML list for {v}")
def require_str(v):
if isinstance(v, str):
return v
raise ValueError(f"Expected str for {v}")
def get_mapping_key(mapping, key: str, checker=None):
if key not in mapping:
raise ValueError(f"Expected key '{key}' in {mapping}")
value = mapping[key]
if checker:
checker(value)
return value
################################################################################
# Runners
################################################################################
PYRUNNER_STUB = r"""
import importlib
import os
import sys
resolved_imports = False
for _ in range(2):
try:
for impname in REQUIRE_IMPORTS:
importlib.import_module(impname)
resolved_imports = True
except ModuleNotFoundError as e:
if os.path.exists(os.path.join(os.getcwd(), "CMakeCache.txt")):
d = os.path.join(os.getcwd(), "bindings", "python")
if os.path.exists(d):
print(f"Added {d} to sys.path", file=sys.stderr)
sys.path.append(d)
if not resolved_imports:
raise Exception(f"Cannot find required imports: {REQUIRE_IMPORTS}\n"
f"If running interactively, ensure that you are in the "
f"build directory or have packages installed")
sys.argv = [sys.argv[0]] + ARGS + sys.argv[1:]
with open(MAIN, "rt") as f:
script = f.read()
FAILED = False
try:
exec(script, globals())
except SystemExit as exitex:
  # unittest likes to sys.exit() itself. Catch that here so we can
# process XFAIL properly.
if exitex.code: FAILED = True
except:
FAILED = True
import traceback
traceback.print_exc()
if not XFAIL:
raise
if XFAIL:
if FAILED:
print("=== TEST FAILED AS EXPECTED ===", file=sys.stderr)
sys.exit(0)
else:
print("=== TEST PASSED BUT WAS EXPECTED TO FAIL ===", file=sys.stderr)
sys.exit(1)
if FAILED:
sys.exit(1)
"""
class TfHostRunner(Runner):
"""Runner for tf e2e host tests."""
RUNNER_IDENT = "tfhost"
def __init__(self, env: Environment, test_id: str, runner_map: dict,
matrix_map: dict):
super().__init__(env=env, test_id=test_id)
self.main_file = get_mapping_key(runner_map, "main", require_str)
raw_arg_list = get_mapping_key(runner_map, "args", require_list)
self.args = [
require_str(raw_arg).format(**matrix_map) for raw_arg in raw_arg_list
]
def generate(self):
# Generate the runner script.
file_name = (
f"{'XFAIL_' if self.xfail else ''}{self.test_id}_{self.runner_ident}.py"
)
with self.create_gen_file(file_name) as f:
parts = [
"import os",
"import sys",
"REQUIRE_IMPORTS = ['iree.tf.support.tf_utils', 'iree.tf.support.tf_test_utils']",
f"ARGS = {repr(self.args)}",
f"MAIN = os.path.join(os.path.dirname(__file__), '..', {repr(self.main_file)})",
f"XFAIL = {self.xfail}",
PYRUNNER_STUB,
]
f.write("\n".join(parts))
# Copy/link the main file.
main_file_src_path = os.path.join(self.env.root_dir, self.main_file)
main_file_dst_path = os.path.join(self.env.output_dir, self.main_file)
if not os.path.exists(main_file_src_path):
raise RuntimeError(
f"Referenced main file '{main_file_src_path}' does not exist")
self.link_file(main_file_src_path, main_file_dst_path)
RUNNER_CLASSES = {
"tfhost": TfHostRunner,
}
def create_runner(env: Environment, test_id: str, runner_map: dict,
matrix_map: dict):
runner_type = get_mapping_key(runner_map, "type", require_str)
try:
runner_class = RUNNER_CLASSES[runner_type]
except KeyError:
raise ValueError(f"Unknown runner type '{runner_type}'")
return runner_class(env=env,
test_id=test_id,
runner_map=runner_map,
matrix_map=matrix_map)
if __name__ == "__main__":
main(parse_arguments())
| 30.943515
| 92
| 0.661078
|
5aaab145c50bce4d889dc3a0aa9158362c4e3578
| 53,619
|
py
|
Python
|
trac/ticket/tests/model.py
|
exocad/exotrac
|
c8207efff78d4736745e975abaa359e8bb21ffdb
|
[
"BSD-3-Clause"
] | null | null | null |
trac/ticket/tests/model.py
|
exocad/exotrac
|
c8207efff78d4736745e975abaa359e8bb21ffdb
|
[
"BSD-3-Clause"
] | null | null | null |
trac/ticket/tests/model.py
|
exocad/exotrac
|
c8207efff78d4736745e975abaa359e8bb21ffdb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
from datetime import datetime, timedelta
from StringIO import StringIO
import tempfile
import shutil
import unittest
import trac.tests.compat
from trac import core
from trac.attachment import Attachment
from trac.core import TracError, implements
from trac.resource import ResourceNotFound
from trac.test import EnvironmentStub
from trac.ticket.model import (
Ticket, Component, Milestone, Priority, Type, Version
)
from trac.ticket.roadmap import MilestoneModule
from trac.ticket.api import (
IMilestoneChangeListener, ITicketChangeListener, TicketSystem
)
from trac.util.datefmt import from_utimestamp, to_utimestamp, utc
class TestTicketChangeListener(core.Component):
implements(ITicketChangeListener)
def ticket_created(self, ticket):
self.action = 'created'
self.ticket = ticket
self.resource = ticket.resource
def ticket_changed(self, ticket, comment, author, old_values):
self.action = 'changed'
self.ticket = ticket
self.comment = comment
self.author = author
self.old_values = old_values
def ticket_deleted(self, ticket):
self.action = 'deleted'
self.ticket = ticket
# the listener has no ticket_comment_modified and ticket_change_deleted
class TestTicketChangeListener_2(core.Component):
implements(ITicketChangeListener)
def ticket_created(self, ticket):
pass
def ticket_changed(self, ticket, comment, author, old_values):
pass
def ticket_deleted(self, ticket):
pass
def ticket_comment_modified(self, ticket, cdate, author, comment,
old_comment):
self.action = 'comment_modified'
self.ticket = ticket
self.cdate = cdate
self.author = author
self.comment = comment
self.old_comment = old_comment
def ticket_change_deleted(self, ticket, cdate, changes):
self.action = 'change_deleted'
self.ticket = ticket
self.cdate = cdate
self.changes = changes
class TicketTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
self.env.config.set('ticket-custom', 'foo', 'text')
self.env.config.set('ticket-custom', 'cbon', 'checkbox')
self.env.config.set('ticket-custom', 'cboff', 'checkbox')
def tearDown(self):
self.env.reset_db()
def _insert_ticket(self, summary, **kw):
"""Helper for inserting a ticket into the database"""
ticket = Ticket(self.env)
for k, v in kw.items():
ticket[k] = v
return ticket.insert()
def _create_a_ticket(self):
# 1. Creating ticket
ticket = Ticket(self.env)
ticket['reporter'] = 'santa'
ticket['summary'] = 'Foo'
ticket['foo'] = 'This is a custom field'
return ticket
def test_invalid_ticket_id(self):
self.assertEqual(Ticket.id_is_valid(-1), False)
self.assertEqual(Ticket.id_is_valid(0), False)
self.assertEqual(Ticket.id_is_valid(1), True)
self.assertEqual(Ticket.id_is_valid(1L << 31), True)
self.assertEqual(Ticket.id_is_valid(1L << 32), False)
self.assertRaises(ResourceNotFound, Ticket, self.env, -1)
self.assertRaises(ResourceNotFound, Ticket, self.env, 1L << 32)
def test_create_ticket_1(self):
ticket = self._create_a_ticket()
self.assertEqual('santa', ticket['reporter'])
self.assertEqual('Foo', ticket['summary'])
self.assertEqual('This is a custom field', ticket['foo'])
ticket.insert()
def test_create_ticket_2(self):
ticket = self._create_a_ticket()
ticket.insert()
self.assertEqual(1, ticket.id)
# Retrieving ticket
ticket2 = Ticket(self.env, 1)
self.assertEqual(1, ticket2.id)
self.assertEqual('santa', ticket2['reporter'])
self.assertEqual('Foo', ticket2['summary'])
self.assertEqual('This is a custom field', ticket2['foo'])
def _modify_a_ticket(self):
ticket2 = self._create_a_ticket()
ticket2.insert()
ticket2['summary'] = 'Bar'
ticket2['foo'] = 'New value'
ticket2.save_changes('santa', 'this is my comment')
return ticket2
def test_create_ticket_3(self):
self._modify_a_ticket()
# Retrieving ticket
ticket3 = Ticket(self.env, 1)
self.assertEqual(1, ticket3.id)
self.assertEqual(ticket3['reporter'], 'santa')
self.assertEqual(ticket3['summary'], 'Bar')
self.assertEqual(ticket3['foo'], 'New value')
def test_create_ticket_4(self):
ticket3 = self._modify_a_ticket()
# Testing get_changelog()
log = ticket3.get_changelog()
self.assertEqual(len(log), 3)
ok_vals = ['foo', 'summary', 'comment']
self.assertIn(log[0][2], ok_vals)
self.assertIn(log[1][2], ok_vals)
self.assertIn(log[2][2], ok_vals)
def test_create_ticket_5(self):
ticket3 = self._modify_a_ticket()
# Testing delete()
ticket3.delete()
log = ticket3.get_changelog()
self.assertEqual(len(log), 0)
self.assertRaises(TracError, Ticket, self.env, 1)
def _test_empty_strings_stored_as_null(self, ticket):
"""Ticket fields that contain empty strings are stored as NULLs
in the database. NULLs are cast to empty strings on fetch.
"""
std_fields = [name for name in ticket.std_fields
if name not in ticket.protected_fields]
cst_fields = [name for name in ticket.custom_fields
if name not in ticket.protected_fields]
# Values are stored as NULL in the database
self.assertEqual([(None,) * len(std_fields)],
self.env.db_query("""
SELECT %s FROM ticket WHERE id=%%s
""" % ','.join(std_fields), (ticket.id,)))
self.assertEqual([(None,)] * len(cst_fields),
self.env.db_query("""
SELECT value FROM ticket_custom
WHERE ticket=%%s AND name IN (%s)
""" % ','.join(['%s'] * len(cst_fields)),
[ticket.id] + cst_fields))
# Values are returned from the model as empty strings
for name in ticket.editable_fields:
self.assertEqual('', ticket[name], name)
def test_create_empty_strings_stored_as_null(self):
"""Ticket fields with empty strings are NULL when creating ticket.
"""
ticket = Ticket(self.env)
ticket.populate(dict((name, '') for name in ticket.editable_fields))
ticket.insert()
self._test_empty_strings_stored_as_null(ticket)
def test_change_empty_strings_stored_as_null(self):
"""Ticket fields with empty strings are NULL when changing ticket.
"""
ticket = Ticket(self.env)
ticket.insert()
ticket.populate(dict((name, '') for name in ticket.editable_fields))
ticket.save_changes()
self._test_empty_strings_stored_as_null(ticket)
def test_whitespace_stripped_from_text_field(self):
"""Whitespace is stripped from text fields.
Test for regression of #11891.
"""
ticket = Ticket(self.env)
ticket['keywords'] = 'kw1'
ticket['milestone'] = 'milestone1'
ticket.insert()
ticket['keywords'] = ' kw1'
ticket['milestone'] = 'milestone2'
ticket.save_changes()
changes = self.env.db_query("""
SELECT oldvalue, newvalue FROM ticket_change
""")
self.assertEqual('kw1', ticket['keywords'])
self.assertEqual('milestone2', ticket['milestone'])
self.assertEqual(2, len(changes))
self.assertIn(('milestone1', 'milestone2'), changes)
self.assertIn(('1', None), changes)
def test_ticket_id_is_always_int(self):
ticket_id = self._insert_ticket('Foo')
self.assertEqual(ticket_id, int(ticket_id))
ticket = Ticket(self.env, str(ticket_id))
self.assertEqual(ticket_id, ticket.id)
self.assertEqual(ticket.resource.id, ticket_id)
def test_can_save_ticket_without_explicit_comment(self):
ticket = Ticket(self.env)
ticket.insert()
ticket['summary'] = 'another summary'
ticket.save_changes('foo')
changes = ticket.get_changelog()
comment_change = [c for c in changes if c[2] == 'comment'][0]
self.assertEqual('1', comment_change[3])
self.assertEqual('', comment_change[4])
def test_can_save_ticket_without_explicit_username(self):
ticket = Ticket(self.env)
ticket.insert()
ticket['summary'] = 'another summary'
ticket.save_changes()
for change in ticket.get_changelog():
self.assertIsNone(change[1])
def test_comment_with_whitespace_only_is_not_saved(self):
ticket = Ticket(self.env)
ticket.insert()
ticket.save_changes(comment='\n \n ')
self.assertEqual(0, len(ticket.get_changelog()))
def test_prop_whitespace_change_is_not_saved(self):
ticket = Ticket(self.env)
ticket.populate({'summary': 'ticket summary'})
ticket.insert()
ticket['summary'] = ' ticket summary '
ticket.save_changes()
self.assertEqual(0, len(ticket.get_changelog()))
def test_ticket_default_values(self):
"""
Verify that a ticket uses default values specified in the configuration
when created.
"""
# Set defaults for some standard fields
self.env.config.set('ticket', 'default_type', 'defect')
self.env.config.set('ticket', 'default_component', 'component1')
# Add a custom field of type 'text' with a default value
self.env.config.set('ticket-custom', 'foo', 'text')
self.env.config.set('ticket-custom', 'foo.value', 'Something')
# Add a custom field of type 'select' with a default value specified as
# the value itself
self.env.config.set('ticket-custom', 'bar', 'select')
self.env.config.set('ticket-custom', 'bar.options', 'one|two|three')
self.env.config.set('ticket-custom', 'bar.value', 'two')
# Add a custom field of type 'select' with a default value specified as
# index into the options list
self.env.config.set('ticket-custom', 'baz', 'select')
self.env.config.set('ticket-custom', 'baz.options', 'one|two|three')
self.env.config.set('ticket-custom', 'baz.value', '2')
ticket = Ticket(self.env)
self.assertEqual('defect', ticket['type'])
self.assertEqual('component1', ticket['component'])
self.assertEqual('Something', ticket['foo'])
self.assertEqual('two', ticket['bar'])
self.assertEqual('three', ticket['baz'])
def test_set_field_stripped(self):
"""
Verify that whitespace around ticket fields is stripped, except for
textarea fields.
"""
ticket = Ticket(self.env)
ticket['component'] = ' foo '
ticket['description'] = ' bar '
self.assertEqual('foo', ticket['component'])
self.assertEqual(' bar ', ticket['description'])
def test_set_field_multi(self):
"""
Ticket fields can't yet be multi-valued
"""
ticket = Ticket(self.env)
def set_multi_valued():
ticket['component'] = [' foo ', ' bar ']
self.assertRaises(TracError, set_multi_valued)
def test_owner_from_component(self):
"""
Verify that the owner of a new ticket is set to the owner of the
component.
"""
component = Component(self.env)
component.name = 'test'
component.owner = 'joe'
component.insert()
ticket = Ticket(self.env)
ticket['reporter'] = 'santa'
ticket['summary'] = 'Foo'
ticket['component'] = 'test'
ticket.insert()
self.assertEqual('joe', ticket['owner'])
def test_owner_from_changed_component(self):
"""
Verify that the owner of a new ticket is updated when the component is
changed.
"""
component1 = Component(self.env)
component1.name = 'test1'
component1.owner = 'joe'
component1.insert()
component2 = Component(self.env)
component2.name = 'test2'
component2.owner = 'kate'
component2.insert()
ticket = Ticket(self.env)
ticket['reporter'] = 'santa'
ticket['summary'] = 'Foo'
ticket['component'] = 'test1'
ticket['status'] = 'new'
tktid = ticket.insert()
ticket = Ticket(self.env, tktid)
ticket['component'] = 'test2'
ticket.save_changes('jane', 'Testing')
self.assertEqual('kate', ticket['owner'])
def test_no_disown_from_changed_component(self):
"""
Verify that a ticket is not disowned when the component is changed to
a non-assigned component.
"""
component1 = Component(self.env)
component1.name = 'test1'
component1.owner = 'joe'
component1.insert()
component2 = Component(self.env)
component2.name = 'test2'
component2.owner = ''
component2.insert()
ticket = Ticket(self.env)
ticket['reporter'] = 'santa'
ticket['summary'] = 'Foo'
ticket['component'] = 'test1'
ticket['status'] = 'new'
tktid = ticket.insert()
ticket = Ticket(self.env, tktid)
ticket['component'] = 'test2'
ticket.save_changes('jane', 'Testing')
self.assertEqual('joe', ticket['owner'])
def test_populate_ticket(self):
data = {'summary': 'Hello world', 'reporter': 'john',
'foo': 'bar', 'checkbox_cbon': '', 'cbon': 'on',
'checkbox_cboff': ''}
ticket = Ticket(self.env)
ticket.populate(data)
# Standard fields
self.assertEqual('Hello world', ticket['summary'])
self.assertEqual('john', ticket['reporter'])
# An unknown field
self.assertIsNone(ticket['bar'])
# Custom field
self.assertEqual('bar', ticket['foo'])
# Custom field of type 'checkbox'
self.assertEqual('on', ticket['cbon'])
self.assertEqual('0', ticket['cboff'])
def test_changelog(self):
tkt_id = self._insert_ticket('Test', reporter='joe', component='foo',
milestone='bar')
ticket = Ticket(self.env, tkt_id)
ticket['component'] = 'bar'
ticket['milestone'] = 'foo'
now = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
ticket.save_changes('jane', 'Testing', now)
changelog = sorted(ticket.get_changelog())
self.assertEqual([(now, 'jane', 'comment', '1', 'Testing', True),
(now, 'jane', 'component', 'foo', 'bar', True),
(now, 'jane', 'milestone', 'bar', 'foo', True)],
changelog)
def test_changelog_with_attachment(self):
"""Verify ordering of attachments and comments in the changelog."""
tkt_id = self._insert_ticket('Test', reporter='joe', component='foo')
ticket = Ticket(self.env, tkt_id)
t1 = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
ticket.save_changes('jane', 'Testing', t1)
t2 = datetime(2001, 1, 1, 1, 1, 2, 0, utc)
self.env.db_transaction("""
INSERT INTO attachment (type, id, filename, size, time,
description, author, ipnr)
VALUES ('ticket',%s,'file.txt',1234,%s, 'My file','mark','')
""", (str(tkt_id), to_utimestamp(t2)))
t3 = datetime(2001, 1, 1, 1, 1, 3, 0, utc)
ticket.save_changes('jim', 'Other', t3)
log = ticket.get_changelog()
self.assertEqual(4, len(log))
self.assertEqual((t1, 'jane', 'comment', '1', 'Testing', True), log[0])
self.assertEqual([(t2, 'mark', 'attachment', '', 'file.txt', False),
(t2, 'mark', 'comment', '', 'My file', False)],
sorted(log[1:3]))
self.assertEqual((t3, 'jim', 'comment', '2', 'Other', True), log[3])
def test_subsecond_change(self):
"""Perform two ticket changes within a second."""
tkt_id = self._insert_ticket('Test', reporter='joe', component='foo')
ticket = Ticket(self.env, tkt_id)
t1 = datetime(2001, 1, 1, 1, 1, 1, 123456, utc)
ticket.save_changes('jane', 'Testing', t1)
t2 = datetime(2001, 1, 1, 1, 1, 1, 123789, utc)
ticket.save_changes('jim', 'Other', t2)
log = ticket.get_changelog()
self.assertEqual(2, len(log))
self.assertEqual((t1, 'jane', 'comment', '1', 'Testing', True), log[0])
self.assertEqual((t2, 'jim', 'comment', '2', 'Other', True), log[1])
def test_changelog_with_reverted_change(self):
tkt_id = self._insert_ticket('Test', reporter='joe', component='foo')
ticket = Ticket(self.env, tkt_id)
ticket['component'] = 'bar'
ticket['component'] = 'foo'
now = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
ticket.save_changes('jane', 'Testing', now)
self.assertEqual([(now, 'jane', 'comment', '1', 'Testing', True)],
list(ticket.get_changelog()))
def test_change_listener_created(self):
listener = TestTicketChangeListener(self.env)
ticket = self._create_a_ticket()
ticket.insert()
self.assertEqual('created', listener.action)
self.assertEqual(ticket, listener.ticket)
self.assertEqual(ticket.id, ticket.resource.id)
def test_change_listener_changed(self):
listener = TestTicketChangeListener(self.env)
data = {'component': 'foo', 'milestone': 'bar'}
tkt_id = self._insert_ticket('Hello World', reporter='john', **data)
ticket = Ticket(self.env, tkt_id)
ticket['component'] = 'new component'
ticket['milestone'] = 'new milestone'
comment = 'changing ticket'
ticket.save_changes('author', comment)
self.assertEqual('changed', listener.action)
self.assertEqual(comment, listener.comment)
self.assertEqual('author', listener.author)
for key, value in data.iteritems():
self.assertEqual(value, listener.old_values[key])
def test_change_listener_deleted(self):
listener = TestTicketChangeListener(self.env)
ticket = self._create_a_ticket()
ticket.insert()
ticket.delete()
self.assertEqual('deleted', listener.action)
self.assertEqual(ticket, listener.ticket)
class TicketCommentTestCase(unittest.TestCase):
def _insert_ticket(self, summary, when, **kwargs):
ticket = Ticket(self.env)
for k, v in kwargs.iteritems():
ticket[k] = v
self.id = ticket.insert(when)
def _modify_ticket(self, author, comment, when, cnum, **kwargs):
ticket = Ticket(self.env, self.id)
for k, v in kwargs.iteritems():
ticket[k] = v
ticket.save_changes(author, comment, when, cnum=cnum)
def _find_change(self, ticket, cnum):
(ts, author, comment) = ticket._find_change(cnum)
return from_utimestamp(ts)
def assertChange(self, ticket, cnum, date, author, **fields):
change = ticket.get_change(cnum=cnum)
self.assertEqual(dict(date=date, author=author, fields=fields), change)
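# Note added for readability (inferred from the assertions in the test cases
# below, not from separate documentation): when a comment is edited via
# Ticket.modify_comment(), the 'comment' change keeps the original author and
# comment number in 'old' and the latest text in 'new', while each edit adds a
# '_comment<n>' change whose author is the editor, whose 'old' holds the
# superseded text and whose 'new' holds the edit time as a utimestamp string,
# roughly:
#
#     comment   = dict(author='jack', old='1', new='<latest text>')
#     _comment0 = dict(author='joe', old='<previous text>', new='<utimestamp>')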
class TicketCommentEditTestCase(TicketCommentTestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
self.created = datetime(2001, 1, 1, 1, 0, 0, 0, utc)
self._insert_ticket('Test ticket', self.created,
owner='john', keywords='a, b, c')
self.t1 = self.created + timedelta(seconds=1)
self._modify_ticket('jack', 'Comment 1', self.t1, '1')
self.t2 = self.created + timedelta(seconds=2)
self._modify_ticket('john', 'Comment 2', self.t2, '1.2',
owner='jack')
self.t3 = self.created + timedelta(seconds=3)
self._modify_ticket('jim', 'Comment 3', self.t3, '3',
keywords='a, b')
def tearDown(self):
self.env.reset_db()
def test_modify_comment(self):
"""Check modification of a "standalone" comment"""
ticket = Ticket(self.env, self.id)
self.assertChange(ticket, 1, self.t1, 'jack',
comment=dict(author='jack', old='1', new='Comment 1'))
self.assertChange(ticket, 2, self.t2, 'john',
owner=dict(author='john', old='john', new='jack'),
comment=dict(author='john', old='1.2', new='Comment 2'))
self.assertChange(ticket, 3, self.t3, 'jim',
keywords=dict(author='jim', old='a, b, c', new='a, b'),
comment=dict(author='jim', old='3', new='Comment 3'))
t = self.created + timedelta(seconds=10)
ticket.modify_comment(self._find_change(ticket, 1),
'joe', 'New comment 1', t)
self.assertChange(ticket, 1, self.t1, 'jack',
comment=dict(author='jack', old='1', new='New comment 1'),
_comment0=dict(author='joe', old='Comment 1',
new=str(to_utimestamp(t))))
self.assertEqual(t, Ticket(self.env, self.id)['changetime'])
def test_threading(self):
"""Check modification of a "threaded" comment"""
ticket = Ticket(self.env, self.id)
t = self.created + timedelta(seconds=20)
ticket.modify_comment(self._find_change(ticket, 2),
'joe', 'New comment 2', t)
self.assertChange(ticket, 2, self.t2, 'john',
owner=dict(author='john', old='john', new='jack'),
comment=dict(author='john', old='1.2', new='New comment 2'),
_comment0=dict(author='joe', old='Comment 2',
new=str(to_utimestamp(t))))
def test_modify_missing_cnum(self):
"""Editing a comment with no cnum in oldvalue"""
self.env.db_transaction(
"UPDATE ticket_change SET oldvalue='' WHERE oldvalue='3'")
ticket = Ticket(self.env, self.id)
t = self.created + timedelta(seconds=30)
ticket.modify_comment(self._find_change(ticket, 3),
'joe', 'New comment 3', t)
self.assertChange(ticket, 3, self.t3, 'jim',
keywords=dict(author='jim', old='a, b, c', new='a, b'),
comment=dict(author='jim', old='', new='New comment 3'),
_comment0=dict(author='joe', old='Comment 3',
new=str(to_utimestamp(t))))
def test_modify_missing_comment(self):
"""Editing a comment where the comment field is missing"""
self.env.db_transaction("""
DELETE FROM ticket_change WHERE field='comment' AND oldvalue='1.2'
""")
ticket = Ticket(self.env, self.id)
t = self.created + timedelta(seconds=40)
ticket.modify_comment(self._find_change(ticket, 2),
'joe', 'New comment 2', t)
self.assertChange(ticket, 2, self.t2, 'john',
owner=dict(author='john', old='john', new='jack'),
comment=dict(author='john', old='', new='New comment 2'),
_comment0=dict(author='joe', old='',
new=str(to_utimestamp(t))))
def test_modify_missing_cnums_and_comment(self):
"""Editing a comment when all cnums are missing and one comment
field is missing
"""
with self.env.db_transaction as db:
db("UPDATE ticket_change SET oldvalue='' WHERE oldvalue='1'")
db("""DELETE FROM ticket_change
WHERE field='comment' AND oldvalue='1.2'""")
db("UPDATE ticket_change SET oldvalue='' WHERE oldvalue='3'")
# Modify after missing comment
ticket = Ticket(self.env, self.id)
t = self.created + timedelta(seconds=50)
ticket.modify_comment(self._find_change(ticket, 3),
'joe', 'New comment 3', t)
self.assertChange(ticket, 3, self.t3, 'jim',
keywords=dict(author='jim', old='a, b, c', new='a, b'),
comment=dict(author='jim', old='', new='New comment 3'),
_comment0=dict(author='joe', old='Comment 3',
new=str(to_utimestamp(t))))
# Modify missing comment
t = self.created + timedelta(seconds=60)
ticket.modify_comment(self._find_change(ticket, 2),
'joe', 'New comment 2', t)
self.assertChange(ticket, 2, self.t2, 'john',
owner=dict(author='john', old='john', new='jack'),
comment=dict(author='john', old='', new='New comment 2'),
_comment0=dict(author='joe', old='',
new=str(to_utimestamp(t))))
def test_missing_comment_edit(self):
"""Modify a comment where one edit is missing"""
ticket = Ticket(self.env, self.id)
t1 = self.created + timedelta(seconds=70)
ticket.modify_comment(self._find_change(ticket, 1),
'joe', 'New comment 1', t1)
t2 = self.created + timedelta(seconds=80)
ticket.modify_comment(self._find_change(ticket, 1),
'joe', 'Other comment 1', t2)
self.assertChange(ticket, 1, self.t1, 'jack',
comment=dict(author='jack', old='1', new='Other comment 1'),
_comment0=dict(author='joe', old='Comment 1',
new=str(to_utimestamp(t1))),
_comment1=dict(author='joe', old='New comment 1',
new=str(to_utimestamp(t2))))
self.env.db_transaction(
"DELETE FROM ticket_change WHERE field='_comment0'")
t3 = self.created + timedelta(seconds=90)
ticket.modify_comment(self._find_change(ticket, 1),
'joe', 'Newest comment 1', t3)
self.assertChange(ticket, 1, self.t1, 'jack',
comment=dict(author='jack', old='1', new='Newest comment 1'),
_comment1=dict(author='joe', old='New comment 1',
new=str(to_utimestamp(t2))),
_comment2=dict(author='joe', old='Other comment 1',
new=str(to_utimestamp(t3))))
def test_comment_history(self):
"""Check the generation of the comment history"""
ticket = Ticket(self.env, self.id)
t = [self.t1]
for i in range(1, 32):
t.append(self.created + timedelta(minutes=i))
ticket.modify_comment(self._find_change(ticket, 1),
'joe (%d)' % i,
'Comment 1 (%d)' % i, t[-1])
history = ticket.get_comment_history(cnum=1)
self.assertEqual((0, t[0], 'jack', 'Comment 1'), history[0])
for i in range(1, len(history)):
self.assertEqual((i, t[i], 'joe (%d)' % i,
'Comment 1 (%d)' % i), history[i])
history = ticket.get_comment_history(cdate=self.t1)
self.assertEqual((0, t[0], 'jack', 'Comment 1'), history[0])
for i in range(1, len(history)):
self.assertEqual((i, t[i], 'joe (%d)' % i,
'Comment 1 (%d)' % i), history[i])
def test_change_listener_comment_modified(self):
listener = TestTicketChangeListener_2(self.env)
ticket = Ticket(self.env, self.id)
ticket.modify_comment(cdate=self.t2, author='jack',
comment='New Comment 2', when=datetime.now(utc))
self.assertEqual('comment_modified', listener.action)
self.assertEqual(ticket, listener.ticket)
self.assertEqual(self.t2, listener.cdate)
self.assertEqual('jack', listener.author)
self.assertEqual('New Comment 2', listener.comment)
self.assertEqual('Comment 2', listener.old_comment)
class TicketCommentDeleteTestCase(TicketCommentTestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
self.env.config.set('ticket-custom', 'foo', 'text')
self.created = datetime(2001, 1, 1, 1, 0, 0, 0, utc)
self._insert_ticket('Test ticket', self.created,
owner='john', keywords='a, b, c', foo='initial')
self.t1 = self.created + timedelta(seconds=1)
self._modify_ticket('jack', 'Comment 1', self.t1, '1',
foo='change 1')
self.t2 = self.created + timedelta(seconds=2)
self._modify_ticket('john', 'Comment 2', self.t2, '1.2',
owner='jack', foo='change2')
self.t3 = self.created + timedelta(seconds=3)
self._modify_ticket('jim', 'Comment 3', self.t3, '3',
keywords='a, b', foo='change3')
self.t4 = self.created + timedelta(seconds=4)
self._modify_ticket('joe', 'Comment 4', self.t4, '4',
keywords='a', foo='change4')
def tearDown(self):
self.env.reset_db()
def test_delete_last_comment(self):
ticket = Ticket(self.env, self.id)
self.assertEqual('a', ticket['keywords'])
self.assertEqual('change4', ticket['foo'])
t = datetime.now(utc)
ticket.delete_change(cnum=4, when=t)
self.assertEqual('a, b', ticket['keywords'])
self.assertEqual('change3', ticket['foo'])
self.assertIsNone(ticket.get_change(cnum=4))
self.assertIsNotNone(ticket.get_change(cnum=3))
self.assertEqual(t, ticket.time_changed)
def test_delete_last_comment_when_custom_field_gone(self):
"""Regression test for http://trac.edgewall.org/ticket/10858"""
ticket = Ticket(self.env, self.id)
self.assertEqual('a', ticket['keywords'])
self.assertEqual('change4', ticket['foo'])
# we simulate the removal of the definition of the 'foo' custom field
self.env.config.remove('ticket-custom', 'foo')
del TicketSystem(self.env).fields
del TicketSystem(self.env).custom_fields
ticket = Ticket(self.env, self.id)
#
t = datetime.now(utc)
ticket.delete_change(cnum=4, when=t)
self.assertEqual('a, b', ticket['keywords'])
# 'foo' is no longer defined for the ticket
self.assertIsNone(ticket['foo'])
# however, 'foo=change3' is still in the database
self.assertEqual([('change3',)], self.env.db_query("""
SELECT value FROM ticket_custom WHERE ticket=%s AND name='foo'
""", (self.id,)))
self.assertIsNone(ticket.get_change(cnum=4))
self.assertIsNotNone(ticket.get_change(cnum=3))
self.assertEqual(t, ticket.time_changed)
def test_delete_last_comment_by_date(self):
ticket = Ticket(self.env, self.id)
self.assertEqual('a', ticket['keywords'])
self.assertEqual('change4', ticket['foo'])
t = datetime.now(utc)
ticket.delete_change(cdate=self.t4, when=t)
self.assertEqual('a, b', ticket['keywords'])
self.assertEqual('change3', ticket['foo'])
self.assertIsNone(ticket.get_change(cdate=self.t4))
self.assertIsNotNone(ticket.get_change(cdate=self.t3))
self.assertEqual(t, ticket.time_changed)
def test_delete_mid_comment(self):
ticket = Ticket(self.env, self.id)
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='a, b', new='a'),
foo=dict(author='joe', old='change3', new='change4'))
t = datetime.now(utc)
ticket.delete_change(cnum=3, when=t)
self.assertIsNone(ticket.get_change(cnum=3))
self.assertEqual('a', ticket['keywords'])
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='a, b, c', new='a'),
foo=dict(author='joe', old='change2', new='change4'))
self.assertEqual(t, ticket.time_changed)
def test_delete_mid_comment_by_date(self):
ticket = Ticket(self.env, self.id)
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='a, b', new='a'),
foo=dict(author='joe', old='change3', new='change4'))
t = datetime.now(utc)
ticket.delete_change(cdate=self.t3, when=t)
self.assertIsNone(ticket.get_change(cdate=self.t3))
self.assertEqual('a', ticket['keywords'])
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='a, b, c', new='a'),
foo=dict(author='joe', old='change2', new='change4'))
self.assertEqual(t, ticket.time_changed)
def test_delete_mid_comment_inconsistent(self):
# Make oldvalue on keywords for change 4 inconsistent. This should
# result in no change in oldvalue when deleting change 3. The
# oldvalue of foo should change normally.
self.env.db_transaction("""
UPDATE ticket_change SET oldvalue='1, 2'
WHERE field='keywords' AND oldvalue='a, b'
""")
ticket = Ticket(self.env, self.id)
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='1, 2', new='a'),
foo=dict(author='joe', old='change3', new='change4'))
ticket.delete_change(3)
self.assertIsNone(ticket.get_change(3))
self.assertEqual('a', ticket['keywords'])
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='1, 2', new='a'),
foo=dict(author='joe', old='change2', new='change4'))
def test_delete_all_comments(self):
ticket = Ticket(self.env, self.id)
ticket.delete_change(4)
ticket.delete_change(3)
ticket.delete_change(2)
t = datetime.now(utc)
ticket.delete_change(1, when=t)
self.assertEqual(t, ticket.time_changed)
def test_ticket_change_deleted(self):
listener = TestTicketChangeListener_2(self.env)
ticket = Ticket(self.env, self.id)
ticket.delete_change(cdate=self.t3, when=datetime.now(utc))
self.assertEqual('change_deleted', listener.action)
self.assertEqual(ticket, listener.ticket)
self.assertEqual(self.t3, listener.cdate)
self.assertEqual(dict(keywords=('a, b, c', 'a, b'),
foo=('change2', 'change3')),
listener.changes)
ticket.delete_change(cnum=2, when=datetime.now(utc))
self.assertEqual('change_deleted', listener.action)
self.assertEqual(ticket, listener.ticket)
self.assertEqual(self.t2, listener.cdate)
self.assertEqual(dict(owner=('john', 'jack'),
foo=('change 1', 'change2')),
listener.changes)
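# Note added for readability (a summary of the behaviour exercised above, not a
# separate specification): Ticket.delete_change() accepts either cnum= or
# cdate=, restores the ticket's current field values when the last change is
# removed, folds the deleted change's old values into the following change's
# 'old' entries when a middle change is removed, and notifies change listeners
# with the dict of (old, new) pairs that the deleted change carried.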
class EnumTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
def tearDown(self):
self.env.reset_db()
def test_priority_fetch(self):
prio = Priority(self.env, 'major')
self.assertEqual(prio.name, 'major')
self.assertEqual(prio.value, '3')
def test_priority_insert(self):
prio = Priority(self.env)
prio.name = 'foo'
prio.insert()
self.assertTrue(prio.exists)
def test_priority_insert_with_value(self):
prio = Priority(self.env)
prio.name = 'bar'
prio.value = 100
prio.insert()
self.assertTrue(prio.exists)
def test_priority_update(self):
prio = Priority(self.env, 'major')
prio.name = 'foo'
prio.update()
Priority(self.env, 'foo')
self.assertRaises(TracError, Priority, self.env, 'major')
def test_priority_delete(self):
prio = Priority(self.env, 'major')
self.assertEqual('3', prio.value)
prio.delete()
self.assertFalse(prio.exists)
self.assertRaises(TracError, Priority, self.env, 'major')
prio = Priority(self.env, 'minor')
self.assertEqual('3', prio.value)
def test_ticket_type_update(self):
tkttype = Type(self.env, 'task')
self.assertEqual(tkttype.name, 'task')
self.assertEqual(tkttype.value, '3')
tkttype.name = 'foo'
tkttype.update()
Type(self.env, 'foo')
class TestMilestoneChangeListener(core.Component):
implements(IMilestoneChangeListener)
def milestone_created(self, milestone):
self.action = 'created'
self.milestone = milestone
def milestone_changed(self, milestone, old_values):
self.action = 'changed'
self.milestone = milestone
self.old_values = old_values
def milestone_deleted(self, milestone):
self.action = 'deleted'
self.milestone = milestone
class MilestoneTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
self.env.path = tempfile.mkdtemp(prefix='trac-tempenv-')
self.created_at = datetime(2001, 1, 1, tzinfo=utc)
self.updated_at = self.created_at + timedelta(seconds=1)
def tearDown(self):
shutil.rmtree(self.env.path)
self.env.reset_db()
def _create_milestone(self, **values):
milestone = Milestone(self.env)
for k, v in values.iteritems():
setattr(milestone, k, v)
return milestone
def _insert_ticket(self, when=None, **kwargs):
ticket = Ticket(self.env)
for name, value in kwargs.iteritems():
ticket[name] = value
ticket.insert(when or self.created_at)
return ticket
def _update_ticket(self, ticket, author=None, comment=None, when=None,
**kwargs):
for name, value in kwargs.iteritems():
ticket[name] = value
ticket.save_changes(author, comment, when or self.updated_at)
def test_new_milestone(self):
milestone = Milestone(self.env)
self.assertFalse(milestone.exists)
self.assertIsNone(milestone.name)
self.assertIsNone(milestone.due)
self.assertIsNone(milestone.completed)
self.assertEqual('', milestone.description)
def test_new_milestone_empty_name(self):
"""
Verifies that specifying an empty milestone name results in the
milestone being correctly detected as non-existent.
"""
milestone = Milestone(self.env, '')
self.assertFalse(milestone.exists)
self.assertIsNone(milestone.name)
self.assertIsNone(milestone.due)
self.assertIsNone(milestone.completed)
self.assertEqual('', milestone.description)
def test_existing_milestone(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
milestone = Milestone(self.env, 'Test')
self.assertTrue(milestone.exists)
self.assertEqual('Test', milestone.name)
self.assertIsNone(milestone.due)
self.assertIsNone(milestone.completed)
self.assertEqual('', milestone.description)
def test_create_and_update_milestone(self):
milestone = Milestone(self.env)
milestone.name = 'Test'
milestone.insert()
self.assertEqual([('Test', 0, 0, '')], self.env.db_query("""
SELECT name, due, completed, description FROM milestone
WHERE name='Test'
"""))
# Use the same model object to update the milestone
milestone.description = 'Some text'
milestone.update()
self.assertEqual([('Test', 0, 0, 'Some text')], self.env.db_query("""
SELECT name, due, completed, description FROM milestone
WHERE name='Test'
"""))
def test_move_tickets(self):
self.env.db_transaction.executemany(
"INSERT INTO milestone (name) VALUES (%s)",
[('Test',), ('Testing',)])
tkt1 = self._insert_ticket(status='new', summary='Foo',
milestone='Test')
tkt2 = self._insert_ticket(status='new', summary='Bar',
milestone='Test')
self._update_ticket(tkt2, status='closed', resolution='fixed')
milestone = Milestone(self.env, 'Test')
milestone.move_tickets('Testing', 'anonymous', 'Move tickets')
tkt1 = Ticket(self.env, tkt1.id)
tkt2 = Ticket(self.env, tkt2.id)
self.assertEqual('Testing', tkt1['milestone'])
self.assertEqual('Testing', tkt2['milestone'])
self.assertEqual(tkt1['changetime'], tkt2['changetime'])
self.assertNotEqual(self.updated_at, tkt1['changetime'])
def test_move_tickets_exclude_closed(self):
self.env.db_transaction.executemany(
"INSERT INTO milestone (name) VALUES (%s)",
[('Test',), ('Testing',)])
tkt1 = self._insert_ticket(status='new', summary='Foo',
milestone='Test')
tkt2 = self._insert_ticket(status='new', summary='Bar',
milestone='Test')
self._update_ticket(tkt2, status='closed', resolution='fixed')
milestone = Milestone(self.env, 'Test')
milestone.move_tickets('Testing', 'anonymous', 'Move tickets',
exclude_closed=True)
tkt1 = Ticket(self.env, tkt1.id)
tkt2 = Ticket(self.env, tkt2.id)
self.assertEqual('Testing', tkt1['milestone'])
self.assertEqual('Test', tkt2['milestone'])
self.assertNotEqual(self.updated_at, tkt1['changetime'])
self.assertEqual(self.updated_at, tkt2['changetime'])
def test_move_tickets_target_doesnt_exist(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
tkt1 = self._insert_ticket(status='new', summary='Foo',
milestone='Test')
tkt2 = self._insert_ticket(status='new', summary='Bar',
milestone='Test')
milestone = Milestone(self.env, 'Test')
self.assertRaises(ResourceNotFound, milestone.move_tickets,
'Testing', 'anonymous')
tkt1 = Ticket(self.env, tkt1.id)
tkt2 = Ticket(self.env, tkt2.id)
self.assertEqual('Test', tkt1['milestone'])
self.assertEqual('Test', tkt2['milestone'])
self.assertNotEqual(self.updated_at, tkt1['changetime'])
self.assertNotEqual(self.updated_at, tkt2['changetime'])
def test_create_milestone_without_name(self):
milestone = Milestone(self.env)
self.assertRaises(TracError, milestone.insert)
def test_delete_milestone(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
tkt1 = self._insert_ticket(status='new', summary='Foo',
milestone='Test')
tkt2 = self._insert_ticket(status='new', summary='Bar',
milestone='Test')
self._update_ticket(tkt2, status='closed', resolution='fixed')
milestone = Milestone(self.env, 'Test')
milestone.delete()
self.assertFalse(milestone.exists)
self.assertEqual([],
self.env.db_query("SELECT * FROM milestone WHERE name='Test'"))
tkt1 = Ticket(self.env, tkt1.id)
tkt2 = Ticket(self.env, tkt2.id)
self.assertEqual('', tkt1['milestone'])
self.assertEqual('', tkt2['milestone'])
self.assertEqual(tkt1['changetime'], tkt2['changetime'])
self.assertNotEqual(self.updated_at, tkt1['changetime'])
def test_delete_milestone_with_attachment(self):
milestone = Milestone(self.env)
milestone.name = 'MilestoneWithAttachment'
milestone.insert()
attachment = Attachment(self.env, 'milestone', milestone.name)
attachment.insert('foo.txt', StringIO(), 0, 1)
milestone.delete()
self.assertEqual(False, milestone.exists)
attachments = Attachment.select(self.env, 'milestone', milestone.name)
self.assertRaises(StopIteration, attachments.next)
def test_delete_milestone_retarget_tickets(self):
self.env.db_transaction.executemany(
"INSERT INTO milestone (name) VALUES (%s)",
[('Test',), ('Other',)])
tkt1 = self._insert_ticket(status='new', summary='Foo',
milestone='Test')
tkt2 = self._insert_ticket(status='new', summary='Bar',
milestone='Test')
self._update_ticket(tkt2, status='closed', resolution='fixed')
milestone = Milestone(self.env, 'Test')
milestone.delete(retarget_to='Other')
self.assertFalse(milestone.exists)
tkt1 = Ticket(self.env, tkt1.id)
tkt2 = Ticket(self.env, tkt2.id)
self.assertEqual('Other', tkt1['milestone'])
self.assertEqual('Other', tkt2['milestone'])
self.assertEqual(tkt1['changetime'], tkt2['changetime'])
self.assertNotEqual(self.updated_at, tkt1['changetime'])
def test_update_milestone(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
milestone = Milestone(self.env, 'Test')
t1 = datetime(2001, 01, 01, tzinfo=utc)
t2 = datetime(2002, 02, 02, tzinfo=utc)
milestone.due = t1
milestone.completed = t2
milestone.description = 'Foo bar'
milestone.update()
self.assertEqual(
[('Test', to_utimestamp(t1), to_utimestamp(t2), 'Foo bar')],
self.env.db_query("SELECT * FROM milestone WHERE name='Test'"))
def test_update_milestone_without_name(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
milestone = Milestone(self.env, 'Test')
milestone.name = None
self.assertRaises(TracError, milestone.update)
def test_rename_milestone(self):
milestone = Milestone(self.env)
milestone.name = 'OldName'
milestone.insert()
attachment = Attachment(self.env, 'milestone', 'OldName')
attachment.insert('foo.txt', StringIO(), 0, 1)
milestone = Milestone(self.env, 'OldName')
milestone.name = 'NewName'
milestone.update()
self.assertRaises(ResourceNotFound, Milestone, self.env, 'OldName')
self.assertEqual('NewName', Milestone(self.env, 'NewName').name)
attachments = Attachment.select(self.env, 'milestone', 'OldName')
self.assertRaises(StopIteration, attachments.next)
attachments = Attachment.select(self.env, 'milestone', 'NewName')
self.assertEqual('foo.txt', attachments.next().filename)
self.assertRaises(StopIteration, attachments.next)
def test_rename_milestone_retarget_tickets(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
tkt1 = self._insert_ticket(status='new', summary='Foo',
milestone='Test')
tkt2 = self._insert_ticket(status='new', summary='Bar',
milestone='Test')
self._update_ticket(tkt2, status='closed', resolution='fixed')
milestone = Milestone(self.env, 'Test')
milestone.name = 'Testing'
milestone.update()
tkt1 = Ticket(self.env, tkt1.id)
tkt2 = Ticket(self.env, tkt2.id)
self.assertEqual('Testing', tkt1['milestone'])
self.assertEqual('Testing', tkt2['milestone'])
self.assertEqual(tkt1['changetime'], tkt2['changetime'])
self.assertNotEqual(self.updated_at, tkt1['changetime'])
def test_select_milestones(self):
self.env.db_transaction.executemany(
"INSERT INTO milestone (name) VALUES (%s)",
[('1.0',), ('2.0',)])
milestones = list(Milestone.select(self.env))
self.assertEqual('1.0', milestones[0].name)
self.assertTrue(milestones[0].exists)
self.assertEqual('2.0', milestones[1].name)
self.assertTrue(milestones[1].exists)
def test_change_listener_created(self):
listener = TestMilestoneChangeListener(self.env)
milestone = self._create_milestone(name='Milestone 1')
milestone.insert()
self.assertEqual('created', listener.action)
self.assertEqual(milestone, listener.milestone)
def test_change_listener_changed(self):
listener = TestMilestoneChangeListener(self.env)
milestone = self._create_milestone(
name='Milestone 1',
due=datetime(2001, 01, 01, tzinfo=utc),
description='The milestone description')
milestone.insert()
milestone.name = 'Milestone 2'
milestone.completed = datetime(2001, 02, 03, tzinfo=utc)
milestone.description = 'The changed description'
milestone.update()
self.assertEqual('changed', listener.action)
self.assertEqual(milestone, listener.milestone)
self.assertEqual({'name': 'Milestone 1', 'completed': None,
'description': 'The milestone description'},
listener.old_values)
def test_change_listener_deleted(self):
listener = TestMilestoneChangeListener(self.env)
milestone = self._create_milestone(name='Milestone 1')
milestone.insert()
self.assertTrue(milestone.exists)
milestone.delete()
self.assertEqual('Milestone 1', milestone.name)
self.assertFalse(milestone.exists)
self.assertEqual('deleted', listener.action)
self.assertEqual(milestone, listener.milestone)
class ComponentTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
def tearDown(self):
self.env.reset_db()
def test_exists_negative(self):
def get_fake_component():
return Component(self.env, "Shrubbery")
self.assertRaises(TracError, get_fake_component)
def test_exists(self):
"""
http://trac.edgewall.org/ticket/4247
"""
for c in Component.select(self.env):
self.assertEqual(c.exists, True)
def test_create_and_update(self):
component = Component(self.env)
component.name = 'Test'
component.insert()
self.assertEqual([('Test', None, None)], self.env.db_query("""
SELECT name, owner, description FROM component
WHERE name='Test'"""))
# Use the same model object to update the component
component.owner = 'joe'
component.update()
self.assertEqual([('Test', 'joe', None)], self.env.db_query(
"SELECT name, owner, description FROM component WHERE name='Test'"))
class VersionTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
def tearDown(self):
self.env.reset_db()
def test_exists_negative(self):
def get_fake_version():
return Version(self.env, "-1")
self.assertRaises(TracError, get_fake_version)
def test_exists(self):
"""
http://trac.edgewall.org/ticket/4247
"""
for v in Version.select(self.env):
self.assertEqual(v.exists, True)
def test_create_and_update(self):
version = Version(self.env)
version.name = 'Test'
version.insert()
self.assertEqual([('Test', 0, None)], self.env.db_query(
"SELECT name, time, description FROM version WHERE name='Test'"))
# Use the same model object to update the version
version.description = 'Some text'
version.update()
self.assertEqual([('Test', 0, 'Some text')], self.env.db_query(
"SELECT name, time, description FROM version WHERE name='Test'"))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TicketTestCase))
suite.addTest(unittest.makeSuite(TicketCommentEditTestCase))
suite.addTest(unittest.makeSuite(TicketCommentDeleteTestCase))
suite.addTest(unittest.makeSuite(EnumTestCase))
suite.addTest(unittest.makeSuite(MilestoneTestCase))
suite.addTest(unittest.makeSuite(ComponentTestCase))
suite.addTest(unittest.makeSuite(VersionTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 40.254505
| 80
| 0.604711
|
073f6fefff1548333ceab975dcde109e5403a273
| 614
|
py
|
Python
|
third_party/typ/typ/version.py
|
hefen1/chromium
|
52f0b6830e000ca7c5e9aa19488af85be792cc88
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
third_party/typ/typ/version.py
|
hefen1/chromium
|
52f0b6830e000ca7c5e9aa19488af85be792cc88
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
third_party/typ/typ/version.py
|
hefen1/chromium
|
52f0b6830e000ca7c5e9aa19488af85be792cc88
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2020-04-04T13:34:56.000Z
|
2020-11-04T07:17:52.000Z
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = '0.8.9'
| 38.375
| 74
| 0.757329
|
14012837925aba224dee38262db83a179ce1818f
| 14,863
|
py
|
Python
|
Lib/email/_parseaddr.py
|
deadsnakes/python2.4
|
f493d5415b662e99a73d017bcafe2148c5bc8fb5
|
[
"PSF-2.0"
] | 33
|
2015-02-16T02:52:08.000Z
|
2022-02-18T08:46:32.000Z
|
lib/IPCE/Lib/email/_parseaddr.py
|
bensku/MultiversePlatform
|
7e1aad33d48b9e47f3db2ca638cb57592336ddb7
|
[
"MIT"
] | 1
|
2017-09-09T18:50:23.000Z
|
2020-12-29T18:13:56.000Z
|
lib/IPCE/Lib/email/_parseaddr.py
|
bensku/MultiversePlatform
|
7e1aad33d48b9e47f3db2ca638cb57592336ddb7
|
[
"MIT"
] | 31
|
2015-02-07T16:20:24.000Z
|
2022-02-23T15:02:43.000Z
|
# Copyright (C) 2002-2006 Python Software Foundation
# Contact: email-sig@python.org
"""Email address parsing code.
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
import time
SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '
# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
'aug', 'sep', 'oct', 'nov', 'dec',
'january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
'EST': -500, 'EDT': -400, # Eastern
'CST': -600, 'CDT': -500, # Central
'MST': -700, 'MDT': -600, # Mountain
'PST': -800, 'PDT': -700 # Pacific
}
def parsedate_tz(data):
"""Convert a date string to a time tuple.
Accounts for military timezones.
"""
data = data.split()
# The FWS after the comma after the day-of-week is optional, so search and
# adjust for this.
if data[0].endswith(',') or data[0].lower() in _daynames:
# There's a dayname here. Skip it
del data[0]
else:
i = data[0].rfind(',')
if i >= 0:
data[0] = data[0][i+1:]
if len(data) == 3: # RFC 850 date, deprecated
stuff = data[0].split('-')
if len(stuff) == 3:
data = stuff + data[1:]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('') # Dummy tz
if len(data) < 5:
return None
data = data[:5]
[dd, mm, yy, tm, tz] = data
mm = mm.lower()
if mm not in _monthnames:
dd, mm = mm, dd.lower()
if mm not in _monthnames:
return None
mm = _monthnames.index(mm) + 1
if mm > 12:
mm -= 12
if dd[-1] == ',':
dd = dd[:-1]
i = yy.find(':')
if i > 0:
yy, tm = tm, yy
if yy[-1] == ',':
yy = yy[:-1]
if not yy[0].isdigit():
yy, tz = tz, yy
if tm[-1] == ',':
tm = tm[:-1]
tm = tm.split(':')
if len(tm) == 2:
[thh, tmm] = tm
tss = '0'
elif len(tm) == 3:
[thh, tmm, tss] = tm
else:
return None
try:
yy = int(yy)
dd = int(dd)
thh = int(thh)
tmm = int(tmm)
tss = int(tss)
except ValueError:
return None
tzoffset = None
tz = tz.upper()
if _timezones.has_key(tz):
tzoffset = _timezones[tz]
else:
try:
tzoffset = int(tz)
except ValueError:
pass
# Convert a timezone offset into seconds ; -0500 -> -18000
if tzoffset:
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
# Daylight Saving Time flag is set to -1, since DST is unknown.
return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
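# Illustrative sketch (added comment, not part of the original module): expected
# behaviour of parsedate_tz() given the parsing rules above.
#
#     >>> parsedate_tz('Tue, 10 Jun 2003 09:41:01 -0500')
#     (2003, 6, 10, 9, 41, 1, 0, 1, -1, -18000)
#
# The final element is the numeric zone converted to seconds (-0500 -> -18000),
# as noted in the comment inside the function.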
def parsedate(data):
"""Convert a time string to a time tuple."""
t = parsedate_tz(data)
if isinstance(t, tuple):
return t[:9]
else:
return t
def mktime_tz(data):
"""Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
t = time.mktime(data[:8] + (0,))
return t - data[9] - time.timezone
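# Illustrative sketch (added comment): the two helpers are typically chained to
# turn a Date: header value into a POSIX timestamp, e.g.
#
#     >>> mktime_tz(parsedate_tz('Tue, 10 Jun 2003 09:41:01 -0500'))
#
# which nominally corresponds to 2003-06-10 14:41:01 UTC; when the string
# carries no usable zone information, the value is interpreted as local time
# instead.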
def quote(str):
"""Add quotes around a string."""
return str.replace('\\', '\\\\').replace('"', '\\"')
class AddrlistClass:
"""Address parser class by Ben Escoto.
To understand what this class does, it helps to have a copy of RFC 2822 in
front of you.
Note: this class interface is deprecated and may be removed in the future.
Use rfc822.AddressList instead.
"""
def __init__(self, field):
"""Initialize a new instance.
`field' is an unparsed address header field, containing
one or more addresses.
"""
self.specials = '()<>@,:;.\"[]'
self.pos = 0
self.LWS = ' \t'
self.CR = '\r\n'
self.atomends = self.specials + self.LWS + self.CR
# Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
# is obsolete syntax. RFC 2822 requires that we recognize obsolete
# syntax, so allow dots in phrases.
self.phraseends = self.atomends.replace('.', '')
self.field = field
self.commentlist = []
def gotonext(self):
"""Parse up to the start of the next address."""
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS + '\n\r':
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
else:
break
def getaddrlist(self):
"""Parse all addresses.
Returns a list containing all of the addresses.
"""
result = []
while self.pos < len(self.field):
ad = self.getaddress()
if ad:
result += ad
else:
result.append(('', ''))
return result
def getaddress(self):
"""Parse the next address."""
self.commentlist = []
self.gotonext()
oldpos = self.pos
oldcl = self.commentlist
plist = self.getphraselist()
self.gotonext()
returnlist = []
if self.pos >= len(self.field):
# Bad email address technically, no domain.
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in '.@':
# email address is just an addrspec
# this isn't very efficient since we start over
self.pos = oldpos
self.commentlist = oldcl
addrspec = self.getaddrspec()
returnlist = [(SPACE.join(self.commentlist), addrspec)]
elif self.field[self.pos] == ':':
# address is a group
returnlist = []
fieldlen = len(self.field)
self.pos += 1
while self.pos < len(self.field):
self.gotonext()
if self.pos < fieldlen and self.field[self.pos] == ';':
self.pos += 1
break
returnlist = returnlist + self.getaddress()
elif self.field[self.pos] == '<':
# Address is a phrase then a route addr
routeaddr = self.getrouteaddr()
if self.commentlist:
returnlist = [(SPACE.join(plist) + ' (' +
' '.join(self.commentlist) + ')', routeaddr)]
else:
returnlist = [(SPACE.join(plist), routeaddr)]
else:
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in self.specials:
self.pos += 1
self.gotonext()
if self.pos < len(self.field) and self.field[self.pos] == ',':
self.pos += 1
return returnlist
def getrouteaddr(self):
"""Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
"""
if self.field[self.pos] != '<':
return
expectroute = False
self.pos += 1
self.gotonext()
adlist = ''
while self.pos < len(self.field):
if expectroute:
self.getdomain()
expectroute = False
elif self.field[self.pos] == '>':
self.pos += 1
break
elif self.field[self.pos] == '@':
self.pos += 1
expectroute = True
elif self.field[self.pos] == ':':
self.pos += 1
else:
adlist = self.getaddrspec()
self.pos += 1
break
self.gotonext()
return adlist
def getaddrspec(self):
"""Parse an RFC 2822 addr-spec."""
aslist = []
self.gotonext()
while self.pos < len(self.field):
if self.field[self.pos] == '.':
aslist.append('.')
self.pos += 1
elif self.field[self.pos] == '"':
aslist.append('"%s"' % self.getquote())
elif self.field[self.pos] in self.atomends:
break
else:
aslist.append(self.getatom())
self.gotonext()
if self.pos >= len(self.field) or self.field[self.pos] != '@':
return EMPTYSTRING.join(aslist)
aslist.append('@')
self.pos += 1
self.gotonext()
return EMPTYSTRING.join(aslist) + self.getdomain()
def getdomain(self):
"""Get the complete domain name from an address."""
sdlist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] == '[':
sdlist.append(self.getdomainliteral())
elif self.field[self.pos] == '.':
self.pos += 1
sdlist.append('.')
elif self.field[self.pos] in self.atomends:
break
else:
sdlist.append(self.getatom())
return EMPTYSTRING.join(sdlist)
def getdelimited(self, beginchar, endchars, allowcomments=True):
"""Parse a header fragment delimited by special characters.
`beginchar' is the start character for the fragment.
If self is not looking at an instance of `beginchar' then
getdelimited returns the empty string.
`endchars' is a sequence of allowable end-delimiting characters.
Parsing stops when one of these is encountered.
If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
within the parsed fragment.
"""
if self.field[self.pos] != beginchar:
return ''
slist = ['']
quote = False
self.pos += 1
while self.pos < len(self.field):
if quote:
slist.append(self.field[self.pos])
quote = False
elif self.field[self.pos] in endchars:
self.pos += 1
break
elif allowcomments and self.field[self.pos] == '(':
slist.append(self.getcomment())
continue # have already advanced pos from getcomment
elif self.field[self.pos] == '\\':
quote = True
else:
slist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(slist)
def getquote(self):
"""Get a quote-delimited fragment from self's field."""
return self.getdelimited('"', '"\r', False)
def getcomment(self):
"""Get a parenthesis-delimited fragment from self's field."""
return self.getdelimited('(', ')\r', True)
def getdomainliteral(self):
"""Parse an RFC 2822 domain-literal."""
return '[%s]' % self.getdelimited('[', ']\r', False)
def getatom(self, atomends=None):
"""Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases)."""
atomlist = ['']
if atomends is None:
atomends = self.atomends
while self.pos < len(self.field):
if self.field[self.pos] in atomends:
break
else:
atomlist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(atomlist)
def getphraselist(self):
"""Parse a sequence of RFC 2822 phrases.
A phrase is a sequence of words, which are in turn either RFC 2822
atoms or quoted-strings. Phrases are canonicalized by squeezing all
runs of continuous whitespace into one space.
"""
plist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos += 1
elif self.field[self.pos] == '"':
plist.append(self.getquote())
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] in self.phraseends:
break
else:
plist.append(self.getatom(self.phraseends))
return plist
class AddressList(AddrlistClass):
"""An AddressList encapsulates a list of parsed RFC 2822 addresses."""
def __init__(self, field):
AddrlistClass.__init__(self, field)
if field:
self.addresslist = self.getaddrlist()
else:
self.addresslist = []
def __len__(self):
return len(self.addresslist)
def __add__(self, other):
# Set union
newaddr = AddressList(None)
newaddr.addresslist = self.addresslist[:]
for x in other.addresslist:
if not x in self.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __iadd__(self, other):
# Set union, in-place
for x in other.addresslist:
if not x in self.addresslist:
self.addresslist.append(x)
return self
def __sub__(self, other):
# Set difference
newaddr = AddressList(None)
for x in self.addresslist:
if not x in other.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __isub__(self, other):
# Set difference, in-place
for x in other.addresslist:
if x in self.addresslist:
self.addresslist.remove(x)
return self
def __getitem__(self, index):
# Make indexing, slices, and 'in' work
return self.addresslist[index]
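# Illustrative sketch (added comment, not part of the original module): how
# AddressList splits a header value and what the parsed pairs look like.
#
#     >>> al = AddressList('Jane <jane@example.com>, joe@example.com')
#     >>> len(al)
#     2
#     >>> al.addresslist
#     [('Jane', 'jane@example.com'), ('', 'joe@example.com')]
#
# The __add__/__sub__/__iadd__/__isub__ methods above then treat these pairs as
# set members (union and difference without duplicates).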
| 31.422833
| 78
| 0.526542
|
5f5f06a95307d2c56c040e7b5c832c6b686ab7ef
| 624
|
py
|
Python
|
room_booking_project/room_booking/urls.py
|
alsanuz/room_booking_app
|
12acc8fc0e5d9a3abe23eca1ea1d4ee88e732ccf
|
[
"Unlicense"
] | null | null | null |
room_booking_project/room_booking/urls.py
|
alsanuz/room_booking_app
|
12acc8fc0e5d9a3abe23eca1ea1d4ee88e732ccf
|
[
"Unlicense"
] | null | null | null |
room_booking_project/room_booking/urls.py
|
alsanuz/room_booking_app
|
12acc8fc0e5d9a3abe23eca1ea1d4ee88e732ccf
|
[
"Unlicense"
] | null | null | null |
from django.conf.urls import url
from views import *
urlpatterns = [
url(r'^$', RoomList.as_view(), name='room-list'),
    url(r'^confirm-booking/(?P<pk>\d+)/(?P<start_date>\d{4}-\d{2}-\d{2})/'
        r'(?P<end_date>\d{4}-\d{2}-\d{2})/(?P<amount_people>\d+)/(?P<total_booking_days>\d+)/$',
        ConfirmBooking.as_view(), name='confirm-booking'),
url(r'^booking-list/$', BookingList.as_view(), name='booking-list'),
url(r'^booking-detail/(?P<pk>\d+)/$', BookingDetail.as_view(), name='booking-detail'),
url(r'^download-booking-pdf/(?P<pk>\d+)/$', BookingPdf.as_view(), name='download-booking-pdf'),
]
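# Illustrative sketch (added comment, values are made up for the example): a
# request such as
#
#     /confirm-booking/3/2016-01-10/2016-01-12/2/2/
#
# is captured by the 'confirm-booking' pattern above as pk='3',
# start_date='2016-01-10', end_date='2016-01-12', amount_people='2' and
# total_booking_days='2' (captured values arrive as strings; Django strips the
# leading slash before matching).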
| 39
| 100
| 0.615385
|
36428875ccb435950e8879e3f922ac4ecf2e3f16
| 100,609
|
py
|
Python
|
dulwich/tests/test_porcelain.py
|
jessecureton-aurora/dulwich
|
e9ba8f854eaed54b7e90b7e58869705147a68404
|
[
"Apache-2.0"
] | null | null | null |
dulwich/tests/test_porcelain.py
|
jessecureton-aurora/dulwich
|
e9ba8f854eaed54b7e90b7e58869705147a68404
|
[
"Apache-2.0"
] | null | null | null |
dulwich/tests/test_porcelain.py
|
jessecureton-aurora/dulwich
|
e9ba8f854eaed54b7e90b7e58869705147a68404
|
[
"Apache-2.0"
] | null | null | null |
# test_porcelain.py -- porcelain tests
# Copyright (C) 2013 Jelmer Vernooij <jelmer@jelmer.uk>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Tests for dulwich.porcelain."""
from io import BytesIO, StringIO
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import time
from unittest import skipIf
from dulwich import porcelain
from dulwich.diff_tree import tree_changes
from dulwich.errors import (
CommitError,
)
from dulwich.objects import (
Blob,
Tag,
Tree,
ZERO_SHA,
)
from dulwich.repo import (
NoIndexPresent,
Repo,
)
from dulwich.tests import (
TestCase,
)
from dulwich.tests.utils import (
build_commit_graph,
make_commit,
make_object,
)
def flat_walk_dir(dir_to_walk):
for dirpath, _, filenames in os.walk(dir_to_walk):
rel_dirpath = os.path.relpath(dirpath, dir_to_walk)
if not dirpath == dir_to_walk:
yield rel_dirpath
for filename in filenames:
if dirpath == dir_to_walk:
yield filename
else:
yield os.path.join(rel_dirpath, filename)
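# Illustrative sketch (added comment): for a directory containing a.txt and
# sub/b.txt, flat_walk_dir() yields 'a.txt', 'sub' and os.path.join('sub',
# 'b.txt'): entries relative to the walked directory, with the top-level
# directory itself omitted.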
class PorcelainTestCase(TestCase):
def setUp(self):
super(PorcelainTestCase, self).setUp()
self.test_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.test_dir)
self.repo_path = os.path.join(self.test_dir, "repo")
self.repo = Repo.init(self.repo_path, mkdir=True)
self.addCleanup(self.repo.close)
class PorcelainGpgTestCase(PorcelainTestCase):
DEFAULT_KEY = """
-----BEGIN PGP PRIVATE KEY BLOCK-----
lQVYBGBjIyIBDADAwydvMPQqeEiK54FG1DHwT5sQejAaJOb+PsOhVa4fLcKsrO3F
g5CxO+/9BHCXAr8xQAtp/gOhDN05fyK3MFyGlL9s+Cd8xf34S3R4rN/qbF0oZmaa
FW0MuGnniq54HINs8KshadVn1Dhi/GYSJ588qNFRl/qxFTYAk+zaGsgX/QgFfy0f
djWXJLypZXu9D6DlyJ0cPSzUlfBkI2Ytx6grzIquRjY0FbkjK3l+iGsQ+ebRMdcP
Sqd5iTN9XuzIUVoBFAZBRjibKV3N2wxlnCbfLlzCyDp7rktzSThzjJ2pVDuLrMAx
6/L9hIhwmFwdtY4FBFGvMR0b0Ugh3kCsRWr8sgj9I7dUoLHid6ObYhJFhnD3GzRc
U+xX1uy3iTCqJDsG334aQIhC5Giuxln4SUZna2MNbq65ksh38N1aM/t3+Dc/TKVB
rb5KWicRPCQ4DIQkHMDCSPyj+dvRLCPzIaPvHD7IrCfHYHOWuvvPGCpwjo0As3iP
IecoMeguPLVaqgcAEQEAAQAL/i5/pQaUd4G7LDydpbixPS6r9UrfPrU/y5zvBP/p
DCynPDutJ1oq539pZvXQ2VwEJJy7x0UVKkjyMndJLNWly9wHC7o8jkHx/NalVP47
LXR+GWbCdOOcYYbdAWcCNB3zOtzPnWhdAEagkc2G9xRQDIB0dLHLCIUpCbLP/CWM
qlHnDsVMrVTWjgzcpsnyGgw8NeLYJtYGB8dsN+XgCCjo7a9LEvUBKNgdmWBbf14/
iBw7PCugazFcH9QYfZwzhsi3nqRRagTXHbxFRG0LD9Ro9qCEutHYGP2PJ59Nj8+M
zaVkJj/OxWxVOGvn2q16mQBCjKpbWfqXZVVl+G5DGOmiSTZqXy+3j6JCKdOMy6Qd
JBHOHhFZXYmWYaaPzoc33T/C3QhMfY5sOtUDLJmV05Wi4dyBeNBEslYgUuTk/jXb
5ZAie25eDdrsoqkcnSs2ZguMF7AXhe6il2zVhUUMs/6UZgd6I7I4Is0HXT/pnxEp
uiTRFu4v8E+u+5a8O3pffe5boQYA3TsIxceen20qY+kRaTOkURHMZLn/y6KLW8bZ
rNJyXWS9hBAcbbSGhfOwYfzbDCM17yPQO3E2zo8lcGdRklUdIIaCxQwtu36N5dfx
OLCCQc5LmYdl/EAm91iAhrr7dNntZ18MU09gdzUu+ONZwu4CP3cJT83+qYZULso8
4Fvd/X8IEfGZ7kM+ylrdqBwtlrn8yYXtom+ows2M2UuNR53B+BUOd73kVLTkTCjE
JH63+nE8BqG7tDLCMws+23SAA3xxBgDfDrr0x7zCozQKVQEqBzQr9Uoo/c/ZjAfi
syzNSrDz+g5gqJYtuL9XpPJVWf6V1GXVyJlSbxR9CjTkBxmlPxpvV25IsbVSsh0o
aqkf2eWpbCL6Qb2E0jd1rvf8sGeTTohzYfiSVVsC2t9ngRO/CmetizwQBvRzLGMZ
4mtAPiy7ZEDc2dFrPp7zlKISYmJZUx/DJVuZWuOrVMpBP+bSgJXoMTlICxZUqUnE
2VKVStb/L+Tl8XCwIWdrZb9BaDnHqfcGAM2B4HNPxP88Yj1tEDly/vqeb3vVMhj+
S1lunnLdgxp46YyuTMYAzj88eCGurRtzBsdxxlGAsioEnZGebEqAHQbieKq/DO6I
MOMZHMSVBDqyyIx3assGlxSX8BSFW0lhKyT7i0XqnAgCJ9f/5oq0SbFGq+01VQb7
jIx9PbcYJORxsE0JG/CXXPv27bRtQXsudkWGSYvC0NLOgk4z8+kQpQtyFh16lujq
WRwMeriu0qNDjCa1/eHIKDovhAZ3GyO5/9m1tBlUZXN0IFVzZXIgPHRlc3RAdGVz
dC5jb20+iQHOBBMBCAA4AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAFiEEjrR8
MQ4fJK44PYMvfN2AClLmXiYFAmDcEZEACgkQfN2AClLmXibZzgv/ZfeTpTuqQE1W
C1jT5KpQExnt0BizTX0U7BvSn8Fr6VXTyol6kYc3u71GLUuJyawCLtIzOXqOXJvz
bjcZqymcMADuftKcfMy513FhbF6MhdVd6QoeBP6+7/xXOFJCi+QVYF7SQ2h7K1Qm
+yXOiAMgSxhCZQGPBNJLlDUOd47nSIMANvlumFtmLY/1FD7RpG7WQWjeX1mnxNTw
hUU+Yv7GuFc/JprXCIYqHbhWfvXyVtae2ZK4xuVi5eqwA2RfggOVM7drb+CgPhG0
+9aEDDLOZqVi65wK7J73Puo3rFTbPQMljxw5s27rWqF+vB6hhVdJOPNomWy3naPi
k5MW0mhsacASz1WYndpZz+XaQTq/wJF5HUyyeUWJ0vlOEdwx021PHcqSTyfNnkjD
KncrE21t2sxWRsgGDETxIwkd2b2HNGAvveUD0ffFK/oJHGSXjAERFGc3wuiDj3mQ
BvKm4wt4QF9ZMrCdhMAA6ax5kfEUqQR4ntmrJk/khp/mV7TILaI4nQVYBGBjIyIB
DADghIo9wXnRxzfdDTvwnP8dHpLAIaPokgdpyLswqUCixJWiW2xcV6weUjEWwH6n
eN/t1uZYVehbrotxVPla+MPvzhxp6/cmG+2lhzEBOp6zRwnL1wIB6HoKJfpREhyM
c8rLR0zMso1L1bJTyydvnu07a7BWo3VWKjilb0rEZZUSD/2hidx5HxMOJSoidLWe
d/PPuv6yht3NtA4UThlcfldm9G6PbqCdm1kMEKAkq0wVJvhPJ6gEFRNJimgygfUw
MDFXEIhQtxjgdV5Uoz3O5452VLoRsDlgpi3E0WDGj7WXDaO5uSU0T5aJgVgHCP/f
xZhHuQFk2YYIl5nCBpOZyWWI0IKmscTuEwzpkhICQDQFvcMZ5ibsl7wA2P7YTrQf
FDMjjzuaK80GYPfxDFlyKUyLqFt8w/QzsZLDLX7+jxIEpbRAaMw/JsWqm5BMxxbS
3CIQiS5S3oSKDsNINelqWFfwvLhvlQra8gIxyNTlek25OdgG66BiiX+seH8A/ql+
F+MAEQEAAQAL/1jrNSLjMt9pwo6qFKClVQZP2vf7+sH7v7LeHIDXr3EnYUnVYnOq
B1FU5PspTp/+J9W25DB9CZLx7Gj8qeslFdiuLSOoIBB4RCToB3kAoeTH0DHqW/Gs
hFTrmJkuDp9zpo/ek6SIXJx5rHAyR9KVw0fizQprH2f6PcgLbTWeM61dJuqowmg3
7eCOyIKv7VQvFqEhYokLD+JNmrvg+Htg0DXGvdjRjAwPf/NezEXpj67a6cHTp1/C
hwp7pevG+3fTxaCJFesl5/TxxtnaBLE8m2uo/S6Hxgn9l0edonroe1QlTjEqGLy2
7qi2z5Rem+v6GWNDRgvAWur13v8FNdyduHlioG/NgRsU9mE2MYeFsfi3cfNpJQp/
wC9PSCIXrb/45mkS8KyjZpCrIPB9RV/m0MREq01TPom7rstZc4A1pD0Ot7AtUYS3
e95zLyEmeLziPJ9fV4fgPmEudDr1uItnmV0LOskKlpg5sc0hhdrwYoobfkKt2dx6
DqfMlcM1ZkUbLQYA4jwfpFJG4HmYvjL2xCJxM0ycjvMbqFN+4UjgYWVlRfOrm1V4
Op86FjbRbV6OOCNhznotAg7mul4xtzrrTkK8o3YLBeJseDgl4AWuzXtNa9hE0XpK
9gJoEHUuBOOsamVh2HpXESFyE5CclOV7JSh541TlZKfnqfZYCg4JSbp0UijkawCL
5bJJUiGGMD9rZUxIAKQO1DvUEzptS7Jl6S3y5sbIIhilp4KfYWbSk3PPu9CnZD5b
LhEQp0elxnb/IL8PBgD+DpTeC8unkGKXUpbe9x0ISI6V1D6FmJq/FxNg7fMa3QCh
fGiAyoTm80ZETynj+blRaDO3gY4lTLa3Opubof1EqK2QmwXmpyvXEZNYcQfQ2CCS
GOWUCK8jEQamUPf1PWndZXJUmROI1WukhlL71V/ir6zQeVCv1wcwPwclJPnAe87u
pEklnCYpvsEldwHUX9u0BWzoULIEsi+ddtHmT0KTeF/DHRy0W15jIHbjFqhqckj1
/6fmr7l7kIi/kN4vWe0F/0Q8IXX+cVMgbl3aIuaGcvENLGcoAsAtPGx88SfRgmfu
HK64Y7hx1m+Bo215rxJzZRjqHTBPp0BmCi+JKkaavIBrYRbsx20gveI4dzhLcUhB
kiT4Q7oz0/VbGHS1CEf9KFeS/YOGj57s4yHauSVI0XdP9kBRTWmXvBkzsooB2cKH
hwhUN7iiT1k717CiTNUT6Q/pcPFCyNuMoBBGQTU206JEgIjQvI3f8xMUMGmGVVQz
9/k716ycnhb2JZ/Q/AyQIeHJiQG2BBgBCAAgAhsMFiEEjrR8MQ4fJK44PYMvfN2A
ClLmXiYFAmDcEa4ACgkQfN2AClLmXiZxxQv/XaMN0hPCygtrQMbCsTNb34JbvJzh
hngPuUAfTbRHrR3YeATyQofNbL0DD3fvfzeFF8qESqvzCSZxS6dYsXPd4MCJTzlp
zYBZ2X0sOrgDqZvqCZKN72RKgdk0KvthdzAxsIm2dfcQOxxowXMxhJEXZmsFpusx
jKJxOcrfVRjXJnh9isY0NpCoqMQ+3k3wDJ3VGEHV7G+A+vFkWfbLJF5huQ96uaH9
Uc+jUsREUH9G82ZBqpoioEN8Ith4VXpYnKdTMonK/+ZcyeraJZhXrvbjnEomKdzU
0pu4bt1HlLR3dcnpjN7b009MBf2xLgEfQk2nPZ4zzY+tDkxygtPllaB4dldFjBpT
j7Q+t49sWMjmlJUbLlHfuJ7nUUK5+cGjBsWVObAEcyfemHWCTVFnEa2BJslGC08X
rFcjRRcMEr9ct4551QFBHsv3O/Wp3/wqczYgE9itSnGT05w+4vLt4smG+dnEHjRJ
brMb2upTHa+kjktjdO96/BgSnKYqmNmPB/qB
=ivA/
-----END PGP PRIVATE KEY BLOCK-----
"""
DEFAULT_KEY_ID = "8EB47C310E1F24AE383D832F7CDD800A52E65E26"
NON_DEFAULT_KEY = """
-----BEGIN PGP PRIVATE KEY BLOCK-----
lQVYBGBjI0ABDADGWBRp+t02emfzUlhrc1psqIhhecFm6Em0Kv33cfDpnfoMF1tK
Yy/4eLYIR7FmpdbFPcDThFNHbXJzBi00L1mp0XQE2l50h/2bDAAgREdZ+NVo5a7/
RSZjauNU1PxW6pnXMehEh1tyIQmV78jAukaakwaicrpIenMiFUN3fAKHnLuFffA6
t0f3LqJvTDhUw/o2vPgw5e6UDQhA1C+KTv1KXVrhJNo88a3hZqCZ76z3drKR411Q
zYgT4DUb8lfnbN+z2wfqT9oM5cegh2k86/mxAA3BYOeQrhmQo/7uhezcgbxtdGZr
YlbuaNDTSBrn10ZoaxLPo2dJe2zWxgD6MpvsGU1w3tcRW508qo/+xoWp2/pDzmok
+uhOh1NAj9zB05VWBz1r7oBgCOIKpkD/LD4VKq59etsZ/UnrYDwKdXWZp7uhshkU
M7N35lUJcR76a852dlMdrgpmY18+BP7+o7M+5ElHTiqQbMuE1nHTg8RgVpdV+tUx
dg6GWY/XHf5asm8AEQEAAQAL/A85epOp+GnymmEQfI3+5D178D//Lwu9n86vECB6
xAHCqQtdjZnXpDp/1YUsL59P8nzgYRk7SoMskQDoQ/cB/XFuDOhEdMSgHaTVlnrj
ktCCq6rqGnUosyolbb64vIfVaSqd/5SnCStpAsnaBoBYrAu4ZmV4xfjDQWwn0q5s
u+r56mD0SkjPgbwk/b3qTVagVmf2OFzUgWwm1e/X+bA1oPag1NV8VS4hZPXswT4f
qhiyqUFOgP6vUBcqehkjkIDIl/54xII7/P5tp3LIZawvIXqHKNTqYPCqaCqCj+SL
vMYDIb6acjescfZoM71eAeHAANeFZzr/rwfBT+dEP6qKmPXNcvgE11X44ZCr04nT
zOV/uDUifEvKT5qgtyJpSFEVr7EXubJPKoNNhoYqq9z1pYU7IedX5BloiVXKOKTY
0pk7JkLqf3g5fYtXh/wol1owemITJy5V5PgaqZvk491LkI6S+kWC7ANYUg+TDPIW
afxW3E5N1CYV6XDAl0ZihbLcoQYAy0Ky/p/wayWKePyuPBLwx9O89GSONK2pQljZ
yaAgxPQ5/i1vx6LIMg7k/722bXR9W3zOjWOin4eatPM3d2hkG96HFvnBqXSmXOPV
03Xqy1/B5Tj8E9naLKUHE/OBQEc363DgLLG9db5HfPlpAngeppYPdyWkhzXyzkgS
PylaE5eW3zkdjEbYJ6RBTecTZEgBaMvJNPdWbn//frpP7kGvyiCg5Es+WjLInUZ6
0sdifcNTCewzLXK80v/y5mVOdJhPBgD5zs9cYdyiQJayqAuOr+He1eMHMVUbm9as
qBmPrst398eBW9ZYF7eBfTSlUf6B+WnvyLKEGsUf/7IK0EWDlzoBuWzWiHjUAY1g
m9eTV2MnvCCCefqCErWwfFo2nWOasAZA9sKD+ICIBY4tbtvSl4yfLBzTMwSvs9ZS
K1ocPSYUnhm2miSWZ8RLZPH7roHQasNHpyq/AX7DahFf2S/bJ+46ZGZ8Pigr7hA+
MjmpQ4qVdb5SaViPmZhAKO+PjuCHm+EF/2H0Y3Sl4eXgxZWoQVOUeXdWg9eMfYrj
XDtUMIFppV/QxbeztZKvJdfk64vt/crvLsOp0hOky9cKwY89r4QaHfexU3qR+qDq
UlMvR1rHk7dS5HZAtw0xKsFJNkuDxvBkMqv8Los8zp3nUl+U99dfZOArzNkW38wx
FPa0ixkC9za2BkDrWEA8vTnxw0A2upIFegDUhwOByrSyfPPnG3tKGeqt3Izb/kDk
Q9vmo+HgxBOguMIvlzbBfQZwtbd/gXzlvPqCtCJBbm90aGVyIFRlc3QgVXNlciA8
dGVzdDJAdGVzdC5jb20+iQHOBBMBCAA4AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4B
AheAFiEEapM5P1DF5qzT1vtFuTYhLttOFMAFAmDcEeEACgkQuTYhLttOFMDe0Qv/
Qx/bzXztJ3BCc+CYAVDx7Kr37S68etwwLgcWzhG+CDeMB5F/QE+upKgxy2iaqQFR
mxfOMgf/TIQkUfkbaASzK1LpnesYO85pk7XYjoN1bYEHiXTkeW+bgB6aJIxrRmO2
SrWasdBC/DsI3Mrya8YMt/TiHC6VpRJVxCe5vv7/kZC4CXrgTBnZocXx/YXimbke
poPMVdbvhYh6N0aGeS38jRKgyN10KXmhDTAQDwseVFavBWAjVfx3DEwjtK2Z2GbA
aL8JvAwRtqiPFkDMIKPL4UwxtXFws8SpMt6juroUkNyf6+BxNWYqmwXHPy8zCJAb
xkxIJMlEc+s7qQsP3fILOo8Xn+dVzJ5sa5AoARoXm1GMjsdqaKAzq99Dic/dHnaQ
Civev1PQsdwlYW2C2wNXNeIrxMndbDMFfNuZ6BnGHWJ/wjcp/pFs4YkyyZN8JH7L
hP2FO4Jgham3AuP13kC3Ivea7V6hR8QNcDZRwFPOMIX4tXwQv1T72+7DZGaA25O7
nQVXBGBjI0ABDADJMBYIcG0Yil9YxFs7aYzNbd7alUAr89VbY8eIGPHP3INFPM1w
lBQCu+4j6xdEbhMpppLBZ9A5TEylP4C6qLtPa+oLtPeuSw8gHDE10XE4lbgPs376
rL60XdImSOHhiduACUefYjqpcmFH9Bim1CC+koArYrSQJQx1Jri+OpnTaL/8UID0
KzD/kEgMVGlHIVj9oJmb4+j9pW8I/g0wDSnIaEKFMxqu6SIVJ1GWj+MUMvZigjLC
sNCZd7PnbOC5VeU3SsXj6he74Jx0AmGMPWIHi9M0DjHO5d1cCbXTnud8xxM1bOh4
7aCTnMK5cVyIr+adihgJpVVhrndSM8aklBPRgtozrGNCgF2CkYU2P1blxfloNr/8
UZpM83o+s1aObBszzRNLxnpNORqoLqjfPtLEPQnagxE+4EapCq0NZ/x6yO5VTwwp
NljdFAEk40uGuKyn1QA3uNMHy5DlpLl+tU7t1KEovdZ+OVYsYKZhVzw0MTpKogk9
JI7AN0q62ronPskAEQEAAQAL+O8BUSt1ZCVjPSIXIsrR+ZOSkszZwgJ1CWIoh0IH
YD2vmcMHGIhFYgBdgerpvhptKhaw7GcXDScEnYkyh5s4GE2hxclik1tbj/x1gYCN
8BNoyeDdPFxQG73qN12D99QYEctpOsz9xPLIDwmL0j1ehAfhwqHIAPm9Ca+i8JYM
x/F+35S/jnKDXRI+NVlwbiEyXKXxxIqNlpy9i8sDBGexO5H5Sg0zSN/B1duLekGD
biDw6gLc6bCgnS+0JOUpU07Z2fccMOY9ncjKGD2uIb/ePPUaek92GCQyq0eorCIV
brcQsRc5sSsNtnRKQTQtxioROeDg7kf2oWySeHTswlXW/219ihrSXgteHJd+rPm7
DYLEeGLRny8bRKv8rQdAtApHaJE4dAATXeY4RYo4NlXHYaztGYtU6kiM/3zCfWAe
9Nn+Wh9jMTZrjefUCagS5r6ZqAh7veNo/vgIGaCLh0a1Ypa0Yk9KFrn3LYEM3zgk
3m3bn+7qgy5cUYXoJ3DGJJEhBgDPonpW0WElqLs5ZMem1ha85SC38F0IkAaSuzuz
v3eORiKWuyJGF32Q2XHa1RHQs1JtUKd8rxFer3b8Oq71zLz6JtVc9dmRudvgcJYX
0PC11F6WGjZFSSp39dajFp0A5DKUs39F3w7J1yuDM56TDIN810ywufGAHARY1pZb
UJAy/dTqjFnCbNjpAakor3hVzqxcmUG+7Y2X9c2AGncT1MqAQC3M8JZcuZvkK8A9
cMk8B914ryYE7VsZMdMhyTwHmykGAPgNLLa3RDETeGeGCKWI+ZPOoU0ib5JtJZ1d
P3tNwfZKuZBZXKW9gqYqyBa/qhMip84SP30pr/TvulcdAFC759HK8sQZyJ6Vw24P
c+5ssRxrQUEw1rvJPWhmQCmCOZHBMQl5T6eaTOpR5u3aUKTMlxPKhK9eC1dCSTnI
/nyL8An3VKnLy+K/LI42YGphBVLLJmBewuTVDIJviWRdntiG8dElyEJMOywUltk3
2CEmqgsD9tPO8rXZjnMrMn3gfsiaoQYA6/6/e2utkHr7gAoWBgrBBdqVHsvqh5Ro
2DjLAOpZItO/EdCJfDAmbTYOa04535sBDP2tcH/vipPOPpbr1Y9Y/mNsKCulNxed
yqAmEkKOcerLUP5UHju0AB6VBjHJFdU2mqT+UjPyBk7WeKXgFomyoYMv3KpNOFWR
xi0Xji4kKHbttA6Hy3UcGPr9acyUAlDYeKmxbSUYIPhw32bbGrX9+F5YriTufRsG
3jftQVo9zqdcQSD/5pUTMn3EYbEcohYB2YWJAbYEGAEIACACGwwWIQRqkzk/UMXm
rNPW+0W5NiEu204UwAUCYNwR6wAKCRC5NiEu204UwOPnC/92PgB1c3h9FBXH1maz
g29fndHIHH65VLgqMiQ7HAMojwRlT5Xnj5tdkCBmszRkv5vMvdJRa3ZY8Ed/Inqr
hxBFNzpjqX4oj/RYIQLKXWWfkTKYVLJFZFPCSo00jesw2gieu3Ke/Yy4gwhtNodA
v+s6QNMvffTW/K3XNrWDB0E7/LXbdidzhm+MBu8ov2tuC3tp9liLICiE1jv/2xT4
CNSO6yphmk1/1zEYHS/mN9qJ2csBmte2cdmGyOcuVEHk3pyINNMDOamaURBJGRwF
XB5V7gTKUFU4jCp3chywKrBHJHxGGDUmPBmZtDtfWAOgL32drK7/KUyzZL/WO7Fj
akOI0hRDFOcqTYWL20H7+hAiX3oHMP7eou3L5C7wJ9+JMcACklN/WMjG9a536DFJ
4UgZ6HyKPP+wy837Hbe8b25kNMBwFgiaLR0lcgzxj7NyQWjVCMOEN+M55tRCjvL6
ya6JVZCRbMXfdCy8lVPgtNQ6VlHaj8Wvnn2FLbWWO2n2r3s=
=9zU5
-----END PGP PRIVATE KEY BLOCK-----
"""
NON_DEFAULT_KEY_ID = "6A93393F50C5E6ACD3D6FB45B936212EDB4E14C0"
def setUp(self):
super(PorcelainGpgTestCase, self).setUp()
self.gpg_dir = os.path.join(self.test_dir, "gpg")
os.mkdir(self.gpg_dir, mode=0o700)
self.addCleanup(shutil.rmtree, self.gpg_dir)
self._old_gnupghome = os.environ.get("GNUPGHOME")
os.environ["GNUPGHOME"] = self.gpg_dir
if self._old_gnupghome is None:
self.addCleanup(os.environ.__delitem__, "GNUPGHOME")
else:
self.addCleanup(os.environ.__setitem__, "GNUPGHOME", self._old_gnupghome)
def import_default_key(self):
subprocess.run(
["gpg", "--import"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
input=PorcelainGpgTestCase.DEFAULT_KEY,
universal_newlines=True,
)
def import_non_default_key(self):
subprocess.run(
["gpg", "--import"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
input=PorcelainGpgTestCase.NON_DEFAULT_KEY,
universal_newlines=True,
)
class ArchiveTests(PorcelainTestCase):
"""Tests for the archive command."""
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"refs/heads/master"] = c3.id
out = BytesIO()
err = BytesIO()
porcelain.archive(
self.repo.path, b"refs/heads/master", outstream=out, errstream=err
)
self.assertEqual(b"", err.getvalue())
tf = tarfile.TarFile(fileobj=out)
self.addCleanup(tf.close)
self.assertEqual([], tf.getnames())
class UpdateServerInfoTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"refs/heads/foo"] = c3.id
porcelain.update_server_info(self.repo.path)
self.assertTrue(
os.path.exists(os.path.join(self.repo.controldir(), "info", "refs"))
)
class CommitTests(PorcelainTestCase):
def test_custom_author(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"refs/heads/foo"] = c3.id
sha = porcelain.commit(
self.repo.path,
message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>",
)
self.assertIsInstance(sha, bytes)
self.assertEqual(len(sha), 40)
def test_unicode(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"refs/heads/foo"] = c3.id
sha = porcelain.commit(
self.repo.path,
message="Some message",
author="Joe <joe@example.com>",
committer="Bob <bob@example.com>",
)
self.assertIsInstance(sha, bytes)
self.assertEqual(len(sha), 40)
def test_no_verify(self):
if os.name != "posix":
self.skipTest("shell hook tests requires POSIX shell")
self.assertTrue(os.path.exists("/bin/sh"))
hooks_dir = os.path.join(self.repo.controldir(), "hooks")
os.makedirs(hooks_dir, exist_ok=True)
self.addCleanup(shutil.rmtree, hooks_dir)
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
hook_fail = "#!/bin/sh\nexit 1"
# hooks are executed in pre-commit, commit-msg order
# test commit-msg failure first, then pre-commit failure, then
# no_verify to skip both hooks
commit_msg = os.path.join(hooks_dir, "commit-msg")
with open(commit_msg, "w") as f:
f.write(hook_fail)
os.chmod(commit_msg, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
with self.assertRaises(CommitError):
porcelain.commit(
self.repo.path,
message="Some message",
author="Joe <joe@example.com>",
committer="Bob <bob@example.com>",
)
pre_commit = os.path.join(hooks_dir, "pre-commit")
with open(pre_commit, "w") as f:
f.write(hook_fail)
os.chmod(pre_commit, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
with self.assertRaises(CommitError):
porcelain.commit(
self.repo.path,
message="Some message",
author="Joe <joe@example.com>",
committer="Bob <bob@example.com>",
)
sha = porcelain.commit(
self.repo.path,
message="Some message",
author="Joe <joe@example.com>",
committer="Bob <bob@example.com>",
no_verify=True,
)
self.assertIsInstance(sha, bytes)
self.assertEqual(len(sha), 40)
class CleanTests(PorcelainTestCase):
def put_files(self, tracked, ignored, untracked, empty_dirs):
"""Put the described files in the wd"""
all_files = tracked | ignored | untracked
for file_path in all_files:
abs_path = os.path.join(self.repo.path, file_path)
# File may need to be written in a dir that doesn't exist yet, so
# create the parent dir(s) as necessary
parent_dir = os.path.dirname(abs_path)
try:
os.makedirs(parent_dir)
except FileExistsError:
pass
with open(abs_path, "w") as f:
f.write("")
with open(os.path.join(self.repo.path, ".gitignore"), "w") as f:
f.writelines(ignored)
for dir_path in empty_dirs:
os.mkdir(os.path.join(self.repo.path, "empty_dir"))
files_to_add = [os.path.join(self.repo.path, t) for t in tracked]
porcelain.add(repo=self.repo.path, paths=files_to_add)
porcelain.commit(repo=self.repo.path, message="init commit")
def assert_wd(self, expected_paths):
"""Assert paths of files and dirs in wd are same as expected_paths"""
control_dir_rel = os.path.relpath(self.repo._controldir, self.repo.path)
# normalize paths to simplify comparison across platforms
found_paths = {
os.path.normpath(p)
for p in flat_walk_dir(self.repo.path)
if not p.split(os.sep)[0] == control_dir_rel
}
norm_expected_paths = {os.path.normpath(p) for p in expected_paths}
self.assertEqual(found_paths, norm_expected_paths)
def test_from_root(self):
self.put_files(
tracked={"tracked_file", "tracked_dir/tracked_file", ".gitignore"},
ignored={"ignored_file"},
untracked={
"untracked_file",
"tracked_dir/untracked_dir/untracked_file",
"untracked_dir/untracked_dir/untracked_file",
},
empty_dirs={"empty_dir"},
)
porcelain.clean(repo=self.repo.path, target_dir=self.repo.path)
self.assert_wd(
{
"tracked_file",
"tracked_dir/tracked_file",
".gitignore",
"ignored_file",
"tracked_dir",
}
)
def test_from_subdir(self):
self.put_files(
tracked={"tracked_file", "tracked_dir/tracked_file", ".gitignore"},
ignored={"ignored_file"},
untracked={
"untracked_file",
"tracked_dir/untracked_dir/untracked_file",
"untracked_dir/untracked_dir/untracked_file",
},
empty_dirs={"empty_dir"},
)
porcelain.clean(
repo=self.repo,
target_dir=os.path.join(self.repo.path, "untracked_dir"),
)
self.assert_wd(
{
"tracked_file",
"tracked_dir/tracked_file",
".gitignore",
"ignored_file",
"untracked_file",
"tracked_dir/untracked_dir/untracked_file",
"empty_dir",
"untracked_dir",
"tracked_dir",
"tracked_dir/untracked_dir",
}
)
class CloneTests(PorcelainTestCase):
def test_simple_local(self):
f1_1 = make_object(Blob, data=b"f1")
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {
1: [(b"f1", f1_1), (b"f2", f1_1)],
2: [(b"f1", f1_1), (b"f2", f1_1)],
3: [(b"f1", f1_1), (b"f2", f1_1)],
}
c1, c2, c3 = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
self.repo.refs[b"refs/tags/foo"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
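# With checkout=False the clone gets refs and objects but no files in the working tree.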
r = porcelain.clone(
self.repo.path, target_path, checkout=False, errstream=errstream
)
self.addCleanup(r.close)
self.assertEqual(r.path, target_path)
target_repo = Repo(target_path)
self.assertEqual(0, len(target_repo.open_index()))
self.assertEqual(c3.id, target_repo.refs[b"refs/tags/foo"])
self.assertNotIn(b"f1", os.listdir(target_path))
self.assertNotIn(b"f2", os.listdir(target_path))
c = r.get_config()
encoded_path = self.repo.path
if not isinstance(encoded_path, bytes):
encoded_path = encoded_path.encode("utf-8")
self.assertEqual(encoded_path, c.get((b"remote", b"origin"), b"url"))
self.assertEqual(
b"+refs/heads/*:refs/remotes/origin/*",
c.get((b"remote", b"origin"), b"fetch"),
)
def test_simple_local_with_checkout(self):
f1_1 = make_object(Blob, data=b"f1")
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {
1: [(b"f1", f1_1), (b"f2", f1_1)],
2: [(b"f1", f1_1), (b"f2", f1_1)],
3: [(b"f1", f1_1), (b"f2", f1_1)],
}
c1, c2, c3 = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
with porcelain.clone(
self.repo.path, target_path, checkout=True, errstream=errstream
) as r:
self.assertEqual(r.path, target_path)
with Repo(target_path) as r:
self.assertEqual(r.head(), c3.id)
self.assertIn("f1", os.listdir(target_path))
self.assertIn("f2", os.listdir(target_path))
def test_bare_local_with_checkout(self):
f1_1 = make_object(Blob, data=b"f1")
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {
1: [(b"f1", f1_1), (b"f2", f1_1)],
2: [(b"f1", f1_1), (b"f2", f1_1)],
3: [(b"f1", f1_1), (b"f2", f1_1)],
}
c1, c2, c3 = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
with porcelain.clone(
self.repo.path, target_path, bare=True, errstream=errstream
) as r:
self.assertEqual(r.path, target_path)
with Repo(target_path) as r:
r.head()
self.assertRaises(NoIndexPresent, r.open_index)
self.assertNotIn(b"f1", os.listdir(target_path))
self.assertNotIn(b"f2", os.listdir(target_path))
def test_no_checkout_with_bare(self):
f1_1 = make_object(Blob, data=b"f1")
commit_spec = [[1]]
trees = {1: [(b"f1", f1_1), (b"f2", f1_1)]}
(c1,) = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c1.id
self.repo.refs[b"HEAD"] = c1.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
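# checkout=True and bare=True are mutually exclusive, so clone() must raise an error.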
self.assertRaises(
porcelain.Error,
porcelain.clone,
self.repo.path,
target_path,
checkout=True,
bare=True,
errstream=errstream,
)
def test_no_head_no_checkout(self):
f1_1 = make_object(Blob, data=b"f1")
commit_spec = [[1]]
trees = {1: [(b"f1", f1_1), (b"f2", f1_1)]}
(c1,) = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c1.id
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
errstream = BytesIO()
r = porcelain.clone(
self.repo.path, target_path, checkout=True, errstream=errstream
)
r.close()
def test_no_head_no_checkout_outstream_errstream_autofallback(self):
f1_1 = make_object(Blob, data=b"f1")
commit_spec = [[1]]
trees = {1: [(b"f1", f1_1), (b"f2", f1_1)]}
(c1,) = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c1.id
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
errstream = porcelain.NoneStream()
r = porcelain.clone(
self.repo.path, target_path, checkout=True, errstream=errstream
)
r.close()
def test_source_broken(self):
with tempfile.TemporaryDirectory() as parent:
target_path = os.path.join(parent, "target")
self.assertRaises(
Exception, porcelain.clone, "/nonexistant/repo", target_path
)
self.assertFalse(os.path.exists(target_path))
def test_fetch_symref(self):
f1_1 = make_object(Blob, data=b"f1")
trees = {1: [(b"f1", f1_1), (b"f2", f1_1)]}
[c1] = build_commit_graph(self.repo.object_store, [[1]], trees)
self.repo.refs.set_symbolic_ref(b"HEAD", b"refs/heads/else")
self.repo.refs[b"refs/heads/else"] = c1.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
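# The clone should carry over the remote's symbolic HEAD, which points at refs/heads/else.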
r = porcelain.clone(
self.repo.path, target_path, checkout=False, errstream=errstream
)
self.addCleanup(r.close)
self.assertEqual(r.path, target_path)
target_repo = Repo(target_path)
self.assertEqual(0, len(target_repo.open_index()))
self.assertEqual(c1.id, target_repo.refs[b"refs/heads/else"])
self.assertEqual(c1.id, target_repo.refs[b"HEAD"])
self.assertEqual(
{b"HEAD": b"refs/heads/else", b"refs/remotes/origin/HEAD": b"refs/remotes/origin/else"},
target_repo.refs.get_symrefs(),
)
class InitTests(TestCase):
def test_non_bare(self):
repo_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, repo_dir)
porcelain.init(repo_dir)
def test_bare(self):
repo_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, repo_dir)
porcelain.init(repo_dir, bare=True)
class AddTests(PorcelainTestCase):
def test_add_default_paths(self):
# create a file for initial commit
fullpath = os.path.join(self.repo.path, "blah")
with open(fullpath, "w") as f:
f.write("\n")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
repo=self.repo.path,
message=b"test",
author=b"test <email>",
committer=b"test <email>",
)
# Add a second test file and a file in a directory
with open(os.path.join(self.repo.path, "foo"), "w") as f:
f.write("\n")
os.mkdir(os.path.join(self.repo.path, "adir"))
with open(os.path.join(self.repo.path, "adir", "afile"), "w") as f:
f.write("\n")
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
self.assertEqual(set(["foo", "blah", "adir", ".git"]), set(os.listdir(".")))
self.assertEqual(
(["foo", os.path.join("adir", "afile")], set()),
porcelain.add(self.repo.path),
)
finally:
os.chdir(cwd)
# Check that foo was added and nothing in .git was modified
index = self.repo.open_index()
self.assertEqual(sorted(index), [b"adir/afile", b"blah", b"foo"])
def test_add_default_paths_subdir(self):
os.mkdir(os.path.join(self.repo.path, "foo"))
with open(os.path.join(self.repo.path, "blah"), "w") as f:
f.write("\n")
with open(os.path.join(self.repo.path, "foo", "blie"), "w") as f:
f.write("\n")
cwd = os.getcwd()
try:
os.chdir(os.path.join(self.repo.path, "foo"))
porcelain.add(repo=self.repo.path)
porcelain.commit(
repo=self.repo.path,
message=b"test",
author=b"test <email>",
committer=b"test <email>",
)
finally:
os.chdir(cwd)
index = self.repo.open_index()
self.assertEqual(sorted(index), [b"foo/blie"])
def test_add_file(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
self.assertIn(b"foo", self.repo.open_index())
def test_add_ignored(self):
with open(os.path.join(self.repo.path, ".gitignore"), "w") as f:
f.write("foo\nsubdir/")
with open(os.path.join(self.repo.path, "foo"), "w") as f:
f.write("BAR")
with open(os.path.join(self.repo.path, "bar"), "w") as f:
f.write("BAR")
os.mkdir(os.path.join(self.repo.path, "subdir"))
with open(os.path.join(self.repo.path, "subdir", "baz"), "w") as f:
f.write("BAZ")
(added, ignored) = porcelain.add(
self.repo.path,
paths=[
os.path.join(self.repo.path, "foo"),
os.path.join(self.repo.path, "bar"),
os.path.join(self.repo.path, "subdir"),
],
)
self.assertIn(b"bar", self.repo.open_index())
self.assertEqual(set(["bar"]), set(added))
self.assertEqual(set(["foo", os.path.join("subdir", "")]), ignored)
def test_add_file_absolute_path(self):
# Absolute paths that point inside the repository are supported
with open(os.path.join(self.repo.path, "foo"), "w") as f:
f.write("BAR")
porcelain.add(self.repo, paths=[os.path.join(self.repo.path, "foo")])
self.assertIn(b"foo", self.repo.open_index())
def test_add_not_in_repo(self):
with open(os.path.join(self.test_dir, "foo"), "w") as f:
f.write("BAR")
self.assertRaises(
ValueError,
porcelain.add,
self.repo,
paths=[os.path.join(self.test_dir, "foo")],
)
self.assertRaises(
(ValueError, FileNotFoundError),
porcelain.add,
self.repo,
paths=["../foo"],
)
self.assertEqual([], list(self.repo.open_index()))
def test_add_file_crlf_conversion(self):
# Set the right configuration to the repo
c = self.repo.get_config()
c.set("core", "autocrlf", "input")
c.write_to_path()
# Add a file with CRLF line-ending
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "wb") as f:
f.write(b"line1\r\nline2")
porcelain.add(self.repo.path, paths=[fullpath])
# The line-endings should have been converted to LF
index = self.repo.open_index()
self.assertIn(b"foo", index)
entry = index[b"foo"]
blob = self.repo[entry.sha]
self.assertEqual(blob.data, b"line1\nline2")
class RemoveTests(PorcelainTestCase):
def test_remove_file(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
porcelain.commit(
repo=self.repo,
message=b"test",
author=b"test <email>",
committer=b"test <email>",
)
self.assertTrue(os.path.exists(os.path.join(self.repo.path, "foo")))
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
porcelain.remove(self.repo.path, paths=["foo"])
finally:
os.chdir(cwd)
self.assertFalse(os.path.exists(os.path.join(self.repo.path, "foo")))
def test_remove_file_staged(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
porcelain.add(self.repo.path, paths=[fullpath])
self.assertRaises(Exception, porcelain.rm, self.repo.path, paths=["foo"])
finally:
os.chdir(cwd)
def test_remove_file_removed_on_disk(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
os.remove(fullpath)
porcelain.remove(self.repo.path, paths=["foo"])
finally:
os.chdir(cwd)
self.assertFalse(os.path.exists(os.path.join(self.repo.path, "foo")))
class LogTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.log(self.repo.path, outstream=outstream)
self.assertEqual(3, outstream.getvalue().count("-" * 50))
def test_max_entries(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.log(self.repo.path, outstream=outstream, max_entries=1)
self.assertEqual(1, outstream.getvalue().count("-" * 50))
class ShowTests(PorcelainTestCase):
def test_nolist(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.show(self.repo.path, objects=c3.id, outstream=outstream)
self.assertTrue(outstream.getvalue().startswith("-" * 50))
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.show(self.repo.path, objects=[c3.id], outstream=outstream)
self.assertTrue(outstream.getvalue().startswith("-" * 50))
def test_blob(self):
b = Blob.from_string(b"The Foo\n")
self.repo.object_store.add_object(b)
outstream = StringIO()
porcelain.show(self.repo.path, objects=[b.id], outstream=outstream)
self.assertEqual(outstream.getvalue(), "The Foo\n")
def test_commit_no_parent(self):
a = Blob.from_string(b"The Foo\n")
ta = Tree()
ta.add(b"somename", 0o100644, a.id)
ca = make_commit(tree=ta.id)
self.repo.object_store.add_objects([(a, None), (ta, None), (ca, None)])
outstream = StringIO()
porcelain.show(self.repo.path, objects=[ca.id], outstream=outstream)
self.assertMultiLineEqual(
outstream.getvalue(),
"""\
--------------------------------------------------
commit: 344da06c1bb85901270b3e8875c988a027ec087d
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
new file mode 100644
index 0000000..ea5c7bf
--- /dev/null
+++ b/somename
@@ -0,0 +1 @@
+The Foo
""",
)
def test_tag(self):
a = Blob.from_string(b"The Foo\n")
ta = Tree()
ta.add(b"somename", 0o100644, a.id)
ca = make_commit(tree=ta.id)
self.repo.object_store.add_objects([(a, None), (ta, None), (ca, None)])
porcelain.tag_create(
self.repo.path,
b"tryme",
b"foo <foo@bar.com>",
b"bar",
annotated=True,
objectish=ca.id,
tag_time=1552854211,
tag_timezone=0,
)
outstream = StringIO()
porcelain.show(self.repo, objects=[b"refs/tags/tryme"], outstream=outstream)
self.maxDiff = None
self.assertMultiLineEqual(
outstream.getvalue(),
"""\
Tagger: foo <foo@bar.com>
Date: Sun Mar 17 2019 20:23:31 +0000
bar
--------------------------------------------------
commit: 344da06c1bb85901270b3e8875c988a027ec087d
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
new file mode 100644
index 0000000..ea5c7bf
--- /dev/null
+++ b/somename
@@ -0,0 +1 @@
+The Foo
""",
)
def test_commit_with_change(self):
a = Blob.from_string(b"The Foo\n")
ta = Tree()
ta.add(b"somename", 0o100644, a.id)
ca = make_commit(tree=ta.id)
b = Blob.from_string(b"The Bar\n")
tb = Tree()
tb.add(b"somename", 0o100644, b.id)
cb = make_commit(tree=tb.id, parents=[ca.id])
self.repo.object_store.add_objects(
[
(a, None),
(b, None),
(ta, None),
(tb, None),
(ca, None),
(cb, None),
]
)
outstream = StringIO()
porcelain.show(self.repo.path, objects=[cb.id], outstream=outstream)
self.assertMultiLineEqual(
outstream.getvalue(),
"""\
--------------------------------------------------
commit: 2c6b6c9cb72c130956657e1fdae58e5b103744fa
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
index ea5c7bf..fd38bcb 100644
--- a/somename
+++ b/somename
@@ -1 +1 @@
-The Foo
+The Bar
""",
)
class SymbolicRefTests(PorcelainTestCase):
def test_set_wrong_symbolic_ref(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
self.assertRaises(
porcelain.Error, porcelain.symbolic_ref, self.repo.path, b"foobar"
)
def test_set_force_wrong_symbolic_ref(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
porcelain.symbolic_ref(self.repo.path, b"force_foobar", force=True)
# test if we actually changed the file
with self.repo.get_named_file("HEAD") as f:
new_ref = f.read()
self.assertEqual(new_ref, b"ref: refs/heads/force_foobar\n")
def test_set_symbolic_ref(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
porcelain.symbolic_ref(self.repo.path, b"master")
def test_set_symbolic_ref_other_than_master(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store,
[[1], [2, 1], [3, 1, 2]],
attrs=dict(refs="develop"),
)
self.repo.refs[b"HEAD"] = c3.id
self.repo.refs[b"refs/heads/develop"] = c3.id
porcelain.symbolic_ref(self.repo.path, b"develop")
# test if we actually changed the file
with self.repo.get_named_file("HEAD") as f:
new_ref = f.read()
self.assertEqual(new_ref, b"ref: refs/heads/develop\n")
class DiffTreeTests(PorcelainTestCase):
def test_empty(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
outstream = BytesIO()
porcelain.diff_tree(self.repo.path, c2.tree, c3.tree, outstream=outstream)
self.assertEqual(outstream.getvalue(), b"")
class CommitTreeTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
b = Blob()
b.data = b"foo the bar"
t = Tree()
t.add(b"somename", 0o100644, b.id)
self.repo.object_store.add_object(t)
self.repo.object_store.add_object(b)
sha = porcelain.commit_tree(
self.repo.path,
t.id,
message=b"Withcommit.",
author=b"Joe <joe@example.com>",
committer=b"Jane <jane@example.com>",
)
self.assertIsInstance(sha, bytes)
self.assertEqual(len(sha), 40)
class RevListTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
outstream = BytesIO()
porcelain.rev_list(self.repo.path, [c3.id], outstream=outstream)
self.assertEqual(
c3.id + b"\n" + c2.id + b"\n" + c1.id + b"\n", outstream.getvalue()
)
@skipIf(platform.python_implementation() == "PyPy" or sys.platform == "win32", "gpgme not easily available or supported on Windows and PyPy")
class TagCreateSignTests(PorcelainGpgTestCase):
def test_default_key(self):
import gpg
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
cfg = self.repo.get_config()
cfg.set(("user",), "signingKey", PorcelainGpgTestCase.DEFAULT_KEY_ID)
self.import_default_key()
porcelain.tag_create(
self.repo.path,
b"tryme",
b"foo <foo@bar.com>",
b"bar",
annotated=True,
sign=True,
)
tags = self.repo.refs.as_dict(b"refs/tags")
self.assertEqual(list(tags.keys()), [b"tryme"])
tag = self.repo[b"refs/tags/tryme"]
self.assertIsInstance(tag, Tag)
self.assertEqual(b"foo <foo@bar.com>", tag.tagger)
self.assertEqual(b"bar\n", tag.message)
self.assertLess(time.time() - tag.tag_time, 5)
tag = self.repo[b'refs/tags/tryme']
# GPG Signatures aren't deterministic, so we can't do a static assertion.
tag.verify()
tag.verify(keyids=[PorcelainGpgTestCase.DEFAULT_KEY_ID])
self.import_non_default_key()
self.assertRaises(
gpg.errors.MissingSignatures,
tag.verify,
keyids=[PorcelainGpgTestCase.NON_DEFAULT_KEY_ID],
)
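# Tamper with the signed payload so that signature verification fails.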
tag._chunked_text = [b"bad data", tag._signature]
self.assertRaises(
gpg.errors.BadSignatures,
tag.verify,
)
def test_non_default_key(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
cfg = self.repo.get_config()
cfg.set(("user",), "signingKey", PorcelainGpgTestCase.DEFAULT_KEY_ID)
self.import_non_default_key()
porcelain.tag_create(
self.repo.path,
b"tryme",
b"foo <foo@bar.com>",
b"bar",
annotated=True,
sign=PorcelainGpgTestCase.NON_DEFAULT_KEY_ID,
)
tags = self.repo.refs.as_dict(b"refs/tags")
self.assertEqual(list(tags.keys()), [b"tryme"])
tag = self.repo[b"refs/tags/tryme"]
self.assertIsInstance(tag, Tag)
self.assertEqual(b"foo <foo@bar.com>", tag.tagger)
self.assertEqual(b"bar\n", tag.message)
self.assertLess(time.time() - tag.tag_time, 5)
tag = self.repo[b'refs/tags/tryme']
# GPG Signatures aren't deterministic, so we can't do a static assertion.
tag.verify()
class TagCreateTests(PorcelainTestCase):
def test_annotated(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
porcelain.tag_create(
self.repo.path,
b"tryme",
b"foo <foo@bar.com>",
b"bar",
annotated=True,
)
tags = self.repo.refs.as_dict(b"refs/tags")
self.assertEqual(list(tags.keys()), [b"tryme"])
tag = self.repo[b"refs/tags/tryme"]
self.assertIsInstance(tag, Tag)
self.assertEqual(b"foo <foo@bar.com>", tag.tagger)
self.assertEqual(b"bar\n", tag.message)
self.assertLess(time.time() - tag.tag_time, 5)
def test_unannotated(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
porcelain.tag_create(self.repo.path, b"tryme", annotated=False)
tags = self.repo.refs.as_dict(b"refs/tags")
self.assertEqual(list(tags.keys()), [b"tryme"])
self.repo[b"refs/tags/tryme"]
self.assertEqual(list(tags.values()), [self.repo.head()])
def test_unannotated_unicode(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]]
)
self.repo.refs[b"HEAD"] = c3.id
porcelain.tag_create(self.repo.path, "tryme", annotated=False)
tags = self.repo.refs.as_dict(b"refs/tags")
self.assertEqual(list(tags.keys()), [b"tryme"])
self.repo[b"refs/tags/tryme"]
self.assertEqual(list(tags.values()), [self.repo.head()])
class TagListTests(PorcelainTestCase):
def test_empty(self):
tags = porcelain.tag_list(self.repo.path)
self.assertEqual([], tags)
def test_simple(self):
self.repo.refs[b"refs/tags/foo"] = b"aa" * 20
self.repo.refs[b"refs/tags/bar/bla"] = b"bb" * 20
tags = porcelain.tag_list(self.repo.path)
self.assertEqual([b"bar/bla", b"foo"], tags)
class TagDeleteTests(PorcelainTestCase):
def test_simple(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.tag_create(self.repo, b"foo")
self.assertIn(b"foo", porcelain.tag_list(self.repo))
porcelain.tag_delete(self.repo, b"foo")
self.assertNotIn(b"foo", porcelain.tag_list(self.repo))
class ResetTests(PorcelainTestCase):
def test_hard_head(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path,
message=b"Some message",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>",
)
with open(os.path.join(self.repo.path, "foo"), "wb") as f:
f.write(b"OOH")
porcelain.reset(self.repo, "hard", b"HEAD")
index = self.repo.open_index()
changes = list(
tree_changes(
self.repo,
index.commit(self.repo.object_store),
self.repo[b"HEAD"].tree,
)
)
self.assertEqual([], changes)
def test_hard_commit(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
sha = porcelain.commit(
self.repo.path,
message=b"Some message",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>",
)
with open(fullpath, "wb") as f:
f.write(b"BAZ")
porcelain.add(self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path,
message=b"Some other message",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>",
)
porcelain.reset(self.repo, "hard", sha)
index = self.repo.open_index()
changes = list(
tree_changes(
self.repo,
index.commit(self.repo.object_store),
self.repo[sha].tree,
)
)
self.assertEqual([], changes)
class ResetFileTests(PorcelainTestCase):
def test_reset_modify_file_to_commit(self):
file = 'foo'
full_path = os.path.join(self.repo.path, file)
with open(full_path, 'w') as f:
f.write('hello')
porcelain.add(self.repo, paths=[full_path])
sha = porcelain.commit(
self.repo,
message=b"unitest",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>",
)
with open(full_path, 'a') as f:
f.write('something new')
porcelain.reset_file(self.repo, file, target=sha)
with open(full_path, 'r') as f:
self.assertEqual('hello', f.read())
def test_reset_remove_file_to_commit(self):
file = 'foo'
full_path = os.path.join(self.repo.path, file)
with open(full_path, 'w') as f:
f.write('hello')
porcelain.add(self.repo, paths=[full_path])
sha = porcelain.commit(
self.repo,
message=b"unitest",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>",
)
os.remove(full_path)
porcelain.reset_file(self.repo, file, target=sha)
with open(full_path, 'r') as f:
self.assertEqual('hello', f.read())
def test_resetfile_with_dir(self):
os.mkdir(os.path.join(self.repo.path, 'new_dir'))
full_path = os.path.join(self.repo.path, 'new_dir', 'foo')
with open(full_path, 'w') as f:
f.write('hello')
porcelain.add(self.repo, paths=[full_path])
sha = porcelain.commit(
self.repo,
message=b"unitest",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>",
)
with open(full_path, 'a') as f:
f.write('something new')
porcelain.commit(
self.repo,
message=b"unitest 2",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>",
)
porcelain.reset_file(self.repo, os.path.join('new_dir', 'foo'), target=sha)
with open(full_path, 'r') as f:
self.assertEqual('hello', f.read())
class PushTests(PorcelainTestCase):
def test_simple(self):
"""
Basic test of porcelain push where self.repo is the remote. First
clone the remote, commit a file to the clone, then push the changes
back to the remote.
"""
outstream = BytesIO()
errstream = BytesIO()
porcelain.commit(
repo=self.repo.path,
message=b"init",
author=b"author <email>",
committer=b"committer <email>",
)
# Setup target repo cloned from temp test repo
clone_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, clone_path)
target_repo = porcelain.clone(
self.repo.path, target=clone_path, errstream=errstream
)
try:
self.assertEqual(target_repo[b"HEAD"], self.repo[b"HEAD"])
finally:
target_repo.close()
# create a second file to be pushed back to origin
handle, fullpath = tempfile.mkstemp(dir=clone_path)
os.close(handle)
porcelain.add(repo=clone_path, paths=[fullpath])
porcelain.commit(
repo=clone_path,
message=b"push",
author=b"author <email>",
committer=b"committer <email>",
)
# Setup a non-checked out branch in the remote
refs_path = b"refs/heads/foo"
new_id = self.repo[b"HEAD"].id
self.assertNotEqual(new_id, ZERO_SHA)
self.repo.refs[refs_path] = new_id
# Push to the remote
porcelain.push(
clone_path,
"origin",
b"HEAD:" + refs_path,
outstream=outstream,
errstream=errstream,
)
self.assertEqual(
target_repo.refs[b"refs/remotes/origin/foo"],
target_repo.refs[b"HEAD"],
)
# Check that the target and source repositories now agree on the pushed refs
with Repo(clone_path) as r_clone:
self.assertEqual(
{
b"HEAD": new_id,
b"refs/heads/foo": r_clone[b"HEAD"].id,
b"refs/heads/master": new_id,
},
self.repo.get_refs(),
)
self.assertEqual(r_clone[b"HEAD"].id, self.repo[refs_path].id)
# Get the change in the target repo corresponding to the add
# this will be in the foo branch.
change = list(
tree_changes(
self.repo,
self.repo[b"HEAD"].tree,
self.repo[b"refs/heads/foo"].tree,
)
)[0]
self.assertEqual(
os.path.basename(fullpath), change.new.path.decode("ascii")
)
def test_local_missing(self):
"""Pushing a new branch."""
outstream = BytesIO()
errstream = BytesIO()
# Setup target repo cloned from temp test repo
clone_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, clone_path)
target_repo = porcelain.init(clone_path)
target_repo.close()
self.assertRaises(
porcelain.Error,
porcelain.push,
self.repo,
clone_path,
b"HEAD:refs/heads/master",
outstream=outstream,
errstream=errstream,
)
def test_new(self):
"""Pushing a new branch."""
outstream = BytesIO()
errstream = BytesIO()
# Setup target repo cloned from temp test repo
clone_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, clone_path)
target_repo = porcelain.init(clone_path)
target_repo.close()
# create a second file to be pushed back to origin
handle, fullpath = tempfile.mkstemp(dir=clone_path)
os.close(handle)
porcelain.add(repo=clone_path, paths=[fullpath])
new_id = porcelain.commit(
repo=self.repo,
message=b"push",
author=b"author <email>",
committer=b"committer <email>",
)
# Push to the remote
porcelain.push(
self.repo,
clone_path,
b"HEAD:refs/heads/master",
outstream=outstream,
errstream=errstream,
)
with Repo(clone_path) as r_clone:
self.assertEqual(
{
b"HEAD": new_id,
b"refs/heads/master": new_id,
},
r_clone.get_refs(),
)
def test_delete(self):
"""Basic test of porcelain push, removing a branch."""
outstream = BytesIO()
errstream = BytesIO()
porcelain.commit(
repo=self.repo.path,
message=b"init",
author=b"author <email>",
committer=b"committer <email>",
)
# Setup target repo cloned from temp test repo
clone_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, clone_path)
target_repo = porcelain.clone(
self.repo.path, target=clone_path, errstream=errstream
)
target_repo.close()
# Setup a non-checked out branch in the remote
refs_path = b"refs/heads/foo"
new_id = self.repo[b"HEAD"].id
self.assertNotEqual(new_id, ZERO_SHA)
self.repo.refs[refs_path] = new_id
# Push an empty local side (":refs/heads/foo") to ask the remote to delete that branch
porcelain.push(
clone_path,
self.repo.path,
b":" + refs_path,
outstream=outstream,
errstream=errstream,
)
self.assertEqual(
{
b"HEAD": new_id,
b"refs/heads/master": new_id,
},
self.repo.get_refs(),
)
def test_diverged(self):
outstream = BytesIO()
errstream = BytesIO()
porcelain.commit(
repo=self.repo.path,
message=b"init",
author=b"author <email>",
committer=b"committer <email>",
)
# Setup target repo cloned from temp test repo
clone_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, clone_path)
target_repo = porcelain.clone(
self.repo.path, target=clone_path, errstream=errstream
)
target_repo.close()
remote_id = porcelain.commit(
repo=self.repo.path,
message=b"remote change",
author=b"author <email>",
committer=b"committer <email>",
)
local_id = porcelain.commit(
repo=clone_path,
message=b"local change",
author=b"author <email>",
committer=b"committer <email>",
)
outstream = BytesIO()
errstream = BytesIO()
# Push to the remote
self.assertRaises(
porcelain.DivergedBranches,
porcelain.push,
clone_path,
self.repo.path,
b"refs/heads/master",
outstream=outstream,
errstream=errstream,
)
self.assertEqual(
{
b"HEAD": remote_id,
b"refs/heads/master": remote_id,
},
self.repo.get_refs(),
)
self.assertEqual(b"", outstream.getvalue())
self.assertEqual(b"", errstream.getvalue())
outstream = BytesIO()
errstream = BytesIO()
# Push to the remote with --force
porcelain.push(
clone_path,
self.repo.path,
b"refs/heads/master",
outstream=outstream,
errstream=errstream,
force=True,
)
self.assertEqual(
{
b"HEAD": local_id,
b"refs/heads/master": local_id,
},
self.repo.get_refs(),
)
self.assertEqual(b"", outstream.getvalue())
self.assertTrue(re.match(b"Push to .* successful.\n", errstream.getvalue()))
class PullTests(PorcelainTestCase):
def setUp(self):
super(PullTests, self).setUp()
# create a file for initial commit
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(
repo=self.repo.path,
message=b"test",
author=b"test <email>",
committer=b"test <email>",
)
# Setup target repo
self.target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.target_path)
target_repo = porcelain.clone(
self.repo.path, target=self.target_path, errstream=BytesIO()
)
target_repo.close()
# create a second file to be pushed
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(
repo=self.repo.path,
message=b"test2",
author=b"test2 <email>",
committer=b"test2 <email>",
)
self.assertIn(b"refs/heads/master", self.repo.refs)
self.assertIn(b"refs/heads/master", target_repo.refs)
def test_simple(self):
outstream = BytesIO()
errstream = BytesIO()
# Pull changes into the cloned repo
porcelain.pull(
self.target_path,
self.repo.path,
b"refs/heads/master",
outstream=outstream,
errstream=errstream,
)
# Check the target repo for pushed changes
with Repo(self.target_path) as r:
self.assertEqual(r[b"HEAD"].id, self.repo[b"HEAD"].id)
def test_diverged(self):
outstream = BytesIO()
errstream = BytesIO()
c3a = porcelain.commit(
repo=self.target_path,
message=b"test3a",
author=b"test2 <email>",
committer=b"test2 <email>",
)
porcelain.commit(
repo=self.repo.path,
message=b"test3b",
author=b"test2 <email>",
committer=b"test2 <email>",
)
# Pull changes into the cloned repo
self.assertRaises(
porcelain.DivergedBranches,
porcelain.pull,
self.target_path,
self.repo.path,
b"refs/heads/master",
outstream=outstream,
errstream=errstream,
)
# Check the target repo for pushed changes
with Repo(self.target_path) as r:
self.assertEqual(r[b"refs/heads/master"].id, c3a)
self.assertRaises(
NotImplementedError,
porcelain.pull,
self.target_path,
self.repo.path,
b"refs/heads/master",
outstream=outstream,
errstream=errstream,
fast_forward=False,
)
# Check the target repo for pushed changes
with Repo(self.target_path) as r:
self.assertEqual(r[b"refs/heads/master"].id, c3a)
def test_no_refspec(self):
outstream = BytesIO()
errstream = BytesIO()
# Pull changes into the cloned repo
porcelain.pull(
self.target_path,
self.repo.path,
outstream=outstream,
errstream=errstream,
)
# Check the target repo for pushed changes
with Repo(self.target_path) as r:
self.assertEqual(r[b"HEAD"].id, self.repo[b"HEAD"].id)
def test_no_remote_location(self):
outstream = BytesIO()
errstream = BytesIO()
# Pull changes into the cloned repo
porcelain.pull(
self.target_path,
refspecs=b"refs/heads/master",
outstream=outstream,
errstream=errstream,
)
# Check the target repo for pushed changes
with Repo(self.target_path) as r:
self.assertEqual(r[b"HEAD"].id, self.repo[b"HEAD"].id)
class StatusTests(PorcelainTestCase):
def test_empty(self):
results = porcelain.status(self.repo)
self.assertEqual({"add": [], "delete": [], "modify": []}, results.staged)
self.assertEqual([], results.unstaged)
def test_status_base(self):
"""Integration test for `status` functionality."""
# Commit a dummy file then modify it
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("origstuff")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
# Modify the access and modification times of the path
os.utime(fullpath, (0, 0))
with open(fullpath, "wb") as f:
f.write(b"stuff")
# Make a dummy file and stage it
filename_add = "bar"
fullpath = os.path.join(self.repo.path, filename_add)
with open(fullpath, "w") as f:
f.write("stuff")
porcelain.add(repo=self.repo.path, paths=fullpath)
results = porcelain.status(self.repo)
self.assertEqual(results.staged["add"][0], filename_add.encode("ascii"))
self.assertEqual(results.unstaged, [b"foo"])
def test_status_all(self):
del_path = os.path.join(self.repo.path, "foo")
mod_path = os.path.join(self.repo.path, "bar")
add_path = os.path.join(self.repo.path, "baz")
us_path = os.path.join(self.repo.path, "blye")
ut_path = os.path.join(self.repo.path, "blyat")
with open(del_path, "w") as f:
f.write("origstuff")
with open(mod_path, "w") as f:
f.write("origstuff")
with open(us_path, "w") as f:
f.write("origstuff")
porcelain.add(repo=self.repo.path, paths=[del_path, mod_path, us_path])
porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
porcelain.remove(self.repo.path, [del_path])
with open(add_path, "w") as f:
f.write("origstuff")
with open(mod_path, "w") as f:
f.write("more_origstuff")
with open(us_path, "w") as f:
f.write("more_origstuff")
porcelain.add(repo=self.repo.path, paths=[add_path, mod_path])
with open(us_path, "w") as f:
f.write("\norigstuff")
with open(ut_path, "w") as f:
f.write("origstuff")
results = porcelain.status(self.repo.path)
self.assertDictEqual(
{"add": [b"baz"], "delete": [b"foo"], "modify": [b"bar"]},
results.staged,
)
self.assertListEqual(results.unstaged, [b"blye"])
self.assertListEqual(results.untracked, ["blyat"])
def test_status_crlf_mismatch(self):
# First make a commit as if the file has been added on a Linux system
# or with core.autocrlf=True
file_path = os.path.join(self.repo.path, "crlf")
with open(file_path, "wb") as f:
f.write(b"line1\nline2")
porcelain.add(repo=self.repo.path, paths=[file_path])
porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
# Then update the file as if it was created by CGit on a Windows
# system with core.autocrlf=true
with open(file_path, "wb") as f:
f.write(b"line1\r\nline2")
results = porcelain.status(self.repo)
self.assertDictEqual({"add": [], "delete": [], "modify": []}, results.staged)
self.assertListEqual(results.unstaged, [b"crlf"])
self.assertListEqual(results.untracked, [])
def test_status_autocrlf_true(self):
# First make a commit as if the file has been added on a Linux system
# or with core.autocrlf=True
file_path = os.path.join(self.repo.path, "crlf")
with open(file_path, "wb") as f:
f.write(b"line1\nline2")
porcelain.add(repo=self.repo.path, paths=[file_path])
porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
# Then update the file as if it was created by CGit on a Windows
# system with core.autocrlf=true
with open(file_path, "wb") as f:
f.write(b"line1\r\nline2")
# TODO: It should be set automatically by looking at the configuration
c = self.repo.get_config()
c.set("core", "autocrlf", True)
c.write_to_path()
results = porcelain.status(self.repo)
self.assertDictEqual({"add": [], "delete": [], "modify": []}, results.staged)
self.assertListEqual(results.unstaged, [])
self.assertListEqual(results.untracked, [])
def test_status_autocrlf_input(self):
# Commit existing file with CRLF
file_path = os.path.join(self.repo.path, "crlf-exists")
with open(file_path, "wb") as f:
f.write(b"line1\r\nline2")
porcelain.add(repo=self.repo.path, paths=[file_path])
porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
c = self.repo.get_config()
c.set("core", "autocrlf", "input")
c.write_to_path()
# Add new (untracked) file
file_path = os.path.join(self.repo.path, "crlf-new")
with open(file_path, "wb") as f:
f.write(b"line1\r\nline2")
porcelain.add(repo=self.repo.path, paths=[file_path])
results = porcelain.status(self.repo)
self.assertDictEqual({"add": [b"crlf-new"], "delete": [], "modify": []}, results.staged)
self.assertListEqual(results.unstaged, [])
self.assertListEqual(results.untracked, [])
def test_get_tree_changes_add(self):
"""Unit test for get_tree_changes add."""
# Make a dummy file, stage
filename = "bar"
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, "w") as f:
f.write("stuff")
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
filename = "foo"
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, "w") as f:
f.write("stuff")
porcelain.add(repo=self.repo.path, paths=fullpath)
changes = porcelain.get_tree_changes(self.repo.path)
self.assertEqual(changes["add"][0], filename.encode("ascii"))
self.assertEqual(len(changes["add"]), 1)
self.assertEqual(len(changes["modify"]), 0)
self.assertEqual(len(changes["delete"]), 0)
def test_get_tree_changes_modify(self):
"""Unit test for get_tree_changes modify."""
# Make a dummy file, stage, commit, modify
filename = "foo"
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, "w") as f:
f.write("stuff")
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
with open(fullpath, "w") as f:
f.write("otherstuff")
porcelain.add(repo=self.repo.path, paths=fullpath)
changes = porcelain.get_tree_changes(self.repo.path)
self.assertEqual(changes["modify"][0], filename.encode("ascii"))
self.assertEqual(len(changes["add"]), 0)
self.assertEqual(len(changes["modify"]), 1)
self.assertEqual(len(changes["delete"]), 0)
def test_get_tree_changes_delete(self):
"""Unit test for get_tree_changes delete."""
# Make a dummy file, stage, commit, remove
filename = "foo"
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, "w") as f:
f.write("stuff")
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
porcelain.remove(repo=self.repo.path, paths=[filename])
finally:
os.chdir(cwd)
changes = porcelain.get_tree_changes(self.repo.path)
self.assertEqual(changes["delete"][0], filename.encode("ascii"))
self.assertEqual(len(changes["add"]), 0)
self.assertEqual(len(changes["modify"]), 0)
self.assertEqual(len(changes["delete"]), 1)
def test_get_untracked_paths(self):
with open(os.path.join(self.repo.path, ".gitignore"), "w") as f:
f.write("ignored\n")
with open(os.path.join(self.repo.path, "ignored"), "w") as f:
f.write("blah\n")
with open(os.path.join(self.repo.path, "notignored"), "w") as f:
f.write("blah\n")
os.symlink(
os.path.join(self.repo.path, os.pardir, "external_target"),
os.path.join(self.repo.path, "link"),
)
self.assertEqual(
set(["ignored", "notignored", ".gitignore", "link"]),
set(
porcelain.get_untracked_paths(
self.repo.path, self.repo.path, self.repo.open_index()
)
),
)
self.assertEqual(
set([".gitignore", "notignored", "link"]),
set(porcelain.status(self.repo).untracked),
)
self.assertEqual(
set([".gitignore", "notignored", "ignored", "link"]),
set(porcelain.status(self.repo, ignored=True).untracked),
)
def test_get_untracked_paths_subrepo(self):
with open(os.path.join(self.repo.path, ".gitignore"), "w") as f:
f.write("nested/\n")
with open(os.path.join(self.repo.path, "notignored"), "w") as f:
f.write("blah\n")
subrepo = Repo.init(os.path.join(self.repo.path, "nested"), mkdir=True)
with open(os.path.join(subrepo.path, "ignored"), "w") as f:
f.write("bleep\n")
with open(os.path.join(subrepo.path, "with"), "w") as f:
f.write("bloop\n")
with open(os.path.join(subrepo.path, "manager"), "w") as f:
f.write("blop\n")
self.assertEqual(
set([".gitignore", "notignored", os.path.join("nested", "")]),
set(
porcelain.get_untracked_paths(
self.repo.path, self.repo.path, self.repo.open_index()
)
),
)
self.assertEqual(
set([".gitignore", "notignored"]),
set(
porcelain.get_untracked_paths(
self.repo.path,
self.repo.path,
self.repo.open_index(),
exclude_ignored=True,
)
),
)
self.assertEqual(
set(["ignored", "with", "manager"]),
set(
porcelain.get_untracked_paths(
subrepo.path, subrepo.path, subrepo.open_index()
)
),
)
self.assertEqual(
set(),
set(
porcelain.get_untracked_paths(
subrepo.path,
self.repo.path,
self.repo.open_index(),
)
),
)
self.assertEqual(
set([os.path.join('nested', 'ignored'),
os.path.join('nested', 'with'),
os.path.join('nested', 'manager')]),
set(
porcelain.get_untracked_paths(
self.repo.path,
subrepo.path,
self.repo.open_index(),
)
),
)
def test_get_untracked_paths_subdir(self):
with open(os.path.join(self.repo.path, ".gitignore"), "w") as f:
f.write("subdir/\nignored")
with open(os.path.join(self.repo.path, "notignored"), "w") as f:
f.write("blah\n")
os.mkdir(os.path.join(self.repo.path, "subdir"))
with open(os.path.join(self.repo.path, "ignored"), "w") as f:
f.write("foo")
with open(os.path.join(self.repo.path, "subdir", "ignored"), "w") as f:
f.write("foo")
self.assertEqual(
set(
[
".gitignore",
"notignored",
"ignored",
os.path.join("subdir", ""),
]
),
set(
porcelain.get_untracked_paths(
self.repo.path,
self.repo.path,
self.repo.open_index(),
)
)
)
self.assertEqual(
set([".gitignore", "notignored"]),
set(
porcelain.get_untracked_paths(
self.repo.path,
self.repo.path,
self.repo.open_index(),
exclude_ignored=True,
)
)
)
# TODO(jelmer): Add test for dulwich.porcelain.daemon
class UploadPackTests(PorcelainTestCase):
"""Tests for upload_pack."""
def test_upload_pack(self):
outf = BytesIO()
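# b"0000" is a pkt-line flush packet, i.e. an empty request from the client.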
exitcode = porcelain.upload_pack(self.repo.path, BytesIO(b"0000"), outf)
outlines = outf.getvalue().splitlines()
self.assertEqual([b"0000"], outlines)
self.assertEqual(0, exitcode)
class ReceivePackTests(PorcelainTestCase):
"""Tests for receive_pack."""
def test_receive_pack(self):
filename = "foo"
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, "w") as f:
f.write("stuff")
porcelain.add(repo=self.repo.path, paths=fullpath)
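# Commit with fixed timestamps and timezones so the commit SHA matches the hard-coded values below.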
self.repo.do_commit(
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
author_timestamp=1402354300,
commit_timestamp=1402354300,
author_timezone=0,
commit_timezone=0,
)
outf = BytesIO()
exitcode = porcelain.receive_pack(self.repo.path, BytesIO(b"0000"), outf)
outlines = outf.getvalue().splitlines()
self.assertEqual(
[
b"0091319b56ce3aee2d489f759736a79cc552c9bb86d9 HEAD\x00 report-status "
b"delete-refs quiet ofs-delta side-band-64k "
b"no-done symref=HEAD:refs/heads/master",
b"003f319b56ce3aee2d489f759736a79cc552c9bb86d9 refs/heads/master",
b"0000",
],
outlines,
)
self.assertEqual(0, exitcode)
class BranchListTests(PorcelainTestCase):
def test_standard(self):
self.assertEqual(set([]), set(porcelain.branch_list(self.repo)))
def test_new_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b"foo")
self.assertEqual(
set([b"master", b"foo"]), set(porcelain.branch_list(self.repo))
)
class BranchCreateTests(PorcelainTestCase):
def test_branch_exists(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b"foo")
self.assertRaises(porcelain.Error, porcelain.branch_create, self.repo, b"foo")
porcelain.branch_create(self.repo, b"foo", force=True)
def test_new_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b"foo")
self.assertEqual(
set([b"master", b"foo"]), set(porcelain.branch_list(self.repo))
)
class BranchDeleteTests(PorcelainTestCase):
def test_simple(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b"foo")
self.assertIn(b"foo", porcelain.branch_list(self.repo))
porcelain.branch_delete(self.repo, b"foo")
self.assertNotIn(b"foo", porcelain.branch_list(self.repo))
def test_simple_unicode(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, "foo")
self.assertIn(b"foo", porcelain.branch_list(self.repo))
porcelain.branch_delete(self.repo, "foo")
self.assertNotIn(b"foo", porcelain.branch_list(self.repo))
class FetchTests(PorcelainTestCase):
def test_simple(self):
outstream = BytesIO()
errstream = BytesIO()
# create a file for initial commit
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(
repo=self.repo.path,
message=b"test",
author=b"test <email>",
committer=b"test <email>",
)
# Setup target repo
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
target_repo = porcelain.clone(
self.repo.path, target=target_path, errstream=errstream
)
# create a second file to be pushed
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(
repo=self.repo.path,
message=b"test2",
author=b"test2 <email>",
committer=b"test2 <email>",
)
self.assertNotIn(self.repo[b"HEAD"].id, target_repo)
target_repo.close()
# Fetch changes into the cloned repo
porcelain.fetch(target_path, "origin", outstream=outstream, errstream=errstream)
# Assert that fetch updated the local image of the remote
self.assert_correct_remote_refs(target_repo.get_refs(), self.repo.get_refs())
# Check the target repo for pushed changes
with Repo(target_path) as r:
self.assertIn(self.repo[b"HEAD"].id, r)
def test_with_remote_name(self):
remote_name = "origin"
outstream = BytesIO()
errstream = BytesIO()
# create a file for initial commit
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(
repo=self.repo.path,
message=b"test",
author=b"test <email>",
committer=b"test <email>",
)
# Setup target repo
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
target_repo = porcelain.clone(
self.repo.path, target=target_path, errstream=errstream
)
# Capture current refs
target_refs = target_repo.get_refs()
# create a second file to be pushed
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(
repo=self.repo.path,
message=b"test2",
author=b"test2 <email>",
committer=b"test2 <email>",
)
self.assertNotIn(self.repo[b"HEAD"].id, target_repo)
target_config = target_repo.get_config()
target_config.set(
(b"remote", remote_name.encode()), b"url", self.repo.path.encode()
)
target_repo.close()
# Fetch changes into the cloned repo
porcelain.fetch(
target_path, remote_name, outstream=outstream, errstream=errstream
)
# Assert that fetch updated the local image of the remote
self.assert_correct_remote_refs(target_repo.get_refs(), self.repo.get_refs())
# Check the target repo for pushed changes, as well as updates
# for the refs
with Repo(target_path) as r:
self.assertIn(self.repo[b"HEAD"].id, r)
self.assertNotEqual(self.repo.get_refs(), target_refs)
def assert_correct_remote_refs(
self, local_refs, remote_refs, remote_name=b"origin"
):
"""Assert that known remote refs corresponds to actual remote refs."""
local_ref_prefix = b"refs/heads"
remote_ref_prefix = b"refs/remotes/" + remote_name
locally_known_remote_refs = {
k[len(remote_ref_prefix) + 1 :]: v
for k, v in local_refs.items()
if k.startswith(remote_ref_prefix)
}
normalized_remote_refs = {
k[len(local_ref_prefix) + 1 :]: v
for k, v in remote_refs.items()
if k.startswith(local_ref_prefix)
}
if b"HEAD" in locally_known_remote_refs and b"HEAD" in remote_refs:
normalized_remote_refs[b"HEAD"] = remote_refs[b"HEAD"]
self.assertEqual(locally_known_remote_refs, normalized_remote_refs)
class RepackTests(PorcelainTestCase):
def test_empty(self):
porcelain.repack(self.repo)
def test_simple(self):
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.repack(self.repo)
class LsTreeTests(PorcelainTestCase):
def test_empty(self):
porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
self.assertEqual(f.getvalue(), "")
def test_simple(self):
# Commit a dummy file then modify it
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("origstuff")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
self.assertEqual(
f.getvalue(),
"100644 blob 8b82634d7eae019850bb883f06abf428c58bc9aa\tfoo\n",
)
def test_recursive(self):
# Create a directory then write a dummy file in it
dirpath = os.path.join(self.repo.path, "adir")
filepath = os.path.join(dirpath, "afile")
os.mkdir(dirpath)
with open(filepath, "w") as f:
f.write("origstuff")
porcelain.add(repo=self.repo.path, paths=[filepath])
porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
self.assertEqual(
f.getvalue(),
"40000 tree b145cc69a5e17693e24d8a7be0016ed8075de66d\tadir\n",
)
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f, recursive=True)
self.assertEqual(
f.getvalue(),
"40000 tree b145cc69a5e17693e24d8a7be0016ed8075de66d\tadir\n"
"100644 blob 8b82634d7eae019850bb883f06abf428c58bc9aa\tadir"
"/afile\n",
)
class LsRemoteTests(PorcelainTestCase):
def test_empty(self):
self.assertEqual({}, porcelain.ls_remote(self.repo.path))
def test_some(self):
cid = porcelain.commit(
repo=self.repo.path,
message=b"test status",
author=b"author <email>",
committer=b"committer <email>",
)
self.assertEqual(
{b"refs/heads/master": cid, b"HEAD": cid},
porcelain.ls_remote(self.repo.path),
)
class LsFilesTests(PorcelainTestCase):
def test_empty(self):
self.assertEqual([], list(porcelain.ls_files(self.repo)))
def test_simple(self):
# Commit a dummy file then modify it
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("origstuff")
porcelain.add(repo=self.repo.path, paths=[fullpath])
self.assertEqual([b"foo"], list(porcelain.ls_files(self.repo)))
class RemoteAddTests(PorcelainTestCase):
def test_new(self):
porcelain.remote_add(self.repo, "jelmer", "git://jelmer.uk/code/dulwich")
c = self.repo.get_config()
self.assertEqual(
c.get((b"remote", b"jelmer"), b"url"),
b"git://jelmer.uk/code/dulwich",
)
def test_exists(self):
porcelain.remote_add(self.repo, "jelmer", "git://jelmer.uk/code/dulwich")
self.assertRaises(
porcelain.RemoteExists,
porcelain.remote_add,
self.repo,
"jelmer",
"git://jelmer.uk/code/dulwich",
)
class CheckIgnoreTests(PorcelainTestCase):
def test_check_ignored(self):
with open(os.path.join(self.repo.path, ".gitignore"), "w") as f:
f.write("foo")
foo_path = os.path.join(self.repo.path, "foo")
with open(foo_path, "w") as f:
f.write("BAR")
bar_path = os.path.join(self.repo.path, "bar")
with open(bar_path, "w") as f:
f.write("BAR")
self.assertEqual(["foo"], list(porcelain.check_ignore(self.repo, [foo_path])))
self.assertEqual([], list(porcelain.check_ignore(self.repo, [bar_path])))
def test_check_added_abs(self):
path = os.path.join(self.repo.path, "foo")
with open(path, "w") as f:
f.write("BAR")
self.repo.stage(["foo"])
with open(os.path.join(self.repo.path, ".gitignore"), "w") as f:
f.write("foo\n")
self.assertEqual([], list(porcelain.check_ignore(self.repo, [path])))
self.assertEqual(
["foo"],
list(porcelain.check_ignore(self.repo, [path], no_index=True)),
)
def test_check_added_rel(self):
with open(os.path.join(self.repo.path, "foo"), "w") as f:
f.write("BAR")
self.repo.stage(["foo"])
with open(os.path.join(self.repo.path, ".gitignore"), "w") as f:
f.write("foo\n")
cwd = os.getcwd()
os.mkdir(os.path.join(self.repo.path, "bar"))
os.chdir(os.path.join(self.repo.path, "bar"))
try:
self.assertEqual(list(porcelain.check_ignore(self.repo, ["../foo"])), [])
self.assertEqual(
["../foo"],
list(porcelain.check_ignore(self.repo, ["../foo"], no_index=True)),
)
finally:
os.chdir(cwd)
class UpdateHeadTests(PorcelainTestCase):
def test_set_to_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, "blah")
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(b"ref: refs/heads/blah", self.repo.refs.read_ref(b"HEAD"))
def test_set_to_branch_detached(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, "blah", detached=True)
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(c1.id, self.repo.refs.read_ref(b"HEAD"))
def test_set_to_commit_detached(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, c1.id, detached=True)
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(c1.id, self.repo.refs.read_ref(b"HEAD"))
def test_set_new_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, "blah", new_branch="bar")
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(b"ref: refs/heads/bar", self.repo.refs.read_ref(b"HEAD"))
class MailmapTests(PorcelainTestCase):
def test_no_mailmap(self):
self.assertEqual(
b"Jelmer Vernooij <jelmer@samba.org>",
porcelain.check_mailmap(self.repo, b"Jelmer Vernooij <jelmer@samba.org>"),
)
def test_mailmap_lookup(self):
with open(os.path.join(self.repo.path, ".mailmap"), "wb") as f:
f.write(
b"""\
Jelmer Vernooij <jelmer@debian.org>
"""
)
self.assertEqual(
b"Jelmer Vernooij <jelmer@debian.org>",
porcelain.check_mailmap(self.repo, b"Jelmer Vernooij <jelmer@samba.org>"),
)
class FsckTests(PorcelainTestCase):
def test_none(self):
self.assertEqual([], list(porcelain.fsck(self.repo)))
def test_git_dir(self):
obj = Tree()
a = Blob()
a.data = b"foo"
obj.add(b".git", 0o100644, a.id)
self.repo.object_store.add_objects([(a, None), (obj, None)])
self.assertEqual(
[(obj.id, "invalid name .git")],
[(sha, str(e)) for (sha, e) in porcelain.fsck(self.repo)],
)
class DescribeTests(PorcelainTestCase):
def test_no_commits(self):
self.assertRaises(KeyError, porcelain.describe, self.repo.path)
def test_single_commit(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
sha = porcelain.commit(
self.repo.path,
message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>",
)
self.assertEqual(
"g{}".format(sha[:7].decode("ascii")),
porcelain.describe(self.repo.path),
)
def test_tag(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path,
message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>",
)
porcelain.tag_create(
self.repo.path,
b"tryme",
b"foo <foo@bar.com>",
b"bar",
annotated=True,
)
self.assertEqual("tryme", porcelain.describe(self.repo.path))
def test_tag_and_commit(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path,
message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>",
)
porcelain.tag_create(
self.repo.path,
b"tryme",
b"foo <foo@bar.com>",
b"bar",
annotated=True,
)
with open(fullpath, "w") as f:
f.write("BAR2")
porcelain.add(repo=self.repo.path, paths=[fullpath])
sha = porcelain.commit(
self.repo.path,
message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>",
)
self.assertEqual(
"tryme-1-g{}".format(sha[:7].decode("ascii")),
porcelain.describe(self.repo.path),
)
class PathToTreeTests(PorcelainTestCase):
def setUp(self):
super(PathToTreeTests, self).setUp()
self.fp = os.path.join(self.test_dir, "bar")
with open(self.fp, "w") as f:
f.write("something")
oldcwd = os.getcwd()
self.addCleanup(os.chdir, oldcwd)
os.chdir(self.test_dir)
def test_path_to_tree_path_base(self):
self.assertEqual(b"bar", porcelain.path_to_tree_path(self.test_dir, self.fp))
self.assertEqual(b"bar", porcelain.path_to_tree_path(".", "./bar"))
self.assertEqual(b"bar", porcelain.path_to_tree_path(".", "bar"))
cwd = os.getcwd()
self.assertEqual(
b"bar", porcelain.path_to_tree_path(".", os.path.join(cwd, "bar"))
)
self.assertEqual(b"bar", porcelain.path_to_tree_path(cwd, "bar"))
def test_path_to_tree_path_syntax(self):
self.assertEqual(b"bar", porcelain.path_to_tree_path(".", "./bar"))
def test_path_to_tree_path_error(self):
with self.assertRaises(ValueError):
with tempfile.TemporaryDirectory() as od:
porcelain.path_to_tree_path(od, self.fp)
def test_path_to_tree_path_rel(self):
cwd = os.getcwd()
os.mkdir(os.path.join(self.repo.path, "foo"))
os.mkdir(os.path.join(self.repo.path, "foo/bar"))
try:
os.chdir(os.path.join(self.repo.path, "foo/bar"))
with open("baz", "w") as f:
f.write("contents")
self.assertEqual(b"bar/baz", porcelain.path_to_tree_path("..", "baz"))
self.assertEqual(
b"bar/baz",
porcelain.path_to_tree_path(
os.path.join(os.getcwd(), ".."),
os.path.join(os.getcwd(), "baz"),
),
)
self.assertEqual(
b"bar/baz",
porcelain.path_to_tree_path("..", os.path.join(os.getcwd(), "baz")),
)
self.assertEqual(
b"bar/baz",
porcelain.path_to_tree_path(os.path.join(os.getcwd(), ".."), "baz"),
)
finally:
os.chdir(cwd)
class GetObjectByPathTests(PorcelainTestCase):
def test_simple(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path,
message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>",
)
self.assertEqual(b"BAR", porcelain.get_object_by_path(self.repo, "foo").data)
self.assertEqual(b"BAR", porcelain.get_object_by_path(self.repo, b"foo").data)
def test_encoding(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path,
message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>",
encoding=b"utf-8",
)
self.assertEqual(b"BAR", porcelain.get_object_by_path(self.repo, "foo").data)
self.assertEqual(b"BAR", porcelain.get_object_by_path(self.repo, b"foo").data)
def test_missing(self):
self.assertRaises(KeyError, porcelain.get_object_by_path, self.repo, "foo")
class WriteTreeTests(PorcelainTestCase):
def test_simple(self):
fullpath = os.path.join(self.repo.path, "foo")
with open(fullpath, "w") as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
self.assertEqual(
b"d2092c8a9f311f0311083bf8d177f2ca0ab5b241",
porcelain.write_tree(self.repo),
)
class ActiveBranchTests(PorcelainTestCase):
def test_simple(self):
self.assertEqual(b"master", porcelain.active_branch(self.repo))
| 35.727628
| 141
| 0.606705
|
d65e31a66b9b91376d1d4aa5232ebd38559c7f06
| 12,836
|
py
|
Python
|
code/python/StocksAPIforDigitalPortals/v2/fds/sdk/StocksAPIforDigitalPortals/model/stock_notation_screener_search_data_ratios_price_free_cash_flow.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/StocksAPIforDigitalPortals/v2/fds/sdk/StocksAPIforDigitalPortals/model/stock_notation_screener_search_data_ratios_price_free_cash_flow.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/StocksAPIforDigitalPortals/v2/fds/sdk/StocksAPIforDigitalPortals/model/stock_notation_screener_search_data_ratios_price_free_cash_flow.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.StocksAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.StocksAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.StocksAPIforDigitalPortals.model.stock_notation_screener_search_data_currency_dependent_key_figures_market_capitalization_maximum import StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMaximum
from fds.sdk.StocksAPIforDigitalPortals.model.stock_notation_screener_search_data_currency_dependent_key_figures_market_capitalization_minimum import StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMinimum
globals()['StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMaximum'] = StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMaximum
globals()['StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMinimum'] = StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMinimum
class StockNotationScreenerSearchDataRatiosPriceFreeCashFlow(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
        of type self; it must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
        of type self; it must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'minimum': (StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMinimum,), # noqa: E501
'maximum': (StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMaximum,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'minimum': 'minimum', # noqa: E501
'maximum': 'maximum', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""StockNotationScreenerSearchDataRatiosPriceFreeCashFlow - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
minimum (StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMinimum): [optional] # noqa: E501
maximum (StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMaximum): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""StockNotationScreenerSearchDataRatiosPriceFreeCashFlow - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
minimum (StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMinimum): [optional] # noqa: E501
maximum (StockNotationScreenerSearchDataCurrencyDependentKeyFiguresMarketCapitalizationMaximum): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 47.895522
| 239
| 0.615223
|
fd1ecede9fdfc14f41bb7300ce6d8417207f82e0
| 959
|
py
|
Python
|
app/inventory/migrations/0001_initial.py
|
borismnq/easy-inventory
|
fb6f53d5cdd7b1386b3c2a3d9b8795f3e9b636b3
|
[
"MIT"
] | 2
|
2020-12-10T05:03:15.000Z
|
2020-12-10T05:03:27.000Z
|
app/inventory/migrations/0001_initial.py
|
borismnq/easy-inventory
|
fb6f53d5cdd7b1386b3c2a3d9b8795f3e9b636b3
|
[
"MIT"
] | null | null | null |
app/inventory/migrations/0001_initial.py
|
borismnq/easy-inventory
|
fb6f53d5cdd7b1386b3c2a3d9b8795f3e9b636b3
|
[
"MIT"
] | 1
|
2020-12-10T05:15:22.000Z
|
2020-12-10T05:15:22.000Z
|
# Generated by Django 3.1.4 on 2020-12-05 19:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Product",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=200)),
("description", models.CharField(blank=True, max_length=300)),
("price", models.DecimalField(decimal_places=2, max_digits=6)),
("quantity", models.DecimalField(decimal_places=2, max_digits=6)),
("sku", models.IntegerField(blank=True)),
],
)
]
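# For reference only (a hedged reconstruction, not part of the migration): the
# CreateModel operation above corresponds roughly to a models.py definition like
#     class Product(models.Model):
#         name = models.CharField(max_length=200)
#         description = models.CharField(blank=True, max_length=300)
#         price = models.DecimalField(decimal_places=2, max_digits=6)
#         quantity = models.DecimalField(decimal_places=2, max_digits=6)
#         sku = models.IntegerField(blank=True)
# Assuming the app label is "inventory", it is applied with
# `python manage.py migrate inventory`.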
| 29.060606
| 82
| 0.478624
|
c7fc7157d85ba6e9f63fae0ad409c35d11593ab2
| 558
|
py
|
Python
|
prev_exams_and_ans/2019ccc/q3_cold_compress/cold_compress.py
|
Yueleng/ccc_cco_material
|
e064a76a8bdc07cbfdba3ec8f6849cc1b90317e0
|
[
"MIT"
] | null | null | null |
prev_exams_and_ans/2019ccc/q3_cold_compress/cold_compress.py
|
Yueleng/ccc_cco_material
|
e064a76a8bdc07cbfdba3ec8f6849cc1b90317e0
|
[
"MIT"
] | null | null | null |
prev_exams_and_ans/2019ccc/q3_cold_compress/cold_compress.py
|
Yueleng/ccc_cco_material
|
e064a76a8bdc07cbfdba3ec8f6849cc1b90317e0
|
[
"MIT"
] | null | null | null |
num_of_lines = int(input())
def cold_compress(cum, rs):
N = len(cum)
    # base case: nothing left to consume, emit the accumulated count/char pairs
if (rs == ""):
output_string = ""
for s in cum:
output_string += str(s) + " "
return output_string
    # recursive case: extend the current run or start a new one
if (N >= 2 and cum[N-1] == rs[0]):
cnt = cum[N-2]
cnt += 1
cum[N-2] = cnt
return cold_compress(cum, rs[1:])
else:
cum.append(1)
cum.append(rs[0])
return cold_compress(cum, rs[1:])
for i in range(0, num_of_lines):
print(cold_compress([], input()))
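# Worked example (illustrative): for the input line "YYXYY" the recursion above
# prints "2 Y 1 X 2 Y " (it leaves a trailing space). A minimal iterative
# equivalent using itertools.groupby, shown for comparison only and never called:
from itertools import groupby
def cold_compress_iterative(line):
    return " ".join("{0} {1}".format(len(list(group)), char) for char, group in groupby(line))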
| 21.461538
| 41
| 0.503584
|
28260e33f0ea245d305422017e6b6dbae7ee6831
| 9,024
|
py
|
Python
|
main.py
|
OldedTester/simpy_simu
|
30edffe5180cb46b176c667c96493318072bdb5f
|
[
"MIT"
] | 1
|
2021-04-18T14:36:25.000Z
|
2021-04-18T14:36:25.000Z
|
main.py
|
OldedTester/simpy_simu
|
30edffe5180cb46b176c667c96493318072bdb5f
|
[
"MIT"
] | null | null | null |
main.py
|
OldedTester/simpy_simu
|
30edffe5180cb46b176c667c96493318072bdb5f
|
[
"MIT"
] | null | null | null |
import simpy
import random
import matplotlib.pyplot as plt
# Use simpy to simulate Toyota's automobile production line
class Toyota_Factory:
def __init__(self, env):
# input process
self.machinery = simpy.Container(env, capacity=machinery_capacity, init=initial_machinery)
self.machinery_control = env.process(self.machinery_stock_control(env))
self.components = simpy.Container(env, capacity=components_capacity, init=initial_components)
self.components_control = env.process(self.components_stock_control(env))
self.assembler = simpy.Container(env, capacity=assembler_capacity, init=0)
self.assembler_control = env.process(self.assembler_stock_control(env))
# activities
#self.parts_sequencing = env.process(self.parts_sequencing_control(env))
#self.setting_up_schedule = env.process(self.setting_up_schedule_control(env))
#self.production_commence = env.process(self.production_commence_control(env))
# output
#self.quality_check = env.process(self.quality_check_control(env))
#self.vehicle_assemble = env.process(self.vehicle_assemble_control(env))
self.dispatch = simpy.Container(env, capacity=dispatch_capacity, init=0)
self.dispatch_control = env.process(self.dispatch_vehicle_control(env))
# Closure & monitor
#self.transport_dealer = env.process(self.transport_dealer_control(env))
self.env_status_monitor = env.process(self.env_status(env))
def machinery_stock_control(self, env):
yield env.timeout(0)
while True:
if self.machinery.level <= machinery_critial_stock:
                print('vehicle machinery storage ({0}) below the warning storage level DAY={1},HOUR={2}'.format(
                    self.machinery.level, int(env.now / 8), env.now % 8))
print('please contact toyota of japan!')
print('----------------------------------')
yield env.timeout(16)
print('vehicle machinery arrived. DAY = {0}, HOUR={1}'.format(int(env.now / 8), env.now % 8))
yield self.machinery.put(300)
print('vehicle machinery storage added to {0}'.format(
self.machinery.level))
print('----------------------------------')
yield env.timeout(8*2)
else:
yield env.timeout(1)
def components_stock_control(self, env):
yield env.timeout(0)
while True:
if self.components.level <= components_critical_stock:
                print('components storage ({0}) below the warning storage level DAY={1},HOUR={2}'.format(
                    self.components.level, int(env.now / 8), env.now % 8))
print('please contact the components department of toyota')
print('----------------------------------')
yield env.timeout(9)
print('components arrived.DAY={0},HOUR={1}'.format(
int(env.now / 8), env.now % 8))
yield self.components.put(300)
print('storage components are added up to {0}'.format(
self.components.level))
print('----------------------------------')
yield env.timeout(8*2)
else:
yield env.timeout(1)
def assembler_stock_control(self, env):
global vehicle_made
yield env.timeout(0)
while True:
if self.assembler.level >= dispatch_max_capacity:
print('assembler storage is :{0}, waiting for production commence. DAY:{1},HOUR:{2}'.format(self.assembler.level, int(env.now / 8), env.now % 8))
print('xxxxxxxxxxxxxxxxxxx')
yield env.timeout(4)
print('production commence take {0} machinery and {1} components. DAY:{2},HOUR:{3}' .format(self.machinery.level,self.components.level,int(env.now / 8), env.now % 8))
yield self.assembler.get(self.assembler.level)
print('xxxxxxxxxx')
yield env.timeout(8*2)
else:
yield env.timeout(1)
def dispatch_vehicle_control(self,env):
yield env.timeout(0)
while True:
if self.dispatch.level >= 50:
                print('dispatch storage is: {0}, waiting for transport to the dealer. DAY={1},HOUR={2}'.format(
                    self.dispatch.level, int(env.now / 8), env.now % 8))
print('----------------------------------')
yield env.process(self.transport_dealer(env))
print('----------------------------------')
yield env.timeout(8*2)
else:
yield env.timeout(1)
def transport_dealer(self,env):
dealer_time = max(1,int(abs(random.gauss(mean_dealer, std_dealer))))
yield env.timeout(dealer_time)
dealer_get = int(self.dispatch.level/2)
yield self.dispatch.get(dealer_get)
        print('dealer takes {0} vehicles. DAY={1},HOUR={2}'.format(dealer_get, int(env.now / 8), env.now % 8))
def env_status(self, env):
current_x_list = []
current_y_list = []
while True:
plt.title('dispatch by superman')
print('storage of dispatch:{0}. DAY={1}, HOUR={2}'.format(self.dispatch.level,int(env.now / 8),env.now % 8))
current_x_list.append(env.now/8)
current_y_list.append(self.dispatch.level)
plt.clf()
plt.plot(current_x_list,current_y_list,ls='-',lw=2,color='#ff7f0e')
plt.show()
plt.pause(0.001)
yield env.timeout(1)
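# A minimal, self-contained sketch (not wired into the factory above) of the
# reorder-point pattern that the *_stock_control processes implement: when a
# Container level drops to a critical stock, wait a supplier lead time and
# refill it. The names and numbers here are illustrative only.
def demo_reorder_point(env, stock, critical_level=10, refill_amount=50, lead_time=16):
    while True:
        if stock.level <= critical_level:
            yield env.timeout(lead_time)  # supplier lead time in hours
            yield stock.put(refill_amount)  # replenish the container
        else:
            yield env.timeout(1)  # re-check the stock level next hour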
def assembler(env, toyota_factory):
    while True:
        yield toyota_factory.machinery.get(10)
        yield toyota_factory.components.get(10)
        assembler_time = abs(random.gauss(mean_assembler, std_assembler))
        yield env.timeout(assembler_time)
        # wait for quality check to finish
        quality_check_time = abs(random.gauss(mean_quality_check, std_quality_check))
        yield env.timeout(quality_check_time)
        yield toyota_factory.dispatch.put(10)
def dispatcher(env, toyota_factory):
    while True:
        # wait to be transported to the dealer
        dealer_time = abs(random.gauss(mean_dealer, std_dealer))
        yield env.timeout(dealer_time)
        yield toyota_factory.dispatch.get(1)
# Generators
def assembler_gen(env, toyota_factory):
    for i in range(num_assembler):
        env.process(assembler(env, toyota_factory))
    yield env.timeout(0)
# Generators
def dispatcher_gen(env, toyota_factory):
    for i in range(num_dispatcher):
        env.process(dispatcher(env, toyota_factory))
    yield env.timeout(0)
if __name__ == '__main__':
vehicle_made = 0
print('simulation begins:')
print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
# 8 hours /day
hours = 8
# 30 days /month
days = 30
# total time unit
total_time = hours * days
# init storage capacity
machinery_capacity = 300
components_capacity = 300
assembler_capacity = 300
dispatch_capacity = 300
# initial machinery and components storage capacity
initial_machinery = 100
initial_components = 100
dispatch_max_capacity = 80
num_machinery = 2
mean_machinery = 1
std_machinery = 0.1
num_components = 2
mean_components = 1
std_components = 0.1
num_assembler = 2
mean_assembler = 1
std_assembler = 0.1
num_dispatcher = 2
mean_dispatcher = 1
std_dispatcher = 0.1
mean_dealer = 1
std_dealer = 0.2
mean_dealer_get = 1
std_dealer_get = 0.2
mean_quality_check = 1
    std_quality_check = 0.2
# machinery storage warning capacity
machinery_critial_stock = (4 / mean_machinery) * num_machinery
# components storage warning capacity
components_critical_stock = (4 / mean_components) * num_components
env = simpy.Environment()
toyota_factory = Toyota_Factory(env)
vehicle_gen = env.process(assembler_gen(env, toyota_factory))
dispatch_gen = env.process(dispatcher_gen(env, toyota_factory))
env.run(until=total_time)
    print('current machinery stock: {0}; current components stock: {1}'.format(
        toyota_factory.machinery.level, toyota_factory.components.level))
    print('current assembler stock: {0}; current dispatch stock: {1}'.format(
        toyota_factory.assembler.level, toyota_factory.dispatch.level))
    print('in this cycle the total vehicle count is: {0}'.format(vehicle_made + toyota_factory.dispatch.level))
print(f'----------------------------------')
print(f'----------------------------------')
print(f'simulation finished!')
| 41.018182
| 185
| 0.600842
|
50586836662157f0c1d68a7e8acc3a6f823a27a7
| 71,786
|
py
|
Python
|
rss/rss.py
|
kaogurai/aikaterna
|
3419e489b0ebc148e41e7f945e504c4ada2e8ee8
|
[
"Apache-2.0"
] | 98
|
2017-09-12T01:52:17.000Z
|
2022-03-17T16:43:01.000Z
|
rss/rss.py
|
kaogurai/aikaterna
|
3419e489b0ebc148e41e7f945e504c4ada2e8ee8
|
[
"Apache-2.0"
] | 147
|
2016-12-01T04:39:05.000Z
|
2022-02-13T02:20:14.000Z
|
rss/rss.py
|
kaogurai/aikaterna
|
3419e489b0ebc148e41e7f945e504c4ada2e8ee8
|
[
"Apache-2.0"
] | 160
|
2016-12-01T20:19:44.000Z
|
2022-03-30T10:32:41.000Z
|
import asyncio
import aiohttp
from bs4 import BeautifulSoup
import copy
import datetime
import discord
import feedparser
import imghdr
import io
import logging
import re
import time
from typing import Optional
from types import MappingProxyType, SimpleNamespace
from urllib.parse import urlparse
from redbot.core import checks, commands, Config
from redbot.core.utils.chat_formatting import bold, box, escape, humanize_list, pagify
from .color import Color
from .quiet_template import QuietTemplate
from .rss_feed import RssFeed
from .tag_type import INTERNAL_TAGS, VALID_IMAGES, TagType
log = logging.getLogger("red.aikaterna.rss")
IPV4_RE = re.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}")
IPV6_RE = re.compile("([a-f0-9:]+:+)+[a-f0-9]+")
__version__ = "1.6.2"
class RSS(commands.Cog):
"""RSS feeds for your server."""
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, 2761331001, force_registration=True)
self.config.register_channel(feeds={})
self.config.register_global(use_published=["www.youtube.com"])
self._post_queue = asyncio.PriorityQueue()
self._post_queue_size = None
self._read_feeds_loop = None
self._headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:83.0) Gecko/20100101 Firefox/83.0"}
async def red_delete_data_for_user(self, **kwargs):
"""Nothing to delete"""
return
def initialize(self):
self._read_feeds_loop = self.bot.loop.create_task(self.read_feeds())
def cog_unload(self):
if self._read_feeds_loop:
self._read_feeds_loop.cancel()
def _add_content_images(self, bs4_soup: BeautifulSoup, rss_object: feedparser.util.FeedParserDict):
"""
$content_images should always be marked as a special tag as the tags will
be dynamically generated based on the content included in the latest post.
"""
content_images = bs4_soup.find_all("img")
if content_images:
for i, image in enumerate(content_images):
tag_name = f"content_image{str(i + 1).zfill(2)}"
try:
rss_object[tag_name] = image["src"]
rss_object["is_special"].append(tag_name)
except KeyError:
pass
return rss_object
async def _add_feed(self, ctx, feed_name: str, channel: discord.TextChannel, url: str):
"""Helper for rss add."""
rss_exists = await self._check_feed_existing(ctx, feed_name, channel)
if not rss_exists:
feedparser_obj = await self._fetch_feedparser_object(url)
if not feedparser_obj:
await ctx.send("Couldn't fetch that feed: there were no feed objects found.")
return
# sort everything by time if a time value is present
if feedparser_obj.entries:
# this feed has posts
sorted_feed_by_post_time = await self._sort_by_post_time(feedparser_obj.entries)
else:
# this feed does not have posts, but it has a header with channel information
sorted_feed_by_post_time = [feedparser_obj.feed]
# add additional tags/images/clean html
feedparser_plus_obj = await self._add_to_feedparser_object(sorted_feed_by_post_time[0], url)
rss_object = await self._convert_feedparser_to_rssfeed(feed_name, feedparser_plus_obj, url)
async with self.config.channel(channel).feeds() as feed_data:
feed_data[feed_name] = rss_object.to_json()
msg = (
f"Feed `{feed_name}` added in channel: {channel.mention}\n"
f"List the template tags with `{ctx.prefix}rss listtags` "
f"and modify the template using `{ctx.prefix}rss template`."
)
await ctx.send(msg)
else:
await ctx.send(f"There is already an existing feed named {bold(feed_name)} in {channel.mention}.")
return
def _add_generic_html_plaintext(self, bs4_soup: BeautifulSoup):
"""
        Bs4's .text attribute on a soup strips newlines and spaces.
        This provides newlines and more readable content.
"""
text = ""
for element in bs4_soup.descendants:
if isinstance(element, str):
text += element
elif element.name == "br" or element.name == "p" or element.name == "li":
text += "\n"
text = re.sub("\\n+", "\n", text)
text = text.replace("*", "\\*")
text = text.replace("SC_OFF", "").replace("SC_ON", "\n")
text = text.replace("[link]", "").replace("[comments]", "")
return escape(text)
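    # Worked example (illustrative input, not from a real feed): for
    # "<p>Hello <b>world</b></p><p>Again</p>", BeautifulSoup(...).text collapses
    # the paragraphs to "Hello worldAgain", while the loop above emits
    # "\nHello world\nAgain", inserting a newline at every <p>/<br>/<li> and
    # replacing any literal "*" with "\*" so it cannot trigger Discord markdown.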
async def _append_bs4_tags(self, rss_object: feedparser.util.FeedParserDict, url: str):
"""Append bs4-discovered tags to an rss_feed/feedparser object."""
rss_object["is_special"] = []
soup = None
        temp_rss_object = copy.deepcopy(rss_object)
        for tag_name, tag_content in temp_rss_object.items():
if tag_name in INTERNAL_TAGS:
continue
tag_content_check = await self._get_tag_content_type(tag_content)
if tag_content_check == TagType.HTML:
# this is a tag that is only html content
try:
soup = BeautifulSoup(tag_content, "html.parser")
except TypeError:
pass
# this is a standard html format summary_detail tag
# the tag was determined to be html through the type attrib that
# was attached from the feed publisher but it's really a dict.
try:
soup = BeautifulSoup(tag_content["value"], "html.parser")
except (KeyError, TypeError):
pass
# this is a standard html format content or summary tag
try:
soup = BeautifulSoup(tag_content[0]["value"], "html.parser")
except (KeyError, TypeError):
pass
if soup:
rss_object[f"{tag_name}_plaintext"] = self._add_generic_html_plaintext(soup)
if tag_content_check == TagType.LIST:
tags_list = []
tags_content_counter = 0
for list_item in tag_content:
list_item_check = await self._get_tag_content_type(list_item)
# for common "links" format or when "content" is a list
list_html_content_counter = 0
if list_item_check == TagType.HTML:
list_tags = ["value", "href"]
for tag in list_tags:
try:
url_check = await self._valid_url(list_item[tag], feed_check=False)
if not url_check:
# bs4 will cry if you try to give it a url to parse, so let's only
# parse non-url content
tag_content = BeautifulSoup(list_item[tag], "html.parser")
tag_content = self._add_generic_html_plaintext(tag_content)
else:
tag_content = list_item[tag]
list_html_content_counter += 1
name = f"{tag_name}_plaintext{str(list_html_content_counter).zfill(2)}"
rss_object[name] = tag_content
rss_object["is_special"].append(name)
except (KeyError, TypeError):
pass
if list_item_check == TagType.DICT:
authors_content_counter = 0
# common "authors" tag format
try:
authors_content_counter += 1
name = f"{tag_name}_plaintext{str(authors_content_counter).zfill(2)}"
tag_content = BeautifulSoup(list_item["name"], "html.parser")
rss_object[name] = tag_content.get_text()
rss_object["is_special"].append(name)
except KeyError:
pass
# common "tags" tag format
try:
tag = list_item["term"]
tags_content_counter += 1
name = f"{tag_name}_plaintext{str(tags_content_counter).zfill(2)}"
rss_object[name] = tag
rss_object["is_special"].append(name)
tags_list.append(tag)
except KeyError:
pass
if len(tags_list) > 0:
rss_object["tags_list"] = tags_list
rss_object["tags_plaintext_list"] = humanize_list(tags_list)
rss_object["is_special"].append("tags_list")
rss_object["is_special"].append("tags_plaintext_list")
# if image dict tag exists, check for an image
try:
rss_object["image_plaintext"] = rss_object["image"]["href"]
rss_object["is_special"].append("image_plaintext")
except KeyError:
pass
# if media_thumbnail or media_content exists, return the first friendly url
try:
rss_object["media_content_plaintext"] = rss_object["media_content"][0]["url"]
rss_object["is_special"].append("media_content_plaintext")
except KeyError:
pass
try:
rss_object["media_thumbnail_plaintext"] = rss_object["media_thumbnail"][0]["url"]
rss_object["is_special"].append("media_thumbnail_plaintext")
except KeyError:
pass
# change published_parsed and updated_parsed into a datetime object for embed footers
for time_tag in ["updated_parsed", "published_parsed"]:
try:
if isinstance(rss_object[time_tag], time.struct_time):
rss_object[f"{time_tag}_datetime"] = datetime.datetime(*rss_object[time_tag][:6])
except KeyError:
pass
if soup:
rss_object = self._add_content_images(soup, rss_object)
# add special tag/special site formatter here if needed in the future
return rss_object
async def _check_channel_permissions(self, ctx, channel: discord.TextChannel, addl_send_messages_check=True):
"""Helper for rss functions."""
if not channel.permissions_for(ctx.me).read_messages:
await ctx.send("I don't have permissions to read that channel.")
return False
elif not channel.permissions_for(ctx.author).read_messages:
await ctx.send("You don't have permissions to read that channel.")
return False
elif addl_send_messages_check:
# check for send messages perm if needed, like on an rss add
# not needed on something like rss delete
if not channel.permissions_for(ctx.me).send_messages:
await ctx.send("I don't have permissions to send messages in that channel.")
return False
else:
return True
else:
return True
async def _check_feed_existing(self, ctx, feed_name: str, channel: discord.TextChannel):
"""Helper for rss functions."""
rss_feed = await self.config.channel(channel).feeds.get_raw(feed_name, default=None)
if not rss_feed:
return False
return True
async def _delete_feed(self, ctx, feed_name: str, channel: discord.TextChannel):
"""Helper for rss delete."""
rss_exists = await self._check_feed_existing(ctx, feed_name, channel)
if rss_exists:
async with self.config.channel(channel).feeds() as rss_data:
rss_data.pop(feed_name, None)
return True
return False
async def _edit_template(self, ctx, feed_name: str, channel: discord.TextChannel, template: str):
"""Helper for rss template."""
rss_exists = await self._check_feed_existing(ctx, feed_name, channel)
if rss_exists:
async with self.config.channel(channel).feeds.all() as feed_data:
if feed_name not in feed_data:
feed_data[feed_name] = {}
feed_data[feed_name]["template"] = template
return True
return False
@staticmethod
def _find_website(website_url: str):
"""Helper for rss parse."""
result = urlparse(website_url)
if result.scheme:
# https://www.website.com/...
if result.netloc:
website = result.netloc
else:
return None
else:
# www.website.com/...
if result.path:
website = result.path.split("/")[0]
else:
return None
if len(website.split(".")) < 3:
return None
return website
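    # Worked examples (illustrative): _find_website("https://www.website.com/feed")
    # returns "www.website.com", while _find_website("website.com/feed") returns
    # None because the bare domain has fewer than three dot-separated parts.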
async def _get_channel_object(self, channel_id: int):
"""Helper for rss feed loop."""
channel = self.bot.get_channel(channel_id)
if not channel:
try:
channel = await self.bot.fetch_channel(channel_id)
except (discord.errors.Forbidden, discord.errors.NotFound):
return None
if channel and channel.permissions_for(channel.guild.me).send_messages:
return channel
return None
async def _get_feed_names(self, channel: discord.TextChannel):
"""Helper for rss list."""
feed_list = []
space = "\N{SPACE}"
all_feeds = await self.config.channel(channel).feeds.all()
if not all_feeds:
return ["None."]
longest_name_len = len(max(list(all_feeds.keys()), key=len))
for name, data in all_feeds.items():
extra_spacing = longest_name_len - len(name)
feed_list.append(f"{name}{space * extra_spacing} {data['url']}")
return feed_list
async def _get_tag_content_type(self, tag_content):
"""
Tag content type can be:
str, list, dict (FeedParserDict), bool, datetime.datetime object or time.struct_time
"""
try:
if tag_content["type"] == "text/html":
return TagType(2)
except (KeyError, TypeError):
html_tags = ["<a>", "<a href", "<img", "<p>", "<b>", "</li>", "</ul>"]
if any(word in str(tag_content) for word in html_tags):
return TagType(2)
if isinstance(tag_content, dict):
return TagType(3)
elif isinstance(tag_content, list):
return TagType(4)
else:
return TagType(1)
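    # Worked examples (illustrative): {"type": "text/html", "value": "<p>hi</p>"}
    # maps to TagType(2) (HTML), a plain dict without html markers to TagType(3),
    # a list to TagType(4), and a bare string such as "hello" falls through to
    # TagType(1).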
async def _get_url_content(self, url):
"""Helper for rss add/_valid_url."""
try:
timeout = aiohttp.ClientTimeout(total=20)
async with aiohttp.ClientSession(headers=self._headers, timeout=timeout) as session:
async with session.get(url) as resp:
html = await resp.read()
return html, None
except aiohttp.client_exceptions.ClientConnectorError:
friendly_msg = "There was an OSError or the connection failed."
msg = f"aiohttp failure accessing feed at url:\n\t{url}"
log.error(msg, exc_info=True)
return None, friendly_msg
except aiohttp.client_exceptions.ClientPayloadError as e:
friendly_msg = "The website closed the connection prematurely or the response was malformed.\n"
friendly_msg += f"The error returned was: `{str(e)}`\n"
friendly_msg += "For more technical information, check your bot's console or logs."
msg = f"content error while reading feed at url:\n\t{url}"
log.error(msg, exc_info=True)
return None, friendly_msg
except asyncio.exceptions.TimeoutError:
friendly_msg = "The bot timed out while trying to access that content."
msg = f"asyncio timeout while accessing feed at url:\n\t{url}"
log.error(msg, exc_info=True)
return None, friendly_msg
except Exception:
friendly_msg = "There was an unexpected error. Check your console for more information."
msg = f"General failure accessing feed at url:\n\t{url}"
log.error(msg, exc_info=True)
return None, friendly_msg
async def _fetch_feedparser_object(self, url: str):
"""Get a full feedparser object from a url: channel header + items."""
html, error_msg = await self._get_url_content(url)
if not html:
return SimpleNamespace(entries=None, error=error_msg, url=url)
feedparser_obj = feedparser.parse(html)
if feedparser_obj.bozo:
error_msg = f"Bozo feed: feedparser is unable to parse the response from {url}.\n"
error_msg += f"Feedparser error message: `{feedparser_obj.bozo_exception}`"
return SimpleNamespace(entries=None, error=error_msg, url=url)
return feedparser_obj
async def _add_to_feedparser_object(self, feedparser_obj: feedparser.util.FeedParserDict, url: str):
"""
Input: A feedparser object
Process: Append custom tags to the object from the custom formatters
Output: A feedparser object with additional attributes
"""
feedparser_plus_obj = await self._append_bs4_tags(feedparser_obj, url)
feedparser_plus_obj["template_tags"] = sorted(feedparser_plus_obj.keys())
return feedparser_plus_obj
async def _convert_feedparser_to_rssfeed(
self, feed_name: str, feedparser_plus_obj: feedparser.util.FeedParserDict, url: str
):
"""
Converts any feedparser/feedparser_plus object to an RssFeed object.
Used in rss add when saving a new feed.
"""
entry_time = await self._time_tag_validation(feedparser_plus_obj)
rss_object = RssFeed(
name=feed_name.lower(),
last_title=feedparser_plus_obj["title"],
last_link=feedparser_plus_obj["link"],
last_time=entry_time,
template="$title\n$link",
url=url,
template_tags=feedparser_plus_obj["template_tags"],
is_special=feedparser_plus_obj["is_special"],
embed=True,
)
return rss_object
async def _sort_by_post_time(self, feedparser_obj: feedparser.util.FeedParserDict):
base_url = urlparse(feedparser_obj[0].get("link")).netloc
use_published_parsed_override = await self.config.use_published()
if base_url in use_published_parsed_override:
time_tag = ["published_parsed"]
else:
time_tag = ["updated_parsed", "published_parsed"]
for tag in time_tag:
try:
baseline_time = time.struct_time((2021, 1, 1, 12, 0, 0, 4, 1, -1))
sorted_feed_by_post_time = sorted(feedparser_obj, key=lambda x: x.get(tag, baseline_time), reverse=True)
break
except TypeError:
sorted_feed_by_post_time = feedparser_obj
return sorted_feed_by_post_time
async def _time_tag_validation(self, entry: feedparser.util.FeedParserDict):
"""Gets a unix timestamp if it's available from a single feedparser post entry."""
feed_link = entry.get("link", None)
if feed_link:
base_url = urlparse(feed_link).netloc
else:
return None
# check for a feed time override, if a feed is being problematic regarding updated_parsed
# usage (i.e. a feed entry keeps reposting with no perceived change in content)
use_published_parsed_override = await self.config.use_published()
if base_url in use_published_parsed_override:
entry_time = entry.get("published_parsed", None)
else:
entry_time = entry.get("updated_parsed", None)
if not entry_time:
entry_time = entry.get("published_parsed", None)
if isinstance(entry_time, time.struct_time):
entry_time = time.mktime(entry_time)
if entry_time:
return int(entry_time)
return None
@staticmethod
async def _title_case(phrase: str):
exceptions = ["a", "and", "in", "of", "or", "on", "the"]
lowercase_words = re.split(" ", phrase.lower())
final_words = [lowercase_words[0].capitalize()]
final_words += [word if word in exceptions else word.capitalize() for word in lowercase_words[1:]]
return " ".join(final_words)
async def _update_last_scraped(
self,
channel: discord.TextChannel,
feed_name: str,
current_feed_title: str,
current_feed_link: str,
current_feed_time: int,
):
"""Updates last title and last link seen for comparison on next feed pull."""
async with self.config.channel(channel).feeds() as feed_data:
try:
feed_data[feed_name]["last_title"] = current_feed_title
feed_data[feed_name]["last_link"] = current_feed_link
feed_data[feed_name]["last_time"] = current_feed_time
except KeyError:
# the feed was deleted during a _get_current_feed execution
pass
async def _valid_url(self, url: str, feed_check=True):
"""Helper for rss add."""
try:
result = urlparse(url)
except Exception as e:
log.exception(e, exc_info=e)
return False
if all([result.scheme, result.netloc, result.path]):
if feed_check:
text, error_msg = await self._get_url_content(url)
if not text:
raise NoFeedContent(error_msg)
return False
rss = feedparser.parse(text)
if rss.bozo:
error_message = rss.feed.get("summary", str(rss))[:1500]
error_message = re.sub(IPV4_RE, "[REDACTED IP ADDRESS]", error_message)
error_message = re.sub(IPV6_RE, "[REDACTED IP ADDRESS]", error_message)
msg = f"Bozo feed: feedparser is unable to parse the response from {url}.\n\n"
msg += "Received content preview:\n"
msg += box(error_message)
raise NoFeedContent(msg)
return False
else:
return True
else:
return True
else:
return False
async def _validate_image(self, url: str):
"""Helper for _get_current_feed_embed."""
try:
timeout = aiohttp.ClientTimeout(total=20)
async with aiohttp.ClientSession(headers=self._headers, timeout=timeout) as session:
async with session.get(url) as resp:
image = await resp.read()
img = io.BytesIO(image)
image_test = imghdr.what(img)
return image_test
except aiohttp.client_exceptions.InvalidURL:
return None
except asyncio.exceptions.TimeoutError:
log.error(f"asyncio timeout while accessing image at url:\n\t{url}", exc_info=True)
return None
except Exception:
log.error(f"Failure accessing image in embed feed at url:\n\t{url}", exc_info=True)
return None
@commands.guild_only()
@commands.group()
@checks.mod_or_permissions(manage_channels=True)
async def rss(self, ctx):
"""RSS feed stuff."""
pass
@rss.command(name="add")
async def _rss_add(self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None, *, url: str):
"""
Add an RSS feed to a channel.
Defaults to the current channel if no channel is specified.
"""
if feed_name.startswith("<#"):
# someone typed a channel name but not a feed name
msg = "Try again with a feed name included in the right spot so that you can refer to the feed later.\n"
msg += f"Example: `{ctx.prefix}rss add feed_name channel_name feed_url`"
await ctx.send(msg)
return
channel = channel or ctx.channel
channel_permission_check = await self._check_channel_permissions(ctx, channel)
if not channel_permission_check:
return
async with ctx.typing():
try:
valid_url = await self._valid_url(url)
except NoFeedContent as e:
await ctx.send(str(e))
return
if valid_url:
await self._add_feed(ctx, feed_name.lower(), channel, url)
else:
await ctx.send("Invalid or unavailable URL.")
@rss.group(name="embed")
async def _rss_embed(self, ctx):
"""Embed feed settings."""
pass
@_rss_embed.command(name="color", aliases=["colour"])
async def _rss_embed_color(
self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None, *, color: str = None
):
"""
Set an embed color for a feed.
Use this command with no color to reset to the default.
`color` must be a hex code like #990000, a [Discord color name](https://discordpy.readthedocs.io/en/latest/api.html#colour),
or a [CSS3 color name](https://www.w3.org/TR/2018/REC-css-color-3-20180619/#svg-color).
"""
channel = channel or ctx.channel
rss_feed = await self.config.channel(channel).feeds.get_raw(feed_name, default=None)
if not rss_feed:
await ctx.send("That feed name doesn't exist in this channel.")
return
embed_toggle = rss_feed["embed"]
embed_state_message = ""
if not embed_toggle:
embed_state_message += (
f"{bold(feed_name)} is not currently set to be in an embed. "
f"Toggle it on with `{ctx.prefix}rss embed toggle`.\n"
)
if not color:
async with self.config.channel(channel).feeds() as feed_data:
feed_data[feed_name]["embed_color"] = None
await ctx.send(
f"{embed_state_message}The color for {bold(feed_name)} has been reset. "
"Use this command with a color argument to set a color for this feed."
)
return
color = color.replace(" ", "_")
hex_code = await Color()._color_converter(color)
if not hex_code:
await ctx.send(
"Not a valid color code. Use a hex code like #990000, a "
"Discord color name or a CSS3 color name.\n"
"<https://discordpy.readthedocs.io/en/latest/api.html#colour>\n"
"<https://www.w3.org/TR/2018/REC-css-color-3-20180619/#svg-color>"
)
return
user_facing_hex = hex_code.replace("0x", "#")
color_name = await Color()._hex_to_css3_name(hex_code)
# 0xFFFFFF actually doesn't show up as white in an embed
# so let's make it close enough to count
if hex_code == "0xFFFFFF":
hex_code = "0xFFFFFE"
async with self.config.channel(channel).feeds() as feed_data:
# data is always a 0xFFFFFF style value
feed_data[feed_name]["embed_color"] = hex_code
await ctx.send(f"Embed color for {bold(feed_name)} set to {user_facing_hex} ({color_name}).")
@_rss_embed.command(name="image")
async def _rss_embed_image(
self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None, image_tag_name: str = None
):
"""
Set a tag to be a large embed image.
This image will be applied to the last embed in the paginated list.
Use this command with no image_tag_name to clear the embed image.
"""
channel = channel or ctx.channel
rss_feed = await self.config.channel(channel).feeds.get_raw(feed_name, default=None)
if not rss_feed:
await ctx.send("That feed name doesn't exist in this channel.")
return
embed_toggle = rss_feed["embed"]
embed_state_message = ""
if not embed_toggle:
embed_state_message += (
f"{bold(feed_name)} is not currently set to be in an embed. "
f"Toggle it on with `{ctx.prefix}rss embed toggle`.\n"
)
if image_tag_name is not None:
if image_tag_name.startswith("$"):
image_tag_name = image_tag_name.strip("$")
else:
msg = "You must use a feed tag for this setting. "
msg += f"Feed tags start with `$` and can be found by using `{ctx.prefix}rss listtags` "
msg += "with the saved feed name.\nImages that are scraped from feed content are usually "
msg += "stored under the tags styled similar to `$content_image01`: subsequent scraped images "
msg += "will be in tags named `$content_image02`, `$content_image03`, etc. Not every feed entry "
msg += "will have the same amount of scraped image tags. Images can also be found under tags named "
msg += "`$media_content_plaintext`, if present.\nExperiment with tags by setting them as your "
msg += (
f"template with `{ctx.prefix}rss template` and using `{ctx.prefix}rss force` to view the content."
)
await ctx.send(msg)
return
async with self.config.channel(channel).feeds() as feed_data:
feed_data[feed_name]["embed_image"] = image_tag_name
if image_tag_name:
await ctx.send(f"{embed_state_message}Embed image set to the ${image_tag_name} tag.")
else:
await ctx.send(
"Embed image has been cleared. Use this command with a tag name if you intended to set an image tag."
)
@_rss_embed.command(name="thumbnail")
async def _rss_embed_thumbnail(
self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None, thumbnail_tag_name: str = None
):
"""
Set a tag to be a thumbnail image.
This thumbnail will be applied to the first embed in the paginated list.
Use this command with no thumbnail_tag_name to clear the embed thumbnail.
"""
channel = channel or ctx.channel
rss_feed = await self.config.channel(channel).feeds.get_raw(feed_name, default=None)
if not rss_feed:
await ctx.send("That feed name doesn't exist in this channel.")
return
embed_toggle = rss_feed["embed"]
embed_state_message = ""
if not embed_toggle:
embed_state_message += (
f"{bold(feed_name)} is not currently set to be in an embed. "
f"Toggle it on with `{ctx.prefix}rss embed toggle`.\n"
)
if thumbnail_tag_name is not None:
if thumbnail_tag_name.startswith("$"):
thumbnail_tag_name = thumbnail_tag_name.strip("$")
else:
msg = "You must use a feed tag for this setting. "
msg += f"Feed tags start with `$` and can be found by using `{ctx.prefix}rss listtags` "
msg += "with the saved feed name.\nImages that are scraped from feed content are usually "
msg += "stored under the tags styled similar to `$content_image01`: subsequent scraped images "
msg += "will be in tags named `$content_image02`, `$content_image03`, etc. Not every feed entry "
msg += "will have the same amount of scraped image tags. Images can also be found under tags named "
msg += "`$media_content_plaintext`, if present.\nExperiment with tags by setting them as your "
msg += (
f"template with `{ctx.prefix}rss template` and using `{ctx.prefix}rss force` to view the content."
)
await ctx.send(msg)
return
async with self.config.channel(channel).feeds() as feed_data:
feed_data[feed_name]["embed_thumbnail"] = thumbnail_tag_name
if thumbnail_tag_name:
await ctx.send(f"{embed_state_message}Embed thumbnail set to the ${thumbnail_tag_name} tag.")
else:
await ctx.send(
"Embed thumbnail has been cleared. "
"Use this command with a tag name if you intended to set a thumbnail tag."
)
@_rss_embed.command(name="toggle")
async def _rss_embed_toggle(self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None):
"""
Toggle whether a feed is sent in an embed or not.
If the bot doesn't have permissions to post embeds,
the feed will always be plain text, even if the embed
toggle is set.
"""
channel = channel or ctx.channel
rss_feed = await self.config.channel(channel).feeds.get_raw(feed_name, default=None)
if not rss_feed:
await ctx.send("That feed name doesn't exist in this channel.")
return
embed_toggle = rss_feed["embed"]
toggle_text = "disabled" if embed_toggle else "enabled"
async with self.config.channel(channel).feeds() as feed_data:
feed_data[feed_name]["embed"] = not embed_toggle
await ctx.send(f"Embeds for {bold(feed_name)} are {toggle_text}.")
@rss.command(name="find")
async def _rss_find(self, ctx, website_url: str):
"""
Attempts to find RSS feeds from a URL/website.
The site must have identified their feed in the html of the page based on RSS feed type standards.
"""
async with ctx.typing():
timeout = aiohttp.ClientTimeout(total=20)
async with aiohttp.ClientSession(headers=self._headers, timeout=timeout) as session:
try:
async with session.get(website_url) as response:
soup = BeautifulSoup(await response.text(errors="replace"), "html.parser")
except (aiohttp.client_exceptions.ClientConnectorError, aiohttp.client_exceptions.ClientPayloadError):
await ctx.send("I can't reach that website.")
return
except aiohttp.client_exceptions.InvalidURL:
await ctx.send(
"That seems to be an invalid URL. Use a full website URL like `https://www.site.com/`."
)
return
except asyncio.exceptions.TimeoutError:
await ctx.send("The site didn't respond in time or there was no response.")
return
if "403 Forbidden" in soup.get_text():
await ctx.send("I received a '403 Forbidden' message while trying to reach that site.")
return
if not soup:
await ctx.send("I didn't find anything at all on that link.")
return
msg = ""
url_parse = urlparse(website_url)
base_url = url_parse.netloc
url_scheme = url_parse.scheme
feed_url_types = ["application/rss+xml", "application/atom+xml", "text/xml", "application/rdf+xml"]
for feed_type in feed_url_types:
possible_feeds = soup.find_all("link", rel="alternate", type=feed_type, href=True)
for feed in possible_feeds:
feed_url = feed.get("href", None)
ls_feed_url = feed_url.lstrip("/")
if not feed_url:
continue
if feed_url.startswith("//"):
final_url = f"{url_scheme}:{feed_url}"
elif (not ls_feed_url.startswith(url_scheme)) and (not ls_feed_url.startswith(base_url)):
final_url = f"{url_scheme}://{base_url}/{ls_feed_url}"
elif ls_feed_url.startswith(base_url):
final_url = f"{url_scheme}://{base_url}"
else:
final_url = feed_url
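# Worked example of the href resolution above (illustrative), for website_url
# "https://www.site.com/blog" where url_scheme is "https" and base_url is "www.site.com":
#   href="/rss.xml"                      -> https://www.site.com/rss.xml
#   href="//www.site.com/rss.xml"        -> https://www.site.com/rss.xml
#   href="https://www.site.com/rss.xml"  -> left unchanged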
msg += f"[Feed Title]: {feed.get('title', None)}\n"
msg += f"[Feed URL]: {final_url}\n\n"
if msg:
await ctx.send(box(msg, lang="ini"))
else:
await ctx.send("No RSS feeds found in the link provided.")
@rss.command(name="force")
async def _rss_force(self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None):
"""Forces a feed alert."""
channel = channel or ctx.channel
channel_permission_check = await self._check_channel_permissions(ctx, channel)
if not channel_permission_check:
return
feeds = await self.config.all_channels()
try:
feeds[channel.id]
except KeyError:
await ctx.send("There are no feeds in this channel.")
return
if feed_name not in feeds[channel.id]["feeds"]:
await ctx.send("That feed name doesn't exist in this channel.")
return
rss_feed = feeds[channel.id]["feeds"][feed_name]
await self.get_current_feed(channel, feed_name, rss_feed, force=True)
@rss.command(name="limit")
async def _rss_limit(
self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None, character_limit: int = None
):
"""
Set a character limit for feed posts. Use 0 for unlimited.
RSS posts are naturally split at around 2000 characters to fit within the Discord character limit per message.
If you only want the first embed or first message in a post feed to show, use 2000 or less characters for this setting.
Note that this setting applies the character limit to the entire post, for all template values on the feed together.
For example, if the template is `$title\\n$content\\n$link`, and title + content + link is longer than the limit, the link will not show.
"""
extra_msg = ""
if character_limit is None:
await ctx.send_help()
return
if character_limit < 0:
await ctx.send("Character limit cannot be less than zero.")
return
if character_limit > 20000:
character_limit = 0
if 0 < character_limit < 20:
extra_msg = "Character limit has a 20 character minimum.\n"
character_limit = 20
channel = channel or ctx.channel
rss_feed = await self.config.channel(channel).feeds.get_raw(feed_name, default=None)
if not rss_feed:
await ctx.send("That feed name doesn't exist in this channel.")
return
async with self.config.channel(channel).feeds() as feed_data:
feed_data[feed_name]["limit"] = character_limit
characters = f"approximately {character_limit}" if character_limit > 0 else "an unlimited amount of"
await ctx.send(f"{extra_msg}Character limit for {bold(feed_name)} is now {characters} characters.")
@rss.command(name="list")
async def _rss_list(self, ctx, channel: discord.TextChannel = None):
"""List saved feeds for this channel or a specific channel."""
channel = channel or ctx.channel
channel_permission_check = await self._check_channel_permissions(ctx, channel)
if not channel_permission_check:
return
feeds = await self._get_feed_names(channel)
msg = f"[ Available Feeds for #{channel.name} ]\n\n\t"
if feeds:
msg += "\n\t".join(sorted(feeds))
else:
msg += "\n\tNone."
for page in pagify(msg, delims=["\n"], page_length=1800):
await ctx.send(box(page, lang="ini"))
@rss.command(name="listtags")
async def _rss_list_tags(self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None):
"""List the tags available from a specific feed."""
channel = channel or ctx.channel
channel_permission_check = await self._check_channel_permissions(ctx, channel)
if not channel_permission_check:
return
rss_feed = await self.config.channel(channel).feeds.get_raw(feed_name, default=None)
if not rss_feed:
await ctx.send("No feed with that name in this channel.")
return
async with ctx.typing():
await self._rss_list_tags_helper(ctx, rss_feed, feed_name)
async def _rss_list_tags_helper(self, ctx, rss_feed: dict, feed_name: str):
"""Helper function for rss listtags."""
msg = f"[ Available Tags for {feed_name} ]\n\n\t"
feedparser_obj = await self._fetch_feedparser_object(rss_feed["url"])
if not feedparser_obj:
await ctx.send("Couldn't fetch that feed.")
return
if feedparser_obj.entries:
# this feed has posts
feedparser_plus_obj = await self._add_to_feedparser_object(feedparser_obj.entries[0], rss_feed["url"])
else:
# this feed does not have posts, but it has a header with channel information
feedparser_plus_obj = await self._add_to_feedparser_object(feedparser_obj.feed, rss_feed["url"])
for tag_name, tag_content in sorted(feedparser_plus_obj.items()):
if tag_name in INTERNAL_TAGS:
# these tags attached to the rss feed object are for internal handling options
continue
tag_content_check = await self._get_tag_content_type(tag_content)
if tag_content_check == TagType.HTML:
msg += f"[X] ${tag_name}\n\t"
elif tag_content_check == TagType.DICT:
msg += f"[\\] ${tag_name} \n\t"
elif tag_content_check == TagType.LIST:
msg += f"[-] ${tag_name} \n\t"
elif tag_name in feedparser_plus_obj["is_special"]:
msg += f"[*] ${tag_name} \n\t"
else:
msg += f"[ ] ${tag_name} \n\t"
msg += "\n\n\t[X] = html | [\\] = dictionary | [-] = list | [ ] = plain text"
msg += "\n\t[*] = specially-generated tag, may not be present in every post"
await ctx.send(box(msg, lang="ini"))
@checks.is_owner()
@rss.group(name="parse")
async def _rss_parse(self, ctx):
"""
Change feed parsing for a specific domain.
This is a global change per website.
The default is to use the feed's updated_parsed tag, and adding a website to this list will change the check to published_parsed.
Some feeds may spam feed entries as they are updating the updated_parsed slot on their feed, but not updating feed content.
In this case we can force specific sites to use the published_parsed slot instead by adding the website to this override list.
"""
pass
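# --- Illustrative sketch (not part of this cog) --------------------------------------
# The override described above, in miniature: prefer the feed's updated_parsed slot
# unless the entry's website is in the published_parsed override list (analogous to the
# list stored in config.use_published()). The helper name is an assumption made for
# this example only; feedparser exposes both slots as time.struct_time when present.
def choose_time_slot(entry, website, published_overrides):
    if website in published_overrides:
        return entry.get("published_parsed")
    return entry.get("updated_parsed") or entry.get("published_parsed")
# --------------------------------------------------------------------------------------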
@_rss_parse.command(name="add")
async def _rss_parse_add(self, ctx, website_url: str):
"""
Add a website to the list for a time parsing override.
Use a website link formatted like `www.website.com` or `https://www.website.com`.
For more information, use `[p]help rss parse`.
"""
website = self._find_website(website_url)
if not website:
msg = f"I can't seem to find a website in `{website_url}`. "
msg += "Use something like `https://www.website.com/` or `www.website.com`."
await ctx.send(msg)
return
override_list = await self.config.use_published()
if website in override_list:
await ctx.send(f"`{website}` is already in the parsing override list.")
else:
override_list.append(website)
await self.config.use_published.set(override_list)
await ctx.send(f"`{website}` was added to the parsing override list.")
@_rss_parse.command(name="list")
async def _rss_parse_list(self, ctx):
"""
Show the list for time parsing overrides.
For more information, use `[p]help rss parse`.
"""
override_list = await self.config.use_published()
if not override_list:
msg = "No site overrides saved."
else:
msg = "Active for:\n" + "\n".join(override_list)
await ctx.send(box(msg))
@_rss_parse.command(name="remove", aliases=["delete", "del"])
async def _rss_parse_remove(self, ctx, website_url: str = None):
"""
Remove a website from the list for a time parsing override.
Use a website link formatted like `www.website.com` or `https://www.website.com`.
For more information, use `[p]help rss parse`.
"""
website = self._find_website(website_url)
override_list = await self.config.use_published()
if website in override_list:
override_list.remove(website)
await self.config.use_published.set(override_list)
await ctx.send(f"`{website}` was removed from the parsing override list.")
else:
await ctx.send(f"`{website}` isn't in the parsing override list.")
@rss.command(name="remove", aliases=["delete", "del"])
async def _rss_remove(self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None):
"""
Removes a feed from a channel.
Defaults to the current channel if no channel is specified.
"""
channel = channel or ctx.channel
channel_permission_check = await self._check_channel_permissions(ctx, channel, addl_send_messages_check=False)
if not channel_permission_check:
return
success = await self._delete_feed(ctx, feed_name, channel)
if success:
await ctx.send("Feed deleted.")
else:
await ctx.send("Feed not found!")
@rss.command(name="showtemplate")
async def _rss_show_template(self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None):
"""Show the template in use for a specific feed."""
channel = channel or ctx.channel
channel_permission_check = await self._check_channel_permissions(ctx, channel)
if not channel_permission_check:
return
rss_feed = await self.config.channel(channel).feeds.get_raw(feed_name, default=None)
if not rss_feed:
await ctx.send("No feed with that name in this channel.")
return
space = "\N{SPACE}"
embed_toggle = f"[ ] Embed:{space*16}Off" if not rss_feed["embed"] else f"[X] Embed:{space*16}On"
embed_image = (
f"[ ] Embed image tag:{space*6}None"
if not rss_feed["embed_image"]
else f"[X] Embed image tag:{space*6}${rss_feed['embed_image']}"
)
embed_thumbnail = (
f"[ ] Embed thumbnail tag:{space*2}None"
if not rss_feed["embed_thumbnail"]
else f"[X] Embed thumbnail tag:{space*2}${rss_feed['embed_thumbnail']}"
)
hex_color = rss_feed.get("embed_color", None)
if hex_color:
color_name = await Color()._hex_to_css3_name(hex_color)
hex_color = hex_color.lstrip("0x")
embed_color = (
f"[ ] Embed hex color:{space*6}None"
if not hex_color
else f"[X] Embed hex color:{space*6}{hex_color} ({color_name})"
)
allowed_tags = rss_feed.get("allowed_tags", [])
if not allowed_tags:
tag_msg = "[ ] No restrictions\n\tAll tags are allowed."
else:
tag_msg = "[X] Feed is restricted to posts that include:"
for tag in allowed_tags:
tag_msg += f"\n\t{await self._title_case(tag)}"
character_limit = rss_feed.get("limit", 0)
if character_limit == 0:
length_msg = "[ ] Feed length is unlimited."
else:
length_msg = f"[X] Feed length is capped at {character_limit} characters."
embed_settings = f"{embed_toggle}\n{embed_color}\n{embed_image}\n{embed_thumbnail}"
rss_template = rss_feed["template"].replace("\n", "\\n").replace("\t", "\\t")
msg = f"Template for {bold(feed_name)}:\n\n`{rss_template}`\n\n{box(embed_settings, lang='ini')}\n{box(tag_msg, lang='ini')}\n{box(length_msg, lang='ini')}"
for page in pagify(msg, delims=["\n"], page_length=1800):
await ctx.send(page)
@rss.group(name="tag")
async def _rss_tag(self, ctx):
"""RSS post tag qualification."""
pass
@_rss_tag.command(name="allow")
async def _rss_tag_allow(
self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None, *, tag: str = None
):
"""
Set an allowed tag for a feed to be posted. The tag must match exactly (without regard to title casing).
No regex or placeholder qualification.
Tags can be found in `[p]rss listtags` under `$tags` or `$tags_list` (if tags are present in the feed - not all feeds have tags).
"""
channel = channel or ctx.channel
rss_feed = await self.config.channel(channel).feeds.get_raw(feed_name, default=None)
if not rss_feed:
await ctx.send("That feed name doesn't exist in this channel.")
return
async with self.config.channel(channel).feeds() as feed_data:
allowed_tags = feed_data[feed_name].get("allowed_tags", [])
if tag.lower() in [x.lower() for x in allowed_tags]:
return await ctx.send(
f"{bold(await self._title_case(tag))} is already in the allowed list for {bold(feed_name)}."
)
allowed_tags.append(tag.lower())
feed_data[feed_name]["allowed_tags"] = allowed_tags
await ctx.send(
f"{bold(await self._title_case(tag))} was added to the list of allowed tags for {bold(feed_name)}. "
"If a feed post's `$tags` does not include this value, the feed will not post."
)
@_rss_tag.command(name="allowlist")
async def _rss_tag_allowlist(self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None):
"""
List allowed tags for feed post qualification.
"""
channel = channel or ctx.channel
rss_feed = await self.config.channel(channel).feeds.get_raw(feed_name, default=None)
if not rss_feed:
await ctx.send("That feed name doesn't exist in this channel.")
return
msg = f"[ Allowed Tags for {feed_name} ]\n\n\t"
allowed_tags = rss_feed.get("allowed_tags", [])
if not allowed_tags:
msg += "All tags are allowed."
else:
for tag in allowed_tags:
msg += f"{await self._title_case(tag)}\n"
await ctx.send(box(msg, lang="ini"))
@_rss_tag.command(name="remove", aliases=["delete"])
async def _rss_tag_remove(
self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None, *, tag: str = None
):
"""
Remove a tag from the allow list. The tag must match exactly (without regard to title casing).
No regex or placeholder qualification.
"""
channel = channel or ctx.channel
rss_feed = await self.config.channel(channel).feeds.get_raw(feed_name, default=None)
if not rss_feed:
await ctx.send("That feed name doesn't exist in this channel.")
return
async with self.config.channel(channel).feeds() as feed_data:
allowed_tags = feed_data[feed_name].get("allowed_tags", [])
try:
allowed_tags.remove(tag.lower())
feed_data[feed_name]["allowed_tags"] = allowed_tags
await ctx.send(
f"{bold(await self._title_case(tag))} was removed from the list of allowed tags for {bold(feed_name)}."
)
except ValueError:
await ctx.send(
f"{bold(await self._title_case(tag))} was not found in the allow list for {bold(feed_name)}."
)
@rss.command(name="template")
async def _rss_template(
self, ctx, feed_name: str, channel: Optional[discord.TextChannel] = None, *, template: str = None
):
"""
Set a template for the feed alert.
Each variable must start with $, valid variables can be found with `[p]rss listtags`.
"""
channel = channel or ctx.channel
channel_permission_check = await self._check_channel_permissions(ctx, channel)
if not channel_permission_check:
return
if not template:
await ctx.send_help()
return
template = template.replace("\\t", "\t")
template = template.replace("\\n", "\n")
success = await self._edit_template(ctx, feed_name, channel, template)
if success:
await ctx.send("Template added successfully.")
else:
await ctx.send("Feed not found!")
@rss.command(name="version", hidden=True)
async def _rss_version(self, ctx):
"""Show the RSS version."""
await ctx.send(f"RSS version {__version__}")
async def get_current_feed(self, channel: discord.TextChannel, name: str, rss_feed: dict, *, force: bool = False):
"""Takes an RSS feed and builds an object with all extra tags"""
log.debug(f"getting feed {name} on cid {channel.id}")
url = rss_feed["url"]
last_title = rss_feed["last_title"]
# last_link is a get for feeds saved before RSS 1.1.5 which won't have this attrib till it's checked once
last_link = rss_feed.get("last_link", None)
# last_time is a get for feeds saved before RSS 1.1.7 which won't have this attrib till it's checked once
last_time = rss_feed.get("last_time", None)
template = rss_feed["template"]
message = None
feedparser_obj = await self._fetch_feedparser_object(url)
if not feedparser_obj:
return
try:
log.debug(f"{feedparser_obj.error} Channel: {channel.id}")
return
except AttributeError:
pass
# sorting the entire feedparser object by updated_parsed time if it exists, if not then published_parsed
# certain feeds can be rearranged by a user, causing all posts to be out of sequential post order
# or some feeds are out of time order by default
if feedparser_obj.entries:
# this feed has posts
sorted_feed_by_post_time = await self._sort_by_post_time(feedparser_obj.entries)
else:
# this feed does not have posts, but it has a header with channel information
sorted_feed_by_post_time = [feedparser_obj.feed]
if not force:
entry_time = await self._time_tag_validation(sorted_feed_by_post_time[0])
if (last_time and entry_time) is not None:
if last_time > entry_time:
log.debug("Not posting because new entry is older than last saved entry.")
return
try:
title = sorted_feed_by_post_time[0].title
except AttributeError:
title = ""
await self._update_last_scraped(channel, name, title, sorted_feed_by_post_time[0].link, entry_time)
feedparser_plus_objects = []
for entry in sorted_feed_by_post_time:
# sometimes there's no title attribute and feedparser doesn't really play nice with that
try:
entry_title = entry.title
except AttributeError:
entry_title = ""
# find the updated_parsed (checked first) or a published_parsed tag if they are present
entry_time = await self._time_tag_validation(entry)
# we only need one feed entry if this is from rss force
if force:
feedparser_plus_obj = await self._add_to_feedparser_object(entry, url)
feedparser_plus_objects.append(feedparser_plus_obj)
break
# if this feed has a published_parsed or an updated_parsed tag, it will use
# that time value present in entry_time to verify that the post is new.
elif (entry_time and last_time) is not None:
# now that we are sorting by/saving updated_parsed instead of published_parsed (rss 1.4.0+)
# we can post an update for a post that already exists and has already been posted.
# this will only work for rss sites that are single-use like cloudflare status, discord status, etc
# where an update on the last post should be posted
# this can be overridden by a bot owner in the rss parse command, per problematic website
if (last_title == entry_title) and (last_link == entry.link) and (entry_time > last_time):
log.debug(f"New update found for an existing post in {name} on cid {channel.id}")
feedparser_plus_obj = await self._add_to_feedparser_object(entry, url)
feedparser_plus_objects.append(feedparser_plus_obj)
# regular feed qualification after this
if (last_title != entry_title) and (last_link != entry.link) and (last_time < entry_time):
log.debug(f"New entry found via time validation for feed {name} on cid {channel.id}")
feedparser_plus_obj = await self._add_to_feedparser_object(entry, url)
feedparser_plus_objects.append(feedparser_plus_obj)
if (last_title == "" and entry_title == "") and (last_link != entry.link) and (last_time < entry_time):
log.debug(f"New entry found via time validation for feed {name} on cid {channel.id} - no title")
feedparser_plus_obj = await self._add_to_feedparser_object(entry, url)
feedparser_plus_objects.append(feedparser_plus_obj)
# this is a post that has no time information attached to it and we can only
# verify that the title and link did not match the previously posted entry
elif (entry_time or last_time) is None:
if last_title == entry_title and last_link == entry.link:
log.debug(f"Breaking rss entry loop for {name} on {channel.id}, via link match")
break
else:
log.debug(f"New entry found for feed {name} on cid {channel.id} via new link or title")
feedparser_plus_obj = await self._add_to_feedparser_object(entry, url)
feedparser_plus_objects.append(feedparser_plus_obj)
# we found a match for a previous feed post
else:
log.debug(
f"Breaking rss entry loop for {name} on {channel.id}, we found where we are supposed to be caught up to"
)
break
# nothing in the whole feed matched to what was saved, so let's only post 1 instead of every single post
if len(feedparser_plus_objects) == len(sorted_feed_by_post_time):
log.debug(f"Couldn't match anything for feed {name} on cid {channel.id}, only posting 1 post")
feedparser_plus_objects = [feedparser_plus_objects[0]]
if not feedparser_plus_objects:
# early-exit so that we don't dispatch when there's no updates
return
# post oldest first
feedparser_plus_objects.reverse()
# list of feedparser_plus_objects wrapped in MappingProxyType
# filled during the loop below
proxied_dicts = []
for feedparser_plus_obj in feedparser_plus_objects:
try:
curr_title = feedparser_plus_obj.title
except AttributeError:
curr_title = ""
except IndexError:
log.debug(f"No entries found for feed {name} on cid {channel.id}")
return
# allowed tag verification section
allowed_tags = rss_feed.get("allowed_tags", [])
if len(allowed_tags) > 0:
allowed_post_tags = [x.lower() for x in allowed_tags]
feed_tag_list = [x.lower() for x in feedparser_plus_obj.get("tags_list", [])]
intersection = list(set(feed_tag_list).intersection(allowed_post_tags))
if len(intersection) == 0:
log.debug(
f"{name} feed post in {channel.name} ({channel.id}) was denied because of an allowed tag mismatch."
)
continue
# starting to fill out the template for feeds that passed tag verification (if present)
to_fill = QuietTemplate(template)
message = to_fill.quiet_safe_substitute(name=bold(name), **feedparser_plus_obj)
if len(message.strip(" ")) == 0:
message = None
if not message:
log.debug(f"{name} feed in {channel.name} ({channel.id}) has no valid tags, not posting anything.")
return
embed_toggle = rss_feed["embed"]
red_embed_settings = await self.bot.embed_requested(channel, None)
embed_permissions = channel.permissions_for(channel.guild.me).embed_links
rss_limit = rss_feed.get("limit", 0)
if rss_limit > 0:
# rss_limit needs + 8 characters for pagify counting codeblock characters
message = list(pagify(message, delims=["\n", " "], priority=True, page_length=(rss_limit + 8)))[0]
if embed_toggle and red_embed_settings and embed_permissions:
await self._get_current_feed_embed(channel, rss_feed, feedparser_plus_obj, message)
else:
for page in pagify(message, delims=["\n"]):
await channel.send(page)
# This event can be used in 3rd-party using listeners.
# This may (and most likely will) change in the future
# so I suggest accepting **kwargs in the listeners using this event.
#
# channel: discord.TextChannel
# The channel feed alert went to.
# feed_data: Mapping[str, Any]
# Read-only mapping with feed's data.
# The available data depends on what this cog needs
# and there most likely will be changes here in future.
# Available keys include: `name`, `template`, `url`, `embed`, etc.
# feedparser_dict: Mapping[str, Any]
# Read-only mapping with parsed data from the feed.
# See documentation of feedparser.FeedParserDict for more information.
# force: bool
# True if the update was forced (through `[p]rss force`), False otherwise.
feedparser_dict_proxy = MappingProxyType(feedparser_plus_obj)
proxied_dicts.append(feedparser_dict_proxy)
self.bot.dispatch(
"aikaternacogs_rss_message",
channel=channel,
feed_data=MappingProxyType(rss_feed),
feedparser_dict=feedparser_dict_proxy,
force=force,
)
# This event can be used in 3rd-party using listeners.
# This may (and most likely will) change in the future
# so I suggest accepting **kwargs in the listeners using this event.
#
# channel: discord.TextChannel
# The channel feed alerts went to.
# feed_data: Mapping[str, Any]
# Read-only mapping with feed's data.
# The available data depends on what this cog needs
# and there most likely will be changes here in future.
# Available keys include: `name`, `template`, `url`, `embed`, etc.
# feedparser_dicts: List[Mapping[str, Any]]
# List of read-only mappings with parsed data
# from each **new** entry in the feed.
# See documentation of feedparser.FeedParserDict for more information.
# force: bool
# True if the update was forced (through `[p]rss force`), False otherwise.
self.bot.dispatch(
"aikaternacogs_rss_feed_update",
channel=channel,
feed_data=MappingProxyType(rss_feed),
feedparser_dicts=proxied_dicts,
force=force,
)
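# --- Illustrative sketch (not part of this cog) ---------------------------------------
# How a third-party cog might consume the events dispatched above. As the comments note,
# listeners should accept **kwargs because the payload may change; the class name and
# the print calls below are assumptions made only for this example.
from redbot.core import commands as red_commands


class ExampleRSSListener(red_commands.Cog):
    @red_commands.Cog.listener()
    async def on_aikaternacogs_rss_message(self, *, channel, feed_data, feedparser_dict, force, **kwargs):
        # feed_data and feedparser_dict arrive as read-only mappings (MappingProxyType)
        print(f"RSS alert posted in #{channel.name} for feed url {feed_data.get('url')}")

    @red_commands.Cog.listener()
    async def on_aikaternacogs_rss_feed_update(self, *, channel, feed_data, feedparser_dicts, force, **kwargs):
        print(f"{len(feedparser_dicts)} new feed entries posted in #{channel.name}")
# ---------------------------------------------------------------------------------------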
async def _get_current_feed_embed(
self,
channel: discord.TextChannel,
rss_feed: dict,
feedparser_plus_obj: feedparser.util.FeedParserDict,
message: str,
):
embed_list = []
for page in pagify(message, delims=["\n"]):
embed = discord.Embed(description=page)
if rss_feed["embed_color"]:
color = int(rss_feed["embed_color"], 16)
embed.color = discord.Color(color)
embed_list.append(embed)
# Add published timestamp to the last footer if it exists
time_tags = ["updated_parsed_datetime", "published_parsed_datetime"]
for time_tag in time_tags:
try:
published_time = feedparser_plus_obj[time_tag]
embed = embed_list[-1]
embed.timestamp = published_time
break
except KeyError:
pass
# Add embed image to last embed if it's set
try:
embed_image_tag = rss_feed["embed_image"]
embed_image_url = feedparser_plus_obj[embed_image_tag]
img_type = await self._validate_image(embed_image_url)
if img_type in VALID_IMAGES:
embed = embed_list[-1]
embed.set_image(url=embed_image_url)
except KeyError:
pass
# Add embed thumbnail to first embed if it's set
try:
embed_thumbnail_tag = rss_feed["embed_thumbnail"]
embed_thumbnail_url = feedparser_plus_obj[embed_thumbnail_tag]
img_type = await self._validate_image(embed_thumbnail_url)
if img_type in VALID_IMAGES:
embed = embed_list[0]
embed.set_thumbnail(url=embed_thumbnail_url)
except KeyError:
pass
for embed in embed_list:
await channel.send(embed=embed)
async def read_feeds(self):
"""Feed poster loop."""
await self.bot.wait_until_red_ready()
await self._put_feeds_in_queue()
self._post_queue_size = self._post_queue.qsize()
while True:
try:
queue_item = await self._get_next_in_queue()
if not queue_item:
# the queue is empty
config_data = await self.config.all_channels()
if not config_data:
# nothing to check
log.debug(f"Sleeping, nothing to do")
await asyncio.sleep(30)
continue
if self._post_queue_size < 300:
# less than 300 entries to check means 1/sec check times
# the wait is (5 min - entry count) before posting again
wait = 300 - self._post_queue_size
else:
# more than 300 entries means we used the whole 5 min
# to check and post feeds so don't wait any longer to start again
wait = 0
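# Worked example of the pacing above (illustrative numbers): with 120 queued entries the
# loop sleeps 300 - 120 = 180 seconds before refilling the queue; with 450 entries it
# refills immediately (wait = 0) because checking already used up the ~5 minute window.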
log.debug(f"Waiting {wait}s before starting...")
await asyncio.sleep(wait)
await self._put_feeds_in_queue()
if self._post_queue.qsize() > self._post_queue_size:
# there's been more feeds added so let's update the total size
# so feeds have the proper wait time @ > 300 feeds
log.debug(f"Updating total queue size to {self._post_queue.qsize()}")
self._post_queue_size = self._post_queue.qsize()
continue
else:
try:
# queue_item is a List of channel_priority: int, total_priority: int, queue_item: SimpleNamespace
await self.get_current_feed(
queue_item[2].channel, queue_item[2].feed_name, queue_item[2].feed_data
)
except aiohttp.client_exceptions.InvalidURL:
log.debug(f"Feed at {url} is bad or took too long to respond.")
continue
if self._post_queue_size < 300:
wait = 1
else:
wait = (300 - 10) / self._post_queue_size
log.debug(f"sleeping for {wait}...")
await asyncio.sleep(wait)
except asyncio.CancelledError:
break
except Exception as e:
log.error("An error has occurred in the RSS cog. Please report it.", exc_info=e)
continue
async def _put_feeds_in_queue(self):
log.debug("Putting feeds in queue")
try:
config_data = await self.config.all_channels()
total_index = 0
for channel_id, channel_feed_list in config_data.items():
channel = await self._get_channel_object(channel_id)
if not channel:
log.info(
f"Response channel {channel_id} not found, forbidden to access, or no perms to send messages, removing channel from config"
)
await self.config.channel_from_id(int(channel_id)).clear() # Remove entries from dead channel
continue
if await self.bot.cog_disabled_in_guild(self, channel.guild):
continue
for feed_key, feed in channel_feed_list.items():
for feed_name, feed_data in feed.items():
rss_feed = SimpleNamespace(channel=channel, feed_name=feed_name, feed_data=feed_data)
keys = list(feed.keys())
channel_index = keys.index(feed_name)
total_index += 1
queue_entry = [channel_index, total_index, rss_feed]
log.debug(f"Putting {channel_index}-{total_index}-{channel}-{feed_name} in queue")
await self._post_queue.put(queue_entry)
except Exception as e:
log.exception(e, exc_info=e)
async def _get_next_in_queue(self):
try:
to_check = self._post_queue.get_nowait()
except asyncio.queues.QueueEmpty:
return None
return to_check
class NoFeedContent(Exception):
def __init__(self, m):
self.message = m
def __str__(self):
return self.message
avg_line_length: 44.532258 | max_line_length: 164 | alphanum_fraction: 0.59279

hexsha: 02a215ee0948e817c240ed46cd01a6eadf7b38ca | size: 17,867 | ext: py | lang: Python
max_stars_repo_path: anuga/abstract_2d_finite_volumes/file_function.py | max_stars_repo_name: GeoscienceAustralia/anuga_core | max_stars_repo_head_hexsha: 372e21a5c1c88867437374c851f1ff629bd3dab3 | max_stars_repo_licenses: ["Python-2.0", "OLDAP-2.7"] | max_stars_count: 136 | max_stars_repo_stars_event_min_datetime: 2015-05-07T05:47:43.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-16T03:07:40.000Z
max_issues_repo_path: anuga/abstract_2d_finite_volumes/file_function.py | max_issues_repo_name: GeoscienceAustralia/anuga_core | max_issues_repo_head_hexsha: 372e21a5c1c88867437374c851f1ff629bd3dab3 | max_issues_repo_licenses: ["Python-2.0", "OLDAP-2.7"] | max_issues_count: 184 | max_issues_repo_issues_event_min_datetime: 2015-05-03T09:27:54.000Z | max_issues_repo_issues_event_max_datetime: 2021-12-20T04:22:48.000Z
max_forks_repo_path: anuga/abstract_2d_finite_volumes/file_function.py | max_forks_repo_name: GeoscienceAustralia/anuga_core | max_forks_repo_head_hexsha: 372e21a5c1c88867437374c851f1ff629bd3dab3 | max_forks_repo_licenses: ["Python-2.0", "OLDAP-2.7"] | max_forks_count: 70 | max_forks_repo_forks_event_min_datetime: 2015-03-18T07:35:22.000Z | max_forks_repo_forks_event_max_datetime: 2021-11-01T07:07:29.000Z
#!/usr/bin/env python
"""File function
Takes a file as input, and returns it as a mathematical function.
For example, you can load an arbitrary 2D heightfield mesh, and treat it as a
function like so:
F = file_function('my_mesh.sww', ...)
evaluated_point = F(x, y)
Values will be interpolated across the surface of the mesh. Holes in the mesh
have an undefined value.
"""
from builtins import str
from six import string_types
from builtins import range
from past.builtins import basestring
import numpy as num
from anuga.geospatial_data.geospatial_data import ensure_absolute
from anuga.file.netcdf import NetCDFFile
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
from anuga.utilities.numerical_tools import ensure_numeric
import anuga.utilities.log as log
def file_function(filename,
domain=None,
quantities=None,
interpolation_points=None,
time_thinning=1,
time_limit=None,
verbose=False,
use_cache=False,
boundary_polygon=None,
output_centroids=False):
"""Read time history of spatial data from NetCDF file and return
a callable object.
Input variables:
filename - Name of sww, tms or sts file
If the file has extension 'sww' then it is assumed to be spatio-temporal
or temporal and the callable object will have the form f(t,x,y) or f(t)
depending on whether the file contains spatial data
If the file has extension 'tms' then it is assumed to be temporal only
and the callable object will have the form f(t)
Either form will return interpolated values based on the input file
using the underlying interpolation_function.
domain - Associated domain object
If domain is specified, model time (domain.starttime)
will be checked and possibly modified.
All times are assumed to be in UTC
All spatial information is assumed to be in absolute UTM coordinates.
quantities - the name of the quantity to be interpolated or a
list of quantity names. The resulting function will return
a tuple of values - one for each quantity
If quantities are None, the default quantities are
['stage', 'xmomentum', 'ymomentum']
interpolation_points - list of absolute UTM coordinates for points (N x 2)
or geospatial object or points file name at which values are sought
time_thinning -
verbose -
use_cache: True means that caching of intermediate result of
Interpolation_function is attempted
boundary_polygon -
See Interpolation function in anuga.fit_interpolate.interpolation for
further documentation
"""
# FIXME (OLE): Should check origin of domain against that of file
# In fact, this is where origin should be converted to that of domain
# Also, check that file covers domain fully.
# Take into account:
# - domain's georef
# - sww file's georef
# - interpolation points as absolute UTM coordinates
if quantities is None:
if verbose:
msg = 'Quantities specified in file_function are None,'
msg += ' so using stage, xmomentum, and ymomentum in that order'
log.critical(msg)
quantities = ['stage', 'xmomentum', 'ymomentum']
# Use domain's starttime if available
if domain is not None:
domain_starttime = domain.get_starttime()
else:
domain_starttime = None
# Build arguments and keyword arguments for use with caching or apply.
args = (filename,)
# FIXME (Ole): Caching this function will not work well
# if domain is passed in as instances change hash code.
# Instead we pass in those attributes that are needed (and return them
# if modified)
kwargs = {'quantities': quantities,
'interpolation_points': interpolation_points,
'domain_starttime': domain_starttime,
'time_thinning': time_thinning,
'time_limit': time_limit,
'verbose': verbose,
'boundary_polygon': boundary_polygon,
'output_centroids': output_centroids}
# Call underlying engine with or without caching
if use_cache is True:
try:
from anuga.caching import cache
except ImportError:
msg = 'Caching was requested, but caching module '+\
'could not be imported'
raise Exception(msg)
f, starttime = cache(_file_function,
args, kwargs,
dependencies=[filename],
compression=False,
verbose=verbose)
else:
f, starttime = _file_function(*args, **kwargs)
#FIXME (Ole): Pass cache arguments, such as compression, in some sort of
#structure
f.starttime = starttime
f.filename = filename
if domain is not None:
#Update domain.starttime if it is *earlier* than starttime from file
if starttime > domain.starttime:
msg = 'WARNING: Start time as specified in domain (%f)' \
% domain.starttime
msg += ' is earlier than the starttime of file %s (%f).' \
% (filename, starttime)
msg += ' Modifying domain starttime accordingly.'
if verbose: log.critical(msg)
domain.set_starttime(starttime) #Modifying model time
if verbose: log.critical('Domain starttime is now set to %f'
% domain.starttime)
return f
def _file_function(filename,
quantities=None,
interpolation_points=None,
domain_starttime=None,
time_thinning=1,
time_limit=None,
verbose=False,
boundary_polygon=None,
output_centroids=False):
"""Internal function
See file_function for documentation
"""
assert isinstance(filename,string_types) or isinstance(filename, str),\
'First argument to File_function must be a string'
#try:
# fid = open(filename)
#except IOError, e:
# msg = 'File "%s" could not be opened: Error="%s"' % (filename, e)
# raise IOError(msg)
# read first line of file, guess file type
#line = fid.readline()
#fid.close()
import os
ext = os.path.splitext(filename)[1]
msg = 'Extension should be csv, sww, tms or sts'
assert ext in [".csv", ".sww", ".tms", ".sts"], msg
if ext in [".sww", ".tms", ".sts"]:
return get_netcdf_file_function(filename,
quantities,
interpolation_points,
domain_starttime,
time_thinning=time_thinning,
time_limit=time_limit,
verbose=verbose,
boundary_polygon=boundary_polygon,
output_centroids=output_centroids)
elif ext in [".csv"]:
# FIXME (Ole): Could add csv file here to address Ted Rigby's
# suggestion about reading hydrographs.
# This may also deal with the gist of ticket:289
raise Exception('Must be a NetCDF File')
else:
raise Exception('Must be a NetCDF File')
def get_netcdf_file_function(filename,
quantity_names=None,
interpolation_points=None,
domain_starttime=None,
time_thinning=1,
time_limit=None,
verbose=False,
boundary_polygon=None,
output_centroids=False):
"""Read time history of spatial data from NetCDF sww file and
return a callable object f(t,x,y)
which will return interpolated values based on the input file.
Model time (domain_starttime)
will be checked, possibly modified and returned
All times are assumed to be in UTC
See Interpolation function for further documentation
"""
# FIXME: Check that model origin is the same as file's origin
# (both in UTM coordinates)
# If not - modify those from file to match domain
# (origin should be passed in)
# Take this code from e.g. dem2pts in data_manager.py
# FIXME: Use geo_reference to read and write xllcorner...
import time, calendar
from anuga.config import time_format
# Open NetCDF file
if verbose: log.critical('Reading %s' % filename)
fid = NetCDFFile(filename, netcdf_mode_r)
if isinstance(quantity_names, string_types):
quantity_names = [quantity_names]
if quantity_names is None or len(quantity_names) < 1:
msg = 'No quantities are specified in file_function'
raise Exception(msg)
if interpolation_points is not None:
#interpolation_points = num.array(interpolation_points, num.float)
interpolation_points = ensure_absolute(interpolation_points)
msg = 'Points must be N x 2. I got %d' % interpolation_points.shape[1]
assert interpolation_points.shape[1] == 2, msg
# Now assert that requested quantities (and the independent ones)
# are present in file
missing = []
for quantity in ['time'] + quantity_names:
if quantity not in fid.variables:
missing.append(quantity)
if len(missing) > 0:
msg = 'Quantities %s could not be found in file %s'\
% (str(missing), filename)
fid.close()
raise Exception(msg)
# Decide whether this data has a spatial dimension
spatial = True
for quantity in ['x', 'y']:
if quantity not in fid.variables:
spatial = False
if filename[-3:] == 'tms' and spatial is True:
msg = 'Files of type TMS must not contain spatial information'
raise Exception(msg)
if filename[-3:] == 'sww' and spatial is False:
msg = 'Files of type SWW must contain spatial information'
raise Exception(msg)
if filename[-3:] == 'sts' and spatial is False:
#What if mux file only contains one point
msg = 'Files of type STS must contain spatial information'
raise Exception(msg)
# JJ REMOVED
#if filename[-3:] == 'sts' and boundary_polygon is None:
# #What if mux file only contains one point
# msg = 'Files of type sts require boundary polygon'
# raise Exception(msg)
# Get first timestep
try:
starttime = float(fid.starttime)
except ValueError:
msg = 'Could not read starttime from file %s' % filename
raise Exception(msg)
# Get variables
# if verbose: log.critical('Get variables' )
time = fid.variables['time'][:]
# FIXME(Ole): Is time monotonic?
# Apply time limit if requested
upper_time_index = len(time)
msg = 'Time vector obtained from file %s has length 0' % filename
assert upper_time_index > 0, msg
if time_limit is not None:
# Adjust given time limit to given start time
time_limit = time_limit - starttime
# Find limit point
for i, t in enumerate(time):
if t > time_limit:
upper_time_index = i
break
msg = 'Time vector is zero. Requested time limit is %f' % time_limit
assert upper_time_index > 0, msg
if time_limit < time[-1] and verbose is True:
log.critical('Limited time vector from %.2fs to %.2fs'
% (time[-1], time_limit))
time = time[:upper_time_index]
# Get time independent stuff
if spatial:
# Get origin
#xllcorner = fid.xllcorner[0]
#yllcorner = fid.yllcorner[0]
#zone = fid.zone[0]
xllcorner = fid.xllcorner
yllcorner = fid.yllcorner
zone = fid.zone
x = fid.variables['x'][:]
y = fid.variables['y'][:]
if filename.endswith('sww'):
triangles = fid.variables['volumes'][:]
x = num.reshape(x, (len(x), 1))
y = num.reshape(y, (len(y), 1))
vertex_coordinates = num.concatenate((x, y), axis=1) #m x 2 array
if boundary_polygon is not None:
# Remove sts points that do not lie on boundary
# FIXME(Ole): Why don't we just remove such points from the list of
# points and associated data?
# I am actually convinced we can get rid of neighbour_gauge_id
# altogether as the sts file is produced using the ordering file.
# All sts points are therefore always present in the boundary.
# In fact, they *define* parts of the boundary.
boundary_polygon=ensure_numeric(boundary_polygon)
boundary_polygon[:, 0] -= xllcorner
boundary_polygon[:, 1] -= yllcorner
temp=[]
boundary_id=[]
gauge_id=[]
for i in range(len(boundary_polygon)):
for j in range(len(x)):
if num.allclose(vertex_coordinates[j],
boundary_polygon[i], rtol=1e-4, atol=1e-4):
#FIXME:
#currently gauge lat and long are stored as float and
#then cast to double. This causes slight repositioning
#of vertex_coordinates.
temp.append(boundary_polygon[i])
gauge_id.append(j)
boundary_id.append(i)
break
gauge_neighbour_id=[]
for i in range(len(boundary_id)-1):
if boundary_id[i]+1==boundary_id[i+1]:
gauge_neighbour_id.append(i+1)
else:
gauge_neighbour_id.append(-1)
if boundary_id[len(boundary_id)-1]==len(boundary_polygon)-1 \
and boundary_id[0]==0:
gauge_neighbour_id.append(0)
else:
gauge_neighbour_id.append(-1)
gauge_neighbour_id=ensure_numeric(gauge_neighbour_id)
if len(num.compress(gauge_neighbour_id>=0, gauge_neighbour_id)) \
!= len(temp)-1:
msg='incorrect number of segments'
raise Exception(msg)
vertex_coordinates=ensure_numeric(temp)
if len(vertex_coordinates)==0:
msg = 'None of the sts gauges fall on the boundary'
raise Exception(msg)
else:
gauge_neighbour_id=None
if interpolation_points is not None:
# Adjust for georef
interpolation_points[:, 0] -= xllcorner
interpolation_points[:, 1] -= yllcorner
else:
gauge_neighbour_id=None
if domain_starttime is not None:
# If domain_starttime is *later* than starttime,
# move time back - relative to domain's time
if domain_starttime > starttime:
time = time - domain_starttime + starttime
# FIXME Use method in geo to reconcile
# if spatial:
# assert domain.geo_reference.xllcorner == xllcorner
# assert domain.geo_reference.yllcorner == yllcorner
# assert domain.geo_reference.zone == zone
if verbose:
log.critical('File_function data obtained from: %s' % filename)
log.critical(' References:')
if spatial:
log.critical(' Lower left corner: [%f, %f]'
% (xllcorner, yllcorner))
log.critical(' Start time: %f' % starttime)
# Produce values for desired data points at
# each timestep for each quantity
quantities = {}
for i, name in enumerate(quantity_names):
quantities[name] = fid.variables[name][:]
if boundary_polygon is not None:
#removes sts points that do not lie on boundary
quantities[name] = num.take(quantities[name], gauge_id, axis=1)
# Close sww, tms or sts netcdf file
fid.close()
from anuga.fit_interpolate.interpolate import Interpolation_function
if not spatial:
vertex_coordinates = triangles = interpolation_points = None
if filename[-3:] == 'sts':#added
triangles = None
#vertex coordinates is position of urs gauges
if verbose:
log.critical('Calling interpolation function')
# Return Interpolation_function instance as well as
# starttime for use to possible modify that of domain
return (Interpolation_function(time,
quantities,
quantity_names,
vertex_coordinates,
triangles,
interpolation_points,
time_thinning=time_thinning,
verbose=verbose,
gauge_neighbour_id=gauge_neighbour_id,
output_centroids=output_centroids),
starttime)
# NOTE (Ole): Caching Interpolation function is too slow as
# the very long parameters need to be hashed.
avg_line_length: 36.612705 | max_line_length: 79 | alphanum_fraction: 0.581631

hexsha: 4fe13887e1c905dc3b57e57301f8f1de69c10687 | size: 31,645 | ext: py | lang: Python
max_stars_repo_path: test/orm/inheritance/test_poly_loading.py | max_stars_repo_name: lvillis/sqlalchemy | max_stars_repo_head_hexsha: 889d05c444264bf1b6d11386459d3360cc529d27 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: test/orm/inheritance/test_poly_loading.py | max_issues_repo_name: lvillis/sqlalchemy | max_issues_repo_head_hexsha: 889d05c444264bf1b6d11386459d3360cc529d27 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: test/orm/inheritance/test_poly_loading.py | max_forks_repo_name: lvillis/sqlalchemy | max_forks_repo_head_hexsha: 889d05c444264bf1b6d11386459d3360cc529d27 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import backref
from sqlalchemy.orm import defaultload
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectin_polymorphic
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.testing import assertsql
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertions import expect_raises_message
from sqlalchemy.testing.assertsql import AllOf
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.assertsql import EachOf
from sqlalchemy.testing.assertsql import Or
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from ._poly_fixtures import _Polymorphic
from ._poly_fixtures import Company
from ._poly_fixtures import Engineer
from ._poly_fixtures import GeometryFixtureBase
from ._poly_fixtures import Manager
from ._poly_fixtures import Person
class BaseAndSubFixture:
use_options = False
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
adata = Column(String(50))
bs = relationship("B")
type = Column(String(50))
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "a",
}
class ASub(A):
__tablename__ = "asub"
id = Column(ForeignKey("a.id"), primary_key=True)
asubdata = Column(String(50))
cs = relationship("C")
if cls.use_options:
__mapper_args__ = {"polymorphic_identity": "asub"}
else:
__mapper_args__ = {
"polymorphic_load": "selectin",
"polymorphic_identity": "asub",
}
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
class C(Base):
__tablename__ = "c"
id = Column(Integer, primary_key=True)
a_sub_id = Column(ForeignKey("asub.id"))
@classmethod
def insert_data(cls, connection):
A, B, ASub, C = cls.classes("A", "B", "ASub", "C")
s = Session(connection)
s.add(A(id=1, adata="adata", bs=[B(), B()]))
s.add(
ASub(
id=2,
adata="adata",
asubdata="asubdata",
bs=[B(), B()],
cs=[C(), C()],
)
)
s.commit()
def _run_query(self, q):
ASub = self.classes.ASub
for a in q:
a.bs
if isinstance(a, ASub):
a.cs
def _assert_all_selectin(self, q):
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.id AS a_id, a.adata AS a_adata, "
"a.type AS a_type FROM a ORDER BY a.id",
{},
),
AllOf(
EachOf(
CompiledSQL(
"SELECT asub.id AS asub_id, a.id AS a_id, "
"a.type AS a_type, "
"asub.asubdata AS asub_asubdata FROM a JOIN asub "
"ON a.id = asub.id "
"WHERE a.id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY a.id",
{"primary_keys": [2]},
),
CompiledSQL(
# note this links c.a_sub_id to a.id, even though
# primaryjoin is to asub.id. this is because the
# cols a.id / asub.id are listed in the mapper's
# equivalent_columns so they are guaranteed to store
# the same value.
"SELECT c.a_sub_id AS c_a_sub_id, "
"c.id AS c_id "
"FROM c WHERE c.a_sub_id "
"IN (__[POSTCOMPILE_primary_keys])",
{"primary_keys": [2]},
),
),
CompiledSQL(
"SELECT b.a_id AS b_a_id, b.id AS b_id FROM b "
"WHERE b.a_id IN (__[POSTCOMPILE_primary_keys])",
{"primary_keys": [1, 2]},
),
),
)
self.assert_sql_execution(testing.db, lambda: self._run_query(result))
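# The assertions above capture the shape of "selectin" polymorphic loading: one base
# SELECT against `a`, then one extra SELECT per subclass (joined back to its base table)
# restricted with IN (...) to the primary keys actually returned, plus the usual
# selectinload queries for the `bs` and `cs` relationships.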
class LoadBaseAndSubWEagerRelOpt(
BaseAndSubFixture,
fixtures.DeclarativeMappedTest,
testing.AssertsExecutionResults,
):
use_options = True
def test_load(self):
A, B, ASub, C = self.classes("A", "B", "ASub", "C")
s = fixture_session()
q = (
s.query(A)
.order_by(A.id)
.options(
selectin_polymorphic(A, [ASub]),
selectinload(ASub.cs),
selectinload(A.bs),
)
)
self._assert_all_selectin(q)
class LoadBaseAndSubWEagerRelMapped(
BaseAndSubFixture,
fixtures.DeclarativeMappedTest,
testing.AssertsExecutionResults,
):
use_options = False
def test_load(self):
A, B, ASub, C = self.classes("A", "B", "ASub", "C")
s = fixture_session()
q = (
s.query(A)
.order_by(A.id)
.options(selectinload(ASub.cs), selectinload(A.bs))
)
self._assert_all_selectin(q)
class FixtureLoadTest(_Polymorphic, testing.AssertsExecutionResults):
def test_person_selectin_subclasses(self):
s = fixture_session()
q = s.query(Person).options(
selectin_polymorphic(Person, [Engineer, Manager])
)
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.name AS people_name, "
"people.type AS people_type FROM people",
{},
),
AllOf(
CompiledSQL(
"SELECT engineers.person_id AS engineers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"engineers.status AS engineers_status, "
"engineers.engineer_name AS engineers_engineer_name, "
"engineers.primary_language AS engineers_primary_language "
"FROM people JOIN engineers "
"ON people.person_id = engineers.person_id "
"WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY people.person_id",
{"primary_keys": [1, 2, 5]},
),
CompiledSQL(
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY people.person_id",
{"primary_keys": [3, 4]},
),
),
)
eq_(result, self.all_employees)
def test_load_company_plus_employees(self):
s = fixture_session()
q = (
s.query(Company)
.options(
selectinload(Company.employees).selectin_polymorphic(
[Engineer, Manager]
)
)
.order_by(Company.company_id)
)
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name FROM companies "
"ORDER BY companies.company_id",
{},
),
CompiledSQL(
"SELECT people.company_id AS people_company_id, "
"people.person_id AS people_person_id, "
"people.name AS people_name, people.type AS people_type "
"FROM people WHERE people.company_id "
"IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY people.person_id",
{"primary_keys": [1, 2]},
),
AllOf(
CompiledSQL(
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.name AS people_name, people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY people.person_id",
{"primary_keys": [3, 4]},
),
CompiledSQL(
"SELECT engineers.person_id AS engineers_person_id, "
"people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.name AS people_name, people.type AS people_type, "
"engineers.status AS engineers_status, "
"engineers.engineer_name AS engineers_engineer_name, "
"engineers.primary_language AS engineers_primary_language "
"FROM people JOIN engineers "
"ON people.person_id = engineers.person_id "
"WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY people.person_id",
{"primary_keys": [1, 2, 5]},
),
),
)
eq_(result, [self.c1, self.c2])
class TestGeometries(GeometryFixtureBase):
def test_threelevel_selectin_to_inline_mapped(self):
self._fixture_from_geometry(
{
"a": {
"subclasses": {
"b": {"polymorphic_load": "selectin"},
"c": {
"subclasses": {
"d": {
"polymorphic_load": "inline",
"single": True,
},
"e": {
"polymorphic_load": "inline",
"single": True,
},
},
"polymorphic_load": "selectin",
},
}
}
}
)
a, b, c, d, e = self.classes("a", "b", "c", "d", "e")
sess = fixture_session()
sess.add_all([d(d_data="d1"), e(e_data="e1")])
sess.commit()
q = sess.query(a)
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.type AS a_type, a.id AS a_id, "
"a.a_data AS a_a_data FROM a",
{},
),
Or(
CompiledSQL(
"SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
"c.c_data AS c_c_data, c.e_data AS c_e_data, "
"c.d_data AS c_d_data "
"FROM a JOIN c ON a.id = c.id "
"WHERE a.id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY a.id",
[{"primary_keys": [1, 2]}],
),
CompiledSQL(
"SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
"c.c_data AS c_c_data, "
"c.d_data AS c_d_data, c.e_data AS c_e_data "
"FROM a JOIN c ON a.id = c.id "
"WHERE a.id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY a.id",
[{"primary_keys": [1, 2]}],
),
),
)
with self.assert_statement_count(testing.db, 0):
eq_(result, [d(d_data="d1"), e(e_data="e1")])
def test_threelevel_selectin_to_inline_options(self):
self._fixture_from_geometry(
{
"a": {
"subclasses": {
"b": {},
"c": {
"subclasses": {
"d": {"single": True},
"e": {"single": True},
}
},
}
}
}
)
a, b, c, d, e = self.classes("a", "b", "c", "d", "e")
sess = fixture_session()
sess.add_all([d(d_data="d1"), e(e_data="e1")])
sess.commit()
c_alias = with_polymorphic(c, (d, e))
q = sess.query(a).options(selectin_polymorphic(a, [b, c_alias]))
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.type AS a_type, a.id AS a_id, "
"a.a_data AS a_a_data FROM a",
{},
),
Or(
CompiledSQL(
"SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
"c.c_data AS c_c_data, c.e_data AS c_e_data, "
"c.d_data AS c_d_data "
"FROM a JOIN c ON a.id = c.id "
"WHERE a.id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY a.id",
[{"primary_keys": [1, 2]}],
),
CompiledSQL(
"SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
"c.c_data AS c_c_data, c.d_data AS c_d_data, "
"c.e_data AS c_e_data "
"FROM a JOIN c ON a.id = c.id "
"WHERE a.id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY a.id",
[{"primary_keys": [1, 2]}],
),
),
)
with self.assert_statement_count(testing.db, 0):
eq_(result, [d(d_data="d1"), e(e_data="e1")])
def test_threelevel_selectin_to_inline_awkward_alias_options(self):
self._fixture_from_geometry(
{
"a": {
"subclasses": {
"b": {},
"c": {"subclasses": {"d": {}, "e": {}}},
}
}
}
)
a, b, c, d, e = self.classes("a", "b", "c", "d", "e")
sess = fixture_session()
sess.add_all([d(d_data="d1"), e(e_data="e1")])
sess.commit()
from sqlalchemy import select
a_table, c_table, d_table, e_table = self.tables("a", "c", "d", "e")
poly = (
select(a_table.c.id, a_table.c.type, c_table, d_table, e_table)
.select_from(
a_table.join(c_table).outerjoin(d_table).outerjoin(e_table)
)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias("poly")
)
c_alias = with_polymorphic(c, (d, e), poly)
q = (
sess.query(a)
.options(selectin_polymorphic(a, [b, c_alias]))
.order_by(a.id)
)
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.type AS a_type, a.id AS a_id, "
"a.a_data AS a_a_data FROM a ORDER BY a.id",
{},
),
Or(
# here, the test is that the adaptation of "a" takes place
CompiledSQL(
"SELECT poly.a_type AS poly_a_type, "
"poly.c_id AS poly_c_id, "
"poly.a_id AS poly_a_id, poly.c_c_data AS poly_c_c_data, "
"poly.e_id AS poly_e_id, poly.e_e_data AS poly_e_e_data, "
"poly.d_id AS poly_d_id, poly.d_d_data AS poly_d_d_data "
"FROM (SELECT a.id AS a_id, a.type AS a_type, "
"c.id AS c_id, "
"c.c_data AS c_c_data, d.id AS d_id, "
"d.d_data AS d_d_data, "
"e.id AS e_id, e.e_data AS e_e_data FROM a JOIN c "
"ON a.id = c.id LEFT OUTER JOIN d ON c.id = d.id "
"LEFT OUTER JOIN e ON c.id = e.id) AS poly "
"WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY poly.a_id",
[{"primary_keys": [1, 2]}],
),
CompiledSQL(
"SELECT poly.a_type AS poly_a_type, "
"poly.c_id AS poly_c_id, "
"poly.a_id AS poly_a_id, poly.c_c_data AS poly_c_c_data, "
"poly.d_id AS poly_d_id, poly.d_d_data AS poly_d_d_data, "
"poly.e_id AS poly_e_id, poly.e_e_data AS poly_e_e_data "
"FROM (SELECT a.id AS a_id, a.type AS a_type, "
"c.id AS c_id, c.c_data AS c_c_data, d.id AS d_id, "
"d.d_data AS d_d_data, e.id AS e_id, "
"e.e_data AS e_e_data FROM a JOIN c ON a.id = c.id "
"LEFT OUTER JOIN d ON c.id = d.id "
"LEFT OUTER JOIN e ON c.id = e.id) AS poly "
"WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY poly.a_id",
[{"primary_keys": [1, 2]}],
),
),
)
with self.assert_statement_count(testing.db, 0):
eq_(result, [d(d_data="d1"), e(e_data="e1")])
def test_partial_load_no_invoke_eagers(self):
# test issue #4199
self._fixture_from_geometry(
{
"a": {
"subclasses": {
"a1": {"polymorphic_load": "selectin"},
"a2": {"polymorphic_load": "selectin"},
}
}
}
)
a, a1, a2 = self.classes("a", "a1", "a2")
sess = fixture_session()
a1_obj = a1()
a2_obj = a2()
sess.add_all([a1_obj, a2_obj])
del a2_obj
sess.flush()
sess.expire_all()
# _with_invoke_all_eagers(False), used by the lazy loader
# strategy, will cause one less state to be present such that
# the poly loader won't locate a state limited to the "a1" mapper,
# needs to test that it has states
sess.query(a)._with_invoke_all_eagers(False).all()
class LoaderOptionsTest(
fixtures.DeclarativeMappedTest, testing.AssertsExecutionResults
):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Parent(fixtures.ComparableEntity, Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
class Child(fixtures.ComparableEntity, Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey("parent.id"))
parent = relationship("Parent", backref=backref("children"))
type = Column(String(50), nullable=False)
__mapper_args__ = {"polymorphic_on": type}
class ChildSubclass1(Child):
__tablename__ = "child_subclass1"
id = Column(Integer, ForeignKey("child.id"), primary_key=True)
__mapper_args__ = {
"polymorphic_identity": "subclass1",
"polymorphic_load": "selectin",
}
class Other(fixtures.ComparableEntity, Base):
__tablename__ = "other"
id = Column(Integer, primary_key=True)
child_subclass_id = Column(
Integer, ForeignKey("child_subclass1.id")
)
child_subclass = relationship(
"ChildSubclass1", backref=backref("others")
)
@classmethod
def insert_data(cls, connection):
Parent, ChildSubclass1, Other = cls.classes(
"Parent", "ChildSubclass1", "Other"
)
session = Session(connection)
parent = Parent(id=1)
subclass1 = ChildSubclass1(id=1, parent=parent)
other = Other(id=1, child_subclass=subclass1)
session.add_all([parent, subclass1, other])
session.commit()
def test_options_dont_pollute_baked(self):
self._test_options_dont_pollute(True)
def test_options_dont_pollute_unbaked(self):
self._test_options_dont_pollute(False)
def _test_options_dont_pollute(self, enable_baked):
Parent, ChildSubclass1, Other = self.classes(
"Parent", "ChildSubclass1", "Other"
)
session = fixture_session(enable_baked_queries=enable_baked)
def no_opt():
q = session.query(Parent).options(
joinedload(Parent.children.of_type(ChildSubclass1))
)
return self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT parent.id AS parent_id, "
"anon_1.child_id AS anon_1_child_id, "
"anon_1.child_parent_id AS anon_1_child_parent_id, "
"anon_1.child_type AS anon_1_child_type, "
"anon_1.child_subclass1_id AS anon_1_child_subclass1_id "
"FROM parent "
"LEFT OUTER JOIN (SELECT child.id AS child_id, "
"child.parent_id AS child_parent_id, "
"child.type AS child_type, "
"child_subclass1.id AS child_subclass1_id "
"FROM child "
"LEFT OUTER JOIN child_subclass1 "
"ON child.id = child_subclass1.id) AS anon_1 "
"ON parent.id = anon_1.child_parent_id",
{},
),
CompiledSQL(
"SELECT child_subclass1.id AS child_subclass1_id, "
"child.id AS child_id, "
"child.parent_id AS child_parent_id, "
"child.type AS child_type "
"FROM child JOIN child_subclass1 "
"ON child.id = child_subclass1.id "
"WHERE child.id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY child.id",
[{"primary_keys": [1]}],
),
)
result = no_opt()
with self.assert_statement_count(testing.db, 1):
eq_(result, [Parent(children=[ChildSubclass1(others=[Other()])])])
session.expunge_all()
q = session.query(Parent).options(
joinedload(Parent.children.of_type(ChildSubclass1)).joinedload(
ChildSubclass1.others
)
)
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT parent.id AS parent_id, "
"anon_1.child_id AS anon_1_child_id, "
"anon_1.child_parent_id AS anon_1_child_parent_id, "
"anon_1.child_type AS anon_1_child_type, "
"anon_1.child_subclass1_id AS anon_1_child_subclass1_id, "
"other_1.id AS other_1_id, "
"other_1.child_subclass_id AS other_1_child_subclass_id "
"FROM parent LEFT OUTER JOIN "
"(SELECT child.id AS child_id, "
"child.parent_id AS child_parent_id, "
"child.type AS child_type, "
"child_subclass1.id AS child_subclass1_id "
"FROM child LEFT OUTER JOIN child_subclass1 "
"ON child.id = child_subclass1.id) AS anon_1 "
"ON parent.id = anon_1.child_parent_id "
"LEFT OUTER JOIN other AS other_1 "
"ON anon_1.child_subclass1_id = other_1.child_subclass_id",
{},
),
CompiledSQL(
"SELECT child_subclass1.id AS child_subclass1_id, "
"child.id AS child_id, child.parent_id AS child_parent_id, "
"child.type AS child_type, other_1.id AS other_1_id, "
"other_1.child_subclass_id AS other_1_child_subclass_id "
"FROM child JOIN child_subclass1 "
"ON child.id = child_subclass1.id "
"LEFT OUTER JOIN other AS other_1 "
"ON child_subclass1.id = other_1.child_subclass_id "
"WHERE child.id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY child.id",
[{"primary_keys": [1]}],
),
)
with self.assert_statement_count(testing.db, 0):
eq_(result, [Parent(children=[ChildSubclass1(others=[Other()])])])
session.expunge_all()
result = no_opt()
with self.assert_statement_count(testing.db, 1):
eq_(result, [Parent(children=[ChildSubclass1(others=[Other()])])])
class IgnoreOptionsOnSubclassAttrLoad(fixtures.DeclarativeMappedTest):
"""test #7304 and related cases
in this case we trigger the subclass attribute load, while at the same
time there will be a deferred loader option present in the state's
options that was established by the previous loader.
test both that the option takes effect (i.e. raiseload) and that a deferred
loader doesn't interfere with the mapper's load of the attribute.
"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Parent(Base):
__tablename__ = "parent"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
entity_id = Column(ForeignKey("entity.id"))
entity = relationship("Entity")
class Entity(Base):
__tablename__ = "entity"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(32))
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "entity",
}
class SubEntity(Entity):
__tablename__ = "sub_entity"
id = Column(ForeignKey(Entity.id), primary_key=True)
name = Column(String(32))
__mapper_args__ = {"polymorphic_identity": "entity_two"}
@classmethod
def insert_data(cls, connection):
Parent, SubEntity = cls.classes("Parent", "SubEntity")
with Session(connection) as session:
session.add(Parent(entity=SubEntity(name="some name")))
session.commit()
@testing.combinations(
defaultload,
joinedload,
selectinload,
lazyload,
argnames="first_option",
)
@testing.combinations(
("load_only", "id", True),
("defer", "name", True),
("undefer", "name", True),
("raise", "name", False),
(None, None, True),
# these don't seem possible at the moment as the "type" column
# doesn't load and it can't recognize the polymorphic identity.
# we assume load_only() is smart enough to include this column
# ("defer", '*', True),
# ("undefer", '*', True),
# ("raise", '*', False),
argnames="second_option,second_argument,expect_load",
)
def test_subclass_loadattr(
self, first_option, second_option, second_argument, expect_load
):
Parent, Entity, SubEntity = self.classes(
"Parent", "Entity", "SubEntity"
)
stmt = select(Parent)
will_lazyload = first_option in (defaultload, lazyload)
if second_argument == "name":
second_argument = SubEntity.name
opt = first_option(Parent.entity.of_type(SubEntity))
elif second_argument == "id":
opt = first_option(Parent.entity)
second_argument = Entity.id
else:
opt = first_option(Parent.entity)
if second_option is None:
sub_opt = opt
elif second_option == "raise":
sub_opt = opt.defer(second_argument, raiseload=True)
else:
sub_opt = getattr(opt, second_option)(second_argument)
stmt = stmt.options(sub_opt)
session = fixture_session()
result = session.execute(stmt).scalars()
parent_obj = result.first()
entity_id = parent_obj.__dict__["entity_id"]
with assertsql.assert_engine(testing.db) as asserter_:
if expect_load:
eq_(parent_obj.entity.name, "some name")
else:
with expect_raises_message(
exc.InvalidRequestError,
"'SubEntity.name' is not available due to raiseload=True",
):
parent_obj.entity.name
expected = []
if will_lazyload:
expected.append(
CompiledSQL(
"SELECT entity.id AS entity_id, "
"entity.type AS entity_type FROM entity "
"WHERE entity.id = :pk_1",
[{"pk_1": entity_id}],
)
)
if second_option in ("load_only", None) or (
second_option == "undefer"
and first_option in (defaultload, lazyload)
):
# load will be a mapper optimized load for the name alone
expected.append(
CompiledSQL(
"SELECT sub_entity.name AS sub_entity_name "
"FROM sub_entity "
"WHERE :param_1 = sub_entity.id",
[{"param_1": entity_id}],
)
)
elif second_option == "defer":
# load will be a deferred load. this is because the explicit
# call to the deferred load put a deferred loader on the attribute
expected.append(
CompiledSQL(
"SELECT sub_entity.name AS sub_entity_name "
"FROM sub_entity "
"WHERE :param_1 = sub_entity.id",
[{"param_1": entity_id}],
)
)
asserter_.assert_(*expected)
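# --- Illustrative sketch, not part of the original test suite ---
# A minimal, self-contained use of the selectin_polymorphic() loader option
# whose SQL the tests above assert.  The Employee/Engineer classes, table
# names and values are hypothetical and exist only for illustration.
def _selectin_polymorphic_sketch():
    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base, selectin_polymorphic
    Base = declarative_base()
    class Employee(Base):
        __tablename__ = "employee"
        id = Column(Integer, primary_key=True)
        type = Column(String(50))
        name = Column(String(50))
        __mapper_args__ = {
            "polymorphic_on": type,
            "polymorphic_identity": "employee",
        }
    class Engineer(Employee):
        __tablename__ = "engineer"
        id = Column(ForeignKey("employee.id"), primary_key=True)
        engineer_info = Column(String(50))
        __mapper_args__ = {"polymorphic_identity": "engineer"}
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(Engineer(name="e1", engineer_info="info"))
        session.commit()
        # one SELECT against the base table, then one per-subclass SELECT
        # constrained by an IN list of primary keys, as asserted above
        return (
            session.query(Employee)
            .options(selectin_polymorphic(Employee, [Engineer]))
            .all()
        )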
| 36.668598
| 79
| 0.510728
|
f07b96226eafcca2debd0b7abee3415bb9d2b80b
| 15,357
|
py
|
Python
|
tlslite/utils/codec.py
|
t184256/tlslite-ng
|
cef7d75f79fd746c001b339399257098a00b46be
|
[
"Unlicense"
] | null | null | null |
tlslite/utils/codec.py
|
t184256/tlslite-ng
|
cef7d75f79fd746c001b339399257098a00b46be
|
[
"Unlicense"
] | null | null | null |
tlslite/utils/codec.py
|
t184256/tlslite-ng
|
cef7d75f79fd746c001b339399257098a00b46be
|
[
"Unlicense"
] | null | null | null |
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Classes for reading/writing binary data (such as TLS records)."""
from __future__ import division
import sys
import struct
from struct import pack
class DecodeError(SyntaxError):
"""Exception raised in case of decoding errors."""
pass
class Writer(object):
"""Serialisation helper for complex byte-based structures."""
def __init__(self):
"""Initialise the serializer with no data."""
self.bytes = bytearray(0)
def addOne(self, val):
"""Add a single-byte wide element to buffer, see add()."""
self.bytes.append(val)
if sys.version_info < (2, 7):
# struct.pack on Python2.6 does not raise exception if the value
# is larger than can fit inside the specified size
def addTwo(self, val):
"""Add a double-byte wide element to buffer, see add()."""
if not 0 <= val <= 0xffff:
raise ValueError("Can't represent value in specified length")
self.bytes += pack('>H', val)
def addThree(self, val):
"""Add a three-byte wide element to buffer, see add()."""
if not 0 <= val <= 0xffffff:
raise ValueError("Can't represent value in specified length")
self.bytes += pack('>BH', val >> 16, val & 0xffff)
def addFour(self, val):
"""Add a four-byte wide element to buffer, see add()."""
if not 0 <= val <= 0xffffffff:
raise ValueError("Can't represent value in specified length")
self.bytes += pack('>I', val)
else:
def addTwo(self, val):
"""Add a double-byte wide element to buffer, see add()."""
try:
self.bytes += pack('>H', val)
except struct.error:
raise ValueError("Can't represent value in specified length")
def addThree(self, val):
"""Add a three-byte wide element to buffer, see add()."""
try:
self.bytes += pack('>BH', val >> 16, val & 0xffff)
except struct.error:
raise ValueError("Can't represent value in specified length")
def addFour(self, val):
"""Add a four-byte wide element to buffer, see add()."""
try:
self.bytes += pack('>I', val)
except struct.error:
raise ValueError("Can't represent value in specified length")
if sys.version_info >= (3, 0):
# the method is called thousands of times, so it's better to hoist
# the version info check out of the method
def add(self, x, length):
"""
Add a single positive integer value x, encode it in length bytes
Encode positive integer x in big-endian format using length bytes,
add to the internal buffer.
:type x: int
:param x: value to encode
:type length: int
:param length: number of bytes to use for encoding the value
"""
try:
self.bytes += x.to_bytes(length, 'big')
except OverflowError:
raise ValueError("Can't represent value in specified length")
else:
_addMethods = {1: addOne, 2: addTwo, 3: addThree, 4: addFour}
def add(self, x, length):
"""
Add a single positive integer value x, encode it in length bytes
Encode positive integer x in big-endian format using length bytes,
add to the internal buffer.
:type x: int
:param x: value to encode
:type length: int
:param length: number of bytes to use for encoding the value
"""
try:
self._addMethods[length](self, x)
except KeyError:
self.bytes += bytearray(length)
newIndex = len(self.bytes) - 1
for i in range(newIndex, newIndex - length, -1):
self.bytes[i] = x & 0xFF
x >>= 8
if x != 0:
raise ValueError("Can't represent value in specified "
"length")
def addFixSeq(self, seq, length):
"""
Add a list of items, encode every item in length bytes
Uses the unbounded iterable seq to produce items, each of
which is then encoded to length bytes
:type seq: iterable of int
:param seq: list of positive integers to encode
:type length: int
:param length: number of bytes to which encode every element
"""
for e in seq:
self.add(e, length)
if sys.version_info < (2, 7):
# struct.pack on Python2.6 does not raise exception if the value
# is larger than can fit inside the specified size
def _addVarSeqTwo(self, seq):
"""Helper method for addVarSeq"""
if not all(0 <= i <= 0xffff for i in seq):
raise ValueError("Can't represent value in specified "
"length")
self.bytes += pack('>' + 'H' * len(seq), *seq)
def addVarSeq(self, seq, length, lengthLength):
"""
Add a bounded list of same-sized values
Create a list of specific length with all items being of the same
size
:type seq: list of int
:param seq: list of positive integers to encode
:type length: int
:param length: amount of bytes in which to encode every item
:type lengthLength: int
:param lengthLength: amount of bytes in which to encode the overall
length of the array
"""
self.add(len(seq)*length, lengthLength)
if length == 1:
self.bytes.extend(seq)
elif length == 2:
self._addVarSeqTwo(seq)
else:
for i in seq:
self.add(i, length)
else:
def addVarSeq(self, seq, length, lengthLength):
"""
Add a bounded list of same-sized values
Create a list of specific length with all items being of the same
size
:type seq: list of int
:param seq: list of positive integers to encode
:type length: int
:param length: amount of bytes in which to encode every item
:type lengthLength: int
:param lengthLength: amount of bytes in which to encode the overall
length of the array
"""
seqLen = len(seq)
self.add(seqLen*length, lengthLength)
if length == 1:
self.bytes.extend(seq)
elif length == 2:
try:
self.bytes += pack('>' + 'H' * seqLen, *seq)
except struct.error:
raise ValueError("Can't represent value in specified "
"length")
else:
for i in seq:
self.add(i, length)
def addVarTupleSeq(self, seq, length, lengthLength):
"""
Add a variable length list of same-sized element tuples.
Note that all tuples must have the same size.
Inverse of Parser.getVarTupleList()
:type seq: enumerable
:param seq: list of tuples
:type length: int
:param length: length of single element in tuple
:type lengthLength: int
:param lengthLength: length in bytes of overall length field
"""
if not seq:
self.add(0, lengthLength)
else:
startPos = len(self.bytes)
dataLength = len(seq) * len(seq[0]) * length
self.add(dataLength, lengthLength)
# since at the time of writing, all the calls encode single byte
# elements, and it's very easy to speed up that case, give it
# a special case
if length == 1:
for elemTuple in seq:
self.bytes.extend(elemTuple)
else:
for elemTuple in seq:
self.addFixSeq(elemTuple, length)
if startPos + dataLength + lengthLength != len(self.bytes):
raise ValueError("Tuples of different lengths")
def add_var_bytes(self, data, length_length):
"""
Add a variable length array of bytes.
Inverse of Parser.getVarBytes()
:type data: bytes
:param data: bytes to add to the buffer
:param int length_length: size of the field to represent the length
of the data string
"""
length = len(data)
self.add(length, length_length)
self.bytes += data
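# Illustrative sketch, not part of the original module: a minimal use of
# Writer to build a length-prefixed record.  The field values are arbitrary.
def _writer_usage_sketch():
    """Return the serialised bytes of a tiny, made-up record."""
    writer = Writer()
    writer.add(22, 1)                                    # one-byte field
    writer.add(0x0303, 2)                                # two-byte field
    writer.add_var_bytes(bytearray(b'\x01\x02\x03'), 2)  # 2-byte length prefix + payload
    assert writer.bytes == bytearray(b'\x16\x03\x03\x00\x03\x01\x02\x03')
    return writer.bytes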
class Parser(object):
"""
Parser for TLV and LV byte-based encodings.
Parser that can handle arbitrary byte-based encodings usually employed in
Type-Length-Value or Length-Value binary encoding protocols like ASN.1
or TLS
Note: if the raw bytes don't match expected values (like trying to
read a 4-byte integer from a 2-byte buffer), most methods will raise a
DecodeError exception.
TODO: don't reuse an exception type meant for the language parser
(SyntaxError) to indicate errors in application code.
:vartype bytes: bytearray
:ivar bytes: data to be interpreted (buffer)
:vartype index: int
:ivar index: current position in the buffer
:vartype lengthCheck: int
:ivar lengthCheck: size of struct being parsed
:vartype indexCheck: int
:ivar indexCheck: position at which the structure begins in buffer
"""
def __init__(self, bytes):
"""
Bind raw bytes with parser.
:type bytes: bytearray
:param bytes: bytes to be parsed/interpreted
"""
self.bytes = bytes
self.index = 0
self.indexCheck = 0
self.lengthCheck = 0
def get(self, length):
"""
Read a single big-endian integer value encoded in 'length' bytes.
:type length: int
:param length: number of bytes in which the value is encoded in
:rtype: int
"""
if self.index + length > len(self.bytes):
raise DecodeError("Read past end of buffer")
x = 0
for _ in range(length):
x <<= 8
x |= self.bytes[self.index]
self.index += 1
return x
def getFixBytes(self, lengthBytes):
"""
Read a string of bytes encoded in 'lengthBytes' bytes.
:type lengthBytes: int
:param lengthBytes: number of bytes to return
:rtype: bytearray
"""
if self.index + lengthBytes > len(self.bytes):
raise DecodeError("Read past end of buffer")
bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes
return bytes
def getVarBytes(self, lengthLength):
"""
Read a variable-length byte string prefixed by a fixed-size length field.
see Writer.add_var_bytes() for an inverse of this method
:type lengthLength: int
:param lengthLength: number of bytes in which the length of the string
is encoded in
:rtype: bytearray
"""
lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList):
"""
Read a list of static length with same-sized ints.
:type length: int
:param length: size in bytes of a single element in list
:type lengthList: int
:param lengthList: number of elements in list
:rtype: list of int
"""
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def getVarList(self, length, lengthLength):
"""
Read a variable length list of same-sized integers.
:type length: int
:param length: size in bytes of a single element
:type lengthLength: int
:param lengthLength: size of the encoded length of the list
:rtype: list of int
"""
lengthList = self.get(lengthLength)
if lengthList % length != 0:
raise DecodeError("Encoded length not a multiple of element "
"length")
lengthList = lengthList // length
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def getVarTupleList(self, elemLength, elemNum, lengthLength):
"""
Read a variable length list of same sized tuples.
:type elemLength: int
:param elemLength: length in bytes of single tuple element
:type elemNum: int
:param elemNum: number of elements in tuple
:type lengthLength: int
:param lengthLength: length in bytes of the list length variable
:rtype: list of tuple of int
"""
lengthList = self.get(lengthLength)
if lengthList % (elemLength * elemNum) != 0:
raise DecodeError("Encoded length not a multiple of element "
"length")
tupleCount = lengthList // (elemLength * elemNum)
tupleList = []
for _ in range(tupleCount):
currentTuple = []
for _ in range(elemNum):
currentTuple.append(self.get(elemLength))
tupleList.append(tuple(currentTuple))
return tupleList
def startLengthCheck(self, lengthLength):
"""
Read length of struct and start a length check for parsing.
:type lengthLength: int
:param lengthLength: number of bytes in which the length is encoded
"""
self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index
def setLengthCheck(self, length):
"""
Set length of struct and start a length check for parsing.
:type length: int
:param length: expected size of parsed struct in bytes
"""
self.lengthCheck = length
self.indexCheck = self.index
def stopLengthCheck(self):
"""
Stop struct parsing, verify that no under- or overflow occurred.
In case the expected length was mismatched with actual length of
processed data, raises an exception.
"""
if (self.index - self.indexCheck) != self.lengthCheck:
raise DecodeError("Under- or over-flow while reading buffer")
def atLengthCheck(self):
"""
Check if there is data in structure left for parsing.
Returns True if the whole structure was parsed, False if there is
some data left.
Will raise an exception if overflow occurred (amount of data read was
greater than expected size)
"""
if (self.index - self.indexCheck) < self.lengthCheck:
return False
elif (self.index - self.indexCheck) == self.lengthCheck:
return True
else:
raise DecodeError("Read past end of buffer")
def getRemainingLength(self):
"""Return amount of data remaining in struct being parsed."""
return len(self.bytes) - self.index
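# Illustrative sketch, not part of the original module: decoding the record
# produced by the Writer sketch above, including a length check.
def _parser_usage_sketch():
    """Return the fields parsed from a tiny, made-up record."""
    parser = Parser(bytearray(b'\x16\x03\x03\x00\x03\x01\x02\x03'))
    first = parser.get(1)            # 22
    second = parser.get(2)           # 0x0303
    parser.setLengthCheck(5)         # the remaining five bytes form one struct
    payload = parser.getVarBytes(2)  # two-byte length prefix, then the payload
    parser.stopLengthCheck()         # raises DecodeError on a length mismatch
    assert (first, second, payload) == (22, 0x0303, bytearray(b'\x01\x02\x03'))
    return first, second, payload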
| 33.457516
| 79
| 0.570098
|
c9f8c5cfa20640bac7ad7d9c88dfcba1e85e8659
| 1,112
|
py
|
Python
|
tests/unit/test_models.py
|
christopherjmedlin/grocernet
|
5e45b9fcf72cb7701d57326548010ed2dba13c83
|
[
"MIT"
] | null | null | null |
tests/unit/test_models.py
|
christopherjmedlin/grocernet
|
5e45b9fcf72cb7701d57326548010ed2dba13c83
|
[
"MIT"
] | null | null | null |
tests/unit/test_models.py
|
christopherjmedlin/grocernet
|
5e45b9fcf72cb7701d57326548010ed2dba13c83
|
[
"MIT"
] | null | null | null |
import pytest
import os
from grocernet.users.models import User
from grocernet.vendors.models import Vendor
def test_user():
username = "user12345"
password = "s3cur3p@$$word"
email = "email@gmail.com"
user = User(username, password, email, False)
assert user.password != password
assert "pbkdf2:sha256" in user.password
assert user.username is username
assert user.email is email
assert not user.admin
user.set_password("newpassword")
assert user.password != "newpassword"
dictionary = user.to_dict()
assert dictionary["username"] == username
assert "password" not in dictionary
assert not dictionary["admin"]
assert str(user) == "<User 'user12345'>"
@pytest.mark.skipif("MAPBOX_ACCESS_TOKEN" not in os.environ,
reason="No mapbox token found.")
def test_vendor():
vendor = Vendor("Safeway", "1010 sw jefferson street portland",
"POINT(10, 10)")
assert vendor.name == "Safeway"
assert vendor.address == "1010 sw jefferson street portland"
assert str(vendor) == "<Vendor 'Safeway'>"
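# Illustrative sketch, not part of the original tests: the password hashing
# behaviour asserted above matches what werkzeug-style helpers provide.  The
# import and the "pbkdf2:sha256" method are assumptions about this project's
# dependencies and may differ between werkzeug versions.
def _password_hash_sketch(password="s3cur3p@$$word"):
    from werkzeug.security import check_password_hash, generate_password_hash
    hashed = generate_password_hash(password, method="pbkdf2:sha256")
    assert hashed != password          # only the salted hash is stored
    assert check_password_hash(hashed, password)
    return hashed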
| 27.8
| 67
| 0.673561
|
36149edf77392198eb5cb0d6c2bfbfa0eaf96a1f
| 1,037
|
py
|
Python
|
unit/test_uvm_ports.py
|
rodrigomelo9/uvm-python
|
e3127eba2cc1519a61dc6f736d862a8dcd6fce20
|
[
"Apache-2.0"
] | 140
|
2020-01-18T00:14:17.000Z
|
2022-03-29T10:57:24.000Z
|
unit/test_uvm_ports.py
|
Mohsannaeem/uvm-python
|
1b8768a1358d133465ede9cadddae651664b1d53
|
[
"Apache-2.0"
] | 24
|
2020-01-18T18:40:58.000Z
|
2021-03-25T17:39:07.000Z
|
unit/test_uvm_ports.py
|
Mohsannaeem/uvm-python
|
1b8768a1358d133465ede9cadddae651664b1d53
|
[
"Apache-2.0"
] | 34
|
2020-01-18T12:22:59.000Z
|
2022-02-11T07:03:11.000Z
|
import unittest
from uvm.base.uvm_component import UVMComponent
from uvm.tlm1.uvm_ports import (
UVMBlockingPutPort, UVMNonBlockingPutPort,
UVMBlockingGetPort, UVMNonBlockingGetPort,
)
from uvm.tlm1.uvm_imps import (
UVMNonBlockingPutImp, UVMNonBlockingGetImp
)
class TestUVMPorts(unittest.TestCase):
def test_non_blocking_put_port(self):
source = UVMComponent('uvm_ports_comp', None)
sink = UVMComponent('uvm_ports_comp2', None)
def try_put(item):
pass
setattr(sink, 'try_put', try_put)
put_port = UVMNonBlockingPutPort('my_port', source)
put_imp = UVMNonBlockingPutImp('put_imp', sink)
put_port.connect(put_imp)
put_port.resolve_bindings()
put_port.try_put(0x1234)
# TODO add assert
def test_blocking_put_port(self):
comp = UVMComponent('comp', None)
put_port = UVMBlockingPutPort('my_port', comp)
self.assertEqual(put_port.is_port(), True)
if __name__ == '__main__':
unittest.main()
| 26.589744
| 59
| 0.693346
|
c37ae83983cbc06c309cda0aa8a1f1c2533d112b
| 5,614
|
py
|
Python
|
code_examples/tensorflow2/gnn/qm9_ipu.py
|
payoto/graphcore_examples
|
46d2b7687b829778369fc6328170a7b14761e5c6
|
[
"MIT"
] | 260
|
2019-11-18T01:50:00.000Z
|
2022-03-28T23:08:53.000Z
|
code_examples/tensorflow2/gnn/qm9_ipu.py
|
payoto/graphcore_examples
|
46d2b7687b829778369fc6328170a7b14761e5c6
|
[
"MIT"
] | 27
|
2020-01-28T23:07:50.000Z
|
2022-02-14T15:37:06.000Z
|
code_examples/tensorflow2/gnn/qm9_ipu.py
|
payoto/graphcore_examples
|
46d2b7687b829778369fc6328170a7b14761e5c6
|
[
"MIT"
] | 56
|
2019-11-18T02:13:12.000Z
|
2022-02-28T14:36:09.000Z
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
# Copyright (c) 2019 Daniele Grattarola.
# Original code here https://github.com/danielegrattarola/spektral/blob/a2cd265a9440831afc441c1774dd1b7d080a59f8/examples/graph_prediction/qm9_batch.py
"""
IPU Implementation
This example shows how to perform regression of molecular properties with the
QM9 database, using a GNN based on edge-conditioned convolutions in batch mode.
"""
import time
from tensorflow.python.ipu.config import IPUConfig
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow import keras
from tensorflow.python import ipu
from spektral.datasets import qm9
from spektral.layers import EdgeConditionedConv, GlobalSumPool
from spektral.utils import label_to_one_hot
from qm9_argparser import get_argparser
################################################################################
# PARAMETERS (defaults set in get_argparser())
################################################################################
parser = get_argparser()
args = parser.parse_args()
gradient_accumulation_count, epochs = (1, 2) if args.profile else (6, args.epochs)
################################################################################
# CONFIGURE THE DEVICE
################################################################################
cfg = IPUConfig()
cfg.auto_select_ipus = args.num_ipus
cfg.configure_ipu_system()
# Mixed precision support
tf.keras.backend.set_floatx('float16')
################################################################################
# LOAD DATA
################################################################################
A, X, E, y = qm9.load_data(return_type='numpy',
nf_keys='atomic_num',
ef_keys='type',
self_loops=True,
amount=args.amount) # Set to None to train on whole dataset
y = y[['cv']].values # Heat capacity at 298.15K
# Preprocessing
X_uniq = np.unique(X)
X_uniq = X_uniq[X_uniq != 0]
E_uniq = np.unique(E)
E_uniq = E_uniq[E_uniq != 0]
X = label_to_one_hot(X, X_uniq)
E = label_to_one_hot(E, E_uniq)
# Parameters
N = X.shape[-2] # Number of nodes in the graphs
F = X[0].shape[-1] # Dimension of node features
S = E[0].shape[-1] # Dimension of edge features
n_out = y.shape[-1] # Dimension of the target
# Train/test split
data = train_test_split(A, X, E, y, test_size=0.1, random_state=0)
A_train, A_test, X_train, X_test, E_train, E_test, y_train, y_test = [x.astype(np.float32) for x in data]
train_data_len = A_train.shape[0]
test_data_len = A_test.shape[0]
train_dataset = tf.data.Dataset.from_tensor_slices(((X_train, A_train, E_train), y_train))
train_dataset = train_dataset.repeat().batch(args.batch_size, drop_remainder=True)
test_dataset = tf.data.Dataset.from_tensor_slices(((X_test, A_test, E_test), y_test))
test_dataset = test_dataset.batch(1, drop_remainder=True)
################################################################################
# RUN INSIDE OF A STRATEGY
################################################################################
strategy = ipu.ipu_strategy.IPUStrategy()
with strategy.scope():
############################################################################
# BUILD MODEL
############################################################################
X_in = Input(shape=(N, F))
A_in = Input(shape=(N, N))
E_in = Input(shape=(N, N, S))
X_1 = EdgeConditionedConv(32, activation='relu')([X_in, A_in, E_in])
X_2 = EdgeConditionedConv(32, activation='relu')([X_1, A_in, E_in])
X_3 = GlobalSumPool()(X_2)
output = Dense(n_out)(X_3)
model = tf.keras.Model(inputs=[X_in, A_in, E_in],
outputs=output)
model.set_gradient_accumulation_options(
gradient_accumulation_count=gradient_accumulation_count
)
optimizer = Adam(lr=args.learning_rate)
# `steps_per_execution` must divide the gradient accumulation count and the number of replicas
# so we use the least common multiple, which is the product divided by the greatest
# common divisor
model.compile(optimizer=optimizer, loss='mse', steps_per_execution=args.num_ipus)
model.summary()
############################################################################
# FIT MODEL
############################################################################
train_steps_per_epoch = args.num_ipus if args.profile else (train_data_len - train_data_len % args.num_ipus)
tic = time.perf_counter()
model.fit(train_dataset, batch_size=args.batch_size, epochs=epochs, steps_per_epoch=train_steps_per_epoch)
toc = time.perf_counter()
duration = toc - tic
print(f"Training time duration {duration}")
if not args.profile:
############################################################################
# EVALUATE MODEL
############################################################################
print('Testing model')
test_steps = test_data_len - test_data_len % args.num_ipus
tic = time.perf_counter()
model_loss = model.evaluate(test_dataset, batch_size=1, steps=test_steps)
print(f"Done. Test loss {model_loss}")
toc = time.perf_counter()
duration = toc - tic
print(f"Testing time duration {duration}")
print('Completed')
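# Illustrative sketch, not part of the original example: the "product divided
# by the greatest common divisor" mentioned in the steps_per_execution comment
# above is simply the least common multiple.
def _lcm_sketch(a=6, b=4):
    """Return lcm(a, b) computed as a * b // gcd(a, b); lcm(6, 4) == 12."""
    from math import gcd
    return a * b // gcd(a, b)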
| 39.815603
| 151
| 0.571785
|
bfccf89b0fda0be1100764290681a53e022947e0
| 2,327
|
py
|
Python
|
youtube_dl/extractor/karaoketv.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 66,635
|
2019-03-10T21:34:18.000Z
|
2022-03-31T23:50:31.000Z
|
youtube_dl/extractor/karaoketv.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 10,936
|
2019-03-10T21:35:47.000Z
|
2022-03-31T23:46:52.000Z
|
youtube_dl/extractor/karaoketv.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 15,194
|
2019-03-10T21:09:27.000Z
|
2022-03-31T22:13:49.000Z
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class KaraoketvIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?karaoketv\.co\.il/[^/]+/(?P<id>\d+)'
_TEST = {
'url': 'http://www.karaoketv.co.il/%D7%A9%D7%99%D7%A8%D7%99_%D7%A7%D7%A8%D7%99%D7%95%D7%A7%D7%99/58356/%D7%90%D7%99%D7%96%D7%95%D7%9F',
'info_dict': {
'id': '58356',
'ext': 'flv',
'title': 'קריוקי של איזון',
},
'params': {
# rtmp download
'skip_download': True,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
api_page_url = self._search_regex(
r'<iframe[^>]+src=(["\'])(?P<url>https?://www\.karaoke\.co\.il/api_play\.php\?.+?)\1',
webpage, 'API play URL', group='url')
api_page = self._download_webpage(api_page_url, video_id)
video_cdn_url = self._search_regex(
r'<iframe[^>]+src=(["\'])(?P<url>https?://www\.video-cdn\.com/embed/iframe/.+?)\1',
api_page, 'video cdn URL', group='url')
video_cdn = self._download_webpage(video_cdn_url, video_id)
play_path = self._parse_json(
self._search_regex(
r'var\s+options\s*=\s*({.+?});', video_cdn, 'options'),
video_id)['clip']['url']
settings = self._parse_json(
self._search_regex(
r'var\s+settings\s*=\s*({.+?});', video_cdn, 'servers', default='{}'),
video_id, fatal=False) or {}
servers = settings.get('servers')
if not servers or not isinstance(servers, list):
servers = ('wowzail.video-cdn.com:80/vodcdn', )
formats = [{
'url': 'rtmp://%s' % server if not server.startswith('rtmp') else server,
'play_path': play_path,
'app': 'vodcdn',
'page_url': video_cdn_url,
'player_url': 'http://www.video-cdn.com/assets/flowplayer/flowplayer.commercial-3.2.18.swf',
'rtmp_real_time': True,
'ext': 'flv',
} for server in servers]
return {
'id': video_id,
'title': self._og_search_title(webpage),
'formats': formats,
}
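# Illustrative sketch, not part of the extractor: extractors like this one are
# normally driven through youtube-dl's public YoutubeDL API rather than being
# instantiated directly.  The options dictionary here is an assumption chosen
# for a metadata-only run.
def _extractor_usage_sketch(url):
    import youtube_dl
    with youtube_dl.YoutubeDL({'skip_download': True, 'quiet': True}) as ydl:
        # dispatches to the matching extractor (KaraoketvIE for karaoketv.co.il)
        return ydl.extract_info(url, download=False)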
| 35.8
| 143
| 0.539321
|
7af338af427cbdb17abd44d71e8dff64740de4ac
| 9,860
|
py
|
Python
|
qiskit_aqua/quantumalgorithm.py
|
eugescu/aqua
|
dff039691c0246be025c62027f99eee63f0aa3e1
|
[
"Apache-2.0"
] | null | null | null |
qiskit_aqua/quantumalgorithm.py
|
eugescu/aqua
|
dff039691c0246be025c62027f99eee63f0aa3e1
|
[
"Apache-2.0"
] | null | null | null |
qiskit_aqua/quantumalgorithm.py
|
eugescu/aqua
|
dff039691c0246be025c62027f99eee63f0aa3e1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
This module implements the abstract base class for algorithm modules.
To create add-on algorithm modules subclass the QuantumAlgorithm
class in this module.
Doing so requires that the required algorithm interface is implemented.
"""
from abc import ABC, abstractmethod
import logging
import sys
import numpy as np
import qiskit
from qiskit import __version__ as qiskit_version
from qiskit.backends.ibmq.credentials import Credentials
from qiskit.backends.ibmq.ibmqsingleprovider import IBMQSingleProvider
from qiskit_aqua import AlgorithmError
from qiskit_aqua.utils import run_circuits
from qiskit_aqua import Preferences
logger = logging.getLogger(__name__)
class QuantumAlgorithm(ABC):
# Configuration dictionary keys
SECTION_KEY_ALGORITHM = 'algorithm'
SECTION_KEY_OPTIMIZER = 'optimizer'
SECTION_KEY_VAR_FORM = 'variational_form'
SECTION_KEY_INITIAL_STATE = 'initial_state'
SECTION_KEY_IQFT = 'iqft'
SECTION_KEY_ORACLE = 'oracle'
SECTION_KEY_FEATURE_MAP = 'feature_map'
SECTION_KEY_MULTICLASS_EXTENSION = 'multiclass_extension'
MAX_CIRCUITS_PER_JOB = 300
UNSUPPORTED_BACKENDS = [
'unitary_simulator', 'clifford_simulator']
EQUIVALENT_BACKENDS = {'statevector_simulator_py': 'statevector_simulator',
'statevector_simulator_sympy': 'statevector_simulator',
'statevector_simulator_projectq': 'statevector_simulator',
'qasm_simulator_py': 'qasm_simulator',
'qasm_simulator_projectq': 'qasm_simulator'
}
"""
Base class for Algorithms.
This method should initialize the module and its configuration, and
raise an exception if a component of the module is not available.
Args:
configuration (dict): configuration dictionary
"""
@abstractmethod
def __init__(self, configuration=None):
self._configuration = configuration
self._backend = None
self._execute_config = {}
self._qjob_config = {}
self._random_seed = None
self._random = None
self._show_circuit_summary = False
@property
def configuration(self):
"""Return algorithm configuration"""
return self._configuration
@property
def random_seed(self):
"""Return random seed"""
return self._random_seed
@random_seed.setter
def random_seed(self, seed):
"""Set random seed"""
self._random_seed = seed
@property
def random(self):
"""Return a numpy random"""
if self._random is None:
if self._random_seed is None:
self._random = np.random
else:
self._random = np.random.RandomState(self._random_seed)
return self._random
@property
def backend(self):
"""Return backend"""
return self._backend
def enable_circuit_summary(self):
"""Enable showing the summary of circuits"""
self._show_circuit_summary = True
def disable_circuit_summary(self):
"""Disable showing the summary of circuits"""
self._show_circuit_summary = False
def setup_quantum_backend(self, backend='statevector_simulator', shots=1024, skip_transpiler=False,
noise_params=None, coupling_map=None, initial_layout=None, hpc_params=None,
basis_gates=None, max_credits=10, timeout=None, wait=5):
"""
Setup the quantum backend.
Args:
backend (str): name of selected backend
shots (int): number of shots for the backend
skip_transpiler (bool): skip most of the compile steps and produce qobj directly
noise_params (dict): the noise setting for simulator
coupling_map (list): coupling map (perhaps custom) to target in mapping
initial_layout (dict): initial layout of qubits in mapping
hpc_params (dict): HPC simulator parameters
basis_gates (str): comma-separated basis gate set to compile to
max_credits (int): maximum credits to use
timeout (float or None): seconds to wait for job. If None, wait indefinitely.
wait (float): seconds between queries
Raises:
AlgorithmError: set backend with invalid Qconfig
"""
operational_backends = self.register_and_get_operational_backends()
if QuantumAlgorithm.EQUIVALENT_BACKENDS.get(backend, backend) not in operational_backends:
raise AlgorithmError("This backend '{}' is not operational for the quantum algorithm, \
select any one below: {}".format(backend, operational_backends))
self._backend = backend
self._qjob_config = {'timeout': timeout,
'wait': wait}
shots = 1 if 'statevector' in backend else shots
noise_params = noise_params if 'simulator' in backend else None
my_backend = None
try:
my_backend = qiskit.Aer.get_backend(backend)
self._qjob_config.pop('wait', None)
self.MAX_CIRCUITS_PER_JOB = sys.maxsize
except KeyError:
my_backend = qiskit.IBMQ.get_backend(backend)
if coupling_map is None:
coupling_map = my_backend.configuration()['coupling_map']
if basis_gates is None:
basis_gates = my_backend.configuration()['basis_gates']
self._execute_config = {'shots': shots,
'skip_transpiler': skip_transpiler,
'config': {"noise_params": noise_params},
'basis_gates': basis_gates,
'coupling_map': coupling_map,
'initial_layout': initial_layout,
'max_credits': max_credits,
'seed': self._random_seed,
'qobj_id': None,
'hpc': hpc_params}
info = "Algorithm: '{}' setup with backend '{}', with following setting:\n {}\n{}".format(
self._configuration['name'], my_backend.configuration()['name'], self._execute_config, self._qjob_config)
logger.info('Qiskit Terra version {}'.format(qiskit_version))
logger.info(info)
def execute(self, circuits):
"""
A wrapper for all algorithms to interface with quantum backend.
Args:
circuits (QuantumCircuit or list[QuantumCircuit]): circuits to execute
Returns:
Result: Result object
"""
result = run_circuits(circuits, self._backend, self._execute_config,
self._qjob_config, max_circuits_per_job=self.MAX_CIRCUITS_PER_JOB,
show_circuit_summary=self._show_circuit_summary)
if self._show_circuit_summary:
self.disable_circuit_summary()
return result
@staticmethod
def register_and_get_operational_backends(token=None, url=None, **kwargs):
# update registration info using internal methods because:
# at this point I don't want to save to or remove credentials from disk
# I want to update url, proxies etc without removing token and
# re-adding in 2 methods
try:
credentials = None
if token is not None:
credentials = Credentials(token, url, **kwargs)
else:
preferences = Preferences()
if preferences.get_token() is not None:
credentials = Credentials(preferences.get_token(),
preferences.get_url(),
proxies=preferences.get_proxies({}))
if credentials is not None:
qiskit.IBMQ._accounts[credentials.unique_id()] = IBMQSingleProvider(
credentials, qiskit.IBMQ)
logger.debug("Registered with Qiskit successfully.")
except Exception as e:
logger.debug(
"Failed to register with Qiskit: {}".format(str(e)))
backends = set()
local_backends = [x.name() for x in qiskit.Aer.backends()]
for local_backend in local_backends:
backend = None
for group_name, names in qiskit.Aer.grouped_backend_names().items():
if local_backend in names:
backend = group_name
break
if backend is None:
backend = local_backend
supported = True
for unsupported_backend in QuantumAlgorithm.UNSUPPORTED_BACKENDS:
if backend.startswith(unsupported_backend):
supported = False
break
if supported:
backends.add(backend)
return list(backends) + [x.name() for x in qiskit.IBMQ.backends()]
@abstractmethod
def init_params(self, params, algo_input):
pass
@abstractmethod
def run(self):
pass
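# Illustrative sketch, not part of qiskit_aqua: the minimal shape of a concrete
# algorithm built on top of QuantumAlgorithm.  The class name, configuration
# dictionary and method bodies are hypothetical placeholders, not a real Aqua
# component.
class _ExampleAlgorithm(QuantumAlgorithm):
    _EXAMPLE_CONFIGURATION = {'name': 'ExampleAlgorithm'}
    def __init__(self, configuration=None):
        super().__init__(configuration or self._EXAMPLE_CONFIGURATION)
    def init_params(self, params, algo_input):
        # translate the parameter dictionary and algorithm input into state
        self._params = params
        self._algo_input = algo_input
    def run(self):
        # a real subclass would build circuits and call self.execute(circuits)
        return {}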
| 38.217054
| 117
| 0.616734
|
9f9d2e65e7b07cc7298e8bb3c3c04602b2eca1ce
| 20,550
|
py
|
Python
|
flux_combined_high_binding/model_346.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_346.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_346.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 40000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 100000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.138889
| 798
| 0.804136
|
ca643e38acf3a1cdd4a8a41897cf3fb77ece8f13
| 586
|
py
|
Python
|
tf/za/yield/test.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
tf/za/yield/test.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
tf/za/yield/test.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
#%%
import numpy as np
def data_generator():
dataset = np.array(range(2))
for d in dataset:
yield d
# %%
for i in data_generator():
print(i)
#%%
import tensorflow as tf
dataset = tf.data.Dataset.from_generator(data_generator, (tf.int32))
dataset = dataset.repeat(3)
dataset = dataset.batch(10)
for i in dataset:
print(i)
break
# %%
class DataGenerator:
def __init__(self):
pass
def __call__(self):
dataset = np.array(range(2))
for d in dataset:
yield d
t = DataGenerator()
for i in t():
print(i)
# %%
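# %%
# A minimal sketch: the DataGenerator instance `t` above is callable, so it can
# also be fed to tf.data.Dataset.from_generator, same pattern as the
# function-based cell earlier (output_types is one accepted way to declare the
# element type; the variable names here are illustrative).
dataset2 = tf.data.Dataset.from_generator(t, output_types=tf.int32)
dataset2 = dataset2.repeat(3)
dataset2 = dataset2.batch(4)
for i in dataset2:
    print(i)
    break
# %%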
| 15.421053
| 68
| 0.607509
|
f97ffef5aca53573c532f11418c516d6a6945744
| 138
|
py
|
Python
|
examples/override_404.py
|
izi-global/izir
|
d1a4bfb5c082c3de1956402ef0280564014a3bd8
|
[
"MIT"
] | null | null | null |
examples/override_404.py
|
izi-global/izir
|
d1a4bfb5c082c3de1956402ef0280564014a3bd8
|
[
"MIT"
] | 5
|
2021-03-18T21:01:05.000Z
|
2022-03-11T23:29:48.000Z
|
examples/override_404.py
|
izi-global/izir
|
d1a4bfb5c082c3de1956402ef0280564014a3bd8
|
[
"MIT"
] | null | null | null |
import izi
@izi.get()
def hello_world():
return 'Hello world!'
@izi.not_found()
def not_found():
return {'Nothing': 'to see'}
| 11.5
| 32
| 0.630435
|
33846895a25c8a162a51d8194ba3bfd6a9dd1caa
| 4,192
|
py
|
Python
|
libs/chat_stats.py
|
Cactiw/Room-Bot-Public
|
f627fe4485d6949ee49bcd7afb2f33cbc0243e73
|
[
"MIT"
] | 3
|
2018-12-27T19:46:38.000Z
|
2019-06-16T17:43:32.000Z
|
libs/chat_stats.py
|
Cactiw/Room-Bot-Public
|
f627fe4485d6949ee49bcd7afb2f33cbc0243e73
|
[
"MIT"
] | 1
|
2019-03-14T21:16:42.000Z
|
2019-03-14T21:16:42.000Z
|
libs/chat_stats.py
|
Cactiw/Room-Bot-Public
|
f627fe4485d6949ee49bcd7afb2f33cbc0243e73
|
[
"MIT"
] | 2
|
2019-03-12T16:09:46.000Z
|
2019-03-14T20:18:38.000Z
|
from work_materials.globals import cursor, conn
from telegram.ext import BaseFilter
CREATE_ROW_RETURN_CODE = 10
class ChatStats:
def __init__(self, chat_id, chat_name, message_count, text_messages_count, stickers_messages_count, document_messages_count,
audio_messages_count, voice_messages_count, photo_messages_count, video_messages_count):
self.chat_id = int(chat_id)
self.chat_name = chat_name
self.message_count = int(message_count)
self.text_messages_count = int(text_messages_count)
self.stickers_messages_count = int(stickers_messages_count)
self.document_messages_count = int(document_messages_count)
self.voice_messages_count = int(voice_messages_count)
self.audio_messages_count = int(audio_messages_count)
self.photo_messages_count = int(photo_messages_count)
self.video_messages_count = int(video_messages_count)
self.messages_without_update = 0
def process_message(self, message):
self.message_count += 1
if message.text:
self.text_messages_count += 1
elif message.video:
self.video_messages_count += 1
elif message.audio:
self.audio_messages_count += 1
elif message.photo:
self.photo_messages_count += 1
elif message.document:
self.document_messages_count += 1
elif message.sticker:
self.stickers_messages_count += 1
elif message.voice:
self.voice_messages_count += 1
self.messages_without_update += 1
if self.messages_without_update >= 20:
self.update_to_database()
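# Note: messages_without_update is never reset after update_to_database(), so
# once 20 messages accumulate, every later message triggers a database write.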
def update_from_database(self):
request = "select chat_name, message_count, text_messages_count, stickers_messages_count, " \
"audio_messages_count, photo_messages_count, video_messages_count, document_messages_count, " \
"voice__messages_count from stats where chat_id = %s"
cursor.execute(request, (self.chat_id,))
row = cursor.fetchone()
if row is None:
request = "insert into stats(chat_id, chat_name, message_count, text_messages_count, stickers_messages_count, " \
"audio_messages_count, photo_messages_count, video_messages_count, document_messages_count, " \
"voice__messages_count) values(%s, %s, %s,%s, %s, %s,%s, %s, %s, %s)"
cursor.execute(request, (self.chat_id, self.chat_name, self.message_count, self.text_messages_count,
self.stickers_messages_count, self.audio_messages_count, self.photo_messages_count,
self.video_messages_count, self.document_messages_count, self.voice_messages_count))
conn.commit()
return CREATE_ROW_RETURN_CODE
self.chat_name = row[0]
self.message_count = row[1]
self.text_messages_count = row[2]
self.stickers_messages_count = row[3]
self.audio_messages_count = row[4]
self.photo_messages_count = row[5]
self.video_messages_count = row[6]
self.document_messages_count = row[7]
self.voice_messages_count = row[8]
return
def update_to_database(self):
request = "update stats set message_count = %s, text_messages_count = %s, stickers_messages_count = %s, " \
"audio_messages_count = %s, photo_messages_count = %s," \
"video_messages_count = %s, document_messages_count = %s, voice__messages_count = %s" \
" where chat_id = %s"
cursor.execute(request, (self.message_count, self.text_messages_count,
self.stickers_messages_count, self.audio_messages_count,
self.photo_messages_count, self.video_messages_count,
self.document_messages_count, self.voice_messages_count, self.chat_id))
conn.commit()
class FilterAnyMessage(BaseFilter):
def filter(self, message):
return True
filter_any_message = FilterAnyMessage()
| 47.636364
| 128
| 0.652433
|
6591d47dcb020e8ca562c1f727a1215a78d1623a
| 4,499
|
py
|
Python
|
src/api/transaction/user.py
|
piwaniuk/critic
|
28ed20bb8032d7cc5aa23de98da51e619fd84164
|
[
"Apache-2.0"
] | 216
|
2015-01-05T12:48:10.000Z
|
2022-03-08T00:12:23.000Z
|
src/api/transaction/user.py
|
piwaniuk/critic
|
28ed20bb8032d7cc5aa23de98da51e619fd84164
|
[
"Apache-2.0"
] | 55
|
2015-02-28T12:10:26.000Z
|
2020-11-18T17:45:16.000Z
|
src/api/transaction/user.py
|
piwaniuk/critic
|
28ed20bb8032d7cc5aa23de98da51e619fd84164
|
[
"Apache-2.0"
] | 34
|
2015-05-02T15:15:10.000Z
|
2020-06-15T19:20:37.000Z
|
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2015 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import api
class ModifyUser(object):
def __init__(self, transaction, user):
self.transaction = transaction
self.user = user
def setFullname(self, value):
self.transaction.tables.add("users")
self.transaction.items.append(
api.transaction.Query(
"""UPDATE users
SET fullname=%s
WHERE id=%s""",
(value, self.user.id)))
# Repository filters
# ==================
def createFilter(self, filter_type, repository, path, delegates,
callback=None):
assert filter_type in ("reviewer", "watcher", "ignore")
assert isinstance(repository, api.repository.Repository)
assert all(isinstance(delegate, api.user.User)
for delegate in delegates)
def collectCreatedFilter(filter_id):
if callback:
callback(api.filters.fetchRepositoryFilter(
self.transaction.critic, filter_id))
self.transaction.tables.add("filters")
self.transaction.items.append(
api.transaction.Query(
"""INSERT
INTO filters (uid, repository, path, type, delegate)
VALUES (%s, %s, %s, %s, %s)
RETURNING id""",
(self.user.id, repository.id, path, filter_type,
",".join(delegate.name for delegate in delegates)),
collector=collectCreatedFilter))
def modifyFilter(self, repository_filter):
from filters import ModifyRepositoryFilter
assert repository_filter.subject == self.user
return ModifyRepositoryFilter(self.transaction, repository_filter)
# Access tokens
# =============
def createAccessToken(self, access_type, title, callback=None):
import auth
import base64
from accesstoken import CreatedAccessToken
critic = self.transaction.critic
if access_type != "user":
api.PermissionDenied.raiseUnlessAdministrator(critic)
user_id = self.user.id if access_type == "user" else None
part1 = auth.getToken(encode=base64.b64encode, length=12)
part2 = auth.getToken(encode=base64.b64encode, length=21)
access_token = CreatedAccessToken(
critic, self.user if access_type == "user" else None, callback)
self.transaction.tables.update(("accesstokens",
"accesscontrolprofiles"))
self.transaction.items.append(
api.transaction.Query(
"""INSERT
INTO accesstokens (access_type, uid, part1, part2, title)
VALUES (%s, %s, %s, %s, %s)
RETURNING id""",
(access_type, user_id, part1, part2, title),
collector=access_token))
self.transaction.items.append(
api.transaction.Query(
"""INSERT
INTO accesscontrolprofiles (access_token)
VALUES (%s)
RETURNING id""",
(access_token,),
collector=access_token.profile))
return access_token
def modifyAccessToken(self, access_token):
from accesstoken import ModifyAccessToken
assert access_token.user == self.user
critic = self.transaction.critic
if critic.access_token and critic.access_token == access_token:
# Don't allow any modifications of the access token used to
# authenticate. This could for instance be used to remove the
# access restrictions of the token, which would obviously be bad.
raise api.PermissionDenied("Access token used to authenticate")
return ModifyAccessToken(self.transaction, access_token)
| 37.491667
| 80
| 0.611691
|
f5f5418ac0d30c264db1d347cf0728626a62bec4
| 1,295
|
py
|
Python
|
generate.py
|
Hwa-Jong/GAN
|
0977f1483a9a777b50ccabbde3b8e919aae7f057
|
[
"MIT"
] | null | null | null |
generate.py
|
Hwa-Jong/GAN
|
0977f1483a9a777b50ccabbde3b8e919aae7f057
|
[
"MIT"
] | null | null | null |
generate.py
|
Hwa-Jong/GAN
|
0977f1483a9a777b50ccabbde3b8e919aae7f057
|
[
"MIT"
] | null | null | null |
import argparse
import os
from training.train_loop import validation
# ----------------------------------------------------------------------------
def _str_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
# ----------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(
description='DCGAN',
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('--load_path', help='model load path', required=True)
parser.add_argument('--generate_num', help='The number of generated images', default=16, type=int)
parser.add_argument('--result_dir', help='Root directory for run results', default='results')
parser.add_argument('--seed', help='Set seed', default=22222, type=int)
args = parser.parse_args()
validation(**vars(args))
# ----------------------------------------------------------------------------
if __name__ == "__main__":
main()
# ----------------------------------------------------------------------------
| 30.833333
| 102
| 0.489575
|
4b1105cf59266fd1dbeae52f2c036ed74dfcd9ae
| 640
|
py
|
Python
|
atcoder/other/s8_1b.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | 1
|
2018-11-12T15:18:55.000Z
|
2018-11-12T15:18:55.000Z
|
atcoder/other/s8_1b.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
atcoder/other/s8_1b.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
H, W, N = map(int, input().split())
berry = [tuple(int(x) for x in input().split()) for _ in range(N)]
ans = []
for i in range(1, W+1):
up = 0
for x, y in berry:
if i * y == H * x:
break
elif i * y > H * x:
up += 1
else:
if up * 2 == N:
ans.append((i, H))
for i in range(1, H):
up = 0
for x, y in berry:
if W * y == i * x:
break
elif W * y > i * x:
up += 1
else:
if up * 2 == N:
ans.append((W, i))
ans.sort()
if ans:
for x, y in ans:
print('({},{})'.format(x, y))
else:
print(-1)
| 21.333333
| 66
| 0.39375
|
76839e9b66e8baf1e5f18a056db6358ab54c0ebf
| 1,351
|
py
|
Python
|
myapp/retrieve data.py
|
NirajDharamshi/IBM-Hackathon
|
ef5c9bfa6a62dbe5428f6864316d04cfee1ba1e5
|
[
"Apache-2.0"
] | null | null | null |
myapp/retrieve data.py
|
NirajDharamshi/IBM-Hackathon
|
ef5c9bfa6a62dbe5428f6864316d04cfee1ba1e5
|
[
"Apache-2.0"
] | null | null | null |
myapp/retrieve data.py
|
NirajDharamshi/IBM-Hackathon
|
ef5c9bfa6a62dbe5428f6864316d04cfee1ba1e5
|
[
"Apache-2.0"
] | 1
|
2019-01-25T23:30:49.000Z
|
2019-01-25T23:30:49.000Z
|
import cloudant
from cloudant.client import Cloudant
from cloudant.error import CloudantException
from cloudant.result import Result, ResultByKey, QueryResult
serviceURL = "https://fe2e18ed-d0e4-4636-8a9d-1c154f97609f-bluemix:9cea356b0d2b461434c454fa379bf5fba7b48b99d90d072242dd7a2b61885f74@fe2e18ed-d0e4-4636-8a9d-1c154f97609f-bluemix.cloudant.com"
client = Cloudant("fe2e18ed-d0e4-4636-8a9d-1c154f97609f-bluemix","9cea356b0d2b461434c454fa379bf5fba7b48b99d90d072242dd7a2b61885f74", url="https://fe2e18ed-d0e4-4636-8a9d-1c154f97609f-bluemix:9cea356b0d2b461434c454fa379bf5fba7b48b99d90d072242dd7a2b61885f74@fe2e18ed-d0e4-4636-8a9d-1c154f97609f-bluemix.cloudant.com")
client.connect()
databaseName = "volunteer"
myDatabase = client.create_database(databaseName)
if myDatabase.exists():
print("'{0}' successfully created.\n".format(databaseName))
#end_point = '{0}/{1}'.format(serviceURL, databaseName + "/_all_docs")
#params = {'include_docs': 'true'}
#response = client.r_session.get(end_point, params=params)
#print "{0}\n".format(response.json())
selector = {"selector": {"_id": {"$gt": "0"}},"fields": ["latField","lonField"],"sort": [{"_id": "asc"}]}
query = cloudant.query.Query(myDatabase, selector={'_id': {'$gt': 0}},fields=['latField', 'lonField', 'weightField', 'itemsField'])
for doc in query(limit=100)['docs']:
    print(doc)
| 43.580645
| 315
| 0.771281
|
11a32402cda2ddb4b107fa1f21e44ac2ad4f0afe
| 35,763
|
py
|
Python
|
tornado/http1connection.py
|
LetItGrow/tornado
|
b99358e2be389b0cb1c3340431f9bbbabeb09d98
|
[
"Apache-2.0"
] | 1
|
2019-06-17T08:55:53.000Z
|
2019-06-17T08:55:53.000Z
|
tornado/http1connection.py
|
sgy-ecnu/tornado
|
c50aed0f96d92f9b0ef4cd0837c0104f140ca77e
|
[
"Apache-2.0"
] | null | null | null |
tornado/http1connection.py
|
sgy-ecnu/tornado
|
c50aed0f96d92f9b0ef4cd0837c0104f140ca77e
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client and server implementations of HTTP/1.x.
.. versionadded:: 4.0
"""
import asyncio
import logging
import re
import types
from tornado.concurrent import (
Future,
future_add_done_callback,
future_set_result_unless_cancelled,
)
from tornado.escape import native_str, utf8
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado.log import gen_log, app_log
from tornado.util import GzipDecompressor
from typing import cast, Optional, Type, Awaitable, Callable, Union, Tuple
class _QuietException(Exception):
def __init__(self) -> None:
pass
class _ExceptionLoggingContext(object):
"""Used with the ``with`` statement when calling delegate methods to
log any exceptions with the given logger. Any exceptions caught are
converted to _QuietException
"""
def __init__(self, logger: logging.Logger) -> None:
self.logger = logger
def __enter__(self) -> None:
pass
def __exit__(
self,
typ: "Optional[Type[BaseException]]",
value: Optional[BaseException],
tb: types.TracebackType,
) -> None:
if value is not None:
assert typ is not None
self.logger.error("Uncaught exception", exc_info=(typ, value, tb))
raise _QuietException
class HTTP1ConnectionParameters(object):
"""Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`.
"""
def __init__(
self,
no_keep_alive: bool = False,
chunk_size: Optional[int] = None,
max_header_size: Optional[int] = None,
header_timeout: Optional[float] = None,
max_body_size: Optional[int] = None,
body_timeout: Optional[float] = None,
decompress: bool = False,
) -> None:
"""
:arg bool no_keep_alive: If true, always close the connection after
one request.
:arg int chunk_size: how much data to read into memory at once
:arg int max_header_size: maximum amount of data for HTTP headers
:arg float header_timeout: how long to wait for all headers (seconds)
:arg int max_body_size: maximum amount of data for body
:arg float body_timeout: how long to wait while reading body (seconds)
:arg bool decompress: if true, decode incoming
``Content-Encoding: gzip``
"""
self.no_keep_alive = no_keep_alive
self.chunk_size = chunk_size or 65536
self.max_header_size = max_header_size or 65536
self.header_timeout = header_timeout
self.max_body_size = max_body_size
self.body_timeout = body_timeout
self.decompress = decompress
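# A minimal usage sketch (assuming `stream` is an IOStream supplied by the
# caller; the numeric values are illustrative, not required defaults):
#
#     params = HTTP1ConnectionParameters(
#         chunk_size=64 * 1024,      # read at most 64 KiB at a time
#         max_header_size=8 * 1024,  # cap the size of the header block
#         decompress=True,           # transparently decode gzip bodies
#     )
#     conn = HTTP1Connection(stream, True, params)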
class HTTP1Connection(httputil.HTTPConnection):
"""Implements the HTTP/1.x protocol.
This class can be used on its own for clients, or via `HTTP1ServerConnection`
for servers.
"""
def __init__(
self,
stream: iostream.IOStream,
is_client: bool,
params: Optional[HTTP1ConnectionParameters] = None,
context: Optional[object] = None,
) -> None:
"""
:arg stream: an `.IOStream`
:arg bool is_client: client or server
:arg params: a `.HTTP1ConnectionParameters` instance or ``None``
:arg context: an opaque application-defined object that can be accessed
as ``connection.context``.
"""
self.is_client = is_client
self.stream = stream
if params is None:
params = HTTP1ConnectionParameters()
self.params = params
self.context = context
self.no_keep_alive = params.no_keep_alive
# The body limits can be altered by the delegate, so save them
# here instead of just referencing self.params later.
self._max_body_size = self.params.max_body_size or self.stream.max_buffer_size
self._body_timeout = self.params.body_timeout
# _write_finished is set to True when finish() has been called,
# i.e. there will be no more data sent. Data may still be in the
# stream's write buffer.
self._write_finished = False
# True when we have read the entire incoming body.
self._read_finished = False
# _finish_future resolves when all data has been written and flushed
# to the IOStream.
self._finish_future = Future() # type: Future[None]
# If true, the connection should be closed after this request
# (after the response has been written in the server side,
# and after it has been read in the client)
self._disconnect_on_finish = False
self._clear_callbacks()
# Save the start lines after we read or write them; they
# affect later processing (e.g. 304 responses and HEAD methods
# have content-length but no bodies)
self._request_start_line = None # type: Optional[httputil.RequestStartLine]
self._response_start_line = None # type: Optional[httputil.ResponseStartLine]
self._request_headers = None # type: Optional[httputil.HTTPHeaders]
# True if we are writing output with chunked encoding.
self._chunking_output = False
# While reading a body with a content-length, this is the
# amount left to read.
self._expected_content_remaining = None # type: Optional[int]
# A Future for our outgoing writes, returned by IOStream.write.
self._pending_write = None # type: Optional[Future[None]]
def read_response(self, delegate: httputil.HTTPMessageDelegate) -> Awaitable[bool]:
"""Read a single HTTP response.
Typical client-mode usage is to write a request using `write_headers`,
`write`, and `finish`, and then call ``read_response``.
:arg delegate: a `.HTTPMessageDelegate`
Returns a `.Future` that resolves to a bool after the full response has
been read. The result is true if the stream is still open.
"""
if self.params.decompress:
delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
return self._read_message(delegate)
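# A minimal client-mode sketch of the flow described in the docstring above
# (`stream` and `delegate` are assumed to be provided by the caller):
#
#     conn = HTTP1Connection(stream, is_client=True)
#     start_line = httputil.RequestStartLine("GET", "/", "HTTP/1.1")
#     await conn.write_headers(start_line, httputil.HTTPHeaders({"Host": "example.com"}))
#     conn.finish()
#     still_open = await conn.read_response(delegate)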
async def _read_message(self, delegate: httputil.HTTPMessageDelegate) -> bool:
need_delegate_close = False
try:
header_future = self.stream.read_until_regex(
b"\r?\n\r?\n", max_bytes=self.params.max_header_size
)
if self.params.header_timeout is None:
header_data = await header_future
else:
try:
header_data = await gen.with_timeout(
self.stream.io_loop.time() + self.params.header_timeout,
header_future,
quiet_exceptions=iostream.StreamClosedError,
)
except gen.TimeoutError:
self.close()
return False
start_line_str, headers = self._parse_headers(header_data)
if self.is_client:
resp_start_line = httputil.parse_response_start_line(start_line_str)
self._response_start_line = resp_start_line
start_line = (
resp_start_line
) # type: Union[httputil.RequestStartLine, httputil.ResponseStartLine]
# TODO: this will need to change to support client-side keepalive
self._disconnect_on_finish = False
else:
req_start_line = httputil.parse_request_start_line(start_line_str)
self._request_start_line = req_start_line
self._request_headers = headers
start_line = req_start_line
self._disconnect_on_finish = not self._can_keep_alive(
req_start_line, headers
)
need_delegate_close = True
with _ExceptionLoggingContext(app_log):
header_recv_future = delegate.headers_received(start_line, headers)
if header_recv_future is not None:
await header_recv_future
if self.stream is None:
# We've been detached.
need_delegate_close = False
return False
skip_body = False
if self.is_client:
assert isinstance(start_line, httputil.ResponseStartLine)
if (
self._request_start_line is not None
and self._request_start_line.method == "HEAD"
):
skip_body = True
code = start_line.code
if code == 304:
# 304 responses may include the content-length header
# but do not actually have a body.
# http://tools.ietf.org/html/rfc7230#section-3.3
skip_body = True
if code >= 100 and code < 200:
# 1xx responses should never indicate the presence of
# a body.
if "Content-Length" in headers or "Transfer-Encoding" in headers:
raise httputil.HTTPInputError(
"Response code %d cannot have body" % code
)
# TODO: client delegates will get headers_received twice
# in the case of a 100-continue. Document or change?
await self._read_message(delegate)
else:
if headers.get("Expect") == "100-continue" and not self._write_finished:
self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
if not skip_body:
body_future = self._read_body(
resp_start_line.code if self.is_client else 0, headers, delegate
)
if body_future is not None:
if self._body_timeout is None:
await body_future
else:
try:
await gen.with_timeout(
self.stream.io_loop.time() + self._body_timeout,
body_future,
quiet_exceptions=iostream.StreamClosedError,
)
except gen.TimeoutError:
gen_log.info("Timeout reading body from %s", self.context)
self.stream.close()
return False
self._read_finished = True
if not self._write_finished or self.is_client:
need_delegate_close = False
with _ExceptionLoggingContext(app_log):
delegate.finish()
# If we're waiting for the application to produce an asynchronous
# response, and we're not detached, register a close callback
# on the stream (we didn't need one while we were reading)
if (
not self._finish_future.done()
and self.stream is not None
and not self.stream.closed()
):
self.stream.set_close_callback(self._on_connection_close)
await self._finish_future
if self.is_client and self._disconnect_on_finish:
self.close()
if self.stream is None:
return False
except httputil.HTTPInputError as e:
gen_log.info("Malformed HTTP message from %s: %s", self.context, e)
if not self.is_client:
await self.stream.write(b"HTTP/1.1 400 Bad Request\r\n\r\n")
self.close()
return False
finally:
if need_delegate_close:
with _ExceptionLoggingContext(app_log):
delegate.on_connection_close()
header_future = None # type: ignore
self._clear_callbacks()
return True
def _clear_callbacks(self) -> None:
"""Clears the callback attributes.
This allows the request handler to be garbage collected more
quickly in CPython by breaking up reference cycles.
"""
self._write_callback = None
self._write_future = None # type: Optional[Future[None]]
self._close_callback = None # type: Optional[Callable[[], None]]
if self.stream is not None:
self.stream.set_close_callback(None)
def set_close_callback(self, callback: Optional[Callable[[], None]]) -> None:
"""Sets a callback that will be run when the connection is closed.
Note that this callback is slightly different from
`.HTTPMessageDelegate.on_connection_close`: The
`.HTTPMessageDelegate` method is called when the connection is
closed while receiving a message. This callback is used when
there is not an active delegate (for example, on the server
side this callback is used if the client closes the connection
after sending its request but before receiving all of the
response).
"""
self._close_callback = callback
def _on_connection_close(self) -> None:
# Note that this callback is only registered on the IOStream
# when we have finished reading the request and are waiting for
# the application to produce its response.
if self._close_callback is not None:
callback = self._close_callback
self._close_callback = None
callback()
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
self._clear_callbacks()
def close(self) -> None:
if self.stream is not None:
self.stream.close()
self._clear_callbacks()
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
def detach(self) -> iostream.IOStream:
"""Take control of the underlying stream.
Returns the underlying `.IOStream` object and stops all further
HTTP processing. May only be called during
`.HTTPMessageDelegate.headers_received`. Intended for implementing
protocols like websockets that tunnel over an HTTP handshake.
"""
self._clear_callbacks()
stream = self.stream
self.stream = None # type: ignore
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
return stream
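# A hedged sketch of the intended use: a delegate implementing an upgrade-style
# protocol would call detach() from headers_received and keep the raw stream
# (the `self.conn` attribute is assumed to have been saved by the delegate):
#
#     class TunnelDelegate(httputil.HTTPMessageDelegate):
#         def headers_received(self, start_line, headers):
#             raw_stream = self.conn.detach()  # stop all further HTTP processing
#             # ...hand raw_stream to the tunneled protocol from here on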
def set_body_timeout(self, timeout: float) -> None:
"""Sets the body timeout for a single request.
Overrides the value from `.HTTP1ConnectionParameters`.
"""
self._body_timeout = timeout
def set_max_body_size(self, max_body_size: int) -> None:
"""Sets the body size limit for a single request.
Overrides the value from `.HTTP1ConnectionParameters`.
"""
self._max_body_size = max_body_size
def write_headers(
self,
start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
headers: httputil.HTTPHeaders,
chunk: Optional[bytes] = None,
) -> "Future[None]":
"""Implements `.HTTPConnection.write_headers`."""
lines = []
if self.is_client:
assert isinstance(start_line, httputil.RequestStartLine)
self._request_start_line = start_line
lines.append(utf8("%s %s HTTP/1.1" % (start_line[0], start_line[1])))
# Client requests with a non-empty body must have either a
# Content-Length or a Transfer-Encoding.
self._chunking_output = (
start_line.method in ("POST", "PUT", "PATCH")
and "Content-Length" not in headers
and (
"Transfer-Encoding" not in headers
or headers["Transfer-Encoding"] == "chunked"
)
)
else:
assert isinstance(start_line, httputil.ResponseStartLine)
assert self._request_start_line is not None
assert self._request_headers is not None
self._response_start_line = start_line
lines.append(utf8("HTTP/1.1 %d %s" % (start_line[1], start_line[2])))
self._chunking_output = (
# TODO: should this use
# self._request_start_line.version or
# start_line.version?
self._request_start_line.version == "HTTP/1.1"
# 1xx, 204 and 304 responses have no body (not even a zero-length
# body), and so should not have either Content-Length or
# Transfer-Encoding headers.
and start_line.code not in (204, 304)
and (start_line.code < 100 or start_line.code >= 200)
# No need to chunk the output if a Content-Length is specified.
and "Content-Length" not in headers
# Applications are discouraged from touching Transfer-Encoding,
# but if they do, leave it alone.
and "Transfer-Encoding" not in headers
)
# If connection to a 1.1 client will be closed, inform client
if (
self._request_start_line.version == "HTTP/1.1"
and self._disconnect_on_finish
):
headers["Connection"] = "close"
# If a 1.0 client asked for keep-alive, add the header.
if (
self._request_start_line.version == "HTTP/1.0"
and self._request_headers.get("Connection", "").lower() == "keep-alive"
):
headers["Connection"] = "Keep-Alive"
if self._chunking_output:
headers["Transfer-Encoding"] = "chunked"
if not self.is_client and (
self._request_start_line.method == "HEAD"
or cast(httputil.ResponseStartLine, start_line).code == 304
):
self._expected_content_remaining = 0
elif "Content-Length" in headers:
self._expected_content_remaining = int(headers["Content-Length"])
else:
self._expected_content_remaining = None
# TODO: headers are supposed to be of type str, but we still have some
# cases that let bytes slip through. Remove these native_str calls when those
# are fixed.
header_lines = (
native_str(n) + ": " + native_str(v) for n, v in headers.get_all()
)
lines.extend(l.encode("latin1") for l in header_lines)
for line in lines:
if b"\n" in line:
raise ValueError("Newline in header: " + repr(line))
future = None
if self.stream.closed():
future = self._write_future = Future()
future.set_exception(iostream.StreamClosedError())
future.exception()
else:
future = self._write_future = Future()
data = b"\r\n".join(lines) + b"\r\n\r\n"
if chunk:
data += self._format_chunk(chunk)
self._pending_write = self.stream.write(data)
future_add_done_callback(self._pending_write, self._on_write_complete)
return future
def _format_chunk(self, chunk: bytes) -> bytes:
if self._expected_content_remaining is not None:
self._expected_content_remaining -= len(chunk)
if self._expected_content_remaining < 0:
# Close the stream now to stop further framing errors.
self.stream.close()
raise httputil.HTTPOutputError(
"Tried to write more data than Content-Length"
)
if self._chunking_output and chunk:
# Don't write out empty chunks because that means END-OF-STREAM
# with chunked encoding
return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
else:
return chunk
def write(self, chunk: bytes) -> "Future[None]":
"""Implements `.HTTPConnection.write`.
For backwards compatibility it is allowed but deprecated to
skip `write_headers` and instead call `write()` with a
pre-encoded header block.
"""
future = None
if self.stream.closed():
future = self._write_future = Future()
self._write_future.set_exception(iostream.StreamClosedError())
self._write_future.exception()
else:
future = self._write_future = Future()
self._pending_write = self.stream.write(self._format_chunk(chunk))
future_add_done_callback(self._pending_write, self._on_write_complete)
return future
def finish(self) -> None:
"""Implements `.HTTPConnection.finish`."""
if (
self._expected_content_remaining is not None
and self._expected_content_remaining != 0
and not self.stream.closed()
):
self.stream.close()
raise httputil.HTTPOutputError(
"Tried to write %d bytes less than Content-Length"
% self._expected_content_remaining
)
if self._chunking_output:
if not self.stream.closed():
self._pending_write = self.stream.write(b"0\r\n\r\n")
self._pending_write.add_done_callback(self._on_write_complete)
self._write_finished = True
# If the app finished the request while we're still reading,
# divert any remaining data away from the delegate and
# close the connection when we're done sending our response.
# Closing the connection is the only way to avoid reading the
# whole input body.
if not self._read_finished:
self._disconnect_on_finish = True
# No more data is coming, so instruct TCP to send any remaining
# data immediately instead of waiting for a full packet or ack.
self.stream.set_nodelay(True)
if self._pending_write is None:
self._finish_request(None)
else:
future_add_done_callback(self._pending_write, self._finish_request)
def _on_write_complete(self, future: "Future[None]") -> None:
exc = future.exception()
if exc is not None and not isinstance(exc, iostream.StreamClosedError):
future.result()
if self._write_callback is not None:
callback = self._write_callback
self._write_callback = None
self.stream.io_loop.add_callback(callback)
if self._write_future is not None:
future = self._write_future
self._write_future = None
future_set_result_unless_cancelled(future, None)
def _can_keep_alive(
self, start_line: httputil.RequestStartLine, headers: httputil.HTTPHeaders
) -> bool:
if self.params.no_keep_alive:
return False
connection_header = headers.get("Connection")
if connection_header is not None:
connection_header = connection_header.lower()
if start_line.version == "HTTP/1.1":
return connection_header != "close"
elif (
"Content-Length" in headers
or headers.get("Transfer-Encoding", "").lower() == "chunked"
or getattr(start_line, "method", None) in ("HEAD", "GET")
):
# start_line may be a request or response start line; only
# the former has a method attribute.
return connection_header == "keep-alive"
return False
def _finish_request(self, future: "Optional[Future[None]]") -> None:
self._clear_callbacks()
if not self.is_client and self._disconnect_on_finish:
self.close()
return
# Turn Nagle's algorithm back on, leaving the stream in its
# default state for the next request.
self.stream.set_nodelay(False)
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
def _parse_headers(self, data: bytes) -> Tuple[str, httputil.HTTPHeaders]:
# The lstrip removes newlines that some implementations sometimes
# insert between messages of a reused connection. Per RFC 7230,
# we SHOULD ignore at least one empty line before the request.
# http://tools.ietf.org/html/rfc7230#section-3.5
data_str = native_str(data.decode("latin1")).lstrip("\r\n")
# RFC 7230 section allows for both CRLF and bare LF.
eol = data_str.find("\n")
start_line = data_str[:eol].rstrip("\r")
headers = httputil.HTTPHeaders.parse(data_str[eol:])
return start_line, headers
def _read_body(
self,
code: int,
headers: httputil.HTTPHeaders,
delegate: httputil.HTTPMessageDelegate,
) -> Optional[Awaitable[None]]:
if "Content-Length" in headers:
if "Transfer-Encoding" in headers:
# Response cannot contain both Content-Length and
# Transfer-Encoding headers.
# http://tools.ietf.org/html/rfc7230#section-3.3.3
raise httputil.HTTPInputError(
"Response with both Transfer-Encoding and Content-Length"
)
if "," in headers["Content-Length"]:
# Proxies sometimes cause Content-Length headers to get
# duplicated. If all the values are identical then we can
# use them but if they differ it's an error.
pieces = re.split(r",\s*", headers["Content-Length"])
if any(i != pieces[0] for i in pieces):
raise httputil.HTTPInputError(
"Multiple unequal Content-Lengths: %r"
% headers["Content-Length"]
)
headers["Content-Length"] = pieces[0]
try:
content_length = int(headers["Content-Length"]) # type: Optional[int]
except ValueError:
# Handles non-integer Content-Length value.
raise httputil.HTTPInputError(
"Only integer Content-Length is allowed: %s"
% headers["Content-Length"]
)
if cast(int, content_length) > self._max_body_size:
raise httputil.HTTPInputError("Content-Length too long")
else:
content_length = None
if code == 204:
# This response code is not allowed to have a non-empty body,
# and has an implicit length of zero instead of read-until-close.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
if "Transfer-Encoding" in headers or content_length not in (None, 0):
raise httputil.HTTPInputError(
"Response with code %d should not have body" % code
)
content_length = 0
if content_length is not None:
return self._read_fixed_body(content_length, delegate)
if headers.get("Transfer-Encoding", "").lower() == "chunked":
return self._read_chunked_body(delegate)
if self.is_client:
return self._read_body_until_close(delegate)
return None
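# Illustrative example of input rejected above: a message framed with both
#
#     Content-Length: 10
#     Transfer-Encoding: chunked
#
# raises HTTPInputError, per RFC 7230 section 3.3.3.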
async def _read_fixed_body(
self, content_length: int, delegate: httputil.HTTPMessageDelegate
) -> None:
while content_length > 0:
body = await self.stream.read_bytes(
min(self.params.chunk_size, content_length), partial=True
)
content_length -= len(body)
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
ret = delegate.data_received(body)
if ret is not None:
await ret
async def _read_chunked_body(self, delegate: httputil.HTTPMessageDelegate) -> None:
# TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
total_size = 0
while True:
chunk_len_str = await self.stream.read_until(b"\r\n", max_bytes=64)
chunk_len = int(chunk_len_str.strip(), 16)
if chunk_len == 0:
crlf = await self.stream.read_bytes(2)
if crlf != b"\r\n":
raise httputil.HTTPInputError(
"improperly terminated chunked request"
)
return
total_size += chunk_len
if total_size > self._max_body_size:
raise httputil.HTTPInputError("chunked body too large")
bytes_to_read = chunk_len
while bytes_to_read:
chunk = await self.stream.read_bytes(
min(bytes_to_read, self.params.chunk_size), partial=True
)
bytes_to_read -= len(chunk)
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
ret = delegate.data_received(chunk)
if ret is not None:
await ret
# chunk ends with \r\n
crlf = await self.stream.read_bytes(2)
assert crlf == b"\r\n"
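# Illustrative wire format consumed by the parser above: a chunked body such as
#
#     4\r\n
#     Wiki\r\n
#     5\r\n
#     pedia\r\n
#     0\r\n
#     \r\n
#
# is hex chunk length, CRLF, payload, CRLF, terminated by a zero-length chunk
# and a final CRLF.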
async def _read_body_until_close(
self, delegate: httputil.HTTPMessageDelegate
) -> None:
body = await self.stream.read_until_close()
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
ret = delegate.data_received(body)
if ret is not None:
await ret
class _GzipMessageDelegate(httputil.HTTPMessageDelegate):
"""Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``.
"""
def __init__(self, delegate: httputil.HTTPMessageDelegate, chunk_size: int) -> None:
self._delegate = delegate
self._chunk_size = chunk_size
self._decompressor = None # type: Optional[GzipDecompressor]
def headers_received(
self,
start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
headers: httputil.HTTPHeaders,
) -> Optional[Awaitable[None]]:
if headers.get("Content-Encoding") == "gzip":
self._decompressor = GzipDecompressor()
# Downstream delegates will only see uncompressed data,
# so rename the content-encoding header.
# (but note that curl_httpclient doesn't do this).
headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"])
del headers["Content-Encoding"]
return self._delegate.headers_received(start_line, headers)
async def data_received(self, chunk: bytes) -> None:
if self._decompressor:
compressed_data = chunk
while compressed_data:
decompressed = self._decompressor.decompress(
compressed_data, self._chunk_size
)
if decompressed:
ret = self._delegate.data_received(decompressed)
if ret is not None:
await ret
compressed_data = self._decompressor.unconsumed_tail
else:
ret = self._delegate.data_received(chunk)
if ret is not None:
await ret
def finish(self) -> None:
if self._decompressor is not None:
tail = self._decompressor.flush()
if tail:
# The tail should always be empty: decompress returned
# all that it can in data_received and the only
# purpose of the flush call is to detect errors such
# as truncated input. If we did legitimately get a new
# chunk at this point we'd need to change the
# interface to make finish() a coroutine.
raise ValueError(
    "decompressor.flush returned data; possibly truncated input"
)
return self._delegate.finish()
def on_connection_close(self) -> None:
return self._delegate.on_connection_close()
class HTTP1ServerConnection(object):
"""An HTTP/1.x server."""
def __init__(
self,
stream: iostream.IOStream,
params: Optional[HTTP1ConnectionParameters] = None,
context: Optional[object] = None,
) -> None:
"""
:arg stream: an `.IOStream`
:arg params: a `.HTTP1ConnectionParameters` or None
:arg context: an opaque application-defined object that is accessible
as ``connection.context``
"""
self.stream = stream
if params is None:
params = HTTP1ConnectionParameters()
self.params = params
self.context = context
self._serving_future = None # type: Optional[Future[None]]
async def close(self) -> None:
"""Closes the connection.
Returns a `.Future` that resolves after the serving loop has exited.
"""
self.stream.close()
# Block until the serving loop is done, but ignore any exceptions
# (start_serving is already responsible for logging them).
assert self._serving_future is not None
try:
await self._serving_future
except Exception:
pass
def start_serving(self, delegate: httputil.HTTPServerConnectionDelegate) -> None:
"""Starts serving requests on this connection.
:arg delegate: a `.HTTPServerConnectionDelegate`
"""
assert isinstance(delegate, httputil.HTTPServerConnectionDelegate)
fut = gen.convert_yielded(self._server_request_loop(delegate))
self._serving_future = fut
# Register the future on the IOLoop so its errors get logged.
self.stream.io_loop.add_future(fut, lambda f: f.result())
async def _server_request_loop(
self, delegate: httputil.HTTPServerConnectionDelegate
) -> None:
try:
while True:
conn = HTTP1Connection(self.stream, False, self.params, self.context)
request_delegate = delegate.start_request(self, conn)
try:
ret = await conn.read_response(request_delegate)
except (
iostream.StreamClosedError,
iostream.UnsatisfiableReadError,
asyncio.CancelledError,
):
return
except _QuietException:
# This exception was already logged.
conn.close()
return
except Exception:
gen_log.error("Uncaught exception", exc_info=True)
conn.close()
return
if not ret:
return
await asyncio.sleep(0)
finally:
delegate.on_close(self)
| 42.727599
| 88
| 0.597461
|
dfb5bb8a358e5af1fde329c5bb2a224e04462677
| 1,185
|
py
|
Python
|
aoc_wim/aoc2016/q07.py
|
wimglenn/advent-of-code-wim
|
6308c3fa5d29b318680419f877fd5b8ac1359b5d
|
[
"WTFPL"
] | 20
|
2019-10-15T07:33:13.000Z
|
2022-01-19T13:40:36.000Z
|
aoc_wim/aoc2016/q07.py
|
wimglenn/advent-of-code-wim
|
6308c3fa5d29b318680419f877fd5b8ac1359b5d
|
[
"WTFPL"
] | 5
|
2019-02-01T23:31:27.000Z
|
2021-12-03T06:55:58.000Z
|
aoc_wim/aoc2016/q07.py
|
wimglenn/advent-of-code-wim
|
6308c3fa5d29b318680419f877fd5b8ac1359b5d
|
[
"WTFPL"
] | 8
|
2019-12-03T15:41:23.000Z
|
2021-12-06T17:13:57.000Z
|
"""
--- Day 7: Internet Protocol Version 7 ---
https://adventofcode.com/2016/day/7
"""
import re
from aocd import data
def splitter(s):
pattern = r"\[([a-z]*)\]"
ins = re.findall(pattern, s)
outs = re.sub(pattern, " ", s).split()
return ins, outs
def chunker(s, size):
i = 0
while True:
chunk = s[i : i + size]
if len(chunk) != size:
break
yield chunk
i += 1
def has_abba(s):
for a, b, b_, a_ in chunker(s, 4):
if a == a_ and b == b_ and a != b:
return True
return False
def gen_aba(s):
for a, b, a_ in chunker(s, 3):
if a == a_ and a != b:
yield a + b + a
def has_aba(s):
return bool(next(gen_aba(s), False))
def support_tls(s):
ins, outs = splitter(s)
return has_abba(".".join(outs)) and not has_abba(".".join(ins))
def support_ssl(s):
ins, outs = splitter(s)
for a, b, _ in gen_aba("..".join(ins)):
bab = b + a + b
if bab in "..".join(outs):
return True
return False
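# Worked examples (taken from the published 2016 day 7 puzzle statement):
#     support_tls("abba[mnop]qrst")  # True: ABBA outside brackets, none inside
#     support_tls("abcd[bddb]xyyx")  # False: an ABBA inside brackets disqualifies
#     support_ssl("aba[bab]xyz")     # True: "aba" outside has matching "bab" inside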
tls = ssl = 0
for line in data.splitlines():
tls += support_tls(line)
ssl += support_ssl(line)
print(tls)
print(ssl)
| 18.230769
| 67
| 0.534177
|
ef7acc33ad2d70daac33a9c4f62134966dc50e76
| 922
|
py
|
Python
|
setup.py
|
grissomlab/sigpy-rf
|
673f6c64215c5eb563d8f345ec1a30eb6667136d
|
[
"BSD-3-Clause"
] | 14
|
2019-07-09T00:45:23.000Z
|
2022-01-14T17:58:18.000Z
|
setup.py
|
grissomlab/sigpy-rf
|
673f6c64215c5eb563d8f345ec1a30eb6667136d
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
grissomlab/sigpy-rf
|
673f6c64215c5eb563d8f345ec1a30eb6667136d
|
[
"BSD-3-Clause"
] | 4
|
2020-01-31T18:46:07.000Z
|
2022-03-31T11:37:05.000Z
|
import sys
from setuptools import setup, find_packages
from sigpy.version import __version__ # noqa
if sys.version_info < (3, 5):
sys.exit('Sorry, Python < 3.5 is not supported')
REQUIRED_PACKAGES = ['numpy', 'pywavelets', 'numba', 'scipy', 'tqdm']
with open("README.rst", "r") as f:
long_description = f.read()
setup(name='sigpy',
version=__version__,
description='Python package for signal reconstruction.',
long_description=long_description,
long_description_content_type="text/x-rst",
url='http://github.com/mikgroup/sigpy',
author='Frank Ong',
author_email='frankong@berkeley.edu',
license='BSD',
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
]
)
| 30.733333
| 69
| 0.655098
|
77281d522a6a9c5f0c797963147f0513d72cc8c4
| 8,660
|
py
|
Python
|
qcm/ProcessTransferFunSpecialCheck.py
|
golfit/work-archive
|
bdd37d46fda3fde15ec2164d3335d6b4ed576bd7
|
[
"MIT"
] | null | null | null |
qcm/ProcessTransferFunSpecialCheck.py
|
golfit/work-archive
|
bdd37d46fda3fde15ec2164d3335d6b4ed576bd7
|
[
"MIT"
] | null | null | null |
qcm/ProcessTransferFunSpecialCheck.py
|
golfit/work-archive
|
bdd37d46fda3fde15ec2164d3335d6b4ed576bd7
|
[
"MIT"
] | null | null | null |
#This script uses the same processing for the transfer function as normal, but does so
#for a particular situation: take the fluctuation data from one shot in which the
#Shoelace antenna didn't run, and the current data from a shot in which the Shoelace
#was operating, and calculate the transfer function. This way, we can check whether
#there are false positives in the scheme for picking up poles.
#Ted Golfinopoulos, 13 June 2012
from MDSplus import *
import sys #For getting command line arguments
import numpy
import myTools
from scipy.fftpack import hilbert
from scipy.interpolate import interp1d
import re
from calcTransFun import *
#Parse shot information, contained in first command-line argument
sList=[1120815018]#1120712021] #A shot for which there was a QCM, but the Shoelace wasn't operating.
sShoelace=1120814021#1120712023 #A shot for which the Shoelace was running.
sOut=1120814800 #Put data here.
#List of nodes in remaining command line arguments
stringArgs=sys.argv[1:]
def getDataInRange(srcNode,t1,t2) :
    myStr="_y="+srcNode.getPath()+", _t=dim_of("+srcNode.getPath()+"), pack(_y, _t > {0:f} and _t < {1:f})".format(t1,t2)
    t1Actual=Data.compile("_t=dim_of("+srcNode.getPath()+ "), _t[firstloc(_t > {0:f} and _t < {1:f})]".format(t1,t2) ).evaluate().data()
    t2Actual=Data.compile("_t=dim_of("+srcNode.getPath()+ "), _t[lastloc(_t > {0:f} and _t < {1:f})]".format(t1,t2) ).evaluate().data()
    y=Data.compile( myStr ).evaluate().data() #Grab data in specified range
    return y, numpy.linspace(t1Actual,t2Actual,len(y)) #Regenerate timebase locally
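#A minimal usage sketch (node path shown is only an example): getDataInRange would
#typically be called as
#    y, t = getDataInRange(shoeTree.getNode('shoelace.ant_i'), t1, t2)
#returning the samples between t1 and t2 plus a locally regenerated timebase.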
#Loop through shots in list
for s in sList :
#Open magnetics tree to get Shoelace antenna signal.
magTree=Tree('magnetics',s)
shoeTree=Tree('magnetics',sShoelace)
outTree=Tree('magnetics',sOut,'edit')
#Get frequency points to extract range of frequencies.
fpts=shoeTree.getNode('shoelace.freq_program.freq_wave_hz').getData().evaluate().data() #For Shoelace antenna work
freqPad=5.E3 #Extend frequency range by this amount - padding.
fRange=numpy.array([min(fpts)-freqPad,max(fpts)+freqPad])
#Get amplifier on and off times
try :
t1=shoeTree.getNode('shoelace.rf_gate.on').getData().evaluate().data()
t2=shoeTree.getNode('shoelace.rf_gate.off').getData().evaluate().data()
except :
t1=0.5
t2=1.5
print('Gate times set at 0.5 and 1.5 s')
antNode=shoeTree.getNode('shoelace.ant_i') #Shoelace antenna current node.
#Check to see if antenna current node is on - if it is not, skip and continue.
if(not(antNode.isOn())):
print("Antenna current node is off for Shot {0:d} - skip and continue".format(s))
continue
elif(t2-t1<=0.001) : #Check to make sure antenna actually ran - this is not a perfect check, but is one way to avoid wasted computation during the shot cycle.
print("Antenna is gated off for Shot {0:d} - skip and continue".format(s))
continue
#Get timebase and signal.
ti=antNode.getData().evaluate().dim_of().data() #Timebase of current signal
fs=1./(ti[1]-ti[0]) #Sampling frequency.
ia=antNode.getData().evaluate().data() #Antenna current signal [A]
try :
iaHilb=ia+1j*antNode.getNode('hilb').getData().evaluate().data() #Hilbert transform of current signal
except : #If there's no hilbert transform node, calculate it.
iaHilb=ia-1j*hilbert(ia)
expr=Data.compile("BUILD_SIGNAL($VALUE, $1, $2)", numpy.imag(iaHilb), ti)
antNode.getNode('hilb').putData(expr)
usePremadeListFlag=False #Flag to indicate whether user has specified a premade list of nodes, or has entered them explicitly at the command line.
nodeList=[] #Initialize variable to hold paths of list of "output signal" nodes - calculate transfer function for these signals.
nodeList,ceceNodes=myTools.parsePremadeNodeList(stringArgs, s)
if(len(nodeList)>0) : usePremadeListFlag=True
#Calculate cross-correlation component if requested.
if( any([x=="crosscorr" for x in stringArgs]) or any([x=="mscohere" for x in stringArgs]) ) : crossCorrFlag=True
else : crossCorrFlag=False
if(not(usePremadeListFlag)) :
nodeList=stringArgs #Otherwise, user has specified list of signals, explicitly.
#Loop through nodes for which transfer function is desired
for nodeName in nodeList :
#Parse tree name
extractTreeName=re.findall('(\\\)(.+)(::)',nodeName)
if(len(extractTreeName)==0) :
sigTree=magTree #Default tree is magnetics
treeName='magnetics'
else :
treeName = extractTreeName[0][1] #Second argument from regular expression output is tree name
sigTree=Tree(treeName,s)
extractProbeName=re.split('(:)|(\\.)',nodeName)
probeName=extractProbeName[-1] #Last argument in nodeName broken at node dividers is probe name.
#Add up and down to names of ASP probes.
if( re.search('.UP:',nodeName) ) : probeName=probeName+"UP"
elif( re.search('.DN:',nodeName ) ) : probeName=probeName+"DN"
print('test')
print(nodeName)
sigNode=sigTree.getNode(nodeName)
#See if node is on; if it is off, skip and go to next.
if( not(sigNode.isOn()) ) :
print("Node, "+str(sigNode.getPath())+" is off; skipping and continuing")
continue
#Get signal of node.
y=sigNode.getData().evaluate().data() #"Output" signal - calculate transfer function with respect to this.
#If signal is not from magnetics tree, resample onto magnetics timebase.
if(treeName.lower()!='magnetics') :
tsig=sigTree.getNode(nodeName).getData().evaluate().dim_of().data() #Signal timebase
t1,t2=[max(t1,tsig[0]),min(t2,tsig[-1])] #Might need to change data range if signal time base does not extend to full pulse of input signal.
interpObj=interp1d(tsig,y)
y=interpObj(ti[numpy.logical_and(ti>t1,ti<t2)]) #Interpolate y only in time range of interest.
else : #Otherwise, just reduce size of y to time range of interest.
y = y[numpy.logical_and(ti>t1,ti<t2)]
#Calculate transfer function
H=calcTransFun(y,iaHilb[numpy.logical_and(ti>t1,ti<t2)],fRange,fs)
if( crossCorrFlag == True ) :
Hyx=calcTransFun(ia[numpy.logical_and(ti>t1,ti<t2)],y-1j*hilbert(y),fRange,fs)
Cxy=H*Hyx #Cross-coherence spectrum for component. This follows since Hxy=Pxy/Pxx, and Hyx=Pyx/Pyy=Pxy'/Pyy, and Cxy:=|Pxy|^2/(Pxx*Pyy), where the prime denotes complex conjugation.
tH=linspace(t1,t2,len(H)) #Make a new timebase corresponding to transfer function.
topNode=outTree.getNode('shoelace')
try :
#Add new tree node to hold transfer functions
topNode.addNode('trans_fun','structure')
#Write changes to tree.
outTree.write()
except :
print("...can't add trans_fun structure node - may exist already")
transFunNode=topNode.getNode('trans_fun')
try :
#Add a node for each coil
transFunNode.addNode(probeName,'signal')
#Add sub-nodes for node.
n=transFunNode.getNode(probeName)
n.addNode('raw','signal')
n.addNode('Hr','signal')
n.addNode('Hi','signal')
#Write changes to tree.
outTree.write()
except :
n=transFunNode.getNode(probeName)
print("...transfer function subnodes for "+probeName+" are already there")
#Prepare expression to put in tree with data.
#Put transfer function in tree as array of complex numbers.
expr=Data.compile("BUILD_SIGNAL($VALUE, $1, $2)", H, tH)
n.putData(expr)
n.getNode('raw').putData(Data.compile(nodeName)) #Put pointer to raw data of signal.
#Make nodes that get real and imaginary parts of transfer function for implementations of MDS interface which can't handle getting complex types.
n.getNode('Hr').putData(Data.compile("real("+n.getPath()+")"))
n.getNode('Hi').putData(Data.compile("aimag("+n.getPath()+")"))
print("Put transfer function data in "+n.getPath())
#######################Cross-coherence
if(crossCorrFlag) :
try :
#Add new tree node to hold cross-coherence
topNode.addNode('mscohere','structure')
#Write changes to tree.
outTree.write()
except :
print("...can't add mscohere structure node - may exist already")
mscohereNode=topNode.getNode('mscohere')
try :
#Add a node for each coil
mscohereNode.addNode(probeName,'signal')
#Add sub-nodes for node.
n=mscohereNode.getNode(probeName)
n.addNode('raw','signal')
#Write changes to tree.
outTree.write()
except :
n=mscohereNode.getNode(probeName)
print("...magnitude squared coherence subnodes for "+probeName+" are already there")
#Prepare expression to put in tree with data.
#Put transfer function in tree as array of complex numbers.
expr=Data.compile("BUILD_SIGNAL($VALUE, $1, $2)", Cxy, tH)
n.putData(expr)
n.getNode('raw').putData(Data.compile(nodeName)) #Put pointer to raw data of signal.
print("Put magnitude square coherence component data in "+n.getPath())
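#A minimal, self-contained sketch (hypothetical test signals and names, not taken from the tree code above): scipy.signal.coherence
#returns the same magnitude-squared coherence quantity, Cxy=|Pxy|^2/(Pxx*Pyy), that the Hxy*Hyx product above stores
#in the mscohere nodes.
def exampleMscohereSketch() :
    import numpy as np
    from scipy import signal
    fs=1.25e6 #Sampling frequency [Hz] - placeholder value.
    t=np.arange(0,0.01,1./fs)
    drive=np.sin(2*np.pi*1.0e5*t) #Stand-in for the antenna current signal.
    response=0.5*np.sin(2*np.pi*1.0e5*t+0.3)+0.1*np.random.randn(len(t)) #Stand-in for a pickup signal.
    f,Cxy=signal.coherence(response,drive,fs=fs,nperseg=1024)
    return f,Cxy #Cxy is near 1 at the drive frequency and small elsewhere.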
| 40.46729
| 410
| 0.720323
|
edb8df8e7ccf2b99c13e6074e923ea19b062ab2e
| 432
|
py
|
Python
|
paytm/migrations/0002_paytm_history_merc_unq_ref.py
|
jaswal72/paytm_django
|
03cb130d7728e71703dedf8d849e6fc646d4db8b
|
[
"MIT"
] | 4
|
2019-03-30T08:56:11.000Z
|
2019-11-19T11:59:53.000Z
|
paytm/migrations/0002_paytm_history_merc_unq_ref.py
|
tanmayag8958/paytm_django
|
dfba1c9185f4a8defcfc609888ad66e57715ec48
|
[
"MIT"
] | 3
|
2020-02-11T23:57:58.000Z
|
2021-06-10T21:17:48.000Z
|
paytm/migrations/0002_paytm_history_merc_unq_ref.py
|
tanmayag8958/paytm_django
|
dfba1c9185f4a8defcfc609888ad66e57715ec48
|
[
"MIT"
] | 3
|
2019-03-31T12:23:09.000Z
|
2019-11-19T11:59:59.000Z
|
# Generated by Django 2.1.7 on 2019-03-29 15:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('paytm', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='paytm_history',
name='MERC_UNQ_REF',
field=models.CharField(default='not_set', max_length=30, verbose_name='MERC_UNQ_REF'),
),
]
| 22.736842
| 98
| 0.618056
|
e28fd93d8fd766fc178b92f00e89505de86fa46a
| 5,181
|
py
|
Python
|
votr/initialize_votr_database.py
|
RobSpectre/salt-states
|
aa84fc8e4c53b56697f2d4d36177c0d491294ede
|
[
"MIT"
] | 3
|
2015-08-13T11:15:03.000Z
|
2016-05-16T01:02:21.000Z
|
votr/initialize_votr_database.py
|
RobSpectre/salt-states
|
aa84fc8e4c53b56697f2d4d36177c0d491294ede
|
[
"MIT"
] | 1
|
2021-02-08T20:18:05.000Z
|
2021-02-08T20:18:05.000Z
|
votr/initialize_votr_database.py
|
RobSpectre/salt-states
|
aa84fc8e4c53b56697f2d4d36177c0d491294ede
|
[
"MIT"
] | 1
|
2015-09-22T22:33:36.000Z
|
2015-09-22T22:33:36.000Z
|
import sys
import urllib2
import json
import argparse
import base64
class VotrCouchDBInitializer(object):
def __init__(self, path=None, username=None, password=None):
self.path = path
self.username = username
self.password = password
self.result = None
def exists(self):
try:
response = urllib2.urlopen(self.path)
response_dict = json.loads(response.read())
success = response_dict['couchdb']
return True
except urllib2.URLError, e:
self.result = {'changed': False,
'comment': "Unable to connect to CouchDB: "
"{0}".format(e)}
return False
except urllib2.HTTPError, e:
self.result = {'changed': False,
'comment': "Error connecting to CouchDB: "
"{0}".format(e)}
return False
        except KeyError, e:
self.result = {'changed': False,
'comment': "Unknown response from CouchDB: {0}"
.format(e)}
return False
        except Exception, e:
self.result = {'changed': False,
'comment': "Unknown error connecting to "
"CouchDB: {0}".format(e)}
return False
def user_exists(self):
try:
response = urllib2.urlopen("{0}/_config/admins/{1}"
.format(self.path, self.username))
response_dict = json.loads(response.read())
if response_dict.get('error', None):
return False
else:
return True
except urllib2.HTTPError, e:
return False
except Exception, e:
self.result = {'changed': False,
'comment': "Votr CouchDB user already exists."}
return True
def create_user(self):
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request("{0}/_config/admins/{1}"
.format(self.path, self.username),
data='"{0}"'.format(self.password))
request.get_method = lambda: 'PUT'
try:
url = opener.open(request)
self.result = {'changed': True,
'comment': "Votr user created."}
return True
except Exception, e:
self.result = {'changed': False,
'comment': "Error creating user."}
return False
def database_exists(self):
try:
response = urllib2.urlopen("{0}/database".format(self.path))
response_dict = json.loads(response.read())
if response_dict.get('error', None):
return False
else:
self.result = {'changed': False,
'comment': "Votr CouchDB database "
"already exists."}
return True
except urllib2.HTTPError, e:
return False
def create_database(self):
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request("{0}/database"
.format(self.path))
request.get_method = lambda: 'PUT'
base64_string = base64.encodestring("{0}:{1}"
.format(self.username,
self.password)) \
.replace('\n', '')
request.add_header("Authorization", "Basic {0}".format(base64_string))
try:
url = opener.open(request)
self.result = {'changed': True,
'comment': "Votr database created."}
return True
except Exception, e:
self.result = {'changed': False,
'comment': "Error creating database."}
print Exception, e
return False
# Parser configuration
parser = argparse.ArgumentParser(description="Votr Database Initializer for "
"Salt.",
epilog="Written by Rob Spectre. Votr "
"written by Carter Rabasa.")
parser.add_argument('path',
help="Path to CouchDB interface.")
parser.add_argument('-u', '--username',
help="Username for CouchDB admin user you wish to create.")
parser.add_argument('-p', '--password',
help="Password for CouchDB admin user you wish to create.")
if __name__ == "__main__":
couchdb = VotrCouchDBInitializer()
parser.parse_args(namespace=couchdb)
if couchdb.exists():
if couchdb.user_exists():
pass
else:
couchdb.create_user()
if couchdb.database_exists():
pass
else:
couchdb.create_database()
sys.stdout.write(json.dumps(couchdb.result))
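#Hypothetical invocation sketch (URL and credentials are placeholders, not taken from this repo):
#   python initialize_votr_database.py http://127.0.0.1:5984 -u admin -p secret
#On success the script prints a Salt-friendly JSON result such as
#   {"changed": true, "comment": "Votr database created."}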
| 37.817518
| 79
| 0.494499
|
32a1dcc12c26e8326c42142f28f2ff39abb3707b
| 4,351
|
py
|
Python
|
cs15211/FindandReplacePattern.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | 1
|
2021-07-05T01:53:30.000Z
|
2021-07-05T01:53:30.000Z
|
cs15211/FindandReplacePattern.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | null | null | null |
cs15211/FindandReplacePattern.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | 1
|
2018-01-08T07:14:08.000Z
|
2018-01-08T07:14:08.000Z
|
__source__ = 'https://leetcode.com/problems/find-and-replace-pattern/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 890. Find and Replace Pattern
#
# You have a list of words and a pattern,
# and you want to know which words in words matches the pattern.
#
# A word matches the pattern if there exists a permutation of letters p
# so that after replacing every letter x in the pattern with p(x),
# we get the desired word.
#
# (Recall that a permutation of letters is a bijection from letters to letters:
# every letter maps to another letter, and no two letters map to the same letter.)
#
# Return a list of the words in words that match the given pattern.
#
# You may return the answer in any order.
#
# Example 1:
#
# Input: words = ["abc","deq","mee","aqq","dkd","ccc"], pattern = "abb"
# Output: ["mee","aqq"]
# Explanation: "mee" matches the pattern because there is a permutation {a -> m, b -> e, ...}.
# "ccc" does not match the pattern because {a -> c, b -> c, ...} is not a permutation,
# since a and b map to the same letter.
#
# Note:
#
# 1 <= words.length <= 50
# 1 <= pattern.length = words[i].length <= 20
#
import unittest
# 24ms 92.68%
class Solution(object):
def findAndReplacePattern(self, words, pattern):
"""
:type words: List[str]
:type pattern: str
:rtype: List[str]
"""
def match(word):
m1, m2 = {}, {}
for w, p in zip(word, pattern):
if w not in m1: m1[w] = p
if p not in m2: m2[p] = w
if (m1[w], m2[p]) != (p, w):
return False
return True
return filter(match, words)
# 32ms 26.50%
class Solution2(object):
def findAndReplacePattern(self, words, pattern):
"""
:type words: List[str]
:type pattern: str
:rtype: List[str]
"""
def match(word):
P = {}
for x, y in zip(pattern, word):
if P.setdefault(x, y) != y:
return False
return len(set(P.values())) == len(P.values())
return filter(match, words)
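# A minimal usage sketch (not part of the original solutions), reusing the example from the problem
# statement above; wrapping in list() keeps it working whether filter returns a list (Python 2) or an
# iterator (Python 3).
def _example_usage():
    words = ["abc", "deq", "mee", "aqq", "dkd", "ccc"]
    assert list(Solution().findAndReplacePattern(words, "abb")) == ["mee", "aqq"]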
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/find-and-replace-pattern/solution/
Approach 1: Two Maps
Complexity Analysis
Time Complexity: O(N * K), where N is the number of words, and K is the length of each word.
Space Complexity: O(N * K), the space used by the answer.
# 7ms 36.94%
class Solution {
public List<String> findAndReplacePattern(String[] words, String pattern) {
List<String> ans = new ArrayList();
for (String word: words)
if (match(word, pattern))
ans.add(word);
return ans;
}
private boolean match(String word, String pattern) {
Map<Character, Character> m1 = new HashMap();
Map<Character, Character> m2 = new HashMap();
for (int i = 0; i < word.length(); ++i) {
char w = word.charAt(i);
char p = pattern.charAt(i);
if (!m1.containsKey(w)) m1.put(w, p);
if (!m2.containsKey(p)) m2.put(p, w);
if (m1.get(w) != p || m2.get(p) != w) return false;
}
return true;
}
}
Approach 2: One Map
Complexity Analysis
Time Complexity: O(N * K), where N is the number of words, and K is the length of each word.
Space Complexity: O(N * K), the space used by the answer.
# 8ms 24.49%
class Solution {
public List<String> findAndReplacePattern(String[] words, String pattern) {
List<String> ans = new ArrayList();
for (String word: words)
if (match(word, pattern))
ans.add(word);
return ans;
}
private boolean match(String word, String pattern) {
Map<Character, Character> M = new HashMap();
for (int i = 0; i < word.length(); ++i) {
char w = word.charAt(i);
char p = pattern.charAt(i);
if (!M.containsKey(w)) M.put(w, p);
if (M.get(w) != p) return false;
}
boolean[] seen = new boolean[26];
for (char p: M.values()) {
if (seen[p - 'a']) return false;
seen[p - 'a'] = true;
}
return true;
}
}
'''
| 30.006897
| 94
| 0.571823
|
4fc487544ee898bd601823bb044ad9cf0b7fa7c2
| 3,322
|
py
|
Python
|
samples/media-player/main.py
|
cmarshall108/panda3d-python3
|
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
|
[
"PHP-3.0",
"PHP-3.01"
] | 3
|
2020-01-02T08:43:36.000Z
|
2020-07-05T08:59:02.000Z
|
samples/media-player/main.py
|
cmarshall108/panda3d-python3
|
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
|
[
"PHP-3.0",
"PHP-3.01"
] | null | null | null |
samples/media-player/main.py
|
cmarshall108/panda3d-python3
|
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
|
[
"PHP-3.0",
"PHP-3.01"
] | 1
|
2020-03-11T17:38:45.000Z
|
2020-03-11T17:38:45.000Z
|
#!/usr/bin/env python
from panda3d.core import *
# Tell Panda3D to use OpenAL, not FMOD
loadPrcFileData("", "audio-library-name p3openal_audio")
from direct.showbase.DirectObject import DirectObject
from direct.gui.OnscreenText import OnscreenText
from direct.showbase.ShowBase import ShowBase
# Function to put instructions on the screen.
def addInstructions(pos, msg):
return OnscreenText(text=msg, style=1, fg=(0, 0, 0, 1), shadow=(1, 1, 1, 1),
parent=base.a2dTopLeft, align=TextNode.ALeft,
pos=(0.08, -pos - 0.04), scale=.06)
# Function to put title on the screen.
def addTitle(text):
return OnscreenText(text=text, style=1, pos=(-0.1, 0.09), scale=.08,
parent=base.a2dBottomRight, align=TextNode.ARight,
fg=(1, 1, 1, 1), shadow=(0, 0, 0, 1))
class MediaPlayer(ShowBase):
def __init__(self, media_file):
# Initialize the ShowBase class from which we inherit, which will
# create a window and set up everything we need for rendering into it.
ShowBase.__init__(self)
self.title = addTitle("Panda3D: Tutorial - Media Player")
self.inst1 = addInstructions(0.06, "P: Play/Pause")
self.inst2 = addInstructions(0.12, "S: Stop and Rewind")
self.inst3 = addInstructions(0.18,
"M: Slow Motion / Normal Motion toggle")
# Load the texture. We could use loader.loadTexture for this,
# but we want to make sure we get a MovieTexture, since it
# implements synchronizeTo.
self.tex = MovieTexture("name")
success = self.tex.read(media_file)
assert success, "Failed to load video!"
# Set up a fullscreen card to set the video texture on.
cm = CardMaker("My Fullscreen Card")
cm.setFrameFullscreenQuad()
# Tell the CardMaker to create texture coordinates that take into
# account the padding region of the texture.
cm.setUvRange(self.tex)
# Now place the card in the scene graph and apply the texture to it.
card = NodePath(cm.generate())
card.reparentTo(self.render2d)
card.setTexture(self.tex)
self.sound = loader.loadSfx(media_file)
# Synchronize the video to the sound.
self.tex.synchronizeTo(self.sound)
self.accept('p', self.playpause)
self.accept('P', self.playpause)
self.accept('s', self.stopsound)
self.accept('S', self.stopsound)
self.accept('m', self.fastforward)
self.accept('M', self.fastforward)
def stopsound(self):
self.sound.stop()
self.sound.setPlayRate(1.0)
def fastforward(self):
if self.sound.status() == AudioSound.PLAYING:
t = self.sound.getTime()
self.sound.stop()
if self.sound.getPlayRate() == 1.0:
self.sound.setPlayRate(0.5)
else:
self.sound.setPlayRate(1.0)
self.sound.setTime(t)
self.sound.play()
def playpause(self):
if self.sound.status() == AudioSound.PLAYING:
t = self.sound.getTime()
self.sound.stop()
self.sound.setTime(t)
else:
self.sound.play()
player = MediaPlayer("PandaSneezes.ogv")
player.run()
| 35.72043
| 80
| 0.618603
|
01c8da784c96e4453f24183835c6440fb6cdf8b9
| 4,259
|
py
|
Python
|
benchmark/startQiskit_QC2253.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_QC2253.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_QC2253.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=35
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
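def _example_bitwise_helpers():
    # Hypothetical sketch (not in the original benchmark): bitwise_xor xors the strings position by
    # position and returns the result reversed; bitwise_dot returns the parity of the inner product.
    assert bitwise_xor("110", "000") == "011"
    assert bitwise_dot("110", "011") == "1"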
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
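# Hypothetical sketch (not in the original benchmark): build the oracle for the 2-qubit AND function,
# which flips the target qubit only for the input "11", using the same build_oracle helper defined above.
def _example_build_oracle():
    oracle = build_oracle(2, lambda rep: "1" if rep == "11" else "0")
    print(oracle.draw())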
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=16
prog.cz(input_qubit[0],input_qubit[3]) # number=17
prog.h(input_qubit[3]) # number=18
prog.x(input_qubit[3]) # number=14
prog.h(input_qubit[3]) # number=32
prog.cz(input_qubit[0],input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=34
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.cx(input_qubit[2],input_qubit[3]) # number=22
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=24
prog.cz(input_qubit[3],input_qubit[2]) # number=25
prog.h(input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[0],input_qubit[2]) # number=29
prog.x(input_qubit[2]) # number=30
prog.cx(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.x(input_qubit[1]) # number=20
prog.x(input_qubit[1]) # number=21
prog.x(input_qubit[3]) # number=27
prog.x(input_qubit[3]) # number=28
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2253.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.909836
| 165
| 0.654379
|
8be53134c4da1d89cf24a2577c1ccd11e8ca9b15
| 12,471
|
py
|
Python
|
curtsies/events.py
|
bpython/curtsies
|
307aa877dffe28f5760b88f67d4e865134d949c3
|
[
"MIT"
] | 93
|
2018-02-18T03:13:32.000Z
|
2022-03-22T22:09:15.000Z
|
curtsies/events.py
|
bpython/curtsies
|
307aa877dffe28f5760b88f67d4e865134d949c3
|
[
"MIT"
] | 48
|
2018-02-13T19:16:36.000Z
|
2021-12-01T14:18:38.000Z
|
curtsies/events.py
|
bpython/curtsies
|
307aa877dffe28f5760b88f67d4e865134d949c3
|
[
"MIT"
] | 19
|
2018-02-13T20:36:29.000Z
|
2021-01-02T20:22:43.000Z
|
"""Events for keystrokes and other input events"""
import codecs
import encodings
import itertools
import sys
from typing import Optional, List, Union
from .termhelpers import Termmode
from .curtsieskeys import CURTSIES_NAMES as special_curtsies_names
chr_byte = lambda i: chr(i).encode("latin-1")
chr_uni = chr
CURTSIES_NAMES = {}
control_chars = {chr_byte(i): "<Ctrl-%s>" % chr(i + 0x60) for i in range(0x00, 0x1B)}
CURTSIES_NAMES.update(control_chars)
for i in range(0x00, 0x80):
CURTSIES_NAMES[b"\x1b" + chr_byte(i)] = "<Esc+%s>" % chr(i)
for i in range(0x00, 0x1B): # Overwrite the control keys with better labels
CURTSIES_NAMES[b"\x1b" + chr_byte(i)] = "<Esc+Ctrl-%s>" % chr(i + 0x40)
for i in range(0x00, 0x80):
CURTSIES_NAMES[chr_byte(i + 0x80)] = "<Meta-%s>" % chr(i)
for i in range(0x00, 0x1B): # Overwrite the control keys with better labels
CURTSIES_NAMES[chr_byte(i + 0x80)] = "<Meta-Ctrl-%s>" % chr(i + 0x40)
CURTSIES_NAMES.update(special_curtsies_names)
CURSES_NAMES = {
b"\x1bOP": "KEY_F(1)",
b"\x1bOQ": "KEY_F(2)",
b"\x1bOR": "KEY_F(3)",
b"\x1bOS": "KEY_F(4)",
b"\x1b[15~": "KEY_F(5)",
b"\x1b[17~": "KEY_F(6)",
b"\x1b[18~": "KEY_F(7)",
b"\x1b[19~": "KEY_F(8)",
b"\x1b[20~": "KEY_F(9)",
b"\x1b[21~": "KEY_F(10)",
b"\x1b[23~": "KEY_F(11)",
b"\x1b[24~": "KEY_F(12)",
# see bpython #626
b"\x1b[11~": "KEY_F(1)",
b"\x1b[12~": "KEY_F(2)",
b"\x1b[13~": "KEY_F(3)",
b"\x1b[14~": "KEY_F(4)",
b"\x1b[A": "KEY_UP",
b"\x1b[B": "KEY_DOWN",
b"\x1b[C": "KEY_RIGHT",
b"\x1b[D": "KEY_LEFT",
b"\x1b[F": "KEY_END", # https://github.com/bpython/bpython/issues/490
b"\x1b[H": "KEY_HOME", # https://github.com/bpython/bpython/issues/490
b"\x08": "KEY_BACKSPACE",
b"\x1b[Z": "KEY_BTAB",
# see curtsies #78 - taken from https://github.com/jquast/blessed/blob/e9ad7b85dfcbbba49010ab8c13e3a5920d81b010/blessed/keyboard.py#L409
b"\x1b[1~": "KEY_FIND", # find
b"\x1b[2~": "KEY_IC", # insert (0)
b"\x1b[3~": "KEY_DC", # delete (.), "Execute"
b"\x1b[4~": "KEY_SELECT", # select
b"\x1b[5~": "KEY_PPAGE", # pgup (9)
b"\x1b[6~": "KEY_NPAGE", # pgdown (3)
b"\x1b[7~": "KEY_HOME", # home
b"\x1b[8~": "KEY_END", # end
b"\x1b[OA": "KEY_UP", # up (8)
b"\x1b[OB": "KEY_DOWN", # down (2)
b"\x1b[OC": "KEY_RIGHT", # right (6)
b"\x1b[OD": "KEY_LEFT", # left (4)
b"\x1b[OF": "KEY_END", # end (1)
b"\x1b[OH": "KEY_HOME", # home (7)
}
KEYMAP_PREFIXES = set()
for table in (CURSES_NAMES, CURTSIES_NAMES):
for k in table:
if k.startswith(b"\x1b"):
for i in range(1, len(k)):
KEYMAP_PREFIXES.add(k[:i])
MAX_KEYPRESS_SIZE = max(
len(seq) for seq in itertools.chain(CURSES_NAMES.keys(), CURTSIES_NAMES.keys())
)
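# For instance, the loops above yield entries like CURTSIES_NAMES[b"\x01"] == "<Ctrl-a>" and
# CURTSIES_NAMES[b"\x1ba"] == "<Esc+a>" (unless overridden by the special curtsies names merged in last).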
class Event:
pass
class ScheduledEvent(Event):
"""Event scheduled for a future time.
args:
when (float): unix time in seconds for which this event is scheduled
Custom events that occur at a specific time in the future should
be subclassed from ScheduledEvent."""
def __init__(self, when: float) -> None:
self.when = when
class WindowChangeEvent(Event):
def __init__(
self, rows: int, columns: int, cursor_dy: Optional[int] = None
) -> None:
self.rows = rows
self.columns = columns
self.cursor_dy = cursor_dy
x = width = property(lambda self: self.columns)
y = height = property(lambda self: self.rows)
def __repr__(self) -> str:
return "<WindowChangeEvent (%d, %d)%s>" % (
self.rows,
self.columns,
"" if self.cursor_dy is None else " cursor_dy: %d" % self.cursor_dy,
)
@property
def name(self) -> str:
return "<WindowChangeEvent>"
class SigIntEvent(Event):
"""Event signifying a SIGINT"""
def __repr__(self) -> str:
return "<SigInt Event>"
@property
def name(self) -> str:
return repr(self)
class PasteEvent(Event):
"""Multiple keypress events combined, likely from copy/paste.
The events attribute contains a list of keypress event strings.
"""
def __init__(self) -> None:
self.events: List[str] = []
def __repr__(self) -> str:
return "<Paste Event with data: %r>" % self.events
@property
def name(self) -> str:
return repr(self)
def decodable(seq: bytes, encoding: str) -> bool:
try:
u = seq.decode(encoding)
except UnicodeDecodeError:
return False
else:
return True
def get_key(
bytes_: List[bytes], encoding: str, keynames: str = "curtsies", full: bool = False
) -> Optional[str]:
"""Return key pressed from bytes_ or None
Return a key name or None meaning it's an incomplete sequence of bytes
(more bytes needed to determine the key pressed)
encoding is how the bytes should be translated to unicode - it should
match the terminal encoding.
keynames is a string describing how keys should be named:
* curtsies uses unicode strings like <F8>
* curses uses unicode strings similar to those returned by
the Python ncurses window.getkey function, like KEY_F(8),
plus a nonstandard representation of meta keys (bytes 128-255)
because returning the corresponding unicode code point would be
indistinguishable from the multibyte sequence that encodes that
character in the current encoding
* bytes returns the original bytes from stdin (NOT unicode)
if full, match a key even if it could be a prefix to another key
(useful for detecting a plain escape key for instance, since
escape is also a prefix to a bunch of char sequences for other keys)
Events are subclasses of Event, or unicode strings
Precondition: get_key(prefix, keynames) is None for all proper prefixes of
bytes. This means get_key should be called on progressively larger inputs
(for 'asdf', first on 'a', then on 'as', then on 'asd' - until a non-None
value is returned)
"""
if not all(isinstance(c, type(b"")) for c in bytes_):
raise ValueError("get key expects bytes, got %r" % bytes_) # expects raw bytes
if keynames not in ["curtsies", "curses", "bytes"]:
raise ValueError("keynames must be one of 'curtsies', 'curses' or 'bytes'")
seq = b"".join(bytes_)
if len(seq) > MAX_KEYPRESS_SIZE:
raise ValueError("unable to decode bytes %r" % seq)
def key_name() -> str:
if keynames == "curses":
# may not be here (and still not decodable) curses names incomplete
if seq in CURSES_NAMES:
return CURSES_NAMES[seq]
# Otherwise, there's no special curses name for this
try:
# for normal decodable text or a special curtsies sequence with bytes that can be decoded
return seq.decode(encoding)
except UnicodeDecodeError:
# this sequence can't be decoded with this encoding, so we need to represent the bytes
if len(seq) == 1:
return "x%02X" % ord(seq)
# TODO figure out a better thing to return here
else:
raise NotImplementedError(
"are multibyte unnameable sequences possible?"
)
return "bytes: " + "-".join(
"x%02X" % ord(seq[i : i + 1]) for i in range(len(seq))
)
# TODO if this isn't possible, return multiple meta keys as a paste event if paste events enabled
elif keynames == "curtsies":
if seq in CURTSIES_NAMES:
return CURTSIES_NAMES[seq]
return seq.decode(
encoding
) # assumes that curtsies names are a subset of curses ones
else:
assert keynames == "bytes"
return seq # type: ignore
key_known = seq in CURTSIES_NAMES or seq in CURSES_NAMES or decodable(seq, encoding)
if full and key_known:
return key_name()
elif seq in KEYMAP_PREFIXES or could_be_unfinished_char(seq, encoding):
return None # need more input to make up a full keypress
elif key_known:
return key_name()
else:
# this will raise a unicode error (they're annoying to raise ourselves)
seq.decode(encoding)
        assert False, "should have raised a unicode decode error"
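def _example_get_key_sketch() -> None:
    # Hypothetical usage sketch (not part of the module): call get_key on progressively larger
    # inputs, as the precondition in its docstring describes.
    assert get_key([b"a"], "utf-8") == "a"  # a complete, decodable keypress
    assert get_key([b"\x1b"], "utf-8") is None  # prefix of many escape sequences; wait for more bytes
    assert get_key([b"\x1b"], "utf-8", keynames="bytes", full=True) == b"\x1b"  # full=True forces a match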
def could_be_unfinished_char(seq: bytes, encoding: str) -> bool:
"""Whether seq bytes might create a char in encoding if more bytes were added"""
if decodable(seq, encoding):
return False # any sensible encoding surely doesn't require lookahead (right?)
# (if seq bytes encoding a character, adding another byte shouldn't also encode something)
if codecs.getdecoder("utf8") is codecs.getdecoder(encoding):
return could_be_unfinished_utf8(seq)
elif codecs.getdecoder("ascii") is codecs.getdecoder(encoding):
return False
else:
return True # We don't know, it could be
def could_be_unfinished_utf8(seq: bytes) -> bool:
# http://en.wikipedia.org/wiki/UTF-8#Description
# fmt: off
if ord(seq[0:1]) & 0b10000000 == 0b10000000 and len(seq) < 1: return True
elif ord(seq[0:1]) & 0b11100000 == 0b11000000 and len(seq) < 2: return True
elif ord(seq[0:1]) & 0b11110000 == 0b11100000 and len(seq) < 3: return True
elif ord(seq[0:1]) & 0b11111000 == 0b11110000 and len(seq) < 4: return True
elif ord(seq[0:1]) & 0b11111100 == 0b11111000 and len(seq) < 5: return True
elif ord(seq[0:1]) & 0b11111110 == 0b11111100 and len(seq) < 6: return True
else: return False
# fmt: on
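def _example_unfinished_utf8_sketch() -> None:
    # Hypothetical sketch (not part of the module): the leading byte of a UTF-8 sequence encodes how
    # many continuation bytes must follow, which is exactly what the bit masks above test.
    snowman = "\u2603".encode("utf-8")  # b'\xe2\x98\x83', a three-byte sequence
    assert could_be_unfinished_utf8(snowman[:1])  # leading byte only: more bytes expected
    assert could_be_unfinished_utf8(snowman[:2])  # still missing the final continuation byte
    assert not could_be_unfinished_utf8(snowman)  # complete sequence: nothing more expected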
def pp_event(seq: Union[Event, str]) -> Union[str, bytes]:
"""Returns pretty representation of an Event or keypress"""
if isinstance(seq, Event):
return str(seq)
# Get the original sequence back if seq is a pretty name already
rev_curses = {v: k for k, v in CURSES_NAMES.items()}
rev_curtsies = {v: k for k, v in CURTSIES_NAMES.items()}
bytes_seq: Optional[bytes] = None
if seq in rev_curses:
bytes_seq = rev_curses[seq]
elif seq in rev_curtsies:
bytes_seq = rev_curtsies[seq]
if bytes_seq:
pretty = curtsies_name(bytes_seq)
if pretty != seq:
return pretty
return repr(seq).lstrip("u")[1:-1]
def curtsies_name(seq: bytes) -> Union[str, bytes]:
return CURTSIES_NAMES.get(seq, seq)
def try_keys() -> None:
print(
"press a bunch of keys (not at the same time, but you can hit them pretty quickly)"
)
import tty
import termios
import fcntl
import os
from .termhelpers import Cbreak
def ask_what_they_pressed(seq: bytes, Normal: Termmode) -> None:
print("Unidentified character sequence!")
with Normal:
while True:
r = input("type 'ok' to prove you're not pounding keys ")
if r.lower().strip() == "ok":
break
while True:
print(f"Press the key that produced {seq!r} again please")
retry = os.read(sys.stdin.fileno(), 1000)
if seq == retry:
break
print("nope, that wasn't it")
with Normal:
name = input("Describe in English what key you pressed: ")
f = open("keylog.txt", "a")
f.write(f"{seq!r} is called {name}\n")
f.close()
print(
"Thanks! Please open an issue at https://github.com/bpython/curtsies/issues"
)
print(
"or email thomasballinger@gmail.com. Include this terminal history or keylog.txt."
)
print("You can keep pressing keys")
with Cbreak(sys.stdin) as NoCbreak:
while True:
try:
chars = os.read(sys.stdin.fileno(), 1000)
print("---")
print(repr(chars))
if chars in CURTSIES_NAMES:
print(CURTSIES_NAMES[chars])
elif len(chars) == 1:
print("literal")
else:
print("unknown!!!")
ask_what_they_pressed(chars, NoCbreak)
except OSError:
pass
if __name__ == "__main__":
try_keys()
| 34.641667
| 140
| 0.604362
|
5db4730acc85ae5049b69a7833903cfab6e9b2d8
| 11,454
|
py
|
Python
|
HyperSphere/BO/run_BO.py
|
QUVA-Lab/HyperSphere
|
8e126dece948952ddb4ac0c01ac6ab1c8d63720e
|
[
"BSD-2-Clause-FreeBSD"
] | 10
|
2018-07-11T11:14:38.000Z
|
2021-02-01T21:04:27.000Z
|
HyperSphere/BO/run_BO.py
|
QUVA-Lab/HyperSphere
|
8e126dece948952ddb4ac0c01ac6ab1c8d63720e
|
[
"BSD-2-Clause-FreeBSD"
] | 6
|
2018-12-20T18:57:59.000Z
|
2019-10-25T08:32:02.000Z
|
HyperSphere/BO/run_BO.py
|
QUVA-Lab/HyperSphere
|
8e126dece948952ddb4ac0c01ac6ab1c8d63720e
|
[
"BSD-2-Clause-FreeBSD"
] | 8
|
2018-09-04T04:30:37.000Z
|
2020-02-12T08:52:36.000Z
|
import argparse
import os.path
import pickle
import sys
import time
from datetime import datetime
EXPERIMENT_DIR = '/home/coh1/Experiments/Hypersphere'
import torch
from torch.autograd import Variable
import torch.multiprocessing as multiprocessing
if os.path.realpath(__file__).rsplit('/', 3)[0] not in sys.path:
sys.path.append(os.path.realpath(__file__).rsplit('/', 3)[0])
from HyperSphere.BO.acquisition.acquisition_maximization import suggest, optimization_candidates, optimization_init_points, deepcopy_inference, N_INIT
from HyperSphere.GP.models.gp_regression import GPRegression
from HyperSphere.test_functions.benchmarks import *
# Kernels
from HyperSphere.GP.kernels.modules.matern52 import Matern52
from HyperSphere.GP.kernels.modules.radialization import RadializationKernel
# Inferences
from HyperSphere.GP.inference.inference import Inference
from HyperSphere.BO.shadow_inference.inference_sphere_satellite import ShadowInference as satellite_ShadowInference
from HyperSphere.BO.shadow_inference.inference_sphere_origin import ShadowInference as origin_ShadowInference
from HyperSphere.BO.shadow_inference.inference_sphere_origin_satellite import ShadowInference as both_ShadowInference
# feature_map
from HyperSphere.feature_map.modules.kumaraswamy import Kumaraswamy
# boundary conditions
from HyperSphere.feature_map.functionals import sphere_bound
def BO(geometry=None, n_eval=200, path=None, func=None, ndim=None, boundary=False, ard=False, origin=False, warping=False, parallel=False):
assert (path is None) != (func is None)
if path is None:
assert (func.dim == 0) != (ndim is None)
assert geometry is not None
if ndim is None:
ndim = func.dim
exp_conf_str = geometry
if geometry == 'sphere':
assert not ard
exp_conf_str += 'warping' if warping else ''
radius_input_map = Kumaraswamy(ndim=1, max_input=ndim ** 0.5) if warping else None
model = GPRegression(kernel=RadializationKernel(max_power=3, search_radius=ndim ** 0.5, radius_input_map=radius_input_map))
inference_method = None
if origin and boundary:
inference_method = both_ShadowInference
exp_conf_str += 'both'
elif origin:
inference_method = origin_ShadowInference
exp_conf_str += 'origin'
elif boundary:
inference_method = satellite_ShadowInference
exp_conf_str += 'boundary'
else:
inference_method = Inference
exp_conf_str += 'none'
bnd = sphere_bound(ndim ** 0.5)
elif geometry == 'cube':
assert not origin
exp_conf_str += ('ard' if ard else '') + ('boundary' if boundary else '')
model = GPRegression(kernel=Matern52(ndim=ndim, ard=ard))
inference_method = satellite_ShadowInference if boundary else Inference
bnd = (-1, 1)
if not os.path.isdir(EXPERIMENT_DIR):
raise ValueError('In file : ' + os.path.realpath(__file__) + '\nEXPERIMENT_DIR variable is not properly assigned. Please check it.')
dir_list = [elm for elm in os.listdir(EXPERIMENT_DIR) if os.path.isdir(os.path.join(EXPERIMENT_DIR, elm))]
folder_name = func.__name__ + '_D' + str(ndim) + '_' + exp_conf_str + '_' + datetime.now().strftime('%Y%m%d-%H:%M:%S:%f')
os.makedirs(os.path.join(EXPERIMENT_DIR, folder_name))
logfile_dir = os.path.join(EXPERIMENT_DIR, folder_name, 'log')
os.makedirs(logfile_dir)
model_filename = os.path.join(EXPERIMENT_DIR, folder_name, 'model.pt')
data_config_filename = os.path.join(EXPERIMENT_DIR, folder_name, 'data_config.pkl')
x_input = Variable(torch.stack([torch.zeros(ndim), torch.FloatTensor(ndim).uniform_(-1, 1)]))
if func.func_name == 'stochastic_depth_resnet_cifar100':
special_init_point = torch.FloatTensor([-0.88672996375809265, -0.83845553984377363, -0.80082455589209434, -0.76868080609344613, -0.74002860499719103, -0.71384507914214379, -0.6895229479156415, -0.66666666534211871, -0.64500158781765049, -0.62432778870160499, -0.60449429448743319, -0.58538383736427368, -0.56690311453886821, -0.54897644926147593, -0.53154137077618735, -0.51454570980003023, -0.49794520561122835, -0.4817019618876005, -0.46578329447738975, -0.45016063464220946, -0.43480887900991927, -0.41970588594137237, -0.40483184457290511, -0.39016909932337462, -0.37570168000845294, -0.36141512736958714, -0.34729635533386094, -0.33333334161175654, -0.31951507564952675, -0.30583136944490208, -0.29227292909996905, -0.27883100126437665, -0.26549747264739709, -0.25226475894331168, -0.23912574658399377, -0.22607369983030123, -0.2131023835975443, -0.20020577167418563, -0.18737817967669568, -0.1746141913340078, -0.16190858934371632, -0.14925649319813961, -0.13665309066289877, -0.12409378040195429, -0.11157411163518405, -0.099089726169870107, -0.086636502479268351, -0.074210299199806373, -0.061807101474520065, -0.049422967019945307, -0.037054013082912562, -0.024696364163967699, -0.012346298973719083, 0])
x_input = torch.cat([x_input, Variable(special_init_point)])
n_init_eval = x_input.size(0)
output = Variable(torch.zeros(n_init_eval, 1))
for i in range(n_init_eval):
output[i] = func(x_input[i])
time_list = [time.time()] * n_init_eval
elapse_list = [0] * n_init_eval
pred_mean_list = [0] * n_init_eval
pred_std_list = [0] * n_init_eval
pred_var_list = [0] * n_init_eval
pred_stdmax_list = [1] * n_init_eval
pred_varmax_list = [1] * n_init_eval
reference_list = [output.data.squeeze()[0]] * n_init_eval
refind_list = [1] * n_init_eval
dist_to_ref_list = [0] * n_init_eval
sample_info_list = [(10, 0, 10)] * n_init_eval
inference = inference_method((x_input, output), model)
inference.init_parameters()
inference.sampling(n_sample=1, n_burnin=99, n_thin=1)
else:
if not os.path.exists(path):
path = os.path.join(EXPERIMENT_DIR, path)
logfile_dir = os.path.join(path, 'log')
model_filename = os.path.join(path, 'model.pt')
data_config_filename = os.path.join(path, 'data_config.pkl')
model = torch.load(model_filename)
data_config_file = open(data_config_filename, 'r')
for key, value in pickle.load(data_config_file).iteritems():
if key != 'logfile_dir':
exec(key + '=value')
data_config_file.close()
ignored_variable_names = ['n_eval', 'path', 'i', 'key', 'value', 'logfile_dir', 'n_init_eval',
'data_config_file', 'dir_list', 'folder_name', 'model_filename', 'data_config_filename',
'kernel', 'model', 'inference', 'parallel', 'pool']
stored_variable_names = set(locals().keys()).difference(set(ignored_variable_names))
if path is None:
torch.save(model, model_filename)
stored_variable = dict()
for key in stored_variable_names:
stored_variable[key] = locals()[key]
f = open(data_config_filename, 'w')
pickle.dump(stored_variable, f)
f.close()
print('Experiment based on data in %s' % os.path.split(model_filename)[0])
# multiprocessing conflicts with pytorch linear algebra operation
pool = multiprocessing.Pool(N_INIT) if parallel else None
for _ in range(n_eval):
start_time = time.time()
logfile = open(os.path.join(logfile_dir, str(x_input.size(0) + 1).zfill(4) + '.out'), 'w')
inference = inference_method((x_input, output), model)
reference, ref_ind = torch.min(output, 0)
reference = reference.data.squeeze()[0]
gp_hyper_params = inference.sampling(n_sample=10, n_burnin=0, n_thin=1)
inferences = deepcopy_inference(inference, gp_hyper_params)
x0_cand = optimization_candidates(x_input, output, -1, 1)
x0, sample_info = optimization_init_points(x0_cand, reference=reference, inferences=inferences)
next_x_point, pred_mean, pred_std, pred_var, pred_stdmax, pred_varmax = suggest(x0=x0, reference=reference, inferences=inferences, bounds=bnd, pool=pool)
time_list.append(time.time())
elapse_list.append(time_list[-1] - time_list[-2])
pred_mean_list.append(pred_mean.squeeze()[0])
pred_std_list.append(pred_std.squeeze()[0])
pred_var_list.append(pred_var.squeeze()[0])
pred_stdmax_list.append(pred_stdmax.squeeze()[0])
pred_varmax_list.append(pred_varmax.squeeze()[0])
reference_list.append(reference)
refind_list.append(ref_ind.data.squeeze()[0] + 1)
dist_to_ref_list.append(torch.sum((next_x_point - x_input[ref_ind]).data ** 2) ** 0.5)
sample_info_list.append(sample_info)
x_input = torch.cat([x_input, next_x_point], 0)
output = torch.cat([output, func(x_input[-1]).resize(1, 1)])
min_ind = torch.min(output, 0)[1]
min_loc = x_input[min_ind]
min_val = output[min_ind]
dist_to_suggest = torch.sum((x_input - x_input[-1]).data ** 2, 1) ** 0.5
dist_to_min = torch.sum((x_input - min_loc).data ** 2, 1) ** 0.5
out_of_box = torch.sum((torch.abs(x_input.data) > 1), 1)
print('')
for i in range(x_input.size(0)):
time_str = time.strftime('%H:%M:%S', time.gmtime(time_list[i])) + '(' + time.strftime('%H:%M:%S', time.gmtime(elapse_list[i])) + ') '
data_str = ('%3d-th : %+12.4f(R:%8.4f[%4d]/ref:[%3d]%8.4f), sample([%2d] best:%2d/worst:%2d), '
'mean : %+.4E, std : %.4E(%5.4f), var : %.4E(%5.4f), '
'2ownMIN : %8.4f, 2curMIN : %8.4f, 2new : %8.4f' %
(i + 1, output.data.squeeze()[i], torch.sum(x_input.data[i] ** 2) ** 0.5, out_of_box[i], refind_list[i], reference_list[i],
sample_info_list[i][2], sample_info_list[i][0], sample_info_list[i][1],
pred_mean_list[i], pred_std_list[i], pred_std_list[i] / pred_stdmax_list[i], pred_var_list[i], pred_var_list[i] / pred_varmax_list[i],
dist_to_ref_list[i], dist_to_min[i], dist_to_suggest[i]))
min_str = ' <========= MIN' if i == min_ind.data.squeeze()[0] else ''
print(time_str + data_str + min_str)
logfile.writelines(time_str + data_str + min_str + '\n')
logfile.close()
torch.save(model, model_filename)
stored_variable = dict()
for key in stored_variable_names:
stored_variable[key] = locals()[key]
f = open(data_config_filename, 'w')
pickle.dump(stored_variable, f)
f.close()
if parallel:
pool.close()
print('Experiment based on data in %s' % os.path.split(model_filename)[0])
return os.path.split(model_filename)[0]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Bayesian Optimization runner')
parser.add_argument('-g', '--geometry', dest='geometry', help='cube/sphere')
parser.add_argument('-e', '--n_eval', dest='n_eval', type=int, default=1)
parser.add_argument('-p', '--path', dest='path')
parser.add_argument('-d', '--dim', dest='ndim', type=int)
parser.add_argument('-f', '--func', dest='func_name')
parser.add_argument('--boundary', dest='boundary', action='store_true', default=False)
parser.add_argument('--origin', dest='origin', action='store_true', default=False)
parser.add_argument('--ard', dest='ard', action='store_true', default=False)
parser.add_argument('--warping', dest='warping', action='store_true', default=False)
parser.add_argument('--parallel', dest='parallel', action='store_true', default=False)
args = parser.parse_args()
# if args.n_eval == 0:
# args.n_eval = 3 if args.path is None else 1
assert (args.path is None) != (args.func_name is None)
args_dict = vars(args)
if args.func_name is not None:
exec 'func=' + args.func_name
args_dict['func'] = func
del args_dict['func_name']
if args.path is None:
assert (func.dim == 0) != (args.ndim is None)
assert args.geometry is not None
if args.geometry == 'sphere':
assert not args.ard
elif args.geometry == 'cube':
assert not args.origin
assert not args.warping
print(BO(**vars(args)))
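# Hypothetical invocation sketch (flag names taken from the argparse definitions above; the function
# passed to -f must be defined in HyperSphere.test_functions.benchmarks and is left as a placeholder):
#   python run_BO.py --geometry sphere --dim 20 --func <benchmark_name> --n_eval 200 --origin --boundary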
| 47.924686
| 1,216
| 0.727606
|
a8089d13ef3cd3bf29c0011b95c352a8b8b2b562
| 28,910
|
py
|
Python
|
examples/classic_controllers/classic_controllers.py
|
RaviPandey33/gym-electric-motor-1
|
999e18dceed709decf43d646fb29dc7602b9a89c
|
[
"MIT"
] | null | null | null |
examples/classic_controllers/classic_controllers.py
|
RaviPandey33/gym-electric-motor-1
|
999e18dceed709decf43d646fb29dc7602b9a89c
|
[
"MIT"
] | null | null | null |
examples/classic_controllers/classic_controllers.py
|
RaviPandey33/gym-electric-motor-1
|
999e18dceed709decf43d646fb29dc7602b9a89c
|
[
"MIT"
] | null | null | null |
from gym.spaces import Discrete, Box, MultiDiscrete
from gym_electric_motor.physical_systems import SynchronousMotorSystem, DcMotorSystem, DcSeriesMotor, \
DcExternallyExcitedMotor, DoublyFedInductionMotorSystem, SquirrelCageInductionMotorSystem
from gym_electric_motor.reference_generators import MultipleReferenceGenerator, SwitchedReferenceGenerator
from gym_electric_motor.visualization import MotorDashboard
from gym_electric_motor import envs
from controllers.continuous_controller import ContinuousController
from controllers.pi_controller import PIController
from controllers.pid_controller import PIDController
from controllers.discrete_controller import DiscreteController
from controllers.on_off_controller import OnOffController
from controllers.three_point_controller import ThreePointController
from controllers.continuous_action_controller import ContinuousActionController
from controllers.dicrete_action_controller import DiscreteActionController
from controllers.cascaded_controller import CascadedController
from controllers.foc_controller import FieldOrientedController
from controllers.cascaded_foc_controller import CascadedFieldOrientedController
from controllers.induction_motor_foc import InductionMotorFieldOrientedController
from controllers.induction_motor_cascaded_foc import InductionMotorCascadedFieldOrientedController
from externally_referenced_state_plot import ExternallyReferencedStatePlot
from external_plot import ExternalPlot
import numpy as np
class Controller:
"""This is the base class for every controller along with the motor environments."""
@classmethod
def make(cls, environment, stages=None, **controller_kwargs):
"""
This function creates the controller structure and optionally tunes the controller.
Args:
environment: gym-electric-motor environment to be controlled
            stages: stages of the controller, if no stages are passed, the controller is automatically designed and tuned
**controller_kwargs: setting parameters for the controller and visualization
Returns:
fully designed controller for the control of the gym-electric-motor environment, which is called using the
control function
the inputs of the control function are the state and the reference, both are given by the environment
"""
_controllers = {
'pi_controller': [ContinuousActionController, ContinuousController, PIController],
'pid_controller': [ContinuousActionController, ContinuousController, PIDController],
'on_off': [DiscreteActionController, DiscreteController, OnOffController],
'three_point': [DiscreteActionController, DiscreteController, ThreePointController],
'cascaded_controller': [CascadedController],
'foc_controller': [FieldOrientedController],
'cascaded_foc_controller': [CascadedFieldOrientedController],
'foc_rotor_flux_observer': [InductionMotorFieldOrientedController],
'cascaded_foc_rotor_flux_observer': [InductionMotorCascadedFieldOrientedController],
}
controller_kwargs = cls.reference_states(environment, **controller_kwargs)
controller_kwargs = cls.get_visualization(environment, **controller_kwargs)
if stages is not None:
controller_type, stages = cls.find_controller_type(environment, stages, **controller_kwargs)
assert controller_type in _controllers.keys(), f'Controller {controller_type} unknown'
stages = cls.automated_gain(environment, stages, controller_type, _controllers, **controller_kwargs)
controller = _controllers[controller_type][0](environment, stages, _controllers, **controller_kwargs)
else:
controller_type, stages = cls.automated_controller_design(environment, **controller_kwargs)
stages = cls.automated_gain(environment, stages, controller_type, _controllers, **controller_kwargs)
controller = _controllers[controller_type][0](environment, stages, _controllers, **controller_kwargs)
return controller
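        # A hypothetical usage sketch (environment id and the reset/control call signatures are
        # assumptions based on the docstring above, not taken from this file):
        #   import gym_electric_motor as gem
        #   env = gem.make('Cont-SC-PermExDc-v0')
        #   controller = Controller.make(env)
        #   state, reference = env.reset()
        #   action = controller.control(state, reference)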
@staticmethod
def get_visualization(environment, **controller_kwargs):
"""This method separates external_plots and external_ref_plots. It also checks if a MotorDashboard is used."""
if 'external_plot' in controller_kwargs.keys():
ext_plot = []
ref_plot = []
for external_plots in controller_kwargs['external_plot']:
if isinstance(external_plots, ExternalPlot):
ext_plot.append(external_plots)
elif isinstance(external_plots, ExternallyReferencedStatePlot):
ref_plot.append(external_plots)
controller_kwargs['external_plot'] = ext_plot
controller_kwargs['external_ref_plots'] = ref_plot
for visualization in environment.visualizations:
if isinstance(visualization, MotorDashboard):
controller_kwargs['update_interval'] = visualization.update_interval
controller_kwargs['visualization'] = True
return controller_kwargs
controller_kwargs['visualization'] = False
return controller_kwargs
@staticmethod
def reference_states(environment, **controller_kwargs):
"""This method searches the environment for all referenced states and writes them to an array."""
ref_states = []
if isinstance(environment.reference_generator, MultipleReferenceGenerator):
for rg in environment.reference_generator._sub_generators:
if isinstance(rg, SwitchedReferenceGenerator):
ref_states.append(rg._sub_generators[0]._reference_state)
else:
ref_states.append(rg._reference_state)
elif isinstance(environment.reference_generator, SwitchedReferenceGenerator):
ref_states.append(environment.reference_generator._sub_generators[0]._reference_state)
else:
ref_states.append(environment.reference_generator._reference_state)
controller_kwargs['ref_states'] = np.array(ref_states)
return controller_kwargs
@staticmethod
def find_controller_type(environment, stages, **controller_kwargs):
_stages = stages
if isinstance(environment.physical_system, DcMotorSystem):
if type(stages) is list:
if len(stages) > 1:
if type(stages[0]) is list:
stages = stages[0]
if len(stages) > 1:
controller_type = 'cascaded_controller'
else:
controller_type = stages[0]['controller_type']
else:
controller_type = stages[0]['controller_type']
else:
if type(stages) is dict:
controller_type = stages['controller_type']
_stages = [stages]
else:
controller_type = stages
_stages = [{'controller_type': stages}]
elif isinstance(environment.physical_system, SynchronousMotorSystem):
if len(stages) == 2:
if len(stages[1]) == 1 and 'i_sq' in controller_kwargs['ref_states']:
controller_type = 'foc_controller'
else:
controller_type = 'cascaded_foc_controller'
else:
controller_type = 'cascaded_foc_controller'
elif isinstance(environment.physical_system, SquirrelCageInductionMotorSystem):
if len(stages) == 2:
if len(stages[1]) == 1 and 'i_sq' in controller_kwargs['ref_states']:
controller_type = 'foc_rotor_flux_observer'
else:
controller_type = 'cascaded_foc_rotor_flux_observer'
else:
controller_type = 'cascaded_foc_rotor_flux_observer'
elif isinstance(environment.physical_system, DoublyFedInductionMotorSystem):
if len(stages) == 2:
if len(stages[1]) == 1 and 'i_sq' in controller_kwargs['ref_states']:
controller_type = 'foc_rotor_flux_observer'
else:
controller_type = 'cascaded_foc_rotor_flux_observer'
else:
controller_type = 'cascaded_foc_rotor_flux_observer'
return controller_type, _stages
@staticmethod
def automated_controller_design(environment, **controller_kwargs):
"""This method automatically designs the controller based on the given motor environment and control task."""
action_space_type = type(environment.action_space)
ref_states = controller_kwargs['ref_states']
stages = []
if isinstance(environment.physical_system, DcMotorSystem): # Checking type of motor
if 'omega' in ref_states or 'torque' in ref_states: # Checking control task
controller_type = 'cascaded_controller'
for i in range(len(stages), 2):
if i == 0:
if action_space_type is Box: # Checking type of output stage (finite / cont)
stages.append({'controller_type': 'pi_controller'})
else:
stages.append({'controller_type': 'three_point'})
else:
stages.append({'controller_type': 'pi_controller'}) # Adding PI-Controller for overlaid stages
elif 'i' in ref_states or 'i_a' in ref_states:
# Checking type of output stage (finite / cont)
if action_space_type is Discrete or action_space_type is MultiDiscrete:
stages.append({'controller_type': 'three_point'})
elif action_space_type is Box:
stages.append({'controller_type': 'pi_controller'})
controller_type = stages[0]['controller_type']
# Add stage for i_e current of the ExtExDC
if isinstance(environment.physical_system.electrical_motor, DcExternallyExcitedMotor):
if action_space_type is Box:
stages = [stages, [{'controller_type': 'pi_controller'}]]
else:
stages = [stages, [{'controller_type': 'three_point'}]]
elif isinstance(environment.physical_system, SynchronousMotorSystem):
if 'i_sq' in ref_states or 'torque' in ref_states: # Checking control task
controller_type = 'foc_controller' if 'i_sq' in ref_states else 'cascaded_foc_controller'
if action_space_type is Discrete:
stages = [[{'controller_type': 'on_off'}], [{'controller_type': 'on_off'}],
[{'controller_type': 'on_off'}]]
else:
stages = [[{'controller_type': 'pi_controller'}, {'controller_type': 'pi_controller'}]]
elif 'omega' in ref_states:
controller_type = 'cascaded_foc_controller'
if action_space_type is Discrete:
stages = [[{'controller_type': 'on_off'}], [{'controller_type': 'on_off'}],
[{'controller_type': 'on_off'}], [{'controller_type': 'pi_controller'}]]
else:
stages = [[{'controller_type': 'pi_controller'},
{'controller_type': 'pi_controller'}], [{'controller_type': 'pi_controller'}]]
elif isinstance(environment.physical_system, (SquirrelCageInductionMotorSystem, DoublyFedInductionMotorSystem)):
if 'i_sq' in ref_states or 'torque' in ref_states:
controller_type = 'foc_rotor_flux_observer' if 'i_sq' in ref_states else 'cascaded_foc_rotor_flux_observer'
if action_space_type is Discrete:
stages = [[{'controller_type': 'on_off'}], [{'controller_type': 'on_off'}],
[{'controller_type': 'on_off'}]]
else:
stages = [[{'controller_type': 'pi_controller'}, {'controller_type': 'pi_controller'}]]
elif 'omega' in ref_states:
controller_type = 'cascaded_foc_rotor_flux_observer'
if action_space_type is Discrete:
stages = [[{'controller_type': 'on_off'}], [{'controller_type': 'on_off'}],
[{'controller_type': 'on_off'}], [{'controller_type': 'pi_controller'}]]
else:
stages = [[{'controller_type': 'pi_controller'},
{'controller_type': 'pi_controller'}], [{'controller_type': 'pi_controller'}]]
else:
controller_type = 'foc_controller'
return controller_type, stages
@staticmethod
def automated_gain(environment, stages, controller_type, _controllers, **controller_kwargs):
"""
This method automatically parameterizes a given controller design if the parameter automated_gain is True
(default True), based on the design according to the symmetric optimum (SO). Further information about the
design according to the SO can be found in the following paper (https://ieeexplore.ieee.org/document/55967).
Args:
environment: gym-electric-motor environment
stages: list of the stages of the controller
controller_type: string of the used controller type from the dictionary _controllers
_controllers: dictionary of all possible controllers and controller stages
controller_kwargs: further arguments of the controller
Returns:
list of stages, which are completely parameterized
"""
ref_states = controller_kwargs['ref_states']
mp = environment.physical_system.electrical_motor.motor_parameter
limits = environment.physical_system.limits
omega_lim = limits[environment.state_names.index('omega')]
if isinstance(environment.physical_system, DcMotorSystem):
i_a_lim = limits[environment.physical_system.CURRENTS_IDX[0]]
i_e_lim = limits[environment.physical_system.CURRENTS_IDX[-1]]
u_a_lim = limits[environment.physical_system.VOLTAGES_IDX[0]]
u_e_lim = limits[environment.physical_system.VOLTAGES_IDX[-1]]
elif isinstance(environment.physical_system, SynchronousMotorSystem):
i_sd_lim = limits[environment.state_names.index('i_sd')]
i_sq_lim = limits[environment.state_names.index('i_sq')]
u_sd_lim = limits[environment.state_names.index('u_sd')]
u_sq_lim = limits[environment.state_names.index('u_sq')]
torque_lim = limits[environment.state_names.index('torque')]
else:
i_sd_lim = limits[environment.state_names.index('i_sd')]
i_sq_lim = limits[environment.state_names.index('i_sq')]
u_sd_lim = limits[environment.state_names.index('u_sd')]
u_sq_lim = limits[environment.state_names.index('u_sq')]
torque_lim = limits[environment.state_names.index('torque')]
# The parameter a is a design parameter when designing a controller according to the SO
a = controller_kwargs.get('a', 4)
automated_gain = controller_kwargs.get('automated_gain', True)
if isinstance(environment.physical_system.electrical_motor, DcSeriesMotor):
mp['l'] = mp['l_a'] + mp['l_e']
elif isinstance(environment.physical_system, DcMotorSystem):
mp['l'] = mp['l_a']
if 'automated_gain' not in controller_kwargs.keys() or automated_gain:
cont_extex_envs = (
envs.ContSpeedControlDcExternallyExcitedMotorEnv,
envs.ContCurrentControlDcExternallyExcitedMotorEnv,
envs.ContTorqueControlDcExternallyExcitedMotorEnv
)
finite_extex_envs = (
envs.FiniteTorqueControlDcExternallyExcitedMotorEnv,
envs.FiniteSpeedControlDcExternallyExcitedMotorEnv,
envs.FiniteCurrentControlDcExternallyExcitedMotorEnv
)
if type(environment) in cont_extex_envs:
stages_a = stages[0]
stages_e = stages[1]
p_gain = mp['l_e'] / (environment.physical_system.tau * a) / u_e_lim * i_e_lim
i_gain = p_gain / (environment.physical_system.tau * a ** 2)
stages_e[0]['p_gain'] = stages_e[0].get('p_gain', p_gain)
stages_e[0]['i_gain'] = stages_e[0].get('i_gain', i_gain)
if stages_e[0]['controller_type'] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages_e[0]['d_gain'] = stages_e[0].get('d_gain', d_gain)
elif type(environment) in finite_extex_envs:
stages_a = stages[0]
stages_e = stages[1]
else:
stages_a = stages
stages_e = False
if _controllers[controller_type][0] == ContinuousActionController:
if 'i' in ref_states or 'i_a' in ref_states or 'torque' in ref_states:
p_gain = mp['l'] / (environment.physical_system.tau * a) / u_a_lim * i_a_lim
i_gain = p_gain / (environment.physical_system.tau * a ** 2)
stages_a[0]['p_gain'] = stages_a[0].get('p_gain', p_gain)
stages_a[0]['i_gain'] = stages_a[0].get('i_gain', i_gain)
if _controllers[controller_type][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages_a[0]['d_gain'] = stages_a[0].get('d_gain', d_gain)
elif 'omega' in ref_states:
p_gain = environment.physical_system.mechanical_load.j_total * mp['r_a'] ** 2 / (
a * mp['l']) / u_a_lim * omega_lim
i_gain = p_gain / (a * mp['l'])
stages_a[0]['p_gain'] = stages_a[0].get('p_gain', p_gain)
stages_a[0]['i_gain'] = stages_a[0].get('i_gain', i_gain)
if _controllers[controller_type][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages_a[0]['d_gain'] = stages_a[0].get('d_gain', d_gain)
elif _controllers[controller_type][0] == CascadedController:
for i in range(len(stages)):
if _controllers[stages_a[i]['controller_type']][1] == ContinuousController:
if i == 0:
p_gain = mp['l'] / (environment.physical_system.tau * a) / u_a_lim * i_a_lim
i_gain = p_gain / (environment.physical_system.tau * a ** 2)
if _controllers[stages_a[i]['controller_type']][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages_a[i]['d_gain'] = stages_a[i].get('d_gain', d_gain)
elif i == 1:
t_n = environment.physical_system.tau * a ** 2
p_gain = environment.physical_system.mechanical_load.j_total / (
a * t_n) / i_a_lim * omega_lim
i_gain = p_gain / (a * t_n)
if _controllers[stages_a[i]['controller_type']][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages_a[i]['d_gain'] = stages_a[i].get('d_gain', d_gain)
stages_a[i]['p_gain'] = stages_a[i].get('p_gain', p_gain)
stages_a[i]['i_gain'] = stages_a[i].get('i_gain', i_gain)
stages = stages_a if not stages_e else [stages_a, stages_e]
elif _controllers[controller_type][0] == FieldOrientedController:
if type(environment.action_space) == Box:
stage_d = stages[0][0]
stage_q = stages[0][1]
if 'i_sq' in ref_states and _controllers[stage_q['controller_type']][1] == ContinuousController:
p_gain_d = mp['l_d'] / (1.5 * environment.physical_system.tau * a) / u_sd_lim * i_sd_lim
i_gain_d = p_gain_d / (1.5 * environment.physical_system.tau * a ** 2)
p_gain_q = mp['l_q'] / (1.5 * environment.physical_system.tau * a) / u_sq_lim * i_sq_lim
i_gain_q = p_gain_q / (1.5 * environment.physical_system.tau * a ** 2)
stage_d['p_gain'] = stage_d.get('p_gain', p_gain_d)
stage_d['i_gain'] = stage_d.get('i_gain', i_gain_d)
stage_q['p_gain'] = stage_q.get('p_gain', p_gain_q)
stage_q['i_gain'] = stage_q.get('i_gain', i_gain_q)
if _controllers[stage_d['controller_type']][2] == PIDController:
d_gain_d = p_gain_d * environment.physical_system.tau
stage_d['d_gain'] = stage_d.get('d_gain', d_gain_d)
if _controllers[stage_q['controller_type']][2] == PIDController:
d_gain_q = p_gain_q * environment.physical_system.tau
stage_q['d_gain'] = stage_q.get('d_gain', d_gain_q)
stages = [[stage_d, stage_q]]
elif _controllers[controller_type][0] == CascadedFieldOrientedController:
if type(environment.action_space) is Box:
stage_d = stages[0][0]
stage_q = stages[0][1]
if 'torque' not in controller_kwargs['ref_states']:
overlaid = stages[1]
p_gain_d = mp['l_d'] / (1.5 * environment.physical_system.tau * a) / u_sd_lim * i_sd_lim
i_gain_d = p_gain_d / (1.5 * environment.physical_system.tau * a ** 2)
p_gain_q = mp['l_q'] / (1.5 * environment.physical_system.tau * a) / u_sq_lim * i_sq_lim
i_gain_q = p_gain_q / (1.5 * environment.physical_system.tau * a ** 2)
stage_d['p_gain'] = stage_d.get('p_gain', p_gain_d)
stage_d['i_gain'] = stage_d.get('i_gain', i_gain_d)
stage_q['p_gain'] = stage_q.get('p_gain', p_gain_q)
stage_q['i_gain'] = stage_q.get('i_gain', i_gain_q)
if _controllers[stage_d['controller_type']][2] == PIDController:
d_gain_d = p_gain_d * environment.physical_system.tau
stage_d['d_gain'] = stage_d.get('d_gain', d_gain_d)
if _controllers[stage_q['controller_type']][2] == PIDController:
d_gain_q = p_gain_q * environment.physical_system.tau
stage_q['d_gain'] = stage_q.get('d_gain', d_gain_q)
if 'torque' not in controller_kwargs['ref_states'] and \
_controllers[overlaid[0]['controller_type']][1] == ContinuousController:
t_n = p_gain_d / i_gain_d
j_total = environment.physical_system.mechanical_load.j_total
p_gain = j_total / (a ** 2 * t_n) / torque_lim * omega_lim
i_gain = p_gain / (a * t_n)
overlaid[0]['p_gain'] = overlaid[0].get('p_gain', p_gain)
overlaid[0]['i_gain'] = overlaid[0].get('i_gain', i_gain)
if _controllers[overlaid[0]['controller_type']][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
overlaid[0]['d_gain'] = overlaid[0].get('d_gain', d_gain)
stages = [[stage_d, stage_q], overlaid]
else:
stages = [[stage_d, stage_q]]
else:
if 'omega' in ref_states and _controllers[stages[3][0]['controller_type']][1] == ContinuousController:
p_gain = environment.physical_system.mechanical_load.j_total / (
1.5 * a ** 2 * mp['p'] * np.abs(mp['l_d'] - mp['l_q'])) / i_sq_lim * omega_lim
i_gain = p_gain / (1.5 * environment.physical_system.tau * a)
stages[3][0]['p_gain'] = stages[3][0].get('p_gain', p_gain)
stages[3][0]['i_gain'] = stages[3][0].get('i_gain', i_gain)
if _controllers[stages[3][0]['controller_type']][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
stages[3][0]['d_gain'] = stages[3][0].get('d_gain', d_gain)
elif _controllers[controller_type][0] == InductionMotorFieldOrientedController:
mp['l_s'] = mp['l_m'] + mp['l_sigs']
mp['l_r'] = mp['l_m'] + mp['l_sigr']
sigma = (mp['l_s'] * mp['l_r'] - mp['l_m'] ** 2) / (mp['l_s'] * mp['l_r'])
tau_sigma = (sigma * mp['l_s']) / (mp['r_s'] + mp['r_r'] * mp['l_m'] ** 2 / mp['l_r'] ** 2)
tau_r = mp['l_r'] / mp['r_r']
p_gain = tau_r / tau_sigma
i_gain = p_gain / tau_sigma
stages[0][0]['p_gain'] = stages[0][0].get('p_gain', p_gain)
stages[0][0]['i_gain'] = stages[0][0].get('i_gain', i_gain)
stages[0][1]['p_gain'] = stages[0][1].get('p_gain', p_gain)
stages[0][1]['i_gain'] = stages[0][1].get('i_gain', i_gain)
if _controllers[stages[0][0]['controller_type']][2] == PIDController:
d_gain = p_gain * tau_sigma
stages[0][0]['d_gain'] = stages[0][0].get('d_gain', d_gain)
if _controllers[stages[0][1]['controller_type']][2] == PIDController:
d_gain = p_gain * tau_sigma
stages[0][1]['d_gain'] = stages[0][1].get('d_gain', d_gain)
elif _controllers[controller_type][0] == InductionMotorCascadedFieldOrientedController:
if 'torque' not in controller_kwargs['ref_states']:
overlaid = stages[1]
mp['l_s'] = mp['l_m'] + mp['l_sigs']
mp['l_r'] = mp['l_m'] + mp['l_sigr']
sigma = (mp['l_s'] * mp['l_r'] - mp['l_m'] ** 2) / (mp['l_s'] * mp['l_r'])
tau_sigma = (sigma * mp['l_s']) / (mp['r_s'] + mp['r_r'] * mp['l_m'] ** 2 / mp['l_r'] ** 2)
tau_r = mp['l_r'] / mp['r_r']
p_gain = tau_r / tau_sigma
i_gain = p_gain / tau_sigma
stages[0][0]['p_gain'] = stages[0][0].get('p_gain', p_gain)
stages[0][0]['i_gain'] = stages[0][0].get('i_gain', i_gain)
stages[0][1]['p_gain'] = stages[0][1].get('p_gain', p_gain)
stages[0][1]['i_gain'] = stages[0][1].get('i_gain', i_gain)
if _controllers[stages[0][0]['controller_type']][2] == PIDController:
d_gain = p_gain * tau_sigma
stages[0][0]['d_gain'] = stages[0][0].get('d_gain', d_gain)
if _controllers[stages[0][1]['controller_type']][2] == PIDController:
d_gain = p_gain * tau_sigma
stages[0][1]['d_gain'] = stages[0][1].get('d_gain', d_gain)
if 'torque' not in controller_kwargs['ref_states'] and \
_controllers[overlaid[0]['controller_type']][1] == ContinuousController:
t_n = p_gain / i_gain
j_total = environment.physical_system.mechanical_load.j_total
p_gain = j_total / (a ** 2 * t_n) / torque_lim * omega_lim
i_gain = p_gain / (a * t_n)
overlaid[0]['p_gain'] = overlaid[0].get('p_gain', p_gain)
overlaid[0]['i_gain'] = overlaid[0].get('i_gain', i_gain)
if _controllers[overlaid[0]['controller_type']][2] == PIDController:
d_gain = p_gain * environment.physical_system.tau
overlaid[0]['d_gain'] = overlaid[0].get('d_gain', d_gain)
stages = [stages[0], overlaid]
return stages
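# Illustrative sketch of the symmetric-optimum (SO) tuning used above (not part of the original
# function; the numeric values are assumptions): p_gain = l / (tau * a) * i_lim / u_lim and
# i_gain = p_gain / (tau * a ** 2), with d_gain = p_gain * tau for a PID stage. For tau = 1e-4 s,
# a = 4, l = 6.3e-3 H, u_lim = 420 V and i_lim = 75 A this gives p_gain ≈ 2.81 and i_gain ≈ 1758,
# which is the same arithmetic the current-control branches perform with the motor parameters and
# limits read from the environment.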
| 54.961977
| 123
| 0.587617
|
927fef92ed8be34c7b77bf69f39e8db0744e3543
| 6,005
|
py
|
Python
|
bw2io/strategies/lcia.py
|
sc-gcoste/brightway2-io
|
3a1f6d4efeb1b1754343ca7a243c4c1bcaaca40a
|
[
"BSD-3-Clause"
] | 5
|
2020-06-03T20:59:37.000Z
|
2021-08-19T06:59:36.000Z
|
bw2io/strategies/lcia.py
|
sc-gcoste/brightway2-io
|
3a1f6d4efeb1b1754343ca7a243c4c1bcaaca40a
|
[
"BSD-3-Clause"
] | 54
|
2020-03-03T09:19:25.000Z
|
2022-03-09T17:48:05.000Z
|
bw2io/strategies/lcia.py
|
brightway-lca/brightway2-io-copy
|
8383adc2f0cb06852f689fb2aab62d5a29f41130
|
[
"BSD-3-Clause"
] | 16
|
2020-03-07T17:58:18.000Z
|
2022-02-08T15:52:37.000Z
|
import collections
import copy
from bw2data import Database
from ..utils import activity_hash
def add_activity_hash_code(data):
"""Add ``code`` field to characterization factors using ``activity_hash``, if ``code`` not already present."""
for method in data:
for cf in method["exchanges"]:
if cf.get("code"):
continue
cf[u"code"] = activity_hash(cf)
return data
def drop_unlinked_cfs(data):
"""Drop CFs which don't have ``input`` attribute"""
for method in data:
method[u"exchanges"] = [
cf for cf in method["exchanges"] if cf.get("input") is not None
]
return data
def set_biosphere_type(data):
"""Set CF types to 'biosphere', to keep compatibility with LCI strategies.
This will overwrite existing ``type`` values."""
for method in data:
for cf in method["exchanges"]:
cf[u"type"] = u"biosphere"
return data
def rationalize_method_names(data):
counts = collections.Counter()
for obj in data:
if isinstance(obj["name"], tuple):
counts[obj["name"][:2]] += 1
for obj in data:
if not isinstance(obj["name"], tuple):
continue
if " w/o LT" in obj["name"][0]:
obj["name"] = tuple([o.replace(" w/o LT", "") for o in obj["name"]]) + (
"without long-term",
)
if " no LT" in obj["name"][0]:
obj["name"] = tuple([o.replace(" no LT", "") for o in obj["name"]]) + (
"without long-term",
)
elif {o.lower() for o in obj["name"][1:]} == {"total"}:
obj["name"] = obj["name"][:2]
elif len(obj["name"]) > 2 and obj["name"][1].lower() == "total":
obj["name"] = (obj["name"][0],) + obj["name"][2:]
elif obj["name"][-1].lower() == "total" and counts[obj["name"][:2]] == 1:
obj["name"] = obj["name"][:2]
return data
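# Worked example (illustrative, not part of the original module): a method named
# ('IPCC 2013 no LT', 'climate change no LT', 'GWP 100a no LT') is rewritten above to
# ('IPCC 2013', 'climate change', 'GWP 100a', 'without long-term'), while names whose
# elements after the first are all 'total' are shortened to their first two elements.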
def match_subcategories(data, biosphere_db_name, remove=True):
"""Given a characterization with a top-level category, e.g. ``('air',)``, find all biosphere flows with the same top-level categories, and add CFs for these flows as well. Doesn't replace CFs for existing flows with multi-level categories. If ``remove``, also delete the top-level CF, but only if it is unlinked."""
def add_amount(obj, amount):
obj["amount"] = amount
return obj
def add_subcategories(obj, mapping):
# Sorting needed for tests
new_objs = sorted(
mapping[
(
obj["categories"][0],
obj["name"],
obj["unit"],
)
],
key=lambda x: tuple([x[key] for key in sorted(x.keys())]),
)
# Need to create copies so data from later methods doesn't
# clobber amount values
return [add_amount(copy.deepcopy(elem), obj["amount"]) for elem in new_objs]
mapping = collections.defaultdict(list)
for flow in Database(biosphere_db_name):
# Try to filter out industrial activities and their flows
if not flow.get("type") in ("emission", "natural resource"):
continue
if len(flow.get("categories", [])) > 1:
mapping[(flow["categories"][0], flow["name"], flow["unit"])].append(
{
"categories": flow["categories"],
"database": flow["database"],
"input": flow.key,
"name": flow["name"],
"unit": flow["unit"],
}
)
for method in data:
already_have = {(obj["name"], obj["categories"]) for obj in method["exchanges"]}
new_cfs = []
for obj in method["exchanges"]:
if len(obj["categories"]) > 1:
continue
# Don't add subcategory flows which already have CFs
subcat_cfs = [
x
for x in add_subcategories(obj, mapping)
if (x["name"], x["categories"]) not in already_have
]
if subcat_cfs and remove and not obj.get("input"):
obj["remove_me"] = True
new_cfs.extend(subcat_cfs)
method[u"exchanges"].extend(new_cfs)
if remove:
method[u"exchanges"] = [
obj for obj in method["exchanges"] if not obj.get("remove_me")
]
return data
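# Illustrative example (the flow names are assumptions, not taken from the module): a CF for
# name 'Methane', unit 'kilogram' and top-level category ('air',) is expanded above into one CF
# per matching biosphere flow with a deeper category such as ('air', 'urban air close to ground'),
# each copy inheriting the original 'amount'; the unlinked top-level CF is then dropped when
# remove=True.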
def fix_ecoinvent_38_lcia_implementation(data):
"""Ecoinvent 3.8 LCIA implmentation uses some flow names from 3.7.
Update these when possible, delete when not."""
MAPPING = {
(
"Cyfluthrin",
("soil", "agricultural"),
): "Beta-cyfluthrin", # Note: Not the same thing!!!
(
"Cyfluthrin",
("air", "non-urban air or from high stacks"),
): "Beta-cyfluthrin", # Note: Not the same thing!!!
(
"Carfentrazone ethyl ester",
("soil", "agricultural"),
): "[Deleted]Carfentrazone ethyl ester", # Note: Seriously, WTF!?
(
"Tri-allate",
("soil", "agricultural"),
): "Triallate", # But now there is ALSO a flow called "[Deleted]Tri-allate" into agricultural soil!
(
"Thiophanat-methyl",
("soil", "agricultural"),
): "Thiophanate-methyl", # Why not? Keep them on their toes! But please make sure to change it back in 3.9.
}
REMOVE = {
("Flurochloridone", ("soil", "agricultural")),
("Chlorotoluron", ("soil", "agricultural")),
}
for method in data:
method["exchanges"] = [
cf
for cf in method["exchanges"]
if (cf["name"], cf["categories"]) not in REMOVE
]
for cf in method["exchanges"]:
try:
cf["name"] = MAPPING[(cf["name"], cf["categories"])]
except KeyError:
pass
return data
| 35.116959
| 319
| 0.531391
|
6f43df9cb656ca715d94fc002ea96e9a71ababad
| 10,466
|
py
|
Python
|
Imaginary/2021/pwn/ICICLE_Jail/icicle.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1
|
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
Imaginary/2021/pwn/ICICLE_Jail/icicle.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
Imaginary/2021/pwn/ICICLE_Jail/icicle.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
from Crypto.Util.number import *
import time
MEMSIZE = 2**16
REGNAMES = ["r%d"%i for i in range(16)]+['rip']
DEBUG = 0
def prd(*args):
if DEBUG:
print(*args)
class MemoryException(Exception):
pass
class Memory():
def __init__(self, size=MEMSIZE, mem=None, rnames=REGNAMES):
if mem is not None:
self.size = mem.size
self.regs = {k:mem.regs[k] for k in mem.regs}
self.ram = {k:mem.ram[k] for k in mem.ram}
self.regs['rip'] = 0
else:
self.size = size
self.regs = {r:0 for r in rnames}
self.ram = {}
def setRam(self, addr, val):
if type(addr) != int:
raise MemoryException("Attempting to access invalid memory address "+str(addr))
if addr < 0 or addr >= self.size:
raise MemoryException("Memory "+str(addr)+" out of bounds!")
self.ram[addr] = val
def getRam(self, addr):
if type(addr) != int:
raise MemoryException("Attempting to access invalid memory address "+str(addr))
if addr < 0 or addr >= self.size:
raise MemoryException("Memory "+str(addr)+" out of bounds!")
if addr in self.ram:
return self.ram[addr]
self.ram[addr] = 0
return 0
def getVal(self, val):
if type(val) != str or len(val) == 0:
raise MemoryException("Bad value "+str(val)+" recieved!")
if val.isdigit():
return int(val)
if val in self.regs:
return self.regs[val]
if val[0] == '[' and val[-1] == ']':
return self.getRam(self.getVal(val[1:-1]))
if val[0] == '"' and val[-1] == '"':
return val[1:-1]
if val[0] == "'" and val[-1] == "'":
return val[1:-1]
raise MemoryException("Bad value "+str(val)+" recieved!")
def assign(self, loc, val):
if type(loc) != str or len(loc) == 0:
raise MemoryException("Bad location "+str(loc)+" recieved!")
if loc in self.regs:
self.regs[loc] = val
return
if loc[0] == '[' and loc[-1] == ']':
self.setRam(self.getVal(loc[1:-1]), val)
def rip(self):
return self.regs['rip']
def inc_rip(self):
self.regs['rip'] += 1
class VM():
def __init__(self, program='', memory=None):
self.program = self.parseStr(program)
if memory is not None:
self.mem = Memory(mem=memory)
else:
self.mem = Memory(size=MEMSIZE)
self.insns_map = {"add": self.add,
"sub": self.sub,
"mult": self.mult,
"div": self.div,
"mod": self.mod,
"xor": self.xor,
"and": self.andd,
"or": self.orr,
"rev": self.rev,
"mov": self.mov,
"strint": self.strint,
"intstr": self.intstr,
"pr": self.pr,
"readstr": self.readstr,
"readint": self.readint,
"j": self.jump,
"jnz": self.jnz,
"jz": self.jz,
"jl": self.jl,
"readf": self.readf,
"writef": self.writef,
"exec": self.exec,
} # also labels
def quit(self, msg='', exitcode=1, test=False):
if exitcode != 0:
print("Error running line '", self.c_insn(), "' at insn pointer", self.mem.rip())
if msg == '':
print("Quitting...")
else:
print(msg)
exit(exitcode)
def parseStr(self, s):
return [line.strip() for line in s.split('\n')]
def parseInsn(self, insn):
args = insn.split("#")[0].strip().split(" ")
cmd = args[0]
rest = " ".join(args[1:]).strip().split(", ")
return [cmd]+rest
def c_insn(self):
return self.program[self.mem.rip()]
def run(self):
try:
while self.mem.rip() < len(self.program):
prd("Executing '"+self.c_insn()+"' at insn_pointer "+str(self.mem.rip()))
self.execute_insn(self.c_insn())
self.mem.inc_rip()
except MemoryException as e:
self.quit(str(e))
except Exception as e:
print("Unknown error occurred.")
if DEBUG:
raise e
self.quit()
def execute_insn(self, insn):
if insn == '' or insn[-1] == ':' or insn[0] == "#":
return
args = self.parseInsn(insn)
prd(args)
self.insns_map[args[0]](*args[1:])
def setRam(self, addr, val):
return self.mem.setRam(addr, val)
def getRam(self, addr):
return self.mem.getRam(addr)
def getVal(self, val):
return self.mem.getVal(val)
def assign(self, loc, val):
return self.mem.assign(loc, val)
def reset(self):
self.mem = Memory()
def add(self, *args):
a1 = self.getVal(args[1])
a2 = self.getVal(args[2])
if type(a1) != type(a2):
a1 = str(a1)
a2 = str(a2)
self.assign(args[0], a1+a2)
def sub(self, *args):
a1 = self.getVal(args[1])
a2 = self.getVal(args[2])
if type(a1) != int or type(a2) != int:
self.quit("sub args not int!")
self.assign(args[0], a1-a2)
def mult(self, *args):
a1 = self.getVal(args[1])
a2 = self.getVal(args[2])
if type(a1) == str or type(a2) == str:
self.quit("Both mult args are strings!")
self.assign(args[0], a1*a2)
def div(self, *args):
a1 = self.getVal(args[1])
a2 = self.getVal(args[2])
if type(a1) != int or type(a2) != int:
self.quit("div args not int!")
self.assign(args[0], a1//a2)
def mod(self, *args):
a1 = self.getVal(args[1])
a2 = self.getVal(args[2])
if type(a1) != int or type(a2) != int:
self.quit("mod args not int!")
self.assign(args[0], a1%a2)
def andd(self, *args):
a1 = self.getVal(args[1])
a2 = self.getVal(args[2])
if type(a1) != int or type(a2) != int:
self.quit("and args not int!")
self.assign(args[0], a1&a2)
def orr(self, *args):
a1 = self.getVal(args[1])
a2 = self.getVal(args[2])
if type(a1) != int or type(a2) != int:
self.quit("or args not int!")
self.assign(args[0], a1|a2)
def xor(self, *args):
a1 = self.getVal(args[1])
a2 = self.getVal(args[2])
if type(a1) == int and type(a2) == int:
self.assign(args[0], a1^a2)
else:
a1 = long_to_bytes(a1).decode() if type(a1) == int else a1
a2 = long_to_bytes(a2).decode() if type(a2) == int else a2
self.assign(args[0], self.xorstr(a1, a2))
def xorstr(self, s1, s2):
l = max(len(s1), len(s2))
s1 = s1.encode()
s2 = s2.encode()
ret = ''
for i in range(l):
ret += chr(s1[i%len(s1)]^s2[i%len(s2)])
return ret
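# Illustrative note (not part of the original VM): xorstr implements a repeating-key XOR in which
# the shorter string cycles over the longer one, e.g. VM().xorstr('ABC', ' ') == 'abc'
# (XOR with 0x20 toggles the ASCII case bit).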
def readint(self, *args):
try:
self.assign(args[0], int(input()))
except ValueError as e:
self.quit("Bad int input!")
def readstr(self, *args):
self.assign(args[0], input())
def pr(self, *args):
print(self.getVal(args[0]), end='', flush=True)
def strint(self, *args):
a1 = self.getVal(args[1])
if type(a1) != str:
self.quit("Attempting to convert non-string to int!")
self.assign(args[0], bytes_to_long(bytes([ord(i) for i in a1])))
def intstr(self, *args):
a1 = self.getVal(args[1])
if type(a1) != int:
self.quit("Attempting to convert non-int to string!")
try:
b = bytes.fromhex(hex(a1)[2:])
except ValueError as e:
if 'non-hexadecimal' not in str(e):
raise e
b = bytes.fromhex('0'+hex(a1)[2:])
self.assign(args[0], ''.join([chr(i) for i in b]))
def revint(self, i):
sign = 0 if not i else (1 if i>0 else -1)
return sign*int(str(abs(i))[::-1])
def rev(self, *args):
a1 = self.getVal(args[1])
if type(a1) == int:
ret = self.revint(a1)
else:
ret = a1[::-1]
self.assign(args[0], ret)
def mov(self, *args):
self.assign(args[0], self.getVal(args[1]))
def jump(self, *args):
for i, insn in enumerate(self.program):
if insn == args[0]+':':
self.assign('rip', i)
return
self.quit("Could not find label "+args[0]+"!")
def jz(self, *args):
if self.getVal(args[0]) == 0:
self.jump(args[1])
def jnz(self, *args):
if self.getVal(args[0]) != 0:
self.jump(args[1])
def jl(self, *args):
if self.getVal(args[0]) < self.getVal(args[1]):
self.jump(args[2])
def readf(self, *args):
a1 = self.getVal(args[1])
if type(a1) != str:
self.quit("Filename not str!")
try:
with open(a1, 'r') as f:
self.assign(args[0], f.read())
except FileNotFoundError:
self.quit("File "+a1+" not found!")
def writef(self, *args):
a1 = self.getVal(args[1])
if type(a1) != str:
a1 = str(a1)
try:
with open(a1, 'w') as f:
f.write(str(self.getVal(args[0])))
except FileNotFoundError:
self.quit("File "+a1+" not found!")
def exec(self, *args):
code = self.getVal(args[0])
if type(code) == int:
code = ''.join([chr(i) for i in long_to_bytes(code)])
tempVM = VM(program=code, memory=self.mem)
ip = self.mem.rip()
tempVM.run()
self.mem = tempVM.mem
self.assign('rip', ip)
if __name__ == '__main__':
args = sys.argv
if len(args) < 2:
print("Usage: ./icicle.py [filename]")
sys.exit(0)
with open(sys.argv[1], 'r') as f:
vm = VM(program=f.read())
vm.run()
| 31.14881
| 93
| 0.48347
|
e2d2878a3f56760f76cceaf89890cbccf581db85
| 9,268
|
py
|
Python
|
colabcode/code.py
|
sandyz1000/colabcode
|
c755608ca5fc0caa09241da2f190d9df6cf1974b
|
[
"MIT"
] | null | null | null |
colabcode/code.py
|
sandyz1000/colabcode
|
c755608ca5fc0caa09241da2f190d9df6cf1974b
|
[
"MIT"
] | null | null | null |
colabcode/code.py
|
sandyz1000/colabcode
|
c755608ca5fc0caa09241da2f190d9df6cf1974b
|
[
"MIT"
] | null | null | null |
import os
import sys
import socket
import subprocess
import uuid
import time
import platform
import tempfile
import secrets
from pgrok import pgrok
from pyngrok import ngrok
from http import HTTPStatus
from urllib.request import urlopen
import logging
from kafka_logger import init_kafka_logger, OutputLogger
import gdrivefs.config
from gdrivefs import gdfuse
from gdrivefs import oauth_authorize
DEFAULT_DOWNLOAD_TIMEOUT = 6
DEFAULT_RETRY_COUNT = 0
PRINT_PROGRESS_ENABLE = True
logger = logging.getLogger(__name__)
EXTENSIONS = ["ms-python.python", "ms-toolsai.jupyter", "mechatroner.rainbow-csv", "vscode-icons-team.vscode-icons"]
CODESERVER_VERSION = "3.11.1"
class ColabCode:
def __init__(
self,
port=10000,
password=None,
authtoken=None,
subdomain=None,
mount_drive=False,
lab=False,
interactive=False,
tunnel_backend='pgrok',
logger_name="kafka.logger",
settings_ini=None,
):
global logger
self.port = port
self.password = password
self.authtoken = authtoken
self._mount = mount_drive
self._lab = lab
self.auth_storage_filepath = gdrivefs.config.DEFAULT_CREDENTIALS_FILEPATH
self.subdomain = subdomain if subdomain else secrets.token_hex(4)
if settings_ini:
logger = init_kafka_logger(logger_name, settings_ini)
else:
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
stdout_handler.setFormatter(logging.Formatter(
"%(asctime)s %(name)-12s %(levelname)-8s %(message)s", "%Y-%m-%dT%H:%M:%S"
))
logger.addHandler(stdout_handler)
self.redirector = OutputLogger(logger)
# Auth Google drive, interactive mode in notebook/shell env
if self._mount and interactive and platform.system().lower() == 'linux':
self._handle_auth_url()
authcode = input("Enter Auth code from your browser\n")
self._auth_write(authcode)
# Install and start server
if not self._lab:
self._install_code()
self._install_extensions()
if tunnel_backend == 'pgrok':
self._start_pgrok_server()
else:
self._start_server()
if self._lab:
self._run_lab()
else:
self._run_code()
def mount_gdrive(self):
gdfuse.mount(
auth_storage_filepath=self.auth_storage_filepath,
mountpoint=self._mount,
debug=gdrivefs.config.IS_DEBUG,
nothreads=gdrivefs.config.IS_DEBUG,
option_string=self.option_string
)
def _get_url(self):
# This won't actually be needed so set it to the default.
gdfuse.set_auth_cache_filepath(self.auth_storage_filepath)
oa = oauth_authorize.get_auth()
return oa.step1_get_auth_url()
def _handle_auth_url(self):
url = self._get_url()
with self.redirector:
print(
"To authorize FUSE to use your Google Drive account, visit the "
"following URL to produce an authorization code:\n\n%s\n" % (url,)
)
def _auth_write(self, authcode):
gdfuse.set_auth_cache_filepath(self.auth_storage_filepath)
oa = oauth_authorize.get_auth()
oa.step2_doexchange(authcode)
with self.redirector:
print("Authorization code recorded.")
def _print_progress(self, line):
if PRINT_PROGRESS_ENABLE:
sys.stdout.write("{}\r".format(line))
sys.stdout.flush()
def _clear_progress(self, spaces=100):
if PRINT_PROGRESS_ENABLE:
sys.stdout.write((" " * spaces) + "\r")
sys.stdout.flush()
def _download_code(self, url, retries=0, **kwargs):
"""
Given the url download the code binaries from the given url
Args:
url ([type]): [description]
retries ([type], optional): [description]. Defaults to 0.
Raises:
e: [description]
Returns:
[type]: [description]
"""
kwargs["timeout"] = kwargs.get("timeout", DEFAULT_DOWNLOAD_TIMEOUT)
try:
with self.redirector:
self._print_progress("Downloading code ...")
print("Downloading code from {} ...".format(url))
local_filename = url.split("/")[-1]
response = urlopen(url)
status_code = response.getcode()
if status_code != HTTPStatus.OK:
with self.redirector:
print("Response status code: {}".format(status_code))
return None
length = response.getheader("Content-Length")
if length:
length = int(length)
chunk_size = max(4096, length // 100)
else:
chunk_size = 64 * 1024
download_path = os.path.join(tempfile.gettempdir(), local_filename)
with open(download_path, "wb") as f, self.redirector:
size = 0
while True:
buffer = response.read(chunk_size)
if not buffer:
break
f.write(buffer)
size += len(buffer)
if length:
percent_done = int((float(size) / float(length)) * 100)
self._print_progress("Downloading code: {}%".format(percent_done))
self._clear_progress()
return download_path
except socket.timeout as e:
if retries < DEFAULT_RETRY_COUNT:
time.sleep(0.5)
return self._download_code(url, retries + 1, **kwargs)
else:
raise e
def _install_code(self):
downloaded_path = self._download_code('https://raw.githubusercontent.com/cdr/code-server/main/install.sh')
os.chmod(downloaded_path, int("777", 8))
subprocess.run(
[downloaded_path, "--version", f"{CODESERVER_VERSION}"],
stdout=subprocess.PIPE, shell=True
)
def _install_extensions(self):
for ext in EXTENSIONS:
subprocess.run(["code-server", "--install-extension", f"{ext}"])
def _start_server(self):
if self.authtoken:
ngrok.set_auth_token(self.authtoken)
active_tunnels = ngrok.get_tunnels()
for tunnel in active_tunnels:
public_url = tunnel.public_url
ngrok.disconnect(public_url)
url = ngrok.connect(addr=self.port, bind_tls=True)
with self.redirector:
if not self._lab:
print(f"Code Server can be accessed on: {url}")
else:
print(f"Public URL: {url}")
def _start_pgrok_server(self):
active_tunnels = pgrok.get_tunnels()
for tunnel in active_tunnels:
public_url = tunnel.public_url
pgrok.disconnect(public_url)
tunnel = pgrok.connect(addr=self.port, name=self.subdomain)
with self.redirector:
if not self._lab:
print(f"Code Server can be accessed on: {tunnel.proto}://{tunnel.public_url}")
else:
print(f"Public URL: {tunnel.proto}://{tunnel.public_url}")
def _run_lab(self):
token = str(uuid.uuid1())
logger.info(f"Jupyter lab token: {token}")
base_cmd = "jupyter-lab --ip='localhost' --allow-root --ServerApp.allow_remote_access=True --no-browser"
# TODO: GdriveFS only works for Linux now. Support for Mac will be added later
if self._mount and platform.system().lower() == 'linux':
os.system(f"fuser -n tcp -k {self.port}")
self.mount_gdrive()
if self.password:
lab_cmd = f" --ServerApp.token='{token}' --ServerApp.password='{self.password}' --port {self.port}"
else:
lab_cmd = f" --ServerApp.token='{token}' --ServerApp.password='' --port {self.port}"
lab_cmd = base_cmd + lab_cmd
with subprocess.Popen(
[lab_cmd],
shell=True,
stdout=subprocess.PIPE,
bufsize=1,
universal_newlines=True,
) as proc, self.redirector:
for line in proc.stdout:
print(line, end="")
def _run_code(self):
# TODO: GdriveFS only works for Linux now. Support for Mac will be added later
if self._mount and platform.system().lower() == 'linux':
os.system(f"fuser -n tcp -k {self.port}")
self.mount_gdrive()
if self.password:
code_cmd = f"PASSWORD={self.password} code-server --port {self.port} --disable-telemetry"
else:
code_cmd = f"code-server --port {self.port} --auth none --disable-telemetry"
with subprocess.Popen(
[code_cmd],
shell=True,
stdout=subprocess.PIPE,
bufsize=1,
universal_newlines=True,
) as proc, self.redirector:
for line in proc.stdout:
print(line, end="")
| 34.453532
| 116
| 0.584053
|
f627f3ca8157813e7f09a811f4ea8811f176811a
| 3,480
|
py
|
Python
|
scripts/maintenance/wikimedia_sites.py
|
metakgp/pywikibot
|
28e3125167bb3ec521a25ade859d0e0afcfb1262
|
[
"MIT"
] | 2
|
2017-06-19T16:48:34.000Z
|
2017-07-07T14:15:28.000Z
|
scripts/maintenance/wikimedia_sites.py
|
metakgp/pywikibot
|
28e3125167bb3ec521a25ade859d0e0afcfb1262
|
[
"MIT"
] | 11
|
2018-12-07T18:20:05.000Z
|
2022-03-11T23:12:42.000Z
|
scripts/maintenance/wikimedia_sites.py
|
metakgp/pywikibot
|
28e3125167bb3ec521a25ade859d0e0afcfb1262
|
[
"MIT"
] | 3
|
2018-12-09T10:18:35.000Z
|
2020-09-12T13:50:14.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script that updates the language lists in Wikimedia family files."""
#
# (C) xqt, 2009-2014
# (C) Pywikibot team, 2008-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: f627f3ca8157813e7f09a811f4ea8811f176811a $'
#
import re
import codecs
from xml.etree import cElementTree
import sys
import pywikibot
from pywikibot.family import Family
if sys.version_info[0] > 2:
from urllib.request import urlopen
else:
from urllib import urlopen
URL = 'https://wikistats.wmflabs.org/api.php?action=dump&table=%s&format=xml'
familiesDict = {
'anarchopedia': 'anarchopedias',
'wikibooks': 'wikibooks',
'wikinews': 'wikinews',
'wikipedia': 'wikipedias',
'wikiquote': 'wikiquotes',
'wikisource': 'wikisources',
'wikiversity': 'wikiversity',
'wikivoyage': 'wikivoyage',
'wiktionary': 'wiktionaries',
}
exceptions = ['www']
def update_family(families):
for family in families or familiesDict.keys():
pywikibot.output('\nChecking family %s:' % family)
original = Family.load(family).languages_by_size
obsolete = Family.load(family).obsolete
feed = urlopen(URL % familiesDict[family])
tree = cElementTree.parse(feed)
new = []
for field in tree.findall('row/field'):
if field.get('name') == 'prefix':
code = field.text
if not (code in obsolete or code in exceptions):
new.append(code)
continue
# put the missing languages back at their original positions
missing = original != new and set(original) - set(new)
if missing:
pywikibot.output(u"WARNING: ['%s'] not listed at wikistats."
% "', '".join(missing))
index = {}
for code in missing:
index[original.index(code)] = code
i = len(index) - 1
for key in sorted(index.keys(), reverse=True):
new.insert(key - i, index[key])
i -= 1
if original == new:
pywikibot.output(u'The lists match!')
else:
pywikibot.output(u"The lists don't match, the new list is:")
text = u' self.languages_by_size = [\r\n'
line = ' ' * 11
for code in new:
if len(line) + len(code) <= 76:
line += u" '%s'," % code
else:
text += u'%s\r\n' % line
line = ' ' * 11
line += u" '%s'," % code
text += u'%s\r\n' % line
text += u' ]'
pywikibot.output(text)
family_file_name = 'pywikibot/families/%s_family.py' % family
family_file = codecs.open(family_file_name, 'r', 'utf8')
family_text = family_file.read()
old = re.findall(r'(?msu)^ {8}self.languages_by_size.+?\]',
family_text)[0]
family_text = family_text.replace(old, text)
family_file = codecs.open(family_file_name, 'w', 'utf8')
family_file.write(family_text)
family_file.close()
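# Worked example of the re-insertion above (illustrative values): with
# original = ['en', 'de', 'fr'] and wikistats returning new = ['en', 'fr', 'nl'],
# 'de' is missing, index = {1: 'de'}, and new.insert(1, 'de') restores it to its
# old slot, giving ['en', 'de', 'fr', 'nl'] before the family file is rewritten.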
if __name__ == '__main__':
fam = []
for arg in pywikibot.handleArgs():
if arg in familiesDict.keys() and arg not in fam:
fam.append(arg)
update_family(fam)
| 31.926606
| 77
| 0.556897
|
f358cd79f0be5ca6034b4be4d08b6509a08a8fc8
| 22
|
py
|
Python
|
btd6_memory_info/generated/Mono/Math/BigInteger/big_integer.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/Mono/Math/BigInteger/big_integer.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/Mono/Math/BigInteger/big_integer.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
class BigInteger: pass
| 22
| 22
| 0.863636
|
4b62364ccfd2f4d8f32d545a7f1c395c1db895fc
| 2,372
|
py
|
Python
|
sorting.py
|
ice1x/sorting
|
df17c69e3c0e8c57f9bbb1c4de2c87fd0e4fb6d1
|
[
"Unlicense"
] | null | null | null |
sorting.py
|
ice1x/sorting
|
df17c69e3c0e8c57f9bbb1c4de2c87fd0e4fb6d1
|
[
"Unlicense"
] | null | null | null |
sorting.py
|
ice1x/sorting
|
df17c69e3c0e8c57f9bbb1c4de2c87fd0e4fb6d1
|
[
"Unlicense"
] | null | null | null |
import random
from datetime import datetime
def getArray():
return [int(10000 * random.random()) for i in range(10000)]
def shellSort(data):
"""
Shell sort using Shell's (original) gap sequence: n/2, n/4, ..., 1.
"""
gap = len(data) // 2
# loop over the gaps
while gap > 0:
# do the insertion sort
for i in range(gap, len(data)):
val = data[i]
j = i
while j >= gap and data[j - gap] > val:
data[j] = data[j - gap]
j -= gap
data[j] = val
gap //= 2
return data
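# Illustrative check (not part of the original script): for a 6-element list the gap
# sequence is 3, 1, so e.g. shellSort([5, 3, 8, 1, 9, 2]) returns [1, 2, 3, 5, 8, 9]
# (the list is sorted in place and also returned).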
def bubble(data):
"""
Ops: O(n^2)
"""
n = 1
while n < len(data):
for i in range(len(data) - n):
if data[i] > data[i + 1]:
data[i], data[i + 1] = data[i + 1], data[i]
n += 1
return data
def quicksort_original(data):
"""
Ops: O(n log n), worst case: O(n^2)
"""
if len(data) <= 1:
return data
else:
q = random.choice(data)
s_data = []
m_data = []
e_data = []
for n in data:
if n < q:
s_data.append(n)
elif n > q:
m_data.append(n)
else:
e_data.append(n)
return quicksort_original(s_data) + e_data + quicksort_original(m_data)
def quicksort_optimized(data, fst, lst):
"""
memory utilization optimized
"""
if fst >= lst: return
i, j = fst, lst
pivot = data[random.randint(fst, lst)]
while i <= j:
while data[i] < pivot: i += 1
while data[j] > pivot: j -= 1
if i <= j:
data[i], data[j] = data[j], data[i]
i, j = i + 1, j - 1
quicksort_optimized(data, fst, j)
quicksort_optimized(data, i, lst)
return data
nums = getArray()
t_start = datetime.now()
out = shellSort(nums)
t_end = datetime.now() - t_start
print('shellSort', t_end)
nums = getArray()
t_start = datetime.now()
out = bubble(nums)
t_end = datetime.now() - t_start
print('bubble', t_end)
nums = getArray()
t_start = datetime.now()
out = quicksort_original(nums)
t_end = datetime.now() - t_start
print('quicksort_original', t_end)
nums = getArray()
t_start = datetime.now()
out = quicksort_optimized(nums, 0, len(nums) - 1)
t_end = datetime.now() - t_start
print('quicksort_optimized', t_end)
| 22.590476
| 79
| 0.531619
|
85e695d5299effc49552a0808bba338376a367d2
| 2,775
|
py
|
Python
|
extensions/es_statistics.py
|
XIThing/esdl-mapeditor
|
9f4cd4a58714ea67aeb532e88e88f0435a87dbd5
|
[
"Apache-2.0"
] | null | null | null |
extensions/es_statistics.py
|
XIThing/esdl-mapeditor
|
9f4cd4a58714ea67aeb532e88e88f0435a87dbd5
|
[
"Apache-2.0"
] | null | null | null |
extensions/es_statistics.py
|
XIThing/esdl-mapeditor
|
9f4cd4a58714ea67aeb532e88e88f0435a87dbd5
|
[
"Apache-2.0"
] | null | null | null |
# This work is based on original code developed and copyrighted by TNO 2020.
# Subsequent contributions are licensed to you by the developers of such code and are
# made available to the Project under one or several contributor license agreements.
#
# This work is licensed to you under the Apache License, Version 2.0.
# You may obtain a copy of the license at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# TNO - Initial implementation
# Manager:
# TNO
import base64
from flask import Flask
from flask_socketio import SocketIO
from extensions.session_manager import get_handler, get_session
import src.settings as settings
import requests
import urllib
import json
import src.log as log
logger = log.get_logger(__name__)
# ---------------------------------------------------------------------------------------------------------------------
# Get energy system statistics information
# ---------------------------------------------------------------------------------------------------------------------
class ESStatisticsService:
def __init__(self, flask_app: Flask, socket: SocketIO):
self.flask_app = flask_app
self.socketio = socket
self.register()
def register(self):
logger.info('Registering ESStatistics extension')
@self.socketio.on('get_es_statistics', namespace='/esdl')
def get_es_statistics():
with self.flask_app.app_context():
esh = get_handler()
active_es_id = get_session('active_es_id')
esdl_str = esh.to_string(active_es_id)
return self.call_es_statistics_service(esdl_str)
def call_es_statistics_service(self, esdl_str):
url = 'http://' + settings.statistics_settings_config['host'] + ':' + settings.statistics_settings_config['port']\
+ settings.statistics_settings_config['path']
esdlstr_bytes = esdl_str.encode('utf-8')
esdlstr_base64_bytes = base64.b64encode(esdlstr_bytes)
body = {"energysystem": esdlstr_base64_bytes.decode('utf-8')}
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"User-Agent": "ESDL Mapeditor/0.1"
}
reply = dict()
try:
r = requests.post(url, headers=headers, data=json.dumps(body))
if len(r.text) > 0:
reply = json.loads(r.text)
else:
print("WARNING: Empty response for energy system statistics service")
except Exception as e:
print('ERROR in accessing energy system statistics service: {}'.format(e))
return reply
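# Request/response shape for reference (a sketch only; the service contract is assumed, not defined here):
#   POST http://<host>:<port><path>
#   body:  {"energysystem": "<base64-encoded ESDL XML string>"}
#   reply: parsed JSON dict with the computed statistics, or {} when the call fails or the
#   service returns an empty body.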
| 37.5
| 123
| 0.584505
|
460788f6561a30c5e2022db7d4d9ee9aeb2867e6
| 4,369
|
py
|
Python
|
src/virtual-wan/azext_vwan/vendored_sdks/v2020_05_01/v2020_05_01/operations/_express_route_service_providers_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 2
|
2021-06-05T17:51:26.000Z
|
2021-11-17T11:17:56.000Z
|
src/virtual-wan/azext_vwan/vendored_sdks/v2020_05_01/v2020_05_01/operations/_express_route_service_providers_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 3
|
2020-05-27T20:16:26.000Z
|
2020-07-23T19:46:49.000Z
|
src/virtual-wan/azext_vwan/vendored_sdks/v2020_05_01/v2020_05_01/operations/_express_route_service_providers_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 5
|
2020-05-09T17:47:09.000Z
|
2020-10-01T19:52:06.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class ExpressRouteServiceProvidersOperations(object):
"""ExpressRouteServiceProvidersOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2020-05-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-05-01"
self.config = config
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all the available express route service providers.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ExpressRouteServiceProvider
:rtype:
~azure.mgmt.network.v2020_05_01.models.ExpressRouteServiceProviderPaged[~azure.mgmt.network.v2020_05_01.models.ExpressRouteServiceProvider]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.ExpressRouteServiceProviderPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'}
| 40.831776
| 148
| 0.643397
|
7e89e5999bcdb7ee7e4a8a342f65f5592fb1b840
| 379
|
py
|
Python
|
askoclics/cli/commands/cmd_dataset.py
|
mboudet/askoclics
|
f49940b885c929a85c0bce72628e263a57a24b43
|
[
"MIT"
] | null | null | null |
askoclics/cli/commands/cmd_dataset.py
|
mboudet/askoclics
|
f49940b885c929a85c0bce72628e263a57a24b43
|
[
"MIT"
] | null | null | null |
askoclics/cli/commands/cmd_dataset.py
|
mboudet/askoclics
|
f49940b885c929a85c0bce72628e263a57a24b43
|
[
"MIT"
] | 1
|
2021-03-31T14:19:20.000Z
|
2021-03-31T14:19:20.000Z
|
import click
from askoclics.cli.commands.dataset.delete import cli as delete
from askoclics.cli.commands.dataset.list import cli as list
from askoclics.cli.commands.dataset.publicize import cli as publicize
@click.group()
def cli():
"""
Manipulate datasets managed by Askomics
"""
pass
cli.add_command(delete)
cli.add_command(list)
cli.add_command(publicize)
| 21.055556
| 69
| 0.76781
|
d82dd9367f7905177541817e5d7423130d8c74fd
| 1,849
|
py
|
Python
|
2017/week_7/queue_m1.py
|
regensa18/AdvProg2018
|
8e421663cac74c191b7ece33849d990a72dedec7
|
[
"BSD-3-Clause"
] | null | null | null |
2017/week_7/queue_m1.py
|
regensa18/AdvProg2018
|
8e421663cac74c191b7ece33849d990a72dedec7
|
[
"BSD-3-Clause"
] | null | null | null |
2017/week_7/queue_m1.py
|
regensa18/AdvProg2018
|
8e421663cac74c191b7ece33849d990a72dedec7
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import namedtuple
import multiprocessing as mp
import random
import time
VALUES = (100, 200, 500, 1000)
Coin = namedtuple('Coin', ['value'])
def reader(queue):
termination_threshold = 25
termination_count = 0
reader_sum = 0
while termination_count < termination_threshold:
if queue.empty():
print("[Process {}] Waiting for new items...".format(
mp.current_process().name))
time.sleep(random.random() * 0.50)
termination_count += 1
else:
termination_count = 0
coin = queue.get()
reader_sum += coin.value
time.sleep(random.random() * 0.50)
print("[Process {}] Read coin ({})".format(
mp.current_process().name, str(coin.value)))
print("[Process {}] Total value read: {}".format(
mp.current_process().name, reader_sum))
print("[Process {}] Exiting...".format(mp.current_process().name))
def writer(count, queue):
writer_sum = 0
for ii in range(count):
coin = Coin(random.choice(VALUES))
queue.put(coin)
writer_sum += coin.value
# No need to prepend string with process name since this
# function is executed in main interpreter thread
print("Put coin ({}) into queue".format(coin.value))
time.sleep(random.random() * 0.50)
print('Total value written: ' + str(writer_sum))
if __name__ == '__main__':
start_time = time.time()
count = 100
queue = mp.Queue() # Queue class from multiprocessing module
reader_p1 = mp.Process(target=reader, name='Reader 1', args=(queue,))
reader_p1.daemon = True
reader_p1.start()
writer(count, queue)
reader_p1.join()
end_time = time.time()
print('Total running time: ' + str(end_time - start_time))
| 28.890625
| 73
| 0.611682
|
495ad4cd39acbe0920edd408acd1545d222509bf
| 3,136
|
py
|
Python
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/service_policies.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/service_policies.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/service_policies.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_9_0_0.models.service_policy_extended import ServicePolicyExtended # noqa: F401,E501
class ServicePolicies(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'policies': 'list[ServicePolicyExtended]'
}
attribute_map = {
'policies': 'policies'
}
def __init__(self, policies=None): # noqa: E501
"""ServicePolicies - a model defined in Swagger""" # noqa: E501
self._policies = None
self.discriminator = None
if policies is not None:
self.policies = policies
@property
def policies(self):
"""Gets the policies of this ServicePolicies. # noqa: E501
:return: The policies of this ServicePolicies. # noqa: E501
:rtype: list[ServicePolicyExtended]
"""
return self._policies
@policies.setter
def policies(self, policies):
"""Sets the policies of this ServicePolicies.
:param policies: The policies of this ServicePolicies. # noqa: E501
:type: list[ServicePolicyExtended]
"""
self._policies = policies
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ServicePolicies):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.269565
| 97
| 0.575893
|
29e803ea6ebe557e3f9df8c7af8a79c50cbe552d
| 33,158
|
py
|
Python
|
Tools/dea_tools/datahandling.py
|
mickwelli/dea-notebooks
|
2fca1f62e35831ed33c7d74380c4cc9e480fbeda
|
[
"Apache-2.0"
] | null | null | null |
Tools/dea_tools/datahandling.py
|
mickwelli/dea-notebooks
|
2fca1f62e35831ed33c7d74380c4cc9e480fbeda
|
[
"Apache-2.0"
] | null | null | null |
Tools/dea_tools/datahandling.py
|
mickwelli/dea-notebooks
|
2fca1f62e35831ed33c7d74380c4cc9e480fbeda
|
[
"Apache-2.0"
] | null | null | null |
## dea_datahandling.py
'''
Description: This file contains a set of python functions for handling
Digital Earth Australia data.
License: The code in this notebook is licensed under the Apache License,
Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0). Digital Earth
Australia data is licensed under the Creative Commons by Attribution 4.0
license (https://creativecommons.org/licenses/by/4.0/).
Contact: If you need assistance, please post a question on the Open Data
Cube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack
Exchange (https://gis.stackexchange.com/questions/ask?tags=open-data-cube)
using the `open-data-cube` tag (you can view previously asked questions
here: https://gis.stackexchange.com/questions/tagged/open-data-cube).
If you would like to report an issue with this script, you can file one on
Github (https://github.com/GeoscienceAustralia/dea-notebooks/issues/new).
Functions included:
load_ard
array_to_geotiff
mostcommon_crs
download_unzip
wofs_fuser
dilate
pan_sharpen_brovey
paths_to_datetimeindex
_select_along_axis
nearest
last
first
Last modified: September 2021
'''
# Import required packages
import os
import zipfile
import numexpr
import datetime
import requests
import warnings
import odc.algo
import dask
import numpy as np
import pandas as pd
import numexpr as ne
import dask.array as da
import xarray as xr
from osgeo import gdal
from random import randint
from collections import Counter
from odc.algo import mask_cleanup
from datacube.utils import masking
from scipy.ndimage import binary_dilation
from datacube.utils.dates import normalise_dt
def _dc_query_only(**kw):
"""
Remove load-only parameters, the rest can be passed to Query
Returns
-------
dict of query parameters
"""
def _impl(measurements=None,
output_crs=None,
resolution=None,
resampling=None,
skip_broken_datasets=None,
dask_chunks=None,
fuse_func=None,
align=None,
datasets=None,
progress_cbk=None,
group_by=None,
**query):
return query
return _impl(**kw)
def _common_bands(dc, products):
"""
Takes a list of products and returns a list of measurements/bands
that are present in all products
Returns
-------
List of band names
"""
common = None
bands = None
for p in products:
p = dc.index.products.get_by_name(p)
if common is None:
common = set(p.measurements)
bands = list(p.measurements)
else:
common = common.intersection(set(p.measurements))
return [band for band in bands if band in common]
def load_ard(dc,
products=None,
min_gooddata=0.0,
fmask_categories=['valid', 'snow', 'water'],
mask_pixel_quality=True,
mask_filters=None,
mask_contiguity=False,
ls7_slc_off=True,
predicate=None,
dtype='auto',
**kwargs):
"""
Loads and combines Landsat Collection 3 or Sentinel 2 Definitive
and Near Real Time data for multiple sensors (i.e. ls5t, ls7e and
ls8c for Landsat; s2a and s2b for Sentinel 2), optionally applies
pixel quality and contiguity masks, and drops time steps that
contain greater than a minimum proportion of good quality (e.g. non-
cloudy or shadowed) pixels.
The function supports loading the following DEA products:
ga_ls5t_ard_3
ga_ls7e_ard_3
ga_ls8c_ard_3
s2a_ard_granule
s2b_ard_granule
s2a_nrt_granule
s2b_nrt_granule
Last modified: April 2022
Parameters
----------
dc : datacube Datacube object
The Datacube to connect to, i.e. `dc = datacube.Datacube()`.
This allows you to also use development datacubes if required.
products : list
A list of product names to load data from. Valid options are
['ga_ls5t_ard_3', 'ga_ls7e_ard_3', 'ga_ls8c_ard_3'] for Landsat,
['s2a_ard_granule', 's2b_ard_granule'] for Sentinel 2 Definitive,
and ['s2a_nrt_granule', 's2b_nrt_granule'] for Sentinel 2 Near
Real Time (on the DEA Sandbox only).
min_gooddata : float, optional
An optional float giving the minimum percentage of good quality
pixels required for a satellite observation to be loaded.
Defaults to 0.0 which will return all observations regardless of
pixel quality (set to e.g. 0.99 to return only observations with
more than 99% good quality pixels).
fmask_categories : list, optional
An optional list of fmask category names to treat as good
quality pixels in the above `min_gooddata` calculation, and for
masking data by pixel quality (if `mask_pixel_quality=True`).
The default is `['valid', 'snow', 'water']` which will return
non-cloudy or shadowed land, snow and water pixels. Choose from:
'nodata', 'valid', 'cloud', 'shadow', 'snow', and 'water'.
mask_pixel_quality : bool, optional
An optional boolean indicating whether to mask out poor quality
pixels using fmask based on the `fmask_categories` provided
above. The default is True, which will set poor quality pixels
to NaN if `dtype='auto'` (which will convert the data to
'float32'), or set poor quality pixels to the data's native
nodata value if `dtype='native'` (which can be useful for
reducing memory).
mask_filters : iterable of tuples, optional
Iterable tuples of morphological operations - ("<operation>", <radius>)
to apply to the pixel quality mask, where:
operation: string, can be one of these morphological operations:
* ``'closing'`` = remove small holes in cloud - morphological closing
* ``'opening'`` = shrinks away small areas of the mask
* ``'dilation'`` = adds padding to the mask
* ``'erosion'`` = shrinks bright regions and enlarges dark regions
radius: int
e.g. ``mask_filters=[('erosion', 5),("opening", 2),("dilation", 2)]``
mask_contiguity : str or bool, optional
An optional string or boolean indicating whether to mask out
pixels missing data in any band (i.e. "non-contiguous" values).
This can be important for generating clean composite datasets.
The default is False, which will ignore non-contiguous values
completely. If loading NBART data, set the parameter to:
`mask_contiguity='nbart_contiguity'`. If loading NBAR data,
specify `mask_contiguity='nbar_contiguity'` instead.
Non-contiguous pixels will be set to NaN if `dtype='auto'`, or
set to the data's native nodata value if `dtype='native'`
(which can be useful for reducing memory).
dtype : string, optional
An optional parameter that controls the data type/dtype that
layers are coerced to after loading. Valid values: 'native',
'auto', 'float{16|32|64}'. When 'auto' is used, the data will be
converted to `float32` if masking is used, otherwise data will
be returned in the native data type of the data. Be aware that
if data is loaded in its native dtype, nodata and masked
pixels will be returned with the data's native nodata value
(typically -999), not NaN.
ls7_slc_off : bool, optional
An optional boolean indicating whether to include data from
after the Landsat 7 SLC failure (i.e. SLC-off). Defaults to
True, which keeps all Landsat 7 observations > May 31 2003.
predicate : function, optional
An optional function that can be passed in to restrict the
datasets that are loaded by the function. A predicate function
should take a `datacube.model.Dataset` object as an input (i.e.
as returned from `dc.find_datasets`), and return a boolean.
For example, a predicate function could be used to return True
for only datasets acquired in January:
`dataset.time.begin.month == 1`
**kwargs :
A set of keyword arguments to `dc.load` that define the
spatiotemporal query and load parameters used to extract data.
Keyword arguments can either be listed directly in the
`load_ard` call like any other parameter (e.g.
`measurements=['nbart_red']`), or by passing in a query kwarg
dictionary (e.g. `**query`). Keywords can include `measurements`,
`x`, `y`, `time`, `resolution`, `resampling`, `group_by`, `crs`;
see the `dc.load` documentation for all possible options:
https://datacube-core.readthedocs.io/en/latest/dev/api/generate/datacube.Datacube.load.html
Returns
-------
combined_ds : xarray Dataset
An xarray dataset containing only satellite observations that
contains greater than `min_gooddata` proportion of good quality
pixels.
"""
#########
# Setup #
#########
# Use 'nbart_contiguity' by default if mask_contiguity is true
if mask_contiguity is True:
mask_contiguity = 'nbart_contiguity'
# We deal with `dask_chunks` separately
dask_chunks = kwargs.pop('dask_chunks', None)
requested_measurements = kwargs.pop('measurements', None)
# Warn user if they combine lazy load with min_gooddata
if (min_gooddata > 0.0) and dask_chunks is not None:
warnings.warn("Setting 'min_gooddata' percentage to > 0.0 "
"will cause dask arrays to compute when "
"loading pixel-quality data to calculate "
"'good pixel' percentage. This can "
"slow the return of your dataset.")
# Verify that products were provided, and determine if Sentinel-2
# or Landsat data is being loaded
if not products:
raise ValueError("Please provide a list of product names "
"to load data from. Valid options are: \n"
"['ga_ls5t_ard_3', 'ga_ls7e_ard_3', 'ga_ls8c_ard_3'] "
"for Landsat, ['s2a_ard_granule', "
"'s2b_ard_granule'] \nfor Sentinel 2 Definitive, or "
"['s2a_nrt_granule', 's2b_nrt_granule'] for "
"Sentinel 2 Near Real Time")
elif all(['ls' in product for product in products]):
product_type = 'ls'
elif all(['s2' in product for product in products]):
product_type = 's2'
fmask_band = 'fmask'
measurements = (requested_measurements.copy() if
requested_measurements else None)
if measurements is None:
# Deal with "load all" case: pick a set of bands common across
# all products
measurements = _common_bands(dc, products)
# If no `measurements` are specified, Landsat ancillary bands are
# loaded with a 'oa_' prefix, but Sentinel-2 bands are not. As a
# work-around, we need to rename the default contiguity and fmask
# bands if loading Landsat data without specifying `measurements`
if product_type == 'ls':
mask_contiguity = (f'oa_{mask_contiguity}' if
mask_contiguity else False)
fmask_band = f'oa_{fmask_band}'
# If `measurements` are specified but do not include fmask or
# contiguity variables, add these to `measurements`
if fmask_band not in measurements:
measurements.append(fmask_band)
if mask_contiguity and mask_contiguity not in measurements:
measurements.append(mask_contiguity)
# Get list of data and mask bands so that we can later exclude
# mask bands from being masked themselves
data_bands = [band for band in measurements if
band not in (fmask_band, mask_contiguity)]
mask_bands = [band for band in measurements if band not in data_bands]
#################
# Find datasets #
#################
# Pull out query params only to pass to dc.find_datasets
query = _dc_query_only(**kwargs)
# Extract datasets for each product using subset of dcload_kwargs
dataset_list = []
# Get list of datasets for each product
print('Finding datasets')
for product in products:
# Obtain list of datasets for product
print(f' {product} (ignoring SLC-off observations)'
if not ls7_slc_off and product == 'ga_ls7e_ard_3'
else f' {product}')
datasets = dc.find_datasets(product=product, **query)
# Remove Landsat 7 SLC-off observations if ls7_slc_off=False
if not ls7_slc_off and product == 'ga_ls7e_ard_3':
datasets = [i for i in datasets if
normalise_dt(i.time.begin) <
datetime.datetime(2003, 5, 31)]
# Add any returned datasets to list
dataset_list.extend(datasets)
# Raise exception if no datasets are returned
if len(dataset_list) == 0:
raise ValueError("No data available for query: ensure that "
"the products specified have data for the "
"time and location requested")
# If predicate is specified, use this function to filter the list
# of datasets prior to load
if predicate:
print(f'Filtering datasets using predicate function')
dataset_list = [ds for ds in dataset_list if predicate(ds)]
# Raise exception if filtering removes all datasets
if len(dataset_list) == 0:
raise ValueError("No data available after filtering with "
"predicate function")
#############
# Load data #
#############
# Note we always load using dask here so that we can lazy load data
# before filtering by good data
ds = dc.load(datasets=dataset_list,
measurements=measurements,
dask_chunks={} if dask_chunks is None else dask_chunks,
**kwargs)
####################
# Filter good data #
####################
# Calculate pixel quality mask
pq_mask = odc.algo.fmask_to_bool(ds[fmask_band],
categories=fmask_categories)
# The good data percentage calculation has to load in all `fmask`
# data, which can be slow. If the user has chosen no filtering
# by using the default `min_gooddata = 0`, we can skip this step
# completely to save processing time
if min_gooddata > 0.0:
# Compute good data for each observation as % of total pixels
print('Counting good quality pixels for each time step')
data_perc = (pq_mask.sum(axis=[1, 2], dtype='int32') /
(pq_mask.shape[1] * pq_mask.shape[2]))
keep = (data_perc >= min_gooddata).persist()
# Filter by `min_gooddata` to drop low quality observations
total_obs = len(ds.time)
ds = ds.sel(time=keep)
pq_mask = pq_mask.sel(time=keep)
print(f'Filtering to {len(ds.time)} out of {total_obs} '
f'time steps with at least {min_gooddata:.1%} '
f'good quality pixels')
# Morphological filtering on cloud masks
    if (mask_filters is not None) and mask_pixel_quality:
print(f"Applying morphological filters to pixel quality mask: {mask_filters}")
pq_mask = mask_cleanup(pq_mask, mask_filters=mask_filters)
###############
# Apply masks #
###############
# Create an overall mask to hold both pixel quality and contiguity
mask = None
# Add pixel quality mask to overall mask
if mask_pixel_quality:
print('Applying pixel quality/cloud mask')
mask = pq_mask
# Add contiguity mask to overall mask
if mask_contiguity:
print('Applying contiguity mask')
cont_mask = ds[mask_contiguity] == 1
        # If the mask already contains pixel quality data (i.e.
        # mask_pixel_quality=True), multiply it with cont_mask to
        # perform a logical 'and' (keeping only pixels good in both)
mask = cont_mask if mask is None else mask * cont_mask
# Split into data/masks bands, as conversion to float and masking
# should only be applied to data bands
ds_data = ds[data_bands]
ds_masks = ds[mask_bands]
# Mask data if either of the above masks were generated
if mask is not None:
ds_data = odc.algo.keep_good_only(ds_data, where=mask)
# Automatically set dtype to either native or float32 depending
# on whether masking was requested
if dtype == 'auto':
dtype = 'native' if mask is None else 'float32'
# Set nodata values using odc.algo tools to reduce peak memory
# use when converting data dtype
if dtype != 'native':
ds_data = odc.algo.to_float(ds_data, dtype=dtype)
# Put data and mask bands back together
attrs = ds.attrs
ds = xr.merge([ds_data, ds_masks])
ds.attrs.update(attrs)
###############
# Return data #
###############
# Drop bands not originally requested by user
if requested_measurements:
ds = ds[requested_measurements]
# If user supplied dask_chunks, return data as a dask array without
# actually loading it in
if dask_chunks is not None:
print(f'Returning {len(ds.time)} time steps as a dask array')
return ds
else:
print(f'Loading {len(ds.time)} time steps')
return ds.compute()
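# Illustrative usage sketch (added for clarity; not part of the original
# module). The product name, band names, extents, dates and CRS below are
# hypothetical placeholders, and a working datacube connection is required.
def _example_load_ard():
    import datacube
    dc = datacube.Datacube(app='load_ard_example')
    # Load three Landsat 8 surface reflectance bands, keeping only
    # observations with at least 90% good quality pixels
    return load_ard(dc=dc,
                    products=['ga_ls8c_ard_3'],
                    measurements=['nbart_red', 'nbart_green', 'nbart_blue'],
                    x=(153.35, 153.50),
                    y=(-27.55, -27.45),
                    time=('2019-01', '2019-03'),
                    output_crs='EPSG:3577',
                    resolution=(-30, 30),
                    group_by='solar_day',
                    min_gooddata=0.9)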
def array_to_geotiff(fname, data, geo_transform, projection,
nodata_val=0, dtype=gdal.GDT_Float32):
"""
Create a single band GeoTIFF file with data from an array.
Because this works with simple arrays rather than xarray datasets
from DEA, it requires geotransform info ("(upleft_x, x_size,
x_rotation, upleft_y, y_rotation, y_size)") and projection data
(in "WKT" format) for the output raster. These are typically
obtained from an existing raster using the following GDAL calls:
import gdal
gdal_dataset = gdal.Open(raster_path)
geotrans = gdal_dataset.GetGeoTransform()
prj = gdal_dataset.GetProjection()
...or alternatively, directly from an xarray dataset:
geotrans = xarraydataset.geobox.transform.to_gdal()
prj = xarraydataset.geobox.crs.wkt
Parameters
----------
fname : str
Output geotiff file path including extension
data : numpy array
Input array to export as a geotiff
geo_transform : tuple
Geotransform for output raster; e.g. "(upleft_x, x_size,
x_rotation, upleft_y, y_rotation, y_size)"
projection : str
Projection for output raster (in "WKT" format)
nodata_val : int, optional
Value to convert to nodata in the output raster; default 0
dtype : gdal dtype object, optional
Optionally set the dtype of the output raster; can be
useful when exporting an array of float or integer values.
Defaults to gdal.GDT_Float32
"""
# Set up driver
driver = gdal.GetDriverByName('GTiff')
# Create raster of given size and projection
rows, cols = data.shape
dataset = driver.Create(fname, cols, rows, 1, dtype)
dataset.SetGeoTransform(geo_transform)
dataset.SetProjection(projection)
# Write data to array and set nodata values
band = dataset.GetRasterBand(1)
band.WriteArray(data)
band.SetNoDataValue(nodata_val)
# Close file
dataset = None
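# Illustrative usage sketch (added for clarity; not part of the original
# module). Assumes `ds` is an xarray dataset loaded via datacube with a
# hypothetical 'ndvi' variable; the geotransform and projection are taken
# from its geobox as described in the docstring above.
def _example_array_to_geotiff(ds):
    array_to_geotiff('ndvi.tif',
                     data=ds.ndvi.isel(time=0).values,
                     geo_transform=ds.geobox.transform.to_gdal(),
                     projection=ds.geobox.crs.wkt,
                     nodata_val=-9999)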
def mostcommon_crs(dc, product, query):
"""
Takes a given query and returns the most common CRS for observations
returned for that spatial extent. This can be useful when your study
area lies on the boundary of two UTM zones, forcing you to decide
which CRS to use for your `output_crs` in `dc.load`.
Parameters
----------
dc : datacube Datacube object
The Datacube to connect to, i.e. `dc = datacube.Datacube()`.
This allows you to also use development datacubes if required.
product : str
A product name (or list of product names) to load CRSs from.
query : dict
A datacube query including x, y and time range to assess for the
most common CRS
Returns
-------
    An EPSG string giving the most common CRS from all datasets returned
by the query above
"""
# Find list of datasets matching query for either product or
# list of products
if isinstance(product, list):
matching_datasets = []
for i in product:
matching_datasets.extend(dc.find_datasets(product=i,
**query))
else:
matching_datasets = dc.find_datasets(product=product, **query)
# Extract all CRSs
crs_list = [str(i.crs) for i in matching_datasets]
# If CRSs are returned
if len(crs_list) > 0:
# Identify most common CRS
crs_counts = Counter(crs_list)
crs_mostcommon = crs_counts.most_common(1)[0][0]
# Warn user if multiple CRSs are encountered
if len(crs_counts.keys()) > 1:
warnings.warn(f'Multiple UTM zones {list(crs_counts.keys())} '
f'were returned for this query. Defaulting to '
f'the most common zone: {crs_mostcommon}',
UserWarning)
return crs_mostcommon
else:
raise ValueError(f'No CRS was returned as no data was found for '
f'the supplied product ({product}) and query. '
f'Please ensure that data is available for '
f'{product} for the spatial extents and time '
f'period specified in the query (e.g. by using '
f'the Data Cube Explorer for this datacube '
f'instance).')
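# Illustrative usage sketch (added for clarity; not part of the original
# module): determine the dominant UTM zone for a query, then pass it to
# `dc.load` as `output_crs`. The product, extents and dates are hypothetical
# placeholders.
def _example_mostcommon_crs(dc):
    query = {'x': (153.35, 153.50),
             'y': (-27.55, -27.45),
             'time': ('2019-01', '2019-03')}
    crs = mostcommon_crs(dc=dc, product='ga_ls8c_ard_3', query=query)
    return dc.load(product='ga_ls8c_ard_3',
                   output_crs=crs,
                   resolution=(-30, 30),
                   **query)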
def download_unzip(url,
output_dir=None,
remove_zip=True):
"""
Downloads and unzips a .zip file from an external URL to a local
directory.
Parameters
----------
url : str
A string giving a URL path to the zip file you wish to download
and unzip
output_dir : str, optional
An optional string giving the directory to unzip files into.
Defaults to None, which will unzip files in the current working
directory
remove_zip : bool, optional
An optional boolean indicating whether to remove the downloaded
.zip file after files are unzipped. Defaults to True, which will
delete the .zip file.
"""
# Get basename for zip file
zip_name = os.path.basename(url)
# Raise exception if the file is not of type .zip
if not zip_name.endswith('.zip'):
raise ValueError(f'The URL provided does not point to a .zip '
f'file (e.g. {zip_name}). Please specify a '
f'URL path to a valid .zip file')
# Download zip file
print(f'Downloading {zip_name}')
r = requests.get(url)
with open(zip_name, 'wb') as f:
f.write(r.content)
# Extract into output_dir
with zipfile.ZipFile(zip_name, 'r') as zip_ref:
zip_ref.extractall(output_dir)
print(f'Unzipping output files to: '
f'{output_dir if output_dir else os.getcwd()}')
# Optionally cleanup
if remove_zip:
os.remove(zip_name)
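# Illustrative usage sketch (added for clarity; not part of the original
# module). The URL is a hypothetical placeholder.
def _example_download_unzip():
    download_unzip('https://example.com/data/study_area_shapefiles.zip',
                   output_dir='input_data',
                   remove_zip=True)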
def wofs_fuser(dest, src):
"""
Fuse two WOfS water measurements represented as `ndarray`s.
Note: this is a copy of the function located here:
https://github.com/GeoscienceAustralia/digitalearthau/blob/develop/digitalearthau/utils.py
"""
    empty = (dest & 1).astype(bool)
    both = ~empty & ~((src & 1).astype(bool))
dest[empty] = src[empty]
dest[both] |= src[both]
def dilate(array, dilation=10, invert=True):
"""
    Dilate a binary array by a specified number of pixels using a
disk-like radial dilation.
By default, invalid (e.g. False or 0) values are dilated. This is
suitable for applications such as cloud masking (e.g. creating a
buffer around cloudy or shadowed pixels). This functionality can
be reversed by specifying `invert=False`.
Parameters
----------
array : array
The binary array to dilate.
dilation : int, optional
An optional integer specifying the number of pixels to dilate
by. Defaults to 10, which will dilate `array` by 10 pixels.
invert : bool, optional
An optional boolean specifying whether to invert the binary
array prior to dilation. The default is True, which dilates the
invalid values in the array (e.g. False or 0 values).
Returns
-------
    An array of the same shape as `array`, dilated by the number of
    pixels specified by `dilation`. By default the invalid (False)
    pixels are dilated; set `invert=False` to dilate valid pixels
    instead.
"""
y, x = np.ogrid[
-dilation: (dilation + 1),
-dilation: (dilation + 1),
]
# disk-like radial dilation
kernel = (x * x) + (y * y) <= (dilation + 0.5) ** 2
# If invert=True, invert True values to False etc
if invert:
array = ~array
    return ~binary_dilation(array.astype(bool),
                            structure=kernel.reshape((1,) + kernel.shape))
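# Illustrative usage sketch (added for clarity; not part of the original
# module): buffer the invalid (False) pixels of a cloud mask by 10 pixels,
# e.g. to mask cloud edges more aggressively. Note that the structuring
# element above is reshaped to three dimensions, so `dilate` expects a
# (time, y, x) stack.
def _example_dilate():
    mask = np.ones((1, 100, 100), dtype=bool)
    mask[:, 40:60, 40:60] = False   # a block of cloudy (invalid) pixels
    return dilate(mask, dilation=10, invert=True)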
def pan_sharpen_brovey(band_1, band_2, band_3, pan_band):
"""
    Applies Brovey pan-sharpening to surface reflectance input using
    numexpr, and returns three pan-sharpened arrays.
Parameters
----------
band_1, band_2, band_3 : xarray.DataArray or numpy.array
Three input multispectral bands, either as xarray.DataArrays or
numpy.arrays. These bands should have already been resampled to
the spatial resolution of the panchromatic band.
pan_band : xarray.DataArray or numpy.array
A panchromatic band corresponding to the above multispectral
bands that will be used to pan-sharpen the data.
Returns
-------
band_1_sharpen, band_2_sharpen, band_3_sharpen : numpy.arrays
        Three numpy arrays equivalent to `band_1`, `band_2` and `band_3`
pan-sharpened to the spatial resolution of `pan_band`.
"""
# Calculate total
exp = 'band_1 + band_2 + band_3'
total = numexpr.evaluate(exp)
# Perform Brovey Transform in form of: band/total*panchromatic
exp = 'a/b*c'
band_1_sharpen = numexpr.evaluate(exp, local_dict={'a': band_1,
'b': total,
'c': pan_band})
band_2_sharpen = numexpr.evaluate(exp, local_dict={'a': band_2,
'b': total,
'c': pan_band})
band_3_sharpen = numexpr.evaluate(exp, local_dict={'a': band_3,
'b': total,
'c': pan_band})
return band_1_sharpen, band_2_sharpen, band_3_sharpen
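# Illustrative usage sketch (added for clarity; not part of the original
# module). Band names are hypothetical Landsat-style placeholders; the
# multispectral bands are first resampled onto the panchromatic band's grid,
# as required by the docstring above.
def _example_pan_sharpen_brovey(ds):
    red = ds.nbart_red.interp_like(ds.nbart_panchromatic)
    green = ds.nbart_green.interp_like(ds.nbart_panchromatic)
    blue = ds.nbart_blue.interp_like(ds.nbart_panchromatic)
    return pan_sharpen_brovey(red, green, blue, ds.nbart_panchromatic)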
def paths_to_datetimeindex(paths, string_slice=(0, 10)):
"""
    Helper function to generate a pandas.DatetimeIndex object
    from dates contained in a list of file path strings.
Parameters
----------
paths : list of strings
A list of file path strings that will be used to extract times
string_slice : tuple
An optional tuple giving the start and stop position that
contains the time information in the provided paths. These are
applied to the basename (i.e. file name) in each path, not the
path itself. Defaults to (0, 10).
Returns
-------
    A pandas.DatetimeIndex object containing 'datetime64[ns]' values
    derived from the file paths provided by `paths`.
"""
date_strings = [os.path.basename(i)[slice(*string_slice)]
for i in paths]
return pd.to_datetime(date_strings)
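# Illustrative usage sketch (added for clarity; not part of the original
# module). The file names are hypothetical placeholders whose first 10
# characters encode the acquisition date.
def _example_paths_to_datetimeindex():
    paths = ['outputs/2018-01-01_ndvi.tif', 'outputs/2018-02-01_ndvi.tif']
    # Returns DatetimeIndex(['2018-01-01', '2018-02-01'], dtype='datetime64[ns]')
    return paths_to_datetimeindex(paths, string_slice=(0, 10))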
def _select_along_axis(values, idx, axis):
other_ind = np.ix_(*[np.arange(s) for s in idx.shape])
sl = other_ind[:axis] + (idx,) + other_ind[axis:]
return values[sl]
def first(array: xr.DataArray,
dim: str,
index_name: str = None) -> xr.DataArray:
"""
    Finds the first occurring non-null value along the given dimension.
Parameters
----------
array : xr.DataArray
The array to search.
    dim : str
        The name of the dimension to reduce by finding the first
        non-null value.
    index_name : str, optional
        If given, the name of a coordinate to be added containing the
        index of where on the dimension the first value was found.
Returns
-------
reduced : xr.DataArray
An array of the first non-null values.
        The `dim` dimension will be removed, and replaced with a coord
        of the same name, containing the value of that dimension where
        the first value was found.
"""
axis = array.get_axis_num(dim)
idx_first = np.argmax(~pd.isnull(array), axis=axis)
reduced = array.reduce(_select_along_axis, idx=idx_first, axis=axis)
reduced[dim] = array[dim].isel({dim: xr.DataArray(idx_first,
dims=reduced.dims)})
if index_name is not None:
reduced[index_name] = xr.DataArray(idx_first, dims=reduced.dims)
return reduced
def last(array: xr.DataArray,
dim: str,
index_name: str = None) -> xr.DataArray:
"""
    Finds the last occurring non-null value along the given dimension.
Parameters
----------
array : xr.DataArray
The array to search.
dim : str
The name of the dimension to reduce by finding the last non-null
value.
index_name : str, optional
If given, the name of a coordinate to be added containing the
index of where on the dimension the nearest value was found.
Returns
-------
reduced : xr.DataArray
An array of the last non-null values.
The `dim` dimension will be removed, and replaced with a coord
of the same name, containing the value of that dimension where
the last value was found.
"""
axis = array.get_axis_num(dim)
rev = (slice(None),) * axis + (slice(None, None, -1),)
idx_last = -1 - np.argmax(~pd.isnull(array)[rev], axis=axis)
reduced = array.reduce(_select_along_axis, idx=idx_last, axis=axis)
reduced[dim] = array[dim].isel({dim: xr.DataArray(idx_last,
dims=reduced.dims)})
if index_name is not None:
reduced[index_name] = xr.DataArray(idx_last, dims=reduced.dims)
return reduced
def nearest(array: xr.DataArray,
dim: str, target,
index_name: str = None) -> xr.DataArray:
"""
Finds the nearest values to a target label along the given
dimension, for all other dimensions.
E.g. For a DataArray with dimensions ('time', 'x', 'y')
nearest_array = nearest(array, 'time', '2017-03-12')
will return an array with the dimensions ('x', 'y'), with non-null
values found closest for each (x, y) pixel to that location along
the time dimension.
The returned array will include the 'time' coordinate for each x,y
pixel that the nearest value was found.
Parameters
----------
array : xr.DataArray
The array to search.
dim : str
The name of the dimension to look for the target label.
target : same type as array[dim]
The value to look up along the given dimension.
index_name : str, optional
If given, the name of a coordinate to be added containing the
index of where on the dimension the nearest value was found.
Returns
-------
nearest_array : xr.DataArray
An array of the nearest non-null values to the target label.
The `dim` dimension will be removed, and replaced with a coord
of the same name, containing the value of that dimension closest
to the given target label.
"""
before_target = slice(None, target)
after_target = slice(target, None)
da_before = array.sel({dim: before_target})
da_after = array.sel({dim: after_target})
da_before = (last(da_before, dim, index_name) if
da_before[dim].shape[0] else None)
da_after = (first(da_after, dim, index_name) if
da_after[dim].shape[0] else None)
if da_before is None and da_after is not None:
return da_after
if da_after is None and da_before is not None:
return da_before
target = array[dim].dtype.type(target)
is_before_closer = (abs(target - da_before[dim]) <
abs(target - da_after[dim]))
nearest_array = xr.where(is_before_closer, da_before, da_after)
nearest_array[dim] = xr.where(is_before_closer,
da_before[dim],
da_after[dim])
if index_name is not None:
nearest_array[index_name] = xr.where(is_before_closer,
da_before[index_name],
da_after[index_name])
return nearest_array
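# Illustrative usage sketch (added for clarity; not part of the original
# module). Assumes `da` is a DataArray with a 'time' dimension (e.g. a band
# loaded by `load_ard`) in which cloudy pixels have been set to NaN; the
# target date is a hypothetical placeholder.
def _example_temporal_reducers(da):
    first_clear = first(da, dim='time')   # earliest non-null value per pixel
    last_clear = last(da, dim='time')     # latest non-null value per pixel
    near_date = nearest(da, dim='time', target='2019-02-15')
    return first_clear, last_clear, near_date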
| 36.883204
| 99
| 0.632125
|
45a535622188ba4081ba89dd52d3400fcb39e1b2
| 3,784
|
py
|
Python
|
game.py
|
liangfengsid/dicegame
|
d38d23d595446d1c96442b4ef0d600ef2c284005
|
[
"MIT"
] | null | null | null |
game.py
|
liangfengsid/dicegame
|
d38d23d595446d1c96442b4ef0d600ef2c284005
|
[
"MIT"
] | null | null | null |
game.py
|
liangfengsid/dicegame
|
d38d23d595446d1c96442b4ef0d600ef2c284005
|
[
"MIT"
] | null | null | null |
import random
import sys
import time
import threading
from multiprocessing import Value
import state
import util
import param
from mcts import MCTS
from state import DEFAULT_TRANSFORM
from state import State
MCTS_GAME = 0
OPT_GAME = 1
RAND_GAME = 2
NORMAL_GAME = 3
class MCTSGame:
def __init__(self, path=''):
self.mcts = MCTS(path)
def learn(self, checkpoint_path=""):
start = time.time()
for n in range(0, param.NUM_PLAY):
root = state.State(0, 100, (n % 100) + 1)
self.mcts.play_one_game(root)
end = time.time()
print(n, "======:", end - start)
start = end
if (n + 1) % param.CHECKPOINT_INTERVAL == 0 and checkpoint_path != "":
self.mcts.save(checkpoint_path)
def play(self, stat):
(choice, value), _, _ = self.mcts.play_one_move(stat)
return choice, value
# OptGame always bets a value that just covers the losses plus the goal.
class OptGame:
def play(self, stat):
value = min(stat.balance(), 100 + stat.goal() - stat.balance())
return state.SMALL, value
# RandGame always bets a random value within the current balance.
class RandGame:
def play(self, stat):
random.seed(time.time())
value = random.randint(1, stat.balance())
return state.SMALL, value
class NormalGame:
def play(self, stat):
return state.SMALL, 1
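# Illustrative sketch (added for clarity; not part of the original module):
# playing a single game by hand with one of the strategy classes, mirroring
# the loop used in GameThread.run() below.
def _example_single_game():
    g = OptGame()
    stat = State(0, 100, 10)
    while not util.is_game_ended(stat):
        choice, value = g.play(stat)
        stat.transform(DEFAULT_TRANSFORM, choice, value)
    return stat.balance(), stat.step()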
class GameThread(threading.Thread):
    # type is one of MCTS_GAME, OPT_GAME, RAND_GAME or NORMAL_GAME
def __init__(self, id, model_path, num_game, type, balance_ref, step_ref, lock):
threading.Thread.__init__(self)
self.__id = id
self.__model_path = model_path
self.__num_game = num_game
self.__type = type
self.__sum_balance = balance_ref
self.__sum_step = step_ref
self.__lock = lock
def run(self):
balances = 0
steps = 0
for i in range(0, self.__num_game):
# A game
if self.__type == MCTS_GAME:
g = MCTSGame(path=self.__model_path)
elif self.__type == NORMAL_GAME:
g = NormalGame()
elif self.__type == RAND_GAME:
g = RandGame()
else:
g = OptGame()
stat = State(0, 100, 10)
while not util.is_game_ended(stat):
choice, value = g.play(stat)
stat.transform(DEFAULT_TRANSFORM, choice, value)
#print("==Game ends with state: ", stat.value())
balances += stat.balance()
steps += stat.step()
if (i + 1) % 1 == 0:
print(i, '====Thread ', self.__id, ': game ends with state: ', stat.value(),
", average balance: ", 1.0 * balances / (1 + i))
with self.__lock:
self.__sum_balance.value += balances
self.__sum_step.value += steps
print('Thread ', self.__id, ': average balance ', 1.0 * balances / self.__num_game,
', average step: ', 1.0 * steps / self.__num_game)
if __name__ == '__main__':
if len(sys.argv) == 3 and sys.argv[1] == 'learn':
path = sys.argv[2]
g = MCTSGame(path)
g.learn(checkpoint_path=path)
elif (len(sys.argv) == 3 or len(sys.argv) == 4) and sys.argv[1] == 'play':
if len(sys.argv) == 3 and sys.argv[2] == 'opt':
path = ''
game_type = OPT_GAME
elif len(sys.argv) == 3 and sys.argv[2] == 'random':
path = ''
game_type = RAND_GAME
elif len(sys.argv) == 3 and sys.argv[2] == 'normal':
path = ''
game_type = NORMAL_GAME
else:
path = sys.argv[3]
game_type = MCTS_GAME
num_thread = param.NUM_THREAD
num_game = param.GAME_PER_THREAD
lock = threading.Lock()
sum_balance = Value('i', 0)
sum_step = Value('i', 0)
threads = [GameThread(i, path, num_game, game_type, sum_balance, sum_step, lock) for i in range(0, num_thread)]
for t in threads:
t.start()
for t in threads:
t.join()
print('Average balance ', 1.0 * sum_balance.value / num_game / num_thread,
', average step: ', 1.0 * sum_step.value / num_game / num_thread)
else:
        print('Wrong argument. Usage: "game.py learn <model_path>" or "game.py play [opt|random|normal|mcts <model_path>]"')
| 28.02963
| 113
| 0.658298
|
cd4920bddee20ebc570a427fbcd907c541631dc2
| 1,767
|
py
|
Python
|
ps10_test.py
|
heinerbehrends/OCW-MIT-6.00
|
26c492b1ca2efdf5a6f9c004f78e94fe55976b26
|
[
"bzip2-1.0.6"
] | null | null | null |
ps10_test.py
|
heinerbehrends/OCW-MIT-6.00
|
26c492b1ca2efdf5a6f9c004f78e94fe55976b26
|
[
"bzip2-1.0.6"
] | null | null | null |
ps10_test.py
|
heinerbehrends/OCW-MIT-6.00
|
26c492b1ca2efdf5a6f9c004f78e94fe55976b26
|
[
"bzip2-1.0.6"
] | null | null | null |
import ps10; reload(ps10)
from ps10 import *
def isClose(float1, float2):
"""
Helper function - are two floating point values close?
"""
return abs(float1 - float2) < .01
def testResult(boolean):
"""
Helper function - print 'Test Failed' if boolean is false, 'Test
Succeeded' otherwise.
"""
if boolean:
print 'Test Succeeded'
else:
print 'Test Failed'
def testHand():
"""
Test the hand class. Add your own test cases
"""
h = Hand(8, {'a':3, 'b':2, 'd':3})
h.update('bad')
testResult(h.containsLetters('aabdd') and not h.isEmpty())
h.update('dad')
testResult(h.containsLetters('ab') or not h.isEmpty())
h.update('ab')
testResult(h.isEmpty())
def testPlayer():
"""
Test the Player class. Add your own test cases.
"""
p = Player(1, Hand(6, {'c':1, 'a':1, 'b':1 ,'d':1, 'o':1, 'e':1}))
testResult(type(p.getHand()) == Hand)
p.addPoints(5.)
p.addPoints(12.)
testResult(isClose(p.getPoints(), 17))
def testComputerPlayer():
"""
Test the ComputerPlayer class. Add your own test cases.
"""
wordlist = Wordlist()
p = ComputerPlayer(1, Hand(6, {'c':1, 'a':1, 'b':1 ,'d':1, 'o':1, 'e':1}))
testResult(getWordScore(p.pickBestWord(wordlist)) == getWordScore('abode'))
def testAll():
"""
Run all Tests
"""
print "Uncomment the tests in this file as you complete each problem."
print 'PROBLEM 2 -----------------------------------------'
testHand()
print 'PROBLEM 3 -----------------------------------------'
testPlayer()
print 'PROBLEM 4 -----------------------------------------'
testComputerPlayer()
testAll()
| 26.373134
| 80
| 0.52858
|
ca284a622c902371c1fda911aefe871574ab21ab
| 22,479
|
py
|
Python
|
google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py
|
TheMichaelHu/python-aiplatform
|
e03f373a7e44c354eda88875a41c771f6d7e3ce1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py
|
TheMichaelHu/python-aiplatform
|
e03f373a7e44c354eda88875a41c771f6d7e3ce1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py
|
TheMichaelHu/python-aiplatform
|
e03f373a7e44c354eda88875a41c771f6d7e3ce1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1.types import annotation_spec
from google.cloud.aiplatform_v1.types import dataset
from google.cloud.aiplatform_v1.types import dataset as gca_dataset
from google.cloud.aiplatform_v1.types import dataset_service
from google.longrunning import operations_pb2 # type: ignore
from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
class DatasetServiceGrpcTransport(DatasetServiceTransport):
"""gRPC backend transport for DatasetService.
The service that handles the CRUD of Vertex AI Dataset and
its child resources.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service."""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def create_dataset(
self,
) -> Callable[[dataset_service.CreateDatasetRequest], operations_pb2.Operation]:
r"""Return a callable for the create dataset method over gRPC.
Creates a Dataset.
Returns:
Callable[[~.CreateDatasetRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_dataset" not in self._stubs:
self._stubs["create_dataset"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.DatasetService/CreateDataset",
request_serializer=dataset_service.CreateDatasetRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_dataset"]
@property
def get_dataset(
self,
) -> Callable[[dataset_service.GetDatasetRequest], dataset.Dataset]:
r"""Return a callable for the get dataset method over gRPC.
Gets a Dataset.
Returns:
Callable[[~.GetDatasetRequest],
~.Dataset]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_dataset" not in self._stubs:
self._stubs["get_dataset"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.DatasetService/GetDataset",
request_serializer=dataset_service.GetDatasetRequest.serialize,
response_deserializer=dataset.Dataset.deserialize,
)
return self._stubs["get_dataset"]
@property
def update_dataset(
self,
) -> Callable[[dataset_service.UpdateDatasetRequest], gca_dataset.Dataset]:
r"""Return a callable for the update dataset method over gRPC.
Updates a Dataset.
Returns:
Callable[[~.UpdateDatasetRequest],
~.Dataset]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_dataset" not in self._stubs:
self._stubs["update_dataset"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.DatasetService/UpdateDataset",
request_serializer=dataset_service.UpdateDatasetRequest.serialize,
response_deserializer=gca_dataset.Dataset.deserialize,
)
return self._stubs["update_dataset"]
@property
def list_datasets(
self,
) -> Callable[
[dataset_service.ListDatasetsRequest], dataset_service.ListDatasetsResponse
]:
r"""Return a callable for the list datasets method over gRPC.
Lists Datasets in a Location.
Returns:
Callable[[~.ListDatasetsRequest],
~.ListDatasetsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_datasets" not in self._stubs:
self._stubs["list_datasets"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.DatasetService/ListDatasets",
request_serializer=dataset_service.ListDatasetsRequest.serialize,
response_deserializer=dataset_service.ListDatasetsResponse.deserialize,
)
return self._stubs["list_datasets"]
@property
def delete_dataset(
self,
) -> Callable[[dataset_service.DeleteDatasetRequest], operations_pb2.Operation]:
r"""Return a callable for the delete dataset method over gRPC.
Deletes a Dataset.
Returns:
Callable[[~.DeleteDatasetRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_dataset" not in self._stubs:
self._stubs["delete_dataset"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.DatasetService/DeleteDataset",
request_serializer=dataset_service.DeleteDatasetRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_dataset"]
@property
def import_data(
self,
) -> Callable[[dataset_service.ImportDataRequest], operations_pb2.Operation]:
r"""Return a callable for the import data method over gRPC.
Imports data into a Dataset.
Returns:
Callable[[~.ImportDataRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_data" not in self._stubs:
self._stubs["import_data"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.DatasetService/ImportData",
request_serializer=dataset_service.ImportDataRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["import_data"]
@property
def export_data(
self,
) -> Callable[[dataset_service.ExportDataRequest], operations_pb2.Operation]:
r"""Return a callable for the export data method over gRPC.
Exports data from a Dataset.
Returns:
Callable[[~.ExportDataRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "export_data" not in self._stubs:
self._stubs["export_data"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.DatasetService/ExportData",
request_serializer=dataset_service.ExportDataRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_data"]
@property
def list_data_items(
self,
) -> Callable[
[dataset_service.ListDataItemsRequest], dataset_service.ListDataItemsResponse
]:
r"""Return a callable for the list data items method over gRPC.
Lists DataItems in a Dataset.
Returns:
Callable[[~.ListDataItemsRequest],
~.ListDataItemsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_data_items" not in self._stubs:
self._stubs["list_data_items"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.DatasetService/ListDataItems",
request_serializer=dataset_service.ListDataItemsRequest.serialize,
response_deserializer=dataset_service.ListDataItemsResponse.deserialize,
)
return self._stubs["list_data_items"]
@property
def get_annotation_spec(
self,
) -> Callable[
[dataset_service.GetAnnotationSpecRequest], annotation_spec.AnnotationSpec
]:
r"""Return a callable for the get annotation spec method over gRPC.
Gets an AnnotationSpec.
Returns:
Callable[[~.GetAnnotationSpecRequest],
~.AnnotationSpec]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_annotation_spec" not in self._stubs:
self._stubs["get_annotation_spec"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.DatasetService/GetAnnotationSpec",
request_serializer=dataset_service.GetAnnotationSpecRequest.serialize,
response_deserializer=annotation_spec.AnnotationSpec.deserialize,
)
return self._stubs["get_annotation_spec"]
@property
def list_annotations(
self,
) -> Callable[
[dataset_service.ListAnnotationsRequest],
dataset_service.ListAnnotationsResponse,
]:
r"""Return a callable for the list annotations method over gRPC.
        Lists Annotations that belong to a DataItem.
Returns:
Callable[[~.ListAnnotationsRequest],
~.ListAnnotationsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_annotations" not in self._stubs:
self._stubs["list_annotations"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.DatasetService/ListAnnotations",
request_serializer=dataset_service.ListAnnotationsRequest.serialize,
response_deserializer=dataset_service.ListAnnotationsResponse.deserialize,
)
return self._stubs["list_annotations"]
def close(self):
self.grpc_channel.close()
@property
def kind(self) -> str:
return "grpc"
__all__ = ("DatasetServiceGrpcTransport",)
| 42.493384
| 90
| 0.634904
|
09c0f612258b9904317ec39bd44b5580c3914b69
| 23,887
|
py
|
Python
|
salt/modules/grains.py
|
GLaN1K/salt
|
ec1a907465c2d6dff126b747a52035e19b9a105b
|
[
"Apache-2.0"
] | 1
|
2021-02-26T07:37:19.000Z
|
2021-02-26T07:37:19.000Z
|
salt/modules/grains.py
|
GLaN1K/salt
|
ec1a907465c2d6dff126b747a52035e19b9a105b
|
[
"Apache-2.0"
] | 4
|
2021-02-06T14:30:48.000Z
|
2021-12-13T20:50:10.000Z
|
salt/modules/grains.py
|
GLaN1K/salt
|
ec1a907465c2d6dff126b747a52035e19b9a105b
|
[
"Apache-2.0"
] | 1
|
2021-05-10T13:59:33.000Z
|
2021-05-10T13:59:33.000Z
|
"""
Return/control aspects of the grains data
Grains set or altered with this module are stored in the 'grains'
file on the minions. By default, this file is located at: ``/etc/salt/grains``
.. Note::
This does **NOT** override any grains set in the minion config file.
"""
import collections
import logging
import math
import operator
import os
import random
from collections.abc import Mapping
from functools import reduce # pylint: disable=redefined-builtin
import salt.utils.compat
import salt.utils.data
import salt.utils.files
import salt.utils.json
import salt.utils.platform
import salt.utils.yaml
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import SaltException
__proxyenabled__ = ["*"]
# Seed the grains dict so cython will build
__grains__ = {}
# Change the default outputter to make it more readable
__outputter__ = {
"items": "nested",
"item": "nested",
"setval": "nested",
}
# http://stackoverflow.com/a/12414913/127816
_infinitedict = lambda: collections.defaultdict(_infinitedict)
_non_existent_key = "NonExistentValueMagicNumberSpK3hnufdHfeBUXCfqVK"
log = logging.getLogger(__name__)
def _serial_sanitizer(instr):
"""Replaces the last 1/4 of a string with X's"""
length = len(instr)
index = int(math.floor(length * 0.75))
return "{}{}".format(instr[:index], "X" * (length - index))
_FQDN_SANITIZER = lambda x: "MINION.DOMAINNAME"
_HOSTNAME_SANITIZER = lambda x: "MINION"
_DOMAINNAME_SANITIZER = lambda x: "DOMAINNAME"
# A dictionary of grain -> function mappings for sanitizing grain output. This
# is used when the 'sanitize' flag is given.
_SANITIZERS = {
"serialnumber": _serial_sanitizer,
"domain": _DOMAINNAME_SANITIZER,
"fqdn": _FQDN_SANITIZER,
"id": _FQDN_SANITIZER,
"host": _HOSTNAME_SANITIZER,
"localhost": _HOSTNAME_SANITIZER,
"nodename": _HOSTNAME_SANITIZER,
}
def get(key, default="", delimiter=DEFAULT_TARGET_DELIM, ordered=True):
"""
Attempt to retrieve the named value from grains, if the named value is not
available return the passed default. The default return is an empty string.
The value can also represent a value in a nested dict using a ":" delimiter
for the dict. This means that if a dict in grains looks like this::
{'pkg': {'apache': 'httpd'}}
To retrieve the value associated with the apache key in the pkg dict this
key can be passed::
pkg:apache
:param delimiter:
Specify an alternate delimiter to use when traversing a nested dict.
This is useful for when the desired key contains a colon. See CLI
example below for usage.
.. versionadded:: 2014.7.0
:param ordered:
Outputs an ordered dict if applicable (default: True)
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt '*' grains.get pkg:apache
salt '*' grains.get abc::def|ghi delimiter='|'
"""
if ordered is True:
grains = __grains__
else:
grains = salt.utils.json.loads(salt.utils.json.dumps(__grains__))
return salt.utils.data.traverse_dict_and_list(grains, key, default, delimiter)
def has_value(key):
"""
Determine whether a key exists in the grains dictionary.
Given a grains dictionary that contains the following structure::
{'pkg': {'apache': 'httpd'}}
One would determine if the apache key in the pkg dict exists by::
pkg:apache
CLI Example:
.. code-block:: bash
salt '*' grains.has_value pkg:apache
"""
return (
salt.utils.data.traverse_dict_and_list(__grains__, key, KeyError)
is not KeyError
)
def items(sanitize=False):
"""
Return all of the minion's grains
CLI Example:
.. code-block:: bash
salt '*' grains.items
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.items sanitize=True
"""
if salt.utils.data.is_true(sanitize):
out = dict(__grains__)
for key, func in _SANITIZERS.items():
if key in out:
out[key] = func(out[key])
return out
else:
return dict(__grains__)
def item(*args, **kwargs):
"""
Return one or more grains
CLI Example:
.. code-block:: bash
salt '*' grains.item os
salt '*' grains.item os osrelease oscodename
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.item host sanitize=True
"""
ret = {}
default = kwargs.get("default", "")
delimiter = kwargs.get("delimiter", DEFAULT_TARGET_DELIM)
try:
for arg in args:
ret[arg] = salt.utils.data.traverse_dict_and_list(
__grains__, arg, default, delimiter
)
except KeyError:
pass
if salt.utils.data.is_true(kwargs.get("sanitize")):
for arg, func in _SANITIZERS.items():
if arg in ret:
ret[arg] = func(ret[arg])
return ret
def setvals(grains, destructive=False, refresh_pillar=True):
"""
Set new grains values in the grains config file
destructive
If an operation results in a key being removed, delete the key, too.
Defaults to False.
refresh_pillar
Whether pillar will be refreshed.
Defaults to True.
CLI Example:
.. code-block:: bash
salt '*' grains.setvals "{'key1': 'val1', 'key2': 'val2'}"
"""
new_grains = grains
if not isinstance(new_grains, Mapping):
raise SaltException("setvals grains must be a dictionary.")
grains = {}
if os.path.isfile(__opts__["conf_file"]):
if salt.utils.platform.is_proxy():
gfn = os.path.join(
os.path.dirname(__opts__["conf_file"]),
"proxy.d",
__opts__["id"],
"grains",
)
else:
gfn = os.path.join(os.path.dirname(__opts__["conf_file"]), "grains")
elif os.path.isdir(__opts__["conf_file"]):
if salt.utils.platform.is_proxy():
gfn = os.path.join(
__opts__["conf_file"], "proxy.d", __opts__["id"], "grains"
)
else:
gfn = os.path.join(__opts__["conf_file"], "grains")
else:
if salt.utils.platform.is_proxy():
gfn = os.path.join(
os.path.dirname(__opts__["conf_file"]),
"proxy.d",
__opts__["id"],
"grains",
)
else:
gfn = os.path.join(os.path.dirname(__opts__["conf_file"]), "grains")
if os.path.isfile(gfn):
with salt.utils.files.fopen(gfn, "rb") as fp_:
try:
grains = salt.utils.yaml.safe_load(fp_)
except salt.utils.yaml.YAMLError as exc:
return "Unable to read existing grains file: {}".format(exc)
if not isinstance(grains, dict):
grains = {}
for key, val in new_grains.items():
if val is None and destructive is True:
if key in grains:
del grains[key]
if key in __grains__:
del __grains__[key]
else:
grains[key] = val
__grains__[key] = val
try:
with salt.utils.files.fopen(gfn, "w+", encoding="utf-8") as fp_:
salt.utils.yaml.safe_dump(grains, fp_, default_flow_style=False)
except OSError:
log.error("Unable to write to grains file at %s. Check permissions.", gfn)
fn_ = os.path.join(__opts__["cachedir"], "module_refresh")
try:
with salt.utils.files.flopen(fn_, "w+"):
pass
except OSError:
log.error("Unable to write to cache file %s. Check permissions.", fn_)
if not __opts__.get("local", False):
# Refresh the grains
__salt__["saltutil.refresh_grains"](refresh_pillar=refresh_pillar)
# Return the grains we just set to confirm everything was OK
return new_grains
def setval(key, val, destructive=False, refresh_pillar=True):
"""
Set a grains value in the grains config file
key
The grain key to be set.
val
The value to set the grain key to.
destructive
If an operation results in a key being removed, delete the key, too.
Defaults to False.
refresh_pillar
Whether pillar will be refreshed.
Defaults to True.
CLI Example:
.. code-block:: bash
salt '*' grains.setval key val
salt '*' grains.setval key "{'sub-key': 'val', 'sub-key2': 'val2'}"
"""
return setvals({key: val}, destructive, refresh_pillar=refresh_pillar)
def append(key, val, convert=False, delimiter=DEFAULT_TARGET_DELIM):
"""
.. versionadded:: 0.17.0
Append a value to a list in the grains config file. If the grain doesn't
exist, the grain key is added and the value is appended to the new grain
as a list item.
key
The grain key to be appended to
val
The value to append to the grain key
convert
If convert is True, convert non-list contents into a list.
If convert is False and the grain contains non-list contents, an error
is given. Defaults to False.
delimiter
The key can be a nested dict key. Use this parameter to
specify the delimiter you use, instead of the default ``:``.
You can now append values to a list in nested dictionary grains. If the
list doesn't exist at this level, it will be created.
.. versionadded:: 2014.7.6
CLI Example:
.. code-block:: bash
salt '*' grains.append key val
"""
grains = get(key, [], delimiter)
if convert:
if not isinstance(grains, list):
grains = [] if grains is None else [grains]
if not isinstance(grains, list):
return "The key {} is not a valid list".format(key)
if val in grains:
return "The val {} was already in the list {}".format(val, key)
if isinstance(val, list):
for item in val:
grains.append(item)
else:
grains.append(val)
while delimiter in key:
key, rest = key.rsplit(delimiter, 1)
_grain = get(key, _infinitedict(), delimiter)
if isinstance(_grain, dict):
_grain.update({rest: grains})
grains = _grain
return setval(key, grains)
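# Illustrative sketch (added for clarity; not part of the original module):
# appending to a nested key builds the intermediate dictionaries. This only
# works inside a loaded minion context (it relies on __grains__/__opts__),
# and the 'roles:web' grain name is a hypothetical placeholder.
def _example_nested_append():
    append('roles:web', 'nginx')
    # The grains file afterwards contains (roughly):
    #   roles:
    #     web:
    #     - nginx
    return get('roles:web')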
def remove(key, val, delimiter=DEFAULT_TARGET_DELIM):
"""
.. versionadded:: 0.17.0
Remove a value from a list in the grains config file
key
The grain key to remove.
val
The value to remove.
delimiter
The key can be a nested dict key. Use this parameter to
specify the delimiter you use, instead of the default ``:``.
    You can now remove values from a list in nested dictionary grains.
.. versionadded:: 2015.8.2
CLI Example:
.. code-block:: bash
salt '*' grains.remove key val
"""
grains = get(key, [], delimiter)
if not isinstance(grains, list):
return "The key {} is not a valid list".format(key)
if val not in grains:
return "The val {} was not in the list {}".format(val, key)
grains.remove(val)
while delimiter in key:
key, rest = key.rsplit(delimiter, 1)
_grain = get(key, None, delimiter)
if isinstance(_grain, dict):
_grain.update({rest: grains})
grains = _grain
return setval(key, grains)
def delkey(key, force=False):
"""
.. versionadded:: 2017.7.0
Remove a grain completely from the grain system, this will remove the
grain key and value
key
The grain key from which to delete the value.
force
Force remove the grain even when it is a mapped value.
Defaults to False
CLI Example:
.. code-block:: bash
salt '*' grains.delkey key
"""
return delval(key, destructive=True, force=force)
def delval(key, destructive=False, force=False):
"""
.. versionadded:: 0.17.0
Delete a grain value from the grains config file. This will just set the
grain value to ``None``. To completely remove the grain, run ``grains.delkey``
or pass ``destructive=True`` to ``grains.delval``.
key
The grain key from which to delete the value.
destructive
Delete the key, too. Defaults to False.
force
Force remove the grain even when it is a mapped value.
Defaults to False
CLI Example:
.. code-block:: bash
salt '*' grains.delval key
"""
return set(key, None, destructive=destructive, force=force)
def ls(): # pylint: disable=C0103
"""
Return a list of all available grains
CLI Example:
.. code-block:: bash
salt '*' grains.ls
"""
return sorted(__grains__)
def filter_by(lookup_dict, grain="os_family", merge=None, default="default", base=None):
"""
.. versionadded:: 0.17.0
Look up the given grain in a given dictionary for the current OS and return
the result
Although this may occasionally be useful at the CLI, the primary intent of
this function is for use in Jinja to make short work of creating lookup
tables for OS-specific data. For example:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
}, default='Debian') %}
myapache:
pkg.installed:
- name: {{ apache.pkg }}
service.running:
- name: {{ apache.srv }}
Values in the lookup table may be overridden by values in Pillar. An
example Pillar to override values in the example above could be as follows:
.. code-block:: yaml
apache:
lookup:
pkg: apache_13
srv: apache
The call to ``filter_by()`` would be modified as follows to reference those
Pillar values:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
...
}, merge=salt['pillar.get']('apache:lookup')) %}
:param lookup_dict: A dictionary, keyed by a grain, containing a value or
values relevant to systems matching that grain. For example, a key
could be the grain for an OS and the value could the name of a package
on that particular OS.
.. versionchanged:: 2016.11.0
The dictionary key could be a globbing pattern. The function will
return the corresponding ``lookup_dict`` value where grain value
matches the pattern. For example:
.. code-block:: bash
# this will render 'got some salt' if Minion ID begins from 'salt'
salt '*' grains.filter_by '{salt*: got some salt, default: salt is not here}' id
:param grain: The name of a grain to match with the current system's
grains. For example, the value of the "os_family" grain for the current
system could be used to pull values from the ``lookup_dict``
dictionary.
.. versionchanged:: 2016.11.0
The grain value could be a list. The function will return the
``lookup_dict`` value for a first found item in the list matching
one of the ``lookup_dict`` keys.
:param merge: A dictionary to merge with the results of the grain selection
from ``lookup_dict``. This allows Pillar to override the values in the
``lookup_dict``. This could be useful, for example, to override the
values for non-standard package names such as when using a different
Python version from the default Python version provided by the OS
(e.g., ``python26-mysql`` instead of ``python-mysql``).
    :param default: default ``lookup_dict`` key used if the grain does not exist
        or if the grain value has no match in ``lookup_dict``. If unspecified,
        the value is "default".
.. versionadded:: 2014.1.0
:param base: A lookup_dict key to use for a base dictionary. The
grain-selected ``lookup_dict`` is merged over this and then finally
the ``merge`` dictionary is merged. This allows common values for
each case to be collected in the base and overridden by the grain
selection dictionary and the merge dictionary. Default is unset.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'
# this one will render {D: {E: I, G: H}, J: K}
salt '*' grains.filter_by '{A: B, C: {D: {E: F, G: H}}}' 'xxx' '{D: {E: I}, J: K}' 'C'
# next one renders {A: {B: G}, D: J}
salt '*' grains.filter_by '{default: {A: {B: C}, D: E}, F: {A: {B: G}}, H: {D: I}}' 'xxx' '{D: J}' 'F' 'default'
# next same as above when default='H' instead of 'F' renders {A: {B: C}, D: J}
"""
return salt.utils.data.filter_by(
lookup_dict=lookup_dict,
lookup=grain,
traverse=__grains__,
merge=merge,
default=default,
base=base,
)
def _dict_from_path(path, val, delimiter=DEFAULT_TARGET_DELIM):
"""
    Given a lookup string in the form of 'foo:bar:baz', return a nested
dictionary of the appropriate depth with the final segment as a value.
>>> _dict_from_path('foo:bar:baz', 'somevalue')
{"foo": {"bar": {"baz": "somevalue"}}
"""
nested_dict = _infinitedict()
keys = path.rsplit(delimiter)
lastplace = reduce(operator.getitem, keys[:-1], nested_dict)
lastplace[keys[-1]] = val
return nested_dict
def get_or_set_hash(
name, length=8, chars="abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"
):
"""
Perform a one-time generation of a hash and write it to the local grains.
    If that grain has already been set, return the existing value instead.
This is useful for generating passwords or keys that are specific to a
single minion that don't need to be stored somewhere centrally.
State Example:
.. code-block:: yaml
some_mysql_user:
mysql_user:
- present
- host: localhost
- password: {{ salt['grains.get_or_set_hash']('mysql:some_mysql_user') }}
CLI Example:
.. code-block:: bash
salt '*' grains.get_or_set_hash 'django:SECRET_KEY' 50
.. warning::
This function could return strings which may contain characters which are reserved
as directives by the YAML parser, such as strings beginning with ``%``. To avoid
issues when using the output of this function in an SLS file containing YAML+Jinja,
surround the call with single quotes.
"""
ret = get(name, None)
if ret is None:
val = "".join([random.SystemRandom().choice(chars) for _ in range(length)])
if DEFAULT_TARGET_DELIM in name:
root, rest = name.split(DEFAULT_TARGET_DELIM, 1)
curr = get(root, _infinitedict())
val = _dict_from_path(rest, val)
curr.update(val)
setval(root, curr)
else:
setval(name, val)
return get(name)
def set(key, val="", force=False, destructive=False, delimiter=DEFAULT_TARGET_DELIM):
"""
Set a key to an arbitrary value. It is used like setval but works
with nested keys.
    This function is conservative: it will only overwrite an entry if neither its
    existing value nor the given value is a list or a dict. The ``force``
    parameter can be used to allow overwriting in all cases.
.. versionadded:: 2015.8.0
:param force: Force writing over existing entry if given or existing
values are list or dict. Defaults to False.
:param destructive: If an operation results in a key being removed,
delete the key, too. Defaults to False.
:param delimiter:
Specify an alternate delimiter to use when traversing a nested dict,
the default being ``:``
CLI Example:
.. code-block:: bash
salt '*' grains.set 'apps:myApp:port' 2209
salt '*' grains.set 'apps:myApp' '{port: 2209}'
"""
ret = {"comment": "", "changes": {}, "result": True}
# Get val type
_new_value_type = "simple"
if isinstance(val, dict):
_new_value_type = "complex"
elif isinstance(val, list):
_new_value_type = "complex"
_non_existent = object()
_existing_value = get(key, _non_existent, delimiter)
_value = _existing_value
_existing_value_type = "simple"
if _existing_value is _non_existent:
_existing_value_type = None
elif isinstance(_existing_value, dict):
_existing_value_type = "complex"
elif isinstance(_existing_value, list):
_existing_value_type = "complex"
if (
_existing_value_type is not None
and _existing_value == val
and (val is not None or destructive is not True)
):
ret["comment"] = "Grain is already set"
return ret
if _existing_value is not None and not force:
if _existing_value_type == "complex":
ret["comment"] = (
"The key '{}' exists but is a dict or a list. "
"Use 'force=True' to overwrite.".format(key)
)
ret["result"] = False
return ret
elif _new_value_type == "complex" and _existing_value_type is not None:
ret["comment"] = (
"The key '{}' exists and the given value is a dict or a "
"list. Use 'force=True' to overwrite.".format(key)
)
ret["result"] = False
return ret
else:
_value = val
else:
_value = val
# Process nested grains
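    # Walk the delimited key from right to left: at each step the current value is
    # merged into (or removed from) its parent container, so that only the top-level
    # key remains when setval() is finally called below.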
while delimiter in key:
key, rest = key.rsplit(delimiter, 1)
_existing_value = get(key, {}, delimiter)
if isinstance(_existing_value, dict):
if _value is None and destructive:
if rest in _existing_value.keys():
_existing_value.pop(rest)
else:
_existing_value.update({rest: _value})
elif isinstance(_existing_value, list):
_list_updated = False
for _index, _item in enumerate(_existing_value):
if _item == rest:
_existing_value[_index] = {rest: _value}
_list_updated = True
elif isinstance(_item, dict) and rest in _item:
_item.update({rest: _value})
_list_updated = True
if not _list_updated:
_existing_value.append({rest: _value})
elif _existing_value == rest or force:
_existing_value = {rest: _value}
else:
ret["comment"] = (
"The key '{}' value is '{}', which is different from "
"the provided key '{}'. Use 'force=True' to overwrite.".format(
key, _existing_value, rest
)
)
ret["result"] = False
return ret
_value = _existing_value
_setval_ret = setval(key, _value, destructive=destructive)
if isinstance(_setval_ret, dict):
ret["changes"] = _setval_ret
else:
ret["comment"] = _setval_ret
ret["result"] = False
return ret
def equals(key, value):
"""
Used to make sure the minion's grain key/value matches.
Returns ``True`` if matches otherwise ``False``.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' grains.equals fqdn <expected_fqdn>
salt '*' grains.equals systemd:version 219
"""
return str(value) == str(get(key))
# Provide a jinja function call compatible get aliased as fetch
fetch = get
| 29.933584
| 120
| 0.613179
|
2aec34cd567069a9f1f6be03b1dc7733f6ee2738
| 4,215
|
py
|
Python
|
pyscf/pbc/gw/kgw_slow_supercell.py
|
QuESt-Calculator/pyscf
|
0ed03633b699505c7278f1eb501342667d0aa910
|
[
"Apache-2.0"
] | 501
|
2018-12-06T23:48:17.000Z
|
2022-03-31T11:53:18.000Z
|
pyscf/pbc/gw/kgw_slow_supercell.py
|
QuESt-Calculator/pyscf
|
0ed03633b699505c7278f1eb501342667d0aa910
|
[
"Apache-2.0"
] | 710
|
2018-11-26T22:04:52.000Z
|
2022-03-30T03:53:12.000Z
|
pyscf/pbc/gw/kgw_slow_supercell.py
|
QuESt-Calculator/pyscf
|
0ed03633b699505c7278f1eb501342667d0aa910
|
[
"Apache-2.0"
] | 273
|
2018-11-26T10:10:24.000Z
|
2022-03-30T12:25:28.000Z
|
"""
This module implements the G0W0 approximation on top of `pyscf.tdscf.rhf_slow` TDHF implementation. Unlike `gw.py`, all
integrals are stored in memory. Several variants of GW are available:
* `pyscf.gw_slow`: the molecular implementation;
* `pyscf.pbc.gw.gw_slow`: single-kpoint PBC (periodic boundary condition) implementation;
* (this module) `pyscf.pbc.gw.kgw_slow_supercell`: a supercell approach to PBC implementation with multiple k-points.
  Runs the molecular code for a model with several k-points at the cost of discarding momentum conservation and using
  dense instead of sparse matrices;
* `pyscf.pbc.gw.kgw_slow`: a PBC implementation with multiple k-points;
"""
from pyscf.gw import gw_slow
from pyscf.lib import einsum
import numpy
# Convention for these modules:
# * IMDS contains routines for intermediates
# * kernel finds GW roots
# * GW provides a container
def corrected_moe(eri, k, p):
"""
Calculates the corrected orbital energy.
Args:
eri (PhysERI): a container with electron repulsion integrals;
k (int): the k-point index;
p (int): orbital;
Returns:
The corrected orbital energy.
"""
moe = eri.mo_energy[k][p]
moc = eri.mo_coeff[k][:, p]
nk = len(eri.mo_energy)
vk = 0
for k2 in range(nk):
vk -= eri.ao2mo_k((
moc[:, numpy.newaxis],
eri.mo_coeff_full[k2][:, :eri.nocc_full[k2]],
eri.mo_coeff_full[k2][:, :eri.nocc_full[k2]],
moc[:, numpy.newaxis],
), (k, k2, k2, k)).squeeze().trace().real
vk /= nk
mf = eri.model
v_mf = mf.get_veff()[k] - mf.get_j()[k]
v_mf = einsum("i,ij,j", moc.conj(), v_mf, moc).real
return moe + vk - v_mf
class IMDS(gw_slow.IMDS):
orb_dims = 2
def __init__(self, td, eri=None):
"""
GW intermediates (k-version/supercell).
Args:
td: a container with TD solution;
eri: a container with electron repulsion integrals;
"""
gw_slow.AbstractIMDS.__init__(self, td, eri=eri)
self.nk = len(self.td._scf.mo_energy)
# MF
self.nocc = sum(self.eri.nocc)
self.o = numpy.concatenate(tuple(e[:nocc] for e, nocc in zip(self.eri.mo_energy, self.eri.nocc)))
self.v = numpy.concatenate(tuple(e[nocc:] for e, nocc in zip(self.eri.mo_energy, self.eri.nocc)))
# TD
nroots, _, k1, k2, o, v = self.td.xy.shape
self.td_xy = self.td.xy.transpose(0, 1, 2, 4, 3, 5).reshape(nroots, 2, k1*o, k2*v)
self.td_e = self.td.e
self.tdm = self.construct_tdm()
def eri_ov(self, item):
result = []
k = numpy.arange(self.nk)
for k1 in k:
result.append([])
for k2 in k:
result[-1].append([])
for k3 in k:
result[-1][-1].append([])
for k4 in k:
result[-1][-1][-1].append(self.eri.eri_ov(item, (k1, k2, k3, k4)))
r = numpy.block(result)
return r / len(k)
__getitem__ = eri_ov
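    # __plain_index__ maps a (k-point, orbital) pair onto a flat supercell index:
    # occupied orbitals of all k-points come first, followed by all virtual orbitals
    # (offset by self.nocc when spec=False).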
def __plain_index__(self, p, spec=True):
k, kp = p
if kp < self.eri.nocc[k]:
x = sum(self.eri.nocc[:k]) + kp
if spec:
return "o", x
else:
return x
else:
kp -= self.eri.nocc[k]
x = sum(self.eri.nmo[:k]) - sum(self.eri.nocc[:k]) + kp
if spec:
return "v", x
else:
return x + self.nocc
def get_rhs(self, p, components=False):
k, kp = p
# return self.eri.mo_energy[k][kp]
return corrected_moe(self.eri, k, kp)
def get_sigma_element(self, omega, p, eta, vir_sgn=1):
return super(IMDS, self).get_sigma_element(omega, self.__plain_index__(p, spec=False), eta, vir_sgn=vir_sgn)
def initial_guess(self, p):
k, kp = p
return self.eri.mo_energy[k][kp]
@property
def entire_space(self):
assert all(i == self.eri.nmo[0] for i in self.eri.nmo)
return [numpy.arange(self.nk), numpy.arange(self.eri.nmo[0])]
kernel = gw_slow.kernel
class GW(gw_slow.GW):
base_imds = IMDS
| 30.992647
| 119
| 0.581495
|
82dea2983f28f702269e9f6b8270797ffc211a86
| 332
|
py
|
Python
|
src/netbox_example/management/commands/show_examples.py
|
steffann/netbox-example-plugin
|
9f1c22ae60bb21e7d4685219eb76687c62f0161e
|
[
"Apache-2.0"
] | 4
|
2020-01-19T04:14:07.000Z
|
2020-06-03T17:36:19.000Z
|
src/netbox_example/management/commands/show_examples.py
|
steffann/netbox-example-plugin
|
9f1c22ae60bb21e7d4685219eb76687c62f0161e
|
[
"Apache-2.0"
] | null | null | null |
src/netbox_example/management/commands/show_examples.py
|
steffann/netbox-example-plugin
|
9f1c22ae60bb21e7d4685219eb76687c62f0161e
|
[
"Apache-2.0"
] | 1
|
2020-09-17T17:02:43.000Z
|
2020-09-17T17:02:43.000Z
|
from django.core.management.base import BaseCommand
from netbox_example.models import Example
class Command(BaseCommand):
def handle(self, *args, **options):
for example in Example.objects.select_related('device').all():
self.stdout.write("{example.device.name} - {example.name}".format(example=example))
| 33.2
| 95
| 0.725904
|
b7f70483085c102840ece73347b62be69ebe6d21
| 555
|
py
|
Python
|
automate the boring stuff with python/json_handle/quickWeather.py
|
xu6148152/Binea_Python_Project
|
d943eb5f4685d08f080b372dcf1a7cbd5d63efed
|
[
"MIT"
] | null | null | null |
automate the boring stuff with python/json_handle/quickWeather.py
|
xu6148152/Binea_Python_Project
|
d943eb5f4685d08f080b372dcf1a7cbd5d63efed
|
[
"MIT"
] | null | null | null |
automate the boring stuff with python/json_handle/quickWeather.py
|
xu6148152/Binea_Python_Project
|
d943eb5f4685d08f080b372dcf1a7cbd5d63efed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import json, requests, sys
# Compute location from command line arguments
if len(sys.argv) < 2:
print('Usage: quickWeather.py location')
sys.exit()
location = ' '.join(sys.argv[1:])
# Download the json data from openweathermap.org's api
url = 'http://openweathermap.org/data/2.5/forecast/daily?q=%s&cnt=3' % (location)
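# Note: the OpenWeatherMap API also expects an APPID (API key) query parameter;
# without one the request above is likely to be rejected.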
response = requests.get(url)
response.raise_for_status()
with open('response.txt', 'w') as writer:
writer.write(response.text)
# Load json data into a python variable
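# A minimal completion sketch for the step above (assumed; not part of the original file):
# weatherData = json.loads(response.text)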
| 25.227273
| 82
| 0.704505
|
7c5250c5995690565a79f14a8c34072e7ea1110f
| 1,159
|
py
|
Python
|
tests/test_readers_writers/test_write_csv.py
|
Mailaender/spectrochempy
|
d58221afeb9f78e2e3e0079b3fd6c0162a902c04
|
[
"CECILL-B"
] | null | null | null |
tests/test_readers_writers/test_write_csv.py
|
Mailaender/spectrochempy
|
d58221afeb9f78e2e3e0079b3fd6c0162a902c04
|
[
"CECILL-B"
] | null | null | null |
tests/test_readers_writers/test_write_csv.py
|
Mailaender/spectrochempy
|
d58221afeb9f78e2e3e0079b3fd6c0162a902c04
|
[
"CECILL-B"
] | null | null | null |
# -*- coding: utf-8 -*-
# ======================================================================================================================
# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =
# ======================================================================================================================
import pytest
import spectrochempy as scp
def test_write_csv(IR_dataset_2D):
# 1D dataset without coords
ds = scp.NDDataset([1, 2, 3])
f = ds.write_csv("myfile.csv", confirm=False)
assert f.name == "myfile.csv"
f.unlink()
f = ds.write_csv("myfile", confirm=False)
assert f.name == "myfile.csv"
f.unlink()
# 1D dataset with coords
ds = IR_dataset_2D[0]
f = ds.write_csv("myfile.csv", confirm=False)
assert f.name == "myfile.csv"
f.unlink()
# 2D dataset with coords
ds = IR_dataset_2D
with pytest.raises(NotImplementedError):
f = ds.write_csv("myfile.csv", confirm=False)
| 33.114286
| 120
| 0.49094
|
8ae4ee40e11daa121284287d4cdd3b2d87e3aca0
| 8,556
|
py
|
Python
|
index.py
|
ZhangShunHai/netease-cloud
|
b7410f771bbca5fe1f87bc8b6b85430261e46b78
|
[
"MIT"
] | 1
|
2020-09-04T08:19:09.000Z
|
2020-09-04T08:19:09.000Z
|
index.py
|
hai980915/netease-cloud
|
b7410f771bbca5fe1f87bc8b6b85430261e46b78
|
[
"MIT"
] | null | null | null |
index.py
|
hai980915/netease-cloud
|
b7410f771bbca5fe1f87bc8b6b85430261e46b78
|
[
"MIT"
] | 1
|
2020-11-15T08:58:06.000Z
|
2020-11-15T08:58:06.000Z
|
#coding:utf-8
'''
@author: ZainCheung
@LastEditors: ZainCheung
@description: Fully automated NetEase Cloud Music daily check-in (cloud-function edition)
@Date: 2020-06-25 14:28:48
@LastEditTime: 2020-09-01 18:20:00
'''
from configparser import ConfigParser
from threading import Timer
import requests
import random
import hashlib
import datetime
import time
import json
import logging
import math
logger = logging.getLogger()
grade = [10,40,70,130,200,400,1000,3000,8000,20000]
api = ''
class Task(object):
'''
    Constructor for the Task object
'''
def __init__(self, uin, pwd, sckey, countrycode=86):
self.uin = uin
self.pwd = pwd
self.countrycode = countrycode
self.sckey = sckey
'''
    Send data with the user's cookies attached.
    url: the full URL path
    postJson: the data to send in the POST body
    Returns the response
'''
def getResponse(self, url, postJson):
response = requests.post(url, data=postJson, headers={'Content-Type':'application/x-www-form-urlencoded'},cookies=self.cookies)
return response
'''
    Log in
'''
def login(self):
data = {"uin":self.uin,"pwd":self.pwd,"countrycode":self.countrycode,"r":random.random()}
if '@' in self.uin:
url = api + '?do=email'
else:
url = api + '?do=login'
response = requests.post(url, data=data, headers={'Content-Type':'application/x-www-form-urlencoded'})
code = json.loads(response.text)['code']
self.name = json.loads(response.text)['profile']['nickname']
self.uid = json.loads(response.text)['account']['id']
if code==200:
self.error = ''
else:
            self.error = 'Login failed, please check the account'
self.cookies = response.cookies.get_dict()
        self.log('Login successful')
'''
    Daily sign-in
'''
def sign(self):
url = api + '?do=sign'
response = self.getResponse(url, {"r":random.random()})
data = json.loads(response.text)
if data['code'] == 200:
            self.log('Sign-in successful')
else:
            self.log('Already signed in today')
'''
    Daily check-in: play 300 songs
'''
def daka(self):
url = api + '?do=daka'
response = self.getResponse(url, {"r":random.random()})
self.log(response.text)
'''
    Query the user's details
'''
def detail(self):
url = api + '?do=detail'
data = {"uid":self.uid, "r":random.random()}
response = self.getResponse(url, data)
data = json.loads(response.text)
self.level = data['level']
self.listenSongs = data['listenSongs']
        self.log('User details fetched successfully')
'''
    ServerChan push notification
'''
def server(self):
if self.sckey == '':
return
url = 'https://sc.ftqq.com/' + self.sckey + '.send'
        self.diyText() # build the content to send
response = requests.get(url,params={"text":self.title, "desp":self.content})
data = json.loads(response.text)
if data['errno'] == 0:
            self.log('User: ' + self.name + ' ServerChan push succeeded')
else:
            self.log('User: ' + self.name + ' ServerChan push failed, please check whether the sckey is correct')
'''
    Compose the content to push to WeChat
    title: the message title
    content: the message body; Markdown is supported
'''
def diyText(self):
# today = datetime.date.today()
        # kaoyan_day = datetime.date(2020,12,21) # doomsday for the 2021 postgraduate exam takers
# date = (kaoyan_day - today).days
        one = requests.get('https://api.qinor.cn/soup/').text # API for the daily quote
for count in grade:
if self.level < 10:
if self.listenSongs < 20000:
if self.listenSongs < count:
                        self.tip = 'Listen to ' + str(count-self.listenSongs) + ' more songs to reach the next level'
break
else:
                    self.tip = 'You have already played 20000 songs; reach 800 login days to hit the max level'
else:
                self.tip = 'Congratulations, you are already at the max level!'
if self.error == '':
state = ("- 目前已完成签到\n"
"- 今日共打卡" + str(self.dakanum) + "次\n"
"- 今日共播放" + str(self.dakaSongs) + "首歌\n"
"- 还需要打卡" + str(self.day) +"天")
self.title = ("网易云今日打卡" + str(self.dakaSongs) + "首,已播放" + str(self.listenSongs) + "首")
else:
state = self.error
            self.title = 'Something went wrong with the NetEase Cloud listening task!'
        self.content = (
            "------\n"
            "#### Account info\n"
            "- User name: " + str(self.name) + "\n"
            "- Current level: " + str(self.level) + "\n"
            "- Total plays: " + str(self.listenSongs) + " songs\n"
            "- Level-up hint: " + self.tip + "\n\n"
            "------\n"
            "#### Task status\n" + str(state) + "\n\n"
            "------\n"
            "#### Check-in log\n" + self.dakaSongs_list + "\n\n"
            "------\n"
            "#### Quote of the day\n- " + one + "\n\n")
'''
    Print a log entry
'''
def log(self, text):
time_stamp = datetime.datetime.now()
print(time_stamp.strftime('%Y.%m.%d-%H:%M:%S') + ' ' + str(text))
self.time =time_stamp.strftime('%H:%M:%S')
self.list.append("- [" + self.time + "] " + str(text) + "\n\n")
'''
    Start running the task
'''
def start(self):
try:
self.list = []
self.list.append("- 初始化完成\n\n")
self.login()
self.sign()
self.detail()
counter = self.listenSongs
for i in range(1,10):
self.daka()
                #self.log('User: ' + self.name + ' check-in #' + str(i) + ' succeeded, sleeping for 10 seconds')
                self.log('Check-in #' + str(i) + ' succeeded, sleeping for 10 seconds')
time.sleep(10)
self.dakanum = i
self.detail()
self.dakaSongs = self.listenSongs - counter
                self.log('Checked in and played ' + str(self.dakaSongs) + ' songs today')
if self.dakaSongs == 300:
break
if self.listenSongs >= 20000:
self.day = 0
else:
self.day = math.ceil((20000 - self.listenSongs)/300)
self.list.append("- 打卡结束,消息推送\n\n")
self.dakaSongs_list = ''.join(self.list)
self.server()
except:
            self.log('User task interrupted, please check whether the account and password are correct')
else:
            self.log('User: ' + self.name + ' has finished the task for today')
'''
Initialization: read the configuration from init.config
Returns the configuration as a dict
'''
def init():
    global api # set the api during initialization
config = ConfigParser()
config.read('init.config', encoding='UTF-8-sig')
uin = config['token']['account']
pwd = config['token']['password']
countrycode = config['token']['countrycode']
api = config['setting']['api']
md5Switch = config.getboolean('setting','md5Switch')
peopleSwitch = config.getboolean('setting','peopleSwitch')
sckey = config['setting']['sckey']
    logger.info('Configuration file loaded')
conf = {
'uin': uin,
'pwd': pwd,
'countrycode': countrycode,
'api': api,
'md5Switch': md5Switch,
'peopleSwitch':peopleSwitch,
'sckey':sckey
}
return conf
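# Illustrative layout of init.config assumed by init() above (section and key names
# are taken from the code; the values shown here are placeholders only):
#
#   [token]
#   account = your_account
#   password = your_password
#   countrycode = 86
#
#   [setting]
#   api = https://example.invalid/netease-api
#   md5Switch = false
#   peopleSwitch = false
#   sckey = your_serverchan_key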
'''
MD5-hash a string
str: the string to hash
Returns the hashed string
'''
def md5(str):
hl = hashlib.md5()
hl.update(str.encode(encoding='utf-8'))
return hl.hexdigest()
'''
Load a JSON file
jsonPath: the name of the JSON file, e.g. account.json
'''
def loadJson(jsonPath):
with open(jsonPath,encoding='utf-8') as f:
account = json.load(f)
return account
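# Illustrative account.json layout assumed by taskPool() below (key names are taken
# from the code; the values are placeholders only):
#   [{"account": "user1", "password": "pass1", "sckey": "SCUxxxx"}]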
'''
Check the api
'''
def check():
url = api + '?do=check'
respones = requests.get(url)
if respones.status_code == 200:
        logger.info('api check passed')
else:
        logger.error('api check failed')
'''
Task pool
'''
def taskPool():
config = init()
    check() # check the api once per day
if config['peopleSwitch'] is True:
        logger.info('Multi-account switch is on, about to run tasks for multiple accounts')
account = loadJson("account.json")
for man in account:
            logger.info('Account: ' + man['account'] + ' starting\n========================================')
task = Task(man['account'], man['password'], man['sckey'])
task.start()
time.sleep(10)
        logger.info('All accounts have finished their tasks; the service is going to sleep until tomorrow')
else :
        logger.info('Account: ' + config['uin'] + ' starting\n========================================')
if config['md5Switch'] is True:
            logger.info('MD5 switch is on, your password will be hashed locally and will not be uploaded to the server')
config['pwd'] = md5(config['pwd'])
task = Task(config['uin'], config['pwd'], config['sckey'], config['countrycode'])
task.start()
'''
Program entry point
'''
def main(event,content):
taskPool()
| 30.021053
| 136
| 0.494974
|
df83f2ddd0b50aba0b4353c850260c82f80aae75
| 50,315
|
py
|
Python
|
sourcecode/chesssimul/board.py
|
Epichess/chess-simul
|
a37e9baa65bf4691dc2d56f6c662bb70a8e65061
|
[
"MIT"
] | null | null | null |
sourcecode/chesssimul/board.py
|
Epichess/chess-simul
|
a37e9baa65bf4691dc2d56f6c662bb70a8e65061
|
[
"MIT"
] | 1
|
2021-06-22T09:48:37.000Z
|
2021-06-22T09:49:14.000Z
|
sourcecode/chesssimul/board.py
|
Epichess/chess-simul
|
a37e9baa65bf4691dc2d56f6c662bb70a8e65061
|
[
"MIT"
] | null | null | null |
from square import Square
from piece import *
from move import Move
from typing import Callable
from log import *
#######################################
# create logger with 'spam_application'
logger = logging.getLogger("My_app")
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(CustomFormatter())
logger.addHandler(ch)
#######################################
IndexToLine = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
IndexToColumn = ['8', '7', '6', '5', '4', '3', '2', '1']
Columns: dict[str, int] = {
'a': 0,
'b': 1,
'c': 2,
'd': 3,
'e': 4,
'f': 5,
'g': 6,
'h': 7
}
Lines: dict[str, int] = {
'1': 7,
'2': 6,
'3': 5,
'4': 4,
'5': 3,
'6': 2,
'7': 1,
'8': 0
}
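# With these mappings, board[0] holds rank 8 and board[7] holds rank 1, so white's
# pieces start on rows 6 and 7 of the internal matrix.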
class Board:
board: list[list[Square]]
move_list: list[Move]
to_move: Color
can_white_queen_side_castle: bool
can_white_king_side_castle: bool
can_black_queen_side_castle: bool
can_black_king_side_castle: bool
en_passant_target_square: tuple[int, int] or None
halfmove_clock: int
fullmove_number: int
white_check: bool
black_check: bool
pos_piece_check: list()
list_piece_pin: list()
pin_color: list()
def __init__(self):
self.move_list = list()
def getSquare(self, square: str) -> Square:
square_list = list(square)
return self.board[Lines.get(square_list[1])][Columns.get(square_list[0])]
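    # Example: getSquare('e2') returns self.board[6][4], i.e. Lines['2'] = 6 and Columns['e'] = 4.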
def setEnPassantTargetSquare(self, line: int, column: int):
self.en_passant_target_square = (line, column)
def init_board(self):
self.pin_color = list()
self.pos_piece_check = list()
self.list_piece_pin = list()
self.board = list()
self.to_move = Color.WHITE
self.can_black_king_side_castle = True
self.can_black_queen_side_castle = True
self.can_white_king_side_castle = True
self.can_white_queen_side_castle = True
self.en_passant_target_square = None
self.white_check = False
self.black_check = False
self.halfmove_clock = 0
self.fullmove_number = 0
for lin in range(8):
self.board.append(list())
for col in range(8):
self.board[lin].append(Square())
for col in range(8):
self.board[Lines.get('7')][col].piece = str_to_piece['p']
self.board[Lines.get('2')][col].piece = str_to_piece['P']
self.getSquare('a1').piece = str_to_piece['R']
self.getSquare('b1').piece = str_to_piece['N']
self.getSquare('c1').piece = str_to_piece['B']
self.getSquare('d1').piece = str_to_piece['Q']
self.getSquare('e1').piece = str_to_piece['K']
self.getSquare('f1').piece = str_to_piece['B']
self.getSquare('g1').piece = str_to_piece['N']
self.getSquare('h1').piece = str_to_piece['R']
self.getSquare('a8').piece = str_to_piece['r']
self.getSquare('b8').piece = str_to_piece['n']
self.getSquare('c8').piece = str_to_piece['b']
self.getSquare('d8').piece = str_to_piece['q']
self.getSquare('e8').piece = str_to_piece['k']
self.getSquare('f8').piece = str_to_piece['b']
self.getSquare('g8').piece = str_to_piece['n']
self.getSquare('h8').piece = str_to_piece['r']
def to_unicode(self) -> str:
board_string = ''
for line in range(8):
board_string += '|'
for col in range(8):
square = self.board[line][col]
if square.isEmpty():
board_string += ' '
else:
board_string += square.piece.to_unicode()
board_string += '|'
board_string += '\n'
return board_string
def __str__(self):
return self.to_unicode()
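    # move() below expects coordinate notation such as 'e2e4': the first two characters
    # are the start square and the last two the destination square.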
def move(self, lam: str):
lamlist = list(lam)
pawn_passant = ''
start_square: Square = self.board[Lines.get(lam[1])][Columns.get(lam[0])]
arrival_square: Square = self.board[Lines.get(lam[3])][Columns.get(lam[2])]
arrival_square.piece = start_square.piece
start_square.piece = None
def make_move(self, move: Move) -> bool:
        is_move_valid: bool = False
move_switcher: dict[PieceType, Callable] = {
PieceType.PAWN: self.move_pawn,
PieceType.KNIGHT: self.move_knight,
PieceType.ROOK: self.move_rook,
PieceType.BISHOP: self.move_bishop,
PieceType.QUEEN: self.move_queen,
PieceType.KING: self.move_king
}
if 0 <= move.end[0] <= 7 and 0 <= move.end[1] <= 7:
            # position of the piece before it is moved on the board
start_square: Square = self.board[move.start[0]][move.start[1]]
            # check that there is a piece to move and that it is not in list_piece_pin, i.e. not pinned in a way that would put its own king in check
self.king_check()
            if (not start_square.isEmpty()) and start_square.piece.color == self.to_move and (((move.end[0], move.end[1]) in self.list_piece_pin) or (not start_square.piece.color in self.pin_color)):
type_piece_start: PieceType = start_square.piece.kind
if start_square.piece.color == self.to_move:
                    if (start_square.piece.color == Color.WHITE and not self.white_check) or (start_square.piece.color == Color.BLACK and not self.black_check):
is_move_valid = move_switcher.get(type_piece_start)(move, False)
else:
print("Aucune pièce à déplacer ou pièce bloquée par pin")
is_move_valid = False
else:
print("déplacement d'une pièce en dehors de l'échiquier")
is_move_valid = False
if is_move_valid:
self.move_list.append(move)
if self.to_move == Color.WHITE:
self.to_move = Color.BLACK
else:
self.to_move = Color.WHITE
return is_move_valid
#def is_move_valid(self, move: Move) -> bool:
# is_move_valid: bool
# move.piece = self.board[move.start[0]][move.start[1]].piece
# move_switcher: dict[PieceType, Callable] = {
# PieceType.PAWN: self.move_pawn,
# PieceType.KNIGHT: self.move_knight,
# PieceType.ROOK: self.move_rook,
# PieceType.BISHOP: self.move_bishop,
# PieceType.QUEEN: self.move_queen,
# PieceType.KING: self.move_king
# }
#
# if 0 <= move.end[0] <= 7 and 0 <= move.end[1] <= 7:
# start_square: Square = self.board[move.start[0]][move.start[1]]
# if not (start_square.isEmpty()):
# type_piece_start: PieceType = start_square.piece.kind
# is_move_valid = move_switcher.get(type_piece_start)(move)
# else:
# print("No piece to move")
# is_move_valid = False
# else:
# print("Trying to move piece outside of board")
# is_move_valid = False
#
# return is_move_valid
def take_piece(self, move: Move, check: bool) -> bool:
start_square: Square = self.board[move.start[0]][move.start[1]]
end_square: Square = self.board[move.end[0]][move.end[1]]
if end_square.piece.color != start_square.piece.color:
if check is False:
self.board[move.end[0]][move.end[1]].piece = start_square.piece
self.board[move.start[0]][move.start[1]].piece = None
move.eat = True
print("piece taken")
if end_square.piece.kind == PieceType.KING:
if end_square.piece.color == Color.WHITE:
self.pos_piece_check.append((move.start[0], move.start[1]))
self.white_check = True
elif end_square.piece.color == Color.BLACK:
self.pos_piece_check.append((move.start[0], move.start[1]))
self.black_check = True
return True
else:
print("impossible to take piece of your own color")
return False
def move_piece(self, move: Move, check: bool) -> bool:
if check is False:
start_square: Square = self.board[move.start[0]][move.start[1]]
self.board[move.end[0]][move.end[1]].piece = start_square.piece
self.board[move.start[0]][move.start[1]].piece = None
print("piece moved")
return True
def queen_castle(self, move: Move):
start_square: Square = self.board[move.start[0]][move.start[1]]
end_square: Square = self.board[move.end[0]][move.end[1]]
start_line = move.start[0]
start_col = move.start[1]
rook_start_square: Square = self.board[start_line][start_col - 4]
rook_end_square: Square = self.board[start_line][start_col - 1]
if self.king_check():
rook_end_square.piece = rook_start_square.piece
rook_start_square.piece = None
end_square.piece = start_square.piece
start_square.piece = None
move.is_castled = True
move.castling = 'QUEEN_CASTLING'
print("Queen castling.")
return True
else:
print("Cannot queen castle because king will be in check.")
return False
def king_castle(self, move: Move):
start_square: Square = self.board[move.start[0]][move.start[1]]
end_square: Square = self.board[move.end[0]][move.end[1]]
start_line = move.start[0]
start_col = move.start[1]
rook_start_square: Square = self.board[start_line][start_col + 3]
rook_end_square: Square = self.board[start_line][start_col + 1]
if self.king_check():
rook_end_square.piece = rook_start_square.piece
rook_start_square.piece = None
end_square.piece = start_square.piece
start_square.piece = None
move.is_castled = True
move.castling = 'KING_CASTLING'
print("King castling.")
return True
else:
print("Cannot king castle because king will be in check.")
return False
def king_check(self) -> bool:
        # reset the lists of pieces giving check to the king
self.pos_piece_check.clear()
self.list_piece_pin.clear()
self.pin_color.clear()
self.white_check = False
self.black_check = False
move_switcher: dict[PieceType, Callable] = {
PieceType.ROOK: self.move_rook,
PieceType.BISHOP: self.move_bishop,
PieceType.QUEEN: self.move_queen,
}
pos_roi_blanc: tuple = tuple()
pos_roi_noir: tuple = tuple()
        # model the board as an 8x8 matrix
model_board = list()
        # initialize the board matrix to 0
for i in range(8):
model_board.append([0] * 8)
coup_knight: list[int] = list([2, 1])
sign: list[int] = list([1, -1])
for lin in range(8):
for col in range(8):
if not self.board[lin][col].isEmpty():
type_piece_start: PieceType = self.board[lin][col].piece.kind
                    # test knight moves
if self.board[lin][col].piece.kind == PieceType.KNIGHT:
for i in range(2):
for n in range(2):
if 0 <= lin + sign[n] * coup_knight[i] <= 7 and 0 <= col + sign[n] * list(reversed(coup_knight))[i] <= 7:
if self.move_knight(Move([lin, col], [lin + sign[n] * coup_knight[i], col + sign[n] * list(reversed(coup_knight))[i]]), True):
model_board[lin + sign[n] * coup_knight[i]][col + sign[n] * list(reversed(coup_knight))[i]] = 1
else:
print("déplacement hors de l'échiquier")
for i in range(2):
for n in range(2):
if 0 <= lin + list(reversed(sign))[n] * coup_knight[i] <= 7 and 0 <= col - list(reversed(sign))[n] * list(reversed(coup_knight))[i] <= 7:
if self.move_knight(Move([lin, col], [lin + list(reversed(sign))[n] * coup_knight[i], col - list(reversed(sign))[n] * list(reversed(coup_knight))[i]]), True):
model_board[lin + list(reversed(sign))[n] * coup_knight[i]][col - list(reversed(sign))[n] * list(reversed(coup_knight))[i]] = 1
else:
print("déplacement hors de l'échiquier")
                    # test rook and queen moves along rows and columns
elif self.board[lin][col].piece.kind == PieceType.ROOK or self.board[lin][col].piece.kind == PieceType.QUEEN:
for i in range(2):
for j in range(8):
if 0 <= lin + sign[i] * j <= 7 and 0 <= col <= 7:
if move_switcher.get(type_piece_start)(Move([lin, col], [lin + sign[i] * j, col]), True):
model_board[lin + sign[i] * j][col] = 1
                                        # check whether a piece blocks the check (pin strategy)
                                        # modified, not tested
if not self.board[lin + sign[i] * j][col].isEmpty():
if self.board[lin + sign[i] * j][col].piece.kind != PieceType.KING:
if 0 < lin + sign[i] * j < 7 and 0 < col < 7:
for k in range(1, 8):
if 0 <= (lin + sign[i] * j) + (sign[i] * k) <= 7 and 0 <= col <= 7:
if not self.board[(lin + sign[i] * j) + (sign[i] * k)][col].isEmpty():
logger.warning("coordonné board : %s", self.board[(lin + sign[i] * j) + (sign[i] * k)][col].piece.kind)
if self.board[(lin + sign[i] * j) + (sign[i] * k)][col].piece.kind == PieceType.KING:
self.list_piece_pin.append((lin + sign[i] * j, col))
self.pin_color.append(self.board[lin + sign[i] * j][col].piece.color)
logger.warning("list_piece_pin : %s", self.list_piece_pin)
else:
break
else:
print("déplacement hors de l'échiquier")
for i in range(2):
for j in range(8):
if 0 <= lin <= 7 and 0 <= col + sign[i] * j <= 7:
if move_switcher.get(type_piece_start)(Move([lin, col], [lin, col + sign[i] * j]), True):
model_board[lin][col + sign[i] * j] = 1
                                        # check whether a piece blocks the check (pin strategy)
if not self.board[lin][col + sign[i] * j].isEmpty():
if self.board[lin][col + sign[i] * j].piece.kind != PieceType.KING:
if 0 < lin < 7 and 0 < col + sign[i] * j < 7:
                                                    # figure out how many steps are needed and in which direction
                                                    # modified, not tested
for k in range(1, 8):
if 0 <= lin <= 7 and 0 <= ((col + sign[i] * j) + (sign[i] * k)) <= 7:
logger.warning("k et sens: %s %s", k, sign[i])
if not self.board[lin][(col + sign[i] * j) + (sign[i] * k)].isEmpty():
logger.warning("coordonné board : %s", self.board[lin][(col + sign[i] * j) + (sign[i] * k)].piece.kind)
if self.board[lin][(col + sign[i] * j) + (sign[i] * k)].piece.kind == PieceType.KING:
logger.warning("king")
self.list_piece_pin.append((lin, col + sign[i] * j))
self.pin_color.append(self.board[lin][col + sign[i] * j].piece.color)
logger.warning("list_piece_pin : %s", self.list_piece_pin)
else:
logger.critical("break")
break
else:
print("déplacement hors de l'échiquier")
                    # test bishop and queen moves along diagonals
if self.board[lin][col].piece.kind == PieceType.BISHOP or self.board[lin][col].piece.kind == PieceType.QUEEN:
for i in range(2):
for j in range(8):
if 0 <= lin + sign[i] * j <= 7 and 0 <= col + sign[i] * j <= 7:
if move_switcher.get(type_piece_start)(Move([lin, col], [lin + sign[i] * j, col + sign[i] * j]), True):
model_board[lin + sign[i] * j][col + sign[i] * j] = 1
                                        # check whether a piece blocks the check (pin strategy)
if not self.board[lin + sign[i] * j][col + sign[i] * j].isEmpty():
if self.board[lin + sign[i] * j][col + sign[i] * j].piece.kind != PieceType.KING:
#delta_diag: int = abs((col + sign[i] * j) - ((col + sign[i] * j) + sign[i] * (col + sign[i] * 8)))
                                                # works
if 0 < lin + sign[i] * j < 7 and 0 < col + sign[i] * j < 7:
for k in range(1, 8):
if 0 <= ((lin + sign[i] * j) + (sign[i] * k)) <= 7 and 0 <= ((col + sign[i] * j) + (sign[i] * k)) <= 7:
if not self.board[(lin + sign[i] * j) + (sign[i] * k)][(col + sign[i] * j) + (sign[i] * k)].isEmpty():
logger.warning("coordonné board : %s", self.board[(lin + sign[i] * j) + (sign[i] * k)][(col + sign[i] * j) + (sign[i] * k)].piece.kind)
if self.board[(lin + sign[i] * j) + (sign[i] * k)][(col + sign[i] * j) + (sign[i] * k)].piece.kind == PieceType.KING:
logger.warning("king")
self.list_piece_pin.append((lin + sign[i] * j, col + sign[i] * j))
self.pin_color.append(self.board[lin + sign[i] * j][col + sign[i] * j].piece.color)
logger.warning("list_piece_pin : %s", self.list_piece_pin)
else:
break
else:
print("déplacement hors de l'échiquier")
for i in range(2):
for j in range(8):
if 0 <= lin + list(reversed(sign))[i] * j <= 7 and 0 <= col + sign[i] * j <= 7:
if move_switcher.get(type_piece_start)(Move([lin, col], [lin + list(reversed(sign))[i] * j, col + sign[i] * j]), True):
model_board[lin + list(reversed(sign))[i] * j][col + sign[i] * j] = 1
                                        # check whether a piece blocks the check (pin strategy)
                                        # works
                                        # modified, not tested
if not self.board[lin + list(reversed(sign))[i] * j][col + sign[i] * j].isEmpty():
if self.board[lin + list(reversed(sign))[i] * j][col + sign[i] * j].piece.kind != PieceType.KING:
if 0 < lin + list(reversed(sign))[i] * j < 7 and 0 < col + sign[i] * j < 7:
for k in range(1, 8):
if 0 <= (lin + list(reversed(sign))[i] * j) + (list(reversed(sign))[i] * k) <= 7 and 0 <= ((col + sign[i] * j) + sign[i] * k) <= 7:
if not self.board[(lin + list(reversed(sign))[i] * j) + (list(reversed(sign))[i] * k)][((col + sign[i] * j) + sign[i] * k)].isEmpty():
logger.warning("coordonné board : %s", ([(lin + list(reversed(sign))[i] * j) + (list(reversed(sign))[i] * k)], [(col + sign[i] * j) + sign[i] * k]))
if self.board[(lin + list(reversed(sign))[i] * j) + (list(reversed(sign))[i] * k)][((col + sign[i] * j) + sign[i] * k)].piece.kind == PieceType.KING:
logger.warning("king")
self.list_piece_pin.append((lin + list(reversed(sign))[i] * j, col + sign[i] * j))
self.pin_color.append(self.board[lin + list(reversed(sign))[i] * j][col + sign[i] * j].piece.color)
logger.warning("list_piece_pin : %s", self.list_piece_pin)
else:
break
else:
print("déplacement hors de l'échiquier")
# test move pawn
elif self.board[lin][col].piece.kind == PieceType.PAWN:
for i in range(2):
if 0 <= lin + sign[i] <= 7 and 0 <= col + sign[i] <= 7:
if self.move_pawn(Move([lin, col], [lin + sign[i], col + sign[i]]), True):
model_board[lin + sign[i]][col + sign[i]] = 1
else:
print("déplacement hors de l'échiquier")
for i in range(2):
if 0 <= lin + sign[i] <= 7 and 0 <= col + list(reversed(sign))[i] <= 7:
if self.move_pawn(Move([lin, col], [lin + sign[i], col + list(reversed(sign))[i]]), True):
model_board[lin + sign[i]][col + list(reversed(sign))[i]] = 1
else:
print("déplacement hors de l'échiquier")
# test move king
elif self.board[lin][col].piece.kind == PieceType.KING:
if self.board[lin][col].piece.color == Color.WHITE:
pos_roi_blanc = (lin, col)
elif self.board[lin][col].piece.color == Color.BLACK:
pos_roi_noir = (lin, col)
                        # diagonal moves
for i in range(2):
if 0 <= lin + sign[i] <= 7 and 0 <= col + sign[i] <= 7:
if self.move_king(Move((lin, col), (lin + sign[i], col + sign[i])), True):
model_board[lin + sign[i]][col + sign[i]] = 1
else:
print("déplacement hors de l'échiquier")
for i in range(2):
if 0 <= lin + sign[i] <= 7 and 0 <= col + list(reversed(sign))[i] <= 7:
if self.move_king(Move([lin, col], [lin + sign[i], col + list(reversed(sign))[i]]), True):
model_board[lin + sign[i]][col + list(reversed(sign))[i]] = 1
else:
print("déplacement hors de l'échiquier")
                        # straight-line moves
for i in range(2):
if 0 <= lin + sign[i] <= 7 and 0 <= col <= 7:
if self.move_king(Move([lin, col], [lin + sign[i], col]), True):
model_board[lin + sign[i]][col] = 1
else:
print("déplacement hors de l'échiquier")
for i in range(2):
if 0 <= lin <= 7 and 0 <= col + sign[i] <= 7:
if self.move_king(Move([lin, col], [lin, col + sign[i]]), True):
model_board[lin][col + sign[i]] = 1
else:
print("déplacement hors de l'échiquier")
############################
############test############
############################
print(model_board[0])
print(model_board[1])
print(model_board[2])
print(model_board[3])
print(model_board[4])
print(model_board[5])
print(model_board[6])
print(model_board[7])
print(self.to_unicode())
logger.warning("white_check : %s", self.white_check)
logger.warning("black_check : %s", self.black_check)
logger.warning("pos_piece_check : %s", self.pos_piece_check)
logger.warning("list_piece_pin : %s", self.list_piece_pin)
############################
############test############
############################
if self.white_check:
return True
if self.black_check:
return True
return False
def move_knight(self, move: Move, check: bool) -> bool:
        # position of the piece before the move on the board
start_square: Square = self.board[move.start[0]][move.start[1]]
        # position of the piece after the move on the board
end_square: Square = self.board[move.end[0]][move.end[1]]
if move.end[0] == move.start[0] + 2 or move.end[0] == move.start[0] - 2:
if move.end[1] == move.start[1] + 1 or move.end[1] == move.start[1] - 1:
if end_square.isEmpty():
return self.move_piece(move, check)
elif not end_square.isEmpty():
return self.take_piece(move, check)
else:
print("Impossible de déplacer le cavalier à cet endroit")
return False
elif move.end[1] == move.start[1] + 2 or move.end[1] == move.start[1] - 2:
if move.end[0] == move.start[0] + 1 or move.end[0] == move.start[0] - 1:
if end_square.isEmpty():
return self.move_piece(move, check)
else:
return self.take_piece(move, check)
else:
print("Impossible de déplacer le cavalier à cet endroit")
return False
else:
print("Impossible de déplacer le cavalier à cet endroit")
return False
def move_bishop(self, move: Move, check: bool) -> bool:
iteration_lin: int = abs(move.end[0] - move.start[0])
iteration_col: int = abs(move.end[1] - move.start[1])
lin_sign: int = 1
col_sign: int = 1
if iteration_lin == iteration_col:
if (move.end[0] - move.start[0]) < 0:
lin_sign = -1
if (move.end[1] - move.start[1]) < 0:
col_sign = -1
for i in range(1, iteration_lin):
                # if the tested square is not empty and is not the piece's final destination
if not self.board[move.start[0] + lin_sign * i][move.start[1] + col_sign * i].isEmpty():
print("Une pièce bloque le passage du fou")
return False
            # if the destination square is empty
if self.board[move.end[0]][move.end[1]].isEmpty():
return self.move_piece(move, check)
            # if the destination square is not empty
else:
return self.take_piece(move, check)
else:
return False
def move_rook(self, move: Move, check: bool) -> bool:
iteration_lin: int = abs(move.end[0] - move.start[0])
iteration_col: int = abs(move.end[1] - move.start[1])
lin_sign: int = 1
col_sign: int = 1
        # if the move is along a straight line
if (move.end[0] != move.start[0] and move.end[1] == move.start[1]) or (move.end[0] == move.start[0] and move.end[1] != move.start[1]):
            # determine which direction we are moving in
if (move.end[0] - move.start[0]) < 0:
lin_sign = -1
elif (move.end[1] - move.start[1]) < 0:
col_sign = -1
            # move along a row
if move.end[0] != move.start[0]:
for i in range(1, iteration_lin):
if not self.board[move.start[0] + lin_sign * i][move.start[1]].isEmpty():
print("Une pièce bloque le passage de la tour")
return False
                # if the destination square is empty
if self.board[move.end[0]][move.end[1]].isEmpty():
return self.move_piece(move, check)
                # if the destination square is not empty
else:
return self.take_piece(move,check)
            # move along a column
elif move.end[1] != move.start[1]:
for i in range(1, iteration_col):
if not self.board[move.start[0]][move.start[1] + col_sign * i].isEmpty():
print("Une pièce bloque le passage de la tour")
return False
                # if the destination square is empty
if self.board[move.end[0]][move.end[1]].isEmpty():
return self.move_piece(move, check)
                # if the destination square is not empty
else:
return self.take_piece(move, check)
else:
print("Impossible de déplacer la tour à cet endroit")
return False
def move_queen(self, move: Move, check: bool) -> bool:
iteration_lin: float = abs(move.end[0] - move.start[0])
iteration_col: float = abs(move.end[1] - move.start[1])
lin_sign: int = 1
col_sign: int = 1
        # if the move is along a straight line
if (move.end[0] != move.start[0] and move.end[1] == move.start[1]) or (move.end[0] == move.start[0] and move.end[1] != move.start[1]):
            # determine which direction we are moving in
if (move.end[0] - move.start[0]) < 0:
lin_sign = -1
elif (move.end[1] - move.start[1]) < 0:
col_sign = -1
            # move along a row
if move.end[0] != move.start[0]:
for i in range(1, iteration_lin):
if not self.board[move.start[0] + lin_sign * i][move.start[1]].isEmpty():
print("Une pièce bloque le passage de la dame")
return False
                # if the destination square is empty
if self.board[move.end[0]][move.end[1]].isEmpty():
return self.move_piece(move, check)
                # if the destination square is not empty
else:
return self.take_piece(move, check)
            # move along a column
elif move.end[1] != move.start[1]:
for i in range(1, iteration_col):
if not self.board[move.start[0]][move.start[1] + col_sign * i].isEmpty():
print("Une pièce bloque le passage de la dame")
return False
                # if the destination square is empty
if self.board[move.end[0]][move.end[1]].isEmpty():
return self.move_piece(move, check)
                # if the destination square is not empty
else:
return self.take_piece(move, check)
        # if the move is along a diagonal
elif iteration_lin == iteration_col:
if (move.end[0] - move.start[0]) < 0:
lin_sign = -1
if (move.end[1] - move.start[1]) < 0:
col_sign = -1
for i in range(1, iteration_lin):
                # if the tested square is not empty and is not the piece's final destination
if not self.board[move.start[0] + lin_sign * i][move.start[1] + col_sign * i].isEmpty():
print("Une pièce bloque le passage de la dame")
return False
            # if the destination square is empty
if self.board[move.end[0]][move.end[1]].isEmpty():
return self.move_piece(move, check)
            # if the destination square is not empty
else:
return self.take_piece(move, check)
else:
print("Impossible de déplacer la dame à cet endroit")
return False
def move_king(self, move: Move, check: bool) -> bool:
start_square = self.board[move.start[0]][move.start[1]]
end_square = self.board[move.end[0]][move.end[1]]
# Check who can castle
# Castling is only possible if king has never moved.
moves = self.move_list
has_white_king_already_moved = any(
movehistory.piece.kind == PieceType.KING and movehistory.piece.color == Color.WHITE for movehistory in
moves)
has_black_king_already_moved = any(
movehistory.piece.kind == PieceType.KING and movehistory.piece.color == Color.BLACK for movehistory in
moves)
# Castling is only possible if rook has never moved.
has_white_king_rook_already_moved = any(
movehistory.piece.kind == PieceType.ROOK and movehistory.piece.color == Color.WHITE and movehistory.start ==
[7, 7] for movehistory in moves)
has_white_queen_rook_already_moved = any(
movehistory.piece.kind == PieceType.ROOK and movehistory.piece.color == Color.WHITE and movehistory.start ==
[7, 0] for movehistory in moves)
has_black_king_rook_already_moved = any(
movehistory.piece.kind == PieceType.ROOK and movehistory.piece.color == Color.BLACK and movehistory.start ==
[0, 7] for movehistory in moves)
has_black_queen_rook_already_moved = any(
movehistory.piece.kind == PieceType.ROOK and movehistory.piece.color == Color.BLACK and movehistory.start ==
[0, 0] for movehistory in moves)
# A king can only move one step forward, backwards, left or right
if (move.start[0] - 1 <= move.end[0] <= move.start[0] + 1) and (
move.start[1] - 1 <= move.end[1] <= move.start[1] + 1):
# If square is empty
if end_square.isEmpty():
return self.move_piece(move, check)
# If square is not empty
            # If the destination holds a piece of the same colour, refuse the move
            elif end_square.piece.color == start_square.piece.color:
                print("Cannot take piece of your own color.")
                return False
            else:
                return self.take_piece(move, check)
# Elif queen's side castling
elif move.start[0] == move.end[0] and move.start[1] - 2 == move.end[1]:
if move.piece.color == Color.BLACK and has_black_king_already_moved is False:
if has_black_queen_rook_already_moved is False and self.board[0][1].isEmpty() and self.board[0][
2].isEmpty() and self.board[0][3].isEmpty():
self.can_black_queen_side_castle = True
return self.queen_castle(move)
else:
return False
elif move.piece.color == Color.WHITE and has_white_king_already_moved is False:
if has_white_queen_rook_already_moved is False and self.board[7][1].isEmpty() and self.board[7][
2].isEmpty() and self.board[7][3].isEmpty():
self.can_white_queen_side_castle = True
return self.queen_castle(move)
else:
return False
# Elif king's side castling
elif move.start[0] == move.end[0] and move.start[1] + 2 == move.end[1]:
if move.piece.color == Color.BLACK and has_black_king_already_moved is False:
if has_black_king_rook_already_moved is False and self.board[0][5].isEmpty() and self.board[0][
6].isEmpty():
self.can_black_king_side_castle = True
return self.king_castle(move)
else:
return False
elif move.piece.color == Color.WHITE and has_white_king_already_moved is False:
if has_white_king_rook_already_moved is False and self.board[7][5].isEmpty() and self.board[7][
6].isEmpty():
self.can_white_king_side_castle = True
return self.king_castle(move)
else:
return False
else:
self.can_white_queen_side_castle = False
self.can_white_king_side_castle = False
self.can_black_queen_side_castle = False
self.can_black_king_side_castle = False
print("Cannot move king at this place.")
return False
def move_pawn(self, move: Move, check: bool) -> bool:
start_square: Square = self.board[move.start[0]][move.start[1]]
end_square: Square = self.board[move.end[0]][move.end[1]]
if start_square.piece.color == Color.BLACK:
if move.end[1] == move.start[1] and end_square.isEmpty():
# Pawns can only move forward one square at a time
if move.end[0] == move.start[0] + 1:
if move.end[0] == 7:
return self.promotion(move, check)
else:
return self.move_piece(move, check)
# except for their very first move where they can move forward two squares.
elif move.end[0] == move.start[0] + 2 and move.start[0] == 1:
self.setEnPassantTargetSquare(move.end[0] - 1, move.end[1])
return self.move_piece(move, check)
# Pawns can only capture one square diagonally in front of them
elif move.end[0] == move.start[0] + 1 and (
move.end[1] == move.start[1] - 1 or move.end[1] == move.start[1] + 1):
if end_square.isEmpty() is False:
if move.end[0] == 7 and end_square.piece.color != start_square.piece.color:
return self.promotion(move, check)
else:
return self.take_piece(move, check)
else:
print("cannot move pawn here.")
return False
elif start_square.piece.color == Color.WHITE:
print("pawn is white.")
if move.end[1] == move.start[1] and end_square.isEmpty():
print("Pawn wants to move on same column and ensquare is empty.")
if move.end[0] == move.start[0] - 1:
if move.end[0] == 0:
return self.promotion(move, check)
else:
return self.move_piece(move, check)
elif move.end[0] == move.start[0] - 2 and move.start[0] == 6:
self.setEnPassantTargetSquare(move.end[0] + 1, move.end[1])
return self.move_piece(move, check)
elif move.end[0] == move.start[0] - 1 and (
move.end[1] == move.start[1] - 1 or move.end[1] == move.start[1] + 1):
if end_square.isEmpty() is False:
if move.end[0] == 0 and end_square.piece.color != start_square.piece.color:
return self.promotion(move, check)
else:
return self.take_piece(move, check)
else:
print("cannot move pawn here.")
return False
else:
return False
def promotion(self, move: Move, check: bool) -> bool:
if move.promotion_kind is None:
return False
if check is False:
new_piece_color = self.board[move.start[0]][move.start[1]].piece.color
new_piece = Piece(move.promotion_kind, new_piece_color)
self.board[move.start[0]][move.start[1]].piece = None
self.board[move.end[0]][move.end[1]].piece = new_piece
return True
def board_to_fen(self) -> str:
board_fen = ''
for line in range(8):
board_fen += '/'
empty_squares = 0
for col in range(8):
square = self.board[line][col]
if square.isEmpty():
empty_squares += 1
board_fen += ''
else:
if empty_squares > 0:
empty_number = str(empty_squares)
board_fen += empty_number
piece = str(square.piece.piece_to_str())
board_fen += piece
empty_squares = 0
if empty_squares > 0:
empty_number = str(empty_squares)
board_fen += empty_number
board_fen = board_fen[1:]
if self.to_move == Color.BLACK:
board_fen += " b "
else:
board_fen += " w "
if self.can_white_king_side_castle:
board_fen += "K"
if self.can_white_queen_side_castle:
board_fen += "Q"
if self.can_black_king_side_castle:
board_fen += "k"
if self.can_black_queen_side_castle:
board_fen += "q"
if self.en_passant_target_square is not None:
enpassant = self.en_passant_target_square
chessline = IndexToLine[enpassant[0]]
column = enpassant[1]
enpassant_column = column + 1
chesscolumn = IndexToColumn[enpassant_column]
enpassant_target = ' ' + chessline + chesscolumn + ' '
board_fen += enpassant_target
else:
board_fen += ' - '
halfmove = str(self.halfmove_clock)
board_fen += halfmove
board_fen += ' '
fullmove = str(self.fullmove_number)
board_fen += fullmove
return board_fen
def game_to_pgn_long(self) -> str:
moves = self.move_list
long_pgn = ''
i = 1
round = 1
for move in moves:
move_start = IndexToColumn[move.start[1]] + IndexToLine[move.start[0]]
move_end = IndexToColumn[move.end[1]] + IndexToLine[move.end[0]]
piece = move.piece
# Display piece moves per round
if i % 2 != 0 and i > 1:
round += 1
long_pgn += ' ' + str(round) + '. '
elif i == 1:
long_pgn += '1. '
else:
long_pgn += ' '
if piece.kind != PieceType.PAWN:
long_pgn += piece.piece_to_str()
# Display when a piece has been eaten
if move.eat:
long_pgn += IndexToColumn[move.start[1]] + 'x' + move_end
else:
long_pgn += move_start + move_end
# Display promotion piece when pawn reaches board's end
if move.promotion_kind is not None:
new_piece: Piece = Piece(move.promotion_kind, piece.color)
long_pgn += '=' + new_piece.piece_to_str()
if move.is_castled:
if move.castling == 'QUEEN_CASTLING':
long_pgn += 'O-O-O'
else:
long_pgn += 'O-O'
i += 1
return long_pgn
def game_to_pgn(self) -> str:
moves = self.move_list
copied_board: Board = Board()
copied_board.init_board()
long_pgn = ''
i = 1
round = 1
for move in moves:
move_end = IndexToColumn[move.end[1]] + IndexToLine[move.end[0]]
piece = move.piece
# Display piece moves per round
if i % 2 != 0 and i > 1:
round += 1
long_pgn += ' ' + str(round) + '. '
elif i == 1:
long_pgn += '1. '
else:
long_pgn += ' '
if piece.kind != PieceType.PAWN:
long_pgn += piece.piece_to_str()
# Loop through board to get other piece of same color
for line in range(8):
for col in range(8):
square = self.board[line][col]
if square.isEmpty() is False:
# If found same piece on board
if square.piece.kind == move.piece.kind and square.piece.color == move.piece.color:
# If this piece can go to same end square as its pair:
if copied_board.is_move_valid(Move([line, col], [move.end[0], move.end[1]])):
if line == move.end[0]:
long_pgn += IndexToColumn[move.start[1]]
elif col == move.end[1]:
long_pgn += IndexToLine[move.start[0]]
# Display when a piece has been eaten
if move.eat:
long_pgn += 'x' + move_end
else:
long_pgn += move_end
# Display promotion piece when pawn reaches board's end
if move.promotion_kind is not None:
new_piece: Piece = Piece(move.promotion_kind, piece.color)
long_pgn += '=' + new_piece.piece_to_str()
if move.is_castled:
if move.castling == 'QUEEN_CASTLING':
long_pgn += 'O-O-O'
else:
long_pgn += 'O-O'
i += 1
copied_board.make_move(move)
return long_pgn
| 49.767557
| 213
| 0.482202
|
1ce4ff6bd9afa7adc559ce30512ec12dae253a57
| 6,880
|
py
|
Python
|
continued_functions.py
|
ThomasAldheimerFCG/continued-functions
|
3285c570cfffd6fa6bc7e5264e843b85602b0487
|
[
"MIT"
] | null | null | null |
continued_functions.py
|
ThomasAldheimerFCG/continued-functions
|
3285c570cfffd6fa6bc7e5264e843b85602b0487
|
[
"MIT"
] | null | null | null |
continued_functions.py
|
ThomasAldheimerFCG/continued-functions
|
3285c570cfffd6fa6bc7e5264e843b85602b0487
|
[
"MIT"
] | null | null | null |
import numpy as np
import cmath as cm
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import os
color_codes = {1: 'red', 2: 'yellow', 3: 'blue', 4: 'green', 5: 'magenta', 6: 'crimson', 7: 'violet', 8: 'gold',
9: 'palegreen', 10: 'orange', 11: 'skyblue', 12: 'purple', 13: 'aqua', 14: 'pink', 15: 'lime',
16: 'mistyrose', 0: 'white'}
patches = {1: mpatches.Patch(color=color_codes[1], label='1 lp'),
2: mpatches.Patch(color=color_codes[2], label='2 lp'),
3: mpatches.Patch(color=color_codes[3], label='3 lp'),
4: mpatches.Patch(color=color_codes[4], label='4 lp'),
5: mpatches.Patch(color=color_codes[5], label='5 lp'),
6: mpatches.Patch(color=color_codes[6], label='6 lp'),
7: mpatches.Patch(color=color_codes[7], label='7 lp'),
8: mpatches.Patch(color=color_codes[8], label='8 lp'),
9: mpatches.Patch(color=color_codes[9], label='9 lp'),
10: mpatches.Patch(color=color_codes[10], label='10 lp'),
11: mpatches.Patch(color=color_codes[11], label='11 lp'),
12: mpatches.Patch(color=color_codes[12], label='12 lp'),
13: mpatches.Patch(color=color_codes[13], label='13 lp'),
14: mpatches.Patch(color=color_codes[14], label='14 lp'),
15: mpatches.Patch(color=color_codes[15], label='15 lp'),
16: mpatches.Patch(color=color_codes[16], label='16 lp'),
0: mpatches.Patch(color=color_codes[0], label='undef')}
def continued_function(z, n, function):
"""
Return a sequence of n iterations of a continued function for a value z
"""
if function == 'exp':
def f(x):
return cm.exp(x*z)
elif function == 'log':
def f(x):
return cm.log(x*z)
elif function == 'sqrt':
def f(x):
return cm.sqrt(x*z)
elif function == 'cube_root':
def f(x):
return (x*z)**(1.0/3.0)
elif function == 'sine':
def f(x):
return cm.sin(x*z)
elif function == 'cosine':
def f(x):
return cm.cos(x*z)
elif function == 'tan':
def f(x):
return cm.tan(x*z)
elif function == 'arcsine':
def f(x):
            return cm.asin(x*z)
elif function == 'arccosine':
def f(x):
            return cm.acos(x*z)
elif function == 'arctan':
def f(x):
            return cm.atan(x*z)
elif function == 'sineh':
def f(x):
return cm.sinh(x*z)
elif function == 'cosineh':
def f(x):
return cm.cosh(x*z)
elif function == 'tanh':
def f(x):
return cm.tanh(x*z)
elif function == 'arcsineh':
def f(x):
return cm.asinh(x*z)
elif function == 'arccosineh':
def f(x):
return cm.acosh(x*z)
elif function == 'arctanh':
def f(x):
return cm.atanh(x*z)
elif function == 'exp_sin':
def f(x):
return cm.exp(cm.sin(x*z))
elif function == 'sin_exp':
def f(x):
return cm.sin(cm.exp(x*z))
elif function == 'log_sin':
def f(x):
return cm.log(cm.sin(x*z))
elif function == 'sin_log':
def f(x):
return cm.sin(cm.log(x*z))
elif function == 'cos_sin':
def f(x):
return cm.cos(cm.sin(x*z))
elif function == 'fraction_1':
def f(x):
return 1/(1+x*z)
elif function == 'fraction_2':
def f(x):
return z/(1+x*z)
else:
raise ValueError("Incorrect function: {}". format(function))
pade = [1]
try:
for k in range(n):
z_next = f(pade[k])
pade.append(z_next)
return pade
except (OverflowError, ValueError):
return [0]
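# Illustrative sketch (added for clarity, not part of the original script): for
# function='exp' the helper iterates f(x) = exp(x*z) starting from 1, which
# approximates the continued exponential exp(z*exp(z*exp(...))). The sample z is arbitrary.
def _example_continued_exp(z=complex(0.2, 0.1), n=5):
    sequence = continued_function(z=z, n=n, function='exp')
    for k, value in enumerate(sequence):
        print("term {}: {}".format(k, value))
    return sequence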
def test_limit_points(sequence, max_number_of_lp):
"""
Return the number (color coded) of limit points in a sequence.
Points within a distance epsilon are indistinguishable.
"""
if len(sequence) == 1:
return 0 # undefined
epsilon = 1e-10
try:
for i in range(2, max_number_of_lp+2):
if abs(sequence[-1]-sequence[-i]) < epsilon:
return i-1
return 0 # undefined
except (OverflowError, ValueError):
return 0 # undefined
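# Illustrative sketch (added for clarity, not part of the original script): a
# sequence whose tail alternates between two values is reported as 2 limit points,
# while a constant tail gives 1. The sample sequences below are made up.
def _example_limit_point_counts():
    two_cycle = [0.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0]
    constant_tail = [5.0, 2.0, 2.0, 2.0, 2.0]
    print(test_limit_points(two_cycle, max_number_of_lp=16))      # prints 2
    print(test_limit_points(constant_tail, max_number_of_lp=16))  # prints 1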
def scatter(function, list_lp, max_number_of_lp, a_re, b_re, a_im, b_im, legend):
"""
Generate a scatter plot for each number of limit points (each color code).
"""
fig = plt.figure(figsize=(16, 16), dpi=2000)
ax = fig.add_subplot(111, facecolor=color_codes[0])
for lp in range(1, max_number_of_lp+1):
x = []
y = []
color = color_codes[lp] # Use predefined dictionary with color codes
for k in range(len(list_lp)):
if list_lp[k][2] == lp:
x.append(list_lp[k][0])
y.append(list_lp[k][1])
ax.scatter(x, y, s=2, color=color, marker=',', lw=0, alpha=1)
ax.set_aspect('equal', 'box')
plt.xlim(a_re, b_re)
plt.ylim(a_im, b_im)
plt.xlabel('Re')
plt.ylabel('Im')
if legend:
list_of_handles = [patches[i] for i in range(max_number_of_lp+1)]
plt.legend(bbox_to_anchor=(1.0, 1.0), handles=list_of_handles)
fig.tight_layout()
if not os.path.exists('output'):
os.makedirs('output')
plt.savefig("output/{}_{}_lp.png".format(function, max_number_of_lp))
return None
def generate_picture(function, max_number_of_lp):
assert isinstance(max_number_of_lp, int), "max_number_of_lp must be of type int"
assert 17 > max_number_of_lp >= 1, "max_number_of_lp must be 17 > max_number_of_lp >= 1"
N = 2000 # num in linspace ("resolution" in picture)
n = 800 # number of terms in pade sequence
a_Re = -4.0 # start value Real axis
b_Re = 4.0 # end value Real axis
Re_x = np.linspace(a_Re, b_Re, N)
a_Im = -4.0 # start value Imaginary axis
b_Im = 4.0 # end value Imaginary axis
Im_y = np.linspace(a_Im, b_Im, N)
list_of_limit_points = []
for k in range(N):
for i in range(N):
z = complex(Re_x[i], Im_y[k])
no_of_limit_points = test_limit_points(sequence=continued_function(z=z, n=n, function=function),
max_number_of_lp=max_number_of_lp)
list_of_limit_points.append([Re_x[i], Im_y[k], no_of_limit_points])
scatter(function=function,
list_lp=list_of_limit_points,
max_number_of_lp=max_number_of_lp,
a_re=a_Re, b_re=b_Re, a_im=a_Im, b_im=b_Im,
legend=False)
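# Example invocation (illustrative only; any function name handled in
# continued_function is valid): generate_picture(function='exp', max_number_of_lp=4)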
| 35.833333
| 113
| 0.548547
|
601c7392eeddd7b974a344042bbab1a94239f9aa
| 6,806
|
py
|
Python
|
torchVisionTester/DataLoader.py
|
robotic-vision-lab/Deep-Ensembles-For-Probabilistic-Object-Detection
|
82fd36376694f447ccb5564c7af4cc6128b3ea60
|
[
"Apache-2.0"
] | 1
|
2022-02-04T13:13:50.000Z
|
2022-02-04T13:13:50.000Z
|
torchVisionTester/DataLoader.py
|
robotic-vision-lab/Deep-Ensembles-For-Probabilistic-Object-Detection
|
82fd36376694f447ccb5564c7af4cc6128b3ea60
|
[
"Apache-2.0"
] | null | null | null |
torchVisionTester/DataLoader.py
|
robotic-vision-lab/Deep-Ensembles-For-Probabilistic-Object-Detection
|
82fd36376694f447ccb5564c7af4cc6128b3ea60
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from torchVisionTester.Augmenter import *
from PIL import Image
from numpy import random
from glob import glob
from enum import Enum
from torch import from_numpy
from torch import stack as torchStack
import torch  # torch.cuda.is_available() is called in getArraysFromFileNames
#from tensorflow import convert_to_tensor
#from tensorflow import stack as tfStack
from torchVisionTester.Debugger import Debugger
import os
from torchvision import transforms
e,d = exec, Debugger()
e(d.gIS())
class ClassTypes(Enum):
NUMPY = 1
PYTORCH_TENSOR = 2
TENSORFLOW_TENSOR = 3
PIL_OBJECT = 4
class Datasets(Enum):
POD_TEST = 1
POD_VALIDATION = 2
POD_TRAIN = 3
class DatasetDirectories:
def addPreSuf(self,name,prefix,suffix):
"""
Params:
name - string to add prefix and suffix to
prefix - prefix of output string
suffix - suffix of output string
Returns: string with name in middle, prefix and suffix at beginning and
end respectively.
"""
return prefix + name + suffix
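    # Illustrative example (made-up values): addPreSuf('000003', './test_data/', '/')
    # returns './test_data/000003/', the pattern used by getPODTestDirectories below.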
def getPODTestDirectories(self):
#prefix = './Datasets/test_data/'
prefix = os.path.join('E:', '/test_data/')
suffix = '/'
directoryNames = [self.addPreSuf(str(i).zfill(6),prefix,suffix) for i in range(17)]
return directoryNames
def getDirectories(self, dataset):
"""
Params:
dataset - Dataset to load, choose from Datasets Enum
Returns: list of directories to load for dataset
"""
if dataset == Datasets.POD_TEST:
return self.getPODTestDirectories()
class DataLoader:
def __init__(self, augmentations = None, probabilities = None):
"""
        To use this class you need tensorflow and pytorch installed. If you do not want to install both, comment out the corresponding import lines at the top of this file (the tensorflow imports are already commented out).
Params:
augmentations - list of augmentations from Augmenter.py
probabilities - list of probabilities of applying augmentations
        If augmentations is not None, an Augmenter is created and each augmentation is applied with its matching probability.
"""
if augmentations != None:
self.augmenter = Augmenter()
self.augmentations = augmentations
self.probabilities = probabilities
def getFilePathsFromDirectory(self, directory, dataTypes):
"""
Params:
directories - string path to directory containing images
dataTypes - list of image types to be loaded in string form
Returns - list of file paths from directory with data types dataTypes
"""
fileNames = []
for i in dataTypes:
fileNames += glob(directory + '*.' + i)
return fileNames
def getFilePathsFromDirectories(self, directories, dataTypes):
"""
Params:
directories - list of string paths to directories containing images
dataTypes - list of image types to be loaded in string form
Returns - list of file paths from directories with data types dataTypes
"""
fileNames = []
for i in directories:
fileNames += self.getFilePathsFromDirectory(i,dataTypes)
return fileNames
def getArraysFromFileNames(self, fileNames, classType, stack = True, augment = False):
"""
Params:
fileNames - names of all files to be loaded
classType - type of image to be loaded. Select from ClassTypes
            stack - if true, stack list of arrays or tensors
            augment - if true, apply the augmentations configured in __init__
        Returns - list of arrays of classType from directories with data type dataTypes
"""
images = [Image.open(filename).convert('RGB') for filename in fileNames]
        if classType == ClassTypes.PIL_OBJECT: return images
images = [np.asarray(image) for image in images]
if augment: images = self.augmenter.augmentBatchMul(images, self.augmentations, self.probabilities)
if stack : images = np.stack(images)
if classType == ClassTypes.NUMPY: return images
if classType == ClassTypes.PYTORCH_TENSOR:
tensorImages = []
for image in images:
transform = transforms.Compose([transforms.ToTensor()])
if torch.cuda.is_available():
tensorIm = transform(image).cuda()
else:
tensorIm = transform(image)
tensorImages.append(tensorIm)
if stack: tensorImages = torchStack(tensorImages).float()
return tensorImages
if classType == ClassTypes.TENSORFLOW_TENSOR:
tensorImages = [convert_to_tensor(image) for image in images]
if stack: tensorImages = tfStack(tensorImages)
return tensorImages
def sample(self,mylist):
"""
Params:
- mylist - list
returns: random element from mylist
"""
if len(mylist) == 1: return mylist[0]
        # numpy's randint excludes the upper bound, so pass len(mylist) to allow the last index
        randomInt = random.randint(0, len(mylist))
return mylist[randomInt]
def sampleN(self,mylist, n):
"""
Params:
mylist - list
n - number of random items to sample with replacement
returns: n random items from mylist
"""
return [self.sample(mylist) for i in range(n)]
def getBatchFromDir(self, directories, dataTypes, classType, batch_size, augment = False):
"""
Params:
directories - list of string paths to directories containing images
dataTypes - list of image types to be loaded in string form e.g. 'jpg', 'png', 'jpeg'
classType - type of image to be loaded. Select from ClassTypes
batch_size - number of images to sample
Returns - batch_size images from directories with data type dataTypes in form of classType
"""
fileNames = self.getFilePathsFromDirectories(directories,dataTypes)
batchNames = self.sampleN(fileNames, batch_size)
        arrayList = self.getArraysFromFileNames(batchNames, classType, augment=augment)
return arrayList
def getBatchFromFileNames(self,fileNames,classType,batch_size, augment = False):
"""
Params:
fileNames - list of paths to images to be loaded
classType - type of image to be loaded. Select from ClassTypes
batch_size - number of images to sample
Returns - batch_size images from fileNames paths in form of classType
"""
batchNames = self.sampleN(fileNames, batch_size)
        arrayList = self.getArraysFromFileNames(batchNames, classType, augment=augment)
return arrayList
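# Illustrative usage sketch (not part of the original module; the directory layout,
# file types and batch size below are assumptions):
def _example_numpy_batch():
    loader = DataLoader()  # no augmentations configured
    directories = DatasetDirectories().getDirectories(Datasets.POD_TEST)
    batch = loader.getBatchFromDir(directories=directories,
                                   dataTypes=['png', 'jpg'],
                                   classType=ClassTypes.NUMPY,
                                   batch_size=4)
    return batch  # stacked numpy array of shape (4, H, W, 3) when all images share a size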
| 38.235955
| 145
| 0.613429
|
f513df392c681a4d117e2ec20a164853d7eee433
| 1,405
|
py
|
Python
|
tests/parse_args_test.py
|
sudo-julia/devenv
|
2d5da4cc126445f59a2d7588231c69e6841c2e6d
|
[
"Apache-2.0"
] | null | null | null |
tests/parse_args_test.py
|
sudo-julia/devenv
|
2d5da4cc126445f59a2d7588231c69e6841c2e6d
|
[
"Apache-2.0"
] | 11
|
2021-12-22T01:20:30.000Z
|
2022-03-29T01:22:11.000Z
|
tests/parse_args_test.py
|
sudo-julia/dvnv
|
2d5da4cc126445f59a2d7588231c69e6841c2e6d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
from unittest.mock import patch
import pytest
from dvnv.dvnv import parse_args
def test_parse_args_pass():
with patch.object(sys, "argv", ["dvnv", "python", "dvnv"]):
args = parse_args()
assert args.lang == "python"
assert args.name == "dvnv"
def test_parse_args_no_args(capsys):
with patch.object(sys, "argv", ["dvnv", None, None]):
with pytest.raises(SystemExit):
parse_args()
captured = capsys.readouterr()
assert (
captured.err
== "[ERR] the following arguments are required: lang, name\n"
)
def test_parse_args_no_lang(capsys):
with patch.object(sys, "argv", ["dvnv", None, "test-project"]):
with pytest.raises(SystemExit):
parse_args()
captured = capsys.readouterr()
assert captured.err == "[ERR] the following arguments are required: lang\n"
def test_parse_args_no_name(capsys):
with patch.object(sys, "argv", ["dvnv", "python", None]):
with pytest.raises(SystemExit):
parse_args()
captured = capsys.readouterr()
assert captured.err == "[ERR] the following arguments are required: name\n"
def test_parse_args_install_scripts():
with patch.object(sys, "argv", ["dvnv", None, None, "--install_scripts"]):
assert parse_args()
| 29.893617
| 87
| 0.610676
|
aef3007d13ada0815d1caed5154b33e00a29e3cb
| 862
|
py
|
Python
|
GUI/Cocoa/Container.py
|
gcewing/PyGUI
|
58c6c38ccb8e66acdf98dea6b24bef1d9a03147c
|
[
"MIT"
] | 9
|
2019-07-15T19:03:27.000Z
|
2021-11-24T19:50:02.000Z
|
GUI/Cocoa/Container.py
|
mnabeelp/PyGUI
|
58c6c38ccb8e66acdf98dea6b24bef1d9a03147c
|
[
"MIT"
] | 3
|
2019-09-11T13:22:10.000Z
|
2020-08-19T20:13:00.000Z
|
GUI/Cocoa/Container.py
|
mnabeelp/PyGUI
|
58c6c38ccb8e66acdf98dea6b24bef1d9a03147c
|
[
"MIT"
] | 4
|
2020-02-23T16:50:06.000Z
|
2022-02-10T07:15:35.000Z
|
#
# Python GUI - Containers - PyObjC version
#
from AppKit import NSView
from GUI.Utils import PyGUI_Flipped_NSView
from GUI import export
from GUI.GContainers import Container as GContainer
class Container(GContainer):
# _ns_inner_view NSView Containing NSView for subcomponents
# def __init__(self, _ns_view, **kwds):
# GContainer.__init__(self, _ns_view = _ns_view, **kwds)
# def destroy(self):
# #print "Container.destroy:", self ###
# GContainer.destroy(self)
# #print "Container.destroy: breaking inner link to", self._ns_inner_view ###
def _add(self, comp):
GContainer._add(self, comp)
self._ns_inner_view.addSubview_(comp._ns_view)
def _remove(self, comp):
GContainer._remove(self, comp)
comp._ns_view.removeFromSuperview()
#------------------------------------------------------------------------------
export(Container)
| 26.9375
| 79
| 0.679814
|
7b038d1239e464aa440c69a2c913ee04cf910f04
| 23,866
|
py
|
Python
|
models/official/resnet/resnet_main.py
|
dshieble/tpu
|
4f734be260870d7cecfe08ae7229f1c4db9a5df6
|
[
"Apache-2.0"
] | 1
|
2020-03-05T10:35:53.000Z
|
2020-03-05T10:35:53.000Z
|
models/official/resnet/resnet_main.py
|
dshieble/tpu
|
4f734be260870d7cecfe08ae7229f1c4db9a5df6
|
[
"Apache-2.0"
] | null | null | null |
models/official/resnet/resnet_main.py
|
dshieble/tpu
|
4f734be260870d7cecfe08ae7229f1c4db9a5df6
|
[
"Apache-2.0"
] | 1
|
2020-03-05T10:35:55.000Z
|
2020-03-05T10:35:55.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on ImageNet on TPU.
RESNETDEPTH=v2_152; \
BUCKET=gs://performances-tpu-$RESNETDEPTH; \
gsutil rm -r $BUCKET; gsutil mkdir $BUCKET; \
git pull; python3 models/official/resnet/resnet_main.py \
--tpu_name=$TPU_NAME \
--data_dir=gs://imagenet_data/train \
--model_dir=gs://performances-tpu-$RESNETDEPTH \
--resnet_depth=$RESNETDEPTH | tee -a performances-tpu-$RESNETDEPTH
RESNETDEPTH=fc-v2_152; \
BUCKET=gs://performances-tpu-$RESNETDEPTH; \
gsutil rm -r $BUCKET; gsutil mkdir $BUCKET; \
git pull; python3 models/official/resnet/resnet_main.py \
--tpu_name=$TPU_NAME \
--data_dir=gs://imagenet_data/train \
--model_dir=gs://performances-tpu-$RESNETDEPTH \
--clip_gradients 100 \
--resnet_depth=$RESNETDEPTH | tee -a performances-tpu-$RESNETDEPTH
RESNETDEPTH=paper-v2_152; \
BUCKET=gs://performances-tpu-$RESNETDEPTH; \
gsutil rm -r $BUCKET; gsutil mkdir $BUCKET; \
git pull; python3 models/official/resnet/resnet_main.py \
--tpu_name=$TPU_NAME \
--data_dir=gs://imagenet_data/train \
--model_dir=gs://performances-tpu-$RESNETDEPTH \
--resnet_depth=$RESNETDEPTH | tee -a performances-tpu-$RESNETDEPTH
RESNETDEPTH=fc-v2_152; \
BUCKET=gs://performances-tpu-$RESNETDEPTH-aa; \
gsutil rm -r $BUCKET; gsutil mkdir $BUCKET; \
git pull; python3 models/official/resnet/resnet_main.py \
--tpu_name=$TPU_NAME \
--data_dir=gs://imagenet_data/train \
--model_dir=$BUCKET \
--clip_gradients 5 \
--resnet_depth=$RESNETDEPTH | tee -a performances-tpu-$RESNETDEPTH
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import imagenet_input
import resnet_model
import resnet_v2_model
from tensorflow.contrib import summary
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.python.estimator import estimator
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'use_tpu', True,
help=('Use TPU to execute the model for training and evaluation. If'
' --use_tpu=false, will use whatever devices are available to'
' TensorFlow by default (e.g. CPU and GPU)'))
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_name', default=None,
help='Name of the Cloud TPU for Cluster Resolvers. You must specify either '
'this flag or --master.')
flags.DEFINE_string(
'master', default=None,
help='gRPC URL of the master (i.e. grpc://ip.address.of.tpu:8470). You '
'must specify either this flag or --tpu_name.')
# Model specific flags
flags.DEFINE_string(
'data_dir', default=None,
help=('The directory where the ImageNet input data is stored. Please see'
' the README.md for the expected data format.'))
flags.DEFINE_string(
'model_dir', default=None,
help=('The directory where the model and training/evaluation summaries are'
' stored.'))
flags.DEFINE_string(
'resnet_depth', default="50",
    help=('ResNet variant to use; must start with one of "v1_", "paper-v1_", "fc-v1_",'
          ' "v2_", "paper-v2_" or "fc-v2_" followed by the depth, e.g. "v2_152".'))
flags.DEFINE_string(
'mode', default='train_and_eval',
help='One of {"train_and_eval", "train", "eval"}.')
flags.DEFINE_integer(
'train_steps', default=200000,
help=('The number of steps to use for training.'))
flags.DEFINE_integer(
'train_batch_size', default=1024, help='Batch size for training.')
flags.DEFINE_integer(
'eval_batch_size', default=1024, help='Batch size for evaluation.')
flags.DEFINE_integer(
'steps_per_eval', default=5000,
help=('Controls how often evaluation is performed. Since evaluation is'
' fairly expensive, it is advised to evaluate as infrequently as'
' possible (i.e. up to --train_steps, which evaluates the model only'
' after finishing the entire training regime).'))
flags.DEFINE_bool(
'skip_host_call', default=False,
help=('Skip the host_call which is executed every training step. This is'
' generally used for generating training summaries (train loss,'
' learning rate, etc...). When --skip_host_call=false, there could'
' be a performance drop if host_call function is slow and cannot'
' keep up with the TPU-side computation.'))
flags.DEFINE_integer(
'iterations_per_loop', default=100,
help=('Number of steps to run on TPU before outfeeding metrics to the CPU.'
' If the number of iterations in the loop would exceed the number of'
' train steps, the loop will exit before reaching'
' --iterations_per_loop. The larger this value is, the higher the'
' utilization on the TPU.'))
flags.DEFINE_integer(
'num_cores', default=8,
help=('Number of TPU cores. For a single TPU device, this is 8 because each'
' TPU has 4 chips each with 2 cores.'))
flags.DEFINE_string(
'data_format', default='channels_last',
help=('A flag to override the data format used in the model. The value'
' is either channels_first or channels_last. To run the model on'
' CPU or TPU, channels_last should be used. For GPU, channels_first'
' will improve performance.'))
# TODO(chrisying): remove this flag once --transpose_tpu_infeed flag is enabled
# by default for TPU
flags.DEFINE_bool(
'transpose_input', default=True,
help='Use TPU double transpose optimization')
flags.DEFINE_integer(
'clip_gradients', default=0,
    help='Global-norm threshold used for gradient clipping. If 0, gradients are not clipped.')
flags.DEFINE_string(
'export_dir',
default=None,
help=('The directory where the exported SavedModel will be stored.'))
flags.DEFINE_float(
'base_learning_rate',
default=0.1,
help=('base LR when batch size = 256.'))
# Dataset constants
LABEL_CLASSES = 1000
NUM_TRAIN_IMAGES = 1281167
NUM_EVAL_IMAGES = 50000
# Learning hyperparameters
MOMENTUM = 0.9
WEIGHT_DECAY = 1e-4
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
def learning_rate_schedule(current_epoch):
"""Handles linear scaling rule, gradual warmup, and LR decay.
The learning rate starts at 0, then it increases linearly per step.
After 5 epochs we reach the base learning rate (scaled to account
for batch size).
After 30, 60 and 80 epochs the learning rate is divided by 10.
After 90 epochs training stops and the LR is set to 0. This ensures
that we train for exactly 90 epochs for reproducibility.
Args:
current_epoch: `Tensor` for current epoch.
Returns:
A scaled `Tensor` for current learning rate.
"""
scaled_lr = FLAGS.base_learning_rate * (FLAGS.train_batch_size / 256.0)
decay_rate = (scaled_lr * LR_SCHEDULE[0][0] *
current_epoch / LR_SCHEDULE[0][1])
for mult, start_epoch in LR_SCHEDULE:
decay_rate = tf.where(current_epoch < start_epoch,
decay_rate, scaled_lr * mult)
return decay_rate
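# Worked example (comments only, added for illustration; numbers assume the default
# --train_batch_size=1024 and --base_learning_rate=0.1):
#   scaled_lr = 0.1 * 1024 / 256 = 0.4
#   epoch 2.5 (inside the 5-epoch warmup)  -> 0.4 * 2.5 / 5 = 0.2
#   epoch 45  (past the 30-epoch boundary) -> 0.4 * 0.1     = 0.04
#   epoch 85  (past the 80-epoch boundary) -> 0.4 * 0.001   = 0.0004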
def resnet_model_fn(features, labels, mode, params):
"""The model_fn for ResNet to be used with TPUEstimator.
Args:
features: `Tensor` of batched images.
labels: `Tensor` of labels for the data samples
mode: one of `tf.estimator.ModeKeys.{TRAIN,EVAL}`
params: `dict` of parameters passed to the model from the TPUEstimator,
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A `TPUEstimatorSpec` for the model
"""
if isinstance(features, dict):
features = features['feature']
if FLAGS.data_format == 'channels_first':
assert not FLAGS.transpose_input # channels_first only for GPU
features = tf.transpose(features, [0, 3, 1, 2])
if FLAGS.transpose_input:
features = tf.transpose(features, [3, 0, 1, 2]) # HWCN to NHWC
if FLAGS.use_tpu:
import bfloat16
scope_fn = lambda: bfloat16.bfloat16_scope()
else:
scope_fn = lambda: tf.variable_scope("")
with scope_fn():
resnet_size = int(FLAGS.resnet_depth.split("_")[-1])
if FLAGS.resnet_depth.startswith("v1_"):
print("\n\n\n\n\nUSING RESNET V1 {}\n\n\n\n\n".format(FLAGS.resnet_depth))
network = resnet_model.resnet_v1(
resnet_depth=int(resnet_size),
num_classes=LABEL_CLASSES,
attention=None,
apply_to="outputs",
use_tpu=FLAGS.use_tpu,
data_format=FLAGS.data_format)
elif FLAGS.resnet_depth.startswith("paper-v1_"):
print("\n\n\n\n\nUSING RESNET V1 (Paper) {}\n\n\n\n\n".format(resnet_size))
network = resnet_model.resnet_v1(
resnet_depth=int(resnet_size),
num_classes=LABEL_CLASSES,
attention="paper",
apply_to="outputs",
use_tpu=FLAGS.use_tpu,
data_format=FLAGS.data_format)
elif FLAGS.resnet_depth.startswith("fc-v1_"):
print("\n\n\n\n\nUSING RESNET V1 (fc) {}\n\n\n\n\n".format(resnet_size))
network = resnet_model.resnet_v1(
resnet_depth=int(resnet_size),
num_classes=LABEL_CLASSES,
attention="fc",
apply_to="outputs",
use_tpu=FLAGS.use_tpu,
data_format=FLAGS.data_format)
elif FLAGS.resnet_depth.startswith("v2_"):
print("\n\n\n\n\nUSING RESNET V2 {}\n\n\n\n\n".format(resnet_size))
network = resnet_v2_model.resnet_v2(
resnet_size=resnet_size,
num_classes=LABEL_CLASSES,
feature_attention=False,
extra_convs=0,
data_format=FLAGS.data_format,
use_tpu=FLAGS.use_tpu)
elif FLAGS.resnet_depth.startswith("paper-v2_"):
print("\n\n\n\n\nUSING RESNET V2 (Paper) {}\n\n\n\n\n".format(resnet_size))
network = resnet_v2_model.resnet_v2(
resnet_size=resnet_size,
num_classes=LABEL_CLASSES,
feature_attention="paper",
extra_convs=0,
apply_to="output",
data_format=FLAGS.data_format,
use_tpu=FLAGS.use_tpu)
elif FLAGS.resnet_depth.startswith("fc-v2_"):
print("\n\n\n\n\nUSING RESNET V2 (fc) {}\n\n\n\n\n".format(resnet_size))
network = resnet_v2_model.resnet_v2(
resnet_size=resnet_size,
num_classes=LABEL_CLASSES,
feature_attention="fc",
extra_convs=1,
data_format=FLAGS.data_format,
use_tpu=FLAGS.use_tpu)
else:
assert False
logits = network(
inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
logits = tf.cast(logits, tf.float32)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
# If necessary, in the model_fn, use params['batch_size'] instead the batch
# size flags (--train_batch_size or --eval_batch_size).
batch_size = params['batch_size'] # pylint: disable=unused-variable
# Calculate loss, which includes softmax cross entropy and L2 regularization.
one_hot_labels = tf.one_hot(labels, LABEL_CLASSES)
cross_entropy = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=one_hot_labels)
# Add weight decay to the loss for non-batch-normalization variables.
loss = cross_entropy + WEIGHT_DECAY * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables()
if 'batch_normalization' not in v.name])
# with tf.device("/cpu:0"):
# loss = tf.Print(loss, [loss], "loss", summarize=20)
host_call = None
if mode == tf.estimator.ModeKeys.TRAIN:
# Compute the current epoch and associated learning rate from global_step.
global_step = tf.train.get_global_step()
batches_per_epoch = NUM_TRAIN_IMAGES / FLAGS.train_batch_size
current_epoch = (tf.cast(global_step, tf.float32) /
batches_per_epoch)
learning_rate = learning_rate_schedule(current_epoch)
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=MOMENTUM, use_nesterov=True)
if FLAGS.use_tpu:
# When using TPU, wrap the optimizer with CrossShardOptimizer which
# handles synchronization details between different TPU cores. To the
# user, this should look like regular synchronous training.
optimizer = tpu_optimizer.CrossShardOptimizer(optimizer)
# Batch normalization requires UPDATE_OPS to be added as a dependency to
# the train operation.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
if FLAGS.clip_gradients == 0:
print("\nnot clipping gradients\n")
train_op = optimizer.minimize(loss, global_step)
else:
print("\nclipping gradients\n")
gradients, variables = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.clip_gradients)
train_op = optimizer.apply_gradients(zip(gradients, variables),
global_step=global_step)
# gvs = optimizer.compute_gradients(loss)
# gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
# capped_gvs = [(tf.clip_by_value(grad, -10., 10.), var) for grad, var in gvs]
# train_op = optimizer.apply_gradients(capped_gvs, global_step=global_step)
if not FLAGS.skip_host_call:
def host_call_fn(gs, loss, lr, ce):
"""Training host call. Creates scalar summaries for training metrics.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the
model to the `metric_fn`, provide as part of the `host_call`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `host_call`.
Args:
gs: `Tensor with shape `[batch]` for the global_step
loss: `Tensor` with shape `[batch]` for the training loss.
lr: `Tensor` with shape `[batch]` for the learning_rate.
ce: `Tensor` with shape `[batch]` for the current_epoch.
Returns:
List of summary ops to run on the CPU host.
"""
gs = gs[0]
with summary.create_file_writer(FLAGS.model_dir).as_default():
with summary.always_record_summaries():
summary.scalar('loss', loss[0], step=gs)
summary.scalar('learning_rate', lr[0], step=gs)
summary.scalar('current_epoch', ce[0], step=gs)
return summary.all_summary_ops()
# To log the loss, current learning rate, and epoch for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
gs_t = tf.reshape(global_step, [1])
loss_t = tf.reshape(loss, [1])
lr_t = tf.reshape(learning_rate, [1])
ce_t = tf.reshape(current_epoch, [1])
host_call = (host_call_fn, [gs_t, loss_t, lr_t, ce_t])
else:
train_op = None
eval_metrics = None
if mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(labels, logits):
"""Evaluation metric function. Evaluates accuracy.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the model
to the `metric_fn`, provide as part of the `eval_metrics`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `eval_metrics`.
Args:
labels: `Tensor` with shape `[batch]`.
logits: `Tensor` with shape `[batch, num_classes]`.
Returns:
A dict of the metrics to return from evaluation.
"""
predictions = tf.argmax(logits, axis=1)
top_1_accuracy = tf.metrics.accuracy(labels, predictions)
in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
top_5_accuracy = tf.metrics.mean(in_top_5)
return {
'top_1_accuracy': top_1_accuracy,
'top_5_accuracy': top_5_accuracy,
}
eval_metrics = (metric_fn, [labels, logits])
# logging_hook = tf.train.LoggingTensorHook(
# {"logging_hook_loss": loss}, every_n_iter=1)
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
host_call=host_call,
eval_metrics=eval_metrics,
# training_hooks=[logging_hook]
)
def main(unused_argv):
tpu_grpc_url = None
tpu_cluster_resolver = None
if FLAGS.use_tpu:
# Determine the gRPC URL of the TPU device to use
if not FLAGS.master and not FLAGS.tpu_name:
raise RuntimeError('You must specify either --master or --tpu_name.')
if FLAGS.master:
if FLAGS.tpu_name:
tf.logging.warn('Both --master and --tpu_name are set. Ignoring'
' --tpu_name and using --master.')
tpu_grpc_url = FLAGS.master
else:
tpu_cluster_resolver = (
tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project))
else:
# URL is unused if running locally without TPU
tpu_grpc_url = None
config = tpu_config.RunConfig(
master=tpu_grpc_url,
evaluation_master=tpu_grpc_url,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=FLAGS.iterations_per_loop,
cluster=tpu_cluster_resolver,
tpu_config=tpu_config.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_cores,
per_host_input_for_training=tpu_config.InputPipelineConfig.PER_HOST_V2)) # pylint: disable=line-too-long
resnet_classifier = tpu_estimator.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=resnet_model_fn,
config=config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
imagenet_train = imagenet_input.ImageNetInput(
is_training=True,
data_dir=FLAGS.data_dir,
use_bfloat=FLAGS.use_tpu,
transpose_input=FLAGS.transpose_input)
imagenet_eval = imagenet_input.ImageNetInput(
is_training=False,
data_dir=FLAGS.data_dir,
use_bfloat=FLAGS.use_tpu,
transpose_input=FLAGS.transpose_input)
if FLAGS.mode == 'eval':
eval_steps = NUM_EVAL_IMAGES // FLAGS.eval_batch_size
# Run evaluation when there's a new checkpoint
for ckpt in evaluation.checkpoints_iterator(FLAGS.model_dir):
tf.logging.info('Starting to evaluate.')
try:
start_timestamp = time.time() # This time will include compilation time
eval_results = resnet_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
checkpoint_path=ckpt)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info('Eval results: %s. Elapsed seconds: %d' %
(eval_results, elapsed_time))
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
if current_step >= FLAGS.train_steps:
tf.logging.info(
'Evaluation finished after training step %d' % current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)
else: # FLAGS.mode == 'train' or FLAGS.mode == 'train_and_eval'
current_step = estimator._load_global_step_from_checkpoint_dir(FLAGS.model_dir) # pylint: disable=protected-access,line-too-long
batches_per_epoch = NUM_TRAIN_IMAGES / FLAGS.train_batch_size
tf.logging.info('Training for %d steps (%.2f epochs in total). Current'
' step %d.' % (FLAGS.train_steps,
FLAGS.train_steps / batches_per_epoch,
current_step))
start_timestamp = time.time() # This time will include compilation time
if FLAGS.mode == 'train':
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=int(FLAGS.train_steps))
else:
assert FLAGS.mode == 'train_and_eval'
while current_step < FLAGS.train_steps:
# Train for up to steps_per_eval number of steps.
# At the end of training, a checkpoint will be written to --model_dir.
next_checkpoint = min(current_step + FLAGS.steps_per_eval,
FLAGS.train_steps)
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=int(next_checkpoint))
current_step = next_checkpoint
# Evaluate the model on the most recent model in --model_dir.
# Since evaluation happens in batches of --eval_batch_size, some images
# may be consistently excluded modulo the batch size.
tf.logging.info('Starting to evaluate.')
eval_results = resnet_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=NUM_EVAL_IMAGES // FLAGS.eval_batch_size)
tf.logging.info('Eval results: %s' % eval_results)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info('Finished training up to step %d. Elapsed seconds %d.' %
(FLAGS.train_steps, elapsed_time))
if FLAGS.export_dir is not None:
# The guide to serve a exported TensorFlow model is at:
# https://www.tensorflow.org/serving/serving_basic
tf.logging.info('Starting to export model.')
resnet_classifier.export_savedmodel(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=imagenet_input.image_serving_input_fn)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| 38.063796
| 133
| 0.68386
|
f78e71706e17df4c3965a43f11e6a35197bbb753
| 12,036
|
py
|
Python
|
Project Files/Prebuilt tools/twitter/Twitter/pylib/requests/packages/urllib3/contrib/pyopenssl.py
|
nVoid/Yale-TouchDesigner-April2016
|
40eb36f515fa3935f3e9ddaa923664e88308262c
|
[
"MIT"
] | 24
|
2015-11-12T06:33:24.000Z
|
2019-04-16T11:11:13.000Z
|
requests/packages/urllib3/contrib/pyopenssl.py
|
lepture/requests
|
ac4e05874a1a983ca126185a0e4d4e74915f792e
|
[
"Apache-2.0"
] | 13
|
2020-10-28T16:02:09.000Z
|
2020-11-16T13:30:05.000Z
|
requests/packages/urllib3/contrib/pyopenssl.py
|
lepture/requests
|
ac4e05874a1a983ca126185a0e4d4e74915f792e
|
[
"Apache-2.0"
] | 9
|
2016-02-19T18:56:18.000Z
|
2019-01-13T16:50:05.000Z
|
'''SSL with SNI-support for Python 2.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
To activate it call :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3`.
This can be done in a ``sitecustomize`` module, or at any other time before
your application begins using ``urllib3``, like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
'''
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from socket import _fileobject
import ssl
import select
from cStringIO import StringIO
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
# Search through extensions
dns_name = []
if not SUBJ_ALT_NAME_SUPPORT:
return dns_name
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name != 'subjectAltName':
continue
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if not isinstance(name, SubjectAltName):
continue
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
if component.getName() != 'dNSName':
continue
dns_name.append(str(component.getComponent()))
return dns_name
class fileobject(_fileobject):
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
try:
data = self._sock.recv(left)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self._sock.recv
while True:
try:
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
except OpenSSL.SSL.WantReadError:
continue
break
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.'''
def __init__(self, connection, socket):
self.connection = connection
self.socket = socket
def fileno(self):
return self.socket.fileno()
def makefile(self, mode, bufsize=-1):
return fileobject(self.connection, mode, bufsize)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def sendall(self, data):
return self.connection.sendall(data)
def close(self):
return self.connection.shutdown()
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
ctx.use_certificate_file(certfile)
if keyfile:
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
if ca_certs:
try:
ctx.load_verify_locations(ca_certs, None)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
select.select([sock], [], [])
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
break
return WrappedSocket(cnx, sock)
| 34.685879
| 79
| 0.54337
|
a942c97034ad9187f0ffe94c5d466d8ca7947089
| 16,112
|
py
|
Python
|
python/ccxt/async_support/__init__.py
|
asoedarren/ccxt
|
708a3eb700a003f904c9cab00a4727d959d43bb2
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/__init__.py
|
asoedarren/ccxt
|
708a3eb700a003f904c9cab00a4727d959d43bb2
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/__init__.py
|
asoedarren/ccxt
|
708a3eb700a003f904c9cab00a4727d959d43bb2
|
[
"MIT"
] | 1
|
2021-03-02T10:56:25.000Z
|
2021-03-02T10:56:25.000Z
|
# -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library (Async)"""
# -----------------------------------------------------------------------------
__version__ = '1.42.48'
# -----------------------------------------------------------------------------
from ccxt.async_support.base.exchange import Exchange # noqa: F401
from ccxt.base.decimal_to_precision import decimal_to_precision # noqa: F401
from ccxt.base.decimal_to_precision import TRUNCATE # noqa: F401
from ccxt.base.decimal_to_precision import ROUND # noqa: F401
from ccxt.base.decimal_to_precision import TICK_SIZE # noqa: F401
from ccxt.base.decimal_to_precision import DECIMAL_PLACES # noqa: F401
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS # noqa: F401
from ccxt.base.decimal_to_precision import NO_PADDING # noqa: F401
from ccxt.base.decimal_to_precision import PAD_WITH_ZERO # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import PermissionDenied # noqa: F401
from ccxt.base.errors import AccountSuspended # noqa: F401
from ccxt.base.errors import ArgumentsRequired # noqa: F401
from ccxt.base.errors import BadRequest # noqa: F401
from ccxt.base.errors import BadSymbol # noqa: F401
from ccxt.base.errors import BadResponse # noqa: F401
from ccxt.base.errors import NullResponse # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidAddress # noqa: F401
from ccxt.base.errors import AddressPending # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import OrderImmediatelyFillable # noqa: F401
from ccxt.base.errors import OrderNotFillable # noqa: F401
from ccxt.base.errors import DuplicateOrderId # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RateLimitExceeded # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.base.errors import OnMaintenance # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import error_hierarchy # noqa: F401
from ccxt.async_support.aax import aax # noqa: F401
from ccxt.async_support.acx import acx # noqa: F401
from ccxt.async_support.aofex import aofex # noqa: F401
from ccxt.async_support.bequant import bequant # noqa: F401
from ccxt.async_support.bibox import bibox # noqa: F401
from ccxt.async_support.bigone import bigone # noqa: F401
from ccxt.async_support.binance import binance # noqa: F401
from ccxt.async_support.binanceus import binanceus # noqa: F401
from ccxt.async_support.bit2c import bit2c # noqa: F401
from ccxt.async_support.bitbank import bitbank # noqa: F401
from ccxt.async_support.bitbay import bitbay # noqa: F401
from ccxt.async_support.bitcoincom import bitcoincom # noqa: F401
from ccxt.async_support.bitfinex import bitfinex # noqa: F401
from ccxt.async_support.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.async_support.bitflyer import bitflyer # noqa: F401
from ccxt.async_support.bitforex import bitforex # noqa: F401
from ccxt.async_support.bitget import bitget # noqa: F401
from ccxt.async_support.bithumb import bithumb # noqa: F401
from ccxt.async_support.bitkk import bitkk # noqa: F401
from ccxt.async_support.bitmart import bitmart # noqa: F401
from ccxt.async_support.bitmax import bitmax # noqa: F401
from ccxt.async_support.bitmex import bitmex # noqa: F401
from ccxt.async_support.bitpanda import bitpanda # noqa: F401
from ccxt.async_support.bitso import bitso # noqa: F401
from ccxt.async_support.bitstamp import bitstamp # noqa: F401
from ccxt.async_support.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.async_support.bittrex import bittrex # noqa: F401
from ccxt.async_support.bitvavo import bitvavo # noqa: F401
from ccxt.async_support.bitz import bitz # noqa: F401
from ccxt.async_support.bl3p import bl3p # noqa: F401
from ccxt.async_support.bleutrade import bleutrade # noqa: F401
from ccxt.async_support.braziliex import braziliex # noqa: F401
from ccxt.async_support.btcalpha import btcalpha # noqa: F401
from ccxt.async_support.btcbox import btcbox # noqa: F401
from ccxt.async_support.btcmarkets import btcmarkets # noqa: F401
from ccxt.async_support.btctradeua import btctradeua # noqa: F401
from ccxt.async_support.btcturk import btcturk # noqa: F401
from ccxt.async_support.buda import buda # noqa: F401
from ccxt.async_support.bw import bw # noqa: F401
from ccxt.async_support.bybit import bybit # noqa: F401
from ccxt.async_support.bytetrade import bytetrade # noqa: F401
from ccxt.async_support.cdax import cdax # noqa: F401
from ccxt.async_support.cex import cex # noqa: F401
from ccxt.async_support.chilebit import chilebit # noqa: F401
from ccxt.async_support.coinbase import coinbase # noqa: F401
from ccxt.async_support.coinbaseprime import coinbaseprime # noqa: F401
from ccxt.async_support.coinbasepro import coinbasepro # noqa: F401
from ccxt.async_support.coincheck import coincheck # noqa: F401
from ccxt.async_support.coinegg import coinegg # noqa: F401
from ccxt.async_support.coinex import coinex # noqa: F401
from ccxt.async_support.coinfalcon import coinfalcon # noqa: F401
from ccxt.async_support.coinfloor import coinfloor # noqa: F401
from ccxt.async_support.coingi import coingi # noqa: F401
from ccxt.async_support.coinmarketcap import coinmarketcap # noqa: F401
from ccxt.async_support.coinmate import coinmate # noqa: F401
from ccxt.async_support.coinone import coinone # noqa: F401
from ccxt.async_support.coinspot import coinspot # noqa: F401
from ccxt.async_support.crex24 import crex24 # noqa: F401
from ccxt.async_support.currencycom import currencycom # noqa: F401
from ccxt.async_support.delta import delta # noqa: F401
from ccxt.async_support.deribit import deribit # noqa: F401
from ccxt.async_support.digifinex import digifinex # noqa: F401
from ccxt.async_support.dsx import dsx # noqa: F401
from ccxt.async_support.equos import equos # noqa: F401
from ccxt.async_support.eterbase import eterbase # noqa: F401
from ccxt.async_support.exmo import exmo # noqa: F401
from ccxt.async_support.exx import exx # noqa: F401
from ccxt.async_support.fcoin import fcoin # noqa: F401
from ccxt.async_support.fcoinjp import fcoinjp # noqa: F401
from ccxt.async_support.flowbtc import flowbtc # noqa: F401
from ccxt.async_support.foxbit import foxbit # noqa: F401
from ccxt.async_support.ftx import ftx # noqa: F401
from ccxt.async_support.gateio import gateio # noqa: F401
from ccxt.async_support.gemini import gemini # noqa: F401
from ccxt.async_support.gopax import gopax # noqa: F401
from ccxt.async_support.hbtc import hbtc # noqa: F401
from ccxt.async_support.hitbtc import hitbtc # noqa: F401
from ccxt.async_support.hollaex import hollaex # noqa: F401
from ccxt.async_support.huobijp import huobijp # noqa: F401
from ccxt.async_support.huobipro import huobipro # noqa: F401
from ccxt.async_support.ice3x import ice3x # noqa: F401
from ccxt.async_support.idex import idex # noqa: F401
from ccxt.async_support.independentreserve import independentreserve # noqa: F401
from ccxt.async_support.indodax import indodax # noqa: F401
from ccxt.async_support.itbit import itbit # noqa: F401
from ccxt.async_support.kraken import kraken # noqa: F401
from ccxt.async_support.kucoin import kucoin # noqa: F401
from ccxt.async_support.kuna import kuna # noqa: F401
from ccxt.async_support.lakebtc import lakebtc # noqa: F401
from ccxt.async_support.latoken import latoken # noqa: F401
from ccxt.async_support.lbank import lbank # noqa: F401
from ccxt.async_support.liquid import liquid # noqa: F401
from ccxt.async_support.luno import luno # noqa: F401
from ccxt.async_support.lykke import lykke # noqa: F401
from ccxt.async_support.mercado import mercado # noqa: F401
from ccxt.async_support.mixcoins import mixcoins # noqa: F401
from ccxt.async_support.ndax import ndax # noqa: F401
from ccxt.async_support.novadax import novadax # noqa: F401
from ccxt.async_support.oceanex import oceanex # noqa: F401
from ccxt.async_support.okcoin import okcoin # noqa: F401
from ccxt.async_support.okex import okex # noqa: F401
from ccxt.async_support.paymium import paymium # noqa: F401
from ccxt.async_support.phemex import phemex # noqa: F401
from ccxt.async_support.poloniex import poloniex # noqa: F401
from ccxt.async_support.probit import probit # noqa: F401
from ccxt.async_support.qtrade import qtrade # noqa: F401
from ccxt.async_support.rightbtc import rightbtc # noqa: F401
from ccxt.async_support.ripio import ripio # noqa: F401
from ccxt.async_support.southxchange import southxchange # noqa: F401
from ccxt.async_support.stex import stex # noqa: F401
from ccxt.async_support.surbitcoin import surbitcoin # noqa: F401
from ccxt.async_support.therock import therock # noqa: F401
from ccxt.async_support.tidebit import tidebit # noqa: F401
from ccxt.async_support.tidex import tidex # noqa: F401
from ccxt.async_support.timex import timex # noqa: F401
from ccxt.async_support.upbit import upbit # noqa: F401
from ccxt.async_support.vaultoro import vaultoro # noqa: F401
from ccxt.async_support.vbtc import vbtc # noqa: F401
from ccxt.async_support.vcc import vcc # noqa: F401
from ccxt.async_support.wavesexchange import wavesexchange # noqa: F401
from ccxt.async_support.whitebit import whitebit # noqa: F401
from ccxt.async_support.xbtce import xbtce # noqa: F401
from ccxt.async_support.xena import xena # noqa: F401
from ccxt.async_support.yobit import yobit # noqa: F401
from ccxt.async_support.zaif import zaif # noqa: F401
from ccxt.async_support.zb import zb # noqa: F401
exchanges = [
'aax',
'acx',
'aofex',
'bequant',
'bibox',
'bigone',
'binance',
'binanceus',
'bit2c',
'bitbank',
'bitbay',
'bitcoincom',
'bitfinex',
'bitfinex2',
'bitflyer',
'bitforex',
'bitget',
'bithumb',
'bitkk',
'bitmart',
'bitmax',
'bitmex',
'bitpanda',
'bitso',
'bitstamp',
'bitstamp1',
'bittrex',
'bitvavo',
'bitz',
'bl3p',
'bleutrade',
'braziliex',
'btcalpha',
'btcbox',
'btcmarkets',
'btctradeua',
'btcturk',
'buda',
'bw',
'bybit',
'bytetrade',
'cdax',
'cex',
'chilebit',
'coinbase',
'coinbaseprime',
'coinbasepro',
'coincheck',
'coinegg',
'coinex',
'coinfalcon',
'coinfloor',
'coingi',
'coinmarketcap',
'coinmate',
'coinone',
'coinspot',
'crex24',
'currencycom',
'delta',
'deribit',
'digifinex',
'dsx',
'equos',
'eterbase',
'exmo',
'exx',
'fcoin',
'fcoinjp',
'flowbtc',
'foxbit',
'ftx',
'gateio',
'gemini',
'gopax',
'hbtc',
'hitbtc',
'hollaex',
'huobijp',
'huobipro',
'ice3x',
'idex',
'independentreserve',
'indodax',
'itbit',
'kraken',
'kucoin',
'kuna',
'lakebtc',
'latoken',
'lbank',
'liquid',
'luno',
'lykke',
'mercado',
'mixcoins',
'ndax',
'novadax',
'oceanex',
'okcoin',
'okex',
'paymium',
'phemex',
'poloniex',
'probit',
'qtrade',
'rightbtc',
'ripio',
'southxchange',
'stex',
'surbitcoin',
'therock',
'tidebit',
'tidex',
'timex',
'upbit',
'vaultoro',
'vbtc',
'vcc',
'wavesexchange',
'whitebit',
'xbtce',
'xena',
'yobit',
'zaif',
'zb',
]
base = [
'Exchange',
'exchanges',
'decimal_to_precision',
]
__all__ = base + errors.__all__ + exchanges
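# Illustrative usage sketch (not part of the original module): how the async
# exchange classes imported above are typically driven. The exchange ("kraken"),
# the symbol, and the field accessed are arbitrary examples; only the generic
# ccxt async pattern (instantiate, await a fetch, then close) is assumed.
#
# import asyncio
# import ccxt.async_support as ccxt
#
# async def main():
#     exchange = ccxt.kraken()
#     try:
#         ticker = await exchange.fetch_ticker('BTC/USD')
#         print(ticker['last'])
#     finally:
#         await exchange.close()
#
# asyncio.run(main())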
avg_line_length: 50.826498 | max_line_length: 86 | alphanum_fraction: 0.555549

hexsha: 080bf0ec1405552a72a0c0b9f6f98a6f664e2d78 | size: 1798 | ext: py | lang: Python
max_stars_repo_path: examples/linear_model/plot_ransac.py | max_stars_repo_name: Giannos-G/scikit-learn_modified | max_stars_repo_head_hexsha: 03df71bbea1bcb3423262b711191552420422cda | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2022-03-03T23:54:50.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-03T23:54:50.000Z
max_issues_repo_path: examples/linear_model/plot_ransac.py | max_issues_repo_name: Giannos-G/scikit-learn_modified | max_issues_repo_head_hexsha: 03df71bbea1bcb3423262b711191552420422cda | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: examples/linear_model/plot_ransac.py | max_forks_repo_name: Giannos-G/scikit-learn_modified | max_forks_repo_head_hexsha: 03df71bbea1bcb3423262b711191552420422cda | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
lr = linear_model.LinearRegression()
lr.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
ransac = linear_model.RANSACRegressor()
ransac.fit(X, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(X.min(), X.max())[:, np.newaxis]
line_y = lr.predict(line_X)
line_y_ransac = ransac.predict(line_X)
# Compare estimated coefficients
print("Estimated coefficients (true, linear regression, RANSAC):")
print(coef, lr.coef_, ransac.estimator_.coef_)
lw = 2
plt.scatter(X[inlier_mask], y[inlier_mask], color='yellowgreen', marker='.',
label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask], color='gold', marker='.',
label='Outliers')
plt.plot(line_X, line_y, color='navy', linewidth=lw, label='Linear regressor')
plt.plot(line_X, line_y_ransac, color='cornflowerblue', linewidth=lw,
label='RANSAC regressor')
plt.legend(loc='lower right')
plt.xlabel("Input")
plt.ylabel("Response")
#plt.show()
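# Optional follow-up (an addition to the example, not part of the original
# script): report how many samples RANSAC kept as inliers and compare R^2 on the
# full data set; `inlier_mask_` (already used above) and the estimators' standard
# `score` method are the only scikit-learn API relied on here.
print("RANSAC inliers: %d of %d samples" % (inlier_mask.sum(), n_samples))
print("R^2 on all data - OLS: %.3f, RANSAC: %.3f" % (lr.score(X, y), ransac.score(X, y)))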
avg_line_length: 29.966667 | max_line_length: 78 | alphanum_fraction: 0.669633

hexsha: 98e24edb0ea0655574ccc71387a647e324d581c9 | size: 6102 | ext: py | lang: Python
max_stars_repo_path: src/prioritizer/utils/feature_util.py | max_stars_repo_name: timhannifan/prioritizer | max_stars_repo_head_hexsha: 892efb7dcd95a1b60e0a8b433f19522e2bf88cb4 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/prioritizer/utils/feature_util.py | max_issues_repo_name: timhannifan/prioritizer | max_issues_repo_head_hexsha: 892efb7dcd95a1b60e0a8b433f19522e2bf88cb4 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-06-01T23:44:36.000Z | max_issues_repo_issues_event_max_datetime: 2021-06-01T23:44:36.000Z
max_forks_repo_path: src/prioritizer/utils/feature_util.py | max_forks_repo_name: timhannifan/prioritizer | max_forks_repo_head_hexsha: 892efb7dcd95a1b60e0a8b433f19522e2bf88cb4 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import datetime
import shutil
import sys
import random
from contextlib import contextmanager
import functools
import operator
import tempfile
import sqlalchemy
import pandas as pd
import numpy
from sqlalchemy.orm import sessionmaker
from prioritizer.components.schemas.schema import Model
from prioritizer.utils.structs import FeatureNameList
def str_in_sql(values):
return ",".join(map(lambda x: "'{}'".format(x), values))
def feature_list(feature_dictionary):
"""Convert a feature dictionary to a sorted list
Args: feature_dictionary (dict)
Returns: sorted list of feature names
"""
if not feature_dictionary:
return FeatureNameList()
return FeatureNameList(sorted(
functools.reduce(
operator.concat,
(feature_dictionary[key] for key in feature_dictionary.keys()),
)
))
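# Illustrative sketch (not in the original module): feature_list flattens the
# per-table feature name lists and sorts the result, e.g.
#   feature_list({"t1": ["f2", "f1"], "t2": ["f3"]})
# returns a FeatureNameList equivalent to ["f1", "f2", "f3"]. The table and
# feature names here are made up for the example.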
def convert_string_column_to_date(column):
return [datetime.datetime.strptime(date, "%Y-%m-%d").date() for date in column]
def create_features_table(table_number, table, engine):
engine.execute(
"""
create table features.features{} (
entity_id int, as_of_date date, f{} int, f{} int
)
""".format(
table_number, (table_number * 2) + 1, (table_number * 2) + 2
)
)
for row in table:
engine.execute(
"""
insert into features.features{} values (%s, %s, %s, %s)
""".format(
table_number
),
row,
)
def create_entity_date_df(
labels,
states,
as_of_dates,
state_one,
state_two,
label_name,
label_type,
label_timespan,
):
""" This function makes a pandas DataFrame that mimics the entity-date table
for testing against.
"""
0, "2016-02-01", "1 month", "booking", "binary", 0
labels_table = pd.DataFrame(
labels,
columns=[
"entity_id",
"as_of_date",
"label_timespan",
"label_name",
"label_type",
"label",
],
)
states_table = pd.DataFrame(
states, columns=["entity_id", "as_of_date", "state_one", "state_two"]
).set_index(["entity_id", "as_of_date"])
as_of_dates = [date.date() for date in as_of_dates]
labels_table = labels_table[labels_table["label_name"] == label_name]
labels_table = labels_table[labels_table["label_type"] == label_type]
labels_table = labels_table[labels_table["label_timespan"] == label_timespan]
labels_table = labels_table.join(other=states_table, on=("entity_id", "as_of_date"))
labels_table = labels_table[labels_table["state_one"] & labels_table["state_two"]]
ids_dates = labels_table[["entity_id", "as_of_date"]]
ids_dates = ids_dates.sort_values(["entity_id", "as_of_date"])
ids_dates["as_of_date"] = [
datetime.datetime.strptime(date, "%Y-%m-%d").date()
for date in ids_dates["as_of_date"]
]
ids_dates = ids_dates[ids_dates["as_of_date"].isin(as_of_dates)]
print(ids_dates)
return ids_dates.reset_index(drop=True)
def NamedTempFile():
if sys.version_info >= (3, 0, 0):
return tempfile.NamedTemporaryFile(mode="w+", newline="")
else:
return tempfile.NamedTemporaryFile()
@contextmanager
def TemporaryDirectory():
name = tempfile.mkdtemp()
try:
yield name
finally:
shutil.rmtree(name)
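# Minimal usage sketch for the two temp-file helpers above (standard library
# only; the file contents written here are arbitrary):
#
#   with TemporaryDirectory() as workdir:
#       print("scratch dir:", workdir)      # removed when the block exits
#   with NamedTempFile() as f:
#       f.write("entity_id,as_of_date\n")   # newline="" keeps csv-friendly output
#       f.flush()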
def fake_labels(length):
return numpy.array([random.choice([True, False]) for i in range(0, length)])
class MockTrainedModel(object):
def predict_proba(self, dataset):
return numpy.random.rand(len(dataset), len(dataset))
def fake_trained_model(project_path, model_storage_engine, db_engine):
"""Creates and stores a trivial trained model
Args:
project_path (string) a desired fs/s3 project path
model_storage_engine (triage.storage.ModelStorageEngine)
db_engine (sqlalchemy.engine)
Returns:
(int) model id for database retrieval
"""
trained_model = MockTrainedModel()
model_storage_engine.write(trained_model, "abcd")
session = sessionmaker(db_engine)()
db_model = Model(model_hash="abcd")
session.add(db_model)
session.commit()
return trained_model, db_model.model_id
def assert_index(engine, table, column):
"""Assert that a table has an index on a given column
Does not care which position the column is in the index
Modified from https://www.gab.lc/articles/index_on_id_with_postgresql
Args:
engine (sqlalchemy.engine) a database engine
table (string) the name of a table
column (string) the name of a column
"""
query = """
SELECT 1
FROM pg_class t
JOIN pg_index ix ON t.oid = ix.indrelid
JOIN pg_class i ON i.oid = ix.indexrelid
JOIN pg_attribute a ON a.attrelid = t.oid
WHERE
a.attnum = ANY(ix.indkey) AND
t.relkind = 'r' AND
t.relname = '{table_name}' AND
a.attname = '{column_name}'
""".format(
table_name=table, column_name=column
)
num_results = len([row for row in engine.execute(query)])
assert num_results >= 1
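# Hypothetical call (engine, table, and column names are examples only): after
# building a features table, one might verify its index with
#   assert_index(engine, "features0", "entity_id")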
def create_dense_state_table(db_engine, table_name, data):
db_engine.execute(
"""create table {} (
entity_id int,
state text,
start_time timestamp,
end_time timestamp
)""".format(
table_name
)
)
for row in data:
db_engine.execute(
"insert into {} values (%s, %s, %s, %s)".format(table_name), row
)
def create_binary_outcome_events(db_engine, table_name, events_data):
db_engine.execute(
"create table events (entity_id int, outcome_date date, outcome bool)"
)
for event in events_data:
db_engine.execute(
"insert into {} values (%s, %s, %s::bool)".format(table_name), event
)
def retry_if_db_error(exception):
return isinstance(exception, sqlalchemy.exc.OperationalError)
avg_line_length: 28.25 | max_line_length: 88 | alphanum_fraction: 0.638479

hexsha: 688197ab0dd19e25517e5b2f5f1b0a6de3f636f7 | size: 5060 | ext: py | lang: Python
max_stars_repo_path: tensor2tensor/data_generators/all_problems.py | max_stars_repo_name: koz4k/tensor2tensor | max_stars_repo_head_hexsha: 859306bb577ec1119e6d9fbdea4ac0743a7fe927 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2020-03-02T13:49:11.000Z | max_stars_repo_stars_event_max_datetime: 2020-06-18T09:48:35.000Z
max_issues_repo_path: tensor2tensor/data_generators/all_problems.py | max_issues_repo_name: angelfish91/tensor2tensor | max_issues_repo_head_hexsha: 168928d29cdd887fd6cdd5d5ad35181bef614154 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2019-01-21T10:57:47.000Z | max_issues_repo_issues_event_max_datetime: 2019-01-21T10:57:47.000Z
max_forks_repo_path: tensor2tensor/data_generators/all_problems.py | max_forks_repo_name: angelfish91/tensor2tensor | max_forks_repo_head_hexsha: 168928d29cdd887fd6cdd5d5ad35181bef614154 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 3 | max_forks_repo_forks_event_min_datetime: 2019-02-10T11:12:30.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-23T20:43:48.000Z
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imports for problem modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import re
MODULES = [
"tensor2tensor.data_generators.algorithmic",
"tensor2tensor.data_generators.algorithmic_math",
"tensor2tensor.data_generators.allen_brain",
"tensor2tensor.data_generators.audio",
"tensor2tensor.data_generators.babi_qa",
"tensor2tensor.data_generators.bair_robot_pushing",
"tensor2tensor.data_generators.celeba",
"tensor2tensor.data_generators.celebahq",
"tensor2tensor.data_generators.cifar",
"tensor2tensor.data_generators.cipher",
"tensor2tensor.data_generators.cnn_dailymail",
"tensor2tensor.data_generators.cola",
"tensor2tensor.data_generators.common_voice",
"tensor2tensor.data_generators.desc2code",
"tensor2tensor.data_generators.fsns",
"tensor2tensor.data_generators.function_docstring",
"tensor2tensor.data_generators.gene_expression",
"tensor2tensor.data_generators.google_robot_pushing",
"tensor2tensor.data_generators.gym_env",
"tensor2tensor.data_generators.ice_parsing",
"tensor2tensor.data_generators.imagenet",
"tensor2tensor.data_generators.image_lsun",
"tensor2tensor.data_generators.imdb",
"tensor2tensor.data_generators.lambada",
"tensor2tensor.data_generators.librispeech",
"tensor2tensor.data_generators.lm1b",
"tensor2tensor.data_generators.lm1b_imdb",
"tensor2tensor.data_generators.lm1b_mnli",
"tensor2tensor.data_generators.mathematical_language_understanding",
"tensor2tensor.data_generators.mnist",
"tensor2tensor.data_generators.mrpc",
"tensor2tensor.data_generators.mscoco",
"tensor2tensor.data_generators.multinli",
"tensor2tensor.data_generators.paraphrase_ms_coco",
"tensor2tensor.data_generators.program_search",
"tensor2tensor.data_generators.ocr",
"tensor2tensor.data_generators.pointer_generator_word",
"tensor2tensor.data_generators.problem_hparams",
"tensor2tensor.data_generators.ptb",
"tensor2tensor.data_generators.qnli",
"tensor2tensor.data_generators.quora_qpairs",
"tensor2tensor.data_generators.rte",
"tensor2tensor.data_generators.scitail",
"tensor2tensor.data_generators.snli",
"tensor2tensor.data_generators.stanford_nli",
"tensor2tensor.data_generators.style_transfer",
"tensor2tensor.data_generators.squad",
"tensor2tensor.data_generators.sst_binary",
"tensor2tensor.data_generators.subject_verb_agreement",
"tensor2tensor.data_generators.timeseries",
"tensor2tensor.data_generators.translate_encs",
"tensor2tensor.data_generators.translate_ende",
"tensor2tensor.data_generators.translate_enet",
"tensor2tensor.data_generators.translate_enfr",
"tensor2tensor.data_generators.translate_enid",
"tensor2tensor.data_generators.translate_enmk",
"tensor2tensor.data_generators.translate_envi",
"tensor2tensor.data_generators.translate_enzh",
"tensor2tensor.data_generators.video_generated",
"tensor2tensor.data_generators.vqa",
"tensor2tensor.data_generators.wiki",
"tensor2tensor.data_generators.wiki_lm",
"tensor2tensor.data_generators.wiki_revision",
"tensor2tensor.data_generators.wiki_multi_problems",
"tensor2tensor.data_generators.wikisum.wikisum",
"tensor2tensor.data_generators.wikitext103",
"tensor2tensor.data_generators.wsj_parsing",
"tensor2tensor.data_generators.wnli",
]
ALL_MODULES = list(MODULES)
def _is_import_err_msg(err_str, module):
module_pattern = "(.)?".join(["(%s)?" % m for m in module.split(".")])
return re.match("^No module named (')?%s(')?$" % module_pattern, err_str)
def _handle_errors(errors):
"""Log out and possibly reraise errors during import."""
if not errors:
return
log_all = True # pylint: disable=unused-variable
err_msg = "Skipped importing {num_missing} data_generators modules."
print(err_msg.format(num_missing=len(errors)))
for module, err in errors:
err_str = str(err)
if not _is_import_err_msg(err_str, module):
print("From module %s" % module)
raise err
if log_all:
print("Did not import module: %s; Cause: %s" % (module, err_str))
def import_modules(modules):
errors = []
for module in modules:
try:
importlib.import_module(module)
except ImportError as error:
errors.append((module, error))
_handle_errors(errors)
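# Plausible entry point (a sketch mirroring the pattern this module is written
# for; no API is assumed beyond the names defined above):
#
#   from tensor2tensor.data_generators import all_problems
#   all_problems.import_modules(all_problems.ALL_MODULES)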
avg_line_length: 39.84252 | max_line_length: 75 | alphanum_fraction: 0.767984

hexsha: 996b44a5b748e5d482b9ccf5b1261ad9d0b24e32 | size: 409 | ext: py | lang: Python
max_stars_repo_path: backend/enrollment_33200/wsgi.py | max_stars_repo_name: crowdbotics-apps/enrollment-33200 | max_stars_repo_head_hexsha: 2d925c818afecf0b4a368a2130b9c63783625757 | max_stars_repo_licenses: ["FTL", "AML", "RSA-MD"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: backend/enrollment_33200/wsgi.py | max_issues_repo_name: crowdbotics-apps/enrollment-33200 | max_issues_repo_head_hexsha: 2d925c818afecf0b4a368a2130b9c63783625757 | max_issues_repo_licenses: ["FTL", "AML", "RSA-MD"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: backend/enrollment_33200/wsgi.py | max_forks_repo_name: crowdbotics-apps/enrollment-33200 | max_forks_repo_head_hexsha: 2d925c818afecf0b4a368a2130b9c63783625757 | max_forks_repo_licenses: ["FTL", "AML", "RSA-MD"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
"""
WSGI config for enrollment_33200 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'enrollment_33200.settings')
application = get_wsgi_application()
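# Deployment sketch (assumes a WSGI server such as gunicorn is installed; Django
# itself does not require a specific one):
#
#   gunicorn enrollment_33200.wsgi:application --bind 0.0.0.0:8000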
avg_line_length: 24.058824 | max_line_length: 78 | alphanum_fraction: 0.794621