Dataset schema (column, type, value summary):

| column | type | summary |
|---|---|---|
| seq_id | string | lengths 7 to 11 |
| text | string | lengths 156 to 1.7M |
| repo_name | string | lengths 7 to 125 |
| sub_path | string | lengths 4 to 132 |
| file_name | string | lengths 4 to 77 |
| file_ext | string | 6 classes |
| file_size_in_byte | int64 | 156 to 1.7M |
| program_lang | string | 1 class |
| lang | string | 38 classes |
| doc_type | string | 1 class |
| stars | int64 | 0 to 24.2k, with nulls |
| dataset | string | 1 class |
| pt | string | 1 class |
seq_id: 32102291979
class Solution:
# 1. Brute force
def longestPalindrome1(self, s: str) -> str:
if len(s) <= 1:
return s
count = 0
res = ""
for i in range(len(s)):
for j in range(i + 1, len(s)):
if s[i:j + 1] == s[i:j + 1][::-1]:
if j - i > count:
res = s[i:j + 1]
count = j - i
if res == "":
res = s[0]
return res
# 2. Expand around center
def longestPalindrome2(self, s: str) -> str:
str_len = len(s)
if str_len <= 1:
return s
start = 0
end = 0
for i in range(str_len):
len1 = self.expandAroundCenter(s, i, i)
len2 = self.expandAroundCenter(s, i, i + 1)
len3 = max(len1, len2)
if len3 > end - start:
start = i - int((len3 - 1) / 2)
end = i + int(len3 / 2) + 1
return s[start: end]
def expandAroundCenter(self, s, left, right):
while left >= 0 and right < len(s) and s[left] == s[right]:
left -= 1
right += 1
return right - left - 1
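# 3. Dynamic programming: dp[i][j] is True when s[i:j + 1] is a palindrome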
def longestPalindrome3(self, s: str) -> str:
str_len = len(s)
if str_len <= 1:
return s
left = 0
right = 0
dp = [[False] * str_len for _ in range(str_len)]
for i in range(str_len - 1, -1, -1):  # fill i in decreasing order so dp[i + 1][j - 1] is already computed
dp[i][i] = True
for j in range(i + 1, str_len):
dp[i][j] = s[i] == s[j] and (j-i < 3 or dp[i+1][j-1])
if dp[i][j] and (j - i > right - left):
left = i
right = j
return s[left:right+1]
s = Solution()
print(s.longestPalindrome3("bb"))
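# Hedged sanity-check sketch (not part of the original file): all three methods should agree
# on the length of the longest palindromic substring, even if they return different ones.
for case in ["babad", "cbbd", "forgeeksskeegfor", "a"]:
    answers = [s.longestPalindrome1(case), s.longestPalindrome2(case), s.longestPalindrome3(case)]
    assert all(a == a[::-1] for a in answers)
    assert len(set(len(a) for a in answers)) == 1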

repo_name: Eleanoryuyuyu/LeetCode | sub_path: python/Dynamic Programming/5. Longest Palindromic Substring.py | file_name: 5. Longest Palindromic Substring.py | file_ext: py | file_size_in_byte: 1,759 | program_lang: python | lang: en | doc_type: code | stars: 3 | dataset: github-code | pt: 6

seq_id: 19399859119
# Target Sum
# https://leetcode-cn.com/leetbook/read/queue-stack/ga4o2/
from typing import List
# import common.arrayCommon as Array  # unused repo-local import; not needed to run this file
class Solution:
def findTargetSumWays(self, nums: List[int], S: int) -> int:
print(nums)
s = sum(nums)
if s < S or S < -s:
return 0
n = len(nums)
r = s * 2 + 1
dp = [[0] * r for _ in range(n)]
if nums[0] == 0:
dp[0][s] = 2
else:
dp[0][s - nums[0]] = 1
dp[0][s + nums[0]] = 1
for i in range(1, n):
for j in range(0, r):
if j - nums[i] >= 0:
dp[i][j] += dp[i - 1][j - nums[i]]
if j + nums[i] < r:
dp[i][j] += dp[i - 1][j + nums[i]]
print(dp)
return dp[-1][s + S]
nums = [0, 0, 0, 0, 0, 0, 0, 0, 1]
S = 1
r = Solution().findTargetSumWays(nums, S)
print(r)
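# Hedged check (not part of the original file): each of the eight zeros can take either sign
# without changing the sum, and the trailing 1 must take "+", so the expected count is 2 ** 8.
assert r == 256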
# class Solution:
# multiplier = [1, -1]
# ans = 0
#
# def findTargetSumWays(self, nums: List[int], S: int) -> int:
# self.ans = 0
# self.search(S, nums, 0, 0)
# return self.ans
#
# def search(self, target, nums, i, cur):
# if i == len(nums):
# if cur == target:
# self.ans += 1
# return
# for each in self.multiplier:
# c = cur + each * nums[i]
# self.search(target, nums, i + 1, c)

repo_name: Yigang0622/LeetCode | sub_path: findTargetSumWays.py | file_name: findTargetSumWays.py | file_ext: py | file_size_in_byte: 1,411 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6

seq_id: 6176179693
import os, time, sys
import matplotlib.pyplot as plt
import itertools
import pickle
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import torchvision.utils as vutils
from torchvision.utils import save_image
EPOCH=20
LR_G=0.0002
LR_D=0.0002
IMG_SIZE=28
LATENT_DIM=100
BATCHSIZE=32
DATA_ROOT='./mnist'
DOWNLOAD=False
dataloader=data.DataLoader(
datasets.MNIST(
root=DATA_ROOT,
train=True,
transform=transforms.ToTensor(),
download=DOWNLOAD
),
batch_size=BATCHSIZE,
shuffle=True
)
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.layer1=nn.Sequential(
nn.Linear(LATENT_DIM,128),
nn.LeakyReLU(0.2)
)
self.layer2=nn.Sequential(
nn.Linear(128, 256),
nn.LeakyReLU(0.2)
)
self.layer3 = nn.Sequential(
nn.Linear(256, 512),
nn.LeakyReLU(0.2)
)
self.layer4 = nn.Sequential(
nn.Linear(512, IMG_SIZE*IMG_SIZE),
nn.Tanh()
)
def forward(self,x):
x = x.view(x.size(0),-1)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.view(x.size(0),1,IMG_SIZE,IMG_SIZE)
return x
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.layer1=nn.Sequential(
nn.Linear(IMG_SIZE*IMG_SIZE,512),
nn.LeakyReLU(0.2)
)
self.layer2 = nn.Sequential(
nn.Linear(512, 256),
nn.LeakyReLU(0.2)
)
self.layer3 = nn.Sequential(
nn.Linear(256, 128),
nn.LeakyReLU(0.2)
)
self.layer4 = nn.Sequential(
nn.Linear(128, 1),
nn.Sigmoid()
)
def forward(self,x):
x=x.view(-1,IMG_SIZE*IMG_SIZE)
x=self.layer1(x)
x=self.layer2(x)
x=self.layer3(x)
x=self.layer4(x)
return x
G=Generator()
D=Discriminator()
real_label=1
fake_label=0
optimizer_G=optim.Adam(G.parameters(),lr=LR_G)
optimizer_D=optim.Adam(D.parameters(),lr=LR_D)
loss_func=nn.BCELoss()
loss_G=[]
loss_D=[]
iters=0
for epoch in range(EPOCH):
for step,(x,y)in enumerate(dataloader):
optimizer_D.zero_grad()
label=torch.full((BATCHSIZE,),real_label,dtype=torch.float)
# print(x.size())
output=D(x).view(-1)
# print(output.size())
# print(label.size())
loss_real=loss_func(output,label)
loss_real.backward()
noise=torch.randn(BATCHSIZE,LATENT_DIM)
label=torch.full((BATCHSIZE,),fake_label,dtype=torch.float)
input=G(noise)
output=D(input.detach()).view(-1)
loss_fake=loss_func(output,label)
loss_fake.backward()
loss_d=loss_real+loss_fake
optimizer_D.step()
optimizer_G.zero_grad()
label.fill_(real_label)
output=D(input).view(-1)
loss_g=loss_func(output,label)
loss_g.backward()
optimizer_G.step()
if step % 300 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f'
% (epoch + 1, EPOCH, step, len(dataloader),
loss_d.item(), loss_g.item()))
# Save Losses for plotting later
loss_G.append(loss_g.item())
loss_D.append(loss_d.item())
if (iters % 500 == 0) or ((epoch == EPOCH - 1) and (step == len(dataloader) - 1)):
with torch.no_grad():
test_z = torch.randn(BATCHSIZE, LATENT_DIM)
generated = G(test_z)
save_image(generated.view(generated.size(0), 1, 28, 28), './img4/img_' + str(iters) + '.png')
iters += 1
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(loss_G, label="G")
plt.plot(loss_D, label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
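# Hedged follow-up sketch (not in the original script): draw a grid of samples from the trained
# generator with gradients disabled, reusing the ./img4/ output directory from above.
G.eval()
with torch.no_grad():
    final_samples = G(torch.randn(16, LATENT_DIM))
save_image(final_samples, './img4/final_samples.png', nrow=4)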

repo_name: hqyu/DL_GAN | sub_path: GAN.py | file_name: GAN.py | file_ext: py | file_size_in_byte: 4,169 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 33805613545
from typing import Optional, Dict
from fastapi import WebSocket, APIRouter, Cookie, status, Depends
from ws_chat_py.engines.person_engine import PersonEngine
ws_router = APIRouter()
@ws_router.websocket("/ws")
async def ws_chat_handler(websocket: WebSocket):
await websocket.accept()
authorized = check_chat_auth(websocket.cookies)
if not authorized:
await websocket.close()
return
new_person = PersonEngine.create_person(token=authorized, name='name')
while True:
try:
txt = await websocket.receive_json()
resp = {'msg': txt}
await websocket.send_json(resp)
except Exception as e:
await websocket.close()
break
def check_chat_auth(cookies: Dict[str, str]) -> Optional[str]:
chat_auth_token = cookies.get('chat_auth_token')
if not chat_auth_token:
return None
return chat_auth_token
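# Hedged wiring sketch (not part of the original module): how ws_router would typically be
# mounted on an application.
from fastapi import FastAPI

app = FastAPI()
app.include_router(ws_router)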

repo_name: backcrawler/ws_chat_py | sub_path: ws_chat_py/handlers/ws_handlers.py | file_name: ws_handlers.py | file_ext: py | file_size_in_byte: 925 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 39399750947
import logging
import os
import argparse
import json
from itertools import chain
from typing import Dict, List, Tuple, Any
from functools import partial
import s3fs
from hydra import compose, initialize, core
from omegaconf import OmegaConf
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Nopep8
import tensorflow as tf
from base_trainer import BaseTrainer
# ------------------------------- Trainer class ------------------------------ #
class BaselineTrainer(BaseTrainer):
"""
This class is used to train an image classification model.
"""
def __init__(self,
hyperparameters: Dict[str, Any],
config: Dict[str, Any],
job_name: str,
train_dataset: tf.data.Dataset,
val_dataset: tf.data.Dataset,
train_class_weights: Dict[str, float],
distributed: bool,
strategy: tf.distribute.Strategy,
model_dir: str,
logger: logging.Logger) -> None:
"""
Constructor for the BaselineTrainer class.
Parameters
----------
hyperparameters : Dict[str, Any]
A dictionary containing the hyperparameters for model training.
config : Dict[str, Any]
A dictionary containing the configuration for model training.
job_name : str
The name of the job.
train_dataset : tf.data.Dataset
A tf.data.Dataset object that contains the training data.
val_dataset : tf.data.Dataset
The validation data is recommended to be a repeated dataset.
train_class_weights : Dict[str, float]
Class weights for the training data.
distributed : bool
A boolean that specifies whether to use distributed training.
strategy : tf.distribute.Strategy
A tf.distribute.Strategy object that specifies the strategy for distributed training.
model_dir : str
Path to the directory where the model will be saved.
logger : logging.Logger
A logger object.
Returns
-------
None
"""
super().__init__(
hyperparameters=hyperparameters,
config=config,
job_name=job_name,
train_dataset=train_dataset,
val_dataset=val_dataset,
train_class_weights=train_class_weights,
distributed=distributed,
strategy=strategy,
model_dir=model_dir,
logger=logger
)
def _create_model(self) -> tf.keras.Model:
"""
Function that creates the compiled model.
Returns
-------
tf.keras.Model
The compiled model.
"""
# Default convolutional layer
DefaultConv2D = partial(
tf.keras.layers.Conv2D,
kernel_size=self.hyperparameters['conv2d_kernel_size'],
padding='same',
activation='linear',
use_bias=False, # Not needed if batch normalization is used
kernel_initializer='he_normal',
kernel_regularizer=tf.keras.regularizers.l2()
)
# Default dense layer
DefaultDense = partial(
tf.keras.layers.Dense,
activation='linear',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=tf.keras.regularizers.l2()
)
# ---------------------------- Model architecture ---------------------------- #
# Data augmentation
data_augmentation = AugmentationModel(aug_params={
'RandomRotation': {'factor': 0.5},
'RandomContrast': {'factor': 0.3}
}).build_augmented_model()
inputs = tf.keras.Input(shape=(self.config['image_size'], self.config['image_size'], self.config['num_channels']), name='input_layer')
x = data_augmentation(inputs)
x = tf.keras.layers.Rescaling(scale=1.0/255.0, name='rescaling_layer')(x)
for i in range(5):
x = DefaultConv2D(filters=self.hyperparameters[f'conv2d_num_filters_block_{i}'], name=f'conv2d_{i}')(x)
x = tf.keras.layers.BatchNormalization(name=f'conv2d_batch_norm_{i}')(x)
x = tf.keras.layers.Activation('relu', name=f'conv2d_relu_{i}')(x)
x = tf.keras.layers.MaxPooling2D(pool_size=self.hyperparameters['conv2d_pooling_size'], name=f'conv2d_pooling_{i}')(x)
x = tf.keras.layers.Flatten(name='flatten_layer')(x)
for i in range(3):
x = DefaultDense(units=self.hyperparameters[f'dense_num_units_{i}'], name=f'dense_{i}')(x)
x = tf.keras.layers.BatchNormalization(name=f'dense_batch_norm_{i}')(x)
# Dropout before activation is the same as after for 'RELU' based on https://sebastianraschka.com/faq/docs/dropout-activation.html
x = tf.keras.layers.Dropout(rate=self.hyperparameters['dense_dropout_rate'], name=f'dense_drop_out_{i}')(x)
x = tf.keras.layers.Activation('relu', name=f'dense_relu_{i}')(x)
outputs = tf.keras.layers.Dense(units=self.config['num_classes'], activation='linear', name='output_layer')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
# ---------------------------------- Compile --------------------------------- #
optimizer = self._create_optimizer(learning_rate=self.hyperparameters['opt_learning_rate'])
loss_fn = self._create_loss_fn()
metrics = self._create_metrics()
model.compile(
optimizer=optimizer,
loss=loss_fn,
metrics=metrics
)
return model
def fit(self) -> None:
"""
Function that fits the models.
Returns
-------
None
"""
# ------------------------------- Create model ------------------------------- #
if self.distributed:
with self.strategy.scope():
model = self._create_model()
else:
model = self._create_model()
# --------------------------------- Callbacks -------------------------------- #
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=3,
mode='min',
restore_best_weights=True
)
back_and_restore = tf.keras.callbacks.BackupAndRestore(
backup_dir=os.path.join(os.getcwd(), 'backup')
)
callbacks = [early_stopping, back_and_restore]
if self.distributed:
tensorboard = tf.keras.callbacks.TensorBoard(
log_dir=f's3://{self.config["s3_bucket"]}/{self.config["s3_key"]}/tensorboard_logs/{self.job_name}'
)
callbacks.append(tensorboard)
# ------------------------------------ Fit ----------------------------------- #
model.fit(
x=self.train_dataset,
epochs=self.hyperparameters['fit_epochs'],
validation_data=self.val_dataset,
callbacks=callbacks,
# Number of steps (batches of samples) to draw from before stopping validation
validation_steps=self.hyperparameters['fit_validation_steps'],
class_weight=self.train_class_weights
)
self.logger.info(f'Best validation loss: {early_stopping.best}')
# ---------------------------------- Save model --------------------------------- #
if self.distributed:
model_dir = self._create_model_dir(
self.model_dir,
self.strategy.cluster_resolver.task_type,
self.strategy.cluster_resolver.task_id
)
model.save(os.path.join(model_dir, '00000000'))
else:
model.save(os.path.join(self.model_dir, '00000000'))
return None
if __name__ == '__main__':
from custom_utils import get_logger, parser, add_additional_args, AugmentationModel, load_datasets
# ---------------------------------- Set up ---------------------------------- #
logger = get_logger(name='baseline_training')
# Hydra
core.global_hydra.GlobalHydra.instance().clear()
initialize(version_base='1.2', config_path='config', job_name='baseline_training')
config = OmegaConf.to_container(compose(config_name='main'), resolve=True)
# Parse hyperparameters specified by SageMaker
filters = {f'conv2d_num_filters_block_{i}': int for i in range(0, 5)}
dense_layer_units = {f'dense_num_units_{i}': int for i in range(0, 3)}
loss_hyperparams = {'loss_alpha': float, 'loss_gamma': float}
other_hyperparams = {
'conv2d_pooling_size': int,
'conv2d_kernel_size': int,
'dense_dropout_rate': float,
'opt_learning_rate': float,
'opt_adam_beta_1': float,
'opt_adam_beta_2': float,
'opt_clipnorm': float,
'fit_epochs': int,
'use_focal_loss': int
}
additional_args = dict(chain(
filters.items(),
dense_layer_units.items(),
loss_hyperparams.items(),
other_hyperparams.items()
))
args = add_additional_args(parser_func=parser, additional_args=additional_args)()
job_name = args.training_env['job_name']
# --------------------------------- Load data -------------------------------- #
if args.test_mode:
distributed = False
strategy = None
else:
distributed = True
strategy = tf.distribute.MultiWorkerMirroredStrategy()
if not distributed:
# Sample three batches from the training dataset
train_dataset = load_datasets(
dir=args.train,
batch_size=config['batch_size'],
val=False
).take(3)
# Sample three batches from the validation dataset
val_dataset = load_datasets(
dir=args.val,
batch_size=config['batch_size'],
val=True
).take(3)
else:
tf_config = json.loads(os.environ['TF_CONFIG'])
num_workers = len(tf_config['cluster']['worker'])
global_batch_size = config['batch_size'] * num_workers
# Load the training dataset
train_dataset = load_datasets(
dir=args.train,
batch_size=global_batch_size,
val=False
)
# Load the validation dataset
val_dataset = load_datasets(
dir=args.val,
batch_size=global_batch_size,
val=True
)
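# Hedged illustration (hosts are placeholders): on SageMaker, TF_CONFIG looks roughly like
# {"cluster": {"worker": ["algo-1:2222", "algo-2:2222"]}, "task": {"type": "worker", "index": 0}},
# so num_workers above is simply the length of the "worker" list.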
# Load training set weights
fs = s3fs.S3FileSystem()
with fs.open(f's3://{config["s3_bucket"]}/{config["s3_key"]}/input-data/train_weights.json', 'rb') as f:
train_class_weights = json.load(f)
# Convert all keys to integers
train_class_weights = {int(k): v for k, v in train_class_weights.items()}
# --------------------------------- Train model --------------------------------- #
trainer = BaselineTrainer(
hyperparameters={
'conv2d_num_filters_block_0': args.conv2d_num_filters_block_0,
'conv2d_num_filters_block_1': args.conv2d_num_filters_block_1,
'conv2d_num_filters_block_2': args.conv2d_num_filters_block_2,
'conv2d_num_filters_block_3': args.conv2d_num_filters_block_3,
'conv2d_num_filters_block_4': args.conv2d_num_filters_block_4,
'conv2d_pooling_size': args.conv2d_pooling_size,
'conv2d_kernel_size': args.conv2d_kernel_size,
'dense_num_units_0': args.dense_num_units_0,
'dense_num_units_1': args.dense_num_units_1,
'dense_num_units_2': args.dense_num_units_2,
'dense_dropout_rate': args.dense_dropout_rate,
'opt_learning_rate': args.opt_learning_rate,
'opt_adam_beta_1': args.opt_adam_beta_1,
'opt_adam_beta_2': args.opt_adam_beta_2,
'opt_clipnorm': args.opt_clipnorm,
'loss_alpha': args.loss_alpha,
'loss_gamma': args.loss_gamma,
'fit_epochs': args.fit_epochs,
'fit_validation_steps': 1 if args.test_mode else int(config['val_size'] / config['batch_size']),
'use_focal_loss': args.use_focal_loss
},
config=config,
job_name=job_name,
train_dataset=train_dataset,
val_dataset=val_dataset,
train_class_weights=train_class_weights,
distributed=distributed,
strategy=strategy,
model_dir=args.model_dir,
logger=logger
)
trainer.fit()
del trainer

repo_name: YangWu1227/python-for-machine-learning | sub_path: neural_network/projects/cnn_insect_classification_sagemaker/src/baseline_entry.py | file_name: baseline_entry.py | file_ext: py | file_size_in_byte: 12,600 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 39172128523
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import json, make_response
from flask_restplus import Api, Resource
from http import HTTPStatus
from .v1.api import api as api_v1
from .v2.api import api as api_v2
from .health import api as api_health
api = Api()
api.add_namespace(api_v1)
api.add_namespace(api_v2)
api.add_namespace(api_health)
@api.route('/swagger')
class Swagger(Resource):
@api.doc(
id='Get swagger JSON',
responses={200: 'OK'},
description='''
Retrieve the swagger JSON object
'''
)
def get(self):
r = json.dumps(api.__schema__, indent=2)
r = make_response( r, HTTPStatus.OK )
r.headers['Content-Type'] = 'application/json'
return r
@api.route('/postman')
class Postman(Resource):
@api.doc(
id='Get Postman representation',
responses={200: 'OK'},
description='''
Retrieve the Postman JSON object
'''
)
def get(self):
data = api.as_postman(urlvars=True, swagger=True)
r = json.dumps(data, indent=2)
r = make_response( r, HTTPStatus.OK )
r.headers['Content-Type'] = 'application/json'
return r
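# Hedged wiring sketch (not part of the original module): register the aggregated Api on a
# Flask application.
from flask import Flask

app = Flask(__name__)
api.init_app(app)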

repo_name: shalomb/terrestrial | sub_path: apis/__init__.py | file_name: __init__.py | file_ext: py | file_size_in_byte: 1,122 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 71923388669
#!/usr/bin/env python
import os, argparse
parser = argparse.ArgumentParser()
parser.add_argument("-u", required=True, dest="usuario", help="Usuario de PostgreSQL")
parser.add_argument("-H", default="localhost", dest="host", help="IP del equipo remoto")
parser.add_argument("-p", default="5432", dest="puerto", help="Puerto del equipo remoto")
parser.add_argument("-db", required=True, dest="database", help="Nombre de la base de datos")
args = parser.parse_args()
if args.usuario: usuario = "-U "+args.usuario
if args.host: host = "-h "+args.host
if args.puerto: puerto = "-p "+args.puerto
if args.database: database = args.database
os.system("pg_dump "+host+" "+usuario+" "+puerto+" "+database+" | gzip > "+database+"_$(date +%Y-%m-%d).sql.gz")
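# Hedged alternative sketch (function name is illustrative, behaviour assumed equivalent):
# invoke pg_dump without shell string concatenation and compress the dump with Python's gzip.
import gzip
import subprocess
from datetime import date

def backup_sin_shell(usuario, host, puerto, database):
    salida = "{0}_{1:%Y-%m-%d}.sql.gz".format(database, date.today())
    dump = subprocess.run(["pg_dump", "-h", host, "-U", usuario, "-p", puerto, database],
                          capture_output=True, check=True)
    with gzip.open(salida, "wb") as f:
        f.write(dump.stdout)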

repo_name: francisjgarcia/ASGBD-2018-19 | sub_path: scripts/python/backup.py | file_name: backup.py | file_ext: py | file_size_in_byte: 761 | program_lang: python | lang: es | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 15790344554
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 23:15:17 2020
@author: malrawi
"""
import dicttoxml
import ruamel.yaml # https://pypi.org/project/ruamel.yaml/
import json
def read_yaml_as_dict(fname):
"""
A function used to read the ddoif dictionary in yaml format and return it as a python dictionary.
This function makes use of ruamel.yaml https://pypi.org/project/ruamel.yaml/
...
Input arguments
----------
- fname: the name of the yaml file
Output arguments:
- ddoif_dict: all attributes of the ddoif dictionary
Methods
-------
ddoif_dict = read_yaml_as_dict('ddoif_dictionary.yaml')
"""
with open(fname) as fp:
# The FullLoader parameter handles the conversion from YAML
# scalar values to Python the dictionary format
# ddoif_dict = yaml.load(fp, Loader=yaml.FullLoader)
# ddoif_dict = yaml.safe_load(fp)
yaml = ruamel.yaml.YAML(typ='safe') # this is claimed to be the safest way to load yaml https://stackoverflow.com/questions/50846431/converting-a-yaml-file-to-python-json-object
ddoif_dict = yaml.load(fp)
return ddoif_dict
def yaml_to_xml(yaml_fname, xml_fname, ids=False):
"""
A function used to read the ddoif dictionary in yaml format and save it into an xml file.
This function makes use of dicttoxml package.
...
Input arguments
----------
- yaml_fname: the name of the yaml file as input
- xml_fname: the name of the output xml file
Output arguments:
Methods
-------
yaml_to_xml('ddoif_dictionary.yaml', 'ddoif_dictionary.xml')
"""
yaml_dict = read_yaml_as_dict(yaml_fname)
xml_obj = dicttoxml.dicttoxml(yaml_dict, custom_root='ddoif', attr_type=False, ids=ids)
print('Converting to xml using dicttoxml version -- ', dicttoxml.__version__)
# print_xml(xml_obj)
with open(xml_fname, "wb") as fp:
fp.write(xml_obj)
def yaml_to_json(yaml_fname, json_fname):
"""
A function used to read the ddoif dictionary in yaml format and save it into a json file.
This function makes use of the json package.
...
Input arguments:
----------
- yaml_fname: the name of the yaml file as input
- json_fname: the name of the output json file
Output arguments:
Methods:
-------
yaml_to_json('ddoif_dictionary.yaml', 'ddoif_dictionary.json')
"""
yaml_dict = read_yaml_as_dict(yaml_fname)
with open(json_fname, 'w') as fp:
json.dump(yaml_dict, fp, indent=True, )
def print_xml(xml_obj): # xml_obj = dicttoxml.dicttoxml(yaml_dict, custom_root='ddoif', attr_type=False)
from xml.dom.minidom import parseString
dom = parseString(xml_obj)
print(dom.toprettyxml())
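# Hedged usage sketch (file names taken from the docstrings above): convert the same YAML
# dictionary into both XML and JSON.
if __name__ == '__main__':
    yaml_to_xml('ddoif_dictionary.yaml', 'ddoif_dictionary.xml')
    yaml_to_json('ddoif_dictionary.yaml', 'ddoif_dictionary.json')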

repo_name: morawi/ddoif | sub_path: ddoif_utils.py | file_name: ddoif_utils.py | file_ext: py | file_size_in_byte: 3,011 | program_lang: python | lang: en | doc_type: code | stars: 3 | dataset: github-code | pt: 6

seq_id: 70485999229
# Create a program which you will enter the amount of money you have, it will also ask for the price of an apple.
# Display the maximum number of apples that you can buy and the remaining money that you will have.
def getInput(dataIn):
"""takes an input and returns a value depending on the parameter requested
arg: dataIn - a string that determines the output value
return: a value from the userInput
"""
while 1:
if dataIn == "money":
bill = int(input("How much money do you have?: "))
else:
bill = int(input("How much is an apple?: "))
if bill < 0:
print("You cannot input negative number.")
else:
break
return bill
print("program 3")
money = getInput("money")
appleCost = getInput("appleCost")
while 1:
if money >= appleCost:
print(f"you can buy {int(money / appleCost)} apple(s) and your change is ₱{money%appleCost}")
break
else:
print(f"you cannot afford an apple. You are missing ₱{appleCost - money}")
money = getInput("money")
exit()
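# Hedged worked example (values assumed): with money = 10 and an apple price of 3,
# int(10 / 3) = 3 apples can be bought and the change is 10 % 3 = 1.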

repo_name: MagnoClarence/Assignment3 | sub_path: Program3.py | file_name: Program3.py | file_ext: py | file_size_in_byte: 1,119 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6

seq_id: 29778121292
import requests, os, json
from bs4 import BeautifulSoup as BS
from random import choice
_BASE_PATH = os.path.dirname(__file__)
_DATA_FILE = os.path.join(_BASE_PATH,'data.txt')
_ACTORS_FILE = os.path.join(_BASE_PATH,'actors.txt')
_DIRECTORS_FILE = os.path.join(_BASE_PATH,'directors.txt')
_YEARS_FILE = os.path.join(_BASE_PATH,'years.txt')
_IMDB_LIST = r"http://www.imdb.com/chart/top?ref_=nv_mv_250_6"
_IMDB_BASE_URL = r"http://www.imdb.com"
_movies = []
_actorsPool = []
_directorsPool = []
_yearsPool = []
def _getActorPool():
actors = set()
for m in _movies:
for a in _getActorsForMovie(m['link']):
actors.add(a)
if len(actors) >= 100:
break
return list(actors)
def _getDirectorPool():
directors = set()
for m in _movies:
directors.add(_getDirectorForMovie(m['link']))
if len(directors) >= 100:
break
return list(directors)
def _getYearPool():
years = set()
for m in _movies:
years.add(m['year'])
return list(years)
def _init():
global _movies
global _actorsPool
global _directorsPool
global _yearsPool
try:
file = open(_DATA_FILE, encoding='ISO-8859-1')
_movies = eval(file.read())
file.close()
except FileNotFoundError:
file = open(_DATA_FILE, 'w', encoding='ISO-8859-1')
_movies = _getMovies()
file.write(str(_movies))
file.close()
try:
file = open(_ACTORS_FILE, encoding='ISO-8859-1')
_actorsPool = list(eval(file.read()))
except FileNotFoundError:
file = open(_ACTORS_FILE, 'w', encoding='ISO-8859-1')
_actorsPool = _getActorPool()
file.write(str(_actorsPool))
file.close()
try:
file = open(_DIRECTORS_FILE, encoding='ISO-8859-1')
_directorsPool = list(eval(file.read()))
except FileNotFoundError:
file = open(_DIRECTORS_FILE, 'w', encoding='ISO-8859-1')
_directorsPool = _getDirectorPool()
file.write(str(_directorsPool))
file.close()
try:
file = open(_YEARS_FILE, encoding='ISO-8859-1')
_yearsPool = list(eval(file.read()))
except FileNotFoundError:
file = open(_YEARS_FILE, 'w', encoding='ISO-8859-1')
_yearsPool = _getYearPool()
file.write(str(_yearsPool))
file.close()
def _getMovies():
response = requests.get(_IMDB_LIST, headers = {'accept-language': 'en-US, en'})
text = response.text
soup = BS(text, 'html.parser')
movieList = []
for line in soup.findAll('td', {"class": "titleColumn"}):
m = dict()
m['link'] = _IMDB_BASE_URL + line.find('a').attrs['href']
m['title'] = line.find('a').text
m['year'] = (line.find('span').text)[1:-1]
m['actors'],m['director'] = _getActorsAndDirector(m['link'])
movieList.append(m)
return movieList
def _getActorsForMovie(link):
text = requests.get(link).text
soup = BS(text, 'html.parser')
actors = []
for line in soup.findAll('span', {"itemprop": "actors"}):
name = line.find('span',{"itemprop":"name"}).text
actors.append(name)
return actors
def _getDirectorForMovie(link):
text = requests.get(link).text
soup = BS(text, 'html.parser')
return (soup.find('span', {"itemprop": "director"})).find('span', {"itemprop":"name"}).text
def _getActorsAndDirector(link):
text = requests.get(link).text
soup = BS(text, 'html.parser')
actors = []
for line in soup.findAll('span', {"itemprop": "actors"}):
name = line.find('span',{"itemprop":"name"}).text
actors.append(name)
director = (soup.find('span', {"itemprop": "director"})).find('span', {"itemprop":"name"}).text
return actors,director
def getRandomQuestion(nrOfChoices=4):
if not (_movies and _actorsPool and _directorsPool and _yearsPool):
_init() # Making sure everything is set up, none of these
# variables should be empty
# Get random movie
movie = choice(_movies)
group = {'a': ['actors', _actorsPool, 'Who of the following starred in {0}?'],
'd': ['director', _directorsPool, 'Who was the director of {0}?'],
'y': ['year', _yearsPool, 'When was the movie {0} premiered?']}
choices = []
questionType = choice(['a','d','y']) # a for actor, d for director, y for year
if questionType == 'a':
correctAnswer = choice(movie[group[questionType][0]])
else:
correctAnswer = movie[group[questionType][0]]
pool = set(group[questionType][1])
exclude = set(movie[group[questionType][0]])
pool = list(pool-exclude)
for i in range(nrOfChoices-1):
filler = choice(pool)
pool.remove(filler)
choices.append(filler)
choices.insert(choice(range(nrOfChoices)), correctAnswer)  # allow the correct answer to land in any slot
jsonObj = json.dumps({'question':group[questionType][2].format(movie['title']),
'choices': choices,
'answer': correctAnswer})
return jsonObj
'''
TO BE ABLE TO USE THE MODULE AS A SCRIPT AS WELL
'''
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Y U so stupid!?')
parser.add_argument('-q', '--questions', help='number of questions to return', type=int, nargs=1, default=1)
parser.add_argument('-d', '--difficulty', help='difficulty,defined by number of choices', type=int, choices=range(2, 11),default=4)
parser.add_argument('-k', '--keep-them-coming', help='continue getting questions until input is other than y/Y', action='store_true')
args = parser.parse_args()
difficulty = args.difficulty
questions = args.questions
if type(difficulty) == type([]):
difficulty = difficulty[0]
if type(questions) == type([]):
questions = questions[0]
for i in range(questions):
print(getRandomQuestion(difficulty))
if args.keep_them_coming:
userInput = ""
while True:
answer = input('\nKeep them coming? (y):')
if not(str(answer).lower() == 'y' or answer.lower() == 'yes'):
exit()
print(getRandomQuestion(args.difficulty))
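# Hedged output sketch: getRandomQuestion() returns a JSON string shaped like
# {"question": "Who was the director of <title>?", "choices": [...], "answer": "..."},
# with exactly nrOfChoices entries in "choices".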

repo_name: asav13/PRLA-Verk5 | sub_path: part2/y_u_so_stupid.py | file_name: y_u_so_stupid.py | file_ext: py | file_size_in_byte: 6,583 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 17689175212
def read_measurements(filename):
f = open(filename, 'r')
return list(map(lambda s: int(s.strip()), f.readlines()))
def main():
measurements = read_measurements("01 - Depth Measurements.txt")
print("Increased measurements: " + str(num_increased(measurements)))
print("Increased sliding windows: " + str(num_sliding_windows_increased(measurements)))
def num_increased(measurements):
prev = None
increased = 0
for m in measurements:
if prev is not None and prev < m:
increased += 1
prev = m
return increased
def num_sliding_windows_increased(measurements):
prev_sum = None
stop = len(measurements)
i, j, k = 0, 1, 2
increased = 0
while k < stop:
total = measurements[i] + measurements[j] + measurements[k]
if prev_sum is not None and prev_sum < total:
increased += 1
prev_sum = total
i, j, k = i + 1, j + 1, k + 1
return increased
main()
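# Hedged simplification sketch (not in the original file): consecutive three-measurement windows
# share two values, so window i + 1 exceeds window i exactly when measurements[i + 3] > measurements[i].
def num_sliding_windows_increased_alt(measurements):
    return sum(1 for i in range(len(measurements) - 3) if measurements[i + 3] > measurements[i])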

repo_name: aacohn84/Advent_of_Code_2021 | sub_path: 01 - Sonar Sweep.py | file_name: 01 - Sonar Sweep.py | file_ext: py | file_size_in_byte: 970 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 10233665355
from __future__ import annotations
import datetime
from typing import Optional, Union, TYPE_CHECKING, List, Dict
from . import enums
from .utils import parse_timestamp
from .user import BitLeaderboardUser, PartialUser, User
if TYPE_CHECKING:
from .http import TwitchHTTP
__all__ = (
"BitsLeaderboard",
"Clip",
"CheerEmote",
"CheerEmoteTier",
"GlobalEmote",
"ChannelEmote",
"HypeTrainContribution",
"HypeTrainEvent",
"BanEvent",
"FollowEvent",
"SubscriptionEvent",
"Marker",
"VideoMarkers",
"Game",
"ModEvent",
"AutomodCheckMessage",
"AutomodCheckResponse",
"Extension",
"MaybeActiveExtension",
"ActiveExtension",
"ExtensionBuilder",
"Video",
"Tag",
"WebhookSubscription",
"Prediction",
"Predictor",
"PredictionOutcome",
"Schedule",
"ScheduleSegment",
"ScheduleCategory",
"ScheduleVacation",
"Stream",
"Team",
"ChannelTeams",
"ChannelInfo",
"Poll",
"PollChoice",
"Goal",
"ChatSettings",
"Raid",
"ChatterColor",
"Timeout",
"Ban",
"ShieldStatus",
"ChatBadge",
"ChatBadgeVersions",
"ContentClassificationLabel",
"CharityValues",
"CharityCampaign",
"ChannelFollowerEvent",
"ChannelFollowingEvent",
)
class BitsLeaderboard:
"""
Represents a Bits leaderboard from the twitch API.
Attributes
------------
started_at: Optional[:class:`datetime.datetime`]
The time the leaderboard started.
ended_at: Optional[:class:`datetime.datetime`]
The time the leaderboard ended.
leaders: List[:class:`BitLeaderboardUser`]
The current leaders of the Leaderboard.
"""
__slots__ = "_http", "leaders", "started_at", "ended_at"
def __init__(self, http: "TwitchHTTP", data: dict):
self._http = http
self.started_at = (
parse_timestamp(data["date_range"]["started_at"]) if data["date_range"]["started_at"] else None
)
self.ended_at = parse_timestamp(data["date_range"]["ended_at"]) if data["date_range"]["ended_at"] else None
self.leaders = [BitLeaderboardUser(http, x) for x in data["data"]]
def __repr__(self):
return f"<BitsLeaderboard started_at={self.started_at} ended_at={self.ended_at}>"
class CheerEmoteTier:
"""
Represents a Cheer Emote tier.
Attributes
-----------
min_bits: :class:`int`
The minimum bits for the tier
id: :class:`str`
The ID of the tier
colour: :class:`str`
The colour of the tier
images: :class:`dict`
contains two dicts, ``light`` and ``dark``. Each item will have an ``animated`` and ``static`` item,
which will contain yet another dict, with sizes ``1``, ``1.5``, ``2``, ``3``, and ``4``.
Ex. ``cheeremotetier.images["light"]["animated"]["1"]``
can_cheer: :class:`bool`
Indicates whether emote information is accessible to users.
show_in_bits_card: :class:`bool`
Indicates whether twitch hides the emote from the bits card.
"""
__slots__ = "min_bits", "id", "color", "images", "can_cheer", "show_in_bits_card"
def __init__(self, data: dict):
self.min_bits: int = data["min_bits"]
self.id: str = data["id"]
self.color: str = data["color"]
self.images = data["images"] # TODO types
self.can_cheer: bool = data["can_cheer"]
self.show_in_bits_card: bool = data["show_in_bits_card"]
def __repr__(self):
return f"<CheerEmoteTier id={self.id} min_bits={self.min_bits}>"
class CheerEmote:
"""
Represents a Cheer Emote
Attributes
-----------
prefix: :class:`str`
The string used to Cheer that precedes the Bits amount.
tiers: List[:class:`CheerEmoteTier`]
The tiers this Cheer Emote has
type: :class:`str`
Shows whether the emote is ``global_first_party``, ``global_third_party``, ``channel_custom``, ``display_only``, or ``sponsored``.
order: :class:`str`
Order of the emotes as shown in the bits card, in ascending order.
last_updated: :class:`datetime.datetime`
The date this cheermote was last updated.
charitable: :class:`bool`
Indicates whether this emote provides a charity contribution match during charity campaigns.
"""
__slots__ = "_http", "prefix", "tiers", "type", "order", "last_updated", "charitable"
def __init__(self, http: "TwitchHTTP", data: dict):
self._http = http
self.prefix: str = data["prefix"]
self.tiers = [CheerEmoteTier(x) for x in data["tiers"]]
self.type: str = data["type"]
self.order: str = data["order"]
self.last_updated = parse_timestamp(data["last_updated"])
self.charitable: bool = data["is_charitable"]
def __repr__(self):
return f"<CheerEmote prefix={self.prefix} type={self.type} order={self.order}>"
class GlobalEmote:
"""
Represents a Global Emote
Attributes
-----------
id: :class:`str`
The ID of the emote.
name: :class:`str`
The name of the emote.
images: :class:`dict`
Contains the image URLs for the emote. These image URLs will always provide a static (i.e., non-animated) emote image with a light background.
format: List[:class:`str`]
The formats that the emote is available in.
scale: List[:class:`str`]
The sizes that the emote is available in.
theme_mode: List[:class:`str`]
The background themes that the emote is available in.
"""
__slots__ = ("id", "name", "images", "format", "scale", "theme_mode", "template")
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: str = data["id"]
self.name: str = data["name"]
self.images: dict = data["images"]
self.format: List[str] = data["format"]
self.scale: List[str] = data["scale"]
self.theme_mode: List[str] = data["theme_mode"]
def __repr__(self):
return f"<GlobalEmote id={self.id} name={self.name}>"
class ChannelEmote(GlobalEmote):
"""
Represents a Channel Emote
Attributes
-----------
id: :class:`str`
The ID of the emote.
name: :class:`str`
The name of the emote.
images: :class:`dict`
Contains the image URLs for the emote. These image URLs will always provide a static (i.e., non-animated) emote image with a light background.
tier: :class:`str`
The subscriber tier at which the emote is unlocked.
type: :class:`str`
The type of emote.
set_id: :class:`str`
An ID that identifies the emote set that the emote belongs to.
format: List[:class:`str`]
The formats that the emote is available in.
scale: List[:class:`str`]
The sizes that the emote is available in.
theme_mode: List[:class:`str`]
The background themes that the emote is available in.
"""
__slots__ = ("tier", "type", "set_id")
def __init__(self, http: "TwitchHTTP", data: dict):
super().__init__(http, data)
self.tier: str = data["tier"]
self.type: str = data["emote_type"]
self.set_id: str = data["emote_set_id"]
def __repr__(self):
return f"<ChannelEmote id={self.id} name={self.name} type={self.type}>"
class Clip:
"""
Represents a Twitch Clip
Attributes
-----------
id: :class:`str`
The ID of the clip.
url: :class:`str`
The URL of the clip.
embed_url: :class:`str`
The URL to embed the clip with.
broadcaster: :class:`~twitchio.PartialUser`
The user whose channel the clip was created on.
creator: :class:`~twitchio.PartialUser`
The user who created the clip.
video_id: :class:`str`
The ID of the video the clip is sourced from.
game_id: :class:`str`
The ID of the game that was being played when the clip was created.
language: :class:`str`
The language, in an `ISO 639-1 <https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes>`_ format, of the stream when the clip was created.
title: :class:`str`
The title of the clip.
views: :class:`int`
The amount of views this clip has.
created_at: :class:`datetime.datetime`
When the clip was created.
thumbnail_url: :class:`str`
The url of the clip thumbnail.
duration: :class:`float`
Duration of the Clip in seconds (up to 0.1 precision).
vod_offset: Optional[:class:`int`]
The zero-based offset, in seconds, to where the clip starts in the video (VOD) or stream.
This can be None if the parent no longer exists
is_featured: :class:`bool`
Indicates if the clip is featured or not.
"""
__slots__ = (
"id",
"url",
"embed_url",
"broadcaster",
"creator",
"video_id",
"game_id",
"language",
"title",
"views",
"created_at",
"thumbnail_url",
"duration",
"vod_offset",
"is_featured",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: str = data["id"]
self.url: str = data["url"]
self.embed_url: str = data["embed_url"]
self.broadcaster = PartialUser(http, data["broadcaster_id"], data["broadcaster_name"])
self.creator = PartialUser(http, data["creator_id"], data["creator_name"])
self.video_id: str = data["video_id"]
self.game_id: str = data["game_id"]
self.language: str = data["language"]
self.title: str = data["title"]
self.views: int = data["view_count"]
self.created_at = parse_timestamp(data["created_at"])
self.thumbnail_url: str = data["thumbnail_url"]
self.duration: float = data["duration"]
self.vod_offset: Optional[int] = data["vod_offset"]
self.is_featured: bool = data["is_featured"]
def __repr__(self):
return f"<Clip id={self.id} broadcaster={self.broadcaster} creator={self.creator}>"
class HypeTrainContribution:
"""
A Contribution to a Hype Train
Attributes
-----------
total: :class:`int`
Total aggregated amount of all contributions by the top contributor. If type is ``BITS``, total represents aggregate amount of bits used.
If type is ``SUBS``, aggregate total where 500, 1000, or 2500 represent tier 1, 2, or 3 subscriptions respectively.
For example, if top contributor has gifted a tier 1, 2, and 3 subscription, total would be 4000.
type: :class:`str`
Identifies the contribution method, either BITS, SUBS or OTHER.
user: :class:`~twitchio.PartialUser`
The user making the contribution.
"""
__slots__ = "total", "type", "user"
def __init__(self, http: "TwitchHTTP", data: dict):
self.total: int = data["total"]
self.type: str = data["type"]
self.user = PartialUser(http, id=data["user"], name=None) # we'll see how this goes
def __repr__(self):
return f"<HypeTrainContribution total={self.total} type={self.type} user={self.user}>"
class HypeTrainEvent:
"""
Represents a Hype Train Event (progression)
Attributes
-----------
id: :class:`str`
The ID of the event.
event_id: :class:`str`
The ID of the Hype Train.
type: :class:`str`
The type of the event. Currently only ``hypetrain.progression``.
version: :class:`str`
The version of the endpoint.
broadcaster: :class:`~twitchio.PartialUser`
The user whose channel the Hype Train is occurring on.
timestamp: :class:`datetime.datetime`
The time the event happened at.
cooldown_end_time: :class:`datetime.datetime`
The time that another Hype Train can happen at.
expiry: :class:`datetime.datetime`
The time that this Hype Train expires at.
started_at: :class:`datetime.datetime`
The time that this Hype Train started at.
last_contribution: :class:`HypeTrainContribution`
The last contribution to this Hype Train.
level: :class:`int`
The level reached on this Hype Train (1-5).
top_contributions: List[:class:`HypeTrainContribution`]
The top contributors to the Hype Train.
contributions_total: :class:`int`
The total score towards completing the goal.
goal: :class:`int`
The goal for the next Hype Train level
"""
__slots__ = (
"id",
"type",
"timestamp",
"version",
"broadcaster",
"expiry",
"event_id",
"goal",
"level",
"started_at",
"top_contributions",
"contributions_total",
"cooldown_end_time",
"last_contribution",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: str = data["id"]
self.event_id: str = data["event_data"]["id"]
self.type: str = data["event_type"]
self.version: str = data["version"]
self.broadcaster = PartialUser(http, id=data["event_data"]["broadcaster_id"], name=None)
self.timestamp = parse_timestamp(data["event_timestamp"])
self.cooldown_end_time = parse_timestamp(data["event_data"]["cooldown_end_time"])
self.expiry = parse_timestamp(data["expires_at"])
self.started_at = parse_timestamp(data["event_data"]["started_at"])
self.last_contribution = HypeTrainContribution(http, data["event_data"]["last_contribution"])
self.level: int = data["event_data"]["level"]
self.top_contributions = [HypeTrainContribution(http, x) for x in data["event_data"]["top_contributions"]]
self.contributions_total: int = data["event_data"]["total"]
self.goal: int = data["event_data"]["goal"]
def __repr__(self):
return f"<HypeTrainEvent id={self.id} type={self.type} level={self.level} broadcaster={self.broadcaster}>"
class BanEvent:
"""
This has been deprecated.
Represents a user being banned from a channel.
Attributes
-----------
id: :class:`str`
The event ID.
type: :class:`str`
Type of ban event. Either ``moderation.user.ban`` or ``moderation.user.unban``.
timestamp: :class:`datetime.datetime`
The time the action occurred at.
version: :class:`float`
The version of the endpoint.
broadcaster: :class:`~twitchio.PartialUser`
The user whose channel the ban/unban occurred on.
user: :class:`~twitchio.PartialUser`
The user who was banned/unbanned.
moderator: :class:`~twitchio.PartialUser`
The user who performed the action.
expires_at: Optional[:class:`datetime.datetime`]
When the ban expires.
reason: :class:`str`
The reason the moderator banned/unbanned the user.
"""
__slots__ = "id", "type", "timestamp", "version", "broadcaster", "user", "expires_at", "moderator", "reason"
def __init__(self, http: "TwitchHTTP", data: dict, broadcaster: Optional[Union[PartialUser, User]]):
self.id: str = data["id"]
self.type: str = data["event_type"]
self.timestamp = parse_timestamp(data["event_timestamp"])
self.version: float = float(data["version"])
self.reason: str = data["event_data"]["reason"]
self.broadcaster = broadcaster or PartialUser(
http, data["event_data"]["broadcaster_id"], data["event_data"]["broadcaster_name"]
)
self.user = PartialUser(http, data["event_data"]["user_id"], data["event_data"]["user_name"])
self.moderator = PartialUser(http, data["event_data"]["moderator_id"], data["event_data"]["moderator_name"])
self.expires_at = (
parse_timestamp(data["event_data"]["expires_at"]) if data["event_data"]["expires_at"] else None
)
def __repr__(self):
return f"<BanEvent id={self.id} type={self.type} broadcaster={self.broadcaster} user={self.user}>"
class FollowEvent:
"""
Represents a Follow Event.
Attributes
-----------
from_user: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user that followed another user.
to_user: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user that was followed.
followed_at: :class:`datetime.datetime`
When the follow happened.
"""
__slots__ = "from_user", "to_user", "followed_at"
def __init__(
self,
http: "TwitchHTTP",
data: dict,
from_: Union[User, PartialUser] = None,
to: Union[User, PartialUser] = None,
):
self.from_user: Union[User, PartialUser] = from_ or PartialUser(http, data["from_id"], data["from_name"])
self.to_user: Union[User, PartialUser] = to or PartialUser(http, data["to_id"], data["to_name"])
self.followed_at = parse_timestamp(data["followed_at"])
def __repr__(self):
return f"<FollowEvent from_user={self.from_user} to_user={self.to_user} followed_at={self.followed_at}>"
class ChannelFollowerEvent:
"""
Represents a Channel Follower Event.
Attributes
-----------
user: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user that followed another user.
followed_at: :class:`datetime.datetime`
When the follow happened.
"""
__slots__ = "user", "followed_at"
def __init__(
self,
http: "TwitchHTTP",
data: dict,
):
self.user: Union[User, PartialUser] = PartialUser(http, data["user_id"], data["user_login"])
self.followed_at = parse_timestamp(data["followed_at"])
def __repr__(self):
return f"<ChannelFollowerEvent user={self.user} followed_at={self.followed_at}>"
class ChannelFollowingEvent:
"""
Represents a Channel Following Event.
Attributes
-----------
broadcaster: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user that is following another user.
followed_at: :class:`datetime.datetime`
When the follow happened.
"""
__slots__ = "broadcaster", "followed_at"
def __init__(
self,
http: "TwitchHTTP",
data: dict,
):
self.broadcaster: Union[User, PartialUser] = PartialUser(
http, data["broadcaster_id"], data["broadcaster_login"]
)
self.followed_at = parse_timestamp(data["followed_at"])
def __repr__(self):
return f"<ChannelFollowingEvent broadcaster={self.broadcaster} followed_at={self.followed_at}>"
class SubscriptionEvent:
"""
Represents a Subscription Event
Attributes
-----------
broadcaster: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user that was subscribed to.
user: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user who subscribed.
tier: :class:`int`
The tier at which the user subscribed. Could be ``1``, ``2``, or ``3``.
plan_name: :class:`str`
Name of the description. (twitch docs aren't helpful, if you know what this is specifically please PR :) ).
gift: :class:`bool`
Whether the subscription is a gift.
"""
__slots__ = "broadcaster", "gift", "tier", "plan_name", "user"
def __init__(
self,
http: "TwitchHTTP",
data: dict,
broadcaster: Union[User, PartialUser] = None,
user: Union[User, PartialUser] = None,
):
self.broadcaster: Union[User, PartialUser] = broadcaster or PartialUser(
http, data["broadcaster_id"], data["broadcaster_name"]
)
self.user: Union[User, PartialUser] = user or PartialUser(http, data["user_id"], data["user_name"])
self.tier: int = round(int(data["tier"]) / 1000)
self.plan_name: str = data["plan_name"]
self.gift: bool = data["is_gift"]
def __repr__(self):
return (
f"<SubscriptionEvent broadcaster={self.broadcaster} user={self.user} tier={self.tier} "
f"plan_name={self.plan_name} gift={self.gift}>"
)
class Marker:
"""
Represents a stream Marker
Attributes
-----------
id: :class:`str`
The ID of the marker.
created_at: :class:`datetime.datetime`
When the marker was created.
description: :class:`str`
The description of the marker.
position: :class:`int`
The position of the marker, in seconds.
url: Optional[:class:`str`]
The url that leads to the marker.
"""
__slots__ = "id", "created_at", "description", "position", "url"
def __init__(self, data: dict):
self.id: str = data["id"]
self.created_at = parse_timestamp(data["created_at"])
self.description: str = data["description"]
self.position: int = data["position_seconds"]
self.url: Optional[str] = data.get("URL")
def __repr__(self):
return f"<Marker id={self.id} created_at={self.created_at} position={self.position} url={self.url}>"
class VideoMarkers:
"""
Represents markers contained in a video
Attributes
-----------
id: :class:`str`
The video id.
markers: List[:class:`Marker`]
The markers contained in the video.
"""
__slots__ = "id", "markers"
def __init__(self, data: dict):
self.id: str = data["video_id"]
self.markers = [Marker(d) for d in data["markers"]]
def __repr__(self):
return f"<VideoMarkers id={self.id}>"
class Game:
"""
Represents a Game on twitch
Attributes
-----------
id: :class:`int`
Game ID.
name: :class:`str`
Game name.
box_art_url: :class:`str`
Template URL for the game's box art.
igdb_id: Optional[:class:`int`]
The IGDB ID of the game. If this is not available to Twitch it will return None
"""
__slots__ = "id", "name", "box_art_url", "igdb_id"
def __init__(self, data: dict):
self.id: int = int(data["id"])
self.name: str = data["name"]
self.box_art_url: str = data["box_art_url"]
self.igdb_id: Optional[int] = data.get("igdb_id") and int(data["igdb_id"])
def __repr__(self):
return f"<Game id={self.id} name={self.name}>"
def art_url(self, width: int, height: int) -> str:
"""
Adds width and height into the box art url
Parameters
-----------
width: :class:`int`
The width of the image
height: :class:`int`
The height of the image
Returns
--------
:class:`str`
"""
return self.box_art_url.format(width=width, height=height)
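# Hedged usage sketch (dimensions are illustrative): box_art_url is a template containing
# {width} and {height} placeholders, so game.art_url(width=285, height=380) yields a concrete
# image URL for a fetched Game instance.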
class ModEvent:
"""
Represents a mod add/remove action
Attributes
-----------
id: :class:`str`
The ID of the event.
type: :class:`~twitchio.ModEventEnum`
The type of the event.
timestamp: :class:`datetime.datetime`
The timestamp of the event.
version: :class:`str`
The version of the endpoint.
broadcaster: Union[:class:`~twitchio.PartialUser`, :class:`~twitchio.User`]
The user whose channel the event happened on.
user: :class:`~twitchio.PartialUser`
The user being removed or added as a moderator.
"""
__slots__ = "id", "type", "timestamp", "version", "broadcaster", "user"
def __init__(self, http: "TwitchHTTP", data: dict, broadcaster: Union[PartialUser, User]):
self.id: str = data["id"]
self.type = enums.ModEventEnum(value=data["event_type"])
self.timestamp = parse_timestamp(data["event_timestamp"])
self.version: str = data["version"]
self.broadcaster = broadcaster
self.user = PartialUser(http, data["event_data"]["user_id"], data["event_data"]["user_name"])
def __repr__(self):
return f"<ModEvent id={self.id} type={self.type} broadcaster={self.broadcaster} user={self.user}>"
class AutomodCheckMessage:
"""
Represents the message to check with automod
Attributes
-----------
id: :class:`str`
Developer-generated identifier for mapping messages to results.
text: :class:`str`
Message text.
user_id: :class:`int`
User ID of the sender.
"""
__slots__ = "id", "text", "user_id"
def __init__(self, id: str, text: str, user: Union[PartialUser, int]):
self.id = id
self.text = text
self.user_id = user.id if isinstance(user, PartialUser) else user
def _to_dict(self):
return {"msg_id": self.id, "msg_text": self.text, "user_id": str(self.user_id)}
def __repr__(self):
return f"<AutomodCheckMessage id={self.id} user_id={self.user_id}>"
class AutomodCheckResponse:
"""
Represents the response to a message check with automod
Attributes
-----------
id: :class:`str`
The message ID passed in the body of the check
permitted: :class:`bool`
Indicates if this message meets AutoMod requirements.
"""
__slots__ = "id", "permitted"
def __init__(self, data: dict):
self.id: str = data["msg_id"]
self.permitted: bool = data["is_permitted"]
def __repr__(self):
return f"<AutomodCheckResponse id={self.id} permitted={self.permitted}>"
class Extension:
"""
Represents an extension for a specified user
Attributes
-----------
id: :class:`str`
ID of the extension.
version: :class:`str`
Version of the extension.
active: :class:`bool`
Activation state of the extension, for each extension type (component, overlay, mobile, panel).
"""
__slots__ = "id", "active", "version", "_x", "_y"
def __init__(self, data):
self.id: str = data["id"]
self.version: str = data["version"]
self.active: bool = data["active"]
self._x = None
self._y = None
def __repr__(self):
return f"<Extension id={self.id} version={self.version} active={self.active}>"
@classmethod
def new(cls, active: bool, version: str, id: str, x: int = None, y: int = None) -> "Extension":
self = cls.__new__(cls)
self.active = active
self.version = version
self.id = id
self._x = x
self._y = y
return self
def _to_dict(self):
v = {"active": self.active, "id": self.id, "version": self.version}
if self._x is not None:
v["x"] = self._x
if self._y is not None:
v["y"] = self._y
return v
class MaybeActiveExtension(Extension):
"""
Represents an extension for a specified user that may be activated
Attributes
-----------
id: :class:`str`
ID of the extension.
version: :class:`str`
Version of the extension.
name: :class:`str`
Name of the extension.
can_activate: :class:`bool`
Indicates whether the extension is configured such that it can be activated.
types: List[:class:`str`]
Types for which the extension can be activated.
"""
__slots__ = "id", "version", "name", "can_activate", "types"
def __init__(self, data):
self.id: str = data["id"]
self.version: str = data["version"]
self.name: str = data["name"]
self.can_activate: bool = data["can_activate"]
self.types: List[str] = data["type"]
def __repr__(self):
return f"<MaybeActiveExtension id={self.id} version={self.version} name={self.name}>"
class ActiveExtension(Extension):
"""
Represents an active extension for a specified user
Attributes
-----------
id: :class:`str`
ID of the extension.
version: :class:`str`
Version of the extension.
active: :class:`bool`
Activation state of the extension.
name: :class:`str`
Name of the extension.
x: :class:`int`
(Video-component Extensions only) X-coordinate of the placement of the extension. Could be None.
y: :class:`int`
(Video-component Extensions only) Y-coordinate of the placement of the extension. Could be None.
"""
__slots__ = "id", "active", "name", "version", "x", "y"
def __init__(self, data):
self.active: bool = data["active"]
self.id: Optional[str] = data.get("id", None)
self.version: Optional[str] = data.get("version", None)
self.name: Optional[str] = data.get("name", None)
self.x: Optional[int] = data.get("x", None) # x and y only show for component extensions.
self.y: Optional[int] = data.get("y", None)
def __repr__(self):
return f"<ActiveExtension id={self.id} version={self.version} name={self.name}>"
class ExtensionBuilder:
"""
Represents an extension to be updated for a specific user
Attributes
-----------
panels: List[:class:`~twitchio.Extension`]
List of panels to update for an extension.
overlays: List[:class:`~twitchio.Extension`]
List of overlays to update for an extension.
components: List[:class:`~twitchio.Extension`]
List of components to update for an extension.
"""
__slots__ = "panels", "overlays", "components"
def __init__(
self, panels: List[Extension] = None, overlays: List[Extension] = None, components: List[Extension] = None
):
self.panels = panels or []
self.overlays = overlays or []
self.components = components or []
def _to_dict(self):
return {
"panel": {str(x): y._to_dict() for x, y in enumerate(self.panels)},
"overlay": {str(x): y._to_dict() for x, y in enumerate(self.overlays)},
"component": {str(x): y._to_dict() for x, y in enumerate(self.components)},
}
class Video:
"""
Represents video information
Attributes
-----------
id: :class:`int`
The ID of the video.
user: :class:`~twitchio.PartialUser`
User who owns the video.
title: :class:`str`
Title of the video
description: :class:`str`
Description of the video.
created_at: :class:`datetime.datetime`
Date when the video was created.
published_at: :class:`datetime.datetime`
Date when the video was published.
url: :class:`str`
URL of the video.
thumbnail_url: :class:`str`
Template URL for the thumbnail of the video.
viewable: :class:`str`
Indicates whether the video is public or private.
view_count: :class:`int`
Number of times the video has been viewed.
language: :class:`str`
Language of the video.
type: :class:`str`
The type of video.
duration: :class:`str`
Length of the video.
"""
__slots__ = (
"_http",
"id",
"user",
"title",
"description",
"created_at",
"published_at",
"url",
"thumbnail_url",
"viewable",
"view_count",
"language",
"type",
"duration",
)
def __init__(self, http: "TwitchHTTP", data: dict, user: Union[PartialUser, User] = None):
self._http = http
self.id: int = int(data["id"])
self.user = user or PartialUser(http, data["user_id"], data["user_name"])
self.title: str = data["title"]
self.description: str = data["description"]
self.created_at = parse_timestamp(data["created_at"])
self.published_at = parse_timestamp(data["published_at"])
self.url: str = data["url"]
self.thumbnail_url: str = data["thumbnail_url"]
self.viewable: str = data["viewable"]
self.view_count: int = data["view_count"]
self.language: str = data["language"]
self.type: str = data["type"]
self.duration: str = data["duration"]
def __repr__(self):
return f"<Video id={self.id} title={self.title} url={self.url}>"
async def delete(self, token: str):
"""|coro|
Deletes the video. For bulk deletion see :func:`Client.delete_videos`
Parameters
-----------
token: :class:`str`
The users oauth token with the channel:manage:videos
"""
await self._http.delete_videos(token, ids=[str(self.id)])
class Tag:
"""
Represents a stream tag
Attributes
-----------
id: :class:`str`
An ID that identifies the tag.
auto: :class:`bool`
Indicates whether the tag is an automatic tag.
localization_names: Dict[:class:`str`, :class:`str`]
A dictionary that contains the localized names of the tag.
    localization_descriptions: Dict[:class:`str`, :class:`str`]
A dictionary that contains the localized descriptions of the tag.
"""
__slots__ = "id", "auto", "localization_names", "localization_descriptions"
def __init__(self, data: dict):
self.id: str = data["tag_id"]
self.auto: bool = data["is_auto"]
self.localization_names: Dict[str, str] = data["localization_names"]
self.localization_descriptions: Dict[str, str] = data["localization_descriptions"]
def __repr__(self):
return f"<Tag id={self.id}>"
class WebhookSubscription:
__slots__ = "callback", "expires_at", "topic"
def __init__(self, data: dict):
self.callback: str = data["callback"]
self.expires_at = parse_timestamp(data["expired_at"])
self.topic: str = data["topic"]
def __repr__(self):
return f"<WebhookSubscription callback={self.callback} topic={self.topic} expires_at={self.expires_at}>"
class Stream:
"""
Represents a Stream
Attributes
-----------
id: :class:`int`
The current stream ID.
user: :class:`~twitchio.PartialUser`
The user who is streaming.
game_id: :class:`int`
Current game ID being played on the channel.
game_name: :class:`str`
Name of the game being played on the channel.
type: :class:`str`
Whether the stream is "live" or not.
title: :class:`str`
Title of the stream.
viewer_count: :class:`int`
Current viewer count of the stream
started_at: :class:`datetime.datetime`
UTC timestamp of when the stream started.
language: :class:`str`
Language of the channel.
thumbnail_url: :class:`str`
Thumbnail URL of the stream.
tag_ids: List[:class:`str`]
Tag IDs that apply to the stream.
.. warning::
This field will be deprecated by twitch in 2023.
is_mature: :class:`bool`
Indicates whether the stream is intended for mature audience.
tags: List[:class:`str`]
The tags applied to the channel.
"""
__slots__ = (
"id",
"user",
"game_id",
"game_name",
"type",
"title",
"viewer_count",
"started_at",
"language",
"thumbnail_url",
"tag_ids",
"is_mature",
"tags",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: int = data["id"]
self.user = PartialUser(http, data["user_id"], data["user_name"])
self.game_id: int = data["game_id"]
self.game_name: str = data["game_name"]
self.type: str = data["type"]
self.title: str = data["title"]
self.viewer_count: int = data["viewer_count"]
self.started_at = parse_timestamp(data["started_at"])
self.language: str = data["language"]
self.thumbnail_url: str = data["thumbnail_url"]
self.tag_ids: List[str] = data["tag_ids"] or []
self.is_mature: bool = data["is_mature"]
self.tags: List[str] = data["tags"]
def __repr__(self):
return f"<Stream id={self.id} user={self.user} title={self.title} started_at={self.started_at}>"
class ChannelInfo:
"""
Represents a channel's current information
Attributes
-----------
user: :class:`~twitchio.PartialUser`
The user whose channel information was requested.
game_id: :class:`int`
Current game ID being played on the channel.
game_name: :class:`str`
Name of the game being played on the channel.
title: :class:`str`
Title of the stream.
language: :class:`str`
Language of the channel.
delay: :class:`int`
Stream delay in seconds.
This defaults to 0 if the broadcaster_id does not match the user access token.
tags: List[:class:`str`]
The tags applied to the channel.
content_classification_labels: List[:class:`str`]
The CCLs applied to the channel.
is_branded_content: :class:`bool`
Boolean flag indicating if the channel has branded content.
"""
__slots__ = (
"user",
"game_id",
"game_name",
"title",
"language",
"delay",
"tags",
"content_classification_labels",
"is_branded_content",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.user = PartialUser(http, data["broadcaster_id"], data["broadcaster_name"])
self.game_id: int = data["game_id"]
self.game_name: str = data["game_name"]
self.title: str = data["title"]
self.language: str = data["broadcaster_language"]
self.delay: int = data["delay"]
self.tags: List[str] = data["tags"]
self.content_classification_labels: List[str] = data["content_classification_labels"]
self.is_branded_content: bool = data["is_branded_content"]
def __repr__(self):
return f"<ChannelInfo user={self.user} game_id={self.game_id} game_name={self.game_name} title={self.title} language={self.language} delay={self.delay}>"
class Prediction:
"""
Represents channel point predictions
Attributes
-----------
user: :class:`~twitchio.PartialUser`
The user who is streaming.
prediction_id: :class:`str`
ID of the Prediction.
title: :class:`str`
Title for the Prediction.
winning_outcome_id: :class:`str`
ID of the winning outcome
outcomes: List[:class:`~twitchio.PredictionOutcome`]
List of possible outcomes for the Prediction.
prediction_window: :class:`int`
Total duration for the Prediction (in seconds).
prediction_status: :class:`str`
Status of the Prediction.
created_at: :class:`datetime.datetime`
Time for when the Prediction was created.
ended_at: :class:`datetime.datetime`
Time for when the Prediction ended.
locked_at: :class:`datetime.datetime`
Time for when the Prediction was locked.
"""
__slots__ = (
"user",
"prediction_id",
"title",
"winning_outcome_id",
"outcomes",
"prediction_window",
"prediction_status",
"created_at",
"ended_at",
"locked_at",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.user = PartialUser(http, data["broadcaster_id"], data["broadcaster_name"])
self.prediction_id: str = data["id"]
self.title: str = data["title"]
self.winning_outcome_id: str = data["winning_outcome_id"]
self.outcomes: List[PredictionOutcome] = [PredictionOutcome(http, x) for x in data["outcomes"]]
self.prediction_window: int = data["prediction_window"]
self.prediction_status: str = data["status"]
self.created_at = self._parse_time(data, "created_at")
self.ended_at = self._parse_time(data, "ended_at")
self.locked_at = self._parse_time(data, "locked_at")
def _parse_time(self, data, field) -> Optional["Datetime"]:
if field not in data or data[field] is None:
return None
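        # Twitch returns RFC3339 timestamps with sub-second precision (e.g.
        # "2021-04-28T21:48:19.480371111Z"); splitting on "." drops the fractional part
        # and the trailing "Z" so datetime.fromisoformat can parse what remains.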
time = data[field].split(".")[0]
return datetime.datetime.fromisoformat(time)
def __repr__(self):
return f"<Prediction user={self.user} prediction_id={self.prediction_id} winning_outcome_id={self.winning_outcome_id} title={self.title}>"
class Predictor:
"""
Represents a predictor
Attributes
-----------
user: :class:`~twitchio.PartialUser`
The user who is streaming.
channel_points_used: :class:`int`
Number of Channel Points used by the user.
channel_points_won: :class:`int`
Number of Channel Points won by the user.
"""
__slots__ = ("channel_points_used", "channel_points_won", "user")
def __init__(self, http: "TwitchHTTP", data: dict):
self.channel_points_used: int = data["channel_points_used"]
self.channel_points_won: int = data["channel_points_won"]
self.user = PartialUser(http, data["user_id"], data["user_login"])
def __repr__(self):
return f"<Predictor user={self.user} channel_points_used={self.channel_points_used} channel_points_won={self.channel_points_won}>"
class PredictionOutcome:
"""
Represents a prediction outcome
Attributes
-----------
outcome_id: :class:`str`
ID for the outcome.
title: :class:`str`
Text displayed for outcome.
channel_points: :class:`int`
Number of Channel Points used for the outcome.
color: :class:`str`
Color for the outcome.
users: :class:`int`
        Number of unique users that chose the outcome.
top_predictors: List[:class:`~twitchio.Predictor`]
List of the top predictors. Could be None.
"""
__slots__ = ("outcome_id", "title", "channel_points", "color", "users", "top_predictors")
def __init__(self, http: "TwitchHTTP", data: dict):
self.outcome_id: str = data["id"]
self.title: str = data["title"]
self.channel_points: int = data["channel_points"]
self.color: str = data["color"]
self.users: int = data["users"]
if data["top_predictors"]:
self.top_predictors: List[Predictor] = [Predictor(http, x) for x in data["top_predictors"]]
else:
self.top_predictors: List[Predictor] = None
@property
def colour(self) -> str:
"""The colour of the prediction. Alias to color."""
return self.color
def __repr__(self):
return f"<PredictionOutcome outcome_id={self.outcome_id} title={self.title} channel_points={self.channel_points} color={self.color}>"
class Schedule:
"""
Represents a channel's stream schedule
Attributes
-----------
segments: List[:class:`~twitchio.ScheduleSegment`]
List of segments of a channel's stream schedule.
user: :class:`~twitchio.PartialUser`
The user of the channel associated to the schedule.
vacation: :class:`~twitchio.ScheduleVacation`
Vacation details of stream schedule.
"""
__slots__ = ("segments", "user", "vacation")
def __init__(self, http: "TwitchHTTP", data: dict):
self.segments = [ScheduleSegment(d) for d in data["data"]["segments"]] if data["data"]["segments"] else []
self.user = PartialUser(http, data["data"]["broadcaster_id"], data["data"]["broadcaster_login"])
self.vacation = ScheduleVacation(data["data"]["vacation"]) if data["data"]["vacation"] else None
def __repr__(self):
return f"<Schedule segments={self.segments} user={self.user} vacation={self.vacation}>"
class ScheduleSegment:
"""
Represents a list segments of a channel's stream schedule
Attributes
-----------
id: :class:`str`
The ID for the scheduled broadcast.
start_time: :class:`datetime.datetime`
Scheduled start time for the scheduled broadcast
end_time: Optional[:class:`datetime.datetime`]
Scheduled end time for the scheduled broadcast
title: :class:`str`
Title for the scheduled broadcast.
canceled_until: :class:`datetime.datetime`
Used with recurring scheduled broadcasts. Specifies the date of the next recurring broadcast.
category: :class:`~twitchio.ScheduleCategory`
The game or category details for the scheduled broadcast.
is_recurring: :class:`bool`
Indicates if the scheduled broadcast is recurring weekly.
"""
__slots__ = ("id", "start_time", "end_time", "title", "canceled_until", "category", "is_recurring")
def __init__(self, data: dict):
self.id: str = data["id"]
self.start_time = parse_timestamp(data["start_time"])
self.end_time = parse_timestamp(data["end_time"]) if data["end_time"] else None
self.title: str = data["title"]
self.canceled_until = parse_timestamp(data["canceled_until"]) if data["canceled_until"] else None
self.category = ScheduleCategory(data["category"]) if data["category"] else None
self.is_recurring: bool = data["is_recurring"]
def __repr__(self):
return f"<ScheduleSegment id={self.id} start_time={self.start_time} end_time={self.end_time} title={self.title} canceled_until={self.canceled_until} category={self.category} is_recurring={self.is_recurring}>"
class ScheduleCategory:
"""
Game or category details of a stream's schedule
Attributes
-----------
id: :class:`str`
The game or category ID.
name: :class:`str`
The game or category name.
"""
__slots__ = ("id", "name")
def __init__(self, data: dict):
self.id: str = data["id"]
self.name: str = data["name"]
def __repr__(self):
return f"<ScheduleCategory id={self.id} name={self.name}>"
class ScheduleVacation:
"""
A schedule's vacation details
Attributes
-----------
start_time: :class:`datetime.datetime`
        Start date of stream schedule vacation.
end_time: :class:`datetime.datetime`
        End date of stream schedule vacation.
"""
__slots__ = ("start_time", "end_time")
def __init__(self, data: dict):
self.start_time = parse_timestamp(data["start_time"])
self.end_time = parse_timestamp(data["end_time"])
def __repr__(self):
return f"<ScheduleVacation start_time={self.start_time} end_time={self.end_time}>"
class Team:
"""
Represents information for a specific Twitch Team
Attributes
-----------
users: List[:class:`~twitchio.PartialUser`]
List of users in the specified Team.
background_image_url: :class:`str`
URL for the Team background image.
banner: :class:`str`
URL for the Team banner.
created_at: :class:`datetime.datetime`
Date and time the Team was created.
updated_at: :class:`datetime.datetime`
Date and time the Team was last updated.
info: :class:`str`
Team description.
thumbnail_url: :class:`str`
Image URL for the Team logo.
team_name: :class:`str`
Team name.
team_display_name: :class:`str`
Team display name.
id: :class:`str`
Team ID.
"""
__slots__ = (
"users",
"background_image_url",
"banner",
"created_at",
"updated_at",
"info",
"thumbnail_url",
"team_name",
"team_display_name",
"id",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.users: List[PartialUser] = [PartialUser(http, x["user_id"], x["user_login"]) for x in data["users"]]
self.background_image_url: str = data["background_image_url"]
self.banner: str = data["banner"]
self.created_at = parse_timestamp(data["created_at"].split(" ")[0])
self.updated_at = parse_timestamp(data["updated_at"].split(" ")[0])
self.info: str = data["info"]
self.thumbnail_url: str = data["thumbnail_url"]
self.team_name: str = data["team_name"]
self.team_display_name: str = data["team_display_name"]
self.id = data["id"]
def __repr__(self):
return f"<Team users={self.users} team_name={self.team_name} team_display_name={self.team_display_name} id={self.id} created_at={self.created_at}>"
class ChannelTeams:
"""
Represents the Twitch Teams of which the specified channel/broadcaster is a member
Attributes
-----------
broadcaster: :class:`~twitchio.PartialUser`
User of the broadcaster.
background_image_url: :class:`str`
URL for the Team background image.
banner: :class:`str`
URL for the Team banner.
created_at: :class:`datetime.datetime`
Date and time the Team was created.
updated_at: :class:`datetime.datetime`
Date and time the Team was last updated.
info: :class:`str`
Team description.
thumbnail_url: :class:`str`
Image URL for the Team logo.
team_name: :class:`str`
Team name.
team_display_name: :class:`str`
Team display name.
id: :class:`str`
Team ID.
"""
__slots__ = (
"broadcaster",
"background_image_url",
"banner",
"created_at",
"updated_at",
"info",
"thumbnail_url",
"team_name",
"team_display_name",
"id",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.broadcaster: PartialUser = PartialUser(http, data["broadcaster_id"], data["broadcaster_login"])
self.background_image_url: str = data["background_image_url"]
self.banner: str = data["banner"]
self.created_at = parse_timestamp(data["created_at"].split(" ")[0])
self.updated_at = parse_timestamp(data["updated_at"].split(" ")[0])
self.info: str = data["info"]
self.thumbnail_url: str = data["thumbnail_url"]
self.team_name: str = data["team_name"]
self.team_display_name: str = data["team_display_name"]
self.id = data["id"]
def __repr__(self):
return f"<ChannelTeams user={self.broadcaster} team_name={self.team_name} team_display_name={self.team_display_name} id={self.id} created_at={self.created_at}>"
class Poll:
"""
Represents a list of Polls for a broadcaster / channel
.. note::
Twitch have removed support for voting with bits.
By default bits_votes, bits_voting_enabled and bits_per_vote will be received as either 0 or False.
Attributes
-----------
id: :class:`str`
ID of a poll.
broadcaster: :class:`~twitchio.PartialUser`
User of the broadcaster.
title: :class:`str`
Question displayed for the poll.
choices: List[:class:`~twitchio.PollChoice`]
The poll choices.
bits_voting_enabled: :class:`bool`
Indicates if Bits can be used for voting.
.. warning::
Twitch have removed support for voting with bits.
This will return as False
bits_per_vote: :class:`int`
Number of Bits required to vote once with Bits.
.. warning::
Twitch have removed support for voting with bits.
This will return as 0
channel_points_voting_enabled: :class:`bool`
Indicates if Channel Points can be used for voting.
channel_points_per_vote: :class:`int`
Number of Channel Points required to vote once with Channel Points.
status: :class:`str`
Poll status. Valid values: ACTIVE, COMPLETED, TERMINATED, ARCHIVED, MODERATED, INVALID
duration: :class:`int`
Total duration for the poll (in seconds).
started_at: :class:`datetime.datetime`
Date and time the poll was started.
ended_at: :class:`datetime.datetime`
Date and time the poll was ended.
"""
__slots__ = (
"id",
"broadcaster",
"title",
"choices",
"channel_points_voting_enabled",
"channel_points_per_vote",
"status",
"duration",
"started_at",
"ended_at",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: str = data["id"]
self.broadcaster = PartialUser(http, data["broadcaster_id"], data["broadcaster_login"])
self.title: str = data["title"]
self.choices: List[PollChoice] = [PollChoice(d) for d in data["choices"]] if data["choices"] else []
self.channel_points_voting_enabled: bool = data["channel_points_voting_enabled"]
self.channel_points_per_vote: int = data["channel_points_per_vote"]
self.status: str = data["status"]
self.duration: int = data["duration"]
self.started_at: datetime.datetime = parse_timestamp(data["started_at"])
try:
self.ended_at: Optional[datetime.datetime] = parse_timestamp(data["ended_at"])
except KeyError:
self.ended_at = None
def __repr__(self):
return f"<Polls id={self.id} broadcaster={self.broadcaster} title={self.title} status={self.status} duration={self.duration} started_at={self.started_at} ended_at={self.ended_at}>"
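# Illustrative sketch only (not part of the library): the keys below mirror the lookups in
# Poll.__init__, so a Helix-style payload can be wrapped directly, e.g. in tests. The
# ``http_client`` name is a stand-in for whatever TwitchHTTP instance is at hand.
#
#     payload = {
#         "id": "poll-1", "broadcaster_id": "123", "broadcaster_login": "streamer",
#         "title": "Favourite colour?", "choices": [], "channel_points_voting_enabled": False,
#         "channel_points_per_vote": 0, "status": "ACTIVE", "duration": 300,
#         "started_at": "2022-01-01T00:00:00Z",
#     }
#     poll = Poll(http_client, payload)  # poll.ended_at is None because the key is absent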
class PollChoice:
"""
Represents a polls choices
Attributes
-----------
id: :class:`str`
ID for the choice.
title: :class:`str`
Text displayed for the choice.
votes: :class:`int`
Total number of votes received for the choice across all methods of voting.
channel_points_votes: :class:`int`
Number of votes received via Channel Points.
bits_votes: :class:`int`
Number of votes received via Bits.
.. warning::
Twitch have removed support for voting with bits.
This will return as 0
"""
__slots__ = ("id", "title", "votes", "channel_points_votes", "bits_votes")
def __init__(self, data: dict):
self.id: str = data["id"]
self.title: str = data["title"]
self.votes: int = data["votes"]
self.channel_points_votes: int = data["channel_points_votes"]
self.bits_votes: int = data["bits_votes"]
def __repr__(self):
return f"<PollChoice id={self.id} title={self.title} votes={self.votes} channel_points_votes={self.channel_points_votes} bits_votes={self.bits_votes}>"
class Goal:
"""
Represents a list of Goals for a broadcaster / channel
Attributes
-----------
id: :class:`str`
An ID that uniquely identifies this goal.
broadcaster: :class:`~twitchio.PartialUser`
User of the broadcaster.
type: :class:`str`
The type of goal.
Valid values: follower, subscription, subscription_count, new_subscription and new_subscription_count.
description: :class:`str`
A description of the goal, if specified.
current_amount: :class:`int`
The current value.
target_amount: :class:`int`
        The goal's target value.
created_at: :class:`datetime.datetime`
Date and time of when the broadcaster created the goal.
"""
__slots__ = (
"id",
"broadcaster",
"type",
"description",
"current_amount",
"target_amount",
"created_at",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: str = data["id"]
self.broadcaster = PartialUser(http, data["broadcaster_id"], data["broadcaster_login"])
self.type: str = data["type"]
self.description: str = data["description"]
self.current_amount: int = data["current_amount"]
self.target_amount: int = data["target_amount"]
self.created_at: datetime.datetime = parse_timestamp(data["created_at"])
def __repr__(self):
return f"<Goal id={self.id} broadcaster={self.broadcaster} description={self.description} current_amount={self.current_amount} target_amount={self.target_amount} created_at={self.created_at}>"
class ChatSettings:
"""
Represents current chat settings of a broadcaster / channel
Attributes
-----------
broadcaster: :class:`~twitchio.PartialUser`
User of the broadcaster. Only returns the ID.
emote_mode: :class:`bool`
Indicates whether emote only mode is enabled.
follower_mode: :class:`bool`
Indicates whether follower only chat is enabled.
follower_mode_duration: Optional[:class:`int`]
The length of time, in minutes, that the followers must have followed the broadcaster to participate in chat.
slow_mode: :class:`bool`
Indicates whether the chat is in slow mode.
slow_mode_wait_time: Optional[:class:`int`]
The amount of time, in seconds, that users need to wait between sending messages.
subscriber_mode: :class:`bool`
Indicates whether only users that subscribe to the broadcaster's channel can talk in chat.
unique_chat_mode: :class:`bool`
Indicates whether the broadcaster requires users to post only unique messages in the chat room.
moderator: Optional[:class:`~twitchio.PartialUser`]
The User of the moderator, if provided. Only returns the ID.
non_moderator_chat_delay: Optional[:class:`bool`]
Indicates whether the broadcaster adds a short delay before chat messages appear in the chat room.
non_moderator_chat_delay_duration: Optional[:class:`int`]
The amount of time, in seconds, that messages are delayed from appearing in chat.
"""
__slots__ = (
"broadcaster",
"emote_mode",
"follower_mode",
"follower_mode_duration",
"slow_mode",
"slow_mode_wait_time",
"subscriber_mode",
"unique_chat_mode",
"moderator",
"non_moderator_chat_delay",
"non_moderator_chat_delay_duration",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.broadcaster = PartialUser(http, data["broadcaster_id"], None)
self.emote_mode: bool = data["emote_mode"]
self.follower_mode: bool = data["follower_mode"]
self.follower_mode_duration: Optional[int] = data.get("follower_mode_duration")
self.slow_mode: bool = data["slow_mode"]
self.slow_mode_wait_time: Optional[int] = data.get("slow_mode_wait_time")
self.subscriber_mode: bool = data["subscriber_mode"]
self.unique_chat_mode: bool = data["unique_chat_mode"]
self.non_moderator_chat_delay: Optional[bool] = data.get("non_moderator_chat_delay")
self.non_moderator_chat_delay_duration: Optional[int] = data.get("non_moderator_chat_delay_duration")
try:
self.moderator = PartialUser(http, data["moderator_id"], None)
except KeyError:
self.moderator = None
def __repr__(self):
return f"<ChatSettings broadcaster={self.broadcaster} emote_mode={self.emote_mode} follower_mode={self.follower_mode} slow_mode={self.slow_mode} subscriber_mode={self.subscriber_mode} unique_chat_mode={self.unique_chat_mode}>"
class ChatterColor:
"""
Represents chatters current name color.
Attributes
-----------
user: :class:`~twitchio.PartialUser`
PartialUser of the chatter.
color: :class:`str`
The color of the chatter's name.
"""
__slots__ = ("user", "color")
def __init__(self, http: "TwitchHTTP", data: dict):
self.user = PartialUser(http, data["user_id"], data["user_login"])
self.color: str = data["color"]
def __repr__(self):
return f"<ChatterColor user={self.user} color={self.color}>"
class Raid:
"""
Represents a raid for a broadcaster / channel
Attributes
-----------
created_at: :class:`datetime.datetime`
Date and time of when the raid started.
is_mature: :class:`bool`
Indicates whether the stream being raided is marked as mature.
"""
__slots__ = ("created_at", "is_mature")
def __init__(self, data: dict):
self.created_at: datetime.datetime = parse_timestamp(data["created_at"])
self.is_mature: bool = data["is_mature"]
def __repr__(self):
return f"<Raid created_at={self.created_at} is_mature={self.is_mature}>"
class Ban:
"""
Represents a ban for a broadcaster / channel
Attributes
-----------
broadcaster: :class:`~twitchio.PartialUser`
The broadcaster whose chat room the user was banned from chatting in.
moderator: :class:`~twitchio.PartialUser`
The moderator that banned the user.
user: :class:`~twitchio.PartialUser`
The user that was banned.
created_at: :class:`datetime.datetime`
Date and time of when the ban was created.
"""
__slots__ = ("broadcaster", "moderator", "user", "created_at")
def __init__(self, http: "TwitchHTTP", data: dict):
self.broadcaster = PartialUser(http, data["broadcaster_id"], None)
self.moderator = PartialUser(http, data["moderator_id"], None)
self.user = PartialUser(http, data["user_id"], None)
self.created_at: datetime.datetime = parse_timestamp(data["created_at"])
def __repr__(self):
return f"<Ban broadcaster={self.broadcaster} user={self.user} created_at={self.created_at}>"
class Timeout:
"""
Represents a timeout for a broadcaster / channel
Attributes
-----------
broadcaster: :class:`~twitchio.PartialUser`
The broadcaster whose chat room the user was timed out from chatting in.
moderator: :class:`~twitchio.PartialUser`
The moderator that timed the user out.
user: :class:`~twitchio.PartialUser`
The user that was timed out.
created_at: :class:`datetime.datetime`
Date and time of when the timeout was created.
end_time: :class:`datetime.datetime`
Date and time of when the timeout will end.
"""
__slots__ = ("broadcaster", "moderator", "user", "created_at", "end_time")
def __init__(self, http: "TwitchHTTP", data: dict):
self.broadcaster = PartialUser(http, data["broadcaster_id"], None)
self.moderator = PartialUser(http, data["moderator_id"], None)
self.user = PartialUser(http, data["user_id"], None)
self.created_at: datetime.datetime = parse_timestamp(data["created_at"])
self.end_time: datetime.datetime = parse_timestamp(data["end_time"])
def __repr__(self):
return f"<Timeout broadcaster={self.broadcaster} user={self.user} created_at={self.created_at} end_time={self.end_time}>"
class ShieldStatus:
"""
Represents a Shield Mode activation status.
Attributes
-----------
moderator: :class:`~twitchio.PartialUser`
The moderator that last activated Shield Mode.
display_name: :class:`str`
The moderator's display name. Is an empty string if Shield Mode hasn't been previously activated.
last_activated_at: :class:`datetime.datetime`
The UTC datetime of when Shield Mode was last activated.
        Is ``None`` if Shield Mode hasn't been previously activated.
is_active: :class:`bool`
A Boolean value that determines whether Shield Mode is active.
Is true if the broadcaster activated Shield Mode; otherwise, false.
"""
__slots__ = ("moderator", "display_name", "last_activated_at", "is_active")
def __init__(self, http: "TwitchHTTP", data: dict):
self.moderator: Optional[PartialUser] = (
PartialUser(http, data["moderator_id"], data["moderator_login"]) if data["moderator_id"] else None
)
self.display_name: Optional[str] = data.get("moderator_name")
self.is_active: bool = data["is_active"]
self.last_activated_at: Optional[datetime.datetime] = (
parse_timestamp(data["last_activated_at"]) if data["last_activated_at"] else None
)
def __repr__(self):
return f"<ShieldStatus moderator={self.moderator} is_active={self.is_active} last_activated_at={self.last_activated_at}>"
class ChatBadge:
"""
Represents chat badges.
Attributes
-----------
set_id: :class:`str`
An ID that identifies this set of chat badges. For example, Bits or Subscriber.
versions: List[:class:`~twitchio.ChatBadgeVersions`]
The list of chat badges in this set.
"""
__slots__ = ("set_id", "versions")
def __init__(self, data: dict):
self.set_id: str = data["set_id"]
self.versions: List[ChatBadgeVersions] = [ChatBadgeVersions(version_data) for version_data in data["versions"]]
def __repr__(self):
return f"<ChatBadge set_id={self.set_id} versions={self.versions}>"
class ChatBadgeVersions:
"""
Represents the different versions of the chat badge.
Attributes
-----------
id: :class:`str`
An ID that identifies this version of the badge. The ID can be any value.
image_url_1x: :class:`str`
URL to the small version (18px x 18px) of the badge.
image_url_2x: :class:`str`
URL to the medium version (36px x 36px) of the badge.
image_url_4x: :class:`str`
URL to the large version (72px x 72px) of the badge.
title: :class:`str`
The title of the badge.
description: :class:`str`
The description of the badge.
click_action: Optional[:class:`str`]
The action to take when clicking on the badge. This can be None if no action is specified
click_url: Optional[:class:`str`]
The URL to navigate to when clicking on the badge. This can be None if no URL is specified.
"""
__slots__ = (
"id",
"image_url_1x",
"image_url_2x",
"image_url_4x",
"title",
"description",
"click_url",
"click_action",
)
def __init__(self, data: dict):
self.id: str = data["id"]
self.image_url_1x: str = data["image_url_1x"]
self.image_url_2x: str = data["image_url_2x"]
self.image_url_4x: str = data["image_url_4x"]
self.title: str = data["title"]
self.description: str = data["description"]
self.click_action: Optional[str] = data.get("click_action")
self.click_url: Optional[str] = data.get("click_url")
def __repr__(self):
return f"<ChatBadgeVersions id={self.id} title={self.title}>"
class ContentClassificationLabel:
"""
Represents a Content Classification Label.
Attributes
-----------
id: :class:`str`
Unique identifier for the CCL.
description: :class:`str`
Localized description of the CCL.
name: :class:`str`
Localized name of the CCL.
"""
__slots__ = ("id", "description", "name")
def __init__(self, data: dict):
self.id: str = data["id"]
self.description: str = data["description"]
self.name: str = data["name"]
def __repr__(self):
return f"<ContentClassificationLabel id={self.id}>"
class CharityValues:
"""
Represents the current/target funds of a charity campaign.
Attributes
-----------
value: :class:`int`
The value of the campaign (either so far, or the target value).
decimal_places: :class:`int`
The decimal places to be inserted into :attr:`.value`.
currency: :class:`str`
The currency this charity is raising funds in. eg ``USD``, ``GBP``, ``EUR``.
"""
__slots__ = ("value", "decimal_places", "currency")
def __init__(self, data: dict) -> None:
self.value: int = data["value"]
self.decimal_places: int = data["decimal_places"]
self.currency: str = data["currency"]
def __repr__(self) -> str:
return f"<CharityValues value={self.value} decimal_places={self.decimal_places} currency={self.currency}>"
class CharityCampaign:
"""
Represents a Charity Campaign on a channel.
Attributes
-----------
campaign_id: :class:`str`
The ID of the running charity campaign.
broadcaster: :class:`~twitchio.PartialUser`
The broadcaster running the campaign.
user: :class:`~twitchio.PartialUser`
The user who donated.
charity_name: :class:`str`
The name of the charity.
charity_description: :class:`str`
The description of the charity.
charity_logo: :class:`str`
The logo of the charity.
charity_website: :class:`str`
        The website of the charity.
current: :class:`CharityValues`
The current funds raised by this campaign.
target: :class:`CharityValues`
The target funds to be raised for this campaign.
"""
__slots__ = (
"campaign_id",
"broadcaster",
"charity_name",
"charity_description",
"charity_logo",
"charity_website",
"current",
"target",
)
def __init__(self, data: dict, http: TwitchHTTP, broadcaster: PartialUser | None = None) -> None:
self.campaign_id: str = data["campaign_id"]
self.broadcaster: PartialUser = broadcaster or PartialUser(
http, data["broadcaster_id"], data["broadcaster_name"]
)
self.charity_name: str = data["charity_name"]
self.charity_description: str = data["charity_description"]
self.charity_logo: str = data["charity_logo"]
self.charity_website: str = data["charity_website"]
self.current: CharityValues = CharityValues(data["current_amount"])
self.target: CharityValues = CharityValues(data["target_amount"])
def __repr__(self) -> str:
return f"<CharityCampaign broadcaster={self.broadcaster} campaign_id={self.campaign_id} charity_name={self.charity_name}>"
|
PythonistaGuild/TwitchIO
|
twitchio/models.py
|
models.py
|
py
| 69,250
|
python
|
en
|
code
| 714
|
github-code
|
6
|
25441389301
|
# -*- coding: utf-8 -*-
from PIL import Image,ImageFont,ImageDraw
import json
import cover
import time
from io import BytesIO
def paste_with_a(base_img_, img_, pos):
if 4 == len(img_.split()):
r,g,b,a = img_.split()
base_img_.paste(img_, pos,mask=a)
else:
base_img_.paste(img_, pos)
def drawRoundRec(drawObject, x, y, w, h, r, fill_color):
    # draw the four rounded corners as quarter circles (via full ellipses)
drawObject.ellipse((x, y, x + r, y + r), fill=fill_color)
drawObject.ellipse((x + w - r, y, x + w, y + r), fill=fill_color)
drawObject.ellipse((x, y + h - r, x + r, y + h), fill=fill_color)
drawObject.ellipse((x + w - r, y + h - r, x + w, y + h), fill=fill_color)
    # fill the body with two overlapping rectangles, inset by the corner radius
drawObject.rectangle((x + r / 2, y, x + w - (r / 2), y + h), fill=fill_color)
drawObject.rectangle((x, y + r / 2, x + w, y + h - (r / 2)), fill=fill_color)
def draw_plate_arc_style(img_, dic_):
draw = ImageDraw.Draw(img_)
# draw 1
# draw cover
    # @brief
    # The wiki URL replaces " " with "_" in song titles.
    # The rating image keeps " " for display, but "_" is used to download the cover,
    # so cover.download_cover() also saves the image with "_" (it receives "_" as its parameter).
wiki_title = dic_["title"].replace(" ", "_")
if 0 < cover.download_cover(wiki_title):
cover_img = Image.open("./cover/"+wiki_title+".png")
else:
cover_img = Image.open("./res/" + "gd" + ".png")
cover_img = cover_img.resize((250, 250), Image.ANTIALIAS)
paste_with_a(img_,cover_img,(25,25))
# draw rating base
# master 159 81 220
draw.polygon((275, 25, 525, 25, 575, 75, 275, 75), diff_color[dic_["level_label"]])
# write dx rating
draw.text((275 + 20 , 25 + 5), " "+str(dic_["ra"])+" (" + str(dic_["ds"]) + ")", font=ImageFont.truetype('C:/windows/fonts/Dengb.ttf', 40), fill="#ffffff")
    # write the best-N ranking position (#1, #2, ...); uses the module-level loop index `i` from __main__
draw.text((800 , 25 + 8), "#" + str(i+1), font=ImageFont.truetype('C:/windows/fonts/Dengb.ttf', 45), fill="#000000")
print(str(i))
# draw 2
# write title
draw.text((275 + 20, 25+50+25), dic_["title"], font=ImageFont.truetype('C:/windows/fonts/Deng.ttf', 48), fill="#000000")
# draw 3
# write score
draw.text((275 + 20, 25 + 50 + 25 + 60), str(dic_["achievements"]) + "%", font=ImageFont.truetype('C:/windows/fonts/ALGER.TTF', 58),fill="#000000")
# draw score rank "rate": "sssp"
score_rank_img = Image.open("./res/" + dic_["rate"] + ".png")
paste_with_a(img_, score_rank_img, (625, 25 + 50 + 25 + 60 - 20))
#draw 4
#draw type "type": "SD"
music_type_img = Image.open("./res/" + dic_["type"] + ".png")
paste_with_a(img_, music_type_img, (275 + 20, 25 + 50 + 25 + 60 + 90))
#draw fc "fc": "fcp"
if len(dic_["fc"]) > 0:
fc_img = Image.open("./res/" + dic_["fc"] + ".png")
else:
fc_img = Image.open("./res/" + "fc_dummy" + ".png")
paste_with_a(img_, fc_img, (275 + 20 + 150, 25 + 50 + 25 + 60 + 90 - 8))
# #draw fs "fs": ""
if len(dic_["fs"]) > 0:
fs_img = Image.open("./res/" + dic_["fs"] + ".png")
else:
fs_img = Image.open("./res/" + "fs_dummy" + ".png")
paste_with_a(img_, fs_img, (275 + 20 + 150 + 150 , 25 + 50 + 25 + 60 + 90 - 15))
def draw_name_pad_mai_style(base_img_):
# draw name pad
# load res
name_pad_img = Image.open("./res/name_pad/"+user_dic["name_pad"]+".png")
name_pad_img.convert('RGBA')
name_pad_img = name_pad_img.resize((1800, 300), Image.ANTIALIAS)
# draw ava
ava_img = Image.open("./res/ava/"+user_dic["ava"]+".png")
ava_img = ava_img.resize((250, 260), Image.ANTIALIAS)
paste_with_a(name_pad_img, ava_img, (20,20))
# draw rating base
rating_base_img = Image.open("./res/rating_base_rainbow.png")
rating_base_img = rating_base_img.resize((425, 85), Image.ANTIALIAS)
# write rating
draw = ImageDraw.Draw(rating_base_img)
ra_sum = ra_sum_sd + ra_sum_dx21
ra_sum_list = []
ra_pos_list = [(364 + 6,18),(321+ 6,18),(275+ 6,18),(228+ 6,18),(188+ 6,18)] # max 99999
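    # Peel ra_sum apart digit by digit (ones place first) and stamp each digit at a fixed
    # x position from ra_pos_list, right to left, so the number stays right-aligned on the
    # rating base (up to 5 digits).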
while 1:
r = ra_sum%10
ra_sum_list.append(r)
ra_sum = int(ra_sum/10)
if 0 == ra_sum:
break
for i in range(len(ra_sum_list)):
draw.text(ra_pos_list[i], str(ra_sum_list[i]), font=ImageFont.truetype('C:/windows/fonts/BAUHS93.TTF', 42), fill="#eedd00")
# paste rating base
paste_with_a(name_pad_img,rating_base_img,(20 + 250 + 10, 20))
# draw mai_logo
maimai_img = Image.open("./res/logo.png")
maimai_img = maimai_img.resize((int(110 * 16 / 9), 110), Image.ANTIALIAS)
paste_with_a(name_pad_img,maimai_img,(20 + 250 + 10 + 425 + 10, 5))
# draw name base
name_base_img = Image.new('RGBA', (900 - 225, 105), (255, 255, 255, 0))
# write name
draw = ImageDraw.Draw(name_base_img)
drawRoundRec(draw,0,0,900 - 225, 105,25,"#666666")
drawRoundRec(draw, 3, 3, 900 - 225-6, 105-6, 25, "#ffffff")
draw.text((10 , 10), user_dic["name"], font=ImageFont.truetype('C:/windows/fonts/ALGER.TTF', 72), fill="#000000")
# paste name base
paste_with_a(name_pad_img,name_base_img, (20 + 250 + 10, 20 + 85 + 5))
#draw trophy
trophy_img = Image.open("./res/trophy.png")
trophy_img = trophy_img.resize((900 - 225, 60), Image.ANTIALIAS)
# write rating on trophy
draw = ImageDraw.Draw(trophy_img)
# draw.text((20, 7), "Standard:2222 DX2021:3333", font=ImageFont.truetype('C:/windows/fonts/Dengb.ttf', 40), fill="#333333")
draw.text((20, 7), "Kakinuma/maimai_DX_rating_image", font=ImageFont.truetype('C:/windows/fonts/Dengb.ttf', 38), fill="#333333")
# paste trophy
paste_with_a(name_pad_img,trophy_img,(20 + 250 + 10, 20 + 85 + 5 + 105 +5))
#paste name_pad
paste_with_a(base_img_,name_pad_img,(plate_edge,plate_edge))
if __name__ == '__main__':
# load user
user_dic = {"name":"rain","ava":"rain","name_pad":"150603"}
# load json
with open("./data/"+user_dic["name"]+".json", 'r', encoding='utf-8') as load_f:
load_dict = json.load(load_f)
record_list = load_dict["records"]
record_sd_list = []
record_dx21_list = []
for r in record_list:
if r["is_new"]:
record_dx21_list.append(r)
else:
record_sd_list.append(r)
record_sd_list = sorted(record_sd_list, key=lambda e: e.__getitem__('ra'), reverse=True)
record_dx21_list = sorted(record_dx21_list, key=lambda e: e.__getitem__('ra'), reverse=True)
record_sd_num = len(record_sd_list)
if record_sd_num > 25 :
record_sd_num = 25
record_dx21_num = len(record_dx21_list)
if record_dx21_num > 15 :
record_dx21_num = 15
ra_sum_sd = 0
ra_sum_dx21 = 0
# define
template_dic = {"title": "林檎華憐歌", "level": "11+", "level_index": 3, "level_label": "Master", "type": "SD", "dxScore": 1886, "achievements": 100.6206, "rate": "sssp", "fc": "fcp", "fs": "", "ra": 166, "ds": 11.8, "song_id": "322", "is_new": "false"}
diff_color = {"Master":"#9f51dc","Expert":"#ff7b7b","Advanced":"#00ffff","Re:MASTER":"#c495ea"}
plate_interval = 60
plate_edge = 90
plate_width = 900
plate_height = 300
# load base = 3*14 plate
base_img = Image.new('RGBA', (plate_width*3+plate_interval*2+plate_edge*2, plate_height*14+plate_interval*13+plate_edge*2),(81,188,243,255))
# merge sd plate to base
plate_startX = plate_edge
plate_startY = plate_edge + plate_height + plate_interval
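    # Standard (best 25) plates fill a two-column grid below the name pad:
    # even indices go to the left column, odd indices to the right, two plates per row.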
for i in range(record_sd_num):
plate_img = Image.new('RGBA', (900, 300), (255, 255, 255, 0))
plate_img.convert('RGBA')
draw = ImageDraw.Draw(plate_img)
drawRoundRec(draw, 0, 0, 900,300, 50, "#aaaaaa")
drawRoundRec(draw, 3, 3, 900 - 6, 300 - 6, 50, "#ffffff")
draw_plate_arc_style(plate_img, record_sd_list[i])
ra_sum_sd += record_sd_list[i]["ra"]
x = plate_startX + i%2 * (plate_width + plate_interval)
y = plate_startY + int(i/2) * (plate_height + plate_interval)
paste_with_a(base_img,plate_img,(x, y))
print("SD",i,x,y)
# merge dx plate to base
plate_startX = plate_edge + plate_width + plate_interval + plate_width + plate_interval
plate_startY = plate_edge
for i in range(record_dx21_num):
plate_img = Image.new('RGBA', (900, 300), (255, 255, 255, 0))
plate_img.convert('RGBA')
draw = ImageDraw.Draw(plate_img)
drawRoundRec(draw, 0, 0, 900, 300, 50, "#aaaaaa")
drawRoundRec(draw, 3, 3, 900 - 6, 300 - 6, 50, "#ffffff")
draw_plate_arc_style(plate_img, record_dx21_list[i])
ra_sum_dx21 += record_dx21_list[i]["ra"]
x = plate_startX
y = plate_startY + i * (plate_height + plate_interval)
if 14==i :
# DX15 move to left*1 up*1 to align
x -= (plate_width + plate_interval)
y -= (plate_height + plate_interval)
paste_with_a(base_img,plate_img,(x, y))
print("DX",i,x, y)
draw_name_pad_mai_style(base_img)
base_img.save("./out.png")
print(ra_sum_sd,ra_sum_dx21)
|
kakinumaCN/maimai_DX_rating_image
|
main.py
|
main.py
|
py
| 9,086
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7713955808
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask
myApp = Flask(__name__)
@myApp.route('/')
def bonjour():
message = 'Bonjour, je suis Ramy \n'
return message
if __name__ == '__main__':
myApp.run(host='0.0.0.0', port=8080)
|
RMDHMN/pythonFlash_testing
|
app.py
|
app.py
|
py
| 254
|
python
|
en
|
code
| 1
|
github-code
|
6
|
25009445303
|
import datetime
import re
from random import shuffle
from collections import defaultdict
from django.utils.translation import (
activate,
get_language_info,
get_language,
)
from django import http
from django.shortcuts import render
from django.core.cache import cache
from django.conf import settings
from django.utils.translation import get_language
from django.utils.translation import ugettext as _
from django.utils import timezone
try:
from nltk.corpus import wordnet
except ImportError:
wordnet = None
from kl.search.models import Word, Search
SEARCH_SUMMARY_SKIPS = (
'crossword', 'korsord', 'fuck', 'peter', 'motherfucker',
)
def niceboolean(value):
if isinstance(value, bool):
return value
falseness = ('', 'no', 'off', 'false', 'none', '0', 'f')
return str(value).lower().strip() not in falseness
def uniqify(seq, idfun=None):  # order preserving; based on Alex Martelli's recipe
if idfun is None:
def idfun(x): return x
seen = set()
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
##if marker in seen: continue
if marker in seen:
continue
seen.add(marker)
result.append(item)
return result
class SearchResult(object):
def __init__(self, word, definition='', by_clue=None):
self.word = word
self.definition = definition
self.by_clue = by_clue
def home(request, json=False, record_search=True):
# By default we are set to record the search in our stats
# This can be overwritten by a CGI variable called 'r'
# E.g. r=0 or r=no
if request.GET.get('r'):
record_search = niceboolean(request.GET.get('r'))
language = request.GET.get('lang', get_language()).lower()
slots = None
if request.GET.get('l'):
try:
length = int(request.GET.get('l'))
except ValueError:
return http.HttpResponseRedirect('/?error=length')
slots = request.GET.getlist('s')
# if not type(slots) is list:
if not isinstance(slots, list):
return http.HttpResponseRedirect('/?error=slots')
notletters = request.GET.get('notletters', '').upper()
notletters = [x.strip() for x in notletters.split(',')
if len(x.strip()) == 1 and not x.strip().isdigit()]
if not len(slots) >= length:
return http.HttpResponseRedirect('/?error=slots&error=length')
if not [x for x in slots if x.strip()]:
# all blank
return http.HttpResponseRedirect('/?error=slots')
clues = request.GET.get('clues', '')
if clues and ' ' in clues and ',' not in clues:
clues = clues.replace(' ',', ')
clues = [
x.strip() for x in clues.split(',')
if (
x.strip() and
x.strip().lower() not in STOPWORDS and
not x.count(' ')
)
]
search_results = [] # the final simple list that is sent back
for clue in clues:
alternatives = _find_alternative_synonyms(
clue,
slots[:length],
language,
notletters=notletters,
request=request
)
search_results.extend([
SearchResult(x, by_clue=clue)
for x in alternatives
])
# find some alternatives
search = ''.join([x and x.lower() or ' ' for x in slots[:length]])
cache_key = '_find_alternatives_%s_%s' % (search, language)
if notletters:
cache_key += '__not' + ''.join(notletters)
cache_key = cache_key.replace(' ','_')
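    # Blank slots would leave spaces in the cache key, which cache backends such as
    # memcached reject, so they are encoded as underscores; the check below guards
    # against any other stray whitespace.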
    if re.findall(r'\s', cache_key):
raise ValueError(
'invalid cache_key search=%r, language=%r' % (search, language)
)
alternatives = cache.get(cache_key)
if alternatives is None:
alternatives = _find_alternatives(
slots[:length],
language,
notletters=notletters
)
cache.set(cache_key, alternatives, 60 * 60 * 24)
alternatives_count = len(alternatives)
alternatives_truncated = False
if alternatives_count > 100:
alternatives = alternatives[:100]
alternatives_truncated = True
result = dict(
length=length,
search=search,
word_count=alternatives_count,
alternatives_truncated=alternatives_truncated,
)
already_found = [x.word for x in search_results]
search_results.extend([
SearchResult(each.word, definition=each.definition)
for each in alternatives
if each.word not in already_found
])
    match_points = []
if search_results:
first_word = search_results[0].word
for i, letter in enumerate(first_word):
if letter.lower() == search[i]:
match_points.append(1)
else:
match_points.append(0)
result['match_points'] = match_points
result['words'] = []
for search_result in search_results:
v = dict(word=search_result.word)
if search_result.definition:
v['definition'] = search_result.definition
if search_result.by_clue:
v['by_clue'] = search_result.by_clue
result['words'].append(v)
if alternatives_count == 1:
result['match_text'] = _("1 match found")
elif alternatives_count:
if alternatives_truncated:
result['match_text'] = _(
"%(count)s matches found but only showing first 100"
) % dict(count=alternatives_count)
else:
result['match_text'] = _("%(count)s matches found") % dict(
count=alternatives_count
)
else:
result['match_text'] = _("No matches found unfortunately :(")
found_word = None
if len(search_results) == 1:
try:
found_word = Word.objects.get(
word=search_results[0].word,
language=language
)
except Word.DoesNotExist:
# this it was probably not from the database but
# from the wordnet stuff
found_word = None
if record_search:
_record_search(
search,
user_agent=request.META.get('HTTP_USER_AGENT',''),
ip_address=request.META.get('REMOTE_ADDR',''),
found_word=found_word,
language=language
)
request.session['has_searched'] = True
if json:
return http.JsonResponse(result)
# return _render_json(result)
else:
length = ''
show_example_search = not bool(request.session.get('has_searched'))
most_recent_search_word = None
if not show_example_search:
most_recent_search_word = _get_recent_search_word(request)
lang = get_language()
accept_clues = (
wordnet is not None and lang.lower() in ('en', 'en-gb', 'en-us')
)
context = {
'length': length,
'slots': slots,
'accept_clues': accept_clues,
'show_example_search': show_example_search,
'most_recent_search_word': most_recent_search_word,
}
return render(request, 'search/home.html', context)
def _find_alternatives(slots, language, notletters=[]):
length = len(slots)
if length == 1:
return Word.objects.filter(length=1, word=slots[0], language=language)
filter_ = dict(length=length, language=language)
slots = [x and x.lower() or ' ' for x in slots]
search = ''.join(slots)
start = ''
end = ''
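    # Letters known at the very start/end of the pattern become cheap indexed filters
    # (first1/first2, last1/last2 plus istartswith/iendswith) so the database narrows the
    # candidate words before the per-word blank matching in filter_match below.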
try:
        start = re.findall(r'^\w+', search)[0]
if len(start) > 1:
filter_['first2'] = start[:2].lower()
if len(start) > 2:
filter_['word__istartswith'] = start
else:
filter_['first1'] = start.lower()
except IndexError:
pass
try:
        end = re.findall(r'\w+$', search)[0]
if len(end) > 1:
filter_['last2'] = end[-2:].lower()
if len(end) > 2:
filter_['word__iendswith'] = end
else:
filter_['last1'] = end.lower()
except IndexError:
pass
def filter_match(match):
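        # Compare only the middle of the pattern; the prefix/suffix are already guaranteed
        # by the database filter, and a blank slot (' ') matches any letter.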
if end:
matchable_string = search[len(start):-len(end)]
found_string = match.word[len(start):-len(end)]
else:
matchable_string = search[len(start):]
found_string = match.word[len(start):]
assert len(matchable_string) == len(found_string), \
"matchable_string=%r, found_string=%r" % (matchable_string, found_string)
for i, each in enumerate(matchable_string):
if each != ' ' and each != found_string[i]:
# can't be match
return False
return True
search_base = Word.objects
limit = 10000
# if the filter is really vague and the length is high we're going to get
# too many objects and we need to cut our losses.
if filter_['length'] > 5:
if filter_.get('word__istartswith') and filter_.get('word__iendswith'):
# It's long but has a startswith and an endswith, increase the limit
limit = 5000
elif filter_.get('word__istartswith') or filter_.get('word__iendswith'):
# we're going to get less than above but still many
limit = 2500
else:
limit = 1000
# if there's neither a start or a end (e.g. '_E_E_A_') it will get all words
# that are of that length then end truncate the result set then filter them
# as a string operation. Then there's a chance it might not ever test word we
# are looking for.
if not start and not end:
# must come up with some other crazy icontains filter
# Look for the longest lump of letter. For example in '_E_ERA_' 'era' is
# the longest lump
#lumps = re.findall('\w+', search)
lumps = search.split()
        longest = sorted(lumps, key=len, reverse=True)[0]  # longest lump first
if len(longest) > 1:
filter_['word__icontains'] = longest
else:
for each in uniqify(lumps):
search_base = search_base.filter(word__icontains=each)
limit = search_base.filter(**filter_).order_by('word').count()
elif (start and len(start) <= 2) or (end and len(end) <= 2):
# If you search for somethin like "___TAM__T"
# We so far only know it's 9 characters long (french as 21k 9 characters long
# words).
# We also have one tiny little 't' at the end but there's still
# 4086 options
for lump in re.findall(r'\s(\w+)\s', search):
filter_['word__icontains'] = lump
search_qs = search_base.filter(**filter_)
for notletter in notletters:
search_qs = search_qs.exclude(word__icontains=notletter)
all_matches = [x for x
in search_qs.order_by('word')[:limit]
if filter_match(x)]
return uniqify(all_matches, lambda x: x.word.lower())
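# Illustrative call (hypothetical values): _find_alternatives(['c', '', 'o', '', 's'], 'en-us')
# would return Word rows such as "cross", i.e. five-letter words whose letters agree with
# every filled slot, de-duplicated case-insensitively.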
def _find_alternative_synonyms(
word,
slots,
language,
notletters=None,
request=None
):
length = len(slots)
if notletters is None:
notletters = []
slots = [x and x.lower() or ' ' for x in slots]
search = ''.join(slots)
start = ''
end = ''
try:
        start = re.findall(r'^\w+', search)[0]
except IndexError:
pass
try:
        end = re.findall(r'\w+$', search)[0]
except IndexError:
pass
def filter_match(word):
if end and not word.endswith(end):
# Don't even bother
return False
elif start and not word.startswith(start):
# Don't even bother
return False
if end:
matchable_string = search[len(start):-len(end)]
found_string = word[len(start):-len(end)]
else:
matchable_string = search[len(start):]
found_string = word[len(start):]
assert len(matchable_string) == len(found_string)
for i, each in enumerate(matchable_string):
if each != ' ' and each != found_string[i]:
# can't be match
return False
return True
def test(word):
if len(word) == length:
            if notletters:  # only reject words when there are letters to exclude
for letter in word:
if letter.upper() in notletters:
return False
return filter_match(word)
for variation in _get_variations(word, greedy=True, request=request):
if test(variation):
yield variation
def _get_variations(word, greedy=False,
store_definitions=True,
request=None):
a = _get_variations_wordnet(
word,
greedy=greedy,
store_definitions=store_definitions
)
return a
# b = _get_variations_synonym_dot_com(
# word,
# greedy=greedy,
# store_definitions=store_definitions,
# request=request
# )
# return a + b
def _record_search(
search_word,
user_agent='',
ip_address='',
found_word=None,
language=None,
search_type='',
):
if len(user_agent) > 200:
user_agent = user_agent[:200]
if len(ip_address) > 15:
import warnings
warnings.warn("ip_address too long (%r)" % ip_address)
ip_address = ''
elif ip_address == '127.0.0.1' and settings.DEBUG:
# because 127.0.0.1 can't be looked up, use a random other one
examples = '125.239.15.42,114.199.97.224,68.190.165.25,208.75.100.212,'\
'61.29.84.154,72.49.16.234,66.57.228.64,196.25.255.250,'\
'141.117.6.97,85.68.18.183,90.157.186.202'.split(',')
shuffle(examples)
ip_address = examples[0]
Search.objects.create(
search_word=search_word,
user_agent=user_agent.strip(),
ip_address=ip_address.strip(),
found_word=found_word,
language=language,
search_type=search_type,
)
def _get_recent_search_word(request):
# _today = datetime.datetime.today()
_today = timezone.now()
_since = datetime.datetime(_today.year, _today.month, 1)
_extra_exclude = dict(found_word__word__in=list(SEARCH_SUMMARY_SKIPS))
if request.META.get('HTTP_USER_AGENT'):
_extra_exclude['user_agent'] = request.META.get('HTTP_USER_AGENT')
if request.META.get('REMOTE_ADDR'):
_extra_exclude['ip_address'] = request.META.get('REMOTE_ADDR')
_extra_filter = dict()
# Special hack! Since the search summary has a cache of 1 hour,
# don't include things that are too recent
_extra_filter['add_date__lt'] = _today - datetime.timedelta(hours=1)
return _find_recent_search_word(
get_language(),
since=_since,
random=True,
extra_exclude=_extra_exclude,
**_extra_filter,
)
def _find_recent_search_word(
language,
since=None,
random=False,
extra_exclude={},
**extra_filter
):
searches = Search.objects.filter(
language=language,
found_word__isnull=False,
**extra_filter
).select_related('found_word')
if since:
searches = searches.filter(add_date__gte=since)
searches = searches.exclude(**extra_exclude)
if random:
# For some bizzare reason it seems that even if the exclude above
# has found_word__word__in=SEARCH_SUMMARY_SKIPS it still returns
# words from that list!!!!
# Hence this list comprehension.
found_words = [x.found_word for x in searches
if x.found_word.word not in SEARCH_SUMMARY_SKIPS]
shuffle(found_words)
try:
return found_words[0]
except IndexError:
return None
else:
searches = searches.order_by('-add_date')
return searches[0].found_word
return None
def get_search_stats(language):
# Total no words in our database
cache_key = 'no_total_words_%s' % language
no_total_words = cache.get(cache_key)
if no_total_words is None:
no_total_words = Word.objects.filter(language=language).count()
cache.set(cache_key, no_total_words, 60 * 60 * 24 * 30)
today = timezone.now()
# Searches today
# today_midnight = datetime.datetime(
# today.year,
# today.month,
# today.day, 0, 0, 0)
today_midnight = today - datetime.timedelta(days=1)
cache_key = 'no_searches_today_%s' % language
no_searches_today = cache.get(cache_key)
if no_searches_today is None:
no_searches_today = Search.objects.filter(
language=language,
add_date__gte=today_midnight
).count()
cache.set(cache_key, no_searches_today, 60 * 60)
# Searches yesterday
cache_key = 'no_searches_yesterday_%s' % language
no_searches_yesterday = cache.get(cache_key)
if no_searches_yesterday is None:
yesterday_midnight = today_midnight - datetime.timedelta(days=1)
no_searches_yesterday = Search.objects.filter(language=language,
add_date__range=(yesterday_midnight, today_midnight)
).count()
cache.set(cache_key, no_searches_yesterday, 60 * 60 * 24)
# Searches this week
cache_key = 'no_searches_this_week_%s' % language
no_searches_this_week = cache.get(cache_key)
if no_searches_this_week is None:
# find the first monday
monday_midnight = today_midnight
while monday_midnight.strftime('%A') != 'Monday':
monday_midnight = monday_midnight - datetime.timedelta(days=1)
no_searches_this_week = Search.objects.filter(
language=language,
add_date__gt=monday_midnight
).count()
cache.set(cache_key, no_searches_this_week, 60 * 60 * 24)
# Searches this month
cache_key = 'no_searches_this_month_%s' % language
no_searches_this_month = cache.get(cache_key)
if no_searches_this_month is None:
first_day_month = today.replace(day=1)
no_searches_this_month = Search.objects.filter(
language=language,
add_date__gte=first_day_month
).count()
cache.set(cache_key, no_searches_this_month, 60 * 60)
# Searches this year
cache_key = 'no_searches_this_year_%s' % language
no_searches_this_year = cache.get(cache_key)
if no_searches_this_year is None:
# first_day_year = datetime.datetime(today.year, 1, 1, 0, 0, 0)
first_day_year = today.replace(month=1, day=1)
no_searches_this_year = Search.objects.filter(
language=language,
add_date__gte=first_day_year
).count()
cache.set(cache_key, no_searches_this_year, 60 * 60)
return {
'no_total_words': no_total_words,
'no_searches_today': no_searches_today,
'no_searches_yesterday': no_searches_yesterday,
'no_searches_this_week': no_searches_this_week,
'no_searches_this_month': no_searches_this_month,
'no_searches_this_year': no_searches_this_year,
}
MONTH_NAMES = []
for i in range(1, 13):
d = datetime.date(2009, i, 1)
MONTH_NAMES.append(d.strftime('%B'))
def searches_summary(request, year, month, atleast_count=2,
lookup_definitions=False):
first_search_date = Search.objects.all().order_by('add_date')[0].add_date
last_search_date = Search.objects.all().order_by('-add_date')[0].add_date
year = int(year)
try:
month_nr = [x.lower() for x in MONTH_NAMES].index(month.lower()) + 1
except ValueError:
raise http.Http404("Unrecognized month name")
# turn that into a date
since = datetime.date(year, month_nr, 1)
if (month_nr + 1) > 12:
since_month_later = datetime.date(year+1, 1, 1)
else:
since_month_later = datetime.date(year, month_nr+1, 1)
today = timezone.now()
since_month_later_datetime = today.replace(
year=since_month_later.year,
month=since_month_later.month,
day=since_month_later.day
)
next_month_link = None
if since_month_later_datetime < first_search_date:
raise http.Http404("Too far back in time")
if since_month_later_datetime < last_search_date:
next_month_link = since_month_later.strftime("/searches/%Y/%B/")
since_datetime = today.replace(
year=since.year,
month=since.month,
day=since.day
)
previous_month_link = None
if since_datetime > last_search_date:
raise http.Http404("Too far into the future")
elif since_datetime > first_search_date:
if (month_nr - 1) < 1:
since_month_earlier = datetime.date(year-1, 12, 1)
else:
since_month_earlier = datetime.date(year, month_nr-1, 1)
previous_month_link = since_month_earlier.strftime("/searches/%Y/%B/")
base_searches = Search.objects.filter(
add_date__gte=since,
add_date__lt=since_month_later
)
found_searches = base_searches.exclude(
found_word=None
).select_related(
'found_word'
).exclude(
found_word__word__in=list(SEARCH_SUMMARY_SKIPS)
)
found_words = defaultdict(list)
definitions = {}
for each in found_searches:
found_words[each.language].append(each.found_word.word)
        if each.language not in definitions:
            definitions[each.language] = {}
        if each.found_word.definition:
            definitions[each.language][each.found_word.word.lower()]\
              = each.found_word.definition.splitlines()
found_words = dict(found_words)
found_words_repeats = {}
for language, words in found_words.items():
counts = defaultdict(int)
for word in words:
if len(word) < 2:
# don't want to find single character words
# It's a bug that they're even in there
continue
counts[word.lower()] += 1
        found_words_repeats[language] = sorted(
            [k for (k, v) in counts.items()
             if v >= atleast_count],
            key=lambda word: counts[word],
            reverse=True
        )
if lookup_definitions:
for lang, words in found_words_repeats.items():
for word in words:
try:
definitions[lang][word]
except KeyError:
if lang in ('en-us','en-gb'):
# wordnet
definition = _get_word_definition(word, language=lang)
else:
definition = None
if not definition:
definition = _get_word_definition_scrape(word, language=lang)
if definition:
add_word_definition(word, definition, language=lang)
# bake the definitions into found_words_repeats
for lang, words in found_words_repeats.items():
for i, word in enumerate(words):
words_dict = dict(word=word)
if lang in definitions:
if word in definitions[lang]:
words_dict = dict(words_dict, definitions=definitions[lang][word])
found_words_repeats[lang][i] = words_dict
all_words_plain = set()
for records in found_words_repeats.values():
for record in records:
all_words_plain.add(record['word'].lower())
all_words_plain = list(all_words_plain)
context = {
# 'language': language,
'month': month,
'year': year,
'all_words_plain': all_words_plain,
'found_words_repeats': found_words_repeats,
'previous_month_link': previous_month_link,
'next_month_link': next_month_link,
}
return render(request, 'search/searches_summary.html', context)
def about_crosstips(request):
return render(request, 'search/about-crosstips.html')
|
peterbe/kl2
|
kl/search/views.py
|
views.py
|
py
| 24,492
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8765296577
|
""" Faça um script que leia dois números e retorne como resultado a soma deles. Faça um script que leia algo
pelo teclado e mostra na tela o seu tipo de dado.
"""
numero1 = int(input("Digite o Numero 1 : "))
numero2 = int(input("Digite o Numero 2 : "))
soma = numero1+numero2
print(f"A soma é {soma}")
x = input("Digite algo : ")
print("O tipo da variavel é : ",type(x))
|
AndreDosSantosMaier/Liguagem_Programacao
|
Lista de Exercicios/Exer-1.py
|
Exer-1.py
|
py
| 388
|
python
|
pt
|
code
| 0
|
github-code
|
6
|
16471277711
|
"""
A row measuring seven units in length has red blocks with a minimum length of
three units placed on it, such that any two red blocks (which are allowed to be
different lengths) are separated by at least one black square. There are
exactly seventeen ways of doing this.
How many ways can a row measuring fifty units in length be filled?
NOTE: Although the example above does not lend itself to the possibility, in
general it is permitted to mix block sizes. For example, on a row measuring
eight units in length you could use red (3), black (1), and red (4).
Solution comment: Fast for Python, ~4 ms. This somehow worked on first
try. Idea was to describe the number of ways to place the blocks with N units,
and use this to build larger solutions. A baseline fact is that there is only
one way to do it if N < 3 (i.e. the trivial solution). Then we can place a
block of ever increasing size (until no more room), and then add the number of
ways to place blocks on the remaining units. We can place the block either at
the start, or at some offset. Trying all blocksizes and all offsets we generate
the solution.
The memoization is essential for building the solution recursively like this.
Could be translated to DP with a simple array, but that would take some more
accurate indexing. The simplicity of this approach is the most appealing part.
And somehow I got the ±1 parts right on the first go.
"""
from time import time
from functools import lru_cache
@lru_cache()
def ways(N):
w = 1 # The trivial solution is always possible.
if N >= 3:
for offset in range(N - 3 + 1): # Start placing block at each offset.
n = N - offset # The remaining units after the offset.
for b in range(3, n + 1): # Add the ways after placing a block of size b.
w += ways(n - b - 1)
return w
if __name__ == "__main__":
t0 = time()
print('Answer: {}\nExecution time: {:.3f} ms'.format(ways(50), (time() - t0) * 1e3))
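    # Added illustrative check (not part of the original solution): the problem
    # statement says a seven-unit row admits exactly 17 arrangements, so ways(7)
    # should return 17.
    assert ways(7) == 17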
|
bsamseth/project-euler
|
114/114.py
|
114.py
|
py
| 1,999
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36065552368
|
# url to update the member in flight club datasheet:-- https://replit.com/@ShivamKumar28/Shivam-Flight-Club
#This file will need to use the DataManager,FlightSearch, FlightData, NotificationManager classes to achieve the program requirements.
from data_manager import DataManager
from flight_search import FlightSearch
from notification_manager import NotificationManager
datamager = DataManager()
flightsearch = FlightSearch()
send_notification = NotificationManager()
ORIGIN_CITY_IATA = "LON"
for destination in datamager.data:
flight = flightsearch.find_flights(
ORIGIN_CITY_IATA,
destination["iataCode"],
)
if flight is None: continue
if flight.price < destination["lowestPrice"]:
message = f"Low price alert! Only ₹{flight.price} to fly from {flight.origin_city}-{flight.origin_airport} to {flight.destination_city}-{flight.destination_airport}, from {flight.out_date} to {flight.return_date}"
print(message)
# send_notification.send_sms(message)
# send_notification.send_mail(message)
send_notification.send_mails(message)
|
Shivam29k/Python_Projects
|
flight_deals_alert/main.py
|
main.py
|
py
| 1,106
|
python
|
en
|
code
| 1
|
github-code
|
6
|
6589740252
|
import pickle
from flask import Flask, request, render_template, jsonify, send_file
from elasticsearch import Elasticsearch
from transformers import pipeline, AutoTokenizer, AutoModelForQuestionAnswering, AutoModel
import spacy
import json
import time
from pymongo import MongoClient
import os
from sklearn.linear_model import LogisticRegression
app = Flask(__name__)
app.config["JSON_AS_ASCII"] = False
with open("config.json", "r") as config:
config_variables = json.load(config)
all_models = dict()
all_elastics = dict()
all_ood_classes = dict()
for model in config_variables["models"]:
all_models[model["model"]] = pipeline(
model["pipeline"],
tokenizer=model["tokenizer"],
model=model["model"],
device=model["device"],
handle_impossible_answer=bool(model["handle_impossible_answer"]),
max_answer_len=model["max_answer_len"],
)
for elastic_table in config_variables["elastics"]:
all_elastics[elastic_table["elastic_table_name"]] = elastic_table[
"elastic_table_name"
]
all_ood_classes[elastic_table["elastic_table_name"]] = elastic_table["ood_class"]
contriever_tokenizer = AutoTokenizer.from_pretrained("facebook/mcontriever-msmarco")
contriever_model = AutoModel.from_pretrained("facebook/mcontriever-msmarco")
ood_model = pickle.load(open("models/ood_model.pkl", "rb"))
nlp_hu = spacy.load("hu_core_news_trf")
MONGO_URL = os.environ.get("MONGO_URL")
ELASTIC_URL = os.environ.get("ELASTIC_URL")
ELASTIC_USER = os.environ.get("ELASTIC_USER")
ELASTIC_PASSWORD = os.environ.get("ELASTIC_PASSWORD")
# ELASTIC_PASSWORD = "lFqLIrbCQfI84P6v_ue0"
DEBUG = os.environ.get("DEBUG", "").lower() == "true"
@app.route("/test")
def test():
return jsonify({"Hello": "world!"}), 200
# @app.route('/query/<query>')
def predict_from_question(query, size, elastic, model_type):
doc_q = nlp_hu(query)
clean_tokens = list()
for token in doc_q:
# print(token.text, token.pos_, token.dep_)
if token.pos_ not in ["DET", "ADV", "PRON", "PUNCT"]:
clean_tokens.append(token.lemma_)
clean_question = " ".join(clean_tokens)
body = {"size": size, "query": {"match": {"document": clean_question}}}
es = Elasticsearch(
ELASTIC_URL, http_auth=(ELASTIC_USER, ELASTIC_PASSWORD), verify_certs=False
)
s = es.search(index=all_elastics[elastic], body=body)
# The query only returns the text before the question mark, so we add it here.
official_question = query if query[-1:] == "?" else query + "?"
# We use the highest ranked document by the elasticsearch.
contexts = list(s["hits"]["hits"])
return_value = list()
official_all_context = "\n-\n\n".join(
context["_source"]["official_document"] for context in contexts
)
lemmatized_all_context = "\n-\n\n".join(
context["_source"]["document"] for context in contexts
)
app.logger.info(contexts)
qa_pipeline = all_models[model_type]
if official_all_context != "":
prediction = qa_pipeline(
{"context": official_all_context, "question": official_question}
)
else:
prediction = {"answer": "", "start": 0, "end": 0, "score": -1}
if "\n-\n\n" in prediction["answer"]:
model_answer = prediction["answer"].split("\n-\n\n")[0]
else:
model_answer = prediction["answer"]
relevant_context = ""
elastic_score = 0
file_name, h1, h2, h3 = "", "", "", ""
for context_raw in contexts:
if context_raw["_source"]["official_document"].__contains__(model_answer):
relevant_context = context_raw["_source"]["official_document"]
elastic_score = context_raw["_score"]
file_name = context_raw["_source"]["file_name"]
h1 = context_raw["_source"]["h1"]
h2 = context_raw["_source"]["h2"]
h3 = context_raw["_source"]["h3"]
break
return_value.append(
{
"lemmatized_context": lemmatized_all_context,
"official_question": official_question,
"official_context": official_all_context,
"relevant_context": relevant_context,
"answer": prediction["answer"],
"start": prediction["start"],
"end": prediction["end"],
"model_score": prediction["score"],
"elastic_score": elastic_score,
"metadata": [
{"section": h2 + " > " + h3, "filename": file_name, "source": h1}
]
}
)
return return_value
@app.route("/qa", methods=["POST"])
def rest_api():
try:
record = json.loads(request.data)
if record["query"] == "":
return jsonify({"answers": [], "system": {}})
record["elapsed_time"] = time.time()
ood_class = ood_model.predict(get_contriever_vector([record["query"]]).detach().numpy())[0].item()
if (ood_class == all_ood_classes[record["elastic"]]):
query = predict_from_question(
record["query"], record["size"], record["elastic"], record["model_type"]
)
query[0]["ood_class"] = ood_class
else:
query = list([{"ood_class": ood_class}])
record["elapsed_time"] = time.time() - record["elapsed_time"]
record["time"] = time.time()
mongo_id = str(
db["qa"].insert_one({"answers": query, "system": record}).inserted_id
)
try:
if not DEBUG:
for answer in query:
del answer["lemmatized_context"]
del answer["official_question"]
del answer["official_context"]
del answer["model_score"]
del answer["elastic_score"]
del answer["ood_class"]
except Exception as e:
app.logger.error(e)
db["errors"].insert_one(
{"error": str(e), "time": time.time(), "type": "qa_delete_ood"}
)
return jsonify({"answers": query, "system": {"id": mongo_id}})
except Exception as e:
app.logger.error(e)
db["errors"].insert_one({"error": str(e), "time": time.time(), "type": "qa"})
return jsonify({}), 418
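# Illustrative request shape only (the concrete values are assumptions, not taken
# from project documentation): the /qa endpoint above expects a JSON body with the
# keys it reads from `record`.
#
#   POST /qa
#   {"query": "<question text>",
#    "size": 3,
#    "elastic": "<an elastic_table_name from config.json>",
#    "model_type": "<a model name from config.json>"}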
@app.route("/feedback/like", methods=["POST"])
def feedback_like():
try:
record = json.loads(request.data)
db["likes"].insert_one({"id": record["id"], "time": time.time()})
return jsonify({}), 200
except Exception as e:
app.logger.error(e)
db["errors"].insert_one({"error": str(e), "time": time.time(), "type": "like"})
return jsonify({}), 400
@app.route("/feedback/dislike", methods=["POST"])
def feedback_dislike():
try:
record = json.loads(request.data)
db["dislikes"].insert_one(
{
"id": record["id"],
"what_should_be": record["what_should_be"],
"whats_wrong": record["whats_wrong"],
"anything_else": record["anything_else"],
"was_this_in_the_context": record["was_this_in_the_context"],
"time": time.time(),
}
)
return jsonify({}), 200
except Exception as e:
app.logger.error(e)
db["errors"].insert_one(
{"error": str(e), "time": time.time(), "type": "dislike"}
)
return jsonify({}), 400
def get_contriever_vector(sentences):
inputs = contriever_tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")
outputs = contriever_model(**inputs)
def mean_pooling(token_embeddings, mask):
token_embeddings = token_embeddings.masked_fill(~mask[..., None].bool(), 0.0)
sentence_embeddings = token_embeddings.sum(dim=1) / mask.sum(dim=1)[..., None]
return sentence_embeddings
return mean_pooling(outputs[0], inputs["attention_mask"])
if __name__ == "__main__":
client = MongoClient(MONGO_URL)
db = client["shunqa"]
app.run(host="0.0.0.0", port=5000, debug=True)
|
szegedai/SHunQA
|
backend/flask_service.py
|
flask_service.py
|
py
| 8,073
|
python
|
en
|
code
| 0
|
github-code
|
6
|
75066660668
|
import scipy.io as sio
import numpy as np
class ReadFiles(object):
def __init__(self):
spamData = sio.loadmat('../data/spam_data.mat', struct_as_record=False)
self.header = spamData['__header__']
self.version = spamData['__version__']
self.names = spamData['names']
pTrain = spamData['P_train']
pTest = spamData['P_test']
self.features = np.concatenate((pTrain, pTest), axis=1)
self.features = self.features.transpose()
self.log("Features Matrix Created and Imported")
tTest = spamData['T_test']
tTrain = spamData['T_train']
self.labels = np.concatenate((tTrain, tTest), axis=1)
self.labels = self.labels.transpose()
self.labels = np.ravel(self.labels)
self.log("Labels Array Created and Imported")
def getFeatures(self):
return self.features
def getLabels(self):
return self.labels
def log(self, msg):
print('[Reading Files] {}'.format(msg))
|
Skalwalker/SpamRecognition
|
scripts/readfiles.py
|
readfiles.py
|
py
| 1,011
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1307107485
|
import functools
import os
import re
from importlib import import_module
from typing import Callable, Pattern
import yaml
from livelossplot.outputs import NeptuneLogger
def unpack_config(func: Callable) -> Callable:
"""Load parameters from a config file and inject it to function keyword arguments"""
@functools.wraps(func)
def _wrapper(*args, **kwargs):
config_file = kwargs.get('config')
if config_file:
del kwargs['config']
with open(config_file, 'r') as f:
run_args = yaml.full_load(f)
kwargs.update(run_args)
ret = func(*args, **kwargs)
return ret
return _wrapper
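# Illustrative usage sketch (the function name `train` and the file name
# 'params.yaml' are assumptions made up for the example):
#
#   @unpack_config
#   def train(lr=1e-3, epochs=10, **kwargs):
#       ...
#
#   train(config='params.yaml')  # loads the YAML file, drops the `config` kwarg
#                                # and passes the file's keys on as keyword args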
def create_artifacts_dir(runs_dir: str, run_template: Pattern = re.compile(r'(offline-experiment-)([0-9]+)')) -> str:
os.makedirs(runs_dir, exist_ok=True)
runs = [re.match(run_template, run) for run in os.listdir(runs_dir) if re.match(run_template, run)]
if len(runs) == 0:
next_run_dir = 'offline-experiment-0'
else:
last_run_match = max(runs, key=lambda r: int(r.group(2)))
next_run_id = int(last_run_match.group(2)) + 1
next_run_dir = last_run_match.group(1) + str(next_run_id)
next_run_dir = os.path.join(runs_dir, next_run_dir)
os.makedirs(next_run_dir)
return next_run_dir
def create_experiment(func: Callable) -> Callable:
"""Create experiment with function keyword parameters and generated name"""
def wrapper(*args, **params):
neptune_project_name = params.get('neptune_project')
output_dir = params['output_dir']
del params['output_dir']
logger_outputs = []
params['logger_outputs'] = logger_outputs
if neptune_project_name is not None:
del params['neptune_project']
neptune_output = NeptuneLogger(
project_qualified_name=neptune_project_name, params=params, upload_source_files='**/*.py'
)
logger_outputs.append(neptune_output)
params['run_dir'] = os.path.join(output_dir, neptune_output.experiment.id)
ret = func(*args, **params)
neptune_output.neptune.stop()
else:
logger_outputs.append('ExtremaPrinter')
params['run_dir'] = create_artifacts_dir(output_dir)
ret = func(*args, **params)
return ret
return wrapper
def import_function(class_path: str) -> Callable:
"""Function take module with to class or function and imports it dynamically"""
modules = class_path.split('.')
module_str = '.'.join(modules[:-1])
cls = modules[-1]
module = import_module(module_str)
return getattr(module, cls)
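# Illustrative usage (a standard-library target, chosen only to show the call shape):
#
#   sqrt = import_function('math.sqrt')
#   assert sqrt(9.0) == 3.0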
|
Bartolo1024/RLCarRacing
|
utils.py
|
utils.py
|
py
| 2,684
|
python
|
en
|
code
| 0
|
github-code
|
6
|
23518058201
|
def rev(a):
    a=str(a)
    a=a[::-1]
    return int(a)
def palin(p):
    k=str(p)
    return p == int(k[::-1])
a=int(input())
while True:
    a=a+rev(a)
    if palin(a):
        break
    print(a)
print(a)
|
jayavishnumadhiri/Python-Practised-programs
|
range.py
|
range.py
|
py
| 252
|
python
|
en
|
code
| 2
|
github-code
|
6
|
43991242007
|
l=[]
for i in range(10):
    num=int(input("Enter a value: "))
    l.append(num)
valor=int(input("Enter the value you want to search for in the list: "))
for i in l:
    if i==valor:
        print("Element found!")
        break
else:
    print("Element not found.")
|
marcoAureliosm/Lista-de-vetores-python
|
questão07.py
|
questão07.py
|
py
| 283
|
python
|
pt
|
code
| 0
|
github-code
|
6
|
35917887576
|
from os import confstr
from string import Template
ISL = " { data: { id: 'ISL', label: 'ISL', fedType: 'ISL', innerLevel: 3 }, group: 'nodes' },\n"
CARTEL_TEMPLATE = " { data: { id: '$id', label: '$label', fedType: '$fed_type', innerLevel: $inner_level }, group: 'nodes' },\n"
SYSTEM_TEMPLATE = " { data: { id: '$id', label: '$label', fedType: '$fed_type', innerLevel: $inner_level, outerLevel: $outer_level, cartel: '$cartel', parent: '$parent' }, group: 'nodes' },\n"
PLANET_TEMPLATE = " { data: { id: '$id', label: '$label', fedType: '$fed_type', econ: '$econ', outerLevel: $outer_level, cartel: '$cartel', system: '$system', parent: '$parent' }, group: 'nodes' },\n"
CARTEL_LINK_TEMPLATE = " { data: { id: '$id', source: '$src', target: '$tgt' }, group: 'edges' },\n"
HTML_TEMPLATE = './html_templates/index.html.tpl'
ELEMENTS_OUTFILE = '../src/elements.js'
class GalaxyPresenter:
def __init__(self):
self.galaxy = None
def load(self, galaxy_dict):
self.galaxy = galaxy_dict
def buildOutput(self, output_filename):
cyto_data = 'const elements = [\n'
#cyto_data += ISL
for cartel in self.galaxy:
s = Template(CARTEL_TEMPLATE)
cyto_data += s.safe_substitute(id=self.galaxy[cartel]['id'], label=cartel, fed_type='CARTEL', inner_level='2')
for system in self.galaxy[cartel]['systems']:
syst = self.galaxy[cartel]['systems'][system]
for planet in syst['planets']:
plt = syst['planets'][planet]
s = Template(PLANET_TEMPLATE)
cyto_data += s.safe_substitute(
id=plt['id'],
label=planet,
fed_type='PLANET',
econ=plt['econ'],
outer_level='1',
cartel=cartel,
system=system,
parent=syst['id'])
ft = 'SYSTEM'
inner_lvl = '1'
outer_lvl = '1'
if system == cartel:
ft = 'CARTELSYSTEM'
inner_lvl = '1'
outer_lvl = '2'
s = Template(SYSTEM_TEMPLATE)
cyto_data += s.safe_substitute(
id=syst['id'],
label=system,
fed_type=ft,
inner_level=inner_lvl,
outer_level=outer_lvl,
cartel=cartel,
parent=self.galaxy[cartel]['id'])
cartels = list(self.galaxy)
print(cartels)
for idx in range(1,len(cartels)):
s = Template(CARTEL_LINK_TEMPLATE)
prev_id = self.galaxy[cartels[idx-1]]['id']
this_id = self.galaxy[cartels[idx]]['id']
cyto_data += s.safe_substitute(id='cartel_{0}_{1}'.format(prev_id, this_id), src=prev_id, tgt=this_id)
for cartel in self.galaxy:
#crt = self.galaxy[cartel]
s = Template(CARTEL_LINK_TEMPLATE)
#cyto_data += s.safe_substitute(id='ISL{0}'.format(cartel), src='ISL', tgt=crt['id'])
cs_id = None
for system in self.galaxy[cartel]['systems']:
if system == cartel:
cs_id = self.galaxy[cartel]['systems'][system]['id']
break
for system in self.galaxy[cartel]['systems']:
syst = self.galaxy[cartel]['systems'][system]
sys_id = syst['id']
if system != cartel:
cyto_data += s.safe_substitute(id='cs_{0}_{1}'.format(cs_id, sys_id), src=cs_id, tgt=sys_id)
for planet in syst['planets']:
plt = syst['planets'][planet]
#cyto_data += s.safe_substitute(id='sp_{0}_{1}'.format(sys_id, plt['id']), src=sys_id, tgt=plt['id'])
cyto_data += '];\n'
cyto_data += 'export default elements;'
print(cyto_data)
with open(ELEMENTS_OUTFILE, 'w') as outfile:
outfile.write(cyto_data)
template_str = None
with open(HTML_TEMPLATE, 'r') as template:
template_str = template.read()
#s = Template(template_str)
#outdata = s.safe_substitute(CYTO_DATA=cyto_data)
with open(output_filename, 'w') as outfile:
outfile.write(template_str)
|
lbelella/f2gm
|
python/GalaxyPresenter.py
|
GalaxyPresenter.py
|
py
| 4,562
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1512983994
|
"""
This file contains helper functions for the project
"""
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from math import atan2, degrees
import urllib.request
from PIL import Image
# functions
def get_tracking_data():
"""
Function to read in tracking data and return a dataframe
"""
return pd.read_csv("./data/tracking_data.csv")
def gini_coefficient(x):
"""Compute Gini coefficient of array of values"""
diffsum = 0
for i, xi in enumerate(x[:-1], 1):
diffsum += np.sum(np.abs(xi - x[i:]))
return diffsum / (len(x) ** 2 * np.mean(x))
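# Worked example (added for illustration): a perfectly equal distribution gives a
# Gini coefficient of 0, while concentrating everything in one element pushes it
# towards 1.
#
#   gini_coefficient(np.array([1, 1, 1, 1]))       # -> 0.0
#   gini_coefficient(np.array([0, 0, 0, 0, 100]))  # -> 0.8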
def create_football_field(
linenumbers=True,
endzones=True,
highlight_line=False,
highlight_line_number=50,
highlighted_name="Line of Scrimmage",
fifty_is_los=False,
figsize=(12, 6.33),
):
"""
Function that plots the football field for viewing plays.
"""
# credit https://www.kaggle.com/code/robikscube/nfl-big-data-bowl-plotting-player-position/notebook
rect = patches.Rectangle(
(0, 0), 120, 53.3, linewidth=0.1, edgecolor="r", facecolor="darkgreen", zorder=0
)
fig, ax = plt.subplots(1, figsize=figsize)
ax.add_patch(rect)
plt.plot(
[
10,
10,
10,
20,
20,
30,
30,
40,
40,
50,
50,
60,
60,
70,
70,
80,
80,
90,
90,
100,
100,
110,
110,
120,
0,
0,
120,
120,
],
[
0,
0,
53.3,
53.3,
0,
0,
53.3,
53.3,
0,
0,
53.3,
53.3,
0,
0,
53.3,
53.3,
0,
0,
53.3,
53.3,
0,
0,
53.3,
53.3,
53.3,
0,
0,
53.3,
],
color="white",
)
if fifty_is_los:
plt.plot([60, 60], [0, 53.3], color="gold")
plt.text(62, 50, "<- Player Yardline at Snap", color="gold")
# Endzones
if endzones:
ez1 = patches.Rectangle(
(0, 0),
10,
53.3,
linewidth=0.1,
edgecolor="r",
facecolor="blue",
alpha=0.2,
zorder=0,
)
ez2 = patches.Rectangle(
(110, 0),
120,
53.3,
linewidth=0.1,
edgecolor="r",
facecolor="blue",
alpha=0.2,
zorder=0,
)
ax.add_patch(ez1)
ax.add_patch(ez2)
plt.xlim(0, 120)
plt.ylim(-5, 58.3)
plt.axis("off")
if linenumbers:
for x in range(20, 110, 10):
numb = x
if x > 50:
numb = 120 - x
plt.text(
x,
5,
str(numb - 10),
horizontalalignment="center",
fontsize=20, # fontname='Arial',
color="white",
)
plt.text(
x - 0.95,
53.3 - 5,
str(numb - 10),
horizontalalignment="center",
fontsize=20, # fontname='Arial',
color="white",
rotation=180,
)
if endzones:
hash_range = range(11, 110)
else:
hash_range = range(1, 120)
for x in hash_range:
ax.plot([x, x], [0.4, 0.7], color="white")
ax.plot([x, x], [53.0, 52.5], color="white")
ax.plot([x, x], [22.91, 23.57], color="white")
ax.plot([x, x], [29.73, 30.39], color="white")
if highlight_line:
hl = highlight_line_number + 10
plt.plot([hl, hl], [0, 53.3], color="yellow")
plt.text(hl + 2, 50, "<- {}".format(highlighted_name), color="yellow")
return fig, ax
def calc_angle(x, y, x1, y1):
"""
function to calculate angle between two sets of x-y coordinates
"""
# change in x and y
dx = x1 - x
dy = y1 - y
# calculate angle
return degrees(atan2(dy, dx))
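# Worked example (added for illustration): moving one unit right and one unit up
# gives a 45 degree angle, while moving straight left gives 180 degrees.
#
#   calc_angle(0, 0, 1, 1)   # -> 45.0
#   calc_angle(0, 0, -1, 0)  # -> 180.0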
def draw_table_image(img_url, ax):
"""
Draws table image
"""
club_icon = Image.open(urllib.request.urlopen(img_url))
club_icon.resize((100, 100))
ax.imshow(club_icon)
ax.axis("off")
return ax
class BboxLocator:
"""
A helper class to locate a bbox in a given axes.
Will be used in our leaderboards.
"""
def __init__(self, bbox, transform):
self._bbox = bbox
self._transform = transform
def __call__(self, ax, renderer):
_bbox = self._transform.transform_bbox(self._bbox)
return ax.figure.transFigure.inverted().transform_bbox(_bbox)
|
emekaamadi/Milestone-1-NFL-Project
|
src/functions.py
|
functions.py
|
py
| 5,059
|
python
|
en
|
code
| 0
|
github-code
|
6
|
2739075186
|
"""Given the html files for each of the language families,
build language tree for each and write them in json object
"""
from bs4 import BeautifulSoup
from tqdm import tqdm
import json
import sys
import os
link = 'html/indo-european.html'
link = 'html/mongolic.html'
link = 'html/bororoan.html'
def get_list(html):
if html.name == 'div' and 'item-list' in html.attrs['class']:
# Already inputted item list
item_list = html
else:
# Extract item list
result1 = html.find_all('div', {'class': 'item-list'}, recursive=False)
result2 = html.find_all('div', {'class': 'item-list'}, recursive=True)
if len(result1) == 1:
# Item list found in the top next level
item_list = result1[0]
        elif len(result2) == 1:
            # Item list not found in the first children level
            # But there is only one list further on in the tree
            item_list = result2[0]
        else:
            # No unambiguous item list found; give up on this node
            return
ul = item_list.find_all('ul', recursive=False)
if len(ul) != 1:
# Failed
return
elements = ul[0].find_all('li', recursive=False)
return elements
def strip(html):
divs = html.find_all('div', recursive=False)
#if any('item-list' in tag.attrs['class'] for tag in divs):
if 'class' in html.attrs:
if 'first' in html.attrs['class'] or 'last' in html.attrs['class']:
name = html.find_next('a').text
elems = get_list(html)
elif 'lang-indent' in html.attrs['class']:
name = html.text
elems = None
else:
print(html)
assert False
else:
name = html.find_next('a').text
elems = get_list(html)
return name, elems
def unravel(tag):
name, elems = strip(tag)
if elems is not None:
return (name, [unravel(elem) for elem in elems])
else:
return (name, [])
def parse_file(path):
family = os.path.split(path)[-1].replace('.html', '')
with open(path, 'r') as f:
soup = BeautifulSoup(f.read(), 'html.parser')
root = soup.find_all('div', {"class": "views-field views-field-name-1"})
assert len(root) == 1, "Too many root candidates!"
root = root[0]
#top = root.find_parent().find_parent().find_parent().find_parent().find_next_sibling()
##blocks = top.find_all('li', {'class': 'first'})
top = root.find_next('div', {"class": "view-content"})
res = [unravel(el) for el in get_list(top)]
return {family: res}
def parse_all():
tree = {}
errcount = 0
for file in tqdm(os.listdir('html')):
if file == '.html':
continue
path = os.path.join('html', file)
try:
tree.update(parse_file(path))
except Exception as e:
print('ERROR IN', file)
errcount += 1
raise e
continue
print("Error count:", errcount)
return tree
if __name__ == '__main__':
res = parse_all()
with open('data/language_families.json', 'w') as f:
json.dump(res, f)
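# Shape of the written JSON, for illustration (the family and language names below
# are placeholders, not values from the scraped data): each family maps to a list of
# [name, children] pairs, nested recursively.
#
#   {"some-family": [["Branch A", [["Language 1", []], ["Language 2", []]]]]}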
|
ialsina/LangTree
|
parse_html.py
|
parse_html.py
|
py
| 3,111
|
python
|
en
|
code
| 0
|
github-code
|
6
|
26111772381
|
#practice for Intro ML - Gradient Descent : Feb 25th 2020
import numpy as np
y = np.zeros((2, 3, 4))
#print(y)
a = np.array([[1,2,3], [4,5,6]])
b = np.array([[2,3,4], [9,7,8]])
#print(np.hstack((a,b)))
c = np.array([[2,3,4], [5,6,7]])
d = np.array([[3,4,5], [4,5,3]])
#print(np.hstack((c,d)))
#print([i for i in range(4)])
random_n = np.random.choice(range(2),3)
#print(c)
#print(random_n)
# the indexing: the second part is columnwise
#print(c[random_n,:-1]) # stops right before the last entry.
a = np.ones([9, 3, 2, 4])
c = np.ones([9, 5, 4, 3]) # how to dot product and matmul.
print(a)
print(c)
print(np.dot(a, c).shape)
#print(np.matmul(a, c).shape)
x = np.arange(4)
xx = x.reshape(4,1)
y = np.ones(5)
z = np.ones((3,4))
#print(xx)
#print(x)
#print(y)
#print(xx + y) # broadcasting: xx has shape (4,1) and y has shape (5,), so the result has shape (4,5)
#print(z)
|
kyrajeep/Practice
|
numpy_prac.py
|
numpy_prac.py
|
py
| 851
|
python
|
en
|
code
| 0
|
github-code
|
6
|
5254327339
|
# -*- coding: utf-8 -*-
"""
Utility functions
@authors: Álvaro Ramírez Cardona (alramirezca@unal.edu.co)
Vanessa Robledo Delgado (vanessa.robledo@udea.edu.co)
"""
from os import path
import xarray as xr
import numpy as np
import pandas as pd
from scipy import ndimage
import geopandas as gpd
from datetime import timedelta
import rioxarray
import rasterio
from geopy.distance import geodesic
import math
import sys
import glob
import numbers as checknumbers
from shapely.geometry import MultiPolygon, Polygon, shape, Point, MultiPoint, mapping
from shapely.wkt import loads
import uuid
import tqdm
import time
import warnings
import folium
import webbrowser
warnings.filterwarnings("ignore")
#___________________________________Functions______________________________________________________
def readNC(pathTb = None, pathP = None, utc_local_hour = 0, utc_local_sign = "minus"):
"""
Function for reading and resampling the Tb and P DataArrays.
The spatial resampling is 0.1° - lineal interpolation.
The temporal resampling is 1 h - nearest original coordinate to up-sampled frequency coordinates.
Inputs:
* pathTb: str, path where the Tb raw data are located.
* pathP: str, path where the P raw data are located.
The path must have the next structure:
linux: r"/home....../"
windows: r"C:/....../"
* utc_local_hour: int, allows transform the raw data hour (UTC)
to a interest time zone (interest region).
* utc_local_sign: str (minus, plus, local), sets whether to add or subtract
in the conversion for the time zone of interest. If set "local" no conversion will be done
and the time zone will be GMT or UTC.
(ex: UTC-5 timing is determined by utc_local_hour = 5 and utc_local_sign = "minus".
Outputs:
* xarray.Dataset with the brightness temperature (Tb) and P (Precipitation) data.
"""
if isinstance(pathTb, str) and isinstance(pathP, str):
try:
#Globbing the Tb and P files
filenamestb = glob.glob(pathTb+'*.nc4')
filenamesp =glob.glob(pathP+'*.nc4')
#Reading P data
ds_p = xr.open_mfdataset(filenamesp)
ds_p['P'] = ds_p['precipitationCal']; del ds_p['precipitationCal']
ds_p = ds_p['P'].T
#Temporal resampling precipitation data
            ds_p = ds_p.resample(time ='1H').mean() # hourly mean precipitation
#Reading Tb data
ds_t = xr.open_mfdataset(filenamestb); ds_t = ds_t['Tb']
#Temporal resampling Tb data
ds_t = ds_t.resample(time="1H").nearest(tolerance="1H")
#Spatial resampling Tb DataArray. This is based on P coordinates (lat and lon).
ds_t = ds_t.interp(lat=ds_p.lat.values, lon=ds_p.lon.values)
#Reorder levels from Tb DataArray for merging with P DataArray.
ds_t = ds_t.transpose("lat", "lon", "time")
#Merging DataArrays
try:
ds = xr.merge([ds_p, ds_t])
except:
#The raw P data from some years does not have a valid datetime index
#These lines convert CFTimeIndex to DatetimeIndex for merging.
datetimeindex = ds_p.indexes['time'].to_datetimeindex()
ds_p['time'] = datetimeindex
ds = xr.merge([ds_p, ds_t])
#Closing the raw DataArrays
ds_p.close()
ds_t.close()
#Converting the UTC to local hour
datex = ds.time.coords.to_index()
#Replacing the datetimeindex based on UTC_LOCAL_HOUR
if utc_local_sign == "minus":
datedt = datex.to_pydatetime() - timedelta(hours=utc_local_hour)
elif utc_local_sign == "plus":
datedt = datex.to_pydatetime() + timedelta(hours=utc_local_hour)
elif utc_local_sign == "local":
datedt = datex.to_pydatetime()
else:
raise TypeError("You must type a valid parameter for utc_local_sign: minus, plus or local. If you use local please enter utc_local_hour = 0")
dates_64 = [np.datetime64(row) for row in datedt]
ds = ds.assign_coords({"time": dates_64})
#Attaching Atributes to DataArray merged.
ds.Tb.attrs["units"] = "K"
ds.P.attrs["units"] = "mm/h"
ds.Tb.attrs["_FillValue"] = 'NaN'
ds.P.attrs["_FillValue"] = 'NaN'
ds.lon.attrs['units'] = "degrees_east"
ds.lat.attrs['units'] = "degrees_north"
            #Extracting dimensions: time, lat and lon
dates = ds.time.values;
lon, lat = np.float32(np.meshgrid(ds.lon,ds.lat))
#Establishing EPSG:4326
ds = ds.rio.set_crs(4326)
ds.attrs['crs'] = ds.rio.crs
initial_date_lecture = str(ds.time[0].values)[:16]
final_date_lecture = str(ds.time[-1].values)[:16]
print('Complete Tb and P data reading ' + initial_date_lecture + " - " + final_date_lecture)
except:
raise FileNotFoundError("Make sure you are complying with the Tb and P paths parameters: /home../")
elif isinstance(pathTb, str) and pathP is None:
try:
#Globbing the Tb files
filenamestb = glob.glob(pathTb+'*.nc4')
#Reading Tb data
ds_t = xr.open_mfdataset(filenamestb);
#Temporal resampling Tb data
ds_t = ds_t.resample(time="1H").nearest(tolerance="1H")
#Reorder levels from Tb DataArray
ds = ds_t.transpose("lat", "lon", "time")
#Converting the UTC to local hour
datex = ds.time.coords.to_index()
#Replacing the datetimeindex based on UTC_LOCAL_HOUR
if utc_local_sign == "minus":
datedt = datex.to_pydatetime() - timedelta(hours=utc_local_hour)
elif utc_local_sign == "plus":
datedt = datex.to_pydatetime() + timedelta(hours=utc_local_hour)
elif utc_local_sign == "local":
datedt = datex.to_pydatetime() + timedelta(hours=utc_local_hour)
else:
raise TypeError("You must type a valid parameter for utc_local_sign: minus, plus or local. If you use local please enter tc_local_hour = 0")
dates_64 = [np.datetime64(row) for row in datedt]
ds = ds.assign_coords({"time": dates_64})
#Attaching Atributes to DataArray merged.
ds.Tb.attrs["units"] = "K"
ds.Tb.attrs["_FillValue"] = 'NaN'
ds.lon.attrs['units'] = "degrees_east"
ds.lat.attrs['units'] = "degrees_north"
            #Extracting dimensions: time, lat and lon
lon, lat = np.float32(np.meshgrid(ds.lon,ds.lat))
#Establishing EPSG:4326
ds = ds.rio.set_crs(4326)
ds.attrs['crs'] = ds.rio.crs
initial_date_lecture = str(ds.time[0].values)[:16]
final_date_lecture = str(ds.time[-1].values)[:16]
print('Complete Tb data reading ' + initial_date_lecture + " - " + final_date_lecture)
except:
raise FileNotFoundError("Make sure you are complying with the Tb path parameters: /home../")
else:
raise FileNotFoundError("There must be at least a valid path for Tb data.")
return ds
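#Illustrative call (the paths and the UTC-5 conversion are example assumptions,
#not values required by the function):
#
#   ds = readNC(pathTb="/home/user/data/tb/", pathP="/home/user/data/p/",
#               utc_local_hour=5, utc_local_sign="minus")
#   # ds is an xarray.Dataset with hourly Tb and P fields on a common grid.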
def readTRACKS(path):
"""
function for reading tracks results.
Inputs:
* path: str, path where the tracks and MCS results is located.
Outputs:
* Geopandas.GeoDataFrame with the tracks and MCS associated.
"""
df = pd.read_csv(path, index_col = ["belong", "id_gdf"], parse_dates = ["time"])
df['geometry'] = gpd.GeoSeries.from_wkt(df['geometry'])
df['centroid_'] = gpd.GeoSeries.from_wkt(df['centroid_'])
df = gpd.GeoDataFrame(df, geometry='geometry', crs = 4326)
return df
def plot_folium(resume, location, path_save):
"""
function for plotting tracks results in folium map.
Inputs:
* resume: GeoDataFrame, data related with the tracks and MCS's.
* location list (lat, lon), location for center the map_folium.
* path_save: str, path where the .html folium map will be saved
Outputs:
    * the .html folium map will be opened with the "webbrowser" library
* path_saved: str, path where the .html was saved.
"""
m = folium.Map(location=location, zoom_start=5, tiles='CartoDB positron')
df = resume.reset_index()
for i in df.belong.unique():
#Sorting index by time
tracks = df.loc[df.belong == i].reset_index()
tracks = tracks.set_index("time").sort_index()
tracks = tracks.reset_index()
for idn, r in tracks.iterrows():
sim_geo = gpd.GeoSeries(r['geometry']).simplify(tolerance=0.001)
geo_j = sim_geo.to_json()
geo_j = folium.GeoJson(data=geo_j,
style_function=lambda x: {'fillColor': 'orange'})
folium.Popup(r.index).add_to(geo_j)
            try: #Tb and P methodology
folium.Marker(location=[r['centroid_'].y, r['centroid_'].x], popup='id_track: {} <br> id_mcs: {} <br> hour_mcs: {} <br> time: {} <br> area[km2]: {} <br> distance_traveled[km]: {} <br> direction[°]: {} <br> intersection_percentage[%]: {} <br> mean_tb[K]: {} <br> mean_p[mm/h]: {} <br> total_distance_traveled[km]: {} <br> total_duration[h]: {} <br>'.format(r['belong'], r["id_gdf"], idn, r["time"], round(r['area_tb'],1), round(r["distance_c"],1), r["direction"], r["intersection_percentage"], round(r["mean_tb"],1), round(r["mean_pp"],1), round(r["total_distance"],1), r["total_duration"])).add_to(m)
extra_name = "Tb_P_"
            except: #Tb methodology
folium.Marker(location=[r['centroid_'].y, r['centroid_'].x], popup='id_track: {} <br> id_mcs: {} <br> hour_mcs: {} <br> time: {} <br> area[km2]: {} <br> distance_traveled[km]: {} <br> direction[°]: {} <br> intersection_percentage[%]: {} <br> mean_tb[K]: {} <br> total_distance_traveled[km]: {} <br> total_duration[h]: {} <br>'.format(r['belong'], r["id_gdf"], idn, r["time"], round(r['area_tb'],1), round(r["distance_c"],1), r["direction"], r["intersection_percentage"], round(r["mean_tb"],1), round(r["total_distance"],1), r["total_duration"])).add_to(m)
extra_name = "Tb_"
geo_j.add_to(m)
min_time = str(resume.time.min())[:-6].replace("-","_").replace(" ","_")
max_time = str(resume.time.max())[:-6].replace("-","_").replace(" ","_")
path_result = path_save+'map_'+extra_name+min_time+"_"+max_time+".html"
m.save(path_result)
try:
webbrowser.open(path_result)
except:
pass
return path_result
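#Illustrative end-to-end usage (the file name, map centre and output path are
#made-up values for the example):
#
#   tracks = readTRACKS("/home/user/results/tracks.csv")
#   plot_folium(tracks, location=[6.25, -75.57], path_save="/home/user/results/")
#   # writes an interactive .html map and tries to open it in the default browser.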
|
alramirezca/ATRACKCS
|
atrackcs/utils/funcs.py
|
funcs.py
|
py
| 11,183
|
python
|
en
|
code
| 7
|
github-code
|
6
|
22400134597
|
from PyQt5 import QtWidgets
from diz3_2 import * # import of our generated UI file
import sys
from BD import Orm
class Dialog2(QtWidgets.QDialog):
def __init__(self, id):
self.id = id
super(Dialog2, self).__init__()
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.ui.comboBox.addItem("Да")
self.ui.comboBox.addItem("Нет")
self.ui.comboBox_2.addItem("Да")
self.ui.comboBox_2.addItem("Нет")
self.ui.buttonBox.accepted.connect(self.add)
self.ui.buttonBox.rejected.connect(self.close)
self.bd = Orm()
def add(self):
owner = self.id
name = self.ui.lineEdit.text()
facility = self.ui.lineEdit_2.text()
if "Да" == self.ui.comboBox.currentText():
reckoning = True
else:
reckoning = False
if "Да" == self.ui.comboBox_2.currentText():
waybills = True
else:
waybills = False
count = self.ui.spinBox.value()
# r = []
# r.append((name, company, store, supplier, reckoning, ndc, count, price))
# print(r)
self.bd.addfacil(owner, name, facility, reckoning, waybills, count)
self.close()
# app = QtWidgets.QApplication([])
# application = Dialog()
#
# sys.exit(app.exec())
|
Vorlogg/BD
|
dialog2.py
|
dialog2.py
|
py
| 1,367
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6932656070
|
import gc
import json
import numpy as np
import optuna
import pandas as pd
import sys
import warnings
import xgboost
from glob import glob
from sklearn.model_selection import KFold, StratifiedKFold
from tqdm import tqdm
from utils import FEATS_EXCLUDED, loadpkl, line_notify, to_json
#==============================================================================
# hyper parameter optimization by optuna
# https://github.com/pfnet/optuna/blob/master/examples/lightgbm_simple.py
#==============================================================================
warnings.filterwarnings('ignore')
# load datasets
CONFIGS = json.load(open('../configs/105_xgb.json'))
# load feathers
FILES = sorted(glob('../features/*.feather'))
DF = pd.concat([pd.read_feather(f) for f in tqdm(FILES, mininterval=60)], axis=1)
# split train & test
TRAIN_DF = DF[DF['click_mode'].notnull()]
del DF
gc.collect()
# use selected features
TRAIN_DF = TRAIN_DF[CONFIGS['features']]
# set card_id as index
TRAIN_DF.set_index('sid', inplace=True)
FEATS = [f for f in TRAIN_DF.columns if f not in FEATS_EXCLUDED]
def objective(trial):
xgb_train = xgboost.DMatrix(TRAIN_DF[FEATS],
TRAIN_DF['click_mode'])
param = {
'device':'gpu',
'objective':'multi:softmax',
'tree_method': 'gpu_hist', # GPU parameter
'predictor': 'gpu_predictor', # GPU parameter
'eval_metric':'mlogloss',
'num_class':12,
'eta': 0.05,
'booster': 'gbtree',
'lambda': trial.suggest_loguniform('lambda', 1e-8, 1.0),
'alpha': trial.suggest_loguniform('alpha', 1e-8, 1.0),
'silent':1,
}
param['gamma'] = trial.suggest_loguniform('gamma', 1e-8, 1.0)
param['max_depth'] = trial.suggest_int('max_depth', 1, 12)
param['min_child_weight'] = trial.suggest_uniform('min_child_weight', 0, 45)
param['subsample']=trial.suggest_uniform('subsample', 0.001, 1)
param['colsample_bytree']=trial.suggest_uniform('colsample_bytree', 0.001, 1)
param['colsample_bylevel'] = trial.suggest_uniform('colsample_bylevel', 0.001, 1)
    NUM_FOLDS = 5  # number of cross-validation folds
    folds = StratifiedKFold(n_splits=NUM_FOLDS, shuffle=True, random_state=326)
    clf = xgboost.cv(params=param,
                     dtrain=xgb_train,
                     metrics=['mlogloss'],
                     nfold=NUM_FOLDS,
folds=list(folds.split(TRAIN_DF[FEATS], TRAIN_DF['click_mode'])),
num_boost_round=10000,
early_stopping_rounds=200,
verbose_eval=100,
seed=47
)
gc.collect()
return clf['test-mlogloss-mean'].iloc[-1]
if __name__ == '__main__':
study = optuna.create_study()
study.optimize(objective, n_trials=100)
print('Number of finished trials: {}'.format(len(study.trials)))
print('Best trial:')
trial = study.best_trial
print(' Value: {}'.format(trial.value))
print(' Params: ')
for key, value in trial.params.items():
print(' {}: {}'.format(key, value))
# save result
hist_df = study.trials_dataframe()
hist_df.to_csv("../output/optuna_result_xgb.csv")
# save json
CONFIGS['params'] = trial.params
to_json(CONFIGS, '../configs/105_xgb.json')
line_notify('{} finished. Value: {}'.format(sys.argv[0],trial.value))
|
MitsuruFujiwara/KDD-Cup-2019
|
src/804_optimize_xgb_optuna.py
|
804_optimize_xgb_optuna.py
|
py
| 3,421
|
python
|
en
|
code
| 3
|
github-code
|
6
|
7796643764
|
import numpy as np
import pickle
from engine.sim_functions import calc_local_zodiacal_minimum,Spectrograph
from engine.planet_retrieval import Planet,Star
from engine.main_computer import compute
from itertools import chain
from multiprocessing import Pool
import json
import sys
dR_scale = float(sys.argv[1]) #Reflectance error in beamsplitter
out_file = str(sys.argv[2])
base_wave = 18
#####################################################
#Secondary parameters
min_wave = 4 #microns
max_wave = 19 #microns
num_channels = 50
#####################################################
#Set up the spectral parameters
spec = Spectrograph(min_wave,max_wave,base_wave,num_channels)
from engine.nullers.five_telescopes_err_2 import get_nuller_response
architecture_verbose = "Five telescope kernel nuller, optimised for diagonal telescopes (K2 alt)"
base_scale_factor = 1.028 #= approx 1.03*0.619 (where 0.619 is the conversion between a side and diagonal of a pentagon)
sz = 1500
mode_verbose = "Search"
fov_scale_factor = 5
dphi_scale = dR_scale
number_processes = 28 #parallelise?
###########################################################################
#Tau boo coordinates (ecliptic latitude of 26 degress)
ra = 206.81560195
dec = 17.45690446
z = 1 #exozodi (same as solar system)
#Generate stars of given types
def Beta_Pic(dist):
name = "Beta Pic analogue"
stype = "A6V"
rad = 1.8
teff = 8052
mass = 1.75
return Star(name,1,dist,stype,rad,teff,mass,ra,dec,spec,z)
def Tau_Boo(dist):
name = "Tau Boo analogue"
stype = "F7V"
rad = 1.42
teff = 6399
mass = 1.39
return Star(name,2,dist,stype,rad,teff,mass,ra,dec,spec,z)
def Sun(dist):
name = "Solar analogue"
stype = "G2V"
rad = 1
teff = 5772
mass = 1
return Star(name,3,dist,stype,rad,teff,mass,ra,dec,spec,z)
def Eps_Eri(dist):
name = "Epsilon Eri analogue"
stype = "K2V"
rad = 0.735
teff = 5084
mass = 0.82
return Star(name,4,dist,stype,rad,teff,mass,ra,dec,spec,z)
def Prox_Cen(dist):
name = "Proxima Cen analogue"
stype = "M5V"
rad = 0.15
teff = 3042
mass = 0.12
return Star(name,5,dist,stype,rad,teff,mass,ra,dec,spec,z)
#Helper function to generate Earth-twin planets
def myPlanet(star,num,a):
#Earth twin
PRad = 1
PMass = 1
Temp = 300
Ageom = 0.1 #Rough estimate?
AngSep = a/star.Dist
lam_ref = 0.318 #From PPop, assuming face on orbit (inc = 0)
return Planet(star,0,star.SNumber,num,PRad,PMass,365,0,0,0,0,0,0,0,Ageom,a,a,AngSep,0,0,lam_ref,Temp,spec)
#Give each star one planet in the middle of the HZ
def append_planet_list(star):
star.Planets = [myPlanet(star,2,star.HZMid)]
return star
dists = np.linspace(1,20,20)
#Make the list of stars at given distances
star_list = []
for d in dists:
star_list.append(append_planet_list(Sun(d)))
star_list.append(append_planet_list(Eps_Eri(d)))
star_list.append(append_planet_list(Prox_Cen(d)))
#Make errors
dphi = np.zeros(10)
dR = np.zeros(10)
dphi[2] = np.sign(np.random.random()*2-1)*dphi_scale
dphi[4] = np.sign(np.random.random()*2-1)*dphi_scale
dphi[5] = np.sign(np.random.random()*2-1)*dphi_scale
dphi[7] = np.sign(np.random.random()*2-1)*dphi_scale
dphi[8] = np.sign(np.random.random()*2-1)*dphi_scale
dphi[9] = np.sign(np.random.random()*2-1)*dphi_scale
dR[2] = np.sign(np.random.random()*2-1)*dR_scale
dR[4] = np.sign(np.random.random()*2-1)*dR_scale
dR[5] = np.sign(np.random.random()*2-1)*dR_scale
dR[7] = np.sign(np.random.random()*2-1)*dR_scale
dR[8] = np.sign(np.random.random()*2-1)*dR_scale
dR[9] = np.sign(np.random.random()*2-1)*dR_scale
def response_func(baseline,fov,sz,base_wavelength):
return get_nuller_response(dphi,dR,baseline,fov,sz,base_wavelength)
###########################################################################
#Get local zodi minimum
local_exozodi = calc_local_zodiacal_minimum(spec)
#RUN!!
#Multiprocessing
def worker_func(star):
return compute(star,1,response_func,spec,sz,base_scale_factor,fov_scale_factor,local_exozodi)
pool = Pool(processes=number_processes)
ls_star_data = pool.map(worker_func,star_list)
pool.close()
#Make into one list of dictionaries
dict_ls = list(chain.from_iterable(ls_star_data))
#Function to round sig figs (for numerical readability)
def round_sig_figs(x, p):
x_positive = np.where(np.isfinite(x) & (x != 0), np.abs(x), 10**(p-1))
mags = 10 ** (p - 1 - np.floor(np.log10(x_positive)))
return np.round(x * mags) / mags
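#Worked example (added for illustration): rounding to three significant figures
#maps 0.0123456 -> 0.0123 and 1234.56 -> 1230.0, i.e.
#   round_sig_figs(np.array([0.0123456, 1234.56]), 3)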
#Header data, plus results
main_dict = {
"Architecture":architecture_verbose,
"Mode":mode_verbose,
"baseline_wavelength (microns)":round_sig_figs(spec.baseline_wave*1e6,5),
"sz":sz,
"fov_scale_factor":fov_scale_factor,
"min_wavelength (microns)":round_sig_figs(spec.wave_min*1e6,5),
"max_wavelength (microns)":round_sig_figs(spec.wave_max*1e6,5),
"num_channels":spec.n_channels,
"channel_widths (microns)":round_sig_figs(spec.dlambda*1e6,5),
"channel_centres (microns)":round_sig_figs(spec.channel_centres*1e6,5),
"results":dict_ls
}
#Needed for writing JSON
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
#Write file
fout = open(out_file,"w")
json.dump(main_dict,fout,cls=NpEncoder,indent=2)
fout.close()
|
JonahHansen/LifeTechSim
|
error_sim.py
|
error_sim.py
|
py
| 5,550
|
python
|
en
|
code
| 0
|
github-code
|
6
|
2087116071
|
from bson import ObjectId
import Usuario.search as buscarUsuario
import Produto.search as buscarProduto
from datetime import date
def inserir_compra(mydb):
compra = mydb.Compra
usuario = mydb.Usuario
lista_produtos = []
usuarios = buscarUsuario.userByID(mydb,ObjectId)
data_atual = date.today()
data_formatada = data_atual.strftime('%d/%m/%Y')
execucao = True
while execucao:
opcao = input(str("Deseja comprar um produto ? "))
if opcao.lower() == "sim":
produto = buscarProduto.produtoByID(mydb,ObjectId)
lista_produtos.append(produto)
else:
execucao = False
mydict = {
"data_compra":data_formatada,
'usuario':usuarios,
"produtos":lista_produtos
}
print(type(usuarios))
compra_id = compra.insert_one(mydict)
compra_realizada = compra.find_one({"_id":ObjectId(compra_id.inserted_id)})
usuario.update_one({"_id":ObjectId(usuarios["_id"])},{ "$push": { "compras":compra_realizada }})
print("\nCompra realizada com sucesso")
print(f'Id da compra {compra_id.inserted_id}')
|
Raniel-Santos/Banco-NoSQL-Python_MongoDB
|
Compra/insertCompra.py
|
insertCompra.py
|
py
| 1,170
|
python
|
pt
|
code
| 1
|
github-code
|
6
|
30217613419
|
#!/usr/bin/python3
import os
import re
import string
import sys
import subprocess
import time
import traceback
from queue import Queue
from queue import Empty
import random
from threading import Thread
macro_var_regex = re.compile("%(?P<var>[-0-9]+)%")
macro_rand_uint_regex = re.compile("%RANDUINT\((?P<length>[-0-9A-Z()]+)\)%")
macro_rand_string_regex = re.compile("%RANDSTRING\((?P<length>[-0-9A-Z()]+)\)%")
return_regex = re.compile("return +(?P<retval>[-0-9]+).*")
cfs_not_empty_regex = re.compile("readdir result -- ino:(?!1 )[0-9]+ name:.*")
def queue_output(output, q):
while True:
line = output.readline()
q.put(line.strip())
def get_random_string(length):
letters = string.ascii_letters
return ''.join(random.choice(letters) for i in range(length))
def get_random_uint(length_in_bits):
return random.randrange(0, pow(2, length_in_bits))
class ProcessManager:
def __init__(self, test_bin, test_output_directory, test_set, fsp_shm_id):
self.data = {
"instance": {},
"fdtable": {}, # {vfs: {line: return value}, cfs: {line: return value}}
"output_queue": {},
"output_worker_thread": {},
"line_no": 1
}
if "vfs" in test_set:
self.data["instance"]['vfs'] = subprocess.Popen([test_bin['vfs'], test_output_directory['vfs']],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
bufsize=1,
encoding="utf-8"
)
if "cfs" in test_set:
self.data["instance"]['cfs'] = subprocess.Popen([test_bin['cfs'], repr(fsp_shm_id)],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
bufsize=1,
encoding="utf-8"
)
for key in self.data["instance"].keys():
instance = self.data["instance"][key]
q = Queue()
t = Thread(target=queue_output, args=(instance.stdout, q))
t.daemon = True
t.start()
self.data["output_queue"][key] = q
self.data["output_worker_thread"][key] = t
time.sleep(0.1)
while True:
try:
line = q.get_nowait()
except Empty:
break
else:
print("%s -> %s" % (key, line))
self.data["fdtable"][key] = {}
self.data["fdtable"][key] = {}
def check_live(self):
# Verify that the clients are still alive
for key in self.data["instance"].keys():
instance = self.data["instance"][key]
ret = instance.poll()
if ret is not None:
print("%s has exited unexpectedly with status %i" % (key, ret))
return False
return True
def terminus(self):
for key in self.data["instance"].keys():
instance = self.data["instance"][key]
instance.stdin.close()
def run_command(self, command):
line_no = self.data["line_no"]
self.data["line_no"] = line_no + 1
# Prepare randomness
rand_uint = 0
rand_string = ""
print("====== Line %d ======" % line_no)
for key in self.data["instance"].keys():
instance = self.data["instance"][key]
output = self.data["output_queue"][key]
input = instance.stdin
# Tokenize and replace macro in input command
command_token = command.strip().split(" ")
for index in range(0, len(command_token)):
token = command_token[index]
# Check existence of variable
matched = macro_var_regex.match(token)
if matched:
# Regular expression lexer only returns string,
# so we require explicit casting here.
param = int(matched.groupdict()["var"])
command_token[index] = self.data["fdtable"][key][param]
continue
# Check if we need to generate a random string
matched = macro_rand_string_regex.match(token)
if matched:
param = int(matched.groupdict()["length"])
if not param:
print("Cannot generate a 0-character string")
sys.exit(1)
if not rand_string:
rand_string = get_random_string(param)
command_token[index] = rand_string
continue
# Check if we need to generate a random uint
matched = macro_rand_uint_regex.match(token)
if matched:
param = int(matched.groupdict()["length"])
if not rand_uint:
rand_uint = get_random_uint(param)
command_token[index] = repr(rand_uint)
real_command = " ".join(command_token)
print("%s <- %s" % (key, real_command.strip()))
print(real_command.strip(), file=input, flush=True)
time.sleep(0.1)
try:
while True:
line = output.get(timeout=0.4)
if not self.check_live():
sys.exit(1)
print("%s -> %s" % (key, line), flush=True)
matched = return_regex.match(line)
# Be aware that line_no here is an integer
if matched:
self.data["fdtable"][key][line_no] = matched.groupdict()["retval"]
except Empty:
pass
def sanity_check(test_bin, test_output_directory, test_set, fsp_shm):
# Check output directories
# If we are testing on CFS only, test_output_directory isn't really used
if "vfs" in test_set:
for key in test_set:
check_dir = test_output_directory[key]
if not os.path.isdir(check_dir):
print("%s is not a directory. Cannot continue." % check_dir)
return False
if os.listdir(check_dir):
print("%s is not empty. Cannot continue." % check_dir)
return False
# Check CFS content
if "cfs" in test_set:
cfs = subprocess.Popen([test_bin['cfs'], repr(fsp_shm)],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
bufsize=1,
encoding="utf-8"
)
(out, err) = cfs.communicate(input="lsdir .\n")
for line in out.split("\n"):
matched = cfs_not_empty_regex.match(line)
if matched:
print("CFS is not empty. Cannot continue.")
return False
return True
def usage_and_exit(e):
print("Usage: testCfsVersysVfs.py -c <fsp shm> -v -i <batch command file>")
print(" -c <fsp shm>")
print(" If specified, the testing sequence will run on CFS with this shm id.")
print(" -v")
print(" If specified, the testing sequence will run on Linux VFS.")
print(" -e")
print(" Allow existing files in cfs / vfs paths")
print(" If both -c and -v are specified, resulting FS structure will be diffed.")
print("Each line in a batch file should be a single command, where %KEYWORD% expands:")
print(" %N% where N is a positive integer")
print(" Return value of N-th line. Will trigger a KeyError if it refers to a")
print(" line not encountered yet")
print(" %RANDUINT(N)%")
print(" Return a random unsigned integer with at most N bits")
print(" %RANDSTRING(N)%")
print(" Return a random string with length N")
sys.exit(e)
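# Example batch file, for illustration only (the command names `cmd1`/`cmd2` are
# placeholders, not commands of the real CLI; the %...% macros are the part
# documented in the help text above):
#
#   cmd1 %RANDSTRING(8)% %RANDUINT(16)%
#   cmd2 %1%
#
# On the second line %1% expands to the value captured from the first line's
# "return <n>" output.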
def main():
tests_prefix_dir = '@CMAKE_CURRENT_BINARY_DIR@/'
test_bin = {'cfs': tests_prefix_dir + 'testAppCli', 'vfs': tests_prefix_dir + 'testAppCliVfs'}
test_set = []
fsp_shm = -1
input_file = ""
skip_sanity_check = False
arg_index = 1
while arg_index < len(sys.argv):
current_arg = sys.argv[arg_index]
if current_arg == "-c":
test_set.append("cfs")
if arg_index + 1 >= len(sys.argv):
usage_and_exit(1)
arg_index += 1
try:
fsp_shm = int(sys.argv[arg_index])
except ValueError as e:
print(traceback.format_exc())
usage_and_exit(1)
elif current_arg == "-v":
test_set.append("vfs")
elif current_arg == "-i":
if arg_index + 1 >= len(sys.argv):
usage_and_exit(1)
arg_index += 1
input_file = sys.argv[arg_index]
elif current_arg == "-e":
skip_sanity_check = True
arg_index += 1
if not test_set:
print("Neither VFS or CFS is specified. Nothing to do.")
usage_and_exit(0)
if not input_file:
print("No input testing sequence specified. Nothing to do.")
usage_and_exit(0)
test_output_directory = {'vfs': "/tmp/vfs",
'cfs': "/tmp/cfs"}
if not sanity_check(test_bin, test_output_directory, test_set, fsp_shm):
if not skip_sanity_check:
print("Sanity check failed")
sys.exit(1)
else:
print("Warning: Sanity check skipped")
try:
batch_fd = open(input_file, "r")
except IOError as e:
print("Cannot open batch file")
sys.exit(1)
mgr = ProcessManager(test_bin, test_output_directory, test_set, fsp_shm)
for line in batch_fd:
mgr.run_command(line)
mgr.terminus()
if "vfs" in test_set and "cfs" in test_set:
# Now dump CFS content out to test_output_directory
print("===== Dumping CFS =====")
completed = subprocess.run(["@CMAKE_CURRENT_BINARY_DIR@/testDumpToVfs", repr(fsp_shm), test_output_directory['cfs']])
if completed.returncode != 0:
print("===== Dumper returned %d indicated error =====" % completed.returncode)
sys.exit(completed.returncode)
# Now do diff
print("===== Diff start =====")
completed = subprocess.run(["diff", "-aur", test_output_directory['cfs'], test_output_directory['vfs']])
print("===== Diff returned %d =====" % completed.returncode)
sys.exit(completed.returncode)
if __name__ == '__main__':
main()
|
WiscADSL/uFS
|
cfs/test/client/testCfsVersusVfs.py
|
testCfsVersusVfs.py
|
py
| 11,058
|
python
|
en
|
code
| 26
|
github-code
|
6
|
10414874563
|
import itertools
import operator
import types
from typing import Any, List, Optional, Tuple, Type
import torch
from executorch.exir.dialects.edge._ops import EdgeOpOverload
from executorch.exir.error import ExportError, ExportErrorType
from executorch.exir.lowered_backend_module import LoweredBackendModule
from executorch.exir.verification.arg_validator import (
EdgeOpArgValidator,
RunHigherOrderOperatorError,
)
from torch._export.verifier import SpecViolationError, Verifier
from torch._ops import OpOverload
from torch._subclasses import FakeTensor
from torch.export.exported_program import ExportedProgram
from torch.fx import GraphModule
ALLOWED_META_KEYS = {"spec", "stack_trace"}
def _check_tensors_are_contiguous(gm: GraphModule) -> None:
    # Tensors must be in contiguous format
for name, param in itertools.chain(gm.named_parameters(), gm.named_buffers()):
if isinstance(param, torch.Tensor):
if not param.is_contiguous():
raise SpecViolationError(
f"Tensors in Aten dialect must be contiguous, {name} is not contiguous"
)
class EXIRATenDialectVerifierBase(Verifier):
dialect = "OLD_EXIR_ATEN_DISABLED"
def allowed_getattr_types(self) -> Tuple[Type[Any], ...]:
return (
torch.fx.GraphModule,
LoweredBackendModule,
torch.Tensor,
torch.ScriptObject,
)
def allowed_op_types(self):
return super().allowed_op_types() + (torch._ops.OpOverloadPacket,)
def __call__(self, *args, **kwargs):
if hasattr(self, "_check_graph_module"):
return self._check_graph_module(*args, **kwargs)
elif hasattr(self, "check_valid"):
return self.check_valid(*args, **kwargs)
else:
raise RuntimeError("")
class EXIRATenDialectVerifier(EXIRATenDialectVerifierBase):
dialect = "OLD_EXIR_ATEN"
def check_valid_op(self, op):
if isinstance(op, OpOverload):
# TODO These special ops should be removable easily.
if op.namespace in (
"quantized_decomposed",
"boltnn_nimble",
"nimble",
"quantized",
) or op in (
torch.ops.aten.mkldnn_rnn_layer.default,
torch.ops.aten._upsample_bilinear2d_aa.default,
torch.ops.aten.quantize_per_tensor.default,
torch.ops.aten.dequantize.self,
torch.ops.aten.max.default,
):
return
if torch.Tag.core not in op.tags and torch.Tag.view_copy not in op.tags:
# NOTE(qihan): whether view_copy operators are marked as canonical is still under
# discussion.
raise SpecViolationError(
f"Operator {op.__module__}.{op.__name__} is not Aten Canonical."
)
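# Illustrative usage sketch (an assumption, not part of the original module): run the
# ATen dialect verifier over a GraphModule `gm` obtained from torch.export elsewhere.
#   verifier = EXIRATenDialectVerifier()
#   try:
#       verifier(gm)  # dispatches to Verifier._check_graph_module / check_valid
#   except SpecViolationError as e:
#       print(f"graph contains a non-core ATen op: {e}")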
def get_aten_verifier(enable: bool = True):
return EXIRATenDialectVerifier if enable else EXIRATenDialectVerifierBase
def _get_inputs(graph_module: GraphModule) -> List[Optional[FakeTensor]]:
def extract_input(node: torch.fx.Node) -> Optional[FakeTensor]:
if "val" in node.meta:
return node.meta["val"]
if len(node.users) == 0:
return None
# TODO(ycao): `val` should always exist after we enable shape environment
# serialization and deserialization.
raise ExportError(
ExportErrorType.VIOLATION_OF_SPEC,
f"Cannot construct an input for graph module: {graph_module}.",
)
return [
extract_input(node)
for node in graph_module.graph.nodes
if node.op == "placeholder"
]
def _check_tensor_args_matching_op_allowed_dtype(gm: GraphModule) -> None:
validator = EdgeOpArgValidator(gm)
inputs = _get_inputs(gm)
try:
validator.run(*inputs)
except RunHigherOrderOperatorError:
# NB: ignore higher order operator in the graph.
# If we lower a graph module to delegate and then compose it with some other graph module, retrace it,
# if we also turn on edge ops and validator (_check_ir_validity=True), we will run
# into RunHigherOrderOperatorError. The only thing we can do right now is to ignore this error, since
# by definition it's still a valid Edge dialect. This is not ideal because it ignores possible invalidity
# later in the graph.
return
if validator.violating_ops:
raise SpecViolationError(
f"These operators are taking Tensor inputs with mismatched dtypes: {validator.violating_ops}"
)
def EXIREdgeDialectVerifier( # noqa: C901
check_edge_ops: bool = True,
enable: bool = True,
class_only: bool = False,
):
class _EXIREdgeDialectVerifier(Verifier):
dialect = "EDGE"
def __init__(self) -> None:
self.check_edge_ops = check_edge_ops
if self.check_edge_ops:
self.check_valid_op = self.check_valid_edge_op
else:
self.check_valid_op = self.check_valid_aten_op
def allowed_getattr_types(self) -> Tuple[Type[Any], ...]:
return (
torch.fx.GraphModule,
LoweredBackendModule,
torch.Tensor,
torch.ScriptObject,
)
def allowed_op_types(self):
return super().allowed_op_types() + (EdgeOpOverload, types.FunctionType)
def check_valid_edge_op(self, op):
if not enable:
return
if op in [operator.getitem, torch.ops.aten.sym_size.int]:
return
if isinstance(op, OpOverload) and not isinstance(op, EdgeOpOverload):
raise SpecViolationError(
"Operator {}.{} is not an Edge operator.".format(
op.__module__, op.__name__
)
)
if isinstance(op, EdgeOpOverload):
self.check_valid_aten_op(op._op)
if isinstance(op, types.FunctionType):
assert op.__name__ in ("alloc",)
def check_valid_aten_op(self, op) -> None:
if isinstance(op, OpOverload):
if (
torch.Tag.core not in op.tags # type: ignore[attr-defined]
and torch.Tag.view_copy not in op.tags # type: ignore[attr-defined]
):
# NOTE(qihan): whether view_copy operators are marked as canonical is still under
# discussion.
raise SpecViolationError(
"Operator {}.{} is not Aten Canonical.".format(
op.__module__, op.__name__
)
)
def check_additional(self, gm: GraphModule) -> None:
if not enable:
return
if self.check_edge_ops:
_check_tensors_are_contiguous(gm)
_check_tensor_args_matching_op_allowed_dtype(gm)
def is_valid(self, gm: GraphModule) -> bool:
try:
self(gm)
return True
except SpecViolationError:
return False
def __call__(self, ep_or_gm):
if not enable:
return
gm = ep_or_gm
if isinstance(gm, ExportedProgram):
gm = ep_or_gm.graph_module
if hasattr(self, "_check_graph_module"):
return self._check_graph_module(gm)
elif hasattr(self, "check_valid"):
return self.check_valid(gm)
else:
raise RuntimeError("")
ret = _EXIREdgeDialectVerifier
if not class_only:
ret = ret()
return ret
EXIREdgeDialectVerifier()
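# Illustrative usage sketch (an assumption, not part of the original module): the
# factory returns a verifier instance by default, or the class itself when
# class_only=True is passed.
#   edge_verifier = EXIREdgeDialectVerifier(check_edge_ops=True)  # instance
#   VerifierCls = EXIREdgeDialectVerifier(class_only=True)        # class, construct later
#   ok = edge_verifier.is_valid(gm)  # True if `gm` passes the Edge dialect checks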
|
pytorch/executorch
|
exir/verification/verifier.py
|
verifier.py
|
py
| 7,890
|
python
|
en
|
code
| 479
|
github-code
|
6
|
36413248388
|
# ---------------
# ParamCopy - Substance 3D Designer plugin
# (c) 2019-2022 Eyosido Software SARL
# ---------------
import os, weakref
from functools import partial
from PySide2.QtCore import QObject
from PySide2.QtWidgets import QToolBar
import sd
from sd.context import Context
from sd.api.sdapplication import SDApplication
from sd.api.sduimgr import SDUIMgr
from paramcopy.pccore import pclog
from paramcopy.pccore.pcdata import PCData
class PCGraphCustomToolbarMgr(QObject):
"""
Handles a single custom toolbar per graph view used by a single external component
"""
def __init__(self, callback, toolbarIcon):
super().__init__()
self.sdApp = sd.getContext().getSDApplication()
self.sdUiMgr = self.sdApp.getQtForPythonUIMgr()
        self.toolbars = {} # key: graphViewId, value: weak reference to the created toolbar (so we don't prevent Qt from deleting the toolbars)
self.callback = partial(callback) # callback must create/setup a single QToolBar object and return it.
self.toolbarIcon = toolbarIcon
self.registerGraphViewCreated()
# --- Public
def cleanup(self):
self.removeAllToolbars()
if self.graphViewCreatedCbId:
self.sdUiMgr.unregisterCallback(self.graphViewCreatedCbId)
# --- Private
def registerGraphViewCreated(self):
self.graphViewCreatedCbId = self.sdUiMgr.registerGraphViewCreatedCallback( partial(self.onGraphViewCreated, uiMgr=self.sdUiMgr))
def removeAllToolbars(self):
for toolbarRef in self.toolbars.values():
weakref.proxy(toolbarRef).deleteLater()
self.toolbars = {}
def onGraphViewCreated(self, graphViewId, uiMgr):
if not self.toolbars.get(graphViewId):
toolbar = self.callback() # let user create and setup the QToolBar
toolbar.destroyed.connect(partial(self.onToolbarDestroyed, graphViewId=graphViewId))
self.toolbars[graphViewId] = toolbar
self.sdUiMgr.addToolbarToGraphView(graphViewId, toolbar, icon = self.toolbarIcon, tooltip = toolbar.toolTip())
def onToolbarDestroyed(self, graphViewId):
        # self.sender() is not the toolbar object, so we need to look the toolbar up by graphViewId
if self.toolbars.get(graphViewId):
del self.toolbars[graphViewId]
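# Illustrative usage sketch (an assumption, not part of the original plugin): a caller
# passes a callback that builds and returns a configured QToolBar; the manager then
# attaches one such toolbar to every graph view as it is created.
#
#   def _makeToolbar():
#       tb = QToolBar("ParamCopy")
#       tb.setToolTip("ParamCopy tools")
#       return tb
#
#   mgr = PCGraphCustomToolbarMgr(_makeToolbar, toolbarIcon=someIcon)  # someIcon is assumed
#   ...
#   mgr.cleanup()  # delete all toolbars and unregister the graph-view-created callback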
|
eyosido/ParamCopy
|
src/paramcopy/pcui/pctoolbar.py
|
pctoolbar.py
|
py
| 2,317
|
python
|
en
|
code
| 9
|
github-code
|
6
|
7066069240
|
import sys,os,subprocess,string,re
from threading import Timer
import time,socket,json
from os import path
base_dir = path.dirname(path.abspath(sys.path[0]))
print(base_dir)
sys.path.append(base_dir)
class HfsSetup():
def __init__(self,hver='',plants='',adict={}):
self._run_code = True
self._plants = plants
self._hfs_version = hver
self._servers = ['10.60.96.203']
self._server = self._servers[0]
self._Kapps = ["houdinifx.exe","hython.exe"]
self._folders = ["temp"]
self.Makerdir()
self._temp = "%s/temp"%base_dir
self._code_base_path = adict['codebase'] # C:/script/CG/Houdini
self._B_plugin_path = adict['plugingase'] # B:/plugins/houdini
self._D_houdini_path = adict['hfsbase'] # D:/plugins/houdini
self._info_return = []
def getcustip(self):
self._hostname = socket.gethostname()
## print(self._hostname)
_ip_end = re.findall(r"\d+",self._hostname)
_ip_a = _ip_end[0]
z_num = 0
for i in range(0,len(_ip_a)):
if not _ip_a[i]=="0":
z_num=i
break
_ip_a =_ip_a[z_num:]
_ipList = socket.gethostbyname_ex(self._hostname)
self._this_ip = ''
for elm in _ipList[-1]:
_ip_ss = elm.split(".")
if _ip_a in _ip_ss:
self._this_ip = elm
break
def Makerdir(self):
for elm in self._folders:
_elm = "%s/%s"%(base_dir,elm)
if not os.path.exists(_elm):
os.makedirs(_elm)
def ConfigApp_win(self):
self._CopyHfs = True
self._info_return.append("Server tyr to chang to: %s"%self._server)
## copy files
# 7z tools
localpath = r"D:/plugins/tools/7z"
z7 = "D:/plugins/tools/7z/7z.exe"
fromdir = self._B_plugin_path.replace("/plugins/houdini","") + r"/tools/7-Zip"
h_source = os.path.abspath("%s/apps/win/%s.7z"%(self._B_plugin_path,self._hfs_version))
h_amd = os.path.abspath("%s/%s.7z"%(self._D_houdini_path,self._hfs_version))
if os.path.exists(h_amd):
self._CopyHfs = False
## set hfs server
version_base = self._hfs_version[:2]
py_path = os.path.abspath("%s/function/HoudiniLibs/hfsserver.py %s"%(self._code_base_path,base_dir))
_lic_info = {"hver":version_base,"server":self._server}
_lic_info_f = "%s/temp/lic_info.json"%base_dir
with open(_lic_info_f,"w")as f:
json.dump(_lic_info,f)
f.close()
server_cmds = py_path
## unzip houdini
cmd_un7z = z7 + " x -y -aos "
cmd_un7z += "%s/%s.7z"%(self._D_houdini_path,self._hfs_version) # D:/plugins/houdini
cmd_un7z += " -o%s" % ("%s/%s"%(self._D_houdini_path,self._hfs_version))
## subprocess
        # create the log file handles
setserver_log = open(r'%s/Server_info.txt'%self._temp,'wt')
copy7ztool_log = open(r'%s/Copy_ziptool.txt'%self._temp,'wt')
Uzi_Houdini_log = open(r'%s/Uzip_Houdini.txt'%self._temp,'wt')
set_server = subprocess.Popen(server_cmds,stdout=setserver_log,shell=True)
copy7ztool = subprocess.Popen("robocopy /S /NDL /NFL %s %s %s" % (fromdir, localpath, "*"),stdout=copy7ztool_log,shell=True)
if self._CopyHfs:
copyhoudini = subprocess.Popen("copy %s %s" % (h_source, h_amd),shell=True)
copyhoudini.wait()
copy7ztool.wait()
UzipHoudini = subprocess.Popen(cmd_un7z,stdout=Uzi_Houdini_log,shell=True)
_s_result = set_server.wait()
if not _s_result:
# print("License server changed to: %s"%self._server)
self._info_return.append("License server changed to: %s"%self._server)
UzipHoudini.wait()
        # finished, close the log file handles
setserver_log.close()
copy7ztool_log.close()
Uzi_Houdini_log.close()
_h_result = UzipHoudini.returncode
## os.remove(h_amd)
if not _h_result:
print("Houdini setup finished. ")
def KillApps(self):
for app in self._Kapps:
if self._plants == "win":
cmds = r'c:\windows\system32\cmd.exe /c c:\windows\system32\TASKKILL.exe /F /IM %s'%app
elif self._plants == "Linux":
cmds = ''
subprocess.call(cmds,shell=True)
def Extued(self):
# print("Try to kill the houdinifx and hython before houdini app setup")
self._info_return.append("Try to kill the houdinifx and hython before houdini app setup")
try:
self.KillApps()
except:
pass
if self._run_code and self._plants=="win":
self.ConfigApp_win()
elif self._run_code and self._plants=="Linux":
self.ConfigApp_Linux()
def main(version='',plants='win',adict={}):
s_time = time.time()
app = HfsSetup(version,plants,adict)
app.Extued()
n_time = time.time()
print("Times for apps setup: %d s"%(n_time-s_time))
app._info_return.append("Times for apps setup: %d s"%(n_time-s_time))
return app._info_return
if __name__ == '__main__':
main('160557','win')
'''
_info_file = "%s/temp/app_info.json"%base_dir
_plugin_info = {}
if os.path.exists(_info_file):
with open(_info_file,"r")as f:
_plugin_info = json.load(f)
f.close()
'''
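# Illustrative call sketch (an assumption, not part of the original module): main()
# expects the adict keys read in HfsSetup.__init__ ('codebase', 'plugingase',
# 'hfsbase'); the version string and paths below are placeholders.
#   main('18.5.563', 'win', {'codebase': 'C:/script/CG/Houdini',
#                            'plugingase': 'B:/plugins/houdini',
#                            'hfsbase': 'D:/plugins/houdini'})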
|
kRayvison/Pycharm_python36
|
new_render_data/input/p/script/CG/Houdini/function/old/HoudiniMain/HoudiniAppSet.py
|
HoudiniAppSet.py
|
py
| 5,487
|
python
|
en
|
code
| 1
|
github-code
|
6
|
195263286
|
from django.db import models
from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber
from core.models.plcorebase import StrippedCharField
import os
from django.db import models, transaction
from django.forms.models import model_to_dict
from django.db.models import Q
from operator import itemgetter, attrgetter, methodcaller
import traceback
from xos.exceptions import *
from core.models import SlicePrivilege, SitePrivilege
from sets import Set
from urlparse import urlparse
CEILOMETER_KIND = "ceilometer"
class CeilometerService(Service):
KIND = CEILOMETER_KIND
class Meta:
app_label = "ceilometer"
verbose_name = "Ceilometer Service"
proxy = True
@property
def ceilometer_pub_sub_url(self):
return self.get_attribute("ceilometer_pub_sub_url", None)
@ceilometer_pub_sub_url.setter
def ceilometer_pub_sub_url(self, value):
self.set_attribute("ceilometer_pub_sub_url", value)
class MonitoringChannel(TenantWithContainer): # aka 'CeilometerTenant'
class Meta:
proxy = True
KIND = CEILOMETER_KIND
LOOK_FOR_IMAGES=[ #"trusty-server-multi-nic-docker", # CloudLab
"ceilometer-trusty-server-multi-nic",
#"trusty-server-multi-nic",
]
sync_attributes = ("private_ip", "private_mac",
"ceilometer_ip", "ceilometer_mac",
"nat_ip", "nat_mac", "ceilometer_port",)
default_attributes = {}
def __init__(self, *args, **kwargs):
ceilometer_services = CeilometerService.get_service_objects().all()
if ceilometer_services:
self._meta.get_field("provider_service").default = ceilometer_services[0].id
super(MonitoringChannel, self).__init__(*args, **kwargs)
self.set_attribute("use_same_instance_for_multiple_tenants", True)
def can_update(self, user):
        # Allow creation of instances of this model for non-admin users as well
return True
def save(self, *args, **kwargs):
if not self.creator:
if not getattr(self, "caller", None):
# caller must be set when creating a monitoring channel since it creates a slice
raise XOSProgrammingError("MonitoringChannel's self.caller was not set")
self.creator = self.caller
if not self.creator:
raise XOSProgrammingError("MonitoringChannel's self.creator was not set")
if self.pk is None:
#Allow only one monitoring channel per user
channel_count = sum ( [1 for channel in MonitoringChannel.objects.filter(kind=CEILOMETER_KIND) if (channel.creator == self.creator)] )
if channel_count > 0:
raise XOSValidationError("Already %s channels exist for user Can only create max 1 MonitoringChannel instance per user" % str(channel_count))
super(MonitoringChannel, self).save(*args, **kwargs)
model_policy_monitoring_channel(self.pk)
def delete(self, *args, **kwargs):
self.cleanup_container()
super(MonitoringChannel, self).delete(*args, **kwargs)
@property
def addresses(self):
if (not self.id) or (not self.instance):
return {}
addresses = {}
for ns in self.instance.ports.all():
if "private" in ns.network.name.lower():
addresses["private"] = (ns.ip, ns.mac)
elif ("nat" in ns.network.name.lower()) or ("management" in ns.network.name.lower()):
addresses["nat"] = (ns.ip, ns.mac)
#TODO: Do we need this client_access_network. Revisit in VTN context
#elif "ceilometer_client_access" in ns.network.labels.lower():
# addresses["ceilometer"] = (ns.ip, ns.mac)
return addresses
@property
def nat_ip(self):
return self.addresses.get("nat", (None, None))[0]
@property
def nat_mac(self):
return self.addresses.get("nat", (None, None))[1]
@property
def private_ip(self):
return self.addresses.get("nat", (None, None))[0]
@property
def private_mac(self):
return self.addresses.get("nat", (None, None))[1]
@property
def ceilometer_ip(self):
return self.addresses.get("ceilometer", (None, None))[0]
@property
def ceilometer_mac(self):
return self.addresses.get("ceilometer", (None, None))[1]
@property
def site_tenant_list(self):
tenant_ids = Set()
for sp in SitePrivilege.objects.filter(user=self.creator):
site = sp.site
for cs in site.controllersite.all():
if cs.tenant_id:
tenant_ids.add(cs.tenant_id)
return tenant_ids
@property
def slice_tenant_list(self):
tenant_ids = Set()
for sp in SlicePrivilege.objects.filter(user=self.creator):
slice = sp.slice
for cs in slice.controllerslices.all():
if cs.tenant_id:
tenant_ids.add(cs.tenant_id)
for slice in Slice.objects.filter(creator=self.creator):
for cs in slice.controllerslices.all():
if cs.tenant_id:
tenant_ids.add(cs.tenant_id)
if self.creator.is_admin:
            #TODO: Ceilometer publishes the SDN meters without associating them with any tenant IDs.
            #For now, the ceilometer code is changed to publish all such meters with the tenant
            #id "default_admin_tenant". Here we add that default tenant as an authorized tenant_id
            #for all admin users.
tenant_ids.add("default_admin_tenant")
return tenant_ids
@property
def tenant_list(self):
return self.slice_tenant_list | self.site_tenant_list
@property
def tenant_list_str(self):
return ", ".join(self.tenant_list)
@property
def ceilometer_port(self):
# TODO: Find a better logic to choose unique ceilometer port number for each instance
if not self.id:
return None
return 8888+self.id
@property
def ceilometer_url(self):
if not self.private_ip:
return None
return "http://" + self.private_ip + ":" + str(self.ceilometer_port) + "/"
def model_policy_monitoring_channel(pk):
    # TODO: this should be made into a real model_policy
with transaction.atomic():
mc = MonitoringChannel.objects.select_for_update().filter(pk=pk)
if not mc:
return
mc = mc[0]
mc.manage_container()
SFLOW_KIND = "sflow"
SFLOW_PORT = 6343
SFLOW_API_PORT = 33333
class SFlowService(Service):
KIND = SFLOW_KIND
class Meta:
app_label = "ceilometer"
verbose_name = "sFlow Collection Service"
proxy = True
default_attributes = {"sflow_port": SFLOW_PORT, "sflow_api_port": SFLOW_API_PORT}
sync_attributes = ("sflow_port", "sflow_api_port",)
@property
def sflow_port(self):
return self.get_attribute("sflow_port", self.default_attributes["sflow_port"])
@sflow_port.setter
def sflow_port(self, value):
self.set_attribute("sflow_port", value)
@property
def sflow_api_port(self):
return self.get_attribute("sflow_api_port", self.default_attributes["sflow_api_port"])
@sflow_api_port.setter
def sflow_api_port(self, value):
self.set_attribute("sflow_api_port", value)
def get_instance(self):
if self.slices.exists():
slice = self.slices.all()[0]
if slice.instances.exists():
return slice.instances.all()[0]
return None
@property
def sflow_api_url(self):
if not self.get_instance():
return None
return "http://" + self.get_instance().get_ssh_ip() + ":" + str(self.sflow_api_port) + "/"
class SFlowTenant(Tenant):
class Meta:
proxy = True
KIND = SFLOW_KIND
sync_attributes = ("listening_endpoint", )
default_attributes = {}
def __init__(self, *args, **kwargs):
sflow_services = SFlowService.get_service_objects().all()
if sflow_services:
self._meta.get_field("provider_service").default = sflow_services[0].id
super(SFlowTenant, self).__init__(*args, **kwargs)
@property
def creator(self):
from core.models import User
if getattr(self, "cached_creator", None):
return self.cached_creator
creator_id=self.get_attribute("creator_id")
if not creator_id:
return None
users=User.objects.filter(id=creator_id)
if not users:
return None
user=users[0]
self.cached_creator = users[0]
return user
@creator.setter
def creator(self, value):
if value:
value = value.id
if (value != self.get_attribute("creator_id", None)):
self.cached_creator=None
self.set_attribute("creator_id", value)
@property
def listening_endpoint(self):
return self.get_attribute("listening_endpoint", None)
@listening_endpoint.setter
def listening_endpoint(self, value):
if urlparse(value).scheme != 'udp':
raise XOSProgrammingError("SFlowTenant: Only UDP listening endpoint URLs are accepted...valid syntax is: udp://ip:port")
self.set_attribute("listening_endpoint", value)
def save(self, *args, **kwargs):
if not self.creator:
if not getattr(self, "caller", None):
# caller must be set when creating a SFlow tenant since it creates a slice
raise XOSProgrammingError("SFlowTenant's self.caller was not set")
self.creator = self.caller
if not self.creator:
raise XOSProgrammingError("SFlowTenant's self.creator was not set")
if not self.listening_endpoint:
raise XOSProgrammingError("SFlowTenant's self.listening_endpoint was not set")
if self.pk is None:
#Allow only one sflow channel per user and listening_endpoint
channel_count = sum ( [1 for channel in SFlowTenant.objects.filter(kind=SFLOW_KIND) if ((channel.creator == self.creator) and (channel.listening_endpoint == self.listening_endpoint))] )
if channel_count > 0:
raise XOSValidationError("Already %s sflow channels exist for user Can only create max 1 tenant per user and listening endpoint" % str(channel_count))
super(SFlowTenant, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
        super(SFlowTenant, self).delete(*args, **kwargs)
@property
def authorized_resource_list(self):
return ['all']
@property
def authorized_resource_list_str(self):
return ", ".join(self.authorized_resource_list)
|
xmaruto/mcord
|
xos/services/ceilometer/models.py
|
models.py
|
py
| 10,893
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35212077312
|
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 5005
BUFFER_SIZE = 1024
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((TCP_IP,TCP_PORT))
message ="Player"
s.send(message.encode())
while 1:
data = s.recv(1024).decode()
print("received:",data)
s.send(input().encode())
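# Minimal counterpart server sketch (an assumption for illustration; the repository's
# real server is not shown here). It accepts one client on 127.0.0.1:5005, reads the
# "Player" greeting, then prompts and reads replies, matching the loop above.
#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   srv.bind((TCP_IP, TCP_PORT))
#   srv.listen(1)
#   conn, addr = srv.accept()
#   greeting = conn.recv(BUFFER_SIZE).decode()  # "Player"
#   while 1:
#       conn.send("your move?".encode())
#       print("client said:", conn.recv(BUFFER_SIZE).decode())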
|
OleHalvor/HS
|
client.py
|
client.py
|
py
| 294
|
python
|
en
|
code
| 0
|
github-code
|
6
|
11068770479
|
from django.http import *
from forms import UploadForm
from django import template
from django.template.loader import get_template
from django.template import Context, RequestContext
from django.utils.decorators import method_decorator
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.views.generic.base import TemplateView, View
from django.views.decorators.csrf import csrf_exempt
from django.contrib.sessions.models import Session
from django.contrib.auth.models import User, Group, Permission
from models import *
from django.db import models
from django.db.models import Count, Min, Sum, Max, Avg
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils import unittest
from django.db import connection, transaction
import logging
import hashlib
from google.appengine.api import files
try:
files.gs
except AttributeError:
import gs
files.gs = gs
PERPAGE=50
def checkadminlogin_dispatch(f):
def wrap(request, *args, **kwargs):
if 'IsLogin' in request.session and request.session['IsLogin'] and 'Staff' in request.session and request.session['Staff'].username !="":
staff_list = Admins.objects.filter(username = request.session['Staff_username'], pass_field = hashlib.md5(request.session['Staff_password']).hexdigest())
if staff_list:
request.session['IsLogin'] = True
request.session['Staff'] = staff_list[0]
success = True
else:
return HttpResponseRedirect('/logout')
logging.info('Fetch Started:: %s', staff_list[0])
else:
return HttpResponseRedirect('/logout')
return f(request, *args, **kwargs)
return wrap
class CsrfExemptMixin(object):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(CsrfExemptMixin, self).dispatch(request, *args, **kwargs)
class LoginRequiredMixin(object):
@method_decorator(checkadminlogin_dispatch)
def dispatch(self,request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
@csrf_exempt
def render_template(request, template, data=None):
errs =""
if request.method == 'GET' and 'err' in request.GET:
data.update({'errs':request.GET['err']})
response = render_to_response(template, data,
context_instance=RequestContext(request))
return response
class CMSClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Extrapages.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Extrapages.objects.all()[offset-100:offset]
content = {'page_title': "Summary",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "cms_pages.htm", content)
class CMSEditClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pageid = request.GET['pageid']
allpages = Extrapages.objects.get(id=pageid)
content = {'page_title': "Summary",
'allpages':allpages,
}
return render_template(request, "cms_pages_edit.htm", content)
class EmailViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Emails.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Emails.objects.all()[offset-100:offset]
content = {'page_title': "Admin :: Email List",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "email_pages.htm", content)
class EmailEditClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pageid = request.GET['id']
allpages = Emails.objects.get(id=pageid)
content = {'page_title': "Admin::Email Edit",
'allpages':allpages,
}
return render_template(request, "email_pages_edit.htm", content)
class CMSAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Summary",}
return render_template(request, "cms_pages_add.htm", content)
class TitlesContentClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Html.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Html.objects.all()[offset-100:offset]
content = {'page_title': "Summary",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "titles_content.htm", content)
class ProductWishListClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
#allitems = ProductWaitinglist.objects.annotate(dcount=Count('catalogid')).values('catalogid',
# 'current_stock',
# 'products__catalogid').all()[offset-100:offset]
allitems = ProductWaitinglist.objects.raw('select count(*) as dcount,product_waitinglist.catalogid,products.id,name,current_stock from product_waitinglist,products where product_waitinglist.catalogid=products.catalogid group by catalogid')[offset-100:offset]
count = ProductWaitinglist.objects.values('catalogid').annotate(dcount=Count('catalogid')).count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
}
return render_template(request, "products_wish_list.htm", content)
class ProductWishViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
itemid = request.GET['itemid']
allitems = ProductWaitinglist.objects.filter(catalogid=itemid).all()[offset-100:offset]
count = ProductWaitinglist.objects.filter(catalogid=itemid).all().count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
'itemid':itemid,
}
return render_template(request, "products_wish_list_view_list.htm", content)
class ReviewAllClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = ProductReview.objects.raw('select count(*) as dcount,product_review.catalogid,products.id,name,thumbnail from product_review, products where product_review.catalogid=products.catalogid group by catalogid')[offset-100:offset]
count = ProductReview.objects.values('catalogid').annotate(dcount=Count('catalogid')).count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
}
return render_template(request, "products_7_reviews.htm", content)
class ProductsReviewsViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
itemid = request.GET['itemid']
allitems = ProductReview.objects.filter(catalogid=itemid).all()
count = ProductReview.objects.filter(catalogid=itemid).all().count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'itemid':itemid,
}
return render_template(request, "products_review_view_list.htm", content)
class ProductsReviewEditFormClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
itemid = request.GET['itemid']
allitems = ProductReview.objects.get(id=itemid)
content = {'page_title': "Summary",
'allitems':allitems,
#'count':count,
#'page_num':page_num,
'itemid':itemid,
}
return render_template(request, "products_7_reviews_edit_2_edit.htm", content)
class ApanelViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Profile",}
return render_template(request, "home-page-admin.htm", content)
class CustomersViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = customers.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'customers':customers.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,
}
return render_template(request, "customers.htm", content)
class CRMViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
if 'status' in request.GET and request.GET['status'] != "":
status = request.GET['status']
else:
status = 1
count = Crm.objects.filter(status=status).count()
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'allitems':Crm.objects.all().filter(status=status)[offset-100:offset],
'count':count,
'page_num':page_num,
}
return render_template(request, "crm.htm", content)
class CRMEditViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
crmid = request.GET['id']
allitems = Crm.objects.get(id=crmid)
categories = ProductCategory.objects.all()
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "crm_edit.htm", content)
class StaffViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Site Staff",
'customers':Admins.objects.all()[:100],
'count':Admins.objects.count(),}
return render_template(request, "admins.htm", content)
class CategoryViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Category.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'customers':Category.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,}
return render_template(request, "categories.htm", content)
class CustomerAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'title': "Add Customer",}
return render_template(request, "customer_add.htm", content)
class CustomerInfoClass(LoginRequiredMixin,TemplateView):
#summary = Customers.objects.all()
def get(self, request, *args, **kwargs):
cid = request.GET['id']
customer = customers.objects.get(contactid=cid)
customeremail= customer.email
customerrewards = CustomerRewards.objects.filter(contactid=cid).all()
totalrewards = CustomerRewards.objects.filter(contactid=cid).aggregate(Sum('points'))
#customers_promocode = SwfCustomerCreditsLog.objects.values_list('customers_promocode', flat=True)
#customers_promocode = customers_promocode['customers_promocode']
#storerewards = SwfCustomerCreditsLog.objects.filter(customers_email_address=customeremail)
storerewards = SwfCustomerCreditsLog.objects.raw('select *,swf_customer_credits_log.id as sid from swf_customer_credits_log , promotions where customers_promocode = coupon AND customers_email_address="'+customeremail+'" AND customers_promocode != ""')
fulldata = list(storerewards)
try:
wish_id = WshWishlist.objects.get(customerid=cid)
wishitems = WsiWishlistitems.objects.filter(wsh_id=wish_id.wsh_id)
except Exception as e:
wishitems = ""
content = {'page_title': "Customers Info",
'customer': customer,
'customerorders':Orders.objects.filter(ocustomerid=cid).all(),
'wishlists':wishitems,
'customerrewards':customerrewards,
'totalrewards':totalrewards,
'storerewards':fulldata,
}
#'count':Admins.objects.count(),}
return render_template(request, "customers_info.htm", content)
class ProductsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Products.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'allitems':Products.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,}
return render_template(request, "products.htm", content)
class ProductViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "productedit.htm", content)
class ProductRelatedClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "productrelated.htm", content)
class ProductsImagesViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "images_products.htm", content)
class ApanelViewOrdersClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
order_status = request.GET['order_status']
        if order_status < 1:
            order_status = 1
count = Orders.objects.filter(order_status=order_status).count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = Orders.objects.all().filter(order_status=order_status)[offset-100:offset]
order_status_links = OrderStatus.objects.all().filter(visible='1')
#crm_messages=CrmMessages.objects.select_related(crmid__orderid='8623')
#return HttpResponse(crm_messages)
content = {'page_title': "Orders",
'allitems':allitems,
'count':count,
'page_num':page_num,
'order_status':order_status,
'order_links':order_status_links,}
return render_template(request, "vieworders.htm", content)
class ApanelViewOrdersStatusClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
allitems = OrderStatus.objects.all()
content = {'page_title': "Orders Status",
'allitems':allitems,
'order_links':OrderStatus.objects.all().filter(visible='1'),}
return render_template(request, "orders_status.htm", content)
class OrderPageClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
oid = request.GET['oid']
order_status_links = OrderStatus.objects.all().filter(visible='1')
allitems = Orders.objects.get(orderid=oid)
try:
transactions = Transactions.objects.get(orderid=oid)
amount = transactions.amount
totalamt = Oitems.objects.filter(orderid=oid).aggregate(Sum('unitprice'))
totalamt = totalamt['unitprice__sum']
except Exception as e:
transactions = ""
totalamt = 0
amount = 0
alloiitems = Oitems.objects.all().filter(orderid=oid)
finaltotal = (totalamt + int(allitems.oshipcost)) - allitems.coupondiscount
balance = finaltotal - amount
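        # Worked example (illustrative): with an item total of 100.00, shipping cost 10,
        # coupon discount 5.00 and a recorded transaction amount of 50.00,
        # finaltotal = (100.00 + 10) - 5.00 = 105.00 and balance = 105.00 - 50.00 = 55.00.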
content = {'page_title': "Orders Status",
'allitems':allitems,
'alloiitems':alloiitems,
'order_links':order_status_links,
'totalamt':totalamt,
'finaltotal':finaltotal,
'paidamt':finaltotal,
'transactions':transactions,
'balance':balance,
}
return render_template(request, "orderpage.htm", content)
class AddAdminsFormClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
allitems = Admins.objects.all()
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = ""
if "id" in request.GET:
allitems = Admins.objects.get(id=request.GET['id'])
else:
allitems = ""
content = {'page_title': "Add User",
'allitems':allitems,
'mode':mode,}
return render_template(request, "admins_add.htm", content)
class RmaPagesClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Rma.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = Rma.objects.all()[offset-100:offset]
content = {'page_title': "Orders Status",
'allitems':allitems,
'count':count,}
return render_template(request, "rma_pages.htm", content)
class RmaViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
rmaid=request.GET['rmaid']
allitems = Rma.objects.get(idrma=rmaid)
content = {'page_title': "View RMA",
'allitems':allitems,}
return render_template(request, "rmaview.htm", content)
class ShippingManagerViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = ShippingCategory.objects.all()
content = {'page_title': "Admin: Shipping Manager View",
'allitems':allitems,
'mode':mode,}
return render_template(request, "adminshippingmanager.htm", content)
class TaxManagerViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = Tax.objects.all()
content = {'page_title': "Admin: Tax Manager View",
'allitems':allitems,
'mode':mode,}
return render_template(request, "taxmanager.htm", content)
class GiftCertificatesViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = GiftCertificates.objects.all().count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = GiftCertificates.objects.all()[offset-100:offset]
content = {'page_title': "Admin: Gift Certificate View",
'allitems':allitems,
'page_num':page_num,
'count':count,
'order_links':OrderStatus.objects.all().filter(visible='1'),}
return render_template(request, "giftcertificate_pages.htm", content)
class EditGiftCertificateClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
giftid=request.GET['id']
allitems = GiftCertificates.objects.get(id=giftid)
total = allitems.certificate_amount + allitems.certificate_expenses
content = {'page_title': "Admin :: Edit Gift Certificate",
'allitems':allitems,
'order_links':OrderStatus.objects.all().filter(visible='1'),
'total':total}
return render_template(request, "edit_giftcertificate.htm", content)
class ProductArticleViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
prod = Products.objects.get(catalogid=pid)
allitems = ProductArticle.objects.all().filter(catalogid=pid)
count = allitems.count()
content = {'page_title': "Admin: Product Articles",
'allitems':allitems,
'prod':prod,
'count':count,
}
return render_template(request, "product_articles.htm", content)
class ProductArticleEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['id']
allpages = ProductArticle.objects.get(id=pid)
content = {'page_title': "Admin :: Edit Article",
'allpages':allpages,}
return render_template(request, "product_article_edit.htm", content)
class ProductArticleAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
content = {'page_title': "Admin :: Add Article",
'pid':pid,}
return render_template(request, "product_article_add.htm", content)
class ProductReviewsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
prod = Products.objects.get(catalogid=pid)
allitems = ProductReview.objects.filter(catalogid=pid).all()
count = allitems.count()
content = {'page_title': "Admin: Product Articles",
'allitems':allitems,
'prod':prod,
'count':count,
}
return render_template(request, "product_reviews.htm", content)
class ProductOptionEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allpages = Products.objects.get(catalogid=pid)
content = {'page_title': "Admin :: Edit Options",
'allpages':allpages,
'prod':pid,}
return render_template(request, "product_options_edit.htm", content)
class BannersViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
allpages = SiteBanners.objects.all()
content = {'page_title': "Admin :: Banner Managements",
'allitems':allpages,}
return render_template(request, "viewbanners.htm", content)
class BannerEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
bid = request.GET['bid']
filename = "/gs/swf_product_images/banner/banner5.png"
allpages = SiteBanners.objects.get(id=bid)
content = {'page_title': "Admin :: Edit banner",
'allpages':allpages,
'bannerpath':filename,}
return render_template(request, "editbanner.htm", content)
class BannersAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Admin :: Add Banner Managements",}
return render_template(request, "addbanner.htm", content)
class GCSfilesClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Admin :: Add Banner Managements",}
file_list = files.listdir('/gs/swf_product_images')
        # Build a simple HTML listing of the bucket contents and return it
        response = HttpResponse()
        for file_name in file_list:
            if '$folder$' not in file_name:
                response.write('<a href="https://storage.cloud.google.com/%s">%s</a><br>' % (file_name[4:], file_name[4:]))
        return response
#return render_template(request, "gcsfiles.htm", content)
class CouponsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Promotions.objects.count()
if "page" in request.GET and request.GET['page'] != "":
page_num = request.GET['page']
else:
page_num = 1
#pages = count/100
page_num = int(page_num)
offset = page_num * 100
allitems = Promotions.objects.all()[offset-100:offset]
content = {'page_title': "Orders Status",
'allitems':allitems,
'count':count,}
return render_template(request, "viewcoupons.htm", content)
|
hughsons/saltwaterfish
|
admin/views.py
|
views.py
|
py
| 28,274
|
python
|
en
|
code
| 1
|
github-code
|
6
|
7520101007
|
"""
Created on Wed Feb 24 12:34:17 2021
@author: Narmin Ghaffari Laleh
"""
##############################################################################
from dataGenerator.dataSetGenerator_ClamMil import Generic_MIL_Dataset
import utils.utils as utils
from extractFeatures import ExtractFeatures
from utils.core_utils import Train_MIL_CLAM
from utils.data_utils import ConcatCohorts_Classic
from eval.eval import CalculatePatientWiseAUC, CalculateTotalROC, MergeResultCSV
from sklearn.model_selection import StratifiedKFold
import numpy as np
import os
import pandas as pd
import random
from sklearn import preprocessing
import torch
##############################################################################
def CLAM_MIL_Training(args):
targetLabels = args.target_labels
args.feat_dir = args.feat_dir[0]
for targetLabel in targetLabels:
for repeat in range(args.repeatExperiment):
args.target_label = targetLabel
random.seed(args.seed)
args.projectFolder = utils.CreateProjectFolder(args.project_name, args.adressExp, targetLabel, args.model_name, repeat+1)
print(args.projectFolder)
if os.path.exists(args.projectFolder):
continue
else:
os.mkdir(args.projectFolder)
args.result_dir = os.path.join(args.projectFolder, 'RESULTS')
os.makedirs(args.result_dir, exist_ok = True)
args.split_dir = os.path.join(args.projectFolder, 'SPLITS')
os.makedirs(args.split_dir, exist_ok = True)
reportFile = open(os.path.join(args.projectFolder,'Report.txt'), 'a', encoding="utf-8")
reportFile.write('-' * 30 + '\n')
reportFile.write(str(args))
reportFile.write('-' * 30 + '\n')
if args.extractFeature:
imgs = os.listdir(args.datadir_train[0])
imgs = [os.path.join(args.datadir_train[0], i) for i in imgs]
ExtractFeatures(data_dir = imgs, feat_dir = args.feat_dir, batch_size = args.batch_size, target_patch_size = -1, filterData = True)
print('\nLOAD THE DATASET FOR TRAINING...\n')
patientsList, labelsList, args.csvFile = ConcatCohorts_Classic(imagesPath = args.datadir_train,
cliniTablePath = args.clini_dir, slideTablePath = args.slide_dir,
label = targetLabel, minNumberOfTiles = args.minNumBlocks,
outputPath = args.projectFolder, reportFile = reportFile, csvName = args.csv_name,
patientNumber = args.numPatientToUse)
yTrueLabel = utils.CheckForTargetType(labelsList)
le = preprocessing.LabelEncoder()
yTrue = le.fit_transform(yTrueLabel)
args.num_classes = len(set(yTrue))
args.target_labelDict = dict(zip(le.classes_, range(len(le.classes_))))
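            # e.g. for label values ['MUT', 'WT'], LabelEncoder sorts the classes and this
            # yields the mapping {'MUT': 0, 'WT': 1}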
utils.Summarize(args, list(yTrue), reportFile)
print('\nLoad the DataSet...')
dataset = Generic_MIL_Dataset(csv_path = args.csvFile,
data_dir = args.feat_dir,
shuffle = False,
seed = args.seed,
print_info = True,
label_dict = args.target_labelDict,
patient_strat = True,
label_col = args.target_label,
ignore = [],
reportFile = reportFile)
if len(patientsList) < 20:
continue
if args.train_full:
print('-' * 30)
print('IT IS A FULL TRAINING FOR ' + targetLabel + '!')
train_data = pd.DataFrame(list(zip(patientsList, yTrue, yTrueLabel)), columns = ['PATIENT', 'yTrue', 'yTrueLabel'])
if args.early_stopping:
val_data = train_data.groupby('yTrue', group_keys = False).apply(lambda x: x.sample(frac = 0.1))
train_data = train_data[~train_data['PATIENT'].isin(list(val_data['PATIENT']))]
train_data.reset_index(inplace = True, drop = True)
val_data.reset_index(inplace = True, drop = True)
df = pd.DataFrame({'train': pd.Series(train_data['PATIENT']), 'test': pd.Series([]), 'val' : pd.Series(val_data['PATIENT'])})
df.to_csv(os.path.join(args.split_dir, 'TrainSplit.csv'), index = False)
train_dataset, val_dataset, test_dataset = dataset.Return_splits(from_id = False, csv_path = os.path.join(args.split_dir, 'TrainSplit.csv'))
else:
df = pd.DataFrame({'train': pd.Series(train_data['PATIENT']), 'test': pd.Series([]), 'val' : pd.Series([])})
df.to_csv(os.path.join(args.split_dir, 'TrainValSplit.csv'), index = False)
train_dataset, val_dataset, test_dataset = dataset.Return_splits(from_id = False, csv_path = os.path.join(args.split_dir, 'TrainValSplit.csv'))
datasets = (train_dataset, val_dataset, test_dataset)
model, _, _ = Train_MIL_CLAM(datasets = datasets, fold = 'FULL', args = args, trainFull = True)
torch.save(model.state_dict(), os.path.join(args.projectFolder, 'RESULTS', 'finalModel'))
print()
print('-' * 30)
reportFile.close()
else:
                print('IT IS A ' + str(args.k) + '-FOLD CROSS VALIDATION TRAINING FOR ' + targetLabel + '!')
patientID = np.array(patientsList)
yTrue = np.array(yTrue)
yTrueLabel = np.array(yTrueLabel)
folds = args.k
kf = StratifiedKFold(n_splits = folds, random_state = args.seed, shuffle = True)
kf.get_n_splits(patientID, yTrue)
foldcounter = 1
for train_index, test_index in kf.split(patientID, yTrue):
testPatients = patientID[test_index]
trainPatients = patientID[train_index]
testyTrue = yTrue[test_index]
trainyTrue = yTrue[train_index]
testyTrueLabel = yTrueLabel[test_index]
trainyTrueLabel = yTrueLabel[train_index]
print('GENERATE NEW TILES...\n')
print('FOR TRAIN SET...\n')
train_data = pd.DataFrame(list(zip(trainPatients, trainyTrue, trainyTrueLabel)), columns = ['PATIENT', 'yTrue', 'yTrueLabel'])
print('FOR VALIDATION SET...\n')
val_data = train_data.groupby('yTrue', group_keys = False).apply(lambda x: x.sample(frac = 0.1))
train_data = train_data[~train_data['PATIENT'].isin(list(val_data['PATIENT']))]
print('FOR TEST SET...\n')
test_data = pd.DataFrame(list(zip(testPatients, testyTrue, testyTrueLabel)), columns = ['PATIENT', 'yTrue', 'yTrueLabel'])
train_data.reset_index(inplace = True, drop = True)
test_data.reset_index(inplace = True, drop = True)
val_data.reset_index(inplace = True, drop = True)
print('-' * 30)
print("K FOLD VALIDATION STEP => {}".format(foldcounter))
print('-' * 30)
df = pd.DataFrame({'train': pd.Series(train_data['PATIENT']), 'test': pd.Series(test_data['PATIENT']), 'val' : pd.Series(val_data['PATIENT'])})
df.to_csv(os.path.join(args.split_dir, 'TrainTestValSplit_{}.csv'.format(foldcounter)), index = False)
train_dataset, val_dataset, test_dataset = dataset.Return_splits(from_id = False, csv_path = os.path.join(args.split_dir, 'TrainTestValSplit_{}.csv'.format(foldcounter)))
datasets = (train_dataset, val_dataset, test_dataset)
model, results, test_auc = Train_MIL_CLAM(datasets = datasets, fold = foldcounter, args = args, trainFull = False)
reportFile.write('AUC calculated by CLAM' + '\n')
reportFile.write(str(test_auc) + '\n')
reportFile.write('-' * 30 + '\n')
patients = []
filaNames = []
yTrue_test = []
yTrueLabe_test = []
probs = {}
for i_temp in range(args.num_classes):
key = utils.get_key_from_value(args.target_labelDict, i_temp)
probs[key] = []
for item in list(results.keys()):
temp = results[item]
patients.append(temp['PATIENT'])
filaNames.append(temp['FILENAME'])
yTrue_test.append(temp['label'])
yTrueLabe_test.append(utils.get_key_from_value(args.target_labelDict, temp['label']))
for key in list(args.target_labelDict.keys()):
probs[key].append(temp['prob'][0][utils.get_value_from_key(args.target_labelDict, key)])
probs = pd.DataFrame.from_dict(probs)
df = pd.DataFrame(list(zip(patients, filaNames, yTrue_test, yTrueLabe_test)), columns =['PATIENT', 'FILENAME', 'yTrue', 'yTrueLabel'])
df = pd.concat([df, probs], axis = 1)
testResultsPath = os.path.join(args.result_dir, 'TEST_RESULT_SLIDE_BASED_FOLD_' + str(foldcounter) + '.csv')
df.to_csv(testResultsPath, index = False)
CalculatePatientWiseAUC(resultCSVPath = testResultsPath, args = args, foldcounter = foldcounter , clamMil = True, reportFile = reportFile)
reportFile.write('-' * 30 + '\n')
foldcounter += 1
patientScoreFiles = []
slideScoreFiles = []
for i in range(args.k):
patientScoreFiles.append('TEST_RESULT_PATIENT_BASED_FOLD_' + str(i + 1) + '.csv')
slideScoreFiles.append('TEST_RESULT_SLIDE_BASED_FOLD_' + str(i + 1) + '.csv')
CalculateTotalROC(resultsPath = args.result_dir, results = patientScoreFiles, target_labelDict = args.target_labelDict, reportFile = reportFile)
reportFile.write('-' * 30 + '\n')
MergeResultCSV(args.result_dir, slideScoreFiles, milClam = True)
reportFile.close()
##############################################################################
|
KatherLab/HIA
|
CLAM_MIL_Training.py
|
CLAM_MIL_Training.py
|
py
| 12,138
|
python
|
en
|
code
| 76
|
github-code
|
6
|
34200144967
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm as cm
import matplotlib.lines as mlines
df= pd.read_csv('/home/nishchay/Documents/Arcon/Day7/winequality-red.csv')
X1=df.iloc[:,11].values
Y1=df.iloc[:,0].values
Y2=df.iloc[:,1].values
fig = plt.figure()
ax1 = fig.add_subplot(111)
cmap = cm.get_cmap('jet', 20)
cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
ax1.grid(True)
plt.title('Wine Quality Correlation')
labels=['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar','chlorides', 'free sulfur dioxide','asdf']
ax1.set_xticklabels(labels,fontsize=6)
ax1.set_yticklabels(labels,fontsize=6)
fig.colorbar(cax, ticks=[- .6,- .5,- .4,- .3,- .2,- .1,0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1])
plt.show()
#################################################################################
col_labels = df.columns[1:]
corMat2 = df.corr().values[::-1]
fig, axes = plt.subplots(nrows=1,ncols=1)
ax0 = axes
ax0.set_xticks(np.linspace(.5,12.5,11))
ax0.set_xticklabels(col_labels,rotation=45)
ax0.set_yticks(np.linspace(.5,12.5,11))
ax0.set_yticklabels(col_labels[::-1],rotation=45)
#ax0.set_yticklabels(col_labels,rotation=45)
#visualize correlations using heatmap
cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
cmap = cm.get_cmap('jet', 20)
fig.colorbar(cax, ticks=[- .6,- .5,- .4,- .3,- .2,- .1,0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1])
plt.pcolor(corMat2,cmap='jet')
plt.show()
############################################################################
plt.plot(Y1,X1,'r--',Y2,X1,'bs')
plt.xlabel('Wine Quality')
plt.ylabel('fixed acidity')
red_line = mlines.Line2D(Y1,X1,color='red',marker='_',markersize=10,label='Fixed Acidity')
blue_line=mlines.Line2D(Y2,X1,color='blue',marker='|',markersize=10,label='Volatile Acidity')
plt.legend(handles=[red_line,blue_line])
plt.show()
labels = 'Python', 'C++', 'Ruby', 'Java'
sizes = [215, 130, 245, 210]
colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue']
explode = (0.1, 0, 0, 0) # explode 1st slice
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.show()
|
nagrawal63/Neural-Networks
|
Day7/plot.py
|
plot.py
|
py
| 2,190
|
python
|
en
|
code
| 0
|
github-code
|
6
|
11353423523
|
'''
https://leetcode.com/explore/challenge/card/february-leetcoding-challenge-2021/584/week-1-february-1st-february-7th/3630/
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def rightSideView(self, root: TreeNode) -> List[int]:
if not root:
return None
result = []
self.dfs(root, result, 0)
return result
def dfs(self, node, trace, depth):
if depth == len(trace):
trace.append(node.val)
if node.right:
self.dfs(node.right, trace, depth + 1)
if node.left:
self.dfs(node.left, trace, depth + 1)
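# The DFS visits the right subtree before the left, so the first node reached at each
# depth (when len(trace) == depth) is the rightmost node of that level; overall O(n)
# time with O(h) recursion depth for a tree of height h.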
|
jihoonyou/problem-solving
|
leetcode/binary-tree-right-side-view.py
|
binary-tree-right-side-view.py
|
py
| 799
|
python
|
en
|
code
| 0
|
github-code
|
6
|
29929387988
|
#!/bin/python
import xml.etree.ElementTree as ET
import sys
tree = ET.parse(sys.argv[1])
root = tree.getroot()
'''
print root.find('deckname').text
main = root.find('./zone')
for c in main.findall(path='card'):
print c.get('number')+c.get('name')
'''
for c in root[2]:
print(c.get('number') + ' ' + c.get('name'))
for c in root[3]:
print('SB: '+c.get('number') + ' ' + c.get('name'))
|
nikisix/dex
|
xml_parser.py
|
xml_parser.py
|
py
| 398
|
python
|
en
|
code
| 0
|
github-code
|
6
|
57215059
|
import pytest
import numpy as np
import os
from netZooPy import dragon
def test_dragon():
#1. test1
print('Start Dragon run ...')
n = 1000
p1 = 500
p2 = 100
X1, X2, Theta, _ = dragon.simulate_dragon_data(eta11=0.005, eta12=0.005, eta22=0.05,
p1=100, p2=500, epsilon=[0.1,0.1],
n=n, seed=123)
os.system('curl -O https://netzoo.s3.us-east-2.amazonaws.com/netZooPy/unittest_datasets/dragonx1.npy')
os.system('curl -O https://netzoo.s3.us-east-2.amazonaws.com/netZooPy/unittest_datasets/dragonx2.npy')
X1=np.load('dragonx1.npy')
X2=np.load('dragonx2.npy')
lambdas, lambdas_landscape = dragon.estimate_penalty_parameters_dragon(X1, X2)
lambdasSingle=tuple([int(10*x)/10 for x in lambdas]) # 3 digit precision
alamb=lambdas_landscape[1,1]
assert(lambdasSingle == (0.9, 0.9))
assert((alamb < 398.7*1.002) & (alamb > 398.7*0.998)) #0.2% of error
assert(int(X1[1,1]*1000)/1000 ==0.880)
assert(int(X2[1,1]*1000)/1000 ==0.664)
#2. test2
r = dragon.get_partial_correlation_dragon(X1, X2, lambdas)
adj_p_vals, p_vals = dragon.estimate_p_values_dragon(r, n, p1, p2, lambdas)
p_valstest=int(p_vals[2,1]*100)/100
adj_p_valstest=int(adj_p_vals[2,1]*10)/10 # 3 digit precision
assert(int(np.max(r)*100)/100 == 0.03)
assert(int(r[1,2]*100000)/100000 == 0.00012)
assert(p_valstest == 0.96)
assert(adj_p_valstest == 0.9)
#3. test monte carlo p-values
p1 = 3
p2 = 4
n = 10
lam = [0,0] # no shrinkage
test11 = np.array([[1,1/2.,-1/4.],
[1/2.,1,1/8.],
[1/4.,1/8.,1]])
test12 = np.array([[-3/4.,1/2.,1/4.,0],
[-3/4.,1/2.,1/4.,0],
[-3/4.,1/2.,1/4.,0]])
test21 = np.transpose(test12)
test22 = np.array([[1,-3/4.,1/2.,-1/4.],
[-3/4.,1,1/8.,1/16.],
[1/2.,1/8.,1,1/32.],
[-1/4.,1/16.,1/32.,1]])
test_mc_mat = np.identity(p1+p2)
test_mc_mat[0:3,0:3] = test11
test_mc_mat[0:3,3:7] = test12
test_mc_mat[3:7,0:3] = test21
test_mc_mat[3:7,3:7] = test22
dragon_p_mc = dragon.dragon.estimate_p_values_mc(test_mc_mat,n,p1,p2,lam,seed=412)
ground_truth_mc_p = np.array([[0,0,1/3.,0,1/12.,7/12.,1],
[0,0,2/3.,0,1/12.,7/12.,1],
[1/3.,2/3.,0,0,1/12,7/12.,1],
[0,0,0,0,0,0,5/6.],
[1/12.,1/12.,1/12.,0,0,5/6.,5/6.],
[7/12.,7/12.,7/12.,0,5/6.,0,5/6.],
[1,1,1,5/6.,5/6.,5/6.,0]])
assert(np.array_equal(dragon_p_mc,ground_truth_mc_p))
return()
def test_remove_zero_variance_preds():
layer1 = np.array([[1,2,3],
[1,5,6],
[1,4,9],
[1,10,11]])
layer2 = np.array([[1,2,3],
[2,5,6],
[3,4,9],
[4,10,11]])
layer1_manual_complete = np.array([[2,3],
[5,6],
[4,9],
[10,11]])
layer1_complete = dragon.dragon.remove_zero_variance_preds(layer1)
layer2_complete = dragon.dragon.remove_zero_variance_preds(layer2)
assert(np.array_equal(layer1_complete, layer1_manual_complete))
assert(np.array_equal(layer2_complete, layer2))
return()
def test_zero_variance_exception_estimate_penalty_parameters_dragon():
layer1 = np.array([[1,2,3],
[1,5,6],
[1,4,9],
[1,10,11]])
layer2 = np.array([[1,2,3],
[2,5,6],
[3,4,9],
[4,10,11]])
with pytest.raises(Exception) as exc:
dragon.dragon.estimate_penalty_parameters_dragon(X1 = layer1, X2 = layer2)
assert(str(exc.value) == "[netZooPy.dragon.dragon.estimate_penalty_parameters_dragon] Found variables with zero variance. These must be removed before use of DRAGON. Consider use of `dragon.dragon.remove_zero_variance_preds`.")
return()
def test_zero_variance_exception_get_shrunken_covariance_dragon():
layer1 = np.array([[1,2,3],
[1,5,6],
[1,4,9],
[1,10,11]])
layer2 = np.array([[1,2,3],
[2,5,6],
[3,4,9],
[4,10,11]])
with pytest.raises(Exception) as exc:
dragon.dragon.get_shrunken_covariance_dragon(X1 = layer1, X2 = layer2, lambdas = [0.5,0.5])
assert(str(exc.value) == "[netZooPy.dragon.dragon.get_shrunken_covariance_dragon] Found variables with zero variance. These must be removed before use of DRAGON. Consider use of `dragon.dragon.remove_zero_variance_preds`.")
return()
def test_singularity_exception():
layer1 = np.array([[1,2,3],
[2,5,6]])
layer2 = np.array([[1,2,3],
[2,5,6]])
with pytest.raises(Exception) as exc:
dragon.dragon.get_shrunken_covariance_dragon(X1 = layer1, X2 = layer2, lambdas=[0,0]) # no shrinkage
assert(str(exc.value) == "[dragon.dragon.get_shrunken_covariance_dragon] Sigma is not invertible for the input values of lambda. Make sure that you are using `estimate_penalty_parameters_dragon` to select lambda. You may have variables with very small variance or highly collinear variables in your data. Consider removing such variables.")
return()
|
netZoo/netZooPy
|
tests/test_dragon.py
|
test_dragon.py
|
py
| 5,525
|
python
|
en
|
code
| 71
|
github-code
|
6
|
20338445920
|
import numpy as np
def Poisson1D( v, L ):
# Solve 1-d Poisson equation:
    # d^2 u / dx^2 = v for 0 <= x <= L
# using spectral method
J = len(v)
# Fourier transform source term
v_tilde = np.fft.fft(v)
# vector of wave numbers
k = (2*np.pi/L)*np.concatenate((np.linspace(0,J/2-1,J/2),np.linspace(-J/2,-1,J/2)))
k[0] = 1
# Calculate Fourier transform of u
u_tilde = np.divide(-v_tilde,np.power(k,2))
# Inverse Fourier transform to obtain u
u = np.real(np.fft.ifft(u_tilde))
    # Specify the arbitrary constant by forcing u = 0 at the first grid point
u = u - u[0]
return u
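

# A minimal self-check sketch (an assumption, not part of the original file):
# for the periodic source v(x) = -sin(2*pi*x/L), the solution of u'' = v with
# u(0) = 0 is u(x) = (L/(2*pi))**2 * sin(2*pi*x/L), which the spectral solver
# should reproduce to machine precision.
if __name__ == "__main__":
    L = 2.0
    J = 64
    x = np.linspace(0.0, L, J, endpoint=False)
    v = -np.sin(2 * np.pi * x / L)
    u = Poisson1D(v, L)
    u_exact = (L / (2 * np.pi)) ** 2 * np.sin(2 * np.pi * x / L)
    print("max abs error:", np.max(np.abs(u - u_exact)))  # ~1e-16 expected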
|
snytav/Kaa
|
Poisson1D.py
|
Poisson1D.py
|
py
| 615
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8385121611
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import optparse
import collections
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
import sumolib # noqa
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
def get_options(args=None):
optParser = optparse.OptionParser()
optParser.add_option("-n", "--net-file", dest="netfile",
help="define the net file (mandatory)")
optParser.add_option("-o", "--output-file", dest="outfile",
default="tlsAdaptation.add.xml", help="define the output filename")
optParser.add_option("-r", "--route-files", dest="routefiles",
help="define the route file seperated by comma(mandatory)")
optParser.add_option("-b", "--begin", dest="begin", type="int",
default=0, help="begin time of the optimization period with unit second")
optParser.add_option("-y", "--yellow-time", dest="yellowtime", type="int",
default=4, help="yellow time")
optParser.add_option("-a", "--all-red", dest="allred", type="int",
default=0, help="all-red time")
optParser.add_option("-l", "--lost-time", dest="losttime", type="int",
default=4, help="lost time for start-up and clearance in each phase")
optParser.add_option("-g", "--min-green", dest="mingreen", type="int",
default=4, help=" minimal green time when there is no traffic volume")
optParser.add_option("--green-filter-time", dest="greenFilter", type="int", default=0,
help="when computing critical flows, do not count phases with a green time below INT")
optParser.add_option("-c", "--min-cycle", dest="mincycle", type="int",
default=20, help="minimal cycle length")
optParser.add_option("-C", "--max-cycle", dest="maxcycle", type="int",
default=120, help="maximal cycle length")
optParser.add_option("-e", "--existing-cycle", dest="existcycle", action="store_true",
default=False, help="use the existing cycle length")
optParser.add_option("--write-critical-flows", dest="write_critical_flows", action="store_true",
default=False, help="print critical flows for each tls and phase")
optParser.add_option("-p", "--program", dest="program", default="a",
help="save new definitions with this program id")
optParser.add_option("-H", "--saturation-headway", dest="satheadway", type="float", default=2,
help="saturation headway in seconds for calculating hourly saturation flows")
optParser.add_option("-R", "--restrict-cyclelength", dest="restrict", action="store_true",
default=False, help="restrict the max. cycle length as the given one")
optParser.add_option("-u", "--unified-cycle", dest="unicycle", action="store_true", default=False,
help=" use the calculated max cycle length as the cycle length for all intersections")
optParser.add_option("-v", "--verbose", dest="verbose", action="store_true",
default=False, help="tell me what you are doing")
(options, args) = optParser.parse_args(args=args)
if not options.netfile or not options.routefiles:
optParser.print_help()
sys.exit()
return options
def getFlows(net, routeFiles, tlsList, begin, verbose):
tlsFlowsMap = {}
end = begin + 3600
for tls in tlsList:
tlsFlowsMap[tls._id] = collections.defaultdict(lambda: collections.defaultdict(int))
for file in routeFiles.split(','):
if verbose:
print("route file:%s" % file)
for veh in sumolib.output.parse(file, 'vehicle'):
if float(veh.depart) >= end:
break
if float(veh.depart) >= begin:
edgeList = veh.route[0].edges.split()
for tls in tlsList:
# c: [[inLane, outLane, linkNo],[],..]
for c in tls.getConnections():
inEdge = c[0].getEdge().getID()
outEdge = c[1].getEdge().getID()
if inEdge in edgeList:
beginIndex = edgeList.index(inEdge)
if beginIndex < len(edgeList) - 1 and edgeList[beginIndex + 1] == outEdge:
pce = 1.
if veh.type == "bicycle":
pce = 0.2
elif veh.type in ["moped", "motorcycle"]:
pce = 0.5
elif veh.type in ["truck", "trailer", "bus", "coach"]:
pce = 3.5
tlsFlowsMap[tls._id][inEdge + " " + outEdge][c[2]] += pce
# remove the doubled counts
connFlowsMap = {}
for t in tlsList:
connFlowsMap[t.getID()] = {}
for subRoute in tlsFlowsMap[t.getID()]:
totalConns = len(tlsFlowsMap[t.getID()][subRoute])
for conn in tlsFlowsMap[t.getID()][subRoute]:
tlsFlowsMap[t.getID()][subRoute][conn] /= totalConns
connFlowsMap[t.getID()][conn] = tlsFlowsMap[t.getID()][subRoute][conn]
# remove the redundant connection flows
connFlowsMap = removeRedundantFlows(t, connFlowsMap)
return connFlowsMap
def getEffectiveTlsList(tlsList, connFlowsMap, verbose):
effectiveTlsList = []
for tl in tlsList:
if len(tl.getPrograms()) == 0:
continue
valid = True
for program in tl.getPrograms().values():
for phase in program.getPhases():
if len(phase.state) > len(tl.getConnections()):
print("Skipping TLS '%s' due to unused states (%s states, %s connections)" % (
tl.getID(), len(phase.state), len(tl.getConnections())))
valid = False
break
if valid:
for conn in connFlowsMap[tl.getID()]:
if connFlowsMap[tl.getID()][conn] > 0:
effectiveTlsList.append(tl)
break
return effectiveTlsList
def removeRedundantFlows(t, connFlowsMap):
    # if two or more intersections share the same lane-to-lane connection indices,
    # the redundant connection flows will be set to zero.
connsList = t.getConnections()
connsList = sorted(connsList, key=lambda connsList: connsList[2])
redundantConnsList = []
identical = True
for c1 in connsList:
for c2 in connsList:
if c1[2] != c2[2]:
if c1[1]._edge == c2[0]._edge:
identical = identityCheck(c1[0]._edge, c2[0]._edge._incoming, identical)
if identical:
for toEdge in c2[0]._edge._outgoing:
for c in c2[0]._edge._outgoing[toEdge]:
if c._tlLink not in redundantConnsList:
redundantConnsList.append(c._tlLink)
else:
for conn_1 in c1[0]._edge._outgoing[c2[0]._edge]:
if conn_1._direction == 's':
for toEdge in c2[0]._edge._outgoing:
for conn_2 in c2[0]._edge._outgoing[toEdge]:
if conn_2._tlLink not in redundantConnsList:
redundantConnsList.append(conn_2._tlLink)
for conn in redundantConnsList:
if conn in connFlowsMap[t._id]:
connFlowsMap[t._id][conn] = 0.
return connFlowsMap
def identityCheck(e1, incomingLinks, identical):
for i in incomingLinks:
if i != e1:
identical = False
break
return identical
def getLaneGroupFlows(tl, connFlowsMap, phases, greenFilter):
connsList = tl.getConnections()
groupFlowsMap = {} # i(phase): duration, laneGroup1, laneGroup2, ...
connsList = sorted(connsList, key=lambda connsList: connsList[2])
# check if there are shared lane groups, i.e. some lane groups have only "g" (no "G")
ownGreenConnsList = []
for i, p in enumerate(phases):
for j, control in enumerate(p.state):
if control == "G" and j not in ownGreenConnsList:
ownGreenConnsList.append(j)
yellowRedTime = 0
greenTime = 0
currentLength = 0
phaseLaneIndexMap = collections.defaultdict(list)
for i, p in enumerate(phases):
currentLength += p.duration
if 'G' in p.state and 'y' not in p.state and p.duration >= greenFilter:
greenTime += p.duration
groupFlowsMap[i] = [p.duration]
groupFlows = 0
laneIndexList = []
for j, control in enumerate(p.state):
inEdge = connsList[j][0]._edge._id
if j == 0:
exEdge = inEdge
if (inEdge == exEdge and control == 'G') or (inEdge == exEdge and
control == 'g' and j not in ownGreenConnsList):
if j in connFlowsMap[tl._id]:
groupFlows += connFlowsMap[tl._id][j]
if connsList[j][0].getIndex() not in laneIndexList:
laneIndexList.append(connsList[j][0].getIndex())
if exEdge != inEdge or j == len(p.state) - 1:
if laneIndexList:
phaseLaneIndexMap[i].append(laneIndexList)
groupFlowsMap[i].append(groupFlows)
laneIndexList = []
groupFlows = 0
if control == "G":
if j in connFlowsMap[tl._id]:
groupFlows = connFlowsMap[tl._id][j]
if connsList[j][0].getIndex() not in laneIndexList:
laneIndexList.append(connsList[j][0].getIndex())
exEdge = inEdge
elif 'G' not in p.state and 'g' in p.state and 'y' not in p.state and 'r' not in p.state:
print("Check: only g for all connections:%s in phase %s" % (tl._id, i))
elif ('G' not in p.state and 'g' not in p.state) or ('G' not in p.state and 'y' in p.state and 'r' in p.state):
yellowRedTime += int(p.duration)
if options.verbose and i in groupFlowsMap:
print("phase: %s" % i)
print("group flows: %s" % groupFlowsMap[i])
print("The used lanes: %s" % phaseLaneIndexMap[i])
if options.verbose:
print("the current cycle length:%s sec" % currentLength)
return groupFlowsMap, phaseLaneIndexMap, currentLength
def getMaxOptimizedCycle(groupFlowsMap, phaseLaneIndexMap, currentLength, cycleList, options):
lostTime = len(groupFlowsMap) * options.losttime + options.allred
satFlows = 3600. / options.satheadway
# calculate the critical flow ratios and the respective sum
criticalFlowRateMap = {}
    for i in groupFlowsMap:  # [duration, groupFlow1, groupFlow2, ...]
criticalFlowRateMap[i] = 0.
maxFlow = 0
index = None
if len(groupFlowsMap[i][1:]) > 0:
for j, f in enumerate(groupFlowsMap[i][1:]):
if f >= maxFlow:
maxFlow = f
index = j
criticalFlowRateMap[i] = (maxFlow / float((len(phaseLaneIndexMap[i][index])))) / satFlows
else:
criticalFlowRateMap[i] = 0.
sumCriticalFlows = sum(criticalFlowRateMap.values())
if options.existcycle:
optCycle = currentLength
elif sumCriticalFlows >= 1.:
optCycle = options.maxcycle
if options.verbose:
print("Warning: the sum of the critical flows >= 1:%s" % sumCriticalFlows)
else:
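        # Webster's optimal cycle length: C0 = (1.5*L + 5) / (1 - Y), with L the
        # total lost time and Y the sum of the critical flow ratios.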
optCycle = int(round((1.5 * lostTime + 5.) / (1. - sumCriticalFlows)))
if not options.existcycle and optCycle < options.mincycle:
optCycle = options.mincycle
elif not options.existcycle and optCycle > options.maxcycle:
optCycle = options.maxcycle
cycleList.append(optCycle)
return cycleList
def optimizeGreenTime(tl, groupFlowsMap, phaseLaneIndexMap, currentLength, options):
lostTime = len(groupFlowsMap) * options.losttime + options.allred
satFlows = 3600. / options.satheadway
# calculate the critical flow ratios and the respective sum
criticalFlowRateMap = {}
    for i in groupFlowsMap:  # [duration, groupFlow1, groupFlow2, ...]
criticalFlowRateMap[i] = 0.
maxFlow = 0
index = None
if len(groupFlowsMap[i][1:]) > 0:
for j, f in enumerate(groupFlowsMap[i][1:]):
if f >= maxFlow:
maxFlow = f
index = j
criticalFlowRateMap[i] = (maxFlow / float((len(phaseLaneIndexMap[i][index])))) / satFlows
else:
criticalFlowRateMap[i] = 0.
sumCriticalFlows = sum(criticalFlowRateMap.values())
if options.write_critical_flows:
print(tl.getID(), criticalFlowRateMap)
if options.existcycle:
optCycle = currentLength
elif sumCriticalFlows >= 1.:
optCycle = options.maxcycle
if options.verbose:
print("Warning: the sum of the critical flows >= 1:%s" % sumCriticalFlows)
else:
optCycle = int(round((1.5 * lostTime + 5.) / (1. - sumCriticalFlows)))
if not options.existcycle and optCycle < options.mincycle:
optCycle = options.mincycle
elif not options.existcycle and optCycle > options.maxcycle:
optCycle = options.maxcycle
# calculate the green time for each critical group
effGreenTime = optCycle - lostTime
totalLength = lostTime
minGreenPhasesList = []
adjustGreenTimes = 0
totalGreenTimes = 0
subtotalGreenTimes = 0
for i in criticalFlowRateMap:
groupFlowsMap[i][0] = effGreenTime * \
(criticalFlowRateMap[i] / sum(criticalFlowRateMap.values())) - options.yellowtime + options.losttime
groupFlowsMap[i][0] = int(round(groupFlowsMap[i][0]))
totalGreenTimes += groupFlowsMap[i][0]
if groupFlowsMap[i][0] < options.mingreen:
groupFlowsMap[i][0] = options.mingreen
minGreenPhasesList.append(i)
else:
subtotalGreenTimes += groupFlowsMap[i][0]
totalLength += groupFlowsMap[i][0]
# adjust the green times if minimal green times are applied for keeping the defined maximal cycle length.
if minGreenPhasesList and totalLength > options.maxcycle and options.restrict:
if options.verbose:
print("Re-allocate the green splits!")
adjustGreenTimes = totalGreenTimes - len(minGreenPhasesList) * options.mingreen
for i in groupFlowsMap:
if i not in minGreenPhasesList:
groupFlowsMap[i][0] = int((groupFlowsMap[i][0] / float(subtotalGreenTimes)) * adjustGreenTimes)
if options.verbose:
totalLength = lostTime
for i in groupFlowsMap:
totalLength += groupFlowsMap[i][0]
print("Green time for phase %s: %s" % (i, groupFlowsMap[i][0]))
print("the optimal cycle length:%s" % totalLength)
return groupFlowsMap
def main(options):
net = sumolib.net.readNet(options.netfile, withPrograms=True, withPedestrianConnections=True)
tlsList = net.getTrafficLights()
if options.verbose:
print("the total number of tls: %s" % len(tlsList))
print("Begin time:%s" % options.begin)
# get traffic flows for each connection at each TL
connFlowsMap = getFlows(net, options.routefiles, tlsList, options.begin, options.verbose)
# remove the tls where no traffic volumes exist
effectiveTlsList = getEffectiveTlsList(tlsList, connFlowsMap, options.verbose)
with open(options.outfile, 'w') as outf:
outf.write('<?xml version="1.0" encoding="UTF-8"?>\n')
outf.write('<additional>\n')
if len(effectiveTlsList) > 0:
if options.unicycle:
cycleList = []
if options.verbose:
print("Firstly only calculate the maximal optimized cycle length! ")
for tl in effectiveTlsList:
if options.verbose:
print("tl-logic ID: %s" % tl._id)
programs = tl.getPrograms()
for pro in programs:
phases = programs[pro].getPhases()
# get the connection flows and group flows
groupFlowsMap, phaseLaneIndexMap, currentLength = getLaneGroupFlows(tl, connFlowsMap, phases, 0)
# only optimize the cycle length
cycleList = getMaxOptimizedCycle(groupFlowsMap, phaseLaneIndexMap,
currentLength, cycleList, options)
options.maxcycle = max(cycleList)
options.mincycle = max(cycleList)
options.restrict = True
if options.verbose:
print("The maximal optimized cycle length is %s." % max(cycleList))
print(" It will be used for calculating the green splits for all intersections.")
            # calculate the green splits; the optimal cycle length will also be calculated here if options.unicycle is False.
for tl in effectiveTlsList:
if options.verbose:
print("tl-logic ID: %s" % tl._id)
programs = tl.getPrograms()
for pro in programs:
phases = programs[pro].getPhases()
# get the connection flows and group flows
groupFlowsMap, phaseLaneIndexMap, currentLength = getLaneGroupFlows(
tl, connFlowsMap, phases, options.greenFilter)
# optimize the cycle length and calculate the respective green splits
groupFlowsMap = optimizeGreenTime(tl, groupFlowsMap, phaseLaneIndexMap, currentLength, options)
# write output
outf.write(' <tlLogic id="%s" type="%s" programID="%s" offset="%i">\n' %
(tl._id, programs[pro]._type, options.program, programs[pro]._offset))
phases = programs[pro].getPhases()
for i, p in enumerate(phases):
duration = p.duration
if i in groupFlowsMap:
duration = groupFlowsMap[i][0]
outf.write(' <phase duration="%s" state="%s"/>\n' % (duration, p.state))
outf.write(' </tlLogic>\n')
else:
print("There are no flows at the given intersections. No green time optimization is done.")
outf.write('</additional>\n')
if __name__ == "__main__":
options = get_options(sys.argv)
main(options)
|
ngctnnnn/DRL_Traffic-Signal-Control
|
sumo-rl/sumo/tools/tlsCycleAdaptation.py
|
tlsCycleAdaptation.py
|
py
| 19,264
|
python
|
en
|
code
| 17
|
github-code
|
6
|
20793244215
|
import numpy as np
from jax import numpy as jnp
from flax import struct
from flax.traverse_util import flatten_dict, unflatten_dict
from flax.core import Scope, lift, freeze, unfreeze
from commplax import comm, xcomm, xop, adaptive_filter as af
from commplax.util import wrapped_partial as wpartial
from typing import Any, NamedTuple, Iterable, Callable, Optional
Array = Any
# related: https://github.com/google/jax/issues/6853
@struct.dataclass
class SigTime:
start: int = struct.field(pytree_node=False)
stop: int = struct.field(pytree_node=False)
sps: int = struct.field(pytree_node=False)
class Signal(NamedTuple):
val: Array
t: Any = SigTime(0, 0, 2)
def taxis(self):
return self.t[0].shape[0], -self.t[0].shape[1]
def __mul__(self, other):
Signal._check_type(other)
return Signal(self.val * other, self.t)
def __add__(self, other):
Signal._check_type(other)
return Signal(self.val + other, self.t)
def __sub__(self, other):
Signal._check_type(other)
return Signal(self.val - other, self.t)
def __truediv__(self, other):
Signal._check_type(other)
return Signal(self.val / other, self.t)
def __floordiv__(self, other):
Signal._check_type(other)
return Signal(self.val // other, self.t)
def __imul__(self, other):
return self * other
def __iadd__(self, other):
return self + other
def __isub__(self, other):
return self - other
def __itruediv__(self, other):
return self / other
def __ifloordiv__(self, other):
return self // other
@classmethod
def _check_type(cls, other):
assert not isinstance(other, cls), 'not implemented'
def zeros(key, shape, dtype=jnp.float32): return jnp.zeros(shape, dtype)
def ones(key, shape, dtype=jnp.float32): return jnp.ones(shape, dtype)
def delta(key, shape, dtype=jnp.float32):
k1d = comm.delta(shape[0], dtype=dtype)
return jnp.tile(np.expand_dims(k1d, axis=list(range(1, len(shape)))), (1,) + shape[1:])
def gauss(key, shape, dtype=jnp.float32):
taps = shape[0]
k1d = comm.gauss(comm.gauss_minbw(taps), taps=taps, dtype=dtype)
return jnp.tile(np.expand_dims(k1d, axis=list(range(1, len(shape)))), (1,) + shape[1:])
def dict_replace(col, target, leaf_only=True):
col_flat = flatten_dict(unfreeze(col))
diff = {}
for keys_flat in col_flat.keys():
for tar_key, tar_val in target.items():
if (keys_flat[-1] == tar_key if leaf_only else (tar_key in keys_flat)):
diff[keys_flat] = tar_val
col_flat.update(diff)
col = unflatten_dict(col_flat)
return col
def update_varcolbykey(var, col_name, target, leaf_only=True):
wocol, col = var.pop(col_name)
col = dict_replace(col, target, leaf_only=leaf_only)
del var
return freeze({**wocol, col_name: col})
def update_aux(var, tar):
return update_varcolbykey(var, 'aux_inputs', tar, leaf_only=True)
def conv1d_t(t, taps, rtap, stride, mode):
assert t.sps >= stride, f'sps of input SigTime must be >= stride: {stride}, got {t.sps} instead'
if rtap is None:
rtap = (taps - 1) // 2
delay = -(-(rtap + 1) // stride) - 1
if mode == 'full':
tslice = (-delay * stride, taps - stride * (rtap + 1)) #TODO: think more about this
elif mode == 'same':
tslice = (0, 0)
elif mode == 'valid':
tslice = (delay * stride, (delay + 1) * stride - taps)
else:
raise ValueError('invalid mode {}'.format(mode))
return SigTime((t.start + tslice[0]) // stride, (t.stop + tslice[1]) // stride, t.sps // stride)
def conv1d_slicer(taps, rtap=None, stride=1, mode='valid'):
def slicer(signal):
x, xt = signal
yt = conv1d_t(xt, taps, rtap, stride, mode)
D = xt.sps // yt.sps
zt = SigTime(yt.start * D, yt.stop * D, xt.sps)
x = x[zt.start - xt.start: x.shape[0] + zt.stop - xt.stop]
return Signal(x, zt)
return slicer
def fullsigval(inputs: Signal, fill_value=1):
x, t = inputs
full_shape = (x.shape[0] + t.start - t.stop,) + x.shape[1:]
return jnp.full(full_shape, fill_value, dtype=x.dtype)
def vmap(f,
variable_axes={
'params': -1,
'const': None
},
split_rngs={
'params': True,
},
in_axes=(Signal(-1, None),), out_axes=Signal(-1, None)):
    # in_axes needs to be wrapped by a tuple, see Flax's lifted vmap implementation:
# https://github.com/google/flax/blob/82e9798274c927286878c4600b4b09650d1e7935/flax/core/lift.py#L395
vf = lift.vmap(f,
variable_axes=variable_axes, split_rngs=split_rngs,
in_axes=in_axes, out_axes=out_axes)
vf.__name__ = 'vmapped_' + f.__name__ # [Workaround]: lifted transformation does not keep the original name
return vf
def scan(f, in_axes=0, out_axes=0):
sf = lift.scan(f, in_axes=in_axes, out_axes=out_axes)
    sf.__name__ = 'scanned_' + f.__name__
return sf
def simplefn(scope, signal, fn=None, aux_inputs=None):
assert fn is not None, 'simple function cannot be None'
aux = ()
if aux_inputs is not None:
aux_name, aux_init = aux_inputs
aux += scope.variable('aux_inputs', aux_name, aux_init, signal).value,
return fn(signal, *aux)
def batchpowernorm(scope, signal, momentum=0.999, mode='train'):
running_mean = scope.variable('norm', 'running_mean',
lambda *_: 0. + jnp.ones(signal.val.shape[-1]), ())
if mode == 'train':
mean = jnp.mean(jnp.abs(signal.val)**2, axis=0)
running_mean.value = momentum * running_mean.value + (1 - momentum) * mean
else:
mean = running_mean.value
return signal / jnp.sqrt(mean)
def conv1d(
scope: Scope,
signal,
taps=31,
rtap=None,
mode='valid',
kernel_init=delta,
conv_fn = xop.convolve):
x, t = signal
t = scope.variable('const', 't', conv1d_t, t, taps, rtap, 1, mode).value
h = scope.param('kernel',
kernel_init,
(taps,), np.complex64)
x = conv_fn(x, h, mode=mode)
return Signal(x, t)
def mimoconv1d(
scope: Scope,
signal,
taps=31,
rtap=None,
dims=2,
mode='valid',
kernel_init=zeros,
conv_fn=xop.convolve):
x, t = signal
t = scope.variable('const', 't', conv1d_t, t, taps, rtap, 1, mode).value
h = scope.param('kernel', kernel_init, (taps, dims, dims), np.float32)
y = xcomm.mimoconv(x, h, mode=mode, conv=conv_fn)
return Signal(y, t)
def mimofoeaf(scope: Scope,
signal,
framesize=100,
w0=0,
train=False,
preslicer=lambda x: x,
foekwargs={},
mimofn=af.rde,
mimokwargs={},
mimoinitargs={}):
sps = 2
dims = 2
tx = signal.t
# MIMO
slisig = preslicer(signal)
auxsig = scope.child(mimoaf,
mimofn=mimofn,
train=train,
mimokwargs=mimokwargs,
mimoinitargs=mimoinitargs,
name='MIMO4FOE')(slisig)
y, ty = auxsig # assume y is continuous in time
yf = xop.frame(y, framesize, framesize)
foe_init, foe_update, _ = af.array(af.frame_cpr_kf, dims)(**foekwargs)
state = scope.variable('af_state', 'framefoeaf',
lambda *_: (0., 0, foe_init(w0)), ())
phi, af_step, af_stats = state.value
af_step, (af_stats, (wf, _)) = af.iterate(foe_update, af_step, af_stats, yf)
wp = wf.reshape((-1, dims)).mean(axis=-1)
w = jnp.interp(jnp.arange(y.shape[0] * sps) / sps,
jnp.arange(wp.shape[0]) * framesize + (framesize - 1) / 2, wp) / sps
psi = phi + jnp.cumsum(w)
state.value = (psi[-1], af_step, af_stats)
# apply FOE to original input signal via linear extrapolation
psi_ext = jnp.concatenate([w[0] * jnp.arange(tx.start - ty.start * sps, 0) + phi,
psi,
w[-1] * jnp.arange(tx.stop - ty.stop * sps) + psi[-1]])
signal = signal * jnp.exp(-1j * psi_ext)[:, None]
return signal
def mimoaf(
scope: Scope,
signal,
taps=32,
rtap=None,
dims=2,
sps=2,
train=False,
mimofn=af.ddlms,
mimokwargs={},
mimoinitargs={}):
x, t = signal
t = scope.variable('const', 't', conv1d_t, t, taps, rtap, 2, 'valid').value
x = xop.frame(x, taps, sps)
mimo_init, mimo_update, mimo_apply = mimofn(train=train, **mimokwargs)
state = scope.variable('af_state', 'mimoaf',
lambda *_: (0, mimo_init(dims=dims, taps=taps, **mimoinitargs)), ())
truth_var = scope.variable('aux_inputs', 'truth',
lambda *_: None, ())
truth = truth_var.value
if truth is not None:
truth = truth[t.start: truth.shape[0] + t.stop]
af_step, af_stats = state.value
af_step, (af_stats, (af_weights, _)) = af.iterate(mimo_update, af_step, af_stats, x, truth)
y = mimo_apply(af_weights, x)
state.value = (af_step, af_stats)
return Signal(y, t)
def fdbp(
scope: Scope,
signal,
steps=3,
dtaps=261,
ntaps=41,
sps=2,
d_init=delta,
n_init=gauss):
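    # Each iteration below alternates a per-dimension linear convolution (DConv)
    # with a nonlinear phase rotation whose angle is a filtered version of the
    # instantaneous power |x|^2 (NConv) -- the split-step structure of filtered
    # digital back-propagation.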
x, t = signal
dconv = vmap(wpartial(conv1d, taps=dtaps, kernel_init=d_init))
for i in range(steps):
x, td = scope.child(dconv, name='DConv_%d' % i)(Signal(x, t))
c, t = scope.child(mimoconv1d, name='NConv_%d' % i)(Signal(jnp.abs(x)**2, td),
taps=ntaps,
kernel_init=n_init)
x = jnp.exp(1j * c) * x[t.start - td.start: t.stop - td.stop + x.shape[0]]
return Signal(x, t)
def identity(scope, inputs):
return inputs
def fanout(scope, inputs, num):
return (inputs,) * num
# compositors
def serial(*fs):
def _serial(scope, inputs, **kwargs):
for f in fs:
if isinstance(f, tuple) or isinstance(f, list):
name, f = f
else:
name = None
inputs = scope.child(f, name=name)(inputs, **kwargs)
return inputs
return _serial
def parallel(*fs):
def _parallel(scope, inputs, **kwargs):
outputs = []
for f, inp in zip(fs, inputs):
if isinstance(f, tuple) or isinstance(f, list):
name, f = f
else:
name = None
out = scope.child(f, name=name)(inp, **kwargs)
outputs.append(out)
return outputs
return _parallel
|
remifan/commplax
|
commplax/module/core.py
|
core.py
|
py
| 10,764
|
python
|
en
|
code
| 49
|
github-code
|
6
|
25786118251
|
import select
import socket
EOL1 = b'\n\n'
EOL2 = b'\r\n'
response = b'HTTP/1.0 200 OK\r\nDate: Mon, 1 Jan 1996 01:01:01 GMT\r\n'
response += b'Content-Type: text/plain\r\nContent-Length: 13\r\n\r\n'
response += b'Hello, world!'
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(('0.0.0.0', 8080))
serversocket.listen(1)
serversocket.setblocking(0)
epoll = select.epoll()
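
# A minimal sketch of how the event loop might continue (an assumption based on
# the standard epoll pattern; not part of the original example):
epoll.register(serversocket.fileno(), select.EPOLLIN)
try:
    connections, requests, responses = {}, {}, {}
    while True:
        events = epoll.poll(1)
        for fileno, event in events:
            if fileno == serversocket.fileno():
                # new client: accept it and watch for incoming data
                connection, address = serversocket.accept()
                connection.setblocking(0)
                epoll.register(connection.fileno(), select.EPOLLIN)
                connections[connection.fileno()] = connection
                requests[connection.fileno()] = b''
                responses[connection.fileno()] = response
            elif event & select.EPOLLIN:
                # read until a blank line marks the end of the HTTP request
                requests[fileno] += connections[fileno].recv(1024)
                if EOL1 in requests[fileno] or EOL2 in requests[fileno]:
                    epoll.modify(fileno, select.EPOLLOUT)
            elif event & select.EPOLLOUT:
                # write the canned response, then stop watching the socket
                byteswritten = connections[fileno].send(responses[fileno])
                responses[fileno] = responses[fileno][byteswritten:]
                if len(responses[fileno]) == 0:
                    epoll.modify(fileno, 0)
                    connections[fileno].shutdown(socket.SHUT_RDWR)
            elif event & select.EPOLLHUP:
                epoll.unregister(fileno)
                connections[fileno].close()
                del connections[fileno]
finally:
    epoll.unregister(serversocket.fileno())
    epoll.close()
    serversocket.close()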
|
gaufung/CodeBase
|
Python-Standard-Library/Network/epoll/example3.py
|
example3.py
|
py
| 475
|
python
|
en
|
code
| 0
|
github-code
|
6
|
38736630905
|
import datetime
import logging
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
from backend.data.measurements import MeasurementArray, Measurements
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def xyz2blh(x, y, z):
"""_summary_
Angle returned will be in radians
"""
A = 6378137.0
B = 6356752.314245
e = np.sqrt(1 - (B ** 2) / (A ** 2))
# calculate longitude, in radians
longitude = np.arctan2(y, x)
# calculate latitude, in radians
xy_hypot = np.hypot(x, y)
lat0 = np.zeros_like(x)
latitude = np.arctan(z / xy_hypot)
while np.any(np.abs(latitude - lat0) > 1e-9):
lat0 = latitude
N = A / np.sqrt(1 - e ** 2 * np.sin(lat0) ** 2)
latitude = np.arctan((z + e ** 2 * N * np.sin(lat0)) / xy_hypot)
# calculate height, in meters
N = A / np.sqrt(1 - e ** 2 * np.sin(latitude) ** 2)
small_angle_indices = np.abs(latitude) < np.pi / 4
R, phi = np.hypot(xy_hypot[small_angle_indices], z[small_angle_indices]), np.arctan(z[small_angle_indices] / xy_hypot[small_angle_indices])
height = np.zeros_like(x)
height[small_angle_indices] = R * np.cos(phi) / np.cos(latitude[small_angle_indices]) - N[small_angle_indices]
height[~small_angle_indices] = z[~small_angle_indices] / np.sin(latitude[~small_angle_indices]) - N[~small_angle_indices] * (1 - e ** 2)
return latitude, longitude, height
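
# A minimal usage sketch (an assumption, not part of the original module): a point
# on the equator at longitude 0 on the ellipsoid surface maps to ~(0, 0, 0):
#
#     lat, lon, h = xyz2blh(np.array([6378137.0]), np.array([0.0]), np.array([0.0]))
#     np.degrees(lat), np.degrees(lon), h   # -> (~0 deg, ~0 deg, ~0 m)
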
class Position:
"""
Position class to handle position analysis
"""
def __init__(
self,
data: MeasurementArray = None,
base: MeasurementArray = None,
sitelist: list = None,
) -> None:
self.data = data
self.base = base
self.sitelist = sitelist
if self.base is not None:
self.data = self.data - self.base
def __iter__(self):
return iter(self.data)
def rotate_enu(self) -> None:
"""
rotate Rotate the position to the ENU frame from the base
"""
for data in self.data:
for k in data.data:
print(k)
#locate the base with the same station id
base = self.base.locate(site=data.id['site'])
lat, lon, height = xyz2blh(base.data['x_0'], base.data['x_1'], base.data['x_2'])
rot = np.zeros((3,3, len(lat)))
rot[0,0] = -np.sin(lon)
rot[0,1] = -np.sin(lat)*np.cos(lon)
rot[0,2] = np.cos(lat)*np.cos(lon)
rot[1,0] = np.cos(lon)
rot[1,1] = -np.sin(lat)*np.sin(lon)
rot[1,2] = np.cos(lat)*np.sin(lon)
rot[2,0] = 0
rot[2,1] = np.cos(lat)
rot[2,2] = np.sin(lat)
project = np.empty((len(data.data['x_0']),3))
for i in range(3):
project[:, i] = data.data[f'x_{i}']
enu = np.matmul(rot.transpose(), project[:,:,np.newaxis])[:,:,0]
for i in range(3):
data.data[f'x_{i}'] = enu[:, i]
|
GeoscienceAustralia/ginan
|
scripts/GinanEDA/backend/data/position.py
|
position.py
|
py
| 3,039
|
python
|
en
|
code
| 165
|
github-code
|
6
|
73933198267
|
from .common import * # NOQA
import pytest
project_detail = {"project": None, "namespace": None, "cluster": None,
"project2": None, "namespace2": None, "cluster2": None}
user_token = {"user_c1_p1_owner": {"user": None, "token": None},
"user_c1_p1_member": {"user": None, "token": None},
"user_c1_p2_owner": {"user": None, "token": None},
"user_standard": {"user": None, "token": None}}
CATALOG_URL = "https://git.rancher.io/charts"
MYSQL_EXTERNALID_037 = "catalog://?catalog=library&template=mysql" \
"&version=0.3.7"
MYSQL_EXTERNALID_038 = "catalog://?catalog=library&template=mysql" \
"&version=0.3.8"
WORDPRESS_EXTID = "catalog://?catalog=library&template=wordpress" \
"&version=1.0.5"
def cluster_and_client(cluster_id, mgmt_client):
cluster = mgmt_client.by_id_cluster(cluster_id)
url = cluster.links.self + '/schemas'
client = rancher.Client(url=url,
verify=False,
token=mgmt_client.token)
return cluster, client
def wait_for_template_to_be_created(client, name, timeout=45):
found = False
start = time.time()
interval = 0.5
while not found:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for templates")
templates = client.list_template(catalogId=name)
if len(templates) > 0:
found = True
time.sleep(interval)
interval *= 2
def check_condition(condition_type, status):
def _find_condition(resource):
if not hasattr(resource, "conditions"):
return False
if resource.conditions is None:
return False
for condition in resource.conditions:
if condition.type == condition_type and condition.status == status:
return True
return False
return _find_condition
def test_tiller():
name = random_test_name()
admin_client = get_user_client()
clusters = admin_client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster_id = clusters[0].id
p = admin_client. \
create_project(name="test-" + random_str(),
clusterId=cluster_id,
resourceQuota={
"limit": {
"secrets": "1"}},
namespaceDefaultResourceQuota={
"limit": {
"secrets": "1"}}
)
p = admin_client.reload(p)
proj_client = rancher.Client(url=p.links.self +
'/schemas', verify=False,
token=USER_TOKEN)
# need a cluster scoped client to create a namespace
_cluster, cluster_client = cluster_and_client(cluster_id, admin_client)
ns = cluster_client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota={
"limit": {
"secrets": "1"
}}
)
wait_for_template_to_be_created(admin_client, "library")
app = proj_client.create_app(
name=name,
externalId=WORDPRESS_EXTID,
targetNamespace=ns.name,
projectId=p.id,
answers=get_defaut_question_answers(admin_client, WORDPRESS_EXTID)
)
app = proj_client.reload(app)
# test for tiller to be stuck on bad installs
wait_for_condition(proj_client, app, check_condition('Installed', 'False'))
# cleanup by deleting project
admin_client.delete(p)
def test_app_deploy():
admin_client = get_user_client()
proj_client = get_project_client_for_token(
project_detail["project"],
USER_TOKEN)
answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_037)
wait_for_template_to_be_created(admin_client, "library")
app = proj_client.create_app(
name=random_test_name(),
externalId=MYSQL_EXTERNALID_037,
targetNamespace=project_detail["namespace"].name,
projectId=project_detail["project"].id,
answers=answer)
print("App is active")
validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_037)
proj_client.delete(app)
def test_app_delete():
admin_client = get_user_client()
proj_client = get_project_client_for_token(
project_detail["project"],
USER_TOKEN)
wait_for_template_to_be_created(admin_client, "library")
answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_037)
app = proj_client.create_app(
name=random_test_name(),
externalId=MYSQL_EXTERNALID_037,
targetNamespace=project_detail["namespace"].name,
projectId=project_detail["project"].id,
answers=answer)
print("App is active")
validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_037)
app = proj_client.delete(app)
validate_app_deletion(proj_client, app.id)
def test_app_upgrade_version():
admin_client = get_user_client()
proj_client = get_project_client_for_token(
project_detail["project"],
USER_TOKEN)
wait_for_template_to_be_created(admin_client, "library")
answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_037)
app = proj_client.create_app(
name=random_test_name(),
externalId=MYSQL_EXTERNALID_037,
targetNamespace=project_detail["namespace"].name,
projectId=project_detail["project"].id,
answers=answer)
print("App is active")
validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_037)
new_answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_038)
app = proj_client.update(
obj=app,
externalId=MYSQL_EXTERNALID_038,
targetNamespace=project_detail["namespace"].name,
projectId=project_detail["project"].id,
answers=new_answer)
app = proj_client.reload(app)
validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_038)
assert app.externalId == MYSQL_EXTERNALID_038, "incorrect template version"
proj_client.delete(app)
def test_app_rollback():
admin_client = get_user_client()
proj_client = get_project_client_for_token(
project_detail["project"],
USER_TOKEN)
wait_for_template_to_be_created(admin_client, "library")
answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_037)
app = proj_client.create_app(
name=random_test_name(),
externalId=MYSQL_EXTERNALID_037,
targetNamespace=project_detail["namespace"].name,
projectId=project_detail["project"].id,
answers=answer)
print("App is active")
app = validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_037)
rev_id = app.appRevisionId
new_answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_038)
app = proj_client.update(
obj=app,
externalId=MYSQL_EXTERNALID_038,
targetNamespace=project_detail["namespace"].name,
projectId=project_detail["project"].id,
answers=new_answer)
app = proj_client.reload(app)
app = validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_038)
assert app.externalId == MYSQL_EXTERNALID_038, "incorrect template version"
proj_client.action(obj=app,
action_name='rollback',
revisionId=rev_id)
app = proj_client.reload(app)
validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_037)
assert app.externalId == MYSQL_EXTERNALID_037, "incorrect template version"
proj_client.delete(app)
def test_app_answer_override():
admin_client = get_user_client()
proj_client = get_project_client_for_token(
project_detail["project"],
USER_TOKEN)
wait_for_template_to_be_created(admin_client, "library")
answers = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_037)
app = proj_client.create_app(
name=random_test_name(),
externalId=MYSQL_EXTERNALID_037,
targetNamespace=project_detail["namespace"].name,
projectId=project_detail["project"].id,
answers=answers)
print("App is active")
app = validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_037)
answers["mysqlUser"] = "admin1234"
app = proj_client.update(
obj=app,
externalId=MYSQL_EXTERNALID_037,
targetNamespace=project_detail["namespace"].name,
projectId=project_detail["project"].id,
answers=answers)
app = proj_client.reload(app)
app = validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_037, answers)
assert app["answers"].mysqlUser == "admin1234", \
"incorrect answer upgrade"
proj_client.delete(app)
def test_rbac_app_project_scope_deploy():
admin_client = get_user_client()
proj_client = get_project_client_for_token(
project_detail["project"],
USER_TOKEN)
catalog = admin_client.create_projectCatalog(
name="projectcatalog",
baseType="projectCatalog",
branch="master",
url=CATALOG_URL,
projectId=project_detail["project"].id)
time.sleep(5)
pId = project_detail["project"].id.split(":")[1]
catalog_proj_scoped_ext_id = "catalog://?catalog=" + pId + \
"/projectcatalog&type=" \
"projectCatalog&template=" \
"mysql&version=0.3.8"
answers = get_defaut_question_answers(
admin_client,
catalog_proj_scoped_ext_id)
app = proj_client.create_app(
name=random_test_name(),
externalId=catalog_proj_scoped_ext_id,
answers=answers,
targetNamespace=project_detail["namespace"].name,
projectId=project_detail["project"].id)
validate_catalog_app(proj_client, app, catalog_proj_scoped_ext_id)
p2, ns2 = create_project_and_ns(
USER_TOKEN,
project_detail["cluster"],
random_test_name("testapp"))
#Assign role
assign_members_to_project(admin_client,
user_token["user_c1_p2_owner"]["user"],
p2,
"project-owner")
#Verify "project-owner" of p1 can list the added catalog
user1_client = get_client_for_token(
user_token["user_c1_p1_owner"]["token"])
catalogs_list = user1_client.list_projectCatalog()
assert len(catalogs_list) == 1, \
"Project catalog not found for the user"
assert catalogs_list["data"][0]["name"] == \
"projectcatalog", "Incorrect project catalog found"
# Verify "project-member" of p1 can list the added catalog
user2_client = get_client_for_token(
user_token["user_c1_p1_member"]["token"])
catalogs_list_2 = user2_client.list_projectCatalog()
assert len(catalogs_list_2) == 1, \
"Project catalog not found for the user"
# Verify "project-owner" of p2 CANNOT list the added catalog
user3_client = get_client_for_token(
user_token["user_c1_p2_owner"]["token"])
catalogs_list_3 = user3_client.list_projectCatalog()
assert len(catalogs_list_3) == 0, \
"Project catalog found for the user"
# Verify A standard user CANNOT list the added catalog
user4_client = get_client_for_token(
user_token["user_standard"]["token"])
catalogs_list_4 = user4_client.list_projectCatalog()
assert len(catalogs_list_4) == 0, \
"Project catalog found for the user"
admin_client.delete(p2)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client = get_admin_client()
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
project_detail["project"], project_detail["namespace"] = \
create_project_and_ns(USER_TOKEN, clusters[0],
random_test_name("testapp"))
project_detail["cluster"] = clusters[0]
#create users
user_token["user_c1_p1_owner"]["user"], \
user_token["user_c1_p1_owner"]["token"] = create_user(client)
user_token["user_c1_p1_member"]["user"], \
user_token["user_c1_p1_member"]["token"] = create_user(client)
user_token["user_c1_p2_owner"]["user"], \
user_token["user_c1_p2_owner"]["token"] = create_user(client)
user_token["user_standard"]["user"], \
user_token["user_standard"]["token"] = create_user(client)
#Assign roles to the users
assign_members_to_project(client,
user_token["user_c1_p1_owner"]["user"],
project_detail["project"],
"project-owner")
assign_members_to_project(client,
user_token["user_c1_p1_member"]["user"],
project_detail["project"],
"project-member")
def fin():
client = get_user_client()
client.delete(project_detail["project"])
request.addfinalizer(fin)
|
jim02468/rancher
|
tests/validation/tests/v3_api/test_app.py
|
test_app.py
|
py
| 13,408
|
python
|
en
|
code
| 0
|
github-code
|
6
|
28339823949
|
from collections import Counter

def alphasort(a):
    # sort by count (descending), breaking ties alphabetically by character
    a.sort(key=lambda kv: (-kv[1], kv[0]))
    return a

name = list(Counter(input()).items())
name = alphasort(name)
for i in range(3):
    print(name[i][0], name[i][1])
|
t3chcrazy/Hackerrank
|
company-logo.py
|
company-logo.py
|
py
| 389
|
python
|
en
|
code
| 0
|
github-code
|
6
|
42636165267
|
import netCDF4
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import Normalize
import cartopy.crs as ccrs
import matplotlib.colors as colors
# Land cover
lc = netCDF4.Dataset("../data/LandCover_half.nc")
lc.set_auto_mask(True)
lc.variables
land_cover = lc["Land Cover"][:]
# fix landcover
halfway = np.shape(land_cover)[1]//2
first_half = land_cover[:,:halfway]
second_half = land_cover[:,halfway:]
land_cover = np.concatenate((second_half,first_half), axis=1)
#land_cover[land_cover == 0] = np.nan
#land_cover[land_cover > 10] = np.nan
land_cover_int = land_cover.astype(int) # Convert land_cover to int
unique_land_cover = np.unique(land_cover_int)
# plot
fig = plt.figure(figsize=(10, 6))
ax = plt.axes(projection=ccrs.PlateCarree())
image = ax.imshow(land_cover_int , origin="lower", extent=[-180, 180, -90, 90], transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global()
plt.show()
#ds = netCDF4.Dataset("../results/iso_result_mon_mean.nc")
ds = netCDF4.Dataset("../results/iso_result_OMI_filterd_mon_mean.nc")
ds.set_auto_mask(True)
dar = ds["J_times_IspS"][:]
dar[dar > 1000] = np.nan
dar_mean = np.nanmean(dar, axis=0)
dar_mean.shape
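# NOTE: latitude_iso is used below but never defined in this script; it presumably
# comes from the model result file. The variable name "lat" is an assumption here.
latitude_iso = ds.variables["lat"][:]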
# Subset everything above 30 degrees North
def calculate_mean_subset(latitude_threshold,latitude, data):
lat_indices = np.where(latitude > latitude_threshold)[0]
data_subset = data[:, lat_indices, :]
data_mean_subset = np.nanmean(data_subset, axis=0)
anomaly_subset = np.subtract(data_subset, data_mean_subset)
norm_subset = np.divide(anomaly_subset, data_mean_subset)
mean_data_subset = np.column_stack((np.arange(12), [np.nanmean(norm_subset[i, :, :]) for i in range(12)]))
return mean_data_subset
mean_iso_subset_30 = calculate_mean_subset(30, latitude_iso, dar)
print(mean_iso_subset_30)
calculate_mean_subset(30, latitude_iso, dar)
# Express the data as a relative anomaly: (value - monthly mean) / monthly mean
anomaly = np.subtract(dar, dar_mean)
norm = np.divide(anomaly, dar_mean)
np.nanmean(norm[5,:,:])
np.nanmin(norm[5,:,:])
np.nanmax(norm[5,:,:])
# Create a figure with multiple subplots
fig, axs = plt.subplots(3, 4, figsize=(16, 12), subplot_kw={'projection': ccrs.PlateCarree()})
# Iterate over the range of plots you want to display (0 to 11)
for i in range(12):
# Calculate the subplot indices based on the iteration index
row = i // 4 # Row index
col = i % 4 # Column index
# Plot the normalized data in the current subplot
im = axs[row, col].imshow(norm[i, :, :], origin="lower", extent=[-180, 180, -90, 90], transform=ccrs.PlateCarree(), cmap='RdYlGn', vmin=-4, vmax=4, )
axs[row, col].set_title("Iso_{:02d}".format(i)) # Format the title with leading zeros
# Add coastlines
axs[row, col].coastlines()
# Create a colorbar for the figure
fig.colorbar(im, ax=axs, fraction=0.022, pad=0.03, location='bottom')
# Adjust the spacing between subplots
plt.tight_layout()
# Move the colorbar below the subplots
fig.subplots_adjust(bottom=0.15)
# Display the plot
plt.show()
ds.close()
np.where(land_cover_int == 1)
norm[5, np.where(land_cover_int == 1)]
mean_norm_values = []
for lc in unique_land_cover:
indices = np.where(land_cover_int == lc) # Find indices of matching land_cover values
mean_norm = np.mean(norm[indices]) # Calculate mean norm for the current land_cover value
mean_norm_values.append(mean_norm)
print("Mean norm values for each land_cover group:")
for lc, mean_norm in zip(unique_land_cover, mean_norm_values):
print(f"Land Cover {lc}: {mean_norm}")
#### Compare to the HCHO
HCHO = netCDF4.Dataset("../data/OMI_iso_estimate/mon_average_OMI.nc")
HCHO.set_auto_mask(False)
## Convert isoprene values from kg/gridcell/month to kg/m2/month
def gridcell_to_m2(latitude):
"Find the number of m2 in each grid cell in a 0.5 by 0.5 degree grid"
half_degree_lat = 111111.1/2 # latitude lengths stay at about 111.1km per degree
half_degree_lon = np.cos(np.deg2rad(latitude)) * (111111.1/2) # equation to get the length of each half degree of longitude (changes with latitude)
meter_square_gridcell = half_degree_lat * half_degree_lon
return meter_square_gridcell
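# Sanity check: at the equator gridcell_to_m2(0.0) = (111111.1/2)**2 ~ 3.09e9 m2
# per 0.5 x 0.5 degree cell; the area shrinks with cos(latitude) towards the poles.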
latitudes = [float(lat) for lat in HCHO['lat'][:]] #get each latitude degree as a float
no_m2_in_grid = [gridcell_to_m2(lat) for lat in latitudes] #gets the number of m2 blocks in each grid cell
tiled = np.tile(no_m2_in_grid, (len(HCHO['lon'][:]), 1)).T #repeat across each longitude as distance remains the same across each latitude degree
# Get the isoprene emitted per m2
isoprene_per_m2 = (HCHO["EMworldC5H8"][:])/(tiled)
# Calculate the threshold value for the top 0.02% (the 99.98th percentile)
threshold = np.percentile(isoprene_per_m2, 99.98)
isoprene_per_m2[isoprene_per_m2 == 0] = np.nan
# Set values above the threshold to NaN
isoprene_per_m2[isoprene_per_m2 > threshold] = np.nan
HCHO_mean = np.nanmean(isoprene_per_m2, axis=0)
HCHO_anomaly = np.subtract(isoprene_per_m2, HCHO_mean)
HCHO_norm = np.divide(HCHO_anomaly, HCHO_mean)
i = 6
np.nanmean(HCHO_norm[i,:,:])
np.nanmin(HCHO_norm[i,:,:])
# Subset everything above 30 degrees North
latitude_HCHO = HCHO.variables["lat"][:]
def HCHO_std_anomoly_subset(latitude_threshold, latitude, data):
lat_indices = np.where(latitude > latitude_threshold)[0]
data_subset = np.flipud(data)[:, lat_indices, :]
data_mean_subset = np.nanmean(data_subset, axis=0)
data_anomaly_subset = np.subtract(data_subset, data_mean_subset)
data_norm_subset = np.divide(data_anomaly_subset, data_mean_subset)
std_anomoly_subset = np.column_stack((np.arange(12), [np.nanmean(data_norm_subset[i, :, :]) for i in range(12)]))
return std_anomoly_subset
mean_HCHO_subset_30 = HCHO_std_anomoly_subset(30, latitude_HCHO, isoprene_per_m2)
print(mean_HCHO_subset_30)
HCHO_std_anomoly_subset(0, latitude_HCHO, isoprene_per_m2)
# Create a figure and a single subplot
fig, ax = plt.subplots(figsize=(10, 4))
# Plot the first line with a red color
ax.plot(mean_HCHO_subset_30[:, 0], mean_HCHO_subset_30[:, 1], 'r-', label='HCHO')
# Plot the second line with a blue color
ax.plot(mean_iso_subset_30[:, 0], mean_iso_subset_30[:, 1], 'b-', label='Model')
# Set the title and legend
ax.set_title('Average anomaly in the Northern temperate zone (>30 degrees)')
ax.legend()
# Display the plot
plt.show()
np.nanmin(HCHO_norm)
# Create a figure with multiple subplots
fig, axs = plt.subplots(3, 4, figsize=(16, 12), subplot_kw={'projection': ccrs.PlateCarree()})
# Iterate over the range of plots you want to display (0 to 11)
for i in range(12):
# Calculate the subplot indices based on the iteration index
row = i // 4 # Row index
col = i % 4 # Column index
# Plot the normalized data in the current subplot
im = axs[row, col].imshow(np.flipud(HCHO_norm[i, :, :]), origin="lower", extent=[-180, 180, -90, 90], transform=ccrs.PlateCarree(), cmap='RdYlGn')#, vmin=-4, vmax=4, )
axs[row, col].set_title("OMI_{:02d}".format(i)) # Format the title with leading zeros
# Add coastlines
axs[row, col].coastlines()
# Create a colorbar for the figure
fig.colorbar(im, ax=axs, fraction=0.022, pad=0.03, location='bottom')
# Adjust the spacing between subplots
plt.tight_layout()
# Move the colorbar below the subplots
fig.subplots_adjust(bottom=0.15)
# Display the plot
plt.show()
HCHO.close()
|
bikempastine/Isoprene_PModel
|
exploration/anomaly_mapping.py
|
anomaly_mapping.py
|
py
| 7,350
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4643605876
|
import os
import shutil
import pickle
import glob
import cv2
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('TkAgg')
from cam import create_dataset, Camera
def save_data(path, data):
with open(path, 'wb') as handle:
pickle.dump(data, handle)
print("Saved")
def load_data(path):
with open(path, 'rb') as handle:
data = pickle.load(handle)
return data
def camera_calibrate(images_folder='./img',
board_size=(6, 9),
world_scaling=1.,
debug=False):
images_names = sorted(glob.glob(images_folder))
images = []
for imname in images_names:
im = cv2.imread(imname, 1)
images.append(im)
    # criteria used by the chessboard detector.
    # Change this if the code cannot find the chessboard
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((1, board_size[0] * board_size[1], 3), np.float32)
objp[0, :, :2] = np.mgrid[0:board_size[0], 0:board_size[1]].T.reshape(-1, 2)
objp = world_scaling * objp
width = images[0].shape[1]
height = images[0].shape[0]
imgpoints = []
objpoints = []
for frame in images:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (board_size[0], board_size[1]), None)
if ret:
corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
cv2.drawChessboardCorners(frame, (board_size[0], board_size[1]), corners, ret)
if debug:
cv2.imshow('img', frame)
k = cv2.waitKey(500)
objpoints.append(objp)
imgpoints.append(corners)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (width, height), None, None)
return mtx, dist
def stereo_camera_calibrate(images_folder1='./img',
images_folder2='./img',
board_size=(6, 9),
world_scaling=1.,
cameraMatrix1=None,
distCoeffs1=None,
cameraMatrix2=None,
distCoeffs2=None,
debug=False):
cam1_path = sorted(glob.glob(images_folder1))
cam2_path = sorted(glob.glob(images_folder2))
c1_images = []
c2_images = []
for im1, im2 in zip(cam1_path, cam2_path):
im = cv2.imread(im1, 1)
c1_images.append(im)
im = cv2.imread(im2, 1)
c2_images.append(im)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
objp = np.zeros((board_size[0] * board_size[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:board_size[0], 0:board_size[1]].T.reshape(-1, 2)
objp = world_scaling * objp
width = c1_images[0].shape[1]
height = c1_images[0].shape[0]
imgpoints_left = []
imgpoints_right = []
objpoints = []
for frame1, frame2 in zip(c1_images, c2_images):
gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
c_ret1, corners1 = cv2.findChessboardCorners(gray1, board_size, None)
c_ret2, corners2 = cv2.findChessboardCorners(gray2, board_size, None)
if c_ret1 == True and c_ret2 == True:
corners1 = cv2.cornerSubPix(gray1, corners1, (11, 11), (-1, -1), criteria)
corners2 = cv2.cornerSubPix(gray2, corners2, (11, 11), (-1, -1), criteria)
if debug:
cv2.drawChessboardCorners(frame1, board_size, corners1, c_ret1)
cv2.imshow('img', frame1)
cv2.drawChessboardCorners(frame2, board_size, corners2, c_ret2)
cv2.imshow('img2', frame2)
cv2.waitKey(500)
objpoints.append(objp)
imgpoints_left.append(corners1)
imgpoints_right.append(corners2)
stereocalibration_flags = cv2.CALIB_FIX_INTRINSIC
ret, CM1, dist1, CM2, dist2, R, T, E, F = cv2.stereoCalibrate(objectPoints=objpoints,
imagePoints1=imgpoints_left,
imagePoints2=imgpoints_right,
cameraMatrix1=cameraMatrix1,
distCoeffs1=distCoeffs1,
cameraMatrix2=cameraMatrix2,
distCoeffs2=distCoeffs2,
imageSize=(width, height),
criteria=criteria,
flags=stereocalibration_flags)
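    # R and T returned by cv2.stereoCalibrate transform points from the first
    # camera's coordinate frame into the second camera's frame; below they are
    # used to build the second projection matrix P2 = mtx2 @ [R | T].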
return R, T
def mousePoint(event, x, y, flag, params):
if event == cv2.EVENT_LBUTTONDOWN:
print(f"[{x}, {y}]")
def DLT(P1, P2, point1, point2):
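    # Direct Linear Transform triangulation: each 2D observation (u, v) of the
    # unknown homogeneous 3D point X under a projection matrix P contributes two
    # linear equations, v*(P[2] @ X) - P[1] @ X = 0 and P[0] @ X - u*(P[2] @ X) = 0.
    # Stacking both views gives A @ X = 0, solved in a least-squares sense via the
    # SVD below; the result is de-homogenised by dividing by its last component.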
A = [point1[1] * P1[2, :] - P1[1, :],
P1[0, :] - point1[0] * P1[2, :],
point2[1] * P2[2, :] - P2[1, :],
P2[0, :] - point2[0] * P2[2, :]
]
A = np.array(A).reshape((4, 4))
# print('A: ')
# print(A)
B = A.transpose() @ A
# from scipy import linalg
U, s, Vh = np.linalg.svd(B, full_matrices=False)
# print('Triangulated point: ')
# print(Vh[3, 0:3] / Vh[3, 3])
return Vh[3, 0:3] / Vh[3, 3]
if __name__ == "__main__":
camera1 = Camera(camera_id=0, show_frame=False, vertical_flip=True, save_video=False)
camera2 = Camera(camera_id=1, show_frame=False, vertical_flip=True, save_video=False)
#
# create_screen(0)
# create_screen(1)
#
# camera1.initialize()
# camera2.initialize()
# create_dataset([camera1, camera2], './img/split/')
    # ========================================== CAMERA CALIBRATION =====================================================
# mtx1, dist1 = camera_calibrate('./img/split/camera 1/*.jpg', debug=False)
# mtx2, dist2 = camera_calibrate('./img/split/camera 2/*.jpg', debug=False)
#
# R, T = stereo_camera_calibrate(images_folder1="./img/split/camera 1/*.jpg",
# images_folder2="./img/split/camera 2/*.jpg",
# cameraMatrix1=mtx1,
# cameraMatrix2=mtx2,
# distCoeffs1=dist1,
# distCoeffs2=dist2,
# debug=False)
    # ==================================== SAVING DATA ===========================================================
# save_data('./data/matrix_camera_1080.pickle', mtx1)
# save_data('./data/matrix_camera.pickle', mtx2)
#
# save_data('./data/dist_camera_1080.pickle', dist1)
# save_data('./data/dist_camera.pickle', dist2)
#
# save_data('./data/stereo_R.pickle', R)
# save_data('./data/stereo_T.pickle', T)
    # ============================================== LOADING DATA ===================================================
mtx2 = load_data('./data/matrix_camera_1080.pickle')
mtx1 = load_data('./data/matrix_camera.pickle')
dist2 = load_data('./data/dist_camera_1080.pickle')
dist1 = load_data('./data/dist_camera.pickle')
R = load_data('./data/stereo_R.pickle')
T = load_data('./data/stereo_T.pickle')
print(f"Camera matrix 0:\n {mtx1}")
print(f"Camera matrix 1:\n {mtx2}")
print(f"Camera dist 0:\n {dist1}")
print(f"Camera dist 1:\n {dist2}")
print(f"R:\n {R}")
print(f"T:\n {T}")
# board_size = (6, 9)
# world_scaling = 1.
    # =============================================== MANUAL DATA ANNOTATION ===========================================
# count = 0
# while True:
#
# if not count:
# path = './img/1.jpg'
# else:
# path = './img/2.jpg'
#
# img = cv2.imread(path, 1)
#
# cv2.imshow("Img", img)
# cv2.setMouseCallback('Img', mousePoint)
#
# if cv2.waitKey(0) & 0xFF == ord('q'):
# if not count:
# count += 1
# continue
#
# cv2.destroyAllWindows()
# break
    # # Right   # Middle   # Left
# uvs1 = np.array([[249, 175], [187, 177], [106, 166],
# [67, 296], [163, 409], [257, 289],
# [267, 408], [190, 405]])
#
# uvs2 = np.array([[506, 50], [408, 52], [321, 53],
# [286, 196], [355, 320], [503, 189],
# [494, 329], [398, 321]])
# frame1 = cv2.imread('./img/1.jpg')
# frame2 = cv2.imread('./img/2.jpg')
#
# plt.imshow(frame1[:, :, [2, 1, 0]])
# plt.scatter(uvs1[:, 0], uvs1[:, 1])
# plt.show()
#
# plt.imshow(frame2[:, :, [2, 1, 0]])
# plt.scatter(uvs2[:, 0], uvs2[:, 1])
# plt.show()
# #
# RT1 = np.concatenate([np.eye(3), [[0], [0], [0]]], axis=-1)
# P1 = mtx1 @ RT1
#
# RT2 = np.concatenate([R, T], axis=-1)
# P2 = mtx2 @ RT2
#
# from mpl_toolkits.mplot3d import Axes3D
#
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.set_xlim3d(0, -40)
# ax.set_ylim3d(-20, 20)
# ax.set_zlim3d(50, 100)
#
# p3ds = []
# for uv1, uv2 in zip(uvs1, uvs2):
# _p3d = DLT(P1, P2, uv1, uv2)
# p3ds.append(_p3d)
# p3ds = np.array(p3ds)
#
# connections = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [1, 7]]
# for _c in connections:
# # print(p3ds[_c[0]])
# # print(p3ds[_c[1]])
# ax.plot(xs=[p3ds[_c[0], 0], p3ds[_c[1], 0]], ys=[p3ds[_c[0], 1], p3ds[_c[1], 1]],
# zs=[p3ds[_c[0], 2], p3ds[_c[1], 2]], c='red')
#
# plt.show()
    # =============================================== AUTOMATIC DETECTION =================================================
import mediapipe as mp
from mpl_toolkits.mplot3d import Axes3D
def get_frame_keypoints(landmarks, frame):
    """Convert normalized MediaPipe hand landmarks (21 points) into pixel coordinates."""
    frame_keypoints = []
    # print(landmarks)
for face_landmarks in landmarks:
for p in range(21):
pxl_x = int(round(frame.shape[1] * face_landmarks.landmark[p].x))
pxl_y = int(round(frame.shape[0] * face_landmarks.landmark[p].y))
kpts = [pxl_x, pxl_y]
frame_keypoints.append(kpts)
return frame_keypoints
mp_drawing = mp.solutions.drawing_utils
# mp_face = mp.solutions.face_mesh
mp_face = mp.solutions.hands
# face1 = mp_face.FaceMesh(max_num_faces=1,
# refine_landmarks=True,
# min_detection_confidence=0.5,
# min_tracking_confidence=0.5)
# face2 = mp_face.FaceMesh(max_num_faces=1,
# refine_landmarks=True,
# min_detection_confidence=0.5,
# min_tracking_confidence=0.5)
face1 = mp_face.Hands(max_num_hands=1,
model_complexity=0,
min_detection_confidence=0.5,
min_tracking_confidence=0.5)
face2 = mp_face.Hands(max_num_hands=1,
model_complexity=0,
min_detection_confidence=0.5,
min_tracking_confidence=0.5)
camera1.initialize()
camera2.initialize()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# connections = [[i, i+1] for i in range(467)]
ax.view_init(-90, -90)
mp_pose = mp.solutions.pose
connections = mp_face.HAND_CONNECTIONS
counter = 0
global_kps1 = []
global_kps2 = []
while True:
frame1 = camera1.read_frame()
frame2 = camera2.read_frame()
frame1_copy = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
frame2_copy = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
frame1_copy.flags.writeable = False
frame2_copy.flags.writeable = False
results1 = face1.process(frame1_copy)
results2 = face2.process(frame2_copy)
# if results1.multi_face_landmarks:
if results1.multi_hand_landmarks:
frame1_keypoints = get_frame_keypoints(results1.multi_hand_landmarks,
frame1)
else:
# frame1_keypoints = [[-1, -1]] * 468
frame1_keypoints = [[-1, -1]] * 21
if results2.multi_hand_landmarks:
frame2_keypoints = get_frame_keypoints(results2.multi_hand_landmarks,
frame2)
else:
frame2_keypoints = [[-1, -1]] * 21
global_kps1.append(frame1_keypoints)
global_kps2.append(frame2_keypoints)
# print("Frame kp 1:\n", frame1_keypoints)
# print("Frame kp 2:\n", frame2_keypoints)
for points1, points2 in zip(frame1_keypoints, frame2_keypoints):
cv2.circle(frame1, points1, 1, (255, 0, 0), cv2.FILLED)
cv2.circle(frame2, points2, 1, (255, 0, 0), cv2.FILLED)
frames = Camera().stack_images(0.8, [[frame1, frame2]])
cv2.imshow('Check', frames)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
camera1.release()
camera2.release()
save_data('data/glob1_kps.pickle', global_kps1)
save_data('data/glob2_kps.pickle', global_kps2)
break
# @ is matrix multiplication
RT1 = np.concatenate([np.eye(3), [[0], [0], [0]]], axis=-1)
P1 = mtx1 @ RT1
RT2 = np.concatenate([R, T], axis=-1)
P2 = mtx2 @ RT2
p3ds = []
for uv1, uv2 in zip(frame1_keypoints, frame2_keypoints):
_p3d = DLT(P1, P2, uv1, uv2)
p3ds.append(_p3d)
p3ds = np.array(p3ds)
for _c in connections:
ax.plot(xs=[p3ds[_c[0], 0], p3ds[_c[1], 0]],
ys=[p3ds[_c[0], 1], p3ds[_c[1], 1]],
zs=[p3ds[_c[0], 2], p3ds[_c[1], 2]],
c='red')
ax.scatter(xs=[p3ds[:, 0], p3ds[:, 0]],
ys=[p3ds[:, 1], p3ds[:, 1]],
zs=[p3ds[:, 2], p3ds[:, 2]],
c='green')
# ax.set_axis_off()
# ax.set_xticks([])
# ax.set_yticks([])
# ax.set_zticks([])
plt.draw()
plt.pause(.001)
ax.clear()
# save_data('./data/glob1_kps.pickle', global_kps1)
# save_data('./data/glob2_kps.pickle', global_kps2)
# # ax.set_xlim3d(-14, -24)
# # ax.set_ylim3d(-5, 5)
# # ax.set_zlim3d(-500, 500)
#
#
# connections = [[0, 1], [1, 2], [2, 3], [3, 4],
# [0,5], [5,6], [6,7], [7,8],
# [5,9], [9,10], [10,11], [11,12],
# [9,13], [13,14], [14,15], [15,16],
# [13,17], [17,18], [18,19], [19,20], [17, 0]]
#
# for _c in connections:
# # print(p3ds[_c[0]])
# # print(p3ds[_c[1]])
# ax.plot(xs=[p3ds[_c[0], 0], p3ds[_c[1], 0]], ys=[p3ds[_c[0], 1], p3ds[_c[1], 1]],
# zs=[p3ds[_c[0], 2], p3ds[_c[1], 2]], c='red')
# ax.scatter(xs=[p3ds[_c[0], 0], p3ds[_c[1], 0]], ys=[p3ds[_c[0], 1], p3ds[_c[1], 1]],
# zs=[p3ds[_c[0], 2], p3ds[_c[1], 2]], c='green')
#
# def animate(i):
# print(i/360 * 100, "%")
# line = ax.view_init(210, i)
# return line
#
# import matplotlib.animation as animation
#
# # Create the animation object:
# sin_animation = animation.FuncAnimation(fig,
# animate,
# frames=np.linspace(0, 360, 360),
# interval = 10,
# repeat = False)
#
# # Save the animation as a gif file:
# sin_animation.save('моя анимация.gif',
# writer='imagemagick',
# fps=30)
# for angle in range(0, 360):
# ax.view_init(210, angle)
# plt.draw()
# plt.pause(.001)
# %%
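# --- Added illustrative sketch (not part of the original file) ---
# DLT() used above is defined elsewhere in this project; as a reference, a
# minimal homogeneous direct-linear-transform triangulation, assuming 3x4
# projection matrices P1/P2 and pixel points (u, v), looks roughly like this:
def dlt_triangulate_sketch(P1, P2, point1, point2):
    """Hedged sketch: triangulate one 3D point from two pixel observations."""
    A = np.array([
        point1[1] * P1[2, :] - P1[1, :],
        P1[0, :] - point1[0] * P1[2, :],
        point2[1] * P2[2, :] - P2[1, :],
        P2[0, :] - point2[0] * P2[2, :],
    ])
    # the least-squares solution of A X = 0 is the last right-singular vector,
    # de-homogenised by its fourth component
    _, _, Vh = np.linalg.svd(A)
    X = Vh[-1]
    return X[:3] / X[3]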
[record: repo EvilFis/MultiCamVision | path test_method.py | file test_method.py | ext py | size 16,755 bytes | program_lang python | lang en | doc_type code | stars 0 | dataset github-code | pt 6 | next seq_id 11370340934]
import torch
import torchvision
import random
import torch.nn as nn
import torch
from torch import tanh
import torch.nn.functional as F
# custom weights initialization
def weights_init_1st(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.15)
#m.weight.data.uniform_(-0.15, 0.15)
#m.weight.data.fill_(0.5)
def weights_init_2nd(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(-0.3, 0.3)
#m.weight.data.uniform_(0.01, 0.02)
#m.weight.data.fill_(0.5)
def print_net(model):
for name, param in model.named_parameters():
if param.requires_grad:
print(name, param.data.numpy())
def get_pi_net():
net = pi_net()
net.apply(weights_init_1st)
return net
class pi_net(nn.Module):
def __init__(self):
super(pi_net, self).__init__()
bias_on = True
self.linear1 = nn.Linear(36, 64, bias=bias_on)
self.linear2 = nn.Linear(64, 64, bias=bias_on)
self.linear3 = nn.Linear(64, 36, bias=bias_on)
#torch.nn.init.xavier_uniform_(self.linear1)
#torch.nn.init.xavier_uniform_(self.linear2)
def forward(self, x):
# --- 0000 ---- 0000 >>> z-score normalization
x = self.linear1(x)
# print("AFTER linear1 = = = = = = = = = =")
# print(x)
# print("AFTER linear1 = = = = = = = = = =")
x_avg = torch.sum(x) / 20
# print("AVG " + str(x_avg) )
# print("x - x_avg ~~~~~~~~~~~~~~")
x_minus_x_avg = x - x_avg
# print(x_minus_x_avg)
# print("x - x_avg ~~~~~~~~~~~~~~")
x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 20
# print("VAR " + str(x_std))
epsilon = 0.0000001
# print("STD " + str(torch.sqrt(x_std)))
x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon)
# print("BEFORE sigmoid = = = = = = = = = =")
# print(x_norm)
# print("BEFORE sigmoid = = = = = = = = = =")
#x = F.sigmoid(x_norm)
x = tanh(x_norm)
x = self.linear2(x)
x_avg = torch.sum(x) / 40
x_minus_x_avg = x - x_avg
x_std = torch.sum(torch.pow(x_minus_x_avg, 2)) / 40
x_norm = (x_minus_x_avg) / (torch.sqrt(x_std) + epsilon)
x = tanh(x_norm)
# print("AFTER sigmoid = = = = = = = = = =")
# print(x)
# print("AFTER sigmoid = = = = = = = = = =")
x = self.linear3(x)
return x.view(-1, 36)
# --- 0000 ---- 0000 >>> feature scaling
# x = self.linear1(x)
# print("AFTER linear1 = = = = = = = = = =")
# print(x)
# print("AFTER linear1 = = = = = = = = = =")
# x_max = torch.max(x)
# x_min = torch.min(x)
# epsilon = 0.00001
# x_norm = ((x - x_min) / (x_max - x_min + epsilon))
# print("BEFORE sigmoid = = = = = = = = = =")
# print(x_norm)
# print("BEFORE sigmoid = = = = = = = = = =")
# x = F.sigmoid(x_norm)
# print("AFTER sigmoid = = = = = = = = = =")
# print(x)
# print("AFTER sigmoid = = = = = = = = = =")
# x = self.linear2(x)
# return x.view(-1, 4)
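# --- Added illustrative usage sketch (not part of the original file) ---
# Minimal smoke test for pi_net, assuming a batch of 36-dimensional inputs;
# get_pi_net() applies weights_init_1st before returning the network.
if __name__ == "__main__":
    net = get_pi_net()
    dummy_states = torch.randn(4, 36)   # hypothetical batch of 4 state vectors
    out = net(dummy_states)             # forward pass; output shape (4, 36)
    print(out.shape)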
[record: repo ssainz/reinforcement_learning_algorithms | path fleet_simulator/Models.py | file Models.py | ext py | size 3,261 bytes | program_lang python | lang en | doc_type code | stars 0 | dataset github-code | pt 6 | next seq_id 6766919720]
from sys import exit
def get_hurt(message, severity):
"""
A simple function that determines damage of mc based on severity level.
Parameters:
(String) message: message explaining how the mc got hurt.
(int) severity: the severity level of how bad the damage inflicted is.
"""
    global health  # health is module-level state; declare it so the decrements below work
    if severity == 1:
print(message)
print("You take 5 damage!")
health -= int(5)
elif severity == 2:
print(message)
print("You take 25 damage!")
health -= int(25)
else:
print(message)
print("You take 100 damage!")
health -= int(100)
def dead(message):
"""
Function that tells you the MC is dead.
Parameters:
(String) message: Message stating the character is dead.
"""
print(message)
exit()
def dungeon():
print("It is dark there's no light")
print("What do you do? ")
print(" Go left or right?")
print(" Or go straight?")
mc_moved = False
while True:
choice = input("> ")
if choice == "go left":
get_hurt('Oscar was right if this message prints', 1)
            print('You slipped on some moldy surface while Drake plays in ' +
                  'the background. You get up with a bruised arm.')
mc_moved = True
elif choice =='go right':
print("You found a small insufficient light source.")
print("You can move forward.")
mc_moved = True
elif choice == "go straight":
if not light:
print("You found a police flashlight. Lights up the room decently.")
#light_found()
else:
print("You stumble on some dark object. It falls through a " +
"crevice in the floor.")
""" Main Program """
health = int(100)
light = False
health_kit = False
bandages = int(0)
name = "Sir Donkulus"
mc = ""
name_1 = mc
print(f"Hello my name is {name}.")
print(f"What is yours {mc} fellow adventurer?")
mc = input(name_1)
print(f"Well {mc} you have entered a dungeon")
dungeon() # Added by Oscar
def light_found():
print("Where do I go next?")
print("Do I enter the mysterious room?")
print("Continue onward?")
choice = input("> ")
if choice == "Enter the mysterious room":
dead("You encounter a dragon! It burned you're face off.")
mc_moved = False
if choice == "Continue onward":
print("Freedom! You did it")
mc_moved = True
[record: repo ShawnT21/The_Box | path dungeon_adventure.py | file dungeon_adventure.py | ext py | size 2,496 bytes | program_lang python | lang en | doc_type code | stars 0 | dataset github-code | pt 6 | next seq_id 18405188691]
import numpy as np
from astropy import table
from glob import glob
import pandas as pd
from scipy.stats import binned_statistic
def get_outlier_fraction(tbl, suffix='', bins=20):
diff = np.array(np.abs(tbl['z_est'] - tbl['z']) > 0.15 * (1 + tbl['z']),
dtype=float)
stat = binned_statistic(tbl['z%s' % suffix], diff, statistic='mean',
bins=bins)
return stat.statistic
def get_diagnostics(z1, z2):
diff = np.array(z1 - z2) / (1 + np.array((z1)))
outlier_mask = np.abs(diff) < 0.15 # * (1 + z1)
med = np.median(diff)
mad = np.median(np.abs(diff - med))
return 100*np.array((np.mean(diff[outlier_mask]),
np.std(diff[outlier_mask]),
med, mad, 1-outlier_mask.mean()))
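# --- Added illustrative example (not part of the original file) ---
def _diagnostics_example():
    """Hedged worked example for get_diagnostics on synthetic redshifts."""
    z_spec = np.array([0.50, 1.00, 2.00])
    z_phot = np.array([0.52, 1.05, 3.00])
    # |z_spec - z_phot| / (1 + z_spec) = [0.013, 0.025, 0.333]; only the last
    # value exceeds the 0.15 cut, so the returned outlier fraction is ~33.3 (%).
    return get_diagnostics(z_spec, z_phot)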
def run_for_table_old(name, min=None):
t = table.Table.from_pandas(pd.read_csv(name))
tmax = t['mags'].max()
t = t[t['z_est'] > 0]
if min is None:
max_mag = 2
while max_mag <= max(max_mag, tmax):
t_ = t[t['mags'] <= max_mag]
if len(t_) > 0.9 * len(t):
break
max_mag += 1
diag_old = get_diagnostics(t_['z'], t_['z_est'])
max_outlier_rate = diag_old[-1]
used_fraction = len(t_)*100 / len(t)
i = 2
for i in range(max_mag, tmax + 1):
t_ = t[t['mags'] <= i]
x = t_['z']
y = t_['z_est']
if len(t_) == 0:
break
diag = get_diagnostics(x, y)
print(name, i, '%.3f' % diag[-1], len(t_), i,
'%.3f' % max_outlier_rate)
if diag[-1] > max_outlier_rate:
break
diag_old = diag
used_fraction = len(t_)*100 / len(t)
else:
i = min + 1
t_ = t[t['mags'] <= int(min)]
diag_old = get_diagnostics(t_['z'], t_['z_est'])
used_fraction = len(t_)*100 / len(t)
return len(t_['z']), diag_old, i - 1, used_fraction
def run_for_table(name):
if name.endswith('csv'):
df = pd.read_csv(name)
elif name.endswith('parquet'):
df = pd.read_parquet(name, columns=['mags', 'z', 'z_est'])
else:
return [0, [0]*5]
x = df['z']
y = df['z_est']
diag = get_diagnostics(x, y)
return len(df['z']), diag
def name_to_caption(name):
output = name.split('/')[-1].replace('.csv', '').replace('.parquet', '')
if '-' in output:
output_parts = output.split('-')[1:-1]
output = ' '.join([s.replace('_', ' ').replace('+', '')
for s in output_parts])
output = output.replace(' ', ' ').replace(' ', ', ')
return output
def get_stats_for_file(name, **kwargs):
output = table.Table(names=['Name', 'Mean', 'Std', 'Median',
'MAD', 'Outliers',
'Count'],
dtype=[str, float, float, float, float, float,
int])
row = run_for_table(name, **kwargs)
output.add_row([name_to_caption(name), *row[1], row[0]])
return output
def get_stats_for_folder(folder, **kwargs):
output = table.Table(names=['Name', 'Mean', 'Std', 'Median',
'MAD', 'Outliers',
'Count'],
dtype=[str, float, float, float, float, float,
int])
names = glob('%s/*.csv' % folder) + glob('%s/*.parquet' % folder)
names.sort()
for f in names:
row = run_for_table(f, **kwargs)
output.add_row([name_to_caption(f), *row[1], row[0]])
return output
[record: repo minzastro/semiphore_public | path utils/stats.py | file stats.py | ext py | size 3,666 bytes | program_lang python | lang en | doc_type code | stars 0 | dataset github-code | pt 6 | next seq_id 158477486]
import datetime as dt
from rest_framework import status
from rest_framework.exceptions import NotAuthenticated, PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from bumblebee.core.exceptions import (
MissingFieldsError,
NoneExistenceError,
UrlParameterError,
)
from bumblebee.core.helpers import create_400, create_500
from bumblebee.feeds.api.serializers.feed_serializers import (
FeedBuzzSerializer,
FeedRebuzzSerializer,
)
from bumblebee.feeds.api.serializers.user_serializers import FeedUserSerializer
from bumblebee.feeds.utils import (
get_follow_suggestions_for_user,
get_folowing_buzzes_for_user,
)
from bumblebee.users.utils import DbExistenceChecker
class FeedBuzzListView(APIView):
""" """
permission_classes = [IsAuthenticated]
def get_posts(self, *args, **kwargs):
""" """
return get_folowing_buzzes_for_user(self.request.user)
def get(self, request, *args, **kwargs):
""" """
try:
post_instances = self.get_posts()
user_serializer = FeedUserSerializer(self.request.user, many=False)
buzz_serializer = FeedBuzzSerializer(
post_instances.get("buzzes"), many=True
)
rebuzz_serializer = FeedRebuzzSerializer(
post_instances.get("rebuzzes"), many=True
)
return Response(
data=dict(
updated_time=dt.datetime.now(),
user=user_serializer.data,
post=buzz_serializer.data + rebuzz_serializer.data,
),
status=status.HTTP_200_OK,
)
except (MissingFieldsError, UrlParameterError, NoneExistenceError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not get feed due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
class FeedFollowSuggestionsListView(APIView):
""" """
permission_classes = [IsAuthenticated]
def get_suggestions(self, *args, **kwargs):
""" """
return get_follow_suggestions_for_user(self.request.user)
def get(self, request, *args, **kwargs):
""" """
try:
suggestion_instances = self.get_suggestions()
user_serializer = FeedUserSerializer(self.request.user, many=False)
suggestion_serializer = FeedUserSerializer(suggestion_instances, many=True)
return Response(
data=dict(
updated_time=dt.datetime.now(),
user=user_serializer.data,
suggestions=suggestion_serializer.data,
),
status=status.HTTP_200_OK,
)
except (MissingFieldsError, UrlParameterError, NoneExistenceError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not get suggestions due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
[record: repo sthasam2/bumblebee-backend | path bumblebee/feeds/api/views/feed_views.py | file feed_views.py | ext py | size 4,226 bytes | program_lang python | lang en | doc_type code | stars 0 | dataset github-code | pt 6 | next seq_id 25081332800]
# If this is the name of a readable file, the Python commands in that file are
# executed before the first prompt is displayed in interactive mode.
# https://docs.python.org/3/using/cmdline.html#envvar-PYTHONSTARTUP
#
# Sample code which supports concurrent interactive sessions, by only
# appending the new history is taken from
# https://docs.python.org/3/library/readline.html?highlight=readline#example
#
# The goal is to store interactive Python shell history in
# $XDG_STATE_HOME/python/python_history instead of ~/.python_history.
import atexit
import os
import readline
histfile = os.path.join(
    # expand "~" so the fallback does not create a literal "~" directory
    os.path.expanduser(os.getenv("XDG_STATE_HOME", "~/.local/state")),
    "python",
    "python_history",
)
if not os.path.exists(histfile):
os.makedirs(os.path.dirname(histfile), exist_ok=True)
if not os.path.isfile(histfile):
if os.path.exists(histfile):
os.remove(histfile)
# Create an empty file
open(histfile, "a").close()
readline.read_history_file(histfile)
h_len = readline.get_current_history_length()
def save(prev_h_len, histfile):
new_h_len = readline.get_current_history_length()
# Make the history file much bigger for relative suggestions
readline.set_history_length(int(os.getenv("HISTSIZE", 1000000)))
readline.append_history_file(new_h_len - prev_h_len, histfile)
atexit.register(save, h_len, histfile)
# Map TAB to auto-completion instead of the TAB symbol
readline.parse_and_bind("tab: complete")
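# --- Added usage note (not part of the original file) ---
# To activate this startup file, point PYTHONSTARTUP at it before launching an
# interactive interpreter, e.g. in a shell profile (the path below is
# illustrative; adjust it to wherever the file is deployed):
#   export PYTHONSTARTUP="$HOME/.config/python/pythonstartup.py"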
[record: repo mvshmakov/dotfiles | path python/.config/python/pythonstartup.py | file pythonstartup.py | ext py | size 1,442 bytes | program_lang python | lang en | doc_type code | stars 3 | dataset github-code | pt 6 | next seq_id 37822719553]
"""Contains the MetaCurriculum class."""
import os
from unitytrainers.curriculum import Curriculum
from unitytrainers.exception import MetaCurriculumError
import logging
logger = logging.getLogger('unitytrainers')
class MetaCurriculum(object):
"""A MetaCurriculum holds curriculums. Each curriculum is associated to a particular
brain in the environment.
"""
def __init__(self, curriculum_folder, default_reset_parameters):
"""Initializes a MetaCurriculum object.
Args:
curriculum_folder (str): The relative or absolute path of the
folder which holds the curriculums for this environment.
The folder should contain JSON files whose names are the
brains that the curriculums belong to.
default_reset_parameters (dict): The default reset parameters
of the environment.
"""
used_reset_parameters = set()
self._brains_to_curriculums = {}
try:
for curriculum_filename in os.listdir(curriculum_folder):
brain_name = curriculum_filename.split('.')[0]
curriculum_filepath = \
os.path.join(curriculum_folder, curriculum_filename)
curriculum = Curriculum(curriculum_filepath, default_reset_parameters)
# Check if any two curriculums use the same reset params.
if any([(parameter in curriculum.get_config().keys()) for parameter in used_reset_parameters]):
logger.warning('Two or more curriculums will '
'attempt to change the same reset '
'parameter. The result will be '
'non-deterministic.')
used_reset_parameters.update(curriculum.get_config().keys())
self._brains_to_curriculums[brain_name] = curriculum
except NotADirectoryError:
raise MetaCurriculumError(curriculum_folder + ' is not a '
'directory. Refer to the ML-Agents '
'curriculum learning docs.')
@property
def brains_to_curriculums(self):
"""A dict from brain_name to the brain's curriculum."""
return self._brains_to_curriculums
@property
def lesson_nums(self):
"""A dict from brain name to the brain's curriculum's lesson number."""
lesson_nums = {}
for brain_name, curriculum in self.brains_to_curriculums.items():
lesson_nums[brain_name] = curriculum.lesson_num
return lesson_nums
@lesson_nums.setter
def lesson_nums(self, lesson_nums):
for brain_name, lesson in lesson_nums.items():
self.brains_to_curriculums[brain_name].lesson_num = lesson
def increment_lessons(self, progresses):
"""Increments all the lessons of all the curriculums in this MetaCurriculum.
Args:
progresses (dict): A dict of brain name to progress.
"""
for brain_name, progress in progresses.items():
self.brains_to_curriculums[brain_name].increment_lesson(progress)
def set_all_curriculums_to_lesson_num(self, lesson_num):
"""Sets all the curriculums in this meta curriculum to a specified lesson number.
Args:
lesson_num (int): The lesson number which all the curriculums will
be set to.
"""
for _, curriculum in self.brains_to_curriculums.items():
curriculum.lesson_num = lesson_num
def get_config(self):
"""Get the combined configuration of all curriculums in this MetaCurriculum.
Returns:
A dict from parameter to value.
"""
config = {}
for _, curriculum in self.brains_to_curriculums.items():
curr_config = curriculum.get_config()
config.update(curr_config)
return config
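# --- Added illustrative sketch (not part of the original file) ---
# Hedged usage example; the folder name, brain names and reset-parameter keys
# below are hypothetical, but the calls follow the docstrings above:
#   meta = MetaCurriculum('curricula/', default_reset_parameters={'difficulty': 0.0})
#   meta.increment_lessons({'BallBrain': 0.6, 'WallBrain': 0.2})
#   reset_config = meta.get_config()   # merged reset parameters across curriculums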
[record: repo Sohojoe/ActiveRagdollAssaultCourse | path python/unitytrainers/meta_curriculum.py | file meta_curriculum.py | ext py | size 3,968 bytes | program_lang python | lang en | doc_type code | stars 37 | dataset github-code | pt 6 | next seq_id 16635792661]
from pulp_2to3_migration.app.plugin.api import (
is_different_relative_url,
Pulp2to3Importer,
Pulp2to3Distributor,
)
from pulp_rpm.app.models import RpmRemote, RpmPublication, RpmDistribution
from pulp_rpm.app.tasks.publishing import publish
from urllib.parse import urlparse, urlunparse
class RpmImporter(Pulp2to3Importer):
"""
Interface to migrate Pulp 2 RPM importer
"""
pulp3_remote_models = [RpmRemote]
@classmethod
def migrate_to_pulp3(cls, pulp2importer):
"""
Migrate importer to Pulp 3.
Args:
pulp2importer(Pulp2Importer): Pre-migrated pulp2 importer to migrate
Return:
remote(RpmRemote): RpmRemote in Pulp3
created(bool): True if Remote has just been created; False if Remote is an existing one
"""
pulp2_config = pulp2importer.pulp2_config
base_config, name = cls.parse_base_config(pulp2importer, pulp2_config)
sles_auth_token = pulp2_config.get("query_auth_token")
if sles_auth_token:
base_config["sles_auth_token"] = sles_auth_token
else:
url = urlparse(pulp2_config.get("feed", ""))
if url.query and "=" not in url.query and "&" not in url.query:
base_config["sles_auth_token"] = url.query
base_config["url"] = urlunparse(url._replace(query=""))
return RpmRemote.objects.update_or_create(name=name, defaults=base_config)
class RpmDistributor(Pulp2to3Distributor):
"""
Interface to migrate Pulp 2 RPM distributor
"""
pulp3_publication_models = [RpmPublication]
pulp3_distribution_models = [RpmDistribution]
@classmethod
def migrate_to_pulp3(cls, pulp2distributor, repo_version, signing_service):
"""
Migrate distributor to Pulp 3.
Args:
pulp2distributor(Pulp2distributor): Pre-migrated pulp2 distributor to migrate
Return:
publication(RpmPublication): publication in Pulp 3
distribution(RpmDistribution): distribution in Pulp 3
created(bool): True if a distribution has just been created; False if a distribution
is an existing one
"""
pulp2_config = pulp2distributor.pulp2_config
# this will go away with the simple-complex plan conversion work
if not repo_version:
repo = pulp2distributor.pulp2_repos.filter(not_in_plan=False, is_migrated=True)
repo_version = repo[0].pulp3_repository_version
publication = repo_version.publication_set.filter(complete=True).first()
if not publication:
pulp2_checksum_type = pulp2_config.get("checksum_type")
checksum_types = None
if pulp2_checksum_type:
checksum_types = {
"metadata": pulp2_checksum_type,
"package": pulp2_checksum_type,
}
else:
# Set the checksum type based on content in a repo, pulp 2 supports only one
# checksum type for packages in a repo. It is important to set checksum type for
# Pulp 3 to Pulp 2 sync use case.
package_qs = repo_version.content.filter(pulp_type="rpm.package")
if package_qs.count():
pkg_checksum_type = package_qs.first().cast().checksum_type
checksum_types = {
"metadata": pkg_checksum_type,
"package": pkg_checksum_type,
}
sqlite = pulp2_config.get("generate_sqlite", False)
try:
publish(
repo_version.pk,
checksum_types=checksum_types,
sqlite_metadata=sqlite,
)
except TypeError:
# hack, pulp_rpm <3.9 doesn't support sqlite_metadata kwarg
publish(repo_version.pk, checksum_types=checksum_types)
publication = repo_version.publication_set.filter(complete=True).first()
# create distribution
distribution_data = cls.parse_base_config(pulp2distributor, pulp2_config)
# ensure that the base_path does not end with / in Pulp 3, it's often present in Pulp 2.
base_path = pulp2_config.get("relative_url", pulp2distributor.pulp2_repo_id)
distribution_data["base_path"] = base_path.rstrip("/")
distribution_data["publication"] = publication
distribution, created = RpmDistribution.objects.update_or_create(
name=distribution_data["name"],
base_path=distribution_data["base_path"],
defaults=distribution_data,
)
return publication, distribution, created
@classmethod
def needs_new_publication(cls, pulp2distributor):
"""
Check if a publication associated with the pre_migrated distributor needs to be recreated.
Args:
pulp2distributor(Pulp2Distributor): Pre-migrated pulp2 distributor to check
Return:
bool: True, if a publication needs to be recreated; False if no changes are needed
"""
if not pulp2distributor.pulp3_publication:
return True
new_checksum_type = pulp2distributor.pulp2_config.get("checksum_type")
current_checksum_type = pulp2distributor.pulp3_publication.cast().metadata_checksum_type
is_default_checksum_type = new_checksum_type is None and current_checksum_type == "sha256"
if new_checksum_type != current_checksum_type and not is_default_checksum_type:
return True
return False
@classmethod
def needs_new_distribution(cls, pulp2distributor):
"""
Check if a distribution associated with the pre_migrated distributor needs to be recreated.
Args:
pulp2distributor(Pulp2Distributor): Pre-migrated pulp2 distributor to check
Return:
bool: True, if a distribution needs to be recreated; False if no changes are needed
"""
return is_different_relative_url(pulp2distributor)
[record: repo pulp/pulp-2to3-migration | path pulp_2to3_migration/app/plugin/rpm/repository.py | file repository.py | ext py | size 6,151 bytes | program_lang python | lang en | doc_type code | stars 3 | dataset github-code | pt 6 | next seq_id 71474225467]
import csv
positives = 0
negatives = 0
i = 1
tn = 0
tp = 0
fn = 0
fp = 0
flag = 0
flag2 = 0
totalRawCount = 0
flag5 = 0
linelist = []
#################################################################################################################
#################################################################################################################
#################################################################################################################
folder_path = "C:\\Users\\Ayan Deep Hazra\\Desktop\\Repos\\Natural_Language_Processing_Research\\open this for text file " \
"manipulation\\filtered_test_scibert.csv"
csvfile = open(folder_path, encoding="utf-8")
csvreader = csv.reader(csvfile)
rows = []
truelabel = []
# the csv reader is exhausted after a single pass, so collect both columns in one loop
for row in csvreader:
    rows.append(row[0])
    truelabel.append(row[1])
#################################################################################################################
#################################################################################################################
#################################################################################################################
with open(folder_path, "r", encoding="utf-8") as filetemp:
for line in filetemp:
words = line.split()
if len(words) > 0:
linelist.append(line)
with open("file.txt", "r", encoding="utf-8") as file:
for line in file:
print(line)
words = line.split()
if len(words) > 0:
# totalRawCount = totalRawCount + 1
if words[0] == "input:":
totalRawCount = totalRawCount + 1
print(totalRawCount, words)
if flag5 == 0:
a = totalRawCount
flag5 = 1
if float(words[len(words) - 1]) > 0.1:
positives = positives + 1
str = " ".join(words[1:len(words) - 2])
flag = 0
if str.find("bulk moduli") != -1 or str.find("bulk modulus") != -1 or str.find(
"K 0") != -1 or str.find("K0") != -1 or str.find("B=") != -1 or str.find("B =") != -1:
flag = 1
# tp = tp + 1
if flag == 1 and (
str.find("GPa") != -1 or str.find("gpa") != -1 or str.find("Gpa") != -1 or str.find("range of") != -1 or str.find(
"ranges of") != -1):
tp = tp + 1
# printing the positive sentences
# print(i, " ".join(words[1:len(words) - 3]))
i = i + 1
else:
negatives = negatives + 1
str = " ".join(words[1:len(words) - 2])
flag2 = 0
if str.find("bulk moduli") != -1 or str.find("bulk modulus") != -1 or str.find("K 0") != -1 \
or str.find("K0") != -1 or str.find("B=") != -1 or str.find("B =") != -1:
flag2 = 1
# fn = fn + 1
if flag2 == 1 and (
str.find("GPa") != -1 or str.find("gpa") != -1 or str.find("Gpa") != -1 or str.find("range of") != -1 or str.find(
"ranges of") != -1):
fn = fn + 1
string = " ".join(words[1:len(words) - 3])
print(positives)
print(negatives)
fp = positives - tp
tn = negatives - fn
print("tn:", tn)
print("tp:", tp)
print("fp:", fp)
print("fn:", fn)
recall = tp / (tp + fn)
precision = tp / (tp + fp)
print("recall: ", recall)
print("precision: ", precision)
print("F score:", 2 * precision * recall / (precision + recall))
print(totalRawCount)
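# --- Added explanatory note (not part of the original file) ---
# The metrics printed above follow the usual definitions:
#   recall    = tp / (tp + fn)
#   precision = tp / (tp + fp)
#   F1        = 2 * precision * recall / (precision + recall)
# e.g. tp=8, fp=2, fn=4 gives recall ~0.667, precision 0.8 and F1 ~0.727.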
[record: repo ayandeephazra/Natural_Language_Processing_Research | path open this for text file manipulation/manualClassifier.py | file manualClassifier.py | ext py | size 3,821 bytes | program_lang python | lang en | doc_type code | stars 2 | dataset github-code | pt 6 | next seq_id 74632636347]
# -*- coding: utf-8 -*-
"""
Sihoo Celery Worker module
@author: AZLisme
@email: helloazl@icloud.com
"""
from celery import Celery
celery_app = Celery('SihooWorker')
def configure(app):
celery_app.config_from_object('sihoo.settings.celery-setting')
celery_app.config_from_envvar('SIHOO_CELERY_SETTINGS', silent=True)
app.config['CELERY'] = celery_app
[record: repo AZLisme/sihoo | path sihoo/tasks/__init__.py | file __init__.py | ext py | size 370 bytes | program_lang python | lang en | doc_type code | stars 4 | dataset github-code | pt 6 | next seq_id 22758733002]
# (C) StackState 2020
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
@pytest.fixture(scope='session')
def sts_environment():
return {
'type': 'csv',
'health_file': '/home/static_health/health.csv',
'delimiter': ',',
'collection_interval': 15
}
@pytest.fixture(scope="class")
def instance(request):
cfg = {
'type': 'csv',
'health_file': 'health.csv',
'delimiter': ',',
'collection_interval': 15
}
request.cls.instance = cfg
[record: repo StackVista/stackstate-agent-integrations | path static_health/tests/conftest.py | file conftest.py | ext py | size 560 bytes | program_lang python | lang en | doc_type code | stars 1 | dataset github-code | pt 6 | next seq_id 21142626845]
import numpy as np
import tensorflow as tf
from eventgen import CEvent
from nets.net import Net
from nets.utils import get_trainable_vars, prep_data_cells
class PPOSinghNet(Net):
def __init__(self, pre_conv=False, double_net=False, *args, **kwargs):
"""
Afterstate value net
"""
self.name = "SinghNet"
self.pre_conv = pre_conv
self.double_net = double_net
super().__init__(name=self.name, *args, **kwargs)
self.weight_beta = self.pp['weight_beta']
self.weight_beta_decay = self.pp['weight_beta_decay']
self.avg_reward = 0
def _build_vnet(self, freps, name):
with tf.variable_scope('model/' + name) as scope:
value_layer = tf.layers.Dense(
units=1,
kernel_initializer=tf.zeros_initializer(),
kernel_regularizer=self.dense_regularizer,
use_bias=False,
activation=None)
value = value_layer.apply(tf.layers.flatten(freps))
self.weight_vars.append(value_layer.kernel)
self.weight_names.append(value_layer.name)
# NOTE TODO either gotta have 7x7x70 outputs, or input cell
# also gotta think about a hidden layer before value/policy
trainable_vars = get_trainable_vars(scope)
return value, trainable_vars
def _build_pnet(self, freps, name):
with tf.variable_scope('model/' + name) as scope:
# policy = tf.keras.layers.LocallyConnected2D(
# filters=70,
# kernel_size=1,
# padding="valid",
# kernel_initializer=tf.zeros_initializer(),
# use_bias=self.pp['conv_bias'],
# activation=None)(freps)
# print(policy.shape)
policy_layer = tf.layers.Dense(
units=70,
kernel_initializer=tf.zeros_initializer(),
kernel_regularizer=self.dense_regularizer,
use_bias=False,
activation=None)
policy = policy_layer.apply(tf.layers.flatten(freps))
# self.weight_vars.append(policy_layer.kernel)
# self.weight_names.append(policy_layer.name)
trainable_vars = get_trainable_vars(scope)
return policy, trainable_vars
def build(self):
# frepshape = [None, self.rows, self.cols, self.n_channels * 3 + 1]
frepshape = [None, self.rows, self.cols, self.n_channels + 1]
self.freps = tf.placeholder(tf.float32, frepshape, "feature_reps")
self.value_target = tf.placeholder(tf.float32, [None], "value_target")
oh_cellshape = [None, self.rows, self.cols, 1]
self.cells = tf.placeholder(tf.bool, oh_cellshape, "oh_cell")
cells = tf.cast(self.cells, tf.float32)
# self.cells = tf.placeholder(tf.int32, [None, 2], "cell")
self.action = tf.placeholder(tf.int32, [None], "action")
self.policy_in = tf.placeholder(tf.float32, [70], "pol_in")
self.old_neglogpac = tf.placeholder(tf.float32, [None], "old_neglogpac")
inp = tf.concat([self.freps, cells], axis=3)
self.value, online_vf_vars = self._build_vnet(self.freps, "online-vf")
self.policy, online_pg_vars = self._build_pnet(inp, "online-pg")
# nrange = tf.range(tf.shape(self.freps)[0], name="cellrange")
# ncells = tf.concat([tf.expand_dims(nrange, axis=1), self.cells], axis=1)
# self.policy = tf.gather_nd(self.conv_policy, ncells)
self.err = self.value_target - self.value
self.vf_loss = tf.losses.mean_squared_error(
labels=tf.expand_dims(self.value_target, axis=1), predictions=self.value)
CLIPRANGE = 0.2
self.neglogpac_out = self.neglogp(self.policy_in, self.action)
self.neglogpac = self.neglogp(self.policy, self.action)
ratio = tf.exp(self.old_neglogpac - self.neglogpac)
pg_losses = -self.value_target * ratio
pg_losses2 = -self.value_target * tf.clip_by_value(ratio, 1.0 - CLIPRANGE,
1.0 + CLIPRANGE)
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
# entropy = self.entropy(self.policy)
# pgnet_loss = pg_loss + 0.01 * entropy
# trainer = tf.train.AdamOptimizer(learning_rate=self.pp, epsilon=1e-5)
trainer = tf.train.GradientDescentOptimizer(
learning_rate=1e-6) #self.pp['net_lr'])
grads = trainer.compute_gradients(pg_loss, online_pg_vars)
self.do_train_pg = trainer.apply_gradients(grads)
return self.vf_loss, online_vf_vars
@staticmethod
def entropy(logits):
a0 = logits - tf.reduce_max(logits, axis=-1, keep_dims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=-1, keep_dims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.log(z0) - a0), axis=-1)
@staticmethod
def neglogp(logits, x):
one_hot_actions = tf.one_hot(x, logits.get_shape().as_list()[-1])
return tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=one_hot_actions)
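    # --- Added explanatory note (not part of the original file) ---
    # neglogp(logits, x) above returns -log softmax(logits)[x]; an equivalent
    # NumPy check for a single 1-D logits vector would be (hedged sketch):
    #   p = np.exp(logits - logits.max()); p /= p.sum(); neglogp = -np.log(p[x])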
def forward_action(self, frep, cell, ce_type, chs):
# u = tf.random_uniform(tf.shape(self.policy))
# self.sample_action = tf.argmax(elig_policy - tf.log(-tf.log(u)), axis=-1)
policy = self.sess.run(self.policy, {
self.freps: [frep],
self.cells: prep_data_cells(cell),
})[0]
# u = np.random.uniform(policy.shape)
# policy_ent = policy - np.log(-np.log(u))
# NOTE TODO should this be argmin for END?
if ce_type == CEvent.END:
idx = np.argmin(policy[chs])
else:
idx = np.argmax(policy[chs])
ch = chs[idx]
neglogpac = self.sess.run(self.neglogpac_out, {
self.policy_in: policy,
self.action: [ch]
})
return ch, neglogpac
def get_neglogpac(self, frep, cell, ch):
policy = self.sess.run(self.policy, {
self.freps: [frep],
self.cells: prep_data_cells(cell),
})[0]
neglogpac = self.sess.run(self.neglogpac_out, {
self.policy_in: policy,
self.action: [ch]
})
return neglogpac
def forward_value(self, freps):
value = self.sess.run(
self.value,
feed_dict={
self.freps: freps
},
).squeeze()
return value
def backward(self, step, buf, n_step):
# TODO:
# - collect nsteps of data. 16-128
# - train noptepochs consecutive times on pg net. 4
# next_values = self.sess.run(
# self.value, feed_dict={
# self.freps: [e.next_frep for e in buf]
# }).squeeze()
value_target = step.reward - self.avg_reward + step.next_val
loss, lr, err = self.backward_vf([step.frep], [value_target])
if len(buf) != n_step:
return loss, lr, err
# np.random.shuffle(buf)
next_values = np.array([e.next_val for e in buf])
rewards = [e.reward for e in buf]
value_targets = rewards + next_values - self.avg_reward
freps = [e.frep for e in buf]
cells = [e.cell for e in buf]
neglogpacs = [e.neglogpac for e in buf]
chs = [e.ch for e in buf]
for _ in range(4):
self.sess.run(
[self.do_train_pg], {
self.freps: freps,
self.cells: prep_data_cells(cells),
self.value_target: value_targets,
self.old_neglogpac: neglogpacs,
self.action: chs
})
return loss, lr, err
def backward_vf(self, freps, value_target):
data = {self.freps: freps, self.value_target: value_target}
_, loss, lr, err = self.sess.run([self.do_train, self.vf_loss, self.lr, self.err],
data)
self.avg_reward += self.weight_beta * np.mean(err)
return loss, lr, err
[record: repo tsoernes/dca | path dca/nets/singh_ppo.py | file singh_ppo.py | ext py | size 8,129 bytes | program_lang python | lang en | doc_type code | stars 14 | dataset github-code | pt 6 | next seq_id 41705912087]
import newspaper
# Declare the url
url = "https://ktechhub.com/tutorials/completely-deploy-your-laravel-application-on-ubuntu-linux-server-60a51098a8bf2"
#Extract web content
article = newspaper.Article(url=url, language='en')
article.download()
article.parse()
# Display scraped data
print(article.text)
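# --- Added illustrative extension (not part of the original file) ---
# newspaper's Article object also exposes the parsed title and author list
# after parse(), so the same pattern can print a bit more context:
# print(article.title)
# print(article.authors)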
[record: repo Kalkulus1/python_codes | path scrape_any_web_article.py | file scrape_any_web_article.py | ext py | size 302 bytes | program_lang python | lang en | doc_type code | stars 0 | dataset github-code | pt 6 | next seq_id 19691344827]
"""Define custom dataset class extending the Pytorch Dataset class"""
import os
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
from PIL import Image
import torch
from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as tvt
from utils.utils import Params
class SketchesDataset(Dataset):
"""Custom class for Sketches dataset"""
def __init__(self, root: str, csv_file: str, transform: tvt = None) -> None:
"""Get the filenames and labels of images from a csv file.
Args:
root: Directory containing the data
csv_file: file containing the data
transform: Transformation to apply on images
"""
self.root = root
self.data = pd.read_csv(os.path.join(root, csv_file))
self.transform = transform
def __len__(self) -> int:
"""Return the size of the dataset.
"""
return len(self.data)
def __getitem__(self, idx: int) -> Tuple[Image.Image, np.ndarray]:
"""Get an item from the dataset given the index idx"""
row = self.data.iloc[idx]
im_name = row["Image Id"] + ".png"
im_path = os.path.join(self.root, "images", im_name)
img = Image.open(im_path).convert("RGB")
labels = torch.tensor(row[1:], dtype=torch.float32)
if self.transform is not None:
img = self.transform(img)
return img, labels
def get_transform(mode: str, params: Params) -> tvt.Compose:
"""Data augmentation
Args:
        mode: Mode of operation i.e. 'train', 'val', 'test'
        params: Hyperparameters
Returns:
Composition of all the data transforms
"""
trans = [
tvt.Resize((params.height, params.width)),
tvt.ToTensor(),
tvt.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
]
if mode == "train":
trans += [
tvt.RandomHorizontalFlip(params.flip),
tvt.ColorJitter(
brightness=params.brightness,
contrast=params.contrast,
saturation=params.saturation,
hue=params.hue
),
tvt.RandomRotation(params.degree)
]
return tvt.Compose(trans)
def collate_fn(batch: List[Tuple[torch.tensor, torch.tensor]]) -> Tuple[torch.tensor, torch.tensor]:
"""Collate function to create a batch of data
Args:
batch: List of data generated by dataset
Returns:
Batch of images and labels
"""
data = list(zip(*batch))
imgs = torch.stack(data[0], 0)
labels = torch.stack(data[1], 0)
return imgs, labels
def get_dataloader(
modes: List[str],
params: Params,
) -> Dict[str, DataLoader]:
"""Get DataLoader objects.
Args:
modes: Mode of operation i.e. 'train', 'val', 'test'
params: Hyperparameters
Returns:
DataLoader object for each mode
"""
dataloaders = {}
for mode in modes:
if mode == "train":
trans = get_transform(mode, params)
shuf = True
else:
trans = get_transform(mode, params)
shuf = False
dataset = SketchesDataset(
root=params.data_dir,
csv_file=mode + "_sketches_" + params.type + ".csv",
transform=trans
)
dataloaders[mode] = DataLoader(
dataset,
batch_size=params.batch_size,
num_workers=params.num_workers,
pin_memory=params.pin_memory,
collate_fn=collate_fn,
shuffle=shuf
)
return dataloaders
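# --- Added illustrative sketch (not part of the original file) ---
# Hedged usage example; `params` is whatever utils.utils.Params loads from a
# JSON config, and the config path below is hypothetical:
#   params = Params("params.json")
#   loaders = get_dataloader(["train", "val"], params)
#   imgs, labels = next(iter(loaders["train"]))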
[record: repo karanrampal/sketches | path src/model/data_loader.py | file data_loader.py | ext py | size 3,585 bytes | program_lang python | lang en | doc_type code | stars 0 | dataset github-code | pt 6 | next seq_id 11898315364]
#!/usr/bin/env python3
""" basic Flask app """
from flask import Flask, render_template, request, g
from flask_babel import Babel
import pytz
app = Flask(__name__)
babel = Babel(app)
users = {
1: {"name": "Balou", "locale": "fr", "timezone": "Europe/Paris"},
2: {"name": "Beyonce", "locale": "en", "timezone": "US/Central"},
3: {"name": "Spock", "locale": "kg", "timezone": "Vulcan"},
4: {"name": "Teletubby", "locale": None, "timezone": "Europe/London"},
}
class Config(object):
""" Babel configuration class """
LANGUAGES = ["en", "fr"]
BABEL_DEFAULT_LOCALE = 'en'
BABEL_DEFAULT_TIMEZONE = 'UTC'
def get_user() -> dict:
""" returns a user dictionary or None """
user_logged = request.args.get('login_as')
if user_logged and int(user_logged) in users:
return users[int(user_logged)]
return None
@app.before_request
def before_request():
""" find a user if any """
user = get_user()
g.user = user
@babel.localeselector
def get_locale():
""" determine the best match """
locale = request.args.get('locale')
if locale and locale in Config.LANGUAGES:
return locale
if g.user:
locale = g.user.get('locale')
if locale and locale in Config.LANGUAGES:
return locale
locale = request.headers.get('locale')
if locale and locale in Config.LANGUAGES:
return locale
return request.accept_languages.best_match(Config.LANGUAGES)
@babel.timezoneselector
def get_timezone():
""" get timezone selector """
try:
if request.args.get('timezone'):
time_zone = request.args.get('timezone')
pytz.timezone(time_zone)
elif g.user and g.user.get('timezone'):
time_zone = g.user.get('timezone')
pytz.timezone(time_zone)
else:
time_zone = app.config["BABEL_DEFAULT_TIMEZONE"]
pytz.timezone(time_zone)
except pytz.exceptions.UnknownTimeZoneError:
time_zone = "UTC"
return time_zone
app.config.from_object(Config)
@app.route("/", methods=["GET"])
def index():
""" returns the index """
return render_template('7-index.html')
if __name__ == "__main__":
app.run(host="0.0.0.0", port="5000")
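# --- Added usage note (not part of the original file) ---
# Locale resolution above is: the ?locale=<en|fr> query parameter first, then
# the logged-in user's locale, then a 'locale' header, then Accept-Language.
# e.g. http://0.0.0.0:5000/?login_as=2&locale=fr renders the French strings.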
[record: repo jeanpierreba/holbertonschool-web_back_end | path 0x0A-i18n/7-app.py | file 7-app.py | ext py | size 2,161 bytes | program_lang python | lang en | doc_type code | stars 0 | dataset github-code | pt 6 | next seq_id 19399695029]
from typing import List
import copy
class Solution:
def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
graph = {}
s = set()
for i, each in enumerate(equations):
nominator = each[0]
denominator = each[1]
s.add(nominator)
s.add(denominator)
if nominator not in graph:
graph[nominator] = {denominator: values[i]}
else:
graph[nominator][denominator] = values[i]
if denominator not in graph:
graph[denominator] = {nominator: 1.0 / values[i]}
else:
graph[denominator][nominator] = 1.0 / values[i]
print(graph)
ans = []
for query in queries:
nominator = query[0]
denominator = query[1]
if nominator not in s or denominator not in s:
ans.append(-1)
elif nominator == denominator:
ans.append(1.0)
else:
r = self.solve(graph, nominator,denominator, [], [])
ans.append(r)
return ans
def solve(self, graph, nominator, denominator, path, ratio):
# print(path)
if nominator == denominator:
res = 1
for each in ratio:
res *= each
return res
if len(path) == 0:
path.append(nominator)
for each in graph[nominator]:
if each not in path:
p = copy.deepcopy(path)
r = copy.deepcopy(ratio)
p.append(each)
r.append(graph[nominator][each])
ans = self.solve(graph, each, denominator, p, r)
if ans != -1:
return ans
# path.pop()
# ratio.pop()
return -1
equations = [["x1","x2"],["x2","x3"],["x3","x4"],["x4","x5"]]
values = [3.0,4.0,5.0,6.0]
queries =[["x1","x5"],["x5","x2"],["x2","x4"],["x2","x2"],["x2","x9"],["x9","x9"]]
s = Solution()
r = s.calcEquation(equations, values, queries)
print(r)
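# --- Added explanatory note (not part of the original file) ---
# The query x1/x5 above is answered by multiplying the edge weights along the
# path x1 -> x2 -> x3 -> x4 -> x5: 3.0 * 4.0 * 5.0 * 6.0 = 360.0, so r[0] == 360.0.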
[record: repo Yigang0622/LeetCode | path calcEquation.py | file calcEquation.py | ext py | size 2,160 bytes | program_lang python | lang en | doc_type code | stars 1 | dataset github-code | pt 6 | next seq_id 19981905247]
import json
import os
from aiogram import Bot, Dispatcher, executor, types
from aiogram.dispatcher.filters import Text
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.dispatcher import FSMContext
from aiogram.utils.markdown import hbold, hlink
from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, KeyboardButton
from dotenv import load_dotenv
from sbermarket import get_data
load_dotenv()
TOKEN = str(os.environ.get('TOKEN'))
bot = Bot(token=TOKEN, parse_mode=types.ParseMode.HTML)
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)
async def on_startup(_):
print('BOT STARTED')
class Form(StatesGroup):
search = State()
resource = State()
def main_menu_keyboard():
keyboard = ReplyKeyboardMarkup(resize_keyboard=True)
last_attempt = KeyboardButton(text='Получить предыдущий запрос')
help_button = KeyboardButton(text='Справка')
description_button = KeyboardButton(text='Описание')
    # both buttons have handlers below; the second assignment used to overwrite the first
    resource_button = KeyboardButton(text='Выбрать ресурс для поиска')
    search_button = KeyboardButton(text='Ввести поисковый запрос')
    keyboard.add(last_attempt).add(description_button, help_button).add(resource_button, search_button)
return keyboard
def resource_keyboard():
keyboard = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text='МВидео', callback_data='res_mvideo')],
[InlineKeyboardButton(text='СберМаркет', callback_data='res_sbermarket')],
[InlineKeyboardButton(text='DNS', callback_data='res_dns')],
[InlineKeyboardButton(text='Корпорация Центр', callback_data='res_kcent')],
[InlineKeyboardButton(text='Вернуться в главное меню', callback_data='main_menu')],
])
return keyboard
@dp.message_handler(commands='start')
async def start_command(message: types.Message):
await message.answer(f'<b>Добро пожаловать!</b>\nЭтот бот позволит найти интересующее Вас товары на СберМаркете со скидкой!',
reply_markup=main_menu_keyboard())
await message.delete()
@dp.message_handler(Text(equals='Выбрать ресурс для поиска'))
async def resource_command(message: types.Message):
await message.answer(text='Вы перешли в меню выбора сайта!', reply_markup=ReplyKeyboardRemove())
await message.answer(text='Пожалуйста выберете сайт для поиска!', reply_markup=resource_keyboard())
await message.delete()
@dp.message_handler(Text(equals='Ввести поисковый запрос'))
async def get_discount_search(message: types.Message):
await Form.search.set()
await message.reply("Вводите поисковый запрос:")
@dp.callback_query_handler(lambda callback: callback.data.startswith('res'), state=Form.search)
async def get_resource(callback: types.CallbackQuery, state: FSMContext):
    if callback.data.endswith('mvideo'):
        async with state.proxy() as data:
            data['search'] = callback.data  # FIXME: resource handling is unfinished (the original comment flagged this line)
@dp.callback_query_handler()
async def main_menu(callback: types.CallbackQuery):
await callback.message.answer('Возврат в главное меню!', reply_markup=main_menu_keyboard())
@dp.message_handler(state=Form.search)
async def get_discount_search(message: types.Message, state: FSMContext):
async with state.proxy() as data:
data['search'] = message.text
await message.answer('Идет поиск. Примерное время ожидания: 30 секунд\nОжидайте...')
get_data(message.text, message.from_user.id)
with open(f'data/sbermarket-{message.from_user["id"]}.json', encoding='utf-8') as file:
data = json.load(file)
for item in data[:6]:
card = f'{hlink(item.get("item_name"), item.get("url"))}\n' \
f'{hbold("Старая цена")} {item.get("old_price")}\n' \
f'👩🏿🎓👩🏿🎓{hbold("Новая цена")} -{item.get("discount")}%: {item.get("item_price")}👩🏿🎓👩🏿🎓\n'
await message.answer(card)
async with state.proxy() as data:
for i in data:
print(data)
await state.finish()
def main():
executor.start_polling(dp, skip_updates=True, on_startup=on_startup)
if __name__ == '__main__':
main()
[record: repo Baradys/scrappers | path scrappers/sbermarket/sbermarket_bot.py | file sbermarket_bot.py | ext py | size 4,624 bytes | program_lang python | lang en | doc_type code | stars 0 | dataset github-code | pt 6 | next seq_id 5414745490]
#!flask/bin/python
"""Alternative version of the ToDo RESTful server implemented using the
Flask-RESTful extension."""
from flask import Flask, jsonify, abort, make_response, request, current_app
from flask.ext.restful import Api, Resource, reqparse, fields, marshal
from flask.ext.httpauth import HTTPBasicAuth
from datetime import timedelta
from functools import update_wrapper
from py2neo import Graph
graph = Graph("http://PyBase:sZzmKcoKKjG1pnUhjitl@pybase.sb04.stations.graphenedb.com:24789/db/data/")
app = Flask(__name__, static_url_path="")
api = Api(app)
auth = HTTPBasicAuth()
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
@app.route('/')
def hello_world():
    return 'Hello cruel World!'
@auth.get_password
def get_password(username):
if username == 'miguel':
return 'python'
return None
@auth.error_handler
def unauthorized():
# return 403 instead of 401 to prevent browsers from displaying the default
# auth dialog
return make_response(jsonify({'message': 'Unauthorized access'}), 403)
tasks = [
{
'id': 1,
'title': u'Buy groceries',
'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
'done': False
},
{
'id': 2,
'title': u'Learn Python',
'description': u'Need to find a good Python tutorial on the web',
'done': False
}
]
task_fields = {
'title': fields.String,
'description': fields.String,
'done': fields.Boolean,
'uri': fields.Url('task')
}
person_fields = {
'userName': fields.String,
'playerID': fields.String,
'firstName': fields.String,
'lastName': fields.String,
'city':fields.String,
'email': fields.String,
'bankRef': fields.String,
'gender':fields.String,
'role': fields.String,
'active': fields.String,
'img': fields.String
}
class TaskListAPI(Resource):
decorators = [auth.login_required]
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('title', type=str, required=True,
help='No task title provided',
location='json')
self.reqparse.add_argument('description', type=str, default="",
location='json')
super(TaskListAPI, self).__init__()
def get(self):
return {'tasks': [marshal(task, task_fields) for task in tasks]}
def post(self):
args = self.reqparse.parse_args()
task = {
'id': tasks[-1]['id'] + 1,
'title': args['title'],
'description': args['description'],
'done': False
}
tasks.append(task)
return {'task': marshal(task, task_fields)}, 201
class PeopleListAPI(Resource):
# decorators = [auth.login_required]
people = []
def __init__(self):
super(PeopleListAPI, self).__init__()
@crossdomain(origin='*')
def get(self):
if self.people == []:
qry = "MATCH (n :PERSON) RETURN n"
ans = graph.cypher.execute(qry)
for x in ans:
person = {
'userName': x[0]['userName'],
'playerID': x[0]['playerID'],
'firstName': x[0]['firstName'],
'lastName': x[0]['lastName'],
'city': x[0]['city'],
'email': x[0]['email'],
'bankRef': x[0]['bankRef'],
'gender':x[0]['gender'],
'role': x[0]['role'],
'active': x[0]['active'],
'img': x[0]['img']
}
self.people.append(person)
return jsonify({'people': self.people})
class TaskAPI(Resource):
decorators = [auth.login_required]
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('title', type=str, location='json')
self.reqparse.add_argument('description', type=str, location='json')
self.reqparse.add_argument('done', type=bool, location='json')
super(TaskAPI, self).__init__()
def get(self, id):
task = [task for task in tasks if task['id'] == id]
if len(task) == 0:
abort(404)
return {'task': marshal(task[0], task_fields)}
def put(self, id):
task = [task for task in tasks if task['id'] == id]
if len(task) == 0:
abort(404)
task = task[0]
args = self.reqparse.parse_args()
for k, v in args.items():
if v is not None:
task[k] = v
return {'task': marshal(task, task_fields)}
def delete(self, id):
task = [task for task in tasks if task['id'] == id]
if len(task) == 0:
abort(404)
tasks.remove(task[0])
return {'result': True}
api.add_resource(TaskListAPI, '/todo/api/v1.0/tasks', endpoint='tasks')
api.add_resource(PeopleListAPI, '/todo/api/v1.0/people', endpoint='people')
api.add_resource(TaskAPI, '/todo/api/v1.0/tasks/<int:id>', endpoint='task')
if __name__ == '__main__':
app.run(debug=True)
[record: repo Spanarchie/BaseAPI | path BaseAPI.py | file BaseAPI.py | ext py | size 6,714 bytes | program_lang python | lang en | doc_type code | stars 0 | dataset github-code | pt 6 | next seq_id 44685313114]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 3 20:18:24 2019
@author: YAO
"""
import pandas as pd
r = pd.read_csv("data_test.csv")
r1=pd.DataFrame(r)
#delete other columns
r2=r1.drop(['trajectory_id', 'time_entry', 'time_exit','vmax','vmin','vmean','x_entry','y_entry','x_exit','y_exit'], axis=1)
#delete duplicate
r3=r2.drop_duplicates(subset='hash', keep='last', inplace=False)
#add one conlume
r3['city_center']='1'
#rename
r3 = r3.rename(columns={'hash': 'trajectory_id'})
# define r_0 and r_1: submissions with target all 0 and all 1, respectively
# (r_0/r_1 were used but never created in the original script; reconstructed
# here as copies of r3, which already holds trajectory_id plus the target column)
r_1 = r3.copy()
r_0 = r3.copy()
r_0['city_center'] = '0'
# rename columns to the submission format
r_1.columns = ['id', 'target']
r_0.columns = ['id', 'target']
#write to csv
r_0.to_csv("submission_0.csv",index=False,sep=',')
r_1.to_csv("submission_1.csv",index=False,sep=',')
[record: repo zy-yao/EY-NextWave-Data-Science-Challenge-2019 | path Data_Preparation_1.py | file Data_Preparation_1.py | ext py | size 778 bytes | program_lang python | lang en | doc_type code | stars 0 | dataset github-code | pt 6 | next seq_id 19501436322]
"""
This file (test_youbit.py) contains unit tests for the encode.py and decode.py files.
"""
from pathlib import Path
import os
import time
from yt_dlp.utils import DownloadError
from tests.conftest import uploads
from youbit import Encoder, download_and_decode
from youbit.settings import Settings, Browser
from youbit.download import Downloader
from youbit.util import get_md5
@uploads
def test_youbit_round_trip(browser: Browser, tempdir: Path):
test_file = Path(os.path.dirname(__file__)) / "testdata" / "files" / "test_file.jpg"
encoder = Encoder(test_file, Settings(browser=browser))
url = encoder.encode_and_upload()
time.sleep(
10
) # YouTube needs time to process the video before we can download the correct resolution
timeout = 0
    while timeout < 60:
        try:
            downloader = Downloader(url)
            if downloader.best_vbr > 6000:
                break
        except DownloadError:
            pass
        # also wait and advance the timeout when the download metadata is available
        # but the high-bitrate rendition is not yet processed (avoids a busy loop)
        time.sleep(5)
        timeout += 5
if timeout >= 60:
assert False, "Timeout"
output_path = download_and_decode(url, tempdir)
original_md5 = get_md5(test_file)
output_md5 = get_md5(output_path)
assert original_md5 == output_md5
[record: repo mevimo/youbit | path tests/unit/test_youbit.py | file test_youbit.py | ext py | size 1,245 bytes | program_lang python | lang en | doc_type code | stars 651 | dataset github-code | pt 6 | next seq_id 74800916026]
import openai
import uvicorn
from fastapi import FastAPI, Request, Form
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.vectorstores import DocArrayInMemorySearch
import os
import datetime
import random
#import IPython.display
from PIL import Image
import base64
import requests
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import openai
import tkinter as tk
from langchain.llms import OpenAI
from langchain.document_loaders import (
DataFrameLoader,
TextLoader,
PyPDFLoader
)
from langchain.text_splitter import (
RecursiveCharacterTextSplitter,
CharacterTextSplitter
)
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import (
DocArrayInMemorySearch,
Chroma
)
from langchain.chains import (
RetrievalQA,
ConversationalRetrievalChain
)
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
#from dotenv import load_dotenv, find_dotenv
# Set your OpenAI API key here
def set_openai_api_key(api_key):
openai.api_key = api_key
# function to convert data and load it into panda format
# load data and preprocess it
def squad_json_to_dataframe(file_path, record_path=['data','paragraphs','qas','answers']):
"""
    file_path: path to the SQuAD json file.
    record_path: path to the deepest level in the json file; the default value is
    ['data','paragraphs','qas','answers']
"""
file = json.loads(open(file_path).read())
# parsing different level's in the json file
js = pd.json_normalize(file, record_path)
m = pd.json_normalize(file, record_path[:-1])
r = pd.json_normalize(file,record_path[:-2])
# combining it into single dataframe
idx = np.repeat(r['context'].values, r.qas.str.len())
m['context'] = idx
data = m[['id','question','context','answers']].set_index('id').reset_index()
data['c_id'] = data['context'].factorize()[0]
return data
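# Example (sketch, mirroring the call made further below):
#   df = squad_json_to_dataframe("data/train-v1.1.json")
# yields one row per question with the columns ['id', 'question', 'context', 'answers', 'c_id'].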
def preprocess(data):
data['answers'] = data['answers'].apply(lambda x: x[0]['text'] if x else None)
    # create a new field that combines each question with its answer
    # append '$' at the end so it is easier to chunk on later
    data['qa'] = data['question'] + data['answers'] + '$'
return data
def data_loader(data):
# load the dataframe into loader
# context
loader = DataFrameLoader(data, page_content_column="qa")
doc = loader.load()
doc = doc[:6000]
return doc
def create_text_splits(doc):
# splitting text into the specific chunck sizes
# defining the overlap size for each chunck
#from langchain.text_splitter import CharacterTextSplitter
text_splitter = CharacterTextSplitter(
separator = "$",
chunk_size = 125,
chunk_overlap = 20,
length_function = len,
is_separator_regex = False,
)
splits = text_splitter.split_documents(doc)
return splits
def initialize_openai_embeddings():
embedding = OpenAIEmbeddings(request_timeout=60)
return embedding
def get_gpt_model():
# get the specific gpt model
current_date = datetime.datetime.now().date()
if current_date < datetime.date(2023, 9, 2):
llm_name = "gpt-3.5-turbo-0301"
else:
llm_name = "gpt-3.5-turbo"
print(llm_name)
return llm_name
def create_docarray_in_memory_search(data, embedding):
db = DocArrayInMemorySearch.from_documents(data, embedding)
return db
def create_vectordb(splits, embedding):
vectordb = Chroma.from_documents(
documents=splits,
embedding=embedding,
)
# EXAMPLES:
#question = "What are major topics for this class?"
#docs = vectordb.similarity_search(question,k=4)
#print(docs[0].metadata['answers'])
return vectordb
def initialize_llm_chatbot(llm_name, temperature=0):
# create chatbot
llm = ChatOpenAI(model_name=llm_name, temperature=temperature)
# define chatbot memory
memory = ConversationBufferMemory(
memory_key="chat_history",
return_messages=True
)
return llm, memory
def create_prompt_template(input_variables):
# Build prompt
    template = """
    Start by greeting the user of the Stanford chatbot.\n
    If the user says hi/hello, respond with something like: hello and welcome to the Stanford chatbot, how can I assist you today?\n
    Try to ask for the user's name, remember it, and include the name when you respond.\n
    Also, try to memorize the conversation, and act like a human while responding.\n
    You are a QA agent that is supposed to answer the questions that you know.\n
    Always greet everyone at the beginning; you can also ask for their name so you can respond back with it to be more polite.\n
    Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible.\n
    Also, if you answered any question say something like "Do you have any other question that I can help with?".\n
    If the person says no, thank you, or I don't have any further questions, or any similar sentence, just say something like: bye, I am always here to help you with any questions that you may have.\n
    {context}\n
    Question: {question}
    Helpful Answer:"""
QA_CHAIN_PROMPT = PromptTemplate(input_variables=input_variables,template=template,)
return QA_CHAIN_PROMPT
def initialize_qa_chain(llm, vectordb, QA_CHAIN_PROMPT):
    # Run chain (note: `qa` below relies on the module-level `memory` created via initialize_llm_chatbot)
#retriever = db.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .5})
qa_chain = RetrievalQA.from_chain_type(llm,
retriever=vectordb.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .5}),
return_source_documents=True,
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT})
qa = ConversationalRetrievalChain.from_llm(
llm,
retriever=vectordb.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .3}),
memory=memory,
combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT}
)
# Examples
# test topics
#question = "Is probability a class topic?"
#result = qa_chain({"query": question})
#result["result"]
return qa_chain, qa
# Set your OpenAI API key here
set_openai_api_key("sk-xxxxxxxxxxxxx")
data_df = squad_json_to_dataframe("data/train-v1.1.json") # convert json to dataframe
data_df = preprocess(data_df)
docs = data_loader(data_df)  # keep the result under a new name so the data_loader() helper is not shadowed
splits = create_text_splits(docs)
embedding = initialize_openai_embeddings()
llm_name = get_gpt_model()
db = create_docarray_in_memory_search(docs, embedding)
vectordb = create_vectordb(splits, embedding)
llm, memory = initialize_llm_chatbot(llm_name, temperature=0)
QA_CHAIN_PROMPT = create_prompt_template(["context", "question"])
qa_chain, qa = initialize_qa_chain(llm, vectordb, QA_CHAIN_PROMPT)
def get_bot_response(user_message):
result = qa({"question": user_message})
response = result["answer"]
#result = qa_chain({"query": user_message})
#response = result["result"]
return str(response)
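# --- Serving sketch (assumption, not part of the original file): FastAPI and uvicorn are
# --- imported above but no app is ever created. A minimal endpoint around get_bot_response
# --- could look like the following; the route name "/chat" is illustrative only.
# app = FastAPI()
#
# @app.post("/chat")
# def chat(message: str = Form(...)):
#     return {"answer": get_bot_response(message)}
#
# if __name__ == "__main__":
#     uvicorn.run(app, host="0.0.0.0", port=8000)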
|
carson-edmonds/AAI-520-Chatbot-Project
|
openai_fastapi/llm.py
|
llm.py
|
py
| 7,658
|
python
|
en
|
code
| 0
|
github-code
|
6
|
17651609361
|
from builtins import print, input, int
import mariadb
import sqlite3
import psycopg2
print("Indicate which database you want to manage:")
print("1. PostgreSQL\n2. MariaDB\n3. SQLite3")
lectura = input()
lectura = int(lectura)
while True:
    if lectura == 1:
        # Create the connection
        conn = psycopg2.connect(
host="localhost",
database="bdpython",
user="openpg",
password="openpgpwd"
)
cursor = conn.cursor()
        # Drop the table if it already exists
        cursor.execute("DROP TABLE IF EXISTS ejemplo_python;")
        # Create the example table
cursor.execute("""
CREATE TABLE PEDROPUERTAS (
id serial PRIMARY KEY,
nombre varchar(50),
salario real,
fecha_alta date,
inscrito boolean
);
""")
        # Insert a few rows
cursor.execute("""
INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito)
VALUES
('Juan', 5000, '2022-01-01', True),
('María', 6000, '2022-02-01', True),
('Pedro', 7000, '2022-03-01', False),
('Ana', 8000, '2022-04-01', True),
('Lucía', 9000, '2022-05-01', False);
""")
        print("Rows added.\n")
        # Save the changes
        conn.commit()
        cursor.execute("SELECT * FROM PEDROPUERTAS;")
        # Store all the selected rows in the rows variable
        rows = cursor.fetchall()
        for i in rows:
            print(i)
        cursor.execute("DELETE FROM PEDROPUERTAS WHERE ID = 1;")
        cursor.execute("DELETE FROM PEDROPUERTAS WHERE ID = 2;")
        print("\nRows deleted.\n")
        # Save the changes
        conn.commit()
        # Select the remaining rows and print them with the for loop
cursor.execute("SELECT * FROM PEDROPUERTAS;")
rows = cursor.fetchall()
for i in rows:
print(i)
        # Close the connection
        conn.close()
        break
    elif lectura == 2:
        # Create the connection
conn = mariadb.connect(
host="localhost",
user="root",
password="usuario",
database="bdpython"
)
cursor = conn.cursor()
        # Drop the table if it exists
        cursor.execute("DROP TABLE IF EXISTS PEDROPUERTAS")
        # Create the table
cursor.execute("""
CREATE TABLE PEDROPUERTAS(
id INT AUTO_INCREMENT PRIMARY KEY,
nombre TEXT,
salario FLOAT,
fecha_alta DATE,
inscrito BOOLEAN
);
""")
        # Insert data
cursor.execute("""
INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito)
VALUES
('Juan', 5000, '2022-01-01', True),
('María', 6000, '2022-02-01', True),
('Pedro', 7000, '2022-03-01', False),
('Ana', 8000, '2022-04-01', True),
('Lucía', 9000, '2022-05-01', False);
""")
        print("Rows added.\n")
        # Save the changes
        conn.commit()
        # Select the data to display it
        cursor.execute("SELECT * FROM PEDROPUERTAS")
        # Print the rows to the screen
        rows = cursor.fetchall()
        for i in rows:
            print(i)
        # Delete rows
        cursor.execute("DELETE FROM PEDROPUERTAS WHERE ID = 1")
        cursor.execute("DELETE FROM PEDROPUERTAS WHERE ID = 2")
        print("\nRows deleted.\n")
        # Save the changes
        conn.commit()
        # Show all the remaining rows
cursor.execute("SELECT * FROM PEDROPUERTAS")
rows = cursor.fetchall()
for i in rows:
print(i)
        # Close the connection
        conn.close()
        break
    elif lectura == 3:
        # Create the connection to the bdpython.db file
        conn = sqlite3.connect('bdpython.db')
        cursor = conn.cursor()
        # Create the table
        cursor.execute('''CREATE TABLE IF NOT EXISTS PEDROPUERTAS (id INTEGER PRIMARY KEY, nombre TEXT,
                        salario REAL, fecha_alta DATE, inscrito BOOLEAN)''')
        # Insert some data
cursor.execute(
"INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito) "
"VALUES ('Juan', 5000.0, '2022-01-01', 1)")
cursor.execute(
"INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito) "
"VALUES ('Ana', 6000.0, '2022-02-01', 0)")
cursor.execute(
"INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito) "
"VALUES ('Pedro', 7000.0, '2022-03-01', 1)")
cursor.execute(
"INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito) "
"VALUES ('Sofia', 8000.0, '2022-04-01', 0)")
cursor.execute(
"INSERT INTO PEDROPUERTAS (nombre, salario, fecha_alta, inscrito) "
"VALUES ('Lucas', 9000.0, '2022-05-01', 1)")
conn.commit()
        print("Rows added.\n")
        # Select the rows
        cursor.execute("SELECT * FROM PEDROPUERTAS")
        # Store them in rows and print them with the for loop
        rows = cursor.fetchall()
        for i in rows:
            print(i)
        # Delete some rows
        cursor.execute("DELETE FROM PEDROPUERTAS WHERE id = 1")
        cursor.execute("DELETE FROM PEDROPUERTAS WHERE id = 2")
        # Commit so the deletions are actually persisted before the connection is closed
        conn.commit()
        print("\nRows deleted.\n")
        # Select the remaining rows and print them with the for loop
cursor.execute("SELECT * FROM PEDROPUERTAS")
rows = cursor.fetchall()
for i in rows:
print(i)
conn.close()
break
    else:
        print("Select a valid option:")
        lectura = input()
lectura = int(lectura)
|
PedroPuertasR/2DAM
|
2 Trimestre/SGE/ConexionBD/main.py
|
main.py
|
py
| 6,179
|
python
|
es
|
code
| 0
|
github-code
|
6
|
21321917983
|
#! /usr/bin/env python3
import datetime
import AutoPrimer as ntp
import os
class Worker(object):
def __init__(self, name, command, options, channel, poster):
# date time stamp from scheduler
self.name = name
self.status = 'init' # init, running, done, expired
# which command this worker should execute
self.command = command
# what options to pipe to the command
self.options = options
self.channel = channel
self.poster = poster
# defines possible commands
self.comms = {
'find_primers' : self.find_primers,
'connection_status' : self.connection_status,
'hello' : self.hello
}
# all commands from comms should go into one of these two categories
self.message_commands = ['connection_status', 'hello']
self.action_commands = ['find_primers']
def start_message(self):
# start and stop message commands
if self.command in self.action_commands:
now = datetime.datetime.now()
mess = f"Starting {self.command} at " + now.strftime("%Y-%m-%d.%H-%M")
return mess
# start only message commands
if self.command in self.message_commands:
return self.comms[self.command]()
def run(self):
if self.command in self.action_commands:
self.comms[self.command]()
def done_message(self):
if self.command in self.action_commands:
now = datetime.datetime.now()
            mess = f"Finished {self.command} main at " + now.strftime("%Y-%m-%d.%H-%M")
common_dict[self.name]['status'] = 'closed'
self.status = 'expired'
return mess
#######
##### Commands
#######
def find_primers(self):
"""
Initiates AutoPrimer
"""
        self.status = 'running'
        if not self.options:
            self.options = '/Volumes/i_bio/Crispr_F0_Screens/0-Genes_for_design/Genes_for_autoprimer'
        # Check the folder that will actually be submitted, not just the hard-coded default
        find_folder = os.path.isdir(self.options)
        if find_folder:
            ntp.submit_folder(self.options)
            self.status = 'done'
        else:
            self.status = 'failed'
def connection_status(self):
"""
Checks the connection status to a given folder
"""
if self.options:
folder = self.options
find_folder = os.path.isdir(self.options)
else:
folder = '/Volumes/i_bio/Crispr_F0_Screens/0-Genes_for_design/Genes_for_autoprimer'
find_folder = os.path.isdir('/Volumes/i_bio/Crispr_F0_Screens/0-Genes_for_design/Genes_for_autoprimer')
if find_folder:
response = f"Connection established with {folder}. Everything looks good, ready to run"
else:
response = f"I can't find the {folder} - it is possible that the connection is bad"
self.status = 'expired'
return response
def hello(self):
"""
Replies to the user with a friendly hello message.
"""
response = f"Hello <@{self.poster}>! Looking forward to designing some primers for you."
self.status = 'expired'
return response
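# --- Usage sketch (assumed wiring; in the real project the scheduler/Slack bot constructs these) ---
# w = Worker(name="2021-01-01.09-00", command="hello", options=None,
#            channel="#autoprimer", poster="U123456")
# print(w.start_message())   # message commands reply immediately
# w.run()                    # action commands such as 'find_primers' execute here
# print(w.done_message())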
|
jcooper036/autoprimer
|
AutoPrimer/autobot/Worker.py
|
Worker.py
|
py
| 3,361
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72498342269
|
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import WordNetError
from numpy import dot
from numpy.linalg import norm
import numpy as np
import pdb
class BaseModel:
def __init__(self, subject, predicate, _object):
#subjectFamily.getBaseRanking()[0], predicateFamily.getBaseRanking()[0], objectFamily.getBaseRanking()[0]
self.natural_model = (subject.word,predicate.word,_object.word)
self.model = (subject.getBaseRanking()[0], predicate.getBaseRanking()[0],_object.getBaseRanking()[0])
self.subject = wn.synset(self.model[0])
self.predicate = wn.synset(self.model[1])
self.object = wn.synset(self.model[2])
self.subjSim = 1.0
self.objSim = 1.0
self.predSim = 1.0
def synCompare(self,syn1,syn2):
return syn1[-4:-3] == syn2[-4:-3]
def rank(self,relation, embedder = None,w2vModel = None):
subject = wn.synset(relation[0])
predicate = wn.synset(relation[1])
_object = wn.synset(relation[2])
subjSimilarity, objSimilarity,predSimilarity = 1.,1.,1.
zero_v = np.zeros(shape=(300,))
#so we will adjust this now to perform cos_sim. For this, we need the dictionary...
if self.synCompare(self.model[0],relation[0]):
if embedder is not None and w2vModel is not None:
#perform cos_sim between self.natural_model <-- nlp and subject <-- wn
src=embedder[str(subject)[8:-2]] if str(subject)[8:-2] in embedder else zero_v
tgt=w2vModel[self.natural_model[0]] if self.natural_model[0] in w2vModel else zero_v
try:
subjSimilarity = (1.0-self.cos_sim(src,tgt))/2.
subjSimilarity = subjSimilarity[0]
except RuntimeWarning:
pass
else:
subjSimilarity = self.subject.lch_similarity(subject)
if self.synCompare(self.model[2],relation[2]):
if embedder is not None and w2vModel is not None:
#perform cos_sim between self.natural_model <-- nlp and subject <-- wn
src=embedder[str(_object)[8:-2]] if str(_object)[8:-2] in embedder else zero_v
tgt=w2vModel[self.natural_model[2]] if self.natural_model[2] in w2vModel else zero_v
try:
objSimilarity = (1.0-self.cos_sim(src,tgt))/2.
objSimilarity=objSimilarity[0]
except RuntimeWarning:
pass
else:
objSimilarity = self.object.lch_similarity(_object)
if self.synCompare(self.model[1],relation[1]):
if embedder is not None and w2vModel is not None:
#perform cos_sim between self.natural_model <-- nlp and subject <-- wn
src=embedder[str(predicate)[8:-2]] if str(predicate)[8:-2] in embedder else zero_v
tgt=w2vModel[self.natural_model[1]] if self.natural_model[1] in w2vModel else zero_v
try:
predSimilarity = (1.0-self.cos_sim(src,tgt))/2.
predSimilarity=predSimilarity[0]
except RuntimeWarning:
pass
else:
predSimilarity = self.predicate.lch_similarity(predicate)
if not predSimilarity:
predSimilarity=1.0
self.subjSim = subjSimilarity
self.objSim = objSimilarity
self.predSim = predSimilarity
self.netSim = (self.subjSim+self.predSim+self.objSim)/3.0
return (self.subjSim,self.objSim,self.predSim,self.netSim)
def getModel(self):
return self.model
def cos_sim(self,a,b):
return dot(a, b)/(norm(a)*norm(b))
|
asuprem/imag-s
|
utils/baseModel.py
|
baseModel.py
|
py
| 3,739
|
python
|
en
|
code
| 1
|
github-code
|
6
|
21521911350
|
import os
def read_file(name_file):
cook_book = {}
ingredient = []
file_path = os.getcwd()
path_to_file = os.path.join(file_path, name_file)
with open(path_to_file, 'rt', encoding='utf-8') as recipes:
for line in recipes:
meals = line.strip()
count = int(recipes.readline().strip())
for line in range(count):
record = recipes.readline().strip().split(' | ')
ingredient.append({'ingredient_name': record[0], 'quantity': record[1], 'measure': record[2]})
cook_book[meals] = ingredient
recipes.readline().strip()
ingredient = []
return cook_book
def get_list_by_dishes(dishes, cook_book):
shop_list = []
for dish in dishes:
if dish not in cook_book:
            print(f'Recipe {dish} is not in the cookbook')
return -1
for i, item in enumerate(cook_book[dish]):
if len(shop_list) == 0:
shop_list.append(item)
elif item not in shop_list:
shop_list.append(item)
else:
sum_products = int(shop_list[shop_list.index(item)]['quantity']) + int(item['quantity'])
shop_list[shop_list.index(item)]['quantity'] = str(sum_products)
return shop_list
def get_shop_list_by_dishes(dishes, person_count, cook_book):
shop_list_dishes = ""
shop_list = get_list_by_dishes(dishes, cook_book)
shop_list_dishes += "{\n"
for ingredient in shop_list:
volume_for_persons = int(ingredient['quantity']) * person_count
        shop_list_dishes += f" '{ingredient['ingredient_name']}': {{'measure': '{ingredient['measure']}', 'quantity': {volume_for_persons}}} \n"
shop_list_dishes += "}"
return shop_list_dishes
def order_files(combine_text_str, file_name = 'menu.txt', path = os.getcwd()):
    '''Write the resulting string to a file'''
with open(file_name, 'w') as text:
return text.write(combine_text_str)
shop_list = get_shop_list_by_dishes(['Омлет', 'Фахитос'], 2, read_file("recipes.txt"))
# shop_list = get_shop_list_by_dishes(['Омлет', 'Утка по-пекински'], 4, read_file("recipes.txt"))
print(shop_list)
order_files(shop_list)
|
SergeyDolgushin/homework_2_1
|
cook_book.py
|
cook_book.py
|
py
| 2,424
|
python
|
en
|
code
| 0
|
github-code
|
6
|
28634572744
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import math
from torch.autograd import Variable
pi = 0.01
class Recommend(nn.Module):
"""A model to build Recommendation system
"""
def __init__(self, past_observations, n_factors, output_dim):
super().__init__()
self.past_observations = past_observations
self.n_factors = n_factors
self.output_dim = output_dim
self.embedding = torch.nn.Embedding(self.output_dim, self.n_factors)
self.n1 = nn.Linear(self.n_factors * self.past_observations, 100)
self.n2 = nn.Linear(100, 50)
self.output = nn.Linear(50, self.output_dim)
        init.constant_(self.output.bias, -math.log((1-pi)/pi))
def forward(self, x):
""" We will have one Embedding matrix.
"""
k = []
for i in x:
val = self.embedding(i)
k.append(val.view(1, -1))
x = torch.cat(k)
x = self.n1(x)
x = F.relu(x)
x = self.n2(x)
x = F.relu(x)
x = self.output(x)
return x
class DataLoader():
def __init__(self, inputs, output, embed):
self.inputs = inputs
self.output = output
self.embed = embed
def __getitem__(self, idx):
o_in = torch.from_numpy(self.inputs[idx, :])
o_out = torch.from_numpy(self.output[idx, :])
return o_in, o_out
def __len__(self):
return self.inputs.shape[0]
class FocalLoss(nn.Module):
def __init__(self,
classes,
focusing_param=2.0,
balance_param=0.25,
use_gpu=False):
super().__init__()
self.focusing_param = focusing_param
self.balance_param = balance_param
self.classes = classes
self.use_gpu = use_gpu
def forward(self, x, y):
batch_size, next_best = y.size()[0], y.size()[1]
t = torch.FloatTensor(batch_size, self.classes)
t.zero_()
t.scatter_(1, y.data.cpu(), 1)
t = Variable(t)
sigmoid_p = F.sigmoid(x)
zeros = Variable(torch.zeros(sigmoid_p.size()))
if self.use_gpu:
zeros = zeros.cuda()
t = t.cuda()
pos_p_sub = ((t >= sigmoid_p).float() * (t-sigmoid_p)) + ((t < sigmoid_p).float() * zeros)
neg_p_sub = ((t >= zeros).float() * zeros) + ((t <= zeros).float() * sigmoid_p)
ce = (-1) * self.balance_param * (pos_p_sub ** self.focusing_param) * torch.log(torch.clamp(sigmoid_p, 1e-4, 1.0)) -(1-self.balance_param) * (neg_p_sub ** self.focusing_param) * torch.log(torch.clamp(1.0-sigmoid_p, 1e-4, 1.0))
pos_samples = float(batch_size * next_best)
return ce.sum()/pos_samples
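# --- Usage sketch (not part of the original file; the shapes below are illustrative assumptions) ---
if __name__ == "__main__":
    # 8 users, each described by their 5 most recent item ids, scored against a catalogue of 100 items
    model = Recommend(past_observations=5, n_factors=32, output_dim=100)
    criterion = FocalLoss(classes=100)
    x = torch.randint(0, 100, (8, 5))   # past item ids per user
    y = torch.randint(0, 100, (8, 3))   # "next best" item ids to predict
    logits = model(x)                   # shape (8, 100)
    loss = criterion(logits, y)
    print(logits.shape, float(loss))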
|
prakashjayy/av_mckinesy_recommendation_challenge
|
func.py
|
func.py
|
py
| 2,764
|
python
|
en
|
code
| 6
|
github-code
|
6
|
10418282793
|
from __future__ import annotations
import platform
import dolphin_memory_engine
import pid
from randovania.game_connection.executor.memory_operation import (
MemoryOperation,
MemoryOperationException,
MemoryOperationExecutor,
)
MEM1_START = 0x80000000
MEM1_END = 0x81800000
def _validate_range(address: int, size: int):
if address < MEM1_START or address + size > MEM1_END:
raise MemoryOperationException(
f"Range {address:x} -> {address + size:x} is outside of the GameCube memory range."
)
class DolphinExecutor(MemoryOperationExecutor):
def __init__(self):
super().__init__()
self.dolphin = dolphin_memory_engine
self._pid = pid.PidFile("randovania-dolphin-backend")
@property
def lock_identifier(self) -> str | None:
return "randovania-dolphin-backend"
async def connect(self) -> str | None:
if platform.system() == "Darwin":
return "macOS is not supported"
if not self.dolphin.is_hooked():
self.dolphin.hook()
if not self.dolphin.is_hooked():
return "Unable to connect to Dolphin"
try:
self._pid.create()
except pid.PidFileError:
return "Another Randovania is connected to Dolphin already"
return None
def disconnect(self):
self._pid.close()
self.dolphin.un_hook()
def _test_still_hooked(self):
try:
if len(self.dolphin.read_bytes(0x0, 4)) != 4:
raise RuntimeError("Dolphin hook didn't read the correct byte count")
except RuntimeError as e:
self.logger.warning(f"Test read for Dolphin hook didn't work: {e}")
self.dolphin.un_hook()
def is_connected(self) -> bool:
if self.dolphin.is_hooked():
self._test_still_hooked()
return self.dolphin.is_hooked()
# Game Backend Stuff
def _memory_operation(self, op: MemoryOperation, pointers: dict[int, int | None]) -> bytes | None:
op.validate_byte_sizes()
address = op.address
if op.offset is not None:
if address not in pointers:
raise MemoryOperationException(f"Invalid op: {address:x} is not in pointers")
new_address = pointers[address]
if new_address is None:
return None
address = new_address + op.offset
_validate_range(address, op.byte_count)
if not self.dolphin.is_hooked():
            raise MemoryOperationException("Lost connection to Dolphin")
try:
result = None
if op.read_byte_count is not None:
result = self.dolphin.read_bytes(address, op.read_byte_count)
if op.write_bytes is not None:
self.dolphin.write_bytes(address, op.write_bytes)
self.logger.debug(f"Wrote {op.write_bytes.hex()} to {address:x}")
except RuntimeError as e:
            raise MemoryOperationException(f"Lost connection to Dolphin: {e}")
return result
async def perform_memory_operations(self, ops: list[MemoryOperation]) -> dict[MemoryOperation, bytes]:
pointers_to_read = set()
for op in ops:
if op.offset is not None:
pointers_to_read.add(op.address)
pointers = {}
for pointer in pointers_to_read:
if not self.dolphin.is_hooked():
                raise MemoryOperationException("Lost connection to Dolphin")
try:
pointers[pointer] = self.dolphin.follow_pointers(pointer, [0])
except RuntimeError:
pointers[pointer] = None
self.logger.debug(f"Failed to read a valid pointer from {pointer:x}")
self._test_still_hooked()
if not self.dolphin.is_hooked():
            raise MemoryOperationException("Lost connection to Dolphin")
result = {}
for op in ops:
op_result = self._memory_operation(op, pointers)
if op_result is not None:
result[op] = op_result
return result
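# --- Usage sketch (assumed; the exact MemoryOperation fields are defined in memory_operation.py) ---
# executor = DolphinExecutor()
# error = await executor.connect()       # returns None on success, otherwise a reason string
# if error is None:
#     op = MemoryOperation(address=MEM1_START, read_byte_count=4)
#     results = await executor.perform_memory_operations([op])
#     print(results[op].hex())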
|
randovania/randovania
|
randovania/game_connection/executor/dolphin_executor.py
|
dolphin_executor.py
|
py
| 4,135
|
python
|
en
|
code
| 165
|
github-code
|
6
|
16166651344
|
import os
USER_HOME = os.path.expanduser('~')
PROJECT_NAME = 'modbus'
PROJECT_HOME = os.path.join(USER_HOME, 'projects', PROJECT_NAME)
DEVICE_INFO_PATH = os.path.join(PROJECT_HOME, 'device_info')
DRIVER_PATH = 'drivers'  # a folder such as dev_info/drivers could also be used
DRIVER_FULL_PATH = os.path.join(PROJECT_HOME, DRIVER_PATH)
# Maximum number of UNITs
MAX_UNIT_CNT = 10
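# Usage sketch (assumed): other modules simply import these constants, e.g.
#   from slave.slave_config import DRIVER_FULL_PATH, MAX_UNIT_CNT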
|
freemancho1/modbus
|
slave/slave_config.py
|
slave_config.py
|
py
| 449
|
python
|
en
|
code
| 0
|
github-code
|
6
|
5479394327
|
import numpy as np, base64
from .dict_array import GDict
from .array_ops import encode_np, decode_np
from .converter import as_dtype
from .type_utils import is_np_arr, get_dtype, is_dict, is_not_null, is_null, is_seq_of
from maniskill2_learn.utils.meta import Config, merge_a_to_b
def float_to_int(data, vrange=[0.0, 1.0], res=None, dtype="uint8"):
data_dtype = get_dtype(data)
if "int" in data_dtype:
return as_dtype(data, dtype) if data_dtype != dtype else data
assert data_dtype.startswith("float"), f"{type(data), data}"
min_v = np.iinfo(getattr(np, dtype)).min
max_v = np.iinfo(getattr(np, dtype)).max
if is_not_null(vrange):
assert vrange[0] < vrange[1] and is_null(res)
data = (np.clip(data, a_min=vrange[0], a_max=vrange[1]) - vrange[0]) / (vrange[1] - vrange[0]) # Normalize value to [0, 1]
data = data * max_v + (1 - data) * min_v
else:
assert is_not_null(res)
data = data / res
data = as_dtype(np.clip(data, a_min=min_v, a_max=max_v), dtype)
return data
def int_to_float(data, vrange=[0.0, 1.0], res=None, dtype="float32"):
data_dtype = get_dtype(data)
if data_dtype == "object":
assert data.shape == (1,)
data = data[0]
elif data_dtype.startswith("float"):
return as_dtype(data, dtype) if data_dtype != dtype else data
data_dtype = get_dtype(data)
assert data_dtype.startswith("int") or data_dtype.startswith("uint"), f"{data_dtype}"
min_v = np.float32(np.iinfo(getattr(np, data_dtype)).min)
max_v = np.float32(np.iinfo(getattr(np, data_dtype)).max)
if is_not_null(vrange):
assert vrange[0] < vrange[1] and is_null(res)
data = (data - min_v) / (max_v - min_v) # [0, 1]
data = data * np.float32(vrange[1]) + (1 - data) * np.float32(vrange[0])
else:
assert is_not_null(res)
res = np.float32(res)
data = data * res
return as_dtype(data, "float32")
def f64_to_f32(item):
"""
Convert all float64 data to float32
"""
from .type_utils import get_dtype
from .converter import as_dtype
sign = get_dtype(item) in ["float64", "double"]
return as_dtype(item, "float32") if sign else item
def to_f32(item):
return as_dtype(item, "float32")
def to_f16(item):
return as_dtype(item, "float16")
"""
def compress_data(data, mode='pcd', key_map=None):
if mode.startswith('pcd'):
# For general point cloud inputs
assert is_dict(data) and 'inputs' in data, f"The data type is not a usual dataset! Keys: {data.keys()}"
inputs = data['inputs']
if get_dtype(inputs['xyz']) != 'int16':
assert get_dtype(inputs['xyz']).startswith('float')
inputs['xyz'] = float_to_int(inputs['xyz'], vrange=None, res=1E-3, dtype='int16') # 1mm
if 'rgb' in inputs and get_dtype(inputs['rgb']) != 'uint8':
assert get_dtype(inputs['rgb']).startswith('float')
inputs['rgb'] = float_to_int(inputs['rgb'])
if 'labels' in data:
labels_dtype = get_dtype(data['labels'])
# At most 65535 or 32767 objects in one scene
if labels_dtype.startswith('uint'):
data['labels'] = as_dtype(data['labels'], 'uint16')
if labels_dtype.startswith('int') :
data['labels'] = as_dtype(data['labels'], 'int16')
data['inputs'] = inputs
return data
def decompress_data(data, mode='pcd', process_map=None):
deault_process_map = {
'pcd': {
'inputs/xyz': {'int16': 'to_float'},
'inputs/rgb': {'uint8', 'to_float'},
},
}
if mode not in deault_process_map and is_null(process_map):
# Do not do any process
return data
if is_null(process_map):
process_map = deault_process_map[mode]
elif mode in deault_process_map:
deault_process_map.update(process_map)
if mode.startswith('pcd'):
# For general point cloud inputs
if is_np_arr(data):
return data
assert is_dict(data) and 'xyz' in data, f"The data type is not a usual dataset! {data}"
if get_dtype(data['xyz']) == 'int16':
data['xyz'] = int_to_float(data['xyz'], vrange=None, res=1e-3)
if 'rgb' in data and get_dtype(data['rgb']) == 'uint8':
data['rgb'] = int_to_float(data['rgb'])
for key in data:
if not get_dtype(data[key]).startswith('float'):
data[key] = data[key].astype(np.float32)
return data
def encode_data(data, mode='maniskill-rgbd'):
# Encode numpy objects to binary
if mode == 'maniskill-rgbd':
from ..image import imencode
rgbd = data['rgbd']
rgb = rgbd['rgb']
seg = rgbd['seg']
depth = rgbd['depth']
num_image = depth.shape[-1]
assert num_image * 3 == rgb.shape[-1]
rgb = np.split(rgb, num_image, axis=-1)
depth = np.split(depth, num_image, axis=-1)
seg = np.split(seg, num_image, axis=-1)
assert seg[0].shape[-1] <= 8
# Concat all boolean mask of segmentation and add the one
seg = [np.packbits(np.concatenate([_, np.ones_like(_[..., :1])], axis=-1), axis=-1, bitorder='little') for _ in seg]
seg = [imencode(_) for _ in seg]
rgb = [imencode(_) for _ in rgb]
depth = [imencode(_) for _ in depth]
data['rgbd'] = {'rgb': rgb, 'depth': depth, 'seg': seg}
return data
elif mode == 'pcd-variable':
assert is_dict(data) and 'inputs' in data, f"The data type is not a usual dataset! Keys: {data.keys()}"
inputs = data['inputs']
data['inputs']['xyz'] = encode_np(inputs['xyz'], use_pkl=True)
data['labels'] = encode_np(data['labels'], use_pkl=True)
if 'rgb' in inputs:
data['inputs']['rgb'] = encode_np(inputs['rgb'], use_pkl=True)
else:
raise NotImplementedError()
def decode_data(data, mode='maniskill-rgbd', **kwargs):
# From binary string like pkl object of png to numpy array
# def imdecode(sparse_array):
# if isinstance(sparse_array, (bytes, np.void)):
# sparse_array = np.frombuffer(base64.binascii.a2b_base64(sparse_array), dtype=np.uint8)
# return cv2.imdecode(sparse_array, -1)
if mode == 'maniskill-rgbd':
from ..image import imdecode
rgbd = data['rgbd']
rgb = rgbd['rgb']
seg = rgbd['seg']
depth = rgbd['depth']
seg = [imdecode(_[0])[..., None] for _ in seg]
num_segs = int(seg[0][0, 0, 0]).bit_length() - 1
seg = np.concatenate([np.unpackbits(_, axis=-1, count=num_segs, bitorder='little') for _ in seg], axis=-1).astype(np.bool_)
rgb = np.concatenate([imdecode(_[0]) for _ in rgb], axis=-1)
depth = np.concatenate([imdecode(_[0])[..., None] for _ in depth], axis=-1) # uint16
data['rgbd'] = {'rgb': rgb, 'depth': depth, 'seg': seg}
return data
elif mode == 'pcd-variable':
assert is_dict(data) and 'inputs' in data, f"The data type is not a usual dataset! Keys: {data.keys()}"
inputs = data['inputs']
inputs['xyz'] = decode_np(inputs['xyz'][0], dtype=np.int16).reshape(-1, 3)
if 'rgb' in inputs:
inputs['rgb'] = decode_np(inputs['rgb'][0], dtype=np.uint8).reshape(-1, 3)
data['labels'] = decode_np(data['labels'][0], dtype=np.uint16)
data['inputs'] = inputs
return data
else:
raise NotImplementedError()
"""
class DataCoder:
"""
    To reduce the file size when storing data for deep learning, we first need to compress the data.
    If the data cannot be represented as a numpy array, we can encode it into a binary string and store it in hdf5.
"""
ENCODE_SETTINGS = {
"maniskill-rgbd": {
# 'obs/rgbd/xyz': ,
"obs/rgbd/rgb": "encode_rgb_png",
"obs/rgbd/depth": "encode_depth_png",
"obs/rgbd/seg": ("encode_seg_mask", 3),
# 'obs/rgbd/seg': 'encode_seg_mask',
},
"pcd-variable": {
"inputs/xyz": "encode_np",
"inputs/rgb": "encode_np",
"labels": "encode_np",
"vote_xyz": "encode_np",
"vote_center": "encode_np",
},
"pcd": {
"vote_center": "encode_np",
},
}
COMPRESS_SETTINGS = {
"maniskill-rgbd": {
"obs/rgbd/rgb": ("np_compress", [0.0, 1.0], None, "uint8"),
"obs/rgbd/depth": ("np_compress", [0.0, 1.0], None, "uint16"),
},
"pcd": {
"inputs/xyz": ("np_compress", None, 1e-3, "int16"),
"inputs/rgb": ("np_compress", [0.0, 1.0], None, "uint8"),
"xyz": ("np_compress", None, 1e-3, "int16"),
"rgb": ("np_compress", [0.0, 1.0], None, "uint8"),
"vote_xyz": ("np_compress", None, 1e-3, "int16"),
"vote_center": ("np_compress", None, 1e-3, "int16"),
},
}
def __init__(self, mode=None, encode_cfg=None, compress_cfg=None, var_len_item=False):
self.mode = mode
self.var_len_item = var_len_item
encode_cfg = merge_a_to_b(encode_cfg, self.ENCODE_SETTINGS.get(mode, None))
compress_cfg = merge_a_to_b(compress_cfg, self.COMPRESS_SETTINGS.get(mode, None))
pop_null = lambda _: {key: value for key, value in _.items() if is_not_null(value)}
self.encode_cfg = None if is_null(encode_cfg) else pop_null(encode_cfg)
self.compress_cfg = None if is_null(compress_cfg) else pop_null(compress_cfg)
# Encode functions [For single item]
def uint8_png(self, arr, encode):
from ..image import imencode, imdecode
if encode:
num_image = arr.shape[-1] // 3
assert num_image * 3 == arr.shape[-1]
arr = np.split(arr, num_image, axis=-1)
arr = [imencode(_) for _ in arr]
else:
arr = np.concatenate([imdecode(_[0]) for _ in arr], axis=-1)
return arr
def uint16_png(self, arr, encode):
from ..image import imencode, imdecode
if encode:
num_image = arr.shape[-1]
arr = np.split(arr, num_image, axis=-1)
arr = [imencode(_) for _ in arr]
else:
arr = np.concatenate([imdecode(_[0]) for _ in arr], axis=-1)
return arr
def seg_png(self, arr, encode, num_images=None):
from ..image import imencode, imdecode
if encode:
arr = np.split(arr, num_images, axis=-1)
assert arr[0].shape[-1] <= 8
arr = [np.packbits(np.concatenate([_, np.ones_like(_[..., :1])], axis=-1), axis=-1, bitorder="little") for _ in arr]
arr = [imencode(_) for _ in arr]
else:
arr = [imdecode(_[0])[..., None] for _ in arr]
num_segs = int(arr[0][0, 0, 0]).bit_length() - 1
arr = np.concatenate([np.unpackbits(_, axis=-1, count=num_segs, bitorder="little") for _ in arr], axis=-1).astype(np.bool_)
return arr
def encode_np(self, arr, encode, *args):
if encode:
return encode_np(arr, *args)
else:
return decode_np(arr, *args)
# Compress functions [For batched inputs]
def np_compress(self, arr, encode, *args):
if encode:
return float_to_int(arr, *args)
else:
return int_to_float(arr, *args)
@GDict.wrapper(class_method=True)
def _apply(self, data, cfg, encode=False):
if encode:
data = data.f64_to_f32()
if is_null(cfg):
return data
for key, item in cfg.items():
if isinstance(item, (list, tuple)):
args = item[1:]
item = item[0]
else:
args = []
if key in data:
# if key == 'inputs/rgb':
# print('Before', data[key])
# print(data.keys(), key, encode, args, item, data[key].dtype)
# exit(0)
# import time
# st = time.time()
data[key] = getattr(self, item)(data[key], encode, *args)
# print(time.time() - st, key)
# print(data.keys(), key, encode, args, item, data[key].dtype)
# if key == 'inputs/rgb':
# print('After', data[key])
# print(GDict(data).dtype, self.np_compress)
# exit(0)
return data
def encode(self, data):
return self._apply(data, self.encode_cfg, True)
def decode(self, data):
return self._apply(data, self.encode_cfg, False)
def compress(self, data):
return self._apply(data, self.compress_cfg, True)
def decompress(self, data):
return self._apply(data, self.compress_cfg, False)
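# --- Usage sketch (assumed, illustration only) ---
# coder = DataCoder(mode="pcd")
# compressed = coder.compress({"xyz": xyz_float32, "rgb": rgb_float32})  # float32 -> int16 / uint8
# restored = coder.decompress(compressed)                                # back to float32
# The round trip is lossy only up to the 1e-3 resolution (xyz) and the uint8 quantisation (rgb).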
|
haosulab/ManiSkill2-Learn
|
maniskill2_learn/utils/data/compression.py
|
compression.py
|
py
| 12,857
|
python
|
en
|
code
| 53
|
github-code
|
6
|
21836861999
|
import sys
sys.stdin = open("../inputdata/swea_5202.txt", "r")
for test in range(int(input())):
n = int(input())
trucks = [list(map(int, input().split())) for _ in range(n)]
limit, res = 24, 0
while limit > 0:
tmp = []
for truck in trucks:
if truck[1] <= limit:
tmp.append(truck)
if not tmp:
break
pick = max(tmp)
limit = pick[0]
trucks.pop(trucks.index(pick))
res += 1
print('#{} {}'.format(test+1, res))
|
liza0525/algorithm-study
|
SWEA/swea_5202.py
|
swea_5202.py
|
py
| 524
|
python
|
en
|
code
| 0
|
github-code
|
6
|
30509139595
|
"""Add sessions
Revision ID: 821a722fb6c5
Revises: 371a1b269d3f
Create Date: 2017-05-04 14:38:19.372886
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '821a722fb6c5'
down_revision = '371a1b269d3f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('session',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('token', sa.String(length=32), nullable=True),
sa.Column('expires', sa.DateTime(), nullable=True),
sa.Column('awaiting_mfa', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('token')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('session')
# ### end Alembic commands ###
|
UltrosBot/Ultros-site
|
migrations/versions/821a722fb6c5_add_sessions.py
|
821a722fb6c5_add_sessions.py
|
py
| 1,008
|
python
|
en
|
code
| 2
|
github-code
|
6
|
40225040255
|
import tkinter as tk
from .Dialog import Dialog
from ..internals.Machine import MachineBuilder
from ..internals.MachineExceptions import *
class InitDialog(Dialog):
def __init__(self, master):
self._builder = MachineBuilder()
Dialog.__init__(self, master, "Initialize machine")
def create_widgets(self, master):
self.l1 = tk.Label(master, text="States: ")
self.l2 = tk.Label(master, text="Alphabet: ")
self.l3 = tk.Label(master, text="Initial state: ")
self.l4 = tk.Label(master, text="Initial data: ")
self.l1.grid(row=0, sticky=tk.E)
self.l2.grid(row=1, sticky=tk.E)
self.l3.grid(row=2, sticky=tk.E)
self.l4.grid(row=3, sticky=tk.E)
self.e1 = tk.Entry(master, width=40)
self.e2 = tk.Entry(master, width=40)
self.e3 = tk.Entry(master, width=40)
self.e4 = tk.Entry(master, width=40)
self.e1.grid(row=0, column=1)
self.e2.grid(row=1, column=1)
self.e3.grid(row=2, column=1)
self.e4.grid(row=3, column=1)
return self.e1 # initial focus
def apply(self):
return (self._machine, self._builder)
def validate(self):
try:
self._builder.slist = self.e1.get().split(', ')
self._builder.alphabet = self.e2.get()
self._builder.endsym = ' ' # default
self._builder.initial_state = self.e3.get()
self._builder.initial_data = self.e4.get()
self._machine = self._builder.machine # for validation purposes only
except StateException:
self.initial_focus = self.e1
self.l1.config(fg='red')
self.l2.config(fg='black')
self.l3.config(fg='red')
self.l4.config(fg='black')
return False
except AlphabetException:
self.initial_focus = self.e2
self.l1.config(fg='black')
self.l2.config(fg='red')
self.l3.config(fg='black')
self.l4.config(fg='red')
return False
except IncompleteMachineException:
if self._builder.slist == []:
self.initial_focus = self.e1
elif self._builder.alphabet == '':
self.initial_focus = self.e2
elif self._builder.initial_state == '':
self.initial_focus = self.e3
elif self._builder.initial_data == '':
self.initial_focus = self.e4
return False
return True
|
rodentrabies/TMCode
|
src/gui/InitDialog.py
|
InitDialog.py
|
py
| 2,522
|
python
|
en
|
code
| 0
|
github-code
|
6
|
10205108247
|
from bson.objectid import ObjectId
from pyramid.httpexceptions import HTTPFound
from pyramid.security import remember, forget
from pyramid.url import route_url
from pyramid.view import view_config
from .forms import TaskForm, TaskUpdateForm
@view_config(route_name='home', renderer='templates/home.jinja2')
def task_list(request):
tasks = request.db['tasks'].find()
return {
'tasks': tasks,
'project': 'task_manager',
}
@view_config(route_name='tadd', renderer='templates/add.jinja2', permission='create')
def task_add(request):
form = TaskForm(request.POST, None)
if request.POST and form.validate():
entry = form.data
request.db['tasks'].save(entry)
return HTTPFound(route_url('home', request))
return {'form': form}
@view_config(route_name='tedit', renderer='templates/edit.jinja2', permission='edit')
def task_edit(request):
id_task = request.matchdict.get('id', None)
item = request.db['tasks'].find_one({'_id': ObjectId(id_task)})
form = TaskUpdateForm(request.POST,
id=id_task, name=item['name'],
active=item['active'])
if request.method == 'POST' and form.validate():
entry = form.data
entry['_id'] = ObjectId(entry.pop('id'))
request.db['tasks'].save(entry)
return HTTPFound(route_url('home', request))
return {'form': form}
@view_config(route_name='tdelete', permission='delete')
def task_delete(request):
id_task = request.matchdict.get('id', None)
if id_task:
request.db['tasks'].remove({'_id': ObjectId(id_task)})
return HTTPFound(route_url('home', request))
@view_config(route_name='auth', match_param='action=in', renderer='string', request_method='POST')
@view_config(route_name='auth', match_param='action=out', renderer='string')
def sign_in_out(request):
username = request.POST.get('username')
if username:
user = request.db['users'].find_one({'name': username})
if user and user['password'] == request.POST.get('password'):
headers = remember(request, user['name'])
else:
headers = forget(request)
else:
headers = forget(request)
return HTTPFound(location=request.route_url('home'), headers=headers)
|
albertosdneto/tutorial_pyramid_mongo
|
task_manager/views.py
|
views.py
|
py
| 2,297
|
python
|
en
|
code
| 0
|
github-code
|
6
|
33008999633
|
'''
Script to process nbn coverage map csv files, transform and load into a MongoDB
Author: Rommel Poggenberg (29860571)
Date created: 19th April 2021 (FIT5147 TP2 2021)
'''
import csv
import pymongo
import pprint
import sys
import datetime
pp = pprint.PrettyPrinter(indent=4)
state_lookup={2:'New South Wales',3:'Victoria',4:'Queensland',5:'South Australia',6:'Western Australia',7:'Tasmania',8:'Northern Territory',9:'Australian Capital Territory'}
filter_tech={'Fibre to the Basement':'fttb', 'Fibre to the Curb':'fttc', 'Fibre to the Node':'fttn', 'Fibre to the Premises':'fttp', 'Fixed Wireless':'fixed_wireless', 'Hybrid Fibre Coaxial (HFC)':'hfc'}
filter_state={'Australian Capital Territory':'act', 'New South Wales':'nsw', 'Northern Territory':'nt', 'Queensland':'ql','South Australia':'sa', 'Tasmania':'tas', 'Victoria':'vic', 'Western Australia': 'wa'}
nbn_map_data={}
#Cut-off date by which nbn declared the rollout would be built: 30th June 2020
nbn_build_deadline=datetime.datetime.strptime('30/06/2020 00:00:00', '%d/%m/%Y %H:%M:%S')
all_techs=['ALL_FixedWireless','ALL_FTTB','ALL_FTTC','ALL_FTTN','ALL_FTTP','ALL_HFC']
#Read CSV files
for tech in all_techs:
with open('data\\'+tech+'.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
#print(row)
data={}
data['Technology_Type']=row['Technology_Type']
data['Ready_for_Service_Date']=row['Ready_for_Service_Date']
try:
if datetime.datetime.strptime(row['Ready_for_Service_Date']+' 00:00:00', '%d/%m/%Y %H:%M:%S') <= nbn_build_deadline:
data['RFS_On_Schedule']=True
else:
data['RFS_On_Schedule']=False
except:
data['RFS_On_Schedule']=False
data['Area_ID']=row['Area_ID']
data['Service_Status']=row['Service_Status']
data['state']=state_lookup[int(row['Area_ID'][0])]
data['longitude']=row['longitude']
data['latitude']=row['latitude']
data['markerscale']=1
if data['state'] in nbn_map_data.keys():
if data['Technology_Type'] in nbn_map_data[data['state']].keys():
map_records=nbn_map_data[data['state']][data['Technology_Type']]
map_records.append(data)
nbn_map_data[data['state']][data['Technology_Type']]=map_records
else:
nbn_map_data[data['state']][data['Technology_Type']]=[data]
else:
nbn_map_data[data['state']]={}
nbn_map_data[data['state']][data['Technology_Type']]=[data]
#pp.pprint(nbn_map_data)
#sys.exit(0)
bar_chart_all={}
bar_chart_ontime={}
bar_chart_after={}
#Calculate areas within original build deadline
for state in nbn_map_data.keys():
for tech in nbn_map_data[state].keys():
for record in nbn_map_data[state][tech]:
try:
year=int(record['Ready_for_Service_Date'].split('/')[2])
except:
year=2024
year=int(year)
technology=record['Technology_Type']
#Schedule of all areas in all time
if year in bar_chart_all.keys():
if technology in bar_chart_all[year].keys():
bar_chart_all[year][technology]=bar_chart_all[year][technology]+1
else:
bar_chart_all[year]= {
"Fibre to the Basement": 0,
"Fibre to the Curb": 0,
"Fibre to the Node": 0,
"Fibre to the Premises": 0,
"Fixed Wireless": 0,
"Hybrid Fibre Coaxial (HFC)": 0,
"year": str(year)
}
bar_chart_all[year][technology]=1
            #Find areas which were built on schedule
if record['RFS_On_Schedule']==True:
if year in bar_chart_ontime.keys():
if technology in bar_chart_ontime[year].keys():
bar_chart_ontime[year][technology]=bar_chart_ontime[year][technology]+1
else:
bar_chart_ontime[year]= {
"Fibre to the Basement": 0,
"Fibre to the Curb": 0,
"Fibre to the Node": 0,
"Fibre to the Premises": 0,
"Fixed Wireless": 0,
"Hybrid Fibre Coaxial (HFC)": 0,
"year": str(year)
}
bar_chart_ontime[year][technology]=1
#Find areas which will be built after the deadline
if record['RFS_On_Schedule']==False:
if year in bar_chart_after.keys():
if technology in bar_chart_after[year].keys():
bar_chart_after[year][technology]=bar_chart_after[year][technology]+1
else:
bar_chart_after[year]= {
"Fibre to the Basement": 0,
"Fibre to the Curb": 0,
"Fibre to the Node": 0,
"Fibre to the Premises": 0,
"Fixed Wireless": 0,
"Hybrid Fibre Coaxial (HFC)": 0,
"year": str(year)
}
bar_chart_after[year][technology]=1
#Get all schedules in a dictionary
raw_values={'all':bar_chart_all,'ontime':bar_chart_ontime,'after':bar_chart_after}
rollout_schedule={}
for key in raw_values:
for year in sorted(raw_values[key].keys()):
if key in rollout_schedule.keys():
values=rollout_schedule[key]
values.append(raw_values[key][year])
rollout_schedule[key]=values
else:
rollout_schedule[key]=[raw_values[key][year]]
#Write dictionaries to mongodb
client = pymongo.MongoClient("mongodb://localhost:27017/")
db = client["nbn"]
col = db["map"]
col2 = db["chart"]
for state in nbn_map_data.keys():
for tech in nbn_map_data[state].keys():
print(state, tech)
col.insert_one({'technology_type':filter_tech[tech],'state':filter_state[state],'results':nbn_map_data[state][tech]})
for timeline in rollout_schedule.keys():
print(timeline)
col2.insert_one({'schedule':timeline,'results':rollout_schedule[timeline]})
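#Verification sketch (assumed, not part of the original load): read one document back, e.g.
#   doc = db["map"].find_one({'technology_type': 'fttp', 'state': 'vic'})
#   print(len(doc['results']), 'areas for FTTP in Victoria')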
|
rommjp/NBN_Rollout_Visualisation
|
write_nbn_data_to_mongodb.py
|
write_nbn_data_to_mongodb.py
|
py
| 5,360
|
python
|
en
|
code
| 0
|
github-code
|
6
|
31974764531
|
# -*- coding: utf-8 -*-
import json
import scrapy
from club_activity_friends_details.items import ClubActivityFriendsDetailsItem
from lib.GetCurrentTime import get_current_date
from models.club import StructureStartUrl
class AutoHomeClubActivityFriendsDetailsSpider(scrapy.Spider):
name = 'auto_home_club_activity_friends_details'
club_id_list = StructureStartUrl().get_bbs_id()
club_index = 0
page_index = 1
base_url = "https://club.app.autohome.com.cn/club_v8.2.0/club/getactivityfriendlist-pm2-b%s-t2-c0-u66230826-p%s-s20.json"
start_urls = [base_url % (club_id_list[club_index], page_index)]
def parse(self, response):
item = ClubActivityFriendsDetailsItem()
content = json.loads(response.body.decode(), strict=False)
        activity_friend_list = content["result"]["activityfriendlist"]  # active club members
        club_master_list = content["result"]["clubmasterlist"]  # recommended club members
for club_master in club_master_list:
item["bbs_id"] = self.club_id_list[self.club_index]
item["user_id"] = club_master["userid"]
item["recommend"] = 0
item["time"] = get_current_date()
yield item
for activity_friend in activity_friend_list:
item["bbs_id"] = self.club_id_list[self.club_index]
item["user_id"] = activity_friend["userid"]
item["recommend"] = 1
item["time"] = get_current_date()
yield item
self.page_index += 1
if self.page_index <= content["result"]["pagecount"]:
print(self.page_index)
url = self.base_url % (self.club_id_list[self.club_index], self.page_index)
print(url)
yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
else:
self.club_index += 1
if self.club_index < len(self.club_id_list):
self.page_index = 1
url = self.base_url % (self.club_id_list[self.club_index], self.page_index)
yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
|
CY113/Cars
|
club_activity_friends_details/club_activity_friends_details/spiders/auto_home_club_activity_friends_details.py
|
auto_home_club_activity_friends_details.py
|
py
| 2,113
|
python
|
en
|
code
| 10
|
github-code
|
6
|
12989554153
|
from PIL import Image, ImageDraw, ImageFont
import calendar, datetime, holidays
def get_image_calendar(dates, year, month):
width, height = 500, 500
img = Image.new('RGB', (width, height), color='white')
draw = ImageDraw.Draw(img)
font = ImageFont.truetype('arial.ttf', size=30)
dict_for_month = {"January": "Январь", "February": "Февраль", "March": "Март", "April": "Апрель", "May": "Май",
"June":
"Июнь", "July": "Июль", "August": "Август", "September": "Сентябрь", "October": "Октябрь",
"November": "Ноябрь",
"December": "Декабрь"}
title = dict_for_month[calendar.month_name[month]] + ' ' + str(year)
title_size = draw.textlength(title, font=font)
draw.text(((width - 100) // 2, 20), title, font=font, fill='black')
cal = calendar.monthcalendar(year, month)
now = datetime.datetime.now()
cell_width = (width - 40) // 7
cell_height = (height - 100) // len(cal)
    # draw the days of the week
days = ["Пн", "Вт", "Ср", "Чт", "Пт", "Сб", "Вс"]
for i, day in enumerate(days):
day_width, day_height = draw.textlength(day, font=font), 20
        if day == "Сб" or day == "Вс":
draw.text((20 + i * cell_width + (cell_width - day_width) // 2, 60), day, font=font, fill="red")
else:
draw.text((20 + i * cell_width + (cell_width - day_width) // 2, 60), day, font=font, fill="black")
Belarus_holidays_list = holidays.BY(years=year)
bel_holidays = [(i.month, i.day) for i in Belarus_holidays_list]
hol_dict = {1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: [], 10: [], 11: [], 12: []}
for i in hol_dict:
for j in bel_holidays:
if i == j[0]:
hol_dict[i].append(j[1])
for i, row in enumerate(cal):
for j, day in enumerate(row):
if day != 0 and day not in dates:
day_width, day_height = draw.textlength(str(day), font=font), 20
if now.year == year and now.month == month and day < now.day:
draw.text((20 + j * cell_width + (cell_width - day_width) // 2,
100 + i * cell_height + (cell_height - day_height) // 2), str(day), font=font,
fill="gray")
elif row[-1] == day or row[-2] == day or day in hol_dict[month]:
draw.text((20 + j * cell_width + (cell_width - day_width) // 2,
100 + i * cell_height + (cell_height - day_height) // 2), str(day), font=font,
fill="red")
else:
draw.text((20 + j * cell_width + (cell_width - day_width) // 2,
100 + i * cell_height + (cell_height - day_height) // 2), str(day), font=font,
fill="black")
    # save the image to a file
img.save('calendar.png')
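# Usage sketch (assumed): render May 2024 with the 9th and 10th left blank; writes calendar.png
# get_image_calendar(dates=[9, 10], year=2024, month=5)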
|
michaelgershov/Calendar
|
calendar_image.py
|
calendar_image.py
|
py
| 3,088
|
python
|
en
|
code
| 0
|
github-code
|
6
|
34832286900
|
import cv2
vid = cv2.VideoCapture("my_video.mp4")
while(1):
ret, frame = vid.read()
if ret:
frame = cv2.resize(frame, (0, 0), fx = 1.2, fy = 1.2)
cv2.imshow("video", frame)
else:
break
if cv2.waitKey(10000) == ord("q"):
break
|
jim2832/Image-Recognition
|
video2.py
|
video2.py
|
py
| 277
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19114657314
|
import pandas as pd
import torch
import torch.nn as nn
import math
import download
import pickle
import random
max_seq_len=34
pd.set_option('display.max_colwidth', None)
print("here")
# Importing flask module in the project is mandatory
# An object of Flask class is our WSGI application.
from flask import Flask, request, jsonify
import json
#creating model template
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=max_seq_len):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
if self.pe.size(0) < x.size(0):
self.pe = self.pe.repeat(x.size(0), 1, 1)
self.pe = self.pe[:x.size(0), :, :]
x = x + self.pe
return self.dropout(x)
class ImageCaptionModel(nn.Module):
def __init__(self, n_head, n_decoder_layer, vocab_size, embedding_size):
super(ImageCaptionModel, self).__init__()
self.pos_encoder = PositionalEncoding(embedding_size, 0.1)
self.TransformerDecoderLayer = nn.TransformerDecoderLayer(d_model=embedding_size, nhead=n_head)
self.TransformerDecoder = nn.TransformerDecoder(decoder_layer=self.TransformerDecoderLayer,
num_layers=n_decoder_layer)
self.embedding_size = embedding_size
self.embedding = nn.Embedding(vocab_size, embedding_size)
self.last_linear_layer = nn.Linear(embedding_size, vocab_size)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embedding.weight.data.uniform_(-initrange, initrange)
self.last_linear_layer.bias.data.zero_()
self.last_linear_layer.weight.data.uniform_(-initrange, initrange)
def generate_Mask(self, size, decoder_inp):
decoder_input_mask = (torch.triu(torch.ones(size, size)) == 1).transpose(0, 1)
decoder_input_mask = decoder_input_mask.float().masked_fill(decoder_input_mask == 0, float('-inf')).masked_fill(
decoder_input_mask == 1, float(0.0))
decoder_input_pad_mask = decoder_inp.float().masked_fill(decoder_inp == 0, float(0.0)).masked_fill(
decoder_inp > 0, float(1.0))
decoder_input_pad_mask_bool = decoder_inp == 0
return decoder_input_mask, decoder_input_pad_mask, decoder_input_pad_mask_bool
def forward(self, encoded_image, decoder_inp):
encoded_image = encoded_image.permute(1, 0, 2)
decoder_inp_embed = self.embedding(decoder_inp) * math.sqrt(self.embedding_size)
decoder_inp_embed = self.pos_encoder(decoder_inp_embed)
decoder_inp_embed = decoder_inp_embed.permute(1, 0, 2)
decoder_input_mask, decoder_input_pad_mask, decoder_input_pad_mask_bool = self.generate_Mask(
decoder_inp.size(1), decoder_inp)
decoder_input_mask = decoder_input_mask
decoder_input_pad_mask = decoder_input_pad_mask
decoder_input_pad_mask_bool = decoder_input_pad_mask_bool
decoder_output = self.TransformerDecoder(tgt=decoder_inp_embed, memory=encoded_image,
tgt_mask=decoder_input_mask,
tgt_key_padding_mask=decoder_input_pad_mask_bool)
final_output = self.last_linear_layer(decoder_output)
return final_output, decoder_input_pad_mask
# Flask constructor takes the name of
# current module (__name__) as argument.
app = Flask(__name__)
# @app.route('/init')
# def function_to_run_only_once():
# loading pickle data
dbfile = open('index_to_word', 'rb')
index_to_word = pickle.load(dbfile)
print('loading indextoword')
dbfile.close()
dbfile = open('word_to_index', 'rb')
word_to_index = pickle.load(dbfile)
print('loading wordtoindex')
dbfile.close()
# download the model from Google Drive
download.download_from_drive()
print('downloading model')
## Generate Captions !!!
model = ImageCaptionModel(16, 4, 8812, 512)
model.load_state_dict(torch.load("model_state.pth", map_location=torch.device('cpu')) )
model.eval()
# model = torch.load('./BestModel1', map_location=torch.device('cpu'))
print('loading model')
start_token = word_to_index['<start>']
end_token = word_to_index['<end>']
pad_token = word_to_index['<pad>']
print(start_token, end_token, pad_token)
K = 1
# The route() function of the Flask class is a decorator,
# which tells the application which URL should call
# the associated function.
@app.route('/')
# ‘/’ URL is bound with hello_world() function.
def hello_world():
return jsonify({'status': 'Server 2 is UP ...'})
@app.route('/foo', methods=['POST'])
def foo():
data = request.json
image_data_torch = torch.tensor(data['image_embedding'])
print(image_data_torch.shape)
img_embed = image_data_torch.permute(0, 2, 3, 1)
img_embed = img_embed.view(img_embed.size(0), -1, img_embed.size(3))
input_seq = [pad_token] * max_seq_len
input_seq[0] = start_token
input_seq = torch.tensor(input_seq).unsqueeze(0)
predicted_sentence = []
# return {'tt':"ok"}
with torch.no_grad():
for eval_iter in range(0, max_seq_len):
output, padding_mask = model.forward(img_embed, input_seq)
output = output[eval_iter, 0, :]
values = torch.topk(output, K).values.tolist()
indices = torch.topk(output, K).indices.tolist()
next_word_index = random.choices(indices, values, k=1)[0]
next_word = index_to_word[next_word_index]
input_seq[:, eval_iter + 1] = next_word_index
if next_word == '<end>':
break
predicted_sentence.append(next_word)
print("\n")
print("Predicted caption : ")
predicted_sentence[0]=predicted_sentence[0][0].upper()+predicted_sentence[0][1:]
sentence = " ".join(predicted_sentence + ['.'])
print(sentence)
return {'prediction': f'{sentence}'}
# main driver function
if __name__ == '__main__':
# run() method of Flask class runs the application
# on the local development server.
app.run(port=5001,use_reloader=False)
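# --- Client sketch (assumption: the image embedding is a CNN feature map with 512 channels,
# --- shaped (1, 512, H, W) and sent as nested lists; endpoint and port match app.run above) ---
# import requests, torch
# embedding = torch.rand(1, 512, 7, 7)
# r = requests.post("http://localhost:5001/foo", json={"image_embedding": embedding.tolist()})
# print(r.json()["prediction"])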
|
razerspeed/Image-Caption-Generation
|
server2.py
|
server2.py
|
py
| 6,750
|
python
|
en
|
code
| 1
|
github-code
|
6
|
7126327995
|
from django.urls import path
from . import views
# which URL is handled by which view
urlpatterns = [
path('', views.post_list, name='post_list'),
path('post/<int:pk>/', views.post_detail, name='post_detail'),
path('post/new/', views.post_new, name='post_new'),
path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
path('post/t/', views.post_t, name='post_t'),
]
repo_name: x2wing/django_l2 | sub_path: blog/urls.py | file_name: urls.py | file_ext: py | file_size_in_byte: 424 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 75131926908
"""
在python中,只有函数才是Callable(可Call的对象才是Callable)。但是tuple是一个数据类型,当然是不能Call(翻译成:使唤,hhh可能会比较容易理解)
"""
import cv2 as cv
import numpy as np
def negation_pixels(image):
print(image.shape)
height = image.shape[0]
width = image.shape[1]
channels = image.shape[2]
print("width: %s height: %s channels: %s " % (width, height, channels))
    # Walk every channel of every pixel and invert its value
    for row in range(height):
        for col in range(width):
            for c in range(channels):
                pv = image[row, col, c]  # pv is the pixel value at row `row`, column `col`, channel `c`
                image[row, col, c] = 255 - pv  # invert the pixel value
cv.imshow("negation pixels_1", image)
scr = cv.imread(r"beautyful_view.jpg")
cv.imshow("before", scr)
negation_pixels(scr)
# Note: negation_pixels already inverted `scr` in place; cv.bitwise_not returns a new array
# rather than modifying its input, so capture the result before displaying it.
scr_not = cv.bitwise_not(scr)
cv.imshow("negation pixels_2", scr_not)
cv.waitKey(0)
cv.destroyAllWindows()
repo_name: hahahei957/NewProject_Opencv2 | sub_path: 04_像素取反.py | file_name: 04_像素取反.py | file_ext: py | file_size_in_byte: 1,037 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 6606098136
from math import floor
def solution(numbers):
    answer = []
    for number in numbers:
        # Scan the binary digits from the least-significant bit; bin() prefixes '0b',
        # so reaching 'b' means every bit scanned so far was 1.
        for idx, elm in enumerate(bin(number)[::-1]):
            if elm == '0' or elm == 'b':
                # Set the lowest 0 bit (2**idx) and clear the 1 bit just below it;
                # floor(2**(idx-1)) becomes 0 when idx == 0, so nothing is cleared then.
                target = floor(2**(idx-1)) ^ number | 2**idx
                answer.append(target)
                break
    return answer
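# Worked examples (my addition) for the bit trick above:
#   solution([2]) -> [3]    # 2 = 0b10; the lowest 0 bit is bit 0, so the result is just 2 + 1
#   solution([7]) -> [11]   # 7 = 0b0111; set bit 3 and clear bit 2 -> 0b1011 = 11, which differs
#                           # from 7 in exactly two bits and is the smallest such larger number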
repo_name: JeongGod/Algo-study | sub_path: hyeonjun/16week/p77885.py | file_name: p77885.py | file_ext: py | file_size_in_byte: 326 | program_lang: python | lang: en | doc_type: code | stars: 7 | dataset: github-code | pt: 6

seq_id: 26040960706
from __future__ import annotations
from textwrap import dedent
from typing import Callable
import pytest
from pants.backend.python.goals.publish import (
PublishPythonPackageFieldSet,
PublishPythonPackageRequest,
rules,
)
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.target_types import PythonDistribution, PythonSourcesGeneratorTarget
from pants.backend.python.util_rules import pex_from_targets
from pants.core.goals.package import BuiltPackage, BuiltPackageArtifact
from pants.core.goals.publish import PublishPackages, PublishProcesses
from pants.core.util_rules.config_files import rules as config_files_rules
from pants.engine.addresses import Address
from pants.engine.fs import EMPTY_DIGEST
from pants.engine.process import Process
from pants.testutil.process_util import process_assertion
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import QueryRule
from pants.util.frozendict import FrozenDict
@pytest.fixture
def rule_runner() -> PythonRuleRunner:
rule_runner = PythonRuleRunner(
preserve_tmpdirs=True,
rules=[
*config_files_rules(),
*pex_from_targets.rules(),
*rules(),
QueryRule(PublishProcesses, [PublishPythonPackageRequest]),
],
target_types=[PythonSourcesGeneratorTarget, PythonDistribution],
objects={"python_artifact": PythonArtifact},
)
return set_options(rule_runner)
def set_options(rule_runner: PythonRuleRunner, options: list | None = None) -> PythonRuleRunner:
rule_runner.set_options(
options or [],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
env={
"TWINE_USERNAME": "whoami",
"TWINE_USERNAME_PYPI": "whoareyou",
"TWINE_PASSWORD_PYPI": "secret",
},
)
return rule_runner
@pytest.fixture
def packages():
return (
BuiltPackage(
EMPTY_DIGEST,
(
BuiltPackageArtifact("my-package-0.1.0.tar.gz"),
BuiltPackageArtifact("my_package-0.1.0-py3-none-any.whl"),
),
),
)
def project_files(
skip_twine: bool = False, repositories: list[str] = ["@pypi", "@private"]
) -> dict[str, str]:
return {
"src/BUILD": dedent(
f"""\
python_sources()
python_distribution(
name="dist",
provides=python_artifact(
name="my-package",
version="0.1.0",
),
repositories={repositories!r},
skip_twine={skip_twine},
)
"""
),
"src/hello.py": """print("hello")""",
".pypirc": "",
}
def request_publish_processes(rule_runner: PythonRuleRunner, packages) -> PublishProcesses:
tgt = rule_runner.get_target(Address("src", target_name="dist"))
fs = PublishPythonPackageFieldSet.create(tgt)
return rule_runner.request(PublishProcesses, [fs._request(packages)])
def assert_package(
package: PublishPackages,
expect_names: tuple[str, ...],
expect_description: str,
expect_process: Callable[[Process], None] | None,
) -> None:
assert package.names == expect_names
assert package.description == expect_description
if expect_process:
assert package.process
expect_process(package.process.process)
else:
assert package.process is None
def test_twine_upload(rule_runner, packages) -> None:
rule_runner.write_files(project_files(skip_twine=False))
result = request_publish_processes(rule_runner, packages)
assert len(result) == 2
assert_package(
result[0],
expect_names=(
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
expect_description="@pypi",
expect_process=process_assertion(
argv=(
"./twine.pex_pex_shim.sh",
"upload",
"--non-interactive",
"--config-file=.pypirc",
"--repository=pypi",
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
env=FrozenDict({"TWINE_USERNAME": "whoareyou", "TWINE_PASSWORD": "secret"}),
),
)
assert_package(
result[1],
expect_names=(
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
expect_description="@private",
expect_process=process_assertion(
argv=(
"./twine.pex_pex_shim.sh",
"upload",
"--non-interactive",
"--config-file=.pypirc",
"--repository=private",
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
env=FrozenDict({"TWINE_USERNAME": "whoami"}),
),
)
def test_skip_twine(rule_runner, packages) -> None:
rule_runner.write_files(project_files(skip_twine=True))
result = request_publish_processes(rule_runner, packages)
assert len(result) == 1
assert_package(
result[0],
expect_names=(
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
expect_description="(by `skip_twine` on src:dist)",
expect_process=None,
)
# Skip twine globally from config option.
rule_runner.set_options(["--twine-skip"])
result = request_publish_processes(rule_runner, packages)
assert len(result) == 0
@pytest.mark.parametrize(
"options, cert_arg",
[
pytest.param(
[],
None,
id="No ca cert",
),
pytest.param(
["--twine-ca-certs-path={}"],
"--cert=ca_certs.pem",
id="[twine].ca_certs_path",
),
# This test needs a working ca bundle to work. Verified manually for now.
# pytest.param(
# ["--ca-certs-path={}"],
# "--cert=ca_certs.pem",
# id="[GLOBAL].ca_certs_path",
# ),
],
)
def test_twine_cert_arg(rule_runner, packages, options, cert_arg) -> None:
ca_cert_path = rule_runner.write_files({"conf/ca_certs.pem": ""})[0]
rule_runner.write_files(project_files(repositories=["@private"]))
set_options(rule_runner, [opt.format(ca_cert_path) for opt in options])
result = request_publish_processes(rule_runner, packages)
assert len(result) == 1
process = result[0].process
assert process
if cert_arg:
assert cert_arg in process.process.argv
else:
assert not any(arg.startswith("--cert") for arg in process.process.argv)
repo_name: pantsbuild/pants | sub_path: src/python/pants/backend/python/goals/publish_test.py | file_name: publish_test.py | file_ext: py | file_size_in_byte: 6,774 | program_lang: python | lang: en | doc_type: code | stars: 2,896 | dataset: github-code | pt: 6

seq_id: 31872746206
from lxml import html
import requests
import re
MainPage = requests.get("https://www.carvezine.com/stories/")
tree = html.fromstring(MainPage.content)
links = tree.xpath('//a[@class="summary-title-link"]/@href')
text = ""
text.encode('utf-8').strip()
for link in links:
testURL = "https://www.carvezine.com" + link
story = requests.get(testURL)
storyTree = html.fromstring(story.content)
storyList = storyTree.xpath('//*[@class="sqs-block-content"]/p//text()')
storyText = ' '.join(storyList)
text += storyText
new_txt = re.sub(r"[^a-zA-Z0-9'.,!?:;()\"$#]", ' ', text)
with open('collections.txt', 'w', encoding='utf-8') as out_file:
    out_file.write(new_txt)
repo_name: RichardWen/python-practice | sub_path: webscraping/storyscraper.py | file_name: storyscraper.py | file_ext: py | file_size_in_byte: 631 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 31108318218
'''
Created on January 30, 2021
@author: Administrator
'''
def signalMaParaList(maShort=range(10,200,10),maLong=range(10,300,10)):
    """
    Generate the parameter grid for the simple moving-average strategy.
    :param maShort: candidate short moving-average windows
    :param maLong: candidate long moving-average windows
    :return: list of (short, long) pairs with short < long
    """
paraList=[]
for short in maShort:
for long in maLong:
if short>=long:
continue
else:
paraList.append((short,long))
return paraList
def signalMa(df,para): # Compute trading signals and fill the `signal` column (only this signal-generation part is strategy-specific; the rest is reusable)
    """
    Moving-average strategy:
    When the short MA crosses above the long MA, buy with the full position at the next day's open and hold.
    When the short MA crosses below the long MA, sell the whole position at the next day's open and stay in cash until the next buy.
    :param df: price DataFrame with post-adjusted OHLC columns, including 'close_post'
    :param para: (maShort, maLong) tuple of the short and long moving-average windows
    :return: df with a 'signal' column (1 = long, 0 = flat)
    """
    # Dual moving-average strategy
    # for maShort in [5,10,15,20,25]:
    #     for maLong in [10,15,20,25,30]:
    #         if maShort<maLong:
    #             continue
maShort,maLong=para
df['maShort']=df['close_post'].rolling(maShort,min_periods=1).mean()
df['maLong']=df['close_post'].rolling(maLong,min_periods=1).mean()
    # Buy signal (go long): short MA crosses above the long MA
    cond1=(df['maShort']>df['maLong'])
    cond2=(df['maShort'].shift(1)<=df['maLong'].shift(1))
    df.loc[cond1 & cond2,'signal']=1 # 1: long
    # Sell signal (close the position): short MA crosses below the long MA
    cond1=(df['maShort']<df['maLong'])
    cond2=(df['maShort'].shift(1)>=df['maLong'].shift(1))
    df.loc[cond1 & cond2,'signal']=0 # 0: flat
df.drop(['maShort','maLong','open_post','high_post','low_post'],axis=1,inplace=True)
return df
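# A minimal usage sketch (my addition, not part of the original repo). It assumes a pandas
# DataFrame carrying the post-adjusted OHLC columns that signalMa() expects; the price values
# below are made up purely for illustration.
#
#     import pandas as pd
#
#     df = pd.DataFrame({
#         'open_post':  [10.0, 10.1, 10.2, 10.4, 10.7, 10.6, 10.9],
#         'high_post':  [10.2, 10.3, 10.5, 10.8, 10.9, 10.8, 11.1],
#         'low_post':   [ 9.9, 10.0, 10.1, 10.3, 10.5, 10.4, 10.7],
#         'close_post': [10.1, 10.2, 10.4, 10.7, 10.8, 10.6, 11.0],
#     })
#     for para in signalMaParaList(maShort=range(2, 4), maLong=range(3, 6)):
#         out = signalMa(df.copy(), para)
#         print(para, out['signal'].dropna().tolist())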
repo_name: geekzhp/zhpLiangHua | sub_path: timingStrategy/singals.py | file_name: singals.py | file_ext: py | file_size_in_byte: 1,841 | program_lang: python | lang: zh | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 21699610056
import cv2
# Method: get_frames
# Purpose: Extract a predefined number of frames from a provided video
# Parameters: video_capture: provided video
# frame_num: the desired number of frames
# frame_start: optional value to input for start of frame
def get_frames(video_capture, frame_num, frame_start=0):
counter = 0
image_arr = []
frame_num = frame_start + frame_num
# Loop through and create individual frames that were captured from the video file
    while counter < frame_num:
is_image_good, image = video_capture.read()
if not is_image_good:
if counter == 0:
print('Video cannot be read from file')
break
# Use opencv to write the frame that was extracted from the video
if counter >= frame_start:
image_arr.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# Increment the counter as more frames are extracted
counter += 1
# Frames to be returned
return image_arr
# Method: get_edges
# Purpose: gets the edges in an image via converting to gray scale than blurring the image
# Parameters: frame_list: list of frames
# line_size: how large the lines should be on edges
# blur_value: how blurred the image should be
def get_edges(frame_list, line_size, blur_value):
frame_edges = []
for i in frame_list:
gray_image = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)
gray_blurred_image = cv2.medianBlur(gray_image, blur_value)
frame_edges.append(
cv2.adaptiveThreshold(gray_blurred_image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, line_size,
blur_value))
return frame_edges
# Method: rgb2gray
# Purpose: Algorithm to convert a color image to gray scale in matplotlib
# Parameters: image (The image array to convert)
def rgb2gray(image):
red, green, blue = image[:, :, 0], image[:, :, 1], image[:, :, 2]
return 0.2989 * red + 0.5870 * green + 0.1140 * blue
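# A minimal usage sketch (my addition; "sample.mp4" is a hypothetical file name):
#
#     cap = cv2.VideoCapture("sample.mp4")
#     frames = get_frames(cap, frame_num=10, frame_start=30)   # 10 frames, starting at frame 30
#     cap.release()
#     edges = get_edges(frames, line_size=7, blur_value=7)     # both values must be odd and > 1
#     gray = rgb2gray(frames[0])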
repo_name: ccranson27/ccr_playground | sub_path: frame_gathering.py | file_name: frame_gathering.py | file_ext: py | file_size_in_byte: 2,033 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 72699109627
# -*- coding: UTF-8 -*-
import random
from datetime import datetime
"""This is a random address function"""
def address():
    # Residential-complex names; feel free to add more
area_address_name = ['蓝湾上林院', '绿城金华御园(别墅)', '紫金湾', '玫瑰星城', '绿城兰园',
'龙庭一品', '江山风华', '中梁首府', '中梁首府', '都市豪园',
'光明湖海城市花园', '金色海塘', '天御花园', '广润翰城', '泰地金水湾',
'新纪元香湖', '绿城金都美地', '中天学府诚品', '金都美苑', '金都美苑',
'香格里拉城市花园', '广天九龙玉府', '中天公元诚品', '南岸名城', '欧景名城',
'御园(东区)', '蝶景湾御江山', '滨江金色蓝庭', '书香名邸', '蓝湾国际花园',
'丽州一品', '丽州一品', '苏桂院(一期)', '环球春江花园', '冠达东方兰庭',
'五星清华园', '鸿基彼岸', '东方明珠花园', '华庭常青墅', '四季尊域',
'尖峰郦园', '金地艺境', '保集蓝郡', '保集蓝郡', '泰瑞家园',
'泰瑞家园', '和信花园', '环球春江花园', '矿泉花园', '环球春江花园',
'福林花园', '海韵嘉园', '万科青岛小镇', '中海蓝庭', '城发长江瑞城',
'麦岛金岸', '城建湖光山色', '青岛印象山', '金帝山庄', '保利海上罗兰',
'东海路9号', '鲁商蓝岸丽舍', '瑞源名嘉汇', '中海清江华府', '万科魅力之城',
'中央国际', '湛园海德公园', '万达悦公馆', '万科如园', '和达璟城紫御',
'上实海上海', '温哥华花园', '金秋泰和郡', '海信珠山小镇', '海逸天成',
'青特小镇', '中海银海一号', '万科春阳花园', '山水名园二期', '晓港名城(五期)',
'浮山后四小区', '万丽海景', '浮山湾花园', '深蓝中心', '万科翡翠长江',
'青铁华润城', '左岸风度', '逍遥花园', '鲁商首府', '鲁德海德堡',
'海尔山海湾', '龙湖悠山郡', '保利百合花园', '浮山后六小区', '锦绣天成',
'万科金色城市', '海尔世纪公馆', '青特赫山', '丽泽花园', '万科城',
'御景峰', '柏悦华府', '依云曦城', '上林一品', '蔚蓝创新天地',
'融创御府', '广佛颐景园', '荔园新天地', '友谊大街18号街坊', '星海岸',
'金地天玺', '翠湖绿洲', '梧桐苑', '弘信山庄', '中海临安府',
'东逸湾(别墅)', '世纪华庭', '宝翠花园', '龙光水悦龙湾', '藏珑华府',
'半岛碧桂园', '都市豪园', '仙湖别墅', '惠景城', '雅居蓝湾',
'尚观嘉园', '阳光山色', '青春小区', '颐山源墅', '颐和国际',
'致越优城', '优山美地', '保利海德公园', '星湖湾', '影都学府',
'绿岛明珠', '天台山庄', '时代领峰', '国际城名苑', '保集半岛',
'保利东御花园', '碧桂园翡翠湾', '保利西雅图', '中恒海晖城', '嘉乐花园',
'金海岸花园', '绿地未来城', '尚辉苑', '南江壹号', '南江壹号',
'长华国际中心', '丽日豪庭', '北滘海琴水岸', '万科沁园', '丽泽花园',
'永盛新阳光', '柳湖花园', '山水庄园', '御景花园', '南江名郡',
'紫金玉澜', '长信东海银湾', '丹灶碧桂园', '青春小区', '青春小区',
'名汇浩湖湾', '广夏花园', '海琴湾', '保利东景花园', '新城云昱',
'天悦湾花园', '美的翰湖苑', '招商臻园', '荟景豪庭', '如意花园',
'同济广场', '金地悦荔', '岭南天地璟廷', '龙光水悦云天', '江山瑞城',
'红星天悦', '保利外滩一号', '金地九珑璧', '碧桂园钻石湾', '泰地世锦园',
'光明花半里', '新君汇花地湾', '鹿璟村', '美的领贤公馆', '君御花园',
'恒大帝景', '帝景蓝湾', '雅丽豪庭', '红星天悦', '鲁能公馆',
'凤起兰庭', '珠水豪庭', '花苑广场', '雅瑶绿洲', '顺德居小区',
'保利花园(六期)', '鼎太风华', '馥室成双(一期)', '二冶小区街坊', '鹿港小镇',
'自由路8号街坊', '恒大华府', '保利罗兰香谷', '保利拉菲公馆(二三期)', '滨海名都',
'东河国际商住城', '日月豪庭', '光辉佳苑', '文雅苑', '迎宾小区',
'万达广场万达小区', '中冶世家', '景苑花园', '保利花园(三期)', '青福新城',
'东方俪城', '富力城(EFG区)', '丰盈小区', '富强路七号街坊', '居然新城',
'锦尚国际', '奥宇新城', '阿尔丁小区', '三江尊园', '现代城',
'文馨苑', '新星壹品', '邻圃道街坊', '惠民小区(昆都仑)', '正翔国际枫景苑',
'新星美地', '维多利华府', '口岸花苑', '凡尔赛颐阁(凡尔赛观邸)', '友谊17号街坊',
'欧风丽景', '保利花园(一期)', '欧鹿生活城', '文脉苑', '东亚香堤丽舍',
'乌兰小区', '佳园小区', '富强路十号街坊', '阳光小区', '翡丽湾',
'檀香湾', '维多利摩尔城', '恒大名都', '友谊大街22号东街坊', '文博苑',
'青山路1号街坊', '京奥港花园', '凯旋豪庭', '六合新城(二区南)', '富丽佳园',
'绿地国际花都', '景富家园(C区)', '中建御澜世家', '阿南小区', '松石国际城石榴花园',
'中冶世家水晶城', '华丽家族', '美室层双', '松石名第', '燕赵锦河湾',
'牡丹花园', '友谊小区(南区)', '合志家', '园文芳苑', '山水佳苑',
'万郡大都城', '华发新城', '包钢友谊十三小区', '丽晶名邸', '金茂豪庭',
'少先路31号街坊', '百兴小区', '佳福小区', '首创加州郡府', '锦林花园',
'昆河壹号', '馥室成双(二期)', '青山路五号街坊', '恒基景苑', '振华二区',
'紫金华府', '保利花园(二期)', '富强路一号街坊', '健康新城', '望园小区',
'嘉园泊景湾', '新元华庭', '金沙华府', '育才小区', '龙熙盛景',
'呼得木林大街10号街坊', '青东华庭', '黄河小区', '呼得木林大街11号街坊', '中冶世家华庭',
'明日星城知情苑', '富力华庭', '锡华世纪花园', '自由路7号街坊', '保利花园(四期)',
'水岸花都', '鹿鸣苑', '青年路8号街坊', '龙苑小区(B区)', '富贵佳园',
'高新花园', '丰景佳苑', '荣资梦乡', '胜达小区', '检察馨苑',
'青山路六号街坊', '居然青年城', '少先路二十二号街坊', '大连新型居住区温馨园',
'保利拉菲公馆(一期)',
'桐荷嘉苑', '远洲国际城', '青11号街坊', '广基花园', '茂业天地',
'和发紫薇园', '城际美景', '丰景御苑', '裕民新城理想城', '东方花园',
'天疆骊城', '纺织社区', '惠德花园', '海威小区(二区)', '青松小区(二区)',
'铭峰佳苑', '景富家园(B区)', '颐和山庄', '大连新型居住区春意园', '鹿景苑',
'青云小区一段', '阳光尚品(南区)', '滨江国际阅江台', '融茂第一城(C1区)', '意城晶华',
'富强路三号街坊', '友谊大街19号街坊(一区)', '大连新型居住区长熙园', '幸福路7号街坊',
'华天云居',
'振翔小区', '神华佳苑', '幸八雅园', '当代左岸绿洲', '江南文枢苑',
'滨江国际澜泊湾', '丰产道一号街坊', '青年路10号街坊', '明日星城知乐苑', '新星水岸花园',
'北梁新区南二区', '幸福路5号街坊(哈达道)', '向阳花苑', '青年路7号街坊', '闽辉禧瑞都',
'友谊大街27号街坊', '瀚星华府', '龙昱华府', '景富家园(F区)', '友谊嘉园(三期)',
'怡然苑', '南排小区', '赛音小区(西区)', '天赐新城(B区)', '万达嘉园',
'金泰花园', '明日星城德景苑', '通顺东二区', '当代菁英国际', '友谊大街25号街坊',
'呼得木林大街7号街坊', '加州郡府融邦', '万新家园', '民馨家园(二区)', '呼得木林新天地2区',
'北大恒苑', '万和城(二期)', '东豪国际城', '自由路5号街坊', '明日星城知雅苑',
'钢铁大街36号街坊', '海威小区(五区)', '呼得木林大街14号街坊', '西五街房管楼小区(体育场南路)',
'碧水嘉苑',
'巨力时代', '民主路5号街坊', '汇金小区', '景晟开元', '瑞春园',
'金辉华府', '恩和小区', '喜瑞都御府', '钢铁大街18号街坊', '国际新城(南区公寓)',
'电力佳苑', '健康阳光城(北区)', '和平路西小区', '祥和苑', '幸福路1号街坊',
'龙苑小区(A区)', '北梁新区南四区', '鑫泰豪庭', '天福广场', '友谊大街23号街坊',
'海威小区(四区)', '银苑小区', '康乐小区(西区)', '中晟华悦', '保利香槟湾(保利香槟花园)',
'兵工华居', '西河景苑', '都兰小区', '友谊大街16号街坊', '公园大道',
'自由路4号街坊', '中冶世家荣园', '园林新村', '内蒙古地勘五院', '恒大帝景',
'苏宁广场', '朝阳小区(一区)', '佳禾公寓', '滨江国际王俯景', '青松小区(五区)',
'一化小区', '民馨家园(六区)', '瑞芬小区', '青年园', '核工业208小区',
'沃土阳光住宅小区', '春光小区(六区)', '华清佳苑', '瑞德花园', '北梁新区西一区',
'万和城(一期)', '明华学府', '青年路12号街坊住宅小区', '呼得木林大街12号街坊', '少先20号街坊',
'友谊大街22号西街坊', '松石国际城', '中和文化广场', '大连新型居住区逸民园', '振华小区',
'九郡嘉园', '世纪佳苑', '民主路3号街坊', '富强路十二号街坊', '西四街小区',
'夏日花园', '宏源鑫都', '明日星城安景苑', '幸福南路16号街坊', '青松小区(六区)',
'龙藏新城福地园', '大连新型居住区怡生园', '钢铁大街37号街坊', '锦绣嘉园', '融茂第一城(A区)',
'矿机小区', '保成上元名府(南区)', '幸福路2号街坊', '幸福路10号街坊', '青宾小区',
'矿办小区', '金泰丽苑', '民馨家园A区', '海湖豪庭', '赛音道五号街坊',
'西脑包康乐小区', '璟华苑', '三电住宅小区', '时代天骄', '明日星城知书苑',
'通顺西二区', '友谊小区(北区)', '山水文苑', '富强路四号街坊', '呼得木林新天地1区',
'当铺佳苑', '香林美地', '华丰园', '鹿城福满园', '明日星城文景苑(北区)',
'青苑小区', '公二街住宅小区', '春阳小区', '边防佳苑', '九中小区',
'鹿苑小区', '丰产道2号街坊', '绿都花庭', '顺鑫望潮苑(别墅)', '绿苑豪庭',
'傲北上城', '胜源滨河新城', '御融公馆(公寓住宅)', '青年路18号街坊', '总部经济园',
'田城康都苑', '赛音道六号街坊', '中慧新城', '美岸华庭(北区)', '呼得木林大街9号街坊',
'友谊大街26号街坊', '海威小区十区', '电力小区(青山)', '南海五村', '警官花园',
'信德雅居', '神力龙园', '怡景园', '三克拉', '天赐新城(A区)',
'贵发山庄(一期)', '步步高东苑', '朝阳小区(二区)', '太阳城', '青年路14号街坊',
'绿苑小区(东河)', '颐和山庄(半山湖)', '友谊大街31号小区', '安富小区', '天安雅居',
'草原小区', '青六新苑', '青云二段', '团结大街11号街坊', '新春小区',
'兵工佳苑', '胜达花苑', '福宇小区', '新桃园小区', '西一街小区',
'兰溪花园', '金桂名园', '福泰嘉苑', '安泰华庭', '保利钻石小区',
'迎宾道一号街坊', '幸福路9号街坊', '东方嘉苑', '永茂泉阳光小区', '横泰佳苑',
'明德花园(公寓住宅)', '少先路29号街坊盛世嘉苑', '友谊大街15号街坊', '御景华庭',
'团结大街8号街坊',
'自由路10号街坊', '青松小区(七区)', '北新街小区(北新苑东区)', '怡荷豪庭', '信合龙湾半岛',
'曹欣小区', '北梁新区西二区', '赛音道一号街坊', '健康阳光城(南区)', '光辉小区(三区)',
'古邑人家', '近水楼台', '龙藏新城雅典苑', '东海花园', '景富家园(A区)',
'国际新城(北区)', '工业路2号街坊', '松石雅居', '钢铁大街24号街坊', '锦裕园小区',
'青甲12号街坊', '团结22号街坊', '景天花园(一期)', '龙丰苑', '保利公园壹号',
'海威小区(六区)', '攀宇小区', '宁馨佳园', '丰产道3号街坊', '方兴观澜壹号']
    # Randomly pick a complex name
    area_name = random.choice(area_address_name)
    # Randomly pick a building number
    build_number = str(random.choice(
        [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32])
    )
    # Randomly pick a unit number
    unit_number = str(random.choice(
        [1, 2, 3, 5, 6, 7])
    )
    # Randomly pick a floor number
    floor_number = str(random.choice(
        [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27])
    )
    # Randomly pick a door/room number
    house_number = str(random.choice(
        [1, 2, 3])
    )
    # `demo` is the address prefix chosen by the user later in this script (a module-level global);
    # address() must only be called after it has been set.
    result_address_name = demo + area_name + build_number + "号" + unit_number + "单元" + floor_number + "0" + house_number + "室"
return result_address_name
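# A minimal usage sketch (my addition): the global `demo` prefix must be assigned before calling
# address(); "示例路" below is a made-up placeholder prefix, not one of the script's real options.
#
#     demo = "示例路"
#     print(address())   # e.g. 示例路蓝湾上林院12号3单元1701室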
"""this is persion input function"""
str1 = "龙洞堡御景新城大田大道"
str2 = "水川镇转青城镇西巴路"
print("##########请勿乱分享,珍惜劳动付出,谢谢!!!!")
while True:
print("* 1 : 龙洞堡御景新城大田大道")
print("* 2 : 水川镇转青城镇西巴路")
while True:
try:
persion_input = int(input('请输入地址头对应的数字:'))
break
except ValueError:
print('!!!!输入有误请重新输入==>[1或者2]!!!')
continue
if persion_input == 1:
demo = str1
print("==> 需要生成的地址前标记为: " + demo)
break
elif persion_input == 2:
demo = str2
print("==> 需要生成的地址前标记为: " + demo)
break
else:
print('您输入的不正确,请重新输入')
continue
while True:
try:
enter_number = int(input('请输入生成地址个数:'))
break
except ValueError:
print("输入有误请重新输入==>[整数数字]")
continue
nt = datetime.now()
day_time = nt.strftime('%Y{y}%m{m}%d{d} %H{h}%M{mm}%S{s}').format(y='年', m='月', d='日', h='时', mm='分', s='秒')
"""This is a Main function"""
with open(day_time + "地址.txt", "w", encoding='utf-8') as f:
for i in range(enter_number):
person_address = address()
f.write(person_address)
f.write('\n')
f.close()
print(
"""
_ _ _
| | | | ( )
| | ___ | |_ |/ ___ __ _ ___
| | / _ \ | __| / __| / _` | / _ \
| |____ | __/ | |_ \__ \ | (_| | | (_) |
\_____/ \___| \__| |___/ \__, | \___/
__/ |
|___/
"""
)
repo_name: zzyy8678/stady_python | sub_path: create_address.py | file_name: create_address.py | file_ext: py | file_size_in_byte: 17,836 | program_lang: python | lang: zh | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 23630080600
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 19 18:41:54 2022
@author: Vikki
"""
class Node:
def __init__(self, data, next = None, prev=None):
self.data = data
self.next = next
self.prev = prev
class Linkedlist:
def __init__(self,header = None, tail = None):
self.header = header
self.tail = tail
def insert_values(self, ls):
self.header = None
for data in ls:
self.insert_at_begining(data)
def insert_at_end(self, data):
if self.header is None:
self.header = Node(data, None, None)
return
itr = self.header
while itr.next:
itr = itr.next
itr.next = Node(data, None, itr)
def insert_at_begining(self, data):
if self.header is None:
self.header = Node(data, None, None)
else:
node = Node(data, self.header, None)
self.header.prev = node
self.header = node
def get_last_node(self):
itr = self.header
while itr.next:
itr = itr.next
return itr
def print_forward(self):
if self.header is None:
print("Linked list is empty")
return
itr = self.header
llstr = ''
while itr:
llstr += str(itr.data) + ' --> '
itr = itr.next
print(llstr)
def print_backward(self):
if self.header is None:
print("Linked list is empty")
return
itr = self.get_last_node()
llstr = ''
while itr:
llstr += str(itr.data) + ' --> '
itr = itr.prev
print(llstr)
    def insert_at_end(self, data):  # note: this overrides the identically named method defined earlier in the class
        itr = self.get_last_node()
        itr.next = Node(data, None, itr)  # the new tail's prev must point at the old tail, not at itr.prev
def insert_at(self, index, data):
itr = self.header
i = 0
while itr:
if i == index:
node = Node(data, itr.next, itr)
if node.next:
node.next.prev = node
itr.next = node
break
i += 1
itr = itr.next
if __name__ == "__main__":
ll = Linkedlist()
ll.insert_values(["banana","mango","grapes","orange"])
ll.print_forward()
ll.print_backward()
ll.insert_at_end("figs")
ll.print_forward()
ll.insert_at(3,"jackfruit")
ll.print_forward()
repo_name: kambojrakesh/Python_DSA | sub_path: algo-cb/3_doubly_linked_list.py | file_name: 3_doubly_linked_list.py | file_ext: py | file_size_in_byte: 2,575 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 585614447
from pathlib import Path
ROOT_FOLDER = Path("STOCK_VOLATILITY_NEW").resolve().parent
DATASET_DIR = ROOT_FOLDER / "data"
ALL_DATA_DIR = DATASET_DIR / "all_data.csv"
ALL_DATA_NEW_DIR = DATASET_DIR / "all_data_new.csv"
UNPROCESSED_DATA = DATASET_DIR / "index_funds_data.csv"
FORMATTED_DATA = DATASET_DIR / "formatted_data.csv"
DATA_FOR_ANALYSIS = DATASET_DIR / "new_formatted_data.csv"
EARNINGS_DATA = DATASET_DIR / "earnings.csv"
CPI_DATA = DATASET_DIR / "cpi.csv"
MARKET_SCHEDULE = DATASET_DIR / "nasdaq_schedule.csv"
TRAIN_DATA_PATH = DATASET_DIR / "train.csv"
VALID_DATA_PATH = DATASET_DIR / "valid.csv"
TEST_DATA_PATH = DATASET_DIR / "test.csv"
TICKERS = ["XLK", "XLP", "XLF", "XLV", "XLE", "XLI", "XLU"]
FORECAST_HORIZONS = ["one_days", "two_days", "three_days", "four_days", "five_days"]
ADDITIONAL_OUTPUT_COLS = ["volatility_target", "date", "ticker", "idx"]
LIGHTNING_LOGS_DIR = ROOT_FOLDER / "lightning_logs"
IMAGE_PATH = ROOT_FOLDER / "img"
MODEL_DATA = ROOT_FOLDER / "model_data"
BEST_MODEL_PATH = MODEL_DATA / "checkpoint.ckpt"
TIMESERIES_DATASET_PARAMS = MODEL_DATA / "ts_dataset_params.joblib"
PRES_ELECTION_DATES = [
"2004-11-02",
"2008-11-04",
"2012-11-06",
"2016-11-08",
"2020-11-03",
]
MIDTERM_ELECTION_DATES = [
"2006-11-07",
"2010-11-02",
"2014-11-04",
"2018-11-06",
"2022-11-08",
]
ELECTION_DATES = PRES_ELECTION_DATES + MIDTERM_ELECTION_DATES
repo_name: vladkramarov/index_fund_volatility | sub_path: core.py | file_name: core.py | file_ext: py | file_size_in_byte: 1,412 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6