max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
presqt/targets/figshare/utilities/helpers/extra_metadata_helper.py | djordjetrajkovic/presqt | 3 | 12759351 | import requests
def extra_metadata_helper(project_url, headers):
    """
    Build extra metadata dict to help with other integrations.

    Parameters
    ----------
    project_url: str
        The url to the project info
    headers: dict
        Figshare Authorization header

    Returns
    -------
    Extra metadata dictionary
    """
    project_info = requests.get(project_url, headers=headers).json()

    creators = []
    for author in project_info['collaborators']:
        # Split "First Rest-of-name" once instead of calling partition() twice.
        first_name, _, last_name = author['name'].partition(' ')
        creators.append({
            "first_name": first_name,
            "last_name": last_name,
            'ORCID': None
        })

    # Prefer the published date when present, otherwise fall back to creation.
    publication_date = project_info.get('published_date',
                                        project_info['created_date'])

    return {
        "title": project_info['title'],
        "creators": creators,
        "publication_date": publication_date,
        "description": project_info['description'],
        "keywords": [],
        "license": None,
        "related_identifiers": [],
        "references": None,
        "notes": None
    }
prodigy/__init__.py | enriqueg1839/prodigy | 25 | 12759352 | #!/usr/bin/env python
#
# This code is part of the binding affinity prediction tools distribution
# and governed by its license. Please see the LICENSE file that should
# have been included as part of this package.
#
"""
Binding affinity prediction methods developed by the Bonvin Lab.
Assorted data files for different predictors.
"""
from .predict_IC import Prodigy
| 1.03125 | 1 |
tests/test_card.py | softbutterfly/culqi-api-python | 1 | 12759353 | import os
import unittest
from copy import deepcopy
from uuid import uuid4
import pytest
from dotenv import load_dotenv
from culqi import __version__
from culqi.client import Culqi
from culqi.resources import Card
from .data import Data
class CardTest(unittest.TestCase):
    """Integration tests for the Card resource.

    Requires API_PUBLIC_KEY / API_PRIVATE_KEY in the environment (or a .env
    file); HTTP traffic is recorded/replayed via pytest-vcr cassettes.
    """

    # pylint: disable = too-many-public-methods
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        load_dotenv()
        self.version = __version__
        self.public_key = os.environ.get("API_PUBLIC_KEY")
        self.private_key = os.environ.get("API_PRIVATE_KEY")
        self.culqi = Culqi(self.public_key, self.private_key)
        self.card = Card(client=self.culqi)
        self.metadata = {"order_id": "0001"}

    def get_card_data(self, code, provider):
        """Create a token and a customer; return the ids a new card needs."""
        # NOTE(review): "<EMAIL>" contains no "{}" placeholder, so .format()
        # is a no-op and every test reuses the same literal address. This
        # looks like an anonymization artifact of a template such as
        # "{}@example.com" -- confirm and restore the placeholder.
        email = "<EMAIL>".format(uuid4().hex[:4])

        token_data = deepcopy(Data.CARD[code][provider])
        token_data["email"] = email
        token = self.culqi.token.create(data=token_data)

        customer_data = deepcopy(Data.CUSTOMER)
        customer_data["email"] = email
        customer = self.culqi.customer.create(data=customer_data)

        return {
            "token_id": token["data"]["id"],
            "customer_id": customer["data"]["id"],
        }

    def _assert_create_succeeds(self, code, provider):
        """Helper: create a (code, provider) card and expect success."""
        card_data = self.get_card_data(code, provider)
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "card"

    def _assert_create_declined(self, code, provider, decline_code):
        """Helper: create a (code, provider) card and expect a decline."""
        card_data = self.get_card_data(code, provider)
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "error"
        assert card["data"]["code"] == "card_declined"
        assert card["data"]["decline_code"] == decline_code

    def test_url(self):
        """The resource builds correct collection and item URLs."""
        # pylint: disable=protected-access
        id_ = "sample_id"
        assert self.card._get_url() == "https://api.culqi.com/v2/cards"
        assert self.card._get_url(id_) == "https://api.culqi.com/v2/cards/{0}".format(
            id_
        )

    @pytest.mark.vcr()
    def test_card_create(self):
        self._assert_create_succeeds("successful", "visa")

    @pytest.mark.vcr()
    def test_card_retrieve(self):
        card_data = self.get_card_data("successful", "visa")
        created_card = self.card.create(data=card_data)
        retrieved_card = self.card.read(created_card["data"]["id"])

        assert created_card["data"]["id"] == retrieved_card["data"]["id"]

    @pytest.mark.vcr()
    def test_card_list(self):
        retrieved_card_list = self.card.list(
            headers={
                "Accept-Encoding": "identity",
            },
        )
        assert "items" in retrieved_card_list["data"]

    @pytest.mark.vcr()
    def test_card_update(self):
        card_data = self.get_card_data("successful", "visa")
        created_card = self.card.create(data=card_data)

        metadata = {"metadata": self.metadata}
        updated_card = self.card.update(id_=created_card["data"]["id"], data=metadata)

        # Bug fix: this used to compare created_card's id with itself (always
        # true); compare the updated card against the created one instead.
        assert updated_card["data"]["id"] == created_card["data"]["id"]
        assert updated_card["data"]["metadata"] == self.metadata

    @pytest.mark.vcr()
    def test_card_delete(self):
        card_data = self.get_card_data("successful", "visa")
        created_card = self.card.create(data=card_data)
        deleted_card = self.card.delete(id_=created_card["data"]["id"])

        assert deleted_card["data"]["deleted"]
        assert deleted_card["data"]["id"] == created_card["data"]["id"]
        assert deleted_card["status"] == 200

    @pytest.mark.vcr()
    def test_card_create__successful__visa(self):
        self._assert_create_succeeds("successful", "visa")

    @pytest.mark.vcr()
    def test_card_create__successful__master_card(self):
        self._assert_create_succeeds("successful", "master_card")

    @pytest.mark.vcr()
    def test_card_create__successful__american_express(self):
        self._assert_create_succeeds("successful", "american_express")

    @pytest.mark.vcr()
    def test_card_create__successful__diners_club(self):
        self._assert_create_succeeds("successful", "diners_club")

    @pytest.mark.vcr()
    def test_card_create__stolen_card__visa(self):
        self._assert_create_declined("stolen_card", "visa", "stolen_card")

    @pytest.mark.vcr()
    def test_card_create__lost_card__visa(self):
        self._assert_create_declined("lost_card", "visa", "lost_card")

    @pytest.mark.vcr()
    def test_card_create__insufficient_funds__visa(self):
        self._assert_create_declined(
            "insufficient_funds", "visa", "insufficient_funds"
        )

    @pytest.mark.vcr()
    def test_card_create__contact_issuer__master_card(self):
        self._assert_create_declined(
            "contact_issuer", "master_card", "contact_issuer"
        )

    @pytest.mark.vcr()
    def test_card_create__incorrect_cvv__master_card(self):
        self._assert_create_declined("incorrect_cvv", "master_card", "incorrect_cvv")

    @pytest.mark.vcr()
    def test_card_create__issuer_not_available__american_express(self):
        self._assert_create_declined(
            "issuer_not_available", "american_express", "issuer_not_available"
        )

    @pytest.mark.vcr()
    def test_card_create__issuer_decline_operation__american_express(self):
        self._assert_create_declined(
            "issuer_decline_operation",
            "american_express",
            "issuer_decline_operation",
        )

    @pytest.mark.vcr()
    def test_card_create__invalid_card__diners_club(self):
        self._assert_create_declined("invalid_card", "diners_club", "invalid_card")

    @pytest.mark.vcr()
    def test_card_create__processing_error__diners_club(self):
        self._assert_create_declined(
            "processing_error", "diners_club", "processing_error"
        )

    @pytest.mark.vcr()
    def test_card_create__fraudulent__diners_club(self):
        self._assert_create_declined("fraudulent", "diners_club", "fraudulent")
if __name__ == "__main__":
    # Allow running this test module directly, without the pytest runner.
    unittest.main()
| 2.5 | 2 |
collector/estate_modules/slnecnice.py | kalcok/spoofDog | 0 | 12759354 | <reponame>kalcok/spoofDog
# -*- coding: UTF-8 -*-
from base_module import RealEstate
from bs4 import BeautifulSoup
import requests
import os
class Slnecnice(RealEstate):
    """Scraper for the Slnečnice housing-project flat listings.

    NOTE(review): Python 2 only -- translate() relies on the py2 builtin
    ``unicode`` and on ``str.decode`` of native string literals.
    """

    estate_name = 'slnecnice'
    pretty_name = 'Slnečnice'

    def get_data(self):
        """Scrape every paginated listing page into ``self.data``.

        Keys are flat identifiers (table column 3); values hold the
        translated availability status (column 12) and a price placeholder
        (prices are not published on the listing page).
        """
        base_url = 'https://www.slnecnice.sk/mesto/ponuka-byvania/vsetky-byty'
        base_page = requests.get(base_url).text
        pagination_soup = BeautifulSoup(base_page, 'html.parser')
        paginator = pagination_soup.find('section', {'id': 'paginator'}).find('ul', {'class': 'pagination'})
        # Collect the page numbers shown in the paginator widget first.
        flat_list_pages = []
        for page in paginator.find_all('li'):
            flat_list_pages.append(page.get_text())
        for page in flat_list_pages:
            url = '{0}/page:{1}'.format(base_url, page)
            page_data = requests.get(url).text
            soup = BeautifulSoup(page_data, "html.parser")
            table_body = soup.find("table", {"class": "flats-list"}).find('tbody')
            rows = table_body.find_all("tr")
            for row in rows:
                cells = row.find_all("td")
                # Column 3 = flat id, column 12 = availability text.
                self.data[cells[3].get_text()] = {'availability': self.translate(cells[12].get_text()), 'price': None}

    def translate(self, flat_status):
        """Map a Slovak availability string onto the RealEstate constants.

        Raises ValueError for any status the site starts using that is not
        yet handled here, so new states fail loudly instead of silently.
        """
        flat_status = unicode(flat_status.lower())
        if flat_status == 'voľná'.decode('utf-8'):
            translation = self.free_const
        elif flat_status == 'rezervovaná'.decode('utf-8') or flat_status == 'predrezervovaná'.decode('utf-8'):
            translation = self.reserved_const
        elif flat_status == 'predaná'.decode('utf-8'):
            translation = self.sold_const
        else:
            raise ValueError('Translation failed. Unable to translate {0}'.format(flat_status))
        return translation
| 2.390625 | 2 |
ChatBotAI/Responder.py | nurlanov-zh/SlavkaBot | 0 | 12759355 | <reponame>nurlanov-zh/SlavkaBot
import logging
import zipfile
import requests
from google_drive_downloader import GoogleDriveDownloader as gdd
import threading
import torch
import torch.nn.functional as F
from ChatBotAI.yt_encoder import YTEncoder
from transformers import GPT2Config, GPT2LMHeadModel, GPT2Tokenizer
FILTER_VALUE = -float('Inf')
URL_ZIP_MODEL = "https://drive.google.com/open?id=1FR72Ib40V0nXxfH__x91NWGsy13hzcs5"
ID_GOOGLE_FILE = "1FR72Ib40V0nXxfH__x91NWGsy13hzcs5"
ZIP_NAME = "./ChatBotAI/model_checkpoint.zip"
DIR_NAME = './ChatBotAI/model_checkpoint'
logger = logging.getLogger(__name__)
class ChatBotAI:
    """GPT-2 based responder: fetches a checkpoint and generates replies."""

    def __init__(self, model_path="", tokenizer_class="YTEncoder",
                 tokenizer_name="ChatBotAI/bpe/yt.model", device='cpu'):
        # assert model_path != "", "model_path is empty."
        self.model = None
        self.config = None
        self.tokenizer = None
        if model_path == "":
            logger.info("Downloading model...")
            # gdd.download_file_from_google_drive(file_id=ID_GOOGLE_FILE,
            #                                     dest_path=f'./{ZIP_NAME}')
            #
            # with zipfile.ZipFile(ZIP_NAME, 'r') as zip_ref:
            #     zip_ref.extractall(DIR_NAME)
            # model_path = DIR_NAME
            # Download + unzip run on a background daemon thread; the log
            # line below fires as soon as the thread *starts*, not when the
            # download actually finishes.
            ThreadingExample(ID_GOOGLE_FILE, ZIP_NAME, DIR_NAME)
            logger.info("Download completed!")
        # NOTE(review): the model_path argument appears to be ignored -- the
        # checkpoint is always loaded from DIR_NAME. Confirm whether a
        # caller-supplied path should be honoured here.
        self.model_path = DIR_NAME
        self.model_class = GPT2LMHeadModel
        self.config_class = GPT2Config
        # TODO:
        # Train own tokenizer
        self.tokenizer_class = YTEncoder if tokenizer_class == "YTEncoder" else GPT2Tokenizer
        self.tokenizer_name = tokenizer_name if tokenizer_name else self.model_path
        self.device = device
        self.max_input = 1023  # upper bound on tokens fed to the model

    def load_model(self):
        """Instantiate tokenizer, config and model weights onto self.device."""
        self.tokenizer = self.tokenizer_class.from_pretrained(self.tokenizer_name)
        # Never exceed the tokenizer's own single-sentence limit.
        self.max_input = min(self.tokenizer.max_len_single_sentence, self.max_input)
        self.config = self.config_class.from_pretrained(self.model_path)
        self.model = self.model_class.from_pretrained(self.model_path, config=self.config)
        self.model.to(self.device)
        self.model.eval()

    def respond(self, context=""):
        """Sample a continuation of *context* and return only the new text."""
        self.model.eval()
        context_tokens = self.tokenizer.encode(context)
        out = sample_sequence(
            model=self.model,
            context=context_tokens,
            length=500,
            temperature=1.0,
            top_k=50,
            top_p=0.9,
            device=self.device,
            max_input=self.max_input
        )
        # Strip the prompt tokens so only the generated reply is decoded.
        out = out[0, len(context_tokens):].tolist()
        out_text = self.tokenizer.decode(out)
        return out_text
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Mask out unlikely tokens in a 1-D logits vector, in place.

    Keeps at most the ``top_k`` highest-scoring tokens and/or the smallest
    prefix of tokens whose cumulative probability reaches ``top_p`` (nucleus
    filtering, Holtzman et al., http://arxiv.org/abs/1904.09751); every other
    entry is overwritten with ``filter_value``.
    Adapted from https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317

    Args:
        logits: 1-D tensor of unnormalized scores (vocabulary size).
        top_k: when > 0, keep only the k highest-probability tokens.
        top_p: when > 0.0, keep the top tokens with cumulative probability
            >= top_p.
        filter_value: value written into filtered positions.

    Returns:
        The same tensor, filtered in place.
    """
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear

    effective_k = min(top_k, logits.size(-1))  # never request more than the vocab holds
    if effective_k > 0:
        # Everything strictly below the k-th best score gets masked.
        kth_score = torch.topk(logits, effective_k)[0][..., -1, None]
        logits[logits < kth_score] = filter_value

    if top_p > 0.0:
        desc_logits, desc_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(F.softmax(desc_logits, dim=-1), dim=-1)

        # Mark tokens past the nucleus, then shift the mask right by one so
        # the first token that crosses the threshold is still kept.
        drop_sorted = cum_probs > top_p
        drop_sorted[..., 1:] = drop_sorted[..., :-1].clone()
        drop_sorted[..., 0] = 0

        logits[desc_indices[drop_sorted]] = filter_value

    return logits
def sample_sequence(model, length, context, num_samples=1, temperature=1.0, top_k=0, top_p=0.0,
                    device='cpu', max_input=1023, filter_single=[], filter_double=[]):
    """Autoregressively sample *length* tokens from *model* after *context*.

    Parameters: context is a list of token ids; filter_single is a list of
    token ids that are always banned; filter_double bans immediate repeats of
    the listed ids (used to suppress blank lines). Returns a LongTensor of
    shape (num_samples, len(context) + length) including the prompt.

    NOTE: the mutable default arguments are only read, never mutated, so they
    are safe here, though None defaults would be more conventional.
    """
    context = torch.tensor(context, dtype=torch.long, device=device)
    context = context.unsqueeze(0).repeat(num_samples, 1)
    generated = context
    with torch.no_grad():
        for _ in range(length):
            # Feed at most the trailing max_input tokens (context window cap).
            inputs = {'input_ids': generated[:, -max_input:]}
            outputs = model(
                **inputs)  # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
            next_tokens = torch.zeros(num_samples, dtype=torch.long).to(device)
            for isample in range(num_samples):
                next_token_logits = outputs[0][isample, -1, :] / temperature
                # Hard-ban individually filtered token ids.
                next_token_logits[filter_single] = FILTER_VALUE
                # filter blank line = double \n
                if generated[isample, -1] in filter_double:
                    next_token_logits[generated[isample, -1]] = FILTER_VALUE
                filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
                next_tokens[isample] = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
            generated = torch.cat((generated, next_tokens.unsqueeze(-1)), dim=1)
    return generated
def download_file_from_google_drive(id, destination):
    """Download a (possibly large) Google Drive file to *destination*.

    Large files trigger Google's virus-scan interstitial; when that happens
    a confirmation token is read from the cookies and the request retried.
    """
    base_url = "https://docs.google.com/uc?export=download"
    session = requests.Session()

    response = session.get(base_url, params={'id': id}, stream=True)

    confirm_token = get_confirm_token(response)
    if confirm_token:
        # Retry with the confirmation token to bypass the warning page.
        response = session.get(
            base_url,
            params={'id': id, 'confirm': confirm_token},
            stream=True,
        )

    save_response_content(response, destination)
def get_confirm_token(response):
    """Return Google's download-warning cookie value, or None if absent."""
    return next(
        (value for key, value in response.cookies.items()
         if key.startswith('download_warning')),
        None,
    )
def save_response_content(response, destination):
    """Stream the response body to *destination* in fixed-size chunks."""
    CHUNK_SIZE = 3276
    with open(destination, "wb") as out_file:
        for chunk in response.iter_content(CHUNK_SIZE):
            if not chunk:
                continue  # skip keep-alive chunks
            out_file.write(chunk)
class ThreadingExample(object):
    """ Threading example class

    The run() method will be started and it will run in the background
    until the application exits.
    """

    def __init__(self, id_google="", dest_path_zip="", dest_path=""):
        # Google Drive file id, zip landing path, and extraction directory.
        self.id_google = id_google
        self.dest_path_zip = dest_path_zip
        self.dest_path = dest_path

        # Daemon thread: it will not block interpreter shutdown, so a
        # partially-downloaded checkpoint is possible if the app exits early.
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True  # Daemonize thread
        thread.start()  # Start the execution

    def run(self):
        """ Method that runs forever """
        # (Actually runs once: download the zip, then unpack it in place.)
        download_file_from_google_drive(self.id_google, self.dest_path_zip)
        with zipfile.ZipFile(self.dest_path_zip, 'r') as zip_ref:
            zip_ref.extractall(self.dest_path)
# if __name__=="__main__":
# chatbot = ChatBotAI()
# chatbot.load_model()
| 2.296875 | 2 |
src/bootstrap.py | guyemerson/sem-func | 4 | 12759356 | <reponame>guyemerson/sem-func<filename>src/bootstrap.py
import pickle
import numpy as np
from main import setup_trainer
# Load sparse vectors
from bootstrap_meanfield import pred_wei, D, C
trainer = setup_trainer(thresh=5,
                        suffix='bootstrap_bigratio',
                        dims=D,
                        card=C,
                        init_bias=0,
                        init_card=0,
                        init_range=0,
                        model='independent',
                        rate=0.01,
                        rate_ratio=1,
                        l1=0.00001,
                        l1_ratio=0.01,
                        l1_ent=0,
                        l2=0.001,
                        l2_ratio=0.01,
                        l2_ent=0.0001,
                        ent_steps=50,
                        pred_steps=5,
                        setup='adagrad',
                        ada_decay=10**-4,
                        neg_samples=1,
                        processes=10,
                        epochs=3,
                        minibatch=20,
                        ent_burnin=200,
                        pred_burnin=5)

# Copy vectors
trainer.model.pred_wei[:] = pred_wei
pred_wei = trainer.model.pred_wei
pred_bias = trainer.model.pred_bias

# Choose biases to match vectors
# (so prob is high when most relevant units are on, and low when random units are on)
thresh = 5
high = np.partition(pred_wei, -C, axis=1)[:, -C:].sum(axis=1)
aver = pred_wei.mean(axis=1) * C * 2
clip_mask = (high - aver > 2 * thresh)

# Bug fix: boolean fancy indexing (pred_bias[clip_mask]) returns a *copy*,
# so the previous `pred_bias[clip_mask].clip(..., out=pred_bias[clip_mask])`
# wrote into a temporary and left pred_bias unchanged. Assign the clipped
# values back explicitly.
pred_bias[clip_mask] = pred_bias[clip_mask].clip(aver[clip_mask] + thresh,
                                                 high[clip_mask] - thresh)

# Bug fix: `1 - clip_mask` produces an *integer* array of 0s and 1s, which
# numpy treats as fancy integer indices (repeatedly selecting elements 0/1),
# not as a boolean mask. Use the boolean complement instead.
diff_mask = ~clip_mask
pred_bias[diff_mask] = (high[diff_mask] - aver[diff_mask]) / 2

# Remove link weight initialisation
with open('/anfs/bigdisc/gete2/wikiwoods/sem-func/bootstrap_link_log400.pkl', 'rb') as f:
    trainer.model.link_wei[:] = pickle.load(f)
trainer.model.ent_bias[:] = 5

trainer.start()
models/vae.py | ak-dm/Glow-PyTorch | 0 | 12759357 | import math
import torch
import torch.nn as nn
from models.modules import (gaussian_likelihood, gaussian_sample, View)
class VAE(nn.Module):
    """Convolutional VAE for 3-channel 32x32 images with a Gaussian decoder.

    The encoder emits (mu, log-variance) of the latent; the decoder emits
    (mu, log-variance) of the reconstruction, so both KL and reconstruction
    terms are analytic Gaussians.
    """

    def __init__(self, image_shape, hidden_channels):
        """image_shape: kept for interface compatibility (input is assumed
        3x32x32 by the layer sizes below -- confirm against callers).
        hidden_channels: latent dimensionality."""
        super().__init__()
        # Two stride-2 convs: 32x32 -> 16x16 -> 8x8, flattened to 8*8*16.
        self.encoder = nn.Sequential(*[
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 16, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(8 * 8 * 16, hidden_channels),
            nn.BatchNorm1d(hidden_channels),
            nn.ReLU(),
            # Doubled output: chunked into (z_mu, z_logvar) in forward().
            nn.Linear(hidden_channels, hidden_channels * 2),
        ])
        # Mirror of the encoder; final layer emits 3*2 channels for
        # (x_mu, x_logvar).
        self.decoder = nn.Sequential(*[
            nn.Linear(hidden_channels, hidden_channels),
            nn.BatchNorm1d(hidden_channels),
            nn.ReLU(),
            nn.Linear(hidden_channels, 8 * 8 * 16),
            nn.BatchNorm1d(8 * 8 * 16),
            nn.ReLU(),
            View((-1, 16, 8, 8)),
            nn.ConvTranspose2d(16, 32, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.ConvTranspose2d(16, 3 * 2, kernel_size=3, stride=1, padding=1, bias=False),
        ])

    def forward(self, x):
        """Return (latent z, bits-per-dim, None, summary image grid)."""
        b, c, h, w = x.shape
        z_mu, z_logvar = torch.chunk(self.encoder(x), 2, dim=1)
        z_var = torch.exp(z_logvar)
        if self.training:
            # Reparameterization trick. Bug fix: the old code did
            # `z = z_mu` then `z += z_std * eps`, which mutated z_mu in
            # place and corrupted the KL term below. Build z out-of-place.
            z_std = torch.exp(0.5 * z_logvar)
            eps = torch.randn_like(z_mu)
            z = z_mu + z_std * eps
        else:
            z = z_mu
        x_mu, x_logvar = torch.chunk(self.decoder(z), 2, dim=1)
        x_var = torch.exp(x_logvar)

        # Analytic KL between N(z_mu, z_var) and the standard normal prior.
        loss_DKL = -0.5 * (1 + z_logvar - z_mu**2 - z_var).sum(dim=1)
        # NOTE(review): this takes log of the *summed* variance rather than
        # summing per-pixel log-variances; confirm the intended Gaussian
        # likelihood before relying on bpd values.
        loss_rec = 0.5 * (c * h * w) * torch.log(2 * math.pi * x_var.view(b, -1).sum(dim=1)) + 0.5 * ((x - x_mu) ** 2 / x_var).view(b, -1).sum(dim=1)

        objective = -(loss_rec + loss_DKL)
        bpd = (-objective) / (math.log(2.) * c * h * w)
        # Top: originals, bottom: reconstructions (first 16 of the batch).
        summary_image = torch.cat([x[:16], x_mu[:16]], dim=0)
        return z, bpd, None, summary_image
test/integration/targets/module_utils/module_utils/sub/bar/bam.py | Container-Projects/ansible-provider-docs | 37 | 12759358 | <filename>test/integration/targets/module_utils/module_utils/sub/bar/bam.py<gh_stars>10-100
#!/usr/bin/env python
# Marker constant consumed by Ansible's module_utils import integration
# tests to verify that sub/bar/bam.py resolves to this file.
bam = "BAM FROM sub/bar/bam.py"
| 0.929688 | 1 |
NVIDIA_CNN.py | tommykil123/CarND-Behavioral-Cloning-P3 | 0 | 12759359 | <gh_stars>0
#Setup Keras
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers import Cropping2D
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import Adam
def model():
    """Build and compile the NVIDIA end-to-end driving CNN.

    Input: 160x320x3 camera frames, normalized to [-1, 1] and cropped to
    remove sky/hood rows. Output: a single steering value. Compiled with
    MSE loss and Adam (lr=0.001).
    """
    learning_rate = 0.001
    optimizer = Adam(lr=learning_rate)

    net = Sequential()
    # Normalize pixels to [-1, 1], then crop 72 rows off the top and 25
    # off the bottom.
    net.add(Lambda(lambda x: x/127.5 - 1, input_shape=(160, 320, 3)))
    net.add(Cropping2D(cropping=((72, 25), (0, 0))))

    # NVIDIA conv stack: (filters, kernel, strides), each followed by dropout.
    conv_specs = [
        (24, (5, 5), (2, 2)),
        (36, (5, 5), (2, 2)),
        (48, (5, 5), (2, 2)),
        (64, (3, 3), (1, 1)),
        (64, (3, 3), (1, 1)),
    ]
    for filters, kernel, strides in conv_specs:
        net.add(Conv2D(filters, kernel, strides=strides, activation='relu'))
        net.add(Dropout(0.3))

    net.add(Flatten())
    for units in (100, 50, 10):
        net.add(Dense(units, activation='relu'))
    net.add(Dense(1))  # steering angle regression head

    net.compile(loss='mse', optimizer=optimizer)
    return net
DFT_1d/constants.py | Chriscrosser3310/Kohn_Sham_DFT_1d | 2 | 12759360 | <filename>DFT_1d/constants.py<gh_stars>1-10
"""Default constants used in this library.
Exponential Coulomb interaction.
v(x) = amplitude * exp(-abs(x) * kappa)
See also ext_potentials.exp_hydrogenic. Further details in:
<NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>. One-dimensional mimicking of electronic structure:
The case for exponentials. Physical Review B,91(23):235141, 2015.
"""
# Amplitude A of the exponential interaction v(x) = A * exp(-|x| * kappa).
EXPONENTIAL_COULOMB_AMPLITUDE = 1.071295
# Decay constant kappa of the same interaction; value taken from the 1D
# exponential-interaction parametrization cited in the module docstring.
EXPONENTIAL_COULOMB_KAPPA = 1 / 2.385345
| 1.585938 | 2 |
src/foolscap/eventual.py | exarkun/foolscap | 29 | 12759361 | <filename>src/foolscap/eventual.py
# -*- test-case-name: foolscap.test.test_eventual -*-
from twisted.internet import reactor, defer
from twisted.python import log
class _SimpleCallQueue(object):
    """FIFO queue of callbacks, flushed one reactor turn at a time."""
    # XXX TODO: merge epsilon.cooperator in, and make this more complete.

    def __init__(self):
        self._events = []          # pending (cb, args, kwargs) tuples
        self._flushObservers = []  # Deferreds to fire when the queue drains
        self._timer = None         # pending reactor.callLater handle, if any

    def append(self, cb, args, kwargs):
        # Enqueue the call and schedule a turn unless one is already pending.
        self._events.append((cb, args, kwargs))
        if not self._timer:
            self._timer = reactor.callLater(0, self._turn)

    def _turn(self):
        self._timer = None
        # flush all the messages that are currently in the queue. If anything
        # gets added to the queue while we're doing this, those events will
        # be put off until the next turn.
        events, self._events = self._events, []
        for cb, args, kwargs in events:
            try:
                cb(*args, **kwargs)
            except:
                # Log and continue: one failing callback must not starve the
                # rest of the queue or the flush observers.
                log.err()
        if not self._events:
            observers, self._flushObservers = self._flushObservers, []
            for o in observers:
                o.callback(None)

    def flush(self):
        """Return a Deferred that will fire (with None) when the call queue
        is completely empty."""
        if not self._events:
            return defer.succeed(None)
        d = defer.Deferred()
        self._flushObservers.append(d)
        return d
# Module-level singleton backing eventually(), fireEventually() and
# flushEventualQueue(); all eventual-sends share one FIFO queue.
_theSimpleQueue = _SimpleCallQueue()
def eventually(cb, *args, **kwargs):
    """This is the eventual-send operation, used as a plan-coordination
    primitive. The callable will be invoked (with args and kwargs) in a later
    reactor turn. Doing 'eventually(a); eventually(b)' guarantees that a will
    be called before b.

    Any exceptions that occur in the callable will be logged with log.err().
    If you really want to ignore them, be sure to provide a callable that
    catches those exceptions.

    This function returns None. If you care to know when the callable was
    run, be sure to provide a callable that notifies somebody.
    """
    # FIFO ordering across all callers is guaranteed by the shared queue.
    _theSimpleQueue.append(cb, args, kwargs)
def fireEventually(value=None):
    """This returns a Deferred which will fire in a later reactor turn, after
    the current call stack has been completed, and after all other deferreds
    previously scheduled with callEventually().
    """
    d = defer.Deferred()
    # Piggy-back on the shared queue so ordering with eventually() holds.
    eventually(d.callback, value)
    return d
def flushEventualQueue(_ignored=None):
    """This returns a Deferred which fires when the eventual-send queue is
    finally empty. This is useful to wait upon as the last step of a Trial
    test method.
    """
    # _ignored lets this be chained directly as a Deferred callback.
    return _theSimpleQueue.flush()
| 2.265625 | 2 |
setup.py | ViTalityGH/KivMob | 0 | 12759362 | <gh_stars>0
from setuptools import setup
# Package metadata for KivMob (AdMob bindings for Kivy apps).
setup(name='kivmob',
      version='2.0',
      description='Provides AdMob support for Kivy.',
      url='http://github.com/MichaelStott/KivMob',
      author='<NAME>',
      license='MIT',
      py_modules=['kivmob'],  # single-module distribution, no package dir
      install_requires=[
          'kivy'
      ],
      zip_safe=False)
| 1.164063 | 1 |
liveplot.py | arpita739/Machine_Learning_Introductory | 0 | 12759363 | import matplotlib.animation as animation
from matplotlib import style
import matplotlib.pyplot as plt
# Global style and a single figure/axes pair reused by every animation frame.
plt.style.use('fivethirtyeight')
fig1=plt.figure()
ax1=fig1.add_subplot(1,1,1)
def animate(p):
    """FuncAnimation callback: re-read test1.txt and redraw the line plot.

    Parameters
    ----------
    p : int
        Frame counter supplied by FuncAnimation (unused).
    """
    # Bug fix: use a context manager so the file handle is closed every
    # frame; the previous open(...).read() relied on GC to release it.
    with open('test1.txt', 'r') as data_file:
        plot_data = data_file.read()
    line_data = plot_data.split('\n')
    x1 = []
    y1 = []
    for line in line_data:
        if len(line) > 1:
            x, y = line.split(',')
            # Bug fix: convert to numbers so matplotlib draws a numeric
            # axis instead of treating each sample as a categorical label.
            x1.append(float(x))
            y1.append(float(y))
    ax1.clear()
    ax1.plot(x1, y1)
# Re-run animate() every 500 ms; keep a module-level reference so the
# animation object is not garbage-collected while the window is open.
anime_data=animation.FuncAnimation(fig1,animate,interval=500)
plt.show()
| 3.78125 | 4 |
articlequality/extractors/tests/test_ptwiki.py | he7d3r/articlequality | 0 | 12759364 | <gh_stars>0
from collections import namedtuple
from mwtypes import Timestamp
from .. import ptwiki
def test_extractor():
    """End-to-end check of ptwiki.extract() over synthetic page histories.

    Each Page below is a minimal stand-in for an mwxml page (title,
    namespace, iterable of revisions); the parallel `expectations` list
    holds the (project, wp10, timestamp) label tuples extract() should emit,
    including correct handling of reverted assessments.
    """
    Revision = namedtuple("Revision", ['id', 'timestamp', 'sha1', 'text'])

    class Page:
        # Minimal duck-typed page: iterating it yields its revisions.
        def __init__(self, title, namespace, revisions):
            self.title = title
            self.namespace = namespace
            self.revisions = revisions

        def __iter__(self):
            return iter(self.revisions)

    pages = [
        Page("Page without reverts", 1, [
            Revision(
                1, Timestamp(1), "aaa",
                "{{Brasil/Marca|qualidade=1|importância=3}}\n" +
                "{{Geografia/Marca|qualidade=1|importância=?|rev=20110614}}"
            ),
            Revision(
                2, Timestamp(2), "bbb",
                "{{marca de projeto|rev=20120715|1|Brasil|3}}"
            ),
            Revision(
                3, Timestamp(3), "ccc",
                "{{Classificação/Anfíbios|qualidade=2|importância=1}}"
            ),
            Revision(
                4, Timestamp(4), "ddd",
                "{{Marca de projeto|qualidade=3|Biografias|4|rev=20140917}}"
            ),
            Revision(
                5, Timestamp(5), "eee",
                "{{Marca de projeto|qualidade=3||Biografias|2|rev=20151018}}"
            ),
            Revision(
                6, Timestamp(6), "fff",
                "{{Wikipedia:Projetos/Subdivisões do Brasil/Artigo membro" +
                "|qualidade=5|importância=2}}"
            ),
            Revision(
                7, Timestamp(7), "ggg",
                "{{Marca de projeto|AB}}"
            ),
            Revision(
                8, Timestamp(8), "hhh",
                "{{Marca de projeto|AD|Biografias|4}}"
            )
        ]),
        Page("Page with single revert", 1, [
            Revision(
                1, Timestamp(1), "aaa",
                "{{Marca de projeto|2}}"
            ),
            Revision(
                2, Timestamp(2), "bbb",
                "{{Marca de projeto|3}}"
            ),
            Revision(
                3, Timestamp(3), "ccc",
                "{{Marca de projeto|4}}"
            ),
            Revision(
                4, Timestamp(4), "aaa",
                "{{Marca de projeto|2}}"  # Vandal messing up the template
            ),
            Revision(
                5, Timestamp(5), "ccc",
                "{{Marca de projeto|4}}"  # Patroller reverting vandal
            )
        ]),
        Page("Page with overlaping reverts", 1, [
            Revision(
                1, Timestamp(1), "aaa",
                "{{Marca de projeto|1}}"
            ),
            Revision(
                2, Timestamp(2), "bbb",
                "{{Marca de projeto|2}}"
            ),
            Revision(
                3, Timestamp(3), "ccc",
                "{{Marca de projeto|3}}"
            ),
            Revision(
                4, Timestamp(4), "aaa",
                "{{Marca de projeto|1}}"  # Vandal messing up the template
            ),
            Revision(
                5, Timestamp(5), "ccc",
                "{{Marca de projeto|3}}"  # Rollback
            ),
            Revision(
                6, Timestamp(6), "bbb",
                "{{Marca de projeto|2}}"  # Active editor reevaluates the page
            ),
            Revision(
                7, Timestamp(7), "ddd",
                "{{Marca de projeto|4}}"  # Later on, the page is improved
            )
        ]),
        Page("Page with concentric reverts", 1, [
            Revision(
                1, Timestamp(1), "aaa",
                "{{Marca de projeto|1}}"
            ),
            Revision(
                2, Timestamp(2), "bbb",
                "{{Marca de projeto|2}}"
            ),
            Revision(
                3, Timestamp(3), "ccc",
                "{{Marca de projeto|3}}"
            ),
            Revision(
                4, Timestamp(4), "aaa",
                "{{Marca de projeto|1}}"  # Vandal messing up the template
            ),
            Revision(
                5, Timestamp(5), "ccc",
                "{{Marca de projeto|3}}"  # Rollback
            ),
            Revision(
                6, Timestamp(6), "ddd",
                "{{Marca de projeto|2}}<!-- re-evaluation -->"
            )
        ])
    ]
    # One expected label list per Page above, in the same order.
    expectations = [
        [
            ("marca de projeto", "1", Timestamp(1)),
            ("marca de projeto", "2", Timestamp(3)),
            ("marca de projeto", "3", Timestamp(5)),
            ("marca de projeto", "5", Timestamp(6)),
            ("marca de projeto", "6", Timestamp(8))
        ],
        [
            ("marca de projeto", "2", Timestamp(1)),
            ("marca de projeto", "3", Timestamp(2)),
            ("marca de projeto", "4", Timestamp(3))
        ],
        [
            ("marca de projeto", "1", Timestamp(1)),
            ("marca de projeto", "2", Timestamp(2)),
            ("marca de projeto", "4", Timestamp(7))
        ],
        [
            ("marca de projeto", "1", Timestamp(1)),
            ("marca de projeto", "2", Timestamp(2)),
            ("marca de projeto", "3", Timestamp(3)),
            ("marca de projeto", "2", Timestamp(6))
        ]
    ]
    for page, expected in zip(pages, expectations):
        observations = list(ptwiki.extract(page))
        lab_tuples = [(ob['project'], ob['wp10'], ob['timestamp'])
                      for ob in observations]
        assert lab_tuples == expected
| 2.578125 | 3 |
sconscontrib/SCons/Tool/erlang/erlang.py | dirkbaechle/scons-contrib | 0 | 12759365 | # -*- mode: python; coding:utf-8; -*-
from __future__ import print_function
# A SCons tool to enable compilation of Erlang in SCons.
#
# Copyright © 2005 <NAME> "Pupeno" <NAME>
# Copyright © 2009, 2011, 2017 <NAME>
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
# Original this code was licenced under GPLv2. This fork is relicenced under GPLv3 as is permitted.
from SCons.Builder import Builder
from SCons.Scanner import Scanner
import os
import subprocess
def generate(env):
    """Add Erlang support (Erlang and EDoc builders plus a .rel scanner) to a SCons environment.

    Fixes applied to the original:
    - ``env.has_key(...)`` replaced with ``"KEY" in env`` (has_key was removed in
      Python 3; the rest of the file already uses Python 3 ``print()`` calls).
    - A ``str.format`` call in relModules passed a single tuple to a
      two-placeholder format string and raised IndexError instead of printing.
    - The scanner subprocesses now use ``universal_newlines=True`` so their
      stdout/stderr are ``str`` (not ``bytes``) under Python 3.
    """
    env["ERLC"] = env.Detect("erlc") or "erlc"
    env["ERL"] = env.Detect("erl") or "erl"
    bugReport = '''
Please report this bug via the SCons Erlang tool project issue tracker on BitBucket ( cf. https://bitbucket.org/russel/scons_erlang)
or direct to Russel Winder <<EMAIL>>.'''
    def addTarget(target, source, env):
        """ Adds the targets (.beam, .script and/or .boot) according to source's extension, source's path and $OUTPUT. """
        # We should receive one and only one source (the builder below sets single_source = True).
        if len(source) > 1:
            print("Warning: unexpected internal situation.")
            print("This is a bug. {}".format(bugReport))
            print("addTarget received more than one source.")
            print("addTarget({}, {}, {})".format(source, target, env))
        sourceStr = str(source[0])
        # Tear apart the source path into directory-free name and extension.
        filename = os.path.basename(sourceStr)
        extension = os.path.splitext(filename)[1]
        basename = os.path.splitext(filename)[0]
        # Use $OUTPUT or the directory where the source lives as the prefix.
        prefix = outputDir(sourceStr, env)
        # Generate the target(s) according to the source extension.
        if extension == ".erl":
            # .erls generate a .beam.
            return ([prefix + basename + ".beam"], source)
        elif extension == ".rel":
            # .rels generate a .script and a .boot.
            return ([prefix + basename + ".script", prefix + basename + ".boot"], source)
        else:
            print("Warning: extension '{}' is unknown.".format(extension))
            print("If you feel this is a valid extension, then it might be a missing feature or a bug. {}".format(bugReport))
            print("addTarget({}, {}, {}).".format(target, source, env))
            return (target, source)
    def erlangGenerator(source, target, env, for_signature):
        """ Generate the erlc compilation command line. """
        # We should receive one and only one source.
        if len(source) > 1:
            print("Warning: unexpected internal situation.")
            print("This is a bug. {}".format(bugReport))
            print("erlangGenerator received more than one source.")
            print("erlangGenerator({}, {}, {}, {})".format(source, target, env, for_signature))
        source = str(source[0])
        # Start with the compiler.
        command = "$ERLC"
        # The output (-o) parameter.
        command += " -o " + outputDir(source, env)
        # Add the include/lib paths, normalising a single string to a list.
        if "ERLLIBPATH" in env:  # env.has_key() was removed in Python 3
            if not isinstance(env["ERLLIBPATH"], list):
                env["ERLLIBPATH"] = [env["ERLLIBPATH"]]
            for libpath in env["ERLLIBPATH"]:
                command += " -I " + libpath
        # At last, the source.
        return command + " " + source
    erlangBuilder = Builder(generator = erlangGenerator,
                            #action = "$ERLC -o $OUTPUT $SOURCE",
                            #suffix = [".beam", ".boot", ".script"],
                            src_suffix = ".erl",
                            emitter = addTarget,
                            single_source = True)
    env.Append(BUILDERS = {"Erlang" : erlangBuilder})
    env.Append(ENV = {"HOME" : os.environ["HOME"]}) # erlc needs $HOME.
    def outputDir(source, env):
        """ Given a source and its environment, return the output directory. """
        if "OUTPUT" in env:
            return env["OUTPUT"]
        else:
            return dirOf(source)
    def libpath(env):
        """ Return a list of the libpath or an empty list. """
        if "ERLLIBPATH" in env:
            if isinstance(env["ERLLIBPATH"], list):
                return env["ERLLIBPATH"]
            else:
                return [env["ERLLIBPATH"]]
        else:
            return []
    def dirOf(filename):
        """ Returns the relative directory of filename, always with a trailing slash. """
        directory = os.path.dirname(filename)
        if directory == "":
            return "./"
        else:
            return directory + "/"
    def relModules(node, env, path):
        """ Return a list of modules needed by a release (.rel) file. """
        # Run the function relApplications of erlangscanner to get the applications.
        command = "erl -noshell -s erlangscanner relApplications \"" + str(node) + "\" -s init stop"
        sp = subprocess.Popen(command,
                              shell = True,
                              stdin = None,
                              stdout = subprocess.PIPE,
                              stderr = subprocess.PIPE,
                              universal_newlines = True)  # str output on Python 3
        sp.wait()
        if sp.returncode != 0:
            print("Warning: The scanner failed to scan your files, dependencies won't be calculated.")
            # Fixed: the original passed one tuple to a two-placeholder format
            # string here, raising IndexError instead of printing the message.
            print("If your file '{}' is correctly (syntactically and semantically), this is a bug. {}".format(node, bugReport))
            print("Command: {}.".format(command))
            print("Return code: {}.".format(sp.returncode))
            print("Output: \n{}\n".format(sp.stdout.read().strip()))
            print("Error: \n{}\n".format(sp.stderr.read().strip()))
            return []
        # Get the applications defined in the .rel.
        appNames = sp.stdout.read().split()
        # Build the search path
        paths = set([outputDir(str(node), env)] + libpath(env))
        modules = []
        for path in paths:
            for appName in appNames:
                appFileName = path + appName + ".app"
                if os.access(appFileName, os.R_OK):
                    modules += appModules(appFileName, env, path)
        return modules
    def appModules(node, env, path):
        """ Return a list of modules needed by a application (.app) file. """
        # Run the function appModules of erlangscanner to get the modules.
        command = "erl -noshell -s erlangscanner appModules \"" + str(node) + "\" -s init stop"
        sp = subprocess.Popen(command,
                              shell = True,
                              stdin = None,
                              stdout = subprocess.PIPE,
                              stderr = subprocess.PIPE,
                              universal_newlines = True)  # str output on Python 3
        sp.wait()
        if sp.returncode != 0:
            print("Warning: The scanner failed to scan your files, dependencies won't be calculated.")
            print("If your file '{}' is correctly (syntactically and semantically), this is a bug. {}".format(node, bugReport))
            print("Command: {}.".format(command))
            print("Return code: {}.".format(sp.returncode))
            print("Output: \n{}\n".format(sp.stdout.read().strip()))
            print("Error: \n{}\n".format(sp.stderr.read().strip()))
            return []
        # Get the modules defined in the .app.
        moduleNames = sp.stdout.read().split()
        # Build the search path
        paths = set([outputDir(node, env)] + libpath(env))
        modules = []
        # When there are more than one application in a project, since we are scanning all paths against all files, we might end up with more dependencies that really exists. The worst is that we'll get recompilation of a file that didn't really needed it.
        for path in paths:
            for moduleName in moduleNames:
                modules.append(moduleName + ".beam")
        return modules
    relScanner = Scanner(function = relModules,
                         name = "RelScanner",
                         skeys = [".rel"],
                         recursive = False)
    env.Append(SCANNERS = relScanner)
    def edocGenerator(source, target, env, for_signature):
        """ Generate the command line that produces the EDoc documentation. """
        tdir = os.path.dirname(str(target[0])) + "/"
        command = "erl -noshell -run edoc_run files '[%s]' '[{dir, \"%s\"}]' -run init stop" % (
            ",".join(['"' + str(x) + '"' for x in source]),
            tdir)
        return command
    def documentTargets(target, source, env):
        """ Artificially create all targets that generating documentation will generate, so they can be cleaned up later. """
        tdir = os.path.dirname(str(target[0])) + "/"
        newTargets = [str(target[0])]
        # TODO: What happens if two different sources has the same name on different directories ?
        newTargets += [tdir + os.path.splitext(os.path.basename(filename))[0] + ".html"
                       for filename in map(str, source)]
        # NOTE(review): "overview-summary.html" is listed twice below -- the
        # duplicate is preserved for byte-compatible behaviour but is probably
        # a typo for another generated file (e.g. "index.html").
        newTargets += [tdir + filename for filename in
                       ["edoc-info", "modules-frame.html", "overview-summary.html", "overview-summary.html", "stylesheet.css", "packages-frame.html"]]
        #newSources = source + [tdir + "overview.edoc"]
        return (newTargets, source)
    def edocScanner(node, env, path):
        #print "edocScanner(%s, %s, %s)\n" % (node, env, path)
        # An overview.edoc next to the target is an implicit extra dependency.
        overview = os.path.dirname(str(node)) + "/overview.edoc"
        if os.path.exists(overview):
            return ["overview.edoc"]
        else:
            return []
    edocBuilder = Builder(generator = edocGenerator,
                          emitter = documentTargets,
                          target_scanner = Scanner(function=edocScanner))
    env.Append(BUILDERS = {"EDoc" : edocBuilder})
def exists(env):
    """ Tell SCons whether this tool can be used: truthy when erlc is found on the path. """
    return env.Detect(["erlc"])
| 2.25 | 2 |
locations/spiders/meijer.py | thismakessand/alltheplaces | 1 | 12759366 | # -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
STATES = ['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA',
'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD',
'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ',
'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY']
class MeijerSpider(scrapy.Spider):
    """Scrape Meijer store locations, one locator request per US state/district."""
    name = 'meijer'
    allowed_domains = ['www.meijer.com']
    def start_requests(self):
        # Query the store locator once for every state code in STATES.
        for state in STATES:
            yield scrapy.Request(
                'https://www.meijer.com/custserv/locate_store_by_state.cmd?form_state=locateStoreByStateForm&state={}'.format(state),
                callback = self.parse
            )
    def parse(self, response):
        """Yield one GeojsonPointItem per store found on a state results page."""
        # The store list is embedded as a single-quoted JS array literal inside
        # an inline <script>; the replace() calls below rewrite it into valid
        # JSON before parsing.
        stores = response.css('div.records_inner>script::text').extract_first()
        if stores:
            stores = stores.strip()[13:-1]  # drop the JS variable prefix and trailing ';'
            stores = stores.replace('\',\'', '","')
            stores = stores.replace('[\'', '["')
            stores = stores.replace('\']', '"]')
            stores = json.loads(stores)
            # Coordinates live in a different inline script; index [10] is
            # layout-dependent and brittle -- TODO confirm it still holds.
            loc_data = response.css('script').extract()[10]
            lat_matches = re.findall(r'(\"LAT\"), (\")([+-]?([0-9]*[.])?[0-9]+)(\")', loc_data)
            lon_matches = re.findall(r'(\"LNG\"), (\")([+-]?([0-9]*[.])?[0-9]+)(\")', loc_data)
            # n walks lat/lon matches in lockstep with the store records;
            # assumes both appear in the same order -- TODO confirm.
            n = 0
            for store in stores:
                # store[6] looks like "City, ST 12345"; split it into parts.
                address1 = store[6].split(',')
                city = address1[0].strip()
                address2 = address1[1].strip().split(' ')
                state = address2[0]
                postcode = address2[1]
                properties = {
                    'ref': store[0],
                    'name': store[1],
                    'phone': store[7],
                    'opening_hours': self.hours(store[8]),
                    'lat': lat_matches[n][2],   # group 3 of the regex is the numeric value
                    'lon': lon_matches[n][2],
                    'street': store[2],
                    'city': city,
                    'state': state,
                    'postcode': postcode
                }
                n = n + 1
                yield GeojsonPointItem(**properties)
    def hours(self, data):
        # Normalize Meijer's always-open phrasing to the conventional "24/7";
        # any other hours string is passed through unchanged.
        if data == 'Open 24 hrs a day, 364 days a year.':
            return '24/7'
        else :
            return data
| 2.5 | 2 |
virtual/bin/django-admin.py | LoiseMwarangu/Instagram | 0 | 12759367 | <gh_stars>0
#!/home/loise/instagram/virtual/bin/python3
# Thin virtualenv wrapper around Django's command-line management utility
# (same entry point as manage.py; relies on DJANGO_SETTINGS_MODULE being set).
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
| 1.15625 | 1 |
py_tests/test_form_filters.py | tiesjan/url-shortener | 0 | 12759368 | from url_shortener.form_filters import prepend_http, strip_value
def test_prepend_http():
    """prepend_http adds http:// only to scheme-less URLs; falsy inputs pass through."""
    already_ok = ['http://example.com/', 'https://example.com/', '']
    for value in already_ok:
        assert prepend_http(value) == value
    assert prepend_http('example.com/') == 'http://example.com/'
    assert prepend_http(None) is None
def test_strip_value():
    """strip_value trims surrounding whitespace; falsy inputs pass through."""
    cases = [(' example ', 'example'), ('example', 'example'), ('', '')]
    for given, expected in cases:
        assert strip_value(given) == expected
    assert strip_value(None) is None
| 2.609375 | 3 |
chat_example/server.py | h-hirokawa/swampdragon | 0 | 12759369 | <gh_stars>0
import os
from swampdragon.swampdragon_server import run_server
# Django settings must be resolvable before the server starts; setdefault
# keeps any value already exported in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "chat_example.settings")
# Start the swampdragon websocket/push server (blocks until shutdown).
run_server()
| 1.414063 | 1 |
lib/scheduler.py | ace-ecosystem/faqueue | 0 | 12759370 | import time, os, sys
import logging
import json
import importlib
import threading
import pprint
import datetime
import gc
import re
import shutil
from configparser import ConfigParser
from pymongo import MongoClient
from bson.objectid import ObjectId
from subprocess import Popen
from threading import Lock, Thread
from lib.constants import FA_HOME
from lib.saq.client import Alert, AlertSubmitException
log = logging.getLogger()
class Scheduler:
def __init__(self):
log.info('Initializing scheduler.')
self.config = ConfigParser()
self.config.read(os.path.join(FA_HOME, 'etc', 'config.ini'))
self.working = self.config.get('general', 'working_dir')
self.logging_dir = self.config.get('general', 'logging_dir')
self.json_results_file = os.path.join(self.working, 'scan_results.json')
self.running = True
self.update_minutes = self.config.getint('general', 'update_minutes')
# Thread to regularly update the master list of indicators and their final status
self.update_list_thread = None
self.master_indicator_dict = {}
self.master_indicator_lock = Lock()
self.has_initial_data = False
# Module data structures
self.modules = []
self.module_classes = {}
# Informational tracking
# Tracks types that don't have a module
self.orphaned_types = []
# Now we can initialize all our modules
modules_to_load = self.config.get('general', 'load_modules').split(',')
for m in modules_to_load:
mcfg = 'module_{}'.format(m)
if mcfg in self.config:
log.info('Loading module {}'.format(m))
try:
_class_name = self.config.get(mcfg, 'class_name')
_module_name = 'lib.modules.{}'.format(self.config.get(mcfg, 'module_name'))
log.debug('Loading module: {}'.format(_module_name))
_module = importlib.import_module(_module_name)
log.debug('Loading class: {}'.format(_class_name))
_class = getattr(_module, _class_name)
self.module_classes[_class_name] = _class
log.debug('Creating instance of module: {}'.format(_class_name))
_module_instance = _class()
self.modules.append(_module_instance)
except ImportError as e:
log.error('Error importing module. {}'.format(e))
except AttributeError as e:
log.error('Error importing class. {}'.format(e))
else:
log.error('Module {} configuration not found!'.format(m))
def start(self):
# Start a new thread to hold a master list of all 'New' indicators and update it regularly
# This updates the indicator list as new indicators are found in CRITS
self.update_list_thread = Thread(target = self.update_master_indicator_list, name='IndicatorMaster')
self.update_list_thread.start()
# Wait until our master_indicator_dict contains our first set of indicators
# A little hacky, but whatever
log.info('Obtaining initial indicator data.')
while not self.has_initial_data:
time.sleep(1)
log.info('Initial data obtained. Starting modules.')
# Get our indicator data and start the modules
for module in self.modules:
self.start_module(module)
# Finally, make sure our modules aren't broken
for module in self.modules:
status = module.get_module_status()
if not status:
log.error('module_status field not found in module data for {}. Shutting down...'.format(module.getName()))
self.running = False
break
if status == 'not initialized':
log.error('Module {} not initialized! Cannot update indicators!'.format(module.getName()))
self.running = False
break
# Start the main loop
sleeptime = 10
while self.running:
try:
with self.master_indicator_lock:
all_cids = list(self.master_indicator_dict.keys())
# looping through all crits ids
for cid in all_cids:
with self.master_indicator_lock:
status = self.master_indicator_dict[cid]['status']
ctype = self.master_indicator_dict[cid]['type']
value = self.master_indicator_dict[cid]['value']
# Process indicators by their status
# This may or may not update the overall status depending on whether
# all the modules have finished for that particular indicator
if status == 'New':
self._process_new_indicator(cid, ctype, value)
self._process_indicator_status(cid, ctype)
# If the status is something other than 'New', we update the indicator and remove it from
# all of the modules.
if status == 'In Progress' or status == 'Analyzed':
# Update the CRITS status
self.update_indicator_status(cid, status)
# Send alerts to CRITS
if status == 'In Progress':
self.send_alert_to_ace(cid)
with self.master_indicator_lock:
self.master_indicator_dict[cid]['submitted'] = True
# Remove the indicator from our master_indicator_dict
self.clean_master_and_modules()
collected = gc.collect()
if collected > 0:
log.debug('Garbage Collector: Collected {} objects.'.format(collected))
scount = 0
while scount < sleeptime and self.running:
time.sleep(1)
scount += 1
except KeyboardInterrupt:
log.info('Keyboard interrupt caught in scheduler. Terminating...')
self.stop()
    def _process_new_indicator(self, cid, ctype, value):
        """Register a 'New' indicator with every module that handles its type."""
        # True once at least one module accepts (or already holds) this indicator.
        has_module = False
        for module in self.modules:
            if ctype not in module.get_valid_indicator_types():
                continue
            if module.has_indicator(cid):
                # Already registered with this module; nothing to add.
                has_module = True
                continue
            log.info('Adding new indicator to all the modules: {}'.format(value))
            has_module = True
            # NOTE(review): argument order here is (cid, value, ctype) but
            # add_indicator_to_modules() calls add_indicator(cid, ctype, cvalue);
            # one of the two call sites is likely wrong -- verify the module API.
            module.add_indicator(cid, value, ctype)
        # Reporting that an indicator type does not have a module
        # This means we need to write a module
        if not has_module:
            if ctype not in self.orphaned_types:
                self.orphaned_types.append(ctype)
            log.warning('No module for indicator type {} and indicator {}'.format(ctype, cid))
def _process_indicator_status(self, cid, ctype):
# Now we will check the status of the indicators and see if we can update the overall
# status from 'New' to either 'In Progress' or 'Analyzed'
# Can we update this module
_can_update = True
# If only one module says "in progress", we set this to False
_is_in_progress = False
# We want to make sure at least one module can analyze an indicator before we set it to 'Analyzed'.
# Otherwise it stays 'New'. This flag tracks that.
_at_least_one_analyzed = False
for module in self.modules:
# Now we process the results data
module_data = module.get_indicator_data(cid)
if not module_data and ctype in module.get_valid_indicator_types():
log.warning('Module {} can handle indicator type '\
'{} for {}, but it is not in the module data.'.format(module.getName(), ctype, cid))
_can_update = False
continue
if not module_data:
continue
if module_data['status'] == 'New':
_can_update = False
if module_data['processing_results']:
# We are still processing results
_can_update = False
# One module says it is 'in progress', so that's what we mark it
if module_data['status'] == 'In Progress':
_is_in_progress = True
_is_analyzed = False
if module_data['status'] == 'Analyzed':
_at_least_one_analyzed = True
# Now we check the results!
# We set the ultimate result in master_indicator_dict, which is what the mongo update function will use
if _can_update:
if _is_in_progress:
log.debug('Setting indicator {} to "In Progress"'.format(cid))
with self.master_indicator_lock:
self.master_indicator_dict[cid]['status'] = 'In Progress'
elif _at_least_one_analyzed:
log.debug('Setting indicator {} to "Analyzed"'.format(cid))
with self.master_indicator_lock:
self.master_indicator_dict[cid]['status'] = 'Analyzed'
else:
log.debug('Not updating indicator {}'.format(cid))
def start_module(self, module):
module.start()
def stop(self):
self.running = False
for module in self.modules:
module.stop()
def get_all_new_indicators(self):
mongo_host = self.config.get('database', 'host')
mongo_port = int(self.config.get('database', 'port'))
try:
connection = MongoClient(host=mongo_host, port=mongo_port)
db = connection['crits']
whitelist_reg = re.compile('^whitelist:')
collection = db.indicators.find( { 'status' : 'New',
'confidence.rating' : { '$ne' :
'benign' },
'impact.rating' : { '$ne' :
'benign' },
'bucket_list' : { '$nin' : [
whitelist_reg ] }
} )
return list(collection)
except Exception as e:
sys.exit('Error retrieving data from mongo: {}'.format(str(e)))
finally:
connection.close()
def update_indicator_status(self, cid, status):
mongo_host = self.config.get('database', 'host')
mongo_port = int(self.config.get('database', 'port'))
try:
connection = MongoClient(host=mongo_host, port=mongo_port)
db = connection['crits']
# Make sure the indicator is still New first
log.debug('Ensuring indicator {} is still New'.format(cid))
indicator = db.indicators.find_one( { '_id' : ObjectId(cid) } )
if indicator['status'] != 'New':
log.warning('Tried to update indicator {} but status was not New. Status was {}'.format(cid, indicator['status']))
return False
# Now we can update the indicator
log.info('Updating indicator {} with status {}'.format(cid, status))
db.indicators.update_one( { '_id' : ObjectId(cid)}, { '$set' : { 'status' : status } } )
return True
except Exception as e:
log.error('Error retrieving data from mongo: {}'.format(e))
finally:
connection.close()
return False
def send_alert_to_ace(self, cid):
# Create the basic alert data
with self.master_indicator_lock:
ind_value = self.master_indicator_dict[cid]['value']
_results = { 'indicator' : { 'crits_id' : cid, 'value' : ind_value } }
_observables = []
_observables.append( { 'type' : 'indicator', 'value' : cid } )
total_hit_count = 0
_at_least_one_module = False
for module in self.modules:
if module.has_indicator(cid):
_at_least_one_module = True
module_results = module.get_indicator_data(cid)
_results[module.getName()] = module_results['results']
for fa_result in module_results['results']:
total_hit_count += int(fa_result['total_hits'])
if 'observables' in module_results:
obs_count = 0
for observable in module_results['observables']:
obs_count += 1
_observables.append( observable )
log.debug('Adding observable {} {}'.format(observable['type'], observable['value']))
if not _at_least_one_module:
log.warning('Tried to submit an alert to ACE, but no module has this indicator: {}'.format(cid))
return False
# Send results
log.info('Sending alert to ACE for indicator {}'.format(cid))
alert = Alert(
tool = 'faqueue',
tool_instance = 'nakylexsec101',
alert_type = 'faqueue',
desc = 'FA Queue - Indicator {} - {} Hits'.format(ind_value, total_hit_count),
event_time = datetime.datetime.now(),
details = _results
)
for obs in _observables:
alert.add_observable(obs['type'], obs['value'])
try:
alert.submit(self.config.get('general', 'ace_submit'), 'blah')
except Exception as e:
log.error('Error submitting alert to ACE: {}'.format(str(e)))
# This means we can remove the indicator from all the modules and our master list
self.master_indicator_dict[cid]['submitted'] = True
# Check for alerts that failed submission and attempt to resubmit them
failed_alerts_path = os.path.join(FA_HOME, '.saq_alerts')
if os.path.exists(failed_alerts_path):
for alert_dir in os.listdir(failed_alerts_path):
if not re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', alert_dir):
continue
data_file = os.path.join(failed_alerts_path, alert_dir, 'data.json')
alert_full_path = os.path.join(failed_alerts_path, alert_dir)
alert = Alert()
url = None
key = None
ok_to_delete = False
try:
url, key = alert.load_saved_alert(data_file)
alert.submit(url, key, save_on_fail=False)
ok_to_delete = True
except AlertSubmitException as e:
log.error("Failed to re-submit alert to ACE with the following error: {}".format(str(e)))
except Exception as e:
log.error("Unable to load alert from {0}: {1}".format(data_file, str(e)))
ok_to_delete = True
if ok_to_delete:
try:
shutil.rmtree(alert_full_path)
except Exception as e:
log.error("Unable to delete directory {0}: {1}".format(alert_full_path, str(e)))
def update_master_indicator_list(self):
# Update every X minutes
update_time = self.update_minutes * 60
last_update_time = time.time() - update_time - 1
while self.running:
current_time = time.time()
if current_time - last_update_time > update_time:
# log.debug('Updating the master indicator list.')
indicators = self.get_all_new_indicators()
new_indicator_count = 0
total_indicator_count = 0
with self.master_indicator_lock:
for indicator in indicators:
cid = str(indicator['_id'])
ctype = indicator['type']
cvalue = indicator['value']
if cid not in self.master_indicator_dict:
self.master_indicator_dict[cid] = { 'status' : 'New', 'type' : ctype, 'value' : cvalue, 'submitted' : False }
new_indicator_count += 1
self.add_indicator_to_modules(cid, ctype, cvalue)
total_indicator_count = len(self.master_indicator_dict.keys())
if new_indicator_count > 0:
log.info('Found {} new indicators to analyze.'.format(new_indicator_count))
log.info('Master list size is now {}'.format(total_indicator_count))
last_update_time = time.time()
self.has_initial_data = True
# log.debug('Master indicator list updated.')
time.sleep(1)
    def add_indicator_to_modules(self, cid, ctype, cvalue):
        """Register an indicator with every module that can handle its type."""
        for module in self.modules:
            if ctype in module.get_valid_indicator_types():
                log.debug('Adding indicator {} to module {}'.format(cid, module.getName()))
                # NOTE(review): argument order here is (cid, ctype, cvalue) but
                # _process_new_indicator() calls add_indicator(cid, value, ctype);
                # one of the two call sites is likely wrong -- verify module API.
                module.add_indicator(cid, ctype, cvalue)
# This should only be called after update_indicator()
# This removes any indicator from the list that has a status of 'In Progress' or 'Analyzed'
def clean_master_and_modules(self):
ids_to_remove = []
was_modified = False
total_indicator_count = 0
with self.master_indicator_lock:
for cid in self.master_indicator_dict.keys():
if self.master_indicator_dict[cid]['submitted']:
ids_to_remove.append(cid)
for cid in ids_to_remove:
self.master_indicator_dict.pop(cid)
was_modified = True
total_indicator_count = len(self.master_indicator_dict.keys())
# Now remove from the modules
for module in self.modules:
for cid in ids_to_remove:
module.remove_indicator(cid)
if was_modified:
log.info('Master list size is now {}'.format(total_indicator_count))
| 1.960938 | 2 |
python/geeksforgeeks/dp/catalan.py | othonreyes/code_problems | 0 | 12759371 | <gh_stars>0
def catalan_rec(n):
    """Return the n-th Catalan number by naive recursion (exponential time)."""
    if n <= 1:
        return 1
    # C(n) = sum over k of C(k) * C(n-1-k)
    return sum(catalan_rec(k) * catalan_rec(n - 1 - k) for k in range(n))
def catalan_rec_td(n, arr):
    """Return the n-th Catalan number, memoizing results in arr (arr[k] > 0 means computed).

    arr must have at least n + 1 slots, pre-filled with zeros.
    """
    if n <= 1:
        return 1
    if arr[n] > 0:
        return arr[n]
    total = 0
    for k in range(n):
        total += catalan_rec_td(k, arr) * catalan_rec_td(n - 1 - k, arr)
    arr[n] = total
    return total
def catalan_rec_bu(n):
    """Return the n-th Catalan number bottom-up in O(n^2) time.

    Fixes two bugs in the original: the inner recurrence added terms instead
    of multiplying them (C(i) = sum C(j) * C(i-j-1)), and catalan[1] was
    written unconditionally, raising IndexError for n == 0.
    """
    catalan = [0] * (n + 1)
    catalan[0] = 1
    if n > 0:
        catalan[1] = 1
    for i in range(2, n + 1):
        for j in range(i):
            catalan[i] += catalan[j] * catalan[i - j - 1]
    return catalan[n]
if __name__ == "__main__":
    # Smoke-test both implementations on the first ten Catalan numbers.
    for i in range(10):
        catalan_rec(i)
    for i in range(10):
        # Fixes two bugs: the memo needs i + 1 slots (indices 0..i), and the
        # original called the un-memoized catalan_rec with two arguments,
        # which raised TypeError.
        arr = [0] * (i + 1)
        catalan_rec_td(i, arr)
| 3.25 | 3 |
inferelator/tests/artifacts/test_stubs.py | meichenfang/inferelator | 25 | 12759372 | from inferelator import amusr_workflow
from inferelator import workflow
from inferelator.regression.base_regression import _RegressionWorkflowMixin
from inferelator.postprocessing.results_processor import ResultsProcessor
from inferelator.tests.artifacts.test_data import TestDataSingleCellLike, TEST_DATA, TEST_DATA_SPARSE
from inferelator.utils import InferelatorData
import pandas as pd
import numpy as np
class NoOutputRP(ResultsProcessor):
    """Results processor that computes network summaries without writing to disk."""
    def summarize_network(self, output_dir, gold_standard, priors):
        # output_dir is deliberately discarded (forced to None) so tests
        # never touch the filesystem.
        return super(NoOutputRP, self).summarize_network(None, gold_standard, priors)
# Factory method to spit out a puppet workflow
def create_puppet_workflow(regression_class=_RegressionWorkflowMixin,
                           base_class=workflow.WorkflowBase,
                           result_processor_class=NoOutputRP):
    """Build a 'puppet' workflow class that takes its data in memory.

    regression_class and base_class are combined by the inferelator factory.
    NOTE(review): result_processor_class is accepted but never used below --
    confirm whether it should be wired into the returned class.
    """
    puppet_parent = workflow._factory_build_inferelator(regression=regression_class, workflow=base_class)
    class PuppetClass(puppet_parent):
        """
        Standard workflow except it takes all the data as references to __init__ instead of as filenames on disk or
        as environment variables, and returns the model AUPR and edge counts without writing files (unless told to)
        """
        write_network = True          # network assembly still happens; output paths are suppressed
        network_file_name = None      # no network file written
        pr_curve_file_name = None     # no precision/recall curve file written
        initialize_mp = False         # skip multiprocessing setup in tests
        def __init__(self, data, prior_data, gs_data):
            # Data objects are passed by reference instead of loaded from disk.
            self.data = data
            self.priors_data = prior_data
            self.gold_standard = gs_data
            super(PuppetClass, self).__init__()
        def startup_run(self):
            # Skip all of the data loading
            self.process_priors_and_gold_standard()
        def create_output_dir(self, *args, **kwargs):
            # Never create output directories during tests.
            pass
    return PuppetClass
class TaskDataStub(amusr_workflow.create_task_data_class(workflow_class="single-cell")):
    """Single-cell task-data stub preloaded with the shared test dataset."""
    priors_data = TestDataSingleCellLike.priors_data
    tf_names = TestDataSingleCellLike.tf_names
    meta_data_task_column = "Condition"   # metadata column used to split tasks
    tasks_from_metadata = True
    task_name = "TestStub"
    task_workflow_type = "single-cell"
    def __init__(self, sparse=False):
        # sparse=True uses TEST_DATA_SPARSE (presumably a sparse-matrix
        # variant of TEST_DATA -- confirm against test_data module).
        self.data = TEST_DATA.copy() if not sparse else TEST_DATA_SPARSE.copy()
        super(TaskDataStub, self).__init__()
    def get_data(self):
        """Return the task list: split by metadata when configured, else just [self]."""
        if self.tasks_from_metadata:
            return self.separate_tasks_by_metadata()
        else:
            return [self]
class FakeDRD:
    """Stand-in for a design/response driver: echoes its input, validates everything."""

    def __init__(self, *args, **kwargs):
        # Accept and ignore whatever the caller passes.
        pass

    def run(self, expr, meta):
        # Echo the expression object three times, matching the real interface's
        # (design, response, half-tau response) triple shape.
        return expr, expr, expr

    def validate_run(self, meta):
        # Every metadata frame is considered valid.
        return True
class FakeWriter(object):
    """No-op replacement for a csv.writer-like object."""

    def writerow(self, *args, **kwargs):
        # Discard the row entirely; return None like csv.writer.writerow.
        return None
class FakeRegressionMixin(_RegressionWorkflowMixin):
    """Regression mixin stub returning fixed 2x2 beta / rescaled-beta matrices."""

    def run_regression(self):
        gene_index = ['gene1', 'gene2']
        tf_columns = ['tf1', 'tf2']
        beta = [pd.DataFrame(np.array([[0, 1], [0.5, 0.05]]),
                             index=gene_index, columns=tf_columns)]
        beta_resc = [pd.DataFrame(np.array([[0, 1], [1, 0.05]]),
                                  index=gene_index, columns=tf_columns)]
        return beta, beta_resc

    def run_bootstrap(self, bootstrap):
        # Every bootstrap "succeeds".
        return True
class FakeResultProcessor:
    """Result-processor stub: swallows all input and reports a fixed summary."""

    # Populated by the real processor; the stub never sets it.
    network_data = None

    def __init__(self, *args, **kwargs):
        # Ignore whatever configuration the caller provides.
        pass

    def summarize_network(self, *args, **kwargs):
        # Fixed placeholder triple regardless of input.
        return (1, 0, 0)
| 2.171875 | 2 |
Python 3 - Mundo 2/Desafio das Aulas - Mundo 2/Desafio63-SequenciaDeFibonacciV1.py | Pedro0901/python3-curso-em-video | 1 | 12759373 | <filename>Python 3 - Mundo 2/Desafio das Aulas - Mundo 2/Desafio63-SequenciaDeFibonacciV1.py
'''
Exercício Python 63: Escreva um programa que leia um número N inteiro qualquer e
mostre na tela os N primeiros elementos de uma Sequência de Fibonacci. Exemplo:
0 – 1 – 1 – 2 – 3 – 5 – 8
'''
print('------------------------------')
print('Sequencia de Fibonacci')
print('------------------------------')
# Number of Fibonacci terms to display.
n = int(input('Quantos termos você quer mostrar? '))
t1 = 0  # current term
t2 = 1  # next term
print('~'*30)
# The first two terms are printed up front, so the loop counter starts at 3.
print('{} → {}'.format(t1, t2), end='')
cont = 3
while cont <= n:
    t3 = t1 + t2  # next term in the sequence
    print(' → {}'.format(t3), end='')
    # Slide the two-term window forward.
    t1 = t2
    t2 = t3
    cont += 1
print(' → FIM')
print('~'*30)
| 4.34375 | 4 |
blog/blog/urls.py | thomaszdxsn/blog | 4 | 12759374 | """blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.views.decorators.cache import cache_page
from django.contrib.sitemaps.views import sitemap
from post import views as post_views
from post.sitemaps import PostSitemap
from post.feeds import LatestPostFeed
from core import views as core_views
# Sitemap registry passed to django.contrib.sitemaps' view below.
sitemaps = {
    "posts": PostSitemap
}
urlpatterns = [
    # Honeypot at the default admin URL; the real admin lives at fake-admin/.
    url(r"^admin/",
        include("admin_honeypot.urls", namespace="admin_honeypot")),
    url(r'^fake-admin/', admin.site.urls),
    url(r"^$",
        post_views.homepage_view,
        name="homepage"),
    url(r"^query/$",
        post_views.post_search_view,
        name="post_search"),
    url(r"^post/(?P<slug>[\w-]+)/$",
        post_views.post_detail_view,
        name="post_detail"),
    # Tag listing is cached for one hour.
    url(r"^tag/(?P<tag_slug>[\w-]+)$",
        cache_page(60 * 60)(post_views.PostListByTag.as_view()),
        name="post_tagged"),
    url(r"^all/$",
        post_views.PostListView.as_view(),
        name="post_list"),
    # Placeholder image generator, sized by the URL.
    url(r"^image/(?P<width>\d+)/(?P<height>\d+)/$",
        core_views.placeholder_view,
        name="placeholder"),
    url(r"^markdown/", include("django_markdown.urls")),
    url(r"^sitemap\.xml$", sitemap, {"sitemaps": sitemaps},
        name="django.contrib.sitemaps.views.sitemap"),
    url(r"^feed/$",
        LatestPostFeed(),
        name="post_feed"),
    url(r"^api/",
        include("post.api.urls", namespace="api")),
    url(r'^grappelli/', include('grappelli.urls')),
    # url(r'^silk/', include('silk.urls', namespace='silk')) # silk performance profiling (disabled)
]
# Serve uploaded media in development.
urlpatterns += static(settings.MEDIA_URL,
                      document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # Debug toolbar only when DEBUG is on; prepended so it wins URL resolution.
    import debug_toolbar
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
annotation_dep.py | NicholasRoberts/healthhack2015_genome_browser | 1 | 12759375 | <gh_stars>1-10
from subprocess import Popen
import sys
def main(genome_name, annotation_name, bed_file, ra_filename):
    """Load a BED annotation track into a UCSC-browser genome database.

    Runs the kent-tools commands hgLoadBed, hgTrackDb and hgFindSpec in
    sequence, waiting for each to finish.
    """
    # The original passed each whole command line as a single string without
    # shell=True, so exec() searched for an executable literally named
    # "hgLoadBed <genome> ..." and always failed.  Argument lists fix that
    # and avoid any shell-quoting issues with the user-supplied names.
    p = Popen(['hgLoadBed', genome_name, annotation_name, bed_file])
    p.communicate()
    p = Popen(['hgTrackDb', '-raName={}'.format(ra_filename), '.', genome_name,
               'trackDb', '/home/ubuntu/src/kent/src/hg/lib/trackDb.sql'])
    p.communicate()
    # The original hard-coded 'hydei.ra' and 'droHyd1' here, silently ignoring
    # the function's parameters; use them so any genome works.
    p = Popen(['hgFindSpec', '-raName={}'.format(ra_filename), '.', genome_name,
               'hgFindSpec', '/home/ubuntu/src/kent/src/hg/lib/hgFindSpec.sql'])
    p.communicate()
if __name__ == '__main__':
    # CLI: annotation_dep.py <genome_name> <annotation_name> <bed_file> <ra_filename>
    main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
todo/menu_utils.py | nightKrwler/TODO-CLI | 0 | 12759376 | import curses
import colorama
from colorama import Fore, Back, Style
colorama.init()
class menuInterface:
    """Curses-based checklist UI over a list of {"task": str, "status": bool} dicts."""
    def __init__(self, menu):
        # menu: list of row dicts; mutated in place as items are toggled.
        self.info = menu
    def print_menu(self,stdscr, selected_row_idx):
        """Redraw the whole list, highlighting the row at selected_row_idx."""
        # NOTE(review): 'list' shadows the builtin; kept byte-identical here.
        list = self.info
        stdscr.clear()
        h, w = stdscr.getmaxyx()
        for idx, row in enumerate(list):
            char = ' ✓ ' if row["status"] else ' x '
            text = '{id} {char} {task}'
            # Centering uses the *template* length, not the rendered text --
            # rows are therefore only approximately centered.
            x = w//2 - len(text)//2
            y = h//2 - len(list)//2 + idx
            # Color pair 1 (green) for done items, 2 (red) for pending ones.
            pair = 2
            if row["status"]:
                pair = 1
            if idx == selected_row_idx:
                # Selected row is drawn with its status color turned on.
                stdscr.attron(curses.color_pair(pair))
                stdscr.addstr(y, x, text.format(
                    id=idx,
                    char = char,
                    task=row["task"],
                ))
                stdscr.attroff(curses.color_pair(pair))
            else:
                stdscr.addstr(y, x, text.format(
                    id=idx,
                    char = char,
                    task=row["task"],
                ))
        # Help text below the list, in color pair 3 (white on black).
        x = 0
        y = h//2 + len(list)//2 +1
        text = "↑ or ↓ to scroll the list \n ↵ (Enter) to toggle" +"\n" +"q to quit"
        stdscr.attron(curses.color_pair(3))
        stdscr.addstr(y,x,text,)
        # NOTE(review): this second attron() was probably meant to be attroff().
        stdscr.attron(curses.color_pair(3))
        stdscr.refresh()
    def update(self,stdscr,rowid,status):
        """Flip the status of row rowid and redraw with it selected."""
        self.info[rowid]["status"] = not status
        self.print_menu(stdscr, rowid)
        return
    def main(self,stdscr):
        """Event loop (pass to curses.wrapper); returns the final list on 'q'."""
        menu = self.info
        # turn off cursor blinking
        curses.curs_set(0)
        # color schemes
        curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_GREEN)
        curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_RED)
        curses.init_pair(3, curses.COLOR_WHITE,curses.COLOR_BLACK)
        # specify the current selected row
        current_row = 0
        # print the menu
        self.print_menu(stdscr, current_row)
        while 1:
            key = stdscr.getch()
            if key == curses.KEY_UP and current_row > 0:
                current_row -= 1
            elif key == curses.KEY_DOWN and current_row < len(menu)-1:
                current_row += 1
            elif key == curses.KEY_ENTER or key in [10, 13]:
                # Enter (or CR/LF) toggles the highlighted item.
                s = menu[current_row]["status"]
                self.update(stdscr,current_row,s)
            # 'q' exits the loop and hands the (mutated) list back.
            elif key==ord("q"):
                return self.info
            self.print_menu(stdscr, current_row)
| 3.09375 | 3 |
algorithms/sorting/insertion/python/insertion_sort.py | manojbhargavan/Data-Structures | 0 | 12759377 | <gh_stars>0
import sys
def sort(inputArray, reversed=False):
    """In-place insertion sort; returns the same list object.

    With ``reversed=True`` the list is sorted in descending order.  (The
    parameter name shadows the builtin ``reversed()``; it is kept for
    interface compatibility.)
    """
    for i in range(1, len(inputArray)):
        key = inputArray[i]
        j = i - 1
        # Check the bound BEFORE indexing: the original evaluated
        # inputArray[j] with j == -1, silently comparing against the last
        # element via Python's negative indexing before the bound test.
        while j >= 0 and ((not reversed and key < inputArray[j])
                          or (reversed and key > inputArray[j])):
            inputArray[j + 1] = inputArray[j]
            j -= 1
        inputArray[j + 1] = key
    return inputArray
# sort([5, 3, 6, 9, 10, 45, 23, 12, 4, 2])
if __name__ == "__main__":
    # CLI usage: insertion_sort.py "5,3,1" True|False
    # First argument: comma-separated integers; second: "True" for descending.
    raw_values = sys.argv[1].split(',')
    reverse_flag = sys.argv[2]
    numbers = [int(value) for value in raw_values]
    print(sort(numbers, reverse_flag == "True"))
| 3.8125 | 4 |
workspace/pynb_dag_runner/tests/tasks/jupytext_test_notebooks/notebook_ok.py | pynb-dag-runner/pynb-dag-runner | 4 | 12759378 | # %%
P = {"task.variable_a": "value-used-during-interactive-development"}
# %% tags=["parameters"]
# ---- During automated runs parameters will be injected in this cell ---
# %%
# -----------------------------------------------------------------------
# %%
# Example comment
print(1 + 12 + 123)
# %%
print(f"""variable_a={P["task.variable_a"]}""")
# %%
| 2.21875 | 2 |
StreetPatch.API/Services/PythonScripts/calculate_similarity.py | TheRandomTroll/ucn-3rd-semester-project | 0 | 12759379 | <filename>StreetPatch.API/Services/PythonScripts/calculate_similarity.py<gh_stars>0
import spacy
import sys

# CLI arguments: two report titles followed by two report descriptions.
title1 = sys.argv[1]
title2 = sys.argv[2]
description1 = sys.argv[3]
description2 = sys.argv[4]

# nlp = spacy.load("en_core_web_lg")
# The medium English model trades a little accuracy for a much smaller
# download than the large model commented out above.
nlp = spacy.load("en_core_web_md")

# Print the semantic similarity of the titles, then of the descriptions,
# one score per line — presumably parsed by the calling service (TODO confirm).
doc1 = nlp(title1)
doc2 = nlp(title2)
print(doc1.similarity(doc2))

doc1 = nlp(description1)
doc2 = nlp(description2)
print(doc1.similarity(doc2))
scripts/generate_report.py | 3ideas/config_tracker | 0 | 12759380 | <gh_stars>0
#!/usr/bin/env python2.7
# AUTOGENERATED! DO NOT EDIT! File to edit: 04_generate_report.ipynb (unless otherwise specified).
__all__ = ['get_output_from_cmd', 'run_subprocess', 'verbose', 'show_progress', 'GitRepositoryReader',
'parse_commitstr_to_datetime', 'copyfile_fullpath', 'mkdir_for_fullpath', 'remove_files_from_output_branch',
'remove_files_from_output', 'get_report_name_refs', 'make_output_directories', 'get_relative_path',
'copy_style_and_bootstrap', 'bootstrap_navbar', 'header', 'footer', 'page', 'diff2html_template',
'default_footer', 'generate_difference_report_page', 'BranchComparisonReport',
'FileChangeHistoryReportForBranch']
# Cell
#C--------------------------------------------------------------------------
#C Program : initalise_repository.py
#C
#C Generates a difference report between two branches (servers) in the repository
#C
#C Copyright: <EMAIL>
#C
#C See https://github.com/3ideas/config_tracker
#C--------------------------------------------------------------------------
import argparse
from os import listdir
from os.path import isfile, join,exists
import os
import sys
import datetime
import shutil
from subprocess import call, check_output
# Cell
verbose = False        # when True, echo every command before running it
show_progress = True   # when True, print progress while generating reports

def get_output_from_cmd(cmd):
    """Run *cmd* through the shell and return its stdout as a str.

    Returns None (after printing the exception) if the command fails or
    exits non-zero.
    """
    try:
        if verbose:
            print('RUN: %s' % cmd)
        output = check_output(cmd, shell=True)
        return output.decode("utf-8")
    except Exception as e:
        print(e)
        return None

def run_subprocess(command):
    """Run *command* through the shell and return its exit status.

    Non-zero statuses and signals are reported on stderr.  Returns None if
    the shell could not be executed at all (OSError).
    """
    # BUG FIX: initialise retcode so the OSError path below cannot hit a
    # NameError at the final `return retcode`.
    retcode = None
    try:
        retcode = call(command, shell=True)
        if verbose:
            print("Command: %s\nReturned: %s" % (command, retcode))
        if retcode < 0:
            sys.stderr.write("Command: %s, was terminated by signal: %s\n" % (command, retcode))
        elif retcode > 0:
            sys.stderr.write("Command: %s, returned: %s \n" % (command, retcode))
    except OSError as e:
        sys.stderr.write("Execution failed: %s \n" % e)
    return retcode
# Cell
############################################################################################
#
# Git functions
#
############################################################################################
class GitRepositoryReader:
    """Read-only helper around ``git`` CLI commands for one repository.

    ``branch1``/``branch2`` may be branch names or commit hashes.  Files
    extracted from the repository are cached under
    ``<output_dir>/original_files/<ref>/``.
    """

    def __init__(self, repository, output_dir, branch1, branch2=None):
        self.repository = repository
        self.branch1 = branch1
        self.branch2 = branch2
        self.output_dir = output_dir

    def run_git_command(self, command, splitlines=True):
        """Run ``git <command>`` inside the repository.

        Returns the output split into lines (or the raw string when
        *splitlines* is False), or None if the command failed.
        """
        result = get_output_from_cmd('cd %s && git %s' % (self.repository, command))
        if result is not None and splitlines:
            return result.splitlines()
        return result

    def set_branch1(self, branch1):
        """Re-point this reader at a different primary branch/commit."""
        self.branch1 = branch1

    def get_list_of_files_differences_between_branches(self):
        """Return the list of file paths that differ between branch1 and branch2."""
        file_list = self.run_git_command("diff --name-only %s %s" % (self.branch1, self.branch2))
        return file_list

    def get_list_of_file_changes(self, filename):
        """Return the change history of *filename* on branch1.

        Each entry is a [short_hash, first_word_of_subject, datetime] list.
        NOTE(review): the first word of the commit subject is assumed to be a
        YYYYMMDD date string (see parse_commitstr_to_datetime) — confirm this
        matches the commit convention used by the capture scripts.
        """
        change_list = []
        format_str = '%h %s'
        lines = self.run_git_command('log %s --pretty=format:"%s" -- %s' % (self.branch1, format_str, filename))
        for line in lines:
            line = line.strip()
            fields = line.split()
            date = parse_commitstr_to_datetime(fields[1])
            change_list.append([fields[0], fields[1], date])
        return change_list  # TODO make this a structure!

    def get_filename_ref_for(self, git_reference, filename):
        """Given a repository reference (branch/commit), return the fullpath to the file in the output_dir."""
        part1 = os.path.join(self.output_dir, 'original_files')
        part2 = os.path.join(part1, git_reference)
        filename_output_path = os.path.join(part2, filename)
        return filename_output_path

    def get_all_filenames_in_branch(self, branch=None):
        """Return all filenames under database/ or files/ (plus last_updated.md) in a branch/commit."""
        if branch is None:
            branch = self.branch1
        lines = self.run_git_command('ls-tree -r --name-only %s' % (branch))
        filenames = []
        for line in lines:
            if line.startswith('database') or line.startswith('files') or line == 'last_updated.md':
                filenames.append(line)
        return filenames

    def copy_file_to_output(self, filename, git_reference=None):
        """Extract *filename* at *git_reference* into the output cache.

        Uses ``git show`` so no checkout is needed.  Results are cached:
        a file already extracted for the same reference is not fetched
        again.  Returns the full path of the cached copy, or None if the
        file could not be retrieved.
        """
        if git_reference is None:
            # BUG FIX: previously this fell back to the *global* `branch1`,
            # which only exists when the module runs as a script; use this
            # reader's own branch instead.
            git_reference = self.branch1
        filename_output_location = self.get_filename_ref_for(git_reference, filename)
        # If the file already exists in the output dir do not copy it again;
        # this saves fetching the same file over and over.
        if os.path.isfile(filename_output_location):
            return filename_output_location
        if not mkdir_for_fullpath(filename_output_location):
            return None
        # e.g. git show d08b328:database/ALARM_DEFINITION.csv
        file_contents = self.run_git_command('show %s:%s' % (git_reference, filename), splitlines=False)
        if file_contents is None:
            return None
        hs = open(filename_output_location, 'w')
        if len(file_contents) != 0:
            hs.write(file_contents)
        hs.close()
        return filename_output_location
# Cell
def parse_commitstr_to_datetime(string):
    """
    Interpret the first eight characters of *string* as a YYYYMMDD date.

    Returns a datetime, or None when the string is too short or the date
    part is not numeric/valid.
    TODO: use the commit date itself!
    """
    if len(string) < 8:
        return None
    try:
        parsed = datetime.datetime(
            year=int(string[0:4]),
            month=int(string[4:6]),
            day=int(string[6:8]),
        )
    except:
        return None
    return parsed
############################################################################################
#
# file functions
#
############################################################################################
def copyfile_fullpath(file_name, dest_dir):
    """Copy *file_name* into *dest_dir*, recreating its directory structure.

    An absolute source path has its leading '/' stripped so the copy lands
    under *dest_dir* rather than at the filesystem root.  Failures are
    reported on stderr and the file is skipped; nothing is raised.
    """
    # Strip a leading '/' once, then derive both the destination directory
    # and the full destination file name from the same relative path
    # (the original stripped the slash in two separate places).
    relative_name = file_name[1:] if file_name.startswith('/') else file_name
    dir_to_create = os.path.join(dest_dir, os.path.dirname(relative_name))
    dest_full_name = os.path.join(dest_dir, relative_name)
    if not os.path.isdir(dir_to_create):
        try:
            os.makedirs(dir_to_create)
        except OSError as error:
            sys.stderr.write("Directory '%s' cannot be created, skipping file: %s\n" % (error, file_name))
            return
    try:
        shutil.copyfile(file_name, dest_full_name)
    except Exception as e:
        sys.stderr.write("File cannot be copied to dest : %s, skipping file: %s\n" % (e, file_name))
def mkdir_for_fullpath(filename_output_location):
    """Ensure the directory part of *filename_output_location* exists.

    Returns True when the directory exists (or was created), False when it
    could not be created (the error is written to stderr).
    """
    directory, _ = os.path.split(filename_output_location)
    if os.path.isdir(directory):
        return True
    try:
        os.makedirs(directory)
    except OSError as error:
        sys.stderr.write("Directory '%s' cannot be created, skipping file: %s\n" % (error, directory))
        return False
    return True
#def copy_branch_to_output(repository, output_dir, branch, file_list):
# """
# Copy the files to the output directory
# """
# cwd = os.getcwd()
# os.chdir(repository)
# # checkout the branch
# switch_to_branch(repository, branch)
#
# dest_dir = os.path.join(output_dir,branch)
# if not os.path.isdir(dest_dir):
# os.mkdir(dest_dir)
# # copy the files
# for file_name in file_list:
# copyfile_fullpath(file_name,dest_dir)
# os.chdir(cwd)
def remove_files_from_output_branch(output_dir, branch, files):
    """Report which branch output directory would be removed.

    Deletion itself is currently disabled (the rmtree call is commented
    out), so this is a dry run only; *files* is accepted but unused.
    """
    target = os.path.join(output_dir, branch)
    print('Would delete: %s' % target)
    #shutil.rmtree(target)
def remove_files_from_output(output_dir, branch1, branch2, file_list):
    """Dry-run removal of both branches' output directories (see above)."""
    for branch in (branch1, branch2):
        remove_files_from_output_branch(output_dir, branch, file_list)
#def copy_files_to_output(repository,output_dir, branch1,branch2, files_with_differences):
# """
# Copy the files to the output directory
# """
# copy_branch_to_output(repository,output_dir,branch1,files_with_differences)
# copy_branch_to_output(repository,output_dir,branch2,files_with_differences)
#def copy_all_file_versions_to_output(repository,output_dir,commit_log,filename):
# """ copy all versions of a file to the output"""
#
# for commit_entry in commit_log:
# commit_hash=commit_entry[0]
# git_get_file(repository, commit_hash,filename,output_dir )
def get_report_name_refs(repository_filename,
                         change_ref=None,
                         report_base='',
                         output_dir=None,
                         add_report_base_to_link=False,
                         link_ref_strip_dir=False):
    """Derive the three name forms for a repository file's report page.

    Returns a ``(visible_name, link_ref, filename)`` tuple:
    the human-visible label, the href used in index pages, and the path the
    report should be written under.  Works for non-repository names too.
    """
    visible_name = repository_filename
    filename = repository_filename
    if repository_filename.startswith("database/"):
        # Database tables are shown by bare table name, without .csv.
        stem = repository_filename.replace("database/", "").replace(".csv", "")
        visible_name = stem
        filename = repository_filename.replace(".csv", "")
    if repository_filename.startswith("files/"):
        # Config files are shown with a leading '/' like a filesystem path.
        visible_name = '/' + repository_filename.replace("files/", "")
    # The link either keeps the directory part or is just the basename.
    link_ref = os.path.split(filename)[1] if link_ref_strip_dir else filename
    if change_ref is not None:
        filename = filename + '_' + change_ref
        link_ref = link_ref + '_' + change_ref
    link_ref = link_ref + '.html'
    filename = filename + '.html'
    if add_report_base_to_link:
        link_ref = os.path.join(report_base, link_ref)
    filename = os.path.join(report_base, filename)
    if output_dir is not None:
        filename = os.path.join(output_dir, filename)
    return (visible_name, link_ref, filename)
def make_output_directories(output_dir, base):
    """Create the report output tree: database/, files/ and original_files/.

    *base* is accepted for interface compatibility but is not used.
    """
    subdirs = [os.path.join(output_dir, name)
               for name in ('database', 'files', 'original_files')]
    print('database output dir: %s' % subdirs[0])
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for directory in subdirs:
        if not os.path.isdir(directory):
            os.mkdir(directory)
# Cell
def get_relative_path(repository_filename):
    """Return the '../' chain that climbs from *repository_filename* back
    to the base directory (one '../' per path component).

    NOTE(review): only safe for relative paths — an absolute path would
    never reach '' via os.path.split and would loop forever (same as the
    original implementation).
    """
    depth = 0
    remaining = repository_filename
    while remaining != '':
        depth += 1
        remaining, _ = os.path.split(remaining)
    return '../' * depth
def copy_style_and_bootstrap(src, dst):
    """Copy the stylesheet assets from *src* into ``<dst>/styles``.

    No-op if ``<dst>/styles`` already exists, so repeated calls are safe.
    """
    styles_dir = os.path.join(dst, 'styles')
    if os.path.isdir(styles_dir):
        return
    shutil.copytree(src, styles_dir)
def bootstrap_navbar(relative_path='',title='',description=''):
    """Return the Bootstrap navbar HTML with the logo, *title* and *description*.

    *relative_path* is the '../' prefix that reaches the styles/ directory
    from the page being generated (see get_relative_path).
    """
    return """<!-- Image and text -->
<nav class="navbar navbar-light bg-light">
  <a class="navbar-brand" href="#">
    <img src="%sstyles/logo.svg" height="30" class="d-inline-block align-top" alt="">
    %s : %s
  </a>
</nav>""" % (relative_path,title,description)
def header(relative_path='', description=''):
    """Return the HTML prologue (doctype, <head> with title and stylesheets).

    *relative_path* is the '../' prefix that reaches the styles/ directory
    from the page being generated.
    """
    lines = [
        '<!doctype html>',
        '<html lang="en">',
        '<head>',
        '<title>%s</title>' % description,
        '<link rel="stylesheet" href="%sstyles/bootstrap-4.0.0-dist/css/bootstrap.css">' % relative_path,
        '<link rel="stylesheet" href="%sstyles/style.css">' % relative_path,
        '</head>',
    ]
    return '\n'.join(lines) + '\n'
# Default attribution line shown at the bottom of every generated page.
default_footer='<a href="https://github.com/3ideas/config_tracker">Config Tracker</a> written by <a href="https://3ideas.co.uk/">3ideas.co.uk</a>'

def footer(footer_text=default_footer):
    """Return the shared page footer HTML; *footer_text* may contain markup."""
    return """<footer class="footer">
      <div class="container">
        <span class="text-muted">%s</span>
      </div>
    </footer>""" % footer_text
def page(relative_path='', description='', content='', title='', footer_text=default_footer):
    """Assemble a complete HTML page: header, navbar, *content*, footer."""
    parts = [
        header(relative_path=relative_path, description=description),
        '<body>\n',
        bootstrap_navbar(relative_path=relative_path, title=title, description=description),
        content,
        footer(footer_text),
        '</body>\n',
        '</html>\n',
    ]
    return ''.join(parts)
def diff2html_template(relative_path='',description='',title='',footer_text=default_footer):
    """Return the HTML template fed to the external `diff2html` CLI.

    The `<!--diff2html-*-->` placeholder comments are substituted by the
    diff2html tool itself; the empty #diff div is where the rendered diff
    lands.
    """
    p = header(relative_path=relative_path,description=description)
    # TODO add relative local stylesheet !!! use relative path!
    p += """<link rel="stylesheet" href="%sstyles/diff2html.css" />
<!--diff2html-css-->
<!--diff2html-js-ui-->
<script>
  document.addEventListener('DOMContentLoaded', () => {
    const targetElement = document.getElementById('diff');
    const diff2htmlUi = new Diff2HtmlUI(targetElement);
    //diff2html-fileListToggle
    //diff2html-synchronisedScroll
    //diff2html-highlightCode
  });
</script>""" % relative_path
    p += '<body>\n'
    p += bootstrap_navbar(relative_path=relative_path,title=title,description=description)
    p += """<div id="diff">
    <!--diff2html-diff-->
</div>"""
    p += footer(footer_text)
    p += '</body>\n'
    p += '</html>\n'
    return p
# Cell
############################################################################################
#
# report functions
#
############################################################################################
#
# General difference function ...
# used for all the report differences
#
def generate_difference_report_page(r,repository_filename,branch1, branch2,output_file_name , description,title):
    """ generate a difference report for `file`.
    This function assumes the files have already been copied to each of the branch sub directories
    This will work for comparing between systems or comparing history of the file

    NOTE(review): relies on the external `daff`/`daff.py` (CSV diff) and
    `diff2html` CLI tools being on PATH — confirm they are installed where
    this runs.  `branch1`/`branch2` may be branch names or commit hashes.
    """
    visible_name,_,_ = get_report_name_refs(repository_filename)
    # NOTE(review): full_description is currently unused.
    full_description = ' %s : %s' % (visible_name,description) # TODO escape any special chars
    relative_path = get_relative_path(repository_filename)
    mkdir_for_fullpath(output_file_name) # TODO check for errors
    #
    # get the files to compare from the repository
    #
    file1 = r.copy_file_to_output(repository_filename,git_reference=branch1)
    file2 = r.copy_file_to_output(repository_filename,git_reference=branch2)
    # TODO check for file1 or file2 being None !
    if file1 is None or file2 is None:
        # One side is missing: emit an explanatory page instead of a diff.
        content = '<p>Unable to generate report for: %s to %s </p>' % (branch1,branch2)
        if file1 is None:
            content += "<p>File: %s does not exist for: %s </p>" %(repository_filename,branch1)
        if file2 is None:
            content += "<p>File: %s does not exist for: %s </p>" %(repository_filename,branch2)
        full_page = page(relative_path=relative_path,
                         description=description,
                         content=content,
                         title=title
                         )
        hs = open(output_file_name, 'w')
        hs.write(full_page)
        hs.close()
        return
    # if its a database file then its a csv diff
    if repository_filename.endswith('.csv'):
        if show_progress:
            print("Generating csv comparison for : %s between: %s, %s" %(visible_name,branch1,branch2))
        # daff writes its diff to a temp CSV, which daff.py then renders as
        # an HTML table fragment.
        tmp_diff_file = os.path.join(r.output_dir,'diff_DELETEME.csv' )
        run_subprocess('daff diff %s %s >%s'%(file1,file2,tmp_diff_file))
        diff_table_html = get_output_from_cmd('daff.py render --fragment %s'% tmp_diff_file)
        run_subprocess('rm %s'% tmp_diff_file)
        # Restyle only the first <table> tag with the Bootstrap classes.
        diff_table_html = diff_table_html.replace('<table>','<table class="table table-sm">',1)
        content = "<div class='highlighter'>\n"
        content += '<div class="table-responsive">\n'
        content += '<div class="tableFixHead">\n'
        content += '<div class="h-100 d-inline-block">\n'
        content += diff_table_html
        content += "</div>\n"
        content += "</div>\n"
        content += "</div>\n"
        content += "</div>\n"
        full_page = page(relative_path=relative_path,
                         description=description,
                         content=content,
                         title=title
                         )
        hs = open(output_file_name, 'w')
        hs.write(full_page)
        hs.close()
    else:
        if show_progress:
            print("Generating comparison for : %s between: %s, %s" %(visible_name,branch1,branch2))
        # generate side by side diff via the diff2html CLI, using our own
        # HTML wrapper template (written to a scratch file and removed after)
        if os.path.exists(output_file_name):
            os.remove(output_file_name)
        template = diff2html_template(relative_path=relative_path,
                                      description=description,
                                      title=title
                                      )
        hs = open('template.html', 'w')
        hs.write(template)
        hs.close()
        run_subprocess('diff2html -s side --hwt template.html -F %s %s %s '%(output_file_name,file1,file2))
        run_subprocess('rm template.html')
# Cell
###################
#
# Branch comparison report
#
####################
class BranchComparisonReport:
    """Generate the HTML comparison report between two branches (servers).

    Produces an index page listing every differing file, plus one diff page
    per file under ``<output_dir>/<branch1>_<branch2>/``.
    """
    # All files are stored under the branch_name
    def __init__(self, repository, branch1,branch2, output_dir,title='Config Tracker',styles_src=None):
        self.repository = repository
        self.branch1 = branch1
        self.branch2 = branch2
        self.output_dir = output_dir
        self.title = title
        self.r = GitRepositoryReader(repository,output_dir,branch1,branch2)
        # The set of files differing between the two branches, computed once.
        self.file_list = self.r.get_list_of_files_differences_between_branches()
        if styles_src is not None:
            copy_style_and_bootstrap(styles_src,output_dir)
    def generate_report(self):
        """ this generates the index page and all the comparisons for files accross 2 branches."""
        self.index_page()
        self.difference_reports()
    def get_change_ref(self):
        """Return the report-base tag for this comparison, e.g. 'main_dev'."""
        return self.branch1+'_'+self.branch2
    def index_page(self):
        """ Generate the index page for all changes between 2 branches/systems"""
        description = 'Differences between %s and %s' %(self.branch1,self.branch2)
        content = "<table><tr><th>Table Name</th></tr>\n"
        for repository_filename in self.file_list:
            change_ref = self.get_change_ref()
            visible_name,link_name,_ = get_report_name_refs(repository_filename,
                                                            change_ref=change_ref,
                                                            report_base=change_ref,
                                                            output_dir=self.output_dir,
                                                            add_report_base_to_link=True)
            content += '<tr><td><a href="%s">%s</a></td></tr>\n' %(link_name,visible_name)
        content += "</table>\n"
        full_page = page(relative_path='',
                         description=description,
                         content=content,
                         title=self.title
                         )
        index_file = os.path.join(self.output_dir,'index_%s.html' % self.get_change_ref())
        hs = open(index_file, 'w')
        hs.write(full_page)
        hs.close()
    #
    # branch comparison report, for 2 branches generate report of the changes between them
    #
    def difference_reports(self):
        """ generate a difference report for each file in the repository in the `file_list` """
        for repository_filename in self.file_list:
            change_ref = self.get_change_ref()
            visible_name,_,output_filename = get_report_name_refs(repository_filename,
                                                                  change_ref=change_ref,
                                                                  report_base=change_ref,
                                                                  output_dir=self.output_dir)
            description = 'Differences between %s and %s : %s' %(self.branch1,self.branch2,visible_name)
            generate_difference_report_page(self.r,
                                            repository_filename,
                                            self.branch1,
                                            self.branch2,
                                            output_filename,
                                            title = self.title,
                                            description = description)
# Cell
###################
#
# File Change History on a branch
#
####################
class FileChangeHistoryReportForBranch:
    """Generate per-file change-history report pages for one branch (server).

    Produces a branch-level index of all files, one history index per file,
    and one diff page per recorded change, under ``<output_dir>/<branch>/``.
    """
    # All files are stored under the branch_name
    def __init__(self, repository, branch, output_dir,title='Config Tracker',styles_src=None):
        self.repository = repository
        self.branch = branch
        self.output_dir = output_dir
        self.title = title
        self.r = GitRepositoryReader(repository,output_dir,branch)
        # Every tracked database/ and files/ entry on the branch.
        self.filename_list = self.r.get_all_filenames_in_branch()
        # Suffix tag used to name the history index pages.
        self.report_desc_name='change_history'
        if styles_src is not None:
            copy_style_and_bootstrap(styles_src,output_dir)
        #make_output_directories(output_dir,branch)
    def generate_report(self):
        """Generate the branch index and the full history report for every file."""
        self.index_page()
        for repository_filename in self.filename_list:
            self.reports_for_file(repository_filename)
    def format_date(self,date):
        """ Formats the date for display next to change history"""
        return date.strftime("%A %d. %B %Y")
    def index_page(self):
        """ File history index for the given server"""
        description = 'Change history for: %s' % self.branch
        content = "<table><tr><th>File/tablename</th></tr>\n"
        for repository_filename in self.filename_list:
            visible_name,report_link,_ = get_report_name_refs(repository_filename,
                                                              change_ref=self.report_desc_name,
                                                              report_base=self.branch,
                                                              add_report_base_to_link = True)
            content += '<tr><td><a href="%s">%s</a></td></tr>\n' %(report_link,visible_name)
        content += "</table>\n"
        _,_,index_filename = get_report_name_refs(self.branch,
                                                  change_ref=self.report_desc_name,
                                                  output_dir=self.output_dir)
        full_page = page(relative_path='',
                         description=description,
                         content=content,
                         title=self.title
                         )
        mkdir_for_fullpath(index_filename)
        hs = open(index_filename, 'w')
        hs.write(full_page)
        hs.close()
    #
    # file history report, generate report for all changes made to a file
    #
    def reports_for_file(self, repository_filename):
        """Generate the per-file history index plus one diff page per change.

        git log returns newest-first, so each diff compares a commit against
        the next-newer one (prev_commit_hash from the previous iteration).
        """
        #
        # Get the change history for the file (in this branch)
        commit_log = self.r.get_list_of_file_changes(repository_filename)
        self.file_index_page(repository_filename, commit_log)
        if len(commit_log) <= 1:
            return # No history available
        else:
            first = True
            for change_entry in commit_log:
                # a change_entry is a tuple with the following format:
                # (git commit hash, commit details(date), datetime)
                commit_hash = change_entry[0]
                commit_ref = change_entry[1]
                # NOTE(review): this local shadows the datetime module.
                datetime = change_entry[2]
                if not first:
                    visible_name,_,output_filename = get_report_name_refs(repository_filename,
                                                                          #change_ref=commit_ref,
                                                                          change_ref=commit_hash,
                                                                          output_dir=self.output_dir,
                                                                          report_base=self.branch)
                    description = 'Changes made on: %s to: %s' % (self.format_date(datetime),visible_name)
                    generate_difference_report_page(self.r,repository_filename,
                                                    commit_hash,prev_commit_hash,
                                                    output_filename,
                                                    description = description,
                                                    title=self.title)
                else:
                    first = False
                # NOTE(review): prev_commit_ref / prev_datatime (sic) are
                # assigned but never read.
                prev_commit_hash = change_entry[0]
                prev_commit_ref = change_entry[1]
                prev_datatime = change_entry[2]
    def file_index_page(self,repository_filename, commit_log):
        """ generates a page that shows all the changes to a file and the dates they occurred on.
        If there is only one commit in the files history then no changes have occurred since it was
        first commited.
        """
        visible_name,_,output_filename = get_report_name_refs(repository_filename,
                                                              change_ref=self.report_desc_name,
                                                              output_dir=self.output_dir,
                                                              report_base=self.branch)
        description = 'Change history for %s' % visible_name
        content = ''
        if len(commit_log) <= 1:
            content += '<p> No changes recorded </p>'
        else:
            content += "<table><tr><th>Date of change</th></tr>\n"
            first = True
            for change_entry in commit_log:
                if first: # skip the first commit
                    first = False
                    continue
                _,diff_file_link,_ = get_report_name_refs(repository_filename,
                                                          #change_ref=change_entry[1],
                                                          change_ref=change_entry[0],
                                                          link_ref_strip_dir=True)
                date_str = self.format_date(change_entry[2])
                content += '<tr><td><a href="%s">%s</a></td></tr>\n' %(diff_file_link,date_str)
            content += "</table>\n"
        full_page = page(relative_path=get_relative_path(repository_filename),
                         description=description,
                         content=content,
                         title=self.title
                         )
        mkdir_for_fullpath(output_filename)
        hs = open(output_filename, 'w')
        hs.write(full_page)
        hs.close()
# Cell
# nbdev guard: detect whether this module is executing inside a notebook so
# the CLI section below only runs for real script invocations.
try: from nbdev.imports import IN_NOTEBOOK
except: IN_NOTEBOOK=False

if __name__ == "__main__" and not IN_NOTEBOOK:
    # Command-line entry point: compare two branches (servers), or, if only
    # one branch is given, produce just its change-history report.
    ap = argparse.ArgumentParser()
    ap.add_argument("-a", "--branch1", required=True,
                    help="name of the server to compare ")
    ap.add_argument("-b", "--branch2", required=False,
                    help="name of the server to compare, if this is not supplied, a report with the history of branch1 will be generated",default='')
    ap.add_argument("-r", "--repository", required=True,
                    help="directory of the repository")
    ap.add_argument("-o", "--outputdir", required=False, default="./",
                    help="report output directory")
    ap.add_argument('-V', '--verbose', required=False,
                    help='add very chatty output, mainly for debug', default=False, action='store_true')
    ap.add_argument('-P', '--progress', required=False,
                    help='show progress', default=False, action='store_true')
    ap.add_argument('-s', '--stylesheets', required=True,
                    help='location of directory holding the stylesheets')
    args = vars(ap.parse_args())
    repository = args["repository"]
    output_dir = args["outputdir"]
    branch1 = args["branch1"]
    branch2 = args["branch2"]
    # These assignments rebind the module-level flags used throughout.
    verbose = args['verbose']
    show_progress = args['progress']
    styles_src = args['stylesheets']
    if output_dir[-1] != '/':
        output_dir = output_dir + '/'
    # Branch comparison (only when a second branch was supplied) ...
    if branch2 != '':
        report = BranchComparisonReport(repository, branch1,branch2, output_dir,styles_src=styles_src)
        report.generate_report()
    # ... then per-branch change-history reports.
    report = FileChangeHistoryReportForBranch(repository,branch1,output_dir,styles_src=styles_src)
    report.generate_report()
    if branch2 != '':
        report = FileChangeHistoryReportForBranch(repository,branch2,output_dir,styles_src=styles_src)
        report.generate_report()
| 1.59375 | 2 |
monkq/stat.py | zsluedem/MonkTrader | 2 | 12759381 | #
# MIT License
#
# Copyright (c) 2018 WillQ
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
import pickle
from typing import TYPE_CHECKING, Dict, List, Union
from monkq.assets.order import ORDER_T, BaseOrder
from monkq.assets.trade import Trade
from monkq.utils.timefunc import utc_datetime
from pandas.tseries.frequencies import DateOffset, to_offset
if TYPE_CHECKING:
    from monkq.context import Context

# One capital snapshot: account name -> total capital, plus a 'timestamp'
# entry holding the datetime the snapshot was taken.
DAILY_STAT_TYPE = Dict[str, Union[float, datetime.datetime]]
class Statistic():
    """Collect backtest statistics — periodic capital snapshots, orders and
    trades — and pickle them to a report file.
    """

    def __init__(self, context: "Context"):
        self.context = context
        settings = context.settings
        # Report destination and how often capital is sampled.
        self.report_file: str = getattr(settings, 'REPORT_FILE', 'result.pkl')
        self.collect_freq = getattr(settings, 'COLLECT_FREQ', '4H')
        self.daily_capital: List[DAILY_STAT_TYPE] = []
        self.order_collections: List[BaseOrder] = []
        self.trade_collections: List[Trade] = []
        self.collect_offset: DateOffset = to_offset(self.collect_freq)
        # Epoch start guarantees the very first freq_collect_account() fires.
        self.last_collect_time: datetime.datetime = utc_datetime(1970, 1, 1)

    def collect_account_info(self) -> None:
        """Snapshot every account's total capital at the current sim time."""
        snapshot: DAILY_STAT_TYPE = {
            name: account.total_capital
            for name, account in self.context.accounts.items()
        }
        snapshot['timestamp'] = self.context.now
        self.daily_capital.append(snapshot)

    def freq_collect_account(self) -> None:
        """Snapshot accounts, but at most once per collect_freq interval."""
        elapsed = self.context.now - self.last_collect_time
        if elapsed >= self.collect_offset.delta:
            self.collect_account_info()
            self.last_collect_time = self.context.now

    def collect_order(self, order: ORDER_T) -> None:
        """Record an order for the final report."""
        self.order_collections.append(order)

    def collect_trade(self, trade: Trade) -> None:
        """Record a trade for the final report."""
        self.trade_collections.append(trade)

    def _pickle_obj(self) -> dict:
        """Bundle everything that goes into the pickled report."""
        return {
            "daily_capital": self.daily_capital,
            "orders": self.order_collections,
            "trades": self.trade_collections,
            "settings": self.context.settings
        }

    def report(self) -> None:
        """Write the collected statistics to ``report_file`` as a pickle."""
        with open(self.report_file, 'wb') as sink:
            pickle.dump(self._pickle_obj(), sink)
| 2.046875 | 2 |
resources/vars.py | juliancheal/incubator-openwhisk-deploy-openshift | 1 | 12759382 | <reponame>juliancheal/incubator-openwhisk-deploy-openshift<filename>resources/vars.py
import os
def main(dict):
    """OpenWhisk action entry point: expose the __OW_* runtime env variables.

    The incoming params (*dict*) are ignored.  (The parameter name shadows
    the builtin ``dict`` but is kept for interface compatibility.)
    Raises KeyError if any expected variable is missing, as before.
    """
    env_names = {
        "api_host": "__OW_API_HOST",
        "api_key": "__OW_API_KEY",
        "namespace": "__OW_NAMESPACE",
        "action_name": "__OW_ACTION_NAME",
        "activation_id": "__OW_ACTIVATION_ID",
        "deadline": "__OW_DEADLINE",
    }
    return {field: os.environ[var] for field, var in env_names.items()}
}
| 1.765625 | 2 |
setup.py | ebsaral/writable | 0 | 12759383 | <filename>setup.py<gh_stars>0
from setuptools import setup
def readme():
    """Return the contents of README.md for use as the long description."""
    with open('README.md') as readme_file:
        return readme_file.read()
# Packaging metadata for the `filewriter` distribution (published to PyPI).
setup(name='filewriter',
      version='2.0.2',
      description='json supported easy debugger for python, in files. can also read.',
      long_description=readme(),
      long_description_content_type='text/markdown',
      classifiers=[
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Text Processing :: Linguistic',
        'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      keywords='json supported easy debugger for python debug file writer reader',
      url='https://github.com/ebsaral/filewriter',
      author='<NAME>',
      author_email='<EMAIL>',
      license='BSD',
      packages=['filewriter'],
      # Pure-Python package: no runtime dependencies.
      install_requires=[],
      include_package_data=True,
      zip_safe=False,
      project_urls={
          'Documentation': 'https://github.com/ebsaral/filewriter',
          'Funding': 'https://github.com/ebsaral/filewriter',
          'Source': 'https://github.com/ebsaral/filewriter',
      },
      )
| 1.671875 | 2 |
LeetCode/incomplete/331-Verify-Preorder-Serialization-of-a-Binary-Tree.py | PyroGenesis/Comprehensive-Coding-Solutions | 0 | 12759384 | class Solution:
def isValidSerialization(self, preorder: str) -> bool:
return self.isValidSerializationStack(preorder)
'''
Initial, Almost-Optimal, Split Iteration
Look at the question closely.
If i give you a subset from start, will you be able to tell me easily if its valid
You will quickly realize that the numbers don't matter
There a given number of slots for any given tree structure
and all of them must be filled, BUT
there should be no more nodes than the number of slots
How do we know how many slots to be filled?
Look at the tree growth and you will find a pattern
Initially, we have 1 slot available (for the root)
For every new number node, we use up 1 slot but create 2 new ones
Net change: +1
For every null node, we use up 1 slot and create 0 new ones
Net change: -1
So if you keep track of this slot count, you can easily figure out if the traversal is valid
NOTE: The traversal being preorder is basically useless
Time: O(n)
Space: O(n)
'''
def isValidSerializationInitial(self, preorder: str) -> bool:
# initially we have one empty slot to put the root in it
slots = 1
for node in preorder.split(','):
# no empty slot to put the current node
if slots == 0:
return False
if node == '#':
# null node uses up a slot
slots -= 1
else:
# number node creates a new slot
slots += 1
# we don't allow empty slots at the end
return slots == 0
'''
Optimal, Character Iteration
Similar logic as above, just skips the .split for char iteration
This is because .split saves the split in a new list, costing us O(n) memory
Time: O(n)
Space: O(n)
'''
def isValidSerializationCharIteration(self, preorder: str) -> bool:
# initially we have one empty slot to put the root in it
slots = 1
# this boolean indicates whether current digit char indicates a new node (for multi-char numbers)
new_symbol = True
for ch in preorder:
# if current char is a comma
# get ready for next node and continue
if ch == ',':
new_symbol = True
continue
# no empty slot to put the current node
if slots == 0:
return False
if ch == '#':
# null node uses up a slot
slots -= 1
elif new_symbol:
# number node creates a new slot
slots += 1
# next letter is not a new node
new_symbol = False
# we don't allow empty slots at the end
return slots == 0
'''
Stack
Not better than initial, but interesting
'''
def isValidSerializationStack(self, preorder: str) -> bool:
stack = []
for node in preorder.split(","):
while stack and node == stack[-1] == "#":
if len(stack) < 2:
return False
stack.pop()
stack.pop()
stack.append(node)
return stack == ['#'] | 3.8125 | 4 |
tests/experiments/table/model_table.py | Anthony102899/Lego-ImageGenerator | 1 | 12759385 | from solvers.rigidity_solver.models import *
import numpy as np
def _scale(arr):
    """Scale coordinates by the model's global factor (15)."""
    return arr * 15


def v(x, y, z):
    """Build an (unscaled) float64 3-vector."""
    return np.array([x, y, z], dtype=np.double)


def p(x, y, z):
    """Build a scaled float64 3-point."""
    return _scale(np.array([x, y, z], dtype=np.double))


def lerp(p, q, weight):
    """Linearly interpolate from p (weight 0) to q (weight 1)."""
    return p + (q - p) * weight
def define(stage):
    """Build the triangular-table linkage model for the given design stage.

    Stages are cumulative: stage 1 is the base top triangle plus crossed
    legs; stage >= 2 adds cross braces between leg pairs; stage >= 3 adds a
    second, higher set of braces; stage >= 4 adds struts from the stage-2
    brace midpoints up to the top-edge midpoints.

    Returns ``locals()`` so callers can pick out ``model``, ``joints`` etc.
    """
    # Key points: a/b/c form the small core triangle; X-u / X-d are the
    # upper/lower endpoints associated with corner X of the outer triangle.
    _p = {
        "a": p(0, 0, 0),
        "b": p(1, 0, 0),
        "c": p(1 / 2, np.sqrt(3) / 2, 0),
        "A-u": p(3 / 2, np.sqrt(3) / 2, 1),
        "A-d": p(3 / 2, np.sqrt(3) / 2, -1),
        "B-u": p(-1 / 2, np.sqrt(3) / 2, 1),
        "B-d": p(-1 / 2, np.sqrt(3) / 2, -1),
        "C-u": p(1 / 2, -np.sqrt(3) / 2, 1),
        "C-d": p(1 / 2, -np.sqrt(3) / 2, -1),
    }
    # Interpolated points along the top-triangle edges (midpoints and the
    # 10% / 90% stations used as leg attachment points).
    _p.update({
        "ab-mid": lerp(_p["A-u"], _p["B-u"], 0.5),
        "bc-mid": lerp(_p["B-u"], _p["C-u"], 0.5),
        "ca-mid": lerp(_p["C-u"], _p["A-u"], 0.5),
        "ab-0.1": lerp(_p["A-u"], _p["B-u"], 0.1),
        "bc-0.1": lerp(_p["B-u"], _p["C-u"], 0.1),
        "ca-0.1": lerp(_p["C-u"], _p["A-u"], 0.1),
        "ba-0.1": lerp(_p["B-u"], _p["A-u"], 0.1),
        "cb-0.1": lerp(_p["C-u"], _p["B-u"], 0.1),
        "ac-0.1": lerp(_p["A-u"], _p["C-u"], 0.1),
        "ab-0.9": lerp(_p["A-u"], _p["B-u"], 0.9),
        "bc-0.9": lerp(_p["B-u"], _p["C-u"], 0.9),
        "ca-0.9": lerp(_p["C-u"], _p["A-u"], 0.9),
        "ba-0.9": lerp(_p["B-u"], _p["A-u"], 0.9),
        "cb-0.9": lerp(_p["C-u"], _p["B-u"], 0.9),
        "ac-0.9": lerp(_p["A-u"], _p["C-u"], 0.9),
    })
    def beam_init(p, q, density=0.5):
        # Tetrahedral beam between two points with the shared thickness.
        return Beam.tetra(p, q, density=density, thickness=1)
    stage_2_frac = 0.25
    stage_3_frac = 0.7
    # Unit direction vectors: _dX is the core-triangle edge opposite corner X.
    normalize = lambda x: x / np.linalg.norm(x)
    _da = normalize(_p["c"] - _p["b"])
    _db = normalize(_p["a"] - _p["c"])
    _dc = normalize(_p["b"] - _p["a"])
    _dz = v(0, 0, 1)
    model = Model()
    # Beams: "top-X" are the three top-triangle edges; "X-y" is the leg that
    # runs from a top-edge station down to the lower point y-d.
    _bmap = {
        "top-A": beam_init(_p["B-u"], _p["C-u"]),
        "top-B": beam_init(_p["C-u"], _p["A-u"]),
        "top-C": beam_init(_p["A-u"], _p["B-u"]),
        # "top-ab-bc": beam_init(_p["ab-mid"], _p["bc-mid"]),
        # "top-bc-ca": beam_init(_p["bc-mid"], _p["ca-mid"]),
        # "top-ca-ab": beam_init(_p["ca-mid"], _p["ab-mid"]),
        #
        # "core-ab": beam_init(_p['a'], _p["b"]),
        # "core-bc": beam_init(_p["b"], _p["c"]),
        # "core-ca": beam_init(_p["c"], _p["a"]),
        #
        "A-c": beam_init(_p["ca-0.9"], _p["C-d"]),
        "A-b": beam_init(_p["ab-0.1"], _p["B-d"]),
        "B-a": beam_init(_p["ab-0.9"], _p["A-d"]),
        "B-c": beam_init(_p["bc-0.1"], _p["C-d"]),
        "C-b": beam_init(_p["bc-0.9"], _p["B-d"]),
        "C-a": beam_init(_p["ca-0.1"], _p["A-d"]),
    }
    # Joints: hinges at the lower leg crossings, the top-triangle corners,
    # the leg-to-top attachments, and the mid crossing of each leg pair.
    joints = [
        Joint(_bmap["B-a"], _bmap["C-a"], pivot=_p["A-d"], rotation_axes=_da),
        Joint(_bmap["C-b"], _bmap["A-b"], pivot=_p["B-d"], rotation_axes=_db),
        Joint(_bmap["A-c"], _bmap["B-c"], pivot=_p["C-d"], rotation_axes=_dc),
        Joint(_bmap["top-C"], _bmap["top-A"], pivot=_p["B-u"], rotation_axes=-v(0, 0, 1)),
        Joint(_bmap["top-A"], _bmap["top-B"], pivot=_p["C-u"], rotation_axes=-v(0, 0, 1)),
        Joint(_bmap["top-B"], _bmap["top-C"], pivot=_p["A-u"], rotation_axes=-v(0, 0, 1)),
        Joint(_bmap["top-B"], _bmap["A-b"], pivot=_p["ab-0.1"], rotation_axes=_da),
        Joint(_bmap["top-C"], _bmap["A-c"], pivot=_p["ca-0.9"], rotation_axes=_da),
        Joint(_bmap["top-C"], _bmap["B-c"], pivot=_p["bc-0.1"], rotation_axes=_db),
        Joint(_bmap["top-A"], _bmap["B-a"], pivot=_p["ab-0.9"], rotation_axes=_db),
        Joint(_bmap["top-A"], _bmap["C-a"], pivot=_p["ca-0.1"], rotation_axes=_dc),
        Joint(_bmap["top-B"], _bmap["C-b"], pivot=_p["bc-0.9"], rotation_axes=_dc),
        Joint(_bmap["A-b"], _bmap["B-a"],
              pivot=(_p["ab-0.1"] + _p["ab-0.9"] + _p["A-d"] + _p["B-d"]) / 4,
              rotation_axes=np.cross(_dc, _dz)),
        Joint(_bmap["B-c"], _bmap["C-b"],
              pivot=(_p["bc-0.1"] + _p["bc-0.9"] + _p["B-d"] + _p["C-d"]) / 4,
              rotation_axes=np.cross(_da, _dz)),
        Joint(_bmap["C-a"], _bmap["A-c"],
              pivot=(_p["ca-0.1"] + _p["ca-0.9"] + _p["C-d"] + _p["A-d"]) / 4,
              rotation_axes=np.cross(_db, _dz)),
    ]
    ax_z = v(0, 0, 1)
    # Stage 2: horizontal cross braces between each leg pair, attached at
    # the stage_2_frac station along each leg.
    if stage >= 2:
        _stage_2_points = {
            f"{a}-u-{b}-d-{stage_2_frac}": lerp(_p[f"{a.lower()}{b.lower()}-0.1"], _p[f"{b}-d"], stage_2_frac)
            for a in "ABC" for b in "ABC" if a != b
        }
        _p.update(_stage_2_points)
        _stage_2_beam = {
            f"s2-{a}{b}": beam_init(_p[f"{a}-u-{b}-d-{stage_2_frac}"], _p[f"{b}-u-{a}-d-{stage_2_frac}"])
            for a, b in ("AB", "BC", "CA")
        }
        _bmap.update(_stage_2_beam)
        _stage_2_joint = [
            Joint(_bmap[f"s2-{a}{b}"], _bmap[f"{a}-{b.lower()}"], pivot=_p[f"{a}-u-{b}-d-{stage_2_frac}"], rotation_axes=ax_z)
            for a, b in ("AB", "BC", "CA")
        ] + [
            Joint(_bmap[f"s2-{a}{b}"], _bmap[f"{b}-{a.lower()}"], pivot=_p[f"{b}-u-{a}-d-{stage_2_frac}"], rotation_axes=ax_z)
            for a, b in ("AB", "BC", "CA")
        ]
        joints.extend(_stage_2_joint)
    # Stage 3: a second set of braces, interpolated between the top corners
    # and lower points at stage_3_frac.
    if stage >= 3:
        _stage_3_points = {
            f"{a}-u-{b}-d-{stage_3_frac}": lerp(_p[f"{a}-u"], _p[f"{b}-d"], stage_3_frac)
            for a in "ABC" for b in "ABC" if a != b
        }
        _p.update(_stage_3_points)
        _stage_3_beam = {
            f"s3-{a}{b}": beam_init(_p[f"{a}-u-{b}-d-{stage_3_frac}"], _p[f"{b}-u-{a}-d-{stage_3_frac}"])
            for a, b in ("AB", "BC", "CA")
        }
        _bmap.update(_stage_3_beam)
        _stage_3_joint = [
            Joint(_bmap[f"s3-{a}{b}"], _bmap[f"{a}-{b.lower()}"], pivot=_p[f"{a}-u-{b}-d-{stage_3_frac}"], rotation_axes=ax_z)
            for a, b in ("AB", "BC", "CA")
        ] + [
            Joint(_bmap[f"s3-{a}{b}"], _bmap[f"{b}-{a.lower()}"], pivot=_p[f"{b}-u-{a}-d-{stage_3_frac}"], rotation_axes=ax_z)
            for a, b in ("AB", "BC", "CA")
        ]
        joints.extend(_stage_3_joint)
    # Stage 4: struts from the midpoint of each stage-2 brace up to the
    # matching top-edge midpoint.
    if stage >= 4:
        _indices = ["AB", "BC", "CA"]
        _stage_4_points = {
            f"s4-{_indices[i % 3]}": lerp(_p[f"{a}-u-{b}-d-{stage_2_frac}"], _p[f"{b}-u-{a}-d-{stage_2_frac}"], 0.5)
            for i, (a, b) in enumerate(_indices)
        }
        _p.update(_stage_4_points)
        _stage_4_beam = {
            f"s4-{_indices[i % 3]}": beam_init(_p[f"s4-{_indices[i]}"], _p[f"{a.lower()}{b.lower()}-mid"])
            for i, (a, b) in enumerate(_indices)
        }
        _bmap.update(_stage_4_beam)
        _stage_4_joint = [
            Joint(_bmap[f"s4-{_indices[i % 3]}"], _bmap[f"s2-{_indices[i % 3]}"],
                  pivot=_p[f"s4-{_indices[i]}"],
                  rotation_axes=np.cross((_dc, _da, _db)[i], v(0, 0, 1))
                  )
            for i, (a, b) in enumerate(_indices)
        ] + [
            Joint(_bmap[f"s4-{_indices[i % 3]}"], _bmap[f"top-{'CAB'[i]}"],
                  pivot=_p[f"{a.lower()}{b.lower()}-mid"],
                  rotation_axes=np.cross((_dc, _da, _db)[i], v(0, 0, 1))
                  )
            for i, (a, b) in enumerate(_indices)
        ]
        joints.extend(_stage_4_joint)
    beams = list(_bmap.values())
    model.add_beams(beams)
    model.add_joints(joints)
    # Returning locals() deliberately exposes every intermediate for callers.
    return locals()
if __name__ == "__main__":
    # Build and inspect the stage-1 model.
    model = define(1)["model"]
    model.visualize(show_hinge=True)
    points = model.point_matrix()
    edges = model.edge_matrix()
    # BUG FIX: a trailing comma here previously made `stiffness` a 1-tuple
    # (`(matrix,)`), which was then passed as-is to generalized_courant_fischer.
    stiffness = spring_energy_matrix_accelerate_3D(points, edges, abstract_edges=[])
    constraints = model.constraint_matrix()
    new_stiffness, B = generalized_courant_fischer(
        stiffness,
        constraints
    )
    # Report the smallest eigenvalues (rigidity indicators).
    pairs = model.eigen_solve(num_pairs=20)
    print([e for e, v in pairs])
    # Export every design stage for later viewing.
    for stage in range(1, 4 + 1):
        model = define(stage)["model"]
        model.save_json(f"output/table-stage{stage}.json")
| 2.1875 | 2 |
src/utils/url_checker.py | rafa-leao/site_mapper_challenge | 0 | 12759386 | from urllib.parse import urlparse
def url_checker(url_site, url_site_disallowed_path):
    """Return True when the URL's path contains none of the disallowed
    path fragments, False as soon as one is found.

    NOTE(review): with an empty *url_site_disallowed_path* this returns
    False (nothing checked counts as "not validated") — confirm callers
    rely on that.
    """
    path = urlparse(url_site).path
    allowed = False
    for disallowed in url_site_disallowed_path:
        if disallowed in path:
            return False
        allowed = True
    return allowed
| 2.921875 | 3 |
article_85/use_so7C.py | tisnik/go-fedora | 0 | 12759387 | import ctypes
import time
# Load the shared library from the working directory.
so7 = ctypes.CDLL("./so7.so")
# Two large UTF-8 byte strings used as concat inputs (multi-byte chars on purpose).
t1 = ("ěščř ЩжΛλ"*10000).encode("utf-8")
t2 = ("<foobar>"*10000).encode("utf-8")
# Tell ctypes the C function returns a NUL-terminated char* (converted to bytes).
so7.concat.restype = ctypes.c_char_p
# Repeatedly call the native concat and print the result length; the sleep
# throttles the loop (presumably for observing memory behavior — TODO confirm).
for i in range(100000):
    t = so7.concat(t1, t2)
    print(len(t))
    time.sleep(0.001)
| 2.453125 | 2 |
1.0/api_server-1.0.5/controllers/src/geo.py | concurrentlabs/laguna | 31 | 12759388 | <filename>1.0/api_server-1.0.5/controllers/src/geo.py
#!/usr/bin/env python
#
import config
def geo_restrict(ip):
    """Return True when *ip* passes the configured geo restriction.

    With no GeoIP database or no country codes configured, every IP is
    allowed (the check is treated as passed).
    """
    if config.geoip is None or config.geo_country_codes_count <= 0:
        # No geo restriction configured — same as not restricting at all.
        return True
    country = config.geoip.country_code_by_addr(ip)
    return any(country == code for code in config.geo_country_codes)
| 2.390625 | 2 |
tab_bot.py | Pu-ck/tab-bot | 0 | 12759389 | import discord
import time
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
from discord.ext import commands
from selenium import webdriver
# Selenium setup: headless Chrome with reduced logging.
options = webdriver.ChromeOptions()
options.add_argument("headless")
options.add_argument("log-level=3")
# NOTE(review): `chrome_options` is the older keyword; newer Selenium uses
# `options=` — confirm against the pinned selenium version.
driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)
# Discord bot with "." as the command prefix.
# (Removed dead code: a `tab_bot = discord.Client()` assignment was
# immediately overwritten by the commands.Bot below.)
tab_bot = commands.Bot(command_prefix=".")
# Startup
@tab_bot.event
async def on_ready():
    """Log which bot account connected once the Discord gateway is ready."""
    print("Logged as {0.user}".format(tab_bot))
# On command
@tab_bot.command()
@commands.cooldown(1, 15, commands.BucketType.user)
# Search for requested tabulature on Songsterr
async def tab(ctx, *, arg):
    """Search Songsterr (via Google) for *arg* and reply with a tab embed.

    Scrapes the first Google hit; if it is a single-tab page, extracts BPM,
    time signature, tuning, track count, chords url and difficulty. If it is
    an artist ("-tabs-") page, replies with the artist summary instead.
    Fixes over the original: removed a stray debug print of the time
    signature and narrowed bare `except:` clauses to `except Exception:` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    search = "songsterr.com " + arg
    driver.get("https://google.com")
    # Dismiss Google's consent dialog if it appears.
    try:
        driver.find_element_by_id("L2AGLb").click()
    except Exception:
        pass
    driver.find_element_by_name("q").send_keys(search)
    driver.find_element_by_name("q").send_keys(Keys.ENTER)
    # Open the first search result and wait for the page to load.
    driver.find_element_by_css_selector("div a h3").click()
    time.sleep(3)
    url = driver.current_url
    if url[:32] == "https://www.songsterr.com/a/wsa/":
        if "-tab-" in url:
            # Dismiss the cookie/consent banner if present.
            try:
                driver.find_element_by_id("accept").click()
            except Exception:
                pass
            # BPM
            try:
                bpm = driver.find_element_by_class_name("vs1qc").text[1:]
            except Exception:
                bpm = "?"
            # Time signature
            try:
                show_time_signature = True
                time_signature = []
                for name in driver.find_elements_by_class_name("vscf"):
                    time_signature.append(name.text)
                # Two-digit beat values are not supported by the formatting below.
                for name in time_signature:
                    if int(name) > 9:
                        time_signature = "?"
                        show_time_signature = False
                if show_time_signature:
                    # Flatten the list repr, then re-insert "/" and spaces to
                    # format pairs as "N/M N/M ...".
                    time_signature = str(time_signature)
                    time_signature = (
                        time_signature.replace("[", "")
                        .replace("]", "")
                        .replace("'", "")
                        .replace(",", "")
                    )
                    time_signature = time_signature.replace(" ", "")
                    length = len(time_signature) * 2
                    for i in range(1, len(time_signature) * 2, 3):
                        time_signature = time_signature[:i] + "/" + time_signature[i:]
                    time_signature = time_signature.replace(" ", "")
                    for i in range(3, len(time_signature) * 2, 4):
                        time_signature = time_signature[:i] + " " + time_signature[i:]
                    for i in range(len(time_signature)):
                        if time_signature[len(time_signature) - 1] == "/":
                            time_signature = time_signature[: len(time_signature) - 1]
                    time_signature = time_signature[:length]
            except Exception:
                time_signature = "?"
            # Number of tracks
            try:
                driver.find_element_by_id("control-mixer").click()
                tracks_number = len(driver.find_elements_by_class_name("Cv3137"))
            except Exception:
                tracks_number = "?"
            # Tuning
            try:
                tuning = []
                for name in driver.find_elements_by_class_name("C8nsu"):
                    tuning.append(name.text)
                tuning = str(tuning)
                tuning = (
                    tuning.replace("[", " ")
                    .replace("]", " ")
                    .replace(",", " ")
                    .replace("'", "")
                )
                # Strings are listed high-to-low on the page; reverse them.
                tuning = tuning[::-1]
            except Exception:
                tuning = "?"
            # Chords, if avaliable
            try:
                chords_url = driver.find_element_by_class_name("C6c2vy").get_attribute(
                    "href"
                )
            except Exception:
                chords_url = "No chords for this particular song"
            # Artist and song name
            try:
                artist_name = driver.find_element_by_class_name("Bpv319").text
                song_title = driver.find_element_by_css_selector(
                    "span[aria-label='title']"
                ).text
            except Exception:
                artist_name = "?"
                song_title = "?"
            # Difficulty
            try:
                driver.find_element_by_id("menu-search").click()
                time.sleep(1)
                driver.find_element_by_class_name("Cgl126").send_keys(
                    artist_name + " " + song_title
                )
                difficulty = driver.find_element_by_class_name("Cae2ew").get_attribute(
                    "title"
                )
            except Exception:
                difficulty = "?"
            # Tab embed
            embed = discord.Embed(title="Requested tab", color=0x128DF6)
            embed.add_field(name="Artist name", value=artist_name, inline=False)
            embed.add_field(name="Song title", value=song_title, inline=False)
            embed.add_field(name="Url", value=url, inline=False)
            embed.add_field(name="Chords", value=chords_url, inline=False)
            embed.add_field(name="Difficulty", value=difficulty, inline=False)
            embed.add_field(name="BPM", value=bpm, inline=False)
            embed.add_field(name="Tuning", value=tuning, inline=False)
            embed.add_field(name="Time signature", value=time_signature, inline=False)
            embed.add_field(name="Number of tracks", value=tracks_number, inline=False)
            await ctx.send(embed=embed)
            tab.reset_cooldown(ctx)
        elif "-tabs-" in url:
            # Number of tabs for particular artist
            try:
                tabs_number = len(driver.find_elements_by_class_name("Beiqi"))
                artist_name = driver.find_element_by_id("top").text
            except Exception:
                tabs_number = "?"
                artist_name = "?"
            # The listing shows at most 50 entries, so 50 means "50 or more".
            if tabs_number == 50:
                tabs_number = "50+"
            # Tab embed
            embed = discord.Embed(title="Requested artist", color=0x128DF6)
            embed.add_field(name="Artist name", value=artist_name[:-4], inline=False)
            embed.add_field(name="Url", value=url, inline=False)
            embed.add_field(name="Number of tabs", value=tabs_number, inline=False)
            await ctx.send(
                "Unable to find requested tab - redirecting to band page", embed=embed
            )
            tab.reset_cooldown(ctx)
        else:
            await ctx.send("Unable to find requested tab or artist")
            tab.reset_cooldown(ctx)
# On command
@tab_bot.command()
@commands.cooldown(1, 10, commands.BucketType.user)
# Informations about bot
async def info(ctx):
    """Reply with an embed explaining how to use the .tab command."""
    embed = discord.Embed(title="TabBot", color=0x128DF6)
    embed.add_field(
        name="Information",
        value="Use .tab command in format \".tab artist name song name\" to find requested tablature. If the particular tablature couldn't be find or the song name won't be given, only the url to artist page will be sent. The whole tabulature finding process needs a few seconds.",
        inline=False,
    )
    await ctx.send(embed=embed)
# Handling commands errors
@tab.error
async def tab_error(ctx, error):
    """Error handler for the .tab command.

    BUG FIX: the original used two independent `if` statements, so after
    handling MissingRequiredArgument it fell into the second `if`'s `else`
    branch and re-raised the already-handled error. Using `elif` fixes that.
    """
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send("Command missing required argument")
        tab.reset_cooldown(ctx)
    elif isinstance(error, commands.CommandOnCooldown):
        return
    else:
        raise error
@info.error
# NOTE(review): this handler shadows the module-level name `tab_error`
# defined above; the @tab.error registration still works because the
# decorator captured the first function object, but renaming this one
# (e.g. to info_error) would be clearer.
async def tab_error(ctx, error):
    """Error handler for the .info command.

    BUG FIX: discord.py invokes command error handlers as (ctx, error);
    the original signature took only (error) and would raise a TypeError
    whenever it fired.
    """
    if isinstance(error, commands.CommandOnCooldown):
        return
    else:
        raise error
# Bot's token
# NOTE(review): "token" is a placeholder — supply a real bot token (ideally
# from an environment variable) before deploying. This call blocks and runs
# the bot's event loop.
tab_bot.run("token")
| 2.640625 | 3 |
common/string_utils.py | junglestory/python-boilerplate | 0 | 12759390 | import re
# Remove tags (태그 제거)
def relace_tag(content):
    """Return *content* with every <...> tag stripped out.

    Uses a non-greedy pattern so adjacent tags are removed individually.
    (The function name keeps its original spelling for compatibility.)
    """
    return re.sub(r'<.*?>', '', content)
rastervision/core/__init__.py | carderne/raster-vision | 4 | 12759391 | # flake8: noqa
from rastervision.core.box import *
from rastervision.core.class_map import *
from rastervision.core.command_io_definition import *
from rastervision.core.config import *
from rastervision.core.raster_stats import RasterStats
from rastervision.core.training_data import *
| 1.03125 | 1 |
foreman/data_refinery_foreman/surveyor/test_transcriptome_index.py | dongbohu/ccdl_test | 0 | 12759392 | <reponame>dongbohu/ccdl_test
import os
import json
from unittest.mock import Mock, patch, call
from django.test import TestCase
from urllib.request import URLError
from data_refinery_common.job_lookup import Downloaders
from data_refinery_common.models import (
Batch,
DownloaderJob,
SurveyJob,
SurveyJobKeyValue,
Organism
)
from data_refinery_foreman.surveyor.transcriptome_index import TranscriptomeIndexSurveyor
class SurveyTestCase(TestCase):
    """Tests for TranscriptomeIndexSurveyor against a fixture species list."""
    def setUp(self):
        """Create a TRANSCRIPTOME_INDEX survey job targeting EnsemblPlants."""
        survey_job = SurveyJob(source_type="TRANSCRIPTOME_INDEX")
        survey_job.save()
        self.survey_job = survey_job
        key_value_pair = SurveyJobKeyValue(survey_job=survey_job,
                                           key="ensembl_division",
                                           value="EnsemblPlants")
        key_value_pair.save()
    @patch('data_refinery_foreman.surveyor.external_source.send_job')
    @patch("data_refinery_foreman.surveyor.transcriptome_index.urllib.request.urlopen")
    @patch("data_refinery_foreman.surveyor.transcriptome_index.requests.get")
    def test_survey(self, mock_get, mock_urlopen, mock_send_job):
        """Surveying queues one downloader job and two batches per species."""
        json_file_path = os.path.join(os.path.dirname(__file__), "test_transcriptome_species.json")
        with open(json_file_path, "r") as json_file:
            species_json = json.load(json_file)
        # Insert the organisms into the database so the model doesn't call the
        # taxonomy API to populate them.
        for species in species_json:
            # Account for the subtle difference between the API for
            # the main Ensembl division and the API for the rest of
            # them.
            name_key = "common_name" if "common_name" in species else "name"
            taxonomy_key = "taxonomy_id" if "taxonomy_id" in species else "taxon_id"
            organism = Organism(name=species[name_key].upper(),
                                taxonomy_id=species[taxonomy_key],
                                is_scientific_name=True)
            organism.save()
        # The surveyor's species listing request returns the fixture data.
        mock_get.return_value = Mock(ok=True)
        mock_get.return_value.json.return_value = species_json
        # There are two possible file locations. The correct one is
        # determined by making a request to one to see if it
        # exists. This URLError simulates it not existing.
        mock_urlopen.side_effect = URLError("404 or something")
        surveyor = TranscriptomeIndexSurveyor(self.survey_job)
        surveyor.survey()
        # One downloader job per surveyed species, each dispatched via send_job.
        downloader_jobs = DownloaderJob.objects.order_by("id").all()
        self.assertEqual(downloader_jobs.count(), len(species_json))
        send_job_calls = []
        for downloader_job in downloader_jobs:
            send_job_calls.append(
                call(Downloaders.TRANSCRIPTOME_INDEX,
                     downloader_job.id))
        mock_send_job.assert_has_calls(send_job_calls)
        # There should be 2 Batches for each species (long and short
        # transcriptome lengths).
        batches = Batch.objects.all()
        self.assertEqual(batches.count(), len(species_json) * 2)
        # And each batch has two files: fasta and gtf
        for batch in batches:
            self.assertEqual(len(batch.files), 2)
| 2.3125 | 2 |
out/python/IpfsPinningSDK/models/__init__.py | JenksyGuo/pinning-services-api-codegen | 3 | 12759393 | # flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from from IpfsPinningSDK.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from IpfsPinningSDK.model.delegates import Delegates
from IpfsPinningSDK.model.failure import Failure
from IpfsPinningSDK.model.failure_error import FailureError
from IpfsPinningSDK.model.origins import Origins
from IpfsPinningSDK.model.pin import Pin
from IpfsPinningSDK.model.pin_meta import PinMeta
from IpfsPinningSDK.model.pin_results import PinResults
from IpfsPinningSDK.model.pin_status import PinStatus
from IpfsPinningSDK.model.status import Status
from IpfsPinningSDK.model.status_info import StatusInfo
from IpfsPinningSDK.model.text_matching_strategy import TextMatchingStrategy
| 1.78125 | 2 |
students/k3343/laboratory_works/Bogdanova Elizaveta/laboratory_work_1/tour_agency/migrations/0001_initial.py | TonikX/ITMO_ICT_-WebProgramming_2020 | 10 | 12759394 | <filename>students/k3343/laboratory_works/Bogdanova Elizaveta/laboratory_work_1/tour_agency/migrations/0001_initial.py
# Generated by Django 3.0.4 on 2020-04-19 07:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Agency, Tours, Tourist and Comment models.

    NOTE: auto-generated by Django (see header); avoid hand-editing the
    operations except to match the current model definitions.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Travel agency directory.
        migrations.CreateModel(
            name='Agency',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('agency', models.CharField(max_length=50, verbose_name='Название турагентства')),
            ],
            options={
                'verbose_name': 'Турагентство',
                'verbose_name_plural': 'Турагентства',
            },
        ),
        # Tours offered by an agency.
        migrations.CreateModel(
            name='Tours',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tour', models.CharField(max_length=50, verbose_name='Название тура')),
                ('country', models.CharField(max_length=100, verbose_name='Страна')),
                ('period_s', models.DateField(verbose_name='Период тура с')),
                ('period_po', models.DateField(verbose_name='Период тура по')),
                ('summa', models.CharField(max_length=50, verbose_name='Стоимость тура')),
                ('text_of_tour', models.CharField(max_length=5000, verbose_name='Описание тура')),
                ('conditions', models.CharField(max_length=5000, verbose_name='Условия оплаты')),
                ('agency', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tour_agency.Agency')),
            ],
            options={
                'verbose_name': 'Тур',
                'verbose_name_plural': 'Туры',
            },
        ),
        # Tourist profile, one-to-one with the auth user.
        migrations.CreateModel(
            name='Tourist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Имя')),
                ('surname', models.CharField(max_length=50, verbose_name='Фамилия')),
                ('country_live', models.CharField(max_length=50, verbose_name='Страна проживания')),
                ('login', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Турист',
                'verbose_name_plural': 'Туристы',
            },
        ),
        # Tourist comments/questions attached to a tour.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type_of_comment', models.CharField(blank=True, choices=[('Вопрос_по_содержанию_тура', 'Вопрос По Содержанию Тура'), ('Вопрос_об_условиях_оплаты', 'Вопрос Об Условиях Оплаты'), ('Отзыв', 'Отзыв')], max_length=50, verbose_name='Тип комментария')),
                ('text', models.CharField(max_length=5000, verbose_name='Текст комментария')),
                ('post_date', models.DateTimeField(auto_now_add=True, verbose_name='Дата отправки')),
                ('tour', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tour_agency.Tours', verbose_name='Тур')),
                ('tourist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tour_agency.Tourist', verbose_name='Автор комментария')),
            ],
            options={
                'verbose_name': 'Комментарий',
                'verbose_name_plural': 'Комментарии',
            },
        ),
    ]
| 1.820313 | 2 |
server/src/server.py | massongit/word2vec-demo | 1 | 12759395 | <reponame>massongit/word2vec-demo<gh_stars>1-10
# coding=utf-8
"""
サーバー
"""
import collections
import datetime
import json
import logging
import os
import pathlib
import flask
import flask_api.status
import flask_classy
import gensim
import natto
import configs
# Author
__author__ = '<NAME>'
# Version
__version__ = '0.0.6'
# Configuration loaded from the sibling 'configs' directory.
conf = configs.Config(pathlib.Path.cwd().parent / 'configs')
app = flask.Flask(__name__, conf.get('general', 'front', 'url'), conf.get('general', 'front', 'dir path'))
def output_http_data(headers, body):
    """
    Log the contents of an HTTP message (request or response) at DEBUG level.

    :param headers: headers of the HTTP message (iterable of (name, value))
    :param body: body of the HTTP message (JSON-serializable object)
    """
    app.logger.debug('[Header]')
    for header in headers:
        app.logger.debug('{}: {}'.format(*header))
    # Pretty-print the body as sorted, non-ASCII-preserving JSON.
    app.logger.debug(os.linesep.join(['[Data]',
                                      json.dumps(body, indent=4, ensure_ascii=False, sort_keys=True)]))
@app.route('/')
def index():
    """
    Serve the front-end top page.

    :return: the static index.html page
    """
    app.logger.debug('/ called!')
    return app.send_static_file('index.html')
class OrderedCounter(collections.Counter, collections.OrderedDict):
    """
    Counter that preserves the order in which elements were inserted.
    """
    pass
class WordEmbeddingView(flask_classy.FlaskView):
    """
    View that returns computation results from word-embedding models.
    """
    trailing_slash = False
    def __init__(self):
        self.word_embeddings = collections.OrderedDict()
        # When a Word2Vec model (and its MeCab dictionary) is configured
        if conf.get('general', 'word2vec', 'model path') and conf.get('general', 'word2vec', 'mecab dir path'):
            # Word2Vec model
            self.word_embeddings['word2vec'] = gensim.models.KeyedVectors.load_word2vec_format(
                conf.get('general', 'word2vec', 'model path'),
                binary=bool(conf.get('general', 'word2vec', 'is binary')))
            # MeCab tokenizer (wakati = space-separated word output)
            self.mecab = natto.MeCab({
                'output_format_type': 'wakati',
                'dicdir': conf.get('general', 'word2vec', 'mecab dir path')
            })
        # When a fastText model is configured
        if conf.get('general', 'fasttext', 'model path'):
            # fastText model
            self.word_embeddings['fasttext'] = gensim.models.FastText.load_fasttext_format(conf.get('general', 'fasttext', 'model path'))
        # Sign applied to each keyword category when counting.
        self.pn = {
            'positive': 1,
            'negative': -1
        }
    def _wakati_keywords(self, method, keywords):
        """
        Split keywords into words.

        :param method: embedding method name ('word2vec' or 'fasttext')
        :param keywords: list of keywords
        :return: list of word-split keywords
        """
        words = list()
        for keyword in keywords:
            if method == 'fasttext':  # fastText handles the keyword as-is
                words.append(keyword)
            elif method == 'word2vec':  # Word2Vec needs MeCab word splitting
                words += self.mecab.parse(keyword).split()
        return words
    def _count_keywords(self, request):
        """
        Count words
        (positive words count +1, negative words count -1).

        :param request: request dict
        :return: word count result
        """
        # Word counters
        # (positive words are +1, negative words are -1)
        # counter['positive']: counter of positive words
        # counter['negative']: counter of negative words
        counter = dict()
        for k in self.pn.keys():
            if k in request:
                words = self._wakati_keywords(request['method'], request[k])
            else:
                words = list()
            counter[k] = OrderedCounter(words)
        counter['positive'].subtract(counter['negative'])
        return counter['positive']
    def _make_responce(self, request):
        """
        Build the response.

        :param request: request dict
        :return: response dict with 'positive', 'negative' and 'similar' keys
        """
        # Word counter
        # (positive words are +1, negative words are -1)
        counter = self._count_keywords(request)
        # Response: repeat each word |count| times in the matching sign bucket.
        responce = {k: [w for w, n in counter.items() for _ in range(pm * n)]
                    for k, pm in self.pn.items()}
        # Derive similar words from the selected embedding model.
        responce['similar'] = [{
            'word': w,
            'cosine': c
        } for w, c in self.word_embeddings[request['method']].most_similar(**responce)]
        return responce
    def post(self):
        # POST /wordembedding — compute similar words for the posted keywords.
        try:
            app.logger.debug('POST /wordembedding/ called!')
            # Request payload
            request = flask.request.get_json()
            app.logger.debug('<Request>')
            output_http_data(flask.request.headers, request)
            response = flask.jsonify(self._make_responce(request))
            response.status_code = flask_api.status.HTTP_200_OK
            response.headers['Access-Control-Allow-Origin'] = '*'
            app.logger.debug('<Response>')
            app.logger.debug('[Status]')
            app.logger.debug(response.status)
            output_http_data(response.headers, response.json)
            return response
        except Exception as e:
            # Log the failure and answer with a generic 500.
            app.logger.exception(e)
            flask.abort(flask_api.status.HTTP_500_INTERNAL_SERVER_ERROR)
    def get(self):
        # GET /wordembedding — list the available embedding method names.
        app.logger.debug('GET /wordembedding/ called!')
        response = flask.jsonify(list(self.word_embeddings.keys()))
        response.status_code = flask_api.status.HTTP_200_OK
        response.headers['Access-Control-Allow-Origin'] = '*'
        app.logger.debug('<Response>')
        app.logger.debug('[Status]')
        app.logger.debug(response.status)
        output_http_data(response.headers, response.json)
        return response
# NOTE(review): the __main__ guard is duplicated below; logging setup runs in
# the first guard, the view is registered at module level (so it also happens
# under a WSGI server), and app.run happens in the second guard.
if __name__ == '__main__':
    # Set the RootLogger level to DEBUG
    logging.root.setLevel(logging.DEBUG)
    # Attach handlers (console + timestamped log file) to the RootLogger
    for handler in [logging.StreamHandler(),
                    logging.FileHandler(str(pathlib.Path(conf.get('general', 'log', 'path'))
                                            / (datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + '.log')))]:
        handler.setLevel(logging.root.getEffectiveLevel())
        handler.setFormatter(logging.Formatter('[%(name)s %(asctime)s %(levelname)s] %(message)s'))
        logging.root.addHandler(handler)
WordEmbeddingView.register(app)
if __name__ == '__main__':
    app.run(conf.get('general', 'server', 'host'), conf.get('general', 'server', 'port'), True, use_reloader=False)
| 2.375 | 2 |
vstreamer_utils/model/FileEntry.py | artudi54/video-streamer | 2 | 12759396 | <filename>vstreamer_utils/model/FileEntry.py<gh_stars>1-10
import abc
import collections
import copy
import datetime
import pathlib
import time
import pymediainfo
import vstreamer_utils
from vstreamer_utils import model
class FileEntry(abc.ABC):
def __new__(cls, file=None, directory_root=None):
# if called from subclass call default implementation
if cls is not FileEntry:
return super().__new__(cls)
# if called from FileEntry class return selected subclass (factory)
if pathlib.Path(file).is_dir():
return super().__new__(DirectoryEntry)
return super().__new__(VideoFileEntry)
def __init__(self, file, directory_root):
file = pathlib.Path(file)
directory_root = pathlib.Path(directory_root)
stat = file.stat()
self.filename = str(file.name)
self.path = "/" + str(file.relative_to(directory_root))
self.size = stat.st_size
self.creation_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(stat.st_ctime))
self.modification_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(stat.st_mtime))
# additional
self.description = None
self.image = None
self.properties = collections.OrderedDict()
self.other_properties = collections.OrderedDict()
self.properties["Filename"] = self.filename
self.properties["Path"] = self.path
self.properties["Size"] = vstreamer_utils.size_to_string(self.size)
self.properties["Creation Time"] = self.creation_time
self.properties["Modification Time"] = self.modification_time
@abc.abstractmethod
def is_video(self):
...
def light_copy(self):
copied = copy.copy(self)
copied.description = None
copied.image = None
return copied
def additional_properties(self):
return model.AdditionalEntryProperties.from_file_entry(self)
def apply_additional_properties(self, additional_properties):
if additional_properties.title is None:
self.properties["Filename"] = self.filename
else:
self.properties["Filename"] = additional_properties.title
self.description = additional_properties.description
self.image = additional_properties.image
class DirectoryEntry(FileEntry):
def __init__(self, file, directory_root):
super().__init__(file, directory_root)
file = pathlib.Path(file)
if not file.is_dir():
raise ValueError("'%s' is not a directory" % str(file))
# zero size for directories
self.size = 0
self.properties["Size"] = "0B"
self.properties["Type"] = "Directory"
subdirectories, video_files = DirectoryEntry._file_count(file)
self.other_properties["File Count"] = str(subdirectories + video_files)
self.other_properties["Subdirectories"] = str(subdirectories)
self.other_properties["Video Files"] = str(video_files)
vstreamer_utils.log_info("Created DirectoryEntry for '%s'" % self.path)
def is_video(self):
return False
@staticmethod
def _file_count(directory):
directories = 0
video_files = 0
for file in directory.iterdir():
if file.is_dir():
directories += 1
elif vstreamer_utils.is_video_file(file):
video_files += 1
return directories, video_files
class VideoFileEntry(FileEntry):
def __init__(self, file, directory_root):
super().__init__(file, directory_root)
file = pathlib.Path(file)
if not vstreamer_utils.is_video_file(file):
raise ValueError("'%s' is not a video file" % str(file))
self.properties["Type"] = "Video"
media_info = pymediainfo.MediaInfo.parse(file)
for track in media_info.tracks:
if track.track_type == "General":
if track.format is not None:
self.other_properties["Container"] = track.format
if track.duration is not None:
self.other_properties["Duration"] = str(datetime.timedelta(seconds=track.duration//1000))
if track.overall_bit_rate is not None:
self.other_properties["Overall Bitrate"] = vstreamer_utils.size_to_string(track.overall_bit_rate, "b/s")
elif track.track_type == "Video":
if track.format_info is not None:
self.other_properties["Video Format"] = track.format_info
if track.width is not None and track.height is not None:
self.other_properties["Resolution"] = "%dx%d" % (track.width, track.height)
elif track.track_type == "Audio":
if track.format_info is not None:
self.other_properties["Audio Format"] = track.format_info
vstreamer_utils.log_info("Created VideoFileEntry for '%s'" % self.path)
def is_video(self):
return True
| 2.453125 | 2 |
graphit/graph_io/__init__.py | codacy-badger/graphit | 0 | 12759397 | <gh_stars>0
#TODO: make walk and serialization methods to customize format export
#TODO: add support for import/export of LEMON Graph Format (LGF) http://lemon.cs.elte.hu/pub/doc/1.2.3/a00002.html | 0.96875 | 1 |
requests_example.py | guilhermej/web_requests | 4 | 12759398 | <reponame>guilhermej/web_requests
#####################################
# Python para Pentesters #
# https://solyd.com.br/treinamentos #
#####################################
import requests

url = 'https://solyd.com.br'
# Spoof a desktop browser User-Agent so the server treats us like a normal client.
cabecalho = {'user-agent': 'Mozilla/5.0 (X11; Linux i686; rv:43.0) Gecko/20100101 Firefox/43.0 Iceweasel/43.0.4',
             }
# NOTE(review): defined but never passed to the request — presumably meant as
# ``params=parametros``; confirm intent before wiring it in.
parametros = {'id': '10'}

resposta = requests.get(url, headers=cabecalho)
# Parenthesised print works on both Python 2 and Python 3 for a single argument
# (the original ``print resposta.text`` statement is a SyntaxError on Python 3).
print(resposta.text)
| 2.9375 | 3 |
add_lda_topic_vector_to_dataset.py | tikhonovpavel/LdaSummarization | 0 | 12759399 | import glob
import pickle
import gensim
import torch
from tqdm import tqdm
import nltk
nltk.download('wordnet')
# Earlier loader kept for reference — presumably the dictionary used to be
# pickled on its own before being bundled with the model; confirm before deleting.
# with open('F:/workspace/LdaSummarization/dictionary_large_2020_12_05.pkl', 'rb') as f:
#     tm_dictionary = pickle.load(f)

# Load the pre-trained gensim LDA model together with its token dictionary.
with open('F:/workspace/LdaSummarization/lda_model_large_2020_12_08.pkl', 'rb') as f:
    lda_model, tm_dictionary = pickle.load(f)

# NOTE(review): ``stemmer`` is never used below (lemmatisation is used instead)
# — candidate for removal after confirming nothing else imports this module.
stemmer = nltk.SnowballStemmer('english')
def lemmatize(text):
    """Return the WordNet lemma of *text*, treating it as a verb."""
    lemmatizer = nltk.WordNetLemmatizer()
    return lemmatizer.lemmatize(text, pos='v')
def preprocess(text):
    """Tokenise *text*, drop gensim stop-words and tokens of length <= 3,
    and lemmatise whatever remains."""
    return [
        lemmatize(token)
        for token in gensim.utils.simple_preprocess(text)
        if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3
    ]
# Cap on how many shard files to process (effectively "all of them").
limit = 9999999
pt_files = sorted(glob.glob('F:/workspace/LdaSummarization/bert_data/cnndm' + '.' + 'train' + '.[0-9]*.pt'))[:limit]
for pt in tqdm.tqdm(pt_files):
    pt_result = []  # NOTE(review): never used — candidate for removal.
    res = torch.load(pt)
    for r in res:
        # Infer the LDA topic mixture of each source article's text.
        bow_vector = tm_dictionary.doc2bow(preprocess(' '.join(r['src_txt'])))
        # Topics sorted by descending probability.
        article_topic = sorted(lda_model[bow_vector], key=lambda tup: -1 * tup[1])
        r['topics'] = article_topic
    # Write the enriched shard to the parallel 'bert_data_with_topics' tree.
    torch.save(res, pt.replace('bert_data', 'bert_data_with_topics'))
| 2.3125 | 2 |
indico/util/enum.py | salevajo/indico | 1 | 12759400 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from enum import Enum
class IndicoEnum(Enum):
    """Enhanced Enum with dict-like member access.

    ``SomeEnum.get('some_name')`` behaves like ``dict.get``: it returns the
    member with that name, or *default* when no such member exists.
    """

    @classmethod
    def get(cls, name, default=None):
        """Return the member named *name*, or *default* if there is none."""
        return cls.__members__.get(name, default)

    @classmethod
    def serialize(cls):
        """Return a plain ``{name: value}`` mapping of all members."""
        return {member.name: member.value for member in cls}
class RichEnum(IndicoEnum):
    """An Enum whose members carry extra presentation metadata.

    Subclasses populate ``__titles__`` and ``__css_classes__`` with
    member-keyed mappings; while they are left empty (the defaults below),
    both properties return ``None``.
    """

    __titles__ = []
    __css_classes__ = []

    @property
    def title(self):
        """Human-readable title of this member, or None when unset."""
        if not self.__titles__:
            return None
        return self.__titles__[self]

    @property
    def css_class(self):
        """CSS class of this member, or None when unset."""
        if not self.__css_classes__:
            return None
        return self.__css_classes__[self]
class RichIntEnum(int, RichEnum):
    """RichEnum whose members are also plain ints (compare and serialize as int)."""
    pass
| 3.3125 | 3 |
caql/agent_policy_test.py | gunpowder78/google-research | 1 | 12759401 | <reponame>gunpowder78/google-research<filename>caql/agent_policy_test.py
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for agent_policy."""
from unittest import mock
import numpy as np
import tensorflow as tf
from tf_agents.specs import array_spec
from caql import agent_policy
from caql import caql_agent
class AgentPolicyTest(tf.test.TestCase):
  """Tests AgentPolicy.action() for 1-D/2-D states with batch_mode on/off."""

  def setUp(self):
    super(AgentPolicyTest, self).setUp()
    # np.float was a deprecated alias of the builtin float and was removed in
    # NumPy 1.24; the builtin is equivalent here.
    self._action_spec = array_spec.BoundedArraySpec(
        shape=(3,), dtype=float, minimum=[0, 0, 0], maximum=[1, 1, 1])

  def _action_for(self, state, batch_mode):
    """Returns the policy action for `state` using a mocked agent whose
    best_action is always [[0, 1, 2]]."""
    mock_agent = mock.create_autospec(caql_agent.CaqlAgent, instance=True)
    mock_agent.best_action.return_value = (
        np.arange(3).reshape(1, 3), None, None, True)
    policy = agent_policy.AgentPolicy(self._action_spec, mock_agent)
    return policy.action(state, batch_mode=batch_mode)

  def testBatchModeFalseWithOneDimensionalState(self):
    action = self._action_for(np.arange(2), batch_mode=False)
    # batch_mode=False unwraps the leading batch dimension.
    self.assertAllEqual(np.arange(3), action)

  def testBatchModeFalseWithTwoDimensionalState(self):
    action = self._action_for(np.arange(2).reshape(1, 2), batch_mode=False)
    self.assertAllEqual(np.arange(3), action)

  def testBatchModeTrueWithOneDimensionalState(self):
    action = self._action_for(np.arange(2), batch_mode=True)
    # batch_mode=True keeps the (1, 3) batch shape.
    self.assertAllEqual(np.arange(3).reshape(1, 3), action)

  def testBatchModeTrueWithTwoDimensionalState(self):
    action = self._action_for(np.arange(2).reshape(1, 2), batch_mode=True)
    self.assertAllEqual(np.arange(3).reshape(1, 3), action)
# Run the tests with TensorFlow's test runner when executed directly.
if __name__ == '__main__':
  tf.test.main()
| 2.3125 | 2 |
2017/day-21/part1-2.py | amochtar/adventofcode | 1 | 12759402 | <gh_stars>1-10
import numpy as np
def hash(matrix):
    """Return a hashable tuple-of-tuples snapshot of a 2-D grid, usable as a
    dict key.

    NOTE: deliberately keeps the original name even though it shadows the
    builtin ``hash`` — ``solve()`` calls it by this name.
    """
    return tuple(tuple(row) for row in matrix)
def solve(inp):
    """Solve Advent of Code 2017 day 21 (fractal art).

    *inp* is the list of enhancement rules, one 'pattern => replacement' per
    line with rows separated by '/'.  Prints the number of '#' pixels after
    5 iterations (part 1) and after 18 iterations (part 2).
    """
    patterns = {}
    for line in inp:
        left, right = line.split(' => ')
        left = np.array([list(row) for row in left.split('/')])
        right = np.array([list(row) for row in right.split('/')])
        # Register every rotation of the pattern and of its mirror image so
        # that lookups never need to normalise the block first.
        for _ in range(4):
            left = np.rot90(left)
            patterns[hash(left)] = right
        left = np.fliplr(left)
        for _ in range(4):
            left = np.rot90(left)
            patterns[hash(left)] = right
    # Fixed starting image defined by the puzzle.
    image = np.array([list(row) for row in [".#.", "..#", "###"]])
    for i in range(18):
        size = image.shape[0]
        # Split into 2x2 blocks when the size is even, otherwise 3x3 blocks.
        if size % 2 == 0:
            step = 2
        else:
            step = 3
        count = size // step
        new_step = step + 1  # each enhanced block grows by one row/column
        new_size = count * new_step
        new_image = np.empty((new_size, new_size), dtype=str)
        # Replace every block with its enhancement from the rule book.
        for x in range(count):
            for y in range(count):
                xfrom = x*step
                xto = xfrom + step
                yfrom = y*step
                yto = yfrom + step
                new_xfrom = x*new_step
                new_xto = new_xfrom + new_step
                new_yfrom = y*new_step
                new_yto = new_yfrom + new_step
                replacement = patterns[hash(image[xfrom:xto, yfrom:yto])]
                new_image[new_xfrom:new_xto, new_yfrom:new_yto] = replacement
        image = new_image
        if i == 4:  # i.e. after 5 completed iterations
            print("Part 1:", sum(sum(image == "#")))
    print("Part 2:", sum(sum(image == "#")))
# Read the puzzle input (one enhancement rule per line) and solve both parts.
with open('input.txt', 'r') as f:
    inp = f.read().splitlines()
solve(inp)
| 2.8125 | 3 |
dts_test_project/dts_test_project/settings.py | Jragon/django-tenants-rls | 0 | 12759403 | """
Django settings for dts_test_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Absolute path of the repository checkout (two levels above this file).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in source — acceptable only because this
# is a throwaway test project.
SECRET_KEY = '<KEY>'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []

# Uploaded files are stored in per-tenant directories.
DEFAULT_FILE_STORAGE = 'tenant_schemas.storage.TenantFileSystemStorage'

# Application definition

# Apps whose tables live in the shared (public) schema.
SHARED_APPS = (
    'tenant_schemas',  # mandatory
    'customers',  # you must list the app where your tenant model resides in
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

# Apps whose tables are created separately inside every tenant schema.
TENANT_APPS = (
    'dts_test_app',
)

TENANT_MODEL = "customers.Client"  # app.Model

TEST_RUNNER = 'django.test.runner.DiscoverRunner'

# Union of SHARED_APPS and TENANT_APPS; 'tenant_schemas' comes first so its
# migration handling takes precedence.
INSTALLED_APPS = (
    'tenant_schemas',
    'dts_test_app',
    'customers',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

ROOT_URLCONF = 'dts_test_project.urls'

WSGI_APPLICATION = 'dts_test_project.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

# Connection settings come from the environment; the custom backend adds
# per-tenant schema switching on top of PostgreSQL.
DATABASES = {
    'default': {
        'ENGINE': 'tenant_schemas.postgresql_backend',
        'NAME': os.environ.get('PG_NAME', 'dts_test_project'),
        'USER': os.environ.get('PG_USER'),
        'PASSWORD': os.environ.get('PG_PASSWORD'),
        'HOST': os.environ.get('PG_HOST'),
        'PORT': int(os.environ.get('PG_PORT')) if os.environ.get('PG_PORT') else None,
    }
}

# NOTE(review): the first middleware references the 'tenant_tutorial' project
# rather than something local to dts_test_project — confirm that module is
# importable in this environment.
MIDDLEWARE = (
    'tenant_tutorial.middleware.TenantTutorialMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'OPTIONS': {
            'debug': DEBUG,
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.contrib.messages.context_processors.messages',
            ]
        },
    }
]

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'

STATICFILES_STORAGE = 'tenant_schemas.storage.TenantStaticFilesStorage'

# Console logging with the current tenant's schema/domain prepended to every
# record (via TenantContextFilter); the root logger is routed to a null handler.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
        'tenant_context': {
            '()': 'tenant_schemas.log.TenantContextFilter'
        },
    },
    'formatters': {
        'simple': {
            'format': '%(levelname)-7s %(asctime)s %(message)s',
        },
        'tenant_context': {
            'format': '[%(schema_name)s:%(domain_url)s] '
                      '%(levelname)-7s %(asctime)s %(message)s',
        },
    },
    'handlers': {
        'null': {
            'class': 'logging.NullHandler',
        },
        'console': {
            'class': 'logging.StreamHandler',
            'filters': ['tenant_context'],
            'formatter': 'tenant_context',
        },
    },
    'loggers': {
        '': {
            'handlers': ['null'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
}
| 1.914063 | 2 |
Assignment2/task2d.py | HyperTars/NYC-Taxi-Analysis | 0 | 12759404 | <gh_stars>0
import sys
from pyspark import SparkContext
# Reuse an existing Spark context if one is already active.
sc = SparkContext.getOrCreate()
file = sc.textFile(sys.argv[1], 1)
lines = file.map(lambda line: line.split(','))
# Total trips per medallion: one record per trip keyed by medallion id (col 0).
data_trips = lines.map(lambda x: ((x[0]), 1))
trips = data_trips.reduceByKey(lambda x, y: x + y)
# Distinct active days per medallion: key by (medallion, pickup date).
# NOTE(review): assumes column 3 is the pickup datetime and its first 10
# characters are the YYYY-MM-DD date — confirm against the task1a.out schema.
data_days = lines.map(lambda x: ((x[0], x[3][:10]), 1))
days = data_days.reduceByKey(lambda x, y: x + y)
days = days.map(lambda x: (x[0][0], 1))
days = days.reduceByKey(lambda x, y: x + y)
# Join trip and day counts, then compute average trips per active day.
result = trips.join(days) \
    .map(lambda x: (x[0], x[1][0], x[1][1], float((x[1][0] / x[1][1]))))
result = result.sortBy(lambda x: x[0])
# Emit CSV: medallion,trips,days,avg (avg formatted to 2 decimals).
output = result.map(lambda x: x[0] + ',' + str(x[1]) + ',' + str(x[2])
                    + ',' + '%.2f' % x[3])
output.saveAsTextFile("task2d.out")
sc.stop()
'''
module load python/gnu/3.6.5
module load spark/2.4.0
rm -rf task2d.out
hfs -rm -R task2d.out
spark-submit --conf \
spark.pyspark.python=/share/apps/python/3.6.5/bin/python \
task2d.py task1a.out
hfs -getmerge task2d.out task2d.out
hfs -rm -R task2d.out
wc -l task2d.out
head task2d.out
tail task2d.out
'''
| 2.34375 | 2 |
hooks/webkitpy/common/net/failuremap_unittest.py | nizovn/luna-sysmgr | 3 | 12759405 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.net.buildbot import Build
from webkitpy.common.net.failuremap import *
from webkitpy.common.net.regressionwindow import RegressionWindow
from webkitpy.tool.mocktool import MockBuilder
class FailureMapTest(unittest.TestCase):
    """Unit tests for FailureMap built from two builders' regression windows."""

    builder1 = MockBuilder("Builder1")
    builder2 = MockBuilder("Builder2")
    build1a = Build(builder1, build_number=22, revision=1233, is_green=True)
    build1b = Build(builder1, build_number=23, revision=1234, is_green=False)
    build2a = Build(builder2, build_number=89, revision=1233, is_green=True)
    build2b = Build(builder2, build_number=90, revision=1235, is_green=False)
    # NOTE(review): the duplicated u'test1' presumably exercises FailureMap's
    # de-duplication (tests_failing_for below expects a single entry).
    regression_window1 = RegressionWindow(build1a, build1b, failing_tests=[u'test1', u'test1'])
    regression_window2 = RegressionWindow(build2a, build2b, failing_tests=[u'test1'])

    def _make_failure_map(self):
        """Builds a FailureMap containing both builders' regression windows."""
        failure_map = FailureMap()
        failure_map.add_regression_window(self.builder1, self.regression_window1)
        failure_map.add_regression_window(self.builder2, self.regression_window2)
        return failure_map

    # assertEquals is a long-deprecated alias of assertEqual; use the
    # canonical name throughout.
    def test_failing_revisions(self):
        failure_map = self._make_failure_map()
        self.assertEqual(failure_map.failing_revisions(), [1234, 1235])

    def test_new_failures(self):
        failure_map = self._make_failure_map()
        failure_map.filter_out_old_failures(lambda revision: False)
        self.assertEqual(failure_map.failing_revisions(), [1234, 1235])

    def test_new_failures_with_old_revisions(self):
        failure_map = self._make_failure_map()
        failure_map.filter_out_old_failures(lambda revision: revision == 1234)
        self.assertEqual(failure_map.failing_revisions(), [])

    def test_new_failures_with_more_old_revisions(self):
        failure_map = self._make_failure_map()
        failure_map.filter_out_old_failures(lambda revision: revision == 1235)
        self.assertEqual(failure_map.failing_revisions(), [1234])

    def test_tests_failing_for(self):
        failure_map = self._make_failure_map()
        self.assertEqual(failure_map.tests_failing_for(1234), [u'test1'])

    def test_failing_tests(self):
        failure_map = self._make_failure_map()
        self.assertEqual(failure_map.failing_tests(), set([u'test1']))
| 1.671875 | 2 |
preprocess/snips_preprocess.py | OlegJakushkin/s3prl | 856 | 12759406 | from random import shuffle
import os
from glob import glob
import shutil
import re
import tqdm
from multiprocessing import Pool
from normalise import normalise
# Month-abbreviation expansions (with and without the trailing dot).
months = {'jan.': 'January', 'feb.': 'February', 'mar.': 'March', 'apr.': 'April', 'may': 'May', 'jun.': 'June', 'jul.': 'July', 'aug.': 'August', 'sep.': 'September', 'oct.': 'October', 'nov.': 'November', 'dec.': 'December', 'jan': 'January', 'feb': 'February', 'mar': 'March', 'apr': 'April', 'jun': 'June', 'jul': 'July', 'aug': 'August', 'sep': 'September', 'oct': 'October', 'nov': 'November', 'dec': 'December'}
# Whole-word replacements, keyed by the lower-cased token.
replace_words = {'&': 'and', '¡':'', 'r&b':'R and B', 'funtime':'fun time', 'español':'espanol', "'s":'s', 'palylist':'playlist'}
# Accented-character transliterations applied inside tokens.
replace_vocab = {'ú':'u', 'ñ':'n', 'Ō':'O', 'â':'a'}
# Tokens that bypass text normalisation entirely (fixed spellings).
reservations = {'chyi':'chyi', 'Pre-Party':'pre party', 'Chu':'Chu', 'B&B':'B and B', '0944':'nine four four', 'Box':'Box', 'ain’t':'am not', 'Zon':'Zon', 'Yui':'Yui', 'neto':'neto', 'skepta':'skepta', '¡Fiesta':'Fiesta', 'Vue':'Vue', 'iheart':'iheart', 'disco':'disco'}
same = "klose la mejor música para tus fiestas dubstep dangles drejer listas".split(' ')
for word in same:
    reservations[word] = word


def word_normalise(words):
    """Normalise a list of raw tokens: expand month abbreviations, apply the
    whole-word replacements, transliterate accented characters, and strip
    punctuation (.,!?;/).  Returns the normalised token list."""
    ret = []
    for word in words:
        if word.lower() in months:
            word = months[word.lower()]
        if word.lower() in replace_words:
            word = replace_words[word.lower()]
        for regex in replace_vocab:
            # BUG FIX: previously substituted with '' (dropping the accented
            # character) instead of using the mapped replacement, so e.g.
            # 'señor' became 'seor' rather than 'senor'.
            word = re.sub(regex, replace_vocab[regex], word)
        word = re.sub(r'[\.\,\!\?;\/]', '', word)
        ret.append(word)
    return ret
def sent_normalise(text, slots_split=None):
    """Normalise one transcript and keep its slot labels aligned.

    *text* is a space-separated utterance; *slots_split* is the parallel list
    of slot labels (defaults to all-'O').  Each source token may expand into
    several normalised words; every expansion inherits the source token's
    slot label.  Returns ``(norm_slots, norm_texts)``.
    """
    norm_slots, norm_texts = [], []
    text_split = text.split(' ')
    if slots_split is None:
        slots_split = ['O']*len(text_split)
    for idx in range(len(text_split)):
        # NOTE(review): this is a substring test, not a set test — it also
        # matches multi-character tokens like ',!' (and the empty string).
        if text_split[idx] in '.,!?;/]':
            continue
        # Reserved tokens keep their fixed expansion and skip normalisation.
        if text_split[idx] in reservations:
            for word in reservations[text_split[idx]].split(' '):
                norm_texts.append(word)
                norm_slots.append(slots_split[idx])
            continue
        # Full normalisation: word_normalise then the 'normalise' package.
        norm_text = normalise(word_normalise([text_split[idx]]), variety="AmE", verbose=False)
        for phrase in norm_text:
            if phrase == '':
                continue
            # A phrase may contain several words (split on space or hyphen).
            for word in re.split(r' |\-', phrase):
                word = re.sub(r'[\.\,\!\?;\/]', '', word)
                if word == '':
                    continue
                norm_texts.append(word)
                norm_slots.append(slots_split[idx])
    return norm_slots, norm_texts
def process_raw_snips_file(file, out_f):
    """Convert a raw SNIPS file ('token:slot ... <=> intent' per line) into
    the 'id | BOS text EOS | O slots | intent' format used downstream."""
    with open(file) as fin:
        raw_lines = [raw.strip() for raw in fin.readlines()]
    with open(out_f, 'w') as fout:
        for cnt, line in enumerate(raw_lines):
            text = line.split(' <=> ')[0]
            intent = line.split(' <=> ')[1]
            tokens = []
            slots = []
            for chunk in text.split():
                # '::' is an escaped ':' inside a token; collapse it first.
                parts = chunk.replace('::', ':').split(':')
                tokens.append(parts[0] if len(parts) == 2 else ' ')
                slots.append(parts[1])
            assert len(tokens) == len(slots), (tokens, slots)
            fout.write('%d | BOS %s EOS | O %s | %s\n'
                       % (cnt, ' '.join(tokens), ' '.join(slots), intent))
def remove_IBO_from_snipt_vocab_slot(in_f, out_f):
    """Strip the two-character BIO prefix ('B-'/'I-') from every slot label in
    *in_f* and write the de-duplicated label set to *out_f*, one per line.
    Output order is unspecified (set iteration)."""
    with open(in_f) as fin:
        labels = [raw.strip() for raw in fin.readlines()]
    # Everything except the bare 'O' tag loses its first two characters.
    unique_labels = {label if label == 'O' else label[2:] for label in labels}
    with open(out_f, 'w') as fout:
        for label in unique_labels:
            fout.write('%s\n' % label)
def process_daniel_snips_file(content):
    """Split the combined SNIPS TSV (lines of 'uttid\\ttext\\tslots\\tintent')
    into (train, valid, test) triples of (utt2text, utt2slots, utt2intent)
    dicts.

    Cleans each line by re-aligning slots when the text contains a double
    space and by stripping the 'intent/' prefix from slot labels.  The
    hard-coded assertions pin the expected SNIPS statistics: 7 intents,
    700/700/13084 utterances, 40 distinct slot labels per split.
    """
    content = [x.strip() for x in content]
    utt_ids = [x.split('\t', 1)[0] for x in content]
    # The split name is embedded in the utterance id: '<prefix>-<split>-<n>'.
    valid_uttids = [x for x in utt_ids if x.split('-')[1] == 'valid']
    test_uttids = [x for x in utt_ids if x.split('-')[1] == 'test']
    train_uttids = [x for x in utt_ids if x.split('-')[1] == 'train']
    utt2text, utt2slots, utt2intent = {}, {}, {}
    assert len(utt_ids) == len(set(utt_ids))
    # create utt2text, utt2slots, utt2intent
    for line in content:
        uttid, text, slots, intent = line.split('\t')
        if len(text.split()) != len(slots.split()):  # detect 'empty' in text
            # A double space produced one fewer text token than slot labels;
            # drop the slot label that corresponds to the empty token.
            assert len(text.split(' ')) == 2
            empty_idx = text.split().index(text.split(' ')[0].split()[-1]) + 1
            slots_list = slots.split()
            del slots_list[empty_idx]
            cleaned_slots = ' '.join(slots_list)
            assert len(text.split()) == len(slots_list)
            cleaned_text = ' '.join(text.split())
            #print(cleaned_text, cleaned_slots)
        else:
            (cleaned_text, cleaned_slots) = (text, slots)
        # get rid of the 'intent/' from all slot values
        cleaned_slots = ' '.join([x.split('/')[1] if x != 'O' else x for x in cleaned_slots.split()])
        # strip the whitespaces before punctuations
        #cleaned_text = re.sub(r'\s([?.!,"](?:\s|$))', r'\1', cleaned_text)
        utt2text[uttid] = cleaned_text
        utt2slots[uttid] = cleaned_slots
        utt2intent[uttid] = intent
    # Re-key the cleaned data per split.
    test_utt2text, test_utt2slots, test_utt2intent = {}, {}, {}
    valid_utt2text, valid_utt2slots, valid_utt2intent = {}, {}, {}
    train_utt2text, train_utt2slots, train_utt2intent = {}, {}, {}
    for utt in valid_uttids:
        valid_utt2text[utt] = utt2text[utt]
        valid_utt2slots[utt] = utt2slots[utt]
        valid_utt2intent[utt] = utt2intent[utt]
    for utt in test_uttids:
        test_utt2text[utt] = utt2text[utt]
        test_utt2slots[utt] = utt2slots[utt]
        test_utt2intent[utt] = utt2intent[utt]
    for utt in train_uttids:
        train_utt2text[utt] = utt2text[utt]
        train_utt2slots[utt] = utt2slots[utt]
        train_utt2intent[utt] = utt2intent[utt]
    # Sanity checks against the known SNIPS dataset statistics.
    assert len(set(valid_utt2intent.values())) == len(set(test_utt2intent.values())) == len(set(train_utt2intent.values())) == 7
    assert len(valid_utt2intent.keys()) == len(test_utt2intent.keys()) == 700
    assert len(train_utt2intent.keys()) == 13084

    def __return_set_of_slots(utt2slots):
        """Return the set of distinct slot labels used across *utt2slots*."""
        all_slots = []
        for slot in utt2slots.values():
            all_slots.extend(slot.split())
        unique_slots = set(all_slots)
        return unique_slots

    assert len(__return_set_of_slots(valid_utt2slots)) == len(__return_set_of_slots(test_utt2slots)) == \
        len(__return_set_of_slots(train_utt2slots)) == 40
    return (train_utt2text, train_utt2slots, train_utt2intent), \
        (valid_utt2text, valid_utt2slots, valid_utt2intent), \
        (test_utt2text, test_utt2slots, test_utt2intent)
def map_and_link_snips_audio(snips_audio_dir, link_dir):
    """Copy the SNIPS mp3 recordings into link_dir/<split>/ with the speaker
    name prefixed onto each file name."""
    # traverse through snips_audio_dir
    result = [y for x in os.walk(snips_audio_dir) for y in glob(os.path.join(x[0], '*.mp3'))]
    for path in result:
        # NOTE(review): assumes a fixed absolute-path depth — component 8 is
        # the '<something>_<speaker>' directory.  Breaks if the dataset root
        # moves; confirm before reuse.
        person = path.split('/')[8].split('_')[1]
        filename = path.split('/')[-1]
        # Only files named 'snips-<split>-<n>.mp3' are part of the corpus.
        if filename[:5] != 'snips':
            continue
        uttid = filename.split('.')[0]
        new_uttid = person + '-' + filename
        partition = uttid.split('-')[1]
        destination = os.path.join(link_dir, partition, new_uttid)
        shutil.copyfile(path, destination)
def create_multispk_for_snips(output_dir):
    """Expand the single-speaker normalised SNIPS files into a combined
    multi-speaker 'all.iob.snips.txt' by replicating each utterance once per
    speaker, with the speaker name prefixed onto the utterance id."""
    # NOTE(review): '<NAME> <NAME>' looks like anonymisation residue — restore
    # the real space-separated speaker list before use.
    speakers = "<NAME> <NAME>".split(' ')
    dataset_info = [{'split':'test', 'num_utts':700}, {'split':'valid', 'num_utts':700}, {'split':'train', 'num_utts':13084}]
    test_out_f = open(os.path.join(output_dir, 'all.iob.snips.txt'), 'w')
    for data in dataset_info:
        num_utts = data['num_utts']
        split = data['split']
        with open(os.path.join(output_dir, 'single-matched-snips.%s.w-intent'%split)) as f:
            content = f.readlines()
        # Map 'snips-<split>-<n>' utterance ids to their full TSV line.
        utt2line = {x.strip().split()[0]:x.strip() for x in content}
        for spk in speakers:
            for num in range(num_utts):
                uttid = "%s-snips-%s-%d"%(spk, split, num) #mp3.split('/')[-1].split('.')[0]
                line = utt2line["snips-%s-%d"%(split, num)] #'-'.join(uttid.split('-')[1:])]
                text = line.split('\t')[1].upper()
                slots = line.split('\t')[2]
                intent = line.split('\t')[3]
                test_out_f.write('%s BOS %s EOS\tO %s %s\n' % (uttid, text, slots, intent))
    test_out_f.close()
def _write_normalised_split(split_dirs, out_path, vocab_slot, desc):
    """Normalise one split's utterances, count slot labels into *vocab_slot*,
    and write 'uttid\\tTEXT\\tslots\\tintent' lines to *out_path*."""
    utt2text, utt2slots, utt2intent = split_dirs
    with open(out_path, 'w') as out_file:
        for uttid in tqdm.tqdm(utt2text.keys(), desc=desc):
            text = utt2text[uttid]
            slots = utt2slots[uttid]
            intent = utt2intent[uttid]
            slots_split = slots.split()
            for s in slots_split:
                vocab_slot.setdefault(s, 0)
                vocab_slot[s] += 1
            norm_slots, norm_texts = sent_normalise(text, slots_split)
            assert len(norm_texts) == len(norm_slots), (norm_texts, norm_slots)
            out_file.write('%s\t%s\t%s\t%s\n' % (uttid, ' '.join(norm_texts).upper(), ' '.join(norm_slots), intent))


def apply_text_norm_and_modify_slots(all_tsv, output_dir):
    """Normalise the full SNIPS TSV and write one
    'single-matched-snips.<split>.w-intent' file per split, plus 'slots.txt'
    listing slot labels by descending frequency.

    Refactored: the three identical per-split loops are now one helper, and
    the vocabulary file is closed via a context manager (it previously leaked).
    """
    train_dirs, valid_dirs, test_dirs = process_daniel_snips_file(all_tsv)
    vocab_slot = {}
    # Keep the original processing order (test, valid, train) so the
    # insertion order of vocab_slot — and hence tie-breaking in the stable
    # sort below — is unchanged.
    _write_normalised_split(
        test_dirs, os.path.join(output_dir, 'single-matched-snips.test.w-intent'),
        vocab_slot, 'Text Normalising on testing set')
    _write_normalised_split(
        valid_dirs, os.path.join(output_dir, 'single-matched-snips.valid.w-intent'),
        vocab_slot, 'Text Normalising on validation set')
    _write_normalised_split(
        train_dirs, os.path.join(output_dir, 'single-matched-snips.train.w-intent'),
        vocab_slot, 'Text Normalising on training set')
    with open(os.path.join(output_dir, 'slots.txt'), 'w') as vocab_file:
        vocab_file.write('\n'.join(sorted(list(vocab_slot.keys()), key=lambda x: vocab_slot[x], reverse=True)))
def sox_func(inputs):
    """Pool worker for sox_mp3_to_wav: convert one speaker's mp3 files to
    16 kHz mono 16-bit signed wav under out_root/<split>/."""
    files, root, out_root, speaker = inputs
    for name in tqdm.tqdm(files, desc='Process for speaker: '+speaker):
        if name.endswith(".mp3"):
            # File names look like '<prefix>-<split>-<n>.mp3'; bucket by split.
            split = name.split('-')[1]
            out_dir = os.path.join(out_root, split)
            os.makedirs(out_dir, exist_ok=True)
            orig_file = os.path.join(root, name)
            new_file = os.path.join(out_dir, speaker+'-'+name.split('/')[-1].split('.')[0] + '.wav')
            # NOTE(review): os.popen builds a shell command by string
            # concatenation — paths with spaces or shell metacharacters will
            # break (or worse).  Prefer subprocess.run([...], shell=False).
            bashCommand = "sox " + orig_file + " -t wav -c 1 -r 16000 -b 16 -e signed-integer " + new_file
            # .read() blocks until sox finishes; the output is discarded.
            r = os.popen(bashCommand).read()
def sox_mp3_to_wav(in_root, out_root):
    """Convert every speaker directory under *in_root* to wav format using a
    pool of 16 worker processes (one task per directory that contains files)."""
    os.makedirs(out_root, exist_ok=True)
    pool = Pool(16)
    inputs = []
    for root, dirs, files in os.walk(in_root):
        print('[Processing] enter directory %s'%root)
        if not len(files):
            continue
        # NOTE(review): assumes the layout .../<something>_<speaker>/<subdir>/
        # so the speaker name is in the parent directory — confirm for new data.
        speaker = root.split('/')[-2].split('_')[1]
        print('[Processing] process %d audio files from speaker %s'%(len(files), speaker))
        inputs.append((files, root, out_root, speaker))
    pool.map(sox_func, inputs)
if __name__ == '__main__':
    import sys, os
    mode = sys.argv[1]
    if mode == 'text':
        # Text pipeline: normalise all transcripts from the NLU annotation
        # repo (skipping each file's header line) and build the
        # multi-speaker index file.
        repo_dir = sys.argv[2]
        dump_dir = sys.argv[3]
        os.makedirs(dump_dir, exist_ok=True)
        content = []
        content += open(os.path.join(repo_dir, 'data/nlu_annotation/valid')).readlines()[1:]
        content += open(os.path.join(repo_dir, 'data/nlu_annotation/test')).readlines()[1:]
        content += open(os.path.join(repo_dir, 'data/nlu_annotation/train')).readlines()[1:]
        apply_text_norm_and_modify_slots(content, dump_dir)
        create_multispk_for_snips(dump_dir)
    elif mode == 'audio':
        # Audio pipeline: convert the raw mp3 recordings to 16 kHz wav.
        audio_dir = sys.argv[2]
        dump_dir = sys.argv[3]
        # Step: sox the snips *.mp3 to the correct format
        sox_mp3_to_wav(audio_dir, dump_dir)
    else:
        print('Usage: python preprocess.py [text|audio] [data_path] [dump_path]')
| 2.109375 | 2 |
tests/test_cfg.py | NVSL/fiddle | 2 | 12759407 | from cfiddle import *
from util import *
from fixtures import *
from cfiddle.source import FullyInstrumentedExecutable
from cfiddle.util import working_directory
import pytest
import tempfile
import os
def test_cfg(test_cpp):
    """cfg() should render a control-flow graph for function 'four' to both
    PNG (via the keyword argument) and SVG (positionally)."""
    assert isinstance(test_cpp, FullyInstrumentedExecutable)
    with tempfile.TemporaryDirectory() as d:
        png = os.path.join(d, "test.png")
        svg = os.path.join(d, "test.svg")
        test_cpp.cfg("four", output=png)
        assert os.path.exists(png)
        test_cpp.cfg("four", svg)
        assert os.path.exists(svg)
| 2.03125 | 2 |
client/message.py | Kolkir/netbot | 0 | 12759408 | from enum import IntEnum
class MessageId(IntEnum):
    """Wire-protocol message type identifiers shared by client and server."""
    HELLO = 1
    CAPTURE_IMAGE = 2
    SEND_IMAGE = 3
    GET_CAMERA_LIST = 4
    SEND_CAMERA_LIST = 5
    MOVE = 6
    GET_CAMERA_PROP = 7
    SEND_CAMERA_PROP = 8
    STOP = 9
    SET_CAMERA_PROP = 10
class Message:
    """Base protocol message carrying only a message id."""

    def __init__(self, id):
        self.id_ = id

    def id(self):
        """Return this message's id."""
        return self.id_


class RecvMessage(Message):
    """Base class for inbound messages; subclasses parse their payload
    in ``from_bytes``."""

    def __init__(self, id):
        super().__init__(id)

    def from_bytes(self, data):
        """Populate the message from raw bytes (no-op in the base class)."""
        pass


class SendMessage(Message):
    """Base class for outbound messages; payload accumulates in a bytearray."""

    def __init__(self, id):
        super().__init__(id)
        self.bytes_ = bytearray()

    def add_bytes(self, data):
        """Append *data* to the outgoing payload."""
        self.bytes_.extend(data)

    def size(self):
        """Return the current payload length in bytes."""
        return len(self.bytes_)

    def to_bytes(self):
        """Return the accumulated payload."""
        return self.bytes_
class HelloMsg(Message):
    """Handshake message; carries no payload."""
    def __init__(self):
        super().__init__(MessageId.HELLO)
    def size(self):
        # No payload.
        return 0
    def to_bytes(self):
        # Nothing to serialise.
        return None
    def from_bytes(self, data):
        # Nothing to parse.
        pass
class StopMsg(Message):
    """Stop-command message; carries no payload."""
    def __init__(self):
        super().__init__(MessageId.STOP)
    def size(self):
        # No payload.
        return 0
    def to_bytes(self):
        # Nothing to serialise.
        return None
    def from_bytes(self, data):
        # Nothing to parse.
        pass
| 2.78125 | 3 |
Data_aug.py | MaithriRao/Predicting-COVID-19-From-Chest-X-Ray-Images-Using-Deep-Transfer-Learning | 0 | 12759409 | import os
import datetime
import json
import numpy as np
import itertools
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import transforms, datasets, models
import torch.nn.functional as F
from sklearn.metrics import roc_curve, auc
from matplotlib import pyplot as plt
from time import sleep
import datetime
import cv2
import shutil
from tqdm import tqdm
#
def transform_image(img):
    """Apply one random augmentation pass to *img* (an HxWxC numpy array) and
    return the resulting PIL image.

    Images smaller than 224 on their shortest side are first resized up so a
    ResNet50-sized crop is always possible; the random crop removes at most
    a tenth of the headroom above 224 pixels, preserving the required size.
    """
    min_size = min(img.shape[0],img.shape[1])
    max_crop = min_size - 224 # 224 for ResNet50
    pil_transform = transforms.ToPILImage()
    resize_transform = transforms.Resize(224)
    total_transform = transforms.Compose([
        # Colour jitter + padding applied together half of the time.
        transforms.RandomApply([
            transforms.ColorJitter(0.2, 0.2),
            transforms.Pad((10,10))
        ], p=0.5),
        transforms.RandomHorizontalFlip(),
        transforms.RandomPerspective(),
        transforms.RandomRotation(30),
        transforms.RandomCrop(min_size - round(max_crop/10))
    ])
    image = pil_transform(img)
    if min_size < 224:
        image = resize_transform(image)
    return total_transform(image)
# About RANDOMCROP transformation
# ResNet50 would a 224x224 sized images
# Due to differente size of images in dataset, random crop must preserve at least
# 224 pixels for each dimensions. With max_crop I obtain the maximum crop to preserve 224 pixels
# on minimum size. Then I crop min_size - max_crop/10
def data_augmentation(workspace, data_dir, source_dirs):
    """Build workspace/Augmented_TrainSet/: for every class directory listed
    in *source_dirs*, copy each source image across unchanged and also write
    12 independently augmented variants of it."""
    augset_dir = os.path.join(workspace, 'Augmented_TrainSet')
    if os.path.isdir(augset_dir) != True:
        os.mkdir(augset_dir)
    for c in source_dirs:
        if (os.path.isdir(os.path.join(augset_dir, c)) != True):
            os.mkdir(os.path.join(augset_dir, c))
        imgs = [x for x in os.listdir(os.path.join(data_dir, c))]
        for i, img in enumerate(imgs):
            original_img = img
            source_path = os.path.join(data_dir, c, original_img)
            target_path = os.path.join(augset_dir, c)
            # Keep the untouched original alongside the augmented copies.
            shutil.copy(source_path, target_path)
            img = cv2.imread(source_path)
            for j in range(12):
                # Each copy gets an independent random augmentation.
                new_img = np.array(transform_image(img))
                # '<name>_copy<j>.<ext>'
                new_img_name = "{}_copy{}.{}".format("".join(original_img.split(".")[:-1]),j,original_img.split(".").pop(-1))
                cv2.imwrite(os.path.join(target_path, new_img_name), new_img)
                print("Immagine {} trasformazione {} salvata".format(i, j), end="\r")
# DONT KEEP THIS CREATE_TRANSFORM FUNCTION ALREADY IN MODEL UTILITIES
def create_transform(model_name):
    """Return the evaluation-time preprocessing pipeline for *model_name*:
    resize + centre-crop to the model's input size, tensor conversion, and
    ImageNet normalisation."""
    # Inception v3 expects 299x299 inputs; every other backbone uses 224x224.
    side = 299 if model_name == 'Inception_v3' else 224
    return transforms.Compose([
        transforms.Resize(side),
        transforms.CenterCrop(side),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
| 2.25 | 2 |
tests/unit/test_area.py | applecreekacres/farmos.py.ext | 0 | 12759410 | <reponame>applecreekacres/farmos.py.ext<gh_stars>0
import mock
from mock.mock import MagicMock
from farmos_ext.area import Area
@mock.patch("farmos_ext.Farm")
def test_area_empty(mock_farm: MagicMock):
mock_farm.assets.return_value = []
mock_farm.areas.return_value = []
area = Area(mock_farm, {})
assert not area.tid
assert not area.description
assert not area.flags
assert not area.geofield
assert not area.vocabulary
assert not area.parent
mock_farm.areas.assert_called_with(None)
assert not area.parents_all
mock_farm.areas.assert_called_with(None)
assert not area.assets
mock_farm.assets.assert_called_with(None)
@mock.patch("farmos_ext.Farm")
def test_area_not_empty(mock_farm: MagicMock):
    """A fully populated record is parsed (tid coerced to int) and the raw
    parent/asset sub-documents are passed through to the Farm lookups."""
    area = Area(mock_farm, {
        "tid": '4',
        "description": 'small description',
        'flags': ['flag1', 'flag2', 'flag3'],
        "geofield": 'well-known text',
        "vocabulary": {
            "key": "value"
        },
        "parent": [
            {
                "parent": "value"
            }
        ],
        "parents_all": [
            {
                "parent2": "value2"
            },
            {
                "parent3": "value3"
            }
        ],
        "assets": [
            {"asset": "5"}
        ]
    })
    # tid is stored as a string in the record but exposed as an int.
    assert area.tid == 4
    assert area.description == "small description"
    assert len(area.flags) == 3
    assert area.geofield == "well-known text"
    assert isinstance(area.vocabulary, dict)
    assert "key" in area.vocabulary
    # Bare attribute accesses below are evaluated for their side effect:
    # they trigger the delegated Farm lookup that is asserted right after.
    area.parent
    mock_farm.areas.assert_called_with([{"parent": "value"}])
    area.parents_all
    mock_farm.areas.assert_called_with([
        {
            "parent2": "value2"
        },
        {
            "parent3": "value3"
        }
    ])
    area.assets
    mock_farm.assets.assert_called_with([
        {"asset": "5"}
    ])
| 2.390625 | 2 |
sort_radix.py | rachitmishra/45 | 0 | 12759411 | """
Shell Sort
Approach: Divide and Conquer
Complexity: O(n2)
"""
def sort_shell(input_arr):
print("""""""""""""""""""""""""")
print("input " + str(input_arr))
print("""""""""""""""""""""""""")
print("""""""""""""""""""""""""")
print("result " + str(input_arr))
print("""""""""""""""""""""""""")
if __name__ == '__main__':
    # Demo run on a small unsorted list.
    arr = [21, 4, 1, 3, 9, 20, 25, 6, 21, 14]
    sort_shell(arr)
| 4.15625 | 4 |
Easy/remove_linked_list_elements.py | BrynjarGeir/LeetCode | 0 | 12759412 | <filename>Easy/remove_linked_list_elements.py<gh_stars>0
from typing import Optional


# Definition for singly-linked list. ``ListNode`` is not part of ``typing``
# (importing it from there raised ImportError), and the class only existed
# as a comment, so the module could never be imported -- define it for real.
class ListNode:
    """Minimal singly-linked list node (LeetCode scaffold)."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
    """LeetCode 203 -- Remove Linked List Elements."""

    # Annotations are quoted so the module does not require ``ListNode`` to
    # be defined at import time (LeetCode injects it into the environment).
    def removeElements(self, head: "Optional[ListNode]", val: int) -> "Optional[ListNode]":
        """Remove every node whose ``val`` equals *val*; return the new head.

        Runs in O(n) time and O(1) extra space. The previous ``== None``
        checks are replaced with identity checks, and the redundant early
        return / sentinel allocation is dropped.
        """
        # Drop matching nodes from the front until the head is valid
        # (also handles an empty list).
        while head is not None and head.val == val:
            head = head.next
        # Walk the rest of the list, unlinking matches in place. Stay put
        # after a removal in case the new next node also matches.
        node = head
        while node is not None and node.next is not None:
            if node.next.val == val:
                node.next = node.next.next
            else:
                node = node.next
        return head
heroku-code/backend/parse_url.py | nd-0r/Musiquity | 0 | 12759413 | from backend.services import services
from urllib.parse import urlparse
def parse_url(url):
    '''
    Takes a validated url pointing to an item on a supported streaming
    service and returns a query to be used with other streaming services.

    Parameters:
        url (str): the url to be parsed

    Returns:
        Query: the query object to be used with other streaming services,
        or None when no supported service matches the url's host.
    '''
    netloc = urlparse(url).netloc
    # Dispatch on the service whose known network locations contain this
    # url's host. Iterating values() avoids the needless key lookup.
    for service in services.values():
        if netloc in service.NETLOCS:
            return service.parse_link(url)
    # Unrecognised host: explicit None (previously an implicit fall-through).
    return None
RecoTauTag/RecoTau/python/CaloTauProducer_cff.py | nistefan/cmssw | 3 | 12759414 | <filename>RecoTauTag/RecoTau/python/CaloTauProducer_cff.py
import FWCore.ParameterSet.Config as cms
from RecoTauTag.RecoTau.CaloRecoTauTagInfoProducer_cfi import *
from RecoTauTag.RecoTau.CaloRecoTauProducer_cfi import *
# Sequence running the CaloTau reconstruction chain: first build the
# CaloRecoTauTagInfos, then produce the CaloTaus from them.
CaloTau = cms.Sequence(caloRecoTauTagInfoProducer*caloRecoTauProducer)
| 1.304688 | 1 |
third_party/rules_pkg-0.7.0/tests/zip/zip_test.py | Vertexwahn/FlatlandRT | 62 | 12759415 | # Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import filecmp
import unittest
import zipfile
from bazel_tools.tools.python.runfiles import runfiles
from tests.zip import zip_test_lib
# Expected CRC-32 checksums of the fixture files packaged into the test zips.
HELLO_CRC = 2069210904
LOREM_CRC = 2178844372
EXECUTABLE_CRC = 342626072
class ZipContentsTests(zip_test_lib.ZipContentsTestBase):
    """Golden tests for the pkg_zip rule: each case asserts the exact entry
    list (names, CRCs, modes, timestamps) of a pre-built test zip."""

    def test_empty(self):
        self.assertZipFileContent("test_zip_empty.zip", [])

    def test_basic(self):
        self.assertZipFileContent("test_zip_basic.zip", [
            {"filename": "foodir/", "isdir": True, "attr": 0o711},
            {"filename": "hello.txt", "crc": HELLO_CRC},
            {"filename": "loremipsum.txt", "crc": LOREM_CRC},
            # executable entry whose payload is the path of the real binary
            {"filename": "usr/bin/foo", "attr": 0o555, "data": "/usr/local/foo/foo.real"},
        ])

    def test_timestamp(self):
        self.assertZipFileContent("test_zip_timestamp.zip", [
            {"filename": "hello.txt", "crc": HELLO_CRC, "timestamp": 1234567890},
        ])

    def test_permissions(self):
        self.assertZipFileContent("test_zip_permissions.zip", [
            {
                "filename": "executable.sh",
                "crc": EXECUTABLE_CRC,
                "timestamp": 1234567890,
                "attr": 0o644,
            }
        ])

    def test_package_dir(self):
        self.assertZipFileContent("test_zip_package_dir0.zip", [
            {"filename": "abc/def/hello.txt", "crc": HELLO_CRC},
            {"filename": "abc/def/loremipsum.txt", "crc": LOREM_CRC},
        ])

    # The strip_prefix variants below only differ in how much of the source
    # path is removed from the archived file names.
    def test_zip_strip_prefix_empty(self):
        self.assertZipFileContent("test-zip-strip_prefix-empty.zip", [
            {"filename": "loremipsum.txt", "crc": LOREM_CRC},
        ])

    def test_zip_strip_prefix_none(self):
        self.assertZipFileContent("test-zip-strip_prefix-none.zip", [
            {"filename": "loremipsum.txt", "crc": LOREM_CRC},
        ])

    def test_zip_strip_prefix_zipcontent(self):
        self.assertZipFileContent("test-zip-strip_prefix-zipcontent.zip", [
            {"filename": "loremipsum.txt", "crc": LOREM_CRC},
        ])

    def test_zip_strip_prefix_dot(self):
        self.assertZipFileContent("test-zip-strip_prefix-dot.zip", [
            {"filename": "zipcontent/loremipsum.txt", "crc": LOREM_CRC},
        ])

    def test_zip_tree(self):
        self.assertZipFileContent("test_zip_tree.zip", [
            {"filename": "generate_tree/a/a"},
            {"filename": "generate_tree/a/b/c"},
            {"filename": "generate_tree/b/c/d"},
            {"filename": "generate_tree/b/d"},
            {"filename": "generate_tree/b/e"},
        ])
if __name__ == "__main__":
unittest.main()
| 2.09375 | 2 |
intake/cli/client/subcommands/drivers.py | mattkram/intake | 149 | 12759416 | <reponame>mattkram/intake
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
"""
CLI for listing, enabling, disabling intake drivers
"""
from intake import __version__
from intake.cli.util import Subcommand
from intake.source.discovery import drivers
import logging
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# API
# -----------------------------------------------------------------------------
class Drivers(Subcommand):
    """
    List, enable, and disable intake drivers.
    """
    name = "drivers"

    def initialize(self):
        """Register the ``list`` / ``enable`` / ``disable`` sub-parsers."""
        sub_parser = self.parser.add_subparsers()

        # renamed from ``list`` to avoid shadowing the builtin
        list_parser = sub_parser.add_parser(
            'list',
            help='Show all intake drivers, whether enabled, disabled, '
            'or directly inserted into the registry'
        )
        list_parser.add_argument(
            '-v', '--verbose', action='store_true', help='Show module path.')
        list_parser.set_defaults(invoke=self._list)

        enable = sub_parser.add_parser('enable', help='Enable an intake driver.')
        enable.add_argument('name', type=str, help='Driver name')
        enable.add_argument('driver', type=str, default=None, nargs='?',
                            help='Module path and class name, as in '
                            'package.submodule.ClassName')
        enable.set_defaults(invoke=self._enable)

        disable = sub_parser.add_parser(
            'disable', help='Disable one or more intake drivers.')
        disable.add_argument('names', type=str, help='Driver names', nargs='+')
        disable.set_defaults(invoke=self._disable)

    def invoke(self, args):
        """Bare ``intake drivers`` invocation: just print usage."""
        self.parser.print_help()

    def _list(self, args):
        """Print drivers grouped by origin: package scan, entrypoints, config."""
        if drivers.do_scan:
            print("Package scan:")
            for k, v in drivers.scanned.items():
                print(f'{k:<30}{v.__module__}.{v.__name__}')
            print()

        print("Entrypoints:")
        eps = [ep for ep in drivers.from_entrypoints()
               if ep.name not in drivers.disabled()]
        if eps:
            for v in eps:
                print(f'{v.name:<30}{v.module_name}:{v.object_name}')
        else:
            print("<none>")
        print()

        print("From Config:")
        eps = [ep for ep in drivers.from_conf()
               if ep.name not in drivers.disabled()]
        if eps:
            for v in eps:
                if v.name not in drivers.disabled():
                    print(f'{v.name:<30}{v.module_name}:{v.object_name}')
        else:
            print("<none>")
        print()

        print("Disabled: ", drivers.disabled() or "<none>")

    def _enable(self, args):
        """Enable driver ``args.name``, optionally binding it to ``args.driver``."""
        drivers.enable(args.name, args.driver)

    def _disable(self, args):
        """Disable every driver named in ``args.names``."""
        for name in args.names:
            drivers.disable(name)
| 2.140625 | 2 |
mtools/util/logevent.py | corymintz/mtools | 0 | 12759417 | <gh_stars>0
from datetime import datetime
from dateutil.tz import tzutc
import dateutil.parser
import re
import json
from mtools.util.pattern import json2pattern
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that renders datetime objects as ISO-8601 strings."""

    def default(self, obj):
        # Anything that is not a datetime falls through to the base class,
        # which raises TypeError for unserialisable values.
        if not isinstance(obj, datetime):
            return json.JSONEncoder.default(self, obj)
        return obj.isoformat()
class LogEvent(object):
""" LogEvent extracts information from a mongod/mongos log file line and
stores the following properties/variables:
line_str: the original line string
split_tokens: a list of string tokens after splitting line_str using
whitespace as split points
datetime: a datetime object for the logevent. For logfiles created with
version 2.4+, it also contains micro-seconds
duration: the duration of a timed operation in ms
thread: the thread name (e.g. "conn1234") as string
operation: insert, update, remove, query, command, getmore, None
namespace: the namespace of the operation, or None
Certain operations also add the number of affected/scanned documents.
If applicable, the following variables are also set, otherwise the
default is None: nscanned, ntoreturn, nreturned, ninserted, nupdated
For performance reason, all fields are evaluated lazily upon first
request.
"""
# datetime handler for json encoding
dthandler = lambda obj: obj.isoformat() if isinstance(obj, \
datetime) else None
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', \
'Oct', 'Nov', 'Dec']
    def __init__(self, doc_or_str):
        """Build a LogEvent from either a raw log line (str) or a
        system.profile document (dict-like).

        String input is parsed lazily (fields are extracted on first
        property access); profile documents are parsed eagerly since
        that is cheap.
        """
        self._year_rollover = False

        if isinstance(doc_or_str, str):
            # create from string, remove line breaks at end of _line_str
            self.from_string = True
            self._line_str = doc_or_str.rstrip()
            self._profile_doc = None
            self._reset()
        else:
            self.from_string = False
            self._profile_doc = doc_or_str
            # docs don't need to be parsed lazily, they are fast
            self._parse_document()
    def _reset(self):
        """Clear every cached/derived field so it is recomputed from
        ``_line_str`` on next access (all extraction is lazy)."""
        self._split_tokens_calculated = False
        self._split_tokens = None

        self._duration_calculated = False
        self._duration = None

        self._datetime_calculated = False
        self._datetime = None
        self._datetime_nextpos = None
        self._datetime_format = None
        self._datetime_str = ''

        self._thread_calculated = False
        self._thread = None

        self._operation_calculated = False
        self._operation = None
        self._namespace = None

        self._pattern = None
        self._sort_pattern = None

        self._counters_calculated = False

        self._nscanned = None
        self._ntoreturn = None
        self._nupdated = None
        self._nreturned = None
        self._ninserted = None
        self._ndeleted = None
        self._numYields = None
        self._r = None
        self._w = None

        self.merge_marker_str = ''
def set_line_str(self, line_str):
""" line_str is only writeable if LogEvent was created from a string, not from a system.profile documents. """
if not self.from_string:
raise ValueError("can't set line_str for LogEvent created from system.profile documents.")
if line_str != self._line_str:
self._line_str = line_str.rstrip()
self._reset()
def get_line_str(self):
""" return line_str depending on source, logfile or system.profile. """
if self.from_string:
return ' '.join([s for s in [self.merge_marker_str, self._datetime_str, self._line_str] if s])
else:
return ' '.join([s for s in [self._datetime_str, self._line_str] if s])
line_str = property(get_line_str, set_line_str)
@property
def split_tokens(self):
""" splits string into tokens (lazy) """
if not self._split_tokens_calculated:
# split into items (whitespace split)
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True
return self._split_tokens
    @property
    def duration(self):
        """Duration of a timed operation in ms, parsed lazily from the
        trailing ``...ms`` token (or from a "flushing mmaps" message).
        None when the line carries no duration."""
        if not self._duration_calculated:
            self._duration_calculated = True

            # split_tokens = self.split_tokens
            line_str = self.line_str

            if line_str and line_str.endswith('ms'):
                try:
                    # find duration from end
                    space_pos = line_str.rfind(" ")
                    if space_pos == -1:
                        # NOTE(review): this early return leaves _duration
                        # as None and skips the flushing branch.
                        return
                    self._duration = int(line_str[line_str.rfind(" ")+1:-2].replace(',',''))
                except ValueError:
                    # trailing token was not "<int>ms"
                    self._duration = None
            elif "flushing" in self.line_str:
                matchobj = re.search(r'flushing mmaps took (\d+)ms', self.line_str)
                if matchobj:
                    self._duration = int(matchobj.group(1))

        return self._duration
@property
def datetime(self):
""" extract datetime if available (lazy) """
if not self._datetime_calculated:
self._datetime_calculated = True
# if no datetime after 10 tokens, break to avoid parsing very long lines
split_tokens = self.split_tokens[:10]
match_found = False
for offs in xrange(len(split_tokens)):
dt = self._match_datetime_pattern(split_tokens[offs:offs+4])
if dt:
self._datetime = dt
self._datetime_nextpos = offs
if self._datetime_format.startswith("iso8601"):
self._datetime_nextpos += 1
else:
self._datetime_nextpos += 4
# separate datetime str and linestr
self._line_str = ' '.join(self.split_tokens[self._datetime_nextpos:])
self._reformat_timestamp(self._datetime_format)
break
return self._datetime
    @property
    def datetime_format(self):
        """Name of the detected timestamp format; forces datetime parsing."""
        if not self._datetime_calculated:
            _ = self.datetime
        return self._datetime_format

    @property
    def datetime_nextpos(self):
        """Token index right after the timestamp; forces datetime parsing."""
        if self._datetime_nextpos == None and not self._datetime_calculated:
            _ = self.datetime
        return self._datetime_nextpos

    def set_datetime_hint(self, format, nextpos, rollover):
        """Seed the datetime fields from a prior line's parse (fast path).

        Returns True when the hint still fits this line's tokens; on a
        mismatch it triggers a full datetime evaluation and returns False.
        """
        self._datetime_format = format
        self._datetime_nextpos = nextpos
        self._year_rollover = rollover

        # fast check if timezone changed. if it has, trigger datetime evaluation
        if format.startswith('ctime'):
            if len(self.split_tokens) < 4 or self.split_tokens[self._datetime_nextpos-4] not in self.weekdays:
                _ = self.datetime
                return False
            return True
        else:
            if not self.split_tokens[self._datetime_nextpos-1][0].isdigit():
                _ = self.datetime
                return False
            return True

    def _match_datetime_pattern(self, tokens):
        """ Helper method that takes a list of tokens and tries to match
            the datetime pattern at the beginning of the token list.

            There are several formats that this method needs to understand
            and distinguish between (see MongoDB's SERVER-7965):

            ctime-pre2.4    Wed Dec 31 19:00:00
            ctime           Wed Dec 31 19:00:00.000
            iso8601-utc     1970-01-01T00:00:00.000Z
            iso8601-local   1969-12-31T19:00:00.000+0500
        """
        # first check: less than 4 tokens can't be ctime
        assume_iso8601_format = len(tokens) < 4

        # check for ctime-pre-2.4 or ctime format
        if not assume_iso8601_format:
            weekday, month, day, time = tokens[:4]
            if len(tokens) < 4 or (weekday not in self.weekdays) or \
               (month not in self.months) or not day.isdigit():
                assume_iso8601_format = True

        if assume_iso8601_format:
            # sanity check, because the dateutil parser could interpret
            # any numbers as a valid date
            if not re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}', \
                            tokens[0]):
                return None

            # convinced that this is a ISO-8601 format, the dateutil parser
            # will do the rest
            dt = dateutil.parser.parse(tokens[0])
            self._datetime_format = "iso8601-utc" \
                if tokens[0].endswith('Z') else "iso8601-local"
        else:
            # assume current year unless self.year_rollover is set (from LogFile)
            year = datetime.now().year
            dt = dateutil.parser.parse(' '.join(tokens[:4]), default=datetime(year, 1, 1))
            if dt.tzinfo == None:
                dt = dt.replace(tzinfo=tzutc())

            if self._year_rollover and dt > self._year_rollover:
                dt = dt.replace(year=year-1)

            self._datetime_format = "ctime" \
                if '.' in tokens[3] else "ctime-pre2.4"

        return dt
@property
def thread(self):
""" extract thread name if available (lazy) """
if not self._thread_calculated:
self._thread_calculated = True
split_tokens = self.split_tokens
if not self.datetime_nextpos or len(split_tokens) <= self.datetime_nextpos:
return None
connection_token = split_tokens[self.datetime_nextpos]
match = re.match(r'^\[([^\]]*)\]$', connection_token)
if match:
self._thread = match.group(1)
return self._thread
@property
def operation(self):
""" extract operation (query, insert, update, remove, getmore, command)
if available (lazy) """
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._operation
@property
def namespace(self):
""" extract namespace if available (lazy) """
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._namespace
    def _extract_operation_and_namespace(self):
        """ Helper method to extract both operation and namespace from a
            logevent. It doesn't make sense to only extract one as they
            appear back to back in the token list.
        """
        split_tokens = self.split_tokens

        if not self._datetime_nextpos:
            # force evaluation of datetime to get access to datetime_offset
            _ = self.datetime

        # bail out when no timestamp was found or the line is too short to
        # carry "<op> <namespace>" after it
        if not self._datetime_nextpos or len(split_tokens) <= self._datetime_nextpos + 2:
            return

        op = split_tokens[self._datetime_nextpos + 1]

        if op in ['query', 'insert', 'update', 'remove', 'getmore', 'command']:
            self._operation = op
            self._namespace = split_tokens[self._datetime_nextpos + 2]
    @property
    def pattern(self):
        """ extract query pattern from operations (lazy) """
        if not self._pattern:
            # trigger evaluation of operation
            if self.operation in ['query', 'getmore', 'update', 'remove']:
                self._pattern = self._find_pattern('query: ')

        return self._pattern

    @property
    def sort_pattern(self):
        """ extract sort (orderby) pattern from query operations (lazy) """
        if not self._sort_pattern:
            # trigger evaluation of operation
            if self.operation in ['query', 'getmore']:
                self._sort_pattern = self._find_pattern('orderby: ')

        return self._sort_pattern
@property
def nscanned(self):
""" extract nscanned counter if available (lazy) """
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscanned
@property
def ntoreturn(self):
""" extract ntoreturn counter if available (lazy) """
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ntoreturn
@property
def nreturned(self):
""" extract nreturned counter if available (lazy) """
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nreturned
@property
def ninserted(self):
""" extract ninserted counter if available (lazy) """
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ninserted
@property
def ndeleted(self):
""" extract ndeleted counter if available (lazy) """
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ndeleted
@property
def nupdated(self):
""" extract nupdated counter if available (lazy) """
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nupdated
@property
def numYields(self):
""" extract numYields counter if available (lazy) """
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._numYields
@property
def r(self):
""" extract read lock (r) counter if available (lazy) """
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._r
@property
def w(self):
""" extract write lock (w) counter if available (lazy) """
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._w
    def _extract_counters(self):
        """ Helper method to extract counters like nscanned, nreturned, etc.
            from the logevent.
        """
        # extract counters (if present)
        counters = ['nscanned', 'ntoreturn', 'nreturned', 'ninserted', \
            'nupdated', 'ndeleted', 'r', 'w', 'numYields']

        split_tokens = self.split_tokens

        # trigger thread evaluation to get access to offset
        if self.thread:
            for t, token in enumerate(split_tokens[self.datetime_nextpos+2:]):
                for counter in counters:
                    if token.startswith('%s:'%counter):
                        try:
                            # store directly on the instance dict, e.g. _nscanned
                            vars(self)['_'+counter] = int((token.split(':')[-1]).replace(',', ''))
                        except ValueError:
                            # see if this is a pre-2.5.2 numYields with space in between (e.g. "numYields: 2")
                            # https://jira.mongodb.org/browse/SERVER-10101
                            if counter == 'numYields' and token.startswith('numYields'):
                                try:
                                    self._numYields = int((split_tokens[t+1+self.datetime_nextpos+2]).replace(',', ''))
                                except ValueError:
                                    pass
                        # token not parsable, skip
                        break
    def parse_all(self):
        """ triggers the extraction of all information, which would usually
            just be evaluated lazily.
        """
        # NOTE: the locals below are intentionally unused -- each property
        # access forces its lazy extraction and caches the result.
        tokens = self.split_tokens
        duration = self.duration
        datetime = self.datetime
        thread = self.thread
        operation = self.operation
        namespace = self.namespace
        pattern = self.pattern
        nscanned = self.nscanned
        ntoreturn = self.ntoreturn
        nreturned = self.nreturned
        ninserted = self.ninserted
        ndeleted = self.ndeleted
        nupdated = self.nupdated
        numYields = self.numYields
        w = self.w
        r = self.r
    def _find_pattern(self, trigger):
        """Locate the JSON document following ``trigger`` (e.g. 'query: ')
        in the line, cut it out by brace matching, and normalise it with
        json2pattern. Returns None when no pattern is present."""
        # get start of json query pattern
        start_idx = self.line_str.rfind(trigger)
        if start_idx == -1:
            # no query pattern found
            return None

        stop_idx = 0
        brace_counter = 0
        search_str = self.line_str[start_idx+len(trigger):]

        # walk the braces to find the end of the (possibly nested) document
        for match in re.finditer(r'{|}', search_str):
            stop_idx = match.start()
            if search_str[stop_idx] == '{':
                brace_counter += 1
            else:
                brace_counter -= 1
            if brace_counter == 0:
                break
        search_str = search_str[:stop_idx+1].strip()
        if search_str:
            return json2pattern(search_str)
        else:
            return None
def _reformat_timestamp(self, format, force=False):
if format not in ['ctime', 'ctime-pre2.4', 'iso8601-utc', 'iso8601-local']:
raise ValueError('invalid datetime format %s, choose from ctime, ctime-pre2.4, iso8601-utc, iso8601-local.')
if (self.datetime_format == None or (self.datetime_format == format and self._datetime_str != '')) and not force:
return
elif self.datetime == None:
return
elif format.startswith('ctime'):
dt_string = self.weekdays[self.datetime.weekday()] + ' ' + self.datetime.strftime("%b %d %H:%M:%S")
# remove zero-padding from day number
tokens = dt_string.split(' ')
if tokens[2].startswith('0'):
tokens[2] = tokens[2].replace('0', ' ', 1)
dt_string = ' '.join(tokens)
if format == 'ctime':
dt_string += '.' + str(int(self.datetime.microsecond / 1000)).zfill(3)
elif format == 'iso8601-local':
dt_string = self.datetime.isoformat()
if not self.datetime.utcoffset():
dt_string += '+00:00'
ms_str = str(int(self.datetime.microsecond * 1000)).zfill(3)[:3]
# change isoformat string to have 3 digit milliseconds and no : in offset
dt_string = re.sub(r'(\.\d+)?([+-])(\d\d):(\d\d)', '.%s\\2\\3\\4'%ms_str, dt_string)
elif format == 'iso8601-utc':
if self.datetime.utcoffset():
dt_string = self.datetime.astimezone(tzutc()).strftime("%Y-%m-%dT%H:%M:%S")
else:
dt_string = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
dt_string += '.' + str(int(self.datetime.microsecond * 1000)).zfill(3)[:3] + 'Z'
# set new string and format
self._datetime_str = dt_string
self._datetime_format = format
def __str__(self):
""" default string conversion for a LogEvent object is just its line_str. """
return str(self.line_str)
def to_dict(self, labels=None):
""" converts LogEvent object to a dictionary. """
output = {}
if labels == None:
labels = ['line_str', 'split_tokens', 'datetime', 'operation', \
'thread', 'namespace', 'nscanned', 'ntoreturn', \
'nreturned', 'ninserted', 'nupdated', 'ndeleted', 'duration', 'r', 'w', 'numYields']
for label in labels:
value = getattr(self, label, None)
if value != None:
output[label] = value
return output
def to_json(self, labels=None):
""" converts LogEvent object to valid JSON. """
output = self.to_dict(labels)
return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False)
    def _parse_document(self):
        """ Parses a system.profile document and copies all the values to the member variables. """
        doc = self._profile_doc

        # mark everything as "already calculated" -- profile docs carry the
        # values directly, so no lazy extraction is needed
        self._split_tokens_calculated = True
        self._split_tokens = None

        self._duration_calculated = True
        self._duration = doc[u'millis']

        self._datetime_calculated = True
        self._datetime = doc[u'ts']
        if self._datetime.tzinfo == None:
            self._datetime = self._datetime.replace(tzinfo=tzutc())
        self._datetime_format = None
        self._reformat_timestamp('ctime', force=True)

        self._thread_calculated = True
        self._thread = doc['thread']

        self._operation_calculated = True
        self._operation = doc[u'op']
        self._namespace = doc[u'ns']

        # query pattern for system.profile events, all three cases (see SERVER-13245)
        if 'query' in doc:
            if 'query' in doc['query'] and isinstance(doc['query']['query'], dict):
                self._pattern = str(doc['query']['query']).replace("'", '"')
            elif '$query' in doc['query']:
                self._pattern = str(doc['query']['$query']).replace("'", '"')
            else:
                self._pattern = str(doc['query']).replace("'", '"')

            # sort pattern
            if 'orderby' in doc['query'] and isinstance(doc['query']['orderby'], dict):
                self._sort_pattern = str(doc['query']['orderby']).replace("'", '"')
            elif '$orderby' in doc['query']:
                self._sort_pattern = str(doc['query']['$orderby']).replace("'", '"')
            else:
                self._sort_pattern = None

        self._counters_calculated = True
        self._nscanned = doc[u'nscanned'] if 'nscanned' in doc else None
        self._ntoreturn = doc[u'ntoreturn'] if 'ntoreturn' in doc else None
        self._nupdated = doc[u'nupdated'] if 'nupdated' in doc else None
        self._nreturned = doc[u'nreturned'] if 'nreturned' in doc else None
        self._ninserted = doc[u'ninserted'] if 'ninserted' in doc else None
        self._ndeleted = doc[u'ndeleted'] if 'ndeleted' in doc else None
        self._numYields = doc[u'numYield'] if 'numYield' in doc else None
        self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
        self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']

        self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
        self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']

        # build a fake line_str
        payload = ''
        if 'query' in doc:
            payload += 'query: %s' % str(doc[u'query']).replace("u'", "'").replace("'", '"')
        if 'command' in doc:
            payload += 'command: %s' % str(doc[u'command']).replace("u'", "'").replace("'", '"')
        if 'updateobj' in doc:
            payload += ' update: %s' % str(doc[u'updateobj']).replace("u'", "'").replace("'", '"')

        scanned = 'nscanned:%i'%self._nscanned if 'nscanned' in doc else ''
        yields = 'numYields:%i'%self._numYields if 'numYield' in doc else ''
        locks = 'w:%i' % self.w if self.w != None else 'r:%i' % self.r
        duration = '%ims' % self.duration if self.duration != None else ''

        self._line_str = "[{thread}] {operation} {namespace} {payload} {scanned} {yields} locks(micros) {locks} {duration}".format(
            datetime=self.datetime, thread=self.thread, operation=self.operation, namespace=self.namespace, payload=payload, scanned=scanned, yields=yields, locks=locks, duration=duration)
| 2.8125 | 3 |
src/9/using_metaclasses_to_control_instance_creation/example3.py | tuanavu/python-gitbook | 14 | 12759418 | # example3.py
#
# Cached instances
import weakref
class Cached(type):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__cache = weakref.WeakValueDictionary()
def __call__(self, *args):
if args in self.__cache:
return self.__cache[args]
else:
obj = super().__call__(*args)
self.__cache[args] = obj
return obj
class Spam(metaclass=Cached):
def __init__(self, name):
print('Creating Spam({!r})'.format(name))
self.name = name
if __name__ == '__main__':
a = Spam('foo')
b = Spam('bar')
print('a is b:', a is b)
c = Spam('foo')
print('a is c:', a is c)
| 2.859375 | 3 |
cats_analysis/summary.py | TomMonks/cats-time-series | 1 | 12759419 | <filename>cats_analysis/summary.py
'''
summary
Classes and functions for summarising Trips
Tightly coupled to CleanTrip
'''
import numpy as numpy
import pandas as pd
class TripSummaryStatistics(object):
    '''
    Summary statistics for a cleaned trip
    '''

    def __init__(self, clean_trip):
        '''
        Create an instance of TripSummaryStatistics for an individual
        trip.

        Parameters:
        -------
        clean_trip, cats_analysis.io.CleanTrip. Cleaned Trip Data
        '''
        self._clean_trip = clean_trip
        self._summary = None
        # -1.0 marks "not yet calculated"; calculate() stores a pandas
        # Timedelta here.
        self._duration = -1.0

    def _get_duration(self):
        '''
        Return the trip duration (a pandas Timedelta once calculate()
        has run; -1.0 before that).
        '''
        # BUGFIX: previously returned the bound method object itself
        # (``self._get_duration``) instead of the stored value.
        return self._duration

    def _get_summary_table(self):
        '''
        Summary statistics for the trip
        '''
        return self._summary

    def calculate(self, resample='30s', smooth=False, interp_missing=False):
        '''
        Calculate basic summary statistics for the trip: duration plus,
        per field, missing-percentage, mean, std, min, max, median, IQR,
        skew and kurtosis.

        Parameters:
        ----------
        resample -- str, interval to aggregate values over (default=30s)
        smooth -- bool, smooth the resampled data (default=False)
        interp_missing -- bool, linear interpolation between missing values
            (default=False)
        '''
        df = self._clean_trip.resample(resample, smooth, interp_missing)
        # keep the public ``duration`` attribute for backwards
        # compatibility, but also store it where the property reads it
        self.duration = self._duration = df.index.max() - df.index.min()

        results = {}
        results['per_missing'] = (1 - df.count()/df.shape[0])*100
        results['mean'] = df.mean()
        results['std'] = df.std()
        results['min'] = df.min()
        results['max'] = df.max()
        results['median'] = df.quantile(q=0.5)
        results['iqr'] = df.quantile(q=0.75) - df.quantile(q=0.25)
        results['skew'] = df.skew()
        results['kurtosis'] = df.kurtosis()
        self._summary = pd.DataFrame(results)

    summary_table = property(_get_summary_table)
    trip_duration = property(_get_duration)
DeNardi-Tornatore/app/android/Tab_Writer/app/src/main/python/main.py | LucaLand/SistemiDigitaliM20-21 | 9 | 12759420 | <reponame>LucaLand/SistemiDigitaliM20-21
from preprocessing import preprocessing_file
from predict import predict_model
def main(path):
    """Run the full pipeline on the file at ``path``: preprocess it into
    images/frames, then run the prediction model over them.

    NOTE(review): the exact types of ``images``, ``frames`` and ``result``
    are defined by preprocessing_file/predict_model -- not visible here.
    """
    images, frames = preprocessing_file(path)
    result = predict_model(images, frames)
    return result
tests/test_autoCorrect.py | gagneurlab/autoCorrect | 2 | 12759421 | import autoCorrection
import numpy as np
import unittest
class TestEndToEnd(unittest.TestCase):
    """Smoke test: correcting a random count matrix preserves its shape."""

    def test_end_to_end(self):
        # 10x8 negative-binomial counts with unit size factors
        counts = np.random.negative_binomial(n=20, p=0.2, size=(10, 8))
        sf = np.ones((10, 8))
        corrector = autoCorrection.correctors.AECorrector()
        correction = corrector.correct(counts=counts, size_factors=sf)
        self.assertEqual(counts.shape, correction.shape)
class TestSavingAndLoading(unittest.TestCase):
    """Round-trip: a model saved to disk can be reloaded for predict-only use."""

    def test_loading(self):
        # explicitly save first so this test does not depend on run order
        self.test_saving()
        counts = np.random.negative_binomial(n=20, p=0.2, size=(10, 8))
        sf = np.ones((10, 8))
        corrector = autoCorrection.correctors.AECorrector(model_name='test1', model_directory=".")
        # only_predict=True loads the previously saved model instead of training
        correction = corrector.correct(counts, sf, only_predict=True)
        self.assertEqual(counts.shape, correction.shape)

    def test_saving(self):
        counts = np.random.negative_binomial(n=20, p=0.2, size=(10, 8))
        sf = np.ones((10, 8))
        corrector = autoCorrection.correctors.AECorrector(model_name='test1', model_directory=".", save_model=True)
        correction = corrector.correct(counts, sf)
        self.assertEqual(counts.shape, correction.shape)
class TestSetSeed(unittest.TestCase):
    """Seeded runs should agree with each other and differ from unseeded runs."""

    def test_setSeed(self):
        # generate data
        nsamples = 15
        ngenes = 20
        counts = np.random.negative_binomial(n=20, p=0.2, size=(ngenes, nsamples))
        sf = np.random.uniform(0.8, 1.2, size=(ngenes, nsamples))
        # run the autocorrection 2 times with seed and one without. it should deviate
        ac = autoCorrection.correctors
        correct1 = ac.AECorrector(model_name='test1', model_directory=".", save_model=True, verbose=0).correct(counts, sf)
        correct2 = ac.AECorrector(model_name='test1', model_directory=".", save_model=True, verbose=0, seed=42).correct(counts, sf)
        correct3 = ac.AECorrector(model_name='test1', model_directory=".", save_model=True, verbose=0, seed=42).correct(counts, sf)
        # check if the results are similar. Due to randomness in the numbers we still have little changes
        #self.assertTrue(sum(sum(np.round(correct2) == np.round(correct3))) > 0.9 * nsamples * ngenes)
        self.assertTrue(sum(sum(np.round(correct1) == np.round(correct2))) < 0.3 * nsamples * ngenes)
        self.assertTrue(sum(sum(np.round(correct1) == np.round(correct3))) < 0.3 * nsamples * ngenes)
# Script entry point: discover and run all test cases in this module.
if __name__ == '__main__':
    unittest.main()
| 3.078125 | 3 |
installer/install_delfin.py | sfzeng/delfin | 0 | 12759422 | <reponame>sfzeng/delfin
#!/usr/bin/python3
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, shutil, subprocess
from subprocess import CalledProcessError
import traceback as tb
from helper import *
# Module-level installer configuration: filesystem locations and shared names.
delfin_source_path = ''  # resolved at runtime in main() (parent of this file's dir)
delfin_etc_dir = '/etc/delfin'
delfin_var_dir = '/var/lib/delfin'
conf_file = os.path.join(delfin_etc_dir, 'delfin.conf')
proj_name = 'delfin'
DEVNULL = '/dev/null'  # shell redirect target to silence background services
def _activate():
    """Source the project's virtualenv activate script.

    NOTE(review): ``os.system`` runs the ``. .../activate`` command in a child
    shell, so the activation cannot affect this Python process — presumably
    why the only call site (in ``init``) is commented out. Confirm intent.
    """
    path_to_activate = os.path.join(delfin_source_path , 'installer', proj_name, 'bin/activate')
    command = '. ' + path_to_activate
    os.system(command)
# Initialize the settings first
def init():
    """Initialisation hook; virtualenv activation is currently disabled."""
    #_activate()
    pass
def create_delfin_db():
    """Create the delfin database by running installer/create_db.py.

    Errors are logged and swallowed so the installer can continue.
    """
    try:
        db_path = os.path.join(delfin_source_path, 'installer', 'create_db.py')
        subprocess.check_call(['python3', db_path, '--config-file', conf_file])
    except CalledProcessError as cpe:
        # NOTE(review): tb.print_exc() prints to stderr and returns None,
        # so the log message interpolates the string "None" here.
        logger.error("Got CPE error [%s]:[%s]" % (cpe, tb.print_exc()))
        return
    logger.info('db created ')
def _start_background_process(script_name, label):
    """Launch one delfin service script detached in the background.

    Parameters
    ----------
    script_name (str): file name of the script under delfin/cmd (e.g. 'api.py')
    label (str): human-readable service name used in the log message
    """
    proc_path = os.path.join(delfin_source_path, 'delfin', 'cmd', script_name)
    # Redirect all output to /dev/null and detach with '&' so the installer
    # does not block on the long-running service.
    command = 'python3 ' + proc_path + ' --config-file ' + conf_file + ' >' + DEVNULL + ' 2>&1 &'
    logger.info("Executing command [%s]", command)
    os.system(command)
    logger.info("%s process_started", label)


def start_processes():
    """Start the delfin api, task and alert services as background processes."""
    for script_name, label in (('api.py', 'API'),
                               ('task.py', 'TASK'),
                               ('alert.py', 'ALERT')):
        _start_background_process(script_name, label)
def install_delfin():
    """Install delfin: pip requirements, then ``setup.py build`` and ``install``.

    Pip output goes to requirements.log; build/install output is appended to
    the shared installer log file.
    """
    req_logs = os.path.join(delfin_log_dir, 'requirements.log')
    command = 'pip3 install -r requirements.txt >' + req_logs + ' 2>&1'
    logger.info("Executing [%s]", command)
    os.system(command)
    setup_file = os.path.join(delfin_source_path, 'setup.py')
    for setup_step in ('build', 'install'):
        # BUG FIX: the original wrapped os.system() in try/except
        # CalledProcessError, which os.system() never raises (the handler was
        # dead code). Inspect the exit status instead so failures are reported.
        command = 'python3 ' + setup_file + ' ' + setup_step + ' >>' + logfile
        logger.info("Executing [%s]", command)
        status = os.system(command)
        if status != 0:
            logger.error("Command [%s] failed with exit status [%s]", command, status)
            return
def main():
    """Installer entry point: prepare directories/config, install, init DB, start services."""
    global delfin_source_path
    cwd = os.getcwd()
    logger.info("Current dir is %s" % (cwd))
    # The delfin source tree is the parent directory of this installer script.
    this_file_dir = os.path.dirname(os.path.realpath(__file__))
    delfin_source_path = os.path.join(this_file_dir, "../" )
    logger.info("delfins [%s]" % (delfin_source_path))
    os.chdir(delfin_source_path)
    logger.info(os.getcwd())
    # create required directories
    create_dir(delfin_etc_dir)
    create_dir(delfin_var_dir)
    # Copy required files
    # Copy api-paste.ini
    ini_file_src = os.path.join(delfin_source_path, 'etc', 'delfin', 'api-paste.ini')
    ini_file_dest = os.path.join(delfin_etc_dir, 'api-paste.ini')
    copy_files(ini_file_src, ini_file_dest)
    # Copy the conf file
    conf_file_src = os.path.join(delfin_source_path, 'etc', 'delfin', 'delfin.conf')
    copy_files(conf_file_src, conf_file)
    # install
    install_delfin()
    # create db
    create_delfin_db()
    # start
    start_processes()
if __name__ == "__main__":
main()
| 1.820313 | 2 |
src/release_artifacts_resources/ios/cdk/cdk/credential_rotation/lambda_functions/test/test_secrets_manager_helper.py | jhockett/amplify-ci-support | 9 | 12759423 | import os
import unittest
from unittest.mock import patch
import botocore.session
from botocore.stub import Stubber
from src.utils import secrets_manager_helper
# Shared botocore client (module-level) that the stubbed tests below attach to.
session = botocore.session.get_session()
secretsmanager = session.create_client("secretsmanager", region_name=secrets_manager_helper.REGION)
class TestSecretsManagerHelper(unittest.TestCase):
    """Unit tests for secrets_manager_helper using a stubbed botocore client."""
    def test_null_environment_value(self):
        # The helper must reject lookups whose environment variable is unset.
        with self.assertRaises(ValueError):
            secrets_manager_helper.retrieve_secret("variable")
    @patch.dict(os.environ, {"variable": "some_secret_id"})
    def test_retrieve_secret(self):
        # Stub the GetSecretValue call so no real AWS request is made.
        mock_secret = "SEKRET!"
        secretsmanager_stubber = Stubber(secretsmanager)
        request = {"SecretId": "some_secret_id"}
        response = {"SecretString": mock_secret}
        secretsmanager_stubber.add_response("get_secret_value", response, request)
        secretsmanager_stubber.activate()
        secret_value = secrets_manager_helper.retrieve_secret("variable", secretsmanager)
        secretsmanager_stubber.assert_no_pending_responses()
        self.assertEqual(mock_secret, secret_value)
if __name__ == "__main__":
unittest.main()
| 2.65625 | 3 |
data_utility/data_utility.py | Leinadj/CREAM | 1 | 12759424 | # Import all packages required
# Type annotation imports
from typing import Union
from typing import Tuple
# Other imports
import os
import glob
import h5py
from datetime import datetime
from datetime import timedelta
from datetime import timezone
import numpy as np
import pandas as pd
from scipy import interpolate
#-------------------------------------------------------------------------------------------------------#
# CREAM Data Utility class. Please refer to the docstring for details!
#-------------------------------------------------------------------------------------------------------#
class CREAM_Day():
"""
A class representing one particular day of the CREAM dataset.
The CREAM dataset has the following file structure:
|-CREAM
|------- 2018-08-23
| |--------- *.hdf5
| |--------- *.hdf5
| |--------- *.hdf5
| |--------- ......
|
|------- 2018-08-24
.......
This class corresponds to one of the subfolders, i.e. of the folders representing a particular day, such as, for
example, the first folder "2018-08-23". You have to create one CREAM_Day object per day folder in order to use the
data of the full dataset.
During initialization, the following attributes are set.
files_metadata_df (pandas.DataFrame): columns: Start_timestamp, End_timestamp, Filename to store start
end times of each file in this day
files (list): full path to every file in this day
minimum_request_timestamp (datetime.datetime): First timestamp of the day
maximum_request_timestamp (datetime.datetime): Last timestamp of the day
file_cache (dict): file cache for buffering already loaded files
day_date (datetime.datetime): day and date of the current object
This class also provides convenience functions to load the files of the CREAM dataset.
To load an arbitrary CREAM file, use the load_file method.
To load an arbitrary data window, based on the start_timestamp of the window to load, use the load_time_frame method.
To load the maintenance or product events as a pandas.DataFrame, use the load_machine_events method.
Via a parameter, one can also load the raw files that were generated by the coffee maker (they can be found in the
raw_coffee_maker_logs subfolder of the CREAM dataset).
To load the component events as a pandas.DataFrame, use the load_component_events_method.
To load information whether a specific day is a working day (German working day in the dataset), use the get_weekday_
information method.
Other self-explaining convenience functions are:
- get_datetime_from_filepath
- get_index_from_timestamp
- get_timestamp_from_index
Functions starting with an "_" underscore are private functions and are not intended for user usage.
"""
def __init__(self, cream_day_location: str, use_buffer : bool =False, buffer_size_files : int =5):
"""
Initialize the CREAM_Day object
Parameters
----------
cream_day_location (str): location of the root folder of the respective day in the CREAM dataset. Specify
a path to the respective day, not to the root of the overall CREAM datset!
use_buffer (boolean): default=False. In case it is set to True, files loaded via the load_file, or load_time_frame
method are stored in the cache of the CREAM_Day object. This speeds up streaming the dataset.
In case no buffer_size_file is provide, a default buffer_size_files of 5 is used.
Hence, the recent 5 files are stored in the cache. Old files are automatically removed from
the cache in case the buffer_size_files limit is exceeded.
buffer_size_files (int): Size of the file cache of the CREAM_Day object. Functionality of the cache is documented
in the use_buffer parameter description right above.
"""
self.dataset_location = cream_day_location
self.use_buffer = use_buffer
self.buffer_size_files = buffer_size_files
if self.buffer_size_files == 5 and use_buffer is True:
raise Warning("Buffer size was specified with size 5 (default value): a minimum buffer size of 5 files was set therefore")
# Initiate the file buffer dictionary
self.file_cache = {}
# Get all the files of the respective day
self.files = glob.glob(os.path.join(self.dataset_location, "*.hdf5"))
self.files.sort()
# We use the first file and the timestamps in the filenames in the dataset (of this day) to get the metadata information
# Get the timezone information from the filename timestamp
# Load Metadata from the first file of the respective device --> same for all of the device --> STATIC METADATA
with h5py.File(self.files[0], 'r', driver='core') as f:
self.sampling_rate = int(f.attrs['frequency']) # get the sampling rate
self.samples_per_file = len(f["voltage"]) # get the length of the signal
# get the start timestamp‚
start_timestamp = datetime(
year=int(f.attrs['year']),
month=int(f.attrs['month']),
day=int(f.attrs['day']),
hour=int(f.attrs['hours']),
minute=int(f.attrs['minutes']),
second=int(f.attrs['seconds']),
microsecond=int(f.attrs['microseconds']),
tzinfo=timezone(timedelta(hours=int(f.attrs['timezone'][1:4]), minutes=int(f.attrs['timezone'][4:]))))
self.file_duration_sec = 60 * 60 # each file, one hour --> seconds per file
self.number_of_files = len(self.files)
# Some file metadata for every file
file_start_times = [self.get_datetime_from_filepath(f) for f in self.files]
file_end_times = [timedelta(seconds=self.file_duration_sec) + ts for ts in file_start_times]
self.files_metadata_df = pd.DataFrame({"Start_timestamp": file_start_times,
"Filename": self.files,
"End_timestamp": file_end_times})
self.dataset_name = "CREAM"
# Compute the minimum and maximum time for this day, and the respective differences to the day before
self.minimum_request_timestamp = self.files_metadata_df.iloc[0].Start_timestamp
self.maximum_request_timestamp = self.files_metadata_df.iloc[-1].Start_timestamp + timedelta(seconds=self.file_duration_sec)
# Find the day of the dataset
folder_path = os.path.basename(os.path.normpath(self.dataset_location)) # name of the folder
date = folder_path.split("-")
self.day_date = datetime(year=int(date[0]), month=int(date[1]), day=int(date[2]))
# Initialize weekday information
self.weekday_information_df = None
    def load_machine_events(self, file_path: str = None, filter_day : bool = False, raw_file=True) -> pd.DataFrame:
        """
        Load the maintenance event file. The events are sorted by the time they occur.
        Parameters
        ----------
        file_path (str): path to the component events file (.csv) file
        filter_day (boolean): default=False. If set to True, the DataFrame is filtered for the events belonging
                              to the CREAM_Day object
        raw_file (boolean): default=True. If set to True, the user has to provide the path to the raw events file that
                            were generated by the coffee maker. They can be found in the raw_coffee_maker_logs subfolder
                            of the dataset.
        Returns
        -------
        data (pd.DataFrame):
             if raw_file=True: pd.DataFrame with columns "Timestamp", "Activity" (maintenance file) or
             "Timestamp", "Product" (product file)
             If raw_file=False: pd.DataFrame with columns
            'Start_Timestamp', 'Automatic_Timestamp', 'Event_Type', 'End_Timestamp', 'Event_Duration_Seconds', 'Date',
             Sorted descending by 'Start_Timestamp'.
        """
        if file_path is None:
            raise ValueError("Specify a file_path, containing the events file.")
        # Raw files are identified by a "raw" substring in their path.
        if raw_file is True and "raw" not in file_path:
            raise ValueError("In case you intend to load a raw_file, you also need to pass a path to a raw file to the "
                             "function!")
        data = pd.read_csv(file_path)
        # The timezone of the timestamps need to be from the same type
        # We use the first file of the day_object to get it.
        # NOTE(review): this local name shadows datetime.timezone imported at
        # module level; it is not used as the class inside this method.
        timezone = self.get_datetime_from_filepath(self.files[0]).tzinfo
        if raw_file is True:  # In case the raw product file is used
            data.Timestamp = pd.to_datetime(data.Timestamp)
            data = self._convert_timezone(data, "Timestamp", target_timezone=timezone)
            data.sort_values("Timestamp", inplace=True)
            data["Date"] = data.Timestamp.apply(lambda x: x.date())
        else:  # the manually adjusted and pre-processed product file is used
            for column in data.columns:
                # Convert all timestamp columns
                if "Timestamp" in column:
                    data[column] = pd.to_datetime(data[column])
                    data = self._convert_timezone(data, column, target_timezone=timezone)
            data["Date"] = data.End_Timestamp.apply(lambda x: x.date())
            data.sort_values("Start_Timestamp", inplace=True)
        if filter_day is True:  # only return the event of the corresponding CREAM day
            data = data[data["Date"] == self.day_date.date()]
        return data
    def load_component_events(self, file_path: str = None, filter_day : bool = False) -> pd.DataFrame:
        """
        Load the labeled electrical events, i.e. the components events, file. The events are sorted by the time they occur.
        Parameters
        ----------
        file_path (str): path to the component events file (.csv) file
        filter_day (boolean): default=False, if set to True, the DataFrame is filtered for the events belonging
                              to the CREAM_Day object
        Returns
        -------
        data (pd.DataFrame): pd.DataFrame with columns:
            'Start_Timestamp', 'Automatic_Timestamp', 'Event_Type', 'End_Timestamp', 'Event_Duration_Seconds', 'Date',
             Sorted descending by 'Start_Timestamp'.
        """
        if file_path is None:
            raise ValueError("Specify a file_path, containing the events file.")
        data = pd.read_csv(file_path)
        # The timezone of the timestamps need to be from the same type.
        # We use the first file of the day_object to get it (local name shadows
        # datetime.timezone, which is not used here).
        timezone = self.get_datetime_from_filepath(self.files[0]).tzinfo
        for column in data.columns:
            # Convert all timestamp columns to timezone-aware datetimes
            if "Timestamp" in column:
                data[column] = pd.to_datetime(data[column])
                data = self._convert_timezone(data, column, target_timezone=timezone)
        data["Date"] = data.Timestamp.apply(lambda x: x.date())
        data.sort_values("Timestamp", inplace=True)
        if filter_day is True:  # only return the event of the corresponding CREAM day
            data = data[data["Date"] == self.day_date.date()]
        return data
def load_file(self, file_path: str, return_noise: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""
Load a file of the CREAM dataset
If return_noise is specified, the noise channel is also returned. The current is 2-dimensional then.
The signals get pre-processed before they are returned by this function:
1. y-direction calibration: we center the signal around zero
2. calibration_factor: we calibrate the signal by the measurement device specific calibration_factor.
This calibration_factor is included in the metadata of the files.
Parameters
----------
file_path (string): path to the file to be loaded
return_noise (boolean): default=False. If set to True, the current of the noise socket is also returned.
Returns
-------
voltage (ndarray): voltage signal with shape=(1, file_length,). In case of an empty file None is returned.
current (ndarray): current signal either with shape (1, file_length) or (2, file_length)
In case of an empty file None is returned
"""
voltage = None
current = None
# Check if the file is already in the file cache
if self.use_buffer is True and file_path in self.file_cache:
voltage = self.file_cache[file_path]["voltage"]
current = self.file_cache[file_path]["current"]
return voltage, current
else:
# Check if the file is empty (zero bytes): if so return and empty current and voltage array
if os.stat(file_path).st_size > 0: # if not empty
with h5py.File(file_path, 'r', driver='core') as f:
voltage_offset, current_offset = self._adjust_amplitude_offset(f) # y value offset adjustment
for name in list(f):
signal = f[name][:] * 1.0
if name == 'voltage' and voltage_offset is not None: # the voltage signal
voltage = signal - voltage_offset
calibration_factor = f[name].attrs['calibration_factor']
voltage = np.multiply(voltage, calibration_factor)
elif "current1" in name and current_offset is not None: # the current signal of the coffee maker
current = signal - current_offset
calibration_factor = f[name].attrs['calibration_factor']
current = np.multiply(current, calibration_factor)
elif return_noise == True and "current6" in name and current_offset is not None: # the current signal of the noise channel
current_noise = signal - current_offset
calibration_factor = f[name].attrs['calibration_factor']
current_noise = np.multiply(current_noise, calibration_factor)
if return_noise is True:
current = np.array([current, current_noise])
voltage = np.array(voltage)
else:
current = np.array(current)
voltage = np.array(voltage)
# Before returning, check if we store the file in the cache and if we need to delete one instead from the cache
if self.use_buffer is True:
if len(self.file_cache) < self.buffer_size_files:
self.file_cache[file_path] = {"voltage" : np.array(voltage), "current": np.array(current)}
else:
sorted_filenames = list(self.file_cache.keys())
sorted_filenames.sort()
del self.file_cache[sorted_filenames[0]] #delete the oldest file
return np.array(voltage), np.array(current)
else: # if empty
return None, None
def load_file_metadata(self, file_path: str, attribute_list: list = []) -> dict:
"""
Load the file metadata for a specifc files.
The metadata is stored in the HDF5 attributes, details are documented in the data descriptor.
The following attributes are available:
["name", "first_trigger_id", "last_trigger_id", "sequence", "frequency", "year", "month", "day",
"hours", "minutes", "seconds", "microseconds", "timezone", "calibration_factor", "removed_offset"]
Parameters
----------
file_path (str): path to the file to be loaded. Needs to be the full-path, as provide by the "files"
attribute of the CREAM_Day object.
attribute_list (list): default=[], specify specifc attribute names to be loaded. If no
dedicated attributes are specified, all attributes are returned
Returns
-------
attributes_dict (dict): dictionary with all HDF5 attributes of a specifc file.
"""
if file_path is None:
raise ValueError("Specify a file path!")
all_attributes = ["name", "first_trigger_id", "last_trigger_id", "sequence", "frequency", "year", "month", "day",
"hours", "minutes", "seconds", "microseconds", "timezone", "calibration_factor", "removed_offset"]
if len(attribute_list) == 0: #use all attributes if non is specified
attribute_list = all_attributes
else:
# Check if user specified attributes exist in the metadata
for attr in attribute_list:
if attr not in all_attributes:
raise ValueError("The atttribute %s is not available!")
attributes_dict = {}
with h5py.File(file_path, 'r', driver='core') as f:
for attr in attribute_list:
if attr in ["calibration_factor", "removed_offset"]: #not in the attribute root of the hdf5 file
attributes_dict[attr] = f["voltage"].attrs[attr]
else: #attributes in the root of the hdf5 file
attributes_dict[attr] = f.attrs[attr]
return attributes_dict
def load_time_frame(self, start_datetime: datetime, duration : float, return_noise: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""
Loads an arbitrary time-frame of the CREAM dataset. Can be also used for streaming the data fast: in case
the caching parameter is enabled in the CREAM_Day object. Otherwise, the files will be reloaded every time
this method is called, thus, slowing down the data retrieval.
Parameters
----------
start_datetime (datetime.datetime): start timestamp of the window to load
duration (float): duration of the window to load (window size) in seconds. ATTENTION: if not provided in seconds,
wrong results are returned!
return_noise (boolean): default: False. If set to True, also returns the signal of the noise channel recorded in
CREAM dataset (from socket 6)
Returns
-------
voltage (numpy.ndarray): voltage signal of the window
current (numpy.ndarray): curent signal of the window. One dimensional if return_noise=False, two dimensional if
if return_noise=True. The first element is the coffee-maker signal, the second element
the noise signal.
"""
# Perform initial checks
if start_datetime < self.minimum_request_timestamp:
raise ValueError(
"The requested Time window is smaller then the minimum_request_timestamp of the day object")
end_datetime = start_datetime + timedelta(seconds=duration)
if end_datetime > self.maximum_request_timestamp:
raise ValueError("The requested Time window is bigger then the maximum_request_timestamp of the day object")
# determine all the files that are relevant for the requested time window
# The index of the first relevant_file: i.e. the last file that is smaller then the start_datetime
first_file_idx = self.files_metadata_df[self.files_metadata_df.Start_timestamp <= start_datetime].index[-1]
# The last relevant_file: i.e. the first file that has and End_timestamp that is bigger then the one we need
last_file_idx = self.files_metadata_df[self.files_metadata_df.End_timestamp >= end_datetime].index[0]
# Get all the files in between the first and the last file needed
relevant_files_df = self.files_metadata_df.loc[first_file_idx:last_file_idx]
if len(relevant_files_df) == 0:
raise ValueError("The timeframe requested does not lie within the current day!")
relevant_voltage = []
relevant_current = []
relevant_current_noise = []
for i, row in relevant_files_df.iterrows():
voltage, current = self.load_file(row.Filename, return_noise=return_noise)
relevant_voltage.append(voltage)
relevant_current.append(current)
if return_noise is True:
relevant_current.append(current[0])
relevant_current_noise.append(current[1])
# now stack together the relevant signals
relevant_voltage = np.concatenate(relevant_voltage, axis=-1)
relevant_current = np.concatenate(relevant_current, axis=-1)
if return_noise is True and len(relevant_current_noise) > 0:
relevant_current_noise = np.concatenate(relevant_current_noise, axis=-1)
# Compute the start_index
# 1.1 Compute the offset in the first file
start_index = int(self.get_index_from_timestamp(relevant_files_df.iloc[0].Start_timestamp, start_datetime))
end_index = int(self.get_index_from_timestamp(relevant_files_df.iloc[0].Start_timestamp, end_datetime))
# Get the voltage and current window
voltage = relevant_voltage[start_index:end_index] #there is only one voltage channel
if return_noise is True and len(relevant_current_noise) > 0:
current = [relevant_current[start_index:end_index], relevant_current_noise[start_index:end_index]]
else:
current = relevant_current[start_index:end_index]
voltage = np.array(voltage)
current = np.array(current)
return voltage, current
def compute_average_sampling_rate(self) -> float:
"""
Estimate the average sampling rate per day.
Load the metadata of every file of the current day.
Per file (one hour files), we compute the actual sampling rate.
We then average this number over all files of this day, resulting in the average sampling rate.
Calculate the difference between the first and last sample of a day based on
the timestamps of the files.
Sets the average_sampling_rate attribute of the CREAM_Day object.
One can compare the average_sampling_rate to the nominal one of 6400.
Parameters
----------
Returns
-------
average_sampling_rate (float): average sampling rate per day (computed over the files)
"""
FILE_LENGTH_SEC = 60 * 60 #one hour files
actual_sampling_rates = []
for file in self.files:
voltage, current = self.load_file(file_path=file)
samples_per_file = len(voltage)
actual_sampling_rate = samples_per_file / FILE_LENGTH_SEC
actual_sampling_rates.append(actual_sampling_rate)
self.average_sampling_rate = np.mean(actual_sampling_rates)
return self.average_sampling_rate
def get_datetime_from_filepath(self, filepath: str) -> datetime:
"""
Extracts the datetime from a filename of a CREAM file.
Parameters
----------
filepath (str): path to a CREAM file
Returns
-------
start_timestamp (datetime): start timestamp of the file, extracted from the filename
"""
filename = os.path.basename(filepath) # get the filename
string_timestamp = "-".join(filename.split("-")[2:-1])
datetime_object = datetime.strptime(string_timestamp, '%Y-%m-%dT%H-%M-%S.%fT%z') # string parse time
return datetime_object
def get_index_from_timestamp(self, start_timestamp: datetime, event_timestamp: datetime) -> int:
"""
Returns the index of the event, represented by the event_timestamp, relativ to the start_timestamp (i.e. start timestamp of the file of interest e.g.)
Parameters
----------
start_timestamp (datetime.datetime): start timestamp of the window the event is located at
event_timestamp (datetime.datetime): timestamp of the event of interest
Returns
-------
event_index (int): The resulting event index
"""
sec_since_start = event_timestamp - start_timestamp
event_index = sec_since_start.total_seconds() * (self.sampling_rate) # and # multiply by samples per second
return int(event_index)
def get_timestamp_from_index(self, start_timestamp: datetime, event_index: int) -> datetime:
"""
Returns the timestamp for an event index. The event index has to be relative to a start_timestamp of a window.
Parameters
----------
start_timestamp (datetime.datetime): start timestamp of the window.
event_index (int): Index of the event of interest, has to be relative to the start_timestamp provided.
Returns
-------
event_timestamp (datetime.datetime): The resulting timestamp
"""
seconds_per_sample = 1 / self.sampling_rate # 1 second / samples = seconds per sample
time_since_start = event_index * seconds_per_sample
event_ts = start_timestamp + timedelta(seconds=time_since_start)
return event_ts
def _adjust_amplitude_offset(self, file: h5py.File) -> Tuple[int, int]:
"""
Resembles the pre-processing functionality in the BLOND repository (one_second_data_summary_functions.py) by
<NAME>.
Computes the mean per period to get an estimate for the offset in each period.
This is done for the voltage signal.
The period length is computed using the nominal sampling rate. Tthis can deviate from the
actual period length. Therefore, we zero pad the voltage signal to get full periods again before computing
the mean.
Then we use the estimate per period, to linearly interpolate the mean values per period, to get an offset value
per sample point in the signal. We then use the offset of the voltage to compute the offset of the current by multiplying
it by the crest-coefficient of 1/sqrt(2), i.e., approx. 0.7 .
Parameters
----------
file (h5py.File): a h5py CREAM file.
Returns
-------
voltage_offset (int): the voltage offset to adjust for
current_offset (int): the current offset to adjust for
"""
length = len(file['voltage'])
# Compute the average period_length, using the nominal sampling rate
period_length = round(self.sampling_rate / 50)
# Get the missing samples, opposed to the optimal number of periods in the signal
remainder = divmod(length, period_length)[1]
voltage = np.pad(file['voltage'][:], (0, period_length - remainder), 'constant',
constant_values=0) # zero padding
voltage = voltage.reshape(-1, period_length) # the single periods, period wise reshape
mean_values_per_period = voltage.mean(axis=1) # compute the mean per period
# Create x values for the interpolation
x_per_period = np.linspace(1, length, len(mean_values_per_period), dtype=np.int) # number of periods
x_original = np.linspace(1, length, length, dtype=np.int)
# build a linear interpolation, that interpolates for each period witch offset it should have
# for each of the datapoints, interpolate the offset
voltage_offset = interpolate.interp1d(x_per_period, mean_values_per_period)(x_original)
current_offset = voltage_offset * 1 / np.sqrt(2) # roughly * 0.7
return voltage_offset, current_offset
def _convert_timezone(self, dataframe: pd.DataFrame, column_name : str, target_timezone:str) -> pd.DataFrame:
"""
Converts timezone in column_name column in dataframe to target_timezone
Parameters
----------
dataframe (pandas.DataFrame): DataFrame object, containing some time columns
column_name (str): Name of the column of interest, i.e. the name of a time column
target_timezone (str): datetime.datetime.tzinfo timezone information as a string. This is the target timezone.
Returns
-------
dataframe (pandas.DataFrame): DataFrame object, with the column_name column converted to the target_timezone
"""
ts_array = []
for i, row in dataframe.iterrows():
ts = row[column_name].tz_convert(target_timezone)
ts_array.append(ts)
dataframe[column_name] = ts_array
return dataframe
    def get_weekday_information(self, date : Union[list, np.ndarray], file_path : str = None) -> pd.DataFrame:
        """
        For certain dates, get the day-related information from the file
        provided with the dataset (German working days).

        Parameters
        ----------
        date (list, np.ndarray): list of string dates to be checked, format: year-month-day
        file_path (string): default=None; if not provided, the default location
            of the file (dataset root / day_information.csv) is assumed
        Returns
        -------
        day_information_df (pd.DataFrame): DataFrame with columns:
            Date (datetime.date), WorkingDay (boolean), Weekday (string).
            NOTE(review): returns None when ``date`` is neither a list nor an
            ndarray — callers should pass a sequence, not a single string.
        """
        if file_path is None:
            file_path = os.path.abspath(self.dataset_location + "/../" + "day_information.csv")
        day_information_df = None
        if self.weekday_information_df is None:  # lazily load and cache the CSV once
            self.weekday_information_df = pd.read_csv(file_path)
        if type(date) in [list, np.ndarray]:
            if not all(isinstance(n, str) for n in date):  # if not all dates are strings, convert them
                date = [str(n) for n in date]
            day_information_df = self.weekday_information_df[self.weekday_information_df.Date.isin(date)]
            day_information_df.Date = day_information_df.Date.apply(lambda x: pd.to_datetime(x, format='%Y-%m-%d')).dt.date
        return day_information_df
| 2.5 | 2 |
2020-07-month-long-challenge/day22.py | jkbockstael/leetcode | 0 | 12759425 | #!/usr/bin/env python3
# Day 22: Binary Tree Zigzag Level Order Traversal
#
# Given a binary tree, return the zigzag level order traversal of its nodes'
# values. (ie, from left to right, then right to left for the next level and
# alternate between).
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node holding a value and optional child links."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    def zigzagLevelOrder(self, root: TreeNode) -> [[int]]:
        """Return node values level by level, reversing direction on odd levels."""
        if root is None:
            return []
        levels = []
        current = [root]
        left_to_right = True
        while current:
            values = [node.val for node in current]
            levels.append(values if left_to_right else values[::-1])
            left_to_right = not left_to_right
            # Gather the next level in natural left-to-right order.
            current = [child for node in current
                       for child in (node.left, node.right)
                       if child is not None]
        return levels
# Test
# Build the sample tree [3, 9, 20, null, null, 15, 7] and check the expected
# zigzag traversal when this module is executed.
test_tree = TreeNode(3)
test_tree.left = TreeNode(9)
test_tree.right = TreeNode(20)
test_tree.right.left = TreeNode(15)
test_tree.right.right = TreeNode(7)
assert Solution().zigzagLevelOrder(test_tree) == [[3],[20,9],[15,7]]
| 4.3125 | 4 |
votemanager/votemanager.py | dexbiobot/SML-Cogs | 17 | 12759426 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import datetime as dt
import os
import discord
from __main__ import send_cmd_help
from box import Box, BoxList
from cogs.utils import checks
from cogs.utils.chat_formatting import pagify
from cogs.utils.dataIO import dataIO
from discord.ext import commands
# On-disk location of this cog's persisted settings.
PATH = os.path.join("data", "votemanager")
JSON = os.path.join(PATH, "settings.json")
class Survey(Box):
    """A single survey: title, description, eligible role ids, options, and cast votes."""

    def __init__(self, title=None, description=None, role_ids=None, options=None, votes=None, timestamp=None):
        super().__init__()
        self.title = title
        self.description = description
        # Collections default to fresh Box containers so instances never share state.
        self.role_ids = BoxList() if role_ids is None else role_ids
        self.options = BoxList() if options is None else options
        self.votes = Box() if votes is None else votes
        self.timestamp = timestamp
class VoteManager:
    """Vote Manager. Voting module.

    Stores surveys per-server in a JSON-backed Box; surveys are addressed by
    their 1-based position in the server's survey list.
    """

    def __init__(self, bot):
        """Init.

        default_box=True makes missing keys read back as empty Boxes instead
        of raising KeyError.
        """
        self.bot = bot
        self.settings = Box(dataIO.load_json(JSON), default_box=True)

    def save_settings(self):
        """Persist the in-memory settings back to disk."""
        dataIO.save_json(JSON, self.settings)

    def add_survey(self, server, title, description, roles, options):
        """Add a new survey.

        :returns: 1-based ID of the new survey (its position in the list).
        """
        server_settings = self.settings[server.id]
        # A default Box means no survey list exists yet for this server.
        if server_settings.surveys == Box():
            server_settings.surveys = BoxList()
        survey = Survey(
            title=title,
            description=description,
            role_ids=[r.id for r in roles],
            options=options,
            timestamp=dt.datetime.utcnow().timestamp()
        )
        server_settings.surveys.append(survey)
        self.save_settings()
        return len(server_settings.surveys)

    def get_surveys(self, server):
        """Return list of surveys on the server (creating an empty list lazily)."""
        server_settings = self.settings[server.id]
        if "surveys" not in server_settings:
            server_settings.surveys = BoxList()
        return server_settings.surveys

    def get_survey_by_id(self, server, id):
        """Return survey by ID, where ID is the 0-based index; None if out of range."""
        surveys = self.get_surveys(server)
        if id >= len(surveys):
            return None
        return surveys[id]

    def reset_server(self, server):
        """Reset server settings, discarding all surveys and votes."""
        self.settings[server.id] = Box()
        self.settings[server.id].surveys = BoxList()
        self.save_settings()

    @commands.group(pass_context=True, aliases=['vm'])
    async def votemanager(self, ctx):
        """Settings."""
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)

    @checks.serverowner()
    @votemanager.command(name="reset", aliases=[], pass_context=True, no_pm=True)
    async def votemanager_reset(self, ctx):
        """Reset server settings."""
        server = ctx.message.server
        self.reset_server(server)
        await self.bot.say("Server settings reset.")

    @checks.mod_or_permissions()
    @votemanager.command(name="add", aliases=['a'], pass_context=True, no_pm=True)
    async def votemanager_add(self, ctx):
        """Add vote. Interactive."""
        author = ctx.message.author
        server = ctx.message.server
        await self.bot.say("Add a new survey. Continue? (y/n)")
        answer = await self.bot.wait_for_message(author=author)
        if answer.content.lower() != 'y':
            await self.bot.say("Aborted.")
            return
        #: Title
        await self.bot.say("Enter the title of the vote:")
        answer = await self.bot.wait_for_message(author=author)
        title = answer.content
        #: Description
        await self.bot.say("Enter the description of the vote:")
        answer = await self.bot.wait_for_message(author=author)
        description = answer.content
        #: Roles (pipe-separated names; every name must resolve on the server)
        await self.bot.say("Enter list of roles who can vote for this, separated by `|`:")
        answer = await self.bot.wait_for_message(author=author)
        role_names = [a.strip() for a in answer.content.split('|')]
        roles = [discord.utils.get(server.roles, name=role_name) for role_name in role_names]
        for role in roles:
            if role is None:
                await self.bot.say("Cannot find {} on server. Aborting…".format(role))
                return
        #: Options (pipe-separated)
        await self.bot.say("Enter a list of options, separated by `|`:")
        answer = await self.bot.wait_for_message(author=author)
        options = [a.strip() for a in answer.content.split('|')]
        survey_id = self.add_survey(server, title, description, roles, options)
        # Show the newly created survey back to the author.
        await ctx.invoke(self.votemanager_list, survey_id)

    @votemanager.command(name="list", aliases=['l'], pass_context=True, no_pm=True)
    async def votemanager_list(self, ctx, survey_number=None):
        """List votes.

        Without an argument, lists all surveys; with a 1-based survey number,
        shows that survey's roles and options.
        """
        server = ctx.message.server
        surveys = self.get_surveys(server)
        if len(surveys) == 0:
            await self.bot.say("No surveys found.")
            return
        if survey_number is None:
            em = discord.Embed(
                title="Vote Manager",
                description="List of surveys"
            )
            for i, s in enumerate(surveys, 1):
                em.add_field(
                    name=str(i),
                    value=s.title,
                    inline=False
                )
            em.set_footer(
                text='[p]vm list 1 to see details about survey 1'
            )
            await self.bot.say(embed=em)
        else:
            # NOTE(review): survey_number is not validated here; a non-numeric
            # or out-of-range value raises inside int()/get_survey_by_id.
            id = int(survey_number) - 1
            survey = self.get_survey_by_id(server, id)
            em = discord.Embed(
                title=survey.title,
                description=survey.description
            )
            em.add_field(
                name='Role(s)',
                value=', '.join([discord.utils.get(server.roles, id=rid).name for rid in survey.role_ids]),
            )
            em.add_field(
                name='Options',
                value='\n'.join(
                    ['`{}. ` {}'.format(number, option) for number, option in enumerate(survey.options, 1)]
                ),
                inline=False
            )
            em.set_footer(
                text='[p]vm vote {} [option_number] to cast your vote.'.format(survey_number)
            )
            await self.bot.say(embed=em)

    @votemanager.command(name="vote", pass_context=True, no_pm=True)
    async def votemanager_vote(self, ctx, survey_number, option_number=None):
        """Vote.

        Validates the survey/option numbers and the author's roles, then
        records the vote (re-voting overwrites the previous choice).
        """
        server = ctx.message.server
        author = ctx.message.author
        survey_id = int(survey_number) - 1
        survey = self.get_survey_by_id(server, survey_id)
        if survey is None:
            await self.bot.say("Invalid survey id.")
            return
        if option_number is None:
            await self.bot.say("You didn’t enter your option number. Here are the options:")
            await ctx.invoke(self.votemanager_list, survey_number)
            return
        if not option_number.isdigit():
            await self.bot.say("Option number must be a number.")
            return
        option_number = int(option_number)
        if option_number > len(survey.options) or option_number < 1:
            await self.bot.say("That is not a valid options.")
            await ctx.invoke(self.votemanager_list, survey_number)
            return
        # Author must hold at least one of the survey's eligible roles.
        roles = [discord.utils.get(server.roles, id=id) for id in survey.role_ids]
        valid_roles = [x for x in roles if x in author.roles]
        if len(valid_roles) == 0:
            await self.bot.say("You do not have the required roles to vote for this survey.")
            return
        if author.id in survey.votes.keys():
            voted_option_id = survey.votes[author.id]
            await self.bot.say(
                "You have previously voted for option {}. {}".format(
                    voted_option_id + 1, survey.options[voted_option_id]
                )
            )
        self.add_vote(server, author, survey_number, option_number)
        await self.bot.say(
            "You have cast a vote for option {}. {}".format(
                option_number, survey.options[int(option_number) - 1]
            )
        )

    def add_vote(self, server, author, survey_number, option_number):
        """Add a vote (stores the 0-based option index keyed by the author's id)."""
        survey_id = int(survey_number) - 1
        option_id = int(option_number) - 1
        survey = self.get_survey_by_id(server, survey_id)
        survey.votes[author.id] = option_id
        self.save_settings()
def check_folder():
    """Ensure the cog's data directory exists (no-op if already present)."""
    os.makedirs(PATH, exist_ok=True)
def check_file():
    """Create an empty settings JSON when the current file is missing or invalid."""
    if not dataIO.is_valid_json(JSON):
        dataIO.save_json(JSON, {})
def setup(bot):
    """Red cog entry point: prepare on-disk storage, then register the cog."""
    check_folder()
    check_file()
    bot.add_cog(VoteManager(bot))
| 1.734375 | 2 |
squaresEnumerator.py | squares-sql/SQUARES | 20 | 12759427 | #!/usr/bin/env python
# File: squares-enumerator.py
# Description: An SQL Synthesizer Using Query Reverse Engineering
# Author: <NAME>
# Created on: 22-02-2019 15:13:15
# Usage: python3 squaresEnumerator.py [flags|(-h for help)] specFile.in
# Python version: 3.6.4
from sys import argv
from string import *
import tyrell.spec as S
from tyrell.interpreter import PostOrderInterpreter, GeneralError
from tyrell.enumerator import *
from tyrell.decider import Example, ExampleConstraintDecider, ExampleConstraintPruningDecider
from tyrell.synthesizer import Synthesizer
from tyrell.logger import get_logger
import rpy2.robjects as robjects
from itertools import permutations
import warnings
from rpy2.rinterface import RRuntimeWarning
import sqlparse as sp
import re
import sys
import os
warnings.filterwarnings("ignore", category=RRuntimeWarning)
logger = get_logger('tyrell')
# Mutable module-level state shared between the interpreter and main():
counter_ = 0        # monotonically increasing suffix for fresh R names
distinct = False
getProgram = False  # once a solution is found, eval_* also records its R source
final_program = ''  # accumulated R script of the found solution
_tables = dict()    # maps R data-frame name -> creation counter
output_attrs = ""   # header line of the expected output table
attributes = []     # integer-typed attributes, populated by find_conditions()
# Load the R packages the generated pipelines depend on.
robjects.r('''
library(dplyr)
library(dbplyr)
library(tidyr)
library(stringr)
options(warn=-1)
''')
## Common utils.
def get_collist(sel):
    """Identity hook: the selected column list is already in its final textual form."""
    return sel
def get_fresh_name():
    """Mint a unique intermediate data-frame name (RET_DF<k>), bumping the global counter."""
    global counter_
    counter_ += 1
    return 'RET_DF' + str(counter_)
def get_fresh_col():
    """Mint a unique column name (COL<k>), bumping the same global counter."""
    global counter_
    counter_ += 1
    return 'COL' + str(counter_)
def get_type(df, index):
    """Return the R class of column `index` (1-based) of the named data frame `df`."""
    query = 'sapply({df_name}, class)[{pos}]'.format(df_name=df, pos=index)
    return robjects.r(query)[0]
def getConst(cons):
    """Render a constant for use inside a dplyr filter().

    Integer literals (including 0), the special token "max(n)", and known
    integer attributes are emitted verbatim; everything else is wrapped in
    double quotes so R treats it as a string literal.
    """
    global attributes
    try:
        int(cons)  # raises for anything that is not an integer literal
        # BUG FIX: the previous `if int(cons): return ...` fell through and
        # implicitly returned None when the constant was 0.
        return str(cons)
    except (ValueError, TypeError):
        if str(cons) == "max(n)" or cons in attributes:
            return str(cons)
        return "\"" + str(cons) + "\""
def getColsPermutations(cols, num):
    """All comma-joined orderings of `cols`, for every length from `num` down to 1."""
    joined = []
    for size in range(num, 0, -1):
        joined.extend(", ".join(p) for p in permutations(cols, size))
    return joined
def eq_r(actual, expect):
    """True iff the two named R data frames compare equal after coercing every column to character."""
    global distinct
    probe = 'all.equal(lapply({lhs}, as.character),lapply({rhs}, as.character))'.format(lhs=actual, rhs=expect)
    try:
        outcome = robjects.r(probe)
    except:
        # Any R-side failure (e.g. incomparable frames) counts as "not equal".
        return False
    return outcome[0] == True
def findConst(consts):
    """Return True iff any constant (in its quoted string form) is an integer literal."""
    if not consts:
        return False
    try:
        int(consts[0][1:-1])  # strip the surrounding quote characters
        # BUG FIX: the previous `if int(...)` implicitly returned None (and
        # stopped scanning) when the constant was "0".
        return True
    except (ValueError, TypeError):
        return findConst(consts[1:])
class SquaresInterpreter(PostOrderInterpreter):
    """Interpret synthesized DSL programs by translating each operator into an
    R/dplyr pipeline executed through rpy2.

    Every ``eval_*`` method builds one R assignment, binds its result to a
    fresh ``RET_DF<k>`` name, executes it, and returns that name. When the
    global ``getProgram`` flag is set (after a solution is found), the R
    source is also appended to ``final_program``. Any R-side failure is
    normalized to ``GeneralError`` so the synthesizer can prune the candidate.
    """

    ## Concrete interpreter
    def eval_ColInt(self, v):
        # Literal column index: passed through unchanged.
        return v

    def eval_ColList(self, v):
        # Literal column list: passed through unchanged.
        return v

    def eval_const(self, node, args):
        # A constant node evaluates to its single argument.
        return args[0]

    def eval_unused(self, node, args):
        # Placeholder production: just consumes a fresh name.
        return get_fresh_name()

    def eval_select(self, node, args):
        """select(cols) on args[0]; args[2] == "distinct" additionally dedupes rows."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        _script = '{ret_df} <- {table} %>% ungroup() %>% select({cols})'.format(ret_df=ret_df_name, table=args[0], cols=get_collist(args[1]))
        if args[2] == "distinct":
            _script += ' %>% distinct()'
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting select...')
            raise GeneralError()

    def eval_filter(self, node, args):
        """filter() with a single condition; args[1] is either "col op const" or a str_detect spec."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        if "str_detect" not in args[1]:
            col, op, const = args[1].split(" ")
            # "max(n)" must stay inside the grouped frame, hence no ungroup().
            _script = '{ret_df} <- {table} %>% ungroup() %>% filter({col} {op} {const})'.format(ret_df=ret_df_name, table=args[0], op=op, col=col, const=getConst(const)) if const != "max(n)" else '{ret_df} <- filter({table}, {col} {op} {const})'.format(ret_df=ret_df_name, table=args[0], op=op, col=col, const="max(n)")
        else:
            # args[1] looks like 'str_detect(col|needle)'; rebuild the call with a quoted needle.
            col, string = args[1].split("|")
            _script = '{ret_df} <- {table} %>% ungroup() %>% filter({col}, {const}))'.format(ret_df=ret_df_name, table=args[0], col=col, const="\""+string[:-1]+"\"")
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting filter...')
            raise GeneralError()

    def eval_filters(self, node, args):
        """filter() combining two conditions with args[3] ('|' or '&')."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        if "str_detect" not in args[1]:
            col, op, const = args[1].split(" ")
            const = getConst(const) if const != "max(n)" else "max(n)"
            arg1 = col + " " + op + " " + const
        else:
            col, string = args[1].split("|")
            arg1 = col+", "+"\""+string[:-1]+"\")"
        if "str_detect" not in args[2]:
            col, op, const = args[2].split(" ")
            const = getConst(const) if const != "max(n)" else "max(n)"
            arg2 = col + " " + op + " " + const
        else:
            col, string = args[2].split("|")
            arg2 = col+", "+"\""+string[:-1]+"\")"
        _script = '{ret_df} <- {table} %>% ungroup() %>% filter({arg1} {Operator} {arg2})'.format(ret_df=ret_df_name, table=args[0], arg1=arg1, arg2=arg2, Operator=args[3])
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting filters...')
            raise GeneralError()

    def eval_summariseGrouped(self, node, args):
        """group_by(args[2]) %>% summarise(args[1]); 'paste|col' specs become paste(collapse=":")."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        if "paste" in args[1]:
            args[1] = '{at} = paste({at}, collapse=:)'.format(at=args[1].split("|")[1])
        _script = '{ret_df} <- {table} %>% group_by({cols}) %>% summarise({cond})'.format(ret_df=ret_df_name, table=args[0], cols=get_collist(args[2]), cond=args[1].replace(":", "\":\""))
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting summarise...')
            raise GeneralError()

    def eval_summarise(self, node, args):
        """Ungrouped summarise(args[1]) over the whole table."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        if "paste" in args[1]:
            args[1] = '{at} = paste({at}, collapse=\":\")'.format(at=args[1].split("|")[1])
        _script = '{ret_df} <- {table} %>% summarise({cond})'.format(ret_df=ret_df_name, table=args[0], cond=args[1])
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting summarise...')
            raise GeneralError()

    def eval_inner_join(self, node, args):
        """Natural inner join of two tables (dplyr infers the key columns)."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        _script = '{ret_df} <- inner_join({t1}, {t2})'.format(
            ret_df=ret_df_name, t1=args[0], t2=args[1])
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting innerjoin...')
            raise GeneralError()

    def eval_inner_join3(self, node, args):
        """Natural inner join of three tables, left-associated."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        _script = '{ret_df} <- inner_join(inner_join({t1}, {t2}), {t3})'.format(
            ret_df=ret_df_name, t1=args[0], t2=args[1], t3=args[2])
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting innerjoin3...')
            raise GeneralError()

    def eval_inner_join4(self, node, args):
        """Natural inner join of four tables, left-associated."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        _script = '{ret_df} <- inner_join(inner_join(inner_join({t1}, {t2}), {t3}), {t4})'.format(
            ret_df=ret_df_name, t1=args[0], t2=args[1], t3=args[2], t4=args[3])
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting innerjoin4...')
            raise GeneralError()

    def eval_anti_join(self, node, args):
        """anti_join on the projection of both tables to the columns in args[2]."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        _script = '{ret_df} <- anti_join(select({t1},{col}), select({t2}, {col}))'.format(
            ret_df=ret_df_name, t1=args[0], t2=args[1], col=get_collist(args[2]))
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting innerjoin...')
            raise GeneralError()

    def eval_left_join(self, node, args):
        """Natural left join of two tables."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        _script = '{ret_df} <- left_join({t1}, {t2})'.format(
            ret_df=ret_df_name, t1=args[0], t2=args[1])
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting innerjoin...')
            raise GeneralError()

    def eval_bind_rows(self, node, args):
        """Row-wise concatenation of two tables (SQL UNION ALL analogue)."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        _script = '{ret_df} <- bind_rows({t1}, {t2})'.format(
            ret_df=ret_df_name, t1=args[0], t2=args[1])
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting innerjoin...')
            raise GeneralError()

    def eval_intersect(self, node, args):
        """Set intersection of the two tables projected to the columns in args[2]."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        _script = '{ret_df} <- intersect(select({t1},{col}), select({t2}, {col}))'.format(
            ret_df=ret_df_name, t1=args[0], t2=args[1], col=get_collist(args[2]))
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting innerjoin...')
            raise GeneralError()

    def eval_unite(self, node, args):
        """tidyr::unite - fuse columns args[1] and args[2] into one, ':'-separated."""
        global final_program, getProgram
        ret_df_name = get_fresh_name()
        _tables[ret_df_name] = counter_
        _script = '{ret_df} <- unite({t1}, {col1}, which(colnames({t1})=="{col1}"), {col2}, which(colnames({t1})=="{col2}"), sep=":")'.format(
            ret_df=ret_df_name, t1=args[0], col1=get_collist(args[1]), col2=get_collist(args[2]))
        if getProgram:
            final_program += _script + "\n"
        try:
            ret_val = robjects.r(_script)
            return ret_df_name
        except:
            #LOGGER logger.error('Error in interpreting innerjoin...')
            raise GeneralError()

    ## Abstract interpreter
    def apply_row(self, val):
        # Row count of a (named) R data frame, used by the deduction engine.
        df = val
        if isinstance(val, str):
            df = robjects.r(val)
        ## df: rpy2.robjects.vectors.DataFrame
        return df.nrow

    def apply_col(self, val):
        # Column count of a (named) R data frame.
        df = val
        if isinstance(val, str):
            df = robjects.r(val)
        return df.ncol

    def apply_name(self, val):
        # Creation order of the table, as recorded in _tables.
        return _tables[val]
def divide_int_str_constants(const):
    """Split constants into (string_constants, integer_constants).

    A constant is integer-like iff int() accepts it. BUG FIX: the previous
    `if c == '0' or int(c)` silently dropped values that parse to zero but
    are not literally '0' (e.g. "00") from both lists.
    """
    str_const, int_const = [], []
    for c in const:
        try:
            int(c)
            int_const.append(c)
        except (ValueError, TypeError):
            str_const.append(c)
    return str_const, int_const
def divide_int_str_attributes(files, attrs):
    """Classify attributes as string- or integer-typed by sampling the CSV inputs.

    "n" (the row-count aggregate) is always integer. For every other attribute
    we look at its value on the first data row of each file whose header
    contains it: integer-parseable (or '0') -> integer, otherwise -> string.

    BUG FIX: the original called f.readline() twice inside one condition, so
    the check inspected two *different* rows (and raised IndexError on files
    with a single data row, misclassifying integer columns as strings).
    """
    str_attr, int_attr = [], []
    for a in attrs:
        if a == "n":
            if a not in int_attr:
                int_attr.append(a)
        for i in files:
            with open(i, 'r') as f:
                columns = f.readline()[:-1].split(",")
                if a in columns:
                    ind = columns.index(a)
                    try:
                        # Read the first data row exactly once.
                        value = f.readline()[:-1].split(",")[ind]
                        if value == '0' or int(value):
                            if a not in int_attr:
                                int_attr.append(a)
                    except (ValueError, IndexError):
                        if a not in str_attr:
                            str_attr.append(a)
    return str_attr, int_attr
def find_filter_conditions(str_const, int_const, str_attr, int_attr, new_int_attr, aggrs, files, necessary_conditions, summarise_conditions):
    """Enumerate candidate filter() conditions for the DSL.

    Pairs every constant with every type-compatible attribute (string
    attributes only with values actually present in the input files), plus
    integer comparisons against summarise-derived attributes and the special
    "n == max(n)" condition. Appends one group per constant to
    `necessary_conditions` (mutated in place) and records ordering pairs
    (condition, summarise-condition-it-depends-on) in `happens_before`.

    Returns (conditions, filtered_necessary_conditions, happens_before).
    """
    conditions = []
    int_ops = ["==", ">", "<", ">=", "<="]
    str_ops = ["==", "!="]
    happens_before = []
    for sc in str_const + int_const:
        necessary_conditions.append([])
        for sa in str_attr:
            # att: does constant sc actually occur in column sa of some file?
            att = False
            for i in files:
                if att:
                    break
                with open(i, 'r') as f:
                    columns = f.readline()[:-1].split(",")
                    if sa in columns:
                        ind = columns.index(sa)
                        for l in f:
                            if l[:-1].split(",")[ind] == sc:
                                att = True
                                break
                        else:
                            continue
            if 'like' in aggrs:
                # str_detect candidates are emitted even when sc was not found verbatim.
                conditions.append('str_detect({sa}|{sc})'.format(sa=sa, sc=sc))
                necessary_conditions[-1].append(conditions[-1])
            if not att:
                continue
            for so in str_ops:
                conditions.append('{sa} {so} {sc}'.format(sa=sa, so=so, sc=sc))
                necessary_conditions[-1].append(conditions[-1])
    for ic in int_const:
        necessary_conditions.append([])
        for ia in int_attr + new_int_attr:
            if ic == ia:
                continue
            for io in int_ops:
                conditions.append('{ia} {io} {ic}'.format(ia=ia, io=io, ic=ic))
                necessary_conditions[-1].append(conditions[-1])
            if ia == "n":
                # Filtering on n requires the n = n() summarise to happen first.
                happens_before.append((conditions[-1],"n = n()"))
    for ic in new_int_attr:
        # Comparisons against summarise-derived attributes (e.g. maxage).
        for ia in int_attr + new_int_attr:
            if ic == ia:
                continue
            for io in int_ops:
                conditions.append('{ia} {io} {ic}'.format(ia=ia, io=io, ic=ic))
                for sc in summarise_conditions:
                    if ic in sc:
                        happens_before.append((conditions[-1], sc))
    necessary_conditions = list(filter(lambda a: a != [], necessary_conditions))
    # if "max" in aggrs and "n" in aggrs or "max(n)" in aggrs:
    if "max(n)" in aggrs:
        conditions.append("n == max(n)")
        happens_before.append((conditions[-1],"n = n()"))
        necessary_conditions.append([conditions[-1]])
    return conditions, necessary_conditions, happens_before
def find_summarise_conditions(int_attr, str_attr, aggrs, necessary_conditions):
    """Enumerate candidate summarise() conditions for the requested aggregates.

    Appends one condition group per aggregate to `necessary_conditions`
    (mutated in place). Returns (non-empty groups, attributes introduced by
    the aggregates, all conditions).
    """
    conditions = []
    new_int_attr = []
    for agg in aggrs:
        if agg == "like":
            continue  # handled as a filter (str_detect), not a summarise
        necessary_conditions.append([])
        if agg == "n":
            cond = 'n = n()'
            conditions.append(cond)
            necessary_conditions[-1].append(cond)
            continue
        if 'concat' in agg:
            # String concatenation may target any attribute.
            for attr in int_attr + str_attr:
                cond = 'paste|{at}'.format(at=attr)
                conditions.append(cond)
                necessary_conditions[-1].append(cond)
            continue
        if agg == "max(n)":
            continue  # expanded later, in find_filter_conditions
        for attr in int_attr:
            cond = '{a}{ia} = {a}({ia})'.format(ia=attr, a=agg)
            conditions.append(cond)
            necessary_conditions[-1].append(cond)
            # The aggregate creates a new integer-typed column, e.g. "maxage".
            new_int_attr.append('{a}{ia}'.format(ia=attr, a=agg))
    groups = [g for g in necessary_conditions if g != []]
    return groups, new_int_attr, conditions
def find_conditions(files, const, attrs, aggrs, bools):
    """Derive all candidate filter/summarise conditions plus DSL ordering predicates.

    Also publishes the integer-typed attribute names via the module-level
    `attributes` global, which getConst() consults.
    """
    global attributes
    necessary = []
    str_const, int_const = divide_int_str_constants(const)
    str_attr, int_attr = divide_int_str_attributes(files, attrs)
    necessary, new_int_attr, sum_cond = find_summarise_conditions(int_attr, str_attr, aggrs, necessary)
    filt_cond, necessary, happens_before = find_filter_conditions(
        str_const, int_const, str_attr, int_attr, new_int_attr, aggrs, files, necessary, sum_cond)
    attributes = int_attr + new_int_attr
    return filt_cond, sum_cond, necessary, happens_before
def find_necessary_conditions(conds):
    """Serialize each non-empty condition group into a `constant_occurs` predicate line.

    Stops at the first empty group (mirrors the original early-exit behavior).
    """
    out = ""
    for group in conds:
        if group == []:
            break
        out += "\npredicate constant_occurs(\"" + ",".join(group) + "\");"
    return out
def happensBefore(conds):
    """Serialize (later, earlier) condition pairs into `happens_before` predicate lines.

    Stops at the first empty tuple (mirrors the original early-exit behavior).
    """
    out = ""
    for pair in conds:
        if pair == ():
            break
        out += "\npredicate happens_before(\"" + pair[0] + "\",\"" + pair[1] + "\");"
    return out
def DSL():
    """Build the Tyrell DSL spec for the current benchmark.

    Reads the spec file named by argv[-1] (inputs / output / const / aggrs /
    attrs / bools / loc lines), loads the input CSVs into R, derives candidate
    conditions, and instantiates example/squares.tyrell with them.

    Returns (dsl_spec_string, input_table_names, R_preamble_script, loc).
    """
    global counter_
    global _tables
    global output_attrs
    prog_out = ""
    Operators = ""
    concat = ""
    input_tables, ags, cns, ats, bls, db_columns = [], [], [], [], [], []
    # DSL fragments that are spliced into the spec template depending on
    # which constants/aggregates the benchmark declares.
    filtersOne = "\nfunc filter: Table r -> Table a, FilterCondition f {\n row(r) <= row(a);\n col(r) == col(a);\n}"
    filters = filtersOne
    filterAndOr = "\nfunc filters: Table r -> Table a, FilterCondition f, FilterCondition g, Op o {\n row(r) <= row(a);\n col(r) == col(a);\n}"
    filterPredicateOne = "\npredicate is_not_parent(inner_join3, filter, 100);\npredicate is_not_parent(inner_join4, filter, 100);\npredicate is_not_parent(filter, filter, 100);\npredicate distinct_inputs(filter);\n"
    filterPredicate = filterPredicateOne
    filterPredicateTwo = "predicate distinct_filters(filters, 1, 2);\npredicate is_not_parent(filters, filters, 100);\npredicate is_not_parent(inner_join, filters, 100);\npredicate is_not_parent(inner_join3, filters, 100);\npredicate is_not_parent(inner_join4, filters, 100);\npredicate distinct_inputs(filters);"
    summarise = "\nfunc summariseGrouped: Table r -> Table a, SummariseCondition s, Cols b {\n row(r) <= row(a);\n col(r) <= 3;\n}\n\npredicate is_not_parent(inner_join4, summariseGrouped, 100);\npredicate is_not_parent(summariseGrouped, summariseGrouped, 100);"
    # read the input and output files
    f_in = open(argv[-1], 'r')
    inputs = f_in.readline()[:-1].split(":")[1].replace(" ","").split(",")
    prog_out += "con <- DBI::dbConnect(RSQLite::SQLite(), \":memory:\")\n"
    for i in inputs:
        # Load each CSV both into the R session and into the in-memory SQLite
        # connection (copy_to) so sql_render can translate the pipeline later.
        _script = 'input{cnt} <- read.table("{file}", sep =",", header=T)\ninput{cnt}\n'.format(file=i, cnt=counter_)
        prog_out += _script
        prog_out += 'input{cnt} <- copy_to(con,input{cnt})\n'.format(cnt=counter_)
        benchmark1_input = robjects.r(_script)
        input_tables.append('input{cnt}'.format(cnt=counter_))
        _tables[input_tables[-1]] = counter_
        counter_+=1
        with open(i, 'r') as f:
            db_columns = list(set(db_columns + f.readline()[:-1].split(",")))
    output = f_in.readline()[:-1].split(":")[1].replace(" ","")
    _script = 'expected_output <- read.table("{file}", sep =",", header=T)\nexpected_output\n'.format(file=output)
    prog_out += _script
    _tables['expected_output'] = counter_
    counter_+=1
    benchmark1_output = robjects.r(_script)
    # read the list of constants from the input
    consts = f_in.readline()[:-1].replace(" ","").split(":",1)
    intConst = findConst(consts[1].replace(" ","").split(","))
    filterFlag = 0
    if(consts[1]!=''):
        filterFlag = 1
        consts_temp = ""
        if len(consts[1].split(","))>1:
            # Two or more constants: allow compound filters (and/or).
            filterFlag = 2
            filters = filterAndOr
            filterPredicate = filterPredicateTwo
            Operators = "enum Op{\n \"|\", \"&\"\n}"
        cns = consts[1].replace(" ","").replace("\"","").split(",")
    else:
        filterPredicate, filters, consts = "", "", ""
    # read the list of aggregation functions from the input file
    aggrs = f_in.readline()[:-1].replace(" ","").split(":")
    if aggrs[1]!='':
        ags = aggrs[1].replace(" ","").replace("\"","").split(",")
        for a in ags:
            if a == "concat":
                # concat is realized via tidyr::unite, not summarise.
                ags.remove(a)
                concat = "\nfunc unite: Table r -> Table a, Col c, Col d {\n row(r) <= row(a);\n col(r) < col(a);\n}"
        if (len(ags) == 1 and "like" in ags) or len(ags)==0:
            summarise = ""
    else:
        aggrs = ""
        summarise = ""
    if "\"max(n)\"" in aggrs:
        # max(n) is treated as a constant usable in filters, not a summarise.
        cns.append("max(n)")
        aggrs = aggrs.replace(",\"max(n)\"", "")
    file_path = 'example/squares.tyrell'
    # read the list of attributes from the input file
    attrs = f_in.readline()[:-1].replace(" ","").split(":")
    if(attrs[1]!=''):
        ats = list(attrs[1].replace(" ","").replace("\"","").split(","))
        ats = ats + ["n"] if "n" in ags and intConst else ats
    elif "\"n\"" in aggrs:
        ats.append("n")
    else:
        attrs = ""
    # "bools:" line is optional; when present, "loc:" is on the next line.
    hasBools = False
    bools = f_in.readline()[:-1].replace(" ","").split(":")
    if "bools" in bools:
        hasBools = True
    if not hasBools:
        loc = int(bools[1])
    else:
        loc = int(f_in.readline()[:-1].replace(" ","").split(":")[1])
    filterConditions, summariseConditions, necessary_conditions, happens_before = find_conditions(inputs, cns, ats, ags, bls)
    if filters == "" and filterConditions != []:
        filters = filtersOne
        filterPredicate = "\npredicate is_not_parent(filter, filter, 100);"
    if len(necessary_conditions) > 1:
        filters = filtersOne + filterAndOr
        filterPredicate = "predicate distinct_filters(filters, 1, 2);\n\npredicate is_not_parent(filters, filter, 100);\npredicate is_not_parent(filter, filters, 100);\npredicate is_not_parent(filter, filter, 100);\npredicate is_not_parent(filters, filters, 100);"
        Operators = "enum Op{\n \"|\", \"&\"\n}"
    necessary_conditions = find_necessary_conditions(necessary_conditions)
    necessary_conditions += happensBefore(happens_before)
    # find which attributes are in the output table, and format the DSL
    with open(output, 'r') as f:
        cols = f.readline()
        output_attrs = cols[:-1]
    cols = str(getColsPermutations(str(db_columns)[1:-1].replace("'","").replace(" ","").split(","), 2))[1:-1].replace("'", "\"")
    oneColumn = str(getColsPermutations(str(db_columns)[1:-1].replace("'","").replace(" ","").split(","), 1))[1:-1].replace("'", "\"")
    # NOTE: `dir` is the module-level working-directory prefix (shadows the builtin).
    with open(dir+file_path, 'r') as f:
        spec_str = f.read()
    fil_conditions = "enum FilterCondition{\n"+ str(filterConditions)[1:-1].replace("'","\"") +"\n}\n" if filterConditions!=[] else ""
    sum_conditions = "enum SummariseCondition{\n"+ str(summariseConditions)[1:-1].replace("'","\"") +"\n}\n" if summariseConditions != [] else ""
    return spec_str.format(cols=cols, Tables=str("Table, "*len(inputs))[:-2], summarise=summarise, filters=filters, filterPred=filterPredicate, FilterConditions=fil_conditions, SummariseConditions=sum_conditions, Op=Operators, necessaryConditions=necessary_conditions, SelectCols=str("\""+output_attrs+"\""), col=oneColumn, concat=concat), input_tables, prog_out, loc
# Counter used only by the disabled beautifier_aux helper kept at the end of the file.
index_table_aux = 0
def beautifier(sql):
    """Strip dbplyr's TBL_LEFT/TBL_RIGHT (and LHS/RHS) column aliases, then pretty-print the SQL."""
    alias_patterns = (
        "\`TBL_LEFT\`\.\`[^,\`]*\` AS |\`LHS\`\.\`[^,\`]*\` AS ",
        "\`TBL_RIGHT\`\.\`[^,\`]*\` AS |\`RHS\`\.\`[^,\`]*\` AS ",
    )
    for pattern in alias_patterns:
        sql = re.sub(pattern, "", sql)
    return sp.format(sql, reindent=True, keyword_case='upper')
def main(seed=None):
    """Run the synthesis loop: enumerate programs of increasing size until one
    matches the expected output, then print/return its R and SQL forms.

    Returns (final_R_program, beautified_SQL).
    """
    global getProgram, final_program
    if not debug:
        # Silence R/Python noise unless -d was given.
        sys.stderr = open(dir+'output.err', 'w+')
        warnings.filterwarnings("ignore", category=RRuntimeWarning)
        warnings.filterwarnings('ignore')
    logger.info('Parsing Spec...')
    dsl, input_tables, prog_out, loc = DSL()
    spec = S.parse(dsl)
    logger.info('Parsing succeeded')
    logger.info('Building synthesizer...')
    # Start from 1 line of code and grow until a solution is found.
    loc = 1
    while (True):
        logger.info("Lines of Code: "+str(loc))
        if argv[1]=="tree":
            enumerator = SmtEnumerator(spec, depth=loc+1, loc=loc)
        else:
            # Lines enumerator; flags select the symmetry-breaking mode.
            if "-off" in argv:
                enumerator = LinesEnumerator(spec, depth=loc+1, loc=loc)
            elif "-on" in argv:
                enumerator = LinesEnumerator(spec, depth=loc+1, loc=loc, break_sym_online=True)
            else:
                enumerator = LinesEnumerator(spec, depth=loc+1, loc=loc, sym_breaker=False)
        synthesizer = Synthesizer(
            #loc: # of function productions
            enumerator=enumerator,
            decider=ExampleConstraintPruningDecider(
                spec=spec,
                interpreter=SquaresInterpreter(),
                examples=[
                    Example(input=input_tables, output='expected_output'),
                ],
                equal_output=eq_r
            )
        )
        logger.info('Synthesizing programs...')
        prog = synthesizer.synthesize()
        if prog is not None:
            logger.info('Solution found: {}'.format(prog))
            # Re-evaluate the winning program with getProgram set so the
            # eval_* methods record its R source into final_program.
            getProgram = True
            interpreter=SquaresInterpreter()
            evaluation = interpreter.eval(prog, input_tables)
            if dir == "./":
                print()
                if "-nr" not in argv:
                    print("------------------------------------- R Solution ---------------------------------------\n")
                    print(prog_out)
                    print(final_program)
                    print();print()
                print("+++++++++++++++++++++++++++++++++++++ SQL Solution +++++++++++++++++++++++++++++++++++++\n")
            # Replay the whole program so sql_render can translate the final table.
            robjects.r('{rscript}'.format(rscript=prog_out+final_program))
            sql_query = robjects.r('sql_render({result_table})'.format(result_table=evaluation))
            if dir == "./":
                print(beautifier(str(sql_query)[6:]))
                print()
            return final_program,beautifier(str(sql_query)[6:])
        else:
            logger.info('No more queries to be tested. Solution not found!')
            logger.info('Increasing the number of lines of code.')
            loc = loc + 1
# Global runtime flags. NOTE: `dir` shadows the builtin and holds the working
# directory prefix ("./" for CLI use, "../" when driven through Squares).
debug=False
dir ="./"
if __name__ == '__main__':
    # CLI entry point: configure logging verbosity from flags, then synthesize.
    if "-d" in argv:
        debug = True
        print("Hey")
        logger.setLevel('DEBUG')
    else:
        logger.setLevel('CRITICAL')
    seed = None
    if "-h" in argv:
        exit("Usage: python3 squaresEnumerator.py [tree|lines] [flags -h, ...] input.in\nflags:\n-on : computing symmetries online\n-off : computing symmetries offline\n-d : debug info\n\n-nr : only SQL solution\n\nDefault: lines enumerator and without symmetry breaking")
    # A numeric first argument is interpreted as a random seed.
    if len(argv) > 1:
        try:
            seed = int(argv[1])
        except ValueError:
            pass
    prog = main(seed)
class Squares(object):
    """Programmatic front-end for the SQUARES synthesizer.

    Writes caller-supplied tables and a problem-specification file under
    ``users/``, then invokes :func:`main` on that spec file.
    """
    def __init__(self):
        super(Squares, self).__init__()
        # Template for the generated problem-specification file.
        self.template = "inputs: {inputs}\noutput: {output}\nconst: {const}\naggrs: {aggrs}\nattrs: {attrs}\nbools:\nloc: {loc}\n"
    def synthesize(self, inputs, output_ex, const="", aggrs="", attrs="", loc=0):
        """Synthesize a program from input/output table contents.

        ``inputs`` is a list of CSV strings, ``output_ex`` the expected output
        table; ``const``/``aggrs``/``attrs`` are comma-separated hint lists and
        ``loc`` the number of lines of code to start from.  Mutates the module
        globals ``argv`` and ``dir`` before delegating to :func:`main`.
        """
        global argv, dir
        dir = "../"
        ins = list([])
        temp = self.template
        # Locate the users/files directory; falls back to the CWD-relative
        # layout (and resets `dir`) when run from the project root.
        try:
            path, dirs, files = next(os.walk("../users/files"))
        except:
            path, dirs, files = next(os.walk("users/files"))
            dir="./"
        # New spec/table names are numbered after the existing spec count.
        file_count = str(len(files) +1)
        i_c = 0
        for i in inputs:
            input = open(dir+"users/tables/"+"i"+str(file_count)+str(i_c),"w+")
            input.write(i)
            input.close()
            ins.append(dir+"users/tables/"+"i"+str(file_count)+str(i_c))
            i_c += 1
        output = open(dir+"users/tables/"+"o"+str(file_count),"w+")
        output.write(output_ex)
        output.close()
        output = dir+"users/tables/o"+str(file_count)
        input_file_name = dir+"users/files/"+"f"+str(file_count)
        input_file = open(input_file_name, "w+")
        # Render the python list of paths as a bare comma-separated string.
        inputs=str(ins).replace("\'","").replace("]","").replace("[","")
        input_file.write(temp.format(inputs=inputs,output=output, const="\""+const.replace(",","\",\"").replace(" ","")+"\"", aggrs="\""+aggrs.replace(",","\",\"").replace(" ","")+"\"", attrs="\""+attrs.replace(",","\",\"").replace(" ","")+"\"", loc=str(loc)).replace("\"\"",""))
        input_file.close()
        # Fake a command line ("lines" enumerator + spec file) and run.
        argv = []
        argv.append("lines")
        argv.append(input_file_name)
        return main()
# # not used
# def beautifier_aux(tokens):
# # print(tokens)
# global index_table_aux
# sub_query = ""
# left_index = right_index = None
# for t in tokens:
# if "(SELECT" in str(t):
# if "AS `TBL_RIGHT`" == str(t)[-13:]:
# right_index = index_table_aux
# index_table_aux += 1
# elif "AS `TBL_LEFT`" == str(t)[-13:]:
# left_index = index_table_aux
# index_table_aux += 1
# if "`TBL_LEFT`" in str(t):
# left_index = index_table_aux
# index_table_aux += 1
# if "`TBL_RIGHT`" in str(t):
# right_index = index_table_aux
# index_table_aux += 1
# for t in tokens:
# if "(SELECT" in str(t):
# # print(t)
# if "AS `TBL_RIGHT`" == str(t)[-13:]:
# aux_str = str(t).split("AS `TBL_RIGHT`")
# new_input = sp.parse(aux_str[0])[0]
# # print("RIGHT", t, "-->", new_input)
# sub_query += beautifier_aux(new_input) + " AS " + "table_"+str(right_index)
# elif "AS `TBL_LEFT`" == str(t)[-13:]:
# aux_str = str(t).split("AS `TBL_LEFT`")
# new_input = sp.parse(aux_str[0])[0]
# # print("LEFT", t, "-->", new_input)
# sub_query += beautifier_aux(new_input) + " AS " + "table_"+str(left_index)
# else:
# sub_query += beautifier_aux(t)
# else:
# sub_query += str(t).replace("`TBL_LEFT`", "table_"+str(left_index)).replace("`TBL_RIGHT`", "table_"+str(right_index))
# return sub_query
| 2.59375 | 3 |
day2/day2.py | ohaz/adventofcode2020 | 0 | 12759428 | <reponame>ohaz/adventofcode2020
import re
from file_reader import get_file_entries
regex = re.compile(r'(?P<min>\d+)-(?P<max>\d+)\s(?P<policy>\w):\s(?P<password>.*)')
def is_valid_sub1_password(line):
    """Part 1 policy: the policy letter must occur between min and max times."""
    match = regex.match(line)
    occurrences = match.group('password').count(match.group('policy'))
    lower = int(match.group('min'))
    upper = int(match.group('max'))
    return lower <= occurrences <= upper
def sub1():
    """Print how many entries in the input file satisfy the part-1 policy."""
    entries = get_file_entries('./day2/input.txt')
    valid_count = sum(is_valid_sub1_password(entry) for entry in entries)
    print(valid_count)
def is_valid_sub2_password(line):
    """Part 2 policy: exactly one of the two 1-based positions holds the letter."""
    match = regex.match(line)
    password = match.group('password')
    policy = match.group('policy')
    first_hit = password[int(match.group('min')) - 1] == policy
    second_hit = password[int(match.group('max')) - 1] == policy
    # XOR: valid iff exactly one position matches.
    return first_hit != second_hit
def sub2():
    """Print how many entries in the input file satisfy the part-2 policy.

    Fix: the original final line had non-Python dataset residue appended
    (`| 3.390625 | 3 |`), which made the module unparseable.
    """
    lines = get_file_entries('./day2/input.txt')
    amount = 0
    for line in lines:
        amount += is_valid_sub2_password(line)
    print(amount)
import PBRTv3Lex
import PBRTv3Yacc
from Directives import *
import sys
class PBRTv3Loader:
    """Parses a PBRT v3 scene file into the Directives object model.

    The lexer/parser (PBRTv3Lex / PBRTv3Yacc) produce nested tuples; this
    class walks those tuples and populates a :class:`Scene`.
    """
    def importFile(self, filename):
        """Read *filename* and return the raw parsed structure from the yacc grammar."""
        data = open(filename).read()
        sceneStructure = PBRTv3Yacc.parse(data)
        return sceneStructure
    def loadScene(self, sceneStructure):
        """Dispatch the parsed structure to directive and/or world loading.

        A one-element structure is either a pure directive block or a pure
        world block; a two-element structure contains both, in that order.
        """
        scene = Scene()
        if len(sceneStructure) == 1:
            if sceneStructure[0][0] in ['Integrator', 'Sampler', 'Film', 'Filter', 'Camera', 'Transform']:
                scene = self.loadDirectives(sceneStructure[0], scene)
            else:
                scene = self.loadWorld(sceneStructure[0], scene)
        else:
            scene = self.loadDirectives(sceneStructure[0], scene)
            scene = self.loadWorld(sceneStructure[1], scene)
        return scene
    def loadDirectives(self, directiveStructure, scene):
        """Populate scene-wide settings (integrator, camera, sampler, film, transform)."""
        scene.sensor = Sensor()
        for struct in directiveStructure:
            directive = struct[0]
            if directive == 'Integrator':
                scene.integrator.type = struct[1]
                if struct[2] is not None:
                    scene.integrator.params = self.loadParams(struct[2])
            elif directive == 'Camera':
                scene.sensor.type = struct[1]
                if struct[2] is not None:
                    scene.sensor.params = self.loadParams(struct[2])
            elif directive == 'Sampler':
                scene.sensor.sampler.type = struct[1]
                if struct[2] is not None:
                    scene.sensor.sampler.params = self.loadParams(struct[2])
            elif directive == 'Film':
                scene.sensor.film.type = struct[1]
                if struct[2] is not None:
                    scene.sensor.film.params = self.loadParams(struct[2])
            elif directive == 'PixelFilter':
                scene.sensor.film.filter = struct[1]
            elif directive == 'Transform':
                scene.sensor.transform = Transform()
                if struct[2] is not None:
                    # Reshape the flat 16-value list into a 4x4 row-major matrix.
                    scene.sensor.transform.matrix = struct[2]
                    scene.sensor.transform.matrix = [scene.sensor.transform.matrix[i:i + 4] for i in range(0, len(scene.sensor.transform.matrix), 4)]
        return scene
    def loadWorld(self, worldStructure, scene):
        """Populate materials, shapes, lights and textures from the world block.

        NOTE(review): inside 'AttributeBegin' the Transform matrix is kept as a
        flat 16-value list, while 'TransformBegin' (and the sensor transform)
        reshape it into 4x4 rows — confirm whether that asymmetry is intended.
        """
        materials = []
        shapes = []
        lights = []
        textures = [] # {}
        # Materials are referenced by name; NamedMaterial sets the material
        # applied to all subsequently declared shapes.
        currentRefMaterial = ''
        for struct in worldStructure:
            directive = struct[0]
            if directive == 'Texture':
                name = struct[1]
                type = struct[3]
                params = self.loadParams(struct[4])
                texture = Texture(name, type)
                texture.params = params
                textures.append( texture )
                #textures[name] = texture
            elif directive == 'MakeNamedMaterial':
                id = struct[1]
                type = ''
                material = None
                if struct[2] is not None:
                    params = self.loadParams(struct[2])
                    # actually there's little need to check if type is specified, but for the sake of properness...
                    if 'type' in params:
                        type = params['type'].value
                        params.pop('type')
                # I'M NOT SURE
                # if 'bumpmap' in params:
                #     bumpTextureName = params['bumpmap'].value
                #     material = BumpMap()
                #     material.texture = textures[bumpTextureName]
                #     material.material = Material(type, id)
                #     material.material.params = params
                #     materials.append(material)
                # else:
                material = Material(type, id)
                material.params = params
                # if 'Kd' in params:
                #     kd = params['Kd']
                #     if kd.type == 'texture':
                #         material.texture = textures[kd.value]
                #         material.params.pop('Kd')
                materials.append(material)
            elif directive == 'NamedMaterial':
                currentRefMaterial = struct[1]
            elif directive == 'Shape':
                # simple shape, no emitter, embed material or transform
                shape = Shape(struct[1])
                shape.params = self.loadParams(struct[2])
                shape.material = currentRefMaterial
                shapes.append(shape)
            elif directive == 'LightSource':
                # simple emitters, no transform or shape involved. they go into lights list
                emitter = Emitter(struct[1])
                emitter.transform = None
                emitter.params = self.loadParams(struct[2])
                lights.append(emitter)
            elif directive == 'AttributeBegin':
                # Attribute blocks may bundle an area light, a transform and
                # an inline material with the shapes declared inside them.
                material = None
                emitter = None
                transform = None
                for modifiedStruct in struct[1]:
                    modifiedDirective = modifiedStruct[0]
                    if modifiedDirective == 'AreaLightSource':
                        emitter = Emitter(modifiedStruct[1])
                        emitter.params = self.loadParams(modifiedStruct[2])
                    elif modifiedDirective == 'Transform':
                        transform = Transform()
                        transform.matrix = modifiedStruct[2]
                    elif modifiedDirective == 'Material':
                        type = modifiedStruct[1]
                        params = self.loadParams(modifiedStruct[2])
                        material = Material(type, '')
                        material.params = params
                    elif modifiedDirective == 'Shape':
                        # simple shape, no emitter, embed material or transform
                        shape = Shape(modifiedStruct[1])
                        shape.params = self.loadParams(modifiedStruct[2])
                        shape.emitter = emitter
                        shape.material = currentRefMaterial
                        shape.transform = transform
                        shapes.append(shape)
            elif directive == 'TransformBegin':
                # Transform blocks apply a shared transform to the shapes and
                # lights declared inside them.
                transform = None
                for modifiedStruct in struct[1]:
                    modifiedDirective = modifiedStruct[0]
                    if modifiedDirective == 'Transform':
                        transform = Transform()
                        transform.matrix = modifiedStruct[2]
                        transform.matrix = [transform.matrix[i:i + 4] for i in range(0, len(transform.matrix), 4)]
                    elif modifiedDirective == 'Shape':
                        # simple shape, no emitter, embed material or transform
                        shape = Shape(modifiedStruct[1])
                        shape.params = self.loadParams(modifiedStruct[2])
                        shape.material = currentRefMaterial
                        shape.transform = transform
                        shapes.append(shape)
                    elif modifiedDirective == 'LightSource':
                        # simple emitters, no transform or shape involved. they go into lights list
                        emitter = Emitter(modifiedStruct[1])
                        emitter.transform = transform
                        emitter.params = self.loadParams(modifiedStruct[2])
                        lights.append(emitter)
        scene.materials = materials
        scene.lights = lights
        scene.shapes = shapes
        scene.textures = textures
        return scene
    def loadParams(self, paramStructure):
        """Convert (type, name, value) triples into a dict of Param keyed by name."""
        params = {}
        for tuple in paramStructure:
            param = Param(tuple[0], tuple[1], tuple[2])
            params[tuple[1]] = param
        return params
    def __init__(self, filename):
        """Parse *filename* immediately; the result is exposed as ``self.scene``."""
        sceneStruct = self.importFile(filename)
        self.scene = self.loadScene(sceneStruct)
# if __name__ == '__main__':
# loader = PBRTv3Loader(sys.argv[1])
| 2.4375 | 2 |
flogger/blog/forms.py | jcromerohdz/FlaskDev | 0 | 12759430 | <reponame>jcromerohdz/FlaskDev
from flask_wtf import FlaskForm
from wtforms import validators, StringField, TextAreaField, SelectField, FileField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from flask_wtf.file import FileAllowed
from blog.models import Category
def categories():
    """Query factory for PostForm's category QuerySelectField (all categories)."""
    return Category.query
class PostForm(FlaskForm):
    """WTForms form for creating or editing a blog post.

    Fix: the original final line had non-Python dataset residue appended
    (`| 2.6875 | 3 |`), which made the module unparseable.
    """
    # Optional header image; restricted to JPG/PNG uploads.
    image = FileField('Image', validators=[
        FileAllowed(['jpg', 'png'], 'We only accept JPG or PNG images')
    ])
    title = StringField('Title', [
        validators.InputRequired(),
        validators.Length(max=80)
    ])
    body = TextAreaField('Content', validators=[validators.InputRequired()])
    # Existing category chosen from the database; blank allowed so a
    # new_category name can be supplied instead.
    category = QuerySelectField('Category', query_factory=categories,
                                allow_blank=True)
    new_category = StringField('New Category')
LabelTree.py | merve-kilic/MinVar-Rooting | 0 | 12759431 | <gh_stars>0
#! /usr/bin/env python
# usage: python LabelTree.py <tree_file>
import os
from Tree_extend import Tree_extend
#from dendropy import Tree,TreeList
from treeswift import *
from sys import argv
from os.path import splitext
import argparse
import optparse
# Command-line interface: read a tree file, label its nodes top-down and
# write the result as newick (to --outfile, or stdout when omitted).
parser = argparse.ArgumentParser()
parser.add_argument('-i','--input',required=True,help="input file")
parser.add_argument('-o','--outfile',required=False,help="specify output file")
parser.add_argument('-s','--schema',required=False,help="schema of your input treefile. Default is newick")
parser.add_argument('-l','--label',required=False,help="labeling style: 'leaves', 'all', or 'internal'. Default: all")
args = vars(parser.parse_args())
tree_file = args["input"]
base_name,ext = splitext(tree_file)
schema=args["schema"] if args["schema"] else "newick"
style = args["label"] if args["label"] else "all"
if args["outfile"]:
    outfile = args["outfile"]
else:
    #outfile = base_name + "_labeled" + ext
    outfile = None
# Remove a stale output file before appending; when outfile is None,
# os.remove raises TypeError which the bare except swallows on purpose.
try:
    os.remove(outfile)
except:
    pass
trees = read_tree(tree_file,schema)
# Label every tree in the file and append each to the same output.
for tree in trees:
    a_tree = Tree_extend(ddpTree=tree)
    a_tree.Topdown_label(label_type=style)
    a_tree.tree_as_newick(outfile=outfile,append=True,label_by_name=True)
| 2.65625 | 3 |
volcano/VolcanoData.py | HKSenior/volcano-map | 0 | 12759432 | <gh_stars>0
from bs4 import BeautifulSoup
from unidecode import unidecode
import pandas as pd
from .Requester import Requester
class VolcanoData(Requester):
    """Scrapes volcano records from an HTML table into a pandas DataFrame.

    Fixes: `removeChar` dropped its redundant find()-then-branch
    (str.replace is already a no-op when the char is absent) and
    `getData` now uses the idiomatic `is not None` with an explicit
    early return (behavior unchanged: still returns None on fetch failure).
    """

    def __init__(self, url):
        # Delegate HTTP fetching to a Requester bound to the source URL.
        self.req = Requester(url)

    def removeChar(self, char, string):
        """Return *string* with every occurrence of *char* removed."""
        return string.replace(char, '')

    def getData(self):
        """Parse the fetched page's <td> cells into a DataFrame.

        Columns: NAME, COUNTRY, TYPE, LAT, LON, ELEV.  Rows without
        coordinates are skipped.  Returns None when the page could not
        be fetched.
        """
        raw_html = self.req.get()
        if raw_html is None:
            return None
        soup = BeautifulSoup(raw_html, 'html.parser')
        # Set the dataframe columns
        col = ['NAME', 'COUNTRY', 'TYPE', 'LAT', 'LON', 'ELEV']
        df = pd.DataFrame(columns=col)
        df_index = 0
        td_tags = soup.find_all('td')
        # Each volcano record occupies six consecutive <td> cells.
        for index in range(0, len(td_tags), 6):
            # Get the name and remove any commas
            name = self.removeChar(',', td_tags[index].get_text().strip())
            # Get other pieces of information
            country = td_tags[index + 1].get_text().strip()
            vol_type = td_tags[index + 2].get_text().strip()
            lat = td_tags[index + 3].get_text().strip()
            lon = td_tags[index + 4].get_text().strip()
            # Check if latitude and longitude are given
            if lat == '' or lon == '':
                continue
            # Convert any non english characters
            name = unidecode(name)
            country = unidecode(country)
            # Get the elevation
            elev = td_tags[index + 5].get_text().strip()
            # Add data to dataframe and increment index
            df.loc[df_index] = [name, country, vol_type, lat, lon, elev]
            df_index += 1
        return df
| 3.125 | 3 |
import re
import json
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
BROWSER_WAIT = 10
class APITest(LiveServerTestCase):
    """Selenium smoke test for the tastypie branch API against a live server."""
    # Demo data loaded into the test database before each test.
    fixtures = ['demo.json']
    def setUp(self):
        # One Firefox instance per test; implicit wait covers slow loads.
        self.browser = webdriver.Firefox()
        self.browser.implicitly_wait(BROWSER_WAIT)
    def tearDown(self):
        self.browser.quit()
    def test_branch_api(self):
        self.assert_branches_reachable()
    def assert_branches_reachable(self):
        """Fetch the branch list as JSON and check the second branch's name."""
        self.browser.get(self.live_server_url + '/api/v1/branch/?format=json')
        json_data = extract_json(self.browser.page_source)
        branch_2 = json_data['objects'][1]['name']
        self.assertEqual(branch_2, 'Capilano')
def extract_json(page_source):
    """Extract and parse the JSON payload Firefox renders inside a <pre> tag.

    Returns {} when no <pre> block is present.

    Fix: added re.DOTALL — without it '.' never matches newlines, so any
    pretty-printed (multi-line) JSON response failed to match and was
    silently reported as {}.
    """
    match = re.search('<pre>(.*)</pre>', page_source, re.DOTALL)
    if not match:
        return {}
    page_json = match.groups()[0]
    return json.loads(page_json)
| 2.40625 | 2 |
from django.contrib.auth.models import User
from django.db import models
def generate_random_key():
    """Return a random 16-character key via Django's password generator."""
    return User.objects.make_random_password(length=16)
class TestingKey(models.Model):
    """One-time key granting access to the testing state."""
    # The random key string; generated automatically on first save.
    key = models.CharField(blank=True, max_length=16)
    # Whether the key has already been redeemed.
    is_used = models.BooleanField(default=False)
    def save(self, *args, **kwargs):
        # Only generate a key when the row is first created (no pk yet).
        if not self.pk:
            self.key = generate_random_key()
        super(TestingKey, self).save(*args, **kwargs)
| 2.5 | 2 |
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2016 by <NAME>
:mail: <EMAIL>.
:license: Apache 2.0, see LICENSE for more details.
"""
from flask_admin.contrib.sqla import ModelView
from ..models import FoFModel,get_all_fof
from logging import getLogger
from ..extensions import cache
from wtforms import PasswordField
logger = getLogger()
class CustomView(ModelView):
    """Shared base for admin views; template overrides kept here if needed."""
    pass
    # list_template = 'manager/list.html'
    # create_template = 'manager/create.html'
    # edit_template = 'manager/edit.html'
class FofAdmin(CustomView):
    """Admin view for fund-of-funds (FoF) records."""
    column_display_pk = True
    # Fixed choice lists for the strategy type and rating fields.
    form_choices = {'strategy_type': [
    ('股票多头策略','股票多头策略'),
    ('股票多空策略', '股票多空策略'),
    ('事件驱动策略', '事件驱动策略'),
    ('其他股票策略', '其他股票策略'),
    ('阿尔法策略', '阿尔法策略'),
    ('债券策略','债券策略'),
    ('货币市场策略', '货币市场策略'),
    ('管理期货策略', '管理期货策略'),
    ('套利策略', '套利策略'),
    ('宏观策略', '宏观策略'),
    ('组合基金策略','组合基金策略'),
    ('现金管理', '现金管理'),
    ],
    'rank':[
    ('0',"未评级"),
    ('1',"不关注"),
    ('2',"观察"),
    ('3',"备选"),
    ('4',"核心池")
    ]}
    form_columns = ['wind_code','sec_name','strategy_type','fund_setupdate','fund_maturitydate','fund_mgrcomp','fund_status','alias',
                    'fund_existingyear','fund_ptmyear','fund_type','fund_fundmanager','nav_acc_latest','nav_acc_mdd','sharpe',
                    'nav_date_latest','annual_return','scale_tot','scale_a','scale_b','priority_asset','inferior_asset',
                    'priority_interest_rate','rank','file','fh_inv_manager','fh_prod_manager','fh_channel_manager','nav_maintain_mode']
    # Chinese display labels for each model column.
    column_labels = dict(
        wind_code='基金代码',sec_name='基金名称',strategy_type='策略名称',fund_setupdate='成立时间',fund_maturitydate='终止日',
        fund_mgrcomp='基金经理',fund_status='基金状态',alias='别名',
        fund_existingyear='存在年限',fund_ptmyear='存续年限',fund_type='基金类型',fund_fundmanager='基金管理人员',
        nav_acc_latest='最新净值',nav_acc_mdd="最大回撤比",sharpe='夏普比',
        nav_date_latest="最新净值日期",annual_return="年化收益率",scale_tot="总规模",scale_a="A类份额规模",scale_b="B类份额规模",
        priority_asset="优先级资产规模",inferior_asset="劣后级资产规模",fh_inv_manager="投资负责人",fh_prod_manager="产品负责人",fh_channel_manager="渠道负责人",
        priority_interest_rate="优先级年化收益率",rank="基金评级信息",file="文件",nav_maintain_mode='净值模式')
    column_searchable_list = ('wind_code','sec_name')
    column_list = ('wind_code','sec_name','strategy_type','fund_setupdate','fund_maturitydate','fund_mgrcomp','fund_status')
    export_max_rows = 10
class PctAdmin(ModelView):
    """Admin view for sub-fund investment allocations within a FoF."""
    column_display_pk = True
    column_labels = dict(
        invest_scale='投资规模',
        date_adj = '调整日期',
        wind_code_s = "子基金",
        fund_info ="母基金"
    )
    column_list = ('wind_code_s', 'fund_info', 'date_adj', 'invest_scale')
    # Ajax-loaded FoF lookup, searchable by code, name or alias.
    form_ajax_refs = {
        'fund_info': {
            'fields': (FoFModel.wind_code, FoFModel.sec_name, FoFModel.alias)
        }
    }
class UserAdmin(ModelView):
    """Admin view for application users; refreshes the per-user FoF cache on save."""
    column_display_pk = True
    column_labels = dict(
        username = "用户名",
        email="邮箱",
        password_hash = "密码",
        role = "角色",
        is_admin= "管理员",
        is_staff="复华",
        update_nav="净值修改",
        is_report="研究员",
        confirmed='已激活'
    )
    column_list = ('username', 'email','password_hash','is_admin','is_staff','update_nav','is_report')
    # Only show the last 6 characters of the stored hash in list view.
    column_formatters = dict(
        password_hash=lambda v, c, m, p: '*****' + m.password_hash[-6:],
    )
    # NOTE(review): this is a plain string, not a 1-tuple (missing comma) —
    # confirm flask-admin accepts a string here, otherwise it excludes nothing.
    form_excluded_columns = ('password_hash')
    form_extra_fields = {
        'password2': PasswordField('密码哈希')
    }
    def on_model_change(self, form, User, is_created):
        # NOTE(review): the return value of set_password is assigned to
        # password_hash — if set_password mutates in place and returns None,
        # this would clobber the hash; verify against the User model.
        if len(form.password2.data) > 0 :
            User.password_hash = User.set_password(form.password2.data)
        # Keep the cached list of funds this user may manage in sync.
        fof_list = get_all_fof(User)
        if fof_list is None:
            logger.warning("用户没有可管理的基金,删除缓存")
            cache.delete(str(User.id))
        else:
            logger.info("用户{}的缓存已更新".format(User.username))
            cache.set(str(User.id), fof_list)
class StgAdmin(ModelView):
    """Admin view for per-fund strategy allocation percentages."""
    form_choices = {'stg_code': [
        ('股票多头策略', '股票多头策略'),
        ('股票多空策略', '股票多空策略'),
        ('事件驱动策略', '事件驱动策略'),
        ('其他股票策略', '其他股票策略'),
        ('阿尔法策略', '阿尔法策略'),
        ('债券策略', '债券策略'),
        ('货币市场策略', '货币市场策略'),
        ('管理期货策略', '管理期货策略'),
        ('套利策略', '套利策略'),
        ('宏观策略', '宏观策略'),
        ('现金管理', '现金管理'),
    ]}
    column_labels = dict(
        fund_info='基金名称',
        stg_code='策略类型',
        trade_date='调整日期',
        stg_pct='策略比例'
    )
    column_list = ('wind_code','stg_code','trade_date','stg_pct')
    # Ajax-loaded FoF lookup, searchable by code, name or alias.
    form_ajax_refs = {
        'fund_info': {
            'fields': (FoFModel.wind_code, FoFModel.sec_name, FoFModel.alias)
        }
    }
class RoleAdmin(ModelView):
    """Admin view for roles; refreshes affected users' FoF caches after edits."""
    column_display_pk = True
    column_labels = dict(
        name ='角色名称',
        permissions = '权限',
        file_type ='文件类型',
        fof = '母基金',
        user = "用户名"
    )
    column_list = ('name','fof', 'user','permissions','file_type')
    form_ajax_refs = {
        'fof': {
            'fields': (FoFModel.wind_code,FoFModel.sec_name,FoFModel.alias)
        }
    }
    def after_model_change(self, form, model, is_created):
        # A role change can alter which funds its users may manage, so the
        # per-user cache entry is rebuilt (same logic as UserAdmin.on_model_change).
        user = model.user
        if len(user) > 0 :
            for i in user:
                fof_list = get_all_fof(i)
                if fof_list is None:
                    logger.warning("用户没有可管理的基金,删除缓存")
                    cache.delete(str(i.id))
                else:
                    logger.info("用户{}的缓存已更新".format(i.username))
                    cache.set(str(i.id),fof_list)
class PerAdmin(ModelView):
    """Admin view for permissions."""
    column_display_pk = True
    column_labels = dict(
        name='权限名称',
        action = "函数名称",
        roles="角色"
    )
    column_list = ('name', 'action', 'roles')
class FileTypeAdmin(ModelView):
    """Admin view for file type categories."""
    column_display_pk = True
    column_labels = dict(
        file='文件',
        type_name = "类型",
        role="角色"
    )
    column_list = ('file', 'type_name', 'roles')
    form_columns = ["type_name",'file']
class FileAdmin(ModelView):
    """Admin view for files uploaded against a fund."""
    column_display_pk = True
    column_labels = dict(
        wind_code='基金',
        show_name='文件名称',
        type_name='文件类型',
        file_path='文件路径',
        upload_datetime="上传时间",
        fund_info="母基金"
    )
    column_list = ('fund_info', 'show_name', 'type_name','file_path','upload_datetime')
    # Ajax-loaded FoF lookup, searchable by code, name or alias.
    form_ajax_refs = {
        'fund_info': {
            'fields': (FoFModel.wind_code, FoFModel.sec_name, FoFModel.alias)
        }
    }
class AccAdmin(ModelView):
    """Admin view for fund NAV (net asset value) history records."""
    column_display_pk = True
    form_columns =["wind_code","nav_date","nav","nav_acc","source_mark","nav_tot"]
    column_searchable_list = ('wind_code',)
class SecAdmin(ModelView):
    """Default admin view for security records (no customization)."""
    column_display_pk = True
class EventAdmin(ModelView):
    """Admin view for fund-related reminder events."""
    form_choices = {'event_type': [
        ('投资事项', '投资事项'),
        ('投后事项', '投后事项'),
        ('法务事项', '法务事项'),
        ('其他事项', '其他事项'),
    ]}
    column_labels = dict(
        fund_info='基金名称',
        event_date='到期时间',
        event_type='事件类型',
        remind_date='提醒日期',
        handle_status='提醒状态',
        notation="消息正文",
        wind_code='基金名称',
        create_user="用户"
    )
    column_list = ('wind_code','event_date','event_type','remind_date','handle_status','create_user')
    # Ajax-loaded FoF lookup, searchable by code, name or alias.
    form_ajax_refs = {
        'fund_info': {
            'fields': (FoFModel.wind_code, FoFModel.sec_name, FoFModel.alias)
        }
    }
class ChildMapping(ModelView):
    """Admin view mapping sub-fund tranches to their parent funds."""
    column_display_pk = True
    form_columns = ['wind_code_s','wind_code','sec_name_s','date_start','date_end','warning_line','winding_line']
    column_labels = dict(
        wind_code_s = '批次代码',
        wind_code = '子基金代码',
        sec_name_s = '批次名称',
        date_start = '开始时间',
        date_end = '结束时间',
        warning_line = '预警线',
        winding_line = '清盘线'
    )
class Invest_corp_admin(ModelView):
    """Admin view for investment management companies."""
    column_display_pk = True
    form_columns = ["name", "alias", "review_status"]
    column_searchable_list = ('name',)
class Invest_corp_file_admin(ModelView):
    """Admin view for documents attached to investment management companies.

    Fix: the original final line had non-Python dataset residue appended
    (`| 2 | 2 |`), which made the module unparseable.
    """
    column_display_pk = True
    form_columns = ["file_id",'mgrcomp_id','file_type','upload_user_id','upload_datetime','file_name']
tools/test/test_test_selections.py | stungkit/pytorch | 2 | 12759436 | <gh_stars>1-10
import random
import unittest
from tools.testing.test_selections import calculate_shards
from typing import Dict, List, Tuple
class TestCalculateShards(unittest.TestCase):
    """Unit tests for tools.testing.test_selections.calculate_shards.

    Uses a fixed set of test names with known durations and checks that
    sharding balances total time across shards, with and without complete
    timing information.
    """
    tests: List[str] = [
        "super_long_test",
        "long_test1",
        "long_test2",
        "normal_test1",
        "normal_test2",
        "normal_test3",
        "short_test1",
        "short_test2",
        "short_test3",
        "short_test4",
        "short_test5",
    ]
    # Known duration (seconds) for every test above.
    test_times: Dict[str, float] = {
        "super_long_test": 55,
        "long_test1": 22,
        "long_test2": 18,
        "normal_test1": 9,
        "normal_test2": 7,
        "normal_test3": 5,
        "short_test1": 1,
        "short_test2": 0.6,
        "short_test3": 0.4,
        "short_test4": 0.3,
        "short_test5": 0.01,
    }
    def assert_shards_equal(
        self,
        expected_shards: List[Tuple[float, List[str]]],
        actual_shards: List[Tuple[float, List[str]]],
    ) -> None:
        """Compare shards pairwise: total time approximately, test lists exactly."""
        for expected, actual in zip(expected_shards, actual_shards):
            self.assertAlmostEqual(expected[0], actual[0])
            self.assertListEqual(expected[1], actual[1])
    def test_calculate_2_shards_with_complete_test_times(self) -> None:
        expected_shards = [
            (60, ["super_long_test", "normal_test3"]),
            (
                58.31,
                [
                    "long_test1",
                    "long_test2",
                    "normal_test1",
                    "normal_test2",
                    "short_test1",
                    "short_test2",
                    "short_test3",
                    "short_test4",
                    "short_test5",
                ],
            ),
        ]
        self.assert_shards_equal(
            expected_shards, calculate_shards(2, self.tests, self.test_times)
        )
    def test_calculate_1_shard_with_complete_test_times(self) -> None:
        # A single shard must simply contain everything.
        expected_shards = [
            (
                118.31,
                [
                    "super_long_test",
                    "long_test1",
                    "long_test2",
                    "normal_test1",
                    "normal_test2",
                    "normal_test3",
                    "short_test1",
                    "short_test2",
                    "short_test3",
                    "short_test4",
                    "short_test5",
                ],
            ),
        ]
        self.assert_shards_equal(
            expected_shards, calculate_shards(1, self.tests, self.test_times)
        )
    def test_calculate_5_shards_with_complete_test_times(self) -> None:
        expected_shards = [
            (55.0, ["super_long_test"]),
            (
                22.0,
                [
                    "long_test1",
                ],
            ),
            (
                18.0,
                [
                    "long_test2",
                ],
            ),
            (
                11.31,
                [
                    "normal_test1",
                    "short_test1",
                    "short_test2",
                    "short_test3",
                    "short_test4",
                    "short_test5",
                ],
            ),
            (12.0, ["normal_test2", "normal_test3"]),
        ]
        self.assert_shards_equal(
            expected_shards, calculate_shards(5, self.tests, self.test_times)
        )
    def test_calculate_2_shards_with_incomplete_test_times(self) -> None:
        # Only tests whose names contain "test1" have known durations;
        # the rest must still be distributed across shards.
        incomplete_test_times = {
            k: v for k, v in self.test_times.items() if "test1" in k
        }
        expected_shards = [
            (
                22.0,
                [
                    "long_test1",
                    "long_test2",
                    "normal_test3",
                    "short_test3",
                    "short_test5",
                ],
            ),
            (
                10.0,
                [
                    "normal_test1",
                    "short_test1",
                    "super_long_test",
                    "normal_test2",
                    "short_test2",
                    "short_test4",
                ],
            ),
        ]
        self.assert_shards_equal(
            expected_shards, calculate_shards(2, self.tests, incomplete_test_times)
        )
    def test_calculate_5_shards_with_incomplete_test_times(self) -> None:
        incomplete_test_times = {
            k: v for k, v in self.test_times.items() if "test1" in k
        }
        expected_shards = [
            (22.0, ["long_test1", "normal_test2", "short_test5"]),
            (9.0, ["normal_test1", "normal_test3"]),
            (1.0, ["short_test1", "short_test2"]),
            (0.0, ["super_long_test", "short_test3"]),
            (0.0, ["long_test2", "short_test4"]),
        ]
        self.assert_shards_equal(
            expected_shards, calculate_shards(5, self.tests, incomplete_test_times)
        )
    def test_calculate_2_shards_against_optimal_shards(self) -> None:
        # Randomized check: the 2-shard split must stay within 7/6 of the
        # optimal balanced split, for constructed time distributions.
        for _ in range(100):
            random.seed(120)
            random_times = {k: random.random() * 10 for k in self.tests}
            # all test times except first two
            rest_of_tests = [
                i
                for k, i in random_times.items()
                if k != "super_long_test" and k != "long_test1"
            ]
            sum_of_rest = sum(rest_of_tests)
            random_times["super_long_test"] = max(sum_of_rest / 2, max(rest_of_tests))
            random_times["long_test1"] = sum_of_rest - random_times["super_long_test"]
            # An optimal sharding would look like the below, but we don't need to compute this for the test:
            # optimal_shards = [
            #     (sum_of_rest, ['super_long_test', 'long_test1']),
            #     (sum_of_rest, [i for i in self.tests if i != 'super_long_test' and i != 'long_test1']),
            # ]
            calculated_shards = calculate_shards(2, self.tests, random_times)
            max_shard_time = max(calculated_shards[0][0], calculated_shards[1][0])
            if sum_of_rest != 0:
                # The calculated shard should not have a ratio worse than 7/6 for num_shards = 2
                self.assertGreaterEqual(7.0 / 6.0, max_shard_time / sum_of_rest)
                sorted_tests = sorted(self.tests)
                sorted_shard_tests = sorted(
                    calculated_shards[0][1] + calculated_shards[1][1]
                )
                # All the tests should be represented by some shard
                self.assertEqual(sorted_tests, sorted_shard_tests)
# Allow running this test file directly with `python test_test_selections.py`.
if __name__ == "__main__":
    unittest.main()
| 2.625 | 3 |
moksha/controllers/root.py | lmacken/moksha | 1 | 12759437 | <reponame>lmacken/moksha
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME> <<EMAIL>>
import moksha
import pylons
from tg import url, config
from tg import expose, flash, tmpl_context, redirect
from tg.decorators import override_template
from pylons.i18n import ugettext as _
from moksha.lib.base import BaseController
from moksha.controllers.error import ErrorController
from moksha.controllers.apps import AppController
from moksha.controllers.widgets import WidgetController
from moksha.controllers.secure import SecureController
class DefaultRootController(BaseController):
    """Fallback root controller used when no `moksha.root` app is configured."""
    @expose('mako:moksha.templates.index')
    def index(self, *args, **kw):
        # Use the configured default menu if one exists; otherwise render
        # an empty menu.
        if 'default_menu' in moksha.menus:
            tmpl_context.menu_widget = moksha.menus['default_menu']
        else:
            tmpl_context.menu_widget = lambda: ''
        #tmpl_context.contextual_menu_widget = moksha.menus['contextual_menu']
        return dict(title='[ Moksha ]')
class RootController(BaseController):
    """Top-level TurboGears controller: mounts sub-apps, auth and the live socket."""
    apps = AppController()
    widgets = WidgetController()
    error = ErrorController()
    moksha_admin = SecureController()
    @expose()
    def _lookup(self, *remainder):
        # Dispatch everything else to the app configured on the moksha.root
        # entry-point, if any.
        if moksha.root:
            return moksha.root(), remainder
        else:
            # If we're running moksha without a root specified on the
            # moksha.root entry-point, then redirect to a moksha logo
            return DefaultRootController(), remainder
    @expose()
    def livesocket(self, topic=None, callback=None, json=True):
        """Returns a raw Moksha live socket, for use in non-Moksha apps.
        <script> function bar(msg) { alert('bar(' + msg + ')'); } </script>
        <script type="text/javascript"
            src="http://localhost:8080/livesocket?topic=foo&callback=bar">
        </script>
        """
        data = {'topic': topic, 'callback': callback, 'json': json}
        # Pick the messaging backend (stomp or amqp) and render the matching
        # socket template with its connection settings.
        backend = config.get('moksha.livesocket.backend', 'stomp').lower()
        if backend == 'stomp':
            override_template(self.livesocket, 'mako:moksha.templates.stomp_socket')
            data['stomp_host'] = config.get('stomp_host', 'localhost')
            data['stomp_port'] = config.get('stomp_port', 61613)
            data['stomp_user'] = config.get('stomp_user', 'guest')
            data['stomp_pass'] = config.get('stomp_pass', '<PASSWORD>')
        elif backend == 'amqp':
            override_template(self.livesocket, 'mako:moksha.templates.amqp_socket')
            data['amqp_broker_host'] = config.get('amqp_broker_host', 'localhost')
            data['amqp_broker_port'] = config.get('amqp_broker_port', 5672)
            data['amqp_broker_user'] = config.get('amqp_broker_user', 'guest')
            data['amqp_broker_pass'] = config.get('amqp_broker_pass', '<PASSWORD>')
        # Orbited bridges the browser to the message broker.
        data['orbited_host'] = config.get('orbited_host', 'localhost')
        data['orbited_port'] = config.get('orbited_port', 9000)
        data['orbited_scheme'] = config.get('orbited_scheme', 'http')
        data['orbited_url'] = '%s://%s:%s' % (data['orbited_scheme'],
                data['orbited_host'], data['orbited_port'])
        environ = pylons.request.environ
        data['server'] = '%s://%s:%s%s' % (environ.get('wsgi.url_scheme', 'http'),
                environ['SERVER_NAME'],
                environ['SERVER_PORT'],
                environ['toscawidgets.prefix'])
        return data
    @expose('mako:moksha.templates.login')
    def login(self, came_from=None):
        """Start the user login."""
        if not came_from:
            came_from = url('/')
        # repoze.who tracks failed attempts; warn after the first failure.
        login_counter = pylons.request.environ['repoze.who.logins']
        if login_counter > 0:
            flash(_('Wrong credentials'), 'warning')
        return dict(page='login', login_counter=str(login_counter),
                came_from=came_from)
    @expose()
    def post_login(self, came_from=None):
        """
        Redirect the user to the initially requested page on successful
        authentication or redirect her back to the login page if login failed.
        """
        if not came_from:
            came_from = url('/')
        if not pylons.request.identity:
            login_counter = pylons.request.environ['repoze.who.logins'] + 1
            redirect(url('/login', came_from=came_from, __logins=login_counter))
        userid = pylons.request.identity['repoze.who.userid']
        flash(_('Welcome back, %s!') % userid)
        redirect(came_from)
    @expose()
    def post_logout(self, came_from=None):
        """
        Redirect the user to the initially requested page on logout and say
        goodbye as well.
        """
        flash(_('We hope to see you soon!'))
        if not came_from:
            came_from = url('/')
        redirect(came_from)
| 2.03125 | 2 |
crestify/archivers/__init__.py | punto1/crestify | 214 | 12759438 | <reponame>punto1/crestify<gh_stars>100-1000
from archive_service import ArchiveService, ArchiveException
from archiveorg import ArchiveOrgService
from archivetoday import ArchiveTodayService
| 1.125 | 1 |
meet/elm.py | neurophysics/meet | 4 | 12759439 | <reponame>neurophysics/meet<gh_stars>1-10
'''
Extreme Learning Machine Classification
Submodule of the Modular EEg Toolkit - MEET for Python.
This module implements regularized Extreme Learning Machine
Classification and Weighted Extreme Learning Machine Classification.
Classification is implemented in the ClassELM class
For faster execution of dot product the module dot_new is imported since
it avoids the need of temporary copy and calls fblas directly.
The code is available here: http://pastebin.com/raw.php?i=M8TfbURi
In future this will be available in numpy directly:
https://github.com/numpy/numpy/pull/2730
1. Extreme Learning Machine for Regression and Multiclass Classification
<NAME>, <NAME>, <NAME>, <NAME>
IEEE Transactions of Systems, Man and Cybernetics - Pat B: Cybernetics,
Vol. 42, No. 2. April 2012
2. Weighted extreme learning machine for imbalance learning.
<NAME>, <NAME>, <NAME>
Neurocomputing 101 (2013) 229-242
Author & Contact
----------------
Written by <NAME>
email: gunnar[dot]waterstraat[at]charite.de
'''
from . import _np
from . import _linalg
try: from ._dot_new import dot as _dot
except: _dot = _np.dot
def accuracy(conf_matrix):
    '''
    Global accuracy of the classifier.

    The accuracy is the proportion of correctly classified items
    (diagonal of the confusion matrix) relative to the total number of
    items.  Beware that this measure is very sensitive to imbalanced
    data: a classifier assigning everything to a 99%-majority class
    scores 99% accuracy while having 0% sensitivity for the minority
    class.  Whether this is acceptable depends on your application.

    Input:
    ------
    conf_matrix - shape ny x ny, where ny is the number of classes;
                  rows are actual, columns are predicted classes

    Output:
    -------
    float - the accuracy
    '''
    total = float(conf_matrix.sum(None))
    correct = _np.trace(conf_matrix)
    return correct / total
def G_mean(conf_matrix):
    '''
    Geometric mean of the per-class sensitivities.

    Much more robust to class imbalance than global accuracy: every
    class contributes equally, and a single class with zero sensitivity
    forces the result to 0.

    Input:
    ------
    conf_matrix - shape ny x ny, where ny is the number of classes;
                  rows are actual, columns are predicted classes

    Output:
    -------
    float - geometric mean of the per-class sensitivities
    '''
    from scipy.stats.mstats import gmean as _gmean
    # per-class sensitivity: correctly classified items of a class
    # divided by the total number of items actually in that class
    sensitivities = _np.diag(conf_matrix) / conf_matrix.sum(1).astype(float)
    if _np.any(sensitivities == 0):
        return 0.
    return _gmean(sensitivities)
def Matthews(conf_matrix):
    '''
    Matthews correlation coefficient (MCC) for binary classification.

    A balanced quality measure that works even for very unequal class
    sizes.  Returns a value in [-1, +1]: +1 is a perfect prediction,
    0 is no better than random, -1 is total disagreement.

    Input:
    ------
    conf_matrix - shape 2 x 2; rows are actual, columns are predicted
                  classes.  AN ERROR IS THROWN IF THE SHAPE OF THE
                  MATRIX IS NOT CORRECT.

    Output:
    -------
    float - the Matthews Correlation Coefficient; 0 is returned for
            degenerate matrices (empty row/column) where the MCC is
            undefined

    Note:
    -----
    The original implementation wrapped the computation in a bare
    ``except`` intending to return 0 on division by zero -- but numpy
    emits a warning and yields NaN instead of raising, so the handler
    never fired.  The zero denominator is now checked explicitly.
    '''
    # validate the input type: anything without a .shape is rejected
    try:
        shape = conf_matrix.shape
    except AttributeError:
        raise TypeError('conf_matrix must be numpy array or numpy matrix')
    if shape != (2, 2):
        raise ValueError('conf_matrix must be of shape 2x2')
    TN, FP, FN, TP = conf_matrix.ravel()
    N = conf_matrix.sum(None)
    if N == 0:
        # empty confusion matrix: MCC is undefined, treat as random
        return 0
    S = (TP + FN) / float(N)
    P = (TP + FP) / float(N)
    denom = _np.sqrt(S * P * (1 - S) * (1 - P))
    if denom == 0:
        # a row or column of the matrix is empty -> MCC undefined;
        # return 0 ("no better than random") instead of NaN
        return 0
    return (TP / float(N) - S * P) / denom
def PPV2DR1(conf_matrix):
    """
    Weighted average (WA) of Positive Predictive Value (PPV) and
    Detection Rate (DR):

        WA = (2*PPV + DR) / 3.

    Input:
    ------
    conf_matrix - shape 2 x 2; rows are actual, columns are predicted
                  classes.  AN ERROR IS THROWN IF THE SHAPE OF THE
                  MATRIX IS NOT CORRECT (by PPV/DR).

    Output:
    -------
    float - the weighted average
    """
    return (2. * PPV(conf_matrix) + DR(conf_matrix)) / 3.
def DR(conf_matrix):
    '''
    Detection Rate (sensitivity of the positive class): TP / (TP + FN).

    Input:
    ------
    conf_matrix - shape 2 x 2; rows are actual, columns are predicted
                  classes.  AN ERROR IS THROWN IF THE SHAPE OF THE
                  MATRIX IS NOT CORRECT.

    Output:
    -------
    float - the detection rate; 0.0 if there are no actual positives
            (the original produced NaN with a numpy warning here)
    '''
    # validate the input type: anything without a .shape is rejected
    # (narrowed from a bare ``except`` which also swallowed e.g.
    # KeyboardInterrupt)
    try:
        shape = conf_matrix.shape
    except AttributeError:
        raise TypeError('conf_matrix must be numpy array or numpy matrix')
    if shape != (2, 2):
        raise ValueError('conf_matrix must be of shape 2x2')
    TN, FP, FN, TP = conf_matrix.ravel()
    if TP + FN == 0:
        # no actual positive samples -> detection rate is undefined
        return 0.0
    return TP / float(TP + FN)
def PPV(conf_matrix):
    '''
    Positive Predictive Value (precision): TP / (TP + FP).

    Input:
    ------
    conf_matrix - shape 2 x 2; rows are actual, columns are predicted
                  classes.  AN ERROR IS THROWN IF THE SHAPE OF THE
                  MATRIX IS NOT CORRECT.

    Output:
    -------
    float - the PPV; 0.0 if nothing was predicted positive (the
            original produced NaN with a numpy warning here)
    '''
    # validate the input type: anything without a .shape is rejected
    # (narrowed from a bare ``except`` which also swallowed e.g.
    # KeyboardInterrupt)
    try:
        shape = conf_matrix.shape
    except AttributeError:
        raise TypeError('conf_matrix must be numpy array or numpy matrix')
    if shape != (2, 2):
        raise ValueError('conf_matrix must be of shape 2x2')
    TN, FP, FN, TP = conf_matrix.ravel()
    if TP + FP == 0:
        # no positive predictions at all -> precision is undefined
        return 0.0
    return TP / float(TP + FP)
def ssk_cv(data, labels, folds=3):
    '''
    Shuffled, stratified k-fold cross-validation split.

    Input:
    ------
    data - numpy array - shape n x p, with n items and p features
           (only the number of rows is used here)
    labels - numpy array - shape n - integer class labels
    folds - int - number of folds (default 3)

    Output:
    -------
    list of `folds` lists; each sub-list holds the original sample
    indices of one fold, with each class represented in (roughly)
    equal proportion in every fold
    '''
    from itertools import chain
    N = data.shape[0]  # number of samples
    # Random permutation of the sample indices (the shuffling step).
    # Only ``labels`` needs to be permuted alongside -- ``data`` itself
    # is never indexed below, so shuffling the (potentially large) data
    # array, as the original did, was wasted work.
    rarray = _np.random.random(N).argsort()
    labels = labels[rarray]
    # sort by class label, keeping the original indices aligned
    order = labels.argsort()
    labels = labels[order]
    rarray = rarray[order]  # shuffled original indices, grouped by class
    # classes present and the first index of each class in the sorted
    # label array; append N as the end sentinel
    cc, ci = _np.unique(labels, return_index=True)
    ci = _np.append(ci, N)
    # per class: folds + 1 boundary indices splitting the class members
    # as evenly as possible across the folds
    start_stop = _np.array([
        _np.round(_np.linspace(ci[i], ci[i + 1], folds + 1,
                               endpoint=True), 0).astype(int)
        for i in range(len(cc))])
    result = []
    for f in range(folds):
        # collect the f-th slice of every class into one fold
        temp = [rarray[start_stop[i, f]:start_stop[i, f + 1]]
                for i in range(len(cc))]
        result.append(list(chain.from_iterable(temp)))
    return result
def get_conf_matrix(true, pred, class_ratios=None):
    '''
    Build a confusion matrix.

    Input:
    ------
    true - the true labels (consecutive integers)
    pred - the predicted labels
    class_ratios - None or numpy array
                 - the actual ratio of classes; if None, the class
                   ratio observed during cross-validation is used.
                   Otherwise pass an array of length (number of
                   classes): class_ratios[i] is the actual frequency
                   of class i, and each row of the matrix is rescaled
                   accordingly.

    Output:
    -------
    conf_matrix - shape ny x ny, where ny is the number of classes;
                  rows are actual, columns are predicted classes:
                  item ij was predicted as class j but belongs to
                  class i

    Fixes relative to the original implementation:
    * ``true -= s`` / ``pred -= s`` mutated the caller's arrays in
      place; copies are used now.
    * ``class_ratios != None`` compared an ndarray elementwise, making
      ``if`` raise ValueError and breaking the re-weighting feature;
      an identity check is used now.
    '''
    true = _np.asarray(true)
    pred = _np.asarray(pred)
    # shift labels so the smallest one is 0 -- on copies, so the
    # caller's arrays stay untouched
    s = _np.min([true, pred], None)
    true = true - s
    pred = pred - s
    # number of classes (classes are consecutive integers)
    n = _np.max([true, pred], None) + 1
    # encode (actual, predicted) pairs as n*actual + predicted and count
    conf_matrix = _np.bincount(n * true + pred,
                               minlength=n * n).reshape(n, n)
    if class_ratios is not None:
        assert isinstance(class_ratios, _np.ndarray), (
            'class_ratios must be None or 1d numpy array')
        assert class_ratios.ndim == 1, (
            'dimensionality of class_ratios must be 1')
        assert len(class_ratios) == n, (
            'length of class_ratios must match number of classes')
        # normalize each row to the actual class frequency
        conf_matrix = (conf_matrix.T / conf_matrix.sum(1).astype(float)
                       * class_ratios).T
    return conf_matrix
class ClassELM:
    '''
    (Weighted) Extreme Learning Machine classifier.

    Parameters
    ----------
    L : int
        dimensionality of the random feature space, i.e. the number of
        hidden neurons (defaults to 1000)
    kernel : str
        activation function of the hidden layer; only 'sigmoid' is
        implemented at the current stage

    Usage
    -----
    Use self.cv() for cross-validation of the regularization parameter
    C, or self.train() for training with a fixed C; after either of
    these, use self.classify() to predict labels of new data.

    References: Huang et al. 2012 (regularized ELM),
    Zong et al. 2013 (weighted ELM for imbalanced data).
    '''
    def __init__(self, L=1000, kernel='sigmoid'):
        # validate the number of hidden neurons
        if type(L) == int: self.L = L
        else:
            raise TypeError('L must by an integer, representing the' +
                            'number of hidden neurons.')
        # resolve the kernel name to the activation function
        if kernel == 'sigmoid':
            self.kernel = _sigmoid
        else:
            raise Exception('Only kernel \'sigmoid\' is implemented' +
                            'at the current stage.')
        self._w = False # set weights to False
        self._pseudoy = False # set pseudo-output to false
        return

    def cv(self, data, labels, method='ssk_cv', C_array=None, folds=3,
           precision_func='accuracy', scale = True, weights=True,
           class_ratios=None, mem_size=512, verbose = True):
        '''
        Cross-validate the ELM regularization parameter C and train the
        final network with the best C found.

        Input:
        ------
        data - numpy array - shape (n x p): n samples, p features
        labels - numpy array - shape (n): class labels 0..ny-1
        method - str - cross-validation scheme; only 'ssk_cv'
                 (shuffled stratified k-folds) is implemented
        C_array - numpy array or None - candidate values for C;
                  if None, 2**-25 ... 2**24 are tested
        folds - int - number of folds (default 3)
        precision_func - str or callable - performance measure used to
                  pick C.  As string: 'accuracy', 'G_mean', 'Matthews',
                  'PPV', 'PPV2DR1'.  As callable: takes a confusion
                  matrix, returns a float in (0, 1).
        scale - bool - whether data should be scaled to range (-1, 1)
        weights - bool | float in [0,1) | numpy array - re-weighting of
                  imbalanced classes; True re-weighs to a class ratio of
                  1.0, a float sets the minority/majority ratio (Zong et
                  al. propose the golden ratio ~0.618), an array gives a
                  weight per sorted unique class
        class_ratios - None or numpy array - actual class frequencies if
                  they differ from the ratios seen during
                  cross-validation (see get_conf_matrix)
        mem_size - int or float - batch size in Mb for the computation

        Output:
        -------
        numpy array - the (geometric-mean) cross-validation score per
        entry of C_array; as a side effect the network is trained with
        the best C
        '''
        from itertools import chain
        # NOTE(review): if a caller passes a numpy array for C_array,
        # ``C_array == None`` is evaluated elementwise and this ``if``
        # raises ValueError; ``C_array is None`` was probably intended
        # -- confirm before changing.
        if C_array == None:
            C_array = 2**_np.arange(-25,25,1).astype(float)
        if method == 'ssk_cv':
            get_folds = ssk_cv
        else:
            raise NotImplementedError('Only shuffled stratified' +
                    'k-fold cross-validation (\'ssk_cv\') is' +
                    'implemented.')
        # map a precision-function name to the module-level function
        if type(precision_func) == str:
            if precision_func == 'accuracy':
                precision_func = accuracy
            elif precision_func == 'G_mean':
                precision_func = G_mean
            elif precision_func == 'Matthews':
                precision_func = Matthews
            elif precision_func == 'PPV':
                precision_func = PPV
            elif precision_func == 'PPV2DR1':
                precision_func = PPV2DR1
            else:
                # NOTE(review): due to operator precedence the ``%``
                # binds to the second string literal only, which has no
                # placeholder -- this raise itself raises TypeError.
                raise Exception('The function \'%s\' is not' +
                        'implemented (yet?).' % (precision_func))
        # check and get weights
        self._get_w(weights=weights, labels = labels)
        # create pseudo_output for each label
        self._get_pseudoy(labels = labels)
        #scale the dataset with the complete (!) training
        # set - this is why scaling is set to False
        # in the train and classifiy functions later
        self.min = _np.min(data, 0)
        self.ptp = _np.ptp(data, 0)
        if scale:
            data = 2 * (data - self.min) / self.ptp - 1
        # cut data into folds
        partitions = get_folds(data, labels, folds)
        # partitions is a list with sublists of indices of each fold
        print('Running Cross-Validation')
        # result accumulates the product of fold scores per C; the
        # geometric mean is taken after the loop
        result = _np.ones(C_array.shape, float)
        for n,C in enumerate(C_array):
            for k in range(folds):
                test = partitions[k] # fold k is used to test
                train = list(chain.from_iterable(partitions[:k] +
                        partitions[k+1:]))
                # the other (k-1) folds are used to train the network
                #in each training instance new random initialization
                # parameters are created
                self.train(data[train], labels[train], C=C,
                        mem_size = mem_size, scale=False,
                        weights=weights) # -> the weigh argument to that
                #method is no ignored since self._w already was
                # initialized get the estimated labels
                est_labels = self.classify(data[test], scale = False)
                conf_matrix = get_conf_matrix(labels[test], est_labels, class_ratios=class_ratios)
                result[n] = result[n] * precision_func(conf_matrix)
            # NOTE(review): ``n % 1 == 0`` is always True, so this
            # prints after every C -- confirm whether a coarser stride
            # was intended.
            if ((verbose) and (n % 1 == 0)):
                print('Finished %d of %d Cross-Validations.' % (n+1,
                    len(C_array)))
        # geometric mean of the per-fold scores
        result = result**(1./folds)
        # fix C as the C with the best cv-result
        try:
            C = C_array[_np.nanargmax(result)]
        except:
            # nanargmax raises if all scores are NaN -> fall back to
            # the first candidate C
            C = C_array[0]
        # now train the network with the final C
        print('Cross-Validation finished, Training final network')
        self.train(data, labels, C=C, mem_size=mem_size, scale = False,
                weights=self._w)
        print('Network trained')
        return result

    def train(self, data, labels, C, scale=True, weights = True,
              mem_size=512):
        '''
        Train the ELM classifier.

        Input:
        ------
        data - numpy array - shape n x p (reshaped to (n, -1) if
               data.ndim > 2)
        labels - array of integer class labels
        C - regularization parameter
        scale - bool - scale the features to (-1, 1) (default True)
        weights - bool | float in [0,1) | numpy array - class
               re-weighting, see cv() for details
        mem_size - number - batch size in Mb (defaults to 512)

        Output:
        -------
        None.  The output weights are stored as self._beta and
        self.istrained is set to True.
        '''
        # reshape data
        data = data.reshape(data.shape[0],-1)
        n,p = data.shape
        self.n = n
        self.p = p
        self.C = C
        # initialize parameters (random hidden-layer centres/widths)
        a, b = _get_parameters(self.L, self.p)
        self.a = a
        self.b = b
        # transform labels
        if _np.all(self._w == False): # initialized as False in the
            #class definition
            # if weights are not already fixed, they should be
            # determined here if weighing should be performed
            self._get_w(weights=weights, labels = labels)
        if not _np.any(self._pseudoy): # if the pseudo-output has not
            # been created before, do it now
            self._get_pseudoy(labels)
        if scale:
            # normalize the dataset to range (-1,1)
            self.min = _np.min(data,0)
            self.ptp = _np.ptp(data, 0)
            data = 2 * (data - self.min) / self.ptp - 1
        if n <= self.L:
            #self._algI(data, labels, mem_size=mem_size)
            # algI is not implemented yet so up to now use
            # algorithm I in any case
            self._algII(data, labels, mem_size=mem_size)
        else:
            self._algII(data, labels, mem_size=mem_size)
        self.istrained = True
        return

    def _get_w(self, weights, labels):
        '''
        This method checks the input argument 'weights'
        and broadcasts it to the class parameter self._w
        (and sets self.weigh accordingly).
        '''
        if type(weights) == bool:
            if weights == True:
                self.weigh = True
                w = 1.
            else: self.weigh = False
        else:
            # check if weights is a number
            from numbers import Number
            if isinstance(weights, Number):
                if ((weights > 0) and (weights <= 1.0)):
                    self.weigh = True
                    w = float(weights)
                else:
                    raise Exception('weights should be either a' +
                            'Boolean, a numpy array or a number in the' +
                            'half-open interval [0,1)')
            elif type(weights) == _np.ndarray:
                # an explicit per-class weight array: must have one
                # entry per class, all in (0, 1]
                if ((weights.size == labels.max() + 1) and
                        _np.all(weights > 0) and _np.all(weights <= 1.0)):
                    self.weigh = True
                    self._w = weights
                else:
                    raise TypeError('weights should be either a' +
                            'Boolean, a numpy array or a number in the' +
                            'half-open interval [0,1)')
            else:
                raise TypeError('weights should be either a Boolean,' +
                        'a numpy array or a number in the half-open'
                        'interval [0,1)')
        if self.weigh and not _np.any(self._w):
            # if re-weighing of imbalances should occur, then find
            #the minority and majority classes
            # find number of classes per label
            n_per_class = _np.bincount(labels)
            # assign w / n_per_class to the minority classes and
            # 1.0 / n_per_class to the majority classes
            self._w = _np.where(n_per_class < n_per_class.mean(), w /
                    n_per_class, 1.0 / n_per_class)
        return

    def _get_pseudoy(self, labels):
        # Build the pseudo-output targets: a scalar +/-1 encoding for
        # one or two classes (self.m = 1 output neuron), one-hot rows
        # for multi-class problems (self.m = number of classes).
        ul = _np.unique(labels)
        if len(ul) == 1:
            self._pseudoy = _np.array([1,1]).astype(int)
            self.m = 1
        elif len(ul) == 2:
            self._pseudoy = _np.array([-1,1]).astype(int)
            self.m = 1
        else:
            self._pseudoy = _np.eye(len(ul)).astype(int)
            self.m = len(ul)
        return

    def _algII(self, data, labels, mem_size):
        '''
        Train ELM with algorithm II
        i.e. formula (38) in:
        Huang et al.: Extreme Learning Machine for Regression and
        Multiclass Classification
        IEEE Transactions of Systems, Man, and Cybernetics - Part B:
        Cybernetics, Vol 42, No. 2, April 2012

        Accumulates H^T H and H^T T over data batches of at most
        mem_size Mb, then solves (I/C + H^T H) beta = H^T T.
        '''
        ###
        # split data into batches of maximum size=mem_size
        n = data.shape[0]
        data_bytes = data.nbytes / data.size
        batch_len = int(mem_size * 1024.0**2 / data_bytes / self.L)
        num_batches = int(_np.ceil(n / float(batch_len)))
        borders = _np.linspace(0,n,num_batches+1,
                endpoint=True).astype(int)
        # initialze result array
        HTH = _np.zeros([self.L,self.L], dtype=data.dtype)
        if self.m == 1:
            HTT = _np.zeros(self.L, HTH.dtype)
        else:
            HTT = _np.zeros((self.L, self.m), HTH.dtype)
        # run in batches
        for k in range(num_batches):
            temp = self._get_HTH_HTT(data =
                    data[borders[k]:borders[k+1]],
                    labels=labels[borders[k]:borders[k+1]])
            HTH += temp[0]
            if self.m == 1:
                HTT += temp[1].reshape(self.L)
            else:
                HTT += temp[1]
        try: # solution might be invalid due to singular matrix
            # NOTE(review): scipy deprecated ``sym_pos`` in favour of
            # ``assume_a='pos'`` -- confirm the installed scipy version
            # before modernizing.
            self._beta = _linalg.solve(_np.diag(1./self.C *
                    _np.ones(self.L)) + HTH, HTT, sym_pos=True,
                    check_finite=False)
        except:
            try:
                #try least squares solution
                self._beta = _linalg.lstsq(_np.diag(1./self.C *
                        _np.ones(self.L)) + HTH, HTT, check_finite=False)[0]
            except:
                raise Exception('This did not work')
        return

    def classify(self, data, mem_size = 512, scale=True):
        '''
        Classify a dataset.

        Input:
        ------
        data - numpy array, shape N x p: N items, p features
        mem_size - number - batch size in Mb (defaults to 512)
        scale - bool - scale the input with the parameters obtained
                during training

        Output:
        -------
        labels - the predicted class labels 0..ny-1

        ----------------------------------------------------------------
        Internally the method _run() is used
        '''
        if self.istrained == False:
            raise Exception('Network is not trained.')
        if scale:
            data = 2 * (data - self.min) / self.ptp - 1
        out = self._run(data, mem_size = mem_size)
        if self.m == 1:
            # binary case: sign of the single output, clipped to {0, 1}
            return _np.sign(out).clip(0,1).astype(int)
        else:
            # multi-class: index of the strongest output neuron
            return _np.nanargmax(out, -1).astype(int)

    def _run(self, data, mem_size=512):
        '''
        Internal function! Not for end user! Use the method classify!
        -------------------------------------------------------------
        Get the responses of the output neuron(s) when exposed to the
        input data, processed in batches of at most mem_size Mb.
        '''
        n = data.shape[0]
        data_bytes = data.nbytes / data.size
        batch_len = int(mem_size * 1024.0**2 / data_bytes / self.L)
        num_batches = int(_np.ceil(n / float(batch_len)))
        borders = _np.linspace(0,n,num_batches+1,
                endpoint=True).astype(int)
        if self.m > 1:
            out = _np.empty((n, self.m), float)
        else:
            out = _np.empty((n), float)
        for k in range(len(borders)-1):
            # hidden-layer response (L x batch) projected onto beta
            out[borders[k]:borders[k+1]] = self.kernel(
                    data[borders[k]:borders[k+1]],
                    self.a, self.b).T.dot(self._beta)
        return out

    def _get_HTH_HTT(self, data, labels):
        '''
        Internal function! Not for end user!
        ------------------------------------
        Needed for algorithm II: computes the (optionally per-class
        weighted) partial sums H^T H and H^T T for one data batch.
        '''
        temp = self.kernel(data, self.a, self.b)
        if self.weigh:
            HTH = _dot(temp * self._w[labels], temp.T)
            HTT = _np.dot(temp * self._w[labels],
                    self._pseudoy[labels])
        else:
            HTH = _dot(temp, temp.T)
            HTT = _np.dot(temp, self._pseudoy[labels])
        return HTH, HTT
def _get_parameters(L, p):
'''
Internal function! Not for end user!
------------------------------------
Initialize random parameters a and b
a - neuron center in interval (-1,1)
b - neuron width in interval (0,1)
'''
a = _np.random.uniform(low=-1.0, high=1.0, size=(L,p))
b = _np.random.uniform(low = 0.0, high=1.0, size=L)
return a, b
def _algI(data, a, b, kernel, C, labels):
'''
Train ELM with algorithm I
i.e. formula (32) in:
Huang et al.: Extreme Learning Machine for Regression and Multiclass
Classification
IEEE Transactions of Systems, Man, and Cybernetics - Part B:
Cybernetics, Vol 42, No. 2, April 2012
'''
#######################
# not implemented yet #
#######################
raise NotImplementedError('Algorithm I is not implemented yet!')
def _sigmoid(data, a, b):
'''
Calculate response of neurons with sigmoid kernel
and pre-generated parameters a and b.
Output shape is L x n, where L is number of neurons
and n is number of input samples
'''
if a.ndim > 1: b = b[:,None]
return 1. / (1 + _np.exp(-1*(_np.dot(a,data.T) + b)))
| 2.59375 | 3 |
connect_db_query.py | ruifgmonteiro/python_oracledb | 0 | 12759440 | <gh_stars>0
#!/usr/bin/env python
'''
File name: connect_db_query.py
Author: <NAME>
Date created: 16/01/2019
Date last modified: 16/01/2019
Python Version: 3.6
'''
import os
import cx_Oracle
import json
import sys
import time
import csv
from datetime import datetime
class ConnectDB(object):
    """Thin helper around a cx_Oracle session pool.

    Acquires a connection from the pool, runs a query and dumps every
    result row into a CSV file.
    """

    def __init__(self, poolDB):
        # poolDB: a cx_Oracle.SessionPool (or compatible) object
        super(ConnectDB, self).__init__()
        self.poolDB = poolDB

    def extractQuery(self, query, o_csv):
        """Execute *query* and write all result rows to the CSV *o_csv*.

        Retries acquiring a connection every 4 seconds until one is
        available.  The cursor and the connection are released even if
        the query fails (the original leaked both on error).
        """
        while True:
            try:
                con = self.poolDB.acquire()
                break
            # narrowed from a bare ``except`` (which also swallowed
            # KeyboardInterrupt/SystemExit)
            except Exception:
                print("Error acquiring connection!")
                print(self.poolDB.opened)
                time.sleep(4)
        if con is None:
            print('Error! No connections in the pool.')
            sys.exit(0)
        cur = con.cursor()
        try:
            cur.execute(query)
            # newline='' is required for csv.writer to avoid spurious
            # blank lines on Windows (per the csv module docs)
            with open(o_csv, mode='w', newline='') as csv_file:
                skulist_writer = csv.writer(csv_file, delimiter=',')
                for result in cur:
                    skulist_writer.writerow(result)
        finally:
            # always release resources, even on query failure
            cur.close()
            self.poolDB.release(con)
def main():
    """Open a session pool on the TST database and dump an example
    query to a time-stamped CSV file, printing the elapsed time.
    """
    start = time.time()
    # Load session credentials; a context manager closes the file
    # deterministically (the original left the handle open).
    with open('access.json') as cred_file:
        crd = json.load(cred_file)['TST']['BD']
    dsn = cx_Oracle.makedsn(crd['HOST'], crd['PORT'], crd['SID'])
    mypool = cx_Oracle.SessionPool(user=crd['USER'], password=crd['PASS'],
                                   dsn=dsn, min=4, max=20, increment=2,
                                   threaded=True)
    # Force a Latin-1 client character set for the Oracle session.
    os.environ["NLS_LANG"] = ".WE8ISO8859P1"
    # Example query.
    query = "select * from skulist_detail a where a.skulist = 13012"
    queryTime = datetime.now().strftime("%Y%m%d_%H%M%S")
    sess = ConnectDB(mypool)
    sess.extractQuery(query, 'query_result_' + format(queryTime) + '.csv')
    elapsed_time = time.time() - start
    print("Finished in " + str(round(elapsed_time, 3)) + " seconds.")
# Script entry point: run the example extraction when executed directly.
if __name__ == "__main__":
    main()
| 2.359375 | 2 |
scripts/create_release.py | sigongzoa/pill_yourself | 1 | 12759441 | #!/usr/bin/env python3
import json
import os
import subprocess
import sys
from collections import OrderedDict
script_dir = os.path.dirname(os.path.abspath(__file__))
root_dir = os.path.dirname(script_dir)
sys.path.append(root_dir)
import emsdk # noqa
def version_to_list(version_string):
    """Split a dotted version string (e.g. '1.2.3') into integer parts."""
    return list(map(int, version_string.split('.')))
def main(args):
    """Cut a new release on a fresh git branch.

    Bumps the last component of the latest known version, records the
    current emscripten-releases tip-of-tree hash for it, regenerates the
    release metadata and commits everything to a branch named
    ``version_X_Y_Z``.

    NOTE(review): ``args`` is currently unused -- confirm whether a
    version override from the command line was intended.
    """
    # Refuse to run on a dirty working tree -- the script commits files.
    if subprocess.check_output(['git', 'status', '--porcelain'], cwd=root_dir).strip():
        print('tree is not clean')
        sys.exit(1)

    release_info = emsdk.load_releases_info()
    # Next version = latest version with the last component incremented.
    new_version = version_to_list(release_info['latest'])
    new_version[-1] += 1

    branch_name = 'version_%s' % '_'.join(str(part) for part in new_version)

    # Create a new git branch
    subprocess.check_call(['git', 'checkout', '-b', branch_name], cwd=root_dir)

    new_version = '.'.join(str(part) for part in new_version)
    new_hash = emsdk.get_emscripten_releases_tot()
    print('Creating new release: %s -> %s' % (new_version, new_hash))
    release_info['releases'][new_version] = new_hash
    # Re-sort the release table so the newest version comes first.
    releases = [(k, v) for k, v in release_info['releases'].items()]
    releases.sort(key=lambda pair: version_to_list(pair[0]))
    release_info['releases'] = OrderedDict(reversed(releases))
    release_info['latest'] = new_version
    with open(os.path.join(root_dir, 'emscripten-releases-tags.txt'), 'w') as f:
        f.write(json.dumps(release_info, indent=2))
        f.write('\n')

    subprocess.check_call(os.path.join(script_dir, 'update_bazel_workspace.sh'), cwd=root_dir)

    # Create auto-generated changes to the new git branch
    subprocess.check_call(['git', 'add', '-u', '.'], cwd=root_dir)
    subprocess.check_call(['git', 'commit', '-m', new_version], cwd=root_dir)
    print('New relase created in branch: `%s`' % branch_name)
    return 0
return 0
# Script entry point: forward CLI arguments and propagate the exit code.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| 2.46875 | 2 |
apps/banks/tasks.py | KarpovDenis74/banking_analytics | 0 | 12759442 | import datetime
import xml.etree.ElementTree as ET
from pathlib import Path
from banking_analytics.celery import app
from apps.banks.models import BalanceAccount, Bank, Region
from apps.cbrf.views import CBRF
@app.task
def get_accounts():
    """Fetch the form-101 balance-account catalogue from the CBR web
    service and upsert every indicator into ``BalanceAccount``.

    The XML response is written to a dated scratch file, parsed, and
    the file is removed afterwards.
    """
    cb = CBRF()
    response = cb.query(method='Form101IndicatorsEnumXML')
    target_dir = Path('apps/banks/cbr_data/Form101IndicatorsEnum')
    stamp = datetime.date.today().strftime("%Y_%m_%d")
    file_name = target_dir / Path(f'{stamp}.xml')
    with open(file_name, 'w+', encoding="utf-8") as xml_out:
        xml_out.write(response.text)
    parser = ET.XMLParser(encoding="utf-8")
    tree = ET.parse(file_name, parser=parser)
    method_info = cb.methods.get('Form101IndicatorsEnumXML')
    root = tree.findall(method_info.get('root'), method_info.get('ns'))
    for node in root:
        account, _ = BalanceAccount.objects.get_or_create(
            indCode=str(node.find('IndCode').text),
            defaults={
                'name': str(node.find('name').text),
                'indType': str(node.find('IndType').text),
                'indChapter': str(node.find('IndChapter').text),
            },
        )
    file_name.unlink(missing_ok=False)
    return 'OK'
@app.task
def get_regions():
    """Fetch the CBR region enumeration and upsert it into ``Region``.

    The XML response is written to a dated scratch file, parsed, and
    the file is removed afterwards.
    """
    cb = CBRF()
    response = cb.query(method='EnumRegions')
    target_dir = Path('apps/banks/cbr_data/EnumRegions')
    stamp = datetime.date.today().strftime("%Y_%m_%d")
    file_name = target_dir / Path(f'{stamp}.xml')
    with open(file_name, 'w+', encoding="utf-8") as xml_out:
        xml_out.write(response.text)
    parser = ET.XMLParser(encoding="utf-8")
    tree = ET.parse(file_name, parser=parser)
    method_info = cb.methods.get('EnumRegions')
    root = tree.findall(method_info.get('root'), method_info.get('ns'))
    for node in root:
        region, _ = Region.objects.get_or_create(
            code=int(node.find('rgn').text),
            defaults={'name': str(node.find('Name').text)},
        )
    file_name.unlink(missing_ok=False)
    return 'OK'
@app.task
def get_bics():
    """Fetch the CBR BIC directory and upsert every bank into ``Bank``,
    keyed by its OGRN.

    The XML response is written to a dated scratch file, parsed, and
    the file is removed afterwards.
    """
    cb = CBRF()
    response = cb.query(method='EnumBIC')
    target_dir = Path('apps/banks/cbr_data/EnumBIC')
    stamp = datetime.date.today().strftime("%Y_%m_%d")
    file_name = target_dir / Path(f'{stamp}.xml')
    with open(file_name, 'w+', encoding="utf-8") as xml_out:
        xml_out.write(response.text)
    parser = ET.XMLParser(encoding="utf-8")
    tree = ET.parse(file_name, parser=parser)
    method_info = cb.methods.get('EnumBIC')
    root = tree.findall(method_info.get('root'), method_info.get('ns'))
    for node in root:
        # registration date comes as an ISO timestamp with offset
        reg_date = datetime.datetime.strptime(
            str(node.find('RC').text), '%Y-%m-%dT%H:%M:%S%z').date()
        bank, _ = Bank.objects.get_or_create(
            ogrn=str(node.find('RB').text),
            defaults={
                'bic': str(node.find('BIC').text),
                'reg_date': reg_date,
                'name': str(node.find('NM').text),
                'cregnr': str(node.find('cregnr').text),
                'internal_number': str(node.find('intCode').text),
                'reg_number': str(node.find('RN').text),
            },
        )
    file_name.unlink(missing_ok=False)
    return 'OK'
| 2.109375 | 2 |
src/open_FRED/netcdftools/monthly.py | SabineHaas/cli | 0 | 12759443 | """ Merges netCDF files covering the same month into one file.
Currently depends a lot on the input files having the right structure and
naming, i.e. a lot of assumptions are hardwired into this script. Maybe I'll
get to generalizing it by pulling information out of every file found, but for
now, this will have to do.
Parameters
----------
sys.argv[1]: directory
    The script will look into every tar file in this directory, and merge any
    netCDF file the tar file contains with the others, provided the name
    contains the right variables and date.
sys.argv[2]: path prefix
The new netCDF file containing the merged data found will be written to
the file "sys.argv[2]-YEAR_MONTH.nc" where YEAR_MONTH is pulled out of
filenames considered for the merge.
sys.argv[3]: str, comma separated list of variables
The argument is split by occurrences of "," and the results have any
surrounding whitespace removed. The result is then treated as the list of
variables eligible for a merge.
sys.argv[4]: regular expression
A pattern to further limit the file names the script actually handles.
Useful during development to not act on more files than absolutely
necessary.
sys.argv[5]: filename
    A single file containing all merged data will be created using this name.
"""
from glob import iglob
from pprint import pprint as pp
from tempfile import TemporaryDirectory as TD
from subprocess import call
import os.path as osp
import re
import sys
import tarfile
from dask.diagnostics import ProgressBar
import xarray as xr
from add_dimension import add_dimension
""" Command lines:
python ../monthly.py '../' './no-height' 'ASWDIFD_S, ASWDIR_S, ASWDIR_NS2'
python ../monthly.py '../' './' \
'WSS_zlevel, T_zlevel, P_zlevel, WDIRlat_zlevel'
Special: Z0
(no height but weird time bounds due to mean/instantaneous measurement)
python ../../cli/code/monthly.py './' './import-test' \
'WSS_zlevel, P_zlevel' \
'2015' \
'2015.WSS.P.nc'
"""
def merge(variable, tar, store):
    """Merge all members of ``tar`` whose name contains ``variable``.

    Matching members are extracted into a temporary directory, optionally
    patched to carry an explicit "height" dimension, opened with xarray and
    merged into a single netCDF file named "<variable>.nc" inside ``store``.

    Parameters
    ----------
    variable : str
        Substring a member's name must contain to be included in the merge.
    tar : tarfile.TarFile
        Open tar archive whose members are netCDF files.
    store : str
        Directory into which the merged file is written.

    Returns
    -------
    str
        Path of the merged netCDF file.
    """
    # chunks={"time": 12, "rlat": 11, "rlon": 11}
    chunks = {}
    with TD(dir="./_T_") as tmp:
        members = tar.getmembers()
        netcdfs = []
        for member in members:
            if variable not in member.name:
                continue
            print("Handling {}.".format(member.name))
            netcdfs.append(member.name)
            tar.extractall(tmp, members=[member])
            path = osp.join(tmp, member.name)
            # Files measured at 10m encode the height in the variable name
            # instead of having a proper "height" dimension; patch one in so
            # the merge lines up with the other height levels.
            fix_height = re.search("WSS_10M|WDIRlat_10M", member.name)
            if fix_height:
                print("Fixing height.")
                add_dimension(
                    source=path,
                    target=osp.join(tmp, "fixed"),
                    variable=fix_height.group(),
                    dimension="height",
                    position=2,
                    value=10.0,
                    # Drop the trailing "_10M" suffix from the variable name.
                    new_name=fix_height.group()[:-4],
                )
                call(["mv", osp.join(tmp, "fixed"), path])
        netcdfs = [osp.join(tmp, f) for f in netcdfs]
        print("Merging:")
        pp(netcdfs)
        target = osp.join(store, "{}.nc".format(variable))
        print("--> {}".format(target))
        # decode_cf=False keeps the raw on-disk encoding so attributes
        # survive the round trip unchanged.
        datasets = [
            xr.open_dataset(n, decode_cf=False, chunks=chunks) for n in netcdfs
        ]
        # "rotated_pole" is grid metadata present in every file; merging it
        # repeatedly would conflict, so it is excluded.
        merged = xr.merge(
            d[v] for d in datasets for v in d.data_vars if v != "rotated_pole"
        )
        computation = merged.to_netcdf(target, format="NETCDF4", compute=False)
        with ProgressBar():
            computation.compute()
    return target
if __name__ == "__main__":
    """ Variables:
    "WSS_zlevel", "T_zlevel", "P_zlevel", "Z0", "WDIRlat_zlevel",
    "ASWDIFD_S", "ASWDIR_S", "ASWDIR_NS2"
    """
    # Comma separated variable list from the command line, e.g.
    # "WSS_zlevel, P_zlevel" -> ["WSS_zlevel", "P_zlevel"].
    variables = [s.strip() for s in sys.argv[3].split(",")]
    # chunks={"time": 12, "rlat": 11, "rlon": 11}
    chunks = {}
    with TD(dir="./_T_/") as tmp:
        # Open every tar in sys.argv[1] whose name matches the filter
        # pattern given as sys.argv[4].
        tars = list(
            tarfile.open(tar)
            for tar in iglob(osp.join(sys.argv[1], "*.tar"))
            if re.search(sys.argv[4], tar)
        )
        everything = []
        for tar in tars:
            # NOTE(review): despite the name, this captures "YEAR_MONTH"
            # (e.g. "2015_06") from the archive's file name.
            year = re.search(r"(\d\d\d\d_\d\d)\.tar", tar.name).groups()[0]
            merged = []
            for variable in variables:
                merged.append(merge(variable, tar, tmp))
            print("Merging/Compressing to:")
            pp(merged)
            mergetarget = "{}-{}.nc".format(sys.argv[2], year)
            print("--> {}".format(mergetarget))
            datasets = (
                xr.open_dataset(path, decode_cf=False, chunks=chunks)
                for path in merged
            )
            data_vars = [d[v] for d in datasets for v in d.data_vars]
            for dv in data_vars:
                # Upper-case names are the measurement variables; quantize
                # them to three decimals so zlib compression is effective.
                if dv.name[0].isupper():
                    dv.encoding["least_significant_digit"] = 3
            ds = xr.merge(data_vars)
            computation = ds.to_netcdf(
                mergetarget,
                format="NETCDF4",
                compute=False,
                encoding={
                    v: {"complevel": 9, "zlib": True}
                    for v in list(ds.variables)
                },
            )
            with ProgressBar():
                computation.compute()
            # The per-variable intermediate files are no longer needed.
            call(["rm", "-r"] + merged)
            ds.close()
            everything.append(mergetarget)
    # The block below (merging everything into one file, sys.argv[5]) is
    # deliberately disabled via a bare string literal.
    """
    print("Compressing to {}.".format(sys.argv[5]))
    computation = (
        xr.merge(
            d[v]
            for d in [
                xr.open_dataset(p, chunks=chunks)
                for p in everything]
            for v in d.data_vars)
        .to_netcdf(sys.argv[5], format='NETCDF4',
                   encoding={
                       v: {'complevel': 9, 'zlib': True}
                       for v in list(ds.variables)},
                   compute=False))
    with ProgressBar():
        computation.compute()
    #call(["mv", tmpeverything, sys.argv[5]])
    """
    print("All done.")
| 2.890625 | 3 |
Python/Samples/Builder/UtBuilder.py | plasroom46/DesignPattern.Sample | 9 | 12759444 | import unittest
from abc import ABC, abstractmethod
from Builder_models import MainData, Report, LeaveRecord
from Builders import Builder, BuilderFI,BuilderIT
from Directors import Director, DirectorCEO
class UtBuilder(unittest.TestCase):
    """Unit tests for the Builder/Director collaboration."""

    def test_director(self):
        """A plain Director drives a single FI builder end to end."""
        fi_builder = BuilderFI()
        subject = Director(builder=fi_builder)
        result = subject.construct()
        self.assertEqual(result.targetBU, "Financial Department")
        self.assertEqual(result.report.name, "ROI report")
        self.assertEqual(result.leaveRecord.weeks, 2)

    def test_directorCEO(self):
        """A DirectorCEO combines the FI and IT builders into one result."""
        fi_builder = BuilderFI()
        it_builder = BuilderIT()
        subject = DirectorCEO(builder1=fi_builder, builder2=it_builder)
        result = subject.construct()
        self.assertEqual(result.targetBU, "CEO")
        self.assertEqual(result.report.name, "ROI report")
        self.assertEqual(result.leaveRecord.weeks, 4)


if __name__ == '__main__':
    unittest.main()
| 3.21875 | 3 |
preggy/assertions/like.py | Zearin/preggy | 0 | 12759445 | # -*- coding: utf-8 -*-
'''preggy 'like' assertions. For use with `expect()` (see `preggy.core`).
'''
# preggy assertions
# https://github.com/heynemann/preggy
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2013 <NAME> <EMAIL>
from __future__ import absolute_import, print_function
import re
from datetime import datetime
import difflib
from uuid import UUID
try:
from six import string_types, binary_type
except ImportError: # pragma: no cover
import warnings
warnings.warn('Ignoring six. Probably setup.py installing package.')
import numbers
from preggy import assertion
from preggy import utils
__all__ = ('to_be_like', 'not_to_be_like')
#-------------------------------------------------------------------------------------------------
# CONSTANTS
#-------------------------------------------------------------------------------------------------
# Maximum difference, in seconds, for two datetimes to still count as "alike".
DATE_THRESHOLD = 5.0
# ANSI terminal escape codes used to colorize string diffs.
RESET = '\033[m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
# Matches any ANSI color escape sequence so it can be stripped before comparing.
REMOVE_COLORS_REGEX = re.compile(
    r'(\033|\x1b|\x03)'  # prefixes
    r'\['  # non-regex bracket
    r'([0-9]*[;])?'  # semi-colon
    r'[0-9]*m',  # suffix
    flags=re.UNICODE
)
# Matches any run of whitespace (used to delete it entirely).
NORMALIZE_WHITESPACE_REGEX = re.compile(
    r'\s+',
    flags=re.UNICODE | re.MULTILINE | re.IGNORECASE
)
#-------------------------------------------------------------------------------------------------
# HELPERS
#-------------------------------------------------------------------------------------------------
# Normal form used for fuzzy string comparison: lower-case, all whitespace removed.
_filter_str = lambda s: NORMALIZE_WHITESPACE_REGEX.sub('', s.lower()).strip()
def compare(first, second):
    """Diff two strings and return ``(matcher, colorized_first, colorized_second)``.

    Bug fix: ``SequenceMatcher.get_opcodes()`` yields ``(tag, i1, i2, j1, j2)``
    where ``(i1, i2)`` index the *first* sequence and ``(j1, j2)`` the
    *second*.  The second string must therefore be rendered with
    ``first=False``; previously ``True`` was passed, slicing it with the
    first string's indices and garbling the diff whenever the two strings
    were not aligned identically.
    """
    matcher = difflib.SequenceMatcher(None, first, second)
    first = get_match_for_text(matcher, first, True)
    second = get_match_for_text(matcher, second, False)
    return matcher, first, second
def get_match_for_text(matcher, text, first):
    """Render *text* with the matcher's diff opcodes colorized.

    ``first`` selects whether *text* is the first or the second sequence
    handed to the matcher, i.e. whether the ``(i1, i2)`` or the ``(j1, j2)``
    opcode indices apply.  Deleted, inserted and replaced spans are wrapped
    in red, green and yellow ANSI escapes respectively; equal spans are
    passed through unchanged.
    """
    colors = {
        'delete': RED,
        'insert': GREEN,
        'replace': YELLOW,
    }
    pieces = []
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        if first:
            segment = text[i1:i2]
        else:
            segment = text[j1:j2]
        if tag in colors:
            segment = colors[tag] + segment + RESET
        pieces.append(segment)
    return ''.join(pieces)
def _match_alike(expected, topic, diff=False):
    '''Determines the types of `expected` and `topic`, and calls the appropriate comparison function.'''
    # NOTE: `diff` is accepted for interface compatibility with the
    # assertions below but is not used by any dispatched comparator.
    if topic is None:
        return expected is None
    if isinstance(topic, UUID):
        return _compare_uuid(expected, topic)
    # Dispatch strictly on the type of `topic`; order matters, e.g. strings
    # are handled before the generic container checks below.
    if isinstance(topic, string_types + (binary_type, )):
        return _compare_strings(expected, topic)
    if isinstance(topic, numbers.Number):
        return _compare_numbers(expected, topic)
    if isinstance(topic, (list, tuple, set)):
        return _compare_lists(expected, topic)
    if isinstance(topic, dict):
        return _compare_dicts(expected, topic)
    if isinstance(topic, datetime):
        return _compare_datetime(expected, topic)
    raise RuntimeError('Could not compare {expected} and {topic}'.format(expected=expected, topic=topic))
def _strip_string(text):
    # Normalize a string for fuzzy comparison: fix the text encoding, drop
    # ANSI color escapes, then lower-case and remove all whitespace.
    # Falsy values (None, '') are passed through unchanged.
    if not text:
        return text
    text = utils.fix_string(text)
    text = REMOVE_COLORS_REGEX.sub('', text)
    text = _filter_str(text)
    return text
def _compare_strings(expected, topic):
    '''Asserts the "like"-ness of `topic` and `expected` as strings.

    Allows some leeway. (Strings don't have to exactly match.)
    '''
    # _strip_string() already applies _filter_str() (lower-case, whitespace
    # removed), so both sides are in normal form here.  The previous extra
    # _filter_str(topic) call on the comparison line was redundant: the
    # normalization is idempotent.
    topic = _strip_string(topic)
    expected = _strip_string(expected)
    return expected == topic
def _compare_uuid(expected, topic):
    """UUIDs count as alike when their canonical string forms match."""
    return str(expected) == str(topic)
def __timedelta_to_seconds(timedelta):
    """Return the absolute duration of *timedelta* in seconds as a float.

    The previous implementation re-derived the value by hand from the
    ``microseconds``/``seconds``/``days`` fields;
    ``datetime.timedelta.total_seconds()`` performs exactly that
    computation natively.
    """
    return abs(timedelta.total_seconds())
def _compare_datetime(expected, topic):
    """Datetimes are alike when they differ by at most DATE_THRESHOLD seconds."""
    elapsed = __timedelta_to_seconds(topic - expected)
    return elapsed <= DATE_THRESHOLD
def _compare_numbers(expected, topic):
    '''Numbers are alike when both sides are numeric and equal as floats.'''
    if not isinstance(topic, numbers.Number):
        return False
    if not isinstance(expected, numbers.Number):
        return False
    return float(expected) == float(topic)
def _compare_dicts(expected, topic):
    '''Dicts are alike when each one's entries match the other's.'''
    if not _match_dicts(expected, topic):
        return False
    return _match_dicts(topic, expected)
def _match_dicts(expected, topic):
    '''Check that every key/value pair of `expected` has a like counterpart in `topic`.'''
    return all(
        key in topic and _match_alike(topic[key], value)
        for key, value in expected.items()
    )
def _compare_lists(expected, topic):
    '''Lists are alike when each one's items match the other's.'''
    if not _match_lists(expected, topic):
        return False
    return _match_lists(topic, expected)
def _match_lists(expected, topic):
    '''Asserts the "like"-ness each item in of `topic` and `expected` (as lists or tuples).'''
    # TODO: Rewrite this using itertools
    # http://docs.python.org/2/library/itertools.html
    for item in expected:
        if isinstance(item, (list, tuple)):
            # Nested sequences: accept if *any* inner sequence of `topic`
            # compares alike (membership is order-insensitive).
            found = False
            for inner_item in topic:
                if isinstance(inner_item, (list, tuple)) and _compare_lists(item, inner_item):
                    found = True
                    break
            if not found:
                return False
        elif not item in topic:
            # Scalar items only need membership, not positional equality.
            return False
    return True
#-------------------------------------------------------------------------------------------------
# Assertions
#-------------------------------------------------------------------------------------------------
@assertion
def to_be_like(topic, expected, diff=True):
    '''Asserts that `topic` is like (similar to) `expected`. Allows some leeway.'''
    result = _match_alike(expected, topic, diff=diff)
    is_str = lambda x: isinstance(x, string_types + (binary_type,))
    if not result:
        # When both sides are strings and `diff` was requested, print a
        # colorized diff before raising so the failure is easier to read.
        if diff is True and (
            is_str(topic) and
            is_str(expected)
        ):
            first, second = _strip_string(topic), _strip_string(expected)
            matcher, first, second = compare(first, second)
            print()
            print('Expected strings to be equal, but they were different:')
            print(first)
            print(second)
            print()
        raise AssertionError("Expected topic('{topic}') to be like '{expected}'".format(topic=topic, expected=expected))
@assertion
def not_to_be_like(topic, expected, diff=False):
    '''Asserts that `topic` is NOT like (NOT similar to) `expected`. Allows some leeway.'''
    # No diff is printed for the negative assertion; `diff` exists only for
    # signature symmetry with to_be_like and is forwarded unused.
    result = _match_alike(expected, topic, diff=diff)
    if result:
        raise AssertionError("Expected topic('{topic}') not to be like '{expected}'".format(topic=topic, expected=expected))
| 2.265625 | 2 |
bloom_filter.py | xpao24/crawler | 0 | 12759446 | <gh_stars>0
#!/usr/bin/python
# -*- coding:UTF-8 -*-
from bitarray import bitarray
import mmh3
class BloomFilter(object):
    """Probabilistic set-membership filter backed by a bit array.

    Each element is hashed with every seed in ``self.seeds`` (murmur3) and
    the corresponding bit positions are set.  Lookups can yield false
    positives but never false negatives.
    """

    def __init__(self, size=2**20, seeds=None):
        """Create a filter with ``size`` bits and the given hash seeds.

        Fixes: sentinel checks now use ``is None`` instead of ``== None``
        (identity test, per PEP 8); behavior is otherwise unchanged.
        """
        if size is None:
            self.size = 2**20
        else:
            self.size = size
        self.bitset = bitarray(self.size)
        self.bitset.setall(False)
        if seeds is None:
            self.seeds = [5, 7, 11, 13, 31, 67]
        else:
            self.seeds = seeds

    def notcontains(self, ele):
        """Return True if ``ele`` is definitely absent, False if it may be present."""
        # Renamed the local from `hash`, which shadowed the builtin.
        for seed in self.seeds:
            position = mmh3.hash(ele, seed) % self.size
            if not self.bitset[position]:
                return True
        return False

    def add(self, ele):
        """Insert ``ele`` by setting the bit at every seeded hash position."""
        for seed in self.seeds:
            position = mmh3.hash(ele, seed) % self.size
            self.bitset[position] = True
#f = BloomFilter(2*20)
#f.add("123")
#print f.notcontains("123")
| 3.140625 | 3 |
IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/scripts/test/test_tap2rpm.py | timkrentz/SunTracker | 4 | 12759447 | <reponame>timkrentz/SunTracker
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.scripts.tap2rpm}.
"""
import os
from twisted.trial.unittest import TestCase, SkipTest
from twisted.python import procutils
from twisted.python import versions
from twisted.python import deprecate
from twisted.python.failure import Failure
from twisted.internet import utils
from twisted.scripts import tap2rpm
# When we query the RPM metadata, we get back a string we'll have to parse, so
# we'll use suitably rare delimiter characters to split on. Luckily, ASCII
# defines some for us!
RECORD_SEPARATOR = "\x1E"
UNIT_SEPARATOR = "\x1F"
def _makeRPMs(tapfile=None, maintainer=None, protocol=None, description=None,
              longDescription=None, setVersion=None, rpmfile=None, type_=None):
    """
    Helper function to invoke tap2rpm with the given parameters.

    Any parameter left as None is omitted from the tap2rpm command line, so
    tap2rpm's own defaults apply.  When no tapfile is given, a dummy one is
    written to the current directory.  Returns whatever tap2rpm.run()
    returns (the built RPM and SRPM paths).
    """
    args = []
    if not tapfile:
        tapfile = "dummy-tap-file"
        handle = open(tapfile, "w")
        handle.write("# Dummy TAP file\n")
        handle.close()
    args.extend(["--quiet", "--tapfile", tapfile])
    if maintainer:
        args.extend(["--maintainer", maintainer])
    if protocol:
        args.extend(["--protocol", protocol])
    if description:
        args.extend(["--description", description])
    if longDescription:
        args.extend(["--long_description", longDescription])
    if setVersion:
        args.extend(["--set-version", setVersion])
    if rpmfile:
        args.extend(["--rpmfile", rpmfile])
    if type_:
        args.extend(["--type", type_])
    return tap2rpm.run(args)
def _queryRPMTags(rpmfile, taglist):
    """
    Helper function to read the given header tags from the given RPM file.

    Returns a Deferred that fires with dictionary mapping a tag name to a list
    of the associated values in the RPM header. If a tag has only a single
    value in the header (like NAME or VERSION), it will be returned as a 1-item
    list.

    Run "rpm --querytags" to see what tags can be queried.
    """
    # Build a query format string that will return appropriately delimited
    # results. Every field is treated as an array field, so single-value tags
    # like VERSION will be returned as 1-item lists.
    queryFormat = RECORD_SEPARATOR.join([
        "[%%{%s}%s]" % (tag, UNIT_SEPARATOR) for tag in taglist
    ])

    def parseTagValues(output):
        # Split rpm's output back into per-tag value lists using the ASCII
        # record/unit separators embedded by queryFormat above.
        res = {}
        for tag, values in zip(taglist, output.split(RECORD_SEPARATOR)):
            values = values.strip(UNIT_SEPARATOR).split(UNIT_SEPARATOR)
            res[tag] = values
        return res

    def checkErrorResult(failure):
        # The current rpm packages on Debian and Ubuntu don't properly set up
        # the RPM database, which causes rpm to print a harmless warning to
        # stderr. Unfortunately, .getProcessOutput() assumes all warnings are
        # catastrophic and panics whenever it sees one.
        #
        # See also:
        #   http://twistedmatrix.com/trac/ticket/3292#comment:42
        #   http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=551669
        #   http://rpm.org/ticket/106
        failure.trap(IOError)
        # Depending on kernel scheduling, we might read the whole error
        # message, or only the first few bytes.
        if str(failure.value).startswith("got stderr: 'error: "):
            newFailure = Failure(SkipTest("rpm is missing its package "
                "database. Run 'sudo rpm -qa > /dev/null' to create one."))
        else:
            # Not the exception we were looking for; we should report the
            # original failure.
            newFailure = failure
        # We don't want to raise the exception right away; we want to wait for
        # the process to exit, otherwise we'll get extra useless errors
        # reported.
        d = failure.value.processEnded
        d.addBoth(lambda _: newFailure)
        return d

    d = utils.getProcessOutput("rpm",
        ("-q", "--queryformat", queryFormat, "-p", rpmfile))
    d.addCallbacks(parseTagValues, checkErrorResult)
    return d
class TestTap2RPM(TestCase):
    """Functional tests for tap2rpm; skipped when rpmbuild is not installed."""

    def setUp(self):
        return self._checkForRpmbuild()

    def _checkForRpmbuild(self):
        """
        tap2rpm requires rpmbuild; skip tests if rpmbuild is not present.
        """
        if not procutils.which("rpmbuild"):
            raise SkipTest("rpmbuild must be present to test tap2rpm")

    def _makeTapFile(self, basename="dummy"):
        """
        Make a temporary .tap file and returns the absolute path.
        """
        path = basename + ".tap"
        handle = open(path, "w")
        handle.write("# Dummy .tap file")
        handle.close()
        return path

    def _verifyRPMTags(self, rpmfile, **tags):
        """
        Check the given file has the given tags set to the given values.
        """
        d = _queryRPMTags(rpmfile, tags.keys())
        d.addCallback(self.assertEqual, tags)
        return d

    def test_optionDefaults(self):
        """
        Commandline options should default to sensible values.

        "sensible" here is defined as "the same values that previous versions
        defaulted to".
        """
        config = tap2rpm.MyOptions()
        config.parseOptions([])
        self.assertEqual(config['tapfile'], 'twistd.tap')
        self.assertEqual(config['maintainer'], 'tap2rpm')
        self.assertEqual(config['protocol'], 'twistd')
        self.assertEqual(config['description'], 'A TCP server for twistd')
        self.assertEqual(config['long_description'],
                'Automatically created by tap2rpm')
        self.assertEqual(config['set-version'], '1.0')
        self.assertEqual(config['rpmfile'], 'twisted-twistd')
        self.assertEqual(config['type'], 'tap')
        self.assertEqual(config['quiet'], False)
        self.assertEqual(config['twistd_option'], 'file')
        self.assertEqual(config['release-name'], 'twisted-twistd-1.0')

    def test_protocolCalculatedFromTapFile(self):
        """
        The protocol name defaults to a value based on the tapfile value.
        """
        config = tap2rpm.MyOptions()
        config.parseOptions(['--tapfile', 'pancakes.tap'])
        self.assertEqual(config['tapfile'], 'pancakes.tap')
        self.assertEqual(config['protocol'], 'pancakes')

    def test_optionsDefaultToProtocolValue(self):
        """
        Many options default to a value calculated from the protocol name.
        """
        config = tap2rpm.MyOptions()
        config.parseOptions([
            '--tapfile', 'sausages.tap',
            '--protocol', 'eggs',
        ])
        self.assertEqual(config['tapfile'], 'sausages.tap')
        self.assertEqual(config['maintainer'], 'tap2rpm')
        self.assertEqual(config['protocol'], 'eggs')
        self.assertEqual(config['description'], 'A TCP server for eggs')
        self.assertEqual(config['long_description'],
                'Automatically created by tap2rpm')
        self.assertEqual(config['set-version'], '1.0')
        self.assertEqual(config['rpmfile'], 'twisted-eggs')
        self.assertEqual(config['type'], 'tap')
        self.assertEqual(config['quiet'], False)
        self.assertEqual(config['twistd_option'], 'file')
        self.assertEqual(config['release-name'], 'twisted-eggs-1.0')

    def test_releaseNameDefaultsToRpmfileValue(self):
        """
        The release-name option is calculated from rpmfile and set-version.
        """
        config = tap2rpm.MyOptions()
        config.parseOptions([
            "--rpmfile", "beans",
            "--set-version", "1.2.3",
        ])
        self.assertEqual(config['release-name'], 'beans-1.2.3')

    def test_basicOperation(self):
        """
        Calling tap2rpm should produce an RPM and SRPM with default metadata.
        """
        basename = "frenchtoast"
        # Create RPMs based on a TAP file with this name.
        rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(basename))
        # Verify the resulting RPMs have the correct tags.
        d = self._verifyRPMTags(rpm,
            NAME=["twisted-%s" % (basename,)],
            VERSION=["1.0"],
            RELEASE=["1"],
            SUMMARY=["A TCP server for %s" % (basename,)],
            DESCRIPTION=["Automatically created by tap2rpm"],
        )
        d.addCallback(lambda _: self._verifyRPMTags(srpm,
            NAME=["twisted-%s" % (basename,)],
            VERSION=["1.0"],
            RELEASE=["1"],
            SUMMARY=["A TCP server for %s" % (basename,)],
            DESCRIPTION=["Automatically created by tap2rpm"],
        ))
        return d

    def test_protocolOverride(self):
        """
        Setting 'protocol' should change the name of the resulting package.
        """
        basename = "acorn"
        protocol = "banana"
        # Create RPMs based on a TAP file with this name.
        rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(basename),
            protocol=protocol)
        # Verify the resulting RPMs have the correct tags.
        d = self._verifyRPMTags(rpm,
            NAME=["twisted-%s" % (protocol,)],
            SUMMARY=["A TCP server for %s" % (protocol,)],
        )
        d.addCallback(lambda _: self._verifyRPMTags(srpm,
            NAME=["twisted-%s" % (protocol,)],
            SUMMARY=["A TCP server for %s" % (protocol,)],
        ))
        return d

    def test_rpmfileOverride(self):
        """
        Setting 'rpmfile' should change the name of the resulting package.
        """
        basename = "cherry"
        rpmfile = "donut"
        # Create RPMs based on a TAP file with this name.
        rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(basename),
            rpmfile=rpmfile)
        # Verify the resulting RPMs have the correct tags.
        d = self._verifyRPMTags(rpm,
            NAME=[rpmfile],
            SUMMARY=["A TCP server for %s" % (basename,)],
        )
        d.addCallback(lambda _: self._verifyRPMTags(srpm,
            NAME=[rpmfile],
            SUMMARY=["A TCP server for %s" % (basename,)],
        ))
        return d

    def test_descriptionOverride(self):
        """
        Setting 'description' should change the SUMMARY tag.
        """
        description = "eggplant"
        # Create RPMs based on a TAP file with this name.
        rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(),
            description=description)
        # Verify the resulting RPMs have the correct tags.
        d = self._verifyRPMTags(rpm,
            SUMMARY=[description],
        )
        d.addCallback(lambda _: self._verifyRPMTags(srpm,
            SUMMARY=[description],
        ))
        return d

    def test_longDescriptionOverride(self):
        """
        Setting 'longDescription' should change the DESCRIPTION tag.
        """
        longDescription = "fig"
        # Create RPMs based on a TAP file with this name.
        rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(),
            longDescription=longDescription)
        # Verify the resulting RPMs have the correct tags.
        d = self._verifyRPMTags(rpm,
            DESCRIPTION=[longDescription],
        )
        d.addCallback(lambda _: self._verifyRPMTags(srpm,
            DESCRIPTION=[longDescription],
        ))
        return d

    def test_setVersionOverride(self):
        """
        Setting 'setVersion' should change the RPM's version info.
        """
        version = "123.456"
        # Create RPMs based on a TAP file with this name.
        rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(),
            setVersion=version)
        # Verify the resulting RPMs have the correct tags.
        d = self._verifyRPMTags(rpm,
            VERSION=["123.456"],
            RELEASE=["1"],
        )
        d.addCallback(lambda _: self._verifyRPMTags(srpm,
            VERSION=["123.456"],
            RELEASE=["1"],
        ))
        return d

    def test_tapInOtherDirectory(self):
        """
        tap2rpm handles tapfiles outside the current directory.
        """
        # Make a tapfile outside the current directory.
        tempdir = self.mktemp()
        os.mkdir(tempdir)
        tapfile = self._makeTapFile(os.path.join(tempdir, "bacon"))
        # Try and make an RPM from that tapfile.
        _makeRPMs(tapfile=tapfile)

    def test_unsignedFlagDeprecationWarning(self):
        """
        The 'unsigned' flag in tap2rpm should be deprecated, and its use
        should raise a warning as such.
        """
        config = tap2rpm.MyOptions()
        config.parseOptions(['--unsigned'])
        warnings = self.flushWarnings()
        self.assertEqual(DeprecationWarning, warnings[0]['category'])
        self.assertEqual(
            deprecate.getDeprecationWarningString(
                config.opt_unsigned, versions.Version("Twisted", 12, 1, 0)),
            warnings[0]['message'])
        self.assertEqual(1, len(warnings))
| 2.140625 | 2 |
examples/basics/intro-to-streams/intro-to-streams.py | qua-platform/qua-libs | 21 | 12759448 | <reponame>qua-platform/qua-libs<filename>examples/basics/intro-to-streams/intro-to-streams.py
"""
intro_to_integration.py: Demonstrate usage of the integration in the measure statement
Author: <NAME> - Quantum Machines
Created: 31/12/2020
Created on QUA version: 0.6.393
"""
from qm.QuantumMachinesManager import QuantumMachinesManager
from qm.qua import *
from qm.qua import math
from qm import SimulationConfig, LoopbackInterface
from configuration import *
import matplotlib.pyplot as plt
# Open communication with the server.
QMm = QuantumMachinesManager()
# Create a quantum machine based on the configuration.
QM1 = QMm.open_qm(config)
with program() as measureProg:
    ind = declare(int)
    # NOTE(review): `r` is never used -- the loop below constructs a fresh
    # Random() every iteration instead.  Presumably `r.rand_int(10)` was
    # intended; confirm against the QUA documentation before changing.
    r = Random()
    temp = declare(int)
    stream1 = declare_stream()
    stream2 = declare_stream()
    # Save the loop index and a random integer on each of 100 iterations.
    with for_(ind, 0, ind < 100, ind + 1):
        save(ind, stream1)
        assign(temp, Random().rand_int(10))
        save(temp, stream2)
    with stream_processing():
        # Demonstrate the stream operators: raw save, 1-D and 2-D
        # buffering, running averages, mapped averages and zipping.
        stream1.save_all("stream1")
        stream1.buffer(10).save_all("stream2")
        stream1.buffer(10, 10).save_all("2d_buffer")
        stream1.buffer(10).average().save_all("stream2avg")
        stream1.buffer(10).average().save("stream2avg_single")
        stream1.buffer(3).map(FUNCTIONS.average()).save_all("buffer_average")
        stream2.zip(stream1).save_all("zipped_streams")
# Run the program on the loopback simulator and fetch every result handle.
job = QM1.simulate(
    measureProg,
    SimulationConfig(
        4000, simulation_interface=LoopbackInterface([("con1", 1, "con1", 1)])
    ),
)
res = job.result_handles
str1 = res.stream1.fetch_all()
str2 = res.stream2.fetch_all()
str3 = res.stream2avg.fetch_all()
str4 = res.stream2avg_single.fetch_all()
str5 = res.zipped_streams.fetch_all()
str6 = res.buffer_average.fetch_all()
| 2.765625 | 3 |
tests/test_types.py | smartwaivercom/python-sdk | 0 | 12759449 | # Copyright 2017 Smartwaiver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import sys
sys.path.insert(0, '../')
import smartwaiver
import factory
class SmartwaiverCustomFieldTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverCustomField."""

    def test_required_keys(self):
        # Dropping a required key must raise a descriptive ValueError.
        custom_field_data = factory.custom_field()
        custom_field_data.pop('value')
        with self.assertRaises(ValueError) as cm:
            smartwaiver.types.SmartwaiverCustomField(custom_field_data)
        self.assertEqual('Cannot create a SmartwaiverCustomField with missing field: value', str(cm.exception))

    def test_success(self):
        # With complete data every attribute mirrors the input dict.
        custom_field_data = factory.custom_field()
        custom_field = smartwaiver.types.SmartwaiverCustomField(custom_field_data)
        self.assertEqual(custom_field_data['value'], custom_field.value)
        self.assertEqual(custom_field_data['displayText'], custom_field.display_text)
class SmartwaiverGuardianTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverGuardian."""

    def test_required_keys(self):
        """Dropping a required key raises a descriptive ValueError."""
        guardian_data = factory.guardian()
        guardian_data.pop('firstName')
        with self.assertRaises(ValueError) as cm:
            smartwaiver.types.SmartwaiverGuardian(guardian_data)
        self.assertEqual('Cannot create a SmartwaiverGuardian with missing field: firstName', str(cm.exception))

    def test_success(self):
        """With complete data every attribute mirrors the input dict."""
        guardian_data = factory.guardian()
        guardian = smartwaiver.types.SmartwaiverGuardian(guardian_data)
        self.assertEqual(guardian_data['firstName'], guardian.first_name)
        self.assertEqual(guardian_data['middleName'], guardian.middle_name)
        self.assertEqual(guardian_data['lastName'], guardian.last_name)
        self.assertEqual(guardian_data['phone'], guardian.phone)
        self.assertEqual(guardian_data['relationship'], guardian.relationship)
class SmartwaiverParticipantTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverParticipant."""

    def test_required_keys(self):
        # Dropping a required key must raise a descriptive ValueError.
        participant_data = factory.participant()
        participant_data.pop('firstName')
        with self.assertRaises(ValueError) as cm:
            smartwaiver.types.SmartwaiverParticipant(participant_data)
        self.assertEqual('Cannot create a SmartwaiverParticipant with missing field: firstName', str(cm.exception))

    def test_success(self):
        # With complete data every attribute mirrors the input dict.
        participant_data = factory.participant()
        participant = smartwaiver.types.SmartwaiverParticipant(participant_data)
        self.assertEqual(participant_data['firstName'], participant.first_name)
        self.assertEqual(participant_data['middleName'], participant.middle_name)
        self.assertEqual(participant_data['lastName'], participant.last_name)
        self.assertEqual(participant_data['dob'], participant.dob)
        self.assertEqual(participant_data['isMinor'], participant.is_minor)
        self.assertEqual(participant_data['gender'], participant.gender)
        self.assertEqual(participant_data['phone'], participant.phone)
        self.assertEqual(participant_data['tags'], participant.tags)
        # Bug fix: assertTrue(a, b) treats `b` as the failure *message* and
        # passes for any truthy `a`; assertEqual actually compares the
        # two lengths.
        self.assertEqual(len(participant_data['customParticipantFields']),
                         len(participant.custom_participant_fields))
        for guid in participant.custom_participant_fields:
            self.assertIs(type(participant.custom_participant_fields[guid]), smartwaiver.types.SmartwaiverCustomField)
class SmartwaiverTemplateTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverTemplate."""

    def test_required_keys(self):
        # Dropping a required key must raise a descriptive ValueError.
        template_data = factory.template()
        template_data.pop('templateId')
        with self.assertRaises(ValueError) as cm:
            smartwaiver.types.SmartwaiverTemplate(template_data)
        self.assertEqual('Cannot create a SmartwaiverTemplate with missing field: templateId', str(cm.exception))

    def test_success(self):
        # With complete data every attribute mirrors the input dict.
        template_data = factory.template()
        template = smartwaiver.types.SmartwaiverTemplate(template_data)
        self.assertEqual(template_data['templateId'], template.template_id)
        self.assertEqual(template_data['title'], template.title)
        self.assertEqual(template_data['publishedVersion'], template.published_version)
        self.assertEqual(template_data['publishedOn'], template.published_on)
        self.assertEqual(template_data['webUrl'], template.web_url)
        self.assertEqual(template_data['kioskUrl'], template.kiosk_url)
class SmartwaiverTypeTest(unittest.TestCase):
    """Tests for the SmartwaiverType base class's required-key validation."""

    def test_required_keys(self):
        # A key listed as required but absent from the data must raise.
        with self.assertRaises(ValueError) as cm:
            smartwaiver.types.SmartwaiverType({'key1': 'val1'}, ['key1', 'key2'], 'SmartwaiverType')
        self.assertEqual('Cannot create a SmartwaiverType with missing field: key2', str(cm.exception))
class SmartwaiverWaiverSummaryTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverWaiverSummary."""

    def test_required_keys(self):
        # Dropping a required key must raise a descriptive ValueError.
        waiver_summary_data = factory.waiver_summary()
        waiver_summary_data.pop('waiverId')
        with self.assertRaises(ValueError) as cm:
            smartwaiver.types.SmartwaiverWaiverSummary(waiver_summary_data)
        self.assertEqual('Cannot create a SmartwaiverWaiverSummary with missing field: waiverId', str(cm.exception))

    def test_success(self):
        # With complete data every attribute mirrors the input dict.
        waiver_summary_data = factory.waiver_summary()
        waiver_summary = smartwaiver.types.SmartwaiverWaiverSummary(waiver_summary_data)
        self.assertEqual(waiver_summary_data['waiverId'], waiver_summary.waiver_id)
        self.assertEqual(waiver_summary_data['templateId'], waiver_summary.template_id)
        self.assertEqual(waiver_summary_data['title'], waiver_summary.title)
        self.assertEqual(waiver_summary_data['createdOn'], waiver_summary.created_on)
        self.assertEqual(waiver_summary_data['expirationDate'], waiver_summary.expiration_date)
        self.assertEqual(waiver_summary_data['expired'], waiver_summary.expired)
        self.assertEqual(waiver_summary_data['verified'], waiver_summary.verified)
        self.assertEqual(waiver_summary_data['kiosk'], waiver_summary.kiosk)
        self.assertEqual(waiver_summary_data['firstName'], waiver_summary.first_name)
        self.assertEqual(waiver_summary_data['middleName'], waiver_summary.middle_name)
        self.assertEqual(waiver_summary_data['lastName'], waiver_summary.last_name)
        self.assertEqual(waiver_summary_data['dob'], waiver_summary.dob)
        self.assertEqual(waiver_summary_data['isMinor'], waiver_summary.is_minor)
        self.assertEqual(waiver_summary_data['tags'], waiver_summary.tags)
class SmartwaiverWaiverTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverWaiver."""

    def test_required_keys(self):
        # Dropping a required key must raise a descriptive ValueError.
        waiver_data = factory.waiver()
        waiver_data.pop('waiverId')
        with self.assertRaises(ValueError) as cm:
            smartwaiver.types.SmartwaiverWaiver(waiver_data)
        self.assertEqual('Cannot create a SmartwaiverWaiver with missing field: waiverId', str(cm.exception))

    def test_success(self):
        # With complete data every attribute mirrors the input dict.
        waiver_data = factory.waiver()
        waiver = smartwaiver.types.SmartwaiverWaiver(waiver_data)
        self.assertEqual(waiver_data['waiverId'], waiver.waiver_id)
        self.assertEqual(waiver_data['templateId'], waiver.template_id)
        self.assertEqual(waiver_data['title'], waiver.title)
        self.assertEqual(waiver_data['createdOn'], waiver.created_on)
        self.assertEqual(waiver_data['expirationDate'], waiver.expiration_date)
        self.assertEqual(waiver_data['expired'], waiver.expired)
        self.assertEqual(waiver_data['verified'], waiver.verified)
        self.assertEqual(waiver_data['kiosk'], waiver.kiosk)
        self.assertEqual(waiver_data['firstName'], waiver.first_name)
        self.assertEqual(waiver_data['middleName'], waiver.middle_name)
        self.assertEqual(waiver_data['lastName'], waiver.last_name)
        self.assertEqual(waiver_data['dob'], waiver.dob)
        self.assertEqual(waiver_data['isMinor'], waiver.is_minor)
        self.assertEqual(waiver_data['tags'], waiver.tags)
        # Bug fix: assertTrue(a, b) treats `b` as a failure *message* and
        # passes for any truthy `a`; assertEqual actually compares lengths.
        self.assertEqual(len(waiver_data['participants']), len(waiver.participants))
        for participant in waiver.participants:
            self.assertIs(type(participant), smartwaiver.types.SmartwaiverParticipant)
        self.assertEqual(waiver_data['email'], waiver.email)
        self.assertEqual(waiver_data['marketingAllowed'], waiver.marketing_allowed)
        self.assertEqual(waiver_data['addressLineOne'], waiver.address_line_one)
        self.assertEqual(waiver_data['addressLineTwo'], waiver.address_line_two)
        self.assertEqual(waiver_data['addressCity'], waiver.address_city)
        self.assertEqual(waiver_data['addressState'], waiver.address_state)
        self.assertEqual(waiver_data['addressZip'], waiver.address_zip)
        self.assertEqual(waiver_data['addressCountry'], waiver.address_country)
        self.assertEqual(waiver_data['emergencyContactName'], waiver.emergency_contact_name)
        self.assertEqual(waiver_data['emergencyContactPhone'], waiver.emergency_contact_phone)
        self.assertEqual(waiver_data['insuranceCarrier'], waiver.insurance_carrier)
        self.assertEqual(waiver_data['insurancePolicyNumber'], waiver.insurance_policy_number)
        self.assertEqual(waiver_data['driversLicenseNumber'], waiver.drivers_license_number)
        self.assertEqual(waiver_data['driversLicenseState'], waiver.drivers_license_state)
        # Bug fix: same assertTrue -> assertEqual correction as above.
        self.assertEqual(len(waiver_data['customWaiverFields']), len(waiver.custom_waiver_fields))
        for guid in waiver.custom_waiver_fields:
            self.assertIs(type(waiver.custom_waiver_fields[guid]), smartwaiver.types.SmartwaiverCustomField)
        self.assertIs(type(waiver.guardian), smartwaiver.types.SmartwaiverGuardian)
        self.assertEqual(waiver_data['pdf'], waiver.pdf)

    def test_participant_not_list(self):
        # A non-list participants field must raise a descriptive ValueError.
        waiver_data = factory.waiver()
        waiver_data['participants'] = ''
        with self.assertRaises(ValueError) as cm:
            smartwaiver.types.SmartwaiverWaiver(waiver_data)
        self.assertEqual('Participants field must be a list', str(cm.exception))


if __name__ == "__main__":
    unittest.main()
| 2.109375 | 2 |
nets/VGG16.py | vangiel/sncnn | 0 | 12759450 | <filename>nets/VGG16.py<gh_stars>0
from torchvision.models import vgg16
# Instantiate a torchvision VGG-16 (no pretrained flag is passed, so the
# torchvision default applies -- presumably random weights; confirm against
# the installed torchvision version).
vgg16_model = vgg16()
# for child in vgg16_model.children():
#     print(child)
# Exploratory: collect the names of all callable attributes of the model.
# The result is computed but never used.
object_methods = [method_name for method_name in dir(vgg16_model)
                  if callable(getattr(vgg16_model, method_name))]
# print(object_methods)
# Re-export the model under the name other modules import from this file.
VGG16 = vgg16_model
# vgg16_model.classifier = vgg16_model.classifier[:-1]
# print(vgg16_model.classifier)
| 2.359375 | 2 |