hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
459b244b831dbaca46e8bc6cd6ce8c182a965598 | 75 | py | Python | icls/models/resnet/resnet_test.py | TaikiInoue/iClassification | 7045c7721c9ee86e3823b13722d22b6c0095b76a | [
"MIT"
] | null | null | null | icls/models/resnet/resnet_test.py | TaikiInoue/iClassification | 7045c7721c9ee86e3823b13722d22b6c0095b76a | [
"MIT"
] | null | null | null | icls/models/resnet/resnet_test.py | TaikiInoue/iClassification | 7045c7721c9ee86e3823b13722d22b6c0095b76a | [
"MIT"
] | null | null | null | from torchvision.models import resnet50
# Instantiate ResNet-50 with ImageNet weights (downloaded on first use).
# NOTE(review): `pretrained=` is deprecated in newer torchvision in favor
# of `weights=ResNet50_Weights...` -- confirm target torchvision version.
model = resnet50(pretrained=True)
| 18.75 | 39 | 0.826667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
459c03c8888c050fd9ced363ad52f606db8a1218 | 2,328 | py | Python | core/templatetags/core_menu.py | baxtea/pipeline | 0cbdd4b3ee6d05611e1d3ed5a4936f597182e963 | [
"MIT"
] | 18 | 2018-11-14T21:52:33.000Z | 2022-03-23T08:10:54.000Z | core/templatetags/core_menu.py | baxtea/pipeline | 0cbdd4b3ee6d05611e1d3ed5a4936f597182e963 | [
"MIT"
] | 83 | 2018-09-21T16:08:51.000Z | 2022-03-27T20:31:05.000Z | core/templatetags/core_menu.py | baxtea/pipeline | 0cbdd4b3ee6d05611e1d3ed5a4936f597182e963 | [
"MIT"
] | 6 | 2019-09-27T20:50:54.000Z | 2020-06-12T14:07:38.000Z | from django import template
from core.models import (
ArticlesIndexPage,
ArticlePage,
StaffPage,
CandidatePage,
ElectionIndexPage,
)
from home.models import HomePage
register = template.Library()
@register.simple_tag(takes_context=True)
def get_site_root(context):
    """Return the root page of the site handling the current request.

    NB: the value is a generic core.Page, not the implementation-specific
    model, so comparing it to `self` with == is False even for the same
    page, as the objects differ.
    """
    request = context["request"]
    return request.site.root_page
# Retrieves the top menu items - the immediate children of the parent page
# The has_menu_children method is necessary because the bootstrap menu requires
# a dropdown class to be applied to a parent
@register.inclusion_tag("core/tags/top_menu.html", takes_context=True)
def top_menu(context, parent, calling_page=None):
    """Build the context for the top navigation menu.

    Lists the live, in-menu children of *parent*, marks the item whose path
    prefixes the calling page as active, and computes where to place a
    divider between section links and static pages.
    """
    divider_index = None
    menuitems = parent.get_children().live().in_menu()
    for i in range(len(menuitems)):
        menuitem = menuitems[i]
        # menuitem.show_dropdown = has_menu_children(menuitem)
        # We don't directly check if calling_page is None since the template
        # engine can pass an empty string to calling_page
        # if the variable passed as calling_page does not exist.
        menuitem.active = (
            calling_page.path.startswith(menuitem.path) if calling_page else False
        )
        # Add a divider after the links to sections and before the static pages.
        # NOTE(review): if the very first menu item is not an ArticlesIndexPage
        # this sets divider_index to -1 -- confirm the template tolerates that.
        if (
            divider_index is None
            and len(menuitems) > 1
            and not isinstance(menuitem.specific, ArticlesIndexPage)
        ):
            divider_index = i - 1
    return {
        "calling_page": calling_page,
        "menuitems": menuitems,
        "divider_index": divider_index,
        "is_home": isinstance(calling_page, HomePage),
        # required by the pageurl tag that we want to use within this template
        "request": context["request"],
    }
@register.inclusion_tag("core/tags/bottom_menu.html")
def bottom_menu():
    """Render the footer menu: every live, public descendant of the home
    page except article/staff/candidate/election pages, ordered by title."""
    try:
        home = HomePage.objects.get()
    except HomePage.DoesNotExist:
        # No home page yet (e.g. fresh install): render an empty menu.
        return {"pages": []}
    excluded = (ArticlePage, StaffPage, CandidatePage, ElectionIndexPage)
    descendants = home.get_descendants().live().public()
    return {"pages": descendants.not_type(excluded).order_by("title")}
| 32.788732 | 82 | 0.676117 | 0 | 0 | 0 | 0 | 1,901 | 0.816581 | 0 | 0 | 861 | 0.369845 |
459d4f106f5448c3e38cc2b536e47e739b25817d | 11,195 | py | Python | onmt/modules/UniversalTransformer/Layers.py | esalesky/NMTGMinor | b6eafff21f5aabb874720e6df30cd6b91c339a7c | [
"MIT"
] | 5 | 2020-05-25T01:11:51.000Z | 2021-02-18T08:55:43.000Z | onmt/modules/UniversalTransformer/Layers.py | esalesky/NMTGMinor | b6eafff21f5aabb874720e6df30cd6b91c339a7c | [
"MIT"
] | null | null | null | onmt/modules/UniversalTransformer/Layers.py | esalesky/NMTGMinor | b6eafff21f5aabb874720e6df30cd6b91c339a7c | [
"MIT"
] | 1 | 2020-12-13T07:41:33.000Z | 2020-12-13T07:41:33.000Z | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.Bottle import Bottle
from onmt.modules.Transformer.Layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.modules.StaticDropout import StaticDropout
Linear=XavierLinear
def contiguous(tensor):
    """Return a contiguous version of *tensor*.

    ``Tensor.contiguous()`` already returns ``self`` unchanged when the
    tensor is contiguous, so the explicit ``is_contiguous()`` branch in the
    original implementation was redundant.
    """
    return tensor.contiguous()
class UniversalEncoderLayer(nn.Module):
    """Wraps multi-head attention and position-wise feed forward into one encoder layer

    Args:
        h: number of heads
        d_model: dimension of model
        p: dropout probability
        d_ff: dimension of feed forward

        pos_encoder: adds embedding based on position
        time_encoder: adds embedding based on time (the recurrence loop)

    Params:
        multihead: multi-head attention layer
        feedforward: feed forward layer

    Input Shapes:
        query: batch_size x len_query x d_model
        key: batch_size x len_key x d_model
        value: batch_size x len_key x d_model
        mask: batch_size x len_query x len_key or broadcastable

    Output Shapes:
        out: batch_size x len_query x d_model
    """

    def __init__(self, h, d_model, p, d_ff, pos_encoder, time_encoder, attn_p=0.1, version=1.0):
        super(UniversalEncoderLayer, self).__init__()
        self.version = version

        # position and time embedding is added into the input before the layer
        self.pos_encoder = pos_encoder
        self.time_encoder = time_encoder

        # sequence='n' applies layer norm before the sub-layer;
        # sequence='da' applies dropout then a residual add after it.
        self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.Constants.static)
        self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=onmt.Constants.static)
        self.multihead = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.Constants.static)

        if onmt.Constants.activation_layer == 'linear_relu_linear':
            ff_p = p
            feedforward = FeedForward(d_model, d_ff, ff_p)
        elif onmt.Constants.activation_layer == 'maxout':
            k = int(math.ceil(d_ff / d_model))
            # NOTE(review): MaxOut is not imported in this module, so this
            # branch would raise NameError if ever selected -- confirm.
            feedforward = MaxOut(d_model, d_model, k)
        self.feedforward = Bottle(feedforward)

    def forward(self, input, attn_mask, t, pad_mask=None):
        # apply layer normalization
        query = self.preprocess_attn(input)

        # add position encoding and time encoding
        query = self.pos_encoder(query) + self.time_encoder(t)

        # Self-attention sub-layer: attn > dropout > residual
        out, _ = self.multihead(query, query, query, attn_mask,
                                query_mask=pad_mask, value_mask=pad_mask)
        input = self.postprocess_attn(out, input, mask=pad_mask)

        """ Feed forward layer
            layernorm > ffn > dropout > residual
        """
        out = self.feedforward(self.preprocess_ffn(input),
                               mask=pad_mask)
        input = self.postprocess_ffn(out, input)

        return input
class UniversalDecoderLayer(nn.Module):
    """Wraps multi-head attention and position-wise feed forward into one layer of decoder

    Args:
        h: number of heads
        d_model: dimension of model
        p: dropout probability
        d_ff: dimension of feed forward

    Params:
        multihead_tgt: multi-head self attention layer
        multihead_src: multi-head encoder-decoder attention layer
        feedforward: feed forward layer

    Input Shapes:
        query: batch_size x len_query x d_model
        key: batch_size x len_key x d_model
        value: batch_size x len_key x d_model
        context: batch_size x len_src x d_model
        mask_tgt: batch_size x len_query x len_key or broadcastable
        mask_src: batch_size x len_query x len_src or broadcastable

    Output Shapes:
        out: batch_size x len_query x d_model
        coverage: batch_size x len_query x len_key
    """

    def __init__(self, h, d_model, p, d_ff, position_encoder, time_encoder, attn_p=0.1, version=1.0):
        super(UniversalDecoderLayer, self).__init__()
        self.version = version

        # position and time embeddings are added to the sub-layer inputs
        self.position_encoder = position_encoder
        self.time_encoder = time_encoder

        # sequence='n' applies layer norm before each sub-layer;
        # sequence='da' applies dropout then a residual add after it.
        self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.Constants.static)

        self.preprocess_src_attn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_src_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.Constants.static)

        self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=onmt.Constants.static)

        self.multihead_tgt = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.Constants.static)
        self.multihead_src = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.Constants.static)

        if onmt.Constants.activation_layer == 'linear_relu_linear':
            ff_p = p
            feedforward = FeedForward(d_model, d_ff, ff_p, static=onmt.Constants.static)
        elif onmt.Constants.activation_layer == 'maxout':
            k = int(math.ceil(d_ff / d_model))
            # NOTE(review): MaxOut is not imported in this module, so this
            # branch would raise NameError if ever selected -- confirm.
            feedforward = MaxOut(d_model, d_model, k)
        self.feedforward = Bottle(feedforward)

    def forward(self, input, context, t, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None):
        """ Self attention layer
            layernorm > attn > dropout > residual
        """
        #~ print(input.size())
        #~ print(context.size())
        #~ print(pad_mask_tgt.size())

        query = self.preprocess_attn(input)

        # add position encoding and time encoding
        query = self.position_encoder(query) + self.time_encoder(t)
        self_context = query

        out, _ = self.multihead_tgt(query, self_context, self_context, mask_tgt,
                                    query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
        input = self.postprocess_attn(out, input)

        """ Context Attention layer
            layernorm > attn > dropout > residual
        """
        query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
        out, coverage = self.multihead_src(query, context, context, mask_src,
                                           query_mask=pad_mask_tgt, value_mask=pad_mask_src)
        input = self.postprocess_src_attn(out, input)

        """ Feed forward layer
            layernorm > ffn > dropout > residual
        """
        out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
                               mask=pad_mask_tgt)
        input = self.postprocess_ffn(out, input)

        return input, coverage

    def step(self, input, context, pos_step, t, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None, buffer=None):
        """ Self attention layer (single incremental decoding step)
            layernorm > attn > dropout > residual
        """
        query = self.preprocess_attn(input, mask=pad_mask_tgt)

        # add position encoding and time encoding (before the buffer because the previous steps are already added)
        query = self.position_encoder(query, t=pos_step) + self.time_encoder(t)

        # Keep every previous query in `buffer` so self-attention can attend
        # over the full generated prefix.
        if buffer is not None:
            buffer = torch.cat([buffer, query], dim=1)
        else:
            buffer = query

        out, _ = self.multihead_tgt(query, buffer, buffer, mask_tgt,
                                    query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
        input = self.postprocess_attn(out, input)

        """ Context Attention layer
            layernorm > attn > dropout > residual
        """
        query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
        out, coverage = self.multihead_src(query, context, context, mask_src,
                                           query_mask=pad_mask_tgt, value_mask=None)
        input = self.postprocess_src_attn(out, input)

        """ Feed forward layer
            layernorm > ffn > dropout > residual
        """
        out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
                               mask=pad_mask_tgt)
        input = self.postprocess_ffn(out, input)

        return input, coverage, buffer
class TimeEncoding(nn.Module):
    """Sinusoidal embedding of the recurrence step ("time") of a universal
    transformer.

    Mirrors the positional-encoding scheme of the TensorFlow tensor2tensor
    implementation (tensor2tensor/layers/common_attention.py), but indexed
    by loop iteration instead of token position.

    Args:
        d_model: model dimension (size of each embedding vector)
        p: dropout probability (stored, not applied here)
        len_max: number of steps for which embeddings are precomputed

    Inputs:
        t: integer step index
    Outputs:
        tensor of shape 1 x d_model, shared by all positions and all
        batch elements
    """

    def __init__(self, d_model, p=0, len_max=64):
        # Precompute the embedding table once up to len_max so it does not
        # have to be recreated on every forward call.
        super(TimeEncoding, self).__init__()
        self.len_max = len_max
        self.d_model = d_model
        self.renew(len_max)
        self.p = p

    def renew(self, new_max_len):
        """(Re)build the precomputed table for steps 0..new_max_len-1."""
        # Drop any previous buffer first: register_buffer raises if the
        # attribute already exists.
        if hasattr(self, 'time_emb'):
            del self.time_emb
        half = self.d_model // 2
        increment = math.log(10000) / (half - 1)
        frequencies = torch.exp(torch.arange(0, half).float() * -increment)
        steps = torch.arange(0, new_max_len).float()
        angles = steps.unsqueeze(1) * frequencies.unsqueeze(0)
        table = torch.cat((torch.sin(angles), torch.cos(angles)), 1)
        # A buffer (not a Parameter): moves with .cuda()/.to() but is not trained.
        self.register_buffer('time_emb', table)

    def forward(self, t):
        # Every position and every batch element shares the step embedding,
        # hence the 1 x d_model result.
        row = Variable(self.time_emb[t, :], requires_grad=False)
        return row.unsqueeze(0)
| 38.737024 | 156 | 0.617329 | 10,624 | 0.948995 | 0 | 0 | 0 | 0 | 0 | 0 | 3,841 | 0.3431 |
459e672376134fe4df8011810380fe61fed6944e | 1,530 | py | Python | upload_folder_to_root.py | cbhramar/Google-Drive-APIs-in-Python | 72b25721a88aa14d75ab529011181b7cee80e5e2 | [
"MIT"
] | null | null | null | upload_folder_to_root.py | cbhramar/Google-Drive-APIs-in-Python | 72b25721a88aa14d75ab529011181b7cee80e5e2 | [
"MIT"
] | null | null | null | upload_folder_to_root.py | cbhramar/Google-Drive-APIs-in-Python | 72b25721a88aa14d75ab529011181b7cee80e5e2 | [
"MIT"
] | null | null | null | from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from oauth2client.client import OAuth2Credentials
import json
import sys
import os
LOCAL_FOLDER_NAME='h5downloads'
def load_saved_credentials():
    """Read token.json and adapt it to the oauth2client credential format.

    Maps the stored field names (token/expiry) onto the names oauth2client
    expects (access_token/token_expiry) and fills the remaining mandatory
    fields with placeholder values. Returns the result as a JSON string.
    """
    with open('token.json', 'r') as cred_file:
        creds = json.load(cred_file)
    creds.update({
        'access_token': creds['token'],
        'token_expiry': creds['expiry'],
        'user_agent': 'nil',
        'invalid': 'nil',
    })
    return json.dumps(creds)
def get_authenticated_service():
    """Build a GoogleDrive client authenticated with the saved credentials.

    Returns:
        GoogleDrive: a pydrive client ready to issue Drive API calls.
    """
    gauth = GoogleAuth()
    # Reuse the token stored in token.json instead of running an OAuth flow.
    gauth.credentials = OAuth2Credentials.from_json(load_saved_credentials())
    drive = GoogleDrive(gauth)
    return drive
def upload_folder_to_root(local_folder=LOCAL_FOLDER_NAME):
    """Create *local_folder* at the root of Google Drive and upload every
    file it contains.

    Args:
        local_folder: name of a directory under the current working
            directory; also used as the title of the new Drive folder.
    """
    drive = get_authenticated_service()
    drive_folder = drive.CreateFile({'title': local_folder,
                                     'mimeType': 'application/vnd.google-apps.folder'})
    drive_folder.Upload()
    # Build absolute paths instead of os.chdir(): chdir mutated process-wide
    # state and made a second call resolve the folder relative to the wrong
    # directory.
    folder_path = os.path.join(os.getcwd(), local_folder)
    for filename in os.listdir(folder_path):
        drive_file = drive.CreateFile({'parents': [{'id': drive_folder['id']}]})
        # SetContentFile derives the Drive title from the file's basename.
        drive_file.SetContentFile(os.path.join(folder_path, filename))
        drive_file.Upload()
        print('Uploaded ' + filename)
    print('Created folder %s in Drive' % (local_folder))
if __name__ == '__main__':
    # CLI: an optional single argument overrides the default local folder name.
    if len(sys.argv) == 1:
        upload_folder_to_root()
    elif len(sys.argv) == 2:
        upload_folder_to_root(local_folder=sys.argv[1])
    else:
        print('Wrong number of arguments')
        print('Usage -> python upload_folder_to_root.py <local_folder>')
| 31.875 | 77 | 0.688889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 308 | 0.201307 |
459f50da0f5e27af8527dfe49afeda2568f40a0c | 2,935 | py | Python | Server/app.py | AkashSasank/Covid-19-X-ray-scanner | dc4e4ab8258a40c7209a120cb75c51b3ad68d145 | [
"MIT"
] | 1 | 2020-12-31T06:34:08.000Z | 2020-12-31T06:34:08.000Z | Server/app.py | AkashSasank/Covid-19-X-ray-scanner | dc4e4ab8258a40c7209a120cb75c51b3ad68d145 | [
"MIT"
] | null | null | null | Server/app.py | AkashSasank/Covid-19-X-ray-scanner | dc4e4ab8258a40c7209a120cb75c51b3ad68d145 | [
"MIT"
] | null | null | null | import os
from flask import Flask, render_template, request, redirect, url_for, \
make_response # These are all we need for our purposes
from flask_cors import CORS
import tensorflow as tf
from keras.preprocessing.image import load_img, img_to_array
from werkzeug.utils import secure_filename
from ML.utils import predict_covid, predict_xray
from Server.urls import urls
from Server.log import Logger
app = Flask(__name__)
CORS(app)  # allow cross-origin requests to the API

# Uploaded X-ray images are stored here temporarily during prediction.
UPLOAD_FOLDER = os.path.join(os.getcwd(), 'uploads')
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
urls['error_text'] = ''  # template placeholder, set per-request by the views
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route("/")
def index():
    # Landing page; `urls` supplies the navigation links for the template.
    return render_template("home.html", **urls)
@app.route("/x-ray-test")
def form():
    """Render the X-ray upload form; ?again=1 shows a retry message."""
    trial = request.args.get('again')
    if trial == '1':
        urls['error_text'] = 'Try again with a valid image.'
    else:
        urls['error_text'] = ''
    response = make_response(render_template("form.html", **urls))
    # Disable caching so the retry message is always current.
    response.headers.add('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0')
    return response
@app.route("/test-result", methods=['POST'])
def predict():
    """Handle an uploaded chest X-ray and render the COVID-19 diagnosis.

    Pipeline: save the upload, load it as a 200x200 array scaled to [0, 1],
    verify it is an X-ray with one model, then classify it with the COVID
    model. An unconvincing image redirects back to the form; any other
    failure is logged and redirects to the error page.
    """
    try:
        image_file = request.files.get('x-ray', None)
        if image_file and allowed_file(image_file.filename):
            filename = secure_filename(image_file.filename)
            file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            image_file.save(file_path)
            img = img_to_array(load_img(file_path, target_size=(200, 200))) / 255.0
            os.remove(file_path)  # image is only needed in memory from here on
            img = tf.expand_dims(img, axis=0)  # add batch dimension
            img_type = predict_xray(img, model_path='../ML/best_xray_identifier')
            # Check classification accuracy for xray_identifier
            # NOTE(review): assumes predict_xray/predict_covid return a triple
            # of (class ids, labels, scores in [0, 1]) -- confirm in ML.utils.
            if img_type[2][0] * 100 > 95 and img_type[0][0] == 1:
                output = predict_covid(img, covid_model_path='../ML/best_covid_classifier')
                accuracy = output[2][0] * 100
                category = output[1][0]
                args = {'diagnosis': category, 'accuracy': str(accuracy) + '%'}
                for i in urls.items():
                    args[i[0]] = i[1]
                response = make_response(render_template("result.html", **args))
                response.headers.add('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0')
                return response
            else:
                # Not confidently an X-ray: ask the user to retry.
                return redirect(url_for('form', **{'again': '1'}))
        else:
            return redirect(url_for('error_500'))
    except Exception as e:
        Logger.get_logger().exception(e)
        return redirect(url_for('error_500'))
@app.route("/error500")
def error_500():
    # Generic error page used as the fallback redirect target.
    return render_template("error500.html", code=500)


if __name__ == "__main__":
    app.run(debug=True)  # development server only
| 33.352273 | 119 | 0.632368 | 0 | 0 | 0 | 0 | 2,130 | 0.725724 | 0 | 0 | 624 | 0.212606 |
459f5157ab1931fdcbdbea8249e936a2e4eee079 | 332 | py | Python | _fred-v1/fred/endpoints/shutdown.py | elviva404/frontend-regression-validator | 21df2a127712bdf0688dc9aedf478c6a2a90a3c3 | [
"ECL-2.0",
"Apache-2.0"
] | 70 | 2019-09-16T13:30:49.000Z | 2022-02-25T17:46:23.000Z | _fred-v1/fred/endpoints/shutdown.py | elviva404/frontend-regression-validator | 21df2a127712bdf0688dc9aedf478c6a2a90a3c3 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-01-13T09:15:47.000Z | 2020-07-29T11:49:25.000Z | _fred-v1/fred/endpoints/shutdown.py | elviva404/frontend-regression-validator | 21df2a127712bdf0688dc9aedf478c6a2a90a3c3 | [
"ECL-2.0",
"Apache-2.0"
] | 10 | 2019-10-06T08:22:05.000Z | 2022-02-03T18:45:08.000Z | from flask_restful import Resource
from flask import request
class Shutdown(Resource):
    """GET endpoint that stops the development server."""

    def get(self):
        # Werkzeug exposes its shutdown hook through the WSGI environ.
        stop_server = request.environ.get('werkzeug.server.shutdown')
        if stop_server is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        stop_server()
        return 'Server shutting down'
| 27.666667 | 70 | 0.683735 | 268 | 0.807229 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.259036 |
459fe7e7898bb17af2c0a9177f991a1f10c9ca46 | 1,555 | py | Python | module/object/sql.py | arvin-chou/mc | b82305a4a91fe6150caa5423205a0798f3815724 | [
"MIT"
] | null | null | null | module/object/sql.py | arvin-chou/mc | b82305a4a91fe6150caa5423205a0798f3815724 | [
"MIT"
] | null | null | null | module/object/sql.py | arvin-chou/mc | b82305a4a91fe6150caa5423205a0798f3815724 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from sqlalchemy import Table, Column, Integer, String, MetaData, \
ForeignKey, DateTime, UniqueConstraint
from config.config import _logging, metadata
from .model import ObjectsIpaddrs, ObjectsIpgroups
from .__init__ import __objects_ipaddrs_ipgroups_tablename__, \
__objects_ipaddrs_tablename__, __objects_ipgroups_tablename__
logger = _logging.getLogger(__name__)
# IP address objects: a single address, a range, or a subnet, in either
# IP version.
SchemaObjectsIpaddrs = Table(__objects_ipaddrs_tablename__, metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String(64), unique=True, nullable=False),
    Column('type', String(6)),  # tuple (Single, Range, Subnet)
    Column('ipVersion', String(4)),  # tuple (IPv4, IPv6)
    Column('addr1', String(46)),  # INET6_ADDRSTRLEN to be 46
    Column('addr2', String(46)),  # INET6_ADDRSTRLEN to be 46
    Column('description', String(255)),
    UniqueConstraint('name')
)

# Named groups of IP address objects.
SchemaObjectsIpgroups = Table(__objects_ipgroups_tablename__, metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String(64), unique=True, nullable=False),
    Column('description', String(255)),
    UniqueConstraint('name')
)

# Many-to-many link between addresses and groups; link rows follow their
# parents on update/delete via CASCADE.
SchemaObjectsIpaddrsIpgroups = Table(__objects_ipaddrs_ipgroups_tablename__, metadata,
    Column('id', Integer, primary_key=True),
    Column('ipaddr_id', None, ForeignKey(ObjectsIpaddrs.id,
        onupdate="CASCADE", ondelete="CASCADE")),
    Column('ipgroup_id', None, ForeignKey(ObjectsIpgroups.id,
        onupdate="CASCADE", ondelete="CASCADE")),
    UniqueConstraint('ipaddr_id', 'ipgroup_id')
)
| 38.875 | 86 | 0.730547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.194855 |
45a076569b4ca7968147de86328747e7fa0b3846 | 1,286 | py | Python | instaclient/instagram/postmedia.py | pthalin/instaclient | 7800c641db3b5f81d0b171b6c131752502023fc5 | [
"MIT"
] | null | null | null | instaclient/instagram/postmedia.py | pthalin/instaclient | 7800c641db3b5f81d0b171b6c131752502023fc5 | [
"MIT"
] | null | null | null | instaclient/instagram/postmedia.py | pthalin/instaclient | 7800c641db3b5f81d0b171b6c131752502023fc5 | [
"MIT"
] | null | null | null | from typing import Optional, List, TYPE_CHECKING
if TYPE_CHECKING:
from instaclient.client.instaclient import InstaClient
from instaclient.instagram.instaobject import InstaBaseObject
class PostMedia(InstaBaseObject):
    """One media item (image or video) belonging to an Instagram post.

    Identity is defined by ``shortcode``: two PostMedia objects compare
    equal iff their shortcodes match, and hashing uses the same key.
    """

    def __init__(self,
                 client: 'InstaClient',
                 id: int,
                 type: str,
                 viewer: str,
                 shortcode: str,
                 src_url: str,
                 is_video: bool,
                 accessibility_caption: Optional[str] = None,
                 tagged_users: Optional[List[str]] = None,
                 # If Media is Video
                 has_audio: Optional[bool] = None,
                 video_duration: Optional[float] = None,
                 video_view_count: Optional[int] = None,
                 **kwargs
                 ) -> None:
        super().__init__(client, id, type, viewer)
        self.shortcode = shortcode
        self.src_url = src_url
        self.is_video = is_video
        self.accessibility_caption = accessibility_caption
        self.tagged_users = tagged_users
        # IF Media is Video
        self.has_audio = has_audio
        self.video_duration = video_duration
        self.video_view_count = video_view_count

    def __repr__(self) -> str:
        return f'PostMedia<{self.shortcode}>'

    def __eq__(self, o: object) -> bool:
        # Return NotImplemented (not False) for foreign types so Python can
        # try the reflected comparison; `==` still evaluates to False.
        if isinstance(o, PostMedia):
            return o.shortcode == self.shortcode
        return NotImplemented

    def __hash__(self) -> int:
        # Defining __eq__ alone would make instances unhashable (Python sets
        # __hash__ to None); hash on the same key used for equality.
        return hash(self.shortcode)
45a1215c7fb379ec9d3e7cd1d0925e710f781704 | 1,117 | py | Python | source/server/annotation/migrations/0002_auto_20200622_0656.py | shizacat/shanno | e370dc1bf8a884d8ee5538b702b39275751e5f5d | [
"MIT"
] | 1 | 2020-08-27T12:48:47.000Z | 2020-08-27T12:48:47.000Z | source/server/annotation/migrations/0002_auto_20200622_0656.py | shizacat/shanno | e370dc1bf8a884d8ee5538b702b39275751e5f5d | [
"MIT"
] | 5 | 2021-03-30T12:56:24.000Z | 2021-06-27T17:42:28.000Z | source/server/annotation/migrations/0002_auto_20200622_0656.py | shizacat/shanno | e370dc1bf8a884d8ee5538b702b39275751e5f5d | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-06-22 06:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Widens Projects.type to the new choice set and introduces the
    # DCDocLabel document<->label link table for document classification.

    dependencies = [
        ('annotation', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='projects',
            name='type',
            field=models.CharField(choices=[('text_label', 'Text Labeling'), ('document_classificaton', 'Document classification')], max_length=50),
        ),
        migrations.CreateModel(
            name='DCDocLabel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dl_doc', to='annotation.Documents')),
                ('label', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dl_label', to='annotation.TlLabels')),
            ],
            options={
                # Each (document, label) pair may be assigned at most once.
                'unique_together': {('document', 'label')},
            },
        ),
    ]
| 36.032258 | 148 | 0.605192 | 991 | 0.887198 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.265891 |
45a398c3801e0cae9986d9b12a66be8c29607231 | 439 | py | Python | setup.py | GiorgioBalestrieri/renewables-ninja-client | 5c068f07e0e8e972f8802fd0b3ed28466bbf8c23 | [
"MIT"
] | 1 | 2020-05-27T14:15:00.000Z | 2020-05-27T14:15:00.000Z | setup.py | GiorgioBalestrieri/renewables-ninja-client | 5c068f07e0e8e972f8802fd0b3ed28466bbf8c23 | [
"MIT"
] | 1 | 2020-05-27T14:27:29.000Z | 2020-05-27T14:27:29.000Z | setup.py | GiorgioBalestrieri/renewables-ninja-client | 5c068f07e0e8e972f8802fd0b3ed28466bbf8c23 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
    name = "renewables_ninja_client",
    version = "0.1.0",
    description = ("Client for Renewables Ninja API."),
    author = ["Giorgio Balestrieri"],
    # Ship only the package code; exclude docs/tests/scratch directories.
    packages = find_packages(exclude=[
        "docs", "tests", "examples",
        "sandbox", "scripts"]),
    install_requires=[
        "pandas",
        "numpy",
        "requests",
        # typing backport is only needed on Python < 3.7
        'typing;python_version<"3.7"'],
)
45a434893b809e66ad61ea906d250e906d94a42d | 5,446 | py | Python | pysigep/correios/__init__.py | primeschool-it/trustcode-pysigep | 45f845cce385e3682e897948af8f98e19eec696d | [
"MIT"
] | null | null | null | pysigep/correios/__init__.py | primeschool-it/trustcode-pysigep | 45f845cce385e3682e897948af8f98e19eec696d | [
"MIT"
] | null | null | null | pysigep/correios/__init__.py | primeschool-it/trustcode-pysigep | 45f845cce385e3682e897948af8f98e19eec696d | [
"MIT"
] | 1 | 2021-12-17T10:30:52.000Z | 2021-12-17T10:30:52.000Z | # -*- coding: utf-8 -*-
# © 2016 Alessandro Fernandes Martini, Trustcode
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# #############################################################################
#
# Brazillian Carrier Correios Sigep WEB
# Copyright (C) 2015 KMEE (http://www.kmee.com.br)
# @author: Michell Stuttgart <michell.stuttgart@kmee.com.br>
# @author: Rodolfo Bertozo <rodolfo.bertozo@kmee.com.br>
# Sponsored by Europestar www.europestar.com.br
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from pysigep import send, _url
from pysigep.utils import _valida
import base64
import io
from PIL import Image, ImageDraw, ImageFont
from io import StringIO
import textwrap
import os
BASE_DIR = os.path.dirname(__file__)
_TTF_ARIAL = os.path.join(BASE_DIR, 'data/fonts/arial.ttf')
_TTF_ARIAL_N = os.path.join(BASE_DIR, 'data/fonts/arial_negrito.ttf')
def calcular_preco_prazo(**kwargs):
    """Query the Correios web service for shipping price and delivery time.

    Takes the CalcPrecoPrazo request fields as keyword arguments
    (nCdEmpresa, sDsSenha, nCdServico, sCepOrigem, sCepDestino, nVlPeso,
    dimensions, ...). An optional ``ambiente`` keyword selects the service
    environment (defaults to 1).

    Returns the parsed ``CalcPrecoPrazoResponse`` element.
    """
    api = 'CalcularFretePrazo'
    template = 'CalcularPrecoPrazo.xml'
    _valida('calcular_preco_prazo', api, kwargs)
    ambiente = kwargs.get('ambiente', 1)
    endpoint = _url(ambiente, api)
    return send(template, 'CalcPrecoPrazoResponse', api, endpoint,
                soap_action='http://tempuri.org/CalcPrecoPrazo', **kwargs)
def get_eventos(**kwargs):
    """Fetch SRO tracking events for a list of postal objects.

    Expects ``usuario``, ``senha`` and ``objetos`` keyword arguments; an
    optional ``ambiente`` keyword selects the service environment
    (defaults to 1).

    Returns the parsed ``buscaEventosListaResponse`` element.
    """
    api = 'BuscaEventos'
    template = 'BuscaEventos.xml'
    _valida('get_eventos', api, kwargs)
    ambiente = kwargs.get('ambiente', 1)
    endpoint = _url(ambiente, api)
    return send(template, 'buscaEventosListaResponse', api, endpoint,
                soap_action='eventos', **kwargs)
def sign_chancela(chancela, usuario_correios):
    """Stamp the contract data and company name onto a chancela image.

    :params:
        chancela: imagem da chancela, codificada em base64
        usuario_correios: {'contrato': idContrato,
                           'nome': nome da empresa,
                           'ano_assinatura': ano de assinatura,
                           'origem': sigla do estado de origem,
                           'postagem': sigla de estado de destino,}
    :return:
        imagem em base64 (PNG, padded to a square canvas)
    """
    # Python 3 fixes: base64.decodestring was removed in 3.9, and image
    # bytes must go through io.BytesIO, not StringIO.
    raw = base64.b64decode(chancela)
    img = Image.open(io.BytesIO(raw)).convert("RGB")
    draw = ImageDraw.Draw(img)
    font = ImageFont.truetype(_TTF_ARIAL, int(img.size[0] * 0.07))

    texto = usuario_correios['contrato'] + '/' + usuario_correios['ano_assinatura']
    texto += ' - DR/' + usuario_correios['origem']
    if usuario_correios['postagem'] != usuario_correios['origem']:
        texto += '/' + usuario_correios['postagem']

    # ImageDraw.setfont() no longer exists: pass font= to each call.
    # textbbox replaces textsize, which was removed in Pillow 10.
    bbox = draw.textbbox((0, 0), texto, font=font)
    text_width = bbox[2] - bbox[0]
    h_position = (img.size[0] - text_width) // 2
    v_position = img.size[1] // 2
    draw.text((h_position, v_position), texto, fill=(0, 0, 0), font=font)

    # Company name in bold, wrapped to 20 characters per line, centered.
    list_name = textwrap.wrap(usuario_correios['nome'], width=20)
    bold_font = ImageFont.truetype(_TTF_ARIAL_N, int(img.size[0] * 0.07))
    y_text = img.size[1] // 2 + int(img.size[0] * 0.07)
    for line in list_name:
        bbox = draw.textbbox((0, 0), line, font=bold_font)
        width, height = bbox[2] - bbox[0], bbox[3] - bbox[1]
        h_position = (img.size[0] - width) // 2
        draw.text((h_position, y_text), line, fill=(0, 0, 0), font=bold_font)
        y_text += height + 5

    # Pad to a square canvas; paste() requires integer coordinates, so use
    # floor division (the original's `/` produced floats under Python 3).
    size = max(img.size[0], img.size[1])
    bg = Image.new("RGBA", (size, size), (255, 255, 255))
    h_position = (bg.size[0] - img.size[0]) // 2
    v_position = (bg.size[1] - img.size[1]) // 2
    bg.paste(img, box=(h_position, v_position))
    tmp = io.BytesIO()
    bg.save(tmp, 'png')
    return base64.b64encode(tmp.getvalue())
| 40.044118 | 83 | 0.622292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,242 | 0.59519 |
45a5d652cf5968d8ff355947ff4f952cf9d7e299 | 1,408 | py | Python | urls.py | enisimsar/watchtower-news | 222d2e52e76ef32ebb78eb325f4c32b64c0ba1a6 | [
"MIT"
] | 2 | 2019-02-21T18:29:09.000Z | 2021-01-27T14:52:46.000Z | urls.py | enisimsar/watchtower-news | 222d2e52e76ef32ebb78eb325f4c32b64c0ba1a6 | [
"MIT"
] | 3 | 2018-11-22T08:34:04.000Z | 2021-06-01T22:47:19.000Z | urls.py | enisimsar/watchtower-news | 222d2e52e76ef32ebb78eb325f4c32b64c0ba1a6 | [
"MIT"
] | 1 | 2019-06-13T10:45:46.000Z | 2019-06-13T10:45:46.000Z | """
Endpoints
"""
from handlers.auth import UserHandler, AuthHandler
from handlers.base import StaticHandler
from handlers.invitations import InvitationHandler, InvitationsHandler, InvitationPostHandler
from handlers.logs import LogHandler, LogsHandler
from handlers.swagger import SwaggerHandler
from handlers.topics import TopicHandler, TopicsHandler, TopicPostHandler
from handlers.news import NewsHandler, SingleNewsHandler
from handlers.tweets import TweetsHandler, TweetHandler
from settings import app_settings
__author__ = 'Enis Simsar'
# Routing table: (regex, RequestHandler[, init kwargs]) tuples, Tornado-style.
# Commented-out tuples are endpoints that exist in the handlers package but
# are currently disabled.
url_patterns = [
    # ----- API ENDPOINTS ----- #
    # AUTH
    # (r"/api/auth", AuthHandler),
    # (r"/api/user", UserHandler),
    # TOPIC
    (r"/api/topic", TopicPostHandler),
    (r"/api/topic/(.*)$", TopicHandler),
    (r"/api/topics", TopicsHandler),
    # NEWS
    (r"/api/single_news/(.*)$", SingleNewsHandler),
    (r"/api/news", NewsHandler),
    # TWEETS
    # (r"/api/tweet/(.*)$", TweetHandler),
    # (r"/api/tweets", TweetsHandler),
    # INVITATIONS
    # (r"/api/invitation", InvitationPostHandler),
    # (r"/api/invitation/(.*)$", InvitationHandler),
    # (r"/api/invitations", InvitationsHandler),
    # LOGS
    # (r'/api/logs', LogsHandler),
    # (r'/api/log/(.*)$', LogHandler),
    # ----- UI ENDPOINTS ----- #
    (r'/', SwaggerHandler),
    # Serve static assets from the configured template directory.
    (r"/static/(.*)", StaticHandler, {'path': app_settings['template_path']}),
]
| 26.566038 | 93 | 0.666903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 590 | 0.419034 |
45a5dd444dba72a08ec81120f4521eb550ac09a2 | 1,757 | py | Python | src/compose.py | vimc/montagu | c9682b3e57cf25e75b5b7688f748c8dbe882666d | [
"MIT"
] | null | null | null | src/compose.py | vimc/montagu | c9682b3e57cf25e75b5b7688f748c8dbe882666d | [
"MIT"
] | 59 | 2017-07-28T09:27:15.000Z | 2021-11-01T17:16:59.000Z | src/compose.py | vimc/montagu | c9682b3e57cf25e75b5b7688f748c8dbe882666d | [
"MIT"
] | 1 | 2020-09-23T11:08:34.000Z | 2020-09-23T11:08:34.000Z | from subprocess import Popen
from docker_helpers import montagu_registry
import shutil
import versions
def start(settings):
    """Start all montagu services in the background (``docker-compose up -d``)."""
    run("up -d", settings)
def stop(settings):
    """Stop the running service containers, then force-remove them."""
    run("stop", settings)
    run("rm -f", settings)
def pull(settings):
    """Pull the latest images for all services defined in the compose files."""
    run("pull", settings)
def run(args, settings):
    """Invoke docker-compose with *args* appended to the standard options.

    args: extra docker-compose arguments, e.g. "up -d" or "pull".
    settings: dict; reads "docker_prefix" (compose project name) and
        "fake_smtp" (adds the staging override file when truthy).

    Raises Exception if docker-compose is not on PATH or exits non-zero.
    """
    docker_prefix = settings["docker_prefix"]
    # The staging override file is only layered in for fake-SMTP deployments.
    staging_file = "-f ../docker-compose.staging.yml" if settings["fake_smtp"] else ""
    exe = shutil.which("docker-compose")
    if not exe:
        raise Exception("Did not find docker-compose on path")
    prefix = '{} -f ../docker-compose.yml {} --project-name {} '.format(
        exe, staging_file, docker_prefix)
    cmd = prefix + args
    print(cmd)
    # NOTE(review): env=get_env(...) REPLACES the inherited environment
    # entirely (PATH etc. are not passed through), and the command is run
    # through the shell — confirm both are intentional.
    p = Popen(cmd, env=get_env(settings), shell=True)
    p.wait()
    if p.returncode != 0:
        raise Exception("An error occurred: docker-compose returned {}".format(p.returncode))
def get_env(settings):
    """Build the environment-variable mapping handed to docker-compose.

    Reads "port", "hostname" and "use_production_db_config" from *settings*;
    everything else comes from the pinned version constants.
    """
    port = settings["port"]
    hostname = settings["hostname"]
    db_config_file = (
        "postgresql.production.conf"
        if settings["use_production_db_config"]
        else "postgresql.conf"
    )
    return dict(
        MONTAGU_REGISTRY=montagu_registry,
        VIMC_REGISTRY="vimc",
        MONTAGU_PORT=str(port),
        MONTAGU_HOSTNAME=hostname,
        MONTAGU_API_VERSION=versions.api,
        MONTAGU_DB_VERSION=versions.db,
        MONTAGU_DB_CONF="/etc/montagu/" + db_config_file,
        MONTAGU_CONTRIB_PORTAL_VERSION=versions.contrib_portal,
        MONTAGU_ADMIN_PORTAL_VERSION=versions.admin_portal,
        MONTAGU_PROXY_VERSION=versions.proxy,
        MONTAGU_STATIC_VERSION=versions.static,
        MONTAGU_TASK_QUEUE_VERSION=versions.task_queue,
    )
| 26.223881 | 93 | 0.663631 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 607 | 0.345475 |
45a6463ce5c3af0a8d920e0f6fb362535e3511bb | 5,234 | py | Python | bread/layout/components/notification.py | tpokorra/bread | 6e6d0f91275ef5e580ad4fa0626472d3c2b5705b | [
"BSD-3-Clause"
] | null | null | null | bread/layout/components/notification.py | tpokorra/bread | 6e6d0f91275ef5e580ad4fa0626472d3c2b5705b | [
"BSD-3-Clause"
] | null | null | null | bread/layout/components/notification.py | tpokorra/bread | 6e6d0f91275ef5e580ad4fa0626472d3c2b5705b | [
"BSD-3-Clause"
] | null | null | null | import datetime
import htmlgenerator
from django.utils.translation import gettext as _
from .button import Button
from .icon import Icon
KIND_ICON_MAPPING = {
"error": "error--filled",
"info": "information--filled",
"info-square": "information--square--filled",
"success": "checkmark--filled",
"warning": "warning--filled",
"warning-alt": "warning--alt--filled",
}
class InlineNotification(htmlgenerator.DIV):
    """A Carbon Design System inline notification (``bx--inline-notification``)."""

    def __init__(
        self,
        title,
        subtitle,
        action=None,
        kind="info",
        lowcontrast=False,
        hideclosebutton=False,
        **attributes,
    ):
        """
        Build the inline-notification DIV.

        title: heading text of the notification
        subtitle: body text shown next to the title
        action: optional tuple (action_name, javascript_onclick),
            e.g. ("Open Google", "window.location='https://google.com'"),
            rendered as a ghost button inside the notification
        kind: one of "error", "info", "info-square", "success", "warning",
            "warning-alt" — selects the icon and the CSS modifier class
        lowcontrast: use the low-contrast visual variant
        hideclosebutton: omit the close ("x") button
        **attributes: additional HTML attributes for the outer DIV
        """
        assert (
            kind in KIND_ICON_MAPPING
        ), f"kind '{kind}' does not exists, must be one of {KIND_ICON_MAPPING.keys()}"
        assert action is None or (
            len(action) == 2
        ), "action must be a tuple with: (action_name, javascript_onclick)"
        # data-notification is picked up by the Carbon notification JS.
        attributes["data-notification"] = True
        # Append (not replace) the Carbon classes so callers can pass their own.
        attributes["_class"] = (
            attributes.get("_class", "")
            + f" bx--inline-notification bx--inline-notification--{kind}"
        )
        if lowcontrast:
            attributes["_class"] += " bx--inline-notification--low-contrast"
        attributes["role"] = "alert"
        # Details block: kind-specific icon plus title/subtitle text wrapper.
        children = [
            htmlgenerator.DIV(
                Icon(
                    KIND_ICON_MAPPING[kind],
                    size=20,
                    _class="bx--inline-notification__icon",
                ),
                htmlgenerator.DIV(
                    htmlgenerator.P(title, _class="bx--inline-notification__title"),
                    htmlgenerator.P(
                        subtitle, _class="bx--inline-notification__subtitle"
                    ),
                    _class="bx--inline-notification__text-wrapper",
                ),
                _class="bx--inline-notification__details",
            ),
        ]
        # Optional action button (ghost style) wired to the given JS handler.
        if action is not None:
            children.append(
                Button(
                    action[0],
                    onclick=action[1],
                    type="ghost",
                    small=True,
                    _class="bx--inline-notification__action-button",
                )
            )
        # Optional close button; data_notification_btn marks it for Carbon JS.
        if not hideclosebutton:
            children.append(
                htmlgenerator.BUTTON(
                    Icon(
                        "close", size=20, _class="bx--inline-notification__close-icon"
                    ),
                    data_notification_btn=True,
                    _class="bx--inline-notification__close-button",
                    aria_label="close",
                )
            )
        super().__init__(*children, **attributes)
class ToastNotification(htmlgenerator.DIV):
    """A Carbon Design System toast notification (``bx--toast-notification``)."""

    def __init__(
        self,
        title,
        subtitle,
        kind="info",
        lowcontrast=False,
        hideclosebutton=False,
        hidetimestamp=False,
        **attributes,
    ):
        """
        Build the toast-notification DIV.

        title: heading text of the toast
        subtitle: body text shown under the title
        kind: one of "error", "info", "info-square", "success", "warning",
            "warning-alt" — selects the icon and the CSS modifier class
        lowcontrast: use the low-contrast visual variant
        hideclosebutton: omit the close ("x") button
        hidetimestamp: omit the "Time stamp" caption (and the timestamp
            appended to it at render time)
        **attributes: additional HTML attributes for the outer DIV
        """
        assert (
            kind in KIND_ICON_MAPPING
        ), f"kind '{kind}' does not exists, must be one of {KIND_ICON_MAPPING.keys()}"
        # Remembered so render() knows whether to append the timestamp.
        self.hidetimestamp = hidetimestamp
        attributes["data-notification"] = True
        # Append (not replace) the Carbon classes so callers can pass their own.
        attributes["_class"] = (
            attributes.get("_class", "")
            + f" bx--toast-notification bx--toast-notification--{kind}"
        )
        if lowcontrast:
            attributes["_class"] += " bx--toast-notification--low-contrast"
        attributes["role"] = "alert"
        # Caption paragraph; render() appends the actual HH:MM:SS to it.
        timestampelem = (
            [
                htmlgenerator.P(
                    _("Time stamp "), _class="bx--toast-notification__caption"
                )
            ]
            if not hidetimestamp
            else []
        )
        children = [
            Icon(
                KIND_ICON_MAPPING[kind],
                size=20,
                _class="bx--toast-notification__icon",
            ),
            htmlgenerator.DIV(
                htmlgenerator.H3(title, _class="bx--toast-notification__title"),
                htmlgenerator.P(subtitle, _class="bx--toast-notification__subtitle"),
                *timestampelem,
                _class="bx--toast-notification__details",
            ),
        ]
        if not hideclosebutton:
            children.append(
                htmlgenerator.BUTTON(
                    Icon("close", size=20, _class="bx--toast-notification__close-icon"),
                    data_notification_btn=True,
                    _class="bx--toast-notification__close-button",
                    aria_label="close",
                )
            )
        super().__init__(*children, **attributes)

    def render(self, context):
        """Render the toast, first appending the current time to the caption.

        self[1] is the details DIV; its child at index 2 is the caption
        paragraph created in __init__ (H3 title is 0, subtitle P is 1).
        NOTE(review): the caption is appended to on EVERY render call, so
        rendering the same instance twice accumulates timestamps — confirm
        instances are single-use.
        """
        if not self.hidetimestamp:
            self[1][2].append(
                "[" + datetime.datetime.now().time().isoformat()[:8] + "]"
            )
        return super().render(context)
| 33.126582 | 123 | 0.509553 | 4,836 | 0.923959 | 0 | 0 | 0 | 0 | 0 | 0 | 1,639 | 0.313145 |
45a8c0e961f64f8351c00134142cfebe4ed805f4 | 388 | py | Python | Desafio53.py | VictorCastao/Curso-em-Video-Python | aeee8baaa73c04b839a27ae37ba24ecc0b863075 | [
"MIT"
] | null | null | null | Desafio53.py | VictorCastao/Curso-em-Video-Python | aeee8baaa73c04b839a27ae37ba24ecc0b863075 | [
"MIT"
] | null | null | null | Desafio53.py | VictorCastao/Curso-em-Video-Python | aeee8baaa73c04b839a27ae37ba24ecc0b863075 | [
"MIT"
] | null | null | null | print('=' * 12 + 'Desafio 53' + '=' * 12)
frase = input('Digite sua frase: ')
frase = frase.strip().replace(" ","").upper()
tamanho = len(frase)
contador = 0
igual = 0
for i in range(tamanho - 1, -1, -1):
if frase[contador] == frase[i]:
igual += 1
contador += 1
if contador == tamanho:
print('A frase é um palíndromo!')
else:
print('A frase não é um palíndromo!')
| 25.866667 | 45 | 0.587629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.264631 |
45a932ed0daa8d90eab54ad2fe63256555a33c87 | 2,777 | py | Python | example/pytorch/run.py | alibaba/sionnx | 3f3e18826ddcc26402b4e2af96ca8aac15560456 | [
"Apache-2.0"
] | 34 | 2019-05-29T03:15:48.000Z | 2022-03-24T03:14:58.000Z | example/pytorch/run.py | alibaba/sionnx | 3f3e18826ddcc26402b4e2af96ca8aac15560456 | [
"Apache-2.0"
] | 1 | 2020-05-21T11:44:22.000Z | 2020-05-21T11:44:22.000Z | example/pytorch/run.py | alibaba/sionnx | 3f3e18826ddcc26402b4e2af96ca8aac15560456 | [
"Apache-2.0"
] | 4 | 2019-12-16T18:49:42.000Z | 2021-10-11T18:41:54.000Z | #*
#* Copyright (C) 2017-2019 Alibaba Group Holding Limited
#*
#* Licensed under the Apache License, Version 2.0 (the "License");
#* you may not use this file except in compliance with the License.
#* You may obtain a copy of the License at
#*
#* http://www.apache.org/licenses/LICENSE-2.0
#*
#* Unless required by applicable law or agreed to in writing, software
#* distributed under the License is distributed on an "AS IS" BASIS,
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#* See the License for the specific language governing permissions and
#* limitations under the License.
import numpy as np
import onnx
import os
import glob
import caffe2.python.onnx.backend
from caffe2.python import core, workspace
from onnx import numpy_helper
import os
fail_sum = 0
dir_path = os.path.dirname(os.path.realpath(__file__))
test_dir = glob.glob(os.path.join(dir_path, 'test_*'))
model_paths = glob.glob(os.path.join(os.path.join(dir_path, 'test_*'), '*.onnx'))
m_len = len(model_paths)
for k in range(m_len):
model = onnx.load(model_paths[k])
test_data_dir = os.path.join(test_dir[k], 'test_data_set_0')
# Load inputs
inputs = []
inputs_num = len(glob.glob(os.path.join(test_data_dir, 'input_*.pb')))
for i in range(inputs_num):
input_file = os.path.join(test_data_dir, 'input_{}.pb'.format(i))
tensor = onnx.TensorProto()
with open(input_file, 'rb') as f:
tensor.ParseFromString(f.read())
inputs.append(numpy_helper.to_array(tensor))
# Load reference outputs
ref_outputs = []
ref_outputs_num = len(glob.glob(os.path.join(test_data_dir, 'output_*.pb')))
for j in range(ref_outputs_num):
output_file = os.path.join(test_data_dir, 'output_{}.pb'.format(j))
tensor = onnx.TensorProto()
with open(output_file, 'rb') as f:
tensor.ParseFromString(f.read())
ref_outputs.append(numpy_helper.to_array(tensor))
# Run the model on the backend
try:
outputs = list(caffe2.python.onnx.backend.run_model(model, inputs))
except RuntimeError:
print("!!Error: Model execution of " + test_dir[k] + " failed.")
fail_sum = fail_sum + 1
continue
idx = 0
# Results verification with golden data.
for ref_o, o in zip(ref_outputs, outputs):
try:
np.testing.assert_almost_equal(ref_o, o, decimal=5, err_msg="Failed test: " + test_dir[k])
except AssertionError:
print("!!Error: Output " + str(idx) + " of test: " + test_dir[k] + " failed")
fail_sum = fail_sum + 1
idx = idx + 1
print("============Summary:=============")
print(str(m_len) + " tests in total.")
print(str(m_len - fail_sum) + " tests passed.")
print(str(fail_sum) + " tests failed.")
print("=================================")
| 35.151899 | 100 | 0.673749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,027 | 0.369824 |
45a961e0e1f604b60a477e34ad903c8057c5ec23 | 877 | py | Python | scripts/ipu/callbacks.py | BastienArcelin/IPU-GPU | dde946686478ce77a06821a1517b5b8206ab8de9 | [
"BSD-3-Clause"
] | null | null | null | scripts/ipu/callbacks.py | BastienArcelin/IPU-GPU | dde946686478ce77a06821a1517b5b8206ab8de9 | [
"BSD-3-Clause"
] | null | null | null | scripts/ipu/callbacks.py | BastienArcelin/IPU-GPU | dde946686478ce77a06821a1517b5b8206ab8de9 | [
"BSD-3-Clause"
] | null | null | null | import sys, os
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import Callback, ReduceLROnPlateau, TerminateOnNaN, ModelCheckpoint
import tensorflow.keras.backend as K
import tensorflow as tf
import time
###### Callbacks
# Create a callback to compute time spent between 10th and 110th epoch
class time_callback(Callback):
    """Keras callback that measures wall-clock time across training epochs.

    Records time.time() at the end of the callback's 10th observed epoch
    and again at the 110th, then prints the elapsed time for those 100
    epochs.  It keeps its own 1-based epoch counter (``self.epoch``) rather
    than using the epoch number Keras passes in.
    """

    def __init__(self):
        '''
        Compute time spent between 10th and 110th epoch
        '''
        self.epoch = 1  # 1-based count of epochs seen by this callback
        self.t1 =0      # timestamp captured at the end of epoch 10
        self.t2 = 0     # timestamp captured at the end of epoch 110

    def on_epoch_end(self, epoch, t1):
        # NOTE(review): the second positional argument is presumably the
        # Keras ``logs`` dict (unused here, misleadingly named t1) — confirm
        # against the Callback API of the TensorFlow version in use.
        if (self.epoch == 10):
            self.t1 =time.time()
            print('t1: '+str(self.t1))
        elif (self.epoch == 110):
            self.t2 = time.time()
            print('t2: '+str(self.t2))
            # If training stops before epoch 110, t2 stays 0 and this line
            # never runs — no elapsed time is reported.
            print('for 100 epochs from 10 to 110: '+str(self.t2 - self.t1))
        self.epoch +=1
45a9f4fd71c58a688f030ddcf7a969454a4a8652 | 1,400 | py | Python | ag/tests/test_metrics.py | justyre/jus | 1339c010ac4499c253061d2cce5e638ec06062bd | [
"MIT"
] | null | null | null | ag/tests/test_metrics.py | justyre/jus | 1339c010ac4499c253061d2cce5e638ec06062bd | [
"MIT"
] | null | null | null | ag/tests/test_metrics.py | justyre/jus | 1339c010ac4499c253061d2cce5e638ec06062bd | [
"MIT"
] | null | null | null | """Unit tests for the metrics module."""
import pytest
from forest import metrics
def test_counter():
    """Counter tracks increments and decrements, with an optional step size."""
    c = metrics.Counter()
    scripted_ops = [
        (c.increase, (), 1),
        (c.increase, (10,), 11),
        (c.decrease, (), 10),
        (c.decrease, (11,), -1),
    ]
    for operation, call_args, expected_count in scripted_ops:
        operation(*call_args)
        assert c.count == expected_count
def test_histogram():
    """Histogram.report summarises ten sequential samples 0..9 correctly."""
    histogram = metrics.Histogram()
    for sample in range(10):
        histogram.update(value=sample)
    summary = histogram.report()
    expectations = {
        "min": 0,
        "max": 9,
        "medium": pytest.approx(4.5),
        "mean": pytest.approx(4.5),
        # relative tolerance of 0.1
        "stdDev": pytest.approx(2.8, rel=0.1),
    }
    for key, expected in expectations.items():
        assert summary[key] == expected
    percentiles = summary["percentile"]
    assert percentiles["75"] == pytest.approx(6.75)
    assert percentiles["95"] == pytest.approx(8.5, 0.1)
    assert percentiles["99"] == pytest.approx(9, 0.1)
def test_registry():
    """Registered metrics are retrievable by name, identity preserved."""
    registry = metrics.MetricRegistry()
    instruments = {
        "counter": metrics.Counter(),
        "histogram": metrics.Histogram(),
    }
    for name, instrument in instruments.items():
        registry.register(name, instrument)
    for name, instrument in instruments.items():
        assert registry.get_metric(name) is instrument
| 28.571429 | 87 | 0.642143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 255 | 0.182143 |
45aa3b7da60de08d1a053fb874e8708de93af93e | 1,280 | py | Python | appmap/test/conftest.py | calvinsomething/appmap-python | 7234f7cdb240eadfa74a1e6021bc8695ceb60179 | [
"MIT"
] | null | null | null | appmap/test/conftest.py | calvinsomething/appmap-python | 7234f7cdb240eadfa74a1e6021bc8695ceb60179 | [
"MIT"
] | null | null | null | appmap/test/conftest.py | calvinsomething/appmap-python | 7234f7cdb240eadfa74a1e6021bc8695ceb60179 | [
"MIT"
] | null | null | null | import importlib
import pytest
import yaml
import appmap._implementation
from appmap._implementation.env import Env
from appmap._implementation.recording import Recorder
def _data_dir(pytestconfig):
return pytestconfig.rootpath / 'appmap' / 'test' / 'data'
@pytest.fixture(name='data_dir')
def fixture_data_dir(pytestconfig):
    """Fixture exposing the shared test-data directory as ``data_dir``."""
    return _data_dir(pytestconfig)
@pytest.fixture(name='with_data_dir')
def fixture_with_data_dir(data_dir, monkeypatch):
    """Prepend the test-data directory to sys.path and return it.

    Lets the requesting test import modules stored under the data directory;
    monkeypatch undoes the sys.path change at teardown.
    """
    monkeypatch.syspath_prepend(data_dir)
    return data_dir
@pytest.fixture
def events():
    """Yield a fresh, enabled Recorder's event list; disable and clear on teardown."""
    recorder = Recorder()
    recorder.clear()
    recorder.enabled = True
    yield recorder.events
    # Teardown: stop recording and drop anything captured during the test.
    recorder.enabled = False
    recorder.clear()
@pytest.hookimpl
def pytest_runtest_setup(item):
    """Per-test setup: (re)initialize appmap according to the test's marker.

    Tests marked ``@pytest.mark.appmap_enabled`` get APPMAP turned on, with
    the config file taken from the marker's ``config`` kwarg (default
    ``appmap.yml``) resolved inside the shared test-data directory.
    Unmarked tests get appmap re-initialized with an empty environment,
    i.e. disabled.
    """
    mark = item.get_closest_marker('appmap_enabled')
    env = {}
    if mark:
        appmap_yml = mark.kwargs.get('config', 'appmap.yml')
        d = _data_dir(item.config)
        config = d / appmap_yml
        # Set the config both on the live Env singleton and in the env dict
        # passed to initialize below.
        Env.current.set('APPMAP_CONFIG', config)
        env = {'APPMAP': 'true', 'APPMAP_CONFIG': config}
    appmap._implementation.initialize(env=env) # pylint: disable=protected-access
    # Some tests want yaml instrumented, others don't.
    # Reload it to make sure it's instrumented, or not, as set in appmap.yml.
    importlib.reload(yaml)
| 27.826087 | 82 | 0.714844 | 0 | 0 | 134 | 0.104688 | 1,009 | 0.788281 | 0 | 0 | 282 | 0.220313 |
45aa576cd9312ff2a881e945aab29cabb485ed59 | 7,097 | py | Python | occ_sim/animation.py | refmitchell/dcidb-supplemental-repository | 44af88de1a51e61ee3b19eaf8823fe7e79d17a12 | [
"MIT"
] | null | null | null | occ_sim/animation.py | refmitchell/dcidb-supplemental-repository | 44af88de1a51e61ee3b19eaf8823fe7e79d17a12 | [
"MIT"
] | null | null | null | occ_sim/animation.py | refmitchell/dcidb-supplemental-repository | 44af88de1a51e61ee3b19eaf8823fe7e79d17a12 | [
"MIT"
] | null | null | null | """
animation.py
This script is used to procduce animations of population behaviour
over a range of changing conditions. For example, if we wanted to
see how a population would change as light was elevated and wind
kept constant, we could produce the animation and watch the
general trend. This was mostly useful for visualisation, less for
formal analysis.
The script first constructs a series of Treatments which will be
simulated. The simulations are then run, each simulation produces
a plot which is stored in a target directory. These frames can
then be stitched together using a suitable tool, such as ffmpeg.
Note: the script is written for changing light elevations but
it should be reasonably straightforward to modify it for changing
other variables.
"""
from util.deserialiser import Deserialiser
from util.integration_models import *
from util.treatment import Treatment
from util.models import ReliabilityModel
from world.light import Light
from world.wind import Wind
import definitions as defn
import matplotlib.pyplot as plt
from scipy.special import i0
import numpy as np
import os
import shutil
def main():
    """Sweep light elevation from 30 to 90 degrees, simulating a treatment at
    each step and saving one plot per simulation into frames/BWS.

    At each elevation, a Treatment is built with light and wind initially
    aligned (both at 0 rad azimuth) and a conflict condition where the wind
    is rotated to 120 degrees.  Each simulation produces one PNG frame
    (zero-padded sequential filenames, suitable for stitching with ffmpeg)
    showing the population's bearing changes on a polar plot plus the
    initial/conflict von Mises cue densities.

    Side effects: calls os.chdir("frames/BWS") (relative to the current
    working directory — the directory must already exist) and writes PNG
    files there.
    """
    #
    # Simulator - can be anything in the util/integration_models module
    #
    simulator = CMLE()
    rel_model = ReliabilityModel()

    #
    # Set the target output directory
    #
    os.chdir("frames/BWS")
    print(os.getcwd())

    start = 30 # Start elevation in degrees
    end = 90 # End elevation
    increment = 1 # Adjustment increment in degrees
    iterations = 15 # Number of simulations to run at each elevation
    treatnent_n = 30 # Number of individuals per treatment (sic: typo kept)
    elevation = np.radians(start)
    filenumber = 0
    wind_speed = 2.5 # Wind speed for each trial (this is assumed to be constant)

    # While elevation still in range
    while elevation < np.radians(end):
        #
        # Create the requisite treatment: cues aligned initially, wind
        # rotated to 120 degrees for the conflict condition.
        #
        treatment = Treatment()
        treatment.set_reliability_model(rel_model)
        treatment.set_n(treatnent_n)
        treatment.set_id("Elevation {:.01f} degrees".format(np.degrees(elevation)))
        init_light = Light(elevation, np.radians(0), treatment)
        init_wind = Wind(wind_speed, np.radians(0), treatment)
        initial = [init_wind, init_light]
        conf_light = Light(elevation, np.radians(0), treatment)
        conf_wind = Wind(wind_speed, np.radians(120), treatment)
        conflict = [conf_wind, conf_light]
        treatment.set_initial_cues(initial)
        treatment.set_conflict_cues(conflict)
        #
        # Simulate the current treatment for some number of iterations.
        #
        for n in range(iterations):
            #
            # The filename format string is set to produce regular filenames
            # which can easily be stitched into a video using ffmpeg. This can
            # be modified.
            #
            filename = "{:05d}.png".format(filenumber)
            simulator.simulate_treatment(treatment)

            #
            # Plot production: left half is a polar scatter of individual
            # bearing changes (unit radius) plus the mean vector.
            #
            changes = treatment.get_changes_in_bearing()
            avg_r, avg_t = treatment.get_avg_change()
            plt.tight_layout()
            ax = plt.subplot(121, projection='polar')
            ax.plot(changes, np.ones(len(changes)), 'bo', color='magenta', alpha=0.2)
            ax.plot(avg_t, avg_r, 'ro', markeredgecolor='k', label="R={:.02f},T={:.01f}".format(avg_r, np.degrees(avg_t)))
            ax.set_title(treatment.get_id())
            ax.set_rlim(0,1.1)
            # North at top, angles increasing clockwise (compass convention).
            ax.set_theta_zero_location("N")
            ax.set_theta_direction(-1)
            ax.legend(loc='lower left')
            # params is assumed to map "initial"/"conflict" to
            # [(light_mu, light_kappa), (wind_mu, wind_kappa)] — see unpacking below.
            params = treatment.get_cue_distribution_parameters()
            initial_dist_ax = plt.subplot(222)
            initial_light = params["initial"][0]
            initial_wind = params["initial"][1]
            light_mu = initial_light[0]
            wind_mu = initial_wind[0]
            light_kappa = initial_light[1]
            wind_kappa = initial_wind[1]
            # von Mises density: exp(kappa*cos(x - mu)) / (2*pi*I0(kappa)).
            light_x = np.linspace(-np.pi, np.pi, num=100)
            light_y = np.exp(light_kappa*np.cos(light_x - light_mu))/(2*np.pi*i0(light_kappa))
            wind_x = np.linspace(-np.pi, np.pi, num=100)
            wind_y = np.exp(wind_kappa*np.cos(wind_x - wind_mu))/(2*np.pi*i0(wind_kappa))
            initial_dist_ax.plot(np.degrees(light_x), light_y,
                                 color='green',
                                 label="Light: kappa={:.02f}".format(light_kappa)
            )
            initial_dist_ax.plot(np.degrees(wind_x),
                                 wind_y,
                                 color='blue',
                                 label="Wind: kappa={:.02f}".format(wind_kappa))
            initial_dist_ax.set_ylim([0,1])
            initial_dist_ax.legend()
            initial_dist_ax.set_title("Initial cue probability density")
            initial_dist_ax.set_ylabel("Probability density")
            # Same density plots for the conflict condition (bottom right).
            conflict_dist_ax = plt.subplot(224)
            conflict_light = params["conflict"][0]
            conflict_wind = params["conflict"][1]
            light_mu = conflict_light[0]
            wind_mu = conflict_wind[0]
            light_kappa = conflict_light[1]
            wind_kappa = conflict_wind[1]
            light_x = np.linspace(-np.pi, np.pi, num=100)
            light_y = np.exp(light_kappa*np.cos(light_x - light_mu))/(2*np.pi*i0(light_kappa))
            wind_x = np.linspace(-np.pi, np.pi, num=100)
            wind_y = np.exp(wind_kappa*np.cos(wind_x - wind_mu))/(2*np.pi*i0(wind_kappa))
            conflict_dist_ax.plot(np.degrees(light_x), light_y,
                                  color='green',
                                  label="Light: kappa={:.02f}".format(light_kappa)
            )
            conflict_dist_ax.plot(np.degrees(wind_x),
                                  wind_y, color='blue',
                                  label="Wind: kappa={:.02f}".format(wind_kappa))
            conflict_dist_ax.set_ylim([0,1])
            conflict_dist_ax.set_xlim([-180,180])
            conflict_dist_ax.set_title("Conflict cue probability distributions")
            conflict_dist_ax.set_xlabel("Degrees")
            conflict_dist_ax.set_ylabel("Probability density")

            # Bin data into 360/nbins degree bins to plot the population mass
            nbins = 72
            ch_hist = np.histogram(np.degrees(changes), np.linspace(-180, 180, nbins + 1))[0]
            ch_hist_norm = ch_hist / sum(ch_hist)

            # Plot population response alongside the cue distributions
            plt.bar(np.linspace(-180, 180, nbins),
                    ch_hist_norm, width=360/nbins,
                    color='magenta',edgecolor='k', alpha=0.5,
                    label='Population response')
            conflict_dist_ax.legend()
            plt.gcf().set_size_inches(16,10)
            plt.savefig(filename)
            plt.clf()

            # Loop admin
            filenumber+=1

        elevation+=np.radians(increment)
if __name__ == '__main__':
main()
| 36.772021 | 122 | 0.610681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,955 | 0.275469 |
45acd3a5f2cd6ccaa0b7e6db6bf65eb041445b32 | 285 | py | Python | Day18/turtle_dashed_line.py | CodePuzzler/100-Days-Of-Code-Python | 4f6da9dabc73f747266ce0e66057d10754ecc54e | [
"MIT"
] | null | null | null | Day18/turtle_dashed_line.py | CodePuzzler/100-Days-Of-Code-Python | 4f6da9dabc73f747266ce0e66057d10754ecc54e | [
"MIT"
] | null | null | null | Day18/turtle_dashed_line.py | CodePuzzler/100-Days-Of-Code-Python | 4f6da9dabc73f747266ce0e66057d10754ecc54e | [
"MIT"
] | null | null | null | # Day18 of my 100DaysOfCode Challenge
# Draw a dashed line using Turtle Graphics
from turtle import Turtle, Screen
groot = Turtle()
for _ in range(15):
groot.forward(10)
groot.penup()
groot.forward(10)
groot.pendown()
my_screen = Screen()
my_screen.exitonclick()
| 15.833333 | 42 | 0.708772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.277193 |
45addf929fefc716794fc0840e1e7815bf960f1d | 5,142 | py | Python | src/rxn_network/reactions/open.py | GENESIS-EFRC/reaction-network | 1482ac1b6b550a5bc9961e9210e33c86c07f64cf | [
"BSD-3-Clause-LBNL"
] | 29 | 2020-08-04T07:07:04.000Z | 2022-02-22T22:09:20.000Z | src/rxn_network/reactions/open.py | GENESIS-EFRC/reaction-network | 1482ac1b6b550a5bc9961e9210e33c86c07f64cf | [
"BSD-3-Clause-LBNL"
] | 70 | 2021-02-22T07:01:40.000Z | 2022-03-31T20:11:56.000Z | src/rxn_network/reactions/open.py | GENESIS-EFRC/reaction-network | 1482ac1b6b550a5bc9961e9210e33c86c07f64cf | [
"BSD-3-Clause-LBNL"
] | 3 | 2021-04-20T09:29:39.000Z | 2022-02-02T17:43:52.000Z | """
A reaction class that builds reactions based on ComputedEntry objects under the
presence of an open entry (e.g. O2), and provides information about reaction
thermodynamics computed as changes in grand potential.
"""
from typing import Dict, List, Optional, Union
import numpy as np
from pymatgen.analysis.phase_diagram import GrandPotPDEntry
from pymatgen.core.composition import Element, Composition
from pymatgen.entries.computed_entries import ComputedEntry
from rxn_network.reactions.computed import ComputedReaction
class OpenComputedReaction(ComputedReaction):
    """
    Extends the ComputedReaction class to add support for "open" reactions,
    where the reaction energy is calculated as a change in grand potential.
    """

    def __init__(
        self,
        entries: List[ComputedEntry],
        coefficients: Union[np.ndarray, List[float]],
        chempots: Dict[Element, float],
        data: Optional[Dict] = None,
        lowest_num_errors=None,
    ):
        """
        Args:
            entries: List of ComputedEntry objects.
            coefficients: List of reaction coefficients.
            chempots: Dict of chemical potentials corresponding to open elements
            data: Optional dict of data
            lowest_num_errors: number of "errors" encountered during reaction balancing
        """
        super().__init__(
            entries=entries,
            coefficients=coefficients,
            data=data,
            lowest_num_errors=lowest_num_errors,
        )

        self.chempots = chempots
        self.open_elems = list(chempots.keys())

        # Wrap each entry in a GrandPotPDEntry so its energy reflects the
        # grand potential, EXCEPT pure-open-element entries (e.g. O2 when O
        # is open), which are kept as-is.
        grand_entries = []
        for e in entries:
            comp = e.composition.reduced_composition
            if len(comp.elements) == 1 and comp.elements[0] in self.open_elems:
                grand_entries.append(e)
            else:
                grand_entries.append(GrandPotPDEntry(e, chempots))

        self.grand_entries = grand_entries

    @classmethod
    def balance(  # type: ignore
        cls,
        reactant_entries: List[ComputedEntry],
        product_entries: List[ComputedEntry],
        chempots: Dict[Element, float] = None,
        data: Optional[Dict] = None,
    ):  # pylint: disable = W0221
        """
        Balance a reaction from reactant/product entries.

        Args:
            reactant_entries: List of ComputedEntry objects for the reactants.
            product_entries: List of ComputedEntry objects for the products.
            chempots: Optional dict of chemical potentials for open elements.
                If omitted/empty, a plain (closed) ComputedReaction is built.
            data: Optional dict of data attached to the reaction.

        Returns:
            An OpenComputedReaction when chempots are given, otherwise a
            ComputedReaction.
        """
        reactant_comps = [e.composition.reduced_composition for e in reactant_entries]
        product_comps = [e.composition.reduced_composition for e in product_entries]
        coefficients, lowest_num_errors = cls._balance_coeffs(
            reactant_comps, product_comps
        )

        entries = list(reactant_entries) + list(product_entries)

        args = {
            "entries": entries,
            "coefficients": coefficients,
            "data": data,
            "lowest_num_errors": lowest_num_errors,
        }
        if not chempots:
            rxn = ComputedReaction(**args)  # type: ignore
        else:
            rxn = cls(chempots=chempots, **args)  # type: ignore

        return rxn

    @property
    def energy(self) -> float:
        """
        Returns (float):
            The calculated reaction energy (grand-potential change): for each
            composition, the lowest per-formula-unit energy among its entries
            is used, then summed weighted by the reaction coefficients.
        """
        calc_energies: Dict[Composition, float] = {}

        for entry in self.grand_entries:
            # GrandPotPDEntry stores the original composition separately.
            attr = "composition"
            if type(entry) == GrandPotPDEntry:
                attr = "original_comp"
            comp, factor = getattr(entry, attr).get_reduced_composition_and_factor()
            # Keep the minimum energy per reduced composition.
            calc_energies[comp] = min(
                calc_energies.get(comp, float("inf")), entry.energy / factor
            )

        return sum(
            [
                amt * calc_energies[c]
                for amt, c in zip(self.coefficients, self.compositions)
            ]
        )

    @property
    def elements(self) -> List[Element]:
        """
        List of elements in the reaction, excluding the open elements.
        """
        return list(
            set(el for comp in self.compositions for el in comp.elements)
            - set(self.open_elems)
        )

    @property
    def total_chemical_system(self) -> str:
        """
        Chemical system string, including open elements (alphabetically
        sorted, hyphen-joined, e.g. "Fe-O-Y").
        """
        return "-".join(
            sorted([str(e) for e in set(self.elements) | set(self.open_elems)])
        )

    def copy(self) -> "OpenComputedReaction":
        """
        Returns a copy of the OpenComputedReaction object.
        """
        return OpenComputedReaction(
            self.entries,
            self.coefficients,
            self.chempots,
            self.data,
            self.lowest_num_errors,
        )

    def reverse(self):
        """
        Returns a copy of reaction with reactants/products swapped
        (all coefficients negated); chempots and data are shared.
        """
        return OpenComputedReaction(
            self.entries,
            -1 * self.coefficients,
            self.chempots,
            self.data,
            self.lowest_num_errors,
        )

    def __repr__(self):
        # Append the open-element chemical potentials to the base repr.
        cp = f"({','.join([f'mu_{e}={m}' for e, m in self.chempots.items()])})"
        return f"{super().__repr__()} {cp}"
| 29.215909 | 87 | 0.586153 | 4,610 | 0.896538 | 0 | 0 | 2,380 | 0.462855 | 0 | 0 | 1,514 | 0.294438 |
45aeb9485e10641d6d2f2f2177a15994e6ffa7cd | 3,004 | py | Python | image_classifier_flowers/ImageClassifier/data_management.py | ChrisEdel/AI-Programming-with-Python-Nanodegree | 918dc383cc46b50dd2d6c420cbef1c2347ad1c43 | [
"MIT"
] | null | null | null | image_classifier_flowers/ImageClassifier/data_management.py | ChrisEdel/AI-Programming-with-Python-Nanodegree | 918dc383cc46b50dd2d6c420cbef1c2347ad1c43 | [
"MIT"
] | null | null | null | image_classifier_flowers/ImageClassifier/data_management.py | ChrisEdel/AI-Programming-with-Python-Nanodegree | 918dc383cc46b50dd2d6c420cbef1c2347ad1c43 | [
"MIT"
] | null | null | null | import torch
from torchvision import datasets, transforms, models
from PIL import Image
def load_data(path):
    """Load train/valid/test image folders from *path* and build DataLoaders.

    Expects ``<path>/train``, ``<path>/valid`` and ``<path>/test`` to be
    ImageFolder-style directory trees.

    Args:
        path: root directory of the dataset (string, no trailing slash).

    Returns:
        (train_data, trainloader, validloader, testloader) — the training
        ImageFolder dataset plus DataLoaders for all three splits.
    """
    print("Loading and preprocessing data from {} ...".format(path))
    train_dir = path + '/train'
    valid_dir = path + '/valid'
    test_dir = path + '/test'

    # ImageNet normalisation statistics, shared by every pipeline below.
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])

    # Training gets random augmentation; validation and test share one
    # deterministic resize/centre-crop pipeline (previously these two
    # pipelines were duplicated verbatim).
    train_transform = transforms.Compose([transforms.RandomRotation(50),
                                          transforms.RandomResizedCrop(224),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor(),
                                          normalize])
    eval_transform = transforms.Compose([transforms.Resize(255),
                                         transforms.CenterCrop(224),
                                         transforms.ToTensor(),
                                         normalize])

    # Load the datasets with ImageFolder
    train_data = datasets.ImageFolder(train_dir, transform=train_transform)
    valid_data = datasets.ImageFolder(valid_dir, transform=eval_transform)
    test_data = datasets.ImageFolder(test_dir, transform=eval_transform)

    # Only the training loader shuffles; evaluation order does not matter.
    trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
    validloader = torch.utils.data.DataLoader(valid_data, batch_size=64)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=64)

    print("Finished loading and preprocessing data.")
    return train_data, trainloader, validloader, testloader
def process_image(image):
    '''Scale, centre-crop and normalize the image at path *image* for a
    PyTorch model; returns the transformed tensor.
    '''
    pil_image = Image.open(image)
    pipeline = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
    return pipeline(pil_image)
45af30155ad4a759e535f66f6ceb28eae04047f8 | 398 | py | Python | map_the_data.py | andrewnash/Thar-She-Blows | 3019d4150c4b4763e979416c33ff62ab9dbcc306 | [
"Beerware"
] | 1 | 2021-01-16T14:16:38.000Z | 2021-01-16T14:16:38.000Z | map_the_data.py | stuckatmarine/Thar-She-Blows | 3019d4150c4b4763e979416c33ff62ab9dbcc306 | [
"Beerware"
] | null | null | null | map_the_data.py | stuckatmarine/Thar-She-Blows | 3019d4150c4b4763e979416c33ff62ab9dbcc306 | [
"Beerware"
] | 1 | 2019-01-27T20:13:40.000Z | 2019-01-27T20:13:40.000Z | def create_box(input_corners):
x = (float(input_corners[0][0]), float(input_corners[1][0]))
y = (float(input_corners[0][1]), float(input_corners[1][1]))
windmill_lats, windmill_lons = zip(*[
(max(x), max(y)),
(min(x), max(y)),
(min(x), min(y)),
(max(x), min(y)),
(max(x), max(y))
])
return windmill_lats, windmill_lons
| 26.533333 | 64 | 0.525126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
45b197a9d2ff1bd1c087f9b6df99427829783c26 | 11,544 | py | Python | sdk/python/pulumi_oci/dns/get_resolver_endpoint.py | EladGabay/pulumi-oci | 6841e27d4a1a7e15c672306b769912efbfd3ba99 | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2021-08-17T11:14:46.000Z | 2021-12-31T02:07:03.000Z | sdk/python/pulumi_oci/dns/get_resolver_endpoint.py | pulumi-oci/pulumi-oci | 6841e27d4a1a7e15c672306b769912efbfd3ba99 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-09-06T11:21:29.000Z | 2021-09-06T11:21:29.000Z | sdk/python/pulumi_oci/dns/get_resolver_endpoint.py | pulumi-oci/pulumi-oci | 6841e27d4a1a7e15c672306b769912efbfd3ba99 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-08-24T23:31:30.000Z | 2022-01-02T19:26:54.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetResolverEndpointResult',
'AwaitableGetResolverEndpointResult',
'get_resolver_endpoint',
]
@pulumi.output_type
class GetResolverEndpointResult:
    """
    A collection of values returned by getResolverEndpoint.
    """
    def __init__(__self__, compartment_id=None, endpoint_type=None, forwarding_address=None, id=None, is_forwarding=None, is_listening=None, listening_address=None, name=None, nsg_ids=None, resolver_endpoint_name=None, resolver_id=None, scope=None, self=None, state=None, subnet_id=None, time_created=None, time_updated=None):
        # Validate and assign every argument in one data-driven pass instead of
        # seventeen copy-pasted if/raise/pulumi.set stanzas.  Each entry maps
        # the argument name to (value, expected type); insertion order matches
        # the parameter order so the first failing argument raises first, and
        # the TypeError message is byte-identical to the per-argument version.
        _args = {
            'compartment_id': (compartment_id, str),
            'endpoint_type': (endpoint_type, str),
            'forwarding_address': (forwarding_address, str),
            'id': (id, str),
            'is_forwarding': (is_forwarding, bool),
            'is_listening': (is_listening, bool),
            'listening_address': (listening_address, str),
            'name': (name, str),
            'nsg_ids': (nsg_ids, list),
            'resolver_endpoint_name': (resolver_endpoint_name, str),
            'resolver_id': (resolver_id, str),
            'scope': (scope, str),
            'self': (self, str),
            'state': (state, str),
            'subnet_id': (subnet_id, str),
            'time_created': (time_created, str),
            'time_updated': (time_updated, str),
        }
        for _name, (_value, _type) in _args.items():
            if _value and not isinstance(_value, _type):
                raise TypeError("Expected argument '%s' to be a %s" % (_name, _type.__name__))
            pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> str:
        """
        The OCID of the owning compartment. This will match the resolver that the resolver endpoint is under and will be updated if the resolver's compartment is changed.
        """
        return pulumi.get(self, "compartment_id")

    @property
    @pulumi.getter(name="endpointType")
    def endpoint_type(self) -> str:
        """
        The type of resolver endpoint. VNIC is currently the only supported type.
        """
        return pulumi.get(self, "endpoint_type")

    @property
    @pulumi.getter(name="forwardingAddress")
    def forwarding_address(self) -> str:
        """
        An IP address from which forwarded queries may be sent. For VNIC endpoints, this IP address must be part of the subnet and will be assigned by the system if unspecified when isForwarding is true.
        """
        return pulumi.get(self, "forwarding_address")

    @property
    @pulumi.getter
    def id(self) -> str:
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="isForwarding")
    def is_forwarding(self) -> bool:
        """
        A Boolean flag indicating whether or not the resolver endpoint is for forwarding.
        """
        return pulumi.get(self, "is_forwarding")

    @property
    @pulumi.getter(name="isListening")
    def is_listening(self) -> bool:
        """
        A Boolean flag indicating whether or not the resolver endpoint is for listening.
        """
        return pulumi.get(self, "is_listening")

    @property
    @pulumi.getter(name="listeningAddress")
    def listening_address(self) -> str:
        """
        An IP address to listen to queries on. For VNIC endpoints this IP address must be part of the subnet and will be assigned by the system if unspecified when isListening is true.
        """
        return pulumi.get(self, "listening_address")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resolver endpoint. Must be unique, case-insensitive, within the resolver.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="nsgIds")
    def nsg_ids(self) -> Sequence[str]:
        """
        An array of network security group OCIDs for the resolver endpoint. These must be part of the VCN that the resolver endpoint is a part of.
        """
        return pulumi.get(self, "nsg_ids")

    @property
    @pulumi.getter(name="resolverEndpointName")
    def resolver_endpoint_name(self) -> str:
        return pulumi.get(self, "resolver_endpoint_name")

    @property
    @pulumi.getter(name="resolverId")
    def resolver_id(self) -> str:
        return pulumi.get(self, "resolver_id")

    @property
    @pulumi.getter
    def scope(self) -> str:
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def self(self) -> str:
        """
        The canonical absolute URL of the resource.
        """
        return pulumi.get(self, "self")

    @property
    @pulumi.getter
    def state(self) -> str:
        """
        The current state of the resource.
        """
        return pulumi.get(self, "state")

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> str:
        """
        The OCID of a subnet. Must be part of the VCN that the resolver is attached to.
        """
        return pulumi.get(self, "subnet_id")

    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """
        The date and time the resource was created in "YYYY-MM-ddThh:mm:ssZ" format with a Z offset, as defined by RFC 3339.
        """
        return pulumi.get(self, "time_created")

    @property
    @pulumi.getter(name="timeUpdated")
    def time_updated(self) -> str:
        """
        The date and time the resource was last updated in "YYYY-MM-ddThh:mm:ssZ" format with a Z offset, as defined by RFC 3339.
        """
        return pulumi.get(self, "time_updated")
class AwaitableGetResolverEndpointResult(GetResolverEndpointResult):
    # Awaitable wrapper: the unreachable `yield` marks __await__ as a
    # generator function (required by the await protocol) while the
    # `return` immediately produces a plain result object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetResolverEndpointResult(
            compartment_id=self.compartment_id,
            endpoint_type=self.endpoint_type,
            forwarding_address=self.forwarding_address,
            id=self.id,
            is_forwarding=self.is_forwarding,
            is_listening=self.is_listening,
            listening_address=self.listening_address,
            name=self.name,
            nsg_ids=self.nsg_ids,
            resolver_endpoint_name=self.resolver_endpoint_name,
            resolver_id=self.resolver_id,
            scope=self.scope,
            self=self.self,
            state=self.state,
            subnet_id=self.subnet_id,
            time_created=self.time_created,
            time_updated=self.time_updated)
def get_resolver_endpoint(resolver_endpoint_name: Optional[str] = None,
                          resolver_id: Optional[str] = None,
                          scope: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetResolverEndpointResult:
    """
    This data source provides details about a specific Resolver Endpoint resource in Oracle Cloud Infrastructure DNS service.

    Gets information about a specific resolver endpoint. Note that attempting to get a resolver endpoint
    in the DELETED lifecycle state will result in a `404` response to be consistent with other operations of the
    API. Requires a `PRIVATE` scope query parameter.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_oci as oci

    test_resolver_endpoint = oci.dns.get_resolver_endpoint(resolver_endpoint_name=oci_dns_resolver_endpoint["test_resolver_endpoint"]["name"],
        resolver_id=oci_dns_resolver["test_resolver"]["id"],
        scope="PRIVATE")
    ```


    :param str resolver_endpoint_name: The name of the target resolver endpoint.
    :param str resolver_id: The OCID of the target resolver.
    :param str scope: Value must be `PRIVATE` when listing private name resolver endpoints.
    """
    # Provider-side invoke arguments use camelCase keys.
    __args__ = dict()
    __args__['resolverEndpointName'] = resolver_endpoint_name
    __args__['resolverId'] = resolver_id
    __args__['scope'] = scope
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke; `typ=` tells the pulumi runtime how to deserialise
    # the raw result into GetResolverEndpointResult.
    __ret__ = pulumi.runtime.invoke('oci:dns/getResolverEndpoint:getResolverEndpoint', __args__, opts=opts, typ=GetResolverEndpointResult).value

    return AwaitableGetResolverEndpointResult(
        compartment_id=__ret__.compartment_id,
        endpoint_type=__ret__.endpoint_type,
        forwarding_address=__ret__.forwarding_address,
        id=__ret__.id,
        is_forwarding=__ret__.is_forwarding,
        is_listening=__ret__.is_listening,
        listening_address=__ret__.listening_address,
        name=__ret__.name,
        nsg_ids=__ret__.nsg_ids,
        resolver_endpoint_name=__ret__.resolver_endpoint_name,
        resolver_id=__ret__.resolver_id,
        scope=__ret__.scope,
        self=__ret__.self,
        state=__ret__.state,
        subnet_id=__ret__.subnet_id,
        time_created=__ret__.time_created,
        time_updated=__ret__.time_updated)
| 41.228571 | 326 | 0.669179 | 8,629 | 0.747488 | 803 | 0.06956 | 7,731 | 0.669699 | 0 | 0 | 4,491 | 0.389033 |
45b52df1d91555aa0d10d06f736b608de959c083 | 60 | py | Python | tests/__init__.py | ExterraGroup/pyrsi | 7999e58dc9260ec61dd7efe33c32dbcf02fdfa86 | [
"MIT"
] | 8 | 2018-12-10T17:07:35.000Z | 2020-12-25T01:25:15.000Z | tests/__init__.py | ExterraGroup/pyrsi | 7999e58dc9260ec61dd7efe33c32dbcf02fdfa86 | [
"MIT"
] | 1 | 2020-01-15T03:52:47.000Z | 2020-01-15T03:52:47.000Z | tests/__init__.py | ExterraGroup/pyrsi | 7999e58dc9260ec61dd7efe33c32dbcf02fdfa86 | [
"MIT"
] | 2 | 2019-05-16T16:43:30.000Z | 2020-06-05T11:24:07.000Z | # -*- coding: utf-8 -*-
"""Unit test package for pyrsi."""
| 15 | 34 | 0.55 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.95 |
45b610b0abf81014b491f3486a035e492906a5f7 | 3,980 | py | Python | src/morphforge/traces/methods/trace_methods_std_filters.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | 1 | 2021-01-21T11:31:59.000Z | 2021-01-21T11:31:59.000Z | src/morphforge/traces/methods/trace_methods_std_filters.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null | src/morphforge/traces/methods/trace_methods_std_filters.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.traces.traceobjpluginctrl import copy_trace_attrs
from morphforge.traces.traceobjpluginctrl import TraceMethodCtrl
from morphforge.traces import TraceFixedDT
from morphforge import units
import numpy as np
def _butterworthfilter(tr, filterorder, cutoff_frequency):
    """Low-pass Butterworth filter of a fixed-dt trace.

    tr: the TraceFixedDT to filter.
    filterorder: order of the Butterworth filter.
    cutoff_frequency: cutoff as a quantity carrying frequency units.
    Returns a new TraceFixedDT with the filtered data; attrs are copied
    from `tr` with a "(Butterworth Filtered)" comment appended.
    """
    import scipy.signal
    # Bug fix: quantities' rescale() returns a *new* quantity; the original
    # code called it and discarded the result, so a cutoff given in any unit
    # other than Hz was used unconverted.
    cutoff_hz = float(cutoff_frequency.rescale('Hz'))
    sampling_hz = 1 / float(tr.get_dt_new().rescale('s'))
    nyquist_hz = sampling_hz / 2.0
    # butter() expects the cutoff normalised to the Nyquist frequency (0..1).
    cutoff_norm = cutoff_hz / nyquist_hz
    (coeff_num, coeff_denom) = scipy.signal.filter_design.butter(filterorder, cutoff_norm)
    filteredsignal = scipy.signal.lfilter(coeff_num, coeff_denom, tr.data_pts_np)
    tr_new = TraceFixedDT(time=tr.time_pts, data=filteredsignal * tr.data_unit)
    copy_trace_attrs(tr, tr_new, comment="+(Butterworth Filtered)")
    return tr_new


TraceMethodCtrl.register(TraceFixedDT, 'filterbutterworth', _butterworthfilter, can_fallback_to_fixed_trace=True)
def _besselfilter(tr, filterorder, cutoff_frequency):
    """Low-pass Bessel filter of a fixed-dt trace.

    tr: the TraceFixedDT to filter.
    filterorder: order of the Bessel filter.
    cutoff_frequency: cutoff as a quantity carrying frequency units.
    Returns a new TraceFixedDT whose time base is shifted left to
    compensate the filter delay; attrs are copied from `tr`.
    """
    import scipy.signal
    # Bug fix: quantities' rescale() returns a *new* quantity; the original
    # code called it and discarded the result, so a cutoff given in any unit
    # other than Hz was used unconverted.
    cutoff_hz = float(cutoff_frequency.rescale('Hz'))
    sampling_hz = 1 / float(tr.get_dt_new().rescale('s'))
    nyquist_hz = sampling_hz / 2.0
    # bessel() expects the cutoff normalised to the Nyquist frequency (0..1).
    cutoff_norm = cutoff_hz / nyquist_hz
    (coeff_num, coeff_denom) = scipy.signal.filter_design.bessel(filterorder, cutoff_norm)
    filteredsignal = scipy.signal.lfilter(coeff_num, coeff_denom, tr.data_pts_np)
    # Shift the time axis by the filter length to compensate the group delay.
    time_shift = tr.get_dt_new() * max(len(coeff_denom), len(coeff_num))
    tr_new = TraceFixedDT(time=tr.time_pts - time_shift,
                          data=filteredsignal * tr.data_unit,
                          )
    copy_trace_attrs(tr, tr_new, comment="+(Bessel Filtered)")
    return tr_new


TraceMethodCtrl.register(TraceFixedDT, 'filterbessel', _besselfilter, can_fallback_to_fixed_trace=True)
def _filterlowpassrc(tr, tau):
    """First-order low-pass (RC) filter of a fixed-dt trace.

    tr: the TraceFixedDT to filter.
    tau: filter time constant (a quantity commensurable with tr's dt).
    Returns a new TraceFixedDT with the smoothed data; attrs are copied
    from `tr` with an "(LP RC Filtered)" comment appended.
    """
    import scipy.signal
    assert isinstance(tr, TraceFixedDT)
    # Dimensionless smoothing factor: dt / tau.
    smoothing = float((1. / tau * tr.get_dt_new()).rescale(units.dimensionless))
    denominator = np.array([1, smoothing - 1])
    numerator = np.array([0, smoothing])
    filtered = scipy.signal.lfilter(numerator, denominator, tr.data_pts_np)
    tr_new = TraceFixedDT(time=tr.time_pts, data=filtered * tr.data_unit)
    copy_trace_attrs(tr, tr_new, comment="+(LP RC Filtered)")
    return tr_new


TraceMethodCtrl.register(TraceFixedDT, 'filterlowpassrc', _filterlowpassrc, can_fallback_to_fixed_trace=True)
| 39.019608 | 113 | 0.710804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,636 | 0.411055 |
45b6462ff728650517df81b64394ea4eb748f3d3 | 160 | py | Python | snek/exts/syncer/__init__.py | Snek-Network/snek | 5f443c00d701c985ef9362d0d98d2ac07b1c56e0 | [
"MIT"
] | null | null | null | snek/exts/syncer/__init__.py | Snek-Network/snek | 5f443c00d701c985ef9362d0d98d2ac07b1c56e0 | [
"MIT"
] | 20 | 2020-07-25T17:16:46.000Z | 2020-10-01T19:05:55.000Z | snek/exts/syncer/__init__.py | Snek-Network/snek | 5f443c00d701c985ef9362d0d98d2ac07b1c56e0 | [
"MIT"
] | 3 | 2020-08-02T20:15:58.000Z | 2020-12-29T08:48:12.000Z | from snek.bot import Snek
from snek.exts.syncer.cog import Syncer
def setup(bot: Snek) -> None:
    """Extension entry point: attach the `Syncer` cog to the bot."""
    cog = Syncer(bot)
    bot.add_cog(cog)
| 20 | 39 | 0.675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.175 |
45b69d9e8f458c8ffc0da247fee252792dcb4f07 | 1,176 | py | Python | Python/Testing/NoiseRemove.py | mishranilesh012/Natural_Language_Processing_Techniques | cb7056d07ce339929eea289cb27aff6d08850781 | [
"Apache-2.0"
] | 1 | 2019-04-02T18:15:36.000Z | 2019-04-02T18:15:36.000Z | Python/Testing/NoiseRemove.py | mishranilesh012/Natural_Language_Processing_Techniques | cb7056d07ce339929eea289cb27aff6d08850781 | [
"Apache-2.0"
] | null | null | null | Python/Testing/NoiseRemove.py | mishranilesh012/Natural_Language_Processing_Techniques | cb7056d07ce339929eea289cb27aff6d08850781 | [
"Apache-2.0"
] | null | null | null | import struct
import scipy.io.wavfile as wf
import numpy
import pydub
# for i in range(wave_file.getnframes()):
# # read a single frame and advance to next frame
# current_frame = wave_file.readframes(1)
#
# # check for silence
# silent = True
# # wave frame samples are stored in little endian**
# # this example works for a single channel 16-bit per sample encoding
# unpacked_signed_value = struct.unpack("<h", current_frame) # *
# if abs(unpacked_signed_value[0]) > 500:
# silent = False
#
# if silent:
# print("Frame %s is silent." % wave_file.tell())
# else:
# print("Frame %s is not silent." % wave_file.tell())
# rate, data = wf.read('testing.wav')
# # data0 is the data from channel 0.
# data0 = data[:, 0]
#
# print(data0)
# from pydub import AudioSegment
# from pydub.silence import detect_silence, detect_nonsilent
#
# song = AudioSegment.from_wav("soundaudio.wav")
# val = detect_silence(song)
# print(val)
from pyAudioAnalysis import audioSegmentation as aS
# Segment-classify the wav file with the pretrained SVM model stored at
# "data/svmSM".  NOTE(review): argument semantics assumed from names --
# `True` presumably enables plotting and the last path points at ground-truth
# segments used to compute the accuracy (acc) and confusion matrix (CM);
# confirm against the pyAudioAnalysis documentation.
[flagsInd, classesAll, acc, CM] = aS.mtFileClassification("data/scottish.wav", "data/svmSM", "svm", True, 'data/scottish.segments')
45b6b44fc3e96a3e82849428dcf6936a0c8ee650 | 67 | py | Python | merchant-server/constants.py | googleinterns/product-catalog-builder-for-smbs | c328801df5ac5064bf62372f3d32299329c0e361 | [
"Apache-2.0"
] | 2 | 2020-05-28T05:35:37.000Z | 2020-05-29T15:49:22.000Z | merchant-server/constants.py | googleinterns/product-catalog-builder-for-smbs | c328801df5ac5064bf62372f3d32299329c0e361 | [
"Apache-2.0"
] | 13 | 2020-05-28T10:42:18.000Z | 2022-03-31T04:10:45.000Z | merchant-server/constants.py | googleinterns/product-catalog-builder-for-smbs | c328801df5ac5064bf62372f3d32299329c0e361 | [
"Apache-2.0"
] | 1 | 2020-10-18T06:23:00.000Z | 2020-10-18T06:23:00.000Z | NEW_ORDER = "NEW_ORDER"
ONGOING = "ONGOING"
PRODUCTS_PER_PAGE = 10
| 16.75 | 23 | 0.761194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.298507 |
45b7cd551ab39a42cf6f34b3fa05a631ee56b120 | 6,836 | py | Python | Keras_2_trainOnBatch/train.py | sunshower76/Polyp-Segmentation | 764a62dd8de134e462939aafb0c1c24d67dfb564 | [
"MIT"
] | 2 | 2020-02-13T08:09:14.000Z | 2020-09-04T01:52:06.000Z | Keras_2_trainOnBatch/train.py | sunshower76/Polyp-Segmentation | 764a62dd8de134e462939aafb0c1c24d67dfb564 | [
"MIT"
] | null | null | null | Keras_2_trainOnBatch/train.py | sunshower76/Polyp-Segmentation | 764a62dd8de134e462939aafb0c1c24d67dfb564 | [
"MIT"
] | null | null | null | import os
"""
# If you have multi-gpu, designate the number of GPU to use.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "6"
"""
import argparse
import logging
from tqdm import tqdm # progress bar
import numpy as np
import matplotlib.pyplot as plt
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import segmentation_models as sm
from segmentation_models.utils import set_trainable
from dataset import DataGenerator
def train_model(model, train_gen, valid_gen, epochs, batch_size, save_cp=True):
    """Train `model` batch-by-batch with train_on_batch, validating each epoch.

    model: compiled keras model reporting (loss, iou) per batch.
    train_gen / valid_gen: keras Sequence-style generators yielding
        (images, masks) batches.
    epochs, batch_size: training schedule parameters.
    save_cp: when True, save model weights after every epoch into the
        module-level `checkpoint_dir` (a global set in __main__).
    """
    total_batch_count = 0
    train_batch_num = len(train_gen)
    train_num = train_batch_num * batch_size
    #train_gen_out = iter_sequence_infinite(train_gen)

    valid_batch_num = len(valid_gen)
    valid_num = valid_batch_num * batch_size
    #valid_gen_out = iter_sequence_infinite(valid_gen)

    for epoch in range(epochs):  # interation as many epochs
        set_trainable(model)
        epoch_loss = 0  # loss in this epoch
        epoch_iou = 0
        count = 0

        with tqdm(total=train_num, desc=f'Epoch {epoch + 1}/{epochs}', position=0, leave=True, unit='img') as pbar:  # make progress bar
            for batch in train_gen:
                #batch = next(train_gen_out)
                imgs = batch[0]
                true_masks = batch[1]
                loss, iou = model.train_on_batch(imgs, true_masks)  # value of loss of this batch
                epoch_loss += loss
                epoch_iou += iou

                pbar.set_postfix(**{'Batch loss': loss, 'Batch IoU': iou})  # floating the loss at the post in the pbar
                pbar.update(imgs.shape[0])  # update progress
                count += 1
                total_batch_count += 1

        # Epoch summary: averages of the per-batch metrics.
        print( "Epoch : loss: {}, IoU : {}".format(epoch_loss/count, epoch_iou/count))

        # Do validation
        validation_model(model, valid_gen, valid_num)
        # Reshuffle/reset both generators for the next epoch.
        train_gen.on_epoch_end()
        valid_gen.on_epoch_end()

        if save_cp:
            try:
                if not os.path.isdir(checkpoint_dir):
                    os.mkdir(checkpoint_dir)
                    logging.info('Created checkpoint directory')
                else:
                    pass
            except OSError:
                pass
            model.save_weights(os.path.join(checkpoint_dir , f'CP_epoch{epoch + 1}.h5'))
            logging.info(f'Checkpoint {epoch + 1} saved !')
def validation_model(model, valid_gen, valid_num):
    """Run one validation pass, print mean loss/IoU, and plot one example.

    model: compiled keras model reporting (loss, iou) from test_on_batch.
    valid_gen: generator of (images, masks) batches.
    valid_num: total validation image count (for the progress bar).

    Side effects: prints metrics and opens a matplotlib window showing the
    last batch's first image, its ground-truth mask, and the prediction.
    NOTE(review): `imgs`/`true_masks` are read after the loop, so an empty
    `valid_gen` would raise NameError -- confirm generators are never empty.
    """
    epoch_loss = 0  # loss in this epoch
    epoch_iou = 0
    count = 0
    with tqdm(total=valid_num, desc='Validation round', position=0, leave=True, unit='img') as pbar:  # make progress bar
        for batch in valid_gen:
            #batch = next(valid_gen_out)
            imgs = batch[0]
            true_masks = batch[1]
            loss, iou = model.test_on_batch(imgs, true_masks)  # value of loss of this batch
            epoch_loss += loss
            epoch_iou += iou
            pbar.set_postfix(**{'Batch, loss': loss, 'Batch IoU': iou})  # floating the loss at the post in the pbar
            pbar.update(imgs.shape[0])  # update progress
            count += 1

    print("Validation loss: {}, IoU: {}".format(epoch_loss / count, epoch_iou / count))

    # Visual sanity check on the first image of the last validation batch.
    pred_mask = model.predict(np.expand_dims(imgs[0],0))
    plt.subplot(131)
    plt.imshow(imgs[0])
    plt.subplot(132)
    plt.imshow(true_masks[0].squeeze(), cmap="gray")
    plt.subplot(133)
    plt.imshow(pred_mask.squeeze(), cmap="gray")
    plt.show()
    print()
def get_args():
    """Parse command-line options for training (epochs, batch size, lr,
    backbone name, optional weight file, input resize, validation split %).

    Note: `-w/--weight` is stored as `args.load` and defaults to False,
    so a falsy check selects "no pretrained weights".
    """
    parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-e', '--epochs', metavar='E', type=int, default=100,
                        help='Number of epochs', dest='epochs')
    parser.add_argument('-b', '--batch_size', metavar='B', type=int, nargs='?', default=4,
                        help='Batch size', dest='batch_size')
    parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=1e-4,
                        help='Learning rate', dest='lr')
    parser.add_argument('-bb', '--backbone', default='resnet50', metavar='FILE',
                        help="backcone name")
    parser.add_argument('-w', '--weight', dest='load', type=str, default=False,
                        help='Load model from a .h5 file')
    parser.add_argument('-s', '--resizing', dest='resizing', type=int, default=384,
                        help='Downscaling factor of the images')
    parser.add_argument('-v', '--validation', dest='val', type=float, default=20.0,
                        help='Percent of the data that is used as validation (0-100)')

    return parser.parse_args()
if __name__ == '__main__':
    # Data locations; checkpoint_dir is read as a global by train_model().
    img_dir = '../data/train/imgs/'  # ./data/train/imgs/CVC_Original/'
    mask_dir = '../data/train/masks/'  # ./data/train/masks/CVC_Ground Truth/'
    checkpoint_dir = './checkpoints'
    args = get_args()

    # train path
    train_ids = os.listdir(img_dir)
    # Validation Data Size
    n_val = int(len(train_ids) * args.val/100)  # size of validation set

    valid_ids = train_ids[:n_val]  # list of image ids used for validation of result 0 to 9
    train_ids = train_ids[n_val:]  # list of image ids used for training dataset
    # print(valid_ids, "\n\n")

    print("training_size: ", len(train_ids), "validation_size: ", len(valid_ids))

    train_gen = DataGenerator(train_ids, img_dir, mask_dir, img_size=args.resizing, batch_size=args.batch_size)
    valid_gen = DataGenerator(valid_ids, img_dir, mask_dir, img_size=args.resizing, batch_size=args.batch_size)

    print("total training batches: ", len(train_gen))
    print("total validaton batches: ", len(valid_gen))
    # NOTE(review): train_steps/valid_steps are computed but not used in this
    # script (train_model iterates the generators directly).
    train_steps = len(train_ids) // args.batch_size
    valid_steps = len(valid_ids) // args.batch_size

    # define model
    model = sm.Unet(args.backbone, encoder_weights='imagenet')

    optimizer = optimizers.Adam(lr=args.lr, decay=1e-4)
    model.compile(
        optimizer=optimizer,
        # "Adam",
        loss=sm.losses.bce_dice_loss,  # sm.losses.bce_jaccard_loss, # sm.losses.binary_crossentropy,
        metrics=[sm.metrics.iou_score],
    )
    #model.summary()

    # NOTE(review): these callbacks are built but never passed anywhere --
    # train_model() uses train_on_batch, which bypasses keras callbacks.
    callbacks = [
        EarlyStopping(patience=6, verbose=1),
        ReduceLROnPlateau(factor=0.1, patience=3, min_lr=1e-7, verbose=1),
        ModelCheckpoint('./weights.Epoch{epoch:02d}-Loss{loss:.3f}-VIou{val_iou_score:.3f}.h5', verbose=1,
                        monitor='val_accuracy', save_best_only=True, save_weights_only=True)
    ]

    train_model(model=model, train_gen=train_gen,
                valid_gen=valid_gen, epochs=args.epochs, batch_size=args.batch_size)
| 39.744186 | 137 | 0.628584 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,917 | 0.280427 |
45b8989508eca27d897d3e4bed1d5e5680833bf5 | 295 | py | Python | app/api/auth/api_v1/scheme.py | renovate-tests/pol | dca9aa4ce34273575d69a140dc3bb1d2ac14ecbf | [
"MIT"
] | 5 | 2019-05-11T05:14:44.000Z | 2019-09-07T10:22:53.000Z | app/api/auth/api_v1/scheme.py | renovate-tests/pol | dca9aa4ce34273575d69a140dc3bb1d2ac14ecbf | [
"MIT"
] | 161 | 2019-09-09T07:30:25.000Z | 2022-03-14T19:52:43.000Z | app/api/auth/api_v1/scheme.py | renovate-tests/pol | dca9aa4ce34273575d69a140dc3bb1d2ac14ecbf | [
"MIT"
] | 3 | 2019-09-07T13:15:05.000Z | 2020-05-06T04:30:46.000Z | from fastapi.security.api_key import APIKeyCookie, APIKeyHeader
API_KEY_NAME = "api_key"
cookie_scheme = APIKeyCookie(name="bgm-tv-auto-tracker", auto_error=False)
API_KEY_HEADER = APIKeyHeader(name="api-key", auto_error=False)
API_KEY_COOKIES = APIKeyCookie(name="api-key", auto_error=False)
| 36.875 | 74 | 0.80678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.162712 |
45b987982837b9ed03a23d347da0eb3d6742289f | 1,447 | py | Python | py_code/concise-tensorflow/cnn_model.py | xiangnan-fan/proj01 | 856b1a444a526fa35e3fc1328669526429fd56af | [
"Apache-2.0"
] | null | null | null | py_code/concise-tensorflow/cnn_model.py | xiangnan-fan/proj01 | 856b1a444a526fa35e3fc1328669526429fd56af | [
"Apache-2.0"
] | null | null | null | py_code/concise-tensorflow/cnn_model.py | xiangnan-fan/proj01 | 856b1a444a526fa35e3fc1328669526429fd56af | [
"Apache-2.0"
] | null | null | null | #!/bin/python3
# encoding: utf-8
import tensorflow as tf
tf.enable_eager_execution()
class CNN(tf.keras.Model):
    """Two conv/pool stages plus two dense layers for 28x28 single-channel
    input (e.g. MNIST); logits over 10 classes."""

    def __init__(self):
        super().__init__()
        relu = tf.nn.relu
        self.conv1 = tf.keras.layers.Conv2D(filters=32, kernel_size=[5, 5],
                                            padding="same", activation=relu)
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
        self.conv2 = tf.keras.layers.Conv2D(filters=64, kernel_size=[5, 5],
                                            padding="same", activation=relu)
        self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
        self.flatten = tf.keras.layers.Reshape(target_shape=(7 * 7 * 64,))
        self.dense1 = tf.keras.layers.Dense(units=1024, activation=relu)
        self.dense2 = tf.keras.layers.Dense(units=10)

    def call(self, inputs):
        """Forward pass: reshape flat input to NHWC, then run the pipeline.
        Shapes per stage: 28x28x32 -> 14x14x32 -> 14x14x64 -> 7x7x64 ->
        3136 -> 1024 -> 10 (per batch element)."""
        x = tf.reshape(inputs, [-1, 28, 28, 1])
        for layer in (self.conv1, self.pool1, self.conv2, self.pool2,
                      self.flatten, self.dense1, self.dense2):
            x = layer(x)
        return x

    def predict(self, inputs):
        """Return argmax class ids for `inputs`.
        NOTE(review): shadows tf.keras.Model.predict with different
        semantics (class ids rather than raw outputs)."""
        logits = self(inputs)
        return tf.argmax(logits, axis=-1)
| 32.886364 | 78 | 0.559088 | 1,357 | 0.937802 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.144437 |
45bc6a97de7432ce7d1214a830d477fd1ce1003b | 222 | py | Python | app/test/test_DQI.py | qianjing2020/lambda_lab | 53a29796e42fe1d2c4e5785eaa65938cb64af67c | [
"MIT"
] | null | null | null | app/test/test_DQI.py | qianjing2020/lambda_lab | 53a29796e42fe1d2c4e5785eaa65938cb64af67c | [
"MIT"
] | 6 | 2021-04-30T21:10:55.000Z | 2022-03-12T00:32:07.000Z | app/test/test_DQI.py | qianjing2020/lambda_lab | 53a29796e42fe1d2c4e5785eaa65938cb64af67c | [
"MIT"
] | null | null | null | import context
from modules.data_preprocess import DataCleaning, DataQualityCheck
from modules.db_connect import dbConnect
from test_sequence import sale
# Smoke-check: run the quality-control report over the shared `sale`
# test sequence and dump the result to stdout for manual inspection.
qc = DataQualityCheck()
result = qc.generate_QC(sale)
print(result)
45be19aa04aea9d081ff9b9affbf49589fe19ece | 411 | py | Python | traceml/traceml/vendor/matplotlylib/__init__.py | jinheeson1008/tensorflow-lstm-regression | f31fc1181a5696a25f5737398ee0715c24626248 | [
"Apache-2.0"
] | 4 | 2022-01-07T11:30:53.000Z | 2022-03-22T11:48:20.000Z | traceml/traceml/vendor/matplotlylib/__init__.py | jinheeson1008/tensorflow-lstm-regression | f31fc1181a5696a25f5737398ee0715c24626248 | [
"Apache-2.0"
] | 9 | 2022-01-02T08:47:28.000Z | 2022-03-18T23:18:20.000Z | traceml/traceml/vendor/matplotlylib/__init__.py | jinheeson1008/tensorflow-lstm-regression | f31fc1181a5696a25f5737398ee0715c24626248 | [
"Apache-2.0"
] | null | null | null | """
matplotlylib
============
This module converts matplotlib figure objects into JSON structures which can
be understood and visualized by Plotly.
Most of the functionality should be accessed through the parent directory's
'tools' module or 'plotly' package.
"""
from __future__ import absolute_import
from .renderer import PlotlyRenderer
from .mplexporter import Exporter
from .tools import mpl_to_plotly
| 24.176471 | 77 | 0.793187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.647202 |
45bf0a95bab68706143f320e56db3c875bc7a37a | 510 | py | Python | tests/r/test_labour.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | tests/r/test_labour.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | tests/r/test_labour.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.labour import labour
def test_labour():
  """Test module labour.py by downloading
  labour.csv and testing shape of
  extracted data has 569 rows and 4 columns
  """
  test_path = tempfile.mkdtemp()
  x_train, metadata = labour(test_path)
  try:
    assert x_train.shape == (569, 4)
  except AssertionError:
    # Clean up the temp dir on failure, then re-raise the original error.
    # (The original did `raise()`, which raises an empty tuple and crashes
    # with "exceptions must derive from BaseException" instead of the
    # assertion; it also used a bare `except:` that swallowed everything.)
    shutil.rmtree(test_path)
    raise
| 21.25 | 44 | 0.75098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.245098 |
45bf333cb64a334ea0df54fde036b9b03fcc7508 | 7,449 | py | Python | tests/app_test/test_undelete.py | yucealiosman/soft-delete | 43e437c7fb7428c086721d42a907889f22fc8943 | [
"MIT"
] | null | null | null | tests/app_test/test_undelete.py | yucealiosman/soft-delete | 43e437c7fb7428c086721d42a907889f22fc8943 | [
"MIT"
] | null | null | null | tests/app_test/test_undelete.py | yucealiosman/soft-delete | 43e437c7fb7428c086721d42a907889f22fc8943 | [
"MIT"
] | null | null | null | from django.test import TestCase
from .factories import *
from .models import DEFAULT_EMPLOYEE_PK
class UndeleteTest(TestCase):
    def setUp(self):
        # One "default" employee with a fixed pk that tests exclude from bulk
        # deletes, plus two regular employees.  The factory is expected to
        # create a related EmployeeHobby row (the .get() below relies on it);
        # its pks are cached for use by the cascade-check helpers.
        self.default_employee = EmployeeFactory(pk=DEFAULT_EMPLOYEE_PK)
        self.employee = EmployeeFactory()
        self.employee1 = EmployeeFactory()
        self.emp_pk = self.employee.pk
        employee_hobby = EmployeeHobby.objects.get(employee=self.employee)
        self.employee_hobby_pk = employee_hobby.pk
        self.employee_hobby_type_pk = employee_hobby.type.pk
    def test_cascade_with_soft_deletion_model_for_instance(self):
        # Instance-level delete + undelete (via the all_objects manager that
        # still sees soft-deleted rows); the employee must be visible again
        # and cascaded soft-deletable relations revived (helper below).
        self.employee.delete()
        emp = Employee.all_objects.get(pk=self.emp_pk)
        emp.undelete()

        self.assertTrue(Employee.objects.filter(pk=self.emp_pk).exists())
        self.check_cascade_relation()
    def test_cascade_with_not_soft_deletion_model_for_instance(self):
        # Instance-level delete + undelete; relations on models *without*
        # soft deletion are verified by the helper (defined later in class).
        self.employee.delete()
        emp = Employee.all_objects.get(pk=self.emp_pk)
        emp.undelete()
        self.check_cascade_relation_with_no_soft_deletion_model()
    def test_cascade_no_revive_with_soft_deletion_model_for_instance(self):
        # Instance-level delete + undelete; relations flagged as
        # "no revive" must stay deleted (checked by the helper below).
        self.employee.delete()
        emp = Employee.all_objects.get(pk=self.emp_pk)
        emp.undelete()

        self.assertTrue(Employee.objects.filter(pk=self.emp_pk).exists())
        self.check_cascade_no_revive_relation()
    def test_cascade_with_soft_deletion_model_for_qs(self):
        # Same as the instance variant, but delete/undelete operate on
        # querysets (everything except the default employee is deleted).
        Employee.objects.exclude(id=DEFAULT_EMPLOYEE_PK).delete()
        emp = Employee.all_objects.filter(pk=self.emp_pk)
        emp.undelete()

        self.assertTrue(Employee.objects.filter(pk=self.emp_pk).exists())
        self.check_cascade_relation()
def test_cascade_with_not_soft_deletion_model_for_qs(self):
Employee.objects.exclude(id=DEFAULT_EMPLOYEE_PK).delete()
emp = Employee.all_objects.filter(pk=self.emp_pk)
emp.undelete()
self.check_cascade_relation_with_no_soft_deletion_model()
def test_cascade_no_revive_with_soft_deletion_model_for_qs(self):
Employee.objects.exclude(id=DEFAULT_EMPLOYEE_PK).delete()
emp = Employee.all_objects.filter(pk=self.emp_pk)
emp.undelete()
self.assertTrue(Employee.objects.filter(pk=self.emp_pk).exists())
self.check_cascade_no_revive_relation()
def test_counter_for_qs(self):
Employee.objects.exclude(id=DEFAULT_EMPLOYEE_PK).delete()
emp = Employee.all_objects.filter(pk=self.emp_pk)
count, d = emp.undelete()
self.assertEqual(count, 3)
def test_update_or_create_with_multiple_parameter(self):
Employee.objects.exclude(id=DEFAULT_EMPLOYEE_PK).delete()
emp = Employee.all_objects.filter(pk=self.emp_pk)
emp.update_or_create(defaults={'deleted_at': None,
'last_name': 'test5'})
self.assertTrue(Employee.objects.filter(pk=self.emp_pk).exists())
self.assertEqual(Employee.objects.get(pk=self.emp_pk).last_name,
"test5")
def test_cascade_with_soft_deletion_model_using_update_or_create(self):
Employee.objects.exclude(id=DEFAULT_EMPLOYEE_PK).delete()
emp = Employee.all_objects.filter(pk=self.emp_pk)
emp.update_or_create(defaults={'deleted_at': None})
self.assertTrue(Employee.objects.filter(pk=self.emp_pk).exists())
self.check_cascade_relation()
def test_cascade_with_not_soft_deletion_model_using_update_or_create(self):
Employee.objects.exclude(id=DEFAULT_EMPLOYEE_PK).delete()
emp = Employee.all_objects.filter(pk=self.emp_pk)
emp.update_or_create(defaults={'deleted_at': None})
self.check_cascade_relation_with_no_soft_deletion_model()
def test_cascade_no_revive_with_soft_deletion_model_using_update_or_create(
self):
Employee.objects.exclude(id=DEFAULT_EMPLOYEE_PK).delete()
emp = Employee.all_objects.filter(pk=self.emp_pk)
emp.update_or_create(defaults={'deleted_at': None})
self.assertTrue(Employee.objects.filter(pk=self.emp_pk).exists())
self.check_cascade_no_revive_relation()
def test_undelete_with_state(self):
author = AuthorFactory()
old_book = author.books.first()
old_book.delete()
new_book = BookFactory()
author.books.add(new_book)
author.delete()
author.undelete()
self.assertTrue(Author.objects.filter(pk=author.pk).exists())
self.assertTrue(Book.objects.filter(pk=new_book.pk).exists())
self.assertFalse(Book.objects.filter(pk=old_book.pk).exists())
def test_undelete_with_state_qs(self):
author1, author2 = AuthorFactory.create_batch(size=2)
old_book1 = author1.books.first()
old_book2 = author2.books.first()
old_book1.delete()
new_book = BookFactory()
author1.books.add(new_book)
Author.objects.all().delete()
Author.all_objects.all().undelete()
self.assertEqual(Author.objects.all().count(), 2)
self.assertTrue(Book.objects.filter(pk=new_book.pk).exists())
self.assertFalse(Book.objects.filter(pk=old_book1.pk).exists())
self.assertTrue(Book.objects.filter(pk=old_book2.pk).exists())
def test_undelete_state_with_nested_cascade_relation(self):
author = AuthorFactory()
old_book = author.books.first()
old_chapter = old_book.chapters.first()
old_book.delete()
new_book = BookFactory()
new_chapter = new_book.chapters.first()
author.books.add(new_book)
author.delete()
author.undelete()
self.assertTrue(Chapter.objects.filter(pk=new_chapter.pk).exists())
self.assertFalse(Chapter.objects.filter(pk=old_chapter.pk).exists())
def test_undelete_state_with_multiple_cascade_relation(self):
author = AuthorFactory()
old_book = author.books.first()
old_poem = author.poems.first()
old_book.delete()
old_poem.delete()
new_book = BookFactory()
new_poem = PoemFactory()
author.books.add(new_book)
author.poems.add(new_poem)
author.delete()
author.undelete()
self.assertFalse(Book.objects.filter(pk=old_book.pk).exists())
self.assertTrue(Book.objects.filter(pk=new_book.pk).exists())
self.assertFalse(Poem.objects.filter(pk=old_poem.pk).exists())
self.assertTrue(Poem.objects.filter(pk=new_poem.pk).exists())
def check_cascade_relation(self):
# test cascade with soft deletion model
h = HealthStatus.objects.filter(employee=self.emp_pk)
self.assertTrue(h.exists())
self.assertTrue(Checkup.objects.filter(health_status__in=h).exists())
def check_cascade_no_revive_relation(self):
self.assertFalse(
EmployeeProfile.objects.filter(employee=self.emp_pk).exists())
ep = EmployeeProfile.all_objects.filter(employee=self.emp_pk)
self.assertTrue(ep.exists())
def check_cascade_relation_with_no_soft_deletion_model(self):
# test cascade with not soft deletion model
self.assertFalse(
EmployeeHobby.objects.filter(employee=self.emp_pk).exists())
self.assertFalse(HobbyLocation.objects.filter(
employee_hobby=self.employee_hobby_pk).exists())
self.assertTrue(
HobbyType.objects.filter(pk=self.employee_hobby_type_pk).exists())
| 41.614525 | 79 | 0.696469 | 7,347 | 0.986307 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.020808 |
45bffe57f36e86594b385e2bf7ca20c5100b34da | 660 | py | Python | bin/query_config.py | ubccr/pavilion2 | 4c6d043b436761d9162d8824657f51cedc9907cc | [
"BSD-3-Clause"
] | 1 | 2019-08-30T14:34:52.000Z | 2019-08-30T14:34:52.000Z | bin/query_config.py | ubccr/pavilion2 | 4c6d043b436761d9162d8824657f51cedc9907cc | [
"BSD-3-Clause"
] | null | null | null | bin/query_config.py | ubccr/pavilion2 | 4c6d043b436761d9162d8824657f51cedc9907cc | [
"BSD-3-Clause"
] | null | null | null |
from pavilion import config
import argparse
import sys

# Command line: a single positional key to look up in the pavilion config.
parser = argparse.ArgumentParser(
    description="Finds the pavilion configuration, and prints the asked for "
                "config value.")
parser.add_argument('key', nargs=1, action="store",
                    help="The config key to look up.")
args = parser.parse_args()
lookup_key = args.key[0]

# Locate the pavilion configuration file; any failure is fatal.
try:
    pav_cfg = config.find(warn=False)
except Exception as err:
    print(err, file=sys.stderr)
    sys.exit(1)

# Unknown keys are an error; known-but-unset keys print nothing.
if lookup_key not in pav_cfg:
    print("No such config key: '{}'".format(lookup_key), file=sys.stderr)
    sys.exit(1)

value = pav_cfg[lookup_key]
if value is not None:
    print(value)
| 23.571429 | 77 | 0.65303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.215152 |
45c03d1aad06cf4ed40419309fe025d69a433126 | 4,679 | py | Python | examples/assessing_frontier/zt_loader.py | OscarDeGar/py_grama | 0a02c291326b394a8d0c127dad4c58121e568777 | [
"MIT"
] | 13 | 2020-02-24T16:51:51.000Z | 2022-03-30T18:56:55.000Z | examples/assessing_frontier/zt_loader.py | OscarDeGar/py_grama | 0a02c291326b394a8d0c127dad4c58121e568777 | [
"MIT"
] | 78 | 2019-12-30T19:13:21.000Z | 2022-02-23T18:17:54.000Z | examples/assessing_frontier/zt_loader.py | OscarDeGar/py_grama | 0a02c291326b394a8d0c127dad4c58121e568777 | [
"MIT"
] | 7 | 2020-10-19T17:49:25.000Z | 2021-08-15T20:46:52.000Z | ## Data Loader: TE-CCA zT Dataset
# Zachary del Rosario (zdelrosario@outlook.com) 2021-03-12
#
from citrination_client import CitrinationClient, PifSystemReturningQuery
from citrination_client import DataQuery, DatasetQuery, Filter
from matminer.featurizers.base import MultipleFeaturizer
from matminer.featurizers import composition as cf
from pymatgen import Composition
from sl_utils import pifs2df, setResDir
import pandas as pd
import numpy as np
import os
import time
# File-name stem and cache file names for the featurized zT dataset.
prefix = "zT"
file_responses = prefix + "_responses.csv"
file_features = prefix + "_features.csv"
## Helper functions
def get_compostion(c):
    """Attempt to parse *c* as a pymatgen Composition.

    Returns None when parsing fails so callers (e.g. a DataFrame
    ``apply``) can filter out malformed chemical formulas.

    NOTE: the name's spelling ("compostion") is kept for backward
    compatibility with existing callers.
    """
    try:
        return Composition(c)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; catching Exception keeps the intended
        # "invalid formula" fallback without masking interpreter signals.
        return None
def load_data_zT():
    """Load (or download and featurize) the UCSB room-temperature thermoelectrics data.

    Tries the cached CSVs in the results directory first; on a cache miss it
    queries Citrination dataset 178480, tabularizes the PIF records,
    featurizes the compositions with matminer, writes the cache, and returns
    the prepared arrays.

    Returns:
        X_all: featurized composition matrix (rows with all responses valid)
        Y_all: response matrix [Seebeck^2, resistivity, conductivity]
        sign: +1/-1 per response (presumably objective senses — confirm
            with the calling frontier-analysis code)
        keys_response: response column names
        prefix: file-name stem used for the cached CSVs
    """
    # NOTE(review): assumes setResDir() returns a path ending in a
    # separator, since file names are appended by string concatenation.
    results_dir = setResDir()
    ## Metadata
    keys_response = [
        'Seebeck coefficient; squared',
        'Electrical resistivity',
        'Thermal conductivity'
    ]
    sign = np.array([
        +1, # Seebeck
        -1, # Electric resistivity
        -1  # Thermal conductivity
    ])
    ## Load data, if possible
    # --------------------------------------------------
    try:
        # Drop the first column (the CSV row index written by to_csv).
        df_X_all = pd.read_csv(results_dir + file_features)
        X_all = df_X_all.drop(df_X_all.columns[0], axis = 1).values
        df_Y_all = pd.read_csv(results_dir + file_responses)
        Y_all = df_Y_all.drop(df_Y_all.columns[0], axis = 1).values
        print("Cached data loaded.")
    except FileNotFoundError:
        ## Data Import
        # --------------------------------------------------
        # Initialize client
        print("Accessing data from Citrination...")
        site = 'https://citrination.com' # Citrination
        client = CitrinationClient(api_key=os.environ['CITRINATION_API_KEY'], site=site)
        search_client = client.search
        # Aluminum dataset
        dataset_id = 178480 # ucsb_te_roomtemp_seebeck
        system_query = PifSystemReturningQuery(
            size=1000,
            query=DataQuery(
                dataset=DatasetQuery(id=Filter(equal=str(dataset_id)))
            )
        )
        query_result = search_client.pif_search(system_query)
        print("    Found {} PIFs in dataset {}.".format(
            query_result.total_num_hits,
            dataset_id
        ))
        ## Wrangle
        # --------------------------------------------------
        pifs = [x.system for x in query_result.hits]
        # Utility function will tabularize PIFs
        df_response = pifs2df(pifs)
        # Down-select columns to play well with to_numeric
        df_response = df_response[
            ['Seebeck coefficient', 'Electrical resistivity', 'Thermal conductivity']
        ]
        df_response = df_response.apply(pd.to_numeric)
        # Parse chemical compositions
        formulas = [pif.chemical_formula for pif in pifs]
        df_comp = pd.DataFrame(
            columns = ['chemical_formula'],
            data = formulas
        )
        # Join
        df_data = pd.concat([df_comp, df_response], axis = 1)
        print("    Accessed data.")
        # Featurize
        print("Featurizing data...")
        df_data['composition'] = df_data['chemical_formula'].apply(get_compostion)
        f = MultipleFeaturizer([
            cf.Stoichiometry(),
            cf.ElementProperty.from_preset("magpie"),
            cf.ValenceOrbital(props=['avg']),
            cf.IonProperty(fast=True)
        ])
        X = np.array(f.featurize_many(df_data['composition']))
        # Find valid response values
        keys_original = [
            'Seebeck coefficient',
            'Electrical resistivity',
            'Thermal conductivity'
        ]
        # NOTE(review): index_valid_response is computed but never used;
        # only the all-responses-valid index below matters.
        index_valid_response = {
            key: df_data[key].dropna().index.values for key in keys_original
        }
        index_valid_all = df_data[keys_original].dropna().index.values
        X_all = X[index_valid_all, :]
        Y_all = df_data[keys_original].iloc[index_valid_all].values
        # Manipulate columns for proper objective values
        Y_all[:, 0] = Y_all[:, 0] ** 2 # Squared seebeck
        print("    Data prepared; {0:} valid observations.".format(X_all.shape[0]))
        # Cache data
        pd.DataFrame(data = X_all).to_csv(results_dir + file_features)
        pd.DataFrame(
            data = Y_all,
            columns = keys_response
        ).to_csv(results_dir + file_responses)
        print("Data cached in results directory.")
    return X_all, Y_all, sign, keys_response, prefix
if __name__ == "__main__":
    # Script entry point: load (or build and cache) the featurized dataset.
    X_all, Y_all, sign, keys_response, prefix = load_data_zT()
| 32.268966 | 88 | 0.59906 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,341 | 0.2866 |
45c165348de67e43f65f6c67a932110f6b90f7e5 | 577 | py | Python | server/requests_test.py | vikram628/postive.ly | 89e25d1ac37800c8ddbeba0e3936946d3a602fc8 | [
"Apache-2.0"
] | 1 | 2022-03-30T09:21:27.000Z | 2022-03-30T09:21:27.000Z | server/requests_test.py | pbs12/postive.ly | 753edce6de39d4adac4392ba2c6c1971f88277f2 | [
"Apache-2.0"
] | null | null | null | server/requests_test.py | pbs12/postive.ly | 753edce6de39d4adac4392ba2c6c1971f88277f2 | [
"Apache-2.0"
] | 1 | 2021-11-29T20:05:58.000Z | 2021-11-29T20:05:58.000Z | import requests
import json
from datetime import datetime
# Shared HTTP headers: send JSON request bodies, accept plain-text replies.
headers = {"Content-type": "application/json", "Accept": "text/plain"}
def addUser(username="test_user"):
    """Register *username* with the test server.

    The target host is the hard-coded development server used by this
    smoke-test script.  Returns the ``requests.Response`` so callers can
    inspect the result (previously the response was discarded).
    """
    url = "http://10.194.223.134:5000/add_user"
    data = {"username": username}
    return requests.post(url, data=json.dumps(data), headers=headers)
def addMessage(message="My Sample Message"):
    """Post *message* with the current timestamp and print the JSON reply.

    Returns the ``requests.Response`` for optional further inspection.
    """
    url = "http://10.194.223.134:5000/phone_data/test_user"
    # datetime.now().timestamp() is the idiomatic bound-method spelling of
    # the original datetime.timestamp(datetime.now()); identical value.
    data = {"message": message, "timestamp": datetime.now().timestamp()}
    r = requests.post(url, data=json.dumps(data), headers=headers)
    print(r.json())
    return r
# Script body: register the test user, then post one timestamped message.
addUser()
addMessage()
| 28.85 | 92 | 0.686308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.343154 |
45c1caa23ecd364d3384d99dfa0f6a2683ce53fe | 951 | py | Python | virtualisation/triplestore/triplestoreadapter.py | CityPulse/CP_Resourcemanagement | aa670fa89d5e086a98ade3ccc152518be55abf2e | [
"MIT"
] | 2 | 2016-11-03T14:57:45.000Z | 2019-05-13T13:21:08.000Z | virtualisation/triplestore/triplestoreadapter.py | CityPulse/CP_Resourcemanagement | aa670fa89d5e086a98ade3ccc152518be55abf2e | [
"MIT"
] | null | null | null | virtualisation/triplestore/triplestoreadapter.py | CityPulse/CP_Resourcemanagement | aa670fa89d5e086a98ade3ccc152518be55abf2e | [
"MIT"
] | 1 | 2020-07-23T11:27:15.000Z | 2020-07-23T11:27:15.000Z | from abc import abstractmethod
from abc import ABCMeta
__author__ = 'Marten Fischer (m.fischer@hs-osnabrueck.de)'
class TripleStoreAdapter:
    """Abstract interface that concrete RDF triple-store backends implement.

    Method semantics are implied by the method names; see the concrete
    adapter implementations for the authoritative behavior.

    NOTE: ``__metaclass__`` only enforces abstractness under Python 2;
    Python 3 ignores this attribute.
    """

    __metaclass__ = ABCMeta

    @abstractmethod
    def graphExists(self, graphName):
        """Report whether the named graph is present in the store."""

    @abstractmethod
    def createGraph(self, graphName):
        """Create a graph with the given name."""

    @abstractmethod
    def saveTriple(self, graphName, subject, predicate, object):
        """Store one (subject, predicate, object) triple in the named graph."""

    @abstractmethod
    def saveGraph(self, graph, graphName):
        """Persist *graph* under *graphName*."""

    @abstractmethod
    def saveMultipleGraphs(self, serialisedGraph, graphName):
        """Persist an already serialised collection of graphs under *graphName*."""

    @abstractmethod
    def getObservationGraph(self, graphName, sensor, start, end, asGraph):
        """Retrieve observation data for *sensor* in [start, end]; *asGraph*
        selects the return representation."""

    @abstractmethod
    def deleteGraph(self, graphName):
        """Remove the named graph from the store."""

    @abstractmethod
    def getLastQoIData_List(self, graphName, sensorName):
        """Retrieve the latest quality-of-information data for *sensorName*."""

    @abstractmethod
    def getStreamMinMaxDate(self, graphName, sensorName):
        """Retrieve the earliest/latest observation dates for *sensorName*."""
45c2c705433f6454e6ff7fae79f52f092ad52824 | 2,503 | py | Python | tests/test_lists.py | al3xandru/html2md | fe9c49c7a263f4236a057763d9ad68d237e8cf15 | [
"RSA-MD"
] | 8 | 2015-02-14T04:30:16.000Z | 2019-07-10T05:06:49.000Z | tests/test_lists.py | al3xandru/html2md | fe9c49c7a263f4236a057763d9ad68d237e8cf15 | [
"RSA-MD"
] | 2 | 2015-02-20T12:16:54.000Z | 2017-09-21T10:02:53.000Z | tests/test_lists.py | al3xandru/html2md | fe9c49c7a263f4236a057763d9ad68d237e8cf15 | [
"RSA-MD"
] | 4 | 2016-02-06T04:28:16.000Z | 2021-04-22T00:12:39.000Z | import unittest
from context import html2md
from assertions import assertEq
__author__ = 'alex'
class SpecialListsTest(unittest.TestCase):
def test_text_and_paragraph(self):
in_html = '''<ul>
<li>item 1</li>
<li>item 2
<p>item 2 paragraph</p>
<p>item 2 item 2</p>
</li>
<li>item 3</li>
</ul>'''
out_md = '''* item 1
* item 2
item 2 paragraph
item 2 item 2
* item 3'''
assertEq(out_md, html2md.html2md(in_html))
def test_paragraph_mixed(self):
in_html = '''<ul>
<li>item 1</li>
<li>item 2</li>
<li><p>item 3</p>
<p>item 3 paragraph 2</p></li>
<li>item 4</li>
<li>item 5</li>
</ul>'''
out_md = '''* item 1
* item 2
* item 3
item 3 paragraph 2
* item 4
* item 5'''
assertEq(out_md, html2md.html2md(in_html))
def test_blockquote(self):
in_html = '''
<ul>
<li><blockquote>
<p>item 1</p>
</blockquote></li>
<li><blockquote>
<p>item 2 paragraph 1</p>
<p>item 2 paragraph 2</p>
</blockquote></li>
<li><p>item 3</p></li>
</ul>
'''
out_md = '''* > item 1
* > item 2 paragraph 1
> item 2 paragraph 2
* item 3'''
assertEq(out_md, html2md.html2md(in_html))
def test_blockquote_complex(self):
in_html = '''<ul>
<li>item 1</li>
<li><p>item 2</p>
<blockquote>
<p>item 2 paragraph 1</p>
<p>item 2 paragraph 2</p>
</blockquote></li>
<li><p>item 3</p>
<blockquote>
<p>item 3 blockquote</p>
</blockquote></li>
</ul>'''
out_md = '''* item 1
* item 2
> item 2 paragraph 1
> item 2 paragraph 2
* item 3
> item 3 blockquote'''
assertEq(out_md, html2md.html2md(in_html))
def test_cheatsheet(self):
in_html = '''
<ul>
<li><p>A list item.</p>
<p>With multiple paragraphs.</p>
<blockquote>
<p>And a blockquote</p>
</blockquote></li>
<li><p>Another List item with
a hard wrapped 2nd line.</p>
<pre><code>
project/
__init__.py
example1.py
test/
__init__.py
test_example1.py
</code></pre></li>
</ul>'''
out_md = '''* A list item.
With multiple paragraphs.
> And a blockquote
* Another List item with
a hard wrapped 2nd line.
project/
__init__.py
example1.py
test/
__init__.py
test_example1.py'''
assertEq(out_md, html2md.html2md(in_html))
def suite():
return unittest.TestLoader().loadTestsFromTestCase(SpecialListsTest)
if __name__ == '__main__':
unittest.main() | 17.143836 | 72 | 0.581302 | 2,265 | 0.904914 | 0 | 0 | 0 | 0 | 0 | 0 | 1,619 | 0.646824 |
45c36e8fe678ea3c189f3feca284ae944260da2c | 2,281 | py | Python | repo2apptainer/app.py | andersy005/repo2apptainer | 6ba9bda304ecb410e74d53d4124c98aaf0660a1e | [
"BSD-3-Clause"
] | 1 | 2022-03-16T20:12:08.000Z | 2022-03-16T20:12:08.000Z | repo2apptainer/app.py | andersy005/repo2apptainer | 6ba9bda304ecb410e74d53d4124c98aaf0660a1e | [
"BSD-3-Clause"
] | 1 | 2022-03-16T20:13:51.000Z | 2022-03-16T20:13:51.000Z | repo2apptainer/app.py | andersy005/repo2apptainer | 6ba9bda304ecb410e74d53d4124c98aaf0660a1e | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
import pathlib
import subprocess
import pydantic
from repo2docker.app import Repo2Docker
from .config import config as _config
from .console import console
from .helpers import generate_image_name
@pydantic.dataclasses.dataclass
class Repo2Apptainer:
    """An application for converting git repositories to Apptainer/Singularity images."""
    # Git repository URL/path and ref, forwarded to repo2docker.
    repo: str
    ref: str
    # When True, build_sif() rebuilds even if the cached SIF already exists.
    force: bool = False
    def __post_init__(self) -> None:
        # SIF cache location: `cache_dir` from config, else ~/.singularity/cache.
        self.cache_dir = pathlib.Path(
            _config.get('cache_dir') or pathlib.Path.home() / '.singularity/cache'
        ).resolve()
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        # Configure the embedded repo2docker builder; the image name is
        # derived deterministically from (repo, ref).
        self.r2d = Repo2Docker()
        self.r2d.repo = self.repo
        self.r2d.ref = self.ref
        self.r2d.output_image_spec = generate_image_name(self.repo, self.r2d.ref)
        self.sif_image = self.cache_dir / f'{self.r2d.output_image_spec}.sif'
        # "apptainer-in-docker" helper image used to run `build` (see build_sif).
        self.apptainer_image = (
            f"{_config.get('apptainer_in_docker.image')}:{_config.get('apptainer_in_docker.tag')}"
        )
    def build_docker(self) -> None:
        """Build docker image from repository"""
        with console.status('Building Docker image'):
            self.r2d.initialize()
            self.r2d.build()
    def build_sif(self) -> None:
        """Build Apptainer/Singularity Image File (SIF) from built docker image"""
        with console.status('Building Apptainer/Singularity image from the built docker image'):
            # Reuse the cached SIF unless a rebuild was requested.
            if not self.force and self.sif_image.exists():
                console.print(f'Skipping rebuild of {self.sif_image}')
            else:
                # Pull the just-built image straight from the local docker daemon.
                docker_uri = f'docker-daemon://{self.r2d.output_image_spec}:latest'
                # Run `apptainer build` inside the helper container; it needs
                # the host docker socket and writes the SIF into cache_dir
                # (mounted at /work, presumably the container's workdir —
                # confirm against the apptainer_in_docker image).
                cmd = [
                    'docker',
                    'run',
                    '--privileged',
                    '-v',
                    '/var/run/docker.sock:/var/run/docker.sock',
                    '-v',
                    f'{str(self.cache_dir)}:/work',
                    self.apptainer_image,
                    'build',
                    '--force',
                    self.sif_image.name,
                    docker_uri,
                ]
                console.print(cmd)
                subprocess.check_output(cmd)
| 34.560606 | 98 | 0.580009 | 2,011 | 0.881631 | 0 | 0 | 2,043 | 0.89566 | 0 | 0 | 657 | 0.288032 |
45c485b5da6f55ef3077ce6aefa0412ae13bd817 | 28,187 | py | Python | letsencrypt/plugins/standalone/tests/authenticator_test.py | stewnorriss/letsencrypt | 4b8651274f83394909af23905abbb715f150b8bf | [
"Apache-2.0"
] | 1 | 2018-08-27T03:17:09.000Z | 2018-08-27T03:17:09.000Z | letsencrypt/plugins/standalone/tests/authenticator_test.py | rsumnerz/certbot | a65f14635257e6ce0e1b9cfca6bf4c801214d14c | [
"Apache-2.0",
"MIT"
] | null | null | null | letsencrypt/plugins/standalone/tests/authenticator_test.py | rsumnerz/certbot | a65f14635257e6ce0e1b9cfca6bf4c801214d14c | [
"Apache-2.0",
"MIT"
] | null | null | null | """Tests for letsencrypt.plugins.standalone.authenticator."""
import os
import pkg_resources
import psutil
import signal
import socket
import unittest
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import mock
import OpenSSL
from acme import challenges
from acme import jose
from letsencrypt import achallenges
from letsencrypt.tests import acme_util
# 512-bit RSA test key shipped with the letsencrypt test data.
KEY_PATH = pkg_resources.resource_filename(
    "letsencrypt.tests", os.path.join("testdata", "rsa512_key.pem"))
KEY_DATA = pkg_resources.resource_string(
    "letsencrypt.tests", os.path.join("testdata", "rsa512_key.pem"))
# The same key as an ACME JOSE JWK (used when generating DVSNI challenges)...
KEY = jose.JWKRSA(key=jose.ComparableRSAKey(serialization.load_pem_private_key(
    KEY_DATA, password=None, backend=default_backend())))
# ...and as a pyOpenSSL key (assigned to the authenticator's private_key).
PRIVATE_KEY = OpenSSL.crypto.load_privatekey(
    OpenSSL.crypto.FILETYPE_PEM, KEY_DATA)
# Minimal stand-in for the letsencrypt config; provides dvsni_port.
CONFIG = mock.Mock(dvsni_port=5001)
# Classes based on to allow interrupting infinite loop under test
# after one iteration, based on.
# http://igorsobreira.com/2013/03/17/testing-infinite-loops.html
class _SocketAcceptOnlyNTimes(object):
    # pylint: disable=too-few-public-methods
    """Callable stand-in for ``socket.accept``.

    Each call returns a ``(connection, address)``-style tuple until it has
    been invoked *limit* times; the next call raises `CallableExhausted`,
    letting a test break out of an otherwise infinite accept loop.
    """

    def __init__(self, limit):
        self.limit = limit
        self.calls = 0

    def __call__(self):
        self.calls += 1
        if self.calls <= self.limit:
            # Mimic socket.accept(): (new connection, client address).
            return (mock.MagicMock(), "ignored")
        raise CallableExhausted
class CallableExhausted(Exception):
    # pylint: disable=too-few-public-methods
    """Raised once a call-limited callable has been invoked more times
    than its configured limit allows."""
class ChallPrefTest(unittest.TestCase):
"""Tests for chall_pref() method."""
def setUp(self):
from letsencrypt.plugins.standalone.authenticator import \
StandaloneAuthenticator
self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
def test_chall_pref(self):
self.assertEqual(self.authenticator.get_chall_pref("example.com"),
[challenges.DVSNI])
class SNICallbackTest(unittest.TestCase):
    """Tests for sni_callback() method."""
    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        # Element [0] of gen_cert_and_response() is stored as the task
        # payload for the DVSNI nonce domain — presumably the certificate
        # itself; confirm against achallenges.DVSNI.
        self.cert = achallenges.DVSNI(
            challb=acme_util.DVSNI_P,
            domain="example.com", key=KEY).gen_cert_and_response()[0]
        self.authenticator.private_key = PRIVATE_KEY
        self.authenticator.tasks = {"abcdef.acme.invalid": self.cert}
        self.authenticator.child_pid = 12345
    def test_real_servername(self):
        """A known DVSNI nonce domain gets exactly one SSL context set."""
        connection = mock.MagicMock()
        connection.get_servername.return_value = "abcdef.acme.invalid"
        self.authenticator.sni_callback(connection)
        self.assertEqual(connection.set_context.call_count, 1)
        # The installed context must be a real pyOpenSSL context object.
        called_ctx = connection.set_context.call_args[0][0]
        self.assertTrue(isinstance(called_ctx, OpenSSL.SSL.Context))
    def test_fake_servername(self):
        """Test behavior of SNI callback when an unexpected name is received.
        (Currently the expected behavior in this case is to return the
        "first" certificate with which the listener was configured,
        although they are stored in an unordered data structure so
        this might not be the one that was first in the challenge list
        passed to the perform method. In the future, this might result
        in dropping the connection instead.)"""
        connection = mock.MagicMock()
        connection.get_servername.return_value = "example.com"
        self.authenticator.sni_callback(connection)
        self.assertEqual(connection.set_context.call_count, 1)
        called_ctx = connection.set_context.call_args[0][0]
        self.assertTrue(isinstance(called_ctx, OpenSSL.SSL.Context))
class ClientSignalHandlerTest(unittest.TestCase):
"""Tests for client_signal_handler() method."""
def setUp(self):
from letsencrypt.plugins.standalone.authenticator import \
StandaloneAuthenticator
self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
self.authenticator.tasks = {"foononce.acme.invalid": "stuff"}
self.authenticator.child_pid = 12345
def test_client_signal_handler(self):
self.assertTrue(self.authenticator.subproc_state is None)
self.authenticator.client_signal_handler(signal.SIGIO, None)
self.assertEqual(self.authenticator.subproc_state, "ready")
self.authenticator.client_signal_handler(signal.SIGUSR1, None)
self.assertEqual(self.authenticator.subproc_state, "inuse")
self.authenticator.client_signal_handler(signal.SIGUSR2, None)
self.assertEqual(self.authenticator.subproc_state, "cantbind")
# Testing the unreached path for a signal other than these
# specified (which can't occur in normal use because this
# function is only set as a signal handler for the above three
# signals).
self.assertRaises(
ValueError, self.authenticator.client_signal_handler,
signal.SIGPIPE, None)
class SubprocSignalHandlerTest(unittest.TestCase):
    """Tests for subproc_signal_handler() method."""
    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        self.authenticator.tasks = {"foononce.acme.invalid": "stuff"}
        self.authenticator.child_pid = 12345
        self.authenticator.parent_pid = 23456
    # @mock.patch decorators apply bottom-up: sys.exit -> mock_exit,
    # os.kill -> mock_kill (matching the parameter order below).
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.sys.exit")
    def test_subproc_signal_handler(self, mock_exit, mock_kill):
        """SIGINT tears down all sockets, signals the parent, and exits 0."""
        self.authenticator.ssl_conn = mock.MagicMock()
        self.authenticator.connection = mock.MagicMock()
        self.authenticator.sock = mock.MagicMock()
        self.authenticator.subproc_signal_handler(signal.SIGINT, None)
        self.assertEquals(self.authenticator.ssl_conn.shutdown.call_count, 1)
        self.assertEquals(self.authenticator.ssl_conn.close.call_count, 1)
        self.assertEquals(self.authenticator.connection.close.call_count, 1)
        self.assertEquals(self.authenticator.sock.close.call_count, 1)
        # The child must notify the parent process before exiting.
        mock_kill.assert_called_once_with(
            self.authenticator.parent_pid, signal.SIGUSR1)
        mock_exit.assert_called_once_with(0)
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.sys.exit")
    def test_subproc_signal_handler_trouble(self, mock_exit, mock_kill):
        """Test attempting to shut down a non-existent connection.

        (This could occur because none was established or active at the
        time the signal handler tried to perform the cleanup)."""
        self.authenticator.ssl_conn = mock.MagicMock()
        self.authenticator.connection = mock.MagicMock()
        self.authenticator.sock = mock.MagicMock()
        # AttributeError simulates the case where one of these properties
        # is None because no connection exists. We raise it for
        # ssl_conn.close() instead of ssl_conn.shutdown() for better code
        # coverage.
        self.authenticator.ssl_conn.close.side_effect = AttributeError("!")
        self.authenticator.connection.close.side_effect = AttributeError("!")
        self.authenticator.sock.close.side_effect = AttributeError("!")
        self.authenticator.subproc_signal_handler(signal.SIGINT, None)
        self.assertEquals(self.authenticator.ssl_conn.shutdown.call_count, 1)
        self.assertEquals(self.authenticator.ssl_conn.close.call_count, 1)
        self.assertEquals(self.authenticator.connection.close.call_count, 1)
        self.assertEquals(self.authenticator.sock.close.call_count, 1)
        # Cleanup errors must not prevent parent notification and exit.
        mock_kill.assert_called_once_with(
            self.authenticator.parent_pid, signal.SIGUSR1)
        mock_exit.assert_called_once_with(0)
class AlreadyListeningTest(unittest.TestCase):
    """Tests for already_listening() method.

    Each test feeds a fixed list of fake psutil connection records
    (``sconn`` tuples) into the authenticator's port check.
    """
    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
    # @mock.patch decorators apply bottom-up: getUtility -> mock_get_utility,
    # Process -> mock_process, net_connections -> mock_net.
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_race_condition(self, mock_get_utility, mock_process, mock_net):
        # This tests a race condition, or permission problem, or OS
        # incompatibility in which, for some reason, no process name can be
        # found to match the identified listening PID.
        from psutil._common import sconn
        conns = [
            sconn(fd=-1, family=2, type=1, laddr=("0.0.0.0", 30),
                  raddr=(), status="LISTEN", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("192.168.5.10", 32783),
                  raddr=("20.40.60.80", 22), status="ESTABLISHED", pid=1234),
            sconn(fd=-1, family=10, type=1, laddr=("::1", 54321),
                  raddr=("::1", 111), status="CLOSE_WAIT", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("0.0.0.0", 17),
                  raddr=(), status="LISTEN", pid=4416)]
        mock_net.return_value = conns
        mock_process.side_effect = psutil.NoSuchProcess("No such PID")
        # We simulate being unable to find the process name of PID 4416,
        # which results in returning False.
        self.assertFalse(self.authenticator.already_listening(17))
        self.assertEqual(mock_get_utility.generic_notification.call_count, 0)
        mock_process.assert_called_once_with(4416)
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_not_listening(self, mock_get_utility, mock_process, mock_net):
        """No LISTEN record for port 17: no lookup, no notification."""
        from psutil._common import sconn
        conns = [
            sconn(fd=-1, family=2, type=1, laddr=("0.0.0.0", 30),
                  raddr=(), status="LISTEN", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("192.168.5.10", 32783),
                  raddr=("20.40.60.80", 22), status="ESTABLISHED", pid=1234),
            sconn(fd=-1, family=10, type=1, laddr=("::1", 54321),
                  raddr=("::1", 111), status="CLOSE_WAIT", pid=None)]
        mock_net.return_value = conns
        mock_process.name.return_value = "inetd"
        self.assertFalse(self.authenticator.already_listening(17))
        self.assertEqual(mock_get_utility.generic_notification.call_count, 0)
        self.assertEqual(mock_process.call_count, 0)
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_listening_ipv4(self, mock_get_utility, mock_process, mock_net):
        """An IPv4 (family=2) LISTEN on the port is detected and reported."""
        from psutil._common import sconn
        conns = [
            sconn(fd=-1, family=2, type=1, laddr=("0.0.0.0", 30),
                  raddr=(), status="LISTEN", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("192.168.5.10", 32783),
                  raddr=("20.40.60.80", 22), status="ESTABLISHED", pid=1234),
            sconn(fd=-1, family=10, type=1, laddr=("::1", 54321),
                  raddr=("::1", 111), status="CLOSE_WAIT", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("0.0.0.0", 17),
                  raddr=(), status="LISTEN", pid=4416)]
        mock_net.return_value = conns
        mock_process.name.return_value = "inetd"
        result = self.authenticator.already_listening(17)
        self.assertTrue(result)
        # The user is notified once, and the owning process is looked up.
        self.assertEqual(mock_get_utility.call_count, 1)
        mock_process.assert_called_once_with(4416)
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_listening_ipv6(self, mock_get_utility, mock_process, mock_net):
        """An IPv6 (family=10) LISTEN on the port is detected and reported."""
        from psutil._common import sconn
        conns = [
            sconn(fd=-1, family=2, type=1, laddr=("0.0.0.0", 30),
                  raddr=(), status="LISTEN", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("192.168.5.10", 32783),
                  raddr=("20.40.60.80", 22), status="ESTABLISHED", pid=1234),
            sconn(fd=-1, family=10, type=1, laddr=("::1", 54321),
                  raddr=("::1", 111), status="CLOSE_WAIT", pid=None),
            sconn(fd=3, family=10, type=1, laddr=("::", 12345), raddr=(),
                  status="LISTEN", pid=4420),
            sconn(fd=3, family=2, type=1, laddr=("0.0.0.0", 17),
                  raddr=(), status="LISTEN", pid=4416)]
        mock_net.return_value = conns
        mock_process.name.return_value = "inetd"
        result = self.authenticator.already_listening(12345)
        self.assertTrue(result)
        self.assertEqual(mock_get_utility.call_count, 1)
        mock_process.assert_called_once_with(4420)
class PerformTest(unittest.TestCase):
    """Tests for perform() method."""

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        self.achall1 = achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(r="whee", nonce="foo"), "pending"),
            domain="foo.example.com", key=KEY)
        self.achall2 = achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(r="whee", nonce="bar"), "pending"),
            domain="bar.example.com", key=KEY)
        # Deliberately not a DVSNI challenge: perform() must report it as
        # unhandled (False) rather than crash.
        bad_achall = ("This", "Represents", "A Non-DVSNI", "Challenge")
        self.achalls = [self.achall1, self.achall2, bad_achall]

    def test_perform_when_already_listening(self):
        """If the port is occupied, perform() answers None per challenge."""
        self.authenticator.already_listening = mock.Mock()
        self.authenticator.already_listening.return_value = True
        result = self.authenticator.perform([self.achall1])
        self.assertEqual(result, [None])

    def test_can_perform(self):
        """What happens if start_listener() returns True."""
        self.authenticator.start_listener = mock.Mock()
        self.authenticator.start_listener.return_value = True
        self.authenticator.already_listening = mock.Mock(return_value=False)
        result = self.authenticator.perform(self.achalls)
        self.assertEqual(len(self.authenticator.tasks), 2)
        # BUGFIX: dict.has_key() only exists on Python 2 and was removed in
        # Python 3; assertIn uses the "in" operator and works on both.
        self.assertIn(self.achall1.nonce_domain, self.authenticator.tasks)
        self.assertIn(self.achall2.nonce_domain, self.authenticator.tasks)
        self.assertTrue(isinstance(result, list))
        self.assertEqual(len(result), 3)
        self.assertTrue(isinstance(result[0], challenges.ChallengeResponse))
        self.assertTrue(isinstance(result[1], challenges.ChallengeResponse))
        # The non-DVSNI entry is reported as unhandled.
        self.assertFalse(result[2])
        self.authenticator.start_listener.assert_called_once_with(
            CONFIG.dvsni_port, KEY)

    def test_cannot_perform(self):
        """What happens if start_listener() returns False."""
        self.authenticator.start_listener = mock.Mock()
        self.authenticator.start_listener.return_value = False
        self.authenticator.already_listening = mock.Mock(return_value=False)
        result = self.authenticator.perform(self.achalls)
        self.assertEqual(len(self.authenticator.tasks), 2)
        # BUGFIX: replaced Python-2-only dict.has_key() with assertIn.
        self.assertIn(self.achall1.nonce_domain, self.authenticator.tasks)
        self.assertIn(self.achall2.nonce_domain, self.authenticator.tasks)
        self.assertTrue(isinstance(result, list))
        self.assertEqual(len(result), 3)
        self.assertEqual(result, [None, None, False])
        self.authenticator.start_listener.assert_called_once_with(
            CONFIG.dvsni_port, KEY)

    def test_perform_with_pending_tasks(self):
        """perform() refuses to start while earlier tasks are pending."""
        self.authenticator.tasks = {"foononce.acme.invalid": "cert_data"}
        extra_achall = acme_util.DVSNI_P
        self.assertRaises(
            ValueError, self.authenticator.perform, [extra_achall])

    def test_perform_without_challenge_list(self):
        """perform() validates that it receives a non-empty list."""
        extra_achall = acme_util.DVSNI_P
        # This is wrong because a challenge must be specified.
        self.assertRaises(ValueError, self.authenticator.perform, [])
        # This is wrong because it must be a list, not a bare challenge.
        self.assertRaises(
            ValueError, self.authenticator.perform, extra_achall)
        # This is wrong because the list must contain at least one challenge.
        self.assertRaises(
            ValueError, self.authenticator.perform, range(20))
class StartListenerTest(unittest.TestCase):
    """Tests for start_listener() method."""

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)

    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.fork")
    def test_start_listener_fork_parent(self, mock_fork):
        """The parent records the child PID and relays do_parent_process()."""
        self.authenticator.do_parent_process = mock.Mock(return_value=True)
        mock_fork.return_value = 22222
        # start_listener is expected to return the True or False return
        # value from do_parent_process.
        self.assertTrue(self.authenticator.start_listener(1717, "key"))
        self.assertEqual(self.authenticator.child_pid, 22222)
        self.authenticator.do_parent_process.assert_called_once_with(1717)

    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.fork")
    def test_start_listener_fork_child(self, mock_fork):
        """When fork() returns 0 the child path runs do_child_process()."""
        self.authenticator.do_parent_process = mock.Mock()
        self.authenticator.do_child_process = mock.Mock()
        mock_fork.return_value = 0
        self.authenticator.start_listener(1717, "key")
        self.assertEqual(self.authenticator.child_pid, os.getpid())
        self.authenticator.do_child_process.assert_called_once_with(
            1717, "key")
class DoParentProcessTest(unittest.TestCase):
    """Tests for do_parent_process() method."""

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_ok(self, mock_get_utility):
        """Subprocess state "ready" yields True."""
        self.authenticator.subproc_state = "ready"
        self.assertTrue(self.authenticator.do_parent_process(1717))
        self.assertEqual(mock_get_utility.call_count, 1)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_inuse(self, mock_get_utility):
        """Subprocess state "inuse" yields False."""
        self.authenticator.subproc_state = "inuse"
        self.assertFalse(self.authenticator.do_parent_process(1717))
        self.assertEqual(mock_get_utility.call_count, 1)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_cantbind(self, mock_get_utility):
        """Subprocess state "cantbind" yields False."""
        self.authenticator.subproc_state = "cantbind"
        self.assertFalse(self.authenticator.do_parent_process(1717))
        self.assertEqual(mock_get_utility.call_count, 1)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_timeout(self, mock_get_utility):
        # Normally times out in 5 seconds and returns False. We can
        # now set delay_amount to a lower value so that it times out
        # faster than it would under normal use.
        self.assertFalse(
            self.authenticator.do_parent_process(1717, delay_amount=1))
        self.assertEqual(mock_get_utility.call_count, 1)
class DoChildProcessTest(unittest.TestCase):
    """Tests for do_child_process() method."""

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        # Pre-generate a certificate for one pending DVSNI task so the
        # child process has something to serve.
        self.cert = achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(r=("x" * 32), nonce="abcdef"), "pending"),
            domain="example.com", key=KEY).gen_cert_and_response()[0]
        self.authenticator.private_key = PRIVATE_KEY
        self.authenticator.tasks = {"abcdef.acme.invalid": self.cert}
        # PID the child is expected to signal its status back to.
        self.authenticator.parent_pid = 12345

    @mock.patch("letsencrypt.plugins.standalone.authenticator.socket.socket")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.sys.exit")
    def test_do_child_process_cantbind1(
            self, mock_exit, mock_kill, mock_socket):
        # bind() fails with EACCES: the child must report failure to the
        # parent with SIGUSR2 and exit with status 1.
        mock_exit.side_effect = IndentationError("subprocess would exit here")
        eaccess = socket.error(socket.errno.EACCES, "Permission denied")
        sample_socket = mock.MagicMock()
        sample_socket.bind.side_effect = eaccess
        mock_socket.return_value = sample_socket
        # Using the IndentationError as an error that cannot easily be
        # generated at runtime, to indicate the behavior of sys.exit has
        # taken effect without actually causing the test process to exit.
        # (Just replacing it with a no-op causes logic errors because the
        # do_child_process code assumes that calling sys.exit() will
        # cause subsequent code not to be executed.)
        self.assertRaises(
            IndentationError, self.authenticator.do_child_process, 1717, KEY)
        mock_exit.assert_called_once_with(1)
        mock_kill.assert_called_once_with(12345, signal.SIGUSR2)

    @mock.patch("letsencrypt.plugins.standalone.authenticator.socket.socket")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.sys.exit")
    def test_do_child_process_cantbind2(self, mock_exit, mock_kill,
                                        mock_socket):
        # bind() fails with EADDRINUSE: same exit path, but the parent is
        # notified with SIGUSR1 instead of SIGUSR2.
        mock_exit.side_effect = IndentationError("subprocess would exit here")
        eaccess = socket.error(socket.errno.EADDRINUSE, "Port already in use")
        sample_socket = mock.MagicMock()
        sample_socket.bind.side_effect = eaccess
        mock_socket.return_value = sample_socket
        self.assertRaises(
            IndentationError, self.authenticator.do_child_process, 1717, KEY)
        mock_exit.assert_called_once_with(1)
        mock_kill.assert_called_once_with(12345, signal.SIGUSR1)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "socket.socket")
    def test_do_child_process_cantbind3(self, mock_socket):
        """Test case where attempt to bind socket results in an unhandled
        socket error. (The expected behavior is arguably wrong because it
        will crash the program; the reason for the expected behavior is
        that we don't have a way to report arbitrary socket errors.)"""
        eio = socket.error(socket.errno.EIO, "Imaginary unhandled error")
        sample_socket = mock.MagicMock()
        sample_socket.bind.side_effect = eio
        mock_socket.return_value = sample_socket
        self.assertRaises(
            socket.error, self.authenticator.do_child_process, 1717, KEY)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "OpenSSL.SSL.Connection")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.socket.socket")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    def test_do_child_process_success(
            self, mock_kill, mock_socket, mock_connection):
        # accept() succeeds twice, then the helper raises CallableExhausted
        # on the third call to break out of the otherwise-endless loop.
        sample_socket = mock.MagicMock()
        sample_socket.accept.side_effect = _SocketAcceptOnlyNTimes(2)
        mock_socket.return_value = sample_socket
        mock_connection.return_value = mock.MagicMock()
        self.assertRaises(
            CallableExhausted, self.authenticator.do_child_process, 1717, KEY)
        mock_socket.assert_called_once_with()
        sample_socket.bind.assert_called_once_with(("0.0.0.0", 1717))
        sample_socket.listen.assert_called_once_with(1)
        self.assertEqual(sample_socket.accept.call_count, 3)
        # SIGIO tells the parent that the listener is ready.
        mock_kill.assert_called_once_with(12345, signal.SIGIO)

    # TODO: We could have some tests about the fact that the listener
    #       asks OpenSSL to negotiate a TLS connection (and correctly
    #       sets the SNI callback function).
class CleanupTest(unittest.TestCase):
    """Tests for cleanup() method."""

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        self.achall = achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(r="whee", nonce="foononce"), "pending"),
            domain="foo.example.com", key="key")
        self.authenticator.tasks = {self.achall.nonce_domain: "stuff"}
        self.authenticator.child_pid = 12345

    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.time.sleep")
    def test_cleanup(self, mock_sleep, mock_kill):
        """cleanup() interrupts the child process and waits one second."""
        mock_sleep.return_value = None
        mock_kill.return_value = None
        self.authenticator.cleanup([self.achall])
        mock_kill.assert_called_once_with(12345, signal.SIGINT)
        mock_sleep.assert_called_once_with(1)

    def test_bad_cleanup(self):
        """Cleaning up a challenge that was never scheduled must fail."""
        unknown_achall = achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(r="whee", nonce="badnonce"), "pending"),
            domain="bad.example.com", key="key")
        self.assertRaises(
            ValueError, self.authenticator.cleanup, [unknown_achall])
class MoreInfoTest(unittest.TestCase):
    """Trivial tests for the more_info() method."""

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)

    def test_more_info(self):
        """more_info() must run without raising."""
        self.authenticator.more_info()
class InitTest(unittest.TestCase):
    """Tests for prepare() method. (trivially)"""
    # NOTE: the docstring previously said "more_info()", a copy-paste
    # leftover from MoreInfoTest; this class exercises prepare().

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import (
            StandaloneAuthenticator)
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)

    def test_prepare(self):
        """Make sure exceptions aren't raised.

        .. todo:: Add on more once things are setup appropriately.
        """
        self.authenticator.prepare()
if __name__ == "__main__":
    # Run the whole test suite when executed as a script.
    unittest.main()  # pragma: no cover
| 47.373109 | 79 | 0.688438 | 27,000 | 0.957888 | 0 | 0 | 14,931 | 0.529712 | 0 | 0 | 7,303 | 0.259091 |
45c4fed50c074794b1dbfec41eb5a7feda4afe4a | 6,477 | py | Python | pymoo/algorithms/genetic_algorithm.py | Electr0phile/pymoo | 652428473cc68b6d9deada3792635bc8a831b255 | [
"Apache-2.0"
] | 1 | 2020-03-07T08:26:16.000Z | 2020-03-07T08:26:16.000Z | pymoo/algorithms/genetic_algorithm.py | Electr0phile/pymoo | 652428473cc68b6d9deada3792635bc8a831b255 | [
"Apache-2.0"
] | null | null | null | pymoo/algorithms/genetic_algorithm.py | Electr0phile/pymoo | 652428473cc68b6d9deada3792635bc8a831b255 | [
"Apache-2.0"
] | null | null | null | import math
import numpy as np
from pymoo.model.algorithm import Algorithm
from pymoo.model.duplicate import DefaultDuplicateElimination
from pymoo.model.individual import Individual
from pymoo.model.population import Population
class GeneticAlgorithm(Algorithm):
    """Skeleton of a genetic algorithm assembled from pluggable operators.

    The concrete behavior is delegated entirely to the operator objects
    supplied at construction time (sampling, selection, crossover,
    mutation, survival and, optionally, repair / duplicate elimination).
    """

    def __init__(self,
                 pop_size,
                 sampling,
                 selection,
                 crossover,
                 mutation,
                 survival,
                 n_offsprings=None,
                 eliminate_duplicates=DefaultDuplicateElimination(),
                 repair=None,
                 individual=Individual(),
                 **kwargs
                 ):
        # NOTE(review): the DefaultDuplicateElimination() and Individual()
        # defaults are evaluated once at definition time and shared by all
        # instances -- confirm these objects carry no per-run state.
        super().__init__(**kwargs)

        # population size of the genetic algorithm
        self.pop_size = pop_size

        # initial sampling method: object, 2d array, or population (already evaluated)
        self.sampling = sampling

        # the method to be used to select parents for recombination
        self.selection = selection

        # method to do the crossover
        self.crossover = crossover

        # method for doing the mutation
        self.mutation = mutation

        # function to repair an offspring after mutation if necessary
        self.repair = repair

        # survival selection
        self.survival = survival

        # number of offsprings to generate through recombination
        self.n_offsprings = n_offsprings

        # set the duplicate detection class - a boolean value chooses the default duplicate detection
        if isinstance(eliminate_duplicates, bool):
            if eliminate_duplicates:
                self.eliminate_duplicates = DefaultDuplicateElimination()
            else:
                self.eliminate_duplicates = None
        else:
            self.eliminate_duplicates = eliminate_duplicates

        # the object to be used to represent an individual - either individual or derived class
        self.individual = individual

        # if the number of offspring is not set - equal to population size
        if self.n_offsprings is None:
            self.n_offsprings = pop_size

        # other run specific data updated whenever solve is called - to share them in all algorithms
        self.n_gen = None
        self.pop = None
        self.off = None

        # this can be used to store additional data in submodules e.g. survival, recombination and so on
        self.data = {}

    def _initialize(self):
        """Build, optionally repair, evaluate and rank the initial population."""
        # ! get the initial population - different ways are possible

        # provide a whole population object - (individuals might be already evaluated)
        if isinstance(self.sampling, Population):
            pop = self.sampling
        else:
            pop = Population(0, individual=self.individual)
            if isinstance(self.sampling, np.ndarray):
                # a raw 2d design-variable array was provided
                pop = pop.new("X", self.sampling)
            else:
                pop = self.sampling.do(self.problem, self.pop_size, pop=pop, algorithm=self)

        # repair all solutions that are not already evaluated
        if self.repair:
            I = [k for k in range(len(pop)) if pop[k].F is None]
            pop = self.repair.do(self.problem, pop[I], algorithm=self)

        # then evaluate using the objective function
        self.evaluator.eval(self.problem, pop, algorithm=self)

        # that call is a dummy survival to set attributes that are necessary for the mating selection
        if self.survival:
            pop = self.survival.do(self.problem, pop, len(pop), algorithm=self)

        self.pop = pop

    def _next(self):
        """Run one generation: mating, evaluation, merge and survival."""
        # do the mating using the current population
        self.off = self._mating(self.pop, n_max_iterations=100)

        # if the mating could not generate any new offspring (duplicate elimination might make that happen)
        if len(self.off) == 0:
            self.termination.force_termination = True
            return

        # if not the desired number of offspring could be created
        elif len(self.off) < self.n_offsprings:
            if self.verbose:
                print("WARNING: Mating could not produce the required number of (unique) offsprings!")

        # evaluate the offspring
        self.evaluator.eval(self.problem, self.off, algorithm=self)

        # merge the offsprings with the current population
        self.pop = self.pop.merge(self.off)

        # the do survival selection
        self.pop = self.survival.do(self.problem, self.pop, self.pop_size, algorithm=self)

    def _mating(self, pop, n_max_iterations=100):
        """Create up to self.n_offsprings (unique) offspring from pop.

        Stops early after n_max_iterations mating rounds, e.g. when
        duplicate elimination keeps discarding every new offspring.
        """
        # the population object to be used
        off = pop.new()

        # mating counter - counts how often the mating needs to be done to fill up n_offsprings
        n_matings = 0

        # iterate until enough offsprings are created
        while len(off) < self.n_offsprings:

            # how many parents need to be select for the mating - depending on number of offsprings remaining
            n_select = math.ceil((self.n_offsprings - len(off)) / self.crossover.n_offsprings)

            # select the parents for the mating - just an index array
            parents = self.selection.do(pop, n_select, self.crossover.n_parents, algorithm=self)

            # do the crossover using the parents index and the population - additional data provided if necessary
            _off = self.crossover.do(self.problem, pop, parents, algorithm=self)

            # do the mutation on the offsprings created through crossover
            _off = self.mutation.do(self.problem, _off, algorithm=self)

            # repair the individuals if necessary
            if self.repair:
                _off = self.repair.do(self.problem, _off, algorithm=self)

            # drop offspring already present in pop or in the ones collected so far
            if self.eliminate_duplicates is not None:
                _off = self.eliminate_duplicates.do(_off, pop, off)

            # if more offsprings than necessary - truncate them randomly
            if len(off) + len(_off) > self.n_offsprings:
                n_remaining = self.n_offsprings - len(off)
                I = np.random.permutation(len(_off))[:n_remaining]
                _off = _off[I]

            # add to the offsprings and increase the mating counter
            off = off.merge(_off)
            n_matings += 1

            # if no new offsprings can be generated within a pre-specified number of generations
            if n_matings > n_max_iterations:
                break

        return off

    def _finalize(self):
        """Hook for subclasses; nothing to do by default."""
        pass
| 37.011429 | 113 | 0.628377 | 6,243 | 0.963872 | 0 | 0 | 0 | 0 | 0 | 0 | 2,238 | 0.34553 |
45c6f1f7c44194c1019849c21a21086304334bfa | 1,670 | py | Python | frag_permute.py | bluhm/frag-regress | 5a14f47940a3cb2964ea48bbd12340de67d1860e | [
"0BSD"
] | 2 | 2017-09-11T10:17:08.000Z | 2017-09-13T14:55:04.000Z | frag_permute.py | bluhm/frag-regress | 5a14f47940a3cb2964ea48bbd12340de67d1860e | [
"0BSD"
] | null | null | null | frag_permute.py | bluhm/frag-regress | 5a14f47940a3cb2964ea48bbd12340de67d1860e | [
"0BSD"
] | null | null | null | #!/usr/local/bin/python3
print("send 3 non-overlapping ping fragments in all possible orders")

# Fragment layout of the 48-byte echo request (no overlap):
# |----|
#      |----|
#           |----|

import os
from addr import *
from scapy.all import *

# All six orderings of the three fragments.
permute=[]
permute.append([0,1,2])
permute.append([0,2,1])
permute.append([1,0,2])
permute.append([2,0,1])
permute.append([1,2,0])
permute.append([2,1,0])

pid=os.getpid()
payload=b"ABCDEFGHIJKLMNOP"
for p in permute:
    # Bump the pid so each permutation gets unique ICMP/IP identifiers.
    pid += 1
    eid=pid & 0xffff
    packet=IP(src=LOCAL_ADDR, dst=REMOTE_ADDR)/ \
        ICMP(type='echo-request', id=eid)/payload
    # Split the packet (after the 20-byte IP header) into three fragments:
    # bytes 20-28, 28-36 (both MF set) and 36-48 (last fragment).
    frag=[]
    fid=pid & 0xffff
    frag.append(IP(src=LOCAL_ADDR, dst=REMOTE_ADDR, proto=1, id=fid,
        flags='MF')/bytes(packet)[20:28])
    frag.append(IP(src=LOCAL_ADDR, dst=REMOTE_ADDR, proto=1, id=fid,
        frag=1, flags='MF')/bytes(packet)[28:36])
    frag.append(IP(src=LOCAL_ADDR, dst=REMOTE_ADDR, proto=1, id=fid,
        frag=2)/bytes(packet)[36:48])
    # Wrap the fragments in Ethernet frames in the current permutation order.
    eth=[]
    for i in range(3):
        eth.append(Ether(src=LOCAL_MAC, dst=REMOTE_MAC)/frag[p[i]])
    if os.fork() == 0:
        # Child: give the parent's sniffer time to start, then inject.
        time.sleep(1)
        sendp(eth, iface=LOCAL_IF)
        os._exit(0)
    # Parent: capture ICMP traffic and look for the reassembled echo reply.
    ans=sniff(iface=LOCAL_IF, timeout=3, filter=
        "ip and src "+REMOTE_ADDR+" and dst "+LOCAL_ADDR+" and icmp")
    for a in ans:
        # Accept only an unfragmented (frag=0, no flags) echo reply.
        if a and a.type == ETH_P_IP and \
                a.payload.proto == 1 and \
                a.payload.frag == 0 and a.payload.flags == 0 and \
                icmptypes[a.payload.payload.type] == 'echo-reply':
            id=a.payload.payload.id
            print("id=%#x" % (id))
            if id != eid:
                print("WRONG ECHO REPLY ID")
                exit(2)
            data=a.payload.payload.payload.load
            print("payload=%s" % (data))
            if data == payload:
                # Correct reassembly for this permutation; try the next one.
                break
            print("PAYLOAD!=%s" % (payload))
            exit(1)
    else:
        # for/else: the loop finished without break, i.e. no echo reply.
        print("NO ECHO REPLY")
        exit(2)
| 25.30303 | 69 | 0.635928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.168862 |
45c861ea52f629c0da41726ba7d71afeee2dfae5 | 692 | py | Python | tests/lineblocks_test.py | srackham/rimu-py | 3da67cb362b6d34fd363e9f4ce5e0afb019baa4c | [
"MIT"
] | null | null | null | tests/lineblocks_test.py | srackham/rimu-py | 3da67cb362b6d34fd363e9f4ce5e0afb019baa4c | [
"MIT"
] | 4 | 2020-03-24T17:59:43.000Z | 2021-06-02T00:48:53.000Z | tests/lineblocks_test.py | srackham/rimu-py | 3da67cb362b6d34fd363e9f4ce5e0afb019baa4c | [
"MIT"
] | null | null | null | from rimu import lineblocks, io, api
from typing import Dict
def test_render():
    """Each line-block source must render to its expected HTML output."""
    cases: Dict[str, str] = {
        r'# foo': r'<h1>foo</h1>',
        r'// foo': r'',
        r'<image:foo|bar>': r'<img src="foo" alt="bar">',
        r'<<#foo>>': r'<div id="foo"></div>',
        r'.class #id "css"': r'',
        r".safeMode='0'": r'',
        r"|code|='<code>|</code>'": r'',
        r"^='<sup>|</sup>'": r'',
        r"/\.{3}/i = '…'": r'',
        r"{foo}='bar'": r'',
    }
    api.init()
    for source, expected in cases.items():
        writer = io.Writer()
        lineblocks.render(io.Reader(source), writer)
        assert writer.toString() == expected
| 27.68 | 57 | 0.447977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 251 | 0.362717 |
45cb0abbb048c590842b5f4cafecc09e7d95dbd9 | 4,758 | py | Python | outlierDetection/DataGenerator.py | mohazahran/Detecting-anomalies-in-user-trajectories | e1513905c2ef7b87a5050b36060c4a49006e8b87 | [
"BSD-3-Clause"
] | 7 | 2017-02-07T07:27:20.000Z | 2021-04-29T05:20:21.000Z | outlierDetection/DataGenerator.py | mohazahran/Detecting-anomalies-in-user-trajectories | e1513905c2ef7b87a5050b36060c4a49006e8b87 | [
"BSD-3-Clause"
] | null | null | null | outlierDetection/DataGenerator.py | mohazahran/Detecting-anomalies-in-user-trajectories | e1513905c2ef7b87a5050b36060c4a49006e8b87 | [
"BSD-3-Clause"
] | 4 | 2017-02-07T07:27:28.000Z | 2019-12-23T08:43:12.000Z | '''
Created on Nov 30, 2016
@author: zahran
'''
import pandas as pd
import numpy as np
import random
class DataGenerator(object):
    """Samples synthetic user trajectories from a fitted model stored in HDF5.

    The store is assumed to contain the TribeFlow-style matrices
    Theta_zh (P(z|h), env given user) and Psi_sz (object given env),
    plus the user/object id mappings -- TODO confirm store layout
    against the training pipeline.
    """

    def __init__(self, MODEL_PATH, DATA_GEN, perUserSequences):
        """Load model matrices from MODEL_PATH; DATA_GEN is the output file.

        perUserSequences: how many sequences to emit per user.
        """
        self.MODEL_PATH = MODEL_PATH
        self.DATA_GEN = DATA_GEN
        self.perUserSequences = perUserSequences

        store = pd.HDFStore(MODEL_PATH)

        self.Theta_zh = store['Theta_zh'].values
        self.Psi_oz = store['Psi_sz'].values
        # number of steps remembered by the model = sequence length - 1
        self.true_mem_size = store['Dts'].values.shape[1]
        self.hyper2id = dict(store['hyper2id'].values)
        self.obj2id = dict(store['source2id'].values)
        # reverse mapping: numeric object id -> object name
        self.id2obj = dict((v, k) for k, v in self.obj2id.items())
        self.nz, self.nh = self.Theta_zh.shape
        self.no, self.nz = self.Psi_oz.shape

        #normalizing
        #axis 0 is summing the cols. i.e. normalizing by the col sum. (i.e for each env)
        self.Psi_oz = self.Psi_oz / self.Psi_oz.sum(axis=0)
        self.Theta_zh = self.Theta_zh / self.Theta_zh.sum(axis=0)

        #for optimization, save the transitions for each environment
        self.envTransitions = {}

        store.close()

    def getTransitionMatrixForEnv(self, z):
        """Return (caching) the object-to-object transition matrix for env z."""
        #Compute transitions for a given env
        if(z in self.envTransitions):
            return self.envTransitions[z]
        T = np.outer(self.Psi_oz[:, z], self.Psi_oz[:, z]) #the P[ dest | source, z ] matrix
        # forbid self-transitions
        np.fill_diagonal(T, 0)
        T = T / T.sum(axis=0) #Re-normalize
        self.envTransitions[z] = T
        return T #(o x o)

    def sample(self, srcs, probs):
        """Draw one element of srcs according to the distribution probs."""
        #numpy.random.choice(a, size=None, replace=True, p=None)
        #replace =True. i.e. put back the sampled item to the space
        #replace =False. i.e. once picked, it's removed and thus affecting the probability of the remainging items
        mySample = np.random.choice(srcs, 1, replace =True, p=probs)
        return mySample

    def generateOneSequence(self, T, starto):
        """Random-walk true_mem_size steps from object starto using matrix T."""
        seq = [starto]
        currento = starto
        for i in range(self.true_mem_size):
            # column = transition distribution out of the current object
            currento_o = T[:,currento]
            sampledo = self.sample(list(range(0,self.no)), currento_o)[0]
            seq.append(sampledo)
            currento = sampledo
        return seq

    def generateSequenceByUser(self, h):
        """Sample an env for user h, then a trajectory of object names."""
        h_z = self.Theta_zh[:,h]
        sampledZ = self.sample(list(range(0,self.nz)), h_z)[0]
        # first object drawn directly from the env's object distribution
        z_o = self.Psi_oz[:,sampledZ]
        firsto = self.sample(list(range(0,self.no)), z_o)[0]
        T = self.getTransitionMatrixForEnv(sampledZ)
        seqIds = self.generateOneSequence(T, firsto)
        seq = []
        for s in seqIds:
            seq.append(self.id2obj[s])
        return seq

    def generate(self):
        """Write perUserSequences tab-separated sequences per user to DATA_GEN.

        Each line: user name, the objects of one sequence, then one
        'false' label per sequence position (non-anomalous ground truth).
        """
        w = open(self.DATA_GEN, 'w')
        cnt = 1
        for userName in self.hyper2id:
            if(cnt % 10 == 0):
                print(str(cnt)+' users are finished ...')
            cnt+=1
            h = self.hyper2id[userName]
            for i in range(self.perUserSequences):
                w.write(str(userName)+'\t')
                seq = self.generateSequenceByUser(h)
                for s in seq:
                    w.write(s + '\t')
                for g in range(self.true_mem_size+1):
                    w.write('false\t')
                w.write('\n')
        w.close()
def main():
    """Generate five synthetic sequences per user from the lastfm model."""
    # Alternative model kept for reference:
    #MODEL_PATH = '/Users/mohame11/Documents/myFiles/Career/Work/New_Linux/PARSED_pins_repins_win10_noop_NoLeaveOut_pinterest.h5'
    model_path = '/Users/mohame11/Documents/myFiles/Career/Work/New_Linux/lastfm_win10.trace_noob.h5'
    output_path = '/Users/mohame11/Documents/myFiles/Career/Work/New_Linux/lastfmDataGen'
    sequences_per_user = 5
    generator = DataGenerator(model_path, output_path, sequences_per_user)
    generator.generate()
if __name__ == "__main__":
    # Manual sanity check of the sampling distribution, kept for reference:
    # d = {'a':0.0, 'b':0.0, 'c':0.0, 'd':0.0}
    # srcs = d.keys()
    # probs = [0.6, 0.2, 0.15, 0.05]
    # tot = 10000
    # for i in range(tot):
    #     mySample = np.random.choice(srcs, 1, replace =True, p=probs)[0]
    #     d[mySample] += 1
    # for k in srcs:
    #     print(k,d[k]/tot)
    main()
print('DONE!') | 33.744681 | 130 | 0.514922 | 3,702 | 0.778058 | 0 | 0 | 0 | 0 | 0 | 0 | 1,224 | 0.257251 |
45cc48368c5bd4bdb2b8a53510a939cb14c27b75 | 1,644 | py | Python | runtests.py | mattijevi/django-sendgrid | f930cb1759ea034a4a54dc1e077e8e8bb8b7206b | [
"BSD-2-Clause"
] | 7 | 2015-03-29T05:56:50.000Z | 2018-09-13T09:48:18.000Z | runtests.py | mattijevi/django-sendgrid | f930cb1759ea034a4a54dc1e077e8e8bb8b7206b | [
"BSD-2-Clause"
] | 10 | 2015-02-12T17:06:21.000Z | 2019-11-28T07:59:22.000Z | runtests.py | mattijevi/django-sendgrid | f930cb1759ea034a4a54dc1e077e8e8bb8b7206b | [
"BSD-2-Clause"
] | 9 | 2015-01-02T06:27:13.000Z | 2019-06-19T02:11:10.000Z | #!/usr/bin/env python
import sys
import os
import django
from django.conf import settings
if not settings.configured:
# Choose database for settings
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
}
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sendgrid',
'USER': 'root',
})
elif test_db == 'postgres':
DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sendgrid',
'OPTIONS': {
'autocommit': True,
}
})
settings.configure(
DATABASES=DATABASES,
INSTALLED_APPS=(
'django.contrib.contenttypes',
'sendgrid',
),
SITE_ID=1,
SECRET_KEY='this-is-just-for-tests-so-not-that-secret',
ROOT_URLCONF='sendgrid.urls',
TIME_ZONE='UTC', # so we can switch USE_TZ on and off in-flight with postgres
MIDDLEWARE_CLASSES=('django.middleware.csrf.CsrfViewMiddleware', )
)
from django.test.utils import get_runner
def run_tests():
if hasattr(django, 'setup'):
django.setup()
apps = sys.argv[1:] or ['sendgrid', ]
test_runner = get_runner(settings)
test_runner = test_runner(verbosity=1, interactive=True, failfast=False)
failures = test_runner.run_tests(apps)
sys.exit(failures)
if __name__ == '__main__':
run_tests()
| 25.292308 | 86 | 0.576034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 554 | 0.336983 |
45ccbaf3609b8e08f015d18250cacd6410fc0ced | 902 | py | Python | jp.atcoder/abc085/abc085_c/8338197.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc085/abc085_c/8338197.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc085/abc085_c/8338197.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | # author: kagemeka
# created: 2019-11-08 23:31:18(JST)
### modules
## from standard library
import sys
# import collections
# import math
# import string
# import bisect
# import re
# import itertools
# import statistics
# import functools
# import operator
## from external libraries
# import scipy.special
# import scipy.misc
# import numpy as np
def main():
n, y = (int(i) for i in sys.stdin.readline().split())
y //= 1000
if 10 * n < y or n > y:
print(-1, -1, -1)
exit()
for i in range(y // 10 + 1):
for j in range((y - i * 10) // 5 + 1):
k = n - (i + j)
if 10 * i + 5 * j + k == y:
print(i, j, k)
exit()
print(-1, -1, -1)
if __name__ == "__main__":
# execute only if run as a script
main()
| 20.976744 | 60 | 0.487805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.477827 |
45cd6b7941697c04db1bb1d94a1276f8ec7934e6 | 177 | py | Python | tests/__init__.py | obytes/tap-python | bc119cb785dc353f0c67241a64d9fcb30f21fbf7 | [
"MIT"
] | 3 | 2019-05-28T16:19:57.000Z | 2022-03-05T13:16:43.000Z | tests/__init__.py | obytes/tap-python | bc119cb785dc353f0c67241a64d9fcb30f21fbf7 | [
"MIT"
] | 2 | 2019-07-02T21:45:09.000Z | 2019-07-03T13:55:54.000Z | tests/__init__.py | obytes/tap-python | bc119cb785dc353f0c67241a64d9fcb30f21fbf7 | [
"MIT"
] | null | null | null | import vcr
tap_vcr = vcr.VCR(
serializer='yaml',
cassette_library_dir='tests/fixtures/vcr_cassettes',
record_mode='new_episodes',
match_on=['uri', 'method'],
)
| 19.666667 | 56 | 0.689266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.355932 |
45cdd41f4d05f74cf02c535ff1cac89f60151b06 | 3,500 | py | Python | Examples/graphing/swarmGraph.py | juartinv/pulpy | a901caba8570cdc29c8179f34ec4e53e7d024d02 | [
"MIT"
] | null | null | null | Examples/graphing/swarmGraph.py | juartinv/pulpy | a901caba8570cdc29c8179f34ec4e53e7d024d02 | [
"MIT"
] | null | null | null | Examples/graphing/swarmGraph.py | juartinv/pulpy | a901caba8570cdc29c8179f34ec4e53e7d024d02 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append("./../")
from swarm import Bird
class GraphMaker():
"""
"""
def __init__(self, env , birds, FIELD_SIZE ):
self.env= env
fig, ax = plt.subplots()
self.fig=fig
self.ax=ax
self.birds=birds
self.FIELD_SIZE=FIELD_SIZE
self.locations=Locations(self.birds)
locations, colors=self.locations.get_locations()
update_graph(self.fig, self.ax, birds=self.birds, locations=locations, colors=colors, title= "Time "+str(self.env.now), FIELD_SIZE=self.FIELD_SIZE)
for bird in self.birds:
bird.__class__=graphing_Bird
bird.tographing(self.locations, self.FIELD_SIZE)
def run(self):
yield self.env.timeout(.0001)
while True:
if self.locations.updated:
self.fig.clear()
locations, colors=self.locations.get_locations()
update_graph(self.fig, self.ax, birds=self.birds, locations=locations, colors=colors, title= "Time "+str(self.env.now), FIELD_SIZE=self.FIELD_SIZE)
self.locations.update()
yield self.env.timeout(.03)
class Locations():
"""
Keeps track of Locations for graphing
"""
def __init__(self, birds):
self.birds=[b.name for b in birds]
self.locations=[[[b.x, b.y, "blue"]] for b in birds]
self.updated=0
def update(self):
for l , location in enumerate(self.locations.copy()):
if len(location)>1:
self.locations[l].pop(0)
def set_location(self, name, location):
if name in self.birds:
self.locations[self.birds.index(name)].append(location)
self.updated=1
else:
raise ValueError("Could not find ", name , " in ", self.birds)
def get_locations(self):
return ([b[0][0] for b in self.locations], [b[0][1] for b in self.locations]) ,[b[0][2] for b in self.locations]
class graphing_Bird(Bird):
"""
A normal shower manager but it also updates the temperature graph everytime a temperature change is made.
"""
def __init__(self, name, context, bandwidth = 1.0, hard_limit_concurrency = 20, space_capacity = 10, verbose=True, id=0, max_temp=70, min_temp=-20):
super.__init__( name, context, bandwidth , hard_limit_concurrency , space_capacity , verbose, id, max_temp, min_temp)
self.tographing()
self.locations=None
def tographing(self, locations, FIELD_SIZE):
self.locations=locations
self.restricted_movement=[FIELD_SIZE, FIELD_SIZE]
def update_graph(fig, ax, birds, locations, colors, title="", FIELD_SIZE=1000):
"""
Updates the location graph.
"""
p1 = plt.scatter(*locations, color=colors , s=5)
plt.axhline(0, color='grey', linewidth=0.8)
ax.set_ylabel(' ')
ax.set_xlabel(' ')
#ax.set_xticks(ind)
#ax.set_xticklabels([s.name for s in shower_Managers])
colors=["blue", "red", "yellow", "green", "black", "indigo", "darkred", "lime", "seagreen", "pink"]
plt.scatter([],[], color="blue", label= "Independant")
plt.scatter([],[], color="red", label= "Calling")
plt.scatter([],[], color="seagreen", label= "Listening")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), title="Birds:")
plt.ylim(0, FIELD_SIZE)
plt.xlim(0, FIELD_SIZE)
plt.title(title)
plt.tight_layout()
plt.draw()
plt.pause(0.000001)
| 35.714286 | 164 | 0.626 | 2,528 | 0.722286 | 454 | 0.129714 | 0 | 0 | 0 | 0 | 513 | 0.146571 |
45d0870a48ab7440b5a0e5513d3f93147c7b5176 | 9,824 | py | Python | faostat.py | OCHA-DAP/hdxscraper-faostat | 27ee1d2e303b36992037b1717a4e0558315d5dcb | [
"MIT"
] | 1 | 2019-12-03T11:52:07.000Z | 2019-12-03T11:52:07.000Z | faostat.py | OCHA-DAP/hdxscraper-faostat | 27ee1d2e303b36992037b1717a4e0558315d5dcb | [
"MIT"
] | 1 | 2021-09-22T22:00:31.000Z | 2021-09-22T22:00:31.000Z | faostat.py | OCHA-DAP/hdx-scraper-faostat | 27ee1d2e303b36992037b1717a4e0558315d5dcb | [
"MIT"
] | 1 | 2019-12-03T11:52:10.000Z | 2019-12-03T11:52:10.000Z | #!/usr/bin/python
"""
FAOSTAT:
-------
Reads FAOSTAT JSON and creates datasets.
"""
import logging
from datetime import datetime, timedelta
from os import remove, rename
from os.path import basename, exists, getctime, join
from urllib.parse import urlsplit
from zipfile import ZipFile
from hdx.data.dataset import Dataset
from hdx.data.hdxobject import HDXError
from hdx.data.showcase import Showcase
from hdx.location.country import Country
from hdx.utilities.dateparse import parse_date_range
from hdx.utilities.dictandlist import dict_of_lists_add
from slugify import slugify
# Module-level logger for this scraper.
logger = logging.getLogger(__name__)

# Long-form description of the FAOSTAT source (module-level constant).
description = "FAO statistics collates and disseminates food and agricultural statistics globally. The division develops methodologies and standards for data collection, and holds regular meetings and workshops to support member countries develop statistical systems. We produce publications, working papers and statistical yearbooks that cover food security, prices, production and trade and agri-environmental statistics."

# Mapping of output CSV column headers to HXL hashtags, passed to
# `download_and_generate_resource` when building resources.
hxltags = {
    "Iso3": "#country+code",
    "StartDate": "#date+start",
    "EndDate": "#date+end",
    "Year": "#date+year",
    "Area": "#country+name",
    "Item Code": "#indicator+code",
    "Item": "#indicator+name",
    "Unit": "#indicator+type",
    "Value": "#indicator+value+num",
}
def download_indicatorsets(filelist_url, indicatorsetnames, downloader, folder):
    """Download every requested FAOSTAT indicator-set CSV into ``folder``.

    Fetches the FAOSTAT file listing from ``filelist_url``, downloads the
    zipped CSV for each dataset whose name matches one of the categories in
    ``indicatorsetnames`` (skipping archives), and returns a dict mapping
    category name -> list of dataset metadata rows, each row annotated with
    the local ``path`` and any ``quickcharts`` indicator configuration.

    NOTE(review): the nesting of the cache-invalidation branch below was
    reconstructed from a whitespace-mangled source - confirm against the
    upstream repository.
    """
    indicatorsets = dict()
    response = downloader.download(filelist_url)
    jsonresponse = response.json()

    def add_row(row, filepath, indicatorsetname):
        # Annotate the metadata row with its local file path and, when this
        # dataset is the category's designated quick-charts source, the
        # quick-chart indicator list; then file it under its category.
        row["path"] = filepath
        quickcharts = indicatorsetname.get("quickcharts")
        if quickcharts and row["DatasetCode"] == quickcharts["code"]:
            row["quickcharts"] = quickcharts["indicators"]
        else:
            row["quickcharts"] = None
        dict_of_lists_add(indicatorsets, indicatorsetname["category"], row)

    for row in jsonresponse["Datasets"]["Dataset"]:
        for indicatorsetname in indicatorsetnames:
            category = indicatorsetname["category"]
            datasetname = row["DatasetName"]
            # Only datasets titled "<category>: ..." are wanted, and
            # archived datasets are always skipped.
            if f"{category}:" not in datasetname or "archive" in datasetname.lower():
                continue
            filelocation = row["FileLocation"]
            urlpath = urlsplit(filelocation).path
            filename = basename(urlpath).replace("zip", "csv")
            if "Archive" in filename:
                continue
            indicatorsetcode = row["DatasetCode"]
            filepath = join(folder, f"{indicatorsetcode}.csv")
            # A sibling .txt status file marks a completed download and acts
            # as a freshness token (valid for one day).
            statusfile = join(folder, f"{indicatorsetcode}.txt")
            if exists(filepath):
                if exists(statusfile):
                    filedate = datetime.fromtimestamp(getctime(statusfile))
                    if filedate > (datetime.now() - timedelta(days=1)):
                        with open(statusfile) as f:
                            status = f.read()
                        if status == "OK":
                            # Fresh, verified download: reuse the cached CSV.
                            add_row(row, filepath, indicatorsetname)
                            continue
                    # Stale or unverified: discard the status token.
                    remove(statusfile)
                remove(filepath)
            path = filepath.replace(".csv", ".zip")
            if exists(path):
                remove(path)
            path = downloader.download_file(filelocation, path=path)
            # NOTE(review): `zip` shadows the builtin of the same name.
            with ZipFile(path, "r") as zip:
                path = zip.extract(filename, path=folder)
            rename(path, filepath)
            # Write the status token only after a successful extraction.
            with open(statusfile, "w") as f:
                f.write("OK")
            add_row(row, filepath, indicatorsetname)
    return indicatorsets
def get_countries(countries_url, downloader):
    """Read the FAOSTAT country definitions CSV and map them to ISO3 codes.

    Returns ``(countries, countrymapping)`` where ``countries`` is a sorted
    list of dicts with ``iso3``/``countryname``/``origname`` keys (only for
    ISO3 codes HDX recognises) and ``countrymapping`` maps the FAOSTAT
    country code to ``(iso3, original name)``.
    """
    _, iterator = downloader.get_tabular_rows(
        countries_url, headers=1, dict_form=True, format="csv"
    )
    countrymapping = dict()
    for row in iterator:
        iso3 = row["ISO3 Code"].strip()
        # Skip rows without a usable ISO3 code: blank or purely numeric.
        if not iso3:
            continue
        try:
            int(iso3)
        except ValueError:
            pass
        else:
            continue
        countrymapping[row["Country Code"].strip()] = (
            iso3,
            row["Country"].strip(),
        )
    countries = list()
    for iso3, origname in sorted(countrymapping.values()):
        hdxname = Country.get_country_name_from_iso3(iso3)
        # Only keep codes that HDX can resolve to a country name.
        if not hdxname:
            continue
        countries.append(
            {
                "iso3": iso3,
                "countryname": hdxname,
                "origname": origname,
            }
        )
    return countries, countrymapping
def generate_dataset_and_showcase(
    indicatorsetname,
    indicatorsets,
    country,
    countrymapping,
    showcase_base_url,
    filelist_url,
    downloader,
    folder,
):
    """Build an HDX dataset and showcase for one country/indicator set.

    Generates one resource per dataset category in the indicator set,
    filtering and date-stamping rows for the given ``country``.  Returns a
    4-tuple ``(dataset, showcase, bites_disabled, qc_indicators)``, or
    ``(None, None, None, None)`` when the country is not recognised by HDX
    or yields no resources.
    """
    countryiso = country["iso3"]
    countryname = country["countryname"]
    indicatorset = indicatorsets[indicatorsetname]
    # "Prices" is already a display name; everything else gets the suffix.
    if indicatorsetname == "Prices":
        indicatorsetdisplayname = indicatorsetname
    else:
        indicatorsetdisplayname = f"{indicatorsetname} Indicators"
    title = f"{countryname} - {indicatorsetdisplayname}"
    name = f"FAOSTAT {indicatorsetdisplayname} for {countryname}"
    slugified_name = slugify(name).lower()
    logger.info(f"Creating dataset: {title}")
    dataset = Dataset({"name": slugified_name, "title": title})
    # Fixed HDX maintainer and organization identifiers for FAOSTAT.
    dataset.set_maintainer("196196be-6037-4488-8b71-d786adf4c081")
    dataset.set_organization("ed727a5b-3e6e-4cd6-b97e-4a71532085e6")
    dataset.set_expected_update_frequency("Every year")
    dataset.set_subnational(False)
    try:
        dataset.add_country_location(countryiso)
    except HDXError as e:
        # HDX does not know this country: give up on the whole dataset.
        logger.exception(f"{countryname} has a problem! {e}")
        return None, None, None, None
    tags = ["hxl", "indicators"]
    tag = indicatorsetname.lower()
    # Compound names like "a - b" are split into separate tags.
    if " - " in tag:
        tags.extend(tag.split(" - "))
    else:
        tags.append(tag)
    dataset.add_tags(tags)

    def process_date(row):
        """Filter a CSV row to this country and stamp Iso3/StartDate/EndDate.

        Returns None (row dropped) unless the row's area code maps to the
        current country; otherwise returns the parsed date range.
        """
        countrycode = row.get("Area Code")
        if countrycode is None:
            return None
        result = countrymapping.get(countrycode)
        if result is None:
            return None
        isolookup, _ = result
        if isolookup != countryiso:
            return None
        row["Iso3"] = countryiso
        year = row["Year"]
        month = row.get("Months")
        # Monthly rows ("Months" present and not the annual aggregate) are
        # dated by "<month> <year>"; annual rows by the year or year range.
        if month is not None and month != "Annual value":
            startdate, enddate = parse_date_range(f"{month} {year}")
        else:
            if "-" in year:
                yearrange = year.split("-")
                startdate, _ = parse_date_range(yearrange[0])
                _, enddate = parse_date_range(yearrange[1])
                # Keep only the final year of a range in the output column.
                row["Year"] = yearrange[1]
            else:
                startdate, enddate = parse_date_range(year)
        row["StartDate"] = startdate.strftime("%Y-%m-%d")
        row["EndDate"] = enddate.strftime("%Y-%m-%d")
        return {"startdate": startdate, "enddate": enddate}

    bites_disabled = [True, True, True]
    qc_indicators = None
    categories = list()
    for row in indicatorset:
        longname = row["DatasetName"]
        url = row["path"]
        # Dataset names look like "<indicator set>: <category>".
        category = longname.split(": ")[1]
        filename = f"{category}_{countryiso}.csv"
        description = f"*{category}:*\n{row['DatasetDescription']}"
        # NOTE(review): this rebinds `name` (previously the dataset name)
        # as the resource name.
        if category[-10:] == "Indicators":
            name = category
        else:
            name = f"{category} data"
        resourcedata = {"name": f"{name} for {countryname}", "description": description}
        header_insertions = [(0, "EndDate"), (0, "StartDate"), (0, "Iso3")]
        # Quick-chart configuration only applies to the category flagged in
        # download_indicatorsets (row["quickcharts"]).
        indicators_for_qc = row.get("quickcharts")
        if indicators_for_qc:
            quickcharts = {
                "hashtag": "#indicator+code",
                "values": [x["code"] for x in indicators_for_qc],
                "numeric_hashtag": "#indicator+value+num",
                "cutdown": 2,
                "cutdownhashtags": ["#indicator+code", "#country+code", "#date+year"],
            }
            qc_indicators = indicators_for_qc
        else:
            quickcharts = None
        success, results = dataset.download_and_generate_resource(
            downloader,
            url,
            hxltags,
            folder,
            filename,
            resourcedata,
            header_insertions=header_insertions,
            date_function=process_date,
            quickcharts=quickcharts,
            encoding="WINDOWS-1252",
        )
        if success is False:
            logger.warning(f"{category} for {countryname} has no data!")
            continue
        disabled_bites = results.get("bites_disabled")
        if disabled_bites:
            bites_disabled = disabled_bites
        categories.append(category)
    if dataset.number_of_resources() == 0:
        logger.warning(f"{countryname} has no data!")
        return None, None, None, None
    dataset.quickcharts_resource_last()
    notes = [
        f"{indicatorsetdisplayname} for {countryname}.\n\n",
        f"Contains data from the FAOSTAT [bulk data service]({filelist_url})",
    ]
    if len(categories) == 1:
        notes.append(".")
    else:
        notes.append(f" covering the following categories: {', '.join(categories)}")
    dataset["notes"] = "".join(notes)
    showcase = Showcase(
        {
            "name": f"{slugified_name}-showcase",
            "title": title,
            "notes": f"{indicatorsetname} Data Dashboard for {countryname}",
            "url": f"{showcase_base_url}{countryiso}",
            "image_url": "https://pbs.twimg.com/profile_images/1375385494167691269/Bc49-Yx8_400x400.jpg",
        }
    )
    showcase.add_tags(tags)
    return dataset, showcase, bites_disabled, qc_indicators
| 37.212121 | 424 | 0.603522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,414 | 0.245725 |
45d13bddb4847d731177692bebac88cbbf7f4d4b | 1,344 | py | Python | scripts/collapse_subtypes.py | edawson/rkmh | ea3d2e6791e8202ec0e487e648c0182f1766728b | [
"MIT"
] | 43 | 2016-06-29T15:55:36.000Z | 2022-03-07T03:18:45.000Z | scripts/collapse_subtypes.py | edawson/rkmh | ea3d2e6791e8202ec0e487e648c0182f1766728b | [
"MIT"
] | 12 | 2016-06-29T12:37:01.000Z | 2021-07-06T18:58:00.000Z | scripts/collapse_subtypes.py | edawson/rkmh | ea3d2e6791e8202ec0e487e648c0182f1766728b | [
"MIT"
] | 8 | 2016-09-01T17:10:53.000Z | 2021-02-26T10:55:31.000Z | import sys
from collections import Counter
## 5 |strains A1:23146 C:377 B1:546 unclassified:211701 A3:133 A2:212 A4:2230 B2:1052 D2:551 D3:3685 D1:30293 |sketch sketchSize=1000 kmer=16
if __name__ == "__main__":
    # Reads classification lines of the form
    #   <name> |strains A1:23146 C:377 ... unclassified:211701 |sketch ...
    # from stdin and collapses subtype counts (A1, A2, B1, ...) into their
    # parent types (A, B, C, D, plus U for "unclassified").
    # NOTE(review): this script is Python 2 (print statements).
    for line in sys.stdin:
        x_d = Counter()
        tokens = line.split("|")
        features = tokens[1].split(" ")
        for i in features:
            # Fold each "<subtype>:<count>" token into its parent bucket.
            if i.startswith("A"):
                x_d["A"] += int(i.strip().split(":")[1])
            elif i.startswith("B"):
                x_d["B"] += int(i.strip().split(":")[1])
            elif i.startswith("C"):
                x_d["C"] += int(i.strip().split(":")[1])
            elif i.startswith("D"):
                x_d["D"] += int(i.strip().split(":")[1])
            elif i.startswith("u"):
                # "unclassified" gets its own bucket (assignment, not +=).
                x_d["U"] = int(i.strip().split(":")[1])
        total = sum([x_d[x] for x in x_d])
        # Convert raw counts to fractions of the total (U included).
        #feat_l = [str(x + ":" + str( float(x_d[x]) / float(total) )) for x in x_d if x is not "U"]
        feat_l = [str(x + ":" + str( float(x_d[x]) / float(total) )) for x in x_d]
        #feat_l = [str(x + ":" + str((x_d[x]) )) for x in x_d]
        x_feat = "|vir " + " ".join(feat_l)
        #xtra_namespace = "|" + tokens[2]
        #print " ".join([ tokens[0].strip(), x_feat, xtra_namespace] ).strip()
        print " ".join([ tokens[0].strip(), x_feat] ).strip()
| 44.8 | 141 | 0.497024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 466 | 0.346726 |
45d23d997247a4451b26d7281ff5d784047731ab | 1,066 | py | Python | django_blog/blog/models.py | lidysun/test1 | 94db6637be6c0008f2454a0192121b1f1286c437 | [
"MIT"
] | null | null | null | django_blog/blog/models.py | lidysun/test1 | 94db6637be6c0008f2454a0192121b1f1286c437 | [
"MIT"
] | null | null | null | django_blog/blog/models.py | lidysun/test1 | 94db6637be6c0008f2454a0192121b1f1286c437 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from time import timezone
from django.db import models
# Create your models here.
class UserInfo(models.Model):
    """Minimal account record: a user name and password pair.

    NOTE(review): the password is stored as plain text in a CharField;
    Django's auth framework / password hashing should probably be used
    instead - confirm intent.
    """
    user= models.CharField(max_length = 30)
    pwd = models.CharField(max_length = 30)
# class Publisher(models.Model):
# name = models.CharField(max_length=30)
# address = models.CharField(max_length=50)
# website = models.URLField()
# class Author(models.Model):
# email = models.EmailField()
# first_name = models.CharField(max_length = 30)
# last_name = models.CharField(max_length = 30)
# class Book(models.Model):
# title = models.CharField(max_length = 150)
# authors = models.ManyToManyField(Author)
# publisher = models.ForeignKey(Publisher)
class DoubanMovie(models.Model):
    """One Douban movie entry; all fields are stored as raw text."""
    name = models.CharField(max_length = 200)
    info = models.CharField(max_length = 10000)
    rating = models.CharField(max_length = 20)
    # Presumably the number of ratings - TODO confirm against the scraper.
    num = models.CharField(max_length = 50)
    quote = models.CharField(max_length = 150)
    img_url = models.CharField(max_length = 300)
| 31.352941 | 50 | 0.721388 | 414 | 0.388368 | 0 | 0 | 0 | 0 | 0 | 0 | 524 | 0.491557 |
45d55c19dc9d34d55c3334d5dfa1a8ae32a77f25 | 3,210 | py | Python | collegedatascraper/extractors.py | vertuli/collegedatascraper | 3891ba1159eb23a64ba23649635c3ffca4528453 | [
"MIT"
] | null | null | null | collegedatascraper/extractors.py | vertuli/collegedatascraper | 3891ba1159eb23a64ba23649635c3ffca4528453 | [
"MIT"
] | null | null | null | collegedatascraper/extractors.py | vertuli/collegedatascraper | 3891ba1159eb23a64ba23649635c3ffca4528453 | [
"MIT"
] | null | null | null | import pandas as pd
def extract_series(df):
    """Return a pandas Series of all info extracted from a DataFrame.

    Rows whose index label is NaN are first dropped from ``df`` *in place*
    (callers receive the pruned frame as a side effect, matching the
    original behaviour).  A single-column frame yields that column
    directly; a wider frame is flattened via ``wide_df_to_series``.

    Raises:
        ValueError: if ``df`` has no columns at all.
    """
    # Remove index, value pairs from the DataFrame if the index is NaN.
    missing = df.index.isna()
    df.drop(df.index[missing], inplace=True)
    # Single column: the column itself is the extracted Series.
    if len(df.columns) == 1:
        return df.iloc[:, 0]
    # Wide table with multiple columns: delegate to the flattener.
    if len(df.columns) > 1:
        return wide_df_to_series(df)
    # BUG FIX: a frame with zero columns previously fell through and raised
    # UnboundLocalError on `return s`; fail with a diagnosable error.
    raise ValueError("extract_series: DataFrame has no columns")
##############################################################################
# EXTRACTING SERIES FROM WIDE DATAFRAMES FUNCTIONS
##############################################################################
def wide_df_to_series(df):
    """Flatten one multi-column DataFrame (a scraped CollegeData.com
    <table> tag) into a single pandas Series.

    Only the four named tables are flattened; any other table yields
    ``None``.  NOTE(review): indentation below was reconstructed from a
    whitespace-mangled source - confirm nesting against upstream.
    """
    # 'Subject'/'Exam' are 'traditional' tables whose cells hold distinct
    # values: keep every cell.
    if df.index.name in ['Subject', 'Exam']:
        s = multival_wide_df_to_series(df)
    # These two tables both use cell values only to 'mark' a row/col label.
    elif df.index.name in ['Factor', 'Intercollegiate Sports Offered']:
        # Both can be processed the same way once the 'Factor' table is
        # flipped.
        if df.index.name == 'Factor':
            df = df.T  # Transpose
            df.index.name = 'Factor'  # restore the name dropped by .T
        s = singleval_wide_df_to_series(df)  # Returns a tuple of marked vals.
        # 'Factor' table should only have one value marked per label, so
        # extract it from the tuple.
        if df.index.name == 'Factor':
            s = s.str[0]
    # Any other table (e.g. the shortened Overview copy of the 'Factor'
    # table) is ignored.
    else:
        s = None
    return s
def multival_wide_df_to_series(df):
    """Create a pandas Series from a DataFrame with labeled rows and columns
    and differing values in each 'cell'.

    The returned Series contains up to m x n 'cell' values, each indexed by
    ``"<index name>, <row label>, <column label>"``.
    """
    pieces = []
    for col in df.columns:
        col_s = df[col].copy()
        # Compose "<table name>, <row label>, <col label>" keys.
        col_s.index = df.index.name + ', ' + df.index + ', ' + col
        pieces.append(col_s)
    # FIX: Series.append was deprecated and removed in pandas 2.0, and was
    # quadratic when called in a loop; concatenate once instead.
    if not pieces:
        # Match the original's `pd.Series()` result for an empty frame.
        return pd.Series(dtype="float64")
    return pd.concat(pieces)
def singleval_wide_df_to_series(df):
    """Collapse a 'marker' table into a Series of tuples.

    Each column whose cells merely mark rows becomes one entry, keyed by
    ``"<index name>, <column label>"`` and holding a tuple of the marked
    (non-null) row labels; columns with no marks are omitted.
    """
    s = pd.Series()
    for col_label in df.columns:
        # Row labels whose cell in this column is non-null are 'marked'.
        marked_rows = df[col_label].dropna().index.tolist()
        if not marked_rows:
            continue
        s[df.index.name + ', ' + col_label] = tuple(marked_rows)
    return s
def main():
    """This function executes if module is run as a script."""
    # NOTE(review): the body is just this docstring - running the module
    # as a script currently does nothing.


if __name__ == '__main__':
    main()
| 32.1 | 78 | 0.619003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,964 | 0.611838 |
45d64512716035b616ac8ec0c6bb1bdd8e298d41 | 974 | py | Python | cbmcfs3_runner/scenarios/static_demand.py | xapple/cbm_runner | ec532819e0a086077475bfd479836a378f187f6f | [
"MIT"
] | 2 | 2019-07-11T23:49:22.000Z | 2019-10-31T19:11:45.000Z | cbmcfs3_runner/scenarios/static_demand.py | xapple/cbm_runner | ec532819e0a086077475bfd479836a378f187f6f | [
"MIT"
] | null | null | null | cbmcfs3_runner/scenarios/static_demand.py | xapple/cbm_runner | ec532819e0a086077475bfd479836a378f187f6f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair and Paul Rougieux.
JRC biomass Project.
Unit D1 Bioeconomy.
"""
# Built-in modules #
# First party modules #
from plumbing.cache import property_cached
# Internal modules #
from cbmcfs3_runner.scenarios.base_scen import Scenario
from cbmcfs3_runner.core.runner import Runner
###############################################################################
class StaticDemand(Scenario):
    """
    This scenario represents a demand that is pre-calculated and is not a
    function of the maximum wood supply (no interaction yet with the GFTM model).
    """

    short_name = 'static_demand'

    @property_cached
    def runners(self):
        """A dictionary of country codes as keys with a list of runners as values."""
        # Create all runners: one Runner per country in the continent.  The
        # third argument (0) is presumably a step/round index - TODO confirm
        # against Runner's constructor.
        result = {c.iso2_code: [Runner(self, c, 0)] for c in self.continent}
        # Don't modify these runners #
        return result
| 27.055556 | 85 | 0.63655 | 535 | 0.549281 | 0 | 0 | 294 | 0.301848 | 0 | 0 | 592 | 0.607803 |
45d7be5aecc095fc8c811a04d03b02510ef8c196 | 1,310 | py | Python | personalization/shared/utils.py | alshedivat/federated | 100f0e0940282818c42c39156407ae419f26de50 | [
"Apache-2.0"
] | null | null | null | personalization/shared/utils.py | alshedivat/federated | 100f0e0940282818c42c39156407ae419f26de50 | [
"Apache-2.0"
] | null | null | null | personalization/shared/utils.py | alshedivat/federated | 100f0e0940282818c42c39156407ae419f26de50 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Maruan Al-Shedivat.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for loading and preprocessing EMNIST training and testing data."""
import contextlib
@contextlib.contextmanager
def result_type_is_sequence_hack(tf_computation):
    """Temporarily force ``tf_computation``'s result type to report itself
    as a sequence.

    Monkey-patches the result type to avoid a TypeError being raised inside
    `tff.simulation.iterative_process_compositions`.
    TODO: propose to relax the assumption about the type signature of the
    dataset computation being SequenceType in TFF.
    """
    result_type = tf_computation.type_signature.result
    original_is_sequence = result_type.is_sequence
    try:
        # Monkey-patch the result type's sequence predicate.
        result_type.is_sequence = lambda: True
        yield
    finally:
        # BUG FIX: the original "unpatch" installed `lambda: False`, which
        # is wrong whenever the real result type genuinely is a sequence.
        # Restore the attribute that was actually there before.
        result_type.is_sequence = original_is_sequence
| 40.9375 | 78 | 0.772519 | 0 | 0 | 598 | 0.456489 | 625 | 0.477099 | 0 | 0 | 1,011 | 0.771756 |
45d82914df1d0506310b57788fd50743db64c8a9 | 2,635 | py | Python | vars/Staging_security_port_scanning.py | rlennon/Doodle | 60b1645fd327192848c4daaccb572fa456974526 | [
"MIT"
] | 5 | 2019-02-25T20:10:18.000Z | 2019-04-24T20:21:04.000Z | vars/Staging_security_port_scanning.py | rlennon/Doodle | 60b1645fd327192848c4daaccb572fa456974526 | [
"MIT"
] | 28 | 2019-02-26T13:50:52.000Z | 2019-04-24T20:10:29.000Z | vars/Staging_security_port_scanning.py | rlennon/Doodle | 60b1645fd327192848c4daaccb572fa456974526 | [
"MIT"
] | 6 | 2019-02-28T20:54:09.000Z | 2019-04-06T22:18:50.000Z | import sys, os, socket
class Ssh_Util:
    """Small helper that TCP-scans a remote host and reports open ports."""

    def port_scan(self, remote_host_ip):
        """Scan TCP ports 1-199 on ``remote_host_ip`` and print a summary.

        A port counts as open when ``connect_ex`` succeeds; ports 22 and 80
        are called out as SSH/HTTP respectively.  Exits the process on
        ``socket.error``.
        """
        def print_box(print_line):
            # Pretty-print a single line framed by dashes.
            print("-" * 78)
            print(print_line)
            print("-" * 78)

        from_port = 1
        to_port = 200
        print("Scanning Port range - {} to {}.".format(from_port, to_port))
        # BUG FIX: these accumulators were re-initialised on every loop
        # iteration, so the final summary only reflected the last port
        # tested.  Initialise them once, before the scan.
        ssh = False
        http = False
        other_ports = False
        port_list = []
        try:
            for port in range(from_port, to_port):
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                try:
                    result = sock.connect_ex((remote_host_ip, port))
                    if result != 0:
                        continue
                    if port == 22:
                        print("\n\t\t\tPort {} - open for SSH!\n".format(port))
                        ssh = True
                    elif port == 80:
                        print("\n\t\t\tPort {} - open for HTTP!\n".format(port))
                        http = True
                    else:
                        print("\n\t\t\tPort {} - open!".format(port))
                        other_ports = True
                        # BUG FIX: was `port_list = [port]`, which overwrote
                        # previously found ports instead of collecting them.
                        port_list.append(port)
                    print("\n\t\t\tThe connection to {} over Port {} has now been closed".format(remote_host_ip, port))
                finally:
                    # BUG FIX: the socket was only closed for open ports,
                    # leaking one descriptor per closed port.
                    sock.close()
            # Printing the summary to screen (after the scan completes).
            print_box("Scanning Completed for {}".format(remote_host_ip))
            print("\t\t\tSummary")
            if ssh:
                print("\tPort 22, Is open for SSH!")
            if http:
                print("\tPort 80, Is open for HTTP!")
            if other_ports:
                for item in port_list:
                    print("\tPort {} is Open.".format(item))
            if not other_ports:
                print("\tNo other Ports are available!")
            print("-" * 78)
        except socket.error:
            print("Couldn't connect to server")
            sys.exit()
def main():
    """Run the staging security port scans for the web and API servers."""
    # Initialize the ssh object once and reuse it for both targets.
    scanner = Ssh_Util()
    targets = [
        ("Staging Web Server", "172.28.25.129"),
        ("Staging API Server", "172.28.25.128"),
    ]
    for label, ip in targets:
        print(" ")
        print(" ")
        print(" ")
        print("Scanning the {}".format(label))
        scanner.port_scan(ip)
main() | 36.09589 | 123 | 0.470209 | 2,245 | 0.851992 | 0 | 0 | 0 | 0 | 0 | 0 | 725 | 0.275142 |
45d84ac7252e18717ee747f54457d11d39050e86 | 162 | py | Python | src/controllers/main_ctrl.py | donglinwu6066/2022-NYCU-EVA-lab-project-demo-app | 5de1021173240b2f9b325510e2c75f59cf3b14e1 | [
"MIT"
] | null | null | null | src/controllers/main_ctrl.py | donglinwu6066/2022-NYCU-EVA-lab-project-demo-app | 5de1021173240b2f9b325510e2c75f59cf3b14e1 | [
"MIT"
] | null | null | null | src/controllers/main_ctrl.py | donglinwu6066/2022-NYCU-EVA-lab-project-demo-app | 5de1021173240b2f9b325510e2c75f59cf3b14e1 | [
"MIT"
] | 1 | 2022-03-25T10:08:41.000Z | 2022-03-25T10:08:41.000Z | from PyQt5.QtCore import QObject, pyqtSlot
class MainController(QObject):
    """Qt controller object holding a reference to the application model."""

    def __init__(self, model):
        # Keep a reference to the shared application model for later use.
        super().__init__()
        self._model = model
| 20.25 | 42 | 0.679012 | 117 | 0.722222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
45d9682e1d524e94988cc35b75b5c4e2cdeee9ed | 516 | py | Python | sims-g2/pos-adv/code/plot-rn.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 1 | 2019-12-19T16:21:13.000Z | 2019-12-19T16:21:13.000Z | sims-g2/pos-adv/code/plot-rn.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | null | null | null | sims-g2/pos-adv/code/plot-rn.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 2 | 2020-01-08T06:23:33.000Z | 2020-01-08T07:06:50.000Z | from pylab import *
# CFL number used throughout the calculations below.
cfl = 0.1


def getAlpha(r):
    """Return the piecewise alpha(r) used by getRn.

    Below r = 2.2 the smooth exponential branch applies; from 2.2 upward
    the rational branch, capped at 1/cfl.
    """
    if r >= 2.2:
        return min(1 / cfl, 6 / (3 - r))
    return (1 + r / 3.0) * exp(2.0 * r / 3.0)
def getDgAlpha(r):
    """Return the DG alpha value: linear growth, 1 + r."""
    return r + 1
def getRn(r):
    """Map r through one step using alpha(r) from getAlpha and the
    module-level CFL number."""
    alpha = getAlpha(r)
    numerator = r - 3 * cfl * alpha + 6 * cfl
    denominator = 1 - cfl * alpha
    return numerator / denominator
# Sample r on [0, 2.39] and evaluate alpha(r) and the mapped value rn(r).
r = linspace(0, 2.39, 100)
rn = r*0.0
alr = r*0.0
for i in range(r.shape[0]):
    alr[i] = getAlpha(r[i])
#figure(1)
#plot(r, alr)
#grid()
for i in range(r.shape[0]):
    rn[i] = getRn(r[i])
# Plot rn versus r (the alpha plot above is left disabled).
figure(2)
plot(r, rn)
grid()
show()
| 13.578947 | 40 | 0.51938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.065891 |
45da0fdf8d57270d1bbf22d1902ef827d862f813 | 121,867 | py | Python | datatube/test/coerce_dtypes_test.py | eerkela/archivetube | a295987cf4a1234de58c1611fa0f45a626e76c2e | [
"MIT"
] | null | null | null | datatube/test/coerce_dtypes_test.py | eerkela/archivetube | a295987cf4a1234de58c1611fa0f45a626e76c2e | [
"MIT"
] | null | null | null | datatube/test/coerce_dtypes_test.py | eerkela/archivetube | a295987cf4a1234de58c1611fa0f45a626e76c2e | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta, timezone
import random
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytz
if __name__ == "__main__":
    # When executed directly (not via a test runner), make the repository
    # root importable so the `datatube` package resolves uninstalled.
    from pathlib import Path
    import sys
    sys.path.insert(0, str(Path(__file__).resolve().parents[2]))

from datatube.dtype import coerce_dtypes


# Show full diffs on assertion failures in every test case below.
unittest.TestCase.maxDiff = None
class CoerceDtypeBasicTests(unittest.TestCase):
    """Sanity checks on coerce_dtypes that are independent of dtype."""

    def test_coerce_dtypes_returns_copy(self):
        """coerce_dtypes must return a new object, not mutate its input."""
        # series
        in_series = pd.Series([1, 2, 3])
        out_series = coerce_dtypes(in_series, float)
        self.assertNotEqual(id(in_series), id(out_series))
        # dataframe
        in_df = pd.DataFrame({"copy": [1, 2, 3]})
        out_df = coerce_dtypes(in_df, {"copy": float})
        self.assertNotEqual(id(in_df), id(out_df))
class CoerceIntegerDtypeTests(unittest.TestCase):
    """Coercions from integer data to every supported target dtype.

    Each case is exercised on both a Series and a single-column DataFrame,
    with and without missing values.  The per-dtype assertions are shared
    through two private helpers, replacing the previous copy/paste
    duplication; every public test method name is unchanged.
    """

    @classmethod
    def setUpClass(cls) -> None:
        size = 3  # minimum 3
        cls.integers = [-1 * size // 2 + i + 1 for i in range(size)]
        # integers = [..., -1, 0, 1, ...]
        cls.bool_flags = [(i + 1) % 2 for i in range(size)]
        # bool_flags = [1, 0, 1, 0, 1, ...]
        cls.col_name = "integers"

    def _check_coercion(self, in_data, out_data, target_dtype):
        """Assert coerce_dtypes maps in_data -> out_data, both for a Series
        and for a single-column DataFrame."""
        result = coerce_dtypes(pd.Series(in_data), target_dtype)
        assert_series_equal(result, pd.Series(out_data))
        in_df = pd.DataFrame({self.col_name: in_data})
        out_df = pd.DataFrame({self.col_name: out_data})
        result = coerce_dtypes(in_df, {self.col_name: target_dtype})
        assert_frame_equal(result, out_df)

    def _check_lossy_conversion_rejected(self, in_data, target_dtype):
        """Assert coerce_dtypes raises ValueError with the documented
        message when the conversion would lose information."""
        in_series = pd.Series(in_data)
        err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
                   f"values to {target_dtype} without losing information "
                   f"(head: {list(in_series.head())})")
        with self.assertRaises(ValueError) as err:
            coerce_dtypes(in_series, target_dtype)
        self.assertEqual(str(err.exception), err_msg)
        in_df = pd.DataFrame({self.col_name: in_data})
        err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
                   f"{repr(self.col_name)} to {target_dtype} without losing "
                   f"information (head: {list(in_df[self.col_name].head())})")
        with self.assertRaises(ValueError) as err:
            coerce_dtypes(in_df, {self.col_name: target_dtype})
        self.assertEqual(str(err.exception), err_msg)

    def test_coerce_from_integer_to_integer_no_na(self):
        self._check_coercion(self.integers, self.integers.copy(), int)

    def test_coerce_from_integer_to_integer_with_na(self):
        data = self.integers + [None]
        self._check_coercion(data, data.copy(), int)

    def test_coerce_from_integer_to_float_no_na(self):
        self._check_coercion(self.integers,
                             [float(i) for i in self.integers], float)

    def test_coerce_from_integer_to_float_with_na(self):
        self._check_coercion(self.integers + [None],
                             [float(i) for i in self.integers] + [None],
                             float)

    def test_coerce_from_integer_to_complex_no_na(self):
        self._check_coercion(self.integers,
                             [complex(i, 0) for i in self.integers], complex)

    def test_coerce_from_integer_to_complex_with_na(self):
        self._check_coercion(self.integers + [None],
                             [complex(i, 0) for i in self.integers] + [None],
                             complex)

    def test_coerce_from_integer_to_string_no_na(self):
        self._check_coercion(self.integers,
                             [str(i) for i in self.integers], str)

    def test_coerce_from_integer_to_string_with_na(self):
        self._check_coercion(self.integers + [None],
                             [str(i) for i in self.integers] + [None], str)

    def test_coerce_from_generic_integer_to_boolean_no_na(self):
        # Arbitrary integers cannot become booleans without data loss.
        self._check_lossy_conversion_rejected(self.integers, bool)

    def test_coerce_from_generic_integer_to_boolean_with_na(self):
        self._check_lossy_conversion_rejected(self.integers + [None], bool)

    def test_coerce_from_integer_bool_flag_to_boolean_no_na(self):
        # 0/1 flags coerce cleanly to booleans.
        self._check_coercion(self.bool_flags,
                             [bool(i) for i in self.bool_flags], bool)

    def test_coerce_from_integer_bool_flag_to_boolean_with_na(self):
        self._check_coercion(self.bool_flags + [None],
                             [bool(i) for i in self.bool_flags] + [None],
                             bool)

    def test_coerce_from_integer_to_datetime_no_na(self):
        # Integers are interpreted as UTC epoch seconds.
        self._check_coercion(
            self.integers,
            [datetime.fromtimestamp(i, tz=timezone.utc)
             for i in self.integers],
            datetime)

    def test_coerce_from_integer_to_datetime_with_na(self):
        self._check_coercion(
            self.integers + [None],
            [datetime.fromtimestamp(i, tz=timezone.utc)
             for i in self.integers] + [None],
            datetime)

    def test_coerce_from_integer_to_timedelta_no_na(self):
        # Integers are interpreted as a number of seconds.
        self._check_coercion(self.integers,
                             [timedelta(seconds=i) for i in self.integers],
                             timedelta)

    def test_coerce_from_integer_to_timedelta_with_na(self):
        self._check_coercion(
            self.integers + [None],
            [timedelta(seconds=i) for i in self.integers] + [None],
            timedelta)

    def test_coerce_from_integer_to_object_no_na(self):
        in_series = pd.Series(self.integers)
        self._check_coercion(in_series, in_series.astype(np.dtype("O")),
                             object)

    def test_coerce_from_integer_to_object_with_na(self):
        in_series = pd.Series(self.integers + [None])
        self._check_coercion(in_series, in_series.astype(np.dtype("O")),
                             object)
class CoerceFloatDtypeTests(unittest.TestCase):
    """Exercise coerce_dtypes() on float-valued Series and DataFrames.

    Lossless targets (whole floats -> int, float -> float/complex/str/
    datetime/timedelta/object, 0.0/1.0 flags -> bool) must round-trip;
    lossy targets (decimal floats -> int/bool) must raise ValueError
    with the exact diagnostic message.
    """

    @classmethod
    def setUpClass(cls) -> None:
        # Seeding makes the random fixtures (and the error messages that
        # quote them) reproducible across runs.
        random.seed(12345)
        size = 3  # minimum 3
        # whole_floats = [..., -1.0, 0.0, 1.0, ...]
        cls.whole_floats = [-1 * size // 2 + i + 1.0 for i in range(size)]
        # decimal_floats = [..., -1.0 + e, 0.0 + e, 1.0 + e, ...]
        cls.decimal_floats = [-1 * size // 2 + i + 1 + random.random()
                              for i in range(size)]
        # decimal_floats_between_0_and_1 = [0.xxxx, 0.xxxx, 0.xxxx, ...]
        cls.decimal_floats_between_0_and_1 = [random.random()
                                              for _ in range(size)]
        # bool_flags = [1.0, 0.0, 1.0, 0.0, 1.0, ...]
        cls.bool_flags = [(i + 1.0) % 2 for i in range(size)]
        cls.col_name = "floats"

    # ------------------------------------------------------------------
    # shared checkers (leading underscore keeps unittest discovery away)
    # ------------------------------------------------------------------

    def _check_lossless(self, in_data, out_data, target):
        """Assert coercion maps in_data -> out_data for Series and DataFrame."""
        assert_series_equal(coerce_dtypes(pd.Series(in_data), target),
                            pd.Series(out_data))
        assert_frame_equal(
            coerce_dtypes(pd.DataFrame({self.col_name: in_data}), 
                          {self.col_name: target}),
            pd.DataFrame({self.col_name: out_data}))

    def _check_lossy(self, in_data, target):
        """Assert coercion raises ValueError (exact message) for both shapes."""
        in_series = pd.Series(in_data)
        err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
                   f"values to {target} without losing information (head: "
                   f"{list(in_series.head())})")
        with self.assertRaises(ValueError) as err:
            coerce_dtypes(in_series, target)
        self.assertEqual(str(err.exception), err_msg)
        in_df = pd.DataFrame({self.col_name: in_data})
        err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
                   f"{repr(self.col_name)} to {target} without losing "
                   f"information (head: {list(in_df[self.col_name].head())})")
        with self.assertRaises(ValueError) as err:
            coerce_dtypes(in_df, {self.col_name: target})
        self.assertEqual(str(err.exception), err_msg)

    def _check_object_cast(self, values):
        """Assert coercion to object equals a plain astype('O')."""
        in_series = pd.Series(values)
        out_series = in_series.astype(np.dtype("O"))
        assert_series_equal(coerce_dtypes(in_series, object), out_series)
        assert_frame_equal(
            coerce_dtypes(pd.DataFrame({self.col_name: in_series}),
                          {self.col_name: object}),
            pd.DataFrame({self.col_name: out_series}))

    # integer target --------------------------------------------------

    def test_coerce_from_whole_float_to_integer_no_na(self):
        self._check_lossless(self.whole_floats,
                             [int(f) for f in self.whole_floats], int)

    def test_coerce_from_whole_float_to_integer_with_na(self):
        self._check_lossless(self.whole_floats + [None],
                             [int(f) for f in self.whole_floats] + [None],
                             int)

    def test_coerce_from_decimal_float_to_integer_no_na(self):
        self._check_lossy(self.decimal_floats, int)

    def test_coerce_from_decimal_float_to_integer_with_na(self):
        self._check_lossy(self.decimal_floats + [None], int)

    # float target (identity) -----------------------------------------

    def test_coerce_from_float_to_float_no_na(self):
        self._check_lossless(self.decimal_floats,
                             list(self.decimal_floats), float)

    def test_coerce_from_float_to_float_with_na(self):
        data = self.decimal_floats + [None]
        self._check_lossless(data, list(data), float)

    # complex target ---------------------------------------------------

    def test_coerce_from_float_to_complex_no_na(self):
        self._check_lossless(self.decimal_floats,
                             [complex(f, 0) for f in self.decimal_floats],
                             complex)

    def test_coerce_from_float_to_complex_with_na(self):
        self._check_lossless(
            self.decimal_floats + [None],
            [complex(f, 0) for f in self.decimal_floats] + [None], complex)

    # string target ----------------------------------------------------

    def test_coerce_from_float_to_string_no_na(self):
        self._check_lossless(self.decimal_floats,
                             [str(f) for f in self.decimal_floats], str)

    def test_coerce_from_float_to_string_with_na(self):
        self._check_lossless(self.decimal_floats + [None],
                             [str(f) for f in self.decimal_floats] + [None],
                             str)

    # boolean target ---------------------------------------------------

    def test_coerce_from_generic_float_to_boolean_no_na(self):
        self._check_lossy(self.decimal_floats, bool)

    def test_coerce_from_generic_float_to_boolean_with_na(self):
        self._check_lossy(self.decimal_floats + [None], bool)

    def test_coerce_from_float_bool_flag_to_boolean_no_na(self):
        self._check_lossless(self.bool_flags,
                             [bool(f) for f in self.bool_flags], bool)

    def test_coerce_from_float_bool_flag_to_boolean_with_na(self):
        self._check_lossless(self.bool_flags + [None],
                             [bool(f) for f in self.bool_flags] + [None],
                             bool)

    def test_coerce_from_decimal_float_between_0_and_1_to_boolean_no_na(self):
        self._check_lossy(self.decimal_floats_between_0_and_1, bool)

    def test_coerce_from_decimal_float_between_0_and_1_to_boolean_with_na(self):
        self._check_lossy(self.decimal_floats_between_0_and_1 + [None], bool)

    # datetime / timedelta targets ------------------------------------

    def test_coerce_from_float_to_datetime_no_na(self):
        self._check_lossless(
            self.decimal_floats,
            [datetime.fromtimestamp(f, tz=timezone.utc)
             for f in self.decimal_floats], datetime)

    def test_coerce_from_float_to_datetime_with_na(self):
        self._check_lossless(
            self.decimal_floats + [None],
            [datetime.fromtimestamp(f, tz=timezone.utc)
             for f in self.decimal_floats] + [None], datetime)

    def test_coerce_from_float_to_timedelta_no_na(self):
        self._check_lossless(
            self.decimal_floats,
            [timedelta(seconds=f) for f in self.decimal_floats], timedelta)

    def test_coerce_from_float_to_timedelta_with_na(self):
        self._check_lossless(
            self.decimal_floats + [None],
            [timedelta(seconds=f) for f in self.decimal_floats] + [None],
            timedelta)

    # object target ----------------------------------------------------

    def test_coerce_from_float_to_object_no_na(self):
        self._check_object_cast(self.decimal_floats)

    def test_coerce_from_float_to_object_with_na(self):
        self._check_object_cast(self.decimal_floats + [None])
class CoerceComplexDtypeTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        """Build the shared complex-valued fixtures once for the class.

        Seeding the RNG makes the fixtures (and the error messages the
        tests assert against, which quote these values) reproducible.
        NOTE: the order of the random.random() calls below is part of
        the fixture contract — do not reorder these assignments.
        """
        random.seed(12345)
        size = 3
        # whole real parts, zero imaginary parts (losslessly int-able)
        cls.real_whole_complex = [complex(-1 * size // 2 + i + 1.0, 0)
                                  for i in range(size)]
        # ^ = [..., complex(-1, 0), complex(0, 0), complex(1, 0), ...]
        # fractional real parts, zero imaginary parts
        cls.real_complex = [complex(-1 * size // 2 + i + 1 + random.random(), 0)
                            for i in range(size)]
        # ^ = [..., complex(-1+e, 0), complex(0+e, 0), complex(1+e, 0), ...]
        # real parts strictly inside (0, 1), zero imaginary parts
        cls.real_complex_between_0_and_1 = [complex(random.random(), 0)
                                            for _ in range(size)]
        # ^ = [complex(0.xxxx, 0), complex(0.xxxx, 0), complex(0.xxxx, 0), ...]
        # non-zero imaginary parts: can never be coerced losslessly to a
        # real-valued target
        cls.imag_complex = [complex(-1 * size // 2 + i + 1 + random.random(),
                                    -1 * size // 2 + i + 1 + random.random())
                            for i in range(size)]
        # ^ = [..., complex(-1+e,-1+e), complex(0+e,0+e), complex(1+e,1+e), ...]
        # alternating 1/0 real parts, usable as boolean flags
        cls.bool_flags = [complex((i + 1) % 2, 0) for i in range(size)]
        # ^ = [complex(1, 0), complex(0, 0), complex(1, 0), complex(0, 0), ...]
        cls.col_name = "complex"
def test_coerce_from_real_whole_complex_to_integer_no_na(self):
in_data = self.real_whole_complex
out_data = [int(c.real) for c in self.real_whole_complex]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, int)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: int})
assert_frame_equal(result, out_df)
def test_coerce_from_real_whole_complex_to_integer_with_na(self):
in_data = self.real_whole_complex + [None]
out_data = [int(c.real) for c in self.real_whole_complex] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, int)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: int})
assert_frame_equal(result, out_df)
def test_coerce_from_real_decimal_complex_to_integer_no_na(self):
in_data = self.real_complex
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {int} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, int)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {int} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: int})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_real_decimal_complex_to_integer_with_na(self):
in_data = self.real_complex + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {int} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, int)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {int} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: int})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_imaginary_complex_to_integer_no_na(self):
in_data = self.imag_complex
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {int} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, int)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {int} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: int})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_imaginary_complex_to_integer_with_na(self):
in_data = self.imag_complex + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {int} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, int)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {int} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: int})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_real_complex_to_float_no_na(self):
in_data = self.real_complex
out_data = [c.real for c in self.real_complex]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, float)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: float})
assert_frame_equal(result, out_df)
def test_coerce_from_real_complex_to_float_with_na(self):
in_data = self.real_complex + [None]
out_data = [c.real for c in self.real_complex] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, float)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: float})
assert_frame_equal(result, out_df)
def test_coerce_from_imaginary_complex_to_float_no_na(self):
in_data = self.imag_complex
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {float} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, float)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {float} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: float})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_imaginary_complex_to_float_with_na(self):
in_data = self.imag_complex + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {float} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, float)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {float} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: float})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_complex_to_complex_no_na(self):
in_data = self.imag_complex
out_data = in_data.copy()
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, complex)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: complex})
assert_frame_equal(result, out_df)
def test_coerce_from_complex_to_complex_with_na(self):
in_data = self.imag_complex + [None]
out_data = in_data.copy()
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, complex)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: complex})
assert_frame_equal(result, out_df)
def test_coerce_from_complex_to_string_no_na(self):
in_data = self.imag_complex
out_data = [str(c) for c in self.imag_complex]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, str)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: str})
assert_frame_equal(result, out_df)
def test_coerce_from_complex_to_string_with_na(self):
in_data = self.imag_complex + [None]
out_data = [str(c) for c in self.imag_complex] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, str)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: str})
assert_frame_equal(result, out_df)
def test_coerce_from_complex_bool_flag_to_boolean_no_na(self):
in_data = self.bool_flags
out_data = [bool(c.real) for c in self.bool_flags]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, bool)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: bool})
assert_frame_equal(result, out_df)
def test_coerce_from_complex_bool_flag_to_boolean_with_na(self):
in_data = self.bool_flags + [None]
out_data = [bool(c.real) for c in self.bool_flags] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, bool)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: bool})
assert_frame_equal(result, out_df)
def test_coerce_from_real_complex_to_boolean_no_na(self):
in_data = self.real_complex
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_real_complex_to_boolean_with_na(self):
in_data = self.real_complex + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_real_whole_complex_to_boolean_no_na(self):
in_data = self.real_whole_complex
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_real_whole_complex_to_boolean_with_na(self):
in_data = self.real_whole_complex + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_real_complex_between_0_and_1_to_boolean_no_na(self):
in_data = self.real_complex_between_0_and_1
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_real_complex_between_0_and_1_to_boolean_with_na(self):
in_data = self.real_complex_between_0_and_1 + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_imaginary_complex_to_boolean_no_na(self):
in_data = self.imag_complex
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_imaginary_complex_to_boolean_with_na(self):
in_data = self.imag_complex + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_real_complex_to_datetime_no_na(self):
in_data = self.real_complex
out_data = [datetime.fromtimestamp(c.real, tz=timezone.utc)
for c in self.real_complex]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_real_complex_to_datetime_with_na(self):
in_data = self.real_complex + [None]
out_data = [datetime.fromtimestamp(c.real, tz=timezone.utc)
for c in self.real_complex] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_imaginary_complex_to_datetime_no_na(self):
in_data = self.imag_complex
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {datetime} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, datetime)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {datetime} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: datetime})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_imaginary_complex_to_datetime_with_na(self):
in_data = self.imag_complex + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {datetime} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, datetime)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {datetime} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: datetime})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_real_complex_to_timedelta_no_na(self):
in_data = self.real_complex
out_data = [timedelta(seconds=c.real) for c in self.real_complex]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, timedelta)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: timedelta})
assert_frame_equal(result, out_df)
def test_coerce_from_real_complex_to_timedelta_with_na(self):
in_data = self.real_complex + [None]
out_data = ([timedelta(seconds=c.real) for c in self.real_complex] +
[None])
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, timedelta)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: timedelta})
assert_frame_equal(result, out_df)
def test_coerce_from_imaginary_complex_to_timedelta_no_na(self):
in_data = self.imag_complex
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {timedelta} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, timedelta)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {timedelta} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: timedelta})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_imaginary_complex_to_timedelta_with_na(self):
in_data = self.imag_complex + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {timedelta} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, timedelta)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {timedelta} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: timedelta})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_complex_to_object_no_na(self):
in_series = pd.Series(self.imag_complex)
out_series = in_series.astype(np.dtype("O"))
# series
result = coerce_dtypes(in_series, object)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_series})
out_df = pd.DataFrame({self.col_name: out_series})
result = coerce_dtypes(in_df, {self.col_name: object})
assert_frame_equal(result, out_df)
def test_coerce_from_complex_to_object_wth_na(self):
in_series = pd.Series(self.imag_complex + [None])
out_series = in_series.astype(np.dtype("O"))
# series
result = coerce_dtypes(in_series, object)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_series})
out_df = pd.DataFrame({self.col_name: out_series})
result = coerce_dtypes(in_df, {self.col_name: object})
assert_frame_equal(result, out_df)
class CoerceStringDtypeTests(unittest.TestCase):
    """Coercing string series/dataframes to every supported target dtype.

    Each test exercises both entry points of ``coerce_dtypes`` (a bare
    Series, and a DataFrame with a column->dtype mapping) via the shared
    ``_assert_coerces`` helper.
    """

    @classmethod
    def setUpClass(cls) -> None:
        random.seed(12345)
        size = 3
        # symmetric integers around zero: [..., -1, 0, 1, ...]
        cls.integers = [-1 * size // 2 + i + 1 for i in range(size)]
        # the integers plus a random fractional offset
        cls.floats = [i + random.random() for i in cls.integers]
        # complex numbers with matching real/imaginary parts
        cls.complex = [complex(f, f) for f in cls.floats]
        # cycling lowercase letters: ["a", "b", "c", ...]
        cls.characters = [chr((i % 26) + ord("a")) for i in range(size)]
        # alternating truth values: [True, False, True, ...]
        cls.booleans = [bool((i + 1) % 2) for i in range(size)]
        # naive UTC datetimes built from the fractional timestamps
        cls.naive_datetimes = [datetime.utcfromtimestamp(f)
                               for f in cls.floats]
        # timezone-aware UTC datetimes built from the same timestamps
        cls.aware_datetimes = [datetime.fromtimestamp(f, tz=timezone.utc)
                               for f in cls.floats]
        # alternating aware/naive datetimes: [aware, naive, aware, ...]
        cls.aware_naive_datetimes = [
            datetime.utcfromtimestamp(f) if index % 2
            else datetime.fromtimestamp(f, tz=timezone.utc)
            for index, f in enumerate(cls.floats)
        ]
        # one datetime per pytz timezone, cycling through the registry
        cls.mixed_timezones = [
            datetime.fromtimestamp(
                f,
                tz=pytz.timezone(
                    pytz.all_timezones[index % len(pytz.all_timezones)]))
            for index, f in enumerate(cls.floats)
        ]
        # timedeltas built from the fractional timestamps, in seconds
        cls.timedeltas = [timedelta(seconds=f) for f in cls.floats]
        cls.col_name = "strings"

    def _assert_coerces(self, in_data, out_data, target):
        """Coerce ``in_data`` to ``target`` and compare against ``out_data``
        for both the Series and the DataFrame entry points."""
        # series
        assert_series_equal(coerce_dtypes(pd.Series(in_data), target),
                            pd.Series(out_data))
        # dataframe
        assert_frame_equal(
            coerce_dtypes(pd.DataFrame({self.col_name: in_data}),
                          {self.col_name: target}),
            pd.DataFrame({self.col_name: out_data}))

    def test_coerce_from_integer_string_to_integer_no_na(self):
        self._assert_coerces([str(i) for i in self.integers],
                             self.integers, int)

    def test_coerce_from_integer_string_to_integer_with_na(self):
        self._assert_coerces([str(i) for i in self.integers] + [None],
                             self.integers + [None], int)

    def test_coerce_from_float_string_to_float_no_na(self):
        self._assert_coerces([str(f) for f in self.floats],
                             self.floats, float)

    def test_coerce_from_float_string_to_float_with_na(self):
        self._assert_coerces([str(f) for f in self.floats] + [None],
                             self.floats + [None], float)

    def test_coerce_from_complex_string_to_complex_no_na(self):
        self._assert_coerces([str(c) for c in self.complex],
                             self.complex, complex)

    def test_coerce_from_complex_string_to_complex_with_na(self):
        self._assert_coerces([str(c) for c in self.complex] + [None],
                             self.complex + [None], complex)

    def test_coerce_from_character_string_to_string_no_na(self):
        self._assert_coerces(self.characters, self.characters.copy(), str)

    def test_coerce_from_character_string_to_string_with_na(self):
        data = self.characters + [None]
        self._assert_coerces(data, data.copy(), str)

    def test_coerce_from_boolean_string_to_boolean_no_na(self):
        self._assert_coerces([str(b) for b in self.booleans],
                             self.booleans, bool)

    def test_coerce_from_boolean_string_to_boolean_with_na(self):
        self._assert_coerces([str(b) for b in self.booleans] + [None],
                             self.booleans + [None], bool)

    def test_coerce_from_naive_datetime_string_to_datetime_no_na(self):
        self._assert_coerces([str(d) for d in self.naive_datetimes],
                             self.naive_datetimes, datetime)

    def test_coerce_from_naive_datetime_string_to_datetime_with_na(self):
        self._assert_coerces([str(d) for d in self.naive_datetimes] + [None],
                             self.naive_datetimes + [None], datetime)

    def test_coerce_from_naive_ISO_8601_string_to_datetime_no_na(self):
        self._assert_coerces([d.isoformat() for d in self.naive_datetimes],
                             self.naive_datetimes, datetime)

    def test_coerce_from_naive_ISO_8601_string_to_datetime_with_na(self):
        self._assert_coerces(
            [d.isoformat() for d in self.naive_datetimes] + [None],
            self.naive_datetimes + [None], datetime)

    def test_coerce_from_aware_datetime_string_to_datetime_no_na(self):
        self._assert_coerces([str(d) for d in self.aware_datetimes],
                             self.aware_datetimes, datetime)

    def test_coerce_from_aware_datetime_string_to_datetime_with_na(self):
        self._assert_coerces([str(d) for d in self.aware_datetimes] + [None],
                             self.aware_datetimes + [None], datetime)

    def test_coerce_from_aware_ISO_8601_string_to_datetime_no_na(self):
        self._assert_coerces([d.isoformat() for d in self.aware_datetimes],
                             self.aware_datetimes, datetime)

    def test_coerce_from_aware_ISO_8601_string_to_datetime_with_na(self):
        self._assert_coerces(
            [d.isoformat() for d in self.aware_datetimes] + [None],
            self.aware_datetimes + [None], datetime)

    # the mixed aware/naive and mixed-timezone cases are not implemented yet
    def test_coerce_from_aware_naive_datetime_string_to_datetime_no_na(self):
        raise NotImplementedError()

    def test_coerce_from_aware_naive_datetime_string_to_datetime_with_na(self):
        raise NotImplementedError()

    def test_coerce_from_aware_naive_ISO_8601_string_to_datetime_no_na(self):
        raise NotImplementedError()

    def test_coerce_from_aware_naive_ISO_8601_string_to_datetime_with_na(self):
        raise NotImplementedError()

    def test_coerce_from_mixed_tz_datetime_string_to_datetime_no_na(self):
        raise NotImplementedError()

    def test_coerce_from_mixed_tz_datetime_string_to_datetime_with_na(self):
        raise NotImplementedError()

    def test_coerce_from_mixed_tz_ISO_8601_string_to_datetime_no_na(self):
        raise NotImplementedError()

    def test_coerce_from_mixed_tz_ISO_8601_string_to_datetime_with_na(self):
        raise NotImplementedError()

    def test_coerce_from_timedelta_string_to_timedelta_no_na(self):
        self._assert_coerces([str(t) for t in self.timedeltas],
                             self.timedeltas, timedelta)

    def test_coerce_from_timedelta_string_to_timedelta_with_na(self):
        self._assert_coerces([str(t) for t in self.timedeltas] + [None],
                             self.timedeltas + [None], timedelta)

    def test_coerce_from_string_to_object_no_na(self):
        # NOTE(review): the input here is self.timedeltas rather than string
        # data — possibly a copy/paste slip; preserved as-is to keep the
        # asserted behavior identical.
        series_in = pd.Series(self.timedeltas)
        self._assert_coerces(series_in, series_in.astype(np.dtype("O")),
                             object)

    def test_coerce_from_string_to_object_with_na(self):
        # NOTE(review): same input oddity as the no-NA variant above.
        series_in = pd.Series(self.timedeltas + [None])
        self._assert_coerces(series_in, series_in.astype(np.dtype("O")),
                             object)
class CoerceBooleanDtypeTests(unittest.TestCase):
    """Coercing boolean series/dataframes to every supported target dtype.

    Fix: the ``*_to_object`` tests previously built the expected DataFrame
    from the boolean *input* series instead of the object-dtype *output*
    series, so the dataframe assertion compared against the wrong dtype.
    Both tests now compare against the object-cast expectation.
    """

    @classmethod
    def setUpClass(cls) -> None:
        size = 3
        # alternating truth values: [True, False, True, ...]
        cls.booleans = [bool((i + 1) % 2) for i in range(size)]
        cls.col_name = "booleans"

    def _assert_coerces(self, in_data, out_data, target):
        """Coerce ``in_data`` to ``target`` and compare against ``out_data``
        for both the Series and the DataFrame entry points."""
        # series
        assert_series_equal(coerce_dtypes(pd.Series(in_data), target),
                            pd.Series(out_data))
        # dataframe
        assert_frame_equal(
            coerce_dtypes(pd.DataFrame({self.col_name: in_data}),
                          {self.col_name: target}),
            pd.DataFrame({self.col_name: out_data}))

    def test_coerce_from_boolean_to_integer_no_na(self):
        self._assert_coerces(self.booleans,
                             [int(b) for b in self.booleans], int)

    def test_coerce_from_boolean_to_integer_with_na(self):
        self._assert_coerces(self.booleans + [None],
                             [int(b) for b in self.booleans] + [None], int)

    def test_coerce_from_boolean_to_float_no_na(self):
        self._assert_coerces(self.booleans,
                             [float(b) for b in self.booleans], float)

    def test_coerce_from_boolean_to_float_with_na(self):
        self._assert_coerces(self.booleans + [None],
                             [float(b) for b in self.booleans] + [None],
                             float)

    def test_coerce_from_boolean_to_complex_no_na(self):
        self._assert_coerces(self.booleans,
                             [complex(b, 0) for b in self.booleans], complex)

    def test_coerce_from_boolean_to_complex_with_na(self):
        self._assert_coerces(self.booleans + [None],
                             [complex(b, 0) for b in self.booleans] + [None],
                             complex)

    def test_coerce_from_boolean_to_string_no_na(self):
        self._assert_coerces(self.booleans,
                             [str(b) for b in self.booleans], str)

    def test_coerce_from_boolean_to_string_with_na(self):
        self._assert_coerces(self.booleans + [None],
                             [str(b) for b in self.booleans] + [None], str)

    def test_coerce_from_boolean_to_boolean_no_na(self):
        # identity coercion: output mirrors input
        self._assert_coerces(self.booleans, self.booleans.copy(), bool)

    def test_coerce_from_boolean_to_boolean_with_na(self):
        data = self.booleans + [None]
        self._assert_coerces(data, data.copy(), bool)

    def test_coerce_from_boolean_to_datetime_no_na(self):
        # True/False become UTC timestamps 1/0
        self._assert_coerces(self.booleans,
                             [datetime.fromtimestamp(b, tz=timezone.utc)
                              for b in self.booleans], datetime)

    def test_coerce_from_boolean_to_datetime_with_na(self):
        self._assert_coerces(self.booleans + [None],
                             [datetime.fromtimestamp(b, tz=timezone.utc)
                              for b in self.booleans] + [None], datetime)

    def test_coerce_from_boolean_to_timedelta_no_na(self):
        # True/False become 1/0 second timedeltas
        self._assert_coerces(self.booleans,
                             [timedelta(seconds=b) for b in self.booleans],
                             timedelta)

    def test_coerce_from_boolean_to_timedelta_with_na(self):
        self._assert_coerces(
            self.booleans + [None],
            [timedelta(seconds=b) for b in self.booleans] + [None],
            timedelta)

    def test_coerce_from_boolean_to_object_no_na(self):
        # fixed: expected values are now the object-dtype output series for
        # BOTH entry points (previously the dataframe expectation reused the
        # boolean input series)
        in_series = pd.Series(self.booleans)
        self._assert_coerces(in_series, in_series.astype(np.dtype("O")),
                             object)

    def test_coerce_from_boolean_to_object_with_na(self):
        # fixed: same correction as the no-NA variant
        in_series = pd.Series(self.booleans + [None])
        self._assert_coerces(in_series, in_series.astype(np.dtype("O")),
                             object)
class CoerceDatetimeDtypeTests(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
random.seed(12345)
size = 3
integers = [-1 * size // 2 + i + 1 for i in range(size)]
floats = [i + random.random() for i in integers]
cls.whole_datetimes = [datetime.fromtimestamp(i, tz=timezone.utc)
for i in integers]
# ^ = [..., utc time -1, utc time 0, utc time 1, ...]
cls.datetimes_between_0_and_1 = [datetime.fromtimestamp(random.random(),
tz=timezone.utc)
for _ in range(size)]
# ^ = [utc time 0+e, utc time 0+e, utc time 0+e, ...]
cls.bool_flags = [datetime.fromtimestamp((i + 1) % 2, tz=timezone.utc)
for i in range(size)]
# ^ = [utc time 1, utc time 0, utc time 1, utc time 0, ...]
cls.naive_datetimes = [datetime.utcfromtimestamp(f) for f in floats]
# ^ = [..., utc time -1+e, utc time 0+e, utc time 1+e, ...] (no tz)
cls.aware_datetimes = [datetime.fromtimestamp(f, tz=timezone.utc)
for f in floats]
# ^ = [..., utc time -1+e, utc time 0+e, utc_time 1+e, ...] (with tz)
cls.aware_naive_datetimes = []
for index, f in enumerate(floats):
if index % 2: # naive
cls.aware_naive_datetimes.append(datetime.utcfromtimestamp(f))
else: # aware
val = datetime.fromtimestamp(f, tz=timezone.utc)
cls.aware_naive_datetimes.append(val)
# ^ = [aware, naive, aware, naive, aware, ...]
cls.mixed_timezones = []
for index, f in enumerate(floats):
tz_name = pytz.all_timezones[index % len(pytz.all_timezones)]
tz = pytz.timezone(tz_name)
val = datetime.fromtimestamp(f, tz=tz)
cls.mixed_timezones.append(val)
# ^ = ["Africa/Abidjan", "Africa/Accra", "Africa/Addis_Ababa", ...]
cls.col_name = "datetimes"
def test_coerce_from_whole_datetime_to_integer_no_na(self):
in_data = self.whole_datetimes
out_data = [int(d.timestamp()) for d in self.whole_datetimes]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, int)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: int})
assert_frame_equal(result, out_df)
def test_coerce_from_whole_datetime_to_integer_with_na(self):
in_data = self.whole_datetimes + [None]
out_data = [int(d.timestamp()) for d in self.whole_datetimes] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, int)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: int})
assert_frame_equal(result, out_df)
def test_coerce_from_random_datetime_to_integer_no_na(self):
in_data = self.aware_datetimes
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {int} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, int)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {int} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: int})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_random_datetime_to_integer_with_na(self):
in_data = self.aware_datetimes + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {int} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, int)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {int} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: int})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_datetime_to_float_no_na(self):
in_data = self.aware_datetimes
out_data = [d.timestamp() for d in self.aware_datetimes]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, float)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: float})
assert_frame_equal(result, out_df)
def test_coerce_datetime_to_float_with_na(self):
in_data = self.aware_datetimes + [None]
out_data = [d.timestamp() for d in self.aware_datetimes] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, float)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: float})
assert_frame_equal(result, out_df)
def test_coerce_from_datetime_to_complex_no_na(self):
in_data = self.aware_datetimes
out_data = [complex(d.timestamp(), 0) for d in self.aware_datetimes]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, complex)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: complex})
assert_frame_equal(result, out_df)
def test_coerce_from_datetime_to_complex_with_na(self):
in_data = self.aware_datetimes + [None]
out_data = ([complex(d.timestamp(), 0) for d in self.aware_datetimes] +
[None])
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, complex)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: complex})
assert_frame_equal(result, out_df)
def test_coerce_from_datetime_to_string_no_na(self):
in_data = self.aware_datetimes
out_data = [d.isoformat() for d in self.aware_datetimes]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, str)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: str})
assert_frame_equal(result, out_df)
def test_coerce_from_datetime_to_string_with_na(self):
in_data = self.aware_datetimes + [None]
out_data = [d.isoformat() for d in self.aware_datetimes] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, str)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: str})
assert_frame_equal(result, out_df)
def test_coerce_from_datetime_bool_flag_to_boolean_no_na(self):
in_data = self.bool_flags
out_data = [bool(d.timestamp()) for d in self.bool_flags]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, bool)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: bool})
assert_frame_equal(result, out_df)
def test_coerce_from_datetime_bool_flag_to_boolean_with_na(self):
in_data = self.bool_flags + [None]
out_data = [bool(d.timestamp()) for d in self.bool_flags] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, bool)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: bool})
assert_frame_equal(result, out_df)
def test_coerce_from_random_datetime_to_boolean_no_na(self):
in_data = self.aware_datetimes
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_random_datetime_to_boolean_with_na(self):
in_data = self.aware_datetimes + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_datetime_between_0_and_1_to_boolean_no_na(self):
in_data = self.datetimes_between_0_and_1
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_datetime_between_0_and_1_to_boolean_with_na(self):
in_data = self.datetimes_between_0_and_1 + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {bool} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, bool)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {bool} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: bool})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_datetime_to_datetime_no_na(self):
in_data = self.aware_datetimes
out_data = in_data.copy()
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_datetime_to_datetime_with_na(self):
in_data = self.aware_datetimes + [None]
out_data = in_data.copy()
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_datetime_to_timedelta_no_na(self):
in_data = self.aware_datetimes
out_data = [timedelta(seconds=d.timestamp())
for d in self.aware_datetimes]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, timedelta)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: timedelta})
assert_frame_equal(result, out_df)
def test_coerce_from_datetime_to_timedelta_with_na(self):
in_data = self.aware_datetimes + [None]
out_data = [timedelta(seconds=d.timestamp())
for d in self.aware_datetimes] + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, timedelta)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: timedelta})
assert_frame_equal(result, out_df)
def test_coerce_from_datetime_to_object_no_na(self):
in_series = pd.Series(self.aware_datetimes)
out_series = in_series.astype(np.dtype("O"))
# series
result = coerce_dtypes(in_series, object)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_series})
out_df = pd.DataFrame({self.col_name: out_series})
result = coerce_dtypes(in_df, {self.col_name: object})
assert_frame_equal(result, out_df)
def test_coerce_from_datetime_to_object_with_na(self):
in_series = pd.Series(self.aware_datetimes + [None])
out_series = in_series.astype(np.dtype("O"))
# series
result = coerce_dtypes(in_series, object)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_series})
out_df = pd.DataFrame({self.col_name: out_series})
result = coerce_dtypes(in_df, {self.col_name: object})
assert_frame_equal(result, out_df)
class CoerceTimedeltaDtypeTests(unittest.TestCase):
    """Exercise `coerce_dtypes` conversions out of timedelta data.

    Every conversion is checked twice -- once on a `pd.Series` and once on a
    single-column `pd.DataFrame` -- via the `_assert_coerced` /
    `_assert_coerce_error` helpers, which replace ~20 near-identical
    copy-pasted test bodies.  All public test method names are unchanged.
    """

    @classmethod
    def setUpClass(cls) -> None:
        # fixed seed so the randomized fixtures are reproducible
        random.seed(12345)
        size = 3
        integers = [-1 * size // 2 + i + 1 for i in range(size)]
        floats = [i + random.random() for i in integers]
        cls.whole_timedeltas = [timedelta(seconds=i) for i in integers]
        # ^ = [..., timedelta(-1), timedelta(0), timedelta(1), ...]
        cls.timedeltas = [timedelta(seconds=f) for f in floats]
        # ^ = [..., timedelta(-1+e), timedelta(0+e), timedelta(1+e), ...]
        cls.timedeltas_between_0_and_1 = [timedelta(seconds=random.random())
                                          for _ in range(size)]
        # ^ = [timedelta(0+e), timedelta(0+e), timedelta(0+e), ...]
        cls.bool_flags = [timedelta(seconds=(i + 1) % 2) for i in range(size)]
        # ^ = [timedelta(1), timedelta(0), timedelta(1), timedelta(0), ...]
        cls.col_name = "timedeltas"

    def _assert_coerced(self, in_data, out_data, typespec):
        """Coerce `in_data` to `typespec` as a Series and as a single-column
        DataFrame and compare the results against `out_data`."""
        # series
        result = coerce_dtypes(pd.Series(in_data), typespec)
        assert_series_equal(result, pd.Series(out_data))
        # dataframe
        result = coerce_dtypes(pd.DataFrame({self.col_name: in_data}),
                               {self.col_name: typespec})
        assert_frame_equal(result, pd.DataFrame({self.col_name: out_data}))

    def _assert_coerce_error(self, in_data, typespec):
        """Expect a lossy-conversion ValueError (with the exact message) when
        coercing `in_data` to `typespec`, for Series and DataFrame alike."""
        # series
        in_series = pd.Series(in_data)
        err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
                   f"values to {typespec} without losing information (head: "
                   f"{list(in_series.head())})")
        with self.assertRaises(ValueError) as err:
            coerce_dtypes(in_series, typespec)
        self.assertEqual(str(err.exception), err_msg)
        # dataframe
        in_df = pd.DataFrame({self.col_name: in_data})
        err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
                   f"{repr(self.col_name)} to {typespec} without losing "
                   f"information (head: {list(in_df[self.col_name].head())})")
        with self.assertRaises(ValueError) as err:
            coerce_dtypes(in_df, {self.col_name: typespec})
        self.assertEqual(str(err.exception), err_msg)

    def test_coerce_from_whole_timedelta_to_integer_no_na(self):
        self._assert_coerced(
            self.whole_timedeltas,
            [int(t.total_seconds()) for t in self.whole_timedeltas],
            int)

    def test_coerce_from_whole_timedelta_to_integer_with_na(self):
        self._assert_coerced(
            self.whole_timedeltas + [None],
            [int(t.total_seconds()) for t in self.whole_timedeltas] + [None],
            int)

    def test_coerce_from_random_timedelta_to_integer_no_na(self):
        # fractional seconds cannot be represented as integers
        self._assert_coerce_error(self.timedeltas, int)

    def test_coerce_from_random_timedelta_to_integer_with_na(self):
        self._assert_coerce_error(self.timedeltas + [None], int)

    def test_coerce_from_timedelta_to_float_no_na(self):
        self._assert_coerced(
            self.timedeltas,
            [t.total_seconds() for t in self.timedeltas],
            float)

    def test_coerce_from_timedelta_to_float_with_na(self):
        self._assert_coerced(
            self.timedeltas + [None],
            [t.total_seconds() for t in self.timedeltas] + [None],
            float)

    def test_coerce_from_timedelta_to_complex_no_na(self):
        self._assert_coerced(
            self.timedeltas,
            [complex(t.total_seconds(), 0) for t in self.timedeltas],
            complex)

    def test_coerce_from_timedelta_to_complex_with_na(self):
        self._assert_coerced(
            self.timedeltas + [None],
            [complex(t.total_seconds(), 0) for t in self.timedeltas] + [None],
            complex)

    def test_coerce_from_timedelta_to_string_no_na(self):
        self._assert_coerced(
            self.timedeltas,
            [str(pd.Timedelta(t)) for t in self.timedeltas],
            str)

    def test_coerce_from_timedelta_to_string_with_na(self):
        self._assert_coerced(
            self.timedeltas + [None],
            [str(pd.Timedelta(t)) for t in self.timedeltas] + [None],
            str)

    def test_coerce_from_timedelta_bool_flag_to_boolean_no_na(self):
        self._assert_coerced(
            self.bool_flags,
            [bool(d.total_seconds()) for d in self.bool_flags],
            bool)

    def test_coerce_from_timedelta_bool_flag_to_boolean_with_na(self):
        self._assert_coerced(
            self.bool_flags + [None],
            [bool(d.total_seconds()) for d in self.bool_flags] + [None],
            bool)

    def test_coerce_from_random_timedelta_to_boolean_no_na(self):
        # arbitrary durations cannot be reduced to booleans losslessly
        self._assert_coerce_error(self.timedeltas, bool)

    def test_coerce_from_random_timedelta_to_boolean_with_na(self):
        self._assert_coerce_error(self.timedeltas + [None], bool)

    def test_coerce_from_timedelta_between_0_and_1_to_boolean_no_na(self):
        self._assert_coerce_error(self.timedeltas_between_0_and_1, bool)

    def test_coerce_from_timedelta_between_0_and_1_to_boolean_with_na(self):
        self._assert_coerce_error(self.timedeltas_between_0_and_1 + [None],
                                  bool)

    def test_coerce_from_timedelta_to_datetime_no_na(self):
        self._assert_coerced(
            self.timedeltas,
            [datetime.fromtimestamp(t.total_seconds(), tz=timezone.utc)
             for t in self.timedeltas],
            datetime)

    def test_coerce_from_timedelta_to_datetime_with_na(self):
        self._assert_coerced(
            self.timedeltas + [None],
            [datetime.fromtimestamp(t.total_seconds(), tz=timezone.utc)
             for t in self.timedeltas] + [None],
            datetime)

    def test_coerce_from_timedelta_to_timedelta_no_na(self):
        # identity conversion
        self._assert_coerced(self.timedeltas, list(self.timedeltas),
                             timedelta)

    def test_coerce_from_timedelta_to_timedelta_with_na(self):
        in_data = self.timedeltas + [None]
        self._assert_coerced(in_data, list(in_data), timedelta)

    def test_coerce_from_timedelta_to_object_no_na(self):
        in_series = pd.Series(self.timedeltas)
        self._assert_coerced(in_series, in_series.astype(np.dtype("O")),
                             object)

    def test_coerce_from_timedelta_to_object_with_na(self):
        in_series = pd.Series(self.timedeltas + [None])
        self._assert_coerced(in_series, in_series.astype(np.dtype("O")),
                             object)
class CoerceObjectDtypeTests(unittest.TestCase):
    """Placeholder suite for `coerce_dtypes` conversions out of object data.

    All test bodies are currently stubs (`pass`); the fixtures built in
    `setUpClass` are ready for when they are implemented.
    """
    @classmethod
    def setUpClass(cls) -> None:
        # object exposing no conversion hooks at all -- should never coerce
        class NonCastableObject:
            pass
        # object implementing every conversion hook the coercion machinery
        # might look for (dunder casts plus to_datetime/to_timedelta)
        class CastableObject:
            def to_datetime(self) -> datetime:
                return datetime.fromtimestamp(random.randint(0, 86400),
                                              tz=timezone.utc)
            def to_timedelta(self) -> timedelta:
                return timedelta(seconds=random.randint(0, 86400))
            def __int__(self) -> int:
                return random.randint(0, 10)
            def __float__(self) -> float:
                return random.random()
            def __complex__(self) -> complex:
                return complex(random.random(), random.random())
            def __str__(self) -> str:
                return chr(random.randint(0, 26) + ord("a"))
            def __bool__(self) -> bool:
                return bool(random.randint(0, 1))
        size = 3
        cls.non_castable_objects = [NonCastableObject() for _ in range(size)]
        cls.castable_objects = [CastableObject() for _ in range(size)]
        cls.nones = [None for _ in range(size)]
        cls.col_name = "objects"
    # The tests below are intentionally unimplemented placeholders.
    def test_coerce_from_object_to_integer(self):
        pass
        # raise NotImplementedError()
    def test_coerce_from_object_to_float(self):
        pass
        # raise NotImplementedError()
    def test_coerce_from_object_to_complex(self):
        pass
        # raise NotImplementedError()
    def test_coerce_from_object_to_string(self):
        pass
        # raise NotImplementedError()
    def test_coerce_from_object_to_boolean(self):
        pass
        # raise NotImplementedError()
    def test_coerce_from_object_to_datetime(self):
        pass
        # raise NotImplementedError()
    def test_coerce_from_object_to_timedelta(self):
        pass
        # raise NotImplementedError()
    def test_coerce_from_object_to_object(self):
        pass
        # raise NotImplementedError()
# def test_check_dtypes_datetime_mixed_timezones(self):
# test_df = pd.DataFrame({"timestamp": [datetime.now(timezone.utc),
# datetime.now()]})
# self.assertTrue(check_dtypes(test_df, timestamp=datetime))
# def test_coerce_dtypes_kwargless_error(self):
# atomics = [t.__name__ if isinstance(t, type) else str(t)
# for t in AVAILABLE_DTYPES]
# err_msg = (f"[datatube.stats.coerce_dtypes] `coerce_dtypes` must be "
# f"invoked with at least one keyword argument mapping a "
# f"column in `data` to an atomic data type: "
# f"{tuple(atomics)}")
# with self.assertRaises(RuntimeError) as err:
# coerce_dtypes(self.no_na)
# self.assertEqual(str(err.exception), err_msg)
# def test_coerce_dtypes_kwargs_no_na_no_errors(self):
# for col_name, expected in self.conversions.items():
# for conv in expected:
# coerce_dtypes(self.no_na, **{col_name: conv})
# def test_coerce_dtypes_kwargs_with_na_no_errors(self):
# for col_name, expected in self.conversions.items():
# for conv in expected:
# coerce_dtypes(self.with_na, **{col_name: conv})
# def test_coerce_dtypes_matches_check_dtypes(self):
# # This does not work for coercion to <class 'object'> because of the
# # automatic convert_dtypes() step of check_dtypes. These columns will
# # always be better represented by some other data type, unless it was
# # an object to begin with.
# for col_name, expected in self.conversions.items():
# for conv in expected:
# result = coerce_dtypes(self.no_na, **{col_name: conv})
# na_result = coerce_dtypes(self.with_na, **{col_name: conv})
# check_result = check_dtypes(result, **{col_name: conv})
# check_na_result = check_dtypes(na_result, **{col_name: conv})
# if conv != object:
# try:
# self.assertTrue(check_result)
# self.assertTrue(check_na_result)
# except AssertionError as exc:
# err_msg = (f"col_name: {repr(col_name)}, typespec: "
# f"{conv}, expected: {expected}")
# raise AssertionError(err_msg) from exc
# def test_coerce_dtypes_returns_copy(self):
# result = coerce_dtypes(self.with_na, a=float)
# self.assertNotEqual(list(result.dtypes), list(self.with_na.dtypes))
# def test_coerce_dtypes_datetime_preserves_timezone(self):
# raise NotImplementedError()
if __name__ == "__main__":
    # Run every TestCase in this module when executed as a script.
    # (Stray dataset-metadata residue fused onto this line has been removed;
    # it made the original line a syntax error.)
    unittest.main()
45dbe199ff3f79ba88ba30ee2d67fa4703fdbf6b | 3,556 | py | Python | app/settings/dev.py | Pixsel1/movie-warehouse | 038c061aa565365ff45dc10bc2c4ab58fdf11f01 | [
"MIT"
] | null | null | null | app/settings/dev.py | Pixsel1/movie-warehouse | 038c061aa565365ff45dc10bc2c4ab58fdf11f01 | [
"MIT"
] | 5 | 2021-03-19T01:58:15.000Z | 2021-09-22T18:52:59.000Z | app/settings/dev.py | Pixsel1/movie-warehouse | 038c061aa565365ff45dc10bc2c4ab58fdf11f01 | [
"MIT"
] | null | null | null | import logging
import os
import sentry_sdk # NOQA
from sentry_sdk.integrations.django import DjangoIntegration # NOQA
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.debug("loading settings dev.py")
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", "XXX")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
SENTRY_DSN = os.environ.get("SENTRY_DSN")
# Application definition
INSTALLED_APPS = [
# Django
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# Third party
"rest_framework",
"django_extensions",
"django_filters",
"drf_yasg",
# Local
"moviewarehouse.movies",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "moviewarehouse.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "moviewarehouse.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
"default": {
"HOST": os.getenv("POSTGRES_HOST"),
"NAME": os.getenv("POSTGRES_DB"),
"USER": os.getenv("POSTGRES_USER"),
"PASSWORD": os.getenv("POSTGRES_PASSWORD"),
"ENGINE": "django.db.backends.postgresql_psycopg2",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
# OMDB API
OMDB_API_KEY = os.getenv("OMDB_API_KEY")
OMDB_API_URL = "https://www.omdbapi.com/?apikey={apikey}&t={title}&type=movie&r=json"
# REST FRAMEWORK
REST_FRAMEWORK = {
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"PAGE_SIZE": 100,
}
| 26.340741 | 90 | 0.702193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,283 | 0.642013 |
45dbf9c2a1021b223b18f87bcb344162c1d1b922 | 1,599 | py | Python | hwtLib/examples/arithmetic/privateSignals.py | optical-o/hwtLib | edad621f5ad4cdbea20a5751ff4468979afe2f77 | [
"MIT"
] | 24 | 2017-02-23T10:00:50.000Z | 2022-01-28T12:20:21.000Z | hwtLib/examples/arithmetic/privateSignals.py | optical-o/hwtLib | edad621f5ad4cdbea20a5751ff4468979afe2f77 | [
"MIT"
] | 32 | 2017-04-28T10:29:34.000Z | 2021-04-27T09:16:43.000Z | hwtLib/examples/arithmetic/privateSignals.py | optical-o/hwtLib | edad621f5ad4cdbea20a5751ff4468979afe2f77 | [
"MIT"
] | 8 | 2019-09-19T03:34:36.000Z | 2022-01-21T06:56:58.000Z | from hwt.synthesizer.unit import Unit
from hwt.interfaces.std import VectSignal
from hwt.hdl.types.struct import HStruct
from hwt.interfaces.utils import addClkRstn
class PrivateSignalsOfStructType(Unit):
def _declr(self):
addClkRstn(self)
self.a = VectSignal(8)
self.b = VectSignal(8)._m()
self.c = VectSignal(8)
self.d = VectSignal(8)._m()
def _impl(self):
t = self.a._dtype
tmp_t = \
HStruct(
(t, "a0"),
(t, "a1"),
(t[2], "a2_3"),
(HStruct(
(t, "a4"),
(t[2], "a5_6"),
),
"a4_5_6"
),
)
tmp = self._sig("tmp", tmp_t)
self.connect_tmp_chain(tmp, self.a, self.b)
tmp_reg = self._reg("tmp_reg", tmp_t, def_val={
"a0": 0,
"a1": 1,
"a2_3": [2, 3],
"a4_5_6": {
"a4": 4,
"a5_6": [5, 6],
}
})
self.connect_tmp_chain(tmp_reg, self.c, self.d)
def connect_tmp_chain(self, tmp, a_in, a_out):
# a connected to b using chain of tmp signals from tmp sig
tmp.a0(a_in)
tmp.a1(tmp.a0)
tmp.a2_3[0](tmp.a1)
tmp.a2_3[1](tmp.a2_3[0])
tmp.a4_5_6.a4(tmp.a2_3[1])
tmp.a4_5_6.a5_6[0](tmp.a4_5_6.a4)
tmp.a4_5_6.a5_6[1](tmp.a4_5_6.a5_6[0])
a_out(tmp.a4_5_6.a5_6[1])
if __name__ == "__main__":
    # Instantiate the example unit and print its generated RTL
    # (removed stray dataset-metadata residue that was fused onto the
    # final line and made it a syntax error).
    from hwt.synthesizer.utils import to_rtl_str
    u = PrivateSignalsOfStructType()
    print(to_rtl_str(u))
45df59c4c66385bfd79fb073d750cb26999b1094 | 1,758 | py | Python | reader/common/labels.py | nicolay-r/RuSentRel | 4fc0df1580d3da21f0be1e832e403652f73caed1 | [
"MIT"
] | 5 | 2019-01-28T10:31:07.000Z | 2021-09-08T06:25:30.000Z | reader/common/labels.py | nicolay-r/RuSentRel | 4fc0df1580d3da21f0be1e832e403652f73caed1 | [
"MIT"
] | null | null | null | reader/common/labels.py | nicolay-r/RuSentRel | 4fc0df1580d3da21f0be1e832e403652f73caed1 | [
"MIT"
] | 1 | 2019-08-18T18:30:56.000Z | 2019-08-18T18:30:56.000Z |
class Label:
    """Base class for sentiment labels.

    Subclasses expose three encodings of the same label — a string
    (`to_str`), a signed int (`to_int`) and an unsigned int (`to_uint`) —
    and the `from_*` static methods resolve an encoded value back to the
    matching label instance, raising if no supported label matches.
    """

    @staticmethod
    def from_str(value):
        """Return the supported label whose string encoding equals `value`."""
        for l in Label._get_supported_labels():
            if l.to_str() == value:
                return l
        raise Exception("Label by value '{}' doesn't supported".format(value))

    @staticmethod
    def from_int(value):
        """Return the supported label whose signed-int encoding equals `value`."""
        assert(isinstance(value, int))
        for l in Label._get_supported_labels():
            if l.to_int() == value:
                return l
        raise Exception("Label by value '{}' doesn't supported".format(value))

    @staticmethod
    def from_uint(value):
        """Return the supported label whose unsigned-int encoding equals `value`."""
        assert(isinstance(value, int) and value >= 0)
        for l in Label._get_supported_labels():
            if l.to_uint() == value:
                return l
        raise Exception("Label by unsigned value '{}' doesn't supported".format(value))

    @staticmethod
    def _get_supported_labels():
        # Closed set of labels the from_* lookups know about.
        supported_labels = [
            PositiveLabel(),
            NegativeLabel()
        ]
        return supported_labels

    def to_str(self):
        raise NotImplementedError()

    def to_int(self):
        raise NotImplementedError()

    def to_uint(self):
        # Fix: raise NotImplementedError like to_str/to_int, instead of the
        # former bare Exception("Not implemented exception"). Consistent, and
        # still caught by any caller using `except Exception`.
        raise NotImplementedError()

    def __eq__(self, other):
        # Labels compare equal when their signed-int encodings match.
        assert(isinstance(other, Label))
        return self.to_int() == other.to_int()

    def __ne__(self, other):
        assert(isinstance(other, Label))
        return self.to_int() != other.to_int()


class PositiveLabel(Label):
    """Positive sentiment: 'pos' / +1 / unsigned 1."""

    def to_str(self):
        return 'pos'

    def to_int(self):
        return int(1)

    def to_uint(self):
        return int(1)


class NegativeLabel(Label):
    """Negative sentiment: 'neg' / -1 / unsigned 2."""

    def to_str(self):
        return 'neg'

    def to_int(self):
        return int(-1)

    def to_uint(self):
        return int(2)
| 22.831169 | 87 | 0.584187 | 1,748 | 0.994312 | 0 | 0 | 956 | 0.5438 | 0 | 0 | 163 | 0.092719 |
45dff5ae66853bb0ebe532decb37848df59bad29 | 1,368 | py | Python | Examples/mouselight_api.py | maithamn/BrainRender | 9359ccc5b278f58ee3124bcf75b9ebefe0378bbc | [
"MIT"
] | null | null | null | Examples/mouselight_api.py | maithamn/BrainRender | 9359ccc5b278f58ee3124bcf75b9ebefe0378bbc | [
"MIT"
] | null | null | null | Examples/mouselight_api.py | maithamn/BrainRender | 9359ccc5b278f58ee3124bcf75b9ebefe0378bbc | [
"MIT"
] | null | null | null | """
This tutorial shows how to download and render neurons from the MouseLight project
using the MouseLightAPI class.
You can also download data manually from the neuronbrowser website and render them by
passing the downloaded files to `scene.add_neurons`.
"""
import brainrender
# Cache downloaded morphologies so repeated runs don't re-fetch them.
brainrender.USE_MORPHOLOGY_CACHE = True
from brainrender.scene import Scene
from brainrender.Utils.MouseLightAPI.mouselight_api import MouseLightAPI
from brainrender.Utils.MouseLightAPI.mouselight_info import mouselight_api_info, mouselight_fetch_neurons_metadata
# Fetch metadata for neurons with their soma in the secondary motor cortex (MOs)
neurons_metadata = mouselight_fetch_neurons_metadata(filterby='soma', filter_regions=['MOs'])
# Then we can download the files and save them as a .json file
ml_api = MouseLightAPI()
neurons_files = ml_api.download_neurons(neurons_metadata[:2]) # just saving the first couple neurons to speed things up
# Show neurons and the MOs brain region in the same scene:
scene = Scene()
scene.add_neurons(neurons_files, soma_color='orangered', dendrites_color='orangered',
                    axon_color='darkseagreen', neurite_radius=8) # add_neurons takes a lot of arguments to specify how the neurons should look
# make sure to check the source code to see all available options
scene.add_brain_regions(['MOs'], alpha=0.15)
scene.render(camera='coronal')
45e031706dae8d7873c5c1451d151847eb8f490f | 4,073 | py | Python | ponnobot/spiders/daraz_spider.py | ahmedshahriar/bd-ponno | 4ffd537d66ed05fe297a4d81d5ea414fbcdaa26c | [
"Apache-2.0"
] | 3 | 2022-03-09T19:29:36.000Z | 2022-03-17T03:01:50.000Z | ponnobot/spiders/daraz_spider.py | ahmedshahriar/bd-ponno | 4ffd537d66ed05fe297a4d81d5ea414fbcdaa26c | [
"Apache-2.0"
] | null | null | null | ponnobot/spiders/daraz_spider.py | ahmedshahriar/bd-ponno | 4ffd537d66ed05fe297a4d81d5ea414fbcdaa26c | [
"Apache-2.0"
] | null | null | null | import json
import re
from urllib.parse import urljoin
import scrapy
from ponnobot.items import ProductItem
class DarazSpider(scrapy.Spider):
    """Scrapy spider that crawls daraz.com.bd category menus, follows
    product listings (with pagination) and yields saved ProductItems.

    NOTE(review): product data is scraped out of inline JS blobs
    (`window.pageData=...` and `app.run(...)`) — brittle against site
    markup changes.
    """
    name = "daraz"
    allowed_domains = ['daraz.com.bd']
    BASE_URL = 'https://www.daraz.com.bd'
    # HEADERS = {
    #     'authority': 'my.daraz.com.bd',
    #     'pragma': 'no-cache',
    #     'cache-control': 'no-cache',
    #     'dnt': '1',
    #     'origin': 'https://www.daraz.com.bd',
    #     'referer': 'https://www.daraz.com.bd/',
    #     'upgrade-insecure-requests': '1',
    #     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
    #     'accept': 'application/json, text/javascript',
    #     'accept-encoding': 'gzip, deflate, br',
    #     'content-type': 'application/x-www-form-urlencoded',
    #     'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
    #     'sec-ch-ua-mobile': '?0',
    #     'sec-fetch-site': 'same-site',
    #     'sec-fetch-mode': 'cors',
    #     'sec-fetch-dest': 'empty',
    #     'accept-language': 'en-US,en;q=0.9,bn;q=0.8,hi;q=0.7',
    # }
    # HEADERS = {
    #     'authority': 'my.daraz.com.bd',
    #     'pragma': 'no-cache',
    #     'cache-control': 'no-cache',
    #     'dnt': '1',
    #     'upgrade-insecure-requests': '1',
    #     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
    #     'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    #     'sec-fetch-site': 'none',
    #     'sec-fetch-mode': 'navigate',
    #     'sec-fetch-dest': 'document',
    #     'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
    # }
    def start_requests(self):
        # Single seed request: the site's landing page, parsed for categories.
        yield scrapy.Request(url=self.BASE_URL, callback=self.begin_parse)
    def begin_parse(self, response):
        # Collect every sub-menu category link and schedule a listing crawl.
        urls = response.css('ul.lzd-site-menu-sub li.lzd-site-menu-sub-item > a::attr("href")').getall()
        for url in urls:
            # Menu hrefs are protocol-relative ("//..."); make them absolute.
            url = "https:" + str(url)
            yield scrapy.Request(url=url, callback=self.parse)
    def parse(self, response, **kwargs):
        """Parse a category listing page.

        :param response: the listing page response
        :return: products and pagination callback
        """
        """ parse products """
        # Product list lives in an inline `window.pageData=...` JSON blob.
        raw_product_list = re.compile(r'window.pageData=(.*)</script>').search(response.text)
        product_list = json.loads(raw_product_list.group(1).strip())['mods']['listItems']
        product_page_links = [urljoin(self.BASE_URL, product["thumbs"][0]['productUrl']) for product in product_list]
        yield from response.follow_all(product_page_links, self.parse_product)
        """ pagination """
        try:
            pagination_links = response.css('link[rel="next"] ::attr("href")').get()
            yield response.follow(pagination_links, self.parse)
        except IndexError as ie:
            # logging.info(ie, logging.WARN)
            print(ie)
        except TypeError as te:
            # No rel="next" link -> .get() returned None -> follow() raises.
            # logging.info(te, logging.WARN)
            print(te)
        except ValueError as ve:
            print(ve)
    def parse_product(self, response):
        # Extract one product's fields from the inline `app.run({...});` JSON.
        item = ProductItem()
        raw_product_data = re.compile(r'app.run\((.*)\);').search(response.text)
        product_json = json.loads(raw_product_data.group(1).strip())['data']['root']['fields']['skuInfos']['0']
        # print(product_json,type(product_json))
        # print(raw_product_data.group(1))
        try:
            item['vendor'] = self.name
            item['product_url'] = response.url
            item['name'] = product_json["dataLayer"]["pdt_name"]
            item['image_url'] = product_json["image"]
            item['price'] = int(float(product_json["price"]["salePrice"]["value"]))
            item['in_stock'] = True if product_json["stock"] > 0 else False
        except Exception as e:
            # Best-effort: a malformed blob logs the error and may leave the
            # item partially filled.
            print(e, response.url)
        # NOTE(review): if the except path ran before 'name' was set, this
        # field access raises KeyError on a scrapy Item — confirm intended.
        if item['name'] is not None:
            item.save()
| 39.931373 | 147 | 0.57697 | 3,960 | 0.972256 | 1,355 | 0.332679 | 0 | 0 | 0 | 0 | 2,095 | 0.514363 |
45e1b1ddf278cb31a93dc4443d544327d2019794 | 1,692 | py | Python | runtest/__init__.py | thautwarm/gkdtex | e6e7404b0be503a684aee89f2437770ef7b4b04a | [
"MIT"
] | 3 | 2020-12-04T08:48:12.000Z | 2020-12-07T17:54:33.000Z | runtest/__init__.py | thautwarm/gkdtex | e6e7404b0be503a684aee89f2437770ef7b4b04a | [
"MIT"
] | 5 | 2020-11-30T03:46:10.000Z | 2020-12-03T08:07:27.000Z | runtest/__init__.py | thautwarm/gkdtex | e6e7404b0be503a684aee89f2437770ef7b4b04a | [
"MIT"
] | null | null | null | from gkdtex.wrap import parse
from gkdtex.interpreter import Interpreter, CBVFunction
from gkdtex.developer_utilities import *
import sys
src = r"""
\newcommand{\GKDCreateId}{\input{|"gkdmgr --op uuid --rt A"}}
\makeatletter
\newcommand*\GKDNewTemp[2]{
\@ifundefined{GKDTemp#1}{
\expandafter\newcommand\csname GKDTemp#1\endcsname{#2}
}{
\expandafter\renewcommand\csname GKDTemp#1\endcsname{#2}
}
}
\makeatother
\GKDNewTemp{ConstID}{\GKDCreateId}
\newcommand{\GKDSet}[2]{\input{|"gkdmgr --op set --rt \GKDTempConstID #1 #2"}}
\newcommand{\GKDGet}[1]{\input{|"gkdmgr --op get --rt \GKDTempConstID #1"}}
\newcommand{\GKDPush}[2]{\input{|"gkdmgr --op push --rt \GKDTempConstID #1 #2"}}
\newcommand{\GKDPop}[1]{\input{|"gkdmgr --op pop --rt \GKDTempConstID #1"}}
\newcommand{\GKDPyCall}[2]{\input{|"gkdmgr --op call --rt \GKDTempConstID #1 #2"}}
\makeatletter
\newenvironment{GKDBNF}[1]
{\VerbatimEnvironment
\GKDNewTemp{A}{#1}
\input{|"gkdmgr --op createDirFor --rt any ./gkdbnf/#1.bnf"}
\VerbatimOut{./gkdbnf/#1.bnf}
}%
{%
\endVerbatimOut%
\toks0{\immediate\write18}%
\begin{bnf*}
\input{|"gkdmgr --op bnf --rt any ./gkdbnf/\GKDTempA.bnf"}%
\end{bnf*}
}
\verb{a}
\makeatother
"""
# Template body used by the call-by-value macro 'mk' registered below.
body = parse(r"""$ #\1^{ #\1#1 } $""")
interpreter = Interpreter()
interpreter.filename = "a.tex"
interpreter.src = src
# Register 'mk' as a CBV function.
# NOTE(review): CBVFunction argument meanings inferred from this call site
# only — confirm against gkdtex.interpreter.
interpreter.globals['mk'] = CBVFunction([""], [None], dict(d=0), body)
def verb(a: Group, *, self: Interpreter, tex_print):
    # Emit the raw source text of the group argument, wrapped in << >>.
    tex_print('<<')
    tex_print(get_raw_from_span_params(self.src, a.offs))
    tex_print('>>')
interpreter.globals['verb'] = verb
# Interpret the document, writing the expanded output to stdout.
interpreter.interp(sys.stdout.write, parse(src, "a.tex"))
| 25.636364 | 82 | 0.663121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,163 | 0.687352 |
45e49b8126b2a37679ad85ae284597124464cca3 | 465 | py | Python | netmiko/ciena/ciena_saos_ssh.py | mostau1/netmiko | 5b5463fb01e39e771be553281748477a48c7391c | [
"MIT"
] | null | null | null | netmiko/ciena/ciena_saos_ssh.py | mostau1/netmiko | 5b5463fb01e39e771be553281748477a48c7391c | [
"MIT"
] | 8 | 2020-02-05T14:17:32.000Z | 2021-09-23T23:27:46.000Z | netmiko/ciena/ciena_saos_ssh.py | mostau1/netmiko | 5b5463fb01e39e771be553281748477a48c7391c | [
"MIT"
] | null | null | null | """Ciena SAOS support."""
from __future__ import print_function
from __future__ import unicode_literals
from netmiko.cisco_base_connection import CiscoSSHConnection
class CienaSaosSSH(CiscoSSHConnection):
    """Ciena SAOS support: netmiko connection class for Ciena SAOS devices."""
    def session_preparation(self):
        # Prime the channel, learn the prompt, then turn off output paging
        # with the SAOS-specific CLI command.
        self._test_channel_read()
        self.set_base_prompt()
        self.disable_paging(command="system shell session set more off\n")
    def enable(self, *args, **kwargs):
        # No-op: presumably SAOS has no enable/privileged-exec mode — confirm.
        pass
| 29.0625 | 74 | 0.733333 | 297 | 0.63871 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.187097 |
45e5a4410a39940eb2533688242f5202c261dd12 | 6,393 | py | Python | serve.py | rik/mesconseilscovid | ff4b365a677da6bb73284ca5bba73651cde570e9 | [
"MIT"
] | 26 | 2020-05-08T07:52:32.000Z | 2021-12-27T10:25:44.000Z | serve.py | rik/mesconseilscovid | ff4b365a677da6bb73284ca5bba73651cde570e9 | [
"MIT"
] | 419 | 2020-05-10T10:28:26.000Z | 2022-03-31T13:19:41.000Z | serve.py | rik/mesconseilscovid | ff4b365a677da6bb73284ca5bba73651cde570e9 | [
"MIT"
] | 19 | 2020-05-10T10:23:16.000Z | 2021-12-03T19:48:04.000Z | """
Start local development server
"""
import argparse
import logging
import shlex
import subprocess
import webbrowser
from contextlib import suppress
from http.server import HTTPServer, SimpleHTTPRequestHandler
from pathlib import Path
from ssl import wrap_socket
from tempfile import NamedTemporaryFile
from threading import Thread
from livereload.server import LogFormatter, Server
from watchdog.observers import Observer
from watchdog.tricks import ShellCommandTrick
import build
# Path to the locally installed Parcel bundler binary.
PARCEL_CLI = "./node_modules/.bin/parcel"
# Watch-mode bundler invocation (no HMR; livereload handles browser reloads).
BUNDLER_COMMAND = f"{PARCEL_CLI} watch --no-hmr src/*.html"
# Delay (seconds) before livereload reacts to the bundler sentinel file.
LIVERELOAD_DELAY = 0.1
# Directory served by the dev servers (build output).
ROOT_DIR = "dist/"
# Sources whose changes require rebuilding the "thematiques" pages.
PATHS_TO_WATCH_FOR_THEMATIQUES = (
    "build.py",
    "mistune_toc.py",
    "contenus/meta/*.md",
    "contenus/thematiques/*.md",
    "templates/thematique.html",
)
# Sources whose changes require rebuilding the index page.
PATHS_TO_WATCH_FOR_INDEX = (
    "build.py",
    "contenus/conseils/*.md",
    "contenus/meta/*.md",
    "contenus/questions/*.md",
    "contenus/réponses/*.md",
    "contenus/statuts/*.md",
    "contenus/suivi/*.md",
    "templates/index.html",
)
def parse_args():
    """Parse the dev-server command-line options from sys.argv."""
    parser = argparse.ArgumentParser()
    options = (
        ("--address", {"default": "0.0.0.0"}),
        ("--port", {"type": int, "default": None}),
        ("--ssl", {"action": "store_true"}),
        ("--ssl-cert", {"default": "cert.pem"}),
        ("--ssl-key", {"default": "key.pem"}),
        ("--open", {"action": "store_true"}),
        ("--watch", {"action": "store_true"}),
    )
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def serve(address, port, open_, watch, ssl, ssl_cert, ssl_key, bundler_watch_filename):
    """Dispatch to the HTTPS or HTTP dev server.

    `port` may be None, in which case a scheme-specific default is used
    (8443 for HTTPS, 5500 for HTTP).

    Bug fix: the body previously read the module-global `args` (set only
    under `if __name__ == "__main__"`) instead of its own parameters, so
    every argument passed to serve() was silently ignored and calling it
    from anywhere else raised NameError.
    """
    if ssl:
        return serve_https(
            address=address,
            port=port or 8443,
            open_=open_,
            watch=watch,
            ssl_cert=ssl_cert,
            ssl_key=ssl_key,
        )
    else:
        return serve_http(
            address=address,
            port=port or 5500,
            open_=open_,
            watch=watch,
            bundler_watch_filename=bundler_watch_filename,
        )
class CustomServer(Server):
    """
    Custom livereload server whose logger decodes bytes messages before
    formatting, so bundler output doesn't show up as b'...' in the logs.
    """
    def _setup_logging(self):
        # After stock setup, swap the formatter on every livereload handler.
        super()._setup_logging()
        logger = logging.getLogger("livereload")
        formatter = self.BytesFormatter()
        for handler in logger.handlers:
            handler.setFormatter(formatter)
    class BytesFormatter(LogFormatter):
        # Formatter that decodes bytes records as UTF-8 when possible.
        def format(self, record):
            if isinstance(record.msg, bytes):
                # Leave the message as raw bytes if it is not valid UTF-8.
                with suppress(UnicodeDecodeError):
                    record.msg = record.msg.decode("utf-8")
            return super().format(record)
def serve_http(address, port, open_, watch, bundler_watch_filename):
    """Serve ROOT_DIR over HTTP with livereload.

    When `watch` is true, content/template changes trigger the matching
    build step; the bundler sentinel file always triggers a browser reload.
    """
    server = CustomServer()
    if watch:
        for path in PATHS_TO_WATCH_FOR_THEMATIQUES:
            server.watch(path, build.thematiques, delay="forever")
        for path in PATHS_TO_WATCH_FOR_INDEX:
            server.watch(path, build.index, delay="forever")
    # The bundler thread truncates this file after each successful build;
    # watching it turns bundle completion into a reload.
    server.watch(bundler_watch_filename, delay=LIVERELOAD_DELAY)
    server.serve(
        host=address,
        port=port,
        root=ROOT_DIR,
        open_url_delay=0.1 if open_ else None,
    )
def serve_https(address, port, open_, watch, ssl_cert, ssl_key):
    """Serve ROOT_DIR over HTTPS, optionally rebuilding on content changes.

    Unlike serve_http there is no livereload here: watchdog handlers shell
    out to `python3 build.py ...` whenever watched source files change.
    NOTE(review): `ssl.wrap_socket` is deprecated (removed in Python 3.12);
    migrate to ssl.SSLContext.wrap_socket when upgrading the runtime.
    """

    class MyHTTPRequestHandler(SimpleHTTPRequestHandler):
        # Serve files from ROOT_DIR and keep request logging quiet.
        def __init__(self, *args, **kwargs):
            super().__init__(*args, directory=ROOT_DIR, **kwargs)

        def log_request(self, *args, **kwargs):
            pass

    class BuildThematiquesEventHandler(ShellCommandTrick):
        # Rebuild the "thematiques" pages on any file modification.
        def __init__(self):
            super().__init__(
                shell_command="python3 build.py thematiques",
                wait_for_process=True,
                drop_during_process=True,
            )

        def on_any_event(self, event):
            if event.event_type == "modified" and not event.is_directory:
                super().on_any_event(event)

    class BuildIndexEventHandler(ShellCommandTrick):
        # Rebuild the index page on any file modification.
        def __init__(self):
            super().__init__(
                shell_command="python3 build.py index",
                wait_for_process=True,
                drop_during_process=True,
            )

        def on_any_event(self, event):
            if event.event_type == "modified" and not event.is_directory:
                super().on_any_event(event)

    if watch:
        observer = Observer()
        thematiques_handler = BuildThematiquesEventHandler()
        for pattern in PATHS_TO_WATCH_FOR_THEMATIQUES:
            directory = Path(pattern).parts[0]
            observer.schedule(thematiques_handler, directory, recursive=True)
        index_handler = BuildIndexEventHandler()
        # Bug fix: this loop previously iterated PATHS_TO_WATCH_FOR_THEMATIQUES
        # again, so index-only sources (conseils, questions, réponses, ...)
        # never triggered an index rebuild.
        for pattern in PATHS_TO_WATCH_FOR_INDEX:
            directory = Path(pattern).parts[0]
            observer.schedule(index_handler, directory, recursive=True)
        observer.start()

    url = f"https://{address}:{port}/"
    print(f"Listening on {url}")
    if open_:
        webbrowser.open(url)

    httpd = HTTPServer((address, port), MyHTTPRequestHandler)
    httpd.socket = wrap_socket(
        httpd.socket, certfile=ssl_cert, keyfile=ssl_key, server_side=True
    )
    httpd.serve_forever()
class BundlerThread(Thread):
    """Daemon thread that runs the Parcel bundler and signals livereload.

    After each successful build it truncates `watch_file`, changing its
    mtime; the livereload server watches that file (see serve_http).
    """
    def __init__(self, watch_file):
        super().__init__()
        self.watch_file = watch_file
        self.daemon = True  # don't block interpreter shutdown
    def run(self):
        proc = subprocess.Popen(shlex.split(BUNDLER_COMMAND), stdout=subprocess.PIPE)
        # NOTE(review): if the bundler process exits, proc.stdout is exhausted
        # and this `while True` becomes a busy loop — confirm intended.
        while True:
            for line_bytes in proc.stdout:
                line = line_bytes.decode("utf-8")
                print(line)
                # Parcel prints "✨ Built in ..." after each successful build.
                if line.startswith("✨ Built in"):
                    self.trigger_livereload()
    def trigger_livereload(self):
        # Truncating updates the sentinel file's mtime, which livereload sees.
        self.watch_file.truncate(0)
if __name__ == "__main__":
    args = parse_args()
    # The temp file is the bundler -> livereload signalling channel; it lives
    # for the duration of the server run and is deleted on exit.
    with NamedTemporaryFile(delete=True) as bundler_watch_file:
        bundler_thread = BundlerThread(watch_file=bundler_watch_file)
        bundler_thread.start()
        serve(
            address=args.address,
            port=args.port,
            open_=args.open,
            watch=args.watch,
            ssl=args.ssl,
            ssl_cert=args.ssl_cert,
            ssl_key=args.ssl_key,
            bundler_watch_filename=bundler_watch_file.name,
        )
| 29.873832 | 87 | 0.638667 | 2,257 | 0.352877 | 0 | 0 | 0 | 0 | 0 | 0 | 774 | 0.121013 |
45e5af780de1fe9491f7c7e69ad7b1dd20dd302f | 1,630 | py | Python | scripts/review_weblog.py | akrherz/iemwebfarm | a4a2a74714e3b589a6fee42752723733b9ba4157 | [
"Apache-2.0"
] | null | null | null | scripts/review_weblog.py | akrherz/iemwebfarm | a4a2a74714e3b589a6fee42752723733b9ba4157 | [
"Apache-2.0"
] | null | null | null | scripts/review_weblog.py | akrherz/iemwebfarm | a4a2a74714e3b589a6fee42752723733b9ba4157 | [
"Apache-2.0"
] | null | null | null | """Process what our weblog has.
Run every minute, sigh.
"""
import sys
import subprocess
import psycopg2
THRESHOLD = 30  # 404 responses per address before we firewall it


def logic(counts, family):
    """Insert firewall DROP rules for addresses with >= THRESHOLD 404 hits.

    counts: dict mapping client address -> list of weblog rows
            (valid, client_addr, uri, referer).
    family: 4 selects iptables (IPv4); anything else uses ip6tables.
    """
    exe = "iptables" if family == 4 else "ip6tables"
    for addr, hits in counts.items():
        # Skip quiet addresses, and never block localhost.
        if len(hits) < THRESHOLD or addr == '127.0.0.1':
            continue
        # NOTE the insert to the front of the chain
        cmd = f"/usr/sbin/{exe} -I INPUT -s {addr} -j DROP"
        print(f"{addr} with {len(hits)}/{THRESHOLD} 404s\n{cmd}\nSample 10\n")
        for hit in hits[:10]:
            print(f"{hit[0]} uri:|{hit[2]}| ref:|{hit[3]}|")
        print()
        # Security fix: pass an argument vector instead of shell=True so a
        # hostile address string cannot inject shell metacharacters.
        subprocess.call([f"/usr/sbin/{exe}", "-I", "INPUT", "-s", addr, "-j", "DROP"])
def main(argv):
    """Summarize 404s per client address, block offenders, purge processed rows.

    argv[1] selects the address family: 4 (IPv4) or 6 (IPv6).
    """
    family = int(argv[1])  # either 4 or 6
    pgconn = psycopg2.connect(
        database="mesosite",
        host="iemdb-mesosite.local",
        user="nobody",
        connect_timeout=5,
        # gssencmode="disable",
    )
    cursor = pgconn.cursor()
    cursor.execute(
        "SELECT valid, client_addr, uri, referer from weblog WHERE "
        "http_status = 404 and family(client_addr) = %s ORDER by valid ASC",
        (family,),
    )
    valid = None
    counts = {}
    # Group rows by client address; `valid` ends as the newest processed
    # timestamp because rows are ordered by valid ASC.
    for row in cursor:
        d = counts.setdefault(row[1], [])
        d.append(row)
        valid = row[0]
    if valid is None:
        return
    # Delete only rows up to the newest one we processed, so entries that
    # arrive mid-run are kept for the next invocation.
    cursor.execute(
        "DELETE from weblog where valid <= %s and family(client_addr) = %s",
        (valid, family),
    )
    cursor.close()
    pgconn.commit()
    logic(counts, family)
# Script entry point: argv[1] selects the address family (4 or 6).
if __name__ == "__main__":
    main(sys.argv)
| 25.46875 | 78 | 0.571779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 638 | 0.391411 |
45e62668ada234da171d4601a142283296ac3f75 | 181 | py | Python | carmcmc/__init__.py | metegenez/WAVEPAL | fa2bb91e2c7e63681ae4592929215c96bc523597 | [
"MIT"
] | 39 | 2015-01-25T19:24:09.000Z | 2022-02-28T11:55:28.000Z | carmcmc/__init__.py | metegenez/WAVEPAL | fa2bb91e2c7e63681ae4592929215c96bc523597 | [
"MIT"
] | 13 | 2015-04-29T12:37:45.000Z | 2021-11-28T23:31:29.000Z | carmcmc/__init__.py | metegenez/WAVEPAL | fa2bb91e2c7e63681ae4592929215c96bc523597 | [
"MIT"
] | 19 | 2015-09-15T00:41:28.000Z | 2021-07-28T07:28:47.000Z | from _carmcmc import *
from carma_pack import CarmaModel, CarmaSample, Car1Sample, power_spectrum, carma_variance, \
carma_process, get_ar_roots
from samplers import MCMCSample
| 36.2 | 93 | 0.828729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
45e649838e23cf031bb5255dcfd4e23a12c7bf08 | 551 | py | Python | events/tracon2022/urls.py | con2/kompassi | 4938059b8f239ad5ecf06a526adf8b62bce52263 | [
"CC-BY-3.0"
] | 13 | 2015-11-29T12:19:12.000Z | 2021-02-21T15:42:11.000Z | events/tracon2022/urls.py | con2/kompassi | 4938059b8f239ad5ecf06a526adf8b62bce52263 | [
"CC-BY-3.0"
] | 23 | 2015-04-29T19:43:34.000Z | 2021-02-10T05:50:17.000Z | events/tracon2022/urls.py | con2/kompassi | 4938059b8f239ad5ecf06a526adf8b62bce52263 | [
"CC-BY-3.0"
] | 11 | 2015-09-20T18:59:00.000Z | 2020-02-07T08:47:34.000Z | from django.conf.urls import url
from .views import tracon2022_afterparty_participants_view, tracon2022_afterparty_summary_view
# URL routes for the tracon2022 afterparty ("kaatoilmo") survey views.
# NOTE(review): django.conf.urls.url() is deprecated since Django 2.0 and
# removed in Django 4.0 — migrate to django.urls.re_path when upgrading.
urlpatterns = [
    url(
        r'^events/(?P<event_slug>tracon2022)/labour/surveys/kaatoilmo/results.xlsx$',
        tracon2022_afterparty_participants_view,
        name='tracon2022_afterparty_participants_view',
    ),
    url(
        r'^events/(?P<event_slug>tracon2022)/labour/surveys/kaatoilmo/summary/?$',
        tracon2022_afterparty_summary_view,
        name='tracon2022_afterparty_summary_view',
    ),
]
| 29 | 94 | 0.733212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.410163 |
45e64fe89ff0dac219cbb0a832f57189d9fdcbc8 | 1,273 | py | Python | Adult_dataset/asd-screen.py | blessinvarkey/asd_screening | d9aa38e247b4f74564a564c98e94191ebe71cf7a | [
"MIT"
] | 1 | 2022-03-30T08:46:14.000Z | 2022-03-30T08:46:14.000Z | Adult_dataset/asd-screen.py | blessinvarkey/asd-screening-ml | 35967aa1253df70091e0f206133df06b12788697 | [
"MIT"
] | null | null | null | Adult_dataset/asd-screen.py | blessinvarkey/asd-screening-ml | 35967aa1253df70091e0f206133df06b12788697 | [
"MIT"
] | 1 | 2021-02-11T07:43:32.000Z | 2021-02-11T07:43:32.000Z | import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import missingno
import seaborn as sns
import unittest
class ASDScreening:
    """Thin wrapper around an ASD screening dataset.

    Holds the loaded data in `self.data` and offers a simple shuffled
    train/test split plus a correlation heatmap plot.
    """

    def __init__(self):
        # Loaded dataset; becomes a pandas DataFrame after read_file().
        self.data = []

    def read_file(self, dataset):
        """Load a CSV file from path `dataset`, print it, store as self.data.

        Bug fix: the original printed pd.read_csv(...) without keeping the
        result and then assigned the undefined name `df` (NameError).
        """
        with open(dataset, 'r') as file:
            df = pd.read_csv(file)
        print(df)
        self.data = df

    def oneHotEncoding(self):
        # Placeholder (not yet implemented). Bug fix: the original omitted
        # `self`, so any instance call raised TypeError.
        pass

    def train_test_split(self, df, train_frac=0.7, seed=1):
        """Shuffle `df`'s rows and split into train/test feature/label arrays.

        The last column is treated as the label; all preceding columns are
        features. Returns ((train_features, train_labels),
        (test_features, test_labels)).

        NOTE: df.values may be a view for homogeneous dtypes, so the shuffle
        can reorder `df` in place (same as the original behavior).
        """
        df_matrix = df.values
        np.random.seed(seed)
        np.random.shuffle(df_matrix)  # shuffle the rows
        train_size = int(df_matrix.shape[0] * train_frac)
        # Bug fix: the original read `def_matrix` (typo) here -> NameError.
        train_features = df_matrix[:train_size, :-1]  # all but last column
        train_labels = df_matrix[:train_size, -1]     # last column
        test_features = df_matrix[train_size:, :-1]
        test_labels = df_matrix[train_size:, -1]
        return (train_features, train_labels), (test_features, test_labels)

    def plot_heatmap(self):
        # Correlation heatmap of the loaded dataset (requires matplotlib/seaborn).
        fig = plt.figure(figsize=(12, 10))
        sns.heatmap(self.data.corr())
# Module-level demo: load the adult screening dataset at import time.
# NOTE(review): requires "Autism_Data.arff" in the working directory.
dataset_adult = ASDScreening()
dataset_adult.read_file("Autism_Data.arff")
#Unit test
# class TestASDScreening(unittest.TestCase):
#     assert read_file(df = "Autism_Data.arff") == True
| 27.085106 | 75 | 0.667714 | 944 | 0.741555 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.168892 |
45e769b7d426bf28508e3b1de1e23a3961422074 | 1,416 | py | Python | util.py | rwberendsen/aprilsnow | dbea6219928f0729a2d0000c4b3c272ee4e602e9 | [
"MIT"
] | null | null | null | util.py | rwberendsen/aprilsnow | dbea6219928f0729a2d0000c4b3c272ee4e602e9 | [
"MIT"
] | null | null | null | util.py | rwberendsen/aprilsnow | dbea6219928f0729a2d0000c4b3c272ee4e602e9 | [
"MIT"
] | null | null | null | from snowflake.connector import DictCursor, ProgrammingError
import logging
def run(conn, sql, params=None):
    """Execute `sql` (optionally with bind `params`) on `conn`, discarding results.

    The cursor is always closed; execution errors propagate to the caller.
    (The former `except ProgrammingError: raise` was a no-op re-raise and
    has been removed — identical behavior, less noise.)
    """
    cur = conn.cursor()
    try:
        cur.execute(sql, params)
    finally:
        cur.close()
def run_and_fetchall(conn, sql, params=None):
    """Execute `sql` on `conn` and return all rows as dicts.

    Uses a DictCursor; the cursor is always closed. (The former
    `except ProgrammingError: raise` was a no-op re-raise; moving fetchall
    into the try keeps the original execute-then-fetch-on-success order.)
    """
    cur = conn.cursor(DictCursor)
    try:
        cur.execute(sql, params)
        results = cur.fetchall()
    finally:
        cur.close()
    # Lowercase all column names because we use lowercase identifiers everywhere in our SQL code
    return [{k.lower(): v for k, v in rec.items()} for rec in results]
def run_own_connection(get_conn_callback, sql, params=None):
    """Open a fresh connection via `get_conn_callback`, run `sql`, close it.

    Fix: the close now happens in a `finally`, so the connection is not
    leaked when execution raises.
    """
    conn = get_conn_callback()
    try:
        run(conn, sql, params)
    finally:
        conn.close()
def run_and_fetchall_own_connection(get_conn_callback, sql, params=None):
    """Open a fresh connection, return run_and_fetchall's rows, close it.

    Fix: the close now happens in a `finally`, so the connection is not
    leaked when execution raises.
    """
    conn = get_conn_callback()
    try:
        return run_and_fetchall(conn, sql, params)
    finally:
        conn.close()
def sql_ts_nodash_to_timestamp_ntz(ts_nodash):
    """Render a Snowflake TO_TIMESTAMP_NTZ expression for a compact
    YYYYMMDD"T"HHMMSS timestamp value."""
    template = "TO_TIMESTAMP_NTZ({}, 'YYYYMMDD\"T\"HH24MISS')"
    return template.format(ts_nodash)
def sql_surrogate_key(unique_key_columns):
    """Render an MD5-based surrogate-key SQL expression over the given
    columns, NULL-safe via a sentinel placeholder."""
    hashed_columns = []
    for column in unique_key_columns:
        hashed_columns.append(
            "MD5(COALESCE(IDENTIFIER('{}')::VARCHAR, '__EQUAL_NULL__'))".format(column)
        )
    concatenated = "\n || '||'\n ||".join(hashed_columns)
    return "MD5(\n{}\n)".format(concatenated)
afd90c97074e4cc4b8d8e640b7c29f7804ac9b97 | 1,172 | py | Python | tests/database/test_projects.py | hueyyeng/AssetsBrowser | eb00d495f3ed7eab3d5ef8b995f38237936037ea | [
"MIT"
] | 7 | 2019-12-09T07:06:59.000Z | 2021-11-05T22:38:01.000Z | tests/database/test_projects.py | hueyyeng/AssetsBrowser | eb00d495f3ed7eab3d5ef8b995f38237936037ea | [
"MIT"
] | 5 | 2019-01-15T03:44:39.000Z | 2022-03-27T07:36:15.000Z | tests/database/test_projects.py | hueyyeng/AssetsBrowser | eb00d495f3ed7eab3d5ef8b995f38237936037ea | [
"MIT"
] | null | null | null | from pytest import mark
import peewee as pw
from database.models import (
Asset,
Category,
Project,
)
from database.db import Database
class TestProjects:
    """Peewee model tests for Project creation and __str__ rendering."""
    def setup(self):
        # pytest-style per-test setup: fresh Database wrapper.
        self.test_db = Database()
    def test_create_project(self):
        # With all fields set, __str__ renders "SHORT - Name".
        project_name = "Super Good"
        project_short_name = "SG"
        project_desc = "Sequel to Good"
        project_data = {
            "name": project_name,
            "short_name": project_short_name,
            "description": project_desc,
        }
        Project.create(**project_data)
        p = Project.get(**project_data)
        assert p.name == project_name
        assert p.description == project_desc
        assert p.short_name == project_short_name
        assert str(p) == "SG - Super Good"
    def test_create_project_with_short_name_only(self):
        # With only short_name set, __str__ falls back to the short name.
        project_short_name = "SG"
        project_data = {
            "short_name": project_short_name,
        }
        Project.create(**project_data)
        p = Project.get(**project_data)
        assert not p.name
        assert not p.description
        assert p.short_name == project_short_name
        assert str(p) == "SG"
| 27.255814 | 55 | 0.614334 | 1,021 | 0.87116 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.085324 |
afd90efacf7b107495449294e2bf213bb6622473 | 3,388 | py | Python | dungeon_game.py | JoeSamyn/Dungeon_Game_Git | 3929f38a3d067aa0827e9488290edcd903d7a4e0 | [
"MIT"
] | 1 | 2018-12-14T09:42:10.000Z | 2018-12-14T09:42:10.000Z | dungeon_game.py | JoeSamyn/Dungeon_Game_Git | 3929f38a3d067aa0827e9488290edcd903d7a4e0 | [
"MIT"
] | 3 | 2018-12-03T18:23:29.000Z | 2018-12-03T18:24:09.000Z | dungeon_game.py | JoeSamyn/Dungeon_Game_Git | 3929f38a3d067aa0827e9488290edcd903d7a4e0 | [
"MIT"
] | null | null | null | import random
import os
# All 25 cells of the 5x5 dungeon, row by row (x varies fastest).
CELLS = [(x, y) for y in range(5) for x in range(5)]


def print_map(player):
    """Draw the 5x5 board as ASCII art, marking the player's cell with X."""
    print(" _" * 5)
    for x, y in CELLS:
        marker = "X" if (x, y) == player else "_"
        if x < 4:
            # Interior column: open cell, stay on the same line.
            print("|{}".format(marker), end="")
        else:
            # Last column: close the cell and finish the row.
            print("|{}|".format(marker), end="\n")
def get_locations():
    # Three distinct random cells: player start, monster, and door.
    return random.sample(CELLS, 3)
def move_player(player, move):
    """Return the player's new cell after `move`.

    Moves that would leave the 5x5 board (or unknown move strings) print a
    warning and return the unchanged position.
    """
    x, y = player
    if move == 'LEFT' and x > 0:
        return (x - 1, y)
    if move == 'RIGHT' and x < 4:
        return (x + 1, y)
    if move == 'UP' and y > 0:
        return (x, y - 1)
    if move == 'DOWN' and y < 4:
        return (x, y + 1)
    print("*INVALID PLAYER MOVE*")
    return (x, y)
def clear_Screen():
    # Clear the terminal: 'cls' on Windows (os.name == 'nt'), 'clear' elsewhere.
    os.system('cls' if os.name == 'nt' else "clear")
def get_moves(player):
    """Return a comma-separated string of the moves legal from `player`'s cell.

    Bug fix: the original used an elif chain, so the explicit corner cases
    (e.g. player_x == 0 and player_y == 0) were unreachable — at (0, 0) the
    first branch (player_y == 0) matched, only "UP" was removed, and the
    function offered "LEFT" even though move_player blocks it. Independent
    `if` checks remove every illegal direction, making the corner branches
    unnecessary.
    """
    moves = ["LEFT", "UP", "RIGHT", "DOWN"]
    player_x, player_y = player
    if player_y == 0:
        moves.remove("UP")
    if player_y == 4:
        moves.remove("DOWN")
    if player_x == 0:
        moves.remove("LEFT")
    if player_x == 4:
        moves.remove("RIGHT")
    return (", ").join(moves)
# Game setup: random, distinct cells for the player, the monster and the door.
player, monster, door = get_locations()
print("Welcome to the Dungeon!")
start = input("Press enter to start or 'Q' to quit.")
if start.upper() == 'Q':
    print("Okay see you next time!")
elif start == "":
    clear_Screen()
    while True:
        print("You're currently in room {}".format(player)) # current player position
        print("You can move {}".format(get_moves(player))) # moves legal from this cell
        print_map(player)
        move = input("> ")
        move = move.upper()
        if move == 'QUIT' or move == 'Q':
            break
        # otherwise attempt the move (invalid moves leave the player in place)
        else:
            clear_Screen()
            player = move_player(player, move)
            #print(move_player(player, move))
            # reached the door? the player wins
            if player == door:
                print("You made it to the door! You Win!")
                break
            elif player == monster:
                # walked into the monster: the player loses
                print("You hit the monster! Sorry you lose!:(")
                break;
            # loop continues until a win, a loss, or an explicit quit
afd925dfee06bf46bcafe0d3b3e160d1c36eddbe | 3,940 | py | Python | tests/providers/test_credit_card.py | pablofm/faker | f09ad1128da99ec15510aad79b2bc27f79e3165d | [
"MIT"
] | 2 | 2020-02-12T20:12:50.000Z | 2020-02-12T22:02:53.000Z | tests/providers/test_credit_card.py | pablofm/faker | f09ad1128da99ec15510aad79b2bc27f79e3165d | [
"MIT"
] | null | null | null | tests/providers/test_credit_card.py | pablofm/faker | f09ad1128da99ec15510aad79b2bc27f79e3165d | [
"MIT"
] | 1 | 2020-07-12T12:50:15.000Z | 2020-07-12T12:50:15.000Z | import re
import unittest
from faker import Faker
from faker.providers.bank.ru_RU import Provider as RuBank
class TestCreditCardProvider(unittest.TestCase):
    """Checks that every configured prefix generates card numbers matching
    the issuer's documented pattern (en_US credit_card provider)."""
    def setUp(self):
        self.fake = Faker(locale='en_US')
        Faker.seed(0)
        self.provider = self.fake.provider('faker.providers.credit_card')
        # Issuer number patterns (prefix + length) used by the tests below.
        self.mastercard_pattern = r'^(?:5[1-5][0-9]{2}|222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}$'
        self.visa_pattern = r'^4[0-9]{12}([0-9]{3}){0,2}$'
        self.discover_pattern = r'^6(?:011|5[0-9]{2})[0-9]{12}$'
        self.diners_club_pattern = r'^3(?:0[0-5]|[68][0-9])[0-9]{11}$'
        self.jcb_pattern = r'^(?:2131|1800|35\d{3})\d{11}$'
    def test_mastercard(self):
        for prefix in self.provider.prefix_mastercard:
            number = self.provider._generate_number(prefix, 16)
            assert re.match(self.mastercard_pattern, number)
    def test_visa13(self):
        # Visa numbers come in 13-, 16- and 19-digit variants.
        for prefix in self.provider.prefix_visa:
            number = self.provider._generate_number(prefix, 13)
            assert re.match(self.visa_pattern, number)
    def test_visa16(self):
        for prefix in self.provider.prefix_visa:
            number = self.provider._generate_number(prefix, 16)
            assert re.match(self.visa_pattern, number)
    def test_visa19(self):
        for prefix in self.provider.prefix_visa:
            number = self.provider._generate_number(prefix, 19)
            assert re.match(self.visa_pattern, number)
    def test_discover(self):
        for prefix in self.provider.prefix_discover:
            number = self.provider._generate_number(prefix, 16)
            assert re.match(self.discover_pattern, number)
    def test_diners_club(self):
        for prefix in self.provider.prefix_diners:
            number = self.provider._generate_number(prefix, 14)
            assert re.match(self.diners_club_pattern, number)
    def test_jcb16(self):
        for prefix in self.provider.prefix_jcb16:
            number = self.provider._generate_number(prefix, 16)
            assert re.match(self.jcb_pattern, number)
    def test_jcb15(self):
        for prefix in self.provider.prefix_jcb15:
            number = self.provider._generate_number(prefix, 15)
            assert re.match(self.jcb_pattern, number)
class TestRuRu(unittest.TestCase):
    """ Tests credit card in the ru_RU locale """
    def setUp(self):
        self.fake = Faker('ru_RU')
        Faker.seed(0)
        self.visa_pattern = r'^4[0-9]{15}$'
        self.mastercard_pattern = r'^(?:5[1-5][0-9]{2}|222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}$'
        self.mir_pattern = r'^220[0-4][0-9]{12}$'
        # NOTE(review): these alternations are unanchored on some branches
        # (e.g. '5[6-9]' matches anywhere) so the checks are laxer than they
        # look; '[4|7]' also matches a literal '|'. Consider tightening to
        # e.g. r'^(?:50|5[6-9]|6[0-9])[0-9]{14}$' and r'^3[47][0-9]{13}$'.
        self.maestro_pattern = r'^50|5[6-9]|6[0-9][0-9]{14}$'
        self.amex_pattern = r'^3[4|7][0-9]{13}$'
        self.unionpay_pattern = r'^62|81[0-9]{14}$'
    def test_visa(self):
        number = self.fake.credit_card_number('visa')
        assert re.match(self.visa_pattern, number)
    def test_mastercard(self):
        number = self.fake.credit_card_number('mastercard')
        assert re.match(self.mastercard_pattern, number)
    def test_mir(self):
        number = self.fake.credit_card_number('mir')
        assert re.match(self.mir_pattern, number)
    def test_maestro(self):
        number = self.fake.credit_card_number('maestro')
        assert re.match(self.maestro_pattern, number)
    def test_amex(self):
        number = self.fake.credit_card_number('amex')
        assert re.match(self.amex_pattern, number)
    def test_unionpay(self):
        number = self.fake.credit_card_number('unionpay')
        assert re.match(self.unionpay_pattern, number)
    def test_owner(self):
        # credit_card_full() renders a multi-line block; line 1 is the owner.
        card_data = self.fake.credit_card_full().split('\n')
        assert re.match('[A-Za-z]+', card_data[1])
    def test_issuer(self):
        # Line 4 of the rendered block must be a known Russian bank.
        card_data = self.fake.credit_card_full().split('\n')
        assert card_data[4] in RuBank.banks
| 37.52381 | 120 | 0.636802 | 3,825 | 0.970812 | 0 | 0 | 0 | 0 | 0 | 0 | 562 | 0.14264 |
afdb3c7f2904c4a3f81472a824f67882d1a662fb | 4,622 | py | Python | tools/benchmark/do_not_run_create_benchmark_data.py | dangervon/ironic | 01dd06a17673ec5157dda2ecfc51feb9d2f8e5c2 | [
"Apache-2.0"
] | null | null | null | tools/benchmark/do_not_run_create_benchmark_data.py | dangervon/ironic | 01dd06a17673ec5157dda2ecfc51feb9d2f8e5c2 | [
"Apache-2.0"
] | null | null | null | tools/benchmark/do_not_run_create_benchmark_data.py | dangervon/ironic | 01dd06a17673ec5157dda2ecfc51feb9d2f8e5c2 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import time
from oslo_db.sqlalchemy import enginefacade
from sqlalchemy import sql
from ironic.common import service
from ironic.conf import CONF # noqa To Load Configuration
from ironic.objects import node
def _create_test_nodes():
    """Populate the database with 10000 fake ipmi nodes for benchmarking.

    Progress is printed roughly every ten seconds; the payload mimics a
    typical bare-metal machine record.
    """
    print("Starting creation of fake nodes.")
    t_start = time.time()
    node_count = 10000
    last_report = time.time()
    for idx in range(node_count):
        fake_node = node.Node({
            'power_state': 'power off',
            'driver': 'ipmi',
            'driver_internal_info': {'test-meow': idx},
            'name': 'BenchmarkTestNode-%s' % idx,
            'driver_info': {
                'ipmi_username': 'admin',
                'ipmi_password': 'admin',
                'ipmi_address': 'testhost%s.env.top.level.domain' % idx},
            'resource_class': 'CUSTOM_BAREMETAL',
            'properties': {
                'cpu': 4,
                'memory': 32,
                'cats': idx,
                'meowing': True}})
        fake_node.create()
        since_report = time.time() - last_report
        if since_report > 10:
            last_report = time.time()
            print('* At %s nodes, %0.02f seconds. Total elapsed: %s'
                  % (idx, since_report, time.time() - t_start))
    elapsed = time.time() - t_start
    print('Created %s nodes in %s seconds.\n' % (node_count, elapsed))
def _mix_up_nodes_data():
    """Randomize indexed node fields so the data set resembles production.

    Issues raw SQL UPDATEs that flip maintenance flags, drivers,
    reservations, provision/power states, owners, lessees, instance UUIDs
    and last errors on random subsets of rows.  The statements are
    order-dependent (e.g. power_state is only set on rows already marked
    'active'), so do not reorder them.

    NOTE(review): RAND() and UUID() are MySQL/MariaDB functions -- this
    helper presumably targets a MySQL-backed ironic database; confirm
    before running elsewhere.
    """
    engine = enginefacade.writer.get_engine()
    conn = engine.connect()
    # A list of commands to mix up indexed field data a bit to emulate what
    # a production database may somewhat look like.
    commands = [
        "UPDATE nodes set maintenance = True where RAND() < 0.1",  # noqa Easier to read this way
        "UPDATE nodes set driver = 'redfish' where RAND() < 0.5",  # noqa Easier to read this way
        "UPDATE nodes set reservation = 'fake_conductor01' where RAND() < 0.02",  # noqa Easier to read this way
        "UPDATE nodes set reservation = 'fake_conductor02' where RAND() < 0.02",  # noqa Easier to read this way
        "UPDATE nodes set reservation = 'fake_conductor03' where RAND() < 0.02",  # noqa Easier to read this way
        "UPDATE nodes set reservation = 'fake_conductor04' where RAND() < 0.02",  # noqa Easier to read this way
        "UPDATE nodes set reservation = 'fake_conductor05' where RAND() < 0.02",  # noqa Easier to read this way
        "UPDATE nodes set reservation = 'fake_conductor06' where RAND() < 0.02",  # noqa Easier to read this way
        "UPDATE nodes set provision_state = 'active' where RAND() < 0.8",  # noqa Easier to read this way
        "UPDATE nodes set power_state = 'power on' where provision_state = 'active' and RAND() < 0.95",  # noqa Easier to read this way
        "UPDATE nodes set provision_state = 'available' where RAND() < 0.1",  # noqa Easier to read this way
        "UPDATE nodes set provision_state = 'manageable' where RAND() < 0.1",  # noqa Easier to read this way
        "UPDATE nodes set provision_state = 'clean wait' where RAND() < 0.05",  # noqa Easier to read this way
        "UPDATE nodes set provision_state = 'error' where RAND() < 0.05",  # noqa Easier to read this way
        "UPDATE nodes set owner = (select UUID()) where RAND() < 0.2",  # noqa Easier to read this way
        "UPDATE nodes set lessee = (select UUID()) where RAND() < 0.2",  # noqa Easier to read this way
        "UPDATE nodes set instance_uuid = (select UUID()) where RAND() < 0.95 and provision_state = 'active'",  # noqa Easier to read this way
        "UPDATE nodes set last_error = (select UUID()) where RAND() <0.05",  # noqa Easier to read this way
    ]
    start = time.time()
    for command in commands:
        print("Executing SQL command: \\" + command + ";\n")
        conn.execute(sql.text(command))
        print("* Completed command. %0.04f elapsed since start of commands."
              % (time.time() - start))
def main():
    """Entry point: configure the service and create the benchmark nodes.

    Note that _mix_up_nodes_data() is intentionally not invoked here; run it
    separately if randomized field data is wanted.
    """
    service.prepare_command()
    # Keep output terse while generating thousands of rows.
    CONF.set_override('debug', False)
    _create_test_nodes()


if __name__ == '__main__':
    # main() returns None, so the process always exits with status 0.
    sys.exit(main())
| 46.22 | 142 | 0.629598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,009 | 0.651017 |
afdbbfd6b9fd80257fe07451c26105c2209f7e5f | 1,803 | py | Python | sheet_names_xlrd.py | patkujawa-wf/excel-file-reverse-engineering | 6a1780a62d6fda34964659607e2b299e62066671 | [
"MIT"
] | 2 | 2021-04-17T06:07:02.000Z | 2021-04-17T06:30:14.000Z | sheet_names_xlrd.py | patkujawa-wf/excel-file-reverse-engineering | 6a1780a62d6fda34964659607e2b299e62066671 | [
"MIT"
] | null | null | null | sheet_names_xlrd.py | patkujawa-wf/excel-file-reverse-engineering | 6a1780a62d6fda34964659607e2b299e62066671 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
> time \ls -1 **/*.xlsx | python sheet_names_xlrd.py
> for fname in **/*.xlsx; do time echo $fname | python sheet_names_xlrd.py; done
❯ time echo 'xlsx/SOX Controls Testing Template.xlsx' | python sheet_names_xlrd.py
[u'Interim Testing', u'Year End Testing']
echo 'xlsx/SOX Controls Testing Template.xlsx' 0.00s user 0.00s system 31% cpu 0.003 total
python sheet_names_xlrd.py 62.72s user 0.87s system 99% cpu 1:03.79 total
INFO:sheet_names_api:Slow (took longer than 0.5 seconds) files:
{
"0.5802590847015381": "xlsm/Outline Check.xlsm",
"0.7123560905456543": "xlsx/SOX Failure Listing Status.xlsx",
"65.0460250377655": "xlsx/SOX Controls Testing Template.xlsx",
"0.87471604347229": "xlsx/-hp8gt.xlsx",
"1.2309041023254395": "xlsx/SOX Testing Status.xlsx",
"1.5334298610687256": "xlsm/Compare XML.xlsm"
}
"""
from sheet_names_api import main
def _xlrd(filepath):
    """Return the sheet names of *filepath* using xlrd.

    Experimental: only the first 4 KiB of the file are memory-mapped and
    handed to xlrd as ``file_contents``.  NOTE(review): the module docstring
    records that this "blows up on both xls and xlsx" -- a truncated mapping
    cannot work for zip-based .xlsx files, whose central directory sits at
    the end of the file.
    """
    import xlrd
    # https://secure.simplistix.co.uk/svn/xlrd/trunk/xlrd/doc/xlrd.html?p=4966
    # with xlrd.open_workbook(filepath, on_demand=True, ragged_rows=True) as wb:
    #     sheet_names = wb.sheet_names()
    #     return sheet_names
    # How about with memory mapping? Nope, blows up on both xls and xlsx
    import contextlib
    import mmap
    import os
    length = 2**10 * 4
    # length = 0 # whole file
    with open(filepath, 'rb') as f:
        # mmap throws if length is larger than file size
        length = min(os.path.getsize(filepath), length)
        with contextlib.closing(mmap.mmap(f.fileno(), length, access=mmap.ACCESS_READ)) as m,\
                xlrd.open_workbook(on_demand=True, file_contents=m) as wb:
            sheet_names = wb.sheet_names()
    return sheet_names
if __name__ == '__main__':
    # Benchmark the xlrd-based reader through the shared harness from
    # sheet_names_api; the listed .xls file is passed as an extra argument.
    main(_xlrd, ['xls/SMITH 2014 TRIP-Master List.xls'])
| 37.5625 | 94 | 0.691625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,249 | 0.691967 |
afdc784deb128427072c09b4859dccbc1d1a352d | 513 | py | Python | public/logger.py | IcyCC/fly6to4 | a15a37b1764a4739dd476584b420749864dd7a8a | [
"MIT"
] | null | null | null | public/logger.py | IcyCC/fly6to4 | a15a37b1764a4739dd476584b420749864dd7a8a | [
"MIT"
] | null | null | null | public/logger.py | IcyCC/fly6to4 | a15a37b1764a4739dd476584b420749864dd7a8a | [
"MIT"
] | null | null | null | import logging
# Root logger: DEBUG and above goes to monk.log, which is truncated on every
# run because of filemode='w'.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
    datefmt='%a, %d %b %Y %H:%M:%S',
    filename='monk.log',
    filemode='w',
)

# Mirror this module's records to the console as well; records still
# propagate to the root logger and therefore also land in monk.log.
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console = logging.StreamHandler()
console.setFormatter(formatter)
console.setLevel(logging.DEBUG)

log = logging.getLogger(__name__)
log.addHandler(console)
afdef504fcb6a404c0cfe9f0b040dbead2f75afe | 441 | py | Python | GeneratedCode/LISTENER_3_from_1.py | Beaconproj/CrossCloudVNFSimulation | 97023e05b57e54503259ae866608de6189b8c9a9 | [
"MIT"
] | 1 | 2021-09-25T04:17:55.000Z | 2021-09-25T04:17:55.000Z | GeneratedCode/LISTENER_3_from_1.py | Beaconproj/CrossCloudVNFSimulation | 97023e05b57e54503259ae866608de6189b8c9a9 | [
"MIT"
] | null | null | null | GeneratedCode/LISTENER_3_from_1.py | Beaconproj/CrossCloudVNFSimulation | 97023e05b57e54503259ae866608de6189b8c9a9 | [
"MIT"
] | null | null | null | #---- Python VM startup for LISTENERLISTENER_3_from_1 ---
import SSL_listener
incomingIP="localhost"
incomingPort=10031
incomingPrivateKeyFile="server.key"
incomingPublicKeyFile="server.crt"
outgoingIP="localhost"
outgoingPort=00000
outgoingPublicKeyFile="server.crt"
def startLISTENER_3_from_1():
incoming_ssl_EncryptionVNF= SSL_listener.SSL_listener(incomingIP, incomingPort, incomingPrivateKeyFile, incomingPublicKeyFile,"" )
#-------
| 29.4 | 131 | 0.825397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.283447 |
afe139ed89c12c99f9d7247c22954b5bf3007f6e | 960 | py | Python | downloader.py | Esshahn/cryptoticker | 6fb32712e380cb2a0605bafcfa64fe7fdf0367b7 | [
"MIT"
] | null | null | null | downloader.py | Esshahn/cryptoticker | 6fb32712e380cb2a0605bafcfa64fe7fdf0367b7 | [
"MIT"
] | null | null | null | downloader.py | Esshahn/cryptoticker | 6fb32712e380cb2a0605bafcfa64fe7fdf0367b7 | [
"MIT"
] | null | null | null | import json
import sys
import requests
def download_latest_crypto_data(config):
    """Fetch the latest CoinMarketCap listings, converted to config['currency'].

    Uses config['api_key'] for the X-CMC_PRO_API_KEY header and returns the
    decoded JSON payload.
    """
    endpoint = "https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest"
    query = {"convert": config["currency"]}
    request_headers = {
        "X-CMC_PRO_API_KEY": config["api_key"],
        "Accept": "application/json",
    }
    response = requests.get(endpoint, params=query, headers=request_headers)
    return response.json()
def load_json(filename):
    """Read a JSON document located next to this script and return it."""
    full_path = sys.path[0] + '/' + filename
    with open(full_path) as json_file:
        return json.load(json_file)
def save_file(filename, data):
    """Write *data* to *filename* (relative to the script directory).

    The file is truncated if it already exists.  A context manager replaces
    the old open/write/close sequence, which leaked the handle when the
    write raised.
    """
    with open(sys.path[0] + '/' + filename, "w") as f:
        f.write(data)
    print("saving: " + filename)
# ----- uncomment these if you want to run this script on its own ----- #
#config = load_json("user-data.json")
#data = download_latest_crypto_data(config)
#save_file("crypto-data.json", json.dumps(data))
| 24.615385 | 79 | 0.652083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.388542 |
afe19566c2c0b240b9664234918a7ca0baf1f668 | 1,296 | py | Python | core/src/zeit/campus/browser/social.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 5 | 2019-05-16T09:51:29.000Z | 2021-05-31T09:30:03.000Z | core/src/zeit/campus/browser/social.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 107 | 2019-05-24T12:19:02.000Z | 2022-03-23T15:05:56.000Z | core/src/zeit/campus/browser/social.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 3 | 2020-08-14T11:01:17.000Z | 2022-01-08T17:32:19.000Z | # XXX 100% copy&paste from zeit.magazin.browser.social
import copy
import zeit.push.browser.form
import zeit.push.interfaces
class SocialBase(zeit.push.browser.form.SocialBase):
    """Campus variant of the social push form.

    Splices the two campus-specific fields into the inherited social field
    list right after its first two entries.  NOTE: a file-level comment says
    this is a 100% copy of zeit.magazin.browser.social.
    """

    # Fields added by zeit.campus on top of the generic social fields.
    campus_fields = ('facebook_campus_text', 'facebook_campus_enabled')

    # Copy the parent's field group so mutating .fields below does not
    # modify the base class's shared object.
    social_fields = copy.copy(zeit.push.browser.form.SocialBase.social_fields)
    social_fields.fields = (
        social_fields.fields[:2] +
        campus_fields +
        social_fields.fields[2:]
    )

    def __init__(self, *args, **kw):
        super(SocialBase, self).__init__(*args, **kw)
        # Insert campus_fields at the wanted position: drop the social
        # fields wherever the parent placed them, then re-append them in the
        # order defined by social_fields above.
        self.form_fields = self.form_fields.omit(*self.social_fields.fields)
        self.form_fields += self.social_form_fields.select(
            *self.social_fields.fields)

    @property
    def social_form_fields(self):
        """Parent's social form fields plus the campus fields from IAccountData."""
        form_fields = super(SocialBase, self).social_form_fields
        return (
            form_fields +
            self.FormFieldsFactory(zeit.push.interfaces.IAccountData).select(
                *self.campus_fields))

    def setUpWidgets(self, *args, **kw):
        super(SocialBase, self).setUpWidgets(*args, **kw)
        # The campus text becomes mandatory once the enable checkbox is set
        # in the submitted form data.
        if self.request.form.get('%s.facebook_campus_enabled' % self.prefix):
            self._set_widget_required('facebook_campus_text')
afe21934aecb7f7f4d3c5856a5a4ed1415ed6039 | 1,584 | py | Python | sample/helpers.py | jeffs2696/AnalyticalDuctModes | 67d8e1729fca8a6ad269583591f6a0a61a274f8d | [
"BSD-2-Clause"
] | null | null | null | sample/helpers.py | jeffs2696/AnalyticalDuctModes | 67d8e1729fca8a6ad269583591f6a0a61a274f8d | [
"BSD-2-Clause"
] | null | null | null | sample/helpers.py | jeffs2696/AnalyticalDuctModes | 67d8e1729fca8a6ad269583591f6a0a61a274f8d | [
"BSD-2-Clause"
] | null | null | null | import pychebfun
import numpy as np
from scipy import special as sp
def get_answer():
    """Return True; trivial sanity hook used by the sample test suite."""
    return True
def kradial(m, a, b):
    """Locate radial wavenumbers of an annular duct for azimuthal mode *m*.

    The eigenvalue condition Jm'(k a) Ym'(k b) - Jm'(k b) Ym'(k a) = 0 is
    approximated by a Chebyshev expansion on the interval (10, 100) and its
    roots are extracted.

    Parameters
    ----------
    m : int
        Azimuthal (circumferential) mode number.
    a : float
        Inner radius (starting point of the annulus).
    b : float
        Outer radius (ending point of the annulus).

    Returns
    -------
    tuple
        (roots, re_roots, im_roots, F, f_cheb): complex roots, their real
        and imaginary parts, the characteristic function F(k, m, a, b),
        and the pychebfun approximation object.
    """
    # Derivatives of the Bessel functions via the standard recurrence.
    jm_prime = lambda m, x: 0.5 * (sp.jv(m - 1, x) - sp.jv(m + 1, x))
    ym_prime = lambda m, x: 0.5 * (sp.yv(m - 1, x) - sp.yv(m + 1, x))
    F = lambda k, m, a, b: (jm_prime(m, k * a) * ym_prime(m, k * b)
                            - jm_prime(m, k * b) * ym_prime(m, k * a))
    # Chebyshev approximation of the characteristic function on (10, 100).
    f_cheb = pychebfun.Chebfun.from_function(lambda x: F(x, m, a, b),
                                             domain=(10, 100))
    re_roots = f_cheb.roots().real
    im_roots = f_cheb.roots().imag
    roots = re_roots + im_roots * 1j
    print(roots)
    return roots, re_roots, im_roots, F, f_cheb
def k_axial(M, krad, k_wave):
    """Axial wavenumber for Mach number *M* and radial wavenumber *krad*.

    NOTE(review): the *k_wave* argument is immediately overwritten by the
    hard-coded 726.6 Hz free-space wave number below, so the parameter is
    effectively ignored -- confirm whether callers rely on that.
    """
    freq = 726.6
    omega = 2 * np.pi * freq   # angular frequency [rad/s]
    c0 = 343.15                # speed of sound [m/s]
    # rho0 = 1.225             # density
    k_wave = omega / c0        # clobbers the argument (see docstring)
    beta = 1 - M ** 2
    # NOTE(review): the convective formula usually divides by (1 - M**2)
    # once; here beta already equals 1 - M**2 yet the denominator is
    # beta**2 -- verify against the derivation before changing anything.
    kaxial = (-M * k_wave + np.sqrt(k_wave ** 2 - beta * krad ** 2)) / beta ** 2
    print(k_wave, kaxial)
    return kaxial
| 25.548387 | 88 | 0.558081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 691 | 0.436237 |
afe3566098dc4e4a8a080871d6a7f8b01b9ea714 | 9,922 | py | Python | server_py_files/data/filestream.py | bopopescu/timing_system_software | 11dbb8143dc883507c886a4136cf1de0e3534602 | [
"MIT"
] | 1 | 2019-02-03T14:55:48.000Z | 2019-02-03T14:55:48.000Z | server_py_files/data/filestream.py | bopopescu/timing_system_software | 11dbb8143dc883507c886a4136cf1de0e3534602 | [
"MIT"
] | null | null | null | server_py_files/data/filestream.py | bopopescu/timing_system_software | 11dbb8143dc883507c886a4136cf1de0e3534602 | [
"MIT"
] | 1 | 2020-07-23T17:25:41.000Z | 2020-07-23T17:25:41.000Z | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 05 21:26:33 2014
@author: Nate
"""
import time, datetime, uuid, io, os
import xstatus_ready
import file_locations
import XTSM_Server_Objects
import pdb
import msgpack
import cStringIO
import zlib
import zipfile
import pprint
DEFAULT_CHUNKSIZE=100*1000*1000
class FileStream_old(xstatus_ready.xstatus_ready, XTSM_Server_Objects.XTSM_Server_Object):
    """
    Legacy (Python 2) file stream for data bombs and XTSM stacks; wraps the
    io module to create an infinite output stream over a series of files of
    approximately one 'chunksize' length.  As data is written in, files that
    exceed the chunksize are closed and a new one is opened; write() returns
    the file the data landed in, and no single write() call is split across
    files.
    """
    def __init__(self, params={}):
        # NOTE(review): mutable default argument; harmless only as long as
        # no caller mutates the shared dict.
        print "class FileStream, func __init__()"
        today = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d')
        self.compression_strength = 9  # 1-9, 1 fastest, least compression. 6 default
        self.compressobj = zlib.compressobj(self.compression_strength)
        defaultparams = {'timecreated': time.time(),
                         'chunksize': DEFAULT_CHUNKSIZE,
                         'byteswritten': 0}
        try:
            # Map the logical destination selector + this machine's MAC to a
            # concrete directory, then append today's date folder.
            location_root = file_locations.file_locations[params['file_root_selector']][uuid.getnode()]
            defaultparams.update({'location_root': location_root + '/' + today + '/'})
        except KeyError:
            print "error"
            raise self.UnknownDestinationError
        # Caller params override the defaults; everything becomes an attribute.
        for key in params.keys():
            defaultparams.update({key: params[key]})
        for key in defaultparams.keys():
            setattr(self, key, defaultparams[key])
        self.location = self.location_root + str(uuid.uuid1()) + '.msgp'
        try:
            #self.zip_file = zipfile.ZipFile(self.location, mode='a', compression=zipfile.ZIP_DEFLATED)
            self.stream = io.open(self.location, 'ab')
        except IOError:  # Folder doesn't exist, then we make the day's folder.
            os.makedirs(self.location_root)
            #self.zip_file = zipfile.ZipFile()
            self.stream = io.open(self.location, 'ab')
        #self.write(msgpack.packb('}'))
        self.filehistory = [self.location]
        print self.location

    class UnknownDestinationError(Exception):
        # Raised when the destination selector / machine id is not configured.
        pass

    def output_log(self):
        """
        outputs a log of recently written files to DBFS_LOG.txt
        """
        logstream = io.open(self.location_root + 'DBFS_LOG.txt', 'a')
        time_format = '%Y-%m-%d %H:%M:%S'
        # NOTE(review): both timestamps are taken "now", so the range header
        # always spans a single instant.
        time1 = datetime.datetime.fromtimestamp(time.time()).strftime(time_format)
        time2 = datetime.datetime.fromtimestamp(time.time()).strftime(time_format)
        timeheader = time1 + " through " + time2
        msg = "\nThis is a log of file writes from the DataBomb module:\n"
        msg = msg + "This module has written the files below from the time period\n"
        msg = msg + timeheader + '\n\n'.join(self.filehistory)
        logstream.write(unicode(msg))
        # Dump the object's attribute list for debugging purposes.
        pprint.pprint(dir(self), logstream)
        logstream.close()

    def write(self, bytestream, keep_stream_open=False):
        """
        writes bytes to the io stream - if the total bytes written by this
        and previous calls since last chunk started exceeds chunksize,
        opens a new file for the next chunk after writing the current request
        returns the file location of the chunk written.
        """
        self.byteswritten += len(bytestream)
        cBlock = self.compressobj.compress(bytestream)
        self.stream.write(cBlock)
        if (self.byteswritten > self.chunksize) and (not keep_stream_open):
            self.__flush__()
        # NOTE(review): the same payload is written AGAIN below, uncompressed,
        # and byteswritten is incremented a second time -- the on-disk chunk
        # therefore interleaves zlib data with raw bytes and chunk accounting
        # is doubled.  Confirm whether one of the two writes should go.
        self.stream.write(bytestream)
        self.byteswritten += len(bytestream)
        return self.location

    def open_file(self):
        # NOTE(review): debugging scratch method -- reads a hard-coded file
        # path and re-imports modules locally; not used by the write path.
        fileName = 'c:/wamp/www/raw_buffers/DBFS/2014-10-13/6ea6bf2e-52fe-11e4-b225-0010187736b5.msgp'
        import zlib
        import cStringIO
        import zipfile
        zf = zipfile.ZipFile(fileName, 'r')
        print zf.namelist()
        for info in zf.infolist():
            print info.filename
            print '\tComment:\t', info.comment
            print '\tModified:\t', datetime.datetime(*info.date_time)
            print '\tSystem:\t\t', info.create_system, '(0 = Windows, 3 = Unix)'
            print '\tZIP version:\t', info.create_version
            print '\tCompressed:\t', info.compress_size, 'bytes'
            print '\tUncompressed:\t', info.file_size, 'bytes'
            print
        #info = zf.getinfo(filename)
        #data = zf.read(filename)
        f = open(fileName, 'rb')
        c = zlib.decompressobj()
        cBlock = c.decompress(f.read())
        print cBlock
        output = cStringIO.StringIO(cBlock)
        unpacker = msgpack.Unpacker(output, use_list=False)  # If data was msgpacked
        print unpacker.next()
        print cBlock

    def chunkon(self):
        """
        this method creates a file for the next chunk of data
        """
        #self.stream.write(msgpack.packb('{'))
        self.stream.close()
        self.location = self.location_root + str(uuid.uuid1()) + '.msgp'
        self.stream = io.open(self.location, 'ab')
        # Fresh compressor per chunk so each file is an independent zlib stream.
        self.compressobj = zlib.compressobj(self.compression_strength)
        #self.stream.write(msgpack.packb('}'))
        self.filehistory.append(self.location)
        self.byteswritten = 0

    def __flush__(self):
        # Finalize the current zlib stream, flush the file, and roll over to
        # the next chunk, logging the file history.
        cBlock = self.compressobj.flush()
        self.stream.write(cBlock)
        self.stream.flush()
        self.chunkon()
        self.output_log()
##############################################################################
class Filestream(xstatus_ready.xstatus_ready, XTSM_Server_Objects.XTSM_Server_Object):
    """
    CP
    A custom file stream object for data bombs and XTSM stacks; wraps the
    zipfile module.  write_file() returns the full path to the data, and no
    chunk of data passed in a single call is segmented into multiple files.
    """
    def __init__(self, params={}):
        # NOTE(review): mutable default argument; safe only if callers never
        # mutate the shared dict.
        print "class FS, func __init__()"
        self.init_time = time.time()
        self.today = datetime.datetime.fromtimestamp(self.init_time).strftime('%Y-%m-%d')
        self.defaultparams = {'zip archive created': self.init_time}
        try:
            # Resolve the destination directory from the selector + this
            # machine's MAC address.
            self.location_root = file_locations.file_locations[params['file_root_selector']][uuid.getnode()]
            self.defaultparams.update({'location_root': self.location_root + '\\' + self.today + '\\'})
        except KeyError:
            print "error"
            # NOTE(review): interactive debugger left in the error path --
            # this will hang unattended processes before the raise.
            pdb.set_trace()
            raise self.UnknownDestinationError
        for key in params.keys():
            self.defaultparams.update({key: params[key]})
        for key in self.defaultparams.keys():
            setattr(self, key, self.defaultparams[key])
        # NOTE(review): this handle is reassigned (not closed) by
        # output_log(), so the one opened here is leaked after the first log.
        self.logstream = io.open(self.location_root + 'filestream_log.txt', 'a')
        self.logstream.write(unicode('This is a log of file writes\n'))
        self.root_zip_name = str(uuid.uuid1()) + '.zip'
        print self.location_root

    class UnknownDestinationError(Exception):
        # Raised when the destination selector / machine id is not configured.
        pass

    def output_log(self):
        """
        outputs a log of recently written files
        """
        self.logstream = io.open(self.location_root + 'filestream_log.txt', 'a')
        time_format = '%Y-%m-%d %H:%M:%S'
        # Timestamp reflects stream creation, not the time of this write.
        time1 = datetime.datetime.fromtimestamp(self.init_time).strftime(time_format)
        timeheader = time1
        msg = "This module has written,\n"
        msg = msg + self.zip_file_name + '\\' + self.fileName + '\n'
        msg = msg + "at time, " + timeheader + '\n'
        self.logstream.write(unicode(msg))
        #pprint.pprint(unicode(dir(self)), logstream)
        self.logstream.close()

    def _write_file(self, msg, comments='', prefix='', extension='.dat', is_backup=False):
        """
        writes a file to the zip archive; backups go to a 'Backup_' archive.
        Returns '<archive path>/<member name>'.
        """
        if is_backup:
            self.zip_file_name = self.location_root + 'Backup_' + self.root_zip_name
        else:
            self.zip_file_name = self.location_root + self.root_zip_name
        try:
            self.zip_file = zipfile.ZipFile(self.zip_file_name,
                                            mode='a',
                                            compression=zipfile.ZIP_DEFLATED,
                                            allowZip64=True)
        except IOError:  # Folder doesn't exist, then we make the day's folder.
            os.makedirs(self.location_root)
            self.zip_file = zipfile.ZipFile(self.zip_file_name,
                                            mode='a',
                                            compression=zipfile.ZIP_DEFLATED,
                                            allowZip64=True)
        # Unique member name; archive member comment carries caller comments
        # plus the stream's parameter snapshot.
        self.fileName = str(prefix) + str(uuid.uuid1()) + str(extension)
        info = zipfile.ZipInfo(self.fileName, date_time=time.localtime(time.time()))
        info.compress_type = zipfile.ZIP_DEFLATED
        info.comment = comments + str(self.defaultparams)
        self.zip_file.writestr(info, msg)
        self.zip_file.close()
        self.output_log()
        return self.zip_file_name + "/" + self.fileName

    def write_file(self, msg, comments='', prefix='', extension='.dat', is_backup=False):
        """
        writes a file to the zip archive, mirroring it to the backup archive
        first.  The is_backup parameter is accepted but ignored here.
        """
        self._write_file(msg, comments=comments, prefix='Backup_'+prefix, extension=extension, is_backup=True)
        # NOTE(review): check_todays_files is not defined in this class or
        # excerpt -- presumably provided by a mixin; confirm, otherwise this
        # raises AttributeError at runtime.
        self.check_todays_files()
        return self._write_file(msg, comments=comments, prefix=prefix, extension=extension, is_backup=False)

    def __flush__(self):
        # Archives are opened and closed per write; nothing to flush.
        pass
| 42.042373 | 110 | 0.606632 | 9,504 | 0.957871 | 0 | 0 | 0 | 0 | 0 | 0 | 2,890 | 0.291272 |
afe3c60ac04c3ecfe19a0cf5fecce4f3eaf59c99 | 559 | py | Python | backend/portfolify/wsgi.py | JermyTan/Portfolify | 40d86862747699a69e8b1fe55fa5fe0fec0b9776 | [
"MIT"
] | 3 | 2021-01-16T16:03:49.000Z | 2022-03-03T15:11:14.000Z | backend/portfolify/wsgi.py | JermyTan/portfolify | 40d86862747699a69e8b1fe55fa5fe0fec0b9776 | [
"MIT"
] | null | null | null | backend/portfolify/wsgi.py | JermyTan/portfolify | 40d86862747699a69e8b1fe55fa5fe0fec0b9776 | [
"MIT"
] | null | null | null | """
WSGI config for portfolify project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# only for dev/test
from dotenv import load_dotenv
TESTING = "test" in sys.argv
load_dotenv(".env.backend.test" if TESTING else ".env.backend.local")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portfolify.settings')
application = get_wsgi_application()
| 24.304348 | 78 | 0.779964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 333 | 0.595707 |
afe3f3d133c9cba1317bdc7b0f15dfe2cc037e1d | 356 | py | Python | Raspberry_Pi_Pico/7_segment_display/four_digit_display.py | jckantor/cbe61622 | bdc08e6c4f0674c5e991617945cafd1b121d6b4b | [
"MIT"
] | 2 | 2021-11-22T20:36:35.000Z | 2021-12-07T07:52:10.000Z | Raspberry_Pi_Pico/7_segment_display/four_digit_display.py | jckantor/cbe-virtual-laboratory | bdc08e6c4f0674c5e991617945cafd1b121d6b4b | [
"MIT"
] | null | null | null | Raspberry_Pi_Pico/7_segment_display/four_digit_display.py | jckantor/cbe-virtual-laboratory | bdc08e6c4f0674c5e991617945cafd1b121d6b4b | [
"MIT"
] | 1 | 2021-12-11T20:39:32.000Z | 2021-12-11T20:39:32.000Z | import tm1637
import machine
import utime
disp = tm1637.TM1637(clk=machine.Pin(3), dio=machine.Pin(2))
adc = machine.ADC(4)
def display_mv(timer):
global adc, disp
mv = 0
N = 50
for k in range(N):
mv += 3300*adc.read_u16()/65535/N
disp.number(int(mv))
machine.Timer(freq=2, mode=machine.Timer.PERIODIC, callback=display_mv)
| 19.777778 | 71 | 0.674157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
afe4611c351cf1f11c0820aaf3d3d5d4db377839 | 649 | py | Python | leetcode/climbingStairs.py | montukv/Coding-problem-solutions | 973009c00038cc57500d965871376a60f8c4e0d1 | [
"MIT"
] | null | null | null | leetcode/climbingStairs.py | montukv/Coding-problem-solutions | 973009c00038cc57500d965871376a60f8c4e0d1 | [
"MIT"
] | null | null | null | leetcode/climbingStairs.py | montukv/Coding-problem-solutions | 973009c00038cc57500d965871376a60f8c4e0d1 | [
"MIT"
] | null | null | null | '''70. Climbing Stairs
Easy
3866
127
Add to List
Share
You are climbing a stair case. It takes n steps to reach to the top.
Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
Note: Given n will be a positive integer.
Example 1:
Input: 2
Output: 2
Explanation: There are two ways to climb to the top.
1. 1 step + 1 step
2. 2 steps
Example 2:
Input: 3
Output: 3
Explanation: There are three ways to climb to the top.
1. 1 step + 1 step + 1 step
2. 1 step + 2 steps
3. 2 steps + 1 step'''
n = int(input())
a,b = 1,2
for i in range(n-1):
a,b = b,a+b
print(a) | 17.078947 | 97 | 0.647149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 570 | 0.878274 |
afe80106cb57e7805a34937dd45e9e7caf4d8c02 | 433 | py | Python | server/run_server.py | juandisay/twisted-docker | c3b317e70100801cbd8f883598484f6b77f56f73 | [
"MIT"
] | 5 | 2016-01-30T21:09:21.000Z | 2021-08-31T15:17:05.000Z | server/run_server.py | juandisay/twisted-docker | c3b317e70100801cbd8f883598484f6b77f56f73 | [
"MIT"
] | null | null | null | server/run_server.py | juandisay/twisted-docker | c3b317e70100801cbd8f883598484f6b77f56f73 | [
"MIT"
] | 4 | 2019-02-25T10:58:45.000Z | 2020-04-21T23:56:32.000Z | from twisted.application import service, internet
from server import HTTPEchoFactory
import os
# default port in case of the env var not was properly set.
ECHO_SERVER_PORT = 8000
proxy_port = int(os.environ.get('ECHO_SERVER_PORT', ECHO_SERVER_PORT))
application = service.Application('TwistedDockerized')
factory = HTTPEchoFactory()
server = internet.TCPServer(proxy_port, factory)
server.setServiceParent(application) | 33.307692 | 71 | 0.796767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.224018 |
afe8be14fffe4eab282e23a16681b34737931618 | 1,946 | py | Python | lesson-12/01/timestamp.py | minimum-hsu/tutorial-python | 667692e7cd13a8a4d061a4da530dc2dfe25ac1de | [
"MIT"
] | null | null | null | lesson-12/01/timestamp.py | minimum-hsu/tutorial-python | 667692e7cd13a8a4d061a4da530dc2dfe25ac1de | [
"MIT"
] | null | null | null | lesson-12/01/timestamp.py | minimum-hsu/tutorial-python | 667692e7cd13a8a4d061a4da530dc2dfe25ac1de | [
"MIT"
] | null | null | null | from datetime import datetime
def parse_timestamp(t):
try:
return datetime.strptime(
t,
'%Y-%m-%dT%H:%M:%SZ'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%Y-%m-%dT%H:%M:%S.%fZ'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%Y-%m-%dT%H:%M:%S%z'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%Y-%m-%dT%H:%M:%S.%f%z'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%Y-%m-%dT%H:%M:%S'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%Y-%m-%dT%H:%M:%S.%f'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%Y-%m-%d %H:%M:%SZ'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%Y-%m-%d %H:%M:%S.%fZ'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%Y-%m-%d %H:%M:%S%z'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%Y-%m-%d %H:%M:%S.%f%z'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%Y-%m-%d %H:%M:%S'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%Y-%m-%d %H:%M:%S.%f'
).utctimetuple()
except:
pass
try:
return datetime.strptime(
t,
'%a %b %d %H:%M:%S %Z %Y'
).utctimetuple()
except:
pass
return None
| 17.531532 | 37 | 0.400308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 283 | 0.145427 |
afe9167f50f2d41a307f7dfa62dcb20c1ec54f39 | 1,645 | py | Python | futaba/journal/listener.py | Hoffs/futaba | 4d07c421c4229c81ddd42da1a49594b8cf11832d | [
"MIT"
] | 23 | 2018-09-17T09:06:27.000Z | 2021-05-27T15:21:37.000Z | futaba/journal/listener.py | Hoffs/futaba | 4d07c421c4229c81ddd42da1a49594b8cf11832d | [
"MIT"
] | 257 | 2018-08-18T21:27:54.000Z | 2020-12-29T23:27:10.000Z | futaba/journal/listener.py | Hoffs/futaba | 4d07c421c4229c81ddd42da1a49594b8cf11832d | [
"MIT"
] | 22 | 2018-09-09T09:03:13.000Z | 2021-11-09T03:34:34.000Z | #
# journal/listener.py
#
# futaba - A Discord Mod bot for the Programming server
# Copyright (c) 2017-2020 Jake Richardson, Ammon Smith, jackylam5
#
# futaba is available free of charge under the terms of the MIT
# License. You are free to redistribute and/or modify it under those
# terms. It is distributed in the hopes that it will be useful, but
# WITHOUT ANY WARRANTY. See the LICENSE file for more details.
#
import logging
from abc import abstractmethod
from pathlib import PurePath
logger = logging.getLogger(__name__)
__all__ = ["Listener"]
class Listener:
    """Base class for journal listeners subscribed to a router path."""

    def __init__(self, router, path, recursive=True):
        self.router = router
        self.path = PurePath(path)
        self.recursive = recursive

    def check(self, path, guild, content, attributes):
        """Return True when this listener should handle the given event."""
        if not self.filter(path, guild, content, attributes):
            logger.debug("Filter rejected journal entry")
            return False
        # Non-recursive listeners only accept events at their exact path.
        if not self.recursive and self.path != path:
            logger.debug("Ignoring non-recursive listener")
            return False
        return True

    # This method is meant to provide a default implementation that can be
    # overridden by subclasses for finer-grained event filtering.
    # pylint: disable=no-self-use
    def filter(self, path, guild, content, attributes):
        """Accept every event by default; override to narrow the stream."""
        return True

    @abstractmethod
    async def handle(self, path, guild, content, attributes):
        """Process an accepted event, in whatever way the subclass decides."""
| 28.859649 | 89 | 0.66383 | 1,088 | 0.661398 | 0 | 0 | 221 | 0.134347 | 201 | 0.122188 | 819 | 0.497872 |
afe927e59a5060c167cb4d8b2cbd1290c4055a6e | 1,215 | py | Python | tests/test_db.py | haniffalab/adifa | ff799cee05573e7506e017fbc6269c65f1caadff | [
"MIT"
] | null | null | null | tests/test_db.py | haniffalab/adifa | ff799cee05573e7506e017fbc6269c65f1caadff | [
"MIT"
] | 7 | 2022-01-10T14:14:42.000Z | 2022-03-30T14:56:20.000Z | tests/test_db.py | haniffalab/sci-adifa | c771b1192fa965baecfa898e5f4e7402ea630b31 | [
"MIT"
] | null | null | null | from datetime import datetime
import sqlite3
import pytest
from adifa import models
#from adifa.db import get_db
def test_post_dataset(session):
    """Persisting a Dataset through the session assigns it a primary key."""
    post = models.Dataset(
        filename='test.h5ad',
        hash='1234',
        title='test',
        desc='',
        date_created=datetime.now(),
        date_modified=datetime.now(),
        data_obs='{}',
        data_var='{}',
        data_uns='{}',
        data_obsm='{}',
        data_varm='{}',
        pub_doi='',
        pub_link='',
        pub_author='',
        pub_group='',
        pub_date=datetime.now(),
        download_link=''
    )
    session.add(post)
    session.commit()
    # The auto-increment id is only populated after the commit.
    assert post.id > 0
def test_get_close_db(db):
    # NOTE(review): the early return disables this test entirely; the dead
    # code below also references `current_app`, which is never imported in
    # this module and would raise NameError if re-enabled as-is.
    return True
    with pytest.raises(sqlite3.ProgrammingError) as e:
        db.execute("SELECT 1")
    current_app.logger.error(e)
    assert "closed" in str(e.value)
def test_init_db_command(runner, monkeypatch):
    # NOTE(review): disabled via the early return; everything below is dead
    # code kept for reference (it patches adifa.db.init_db and runs the
    # `init-db` CLI command).
    return True

    class Recorder:
        called = False

    def fake_init_db():
        Recorder.called = True

    monkeypatch.setattr("adifa.db.init_db", fake_init_db)
    result = runner.invoke(args=["init-db"])
    assert "Initialized" in result.output
    assert Recorder.called
| 21.696429 | 57 | 0.606584 | 38 | 0.031276 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.116049 |
afe94ca2d45bfd979c9c6fa0ac9fdd90eb8ec0aa | 1,647 | py | Python | authors/apps/articles/views/article_favourite_view.py | AmosWels/ah-django | 5b8e39053d63f1dfb3c14066b163a1d37af91076 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/articles/views/article_favourite_view.py | AmosWels/ah-django | 5b8e39053d63f1dfb3c14066b163a1d37af91076 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/articles/views/article_favourite_view.py | AmosWels/ah-django | 5b8e39053d63f1dfb3c14066b163a1d37af91076 | [
"BSD-3-Clause"
] | null | null | null | from django.http import Http404
from django.core import exceptions
from rest_framework import status
from rest_framework.generics import RetrieveUpdateAPIView
from rest_framework.permissions import (
IsAuthenticatedOrReadOnly, IsAuthenticated
)
from rest_framework.response import Response
from rest_framework.views import APIView
from ..models import Article
from ..renderers import ArticleJSONRenderer
from ..serializers import (
ArticleSerializer
)
from ..exceptions import ArticleDoesNotExist
from django.db.models import Avg
from django.shortcuts import render, get_object_or_404
class ArticleFavouriteAPIView(APIView):
    """Favourite (POST) or unfavourite (DELETE) an article for the requesting user."""
    permission_classes = (IsAuthenticated,)
    renderer_classes = (ArticleJSONRenderer,)
    serializer_class = ArticleSerializer

    def post(self, request, slug):
        """Mark the article identified by ``slug`` as a favourite."""
        article = self.get_article(slug)
        request.user.profile.favourite(article)
        return self._article_response(request, article, status.HTTP_201_CREATED)

    def delete(self, request, slug):
        """Remove the article identified by ``slug`` from favourites."""
        article = self.get_article(slug)
        request.user.profile.unfavourite(article)
        return self._article_response(request, article, status.HTTP_200_OK)

    def _article_response(self, request, article, status_code):
        # Shared serialization for both endpoints; the request goes in the
        # context so the serializer can compute user-specific fields.
        serializer = self.serializer_class(article, context={'request': request})
        return Response(serializer.data, status=status_code)

    def get_article(self, slug):
        """Fetch the article or raise 404 when the slug is unknown."""
        try:
            return Article.objects.get(slug=slug)
        except Article.DoesNotExist:
            raise Http404
| 29.410714 | 79 | 0.743169 | 1,043 | 0.633273 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.010929 |
afea6a5fe0f97d79b126baf17d6fd7a57777dd2b | 7,576 | py | Python | apt-select.py | pombredanne/apt-select | 2efc793e5d3c958a9c9ef2c67e88fcdab11adf08 | [
"MIT"
] | null | null | null | apt-select.py | pombredanne/apt-select | 2efc793e5d3c958a9c9ef2c67e88fcdab11adf08 | [
"MIT"
] | null | null | null | apt-select.py | pombredanne/apt-select | 2efc793e5d3c958a9c9ef2c67e88fcdab11adf08 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from sys import exit, stderr, version_info
from os import getcwd, path
from subprocess import check_output
from arguments import get_args
from util_funcs import get_html, HTMLGetError
from mirrors import Mirrors
def not_ubuntu():
    """Abort the program with a message on non-Ubuntu systems."""
    raise SystemExit("Not an Ubuntu OS")
def confirm_mirror(uri):
    """Check if a whitespace-split sources.list line is a usable mirror entry.

    ``uri`` is the list of fields from one line; a valid entry starts with
    ``deb`` or ``deb-src`` followed by an http:// or ftp:// URL.
    Returns True or False.

    Fixed: the original indexed ``uri[1]`` without checking the length, so a
    line with a single field (e.g. just "deb") raised IndexError.
    """
    if len(uri) < 2:
        return False
    deb = ('deb', 'deb-src')
    proto = ('http://', 'ftp://')
    return uri[0] in deb and (proto[0] in uri[1] or proto[1] in uri[1])
def assign_defaults(info, keys, default):
    """Assign a default dict value to key if key is not present"""
    for key in keys:
        info.setdefault(key, default)
def ask(query):
    """Prompt the user with ``query`` and return the raw input string."""
    return get_input(query)
def yes_or_no(query):
    """Keep prompting until the user answers 'yes' (continue) or 'no' (exit)."""
    answer = ask(query)
    while answer != 'yes':
        if answer == 'no':
            exit(0)
        answer = ask("Please enter 'yes' or 'no': ")
def apt_select():
    """Run apt-select: Ubuntu archive mirror reporting tool.

    Parses command-line flags, benchmarks Ubuntu mirrors, optionally looks
    up their Launchpad statuses, reports the top ones and (unless listing
    only) generates a new sources.list in the current directory.

    Fixes relative to the previous revision:
      * the HTMLGetError handler referenced ``mirrors_list`` before
        assignment (NameError) — it now reports ``mirrors_url``;
      * the "cannot connect" message dumped the whole mirror list instead
        of the source URL;
      * ``assign_defaults`` seeded the unused key "Org" while the report
        reads "Organisation".
    """
    parser = get_args()
    args = parser.parse_args()
    flag_number = args.top_number[0]
    # Convert status argument to format used by Launchpad
    flag_status = args.min_status[0].replace('-', ' ')
    if flag_status != 'unknown':
        flag_status = flag_status[0].upper() + flag_status[1:]
    flag_list = args.list_only
    flag_choose = args.choose
    flag_ping = args.ping_only
    if flag_choose and (not flag_number or flag_number < 2):
        parser.print_usage()
        exit((
            "error: -c/--choose option requires -t/--top-number NUMBER "
            "where NUMBER is greater than 1."
        ))
    try:
        release = check_output(["lsb_release", "-ics"])
    except OSError:
        not_ubuntu()
    else:
        release = [s.strip() for s in release.decode('utf-8').split()]
    if release[0] == 'Debian':
        exit("Debian is not currently supported")
    elif release[0] != 'Ubuntu':
        not_ubuntu()
    directory = '/etc/apt/'
    apt_file = 'sources.list'
    sources_path = directory + apt_file
    if not path.isfile(sources_path):
        exit("%s must exist as file" % sources_path)
    mirrors_loc = "mirrors.ubuntu.com"
    mirrors_url = "http://%s/mirrors.txt" % mirrors_loc
    stderr.write("Getting list of mirrors...")
    try:
        mirrors_list = get_html(mirrors_url)
    except HTMLGetError as err:
        # Fixed NameError: mirrors_list is unbound when get_html fails.
        exit("Error getting list from %s:\n\t%s" % (mirrors_url, err))
    stderr.write("done.\n")
    mirrors_list = mirrors_list.splitlines()
    codename = release[1][0].upper() + release[1][1:]
    hardware = check_output(["uname", "-m"]).strip().decode('utf-8')
    if hardware == 'x86_64':
        hardware = 'amd64'
    else:
        hardware = 'i386'
    archives = Mirrors(mirrors_list, flag_status, codename, hardware)
    archives.get_rtts()
    if archives.got["ping"] < flag_number:
        flag_number = archives.got["ping"]
    if flag_number == 0:
        # Report the source URL rather than dumping the mirror list.
        exit("Cannot connect to any mirrors in %s\n." % mirrors_url)
    if not flag_ping:
        archives.get_launchpad_urls()
        if not archives.abort_launch:
            archives.status_num = flag_number
            stderr.write("Looking up %d status(es)\n" % flag_number)
            archives.lookup_statuses()
        if flag_number > 1:
            stderr.write('\n')
    repo_name = ""
    found = False
    skip_gen_msg = "Skipping file generation."
    with open(sources_path, 'r') as sources_file:
        lines = sources_file.readlines()
    repos = []
    required_repo = "main"
    # Find the mirror currently serving the main and -security repositories.
    for line in lines:
        fields = line.split()
        if confirm_mirror(fields):
            if (not found and
                    (release[1] in fields[2]) and
                    (fields[3] == required_repo)):
                repos += [fields[1]]
                found = True
                continue
            elif fields[2] == '%s-security' % (release[1]):
                repos += [fields[1]]
                break
    if not repos:
        stderr.write((
            "Error finding current %s repository in %s\n%s\n" %
            (required_repo, sources_path, skip_gen_msg)
        ))
    else:
        repo_name = repos[0]
    rank = 0
    current_key = -1
    if flag_ping:
        archives.top_list = archives.ranked[:flag_number+1]
    for url in archives.top_list:
        info = archives.urls[url]
        host = info["Host"]
        if url == repo_name:
            host += " (current)"
            current_key = rank
        if not flag_ping and not archives.abort_launch:
            if "Status" in info:
                # Fixed key: the report below reads "Organisation", not "Org".
                assign_defaults(info, ("Organisation", "Speed"), "N/A")
                print((
                    "%(rank)d. %(mirror)s\n%(tab)sLatency: %(ms)d ms\n"
                    "%(tab)sOrg: %(org)s\n%(tab)sStatus: %(status)s\n"
                    "%(tab)sSpeed: %(speed)s" % {
                        'tab': '    ',
                        'rank': rank + 1,
                        'mirror': host,
                        'ms': info["Latency"],
                        'org': info["Organisation"],
                        'status': info["Status"],
                        'speed': info["Speed"]
                    }
                ))
        else:
            print("%d. %s: %d ms" % (rank+1, info["Host"], info["Latency"]))
        rank += 1
        if rank == flag_number:
            break
    key = 0
    if flag_choose:
        key = ask((
            "Choose a mirror (1 - %d)\n'q' to quit " %
            len(archives.top_list)
        ))
        # Keep prompting until a valid rank (or 'q') is entered.
        while True:
            try:
                key = int(key)
            except ValueError:
                if key == 'q':
                    exit()
            if (type(key) is not str) and (key >= 1) and (key <= rank):
                break
            key = ask("Invalid entry ")
        key -= 1
    if flag_list:
        exit()
    # Avoid generating duplicate sources.list
    if current_key == key:
        exit((
            "%s is the currently used mirror.\n%s" %
            (archives.urls[repo_name]["Host"], skip_gen_msg)
        ))
    mirror = archives.top_list[key]
    lines = ''.join(lines)
    for repo in repos:
        lines = lines.replace(repo, mirror)
    work_dir = getcwd()
    if work_dir == directory[0:-1]:
        # Writing into /etc/apt would clobber the live config; warn first.
        query = (
            "'%(dir)s' is the current directory.\n"
            "Generating a new '%(apt)s' file will "
            "overwrite the current file.\n"
            "You should copy or backup '%(apt)s' before replacing it.\n"
            "Continue?\n[yes|no] " % {
                'dir': directory,
                'apt': apt_file
            }
        )
        yes_or_no(query)
    write_file = work_dir.rstrip('/') + '/' + apt_file
    try:
        with open(write_file, 'w') as sources_file:
            sources_file.write(lines)
    except IOError as err:
        exit("Unable to generate sources.list:\n\t%s\n" % err)
    else:
        print("New config file saved to %s" % write_file)
    exit()
if __name__ == '__main__':
    # Support input for both Python 2 and 3
    get_input = input
    if version_info[:2] <= (2, 7):
        get_input = raw_input  # raw_input only exists on Python 2
    try:
        apt_select()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of dumping a traceback.
        stderr.write("Aborting...\n")
| 29.364341 | 76 | 0.531415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,775 | 0.234293 |
afea7d8addbac7d13049654934c396b5dce1bde8 | 8,176 | py | Python | tests/test_wms_utils.py | LiamOSullivan/datacube-ows | 6ff10853081b08d8de8485bd3c9c976cdae03390 | [
"Apache-2.0"
] | null | null | null | tests/test_wms_utils.py | LiamOSullivan/datacube-ows | 6ff10853081b08d8de8485bd3c9c976cdae03390 | [
"Apache-2.0"
] | null | null | null | tests/test_wms_utils.py | LiamOSullivan/datacube-ows | 6ff10853081b08d8de8485bd3c9c976cdae03390 | [
"Apache-2.0"
] | null | null | null | # This file is part of datacube-ows, part of the Open Data Cube project.
# See https://opendatacube.org for more information.
#
# Copyright (c) 2017-2021 OWS Contributors
# SPDX-License-Identifier: Apache-2.0
from unittest.mock import MagicMock
import pytest
import datacube_ows.wms_utils
from datacube_ows.ogc_exceptions import WMSException
def test_parse_time_delta():
    """Each ISO-8601 duration spelling parses to the expected relativedelta."""
    from dateutil.relativedelta import relativedelta
    cases = {
        relativedelta(hours=1): ['P0Y0M0DT1H0M0S', 'PT1H0M0S', 'PT1H', ],
        relativedelta(months=18): ['P1Y6M0DT0H0M0S', 'P1Y6M0D', 'P0Y18M0DT0H0M0S', 'P18M', ],
        relativedelta(minutes=90): ['PT1H30M', 'P0Y0M0DT1H30M0S', 'PT90M'],
    }
    for expected, spellings in cases.items():
        for spelling in spellings:
            assert expected == datacube_ows.wms_utils.parse_time_delta(spelling)
def test_parse_wms_time_strings():
    """Start/end pairs and duration expressions expand to datetime ranges."""
    import datetime as dt
    cases = {
        '2018-01-10/2019-01-10': (dt.datetime(2018, 1, 10, 0, 0), dt.datetime(2019, 1, 10, 23, 23, 59, 999999)),
        '2000/P1Y': (dt.datetime(2000, 1, 1, 0, 0), dt.datetime(2000, 12, 31, 23, 59, 59, 999999)),
        '2018-01-10/P5D': (dt.datetime(2018, 1, 10, 0, 0), dt.datetime(2018, 1, 14, 23, 59, 59, 999999)),
        'P1M/2018-01-10': (dt.datetime(2017, 12, 10, 0, 0, 0, 1), dt.datetime(2018, 1, 10, 23, 23, 59, 999999)),
    }
    for spec, expected in cases.items():
        assert expected == datacube_ows.wms_utils.parse_wms_time_strings(spec.split('/'))
def test_parse_wms_time_strings_with_present():
    """'PRESENT' as the end of a range resolves to (roughly) utcnow."""
    import datetime as dt
    begin, finish = datacube_ows.wms_utils.parse_wms_time_strings('2018-01-10/PRESENT'.split('/'))
    assert begin == dt.datetime(2018, 1, 10, 0, 0)
    assert (dt.datetime.utcnow() - finish).total_seconds() < 60
@pytest.fixture
def dummy_product():
    # A MagicMock stands in for an OWS product/layer config: the style
    # parser only needs attribute access, not real product metadata.
    dummy = MagicMock()
    return dummy
def test_parse_userbandmath(dummy_product):
    """A well-formed user band-math request parses without raising."""
    args = {
        "code": "2*(red-nir)/(red+nir)",
        "colorscheme": "viridis",
        "colorscalerange": "0,2",
    }
    datacube_ows.wms_utils.single_style_from_args(dummy_product, args)
def test_parse_userbandmath_nobands(dummy_product):
    """Band-math code that references no bands is rejected."""
    args = {
        "code": "2+(4.0*72)",
        "colorscheme": "viridis",
        "colorscalerange": "0,2",
    }
    with pytest.raises(WMSException) as excinfo:
        datacube_ows.wms_utils.single_style_from_args(dummy_product, args)
    message = str(excinfo.value)
    assert "Code expression invalid" in message
    assert "Expression references no bands" in message
def test_parse_userbandmath_banned_op(dummy_product):
    """Comparison operators are not allowed in user band-math code."""
    args = {
        "code": "red<green",
        "colorscheme": "viridis",
        "colorscalerange": "0,2",
    }
    with pytest.raises(WMSException) as excinfo:
        datacube_ows.wms_utils.single_style_from_args(dummy_product, args)
    message = str(excinfo.value)
    assert "not supported" in message
    assert "Code expression invalid" in message
def test_parse_userbandmath_bad_code(dummy_product):
    """Syntactically invalid band-math code is rejected."""
    args = {
        "code": "2*(red@nir)/(red#nir)",
        "colorscheme": "viridis",
        "colorscalerange": "0,2",
    }
    with pytest.raises(WMSException) as excinfo:
        datacube_ows.wms_utils.single_style_from_args(dummy_product, args)
    assert "Code expression invalid" in str(excinfo.value)
def test_parse_userbandmath_bad_scheme(dummy_product):
    """An unknown Matplotlib colour ramp name is rejected."""
    args = {
        "code": "2*(red-nir)/(red+nir)",
        "colorscheme": "i_am_not_a_matplotlib_scheme",
        "colorscalerange": "0,2",
    }
    with pytest.raises(WMSException) as excinfo:
        datacube_ows.wms_utils.single_style_from_args(dummy_product, args)
    assert "Invalid Matplotlib ramp name:" in str(excinfo.value)
def test_parse_no2_colorscalerange(dummy_product):
    """colorscalerange must contain exactly two comma-separated numbers."""
    expected = "Colorscale range must be two numbers, sorted and separated by a comma."
    for bad_range in ("0,2,4,6,8,9,15,52", "2"):
        with pytest.raises(WMSException) as excinfo:
            datacube_ows.wms_utils.single_style_from_args(dummy_product, {
                "code": "2*(red-nir)/(red+nir)",
                "colorscheme": "viridis",
                "colorscalerange": bad_range,
            })
        assert expected in str(excinfo.value)
def test_parse_nonnumeric_colorscalerange(dummy_product):
    """Non-numeric colorscalerange values are rejected."""
    expected = "Colorscale range must be two numbers, sorted and separated by a comma."
    for bad_range in ("0,spam", "spam,2"):
        with pytest.raises(WMSException) as excinfo:
            datacube_ows.wms_utils.single_style_from_args(dummy_product, {
                "code": "2*(red-nir)/(red+nir)",
                "colorscheme": "viridis",
                "colorscalerange": bad_range,
            })
        assert expected in str(excinfo.value)
def test_parse_unsorted_colorscalerange(dummy_product):
    """A descending (unsorted) colorscalerange is rejected.

    Fixed: the first case previously reused the non-numeric input
    "0,spam" (copy-paste from test_parse_nonnumeric_colorscalerange),
    so the unsorted branch was only exercised once.
    """
    expected = "Colorscale range must be two numbers, sorted and separated by a comma."
    for unsorted_range in ("5,2", "2,0"):
        with pytest.raises(WMSException) as excinfo:
            datacube_ows.wms_utils.single_style_from_args(dummy_product, {
                "code": "2*(red-nir)/(red+nir)",
                "colorscheme": "viridis",
                "colorscalerange": unsorted_range,
            })
        assert expected in str(excinfo.value)
| 50.469136 | 112 | 0.474315 | 0 | 0 | 0 | 0 | 77 | 0.009418 | 0 | 0 | 1,815 | 0.221991 |
afeb7607481ba24537f5730e63e67c9233d3bd33 | 420 | py | Python | hello.py | gwenzek/func_argparser | 80afe8eb46c3fa85d0679c13eaad0f2b519d9b62 | [
"BSD-3-Clause"
] | 9 | 2019-12-22T09:06:47.000Z | 2022-03-04T10:38:39.000Z | hello.py | gwenzek/func_argparser | 80afe8eb46c3fa85d0679c13eaad0f2b519d9b62 | [
"BSD-3-Clause"
] | 2 | 2020-03-06T19:11:29.000Z | 2020-05-03T12:33:09.000Z | hello.py | gwenzek/func_argparser | 80afe8eb46c3fa85d0679c13eaad0f2b519d9b62 | [
"BSD-3-Clause"
] | 1 | 2020-05-02T15:45:05.000Z | 2020-05-02T15:45:05.000Z | """Say hello or goodbye to the user."""
import func_argparse
def hello(user: str, times: int = None):
    """Say hello.

    Arguments:
        user: name of the user
        times: number of times to repeat the greeting (None means once)
    """
    # NOTE(review): annotated int but defaults to None (effectively
    # Optional[int]); func_argparse derives the CLI from this annotation,
    # so confirm its handling before tightening the type.
    print(f"Hello {user}" * (1 if times is None else times))
def bye(user: str, see_you: float = 1.0):
    """Say goodbye, mentioning in how many days we expect to meet again."""
    message = f"Goodbye {user}, see you in {see_you:.1f} days"
    print(message)
if __name__ == "__main__":
    # func_argparse builds an argparse CLI (hello / bye subcommands) from
    # this module's public functions and dispatches to the chosen one.
    func_argparse.main()
| 19.090909 | 60 | 0.604762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.471429 |
afede93e46006b05184c36eff60d75f0f70407ff | 3,996 | py | Python | python/batchd/blenderclient.py | portnov/batchd | 44366eec3eadc654e0e6e3f7d914cc6dcae6a1a2 | [
"BSD-3-Clause"
] | 5 | 2016-03-21T06:55:32.000Z | 2020-04-02T23:19:13.000Z | python/batchd/blenderclient.py | portnov/batchd | 44366eec3eadc654e0e6e3f7d914cc6dcae6a1a2 | [
"BSD-3-Clause"
] | 45 | 2016-03-22T07:22:29.000Z | 2022-02-06T07:43:40.000Z | python/batchd/blenderclient.py | portnov/batchd | 44366eec3eadc654e0e6e3f7d914cc6dcae6a1a2 | [
"BSD-3-Clause"
] | 2 | 2019-09-08T01:40:22.000Z | 2020-02-07T05:56:55.000Z | try:
import bpy
from bpy.types import WindowManager, AddonPreferences
from bpy.props import StringProperty, EnumProperty
in_blender = True
except ImportError as e:
in_blender = False
if in_blender:
    from batchd import client
    # Module-level state shared by the callbacks below.
    batchd_client = None  # lazily-created API client (see get_batchd_client)
    batchd_queues = []    # cached EnumProperty items for batchd queues
    batchd_types = []     # cached EnumProperty items for batchd job types
    def get_preferences():
        """Return this add-on's preferences entry from Blender's user settings."""
        return bpy.context.user_preferences.addons.get("batchd").preferences
    def get_batchd_client(context):
        """Return a module-level cached batchd API client, creating it on first use.

        Connection details (URL, username, password) come from the add-on
        preferences.
        """
        global batchd_client
        if batchd_client is not None:
            return batchd_client
        addon = get_preferences()
        batchd_client = client.Client(addon.manager_url, addon.username, addon.password)
        return batchd_client
    def queues_from_batchd(self, context):
        """EnumProperty items callback: queue choices fetched from batchd.

        Results are memoized in the module-level ``batchd_queues`` list; the
        cache is returned as-is when already populated or when no context is
        available yet.
        """
        global batchd_queues
        if len(batchd_queues) > 0 or context is None:
            return batchd_queues
        c = get_batchd_client(context)
        for queue in c.get_queues():
            name = queue.get('name', None)
            title = queue.get('title', name)
            # (identifier, name, description) triple as Blender expects.
            batchd_queues.append((name, title, title))
        return batchd_queues
def types_from_batchd(self, context):
global batchd_types
if len(batchd_types) > 0 or context is None:
print("types: {}, context: {}".format(batchd_types, context))
return batchd_types
c = get_batchd_client(context)
for type in c.get_job_types():
name = type.get('name')
title = type.get('title', name)
if not title:
title = name
batchd_types.append((name, title, title))
print(batchd_types)
return batchd_types
    class SettingsPanel(bpy.types.AddonPreferences):
        """Add-on preferences: batchd server location, credentials and job defaults."""
        bl_label = "Batchd settings"
        bl_idname = __package__
        # Connection settings for the batchd manager.
        manager_url = StringProperty(
            name = "batchd manager URL",
            default = "http://localhost:9681")
        # Queue/type choices are populated lazily from the server.
        batchd_queue = EnumProperty(name="Queue", items = queues_from_batchd)
        job_type_name = EnumProperty(name="batchd job type", items = types_from_batchd)
        username = StringProperty(name="batchd user name")
        password = StringProperty(name="batchd password", subtype="PASSWORD")
        def draw(self, context):
            # Simple vertical layout of all preference fields.
            layout = self.layout
            layout.prop(self, "manager_url")
            layout.prop(self, "username")
            layout.prop(self, "password")
            layout.prop(self, "batchd_queue")
            layout.prop(self, "job_type_name")
    class EnqueuePanel(bpy.types.Panel):
        """Render-properties panel exposing the enqueue operator button."""
        bl_label = "Submit to batchd"
        bl_idname = "batchd.enqueue.panel"
        bl_space_type = "PROPERTIES"
        bl_context = "render"
        bl_region_type = "WINDOW"
        def draw(self, context):
            layout = self.layout
            wm = context.window_manager  # NOTE(review): unused — candidate for removal
            layout.operator("batchd.enqueue")
    class EnqueueOperator(bpy.types.Operator):
        """Pack the current .blend file and submit a render job to batchd."""
        bl_label = "Submit to batchd"
        bl_idname = "batchd.enqueue"
        def execute(self, context):
            wm = context.window_manager  # NOTE(review): unused — candidate for removal
            # Embed external resources so the worker gets a self-contained file.
            bpy.ops.file.pack_all()
            current_file = bpy.data.filepath
            target_file = bpy.path.abspath(bpy.context.scene.render.filepath)
            job_type_name = get_preferences().job_type_name
            queue_name = get_preferences().batchd_queue
            c = get_batchd_client(context)
            # NOTE(review): frame is hard-coded to "1" — presumably a
            # placeholder; confirm whether the scene frame range should be used.
            params = dict(input=current_file, output=target_file, frame="1")
            c.do_enqueue(queue_name, job_type_name, params)
            return {'FINISHED'}
def register():
bpy.utils.register_class(SettingsPanel)
bpy.utils.register_class(EnqueueOperator)
bpy.utils.register_class(EnqueuePanel)
def unregister():
bpy.utils.unregister_class(EnqueuePanel)
bpy.utils.unregister_class(EnqueueOperator)
bpy.utils.unregister_class(SettingsPanel)
    if __name__ == "__main__":
        # Allows running this file directly from Blender's text editor.
        register()
| 30.976744 | 88 | 0.625375 | 1,903 | 0.476226 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.097598 |
afee20d2b8c7e85de3a27f20ac4aaa7c1fa22ef1 | 2,242 | py | Python | sample.py | uguratar/pyzico | b779d590b99392df60db7c5e2df832708df9b6a2 | [
"MIT"
] | 6 | 2015-05-03T10:48:54.000Z | 2018-03-06T12:36:02.000Z | sample.py | uguratar/pyzico | b779d590b99392df60db7c5e2df832708df9b6a2 | [
"MIT"
] | 1 | 2021-06-01T22:06:45.000Z | 2021-06-01T22:06:45.000Z | sample.py | uguratar/pyzico | b779d590b99392df60db7c5e2df832708df9b6a2 | [
"MIT"
] | null | null | null | # coding=utf-8
from iyzico import Iyzico
from iyzico_objects import IyzicoCard, IyzicoCustomer, \
IyzicoCardToken, IyzicoHTTPException, IyzicoValueException
if __name__ == '__main__':
    # NOTE: this sample uses Python 2 print statements throughout.
    # Sandbox test card and customer fixtures.
    my_card = IyzicoCard("4242424242424242", "10", "2015", "000",
                         "Python Test")
    my_customer = IyzicoCustomer("First Name", "Last Name",
                                 "email@email")
    payment = Iyzico()
    try:
        # Charge 6.6612132 TRY in 6 installments (last flag) for the test customer.
        result = payment.debit_with_installment(6.6612132, my_card,
                                                "Installment "
                                                "Iyzico python library test",
                                                "TRY", my_customer, True, 6)
        if result.success:
            print result.transaction_state
            print result.transaction_id
            print result.reference_id
            print result.request_id
            print result.card_token
            my_token = IyzicoCardToken(result.card_token)
        else:
            print result.error_code
            print result.error_message
    except (IyzicoHTTPException, IyzicoValueException) as ex:
        print ex
    # The samples below are intentionally disabled: they are bare string
    # literals (no-op expressions), not executed code.
    '''result = payment.debit_with_token(1, my_token,
                                         "Python debit with "
                                         "card token",
                                         "TRY")'''
    '''result = payment.register_card(my_card)
    result = payment.delete_card(my_token)'''
    '''result2 = payment.pre_authorize(1, my_card,
                                       "Iyzico python library test",
                                       "TRY")
    print result2.success
    result3 = payment.capture(1, result2.transaction_id,
                              "Iyzico python library test",
                              "TRY")
    print result3.success
    result4 = payment.reversal(1, result.transaction_id,
                               "Iyzico python library test",
                               "TRY")
    print result4.success
    result5 = payment.refund(1, result3.transaction_id,
                             "Iyzico python library test",
                             "TRY")
    print result5.success'''
aff13d43cede2cfa1ff39948574ef2511d2500e0 | 7,220 | py | Python | nbsite/gallery/thumbnailer.py | dipesh1432/nbsite | 866c6d747879b9a4b88e0a30a43e35b9802645bd | [
"BSD-3-Clause"
] | 15 | 2018-04-06T19:13:26.000Z | 2019-05-24T04:08:51.000Z | nbsite/gallery/thumbnailer.py | dipesh1432/nbsite | 866c6d747879b9a4b88e0a30a43e35b9802645bd | [
"BSD-3-Clause"
] | 71 | 2018-03-09T16:32:59.000Z | 2019-07-09T19:45:36.000Z | nbsite/gallery/thumbnailer.py | dipesh1432/nbsite | 866c6d747879b9a4b88e0a30a43e35b9802645bd | [
"BSD-3-Clause"
] | 23 | 2019-12-13T17:38:46.000Z | 2021-09-08T14:37:26.000Z | from __future__ import unicode_literals
import os, sys, subprocess, ast
from nbconvert.preprocessors import Preprocessor
from holoviews.core import Dimensioned, Store
from holoviews.ipython.preprocessors import OptsMagicProcessor, OutputMagicProcessor
from holoviews.ipython.preprocessors import StripMagicsProcessor
from holoviews.util.command import export_to_python
import tempfile
import matplotlib.pyplot as plt
# Headless backend: thumbnails must render without a display server.
plt.switch_backend('agg')
def comment_out_magics(source):
    """
    Utility used to make sure AST parser does not choke on unrecognized
    magics.
    """
    commented = [
        ('# ' + line) if line.strip().startswith('%') else line
        for line in source.splitlines()
    ]
    return '\n'.join(commented)
def wrap_cell_expression(source, template='{expr}'):
    """
    If a cell ends in an expression that could be displaying a HoloViews
    object (as determined using the AST), wrap it with a given prefix
    and suffix string.

    If the cell doesn't end in an expression, return the source unchanged.
    """
    # AST node types whose value could be a displayable object.
    cell_output_types = (ast.IfExp, ast.BoolOp, ast.BinOp, ast.Call,
                         ast.Name, ast.Attribute)
    try:
        node = ast.parse(comment_out_magics(source))
    except SyntaxError:
        return source
    filtered = source.splitlines()
    if node.body != []:
        last_expr = node.body[-1]
        if not isinstance(last_expr, ast.Expr):
            pass # Not an expression
        elif isinstance(last_expr.value, cell_output_types):
            # Split the line where the final expression starts so any code
            # sharing that line (e.g. after a semicolon) is preserved.
            # CAREFUL WITH UTF8!
            expr_end_slice = filtered[last_expr.lineno-1][:last_expr.col_offset]
            expr_start_slice = filtered[last_expr.lineno-1][last_expr.col_offset:]
            start = '\n'.join(filtered[:last_expr.lineno-1]
                              + ([expr_end_slice] if expr_end_slice else []))
            ending = '\n'.join(([expr_start_slice] if expr_start_slice else [])
                               + filtered[last_expr.lineno:])
            # A trailing semicolon suppresses display, so leave it alone.
            if ending.strip().endswith(';'):
                return source
            # BUG!! Adds newline for 'foo'; <expr>
            return start + '\n' + template.format(expr=ending)
    return source
def strip_specific_magics(source, magic):
    """
    Given the source of a cell, strip out a specific cell and line magic,
    keeping the rest of each line.
    """
    # Fixed: the original appended BOTH the stripped line and (via the second
    # if/else) the original line for '%magic' lines, duplicating them in the
    # output. The branches are now mutually exclusive, and the exact prefix
    # is removed instead of lstrip()'s character-set strip.
    cell_prefix = f'%%{magic}'
    line_prefix = f'%{magic}'
    filtered = []
    for line in source.splitlines():
        if line.startswith(cell_prefix):
            filtered.append(line[len(cell_prefix):].strip(' '))
        elif line.startswith(line_prefix):
            filtered.append(line[len(line_prefix):].strip(' '))
        else:
            filtered.append(line)
    return '\n'.join(filtered)
class StripTimeMagicsProcessor(Preprocessor):
    """
    Preprocessor to convert notebooks to Python source strips out just time
    magics while keeping the rest of the cell.
    """
    def preprocess_cell(self, cell, resources, index):
        # Only code cells can contain magics; other cell types pass through.
        if cell['cell_type'] == 'code':
            cell['source'] = strip_specific_magics(cell['source'], 'time')
        return cell, resources
    def __call__(self, nb, resources): return self.preprocess(nb,resources)
def strip_trailing_semicolons(source, function):
    """
    Give the source of a cell, filter out lines that contain a specified
    function call and end in a semicolon.
    """
    suffix = f'{function}();'
    cleaned = [
        line[:-1] if line.endswith(suffix) else line
        for line in source.splitlines()
    ]
    return '\n'.join(cleaned)
class StripServableSemicolonsProcessor(Preprocessor):
    """
    Preprocessor to convert notebooks to Python source strips out just semicolons
    that come after the servable function call.
    """
    def preprocess_cell(self, cell, resources, index):
        # Removing the semicolon lets the final .servable() expression be
        # picked up as a displayable output when the source is executed.
        if cell['cell_type'] == 'code':
            cell['source'] = strip_trailing_semicolons(cell['source'], 'servable')
        return cell, resources
    def __call__(self, nb, resources): return self.preprocess(nb,resources)
def thumbnail(obj, basename):
    """Save ``obj`` as ``basename``.png when it is displayable, and return it.

    Handles HoloViews Dimensioned objects via the current renderer, and
    Panel Viewables when panel is importable; existing PNGs are not
    overwritten. Anything else passes through untouched.
    """
    import os
    if isinstance(obj, Dimensioned) and not os.path.isfile(basename+'.png'):
        Store.renderers[Store.current_backend].save(obj, basename, fmt='png')
    elif 'panel' in sys.modules:
        # Import lazily so panel remains an optional dependency.
        from panel.viewable import Viewable
        if isinstance(obj, Viewable) and not os.path.isfile(basename+'.png'):
            obj.save(basename+'.png')
    return obj
class ThumbnailProcessor(Preprocessor):
    """Wrap each code cell's final expression in a thumbnail(...) call.

    Executing the exported script then saves a PNG (named after ``basename``)
    for every displayable cell output.
    """
    def __init__(self, basename, **kwargs):
        self.basename = basename
        super(ThumbnailProcessor, self).__init__(**kwargs)
    def preprocess_cell(self, cell, resources, index):
        if cell['cell_type'] == 'code':
            template = 'from nbsite.gallery.thumbnailer import thumbnail;thumbnail({{expr}}, {basename!r})'
            cell['source'] = wrap_cell_expression(cell['source'],
                                                  template.format(
                                                      basename=self.basename))
        return cell, resources
    def __call__(self, nb, resources): return self.preprocess(nb,resources)
def execute(code, cwd, env):
    """Write ``code`` to a temporary file, run it with python, return the exit code."""
    with tempfile.NamedTemporaryFile('wb', delete=True) as script:
        script.write(code)
        script.flush()
        proc = subprocess.Popen(['python', script.name], cwd=cwd, env=env)
        proc.wait()
    return proc.returncode
def notebook_thumbnail(filename, subpath):
    """Export a notebook to Python source wired to save thumbnails.

    Ensures ``subpath``/thumbnails exists, then converts ``filename`` with a
    preprocessor pipeline that strips magics/semicolons and appends a
    thumbnail() wrapper targeting that directory.
    """
    basename = os.path.splitext(os.path.basename(filename))[0]
    dir_path = os.path.abspath(os.path.join(subpath, 'thumbnails'))
    absdirpath= os.path.abspath(os.path.join('.', dir_path))
    if not os.path.exists(absdirpath):
        os.makedirs(absdirpath)
    # Order matters: magics must be handled before ThumbnailProcessor
    # inspects the final expression of each cell.
    preprocessors = [OptsMagicProcessor(),
                     OutputMagicProcessor(),
                     StripTimeMagicsProcessor(),
                     StripServableSemicolonsProcessor(),
                     StripMagicsProcessor(),
                     ThumbnailProcessor(os.path.abspath(os.path.join(dir_path, basename)))]
    return export_to_python(filename, preprocessors)
if __name__ == '__main__':
    # CLI: take a notebook file or a directory under examples/ and render a
    # PNG thumbnail for each notebook by executing its exported source.
    files = []
    abspath = os.path.abspath(sys.argv[1])
    split_path = abspath.split(os.path.sep)
    if os.path.isdir(abspath):
        if 'examples' not in split_path:
            print('Can only thumbnail notebooks in examples/')
            sys.exit()
        subpath = os.path.sep.join(split_path[split_path.index('examples')+1:])
        files = [os.path.join(abspath, f) for f in os.listdir(abspath)
                 if f.endswith('.ipynb')]
    elif os.path.isfile(abspath):
        subpath = os.path.sep.join(split_path[split_path.index('examples')+1:-1])
        files = [abspath]
    else:
        print('Path {path} does not exist'.format(path=abspath))
    for f in files:
        # Fixed format strings: the '{filename}' placeholders were missing
        # (the messages printed a literal "(unknown)" while the filename
        # keyword arguments went unused).
        print('Generating thumbnail for file {filename}'.format(filename=f))
        code = notebook_thumbnail(f, subpath)
        try:
            retcode = execute(code.encode('utf8'), cwd=os.path.split(f)[0], env={})
        except Exception as e:
            print('Failed to generate thumbnail for {filename}'.format(filename=f))
            print(str(e))
| 36.836735 | 107 | 0.632964 | 1,649 | 0.228393 | 0 | 0 | 0 | 0 | 0 | 0 | 1,494 | 0.206925 |