max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
omnidet/train_semantic.py | AtlasGooo2/WoodScape | 348 | 12773651 | <filename>omnidet/train_semantic.py
"""
Semantic segmentation training for OmniDet.
# author: <NAME> <<EMAIL>>
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; Authors provide no warranty with the software
and are not liable for anything.
"""
import time
import numpy as np
import torch
from torch.utils.data import DataLoader
from data_loader.woodscape_loader import WoodScapeRawDataset
from losses.semantic_loss import CrossEntropyLoss2d, FocalLoss
from models.resnet import ResnetEncoder
from models.semantic_decoder import SemanticDecoder
from utils import TrainUtils, semantic_color_encoding, IoU
class SemanticInit(TrainUtils):
    """Shared setup for semantic-segmentation training: class weighting,
    IoU metric, loss criterion and prediction-overlay helpers."""

    def __init__(self, args):
        super().__init__(args)
        # Per-class weights to counter class imbalance; the key selects the
        # weighting scheme (ENet-style or median-frequency balancing).
        semantic_class_weights = dict(
            woodscape_enet=([3.25, 2.33, 20.42, 30.59, 38.4, 45.73, 10.76, 34.16, 44.3, 49.19]),
            woodscape_mfb=(0.04, 0.03, 0.43, 0.99, 2.02, 4.97, 0.17, 1.01, 3.32, 20.35))
        print(f"=> Setting Class weights based on: {args.semantic_class_weighting} \n"
              f"=> {semantic_class_weights[args.semantic_class_weighting]}")
        semantic_class_weights = torch.tensor(semantic_class_weights[args.semantic_class_weighting]).to(args.device)
        # Setup Metrics
        self.metric = IoU(args.semantic_num_classes, args.dataset, ignore_index=None)
        # Loss criterion selected by args: weighted cross-entropy or focal loss.
        if args.semantic_loss == "cross_entropy":
            self.semantic_criterion = CrossEntropyLoss2d(weight=semantic_class_weights)
        elif args.semantic_loss == "focal_loss":
            self.semantic_criterion = FocalLoss(weight=semantic_class_weights, gamma=2, size_average=True)
        self.best_semantic_iou = 0.0
        self.alpha = 0.5  # to blend semantic predictions with color image
        self.color_encoding = semantic_color_encoding(args)
class SemanticModel(SemanticInit):
    """End-to-end trainer for the semantic decoder: builds encoder/decoder,
    loads data, runs the epoch loop, validates and logs to tensorboard."""

    def __init__(self, args):
        super().__init__(args)
        # --- Init model ---
        self.models["encoder"] = ResnetEncoder(num_layers=self.args.network_layers, pretrained=True).to(self.device)
        self.models["semantic"] = SemanticDecoder(self.models["encoder"].num_ch_enc,
                                                  n_classes=args.semantic_num_classes).to(self.device)
        self.parameters_to_train += list(self.models["encoder"].parameters())
        self.parameters_to_train += list(self.models["semantic"].parameters())
        if args.use_multiple_gpu:
            self.models["encoder"] = torch.nn.DataParallel(self.models["encoder"])
            self.models["semantic"] = torch.nn.DataParallel(self.models["semantic"])
        print(f"=> Training on the {self.args.dataset.upper()} dataset \n"
              f"=> Training model named: {self.args.model_name} \n"
              f"=> Models and tensorboard events files are saved to: {self.args.output_directory} \n"
              f"=> Training is using the cuda device id: {self.args.cuda_visible_devices} \n"
              f"=> Loading {self.args.dataset} training and validation dataset")
        # --- Load Data ---
        train_dataset = WoodScapeRawDataset(data_path=args.dataset_dir,
                                            path_file=args.train_file,
                                            is_train=True,
                                            config=args)
        self.train_loader = DataLoader(train_dataset,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       num_workers=args.num_workers,
                                       pin_memory=True,
                                       drop_last=False)
        val_dataset = WoodScapeRawDataset(data_path=args.dataset_dir,
                                          path_file=args.val_file,
                                          is_train=False,
                                          config=args)
        self.val_loader = DataLoader(val_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=args.num_workers,
                                     pin_memory=True,
                                     drop_last=True)
        print(f"=> Total number of training examples: {len(train_dataset)} \n"
              f"=> Total number of validation examples: {len(val_dataset)}")
        self.num_total_steps = len(train_dataset) // args.batch_size * args.epochs
        self.configure_optimizers()
        if args.pretrained_weights:
            self.load_model()
        self.save_args()
        # Make sure pending GPU work is flushed before timing starts.
        if 'cuda' in self.device:
            torch.cuda.synchronize()

    def semantic_train(self):
        """Run the full training loop; validates and checkpoints per epoch."""
        for self.epoch in range(self.args.epochs):
            # switch to train mode
            self.set_train()
            # Accumulators for data-loading vs. GPU time between log events.
            data_loading_time = 0
            gpu_time = 0
            before_op_time = time.time()
            for batch_idx, inputs in enumerate(self.train_loader):
                current_time = time.time()
                data_loading_time += (current_time - before_op_time)
                before_op_time = current_time
                # -- PUSH INPUTS DICT TO DEVICE --
                self.inputs_to_device(inputs)
                features = self.models["encoder"](inputs["color_aug", 0, 0])
                outputs = self.models["semantic"](features)
                losses = dict()
                losses["semantic_loss"] = self.semantic_criterion(outputs["semantic", 0],
                                                                  inputs["semantic_labels", 0, 0])
                # -- COMPUTE GRADIENT AND DO OPTIMIZER STEP --
                self.optimizer.zero_grad()
                losses["semantic_loss"].backward()
                self.optimizer.step()
                duration = time.time() - before_op_time
                gpu_time += duration
                if batch_idx % self.args.log_frequency == 0:
                    self.log_time(batch_idx, duration, losses["semantic_loss"].cpu().data, data_loading_time, gpu_time)
                    self.semantic_statistics("train", inputs, outputs, losses)
                    # reset the timing accumulators after each log event
                    data_loading_time = 0
                    gpu_time = 0
                self.step += 1
                before_op_time = time.time()
            # Validate on each step, save model on improvements
            val_metrics = self.semantic_val()
            print(self.epoch, "IoU:", val_metrics["mean_iou"])
            if val_metrics["mean_iou"] >= self.best_semantic_iou:
                print(f"=> Saving model weights with mean_iou of {val_metrics['mean_iou']:.3f} "
                      f"at step {self.step} on {self.epoch} epoch.")
                self.best_semantic_iou = val_metrics["mean_iou"]
                self.save_model()
            # Scheduler is stepped on the validation metric (e.g. plateau LR).
            self.lr_scheduler.step(val_metrics["mean_iou"])
        print("Training complete!")

    @torch.no_grad()
    def semantic_val(self):
        """Validate the semantic model"""
        self.set_eval()
        losses = dict()
        for inputs in self.val_loader:
            self.inputs_to_device(inputs)
            features = self.models["encoder"](inputs["color", 0, 0])
            outputs = self.models["semantic"](features)
            losses["semantic_loss"] = self.semantic_criterion(outputs["semantic", 0], inputs["semantic_labels", 0, 0])
            # argmax over the class channel -> per-pixel predicted class ids
            _, predictions = torch.max(outputs["semantic", 0].data, 1)
            self.metric.add(predictions, inputs["semantic_labels", 0, 0])
        # NOTE(review): metrics are read once after the loop, so the returned
        # "outputs" dict holds the last batch's tensors plus aggregated IoU.
        outputs["class_iou"], outputs["mean_iou"] = self.metric.value()
        # Compute stats for the tensorboard
        self.semantic_statistics("val", inputs, outputs, losses)
        self.metric.reset()
        del inputs, losses
        self.set_train()
        return outputs

    def semantic_statistics(self, mode, inputs, outputs, losses) -> None:
        """Write scalar losses, IoU metrics and overlay images to tensorboard."""
        writer = self.writers[mode]
        for loss, value in losses.items():
            writer.add_scalar(f"{loss}", value.mean(), self.step)
        if mode == "val":
            writer.add_scalar(f"mean_iou", outputs["mean_iou"], self.step)
            for k, v in outputs["class_iou"].items():
                writer.add_scalar(f"class_iou/{k}", v, self.step)
        writer.add_scalar("learning_rate", self.optimizer.param_groups[0]['lr'], self.step)
        for j in range(min(4, self.args.batch_size)):  # write maximum of four images
            if self.args.train == "semantic":
                writer.add_image(f"color/{j}", inputs[("color", 0, 0)][j], self.step)
                # Predictions is one-hot encoded with "num_classes" channels.
                # Convert it to a single int using the indices where the maximum (1) occurs
                _, predictions = torch.max(outputs["semantic", 0][j].data, 0)
                predictions_gray = predictions.byte().squeeze().cpu().detach().numpy()
                color_semantic = np.array(self.trans_pil(inputs[("color", 0, 0)].cpu()[j].data))
                # Alpha-blend class colors over the input everywhere that is
                # not the background class (id 0).
                not_background = predictions_gray != 0
                color_semantic[not_background, ...] = (color_semantic[not_background, ...] * (1 - self.alpha) +
                                                       self.color_encoding[predictions_gray[not_background]] * self.alpha)
                writer.add_image(f"semantic_pred_0/{j}", color_semantic.transpose(2, 0, 1), self.step)
                labels = inputs["semantic_labels", 0, 0][j].data
                labels_gray = labels.byte().squeeze().cpu().detach().numpy()
                labels_rgb = np.array(self.trans_pil(inputs[("color", 0, 0)].cpu()[j].data))
                not_background = labels_gray != 0
                labels_rgb[not_background, ...] = (labels_rgb[not_background, ...] * (1 - self.alpha) +
                                                   self.color_encoding[labels_gray[not_background]] * self.alpha)
                writer.add_image(f"semantic_labels_0/{j}", labels_rgb.transpose(2, 0, 1), self.step)
| 2.515625 | 3 |
DjangoAPI/apiApp/migrations/0001_initial.py | ChunjunHu/webservice | 0 | 12773652 | <reponame>ChunjunHu/webservice
# Generated by Django 3.2.5 on 2022-03-09 22:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
    # Initial auto-generated migration: creates the Pic, UserRegister and
    # UserProfile tables. Do not edit by hand; create a follow-up migration
    # for schema changes instead.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Pic',
            fields=[
                # NOTE(review): user_id (not the uuid "id") is the primary key
                # here -- one Pic row per user; confirm this is intentional.
                ('user_id', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('file_name', models.CharField(blank=True, max_length=50, null=True)),
                ('url', models.CharField(blank=True, max_length=32, null=True)),
                ('upload_date', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
        migrations.CreateModel(
            name='UserRegister',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('first_name', models.CharField(blank=True, max_length=32, null=True)),
                ('last_name', models.CharField(blank=True, max_length=32, null=True)),
                ('password', models.CharField(blank=True, max_length=90, null=True)),
                ('username', models.EmailField(max_length=254, null=True)),
                ('account_created', models.DateTimeField(default=django.utils.timezone.now)),
                ('account_updated', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('token', models.CharField(max_length=50, verbose_name='verdify token')),
                ('phone', models.CharField(max_length=11, verbose_name='phoneMNumber')),
                # One profile per Django auth user; deleted with the user.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 1.835938 | 2 |
convertE00toSHP/converte00toshp.py | borchert/metadata-tools | 0 | 12773653 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# converte00toshp.py
# Created on: 2014-01-08
# Description: Convert e00 files to shapefiles
# ---------------------------------------------------------------------------
# Convert e00 files to coverage and each file within the coverage to shapefile.
print "Importing arcpy"
import arcpy
from arcpy import env
import find_Drive
import os
def convertE00toShapefile(importE00File, drivePath, record, extraDir, option=None):
    # Convert a single .e00 interchange file to an ArcInfo coverage, then
    # export each non-annotation feature class in the coverage to a shapefile
    # under <record>/converted/GISfiles/<e00 name>/shapefiles.
    # option == 0: use importE00File as given; option == 1 (or anything else):
    # re-prompt the user for the e00 name.
    if option == 0:
        pass
    elif option == 1:
        importE00File = validateInput("e00Name", "E00 name: ", drivePath, record, extraDir)
    else:
        importE00File = validateInput("e00Name", "E00 name: ", drivePath, record, extraDir)
    # Normalise the name so e00Ext always carries the .e00 extension.
    if importE00File.endswith('.e00'):
        e00Ext = importE00File
    else:
        e00Ext = importE00File + '.e00'
    if extraDir == '':
        envDir = os.path.join(drivePath,record)
    else:
        envDir = os.path.join(drivePath,record,extraDir)
    outDirectory = os.path.join(drivePath, record + '\converted\GISfiles', importE00File)
    outName = str(e00Ext[:-4])
    # Set environment settings
    if os.path.exists(outDirectory):
        pass
    else:
        os.makedirs(outDirectory)
        print 'Created: ' + str(outDirectory)
    env.workspace = envDir
    env.overwriteOutput = True
    # Delete pre-existing output
    if env.overwriteOutput:
        if os.path.exists(outName):
            os.remove(outName)
    # Execute ImportFromE00
    arcpy.ImportFromE00_conversion(e00Ext, outDirectory, outName)
    # Point the workspace at the freshly created coverage.
    envDir = os.path.join(drivePath,record + '\converted\GISfiles', e00Ext[:-4], outName)
    env.workspace = envDir
    fc = arcpy.ListFeatureClasses()
    for x in fc:
        # annotation feature classes cannot be exported to shapefile
        if x[:10] != 'annotation':
            # Set local variables
            inFeatures = os.path.join(outDirectory,outName,x)
            print 'inFeatures -', inFeatures
            outLocation = outDirectory + "\\shapefiles"
            print 'outLocation -', outLocation
            if os.path.exists(outLocation):
                pass
            else:
                os.makedirs(outLocation)
                print 'Created: ' + str(outLocation)
            outFeatureClass = importE00File + '_' + x + ".shp"
            print 'outFeatureClass - ', outFeatureClass
            errorLog = outDirectory + '\_errorLog.txt'
            try:
                # Execute FeatureClassToFeatureClass
                arcpy.FeatureClassToFeatureClass_conversion(inFeatures, outLocation,
                                                            outFeatureClass)
            except:
                # best-effort: record the failure and keep going
                e = open(errorLog, 'a')
                error = [x, '- could not convert to SHP']
                e.write(''.join(error))
                e.close()
            try:
                featureCount = arcpy.GetCount_management(x)
            except:
                e = open(errorLog, 'a')
                error = [x, '- could not count features']
                e.write(''.join(error))
                e.close()
            featureCountFile = outDirectory + '\_featureCount.txt'
            f = open(featureCountFile, 'a')
            writeOutput = str(x)+' - '+ str(featureCount)+' features\n'
            f.write(writeOutput)
            f.close()
    print 'Complete\n-------------------------\n'
def validateInput(type, msg, drivePath, record = None, extraDir = None):
    # Prompt until the user supplies a record number / extra directory /
    # e00 file name that actually exists on disk. "type" selects which kind
    # of path is being validated (shadows the builtin "type" -- pre-existing).
    val = raw_input(msg)
    if type == 'record':
        valDir = os.path.join(drivePath,val)
        error = 'That record number doesn\'t exist. Please try again.\n'
    if type == 'extraDir':
        valDir = os.path.join(drivePath,record,val)
        error = 'That extra directory doesn\'t exist. Please try again.\n'
    if type == 'e00Name':
        val += '.e00'
        valDir = os.path.join(drivePath,record,extraDir, val)
        error = 'That e00 file doesn\'t exist. Please try again.\n'
    while True:
        try:
            if not os.path.exists(valDir):
                print error
                val = raw_input(msg)
                if type == 'record':
                    valDir = os.path.join(drivePath,val)
                if type == 'extraDir':
                    valDir = os.path.join(drivePath,record,val)
                if type == 'e00Name':
                    val += '.e00'
                    valDir = os.path.join(drivePath,record,extraDir,val)
            else:
                break
        except:
            # e.g. os.path.join fails on a None component -- give up quietly
            break
    # return the bare name without the .e00 extension
    if val.endswith('.e00'):
        return val[:-4]
    else:
        return val
# Find users Google Drive path
drivePath = find_Drive.main()
print "Enter a record number to get started: "
record = validateInput("record","Record Number: ", drivePath)
extraDir = validateInput("extraDir", "Extra directory: ", drivePath, record)
importE00File = validateInput("e00Name", "E00 name: ", drivePath, record, extraDir)
# First conversion uses the validated name directly (option=0).
convertE00toShapefile(importE00File, drivePath, record, extraDir, option=0)
# Interactive loop: x quits, n re-enters a record, anything else re-runs
# with the same record but prompts for a new e00 name (option=1).
while True:
    exitScript = raw_input("Press x to quit, n to enter a new record, or enter to continue with same inputs: ")
    print '\n--------------------\n'
    if exitScript in ('x', 'X'):
        break
    elif exitScript in ('n', 'N'):
        record = validateInput("record","Record Number: ", drivePath)
        extraDir = validateInput("extraDir", "Extra directory: ", drivePath, record)
        convertE00toShapefile(importE00File, drivePath, record, extraDir)
    else:
        convertE00toShapefile(importE00File, drivePath, record, extraDir, option = 1)
| 2.234375 | 2 |
Python/tests/test_support_vector_machine.py | RyanShahidi/easyml | 37 | 12773654 | """TO BE EDITED.
"""
# from easyml import
def test_foo():
    """Placeholder sanity test so the suite always has one passing case."""
    assert True
| 1.445313 | 1 |
force_bdss/tests/probe_classes/ui_hooks.py | scottwedge/force-bdss | 2 | 12773655 | # (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
from traits.api import Bool
from force_bdss.api import BaseUIHooksFactory, BaseUIHooksManager
class ProbeUIHooksManager(BaseUIHooksManager):
    """Test double that records which hook methods were invoked and can be
    configured to raise from any of them."""

    before_execution_called = Bool()
    after_execution_called = Bool()
    before_save_called = Bool()

    # Set this one to raise an exception in the methods
    before_execution_raises = Bool(False)
    after_execution_raises = Bool(False)
    before_save_raises = Bool(False)

    def before_execution(self, task):
        self.before_execution_called = True
        if not self.before_execution_raises:
            return
        raise Exception("Boom")

    def after_execution(self, task):
        self.after_execution_called = True
        if not self.after_execution_raises:
            return
        raise Exception("Boom")

    def before_save(self, task):
        self.before_save_called = True
        if not self.before_save_raises:
            return
        raise Exception("Boom")
class ProbeUIHooksFactory(BaseUIHooksFactory):
    """Test double factory producing ProbeUIHooksManager instances; can be
    configured to raise on creation."""

    create_ui_hooks_manager_raises = Bool()

    def get_identifier(self):
        return "probe_ui_hooks"

    def get_name(self):
        return "Probe UI Hooks"

    def get_ui_hooks_manager_class(self):
        return ProbeUIHooksManager

    def create_ui_hooks_manager(self):
        if self.create_ui_hooks_manager_raises:
            raise Exception("Boom")
        manager_class = self.ui_hooks_manager_class
        return manager_class(self)
| 2.140625 | 2 |
backend/stock/migrations/0019_auto_20210215_2009.py | fengxia41103/stock | 1 | 12773656 | <reponame>fengxia41103/stock
# Generated by Django 3.1.6 on 2021-02-15 20:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds beta, roe and roi columns to MyStock.
    # Do not edit by hand; create a follow-up migration for changes.

    dependencies = [
        ('stock', '0018_balancesheet_common_stock'),
    ]

    operations = [
        migrations.AddField(
            model_name='mystock',
            name='beta',
            field=models.FloatField(default=1, null=True),
        ),
        migrations.AddField(
            model_name='mystock',
            name='roe',
            field=models.FloatField(default=0, null=True, verbose_name='Return on Equity'),
        ),
        migrations.AddField(
            model_name='mystock',
            name='roi',
            # NOTE(review): verbose_name says "Return on Assets" for a field
            # named roi -- looks inconsistent; confirm the intended metric.
            field=models.FloatField(default=0, null=True, verbose_name='Return on Assets'),
        ),
    ]
| 1.625 | 2 |
web-backend/routes/student.py | Nahemah1022/Seat-Reservation-System | 0 | 12773657 | <filename>web-backend/routes/student.py
from email import message
from fastapi import APIRouter, HTTPException,Depends
from sqlalchemy.orm import Session
from typing import List
from config.db import getDBSession
from models.model import t_Student
from schemas import studentSchema
studentRouter = APIRouter()
# GET /users -- return every row of the Student table.
@studentRouter.get("/users",response_model= List[studentSchema.dbStudent],tags=["Student"])
def getAllStudent(conn:Session = Depends(getDBSession)):
    """Return all registered students as a list of dbStudent rows."""
    return conn.execute(t_Student.select()).fetchall()
# POST /users/register -- create a new student; 400 if the id already exists.
@studentRouter.post("/users/register", response_model= studentSchema.Message,tags=["Student"])
def createStudent(stu: studentSchema.dbStudent,conn:Session = Depends(getDBSession)):
    """Register a student, rejecting duplicate ids.

    NOTE(review): the password appears to be stored as received -- no hashing
    is visible here; confirm whether hashing happens upstream.
    """
    # search id in db first
    dbStudentData = conn.execute(t_Student.select().where(t_Student.c.id == stu.id)).first()
    if dbStudentData is not None:
        raise HTTPException(status_code= 400, detail= "ID already registered")
    else:
        conn.execute(t_Student.insert().values(stu.dict()))
        message = studentSchema.Message(message= "success", data= stu)
        return message
@studentRouter.post("/users/login", response_model= studentSchema.Message,tags=["Student"])
def loginStudent(user: studentSchema.StudnetLogin,conn:Session = Depends(getDBSession)):
studentData = conn.execute(t_Student.select().where(t_Student.c.id == user.id)).first()
if studentData is None:
raise HTTPException(status_code= 400, detail= "Invalid ID/Password")
elif studentData.password == <PASSWORD>:
message = studentSchema.Message(message= "success", data= studentData)
return message
else:
raise HTTPException(status_code= 400, detail= "Invalid ID/Password") | 2.640625 | 3 |
yoka_bot/items.py | CooperLuan/yoka_bot | 0 | 12773658 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class YokaBotItem(scrapy.Item):
    # Empty base item kept from the scrapy project template.
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
class YokaBotBrandListItem(scrapy.Item):
    # One entry of the brand index page: display name, link to the brand
    # page, and a "hot brand" flag. item_name tags the item type downstream.
    item_name = scrapy.Field()
    name = scrapy.Field()
    link = scrapy.Field()
    is_hot = scrapy.Field()
    pass
class YokaBotBrandItem(scrapy.Item):
    # Full brand detail page: Chinese/English names, country of origin,
    # founding info, official site, brand story and the product-list URL.
    item_name = scrapy.Field()
    url = scrapy.Field()
    avator = scrapy.Field()
    brand_cn = scrapy.Field()
    brand_en = scrapy.Field()
    country = scrapy.Field()
    created = scrapy.Field()
    official_url = scrapy.Field()
    story = scrapy.Field()
    product_list_url = scrapy.Field()
    pass
class YokaBotProductListItem(scrapy.Item):
    # One entry of a paginated product listing: page number, link to the
    # product detail page, thumbnail and title.
    item_name = scrapy.Field()
    url = scrapy.Field()
    page = scrapy.Field()
    product_url = scrapy.Field()
    img = scrapy.Field()
    title = scrapy.Field()
    pass
class YokaBotProductItem(scrapy.Item):
    # Product detail page: id, breadcrumb trail, title, attribute table
    # and main image.
    item_name = scrapy.Field()
    url = scrapy.Field()
    product_id = scrapy.Field()
    breadcrumb = scrapy.Field()
    title = scrapy.Field()
    attrib = scrapy.Field()
    img = scrapy.Field()
| 2.484375 | 2 |
make_dataset/color_correct.py | enomotokenji/mcgan-cvprw2017-chainer | 3 | 12773659 | <filename>make_dataset/color_correct.py
import os
import argparse
from PIL import Image
from tqdm import tqdm
import colorcorrect.algorithm as cca
from colorcorrect.util import from_pil, to_pil
def color_correct(args):
    """Apply the selected colorcorrect algorithms to every image in
    args.in_dir and save the results under args.out_dir.

    Each of grey_world / stretch / max_white is applied only when the
    corresponding boolean flag on args is set; unset stages are identity.
    Images that fail correction are reported and saved unmodified.
    """
    in_dir = args.in_dir
    out_dir = args.out_dir
    if not os.path.exists(in_dir):
        raise Exception('{} does not exists.'.format(in_dir))
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    files = os.listdir(in_dir)
    # Build the pipeline stages once; disabled stages become identity.
    grey_world = cca.grey_world if args.grey_world else lambda x: x
    stretch = cca.stretch if args.stretch else lambda x: x
    max_white = cca.max_white if args.max_white else lambda x: x
    for file in tqdm(files):
        image = Image.open(os.path.join(in_dir, file))
        try:
            image = to_pil(stretch(max_white(grey_world(from_pil(image)))))
        except Exception:
            # FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit. Best-effort behavior is kept:
            # report the failing file and save it uncorrected.
            print(file)
        image.save(os.path.join(out_dir, file))
if __name__ == '__main__':
    # CLI: required input/output directories plus one flag per correction
    # algorithm (any combination may be enabled).
    parser = argparse.ArgumentParser()
    parser.add_argument('--in_dir', type=str, required=True)
    parser.add_argument('--out_dir', type=str, required=True)
    parser.add_argument('--grey_world', action='store_true')
    parser.add_argument('--stretch', action='store_true')
    parser.add_argument('--max_white', action='store_true')
    args = parser.parse_args()
    color_correct(args) | 2.78125 | 3 |
old/System/Utils.py | broadinstitute/cms | 13 | 12773660 |
"""Miscellaneous utilities.
"""
import sys, os, inspect, time, shutil, types, pickle, numpy, re
from string import Template
from traceback import print_exc
from numpy import nan, isnan
#import Tools.pp.pp as pp
def TypeInfer(column):
    """Take a list of strings, and attempt to infer a datatype that fits them all.

    If the strings are all integers, returns a list of corresponding Python integers.
    If the strings are all floats, returns a list of corresponding Python floats.
    Otherwise, returns the original list of strings. Empty strings map to
    numpy.nan in the numeric cases.

    Typically used to determine the datatype of a column read from a tab-
    or comma-separated text file.
    """
    try:
        return [int(x) if x != '' else numpy.nan for x in column]
    except (ValueError, TypeError):
        # FIX: was a bare "except:" (also caught KeyboardInterrupt etc.);
        # only parse failures should trigger the fallback.
        try:
            return [float(x) if x != '' else numpy.nan for x in column]
        except (ValueError, TypeError):
            # non-numeric column: hand back the strings untouched
            return column
def PermInverse(s):
    '''
    Fast inverse of a numpy permutation: returns inv with inv[s[i]] == i.
    '''
    inv = numpy.empty(len(s), dtype=int)
    inv[s] = numpy.arange(len(s))
    return inv
class BadCheckError:
    '''
    Error class used for raising I/O errors (maybe should be moved to system_io_override?)
    Instantiating it only prints a diagnostic; it does not subclass Exception,
    so it cannot actually be raised/caught as one -- pre-existing design.
    '''
    def __init__(self,iofunc,readfiles,writefiles, Dependencies,Creates):
        # Report which declared Dependencies/Creates the observed reads/writes violated.
        print "\nCHECK_ERROR: An I/O exception occured in function", iofunc, ": either the files" , readfiles , "aren't in" , Dependencies, "or the files", writefiles, "aren't in ", Creates, '. \n'
def RecursiveFileList(ToList,Avoid=None):
    '''
    Given list of top-level directories, recursively gets a list of files in the directories.
    ARGUMENTS:
    --ToList = list of top-level diretories as python list of path strings
    --Avoid = List of regexps of directory name patterns to NOT look in
    e..g if a directory name matches at any level, the function will not
    look further into that directory.

    NOTE(review): relies on IsFile/IsDir/listdir helpers that are not defined
    or imported in this module -- presumably provided by the surrounding
    Data Environment; confirm before reuse.
    '''
    if isinstance(ToList,list):
        # aggregate results over each top-level entry
        return ListUnion([RecursiveFileList(x,Avoid) for x in ToList])
    elif IsFile(ToList):
        return [ToList]
    elif IsDir(ToList):
        # ensure a trailing slash so child paths concatenate correctly
        if ToList[-1] != '/':
            ToList += '/'
        L = []
        for l in listdir(ToList):
            if IsFile(ToList + l):
                L += [ToList + l]
            elif IsDir(ToList + l):
                # skip directories whose name matches any Avoid pattern
                if Avoid == None or not any([re.search(a,l) for a in Avoid]):
                    #if Avoid == None or l not in Avoid :
                    L += RecursiveFileList(ToList + l,Avoid)
        return L
    else:
        # nonexistent path: contributes nothing
        return []
def Max(x):
    '''
    NaN-propagating max: returns nan if any element of x is nan,
    otherwise the maximum of x. Assumes numeric input (isnan-compatible).
    '''
    return nan if any(isnan(x)) else max(x)
def enumeratefrom(i, A):
    '''
    Like enumerate(A) but starting the index at i: returns the list
    [(i, A[0]), (i+1, A[1]), ...].
    '''
    assert i >= 0, 'index must be larger than 0 for enumeratefrom to work'
    return [(offset + i, item) for offset, item in enumerate(A)]
def uniqify(seq, idfun=None):
    '''
    Order-preserving uniqification.
    ARGUMENTS:
    seq = sequence object to uniqify
    idfun = optional key function under which items are considered equal
    RETURNS:
    python list with the first occurrence of each item in seq, in order
    '''
    if idfun is None:
        idfun = lambda item: item
    seen = set()
    kept = []
    for item in seq:
        marker = idfun(item)
        if marker in seen:
            continue
        seen.add(marker)
        kept.append(item)
    return kept
def FastArrayUniqify(X):
    '''
    Very fast uniqify routine for a numpy array.
    ARGUMENT:
    X = a numpy array
    RETURNS:
    [D, s] where s is a permutation sorting X and D is the boolean
    "first differences" mask of the sorted version, so X[s][D] (or
    X[s[D.nonzero()[0]]]) is the uniqified array; D and s themselves
    are sometimes useful on their own.
    '''
    order = X.argsort()
    sorted_vals = X[order]
    first_of_run = numpy.append([True], sorted_vals[1:] != sorted_vals[:-1])
    return [first_of_run, order]
def FastRecarrayUniqify(X):
    '''
    Record-array version of FastArrayUniqify.
    ARGUMENT:
    X = numpy record array
    RETURNS:
    [D, s] where s is a permutation sorting X lexicographically over all
    its columns and D is the row-wise "first differences" mask of the
    sorted version, so X[s][D] (or X[s[D.nonzero()[0]]]) is the
    uniqified array.
    '''
    columns = X.dtype.names
    order = X.argsort(order=columns)
    sorted_rows = X[order]
    first_of_run = numpy.append([True], sorted_rows[1:] != sorted_rows[:-1])
    return [first_of_run, order]
def DirName(path):
    '''
    Directory name of path, treating a trailing slash as part of the final
    component -- sometimes the right thing to use instead of
    os.path.dirname itself (which would return the path unchanged).
    '''
    trimmed = path[:-1] if path.endswith('/') else path
    return os.path.dirname(trimmed)
# Thin open() wrappers returning [handle, True]; the True flag is presumably
# consumed by the I/O-override/dependency-checking layer -- TODO confirm.
def open_for_read(ToRead):
    return [open(ToRead,'r'),True]

def open_for_read_universal(ToRead):
    # 'rU' = universal-newline read mode (Python 2 era)
    return [open(ToRead,'rU'),True]

def open_for_write(ToWrite):
    return [open(ToWrite,'w'),True]

def open_for_append(ToAddTo):
    return [open(ToAddTo,'a'),True]
def chkExists(path):
    """If the given file or directory does not exist, raise an exception"""
    if os.path.exists(path):
        return
    raise IOError("Directory or file %s does not exist" % path)
def PathCompress(path):
    '''Strip every parent-directory ("../") reference out of the path string.'''
    compressed = path.replace('../', '')
    return compressed
def redirect(x,To):
    '''
    utility that 'redirects' a path name from to a different directory.
    ARGUMENTS:
    x = path to redirect
    To = location to redirect x to
    RETURNS:
    path created by flattening the directory part of x (slashes become
    "__") and appending the result to To
    '''
    x = PathCompress(x)
    if '/' in x[:-1]:
        # index of the last interior slash: splits directory part from file part
        j = max([i for i in range(len(x)-1) if x[i] == '/'])
        if To[-1] != '/':
            # FIX: strings have no .append(); the original "To.append('/')"
            # raised AttributeError whenever To lacked a trailing slash.
            To = To + '/'
        return To + x[:j+1].replace('/','__') + x[j+1:]
    else:
        return To + x
def ListArrayTranspose(L):
    '''
    Transposes the simple array presentation of a list of lists (of equal length).
    Argument:
    L = [row1, row2, ..., rowN] where the rows are python lists of equal length.
    Returns:
    LT, a list of python lists such that LT[j][i] = L[i][j].
    '''
    n_cols = len(L[0])
    return [[row[col] for row in L] for col in range(n_cols)]
def GetFunctionsDefinedInModule(Module):
    '''
    Map name -> function for every plain function that is actually defined
    in the given live module (i.e. whose __module__ matches the module's
    name), as opposed to merely imported into it.
    '''
    members = inspect.getmembers(Module, inspect.isfunction)
    return dict((name, fn) for (name, fn) in members if fn.__module__ == Module.__name__)
def GetFunctionsMentionedInModule(Module):
    '''
    Map name -> function for every plain function reachable as an attribute
    of the given live module, whether defined there or imported into it
    (i.e. everything loaded when the module is imported).
    '''
    return dict(inspect.getmembers(Module, inspect.isfunction))
def RedirectList(ToRedirect, To):
    '''Apply redirect() to every path in ToRedirect; returns a tuple.'''
    return tuple(redirect(item, To) for item in ToRedirect)
def FixedPath(path):
    '''
    Data-Environment path normalisation, similar in spirit to
    os.path.normpath: drops a leading "./", collapses "/./" segments, and
    roots any path that does not start with "../" under "../Temp/".
    '''
    if path.startswith('./'):
        path = path[2:]
    path = path.replace('/./', '/')
    if not path.startswith('../'):
        path = '../Temp/' + path
    return path
def GetDataEnvironmentDirectory():
    # Root of the Data Environment: taken from the DataEnvironmentDirectory
    # environment variable when set, otherwise guessed as everything up to
    # (and including) the first '/' of the current working directory.
    x = os.environ
    if 'DataEnvironmentDirectory' in x.keys():
        return x['DataEnvironmentDirectory']
    else:
        print 'DataEnvironmentDirectory not an environment variable, assuming it is ' , os.getcwd()[:os.getcwd().find('/')] + '/'
        return os.getcwd()[:os.getcwd().find('/')] + '/'
def PathAlong(a,b):
    '''
    returns true when path a is inside the filetree under path b:
    either strictly below b, or equal to b after FixedPath normalisation.
    '''
    return PathStrictlyAlong(a,b) or (FixedPath(a) == FixedPath(b))
def PathStrictlyAlong(a, b):
    '''
    returns true when path a is strictly inside the filetree under path b
    (both are FixedPath-normalised first; a must extend b at a directory
    boundary).
    '''
    a, b = FixedPath(a), FixedPath(b)
    if len(a) <= len(b) or not a.startswith(b):
        return False
    return a[len(b)] == '/' or b.endswith('/')
def funcname():
    '''
    returns name of function in call stack in which funcname() is being called
    (frame depth 1 == the immediate caller of funcname)
    '''
    return sys._getframe(1).f_code.co_name

def caller(level = 2):
    '''
    returns name of function in call stack which is calling the
    function that is calling caller() (level can be raised to look
    further up the stack)
    '''
    return sys._getframe(level).f_code.co_name

def callermodule():
    '''
    returns the source *filename* (co_filename) of the code two frames up,
    i.e. of the module containing the caller of the function that calls
    callermodule() -- note: a file path, not a module name
    '''
    return sys._getframe(2).f_code.co_filename
def TimeStamp(T = None):
    '''
    deprecated TimeStamp function (should be replaced by time module formatters)
    NOTE(review): the result is derived from the repr of time.struct_time,
    so its exact shape depends on the Python version -- confirm before
    parsing these stamps anywhere.
    '''
    return time.localtime(T).__str__().replace(',','_').replace(' ','').strip('()')
def Union(ListOfSets):
    '''
    Aggregate a python list of python sets [S1, S2, ..., SN] into their union.
    '''
    merged = set()
    for group in ListOfSets:
        merged.update(group)
    return merged
def ListUnion(ListOfLists):
    '''
    takes python list of python lists
    [[l11,l12, ...], [l21,l22, ...], ... , [ln1, ln2, ...]]
    and returns the aggregated list
    [l11,l12, ..., l21, l22 , ...]
    None entries in ListOfLists are skipped.
    '''
    u = []
    for s in ListOfLists:
        # FIX: use an identity check -- the original "s != None" invokes
        # __eq__, which breaks on elements such as numpy arrays
        # (elementwise comparison -> ambiguous truth value).
        if s is not None:
            u.extend(s)
    return u
def GetDefaultVal(func,varname,NoVal = None):
    '''
    given a live python function object "func", return the default value for
    variable with name "varname" if it exists as a keyword variable, else
    return NoVal (keyword-only arguments are not inspected)
    '''
    # inspect.getargspec was removed in Python 3.11; prefer getfullargspec
    # when available (its first and fourth fields match getargspec's).
    _getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    spec = _getspec(func)
    args, defaults = spec[0], spec[3] or ()
    if varname in args:
        pos = args.index(varname) - len(args)
        # Only the last len(defaults) arguments carry defaults. The original
        # indexed the defaults tuple unconditionally and raised IndexError
        # for plain positional arguments instead of returning NoVal.
        if -pos <= len(defaults):
            return defaults[pos]
    return NoVal
def MakeDir(DirName,creates = ()):
    '''
    is a "strong" directory maker -- if DirName already exists, this deletes it first
    ("creates" is unused here; presumably consumed by the I/O-dependency
    machinery that wraps these calls -- TODO confirm)
    '''
    if os.path.exists(DirName):
        delete(DirName)
    os.mkdir(DirName)

def MakeDirs(DirName,creates = ()):
    '''
    "strong" version of os.makedirs -- removes any existing tree first
    ("creates" is unused here as well)
    '''
    if os.path.exists(DirName):
        delete(DirName)
    os.makedirs(DirName)
def strongcopy(tocopy,destination,use2 = False):
    '''
    "strong" version of copy -- if destination already exists, it removes
    it first before copying (note: only in the directory case; an existing
    destination *file* is simply overwritten by shutil.copy)
    use2 = use shutil.copy2 (preserves metadata) instead of shutil.copy
    '''
    if os.path.isfile(tocopy):
        if use2:
            shutil.copy2(tocopy,destination)
        else:
            shutil.copy(tocopy,destination)
    elif os.path.isdir(tocopy):
        if os.path.exists(destination):
            delete(destination)
        shutil.copytree(tocopy,destination)
def delete(ToDelete):
    '''
    unified "strong" delete: shutil.rmtree for a directory tree,
    os.remove for a file; silently does nothing for a nonexistent path
    '''
    if os.path.isdir(ToDelete):
        shutil.rmtree(ToDelete)
    elif os.path.isfile(ToDelete):
        os.remove(ToDelete)
def Log(s):
    """Log a debug message (no-op placeholder; replace to enable logging)"""
    # by default, do nothing.
def TemplateInstance(templatepath,outpath, **kws):
    '''
    Apply python template at "templatepath" with substitutions from
    keyword arguments **kws passed in, and write out result to outpath
    Useful for html templating
    Raises KeyError if the template references a placeholder missing
    from kws (string.Template.substitute semantics).
    '''
    # FIX: use context managers -- the original leaked the template file
    # handle (open(...).read()) and relied on GC to flush/close the output.
    with open(templatepath, 'r') as template_file:
        TemplateString = Template(template_file.read())
    NewString = TemplateString.substitute(kws)
    with open(outpath, 'w') as OutFile:
        OutFile.write(NewString)
def MakeT(r):
    '''
    If input 'r' is a comma-delimited string, return the tuple of its
    comma-separated pieces; otherwise return tuple(r).
    '''
    if isinstance(r, str):
        return tuple(r.split(','))
    return tuple(r)
def getKalong(LL1,LL2,k):
    '''
    Fast version of "K-along" paths.
    ARGUMENTS:
    --LL1 = numpy array of paths
    --LL2 = sorted numpy array of paths
    --k = nonnegative integer
    RETURNS:
    [s1,s2,W,U] where s1/s2 are the sort permutations applied internally to
    LL1/LL2, and W,U are index arrays such that LL2[s2][W[i]:U[i]] contains
    precisely those paths that are k directory levels down from LL1[s1][i]
    -- as path strings (no actual directory testing is done).
    W[i] = U[i] = 0 if no paths in LL2 are k levels down from that entry.
    E.g. if
    LL1 = numpy.array(['../Data/Dan_Data/', '../Users/DanYamins/','../Users/SijiaWang/'])
    and
    LL2 = numpy.array(['../Data/Dan_Data/NPR_Puzzle_Solutions',
    '../Data/Dan_Data/RandomData','../Users/DanYamins/Finance/'])
    then the W,U pair corresponds to [[0,2,0],[2,3,0]].
    '''
    # directory depth of each path (a trailing slash does not add a level)
    SlashList1 = numpy.array([len(y.split('/')) - (1 if y[-1] == '/' else 0) for y in LL1])
    SlashList2 = numpy.array([len(z.split('/')) - (1 if z[-1] == '/' else 0) for z in LL2])
    # sort both lists by (depth, path) so same-depth runs are contiguous
    M1 = numpy.rec.fromarrays([LL1,SlashList1],names=['Val','S'])
    M2 = numpy.rec.fromarrays([LL2,SlashList2],names=['Val','S'])
    s1 = M1.argsort(order=['S','Val']); M1 = M1[s1]; SlashList1 = SlashList1[s1]
    s2 = M2.argsort(order=['S','Val']); M2 = M2[s2]; SlashList2 = SlashList2[s2]
    # NOTE: these locals shadow the module-level Max helper (pre-existing)
    Max = max(SlashList1)
    Min = min(SlashList1)
    W = numpy.zeros(len(LL1),int)
    U = numpy.zeros(len(LL1),int)
    for i in range(Min,Max+1):
        # block of LL1 entries at depth i
        I1 = (SlashList1 == i); nz1 = I1.nonzero()[0];
        if len(nz1) > 0:
            st1 = min(nz1)
            # block of LL2 entries exactly k levels deeper
            I2 = (SlashList2 == i + k); nz2 = I2.nonzero()[0];
            if len(nz2) >0:
                st2 = min(nz2)
                # per-block ranges, then shift into global sorted coordinates
                [A,B] = getpathalong(M1['Val'][I1],M2['Val'][I2])
                W[st1:st1 + len(nz1)] = A + st2
                U[st1:st1 + len(nz1)] = B + st2
    return [s1,s2,W,U]
def maximalpathalong(YY,ZZ):
    '''
    Fast function for determining, for each element of YY, an element of
    ZZ that it is "path along", for numpy arrays YY and ZZ. When YY[i]
    is path along several elements of ZZ, returns the last such match at
    the deepest prefix depth scanned. If YY[i] is not path-along any
    element of ZZ, returns '' in that position.
    Returns an array parallel to YY (matches are drawn from a sorted
    copy of ZZ, with '' appended as the "no match" sentinel).
    '''
    ZZ = ZZ.copy() ; ZZ.sort()
    # Y = numpy.array([y + '/' if y[-1] != '/' else y for y in YY])
    # Z = numpy.array([y + '/' if y[-1] != '/' else y for y in ZZ])
    Y = YY
    Z = ZZ
    # Depth of each candidate prefix in Z (trailing slash not counted).
    SlashList = numpy.array([len(y.split('/')) - (1 if y[-1] == '/' else 0) for y in Z])
    Max = max(SlashList) if len(SlashList) > 0 else 0
    Min = min(SlashList) if len(SlashList) > 0 else 0
    # C[i] = index in sorted ZZ of the match for Y[i]; -1 means "none",
    # which indexes the '' sentinel appended below.
    C = -1*numpy.ones((len(YY),),int)
    for i in range(Min,Max+1):
        # Truncate each Y path to its first i components and look for
        # exact matches of that prefix in Z.
        T = numpy.array(['/'.join(z.split('/')[:i]) + ('/' if len(z.split('/')) > i else '') for z in Y])
        [A,B] = fastequalspairs(T,Z)
        M = (B>A)
        # B[M]-1 is the last element of the equal run; deeper depths
        # overwrite shallower matches.
        C[M] = B[M] - 1
    z = numpy.append(ZZ,[''])
    return z[C]
def getpathalong(YY,ZZ):
    '''
    Fast version of path-along for numpy arrays.
    ARGUMENTS:
    YY = numpy array of paths
    ZZ = sorted numpy array of paths
    RETURNS:
    [A,B] where A and B are numpy arrays of indices in ZZ such
    that ZZ[A[i]:B[i]] contains precisely those paths that are
    in the directory tree of YY[i] (as path strings -- no actual
    filesystem existence testing is done). A[i] = B[i] = 0 if no paths
    in ZZ are in the directory tree under YY[i].
    E.g. if
    LL1 = numpy.array(['../Data/Dan_Data/', '../Users/DanYamins/','../Users/SijiaWang/'])
    and
    LL2 = numpy.array(['../Data/Dan_Data/NPR_Puzzle_Solutions', '../Data/Dan_Data/NPR_Puzzle_Solutions/AmericaPensacolaPuzzle/',
    '../Data/Dan_Data/RandomData','../Users/DanYamins/Finance/'])
    then
    getpathalong(LL1,LL2) = [A,B] = [[0,3,0],[3,4,0]]
    '''
    # Y = numpy.array([y[:-1] if y[-1] == '/' else y for y in YY])
    # Z = numpy.array([z[:-1] if z[-1] == '/' else z for z in ZZ])
    # Normalize both sides to have a trailing slash so prefix matches
    # respect component boundaries.
    Y = numpy.array([y + '/' if y[-1] != '/' else y for y in YY])
    Z = numpy.array([y + '/' if y[-1] != '/' else y for y in ZZ])
    SlashList = numpy.array([len(y.split('/')) - (1 if y[-1] == '/' else 0) for y in Y])
    Max = max(SlashList) if len(SlashList) > 0 else 0
    Min = min(SlashList) if len(SlashList) > 0 else 0
    W = numpy.zeros(len(Y),int)
    U = numpy.zeros(len(Y),int)
    for i in range(Min,Max+1):
        T = numpy.array(['/'.join(z.split('/')[:i]) + ('/' if len(z.split('/')) > i else '') for z in Z ]) #get i-reduced slash list from Z, call it T
        # R/M: for each position in T, the index of the last element of
        # its run of equal prefixes.
        R = (T[1:] != T[:-1]).nonzero()[0]
        R = numpy.append(R,numpy.array([len(T)-1]))
        M = R[R.searchsorted(range(len(T)))]
        #get set of guys in Y with i slashes, call it L
        L = (SlashList == i)
        H = Y[L]
        # First position in T where each depth-i path could occur.
        D = T.searchsorted(H)
        # Sentinels so an off-the-end search position is harmless.
        T = numpy.append(T,numpy.array([0]))
        M = numpy.append(M,numpy.array([0]))
        # Keep (start, one-past-end) only where the prefix really matches.
        W[L] = (T[D] == H) * D
        U[L] = (T[D] == H) * (M[D] + 1)
    return [W,U]
def getpathalongs(Y,Z):
    '''
    Return a numpy array of indices into Z (in Z's original order) of
    the path strings that lie under some path in Y.
    NOTE(review): the original docstring described indices into Y, but
    s[L] maps run positions back through Z's argsort permutation --
    confirm intended semantics with callers.
    NOTE(review): under Python 3, range(...) objects (not lists) are
    passed to ListUnion -- verify ListUnion handles that.
    '''
    s = Z.argsort()
    Z = Z[s]
    [A,B] = getpathalong(Y,Z)
    # Collect every index in each nonempty [A[i], B[i]) run.
    L = ListUnion([range(A[i],B[i]) for i in range(len(A)) if A[i] < B[i]])
    return s[L]
def getpathstrictlyalong(YY,ZZ):
    '''
    Version of getpathalong that requires "strictly path along":
    exact matches of YY[i] itself are excluded from the runs.
    Returns [D,B] where D[i] is one past the last exact match of YY[i]
    in the (normalized) ZZ and B[i] is one past the last path-along
    match, so ZZ[D[i]:B[i]] holds the strictly-below paths.
    '''
    [A,B] = getpathalong(YY,ZZ)
    YY = numpy.array([y + '/' if y[-1] != '/' else y for y in YY])
    ZZ = numpy.array([y + '/' if y[-1] != '/' else y for y in ZZ])
    # C (run starts of exact matches) is intentionally unused; only the
    # end-of-equal-run D is needed to skip the exact matches.
    [C,D] = fastequalspairs(YY,ZZ)
    return [D,B]
def fastequalspairs(Y,Z):
    '''
    ARGUMENTS:
    Y = numpy array of values
    Z = sorted numpy array of values
    RETURNS:
    [A,B] where A and B are numpy arrays of indices such that
    Z[A[i]:B[i]] is the run of elements equal to Y[i], and
    A[i] = B[i] = 0 when Y[i] does not occur in Z.
    '''
    pool = Z.copy()
    # Index of the last element of each run of equal values.
    run_ends = (pool[1:] != pool[:-1]).nonzero()[0]
    run_ends = numpy.append(run_ends, numpy.array([len(pool) - 1]))
    last_of_run = run_ends[run_ends.searchsorted(range(len(pool)))]
    first_pos = pool.searchsorted(Y)
    # Sentinels so an off-the-end search position is harmless.
    pool = numpy.append(pool, numpy.array([0]))
    last_of_run = numpy.append(last_of_run, numpy.array([0]))
    found = (pool[first_pos] == Y)
    return [found * first_pos, found * (last_of_run[first_pos] + 1)]
def ModContents(obj,Cond = None):
    '''
    Modified version of the Contents function, avoiding recursive
    inspection of child objects that satisfy condition Cond.
    ARGUMENTS:
    --obj = BeautifulSoup object (or a plain leaf value)
    --Cond = two-place boolean function with arguments (child, parent);
    children for which Cond(child, parent) is true are skipped.
    If Cond is None this is the same as Contents.
    '''
    # Leaf node (no 'contents' attribute): render it directly.
    if 'contents' not in dir(obj):
        return str(obj)
    # Use identity comparison with None ('== None' may invoke arbitrary
    # __eq__ on BeautifulSoup objects).
    if Cond is None:
        return ''.join([ModContents(o) for o in obj.contents])
    return ''.join([ModContents(o,Cond) for o in obj.contents if not Cond(o,obj)])
def Contents(obj):
    '''
    Convenience function for working with BeautifulSoup contents
    objects (move this somewhere else?)
    ARGUMENT:
    --obj = BeautifulSoup object.
    Recursively concatenate the "string contents" of 'obj'.
    '''
    if 'contents' in dir(obj):
        return ''.join(Contents(child) for child in obj.contents)
    return str(obj)
def fastisin(Y,Z):
    '''
    Fast routine for determining which elements of numpy array Y appear
    in numpy array Z. Returns a boolean array aligned with Y.
    '''
    if len(Z) == 0:
        return numpy.zeros((len(Y),), bool)
    pool = Z.copy()
    pool.sort()
    pos = pool.searchsorted(Y)
    # Sentinel so an off-the-end search position is harmless.
    pool = numpy.append(pool, numpy.array([0]))
    matches = (pool[pos] == Y)
    if isinstance(matches, bool):
        # Scalar comparison result (e.g. incompatible dtypes): report no matches.
        return numpy.zeros((len(Y),), bool)
    return matches
def FastRecarrayEquals(Y,Z):
    '''
    Fast routine for determining whether numpy record array Y equals
    record array Z (same columns and same multiset of records,
    compared via their string representations).
    '''
    if Y.dtype.names != Z.dtype.names or len(Y) != len(Z):
        return False
    reprs_y = numpy.array([str(rec) for rec in Y])
    reprs_z = numpy.array([str(rec) for rec in Z])
    reprs_y.sort()
    reprs_z.sort()
    return all(reprs_y == reprs_z)
def FastRecarrayEqualsPairs(Y,Z):
    '''
    Like fastequalspairs, but for record arrays: records are compared
    via their string representations.
    Returns [A,B,s] where s is the argsort permutation of Z's string
    forms and [A,B] are the run boundaries from fastequalspairs.
    '''
    str_y = numpy.array([str(rec) for rec in Y])
    str_z = numpy.array([str(rec) for rec in Z])
    perm = str_z.argsort()
    str_z.sort()
    [A,B] = fastequalspairs(str_y,str_z)
    return [A,B,perm]
def IsDotPath(s,path=None):
    '''
    Determine whether s is a possibly valid dotted path of a python
    module; more accurate when the putative real (relative) file path is
    given in path. (If the path argument is given, this requires path to
    be a DataEnvironment-relative path, starting with ../)
    '''
    # A dotted path may only contain lowercase letters, digits and
    # underscores (dots are the separators and are stripped first).
    allowed = set('abcdefghijklmnopqrstuvwxyz0123456789_')
    step1 = set(s.replace('.','').lower()) <= allowed
    if path is None:
        return step1
    # The dotted name must match the .py file's path with '/' -> '.'.
    return step1 and path.endswith('.py') and s == path[:-3].strip('./').replace('/','.')
def FastRecarrayIsIn(Y,Z):
    '''
    Fast routine for determining which records in numpy record array Y
    appear in record array Z. Returns a boolean array aligned with Y;
    all-False when the two arrays do not share the same column names.
    '''
    if Y.dtype.names != Z.dtype.names:
        return numpy.zeros((len(Y),),bool)
    str_y = numpy.array([str(rec) for rec in Y])
    str_z = numpy.array([str(rec) for rec in Z])
    str_z.sort()
    return fastisin(str_y,str_z)
def FastRecarrayDifference(X,Y):
    '''
    Fast routine returning the records of numpy record array X that do
    not appear in numpy record array Y.
    '''
    if len(Y) == 0:
        return X
    present = FastRecarrayIsIn(X,Y)
    return X[numpy.invert(present)]
def fastarraymax(X,Y):
    '''
    Element-wise maximum of two equal-length numpy arrays.
    RETURNS:
    Z (an int array) where Z[i] = max(X[i],Y[i])
    '''
    out = numpy.zeros((len(X),), int)
    y_wins = X <= Y
    x_wins = Y < X
    out[y_wins] = Y[y_wins]
    out[x_wins] = X[x_wins]
    return out
def fastarraymin(X,Y):
    '''
    Element-wise minimum of two equal-length numpy arrays.
    RETURNS:
    Z (an int array) where Z[i] = min(X[i],Y[i])
    '''
    out = numpy.zeros((len(X),), int)
    x_wins = X <= Y
    y_wins = Y < X
    out[x_wins] = X[x_wins]
    out[y_wins] = Y[y_wins]
    return out
def SimpleStack(seq,UNIQIFY=False):
    '''
    Vertically stack a sequence of numpy record arrays.
    Avoids some of the problems of numpy.v_stack.
    If UNIQIFY is true, only unique records are retained.
    Empty inputs are skipped; if exactly one input is nonempty it is
    returned as-is, and if all are empty an empty slice of the first
    input is returned.
    '''
    newseq = [ss for ss in seq if len(ss) > 0]
    if len(newseq) > 1:
        seq = newseq
        names = seq[0].dtype.names
        # Widen each column to the largest dtype found across the inputs.
        formats = [max([a.dtype[att] for a in seq]).str for att in names]
        if UNIQIFY:
            X = numpy.rec.fromarrays([ListUnion([a[att].tolist() for a in seq]) for att in names], names = names, formats = formats)
            # FastRecarrayUniqify presumably returns (keep-mask, sort-perm)
            # -- TODO confirm; X[s][D] selects the unique sorted records.
            [D,s] = FastRecarrayUniqify(X)
            return X[s][D]
        else:
            return numpy.rec.fromarrays([ListUnion([a[att].tolist() for a in seq]) for att in names], names = names, formats = formats)
    elif len(newseq) == 1:
        return newseq[0]
    else:
        # All inputs empty: return an empty slice of the first one.
        return seq[0][0:0]
def SimpleStack1(seq,UNIQIFY=False):
    '''
    Vertically stack a sequence of numpy record arrays.
    Avoids some of the problems of numpy.v_stack but is slower.
    If UNIQIFY is set to true, only retains unique records.
    '''
    newseq = [ss for ss in seq if len(ss) > 0]
    if len(newseq) > 1:
        seq = newseq
        names = seq[0].dtype.names
        # Widen each column to the largest dtype found across the inputs.
        formats = [max([a.dtype[att] for a in seq]).str for att in names]
        if UNIQIFY:
            # BUG FIX: the stacked result was computed but never returned,
            # so UNIQIFY=True always yielded None.
            return numpy.rec.fromrecords(uniqify(ListUnion([ar.tolist() for ar in newseq])), names = names, formats = formats)
        else:
            return numpy.rec.fromrecords(ListUnion([ar.tolist() for ar in newseq]), names = names, formats = formats)
    elif len(newseq) == 1:
        return newseq[0]
    else:
        # All inputs empty: return an empty slice of the first one.
        return seq[0][0:0]
def SimpleColumnStack(seq):
    '''
    Stack the columns of a sequence of numpy record arrays side by side.
    Avoids some of the problems of numpy.c_stack but is slower.
    '''
    columns = ListUnion([[rec_array[name] for name in rec_array.dtype.names] for rec_array in seq])
    all_names = ListUnion([list(rec_array.dtype.names) for rec_array in seq])
    return numpy.rec.fromarrays(columns,names=all_names)
def RemoveColumns(recarray,ToRemove):
    '''
    Given a numpy recarray and a list of column names ToRemove, return a
    recarray containing only the columns whose names are not in ToRemove.
    '''
    kept = [name for name in recarray.dtype.names if name not in ToRemove]
    newdtype = numpy.dtype([d for d in recarray.dtype.descr if d[0] not in ToRemove])
    return numpy.rec.fromarrays([recarray[name] for name in kept], dtype=newdtype)
def MaximalCommonPath(PathList):
    '''
    Given a list of paths, return the maximal common prefix path. Like
    os.path.commonprefix but proper for Data Environment purposes:
    the prefix is cut at component boundaries, a prefix that is itself a
    listed file path is backed off to its parent when it also acts as a
    directory, and a directory prefix gets a trailing '/'.
    '''
    candidates = list(set(PathList))
    if len(candidates) == 0:
        return ''
    originals = candidates
    trimmed = [p[:-1] if p[-1] == '/' else p for p in candidates]
    pieces = [p.split('/') for p in trimmed]
    shortest = min(len(x) for x in pieces)
    common = ''
    depth = 0
    # Grow the common prefix one component at a time.
    while depth <= shortest:
        prefixes = set('/'.join(x[:depth]) for x in pieces)
        if len(prefixes) == 1:
            common = prefixes.pop()
            depth += 1
        else:
            break
    if common in originals:  # common is itself a listed file path
        if any(p.startswith(common + '/') for p in originals):  # ... and also a directory
            common = '/'.join(common.split('/')[:-1])
    if common != '':
        if any(p.startswith(common + '/') for p in originals):  # common is a directory
            common += '/'
    return common
def Backslash(Dir,Verbose=False):
    '''
    Adds '/' to the end of a path if not already present (meant to make
    formatting of directory paths consistently have the trailing slash).
    '''
    if Dir[-1] != '/':
        if Verbose:
            # Parenthesized single-argument print is valid under both
            # Python 2 (statement) and Python 3 (function); the old
            # Python-2-only print statement broke Python 3 parsing.
            print("Warning: the directory name, %s, was provided. The character '/' was appended to the end of the name." % Dir)
        return Dir + '/'
    else:
        return Dir
def MakeDirWithDummy(Dir):
    '''
    Create directory Dir containing a single empty file named 'dummy'.
    '''
    dirpath = Backslash(Dir)
    MakeDir(dirpath)
    handle = open_for_write(dirpath + 'dummy')[0]
    handle.write('')
def MakeDirWithInit(Dir):
    '''
    Create directory Dir containing a single empty file named
    '__init__.py' (so the directory is importable as a package).
    '''
    dirpath = Backslash(Dir)
    MakeDir(dirpath)
    handle = open_for_write(dirpath + '__init__.py')[0]
    handle.write('')
def GetTimeStampedArchiveName(toarchive):
    '''
    Given a path string (assumed to start with '../'), return the
    corresponding name it would have in the Archive, with a timestamp
    attached.
    '''
    stamp = TimeStamp()
    # Drop the leading '../' and flatten the path separators.
    flattened = toarchive[3:].replace('/','__')
    return 'Archive_' + stamp + '_' + flattened
def copy_to_archive(toarchive,depends_on=('../',),creates=('../Archive/',)):
    '''
    Copy the file or directory 'toarchive' into '../Archive/' under a
    timestamped archive name (see GetTimeStampedArchiveName), creating
    the Archive directory if needed.
    '''
    ArchivedName = GetTimeStampedArchiveName(toarchive)
    if not PathExists('../Archive/'):
        # Parenthesized single-argument prints are valid under both
        # Python 2 and 3; the old print statements broke Python 3 parsing.
        print('Creating Archive ....')
        MakeDir('../Archive/')
    if PathExists(toarchive):
        strongcopy(toarchive,'../Archive/' + ArchivedName)
    else:
        print('ERROR: The path %s does not exist; nothing archived.' % toarchive)
def move_to_archive(toarchive,depends_on=('../',),creates=('../Archive/',)):
    '''
    Move the file or directory 'toarchive' into '../Archive/' under a
    timestamped archive name (see GetTimeStampedArchiveName), creating
    the Archive directory if needed.
    '''
    ArchivedName = GetTimeStampedArchiveName(toarchive)
    if not PathExists('../Archive/'):
        # Parenthesized single-argument prints are valid under both
        # Python 2 and 3; the old print statements broke Python 3 parsing.
        print('Creating Archive ....')
        MakeDir('../Archive/')
    if PathExists(toarchive):
        Rename(toarchive,'../Archive/' + ArchivedName)
    else:
        print('ERROR: The path %s does not exist; nothing archived.' % toarchive)
def CompilerChecked(ToCheck):
    '''
    Clean a list of strings representing python regular expressions,
    returning only those that properly compile. An optional 'NOT '
    prefix is ignored for the compile check but preserved in the result,
    and empty strings are replaced by the never-matching '^$'.
    '''
    X = []
    for L in ToCheck:
        # Strip the 'NOT ' negation prefix before compiling.
        LL = L[4:] if L.startswith('NOT ') else L
        try:
            re.compile(LL)
        except re.error:
            # Was a bare 'except:'; narrowed to re.error since ToCheck is
            # documented to contain strings. Print made Python-3 compatible.
            print("Error: the string, %s, was found calling %s. This string could not be compiled as a regular expression and will not be loaded." % (LL, funcname()))
        else:
            X += [L if L != '' else '^$']
    return X
def CheckInOutFormulae(ExpList,S):
    '''
    Given a list ExpList of regular expression strings and "NOT "-prefixed
    regular expression strings, return True iff the string S:
    -- matches at least one of the expressions in ExpList that are _not_ prefixed by 'NOT ', and
    -- matches none of the expressions in ExpList that _are_ prefixed by 'NOT '.
    ExpList may also be given as a single comma-delimited string.
    (Docstring corrected: this returns a boolean for a single string S,
    not a filtered list.)
    '''
    if isinstance(ExpList,str):
        ExpList = ExpList.split(',')
    InExpList = [exp for exp in ExpList if not exp.startswith('NOT ')]
    OutExpList = [exp[4:] for exp in ExpList if exp.startswith('NOT ')]
    # Was an immediately-invoked lambda with '!= None' comparisons.
    matches_in = any(re.match(RegExp,S) is not None for RegExp in InExpList)
    matches_out = any(re.match(RegExp,S) is not None for RegExp in OutExpList)
    return matches_in and not matches_out
def AddInitsAbove(opfile):
    '''
    Given a path to a python module opfile, add an empty __init__.py
    file to each directory above it (within the Data Environment) that
    lacks one, and reset each directory's timestamps so it appears as if
    nothing has changed.
    The intent is to allow python modules placed in directories in the
    Data Environment to be accessed by package imports without the user
    having to remember to put '__init__.py' in the containing
    directories. The timestamps are reset so that no spurious
    re-computations are triggered by the appearance of change.
    '''
    DirList = opfile.split('/')[:-1]
    # Start at 1, so the top-level component (e.g. '..') itself is
    # skipped -- presumably intentional; TODO confirm.
    for ii in range(1,len(DirList)):
        DirName = '/'.join(DirList[:ii+1]) + '/'
        # Remember the directory's times before the write below changes them.
        oldatime = os.path.getatime(DirName)
        oldmtime = os.path.getmtime(DirName)
        if '__init__.py' not in listdir(DirName):
            F = open(DirName + '__init__.py','w')
            F.close()
            # Restore the original (atime, mtime) of the directory.
            os.utime(DirName,(oldatime,oldmtime))
class multicaster():
    '''
    Multicast a string output stream: every write has its original
    effect and is also appended to a log file.
    Typical usage:
        sys.stdout = multicaster('LogFile.txt', sys.__stdout__)
    Then whenever a 'print' statement is made, output is directed both
    to the original stdout as well as to the logfile "LogFile.txt".
    '''
    def __init__(self,filename,OldObject,New=False):
        '''
        ARGUMENTS:
        filename = name of the log file to write to
        OldObject = original output stream to multicast
        New = if True, overwrite the log file with a fresh header;
            otherwise stream output is _appended_ to 'filename'
        '''
        self.file = filename
        self.old = OldObject
        if New:
            with open(filename,'w') as log:
                log.write('\n\n------------------------------------------------------------------------------------------------------------------------------------------------------\n')
                log.write('STARTING LOG: ' + time.strftime('%c %Z') + '\n')
                log.write('------------------------------------------------------------------------------------------------------------------------------------------------------\n\n')
    def __getattr__(self,name):
        '''
        Delegate every attribute other than 'write' to the wrapped
        stream, so this object behaves like the original stdout for
        everything except writing (which is intercepted below).
        '''
        if name != 'write':
            return self.old.__getattribute__(name)
    def write(self,s):
        # Append to the log, then forward to the original stream.
        with open(self.file,'a') as log:
            log.write(s)
        return self.old.write(s)
def DictInvert(D):
    '''
    ARGUMENT:
    dictionary D
    OUTPUT:
    dictionary whose keys are the unique values of D, and whose value
    at key 'K' is the *set* of keys 'k' in D such that D[k] == K.
    (Docstring corrected: the implementation has always produced sets,
    not lists.)
    '''
    return dict((v, set(k for k in D if D[k] == v)) for v in set(D.values()))
def PathExists(ToCheck):
    '''
    Convenience wrapper for os.path.exists.
    Defined as a function (rather than 'PathExists = os.path.exists') so
    as not to disturb the system i/o intercept: this module must be
    execfiled FIRST, before system_io_override.
    '''
    return os.path.exists(ToCheck)
def Rename(src,dest):
    '''
    Convenience wrapper for os.rename.
    Defined as a function (rather than binding the os function directly)
    so as not to disturb the system i/o intercept: this module must be
    execfiled FIRST, before system_io_override.
    '''
    os.rename(src,dest)
def IsDir(ToCheck):
    '''
    Convenience wrapper for os.path.isdir.
    Defined as a function (rather than binding the os function directly)
    so as not to disturb the system i/o intercept: this module must be
    execfiled FIRST, before system_io_override.
    '''
    return os.path.isdir(ToCheck)
def IsFile(ToCheck):
    '''
    Convenience wrapper for os.path.isfile.
    Defined as a function (rather than binding the os function directly)
    so as not to disturb the system i/o intercept: this module must be
    execfiled FIRST, before system_io_override.
    '''
    return os.path.isfile(ToCheck)
def FindAtime(ToAssay):
    '''
    Convenience wrapper for os.path.getatime.
    Defined as a function (rather than binding the os function directly)
    so as not to disturb the system i/o intercept: this module must be
    execfiled FIRST, before system_io_override.
    '''
    return os.path.getatime(ToAssay)
def listdir(ToList):
    '''
    Convenience wrapper for os.listdir.
    Defined as a function (rather than binding the os function directly)
    so as not to disturb the system i/o intercept: this module must be
    execfiled FIRST, before system_io_override.
    '''
    return os.listdir(ToList)
# Convenience aliases: ListAnd(seq) / ListOr(seq) are just the builtins.
ListAnd = all
ListOr = any
| 2.78125 | 3 |
autobahn/twisted/flashpolicy.py | dimddev/AutobahnPython | 0 | 12773661 | <gh_stars>0
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import re
from twisted.internet.protocol import Protocol, Factory
__all__ = (
'FlashPolicyProtocol',
'FlashPolicyFactory'
)
class FlashPolicyProtocol(Protocol):
    """
    Flash Player 9 (version 9.0.124.0 and above) implements a strict new access
    policy for Flash applications that make Socket or XMLSocket connections to
    a remote host. It now requires the presence of a socket policy file
    on the server.

    We want this to support the Flash WebSockets bridge which is needed for
    older browser, in particular MSIE9/8.

    .. seealso::
       * `Autobahn WebSocket fallbacks example <https://github.com/tavendo/AutobahnPython/tree/master/examples/twisted/websocket/echo_wsfallbacks>`_
       * `Flash policy files background <http://www.lightsphere.com/dev/articles/flash_socket_policy.html>`_
    """

    # Raw string: '\s' in a non-raw literal is an invalid escape
    # sequence (a DeprecationWarning on modern Python).
    REQUESTPAT = re.compile(r"^\s*<policy-file-request\s*/>")
    REQUESTMAXLEN = 200
    REQUESTTIMEOUT = 5
    POLICYFILE = """<?xml version="1.0"?><cross-domain-policy><allow-access-from domain="%s" to-ports="%s" /></cross-domain-policy>"""

    def __init__(self, allowedDomain, allowedPorts):
        """
        :param allowedDomain: The domain from which Flash player connections are allowed, or ``"*"`` for any.
        :type allowedDomain: str
        :param allowedPorts: Comma-joined string of ports to which Flash player should be allowed to connect, or ``"*"`` for any.
        :type allowedPorts: str
        """
        self._allowedDomain = allowedDomain
        self._allowedPorts = allowedPorts
        self.received = ""
        self.dropConnection = None

    def connectionMade(self):
        # DoS protection: abort connections that never send a valid
        # request within REQUESTTIMEOUT seconds.
        ##
        def dropConnection():
            self.transport.abortConnection()
            self.dropConnection = None
        self.dropConnection = self.factory.reactor.callLater(FlashPolicyProtocol.REQUESTTIMEOUT, dropConnection)

    def connectionLost(self, reason):
        # Cancel the pending DoS-protection timeout, if any.
        if self.dropConnection:
            self.dropConnection.cancel()
            self.dropConnection = None

    def dataReceived(self, data):
        self.received += data
        if FlashPolicyProtocol.REQUESTPAT.match(self.received):
            # got valid request: send policy file
            ##
            self.transport.write(FlashPolicyProtocol.POLICYFILE % (self._allowedDomain, self._allowedPorts))
            self.transport.loseConnection()
        elif len(self.received) > FlashPolicyProtocol.REQUESTMAXLEN:
            # possible DoS attack
            ##
            self.transport.abortConnection()
        else:
            # need more data
            ##
            pass
class FlashPolicyFactory(Factory):

    def __init__(self, allowedDomain=None, allowedPorts=None, reactor=None):
        """
        :param allowedDomain: The domain from which to allow Flash to connect from.
           If ``None``, allow from anywhere.
        :type allowedDomain: str or None
        :param allowedPorts: The ports to which Flash player should be allowed to connect.
           If ``None``, allow any ports.
        :type allowedPorts: list of int or None
        :param reactor: Twisted reactor to use. If not given, autoimport.
        :type reactor: obj
        """
        # lazy import to avoid reactor install upon module import
        if reactor is None:
            from twisted.internet import reactor
        self.reactor = reactor

        # BUG FIX: str(None) is the truthy string 'None', so the old
        # 'str(allowedDomain) or "*"' never fell back to "*" for the
        # documented allowedDomain=None default.
        if allowedDomain is None:
            self._allowedDomain = "*"
        else:
            self._allowedDomain = str(allowedDomain)

        if allowedPorts:
            self._allowedPorts = ",".join([str(port) for port in allowedPorts])
        else:
            self._allowedPorts = "*"

    def buildProtocol(self, addr):
        proto = FlashPolicyProtocol(self._allowedDomain, self._allowedPorts)
        proto.factory = self
        return proto
| 1.554688 | 2 |
Contest/ABC037/a/main.py | mpses/AtCoder | 0 | 12773662 | <gh_stars>0
#!/usr/bin/env python3
# a, b: presumably two unit prices; c: the available budget -- TODO
# confirm against the problem statement (AtCoder ABC037 A).
a, b, c = map(int, input().split())
# Integer count of the cheaper item affordable with c.
print(c // min(a, b))
| 2.46875 | 2 |
Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/22.01-Binary Search.py | shihab4t/Books-Code | 0 | 12773663 | <gh_stars>0
def binary_search(nums, target):
    """Return the index of target in the sorted list nums, or -1 if absent."""
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if nums[mid] == target:
            return mid
        if nums[mid] < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
if __name__ == "__main__":
    # Ad-hoc demonstration: search a sorted list for a few targets
    # (expected output: -1, 5, 1).
    lst = [0, 1, 2, 5, 6, 7, 8]
    print(binary_search(lst, 10))
    print(binary_search(lst, 7))
    print(binary_search(lst, 1))
| 3.78125 | 4 |
lib/solutions/CHK/checkout_solution.py | DPNT-Sourcecode/CHK-vkwa01 | 0 | 12773664 | <reponame>DPNT-Sourcecode/CHK-vkwa01
from collections import Counter
class Basket:
    """
    Helper class holding the items of a single checkout and computing
    their total price, including all special offers.
    """
    prices = {
        "A": 50, "B": 30, "C": 20, "D": 15, "E": 40,
        "F": 10, "G": 20, "H": 10, "I": 35, "J": 60,
        "K": 70, "L": 90, "M": 15, "N": 40, "O": 10,
        "P": 50, "Q": 30, "R": 50, "S": 20, "T": 20,
        "U": 40, "V": 50, "W": 20, "X": 17, "Y": 20,
        "Z": 21,
    }
    # The offers are run in order, so the most attractive offer should be at the top.
    buy_x_get_y_free_offers = [
        {"quantity": 2, "sku": "E", "free": "B"},
        {"quantity": 3, "sku": "F", "free": "F"},
        {"quantity": 3, "sku": "N", "free": "M"},
        {"quantity": 3, "sku": "R", "free": "Q"},
        {"quantity": 4, "sku": "U", "free": "U"},
    ]
    # The offers are run in order, so the most attractive offer should be at the top.
    # And the most expensive sku should be first for the buy-any-of-X offers.
    multi_item_offers = [
        {"quantity": 5, "skus": ("A", ), "price": 200},
        {"quantity": 3, "skus": ("A", ), "price": 130},
        {"quantity": 2, "skus": ("B", ), "price": 45},
        {"quantity": 10, "skus": ("H", ), "price": 80},
        {"quantity": 5, "skus": ("H", ), "price": 45},
        {"quantity": 2, "skus": ("K", ), "price": 120},
        {"quantity": 5, "skus": ("P", ), "price": 200},
        {"quantity": 3, "skus": ("Q", ), "price": 80},
        {"quantity": 3, "skus": ("V", ), "price": 130},
        {"quantity": 2, "skus": ("V", ), "price": 90},
        {"quantity": 3, "skus": ("Z", "S", "T", "Y", "X"), "price": 45}
    ]
    def __init__(self, skus: str):
        """Validate the sku string and tally the item counts."""
        unknown = [sku for sku in skus if sku not in self.prices]
        if unknown:
            raise ValueError(f"Invalid basket, all skus must be one of {', '.join(sku for sku in self.prices)}")
        self.sku_counter = Counter(skus)
    def _calculate_offers_and_remove_skus(self) -> int:
        """
        Apply every offer to the tallied items, removing the items each
        offer consumes from the counter.
        Returns:
            The total value of the multi-item offers found.
        """
        total = 0
        for offer in self.buy_x_get_y_free_offers:
            # Each 'quantity' of sku earns one free 'free' item.
            times = self.sku_counter[offer["sku"]] // offer["quantity"]
            freebie = offer["free"]
            self.sku_counter[freebie] = max(0, self.sku_counter[freebie] - times)
        for offer in self.multi_item_offers:
            group = offer["skus"]
            available = sum(self.sku_counter[sku] for sku in group)
            times = available // offer["quantity"]
            total += times * offer["price"]
            # Consume the matched items, most expensive sku first
            # (the group tuples list skus in descending price order).
            for _ in range(times * offer["quantity"]):
                chosen = next(sku for sku in group if self.sku_counter[sku] > 0)
                self.sku_counter[chosen] -= 1
        return total
    def calculate_checkout(self) -> int:
        """
        Returns:
            The basket checkout value.
        """
        total = self._calculate_offers_and_remove_skus()
        remaining = sum(self.prices[sku] * count for sku, count in self.sku_counter.items())
        return total + remaining
# noinspection PyUnusedLocal
# skus = unicode string
def checkout(skus: str) -> int:
    """
    Compute the value of the basket.
    Args:
        skus: String where each letter represents an item in the basket.
    Returns -1 when the sku string contains any unknown item.
    """
    try:
        return Basket(skus).calculate_checkout()
    except ValueError:
        return -1
| 2.375 | 2 |
epollchain/models.py | yashgupta10/blockchain | 1 | 12773665 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 2 22:40:45 2020
@author: yashm
"""
import json
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from epollchain import login_manager#db,
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login user loader callback.
    # NOTE(review): user_id is ignored and a fresh User is returned for
    # every session -- this only works because User reads its fields
    # from data/personal.json at class-definition time (single-user
    # app); confirm intent.
    user = User()
    return user
class User(UserMixin):
    # All account fields are loaded once, at class-definition time, from
    # a single-user JSON config file -- every User instance shares them.
    with open('epollchain/data/personal.json') as file:
        personal = json.load(file)
    id = personal['emailid']
    username = personal['name']
    email = personal['emailid']
    #image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    password = personal['password']
    #posts = db.relationship('Post', backref='author', lazy=True)
    image_file = 'profile.jpg'
    def get_reset_token(self, expires_sec=1800):
        # Issue a signed, time-limited (default 30 min) password-reset token.
        s = Serializer(current_app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')
    @staticmethod
    def verify_reset_token(token):
        # Validate a reset token; returns None on a bad/expired token.
        # NOTE(review): the decoded user_id is discarded and the *class*
        # User (not an instance) is returned on success; the bare
        # 'except:' also swallows all errors. Works only for a
        # single-user app -- confirm this is intended.
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        except:
            return None
        return User
    def __repr__(self):
        return f"User('{self.username}', '{self.email}')"
| 2.3125 | 2 |
mswindows/get_pytables_version.py | joshmoore/PyTables | 3 | 12773666 | <reponame>joshmoore/PyTables
# Print the version split into three space-separated components.
# (Python 2 script: note the 'print i,' statement syntax below.)
import sys
# First CLI argument: path to a file containing the version string.
verfile = sys.argv[1]
f = open(verfile)
version = f.read()
# Keep only the FIRST character of each dot-separated component that
# starts with a digit.
# NOTE(review): multi-digit components are truncated (e.g. '2.10.1'
# yields '2 1 1') -- confirm whether whole numeric components were
# intended.
l = [a[0] for a in version.split('.') if a[0] in '0123456789']
# If no revision, '0' is added
if len(l) == 2:
    l.append('0')
# Emit the components on one line, space separated (the trailing comma
# suppresses the newline in Python 2).
for i in l:
    print i,
f.close()
| 2.90625 | 3 |
tests/test_hvr3_polyC.py | ryanraaum/oldowan.mtconvert | 2 | 12773667 | <filename>tests/test_hvr3_polyC.py
from oldowan.mtconvert.seq2sites import seq2sites
from oldowan.polymorphism import Polymorphism
def test_normal_polyC():
    """Normal Poly C stretch at end of HVR3.
    The query matches the rCRS exactly, so no polymorphisms are called.
    Seq:  CAAAGACACCCCCCACA
    rCRS: CAAAGACACCCCCCACA
    Sites: <None>
    """
    seq = 'CAAAGACACCCCCCACA'
    result = seq2sites(seq)
    assert len(result) == 0
def test_expanded_polyC():
    """Expanded Poly C stretch at end of HVR3.
    (Docstring corrected to match the assertions below: the query
    carries SIX extra C's in the 573 poly-C tract, not three.)
    Seq:  ...CAAAGACACCCCCCCCCCCCACA
    rCRS: ...CAAAGACACCCCCC------ACA
    Sites: 573.1C 573.2C 573.3C 573.4C 573.5C 573.6C
    """
    a = Polymorphism(573,1,'C')
    b = Polymorphism(573,2,'C')
    c = Polymorphism(573,3,'C')
    d = Polymorphism(573,4,'C')
    e = Polymorphism(573,5,'C')
    f = Polymorphism(573,6,'C')
    seq = 'ACCCCATACCCCGAACCAACCAAACCCCAAAGACACCCCCCCCCCCCACA'
    result = seq2sites(seq)
    assert len(result) == 6
    assert a in result
    assert b in result
    assert c in result
    assert d in result
    assert e in result
    assert f in result
| 2.40625 | 2 |
gtfs_bounds.py | Trillium-Solutions/gtfs-bounds | 0 | 12773668 | #!/usr/bin/env python3
#
# Copyright (C) 2020 Trillium Solutions <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this program except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zipfile
import csv
import argparse
import subprocess
from sys import argv, stderr, stdout
from io import TextIOWrapper
from pathlib import Path
def parse_args():
    """
    Parse command-line arguments for the GTFS-bounds tool.

    Exits with status 1 (after printing help and an error to stderr)
    when a GTFS argument is not a zip archive, or when the OSM output
    file already exists and --force was not given.
    """
    parser = argparse.ArgumentParser(
        allow_abbrev=False,
        description="""
    Find the lat/lon bounds of a GTFS file.
    If an OSM input file is provided, create an output file which is a
    trimmed version of the input file.
    Alternatively, OSM may be downloaded from the Overpass API
    (https://wiki.openstreetmap.org/wiki/Overpass_API) and written to
    an output file in OSM XML format.
    """)
    # -i and -d are mutually exclusive ways of obtaining OSM data.
    input_group = parser.add_mutually_exclusive_group()
    input_group.add_argument('-i', '--osm-input',
        type=argparse.FileType(),
        help="Input OSM file, used by osmconvert.")
    input_group.add_argument('-d', '--download-from-overpass',
        action='store_true',
        help="Download OSM from Overpass API, and save to the OSM_OUTPUT file. Uses the wget program.")
    parser.add_argument('-o',
        '--osm-output',
        help="Output OSM file, will be overwritten.")
    parser.add_argument('--force',
        action='store_true',
        help="Force overwrite of the OSM_OUTPUT file.")
    parser.add_argument('--buffer-degrees',
        type=float,
        help="Increase the bounds by a Buffer of this many degrees.")
    parser.add_argument('gtfs_file', nargs='*', help="Input GTFS file. Multiple files may be provided.")
    args = parser.parse_args()
    # Every GTFS argument must be a readable zip archive.
    for g in args.gtfs_file:
        if not zipfile.is_zipfile(g):
            parser.print_help()
            print ("\nERROR, the GTFS file '%s' doesn't appear to be a zip archive." % g, file=stderr)
            exit(1)
    if args.osm_output:
        o = Path(args.osm_output)
        if o.exists() and not args.force:
            parser.print_help()
            # FIX: this error now goes to stderr, matching the parallel
            # error message above (it previously went to stdout).
            print ("\nERROR, output osm file '%s' exists and --force was not used." % args.osm_output, file=stderr)
            exit(1)
    return args
def main():
    """
    Compute the bounding box of all stops in the given GTFS feeds,
    print it, and optionally trim a local OSM file (via osmconvert) or
    download the area from the Overpass API (via wget).
    """
    # Sentinel bounds: any real lat/lon will replace them.
    min_lat = 1000
    max_lat = -1000
    min_lon = 1000
    max_lon = -1000
    args = parse_args()
    for g in args.gtfs_file:
        with zipfile.ZipFile(g) as z:
            #print ('z is: %s' % z, file=stderr)
            stopsfile = TextIOWrapper(z.open('stops.txt'))
            #print ('stops is: %s' % stopsfile, file=stderr)
            stops = csv.DictReader(stopsfile)
            for stop in stops:
                try:
                    min_lat = min(min_lat, float(stop['stop_lat']))
                    max_lat = max(max_lat, float(stop['stop_lat']))
                    min_lon = min(min_lon, float(stop['stop_lon']))
                    max_lon = max(max_lon, float(stop['stop_lon']))
                except (KeyError, ValueError, TypeError):
                    # Skip stops with missing or malformed coordinates.
                    # BUG FIX: was 'except e:', which raised NameError
                    # whenever any error actually occurred.
                    pass
    if 1000 in (min_lat, min_lon) or -1000 in (max_lat, max_lon):
        print('Sorry, bounds not found.')
        exit(1)
    print('Note: please use caution when intepreting these results near longitude +180/-180!', file=stderr)
    print('Bounds are lat: [%s, %s] lon: [%s, %s]' %(min_lat, max_lat, min_lon, max_lon))
    if args.buffer_degrees:
        # Expand the box symmetrically by the requested buffer.
        min_lat -= args.buffer_degrees
        min_lon -= args.buffer_degrees
        max_lat += args.buffer_degrees
        max_lon += args.buffer_degrees
        print('Buffered Bounds are lat: [%s, %s] lon: [%s, %s]' %(min_lat, max_lat, min_lon, max_lon))
    # print ('osmconvert -b=%s,%s,%s,%s --complete-ways ' % (min_lon,min_lat,max_lon,max_lat))
    if args.osm_input and args.osm_output:
        run_arguments = [
            'osmconvert',
            args.osm_input.name,
            '-b=%s,%s,%s,%s' % (min_lon, min_lat, max_lon, max_lat),
            '--complete-ways',
            '-o=%s' % args.osm_output,
        ]
        # print('Running:', ' '.join(run_arguments), file=stderr)
        subprocess.run(run_arguments)
    if args.osm_output and args.download_from_overpass:
        url = 'https://overpass-api.de/api/map?bbox=%s,%s,%s,%s' % (min_lon,min_lat,max_lon,max_lat)
        print('Downloading from the Overpass URL: %s' % url)
        run_arguments = [ 'wget', url, '--compression=gzip', '-O', args.osm_output ]
        subprocess.run(run_arguments)
| 2.1875 | 2 |
python/eet/pipelines/text_classification.py | SidaZh/EET | 0 | 12773669 | #
# Created by djz on 2022/04/01.
#
import numpy as np
from typing import Dict
from transformers.file_utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import GenericTensor, Pipeline
def sigmoid(_outputs):
    """Element-wise logistic sigmoid: map real-valued logits into (0, 1)."""
    return 1.0 / (np.exp(-_outputs) + 1.0)
def softmax(_outputs):
    """Numerically stable softmax over the last axis.

    Subtracting the per-row maximum before exponentiating prevents overflow
    without changing the result (softmax is shift-invariant).
    """
    row_max = _outputs.max(axis=-1, keepdims=True)
    exps = np.exp(_outputs - row_max)
    return exps / np.sum(exps, axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    """Names of the activation applied to model logits during post-processing."""
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
class TextClassificationPipeline(Pipeline):
    """Text classification pipeline: tokenize -> model forward -> score post-processing."""

    # Class-level defaults; callers can override them via _sanitize_parameters.
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, **tokenizer_kwargs):
        """Split user kwargs into (preprocess, forward, postprocess) parameter dicts.

        Unrecognised kwargs are forwarded to the tokenizer during preprocess.
        """
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            # Fall back to the model config's preference when the caller did not choose.
            return_all_scores = self.model.config.return_all_scores
        if return_all_scores is not None:
            postprocess_params["return_all_scores"] = return_all_scores
        if isinstance(function_to_apply, str):
            # Accept string names ("sigmoid"/"softmax"/"none") as well as enum members.
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        # Forward step takes no extra parameters (middle dict is empty).
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if isinstance(args[0], str):
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        # Tokenize to PyTorch tensors for the forward pass.
        return_tensors = 'pt'
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, return_all_scores=False):
        """Convert model logits into label/score dicts.

        NOTE(review): the ``elif hasattr(self.model.config, "function_to_apply")``
        branch below looks unreachable in practice -- it sits inside
        ``if function_to_apply is None:`` *after* branches keyed on
        problem_type/num_labels, so a config-level function_to_apply is only
        consulted when neither of those matched. Confirm against upstream
        transformers before changing.
        """
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        # First element: logits for the single input sequence.
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if return_all_scores:
            # One dict per label, preserving label order from the config.
            return [{"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)]
        else:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
| 2.46875 | 2 |
users/__init__.py | Tech-With-Tim/models | 2 | 12773670 | <reponame>Tech-With-Tim/models
from .user import User
from .token import Token

# `__all__` must list *names* (strings), not the objects themselves:
# `from users import *` treats each entry as an identifier string.
# The original tuple of classes broke star-imports with a TypeError.
__all__ = (
    "User",
    "Token",
)
| 1.273438 | 1 |
vln/trainer.py | raphael-sch/map2seq_vln | 1 | 12773671 | import torch
from utils import AverageMeter
import time
import math
class OutdoorVlnTrainer:
    """Training/evaluation driver for an outdoor VLN agent.

    Wraps an agent (model + instruction encoder), an optimizer and an
    environment; `train` runs one epoch of rollouts with backprop, `eval_`
    runs rollouts without gradients and aggregates navigation metrics.
    """
    def __init__(self, opts, agent, optimizer):
        self.opts = opts
        self.agent = agent
        self.optimizer = optimizer
        # Prefer GPU when available; note the agent/model placement is assumed
        # to be handled elsewhere -- this attribute is only stored here.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    def train(self, epoch, train_env, tb_logger=None):
        """Run one training epoch over `train_env`, optionally logging to TensorBoard."""
        print('Training on {} env ...'.format(train_env.splits[0]))
        print('Learning rate: {}'.format(self.optimizer.param_groups[0]['lr']))
        self.agent.env = train_env
        self.agent.model.train()
        self.agent.instr_encoder.train()
        self.agent.env.reset_epoch()
        losses = AverageMeter()
        batch_time = AverageMeter()
        end = time.time()
        # Iterations per epoch: one batch per iteration, last batch may be partial.
        self.train_iters_epoch = math.ceil(len(train_env.data) / self.opts.batch_size)
        for iter_ in range(1, self.train_iters_epoch + 1):
            loss, _, _ = self.agent.rollout(is_test=False)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            batch_time.update(time.time() - end)
            losses.update(loss.item(), len(self.agent.env.batch))
            end = time.time()
            if tb_logger and iter_ % 10 == 0:
                # Global step across epochs for a continuous loss curve.
                current_iter = iter_ + (epoch - 1) * self.train_iters_epoch
                tb_logger.add_scalar('train/loss_train', loss, current_iter)
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\n'.format(
                epoch, iter_, self.train_iters_epoch, batch_time=batch_time,
                loss=losses), end='')
        if tb_logger:
            tb_logger.add_scalar('epoch/learning_rate', self.optimizer.param_groups[0]['lr'], epoch)
            tb_logger.add_scalar('epoch/train/loss', losses.avg, epoch)
    def eval_(self, epoch, val_env, tb_logger=None):
        """Evaluate the agent on `val_env`; returns a dict of navigation metrics."""
        phase = val_env.env.name
        print('Evaluating on {} env ...'.format(phase))
        losses = AverageMeter()
        batch_time = AverageMeter()
        self.agent.env = val_env
        self.agent.env.reset_epoch()
        self.agent.model.eval()
        self.agent.instr_encoder.eval()
        val_iters_epoch = math.ceil(len(val_env.data) / self.opts.batch_size)
        metrics = [0] * 3 # [TC, SPD, SED]
        if self.opts.CLS:
            metrics += [0]
        if self.opts.DTW:
            metrics += [0] * 5
        with torch.no_grad():
            end = time.time()
            for iter_ in range(1, val_iters_epoch + 1):
                _, trajs, agent_actions = self.agent.rollout(is_test=True)
                #print_actions(agent_actions)
                # eva_metrics accumulates per-episode sums into `metrics` in place.
                self.agent.env.eva_metrics(trajs, metrics)
                batch_time.update(time.time() - end)
                end = time.time()
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                    epoch, iter_, val_iters_epoch, batch_time=batch_time))
        # Average over the dataset; metrics below 1 are ratios and are
        # rescaled to percentages.
        metrics = [m / len(val_env.data) for m in metrics]
        metrics = [m * 100 if m < 1 else m for m in metrics]
        if tb_logger:
            tb_logger.add_scalar('epoch/{}/TC'.format(phase), metrics[0], epoch)
            tb_logger.add_scalar('epoch/{}/SPD'.format(phase), metrics[1], epoch)
            tb_logger.add_scalar('epoch/{}/SED'.format(phase), metrics[2], epoch)
        d_metrics = dict(TC=metrics[0], SPD=metrics[1], SED=metrics[2])
        print("=======[%s] Evaluation Metrics=======" % phase)
        print("TC: %.2f, SPD: %.2f, SED: %.2f" % tuple(metrics[:3]), end='')
        if self.opts.CLS:
            print(', CLS:%.2f' % metrics[3], end='')
            d_metrics['CLS'] = metrics[3]
        if self.opts.DTW:
            print(', DTW:%.2f, nDTW:%.2f, SDTW:%.2f' % tuple(metrics[-3:]))
            d_metrics['DTW'] = metrics[-3]
            d_metrics['nDTW'] = metrics[-2]
            d_metrics['SDTW'] = metrics[-1]
        else:
            print('')
        print("================================")
        return d_metrics
| 2.390625 | 2 |
src/epstats/server/res.py | samuelpucek/ep-stats | 12 | 12773672 | <gh_stars>10-100
import pandas as pd
from typing import List
from pydantic import BaseModel, Field
from ..toolkit import Evaluation
from .req import Experiment, Metric, Check
class MetricStat(BaseModel):
"""
Per-variant metric evaluation result.
"""
exp_variant_id: str = Field(title="Variant in the Experiment")
diff: float = Field(
title="Difference",
description="""Relative difference of means of this variant and control variant.
If this is a variant `b` and `a` is the control variant, then `diff = (b.mean - a.mean) / a.mean`.""",
)
mean: float = Field(
title="Metric Mean",
description="""Nominator and denominator to calculate the mean
are given in metric definition. `mean = nominator / denominator`.""",
)
sum_value: float = Field(
title="Metric Value",
description="""Value of the metric, it is given by the
nominator in the metric definition.""",
)
p_value: float = Field(
title="p-Value",
description="""We calculate p-value (under `confidence_level` statistical significance) of the relative
difference (`diff`) of this variant mean and the control variant mean. We use
[2-tailed Welch's test](https://en.wikipedia.org/wiki/Welch%27s_t-test)
with unknown and unequal variance assumption and Welch–Satterthwaite equation approximation of degrees
of freedom.""",
)
confidence_interval: float = Field(
title="Confidence Interval",
description="""Confidence interval for relative difference ('diff`)
of means of this variant and control variant - `[mean - confidence_interval, mean + confidence_interval]`.
Associated confidence level is the next parameter.""",
)
confidence_level: float = Field(
title="Confidence Level (Statistical Significance)",
description="""Confidence level used
to compute (obtain) `confidence_interval`.""",
)
@staticmethod
def from_df(df: pd.DataFrame):
return [
MetricStat(
exp_variant_id=r["exp_variant_id"],
diff=r["diff"],
mean=r["mean"],
sum_value=r["sum_value"],
p_value=r["p_value"],
confidence_interval=r["confidence_interval"],
confidence_level=r["confidence_level"],
)
for i, r in df.iterrows()
]
class MetricResult(BaseModel):
    """
    Result of single metric evaluation.
    """

    id: int = Field(
        title="Metric Id",
        description="""Database id of the metric, not used at the moment in ep-stats""",
    )
    name: str = Field(
        title="Metric Name",
        description="""Official metric name as it appears in EP.
        The name is only for debugging and has no meaning for ep-stats.""",
    )
    stats: List[MetricStat] = Field(
        title="Per-variant statistics",
        description="""List with one entry per
        variant statistical results.""",
    )
    @staticmethod
    def from_df(metrics: List[Metric], df: pd.DataFrame):
        """Build one MetricResult per requested metric from the rows matching its metric_id."""
        return [MetricResult(id=m.id, name=m.name, stats=MetricStat.from_df(df[df.metric_id == m.id])) for m in metrics]
class CheckStat(BaseModel):
    """Single named variable/value pair produced by a check evaluation."""

    variable_id: str = Field(
        title="Check Variable",
        description="""Every check can return different
        variables and their values. E.g. SRM check returns `test_stat` and `p_value` variables with
        their `value`s.""",
    )
    value: float = Field(
        title="Value of the Variable",
        description="""Value of some variable returned by
        the check. E.g. SRM check returns `test_stat` and `p_value` variables with
        their `value`s.""",
    )
    @staticmethod
    def from_df(df: pd.DataFrame):
        """Build one CheckStat per dataframe row (columns: variable_id, value)."""
        return [CheckStat(variable_id=r["variable_id"], value=r["value"]) for i, r in df.iterrows()]
class CheckResult(BaseModel):
    """
    Result of single check evaluation.
    """

    id: int = Field(
        title="Check Id",
        description="Database id of the check, not used at the moment.",
    )
    name: str = Field(
        title="Check Name",
        description="""Official check name as it appears in EP.
        The name is only for debugging and has no meaning for ep-stats.""",
    )
    stats: List[CheckStat] = Field(
        title="Per-variant statistics",
        description="""List with one entry per
        variant statistical results.""",
    )
    @staticmethod
    def from_df(checks: List[Check], df: pd.DataFrame):
        """Build one CheckResult per requested check from the rows matching its check_id."""
        return [CheckResult(id=c.id, name=c.name, stats=CheckStat.from_df(df[df.check_id == c.id])) for c in checks]
class ExposureStat(BaseModel):
    """
    Exposures in the experiment per-variant.
    """

    exp_variant_id: str = Field(title="Variant in the Experiment")
    count: int = Field(
        title="Per-variant exposures",
        description="""Exposure count of experiment (randomization) unit.""",
    )
    @staticmethod
    def from_df(df: pd.DataFrame):
        """Build one ExposureStat per dataframe row (columns: exp_variant_id, exposures)."""
        return [ExposureStat(exp_variant_id=r["exp_variant_id"], count=r["exposures"]) for i, r in df.iterrows()]
class ExposureResult(BaseModel):
    """
    Exposures in the experiment.
    """

    unit_type: str = Field(
        title="Experiment/randomization Unit Type",
        description="""Experiment (randomization) unit type is
        needed to correctly retrieve number of exposures per experiment variant.""",
    )
    stats: List[ExposureStat] = Field(
        title="Experiment Exposures",
        description="""List with experiment variant exposure counts per entry.""",
    )
    @staticmethod
    def from_df(experiment: Experiment, df: pd.DataFrame):
        """Wrap per-variant exposure rows together with the experiment's unit type."""
        return ExposureResult(unit_type=experiment.unit_type, stats=ExposureStat.from_df(df))
class Result(BaseModel):
"""
Result of experiment evaluation.
Top-level element in the response.
"""
id: str = Field(
title="Experiment Id",
)
metrics: List[MetricResult] = Field(
title="Metric Results",
description="""List with one entry per evaluated metric.""",
)
checks: List[CheckResult] = Field(
title="Check Results",
description="""List with one entry per evaluated check.""",
)
exposure: ExposureResult = Field(title="Experiment Exposures")
@staticmethod
def from_evaluation(experiment: Experiment, evaluation: Evaluation):
metrics = MetricResult.from_df(experiment.metrics, evaluation.metrics)
checks = CheckResult.from_df(experiment.checks, evaluation.checks)
exposure = ExposureResult.from_df(experiment, evaluation.exposures)
return Result(id=experiment.id, metrics=metrics, checks=checks, exposure=exposure)
class Config:
schema_extra = {
"example": {
"id": "test-conversion",
"metrics": [
{
"id": 1,
"name": "Click-through Rate",
"stats": [
{
"exp_variant_id": "a",
"diff": 0,
"mean": 0.23809523809523808,
"sum_value": 5,
"p_value": 1,
"confidence_interval": 1.1432928868841614,
"confidence_level": 0.95,
},
{
"exp_variant_id": "b",
"diff": 0.13076923076923078,
"mean": 0.2692307692307692,
"sum_value": 7,
"p_value": 1,
"confidence_interval": 1.2327467657322932,
"confidence_level": 0.95,
},
{
"exp_variant_id": "c",
"diff": 0.26,
"mean": 0.3,
"sum_value": 9,
"p_value": 1,
"confidence_interval": 1.352808784877644,
"confidence_level": 0.95,
},
],
}
],
"checks": [
{
"id": 1,
"name": "SRM",
"stats": [
{"variable_id": "p_value", "value": 0.4528439055646014},
{"variable_id": "test_stat", "value": 1.5844155844155843},
{"variable_id": "confidence_level", "value": 0.999},
],
}
],
"exposure": {
"unit_type": "test_unit_type",
"stats": [
{"exp_variant_id": "a", "count": 21},
{"exp_variant_id": "b", "count": 26},
{"exp_variant_id": "c", "count": 30},
],
},
}
}
| 2.90625 | 3 |
main.py | Vexus178/Hikiri-chan | 0 | 12773673 | <filename>main.py
import inspect
import os, sys
from imgurpython import ImgurClient
import webbrowser
import praw
import random
client_id = 'imgur id'
client_secret = 'imgur secret'
access_token = '<PASSWORD> token'
refresh_token = '<PASSWORD>'
imgur_dictionary = None
with open("count.txt", "r") as coun:
count = coun.read()
coun.close()
client = ImgurClient(client_id, client_secret, access_token, refresh_token)
r = praw.Reddit('Hikiri', user_agent="Hikiri-chan by /u/Vexus178")
def upload(client):
lista = []
filez = None
filez = random.choice(os.listdir("photos"))
with open("was_uploaded.txt", "r") as fi:
lines = fi.readlines()
for line in lines:
lista.append(line.strip("\n"))
fi.close()
if filez in lista:
print "Item was uploaded Before"
return None
else:
with open("was_uploaded.txt", "a") as fi:
fi.write(filez+"\n")
fi.close()
up = client.upload_from_path("photos/%s"% filez)
print "Uploaded to "+up['link']
return up
def submit(imgur_dictionary, r, count):
    # Post the freshly uploaded image to reddit, reply with an attribution
    # comment, and bump the persistent post counter stored in count.txt.
    count = unicode(int(count) + 1)
    subreddit = r.subreddit("reddit_api_test")
    title = "Daily Photo #"+count
    post = subreddit.submit(title, url=imgur_dictionary['link'])
    post.reply("This Message is Generated by [Hikiri-chan](https://github.com/Vexus178/Hikiri-chan) written By /u/Vexus178")
    with open("count.txt", "w") as coun:
        coun.write(count.encode("utf-8"))
        coun.close()
    return True
if __name__ == '__main__':
    # Retry until upload() returns a dict, i.e. picks a photo that was not
    # uploaded before.
    # NOTE(review): if every photo in ./photos has already been uploaded this
    # loop never terminates -- confirm intended behaviour.
    while not isinstance(imgur_dictionary, dict):
        imgur_dictionary = upload(client)
    submit(imgur_dictionary, r, count)
    exit()
| 3.140625 | 3 |
src/Server/config/settings/productiondemo.py | tejpratap545/denselight_system | 0 | 12773674 | <gh_stars>0
from .base import * # noqa pylint: disable=unused-import,unused-wildcard-import,wildcard-import
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
import logging
import sentry_sdk
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
# NOTE(review): DEBUG=True together with ALLOWED_HOSTS=["*"] is unsafe for a
# real production deployment; presumably acceptable only because this module
# is the "productiondemo" settings -- confirm before reusing elsewhere.
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# NOTE(review): the hard-coded fallback secret key below must not be relied
# on outside a demo environment; always set SECRET_KEY in the environment.
SECRET_KEY = os.environ.get(
    "SECRET_KEY", "<KEY>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
DATABASES["default"]["ATOMIC_REQUESTS"] = True
INSTALLED_APPS += [
"drf_spectacular",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.admin",
]
MIDDLEWARE += [
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
]
#
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
],
},
}
]
EMAIL_HOST = "smtp.gmail.com"
EMAIL_PORT = 587
EMAIL_HOST_USER = "<EMAIL>"
EMAIL_HOST_PASSWORD = "<PASSWORD>"
EMAIL_USE_TLS = True
EMAIL_USE_SSL = False
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
CORS_ALLOW_ALL_ORIGINS = True
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
"backend.Profile.authentication.JWTAuthentication",
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PERMISSION_CLASSES": [],
"DEFAULT_RENDERER_CLASSES": [
"rest_framework.renderers.JSONRenderer",
"rest_framework.renderers.BrowsableAPIRenderer",
],
"DEFAULT_PARSER_CLASSES": [
"rest_framework.parsers.JSONParser",
"rest_framework.parsers.FormParser",
"rest_framework.parsers.MultiPartParser",
"rest_framework.parsers.FileUploadParser",
],
"DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
"DEFAULT_FILTER_BACKENDS": (
"drf_spectacular.contrib.django_filters.DjangoFilterBackend",
),
}
SPECTACULAR_SETTINGS = {
"SCHEMA_PATH_PREFIX": r"api",
# available SwaggerUI configuration parameters
# https://swagger.io/docs/open-source-tools/swagger-ui/usage/configuration/
"SWAGGER_UI_SETTINGS": {
"deepLinking": True,
"persistAuthorization": True,
"displayOperationId": True,
"filter": True,
"defaultModelsExpandDepth": 5,
"defaultModelExpandDepth": 5,
},
# General schema metadata. Refer to spec for valid inputs
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#openapi-object
"TITLE": "Denselight API",
"DESCRIPTION": "Api for an Denselight Application build with django and django rest framework",
# Optional: MAY contain "name", "url", "email"
"CONTACT": {
"name": "<NAME>",
"email": "<EMAIL>",
},
"LICENSE": {
"name": "MIT",
"url": "https://github.com/tejpratap545/denselight_system/blob/main/LICENSE",
},
# Optional: MUST contain "name", MAY contain URL
"VERSION": "1.0.0",
# available SwaggerUI versions: https://github.com/swagger-api/swagger-ui/releases
"SWAGGER_UI_DIST": "//unpkg.com/swagger-ui-dist@3.36.0", # default
# "SWAGGER_UI_FAVICON_HREF": STATIC_URL + "shopit.png", # default is swagger favicon
# Oauth2 related settings. used for example by django-oauth2-toolkit.
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#oauth-flows-object
# "OAUTH2_FLOWS": [],
# "OAUTH2_AUTHORIZATION_URL": None,
# "OAUTH2_TOKEN_URL": "http://127.0.0.1:8000/api/auth/token/",
# "OAUTH2_REFRESH_URL": "http://127.0.0.1:8000/api/auth/token/",
# "OAUTH2_SCOPES": None,
}
# Sentry
# ------------------------------------------------------------------------------
# https://docs.sentry.io/platforms/python/guides/django/#Configure
# SECURITY
# ------------------------------------------------------------------------------
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = os.environ.get("DJANGO_SECURE_SSL_REDIRECT", default=True)
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = os.environ.get(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
SECURE_HSTS_PRELOAD = os.environ.get("DJANGO_SECURE_HSTS_PRELOAD", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = os.environ.get(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
SENTRY_DSN = os.environ.get("SENTRY_DSN")
SENTRY_LOG_LEVEL = os.environ.get("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
event_level=logging.ERROR, # Send errors as events
)
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[sentry_logging, DjangoIntegration(), CeleryIntegration()],
)
CORS_ALLOW_ALL_ORIGINS = True
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
| 1.671875 | 2 |
_import_grammars.py | andsild/dragonfly-modules | 1 | 12773675 | <reponame>andsild/dragonfly-modules<gh_stars>1-10
"""
To load grammars, natlink has a file core\natlinkmain.py
That file uses reflection and many if-statements, making it hard to trace bugs from reloading.
Most importantly, it doesn't load directories.
This files lets you define grammar-rules whereever you want (also outside of this directory :) )
It also adds a reload feature, so we can break out of errors from natlink
(example bug: natlink reloads a file but not the global variables, giving a lot of "None" errors)
The downsides is that:
We can no longer have context-specific grammars (e.g. grammars that are only enabled when chrome is)
(I hope to fix #2 sometime in the future, although PRs are very welcome)
"""
from dragonfly import (Grammar, MappingRule, Function)
import __builtin__
from IPython.lib import deepreload
import sys
import datetime
# see also __init__.py in that grammars directory: you need to define __all__
from grammars import *
from grammars import __all__ as grammarFiles
grammarFiles = map(lambda s: "grammars." + s, grammarFiles)
grammar = Grammar("to rule them all")
def load_module(module_name):
if len(module_name) > 8 and module_name[-8:] == "__init__": return
module = sys.modules[module_name]
try:
print "Loaded module %s ... " % module_name,
deepreload.reload(module, exclude=('sys', 'os.path', 'builtins', '__main__', 'numpy', 'numpy._globals', 'dragonfly', 'natlink', 'os', 'glob', 're', 'subprocess', 'posixpath', 'stat', 'ctypes', 'time', 'win32con', 'dragonfly.actions', 'win32api', 'copy_reg', 'types'))
import_rule = getattr(__import__(module_name, fromlist=["rules"]), "rules")
grammar.add_rule(import_rule)
print "successfully"
except AttributeError as ae:
print "successfully"
except RuntimeError as runtime_error:
"There was an error in file %s" % module_name
print runtime_error, '\n', '\n'
except NameError as nameerror:
"Forgot something in file %s?" % module_name
print nameerror, '\n', '\n'
def reload_grammars():
    # Tear down the current grammar and rebuild it from every module listed
    # in grammarFiles, then re-register the reloader rule itself and load.
    global grammar
    unload()
    assert grammar is None
    grammar = Grammar("to rule them all")
    now = datetime.datetime.now()
    print "begun reloading at %02d:%02d" % (now.hour, now.minute)
    # NOTE: relies on Python 2's eager map(); under Python 3 this would be a
    # lazy iterator and no module would actually be loaded.
    map(load_module, grammarFiles)
    grammar.add_rule(get_reloader_rules()) # for the "reload grammar module" code in get_reloader_rules
    grammar.load()
    print "reloaded all modules"
def unload():
    """ Needs to be here for natlink
    """
    # natlink calls unload() on every grammar module at shutdown/reload;
    # dropping the reference lets reload_grammars() build a fresh Grammar.
    global grammar
    grammar.unload()
    grammar = None
def get_reloader_rules():
    # Voice command that triggers a full grammar reload (see reload_grammars).
    return MappingRule(name = 'Reloader rules',
            mapping = {
                'reload grammar module': Function(reload_grammars),
            })
reload_grammars() | 1.992188 | 2 |
m209/tests/test_main.py | gremmie/m209 | 0 | 12773676 | <filename>m209/tests/test_main.py
# Copyright (C) 2013 by <NAME>.
# This file is part of m209, the M-209 simulation.
# m209 is released under the MIT License (see LICENSE.txt).
import os
import tempfile
import unittest
from ..main import main
class KeyGenTestCase(unittest.TestCase):
    """Tests for the `keygen` subcommand of the m209 command line tool."""

    def setUp(self):
        # A NamedTemporaryFile gives us a unique key-file path; the file
        # already exists on disk, which test_overwrite relies on.
        self.fp = tempfile.NamedTemporaryFile(mode='w')
    def tearDown(self):
        self.fp.close()
    def test_overwrite(self):
        """Verify we exit if the key file already exists"""
        argv = ['keygen', '--start=YA', '--number=100', '-z', self.fp.name]
        self.assertRaises(SystemExit, main, argv)
    def test_too_many(self):
        """Verify we exit if we can't generate N key lists if start is too
        high
        """
        argv = ['keygen', '--start=YA', '--number=100', '-o', '-z', self.fp.name]
        self.assertRaises(SystemExit, main, argv)
    def test_nominal_random(self):
        """Test we can generate N key lists with random indicators"""
        argv = ['keygen', '--number=10', '-o', '-z', self.fp.name]
        main(argv)
    def test_nominal_start(self):
        """Test we can generate N key lists with a fixed starting indicator"""
        argv = ['keygen', '--start=GG', '--number=10', '-o', '-z', self.fp.name]
        main(argv)
class EncryptDecryptBadArgsTestCase(unittest.TestCase):
    """Argument validation shared by the `encrypt` and `decrypt` subcommands."""

    def test_no_key_file(self):
        """Ensure we exit if key file doesn't exist"""
        # Create and immediately close a NamedTemporaryFile so that `name`
        # refers to a path that no longer exists.
        fp = tempfile.NamedTemporaryFile()
        name = fp.name
        fp.close()
        argv = ['encrypt', '--text=TEST', '-z', name]
        self.assertRaises(SystemExit, main, argv)
        argv = ['decrypt', '--text=TEST', '-z', name]
        self.assertRaises(SystemExit, main, argv)
class EncryptDecryptTestCase(unittest.TestCase):
    """End-to-end tests for `encrypt`/`decrypt` against a freshly generated key file.

    NOTE(review): test_decrypt_text expects SystemExit (its ciphertext names
    key list GA, which the GG-only key file lacks) while
    test_decrypt_text_no_key_list succeeds with GG -- the two method names
    look swapped relative to their behaviour; confirm against project intent.
    """

    def setUp(self):
        # Generate 10 key lists starting at indicator GG into a temp key file.
        self.fp = tempfile.NamedTemporaryFile(mode='w')
        argv = ['keygen', '--start=GG', '--number=10', '-o', '-z', self.fp.name]
        main(argv)
    def tearDown(self):
        self.fp.close()
    def test_conflicting_sources(self):
        """Ensure -f or -t is supplied but not both"""
        argv = ['encrypt', '--text=TEST', '-f', '-', '-z', self.fp.name]
        self.assertRaises(SystemExit, main, argv)
        argv = ['encrypt', '-z', self.fp.name]
        self.assertRaises(SystemExit, main, argv)
        argv = ['decrypt', '--text=TEST', '-f', '-', '-z', self.fp.name]
        self.assertRaises(SystemExit, main, argv)
        argv = ['decrypt', '-z', self.fp.name]
        self.assertRaises(SystemExit, main, argv)
    def test_encrypt_text(self):
        argv = ['encrypt', '--text=TEST', '--key-list-ind=GG', '-z', self.fp.name]
        main(argv)
    def test_encrypt_text_no_key_list(self):
        # GA was never generated in setUp, so encryption must fail.
        argv = ['encrypt', '--text=TEST', '--key-list-ind=GA', '-z', self.fp.name]
        self.assertRaises(SystemExit, main, argv)
    def test_encrypt_file(self):
        # delete=False so the file can be reopened by name on all platforms.
        infile = tempfile.NamedTemporaryFile(mode='w', delete=False)
        infile.write("TEST")
        filename = infile.name
        infile.close()
        argv = ['encrypt', '-f', filename, '--key-list-ind=GG', '-z', self.fp.name]
        try:
            main(argv)
        finally:
            os.remove(filename)
    def test_decrypt_text(self):
        argv = ['decrypt', '-t', 'OOOZS IENGA DSGJX OOOZS IENGA', '-z', self.fp.name]
        self.assertRaises(SystemExit, main, argv)
    def test_decrypt_text_no_key_list(self):
        argv = ['decrypt', '-t', 'OOOZS IENGG DSGJX OOOZS IENGG', '-z', self.fp.name]
        main(argv)
    def test_decrypt_file(self):
        infile = tempfile.NamedTemporaryFile(mode='w', delete=False)
        infile.write("OOOZS IENGG DSGJX OOOZS IENGG")
        filename = infile.name
        infile.close()
        argv = ['decrypt', '-f', filename, '-z', self.fp.name]
        try:
            main(argv)
        finally:
            os.remove(filename)
| 2.796875 | 3 |
carebt_kb/carebt_kb/plugin_base.py | adlatusrobotics/carebt_ros2 | 1 | 12773677 | <reponame>adlatusrobotics/carebt_ros2<gh_stars>1-10
# Copyright 2022 <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rclpy.node import Node
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from carebt_simple_kb.carebt_simple_kb import KbServer # pragma: no cover
def import_class(name: str):
    """Resolve a dotted path like ``"pkg.module.ClassName"`` to the named attribute.

    The last dot-separated component is looked up as an attribute on the
    module named by the leading components. Raises ImportError when the
    module cannot be imported and AttributeError when the attribute is missing.
    """
    components = name.split('.')
    # Bug fixes: `fromlist` must be a *list* of names -- passing the bare
    # string made __import__ iterate its characters. The original `-> str`
    # return annotation was also wrong: the resolved attribute (usually a
    # class) is returned, not a string.
    mod = __import__(".".join(components[0:-1]), fromlist=[components[-1]])
    clazz = getattr(mod, components[-1])
    return clazz
class PluginBase():
    """Base class for knowledge-base plugins.

    Stores a reference to the owning KbServer and immediately invokes
    `on_init_callback`, which subclasses override to perform their setup.
    """
    def __init__(self, kb_server: 'KbServer', plugin_name: str):
        self._kb_server = kb_server
        # Give subclasses a setup hook right at construction time.
        self.on_init_callback(plugin_name)
    # PUBLIC
    def on_init_callback(self, plugin_name: str):
        """Called once from __init__; default implementation does nothing."""
        pass
    def on_update_callback(self):
        """Called on knowledge-base updates; default implementation does nothing."""
        pass
372.Super Pow.py | zyd2001/LeetCode | 0 | 12773678 | class Solution:
def superPow(self, a: int, b: List[int]) -> int:
if (a % 1337 == 0):
return 0
return pow(a, reduce(lambda x, y: x * 10 + y, b), 1337) | 2.9375 | 3 |
python/pts-multi.py | vasaantk/bin | 0 | 12773679 | #! /usr/bin/env python
# Written by <NAME>. Friday, 27 May 2016.
import re
from pylab import *
import sys
from mpldatacursor import datacursor
usrFile = sys.argv[1:]
# With no arguments, print the usage text and quit (Python 2 print syntax).
if len(usrFile) == 0:
    print ""
    print "# pts-multi.py takes input from multiple COMP.PTS files"
    print "# from <NAME>'s pts-diff.f and plots the"
    print "# relative positions of the features."
    print "# It is a variation of pts-test.py."
    print "# The component positions and velocities are flux weighted."
    print "# The script is useful for allocating maser emission across"
    print "# several epochs to the same feature."
    print ""
    print "# plot = produces scatterplot."
    print "# err = plots flux weighted errorbars."
    print "# atate = annotate the spots with their component number."
    print "# vatate = annotate the spots with their velocity identifier."
    print "# vel = specify velocty range for the colourbar."
    print "# scale = scale the size of the datapoints by a factor."
    print "# print = print the details of the flux weighted components."
    print "# sort = options are: comp, chan, vels, flux"
    print "# ref = reference component to fix at origin."
    print ""
    print "--> pts-multi.py file_name.COMP.PTS plot* vel=xx.x,yy.y atate scale=xx sort=xxxx ref=xx"
    print ""
    exit()
# Mode flags; flipped later if the user supplied the matching option.
defaultVels = True # Otherwise usrVelLim
defaultScale = True # Otherwise usrScale
offsetRequest = False # Reposition the centre position
# Regex fragments used to parse one data row of a COMP.PTS file.
# NOTE(review): the '.' in the float pattern is unescaped, so it matches any
# character, not only a decimal point — verify against pts-diff.f output.
ints = '\s+(\d+)' # 'Channel' variable from *.COMP
floats = '\s+([+-]?\d+.\d+)' # Any float variable from *.COMP
manyFloats = 14*floats # space+floats seq gets repeated this many times after chans
#=====================================================================#
#=====================================================================#
# Setup preliminaries. #
#=====================================================================#
#=====================================================================#
#=====================================================================
# Adaptation of <NAME>'s MATlAB function
#
def wMean(x, W, rms=False):
    """Return the weighted mean of x with weights W.

    If rms is True, return a (mean, rms) tuple where rms is the weighted
    root-mean-square deviation about the mean; a single-element x has an
    rms of 0 by definition. `multiply`/`sqrt` come from the pylab star
    import at the top of the script.
    """
    wmean = sum(multiply(x, W))/sum(W)  # element-by-element multiplication
    if len(x) == 1:
        wrms = 0
    else:
        # Fixed: the original comprehension re-bound the parameter name 'x'
        # ([(x - wmean)**2 for x in x]); use a fresh name for the deviations.
        deviations = [(value - wmean)**2 for value in x]
        wrms = sqrt(sum(multiply(deviations, W))/sum(W))
    if rms:
        return wmean, wrms
    else:
        return wmean
#=====================================================================
# User requested reference component:
#
for i in usrFile:
    compRequest = re.search('ref='+'(\d+)',i)
    if compRequest:
        relativeComp = int(compRequest.group(1))
        offsetRequest = True
#=====================================================================
# Scale the maser spots by a factor of userScale:
#
for i in usrFile:
    userScale = re.search('scale='+'([+-]?\d+)',i)
    if userScale:
        defaultScale = False # Don't use scaleFactor = 1 if user has defined it in usrFile
        scaleFactor = int(userScale.group(1))
if defaultScale: # This allows "scale=" to appear anywhere in usrFile
    scaleFactor = 1
#=====================================================================
# Determine which are the COMP.PTS files:
#
ptsFiles = []
for i in usrFile:
    compPTS = re.search('COMP.PTS',i)
    if compPTS:
        ptsFiles.append(i)
#=====================================================================
# Find the maximum/minimum velocities from all .COMP.PTS files:
#
vels = []
vTmp = []
velMask = []
for pts in range(len(ptsFiles)):
    for line in open(ptsFiles[pts],'r'):
        reqInfo = re.search(ints + floats + ints + manyFloats, line)
        if reqInfo: # Populate temp arrays, which are reset after each component is harvested
            vels.append(float(reqInfo.group(2)))
        if line == '\n': # Allow each component to exist as its own list within the complete array
            vels.append(vTmp)
            vTmp = []
    # NOTE(review): close() here is pylab's figure-close, called with a
    # filename string — it does not close the file handle opened above;
    # the handle is left to the garbage collector. Verify intent.
    close(ptsFiles[pts])
    vels.append(vTmp)
# vTmp is never appended to in this pass, so the list entries are always
# empty and are filtered out below, leaving only the float velocities.
for n in xrange(len(vels)):
    if vels[n] != []:
        velMask.append(int(n))
vels = [vels[m] for m in velMask]
velsAbsMax = max(vels)
velsAbsMin = min(vels)
#=====================================================================
# Sorting
# http://stackoverflow.com/questions/6618515/sorting-list-based-on-values-from-another-list
sortRequest = 'chan'
for i in usrFile:
    usrSort = re.search('sort=(\S+)',i)
    if usrSort:
        if usrSort.group(1) in ['comp','vels','xoff','yoff','flux','chan']:
            sortRequest = str(usrSort.group(1))
            usrFile.append('print')
#=====================================================================
# Condition to automatically toggle plotting
#
if 'atate' in usrFile or 'vatate' in usrFile:
    usrFile.append('plot')
#=====================================================================#
#=====================================================================#
# End preliminaries. #
#=====================================================================#
#=====================================================================#
for pts in range(len(ptsFiles)): # Iterate through each of the input files.
    #=====================================================================
    # Define variables:
    #
    cTmp = [] # chan temp
    vTmp = [] # velo temp
    iTmp = [] # flux temp (integrated)
    pTmp = [] # peak temp
    xTmp = [] # xoff temp
    xeTp = [] # xerr temp
    yTmp = [] # yoff temp
    yeTp = [] # xerr temp
    mTmp = [] # Co(m)p temp
    chan = []
    vels = []
    flux = []
    peak = []
    xoff = []
    xerr = []
    yoff = []
    yerr = []
    comp = []
    compMask = [] # Subset of arrays which are not blank
    velAvg = [] # Average of each component
    homoVelTmp = []
    homoVel = [] # Homogenised velocity
    #=====================================================================
    # Harvest values:
    #
    for line in open(ptsFiles[pts],'r'):
        reqInfo = re.search(ints + floats + ints + manyFloats, line)
        if reqInfo: # Populate temp arrays, which are reset after each component is harvested
            mTmp.append( int(reqInfo.group(1)))
            vTmp.append(float(reqInfo.group(2)))
            cTmp.append( int(reqInfo.group(3)))
            iTmp.append(float(reqInfo.group(4)))
            pTmp.append(float(reqInfo.group(5)))
            xTmp.append(float(reqInfo.group(8)))
            xeTp.append(float(reqInfo.group(9)))
            yTmp.append(float(reqInfo.group(10)))
            yeTp.append(float(reqInfo.group(11)))
        if line == '\n': # This statement allows each component to exist as its own list within the complete array
            comp.append(mTmp)
            vels.append(vTmp)
            chan.append(cTmp)
            flux.append(iTmp)
            peak.append(pTmp)
            xoff.append(xTmp)
            xerr.append(xeTp)
            yoff.append(yTmp)
            yerr.append(yeTp)
            mTmp = [] # Reset temp arrays
            vTmp = []
            cTmp = []
            iTmp = []
            pTmp = []
            xTmp = []
            xeTp = []
            yTmp = []
            yeTp = []
    # NOTE(review): pylab's close() called with a filename string — it does
    # not close the file handle opened in the for statement above.
    close(ptsFiles[pts])
    #=====================================================================
    # The final values from *Tmp need to be manually added:
    #
    comp.append(mTmp)
    vels.append(vTmp)
    chan.append(cTmp)
    flux.append(iTmp)
    peak.append(pTmp)
    xoff.append(xTmp)
    xerr.append(xeTp)
    yoff.append(yTmp)
    yerr.append(yeTp)
    #=====================================================================
    # Based on 'comp' array, determine the positions of the '\n's:
    #
    for n in xrange(len(comp)):
        if comp[n] != []:
            compMask.append(int(n))
    #=====================================================================
    # Remove the '\n's:
    #
    comp = [comp[i] for i in compMask]
    vels = [vels[i] for i in compMask]
    chan = [chan[i] for i in compMask]
    flux = [flux[i] for i in compMask]
    peak = [peak[i] for i in compMask]
    xoff = [xoff[i] for i in compMask]
    xerr = [xerr[i] for i in compMask]
    yoff = [yoff[i] for i in compMask]
    yerr = [yerr[i] for i in compMask]
    #=====================================================================
    # Determine weighted means:
    #
    vels = [wMean(vels[i],flux[i]) for i in xrange(len(comp))]
    xoff = [wMean(xoff[i],flux[i]) for i in xrange(len(comp))]
    xerr = [wMean(xerr[i],flux[i]) for i in xrange(len(comp))]
    yoff = [wMean(yoff[i],flux[i]) for i in xrange(len(comp))]
    yerr = [wMean(yerr[i],flux[i]) for i in xrange(len(comp))]
    # These do not need weighted means, using the element with greatest flux:
    comp = [comp[i][0] for i in xrange(len(comp))]
    chan = [chan[i][flux[i].index(max(flux[i]))] for i in xrange(len(chan))]
    peak = [peak[i][flux[i].index(max(flux[i]))] for i in xrange(len(comp))]
    flux = [flux[i][flux[i].index(max(flux[i]))] for i in xrange(len(comp))]
    #=====================================================================
    # Component uniqueness test.
    #
    occuranceTest = True
    for i in xrange(len(comp)):
        occuranceCount = comp.count(comp[i])
        if occuranceCount != 1:
            if occuranceTest:
                print "\t\tWARNING. Component ** %d ** is not unique in %s"%(comp[i],ptsFiles[pts])
                occuranceTest = False
    #=====================================================================
    # Apply offset to obtain relative position.
    #
    if offsetRequest:
        compCountKeep = 0
        for i in xrange(len(comp)):
            if relativeComp == comp[i]: # If component exists in the .PTS file
                compCountKeep = compCountKeep + 1
        # Determine position of component of relative spot in the array:
        # NOTE(review): if the requested ref component is absent from this
        # file, compPosArray is empty and compPosArray[0] raises IndexError.
        compPosArray = [i for i,x in enumerate(comp) if x == relativeComp]
        compPos = compPosArray[0]
        # Now compute relative x/y-offsets:
        xZero = xoff[compPos]
        yZero = yoff[compPos]
        xoff = [i-xZero for i in xoff]
        yoff = [i-yZero for i in yoff]
    #=====================================================================
    # Sorting
    #
    if sortRequest == 'comp':
        vels = [x for (y,x) in sorted(zip(comp,vels),key=lambda pair: pair[0])]
        xoff = [x for (y,x) in sorted(zip(comp,xoff),key=lambda pair: pair[0])]
        xerr = [x for (y,x) in sorted(zip(comp,xerr),key=lambda pair: pair[0])]
        yoff = [x for (y,x) in sorted(zip(comp,yoff),key=lambda pair: pair[0])]
        yerr = [x for (y,x) in sorted(zip(comp,yerr),key=lambda pair: pair[0])]
        chan = [x for (y,x) in sorted(zip(comp,chan),key=lambda pair: pair[0])]
        flux = [x for (y,x) in sorted(zip(comp,flux),key=lambda pair: pair[0])]
        peak = [x for (y,x) in sorted(zip(comp,peak),key=lambda pair: pair[0])]
        comp = sorted(comp)
    if sortRequest == 'vels':
        comp = [x for (y,x) in sorted(zip(vels,comp),key=lambda pair: pair[0],reverse=True)]
        xoff = [x for (y,x) in sorted(zip(vels,xoff),key=lambda pair: pair[0],reverse=True)]
        xerr = [x for (y,x) in sorted(zip(vels,xerr),key=lambda pair: pair[0],reverse=True)]
        yoff = [x for (y,x) in sorted(zip(vels,yoff),key=lambda pair: pair[0],reverse=True)]
        yerr = [x for (y,x) in sorted(zip(vels,yerr),key=lambda pair: pair[0],reverse=True)]
        chan = [x for (y,x) in sorted(zip(vels,chan),key=lambda pair: pair[0],reverse=True)]
        flux = [x for (y,x) in sorted(zip(vels,flux),key=lambda pair: pair[0],reverse=True)]
        peak = [x for (y,x) in sorted(zip(vels,peak),key=lambda pair: pair[0],reverse=True)]
        vels = sorted(vels,reverse=True)
    if sortRequest == 'xoff':
        vels = [x for (y,x) in sorted(zip(xoff,vels),key=lambda pair: pair[0],reverse=True)]
        comp = [x for (y,x) in sorted(zip(xoff,comp),key=lambda pair: pair[0],reverse=True)]
        xerr = [x for (y,x) in sorted(zip(xoff,xerr),key=lambda pair: pair[0],reverse=True)]
        yoff = [x for (y,x) in sorted(zip(xoff,yoff),key=lambda pair: pair[0],reverse=True)]
        yerr = [x for (y,x) in sorted(zip(xoff,yerr),key=lambda pair: pair[0],reverse=True)]
        chan = [x for (y,x) in sorted(zip(xoff,chan),key=lambda pair: pair[0],reverse=True)]
        flux = [x for (y,x) in sorted(zip(xoff,flux),key=lambda pair: pair[0],reverse=True)]
        peak = [x for (y,x) in sorted(zip(xoff,peak),key=lambda pair: pair[0],reverse=True)]
        xoff = sorted(xoff,reverse=True)
    if sortRequest == 'yoff':
        vels = [x for (y,x) in sorted(zip(yoff,vels),key=lambda pair: pair[0],reverse=True)]
        xoff = [x for (y,x) in sorted(zip(yoff,xoff),key=lambda pair: pair[0],reverse=True)]
        xerr = [x for (y,x) in sorted(zip(yoff,xerr),key=lambda pair: pair[0],reverse=True)]
        comp = [x for (y,x) in sorted(zip(yoff,comp),key=lambda pair: pair[0],reverse=True)]
        yerr = [x for (y,x) in sorted(zip(yoff,yerr),key=lambda pair: pair[0],reverse=True)]
        chan = [x for (y,x) in sorted(zip(yoff,chan),key=lambda pair: pair[0],reverse=True)]
        flux = [x for (y,x) in sorted(zip(yoff,flux),key=lambda pair: pair[0],reverse=True)]
        peak = [x for (y,x) in sorted(zip(yoff,peak),key=lambda pair: pair[0],reverse=True)]
        yoff = sorted(yoff,reverse=True)
    if sortRequest == 'flux':
        vels = [x for (y,x) in sorted(zip(flux,vels),key=lambda pair: pair[0],reverse=True)]
        xoff = [x for (y,x) in sorted(zip(flux,xoff),key=lambda pair: pair[0],reverse=True)]
        xerr = [x for (y,x) in sorted(zip(flux,xerr),key=lambda pair: pair[0],reverse=True)]
        yoff = [x for (y,x) in sorted(zip(flux,yoff),key=lambda pair: pair[0],reverse=True)]
        yerr = [x for (y,x) in sorted(zip(flux,yerr),key=lambda pair: pair[0],reverse=True)]
        chan = [x for (y,x) in sorted(zip(flux,chan),key=lambda pair: pair[0],reverse=True)]
        comp = [x for (y,x) in sorted(zip(flux,comp),key=lambda pair: pair[0],reverse=True)]
        peak = [x for (y,x) in sorted(zip(flux,peak),key=lambda pair: pair[0],reverse=True)]
        flux = sorted(flux,reverse=True)
    if sortRequest == 'chan':
        vels = [x for (y,x) in sorted(zip(chan,vels),key=lambda pair: pair[0])]
        xoff = [x for (y,x) in sorted(zip(chan,xoff),key=lambda pair: pair[0])]
        xerr = [x for (y,x) in sorted(zip(chan,xerr),key=lambda pair: pair[0])]
        yoff = [x for (y,x) in sorted(zip(chan,yoff),key=lambda pair: pair[0])]
        yerr = [x for (y,x) in sorted(zip(chan,yerr),key=lambda pair: pair[0])]
        comp = [x for (y,x) in sorted(zip(chan,comp),key=lambda pair: pair[0])]
        flux = [x for (y,x) in sorted(zip(chan,flux),key=lambda pair: pair[0])]
        peak = [x for (y,x) in sorted(zip(chan,peak),key=lambda pair: pair[0])]
        chan = sorted(chan)
    #=====================================================================
    # Determine if user has requested for custom vel range:
    #
    for i in usrFile:
        usrVelLim = re.search('vel='+'([+-]?\d+.?\d+),([+-]?\d+.?\d+)',i)
        if usrVelLim:
            defaultVels = False # Don't use defaultVels if user has defined it in usrFile
            velOne = float(usrVelLim.group(1))
            velTwo = float(usrVelLim.group(2))
            if velOne > velTwo:
                velMax = velOne
                velMin = velTwo
            elif velTwo > velOne:
                velMax = velTwo
                velMin = velOne
            elif velOne == velTwo:
                print "User velocities are identical. Reverting to default."
                defaultVels = True
    if defaultVels: # Default vels are the min/max of the velAvg for each comp.
        velMin = velsAbsMin
        velMax = velsAbsMax
    #=====================================================================
    # Each component is assigned a single homogenised vel for all spots,
    # instead of each spot having its own individual vel:
    #
    homoVel = vels
    #=====================================================================
    # Format to match input .PTS file:
    #
    if 'print' in usrFile:
        print ""
        print str(ptsFiles[pts])
        for k in xrange(len(chan)):
            print '%6d %10.3f %4d %13.5f %13.5f %33.6f %10.7f %14.6f %10.7f'%(
                int(comp[k]),float(vels[k]),int(chan[k]),float(flux[k]),
                float(peak[k]),float(xoff[k]),float(xerr[k]),float(yoff[k]),
                float(yerr[k]))
        print ""
    #=====================================================================
    # Plots spot map of maser emission:
    #
    if 'plot' in usrFile:
        for j in xrange(len(chan)):
            if pts == 0: # First marker is a circle....
                flux[j] = flux[j] + 1.0 # Add 1 to ensure that components with flux<1 are not negative when log.
                scatter( xoff[j],yoff[j],s=scaleFactor*log(flux[j]),c=homoVel[j],cmap=matplotlib.cm.jet,vmin=velMin,vmax=velMax,marker="o")
            else: # ... second marker onwards corresponds to number of corners.
                flux[j] = flux[j] + 1.0 # Add 1 to ensure that components with flux<1 are not negative when log.
                scatter( xoff[j],yoff[j],s=scaleFactor*log(flux[j]),c=homoVel[j],cmap=matplotlib.cm.jet,vmin=velMin,vmax=velMax,marker=(pts+1,1,0))
            if 'err' in usrFile:
                errorbar(xoff[j],yoff[j],xerr=xerr[j],yerr=yerr[j])
            if 'atate' in usrFile:
                annotate(comp[j],xy=(xoff[j],yoff[j]))
            if 'vatate' in usrFile:
                annotate(float("{0:.1f}".format(vels[j])),xy=(xoff[j],yoff[j]))
#=====================================================================#
#=====================================================================#
# The main for-loop stops here. #
#=====================================================================#
#=====================================================================#
#=====================================================================
# Plot title and axes info.
#
if 'plot' in usrFile:
    titleName = ''
    for i in range(len(ptsFiles)):
        titleName = titleName + ptsFiles[i][:-9] + ' ** '
    titleName = titleName[:-5] # Remove the trailing "** " for the final title name
    gca().invert_xaxis()
    title(titleName)
    xlabel('x offset')
    ylabel('y offset')
    cbar = colorbar()
    cbar.set_label('Velocity')
    datacursor(hover=True)
    show()
| 2.15625 | 2 |
mq/credentials2.py | suparek/rabbitlib | 1 | 12773680 | # -*- coding: utf-8 -*
import hmac
import base64
import time
import hashlib
import os
import sys
import pika
class AliyunCredentialsProvider:
    """Derive the AMQP username/password for Aliyun from an access key pair.

    Given the Aliyun accessKey, accessSecret and the AMQP instanceId (which
    can be copied from the AMQP console front page), this computes the
    credentials used to open the AMQP connection. (Original note: suitable
    for Python 2.7 as well.)
    """
    ACCESS_FROM_USER = 0

    def __init__(self, access_key, access_secret, instanceId):
        self.accessKey = access_key
        self.accessSecret = access_secret
        self.instanceId = instanceId

    def get_username(self):
        """Base64 of 'ACCESS_FROM_USER:instanceId:accessKey'."""
        token = '{0}:{1}:{2}'.format(self.ACCESS_FROM_USER, self.instanceId, self.accessKey)
        return base64.b64encode(token.encode('utf-8'))

    def get_password(self):
        """Base64 of 'SIG:timestamp_ms', SIG = uppercase hex HMAC-SHA1.

        Per Aliyun's sample, the millisecond timestamp is the HMAC *key* and
        the access secret is the message.
        """
        timestamp = str(int(round(time.time() * 1000)))
        digest = hmac.new(timestamp.encode('utf-8'), self.accessSecret.encode('utf-8'), hashlib.sha1)
        signature = digest.hexdigest().upper()
        return base64.b64encode('{0}:{1}'.format(signature, timestamp).encode('utf-8'))
| 2.734375 | 3 |
back/boxtribute_server/models/definitions/size_range.py | flisowna/boxtribute | 0 | 12773681 | <reponame>flisowna/boxtribute
from peewee import CharField, IntegerField
from ...db import db
class SizeRange(db.Model):
    """Peewee model for a named group of sizes, backed by the legacy table."""
    # Display label of the size group; nullable in the legacy schema.
    label = CharField(null=True)
    # Ordering sequence number; nullable.
    seq = IntegerField(null=True)
    class Meta:
        # Keep the legacy database table name.
        table_name = "sizegroup"
| 2.359375 | 2 |
chat/admin.py | bekzod-fayzikuloff/djChat | 0 | 12773682 | from django.contrib import admin
from . import models
@admin.register(models.Message)
class MessageAdmin(admin.ModelAdmin):
    """Django admin configuration for chat messages."""
    # Sidebar filters in the changelist.
    list_filter = ('owner', 'created')
    # Columns shown in the changelist.
    list_display = ('text', 'to_chat')
    # NOTE(review): search_fields entries appear to be relations; Django
    # usually requires a field lookup such as 'owner__username' — verify
    # that searching does not raise at runtime.
    search_fields = ('owner', 'to_chat')
@admin.register(models.Chat)
class ChatAdmin(admin.ModelAdmin):
    """Default admin for Chat; no customisation."""
    pass
@admin.register(models.Member)
class MemberAdmin(admin.ModelAdmin):
    """Default admin for Member; no customisation."""
    pass
| 1.828125 | 2 |
yari/mixins/visibility.py | cr8ivecodesmith/yari | 0 | 12773683 | <reponame>cr8ivecodesmith/yari<filename>yari/mixins/visibility.py
"""
Visibility Mixin
"""
__all__ = ('Visibility',)
from kivy.properties import BooleanProperty
def handle_visibility(this, value):
    """Sync the widget's layout/opacity/enabled state with its ``visible`` flag.

    ``value`` is the property-changed payload and is intentionally unused;
    the current ``this.visible`` is read directly.
    """
    shown = bool(this.visible)
    this.size_hint_x = 1 if shown else 0
    this.opacity = 1 if shown else 0
    this.disabled = not shown
class Visibility:
    """Mixin adding a bindable ``visible`` flag.

    Changing ``visible`` (directly or via hide()/show()) triggers
    ``handle_visibility`` through the property binding set up in __init__.
    """

    visible = BooleanProperty(True)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # React to every change of the flag, not only hide()/show() calls.
        self.bind(visible=handle_visibility)

    def hide(self):
        self.visible = False

    def show(self):
        self.visible = True
| 2.234375 | 2 |
lrtc_lib/orchestrator/orchestrator_api.py | MovestaDev/low-resource-text-classification-framework | 57 | 12773684 | # (c) Copyright IBM Corporation 2020.
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0
import glob
import logging
import os
import traceback
from collections import Counter
from enum import Enum
from typing import Mapping, List, Sequence, Tuple, Set
import lrtc_lib.data_access.data_access_factory as data_access_factory
from lrtc_lib.active_learning.strategies import ActiveLearningStrategy
from lrtc_lib.data_access.core.data_structs import Label, TextElement
from lrtc_lib.data_access.core.utils import get_workspace_labels_dump_filename
from lrtc_lib.definitions import PROJECT_PROPERTIES
from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api
from lrtc_lib.orchestrator.core.state_api.orchestrator_state_api import ModelInfo, ActiveLearningRecommendationsStatus
from lrtc_lib.train_and_infer_service.model_type import ModelType
from lrtc_lib.train_and_infer_service.train_and_infer_api import ModelStatus
from lrtc_lib.training_set_selector import training_set_selector_factory
# constants
MAX_VALUE = 1000000  # effectively "no limit" when sampling whole datasets
TRAIN_COUNTS_STR_KEY = "train_counts"
DEV_COUNTS_STR_KEY = "dev_counts"
LABEL_POSITIVE = "true"
LABEL_NEGATIVE = "false"
BINARY_LABELS = frozenset({LABEL_NEGATIVE, LABEL_POSITIVE})
# members — module-level singletons configured from PROJECT_PROPERTIES;
# the set_* functions below rebind these globals at runtime.
active_learning_strategy = PROJECT_PROPERTIES["active_learning_strategy"]
training_set_selection_strategy = PROJECT_PROPERTIES["training_set_selection"]
active_learner = PROJECT_PROPERTIES["active_learning_factory"].get_active_learner(active_learning_strategy)
data_access = data_access_factory.get_data_access()
train_and_dev_sets_selector = training_set_selector_factory.get_training_set_selector(
    selector=training_set_selection_strategy)
def _delete_orphan_labels():
    """
    delete labels that are not attached to a known workspace
    """
    # All label dump files on disk, regardless of owner.
    all_label_dump_files = glob.glob(get_workspace_labels_dump_filename(workspace_id='*', dataset_name='*'))
    existing_workspace_ids = [w.workspace_id for w in orchestrator_state_api.get_all_workspaces()]
    # Dump files that belong to a still-existing workspace.
    dump_files_with_parents = [file for wid in existing_workspace_ids for file in
                               glob.glob(get_workspace_labels_dump_filename(workspace_id=wid, dataset_name='*'))]
    for dump_file in all_label_dump_files:
        if dump_file not in dump_files_with_parents:
            logging.info(f"deleting orphan labels file {dump_file}")
            os.remove(dump_file)
# Clean up stale label dumps once, at module import time.
_delete_orphan_labels()
def copy_workspace(existing_workspace_id: str, new_workspace_id: str):
    """Create a copy of a given workspace, including its labels, under a new id.

    :param existing_workspace_id: id of the workspace to duplicate
    :param new_workspace_id: id for the new copy
    :return: the new workspace id
    """
    source = get_workspace(existing_workspace_id)
    # Copy the labels first, then the workspace state itself.
    data_access.copy_labels_to_new_workspace(existing_workspace_id, new_workspace_id,
                                             source.dataset_name, source.dev_dataset_name)
    orchestrator_state_api.copy_workspace(existing_workspace_id, new_workspace_id)
    return new_workspace_id
def set_training_set_selection_strategy(new_training_set_selection_strategy=None):
    """
    Set the logic for selecting training examples from the training dataset.
    The default strategy is ALL_LABELED, which means we use all the labeled elements
    other strategies enable to add weak labels, for example by using unlabeled elements as weak negative
    :return:
    """
    # Rebinds module-level state; calling with None simply rebuilds the
    # selector using the currently configured strategy.
    global train_and_dev_sets_selector
    global training_set_selection_strategy
    if new_training_set_selection_strategy is not None:
        training_set_selection_strategy = new_training_set_selection_strategy
    train_and_dev_sets_selector = training_set_selector_factory.get_training_set_selector(
        selector=training_set_selection_strategy)
def set_active_learning_strategy(new_active_learning_strategy=None):
    """
    Set active learning policy to use
    :param new_active_learning_strategy:
    :return:
    """
    # Rebinds module-level state; calling with None rebuilds the active
    # learner from the currently configured strategy.
    global active_learner, active_learning_strategy
    if new_active_learning_strategy is not None:
        active_learning_strategy = new_active_learning_strategy
    active_learner = PROJECT_PROPERTIES["active_learning_factory"].get_active_learner(active_learning_strategy)
def create_workspace(workspace_id: str, dataset_name: str, dev_dataset_name: str = None, test_dataset_name: str = None):
    """
    create a new workspace
    :param workspace_id:
    :param dataset_name: name of the train dataset
    :param dev_dataset_name: optional development dataset
    :param test_dataset_name: optional test dataset
    """
    orchestrator_state_api.create_workspace(workspace_id, dataset_name, dev_dataset_name, test_dataset_name)
    logging.info(f"Creating workspace {workspace_id} using dataset {dataset_name}")
def create_new_category(workspace_id: str, category_name: str, category_description: str,
                        category_labels: Set[str] = BINARY_LABELS):
    """
    declare a new category in the given workspace
    :param workspace_id:
    :param category_name:
    :param category_description:
    :param category_labels: the set of allowed label strings; defaults to the
        binary {"true", "false"} labels
    """
    orchestrator_state_api.add_category_to_workspace(workspace_id, category_name, category_description, category_labels)
class DeleteModels(Enum):
    """Model-deletion policy used by delete_workspace()."""
    ALL = 0  # delete every model in the workspace
    FALSE = 1  # keep all models
    ALL_BUT_FIRST_MODEL = 2  # keep the first model of each category
def delete_workspace(workspace_id: str, delete_models: DeleteModels = DeleteModels.ALL, ignore_errors=False):
    """
    delete a given workspace
    :param workspace_id:
    :param delete_models: ALL - delete all the models of the workspace, FALSE - do not delete models,
    ALL_BUT_FIRST_MODEL - keep the first model of each category
    :param ignore_errors: if True, log failures and continue instead of raising
    """
    logging.info(f"deleting workspace {workspace_id} ignore errors {ignore_errors}")
    models_to_delete = []
    if workspace_exists(workspace_id):
        try:
            workspace = orchestrator_state_api.get_workspace(workspace_id)
            if delete_models != DeleteModels.FALSE:
                # Collect models first; they are deleted only after the
                # workspace state and labels are removed below.
                for category in workspace.category_to_models:
                    for idx, model_id in enumerate(workspace.category_to_models[category]):
                        if idx == 0 and delete_models == DeleteModels.ALL_BUT_FIRST_MODEL:
                            continue
                        models_to_delete.append(_get_model(workspace_id, model_id))
            orchestrator_state_api.delete_workspace_state(workspace_id)
        except Exception as e:
            logging.error(f"error deleting workspace {workspace_id}")
            traceback.print_exc()
            if not ignore_errors:
                raise e
        # NOTE(review): if get_workspace() failed above with ignore_errors=True,
        # 'workspace' is unbound here and this block raises NameError — verify.
        try:
            data_access.clear_saved_labels(workspace_id, workspace.dataset_name)
            if workspace.dev_dataset_name:
                data_access.clear_saved_labels(workspace_id, workspace.dev_dataset_name)
        except Exception as e:
            logging.error(f"error clearing saved label for workspace {workspace_id}")
            traceback.print_exc()
            if not ignore_errors:
                raise e
    for model in models_to_delete:
        model_type = model.model_type
        train_and_infer = PROJECT_PROPERTIES["train_and_infer_factory"].get_train_and_infer(model_type)
        train_and_infer.delete_model(model.model_id)
def edit_category(workspace_id: str, prev_category_name: str, new_category_name: str, new_category_description: str):
    """Rename/re-describe an existing category. Placeholder — not implemented.

    :raises NotImplementedError: always
    """
    # NotImplementedError is the idiomatic signal for an unimplemented API,
    # and is still caught by existing callers handling the broad Exception.
    raise NotImplementedError("Not implemented yet")
def delete_category(workspace_id: str, category_name: str):
    """Delete a category from a workspace. Placeholder — not implemented.

    :raises NotImplementedError: always
    """
    # NotImplementedError is the idiomatic signal for an unimplemented API,
    # and is still caught by existing callers handling the broad Exception.
    raise NotImplementedError("Not implemented yet")
def add_documents(dataset_name, docs):
    """Add the given documents to a dataset (thin pass-through to data access)."""
    data_access.add_documents(dataset_name=dataset_name, documents=docs)
def query(workspace_id: str, dataset_name: str, category_name: str, query: str,
          sample_size: int, unlabeled_only: bool = False, remove_duplicates=False) -> Mapping[str, object]:
    """
    query a dataset using the given regex, returning up to *sample_size* elements that meet the query
    :param workspace_id:
    :param dataset_name:
    :param category_name:
    :param query: regex string
    :param unlabeled_only: if True, filters out labeled elements
    :param sample_size: maximum items to return
    :param remove_duplicates: if True, remove duplicate elements
    :return: a dictionary with two keys: 'results' whose value is a list of TextElements, and 'hit_count' whose
    value is the total number of TextElements in the dataset matched by the query.
    {'results': [TextElement], 'hit_count': int}
    """
    # NOTE: the parameter 'query' shadows this function's own name inside the body.
    if unlabeled_only:
        return data_access.sample_unlabeled_text_elements(workspace_id=workspace_id, dataset_name=dataset_name,
                                                          category_name=category_name, sample_size=sample_size,
                                                          query=query, remove_duplicates=remove_duplicates)
    else:
        return data_access.sample_text_elements_with_labels_info(workspace_id=workspace_id, dataset_name=dataset_name,
                                                                 sample_size=sample_size, query=query,
                                                                 remove_duplicates=remove_duplicates)
def get_documents(workspace_id: str, dataset_name: str, uris: Sequence[str]) -> List[object]:
    """
    Fetch the documents identified by *uris*, with label information attached.
    :rtype: list of Document
    :param workspace_id:
    :param dataset_name:
    :param uris:
    """
    return data_access.get_documents_with_labels_info(workspace_id, dataset_name, uris)
def get_text_elements(workspace_id: str, dataset_name: str, uris: Sequence[str]) -> List[object]:
    """
    Fetch the text elements identified by *uris*, with label information attached.
    :param workspace_id:
    :param dataset_name:
    :param uris:
    """
    return data_access.get_text_elements_with_labels_info(workspace_id, dataset_name, uris)
def _update_recommendation(workspace_id, dataset_name, category_name, count, model: ModelInfo = None):
    """
    Using the AL strategy, update the workspace with next recommended elements for labeling
    :param workspace_id:
    :param dataset_name:
    :param category_name:
    :param count: desired number of stored recommendations
    :param model: model to use or None to use the latest model in status READY
    :return: the id of the model whose recommendations were used/updated
    """
    if model is None:
        model = orchestrator_state_api.get_latest_model_by_state(workspace_id, category_name, ModelStatus.READY)
    curr_cat_recommendations = orchestrator_state_api.get_current_category_recommendations(workspace_id, category_name,
                                                                                           model.model_id)
    num_recommendations = len(curr_cat_recommendations)
    # Only invoke the (potentially expensive) active learner when fewer than
    # *count* recommendations have been stored; the AL status is toggled to
    # IN_PROGRESS for the duration and back to READY afterwards.
    if num_recommendations < count:
        orchestrator_state_api.update_active_learning_status(workspace_id, category_name, model.model_id,
                                                             ActiveLearningRecommendationsStatus.AL_IN_PROGRESS)
        new_recommendations = active_learner.get_recommended_items_for_labeling(
            workspace_id=workspace_id, model_id=model.model_id, dataset_name=dataset_name, category_name=category_name,
            sample_size=count)
        orchestrator_state_api.update_category_recommendations(workspace_id=workspace_id, category_name=category_name,
                                                               model_id=model.model_id,
                                                               recommended_items=new_recommendations)
        orchestrator_state_api.update_active_learning_status(workspace_id, category_name, model.model_id,
                                                             ActiveLearningRecommendationsStatus.READY)
    return model.model_id
def get_model_active_learning_status(workspace_id, model_id):
    """Return the active-learning recommendation status recorded for the model."""
    return orchestrator_state_api.get_active_learning_status(workspace_id, model_id)
def get_elements_to_label(workspace_id: str, category_name: str, count: int) -> Sequence[TextElement]:
    """
    returns a list of the top *count* elements recommended for labeling by the AL strategy.
    The active learner is invoked only if the requested count of elements have not yet been added to the workspace.
    :param workspace_id:
    :param category_name:
    :param count:
    """
    dataset_name = get_workspace(workspace_id).dataset_name
    # Ensure enough recommendations are stored, then read them back.
    model_id = _update_recommendation(workspace_id, dataset_name, category_name, count)
    updated_recommended = \
        orchestrator_state_api.get_current_category_recommendations(workspace_id, category_name, model_id)
    return updated_recommended
def set_labels(workspace_id: str, labeled_sentences: Sequence[Tuple[str, Mapping[str, Label]]],
               propagate_to_duplicates=False):
    """Attach category labels to the given URIs.

    :param workspace_id:
    :param labeled_sentences: sequence of (URI, {category_name: Label}) tuples,
        where Label is an instance of data_structs.Label
    :param propagate_to_duplicates: if True, also set the same labels for
        additional URIs that are duplicates of the URIs provided
    :return: whatever the data-access layer returns for this operation
    """
    return data_access.set_labels(workspace_id, labeled_sentences, propagate_to_duplicates)
def unset_labels(workspace_id: str, category_name, uris: Sequence[str]):
    """
    unset labels of a given category for URIs.
    :param workspace_id:
    :param category_name:
    :param uris:
    """
    data_access.unset_labels(workspace_id, category_name, uris)
def _convert_to_dicts_with_numeric_labels(data, category_name, all_category_labels: Set[str]) -> Sequence[Mapping]:
"""
convert textual labels to integers and convert to expected inference input format
:param data:
"""
text_to_number = {label: i for i, label in enumerate(sorted(all_category_labels))}
def get_numeric_value(labels_set):
if len(labels_set) == 1:
return text_to_number[next(iter(labels_set))]
else:
raise ValueError("multilabel is not supported currently")
converted_data = [{"text": element.text,
"label": get_numeric_value(element.category_to_label[category_name].labels)}
for element in data]
return converted_data
def train(workspace_id: str, category_name: str, model_type: ModelType, train_params=None, infer_after_train=True):
    """
    train a model for a category in the specified workspace

    Selects train/dev sets, verifies every label of the category has at least one
    training example, trains a model through the configured backend and registers
    it in the orchestrator state.
    :param workspace_id:
    :param category_name:
    :param model_type:
    :param train_params: optional dict of extra training parameters; merged with the model metadata
    :param infer_after_train: if True, the full train (and test) datasets are passed to the
    backend so their predictions are computed and cached after training
    :return: model id
    """
    workspace = get_workspace(workspace_id)
    dataset_name = workspace.dataset_name
    (train_data, train_counts), (dev_data, dev_counts) = train_and_dev_sets_selector.get_train_and_dev_sets(
        workspace_id=workspace_id, train_dataset_name=dataset_name, category_name=category_name,
        dev_dataset_name=workspace.dev_dataset_name)
    logging.info(f"training a new model with {train_counts}")
    # label_counts != train_counts as train_counts may refer to negative and weak negative labels separately
    labels = [element.category_to_label[category_name].labels for element in train_data]
    labels = [item for subset in labels for item in subset] # flatten list of sets
    label_counts = Counter(labels)
    all_category_labels = workspace.category_to_labels[category_name]
    # Refuse to train if any label of the category has zero training examples.
    labels_not_in_train = [label for label in all_category_labels if label_counts[label] == 0]
    if len(labels_not_in_train) > 0:
        raise Exception(f"no train examples for labels: {labels_not_in_train}, cannot train a model: {train_counts}")
    model_metadata = dict()
    model_metadata[TRAIN_COUNTS_STR_KEY] = train_counts
    if dev_data is not None:
        model_metadata[DEV_COUNTS_STR_KEY] = dev_counts
    logging.info(
        f"workspace {workspace_id} training a model for category '{category_name}', model_metadata: {model_metadata}")
    # Convert textual labels to the numeric-id dict format the backend expects.
    train_data = _convert_to_dicts_with_numeric_labels(train_data, category_name, all_category_labels)
    if dev_data:
        dev_data = _convert_to_dicts_with_numeric_labels(dev_data, category_name, all_category_labels)
    elements_to_infer = None
    if infer_after_train: # add data to be inferred and cached after the training process
        test_dataset = data_access.sample_text_elements(workspace.test_dataset_name, MAX_VALUE)['results'] \
            if workspace.test_dataset_name is not None else []
        all_train_dataset = data_access.sample_text_elements(workspace.dataset_name, MAX_VALUE)['results']
        elements_to_infer = [{"text": element.text} for element in test_dataset + all_train_dataset]
    # Caller-supplied train_params are merged in, with metadata keys taking precedence.
    params = model_metadata if train_params is None else {**train_params, **model_metadata}
    train_and_infer = PROJECT_PROPERTIES["train_and_infer_factory"].get_train_and_infer(model_type)
    model_id = train_and_infer.train(train_data=train_data, dev_data=dev_data, test_data=elements_to_infer,
                                     train_params=params)
    logging.info(f"new model id is {model_id}")
    model_status = train_and_infer.get_model_status(model_id)
    orchestrator_state_api.add_model(workspace_id=workspace_id, category_name=category_name, model_id=model_id,
                                     model_status=model_status, model_type=model_type, model_metadata=params)
    return model_id
def get_model_status(workspace_id: str, model_id: str) -> ModelStatus:
    """
    Return the status of the given model: TRAINING, READY or ERROR.

    :param workspace_id:
    :param model_id:
    :return: the model's ModelStatus
    """
    return _get_model(workspace_id, model_id).model_status
def get_model_train_counts(workspace_id: str, model_id: str) -> Mapping:
    """
    Return the number of elements per label that were used to train the given model.

    :param workspace_id:
    :param model_id:
    :return: mapping of label to training-example count, taken from the model metadata
    """
    return _get_model(workspace_id, model_id).model_metadata[TRAIN_COUNTS_STR_KEY]
def get_all_models_for_category(workspace_id, category_name: str):
    """
    Return every model trained for a category in the workspace.

    :param workspace_id:
    :param category_name:
    :return: dict from model_id to ModelInfo (empty dict if no models exist)
    """
    return get_workspace(workspace_id).category_to_models.get(category_name, {})
def infer(workspace_id: str, category_name: str, texts_to_infer: Sequence[TextElement], model_id: str = None,
          infer_params: dict = None, use_cache: bool = True) -> dict:
    """
    get the prediction for a list of TextElements
    :param workspace_id:
    :param category_name:
    :param texts_to_infer: list of TextElements
    :param model_id: model_id to use. If set to None, the latest READY model for the category will be used
    :param infer_params: dictionary for additional inference parameters. Default is None
    :param use_cache: utilize a cache that stores inference results
    :return: a dictionary of inference results, with at least the "labels" key, where the value is a list of string
    labels for each element in texts_to_infer. Additional keys, with list values of the same length, can be passed.
    e.g. {"labels": ['false', 'true', 'true'],
          "scores": [0.23, 0.79, 0.98],
          "gradients": [[0.24, -0.39, -0.66, 0.25], [0.14, 0.29, -0.26, 0.16], [-0.46, 0.61, -0.02, 0.23]]}
    """
    models = get_all_models_for_category(workspace_id, category_name)
    if len(models) == 0:
        raise Exception(f"There are no models in workspace {workspace_id} for category {category_name}")
    if model_id is None: # use latest
        # NOTE(review): if no model is in READY state this may return None and the
        # model.model_type access below would fail — confirm the intended behavior.
        model = orchestrator_state_api.get_latest_model_by_state(workspace_id=workspace_id,
                                                                 category_name=category_name,
                                                                 model_status=ModelStatus.READY)
    else:
        model = _get_model(workspace_id, model_id)
        if model.model_status is not ModelStatus.READY:
            raise Exception(f"model id {model_id} is not in READY status")
    train_and_infer = PROJECT_PROPERTIES["train_and_infer_factory"].get_train_and_infer(model.model_type)
    list_of_dicts = [{"text": element.text} for element in texts_to_infer]
    infer_results = train_and_infer.infer(model_id=model.model_id, items_to_infer=list_of_dicts,
                                          infer_params=infer_params, use_cache=use_cache)
    # The backend returns numeric label ids; map them back to the category's textual
    # labels (ids were assigned by sorted order, mirroring _convert_to_dicts_with_numeric_labels).
    all_labels = get_workspace(workspace_id).category_to_labels[category_name]
    numeric_label_to_text = {i: label for i, label in enumerate(sorted(all_labels))}
    infer_results['labels'] = [numeric_label_to_text[l] for l in infer_results['labels']]
    return infer_results
def infer_by_uris(workspace_id: str, category_name: str, uris_to_infer: Sequence[str], model_id: str = None,
                  infer_params: dict = None, use_cache: bool = True) -> dict:
    """
    get the prediction for a list of URIs
    :param workspace_id:
    :param category_name:
    :param uris_to_infer: list of uris (str)
    :param model_id: model_id to use. If set to None, the latest model for the category will be used
    :param infer_params: dictionary for additional inference parameters. Default is None
    :param use_cache: utilize a cache that stores inference results
    :return: a dictionary of inference results, with at least the "labels" key, where the value is a list of string
    labels for each element in uris_to_infer. Additional keys, with list values of the same length, can be passed.
    e.g. {"labels": ['false', 'true', 'true'],
          "scores": [0.23, 0.79, 0.98],
          "gradients": [[0.24, -0.39, -0.66, 0.25], [0.14, 0.29, -0.26, 0.16], [-0.46, 0.61, -0.02, 0.23]]}
    """
    # Resolve the URIs to TextElements and delegate to infer().
    dataset_name = get_workspace(workspace_id).dataset_name
    elements_to_infer = data_access.get_text_elements_with_labels_info(workspace_id, dataset_name, uris_to_infer)
    return infer(workspace_id, category_name, elements_to_infer, model_id, infer_params, use_cache)
def get_all_text_elements(dataset_name: str) -> List[TextElement]:
    """
    get all the text elements of the given dataset
    Delegates directly to the data-access layer.
    :param dataset_name:
    """
    return data_access.get_all_text_elements(dataset_name=dataset_name)
def get_all_text_elements_uris(dataset_name: str) -> List[str]:
    """
    Return a List of all TextElement uris in the given dataset_name.
    Delegates directly to the data-access layer.
    :param dataset_name: the name of the dataset from which the TextElement uris should be retrieved.
    :return: a List of all TextElement uris in the given dataset_name.
    """
    return data_access.get_all_text_elements_uris(dataset_name=dataset_name)
def get_all_document_uris(workspace_id):
    """Return the URIs of all documents in the workspace's dataset."""
    dataset = get_workspace(workspace_id).dataset_name
    return data_access.get_all_document_uris(dataset)
def get_label_counts(workspace_id: str, dataset_name: str, category_name: str, remove_duplicates=True):
    """
    get the number of elements that were labeled.
    Delegates directly to the data-access layer.
    :param workspace_id:
    :param dataset_name:
    :param category_name:
    :param remove_duplicates: whether to count all labeled elements or only unique instances
    :return: label counts as returned by the data-access layer
    """
    return data_access.get_label_counts(workspace_id, dataset_name, category_name, remove_duplicates=remove_duplicates)
def is_model_compatible_with_active_learning(al: ActiveLearningStrategy, model: ModelType):
    """
    return True if the active learning strategy is supported by the given model type.
    For example, ActiveLearningStrategies.CORE_SET and ActiveLearningStrategies.DAL are not supported by Naive Bayes.
    The compatibility matrix is defined in get_compatible_models() under lrtc_lib.active_learning.strategies.py
    :param al: the active learning strategy
    :param model: the model type to check against
    :return: True if compatible, False otherwise
    """
    return PROJECT_PROPERTIES["models_compatible_with_strategies_func"](model, al)
def delete_model_from_workspace(workspace_id, category_name, model_id):
    """Delete a model both from the orchestrator state and from the training backend's storage."""
    model_type = _get_model(workspace_id, model_id).model_type
    backend = PROJECT_PROPERTIES["train_and_infer_factory"].get_train_and_infer(model_type)
    logging.info(f"deleting model id {model_id} from workspace {workspace_id} in category {category_name}")
    orchestrator_state_api.delete_model(workspace_id, category_name, model_id)
    backend.delete_model(model_id)
def add_train_param(workspace_id: str, train_param_key: str, train_param_value: str):
    """
    Placeholder for setting a per-workspace training parameter.

    :param workspace_id:
    :param train_param_key:
    :param train_param_value:
    :raises NotImplementedError: always — this API is not implemented yet
    """
    # NotImplementedError (a subclass of Exception) is the idiomatic signal for an
    # unimplemented API, and remains compatible with `except Exception` handlers.
    raise NotImplementedError("Not implemented yet")
def workspace_exists(workspace_id: str) -> bool:
    """Return True if a workspace with the given id exists."""
    return orchestrator_state_api.workspace_exists(workspace_id)
def get_workspace(workspace_id):
    """Return the workspace object for *workspace_id*, raising if it does not exist."""
    if workspace_exists(workspace_id):
        return orchestrator_state_api.get_workspace(workspace_id)
    raise Exception(f"workspace_id '{workspace_id}' doesn't exist")
def _get_model(workspace_id, model_id):
    """
    Return the ModelInfo for *model_id*, searching every category of the workspace.

    :param workspace_id:
    :param model_id:
    :raises Exception: if the model id does not exist in the workspace
    """
    workspace = get_workspace(workspace_id)
    all_models = {k: v for d in workspace.category_to_models.values() for k, v in d.items()}
    # BUGFIX: the original `if all_models[model_id]:` raised a bare KeyError for a
    # missing id (the intended error message below was unreachable), and would have
    # mis-reported a falsy-but-present entry as missing. Use a membership test.
    if model_id in all_models:
        return all_models[model_id]
    raise Exception(f"model id {model_id} does not exist in workspace {workspace_id}")
| 1.71875 | 2 |
Python/Fundamentals/Introduction/hello_world.py | handtjaxon1/Coding-Dojo-Development | 0 | 12773685 | <reponame>handtjaxon1/Coding-Dojo-Development
print("Hello, world!")  # print a literal string directly
x = "Hello World"  # the same idea, via a variable first
print(x)
y = 42  # printing works for integers as well
print(y) | 2.640625 | 3 |
coremodules/files/eudata.py | devinligman/tcam2 | 0 | 12773686 | import os
import coredata.DataManager as dataManager
import coredata.TestController as testController
event_dispatcher = None
def setup():
    """Module initialisation hook; currently only emits a placeholder trace.

    NOTE(review): presumably meant to wire runNumberChanged into event_dispatcher — confirm.
    """
    print("setup?")
def runNumberChanged(event):
    """Event handler: when the run number changes, make sure the EU run file exists."""
    if not runFileExists():
        initRunFile()
def getRunFileLocation():
    """Build the path of the EU run file for the current run number (TCAM/RUN_NNNN.EU)."""
    run_number = dataManager.runNumber
    tcam_dir = testController.testDirectory + os.sep + 'TCAM' + os.sep
    file_name = 'RUN_' + str(int(run_number)).zfill(4) + '.EU'
    return tcam_dir + file_name
def runFileExists():
    """Return True if the EU run file for the current run already exists on disk."""
    location = getRunFileLocation()
    print(location)
    return os.path.isfile(location)
def initRunFile():
    """
    Create the EU run file and write its header: test/run identifiers, the channel
    count, and one formatted name per data column.

    NOTE(review): the ':Test = 9985' / ':Run = 30' values are hard-coded — confirm
    whether they should come from dataManager/testController.
    """
    # Use a context manager so the handle is closed even if a write raises
    # (the original open()/close() pair leaked the handle on error).
    # Mode 'a' keeps existing content; callers check runFileExists() first.
    with open(getRunFileLocation(), 'a') as file:
        file.write(':Test = 9985\n')
        file.write(':Run = 30\n')
        file.write('\'' + str(len(dataManager.columns)) + ' channels\n')
        file.write('\'')
        for column in dataManager.columns:
            file.write(column.formatColumName())
        file.write('\n')
def addRow():
    """Append one data row (every column's formatted EU value) to the run file."""
    if not runFileExists():
        initRunFile()
    # Context manager guarantees the handle is closed even if a write raises
    # (the original open()/close() pair leaked the handle on error).
    with open(getRunFileLocation(), 'a') as file:
        for column in dataManager.columns:
            file.write(column.formatEUValue())
        file.write('\n')
| 2.5 | 2 |
generators/root/templates/base/ansible/staging.py | thetribeio/generator-project | 19 | 12773687 | <reponame>thetribeio/generator-project<gh_stars>10-100
#!/usr/bin/env python3
"""
Staging inventory script.

Thin entry point that runs the shared inventory() helper for the "staging"
environment — presumably emitting an Ansible dynamic inventory; confirm in inventory.py.
"""
from inventory import inventory
if __name__ == "__main__":
    # Run only when executed directly, not when imported.
    inventory("staging")
| 1.53125 | 2 |
Project4/code/SplitMerge.py | jyczju/CVprojects | 0 | 12773688 | <reponame>jyczju/CVprojects
'''
图像切割+边缘识别
浙江大学控制学院《数字图像处理与机器视觉》第三次作业
jyczju
2022/4/6 v1.0
'''
import cv2
import numpy as np
import sys
from imgnode import ImgNode
import matplotlib.pyplot as plt
def region_split_merge(img, min_area=(1,1), threshold=5.0):
    '''
    Region split-and-merge segmentation, implemented mainly by the ImgNode class.
    Input: image to process, minimum region size for splitting, similarity threshold for merging
    Output: binary foreground/background image, plus an image visualising the split boundaries
    '''
    draw_img = img.copy() # copy used to visualise the splitting result
    start_node = ImgNode(img, None, 0, img.shape[0], 0, img.shape[1]) # root node covering the whole image
    draw_img = start_node.split(draw_img, min_area) # recursively split regions
    leaf_father = start_node.find_leaf_father() # node from which merging starts
    region_img = np.zeros((int(img.shape[0]), int(img.shape[1]))) # initialise the binary output image
    region_img = leaf_father.sub_node3.merge(region_img, threshold) # merge similar regions
    return region_img,draw_img
def extract_contour(region_img):
    """
    Extract contours from a binary image (objects are 0, background 255).
    Interior pixels — those whose whole 3x3 neighbourhood sums to zero — are turned
    white, so only the boundary pixels of each object remain black.
    Input: binary image
    Output: contour image (contours black, everything else white)
    """
    boundary = region_img.copy()
    rows = region_img.shape[0]
    cols = region_img.shape[1]
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            window = region_img[r - 1:r + 2, c - 1:c + 2]
            if window.sum() == 0:
                boundary[r][c] = 255  # interior pixel: whiten it
    return boundary
def track_contour(img, start_point, all_cnts):
    '''
    Contour tracking (8-connected boundary following producing a Freeman chain code)
    Input: boundary image, start point of the current contour, points already tracked
    Output: chain code of the current contour (first item is the start point,
    the remaining items are 8-connectivity direction codes)
    '''
    neibor = [(0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1), (1, 0), (1, 1)] # 8-connectivity direction offsets
    dir = 5 # initial search direction
    freeman = [start_point] # stores the contour chain code
    current_point = start_point # the contour's start point becomes the current point
    neibor_point = tuple(np.array(current_point) + np.array(neibor[dir])) # neighbour given by current point + direction code
    if neibor_point[0] >= img.shape[0] or neibor_point[1] >= img.shape[1] or neibor_point[0] < 0 or neibor_point[1] < 0: # neighbour outside the image: contour ends
        return freeman
    while True: # contour scanning loop
        # print('current_point',current_point)
        while img[neibor_point[0], neibor_point[1]] != 0: # neighbour is not a boundary pixel
            dir += 1 # rotate the search direction by 45 degrees
            if dir >= 8:
                dir -= 8
            neibor_point = tuple(np.array(current_point) + np.array(neibor[dir])) # update the neighbour
            if neibor_point[0] >= img.shape[0] or neibor_point[1] >= img.shape[1] or neibor_point[0] < 0 or neibor_point[1] < 0: # neighbour outside the image: contour ends
                return freeman
        else:
            current_point = neibor_point # the matching neighbour becomes the current point for the next search
            if current_point in all_cnts: # the point already belongs to a tracked contour: this contour ends
                return freeman
        freeman.append(dir) # record the direction code
        # Back up the search direction before looking for the next boundary pixel.
        if (dir % 2) == 0:
            dir += 7
        else:
            dir += 6
        if dir >= 8:
            dir -= 8 # keep the direction code in [0, 8)
        neibor_point = tuple(np.array(current_point) + np.array(neibor[dir])) # update the neighbour
        if neibor_point[0] >= img.shape[0] or neibor_point[1] >= img.shape[1] or neibor_point[0] < 0 or neibor_point[1] < 0: # neighbour outside the image: contour ends
            return freeman
        if current_point == start_point:
            break # back at the start point: the contour is closed, stop scanning
    return freeman
def draw_contour(img, contours, color=(0, 0, 255)):
    """
    Draw a contour onto *img* in place (each point is drawn as a 2x2 patch).
    Input: image to draw on, contour point list, colour
    Output: the same image, with the contour drawn
    """
    for point in contours:
        row, col = point
        img[row - 1:row + 1, col - 1:col + 1] = color
    return img
def find_start_point(img, all_cnts):
    """
    Locate the first black pixel (row-major order) not yet assigned to a contour.
    Input: boundary image, collection of already-tracked contour points
    Output: (row, col) of the start point, or (-1, -1) when none is left
    """
    for row in range(img.shape[0]):
        for col in range(img.shape[1]):
            if img[row, col] == 0 and (row, col) not in all_cnts:
                return (row, col)
    return (-1, -1)
def find_cnts(img):
    '''
    Find all contours in a boundary image
    Input: boundary image
    Output: list of contours (each item is the Freeman chain code of one contour)
    '''
    contours = [] # current contour as a point list
    cnts = [] # collection of contours (point lists)
    freemans = [] # collection of contour chain codes
    all_cnts = [] # every contour point found so far
    while True:
        start_point = find_start_point(img, all_cnts) # start point of the next contour
        if start_point == (-1, -1): # no new start point: every contour point has been found
            break
        freeman = track_contour(img, start_point, all_cnts) # track the contour from that point
        contours = freeman2contour(freeman) # decode the chain code into a point list
        cnts.append(contours) # store the contour points
        freemans.append(freeman) # store the chain code
        all_cnts = all_cnts + contours # mark these points as already tracked
    # Drop short contours (noise).
    fms = []
    for fm in freemans:
        if len(fm) >= 10:
            fms.append(fm)
    return fms
def draw_cnts(cntlists, img, color=(0, 0, 255), mode='freeman'):
    """
    Draw every contour of a collection onto an image.
    Input: contour collection (chain codes when mode='freeman', point lists when
    mode='contour'), target image, colour
    Output: the image with all contours drawn (unchanged for an unknown mode)
    """
    for entry in cntlists:
        if mode == 'freeman':
            img = draw_contour(img, freeman2contour(entry), color)
        elif mode == 'contour':
            img = draw_contour(img, entry, color)
    return img
def contours_filter(freemans, windows_size = 13):
    '''
    Smooth contours with a moving-average filter
    Input: list of contour chain codes, filter window size
    Output: list of smoothed contours (as point lists)
    '''
    if (windows_size % 2) == 0:
        windows_size += 1 # make sure windows_size is odd
    cnts_filter = [] # smoothed contours
    for freeman in freemans:
        cnt = freeman2contour(freeman) # decode the chain code into a point list
        # NOTE(review): cnt is updated in place, so later windows average over
        # already-smoothed points — confirm this causal smoothing is intended.
        for i in range(int((windows_size-1)/2), len(cnt)-int((windows_size-1)/2)):
            ix = np.mean([cnt[j][0] for j in range(i-int((windows_size-1)/2), i+int((windows_size-1)/2)+1)])
            iy = np.mean([cnt[j][1] for j in range(i-int((windows_size-1)/2), i+int((windows_size-1)/2)+1)])
            cnt[i] = (int(ix), int(iy)) # moving average over the window
        cnts_filter.append(cnt) # store the smoothed contour
    return cnts_filter
def freeman2contour(freeman):
    """
    Decode a Freeman chain code into a contour point list.
    Input: chain code (first item is the start point, the rest are direction codes)
    Output: contour as a list of (row, col) points
    """
    offsets = [(0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1), (1, 0), (1, 1)]  # 8-connectivity offsets
    points = [freeman[0]]
    for code in freeman[1:]:
        step = offsets[code]
        points.append(tuple(np.array(points[-1]) + np.array(step)))
    return points
def contour2freeman(cnt):
    """
    Encode a contour point list as a Freeman chain code.
    Input: contour as a list of (row, col) points
    Output: list of 8-connectivity direction codes (one shorter than the input)
    """
    offsets = [(0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1), (1, 0), (1, 1)]  # 8-connectivity offsets
    codes = []
    for prev_pt, next_pt in zip(cnt, cnt[1:]):
        delta = tuple(np.array(next_pt) - np.array(prev_pt))
        codes.append(offsets.index(delta))
    return codes
if __name__ == '__main__':
    sys.setrecursionlimit(100000) # raise the recursion limit: splitting/merging recurses deeply
    # read_path = 'zju_logo.png' # alternative input image
    # read_path = 'zjui_logo.png' # alternative input image
    # read_path = 'zju_logo_gauss.png' # alternative input image
    # read_path = 'zjui_logo_gauss.png' # alternative input image
    # read_path = 'zju_logo_uneven.png' # alternative input image
    read_path = 'zjui_logo_uneven.png' # path of the image to process
    save_path = read_path[:-4]+'_results.png' # path of the result figure
    print('save the result to '+save_path)
    img = cv2.imread(read_path, 0) # read the image as grayscale
    origin_img = img.copy() # keep a copy of the original image
    # cv2.imshow('origin_img', origin_img)
    region_img,draw_img = region_split_merge(img, min_area=(1,1), threshold=5.0) # 5.0 # region split and merge
    # cv2.imshow('draw_img', draw_img) # show the splitting result
    cv2.imwrite('draw_img.png', draw_img)
    # cv2.imshow('region_img', region_img) # show the merging result
    # Whiten the image borders to remove artefacts (values tuned to this input size).
    region_img[0:20, 0:450] = 255 # whiten a strip of the region image
    region_img[275:300, 0:450] = 255 # whiten a strip of the region image
    region_img[0:300, 445:450] = 255 # whiten a strip of the region image
    cv2.imwrite('region_img.png', region_img)
    contour_img = extract_contour(region_img) # contour extraction
    # cv2.imshow('contour_img', contour_img) # show the contour image
    cv2.imwrite('contour_img.png', contour_img)
    freemans = find_cnts(contour_img) # contour tracking
    print('freemans:')
    print(freemans)
    # img_cnt = cv2.cvtColor(origin_img, cv2.COLOR_GRAY2BGR)
    img_cnt = 255*np.ones([img.shape[0], img.shape[1], 3])
    img_cnt = draw_cnts(freemans, img_cnt, color = (0, 0, 255), mode='freeman') # draw the tracked contours
    # cv2.imshow('img_cnt', img_cnt)
    cv2.imwrite('img_cnt.png', img_cnt)
    cnts_filter = contours_filter(freemans, windows_size = 11) # smooth the contours
    # img_cnt_filter = cv2.cvtColor(origin_img, cv2.COLOR_GRAY2BGR)
    img_cnt_filter = 255*np.ones([img.shape[0], img.shape[1], 3])
    img_cnt_filter = draw_cnts(cnts_filter, img_cnt_filter, color=(255, 0, 0), mode='contour') # draw the smoothed contours
    # cv2.imshow('img_cnt_filter', img_cnt_filter)
    cv2.imwrite('img_cnt_filter.png', img_cnt_filter)
    # Assemble the six-panel result figure.
    plt.figure(figsize=(9, 9.5))
    title_size = 12
    plt.subplot(321)
    plt.axis('off')
    plt.imshow(origin_img,cmap='gray')
    plt.title("Figure 1: Original image",fontdict={'weight':'normal','size': title_size})
    plt.subplot(322)
    plt.axis('off')
    plt.imshow(draw_img,cmap='gray')
    plt.title("Figure 2: Splited image",fontdict={'weight':'normal','size': title_size})
    plt.subplot(323)
    plt.axis('off')
    plt.imshow(region_img,cmap='gray')
    plt.title("Figure 3: Merged image",fontdict={'weight':'normal','size': title_size})
    plt.subplot(324)
    plt.axis('off')
    plt.imshow(contour_img,cmap='gray')
    plt.title("Figure 4: Contours",fontdict={'weight':'normal','size': title_size})
    plt.subplot(325)
    plt.axis('off')
    plt.imshow(cv2.cvtColor(img_cnt.astype(np.float32),cv2.COLOR_BGR2RGB))
    plt.title("Figure 5: Contours tracked by ChainCode",fontdict={'weight':'normal','size': title_size})
    plt.subplot(326)
    plt.axis('off')
    plt.imshow(cv2.cvtColor(img_cnt_filter.astype(np.float32),cv2.COLOR_BGR2RGB))
    plt.title("Figure 6: Filtered Contours",fontdict={'weight':'normal','size': title_size})
    plt.savefig(save_path, bbox_inches='tight')
    plt.show()
    cv2.waitKey(0)
| 2.515625 | 3 |
def StringToDict(s):
    """Return a dict mapping each character of *s* to its number of occurrences."""
    # Single pass with dict.get replaces the original two passes
    # (zero-filling from set(s), then counting).
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    return counts
| 3.46875 | 3 |
webapp/creators/corrections.py | PASTAplus/umbra | 0 | 12773690 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Mod: corrections
:Synopsis:
Read in the various corrections files in XML format:
NICKNAMES_FILE = 'corrections_nicknames.xml'
ORCID_CORRECTIONS_FILE = 'corrections_orcids.xml'
ORGANIZATIONS_FILE = 'organizations.xml'
OVERRIDES_FILE = 'corrections_overrides.xml'
PERSON_VARIANTS_FILE = 'corrections_name_variants.xml'
:Author:
ide
:Created:
6/1/21
"""
from lxml import etree
from webapp.config import Config
class Override:
    """
    A manual name correction loaded from the overrides XML file: maps an original
    (surname, givenname) pair, within a given scope, to the corrected pair.
    """
    def __init__(self, original_surname, original_givenname, surname, givenname, scope, _original_surname_raw=None):
        # Original (uncorrected) name as it appears in the data.
        self._original_surname = original_surname
        self._original_givenname = original_givenname
        # Corrected name to use instead.
        self._surname = surname
        self._givenname = givenname
        # Scope in which the correction applies.
        self._scope = scope
        # Optional raw form of the original surname (may be None).
        self._original_surname_raw = _original_surname_raw

    def __repr__(self):
        return f'{self.original_surname}, {self.original_givenname} -> {self.surname}, {self.givenname} - {self.scope}'

    @property
    def original_surname(self):
        return self._original_surname

    @property
    def original_givenname(self):
        return self._original_givenname

    @property
    def surname(self):
        return self._surname

    @property
    def givenname(self):
        return self._givenname

    @property
    def scope(self):
        return self._scope

    @property
    def original_surname_raw(self):
        return self._original_surname_raw
class PersonVariant:
    """
    One spelling variant of a person's name within a scope. Defines __eq__ and
    __hash__ together so variants can be collected in sets (see Person.variants).
    """
    def __init__(self, surname, givenname, scope):
        self._surname = surname
        self._givenname = givenname
        self._scope = scope

    def __repr__(self):
        return f'{self.surname}, {self.givenname} - {self.scope}'

    @property
    def surname(self):
        return self._surname

    @property
    def givenname(self):
        return self._givenname

    @property
    def scope(self):
        return self._scope

    def __hash__(self):
        return hash((self.surname, self.givenname, self.scope))

    def __eq__(self, other):
        return (self.surname, self.givenname, self.scope) == (other.surname, other.givenname, other.scope)
class Person:
    """A person identified by a set of PersonVariant spellings, with an optional comment."""
    def __init__(self, variants, comment):
        # Set of PersonVariant objects that all denote this person.
        self._variants = variants
        # Free-text comment from the XML file, or None.
        self._comment = comment

    def __repr__(self):
        return f'{self.variants} - {self.comment if self.comment else ""}'

    @property
    def variants(self):
        return self._variants

    @property
    def comment(self):
        return self._comment
class ORCIDCorrection:
    """
    An ORCID assignment for a (surname, givenname) pair. The *type* distinguishes
    'correction' from 'stipulation' entries (see init_orcid_corrections).
    """
    def __init__(self, type, surname, givenname, orcid):
        self._type = type
        self._surname = surname
        self._givenname = givenname
        self._orcid = orcid

    def __repr__(self):
        return f'{self.surname}, {self.givenname} - {self.orcid} ({self.type})'

    @property
    def surname(self):
        return self._surname

    @property
    def givenname(self):
        return self._givenname

    @property
    def orcid(self):
        return self._orcid

    @property
    def type(self):
        return self._type
def init_person_variants():
    """
    Load the person name-variants XML file and return a list of Person objects,
    each holding the set of PersonVariant spellings that denote one individual.
    """
    with open(f'{Config.DATA_FILES_PATH}/{Config.PERSON_VARIANTS_FILE}', 'r') as variants_file:
        xml = variants_file.read()
    root = etree.fromstring(xml.encode("utf-8"))
    persons = []
    for person_element in root.findall('person'):
        variants = {PersonVariant(variant_element.find('surname').text,
                                  variant_element.find('givenname').text,
                                  variant_element.find('scope').text)
                    for variant_element in person_element.findall('variant')}
        comment_element = person_element.find('comment')
        comment = comment_element.text if comment_element is not None else None
        persons.append(Person(variants, comment))
    return persons
def init_nicknames():
    """Load the nicknames XML file and return (name1, name2) tuples of equivalent given names."""
    with open(f'{Config.DATA_FILES_PATH}/{Config.NICKNAMES_FILE}', 'r') as nicknames_file:
        xml = nicknames_file.read()
    root = etree.fromstring(xml.encode("utf-8"))
    return [(element.find('name1').text, element.find('name2').text)
            for element in root.findall('nickname')]
def init_override_corrections():
    """Load the overrides XML file and return a list of Override corrections."""
    with open(f'{Config.DATA_FILES_PATH}/{Config.OVERRIDES_FILE}', 'r') as overrides_file:
        xml = overrides_file.read()
    root = etree.fromstring(xml.encode("utf-8"))
    override_corrections = []
    for override_element in root.findall('override'):
        original = override_element.find('original')
        corrected = override_element.find('corrected')
        # <surname_raw> is optional.
        raw_element = original.find('surname_raw')
        raw_surname = raw_element.text if raw_element is not None else None
        override_corrections.append(Override(original.find('surname').text,
                                             original.find('givenname').text,
                                             corrected.find('surname').text,
                                             corrected.find('givenname').text,
                                             override_element.find('scope').text,
                                             raw_surname))
    return override_corrections
def init_orcid_corrections():
    """
    Load the ORCID corrections XML file and return ORCIDCorrection objects for both
    the 'correction' and 'stipulation' entry types (corrections first, preserving
    the original ordering).
    """
    with open(f'{Config.DATA_FILES_PATH}/{Config.ORCID_CORRECTIONS_FILE}', 'r') as orcids_file:
        xml = orcids_file.read()
    root = etree.fromstring(xml.encode("utf-8"))
    orcid_corrections = []
    # The two entry types share an identical schema; the original duplicated the
    # whole parsing loop for each — fold them into one loop over the type names.
    for entry_type in ('correction', 'stipulation'):
        for element in root.findall(entry_type):
            surname = element.find('surname').text
            givenname = element.find('givenname').text
            orcid = element.find('orcid').text
            orcid_corrections.append(ORCIDCorrection(entry_type, surname, givenname, orcid))
    return orcid_corrections
if __name__ == '__main__':
    # Smoke test: load each corrections file and print its parsed contents.
    persons = init_person_variants()
    for person in persons:
        print(person)
    print()
    orcid_corrections = init_orcid_corrections()
    for orcid_correction in orcid_corrections:
        print(orcid_correction)
    print()
    override_corrections = init_override_corrections()
    for override_correction in override_corrections:
print(override_correction) | 2.296875 | 2 |
Zane/cogs/dbots.py | crrapi/Zane | 0 | 12773691 | <filename>Zane/cogs/dbots.py
import os
import dbl
import asyncio
class DiscordBotsOrgAPI:
    """Background task that periodically posts the bot's server count to discordbots.org (DBL)."""
    def __init__(self, bot):
        self.bot = bot
        # DBL API token must be set in the environment; raises KeyError otherwise.
        self.token = os.environ['DBL_TOKEN']
        self.dbl_py = dbl.Client(self.bot, self.token)
        # Start the periodic updater on the bot's event loop.
        self.bot.loop.create_task(self._update_stats())

    async def _update_stats(self):
        """Post the server/shard count every 30 minutes, forever."""
        while True:
            try:
                await self.dbl_py.post_server_count(shard_count=len(self.bot.shards))
            except Exception:
                # Best-effort: a failed post must not crash the bot.
                # NOTE(review): failures are silently dropped — consider logging them.
                pass
            await asyncio.sleep(1800)
def setup(bot):
    """discord.py extension entry point: register the cog on the bot."""
    bot.add_cog(DiscordBotsOrgAPI(bot))
| 2.40625 | 2 |
fragile/core/base_classes.py | Guillemdb/fragile | 35 | 12773692 | <reponame>Guillemdb/fragile<filename>fragile/core/base_classes.py
from typing import Callable, Dict, List, Tuple, Union
import judo
from judo import random_state
from fragile.core.states import OneWalker, States, StatesEnv, StatesModel, StatesWalkers
from fragile.core.typing import StateDict, Tensor
class StatesOwner:
    """
    Every class that stores its data in :class:`States` must inherit \
    from this class.
    """

    random_state = random_state  # shared random number generator (judo's global random_state)
    STATE_CLASS = States  # subclasses override this with their concrete States subclass

    @classmethod
    def seed(cls, seed: int = None):
        """Set the random seed of the random number generator."""
        # NOTE(review): when seed is None this relies on random_state.seed()
        # returning a usable seed value — confirm judo's random_state does so
        # (numpy's RandomState.seed() returns None).
        seed = random_state.seed() if seed is None else seed
        cls.random_state.seed(seed)

    @classmethod
    def get_params_dict(cls) -> StateDict:
        """
        Return a state_dict to be used for instantiating a States class.

        In order to define the tensors, a state_dict dictionary needs to be specified \
        using the following structure::

            import numpy
            state_dict = {"name_1": {"size": tuple([1]),
                                     "dtype": numpy.float32,
                                   },
                          }

        Where tuple is a tuple indicating the shape of the desired tensor, that \
        will be accessed using the name_1 attribute of the class.
        """
        raise NotImplementedError

    def create_new_states(self, batch_size: int) -> "StatesOwner.STATE_CLASS":
        """Create new states of given batch_size to store the data of the class."""
        param_dict = self.get_params_dict()
        return self.STATE_CLASS(state_dict=param_dict, batch_size=batch_size)

    def states_from_data(self, batch_size: int, **kwargs) -> States:
        """
        Initialize a :class:`States` with the data provided as kwargs.

        Args:
            batch_size: Number of elements in the first dimension of the \
                       :class:`State` attributes.
            **kwargs: Attributes that will be added to the returned :class:`States`.

        Returns:
            A new :class:`States` created with the class ``params_dict`` updated \
            with the attributes passed as keyword arguments.

        """
        state = self.create_new_states(batch_size=batch_size)
        state.update(**kwargs)
        return state
class BaseCritic(StatesOwner):
    """
    Perform additional computation. It can be used in a :class:`Walkers` \
    or a :class:`Model`.
    """

    random_state = random_state  # shared random number generator (judo's global random_state)

    @classmethod
    def get_params_dict(cls) -> StateDict:
        """
        Return a state_dict to be used for instantiating a States class.

        In order to define the tensors, a state_dict dictionary needs to be specified \
        using the following structure::

            import numpy
            state_dict = {"name_1": {"size": tuple([1]),
                                     "dtype": numpy.float32,
                                   },
                          }

        Where tuple is a tuple indicating the shape of the desired tensor, that \
        will be accessed using the name_1 attribute of the class.
        """
        # A critic contributes a single scalar score per walker.
        state_dict = {
            "critic_score": {"size": tuple([1]), "dtype": judo.float32},
        }
        return state_dict

    def calculate(
        self,
        batch_size: int = None,
        model_states: StatesModel = None,
        env_states: StatesEnv = None,
        walkers_states: StatesWalkers = None,
    ) -> States:
        """
        Calculate the target time step values.

        Args:
            batch_size: Number of new points to the sampled.
            model_states: :class:`StatesModel` corresponding to the :class:`Model` data.
            env_states: :class:`StatesEnv` corresponding to the :class:`Environment` data.
            walkers_states: :class:`StatesWalkers` corresponding to the :class:`Walkers` data.

        Returns:
            States containing the the internal state of the :class:`BaseCritic`

        """
        raise NotImplementedError

    def reset(
        self,
        batch_size: int = 1,
        model_states: StatesModel = None,
        env_states: StatesEnv = None,
        walkers_states: StatesWalkers = None,
        *args,
        **kwargs
    ) -> Union[States, None]:
        """
        Restart the `Critic` and reset its internal state.

        The base implementation is a no-op; subclasses with internal state override it.

        Args:
            batch_size: Number of elements in the first dimension of the model \
                        States data.
            model_states: States corresponding to model data. If provided the \
                          model will be reset to this state.
            env_states: :class:`StatesEnv` corresponding to the :class:`Environment` data.
            walkers_states: :class:`StatesWalkers` corresponding to the :class:`Walkers` data.
            args: Additional arguments not related to :class:`BaseCritic` data.
            kwargs: Additional keyword arguments not related to :class:`BaseCritic` data.

        Returns:
            States containing the information of the current state of the \
            :class:`BaseCritic` (after the reset).

        """
        pass

    def update(
        self,
        batch_size: int = 1,
        model_states: StatesModel = None,
        env_states: StatesEnv = None,
        walkers_states: StatesWalkers = None,
        *args,
        **kwargs
    ) -> Union[States, None]:
        """
        Update the :class:`BaseCritic` internal state.

        The base implementation is a no-op; subclasses with internal state override it.

        Args:
            batch_size: Number of elements in the first dimension of the model \
                        States data.
            model_states: States corresponding to model data. If provided the \
                          model will be reset to this state.
            env_states: :class:`StatesEnv` corresponding to the :class:`Environment` data.
            walkers_states: :class:`StatesWalkers` corresponding to the :class:`Walkers` data.
            args: Additional arguments not related to :class:`BaseCritic` data.
            kwargs: Additional keyword arguments not related to :class:`BaseCritic` data.

        Returns:
            States containing the information of the current state of the \
            :class:`BaseCritic`.

        """
        pass
class BaseEnvironment(StatesOwner):
    """
    The Environment is in charge of stepping the walkers, acting as a state \
    transition function.
    For every different problem a new Environment needs to be implemented \
    following the :class:`BaseEnvironment` interface.
    """
    # States subclass used to store the data owned by an Environment.
    STATE_CLASS = StatesEnv
    def __call__(self, *args, **kwargs) -> "BaseEnvironment":
        """
        Return the current instance of :class:`BaseEnvironment`.
        This is used to avoid defining an ``environment_callable`` as \
        ``lambda: environment_instance`` when initializing a :class:`Swarm`. If the \
        :class:`Environment` is passed to a remote process, you may need \
        to write custom serialization for it, or resort to creating an appropriate \
        ``environment_callable``.
        """
        return self
    def step(self, model_states: StatesModel, env_states: StatesEnv) -> StatesEnv:
        """
        Step the environment for a batch of walkers.
        Template method: ``states_to_data`` extracts the transition inputs, \
        ``make_transitions`` performs the actual state transition, and \
        ``states_from_data`` repackages the result as a :class:`StatesEnv`.
        Args:
            model_states: :class:`StatesModel` representing the data to be used \
                         to act on the environment.
            env_states: :class:`StatesEnv` representing the data to be set in \
                       the environment.
        Returns:
            States representing the next state of the environment and all \
            the needed information.
        """
        transition_data = self.states_to_data(model_states=model_states, env_states=env_states)
        if not isinstance(transition_data, (dict, tuple)):
            raise ValueError(
                "The returned values from states_to_data need to "
                "be an instance of dict or tuple. "
                "Got %s instead" % type(transition_data)
            )
        # Tuples are forwarded positionally, dictionaries as keyword arguments.
        new_data = (
            self.make_transitions(*transition_data)
            if isinstance(transition_data, tuple)
            else self.make_transitions(**transition_data)
        )
        new_env_state = self.states_from_data(len(env_states), **new_data)
        return new_env_state
    def states_from_data(self, batch_size: int, **kwargs) -> StatesEnv:
        """
        Initialize a :class:`StatesEnv` with the data provided as kwargs.
        Args:
            batch_size: Number of elements in the first dimension of the \
                       :class:`State` attributes.
            **kwargs: Attributes that will be added to the returned :class:`States`.
        Returns:
            A new :class:`StatesEnv` created with the ``params_dict``, and \
            updated with the attributes passed as keyword arguments.
        """
        return super(BaseEnvironment, self).states_from_data(batch_size=batch_size, **kwargs)
    def states_to_data(
        self, model_states: StatesModel, env_states: StatesEnv
    ) -> Union[Dict[str, Tensor], Tuple[Tensor, ...]]:
        """
        Extract the data from the :class:`StatesEnv` and the :class:`StatesModel` \
        and return the values that will be passed to ``make_transitions``.
        Args:
            model_states: :class:`StatesModel` representing the data to be used \
                         to act on the environment.
            env_states: :class:`StatesEnv` representing the data to be set in \
                       the environment.
        Returns:
            Tuple of arrays or dictionary of arrays. If the returned value is a \
            tuple it will be passed as *args to ``make_transitions``. If the returned \
            value is a dictionary it will be passed as **kwargs to ``make_transitions``.
        """
        raise NotImplementedError
    def make_transitions(self, *args, **kwargs) -> Dict[str, Tensor]:
        """
        Return the data corresponding to the new state of the environment after \
        using the input data to make the corresponding state transition.
        Args:
            *args: List of arguments passed if the returned value from the \
                  ``states_to_data`` function of the class was a tuple.
            **kwargs: Keyword arguments passed if the returned value from the \
                     ``states_to_data`` function of the class was a dictionary.
        Returns:
            Dictionary containing the data representing the state of the environment \
            after the state transition. The keys of the dictionary are the names of \
            the data attributes and its values are arrays representing a batch of \
            new values for that attribute.
            The :class:`StatesEnv` returned by ``step`` will contain the returned \
            data.
        """
        raise NotImplementedError
    def get_params_dict(self) -> StateDict:
        """
        Return a state_dict to be used for instantiating the states containing \
        the data describing the Environment.
        In order to define the arrays, a state_dict dictionary needs to be specified \
        using the following structure::
            import numpy as numpy
            # Example of a state_dict for planning.
            state_dict = {
                "states": {"size": self._env.get_state().shape, "dtype": numpy.int64},
                "observs": {"size": self._env.observation_space.shape, "dtype": numpy.float32},
                "rewards": {"dtype": numpy.float32},
                "oobs": {"dtype": numpy.bool_},
                "terminals": {"dtype": numpy.bool_},
            }
        """
        raise NotImplementedError
    def reset(self, batch_size: int = 1, env_states: StatesEnv = None, **kwargs) -> StatesEnv:
        """
        Reset the environment and return a States class with batch_size copies \
        of the initial state.
        Args:
            batch_size: Number of walkers that the resulting state will have.
            env_states: States class used to set the environment to an arbitrary \
                       state.
            kwargs: Additional keyword arguments not related to environment data.
        Returns:
            States class containing the information of the environment after the \
            reset.
        """
        raise NotImplementedError
class BaseModel(StatesOwner):
    """
    The model is in charge of calculating how the walkers will act on the \
    Environment, effectively working as a policy.
    """
    # States subclass used to store the data owned by a Model.
    STATE_CLASS = StatesModel
    def __call__(self, *args, **kwargs) -> "BaseModel":
        """
        Return the current instance of :class:`BaseModel`.
        This is used to avoid defining a ``model_callable`` as \
        ``lambda: model_instance`` when initializing a :class:`Swarm`. If a \
        :class:`Model` is passed to a remote process, you may need to write custom \
        serialization for it, or resort to creating an appropriate \
        ``model_callable``.
        """
        return self
    def get_params_dict(self) -> StateDict:
        """
        Return a state_dict to be used for instantiating the states containing \
        the data describing the Model.
        In order to define the arrays, a state_dict dictionary needs to be \
        specified using the following structure::
            import numpy as numpy
            # Example of a state_dict for a DiscreteUniform Model.
            n_actions = 10
            state_dict = {"actions": {"size": (n_actions,),
                                      "dtype": numpy.float32,
                                     },
                          "critic": {"size": tuple([n_actions]),
                                     "dtype": numpy.float32,
                                    },
                         }
        Where size is a tuple indicating the shape of the desired tensor, \
        that will be accessed using the actions attribute of the class.
        """
        raise NotImplementedError
    def reset(
        self, batch_size: int = 1, model_states: StatesModel = None, *args, **kwargs
    ) -> StatesModel:
        """
        Restart the model and reset its internal state.
        Args:
            batch_size: Number of elements in the first dimension of the model \
                        States data.
            model_states: States corresponding to model data. If provided the \
                         model will be reset to this state.
            args: Additional arguments not related to model data.
            kwargs: Additional keyword arguments not related to model data.
        Returns:
            States containing the information of the current state of the \
            model (after the reset).
        """
        raise NotImplementedError
    def predict(
        self,
        batch_size: int = None,
        model_states: StatesModel = None,
        env_states: StatesEnv = None,
        walkers_states: StatesWalkers = None,
    ) -> StatesModel:
        """
        Calculate States containing the data needed to interact with the environment.
        Args:
            batch_size: Number of new points to be sampled.
            model_states: States corresponding to the model data.
            env_states: States corresponding to the environment data.
            walkers_states: States corresponding to the walkers data.
        Returns:
            Updated model_states with new model data.
        """
        raise NotImplementedError
class BaseWalkers(StatesOwner):
    """
    The Walkers is a data structure that takes care of all the data involved \
    in making a Swarm evolve.
    """
    # Shared random number generator for all walkers instances.
    random_state = random_state
    # States subclass used to store the data owned by the Walkers.
    STATE_CLASS = StatesWalkers
    def __init__(
        self,
        n_walkers: int,
        env_state_params: dict,
        model_state_params: dict,
        accumulate_rewards: bool = True,
        max_epochs: int = None,
    ):
        """
        Initialize a `BaseWalkers`.
        Args:
            n_walkers: Number of walkers. This is the number of states that will\
             be iterated in parallel.
            env_state_params: Dictionary to instantiate the :class:`StatesEnv`\
             of an :class:`Environment`.
            model_state_params: Dictionary to instantiate the :class:`StatesModel`\
             of a :class:`Model`.
            accumulate_rewards: If `True` accumulate the rewards after each step \
             of the environment.
            max_epochs: Maximum number of iterations that the walkers are allowed \
             to perform. Defaults to a practically unbounded value when None.
        """
        super(BaseWalkers, self).__init__()
        # int(1e12) acts as "effectively unlimited" when no cap is provided.
        self.max_epochs = max_epochs if max_epochs is not None else int(1e12)
        self._epoch = 0
        self.n_walkers = n_walkers
        # Placeholders initialized by concrete subclasses on reset.
        self.id_walkers = None
        self.death_cond = None
        self._accumulate_rewards = accumulate_rewards
        self.env_states_params = env_state_params
        self.model_states_params = model_state_params
    def __call__(self, *args, **kwargs) -> "BaseWalkers":
        """
        Return the current instance of :class:`BaseWalkers`.
        This is used to avoid defining a ``walkers_callable`` as \
        ``lambda: walkers_instance`` when initializing a :class:`Swarm`. If the \
        :class:`Walkers` is passed to a remote process, you may need \
        to write custom serialization for it, or resort to creating an appropriate \
        ``walkers_callable``.
        """
        return self
    def __len__(self) -> int:
        """Return the number of walkers."""
        return self.n
    @property
    def n(self) -> int:
        """Return the number of walkers."""
        return self.n_walkers
    @property
    def epoch(self) -> int:
        """Return the current epoch of the algorithm."""
        return self._epoch
    @property
    def env_states(self) -> StatesEnv:
        """Return the States class where all the environment information is stored."""
        raise NotImplementedError
    @property
    def model_states(self) -> StatesModel:
        """Return the States class where all the model information is stored."""
        raise NotImplementedError
    @property
    def states(self) -> StatesWalkers:
        """Return the States class where all the walkers information is stored."""
        raise NotImplementedError
    def increment_epoch(self):
        """Increment the current epoch counter."""
        self._epoch += 1
    def get_params_dict(self) -> StateDict:
        """Return the params_dict of the internal StateOwners."""
        # Aggregate the parameter dictionaries of the three owned States.
        state_dict = {
            name: getattr(self, name).get_params_dict()
            for name in {"states", "env_states", "model_states"}
        }
        return state_dict
    def update_states(
        self, env_states: StatesEnv = None, model_states: StatesModel = None, **kwargs
    ) -> None:
        """
        Update the States variables that do not contain internal data and \
        accumulate the rewards in the internal states if applicable.
        Args:
            env_states: States containing the data associated with the Environment.
            model_states: States containing data associated with the Model.
            **kwargs: Internal states will be updated via keyword arguments.
        """
        raise NotImplementedError
    def reset(
        self,
        env_states: StatesEnv = None,
        model_states: StatesModel = None,
        walkers_states: StatesWalkers = None,
    ):
        """
        Reset a :class:`Walkers` and clear the internal data to start a \
        new search process.
        Restart all the variables needed to perform the fractal evolution process.
        Args:
            model_states: :class:`StatesModel` that define the initial state of the environment.
            env_states: :class:`StatesEnv` that define the initial state of the model.
            walkers_states: :class:`StatesWalkers` that define the internal states of the walkers.
        """
        raise NotImplementedError
    def balance(self):
        """Perform FAI iteration to clone the states."""
        raise NotImplementedError
    def calculate_distances(self):
        """Calculate the distances between the different observations of the walkers."""
        raise NotImplementedError
    def calculate_virtual_reward(self):
        """Apply the virtual reward formula to account for all the different goal scores."""
        raise NotImplementedError
    def calculate_end_condition(self) -> bool:
        """Return a boolean that controls the stopping of the iteration loop. \
        If True, the iteration process stops."""
        raise NotImplementedError
    def clone_walkers(self):
        """Sample the clone probability distribution and clone the walkers accordingly."""
        raise NotImplementedError
    def get_in_bounds_compas(self) -> Tensor:
        """
        Return an array of indexes corresponding to an alive walker chosen \
        at random.
        """
        raise NotImplementedError
class BaseSwarm:
    """
    The Swarm implements the iteration logic to make the :class:`Walkers` evolve.
    It contains the necessary logic to use an Environment, a Model, and a \
    Walkers instance to create the algorithm execution loop.
    """
    def __init__(
        self,
        env: Callable[[], BaseEnvironment],
        model: Callable[[BaseEnvironment], BaseModel],
        walkers: Callable[..., BaseWalkers],
        n_walkers: int,
        reward_scale: float = 1.0,
        distance_scale: float = 1.0,
        *args,
        **kwargs
    ):
        """
        Initialize a :class:`BaseSwarm`.
        Args:
            env: A callable that returns an instance of an Environment.
            model: A callable that returns an instance of a Model.
            walkers: A callable that returns an instance of BaseWalkers.
            n_walkers: Number of walkers of the swarm.
            reward_scale: Virtual reward exponent for the reward score.
            distance_scale: Virtual reward exponent for the distance score.
            *args: Additional args passed to init_swarm.
            **kwargs: Additional kwargs passed to init_swarm.
        """
        # Components are created by init_swarm; placeholders keep the
        # attributes defined even if a subclass overrides the wiring.
        self._walkers = None
        self._model = None
        self._env = None
        self.tree = None
        self._epoch = 0
        self.init_swarm(
            env_callable=env,
            model_callable=model,
            walkers_callable=walkers,
            n_walkers=n_walkers,
            reward_scale=reward_scale,
            distance_scale=distance_scale,
            *args,
            **kwargs
        )
    @property
    def epoch(self) -> int:
        """Return the current epoch of the search algorithm."""
        return self._epoch
    @property
    def max_epochs(self) -> int:
        """Return the maximum number of epochs allowed for the algorithm."""
        # Delegated to the walkers, which own the epoch bookkeeping.
        return self.walkers.max_epochs
    @max_epochs.setter
    def max_epochs(self, val) -> None:
        """Set the maximum number of epochs allowed for the algorithm."""
        self.walkers.max_epochs = val
    @property
    def env(self) -> BaseEnvironment:
        """All the simulation code (problem specific) will be handled here."""
        return self._env
    @property
    def model(self) -> BaseModel:
        """
        All the policy and random perturbation code (problem specific) will \
        be handled here.
        """
        return self._model
    @property
    def walkers(self) -> BaseWalkers:
        """
        Access the :class:`Walkers` in charge of implementing the FAI \
        evolution process.
        """
        return self._walkers
    def increment_epoch(self) -> None:
        """Increment the current epoch of the algorithm."""
        # Keep the swarm's counter and the walkers' counter in sync.
        self._epoch += 1
        self.walkers.increment_epoch()
    def reset(
        self,
        model_states: StatesModel = None,
        env_states: StatesEnv = None,
        walkers_states: StatesWalkers = None,
    ):
        """
        Reset a :class:`fragile.Swarm` and clear the internal data to start a \
        new search process.
        Args:
            model_states: States that define the initial state of the environment.
            env_states: States that define the initial state of the model.
            walkers_states: States that define the internal states of the walkers.
        """
        raise NotImplementedError
    def run(
        self,
        root_walker: OneWalker = None,
        model_states: StatesModel = None,
        env_states: StatesEnv = None,
        walkers_states: StatesWalkers = None,
    ):
        """
        Run a new search process until the stop condition is met.
        Args:
            root_walker: Walker representing the initial state of the search. \
                        The walkers will be reset to this walker, and it will \
                        be added to the root of the :class:`StateTree` if any.
            model_states: States that define the initial state of the environment.
            env_states: States that define the initial state of the model.
            walkers_states: States that define the internal states of the walkers.
        Returns:
            None.
        """
        raise NotImplementedError
    def step_walkers(self):
        """
        Make the walkers undergo a perturbation process in the swarm \
        :class:`Environment`.
        This function updates the :class:`StatesEnv` and the :class:`StatesModel`.
        """
        raise NotImplementedError
    def init_swarm(
        self,
        env_callable: Callable,
        model_callable: Callable,
        walkers_callable: Callable,
        n_walkers: int,
        reward_scale: float = 1.0,
        distance_scale: float = 1.0,
        *args,
        **kwargs
    ):
        """
        Initialize and set up all the necessary internal variables to run the swarm.
        This process involves instantiating the Swarm, the Environment and the \
        model.
        Args:
            env_callable: A callable that returns an instance of an
                :class:`fragile.Environment`.
            model_callable: A callable that returns an instance of a
                :class:`fragile.Model`.
            walkers_callable: A callable that returns an instance of
                :class:`fragile.Walkers`.
            n_walkers: Number of walkers of the swarm.
            reward_scale: Virtual reward exponent for the reward score.
            distance_scale: Virtual reward exponent for the distance score.
            args: Additional arguments passed to reset.
            kwargs: Additional keyword arguments passed to reset.
        Returns:
            None.
        """
        raise NotImplementedError
class BaseWrapper:
    """Transparent proxy that delegates behaviour to a wrapped object.

    The wrapped instance is stored under a configurable attribute name,
    exposed through :attr:`unwrapped`, and common protocols as well as
    unknown attribute lookups are forwarded to it.
    """
    def __init__(self, data, name: str = "_unwrapped"):
        """
        Initialize a :class:`BaseWrapper`.
        Args:
            data: Object that will be wrapped.
            name: Assign a custom attribute name to the wrapped object.
        """
        setattr(self, name, data)
        self.__name = name
    @property
    def unwrapped(self):
        """Access the wrapped object."""
        return getattr(self, self.__name)
    def __repr__(self):
        return repr(self.unwrapped)
    def __call__(self, *args, **kwargs):
        """Call the wrapped object."""
        return self.unwrapped(*args, **kwargs)
    def __str__(self):
        return str(self.unwrapped)
    def __len__(self):
        return len(self.unwrapped)
    def __getattr__(self, attr):
        # Delegate through nested wrappers so chains of BaseWrapper behave
        # like a single level of wrapping.
        target = self.unwrapped
        if isinstance(target, BaseWrapper):
            return getattr(target, attr)
        return target.__getattribute__(attr)
| 2.5625 | 3 |
marlo/crowdai_helpers.py | spMohanty/marlo | 214 | 12773693 | #!/usr/bin/env python
import os
import crowdai_api
########################################################################
# Instantiate Event Notifier
########################################################################
# Module-level singleton; CrowdAiNotifier's static methods reference it
# for the CROWDAI_EVENT_* constants when registering events.
crowdai_events = crowdai_api.events.CrowdAIEvents()
class CrowdAIMarloEvents:
    """String constants naming the MarLo events reported to the crowdAI grader."""
    # Token negotiation and grading lifecycle.
    # NOTE(review): the value deliberately(?) reads REQUEST_JOIN_TOKENS while
    # the attribute is REQUEST_ENV_JOIN_TOKENS -- confirm against the grader.
    REQUEST_ENV_JOIN_TOKENS="marlo.events.REQUEST_JOIN_TOKENS"
    END_OF_GRADING="marlo.events.END_OF_GRADING"
    # Per-game events emitted while an environment is running.
    GAME_INIT="marlo.events.GAME_INIT"
    ENV_RESET="marlo.events.ENV_RESET"
    ENV_ACTION="marlo.events.ENV_ACTION"
    STEP_REWARD="marlo.events.STEP_REWARD"
    # Episode lifecycle states.
    EPISODE_PENDING="marlo.events.EPISODE_PENDING"
    EPISODE_INITIATED="marlo.events.EPISODE_INITIATED"
    EPISODE_RUNNING="marlo.events.EPISODE_RUNNING"
    EPISODE_DONE="marlo.events.EPISODE_DONE" #Episode Complete
    EPISODE_ERROR="marlo.events.EPISODE_ERROR"
    # Whole-evaluation lifecycle states.
    EVALUATION_PENDING="marlo.events.EVALUATION_PENDING"
    EVALUATION_RUNNING="marlo.events.EVALUATION_RUNNING"
    EVALUATION_ERROR="marlo.events.EVALUATION_ERROR"
    EVALUATION_COMPLETE="marlo.events.EVALUATION_COMPLETE"
def is_grading():
    """Return whether the code is being executed inside the crowdAI evaluation
    system.

    The grader marks its environment by setting the ``CROWDAI_IS_GRADING``
    environment variable; any non-empty value means grading is active.

    :returns: bool
    """
    # os.getenv returns the raw string (or the False default); coerce to a
    # real bool so the documented return type holds for all callers.
    return bool(os.getenv("CROWDAI_IS_GRADING", False))
def evaluator_join_token(params={}):
    """Request evaluator join tokens from the crowdAI evaluation system.

    :param params: a dictionary containing game params. Note that only a certain
                   subset of params will be considered by the grader.
    :type params: dict
    :returns: a list of strings representing join tokens for all the agents
              in a game; or marks the end of the evaluation
    """
    events_client = crowdai_api.CrowdAIEvents()
    request_payload = {
        "event_type": CrowdAIMarloEvents.REQUEST_ENV_JOIN_TOKENS,
        "params": params,
    }
    # Blocking call: wait for the grader to hand back the JOIN_TOKENS.
    response = events_client.register_event(
        event_type=events_client.CROWDAI_EVENT_INFO,
        message="",
        payload=request_payload,
        blocking=True,
    )
    # An empty/falsy response signals that no further episodes remain.
    if not response:
        register_end_of_grading(events_client)
    return response
def register_end_of_grading(crowdai_events):
    """Mark the end of an evaluation and wait for the rest of the
    evaluation system to complete its post processing.

    :param crowdai_events: a crowdai events object
    :type `crowdai_api.CrowdAIEvents` object
    """
    end_payload = {"event_type": CrowdAIMarloEvents.END_OF_GRADING}
    # Blocking: do not return until the grader acknowledges the event.
    crowdai_events.register_event(
        event_type=crowdai_events.CROWDAI_EVENT_INFO,
        message="",
        payload=end_payload,
        blocking=True,
    )
class CrowdAiNotifier():
    """Static helpers that push MarLo lifecycle events to the crowdAI grader."""
    @staticmethod
    def _send_notification(event_type, message, payload=None, blocking=False):
        """Register a single event with the grader.

        The challenge identifier is merged into the payload so every event
        is attributed to the MarLo challenge.
        """
        crowdai_events = crowdai_api.events.CrowdAIEvents()
        default_payload = {"challenge_id": "MarLo"}
        default_payload.update(payload or {})
        # Bug fix: the original passed the un-merged `payload`, silently
        # discarding the challenge_id merged into default_payload.
        crowdai_events.register_event(event_type, message, default_payload, blocking)
    @staticmethod
    def _game_init():
        """Report that a game has been initialized."""
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_INFO,
            message="Game Initialized",
            payload={
                "event_type" : CrowdAIMarloEvents.GAME_INIT
            },
            blocking=False)
    @staticmethod
    def _env_reset():
        """Report an environment reset."""
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_INFO,
            message="Environment Reset",
            payload={
                "event_type" : CrowdAIMarloEvents.ENV_RESET
            },
            blocking=False)
    @staticmethod
    def _env_action(action):
        """Report the action taken in the environment."""
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_INFO,
            message="",
            payload={
                "event_type" : CrowdAIMarloEvents.ENV_ACTION,
                "action": action
            },
            blocking=False)
    @staticmethod
    def _step_reward(reward):
        """Report the reward obtained at the current step."""
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_INFO,
            message="",
            payload={
                "event_type" : CrowdAIMarloEvents.STEP_REWARD,
                "r":reward
            },
            blocking=False)
    @staticmethod
    def _episode_done():
        """Report that the current episode has completed."""
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_INFO,
            message="",
            payload={
                "event_type" : CrowdAIMarloEvents.EPISODE_DONE,
            },
            blocking=False)
    @staticmethod
    def _env_error(error_message):
        """Report an execution error with its message."""
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_ERROR,
            message="execution_error",
            payload={
                "event_type" : CrowdAIMarloEvents.EPISODE_ERROR,
                "error_message":error_message
            },
            blocking=False)
| 2.421875 | 2 |
utils/transfer_utils.py | sunayana/ph-poverty-mapping | 61 | 12773694 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""Utility methods for training Night Lights Model"""
from __future__ import print_function, division
import time
import os
import copy
import gc
import shutil
import wandb
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import pandas as pd
from decimal import Decimal
from sklearn import metrics
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torchsummary import summary
from torch.autograd import Variable
import logging
logging.basicConfig(level=logging.DEBUG)
SEED = 42
np.random.seed(SEED)  # fix the numpy RNG for reproducible runs
# Prefer the first CUDA device when available, otherwise fall back to CPU.
USE_GPU = "cuda:0" if torch.cuda.is_available() else "cpu"
DEVICE = torch.device(USE_GPU)
# ImageNet per-channel RGB normalization statistics, used by the
# torchvision transforms in load_transform_data and by imshow/get_embedding.
IMGNET_MEAN = [0.485, 0.456, 0.406]
IMGNET_STD = [0.229, 0.224, 0.225]
NUM_IMGS = 5  # cap on validation example images logged to wandb per epoch
def load_transform_data(data_dir="../data", batch_size=32):
    """ Transforms the training and validation sets.
    Source: https://discuss.pytorch.org/t/questions-about-imagefolder/774/6
    Parameters
    ----------
    data_dir : str
        Directory of the training and validations image sets
    batch_size : int (default is 32)
        Batch size
    Returns
    -------
    dict
        Contains the set images for training and validation set
    list
        Contains dataset sizes
    list
        Contains class names
    """
    # Normalization is identical for both phases; augmentation (random
    # flips) is applied to the training split only.
    normalize = transforms.Normalize(IMGNET_MEAN, IMGNET_STD)
    data_transforms = {
        "train": transforms.Compose(
            [
                transforms.Resize(400),
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
                transforms.ToTensor(),
                normalize,
            ]
        ),
        "val": transforms.Compose(
            [
                transforms.Resize(400),
                transforms.ToTensor(),
                normalize,
            ]
        ),
    }
    image_datasets = {}
    dataloaders = {}
    dataset_sizes = {}
    for phase in ("train", "val"):
        dataset = datasets.ImageFolder(
            os.path.join(data_dir, phase), data_transforms[phase]
        )
        image_datasets[phase] = dataset
        dataloaders[phase] = torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=4,
        )
        dataset_sizes[phase] = len(dataset)
    class_names = image_datasets["train"].classes
    return dataloaders, dataset_sizes, class_names
def imshow(inp, title=None, size=(20, 20)):
    """Imshow for Pytorch tensor.
    Parameters
    ----------
    inp : torch.Tensor
        The tensor of the input image
    title : str (default is None)
        Title of the image
    size : tuple (default is (20, 20))
        Size of image: (width, height)
    """
    plt.figure(figsize=size)
    # CHW tensor -> HWC array expected by matplotlib.
    img = inp.numpy().transpose((1, 2, 0))
    # Undo the ImageNet normalization and clamp into the displayable range.
    img = np.clip(img * IMGNET_STD + IMGNET_MEAN, 0, 1)
    plt.imshow(img)
    if title is not None:
        plt.title(title)
def save_plot(fig_dir, dict_, metric="loss"):
    """Saves train/ val loss curve as a PNG file
    Parameters
    ----------
    fig_dir : str
        Directory where the figure is written (created if missing)
    dict_ : dict
        A dictionary containing phase (train or val) as keys
        and the series of evaluation metrics until current epoch as values
    metric : str
        Label of error metric, used for the y-axis label and the filename
    """
    fig = plt.figure()
    for phase in dict_:
        plt.plot(dict_[phase], label=phase)
    plt.xlabel("epoch")
    plt.ylabel(metric)
    plt.legend()
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(fig_dir, exist_ok=True)
    # os.path.join fixes the original string concatenation, which produced
    # paths like "figsloss.png" when fig_dir lacked a trailing separator.
    fig.savefig(os.path.join(fig_dir, metric + ".png"))
    plt.close(fig)
def save_checkpoint(
    state, is_best, filename, checkpoint_dir
):
    """Saves latest model
    Parameters
    ----------
    state : dict
        State of the model to be saved
    is_best : boolean
        Whether or not current model is the best model
    filename : str
        File name of the checkpoint inside checkpoint_dir
    checkpoint_dir : str
        Path to models (created if missing)
    """
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(checkpoint_dir, exist_ok=True)
    # os.path.join fixes the original string concatenation, which broke
    # when checkpoint_dir lacked a trailing separator.
    checkpoint_path = os.path.join(checkpoint_dir, filename)
    torch.save(state, checkpoint_path)
    # Mirror the checkpoint into the wandb run directory so it is uploaded.
    torch.save(state, os.path.join(wandb.run.dir, filename))
    if is_best:
        shutil.copyfile(
            checkpoint_path,
            os.path.join(checkpoint_dir, "model_best.pt"),
        )
def load_checkpoint(
    model_best_path,
    model=None,
    optimizer=None,
    scheduler=None,
    epoch=0,
):
    """Restore model/optimizer state from a checkpoint file.

    Parameters
    ----------
    model_best_path : str
        Path to a checkpoint produced by ``save_checkpoint``.
    model : torch.nn.Module, optional
        Model whose weights are restored in place.
    optimizer : torch.optim.Optimizer, optional
        Optimizer whose state is restored in place.
    scheduler : optional
        Unused; kept for backward compatibility with existing callers.
    epoch : int
        Fallback epoch value when the checkpoint does not record one.

    Returns
    -------
    tuple
        ``(model, optimizer, epoch)``.
    """
    if os.path.isfile(model_best_path):
        checkpoint = torch.load(model_best_path)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        # save_checkpoint only stores epoch/lr/state_dict/optimizer, so use
        # .get() instead of the original hard lookups of 'losses',
        # 'accuracies' and 'f_ones', which raised KeyError on those
        # checkpoints (and whose values were never used anyway).
        epoch = checkpoint.get('epoch', epoch)
        logging.info("Loaded checkpoint '{}' (epoch {}) successfully.".format(model_best_path, epoch))
    else:
        logging.info("No checkpoint found.")
    return model, optimizer, epoch
def train_model(
    model,
    dataloaders,
    dataset_sizes,
    class_names,
    criterion,
    optimizer,
    scheduler,
    num_epochs=100,
    curr_epoch=0,
    checkpoint_dir="models/",
):
    """ Trains Night Lights Model
    Parameters
    ----------
    model : NTLModel class
        The pretrained model to be fine-tuned
    dataloaders : dict
        Contains the set images for training and validation set
    dataset_sizes : dict
        Contains dataset sizes per phase, used to average loss/accuracy
    class_names : list
        Class labels, used to caption validation images logged to wandb
    criterion
        Loss function, e.g. cross entropy loss
    optimizer
        Optimization algorithm, e.g. SGD, Adam
    scheduler
        Learning rate scheduler, stepped on the validation loss each epoch
    num_epochs : int (default is 100)
        Number of epochs
    curr_epoch : int (default is 0)
        Epoch to resume training from (e.g. after load_checkpoint)
    checkpoint_dir : str
        Directory where checkpoints are written
    Returns
    -------
    NTLModel class
        The fine-tuned model, loaded with the weights of the best
        validation-F1 epoch
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_f1 = 0
    phases = ["train", "val"]
    for epoch in range(curr_epoch, num_epochs):
        logging.info("Epoch {}/{}".format(epoch, num_epochs - 1))
        # Each epoch has a training and validation phase
        for phase in phases:
            if phase == "train":
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            preds_ = []
            labels_ = []
            # Iterate over data.
            for inputs, labels in tqdm(
                dataloaders[phase],
                total=len(dataloaders[phase]),
                desc="Iteration"
            ):
                inputs = inputs.to(DEVICE)
                labels = labels.to(DEVICE)
                # Zero the parameter gradients
                optimizer.zero_grad()
                # Forward: track history only in train
                with torch.set_grad_enabled(phase == "train"):
                    outputs = model(inputs)
                    outputs = outputs.view(outputs.size(0), -1)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                # statistics (loss is averaged per batch, so re-weight by
                # batch size before dividing by the dataset size below)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
                preds_.extend(preds.cpu().numpy().tolist())
                labels_.extend(labels.data.cpu().numpy().tolist())
            # epoch loss, accuracy, and f1 score
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = (
                running_corrects.double()
                / dataset_sizes[phase]
            )
            epoch_f1 = metrics.f1_score(
                preds_, labels_, average="macro"
            )
            wandb.log({
                "{} epoch loss".format(phase): epoch_loss
            }, step=epoch)
            wandb.log({
                "{} epoch accuracy".format(phase): epoch_acc
            }, step=epoch)
            wandb.log({
                "{} epoch F1".format(phase): epoch_f1
            }, step=epoch)
            if phase == 'val':
                # Log a handful of example predictions. NOTE(review):
                # `inputs`/`preds` refer to the LAST validation batch of the
                # epoch (leaked loop variables) -- behavior kept as-is.
                examples = []
                images_so_far = 0
                for j in range(inputs.size()[0]):
                    pred = class_names[preds[j]]
                    examples.append(wandb.Image(inputs.cpu().data[j], caption=pred))
                    if images_so_far > NUM_IMGS:
                        break
                    images_so_far += 1
                wandb.log({
                    "{} examples".format(phase): examples
                }, step=epoch)
            # Print progress
            learning_rate = optimizer.param_groups[0]["lr"]
            logging.info(
                "{} Loss: {:.4f} Accuracy: {:.4f} F1-Score: {:.4f} LR: {:.4E}".format(
                    phase.upper(),
                    epoch_loss,
                    epoch_acc,
                    epoch_f1,
                    Decimal(learning_rate),
                )
            )
            if phase == "val":
                # Update scheduler based on the validation loss
                scheduler.step(epoch_loss)
                # Check if current model gives the best F1 score
                is_best = False
                if epoch_f1 > best_f1:
                    best_f1 = epoch_f1
                    best_model_wts = copy.deepcopy(
                        model.state_dict()
                    )
                    is_best = True
                # Save states dictionary
                state = {
                    "epoch": epoch + 1,
                    "lr": learning_rate,
                    "state_dict": model.state_dict(),
                    "optimizer": optimizer.state_dict()
                }
                # Make filename verbose
                filename = "model_{0:d}_{1:.3f}_{2:.3f}_{3:.3f}.pt".format(
                    epoch,
                    epoch_loss,
                    epoch_f1,
                    epoch_acc
                )
                # Save model checkpoint
                save_checkpoint(
                    state,
                    is_best,
                    filename,
                    checkpoint_dir
                )
                # NOTE(review): this break only exits the phase loop (val is
                # the last phase), not the epoch loop -- behavior kept as-is,
                # but it likely was intended to stop training entirely.
                if learning_rate <= 1e-10:
                    break
    time_elapsed = time.time() - since
    logging.info(
        "Training complete in {:.0f}m {:.0f}s".format(
            time_elapsed // 60, time_elapsed % 60
        )
    )
    # Bug fix: the original referenced the undefined name `best_acc` here,
    # raising NameError at the end of every training run; the tracked
    # best metric is the validation F1 score.
    logging.info("Best Val F1-Score: {:.4f}".format(best_f1))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
def visualize_model(
    model, dataloaders, class_names, num_images=4, size=(5, 5)
):
    """ Prints the predicted labels for selected images.
    Parameters
    ----------
    model : NTLModel class
        The model to be used for prediction
    dataloaders : dict
        Contains images in the validation set
    class_names : list
        Class labels indexed by predicted class id
    num_images : int (default is 4)
        Number of images to predict
    size : tuple (default is (5, 5))
        Figure size passed to imshow
    Returns
    -------
    list of str
        Predicted class names for the images displayed (previously collected
        but discarded; now returned for callers that want them)
    """
    was_training = model.training
    model.eval()
    images_so_far = 0
    pred_list = []
    with torch.no_grad():
        for inputs, labels in dataloaders["val"]:
            inputs = inputs.to(DEVICE)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            for j in range(inputs.size()[0]):
                images_so_far += 1
                pred = class_names[preds[j]]
                imshow(
                    inputs.cpu().data[j],
                    title=pred,
                    size=size,
                )
                pred_list.append(pred)
                if images_so_far == num_images:
                    # Restore the caller's train/eval mode before returning.
                    model.train(mode=was_training)
                    return pred_list
    model.train(mode=was_training)
    return pred_list
def get_embedding(img_path, model_, size=4096, gpu=False):
    """ Returns vector embedding from PIL image
    Source: https://becominghuman.ai/extract-a-feature-vector-for-any-image-with-pytorch-9717561d1d4c
    Parameters
    ----------
    img_path : str
        Path to image file
    model_ : NTLModel class
        The model to be used for prediction; must expose a ``classifier``
        sub-module (VGG-style head) from which the embedding layer is taken
    size : int (default is 4096)
        Size of the feature embedding
    gpu : bool (default is False)
        If True, move the copied model to CUDA before the forward pass
    Returns
    -------
    tensor
        The Pytorch tensor containing the feature embeddings, shape (1, size)
    """
    # Deep-copy so eval mode, device moves and hooks don't mutate the
    # caller's model.
    model = copy.deepcopy(model_)
    model.eval()
    if gpu:
        model = model.cuda()
    image = Image.open(img_path)
    normalize = transforms.Normalize(
        mean=IMGNET_MEAN, std=IMGNET_STD
    )
    scaler = transforms.Resize(400)
    to_tensor = transforms.ToTensor()
    image = Variable(
        normalize(to_tensor(scaler(image))).unsqueeze(0)
    )
    embedding = torch.zeros(1, size, 1, 1)
    def copy_data(m, i, o):
        # Forward hook: capture the layer's output activations.
        embedding.copy_(o.data)
    # Third-from-last classifier layer is the embedding source.
    layer = list(model.classifier.children())[-3]
    h = layer.register_forward_hook(copy_data)
    image = image.to(DEVICE)
    # Forward pass only triggers the hook; the output itself is not needed
    # (the original bound it to an unused local `h_x`).
    model(image)
    h.remove()
    return embedding.view(embedding.size(0), -1)
def get_embedding_per_image(report, model):
    """Attach a feature-embedding column to the report, one vector per image.
    Parameters
    ----------
    report : pandas DataFrame
        The dataframe containing the file locations per image
    model : model instance
        The transfer model used to extract feature embeddings
    Returns
    -------
    pandas Dataframe
        Returns the report with an additional column indicating the
        extracted feature embeddings per image
    """
    extracted = []
    for _, row in tqdm(report.iterrows(), total=len(report)):
        # One forward pass per image; index [0] drops the batch dimension.
        vec = np.array(get_embedding(row['filename'], model, gpu=True))
        extracted.append(vec[0])
    report['embeddings'] = extracted
    return report
def get_mean_embedding_per_cluster(report):
"""Calculates the mean feature embedding per cluster
Parameters
----------
report : pandas DataFrame
The dataframe containing the file locations per image with an embeddings columns
Returns
-------
pandas Dataframe
A DataFrame containing the mean feature embeddings per cluster
"""
cluster_embeddings = {'cluster': [], 'mean_embedding':[]}
clusters = report['DHSCLUST'].unique()
for cluster in tqdm(clusters, total=len(clusters)):
embeddings = report[report['DHSCLUST'] == cluster]['embeddings'].tolist()
mean_embedding = np.mean(embeddings, axis=0)
cluster_embeddings['cluster'].append(cluster)
cluster_embeddings['mean_embedding'].append(mean_embedding)
cluster_embeddings = pd.DataFrame(cluster_embeddings)
cluster_embeddings.head(3)
return cluster_embeddings | 2.328125 | 2 |
roombapy/roomba_factory.py | Erelen-Laiquendi/roombapy | 17 | 12773695 | <reponame>Erelen-Laiquendi/roombapy
from roombapy import Roomba
from roombapy.remote_client import RoombaRemoteClient
class RoombaFactory:
    """
    Allows you to create Roomba class to control your robot
    """

    @staticmethod
    def create_roomba(
        address=None, blid=None, password=None, continuous=True, delay=1
    ):
        """Build a fully wired Roomba instance.

        The password default is restored to None (the redacted
        `<PASSWORD>` placeholder was not valid Python).
        """
        remote_client = RoombaFactory._create_remote_client(
            address, blid, password
        )
        return Roomba(remote_client, continuous, delay)

    @staticmethod
    def _create_remote_client(address=None, blid=None, password=None):
        # Thin seam so tests can stub out the MQTT remote client.
        return RoombaRemoteClient(address=address, blid=blid, password=password)
| 2.75 | 3 |
tests/dismod_execution/test_case_4.py | ihmeuw/cascade-at | 1 | 12773696 | <reponame>ihmeuw/cascade-at<filename>tests/dismod_execution/test_case_4.py
import pytest
if __name__ == '__main__':
import example_db
import dismod_tests
else:
from . import example_db
from . import dismod_tests
from cascade_at.dismod.api.run_dismod import run_dismod
from numpy import abs
import re
# Printed at import time so pytest output identifies which scenario ran.
print ('Case 4: Location and group fixed and random effects with group covariate.')
use_group_mulcov = True
file_name = 'example.db'
# Scenario switches consumed by example_db/dismod_tests.
config = {'sex_effect': False,
          'node_effects': True,
          'group_effects': True,
          'use_group_mulcov': use_group_mulcov,
          'include_group_data': False,
          'zero_sum_mulcov': True}
# Shared ground truth, priors and effects used (and mutated) by every test below.
truth, prior, node_effects, group_effects = dismod_tests.test_setup(use_group_mulcov)
# Keyword bundle passed to example_db.example_db in each test.
db_kwds = dict(test_config = config,
               truth = truth,
               node_effects = node_effects,
               subgroup_effects = group_effects,
               tol_fixed = dismod_tests.tol_fixed,
               tol_random = dismod_tests.tol_random)
def test_0(dismod, capsys, assert_correct = True):
    """
    Parent rate and subgroup random effect densities must be something other than uniform for this problem to solve.
    With gaussian priors, there seems to be a scaling problem in dismod (see the initial objective function),
    and dismod does not converge well.
    """
    # Mutate the module-level prior in place for this scenario.
    prior['parent_density'] = 'gaussian'
    prior['parent_std'] = 100
    prior['subgroup_density'] = 'gaussian'
    prior['subgroup_std'] = 100
    db_kwds.update({'prior': prior})
    db = example_db.example_db(file_name, **db_kwds)
    run_dismod(db.path, 'init')
    info = run_dismod(db.path, 'fit both')
    print (info.stdout)
    # Stash the fit log at module level for post-mortem inspection.
    global stdout
    stdout = info.stdout
    lines = stdout.splitlines()
    # Find the header of the ipopt iteration table, then read the initial
    # objective from the first data row beneath it.
    for i, line in enumerate(lines):
        if 'iter' in line and 'objective' in line:
            break
    col_index = lines[i].split().index('objective')
    objective = float(lines[i+1].split()[col_index])
    # A properly scaled problem starts with a (near) non-negative objective.
    if not (objective > -1e-10):
        with capsys.disabled():
            print (f"\nERROR: Dismod scaled this fit both problem incorrectly, objective was {objective}.")
    # This is the assert to use after the dismod problem is fixed
    # assert objective > -1e-10, f"Dismod scaled this fit both problem incorrectly, objective was {objective}."
def test_1(dismod, capsys, assert_correct = True):
    """
    Parent rate and subgroup random effect densities must be something other than uniform for this problem to solve.
    With gaussian priors, there seems to be a scaling problem in dismod (see the initial objective function),
    and dismod does not converge well.
    """
    prior['parent_density'] = 'gaussian'
    prior['parent_std'] = 100
    prior['subgroup_density'] = 'gaussian'
    prior['subgroup_std'] = 100
    db_kwds.update({'prior': prior})
    db = example_db.example_db(file_name, **db_kwds)
    run_dismod(db.path, 'set option print_level_fixed 0')
    success, db = dismod_tests.run_test(file_name, config, truth)
    if not success:
        msg = 'ERROR: Dismod_AT succeeded, but there is unresolvable ambiguity between the rate and group rate mulcov.'
        # First 4 variables are the unambiguous ones; compare fit vs truth.
        var = ((sum((db.fit_var.fit_var_value - db.truth_var.truth_var_value)[:4]**2))**.5 < 1e-7)
        fit = ((sum(db.fit_data_subset.weighted_residual**2))**.5 < 1e-7)
        if not var:
            msg += '\n The fit_var values do not match the truth.'
        if not fit:
            msg += '\n Data weighted residual errors are too large.'
        if not (var and fit):
            msg += '\n Dismod may be scaling this problem improperly.'
        if not (var and fit):
            with capsys.disabled():
                print('\n' + msg)
    # This is the assert to use after the dismod problem is fixed
    # assert var and fit, msg
def test_2(dismod, assert_correct = True):
    """
    Parent rate must be log-gaussian and subgroup random effect densities must be something other than uniform for this problem to solve
    With a log-gaussian prior, dismod converges well
    """
    prior['parent_density'] = 'log_gaussian'
    prior['parent_std'] = 100
    prior['parent_eta'] = 1e-5
    prior['subgroup_density'] = 'gaussian'
    prior['subgroup_std'] = 1000
    db_kwds.update({'prior': prior})
    db = example_db.example_db(file_name, **db_kwds)
    success, db = dismod_tests.run_test(file_name, config, truth)
    if not success:
        msg = 'Dismod_AT succeeded, but there is unresolvable ambiguity between the rate and group rate mulcov.'
        # First 4 variables are the unambiguous ones; compare fit vs truth.
        var = ((sum((db.fit_var.fit_var_value - db.truth_var.truth_var_value)[:4]**2))**.5 < 1e-7)
        fit = ((sum(db.fit_data_subset.weighted_residual**2))**.5 < 1e-7)
        if not var:
            msg += '\nThe fit values for the unambiguous variables failed.'
        if not fit:
            msg += "\nData weighted residual errors are too large."
        if not (var and fit):
            msg += '\nDismod may be scaling this problem improperly.'
        assert var and fit, msg
def test_3(dismod, assert_correct = True):
    """
    Parent rate must be log-gaussian and subgroup random effect densities must be something other than uniform for this problem to solve
    Parent rate must have guidance for to resolve that ambiguity
    """
    # Give the parent rate guidance
    # Pin the parent rate at its true value with a near-zero std prior, which
    # removes the rate/group-mulcov ambiguity entirely.
    prior['parent_density'] = 'gaussian'
    prior['parent_mean'] = truth['iota_parent_true']
    prior['parent_std'] = 1e-10
    prior['subgroup_density'] = 'gaussian'
    prior['subgroup_std'] = 10000
    db_kwds.update({'prior': prior})
    db = example_db.example_db(file_name, **db_kwds)
    success, db = dismod_tests.run_test(file_name, config, truth)
    if assert_correct:
        assert success, 'Dismod_AT ran, but there is ambiguity between the rate and group rate mulcov in this test.'
def test_4(dismod, capsys, assert_correct = True):
    """
    Parent rate must be log-gaussian and subgroup random effect densities must be something other than uniform for this problem to solve
    Parent rate must have guidance for to resolve that ambiguity
    With this configuration, the asymptotic statistics work fine
    """
    # Re-runs the previous configuration with asymptotic statistics enabled;
    # a huge Hessian entry signals the rate/group-mulcov ambiguity is present.
    ignore, db = dismod_tests.run_test(file_name, config, truth, test_asymptotic = True)
    hes = abs(db.hes_fixed.hes_fixed_value.values).max()
    success = hes < 1e10
    if not success:
        with capsys.disabled():
            print ('\nERROR: Dismod_AT asymptotics failed. The Hessian identifies the presence of the ambiguity.')
    # This is the assert to use after the dismod problem is fixed
    # assert success, 'Dismod_AT asymptotics failed. The Hessian identifies the presence of the ambiguity.'
# Allow running directly (outside pytest); the `dismod` fixture is unused
# by the test bodies, so None is passed in its place.
if __name__ == '__main__':
    test_0(None, assert_correct = True)
    test_1(None, assert_correct = True)
    test_2(None, assert_correct = True)
    test_3(None, assert_correct = True)
    test_4(None, assert_correct = True)
| 1.875 | 2 |
source/code/preprocessing/dataloader.py | IooHooI/NEURAL_NETWORKS | 0 | 12773697 | import os
import json
import numpy as np
import pandas as pd
import requests
from sklearn.datasets import load_boston
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
from tqdm import tqdm
from source.code.preprocessing.itemsselector import ItemSelector
from source.code.preprocessing.mylabelbinarizer import MyLabelBinarizer
from source.code.preprocessing.utils import create_sub_folders
# JSON manifest describing the remote data sources, and the local cache dir.
data_sources_description = '../../../data/data_sources.json'
local_path = '../../../data/dataset'


def download_data_from(from_param, to_param):
    """Download a described data source into *to_param*, caching the result.

    Parameters
    ----------
    from_param : dict
        Data-source descriptor with 'name', 'fmt' and 'link' keys.
    to_param : str
        Destination directory (created if missing).

    Returns
    -------
    str
        Path of the (possibly pre-existing) local file.
    """
    file_name = '{}.{}'.format(from_param['name'], from_param['fmt'])
    file_path = os.path.join(to_param, file_name)
    if not os.path.exists(to_param):
        create_sub_folders(to_param)
    if not os.path.exists(file_path):
        response = requests.get(from_param['link'], stream=True)
        # Fail fast on HTTP errors instead of silently caching an error page.
        response.raise_for_status()
        with open(file_path, "wb") as handle:
            # Stream in 8 KiB chunks; the previous default iterated one byte
            # at a time, making both the download and tqdm extremely slow.
            for data in tqdm(response.iter_content(chunk_size=8192)):
                handle.write(data)
    return file_path
def read_and_clean_titanic_data():
    """Download the Titanic sheet, impute/encode columns and return (X, y).

    Returns the transformed feature matrix X and survival labels y of
    shape (n, 1).
    """
    data_sources = json.load(open(data_sources_description, 'r'))
    titanic = pd.read_excel(download_data_from(data_sources[0], local_path))
    # Impute numeric gaps with column means; integer-encode sex/embarked.
    titanic.age.fillna(titanic.age.mean(), inplace=True)
    titanic.fare.fillna(titanic.fare.mean(), inplace=True)
    titanic.sex.replace({'male': 0, 'female': 1}, inplace=True)
    titanic.embarked.replace({'S': 0, 'C': 1, 'Q': 2}, inplace=True)
    titanic = titanic[~titanic.embarked.isnull()]
    num_features = ['age', 'fare']
    cat_features = ['pclass', 'embarked', 'parch', 'sibsp']
    bin_features = ['sex']
    # NOTE(review): cat_features are selected into X below, but the union only
    # has 'bin' and 'num' branches, so the categorical columns are dropped by
    # the transform — confirm whether a 'cat' branch was intended (the thyroid
    # loader in this module has one).
    pipeline = Pipeline([
        ('union', FeatureUnion([
            ('bin', Pipeline(
                [
                    ('choose', ItemSelector(bin_features))
                ]
            )),
            ('num', Pipeline(
                [
                    ('choose', ItemSelector(num_features)),
                    ('scale', StandardScaler())
                ]
            ))
        ]))
    ])
    X = titanic[num_features + cat_features + bin_features]
    X = pipeline.fit_transform(X)
    y = titanic.survived.values
    # Column vector shape (n, 1) expected by the downstream networks.
    y = y.reshape([len(y), 1])
    return X, y
def read_and_clean_thyroid_data():
    """Download the hypothyroid CSV, clean/encode columns and return (X, y).

    X is the binarised + scaled + one-hot-encoded feature matrix; y is the
    label-binarised Class column.
    """
    data_sources = json.load(open(data_sources_description, 'r'))
    hypothyroid = pd.read_csv(download_data_from(data_sources[1], local_path))
    # Integer-encode sex and the dataset's t/f boolean flags.
    hypothyroid.sex.replace({'M': 0, 'F': 1}, inplace=True)
    hypothyroid.replace({'f': 0, 't': 1}, inplace=True)
    # NOTE(review): '?' marks missing values; coercing it to 0 (rather than
    # NaN) means those cells bypass the mean imputation below — confirm intent.
    hypothyroid.replace({'?': 0}, inplace=True)
    # TBG is almost entirely missing, so both TBG columns are dropped.
    hypothyroid.drop(['TBG', 'TBG_measured'], axis=1, inplace=True)
    num_features = ['age', 'TSH', 'T3', 'TT4', 'T4U', 'FTI']
    cat_features = ['referral_source']
    bin_features = [
        'sex',
        'on_thyroxine',
        'query_on_thyroxine',
        'on_antithyroid_medication',
        'sick',
        'pregnant',
        'thyroid_surgery',
        'I131_treatment',
        'query_hypothyroid',
        'query_hyperthyroid',
        'lithium',
        'goitre',
        'tumor',
        'hypopituitary',
        'psych',
        'TSH_measured',
        'T3_measured',
        'TT4_measured',
        'T4U_measured',
        'FTI_measured'
    ]
    # Cast numerics to float32 and impute remaining NaNs with column means.
    for feature in num_features:
        hypothyroid[feature] = hypothyroid[feature].astype(np.float32)
        hypothyroid[feature].fillna(hypothyroid[feature].mean(), inplace=True)
    pipeline = Pipeline([
        ('union', FeatureUnion([
            ('bin', Pipeline(
                [
                    ('choose', ItemSelector(bin_features))
                ]
            )),
            ('num', Pipeline(
                [
                    ('choose', ItemSelector(num_features)),
                    ('scale', StandardScaler())
                ]
            )),
            ('cat', Pipeline(
                [
                    ('choose', ItemSelector(cat_features)),
                    ('binarize', MyLabelBinarizer())
                ]
            ))
        ]))
    ])
    X = hypothyroid[num_features + cat_features + bin_features]
    X = pipeline.fit_transform(X)
    y = MyLabelBinarizer().fit_transform(hypothyroid.Class)
    return X, y
def read_and_clean_boston_data():
    """Load the Boston housing set: scale, expand polynomial features, (X, y)."""
    features, target = load_boston(return_X_y=True)
    scaled = StandardScaler().fit_transform(features)
    expanded = PolynomialFeatures().fit_transform(scaled)
    return expanded, target.reshape([len(target), 1])


def read_and_clean_feedback_data():
    """Load the feedback CSV (third configured source) as (X, y) arrays."""
    data_sources = json.load(open(data_sources_description, 'r'))
    feedback = pd.read_csv(download_data_from(data_sources[2], local_path))
    return feedback['0'].values.T, feedback['1'].values.T
| 2.5625 | 3 |
setup.py | robbertstruyven/cs207-FinalProject | 0 | 12773698 | import setuptools
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setuptools.setup(
name='AD-cs207',
version='1.0.0',
author='<NAME>, <NAME>, <NAME>, <NAME>',
author_email=" ",
description='Automatic Differentiation Package',
long_description=long_description,
long_description_content_type='text/markdown',
install_requires=[ 'sympy>=1.3' ],
packages=setuptools.find_packages(),
keywords=['Automatic differentiation', 'gradients', 'Python'],
url='https://github.com/cs207-f18-WIRS/cs207-FinalProject',
license='MIT',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
],
)
| 1.601563 | 2 |
main.py | imnotender/Wallpaperify | 0 | 12773699 | import requests
import json
import ctypes
import tempfile
import os
import time
import base64
from dotenv import load_dotenv
load_dotenv()
REFRESH_TOKEN = os.getenv("REFRESH_TOKEN")
ACCESS_TOKEN = os.getenv("ACCESS_TOKEN")
EXPIRATION_TIME = float(os.getenv("EXPIRATION_TIME"))
CLIENT_ID = os.getenv("CLIENT_ID")
CLIENT_SECRET = os.getenv("CLIENT_SECRET")
authorization_str = CLIENT_ID + ":" + CLIENT_SECRET
authorization_str = base64.b64encode(authorization_str.encode("utf-8"))
#print("EXPIRATION_TIME={}".format(EXPIRATION_TIME))
#print("REFRESH_TOKEN= {} \n ACCESS_TOKEN=
def get_token():
    """Refresh the Spotify access token when it has expired.

    Exchanges the refresh token for a new access token using the
    env-configured client credentials (pre-encoded in ``authorization_str``)
    and stores the new token and expiry in the module globals so subsequent
    API calls pick them up.  Returns the current access token.

    Fixes two defects: the Basic auth header was a hard-coded literal (the
    ``.format(authorization_str)`` call had no placeholder and did nothing),
    and the refreshed token was never written back to ``ACCESS_TOKEN``, so
    every call after the first expiry kept using the stale token.
    """
    global EXPIRATION_TIME
    global ACCESS_TOKEN

    if time.time() > EXPIRATION_TIME:
        headers = {
            # authorization_str is bytes (base64 of "id:secret"); decode it.
            'Authorization': 'Basic {}'.format(authorization_str.decode('utf-8')),
        }
        data = {
            'grant_type': 'refresh_token',
            'refresh_token': '{}'.format(REFRESH_TOKEN)
        }
        r = requests.post('https://accounts.spotify.com/api/token', headers=headers, data=data)
        print("REFRESH TOKEN REQUEST RESPONSE: {}".format(r.text))
        payload = json.loads(r.text)
        ACCESS_TOKEN = payload['access_token']
        # Spotify reports the token lifetime in seconds via expires_in.
        EXPIRATION_TIME = time.time() + float(payload.get('expires_in', 3600))
        return ACCESS_TOKEN
    else:
        print("Access token hasn't expired: returning stored one")
        return ACCESS_TOKEN
def get_current_album():
    """Return (cover_image_url, album_id) for the currently playing track."""
    global ACCESS_TOKEN
    global album_id

    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json',
        'Authorization': 'Bearer {}'.format(ACCESS_TOKEN),
    }
    ra = requests.get('https://api.spotify.com/v1/me/player/currently-playing', headers=headers)
    # status_code is an int; the previous string comparisons ("200"/"429")
    # could never match, so errors and rate limiting were never detected.
    if ra.status_code != 200:
        if ra.status_code == 429:
            print("RATE LIMITED")
        print("STATUS CODE FROM GET ALBUM FUNCTION: {}".format(ra.status_code))
    jsondata = json.loads(ra.text)
    # Largest cover image is first in the images list.
    album_link = jsondata["item"]['album']['images'][0]["url"]
    album_id = jsondata["item"]['album']['id']
    print("Album id: " + album_id)
    return (album_link, album_id)
def set_album(album_link, album_id):
    """Download the album art (cached per album id) and set it as wallpaper."""
    # os.path.join replaces manual "\\" concatenation (portable path building).
    fldr = os.path.join(tempfile.gettempdir(), "wallpaperify")
    file = os.path.join(fldr, album_id + ".png")
    if not os.path.isdir(fldr):
        os.mkdir(fldr)
    if not os.path.exists(file):
        rf = requests.get(album_link)
        with open(file, 'wb') as f:
            f.write(rf.content)
    # SPI_SETDESKWALLPAPER (20) — Windows-only API; the duplicated call in
    # both branches is now a single call after the (optional) download.
    ctypes.windll.user32.SystemParametersInfoW(20, 0, file, 0)


def main():
    """One poll cycle: refresh token, look up album art, set wallpaper."""
    get_token()
    album_link, album_id = get_current_album()
    set_album(album_link, album_id)


if __name__ == "__main__":
    # Guarding the loop prevents it from running at import time.
    while True:
        main()
        time.sleep(60)
| 2.953125 | 3 |
app/admin/admin.py | hXtreme/HCP-Project | 0 | 12773700 | import csv
from io import StringIO
import os.path as op
from flask import request, redirect, Response, flash, url_for
from werkzeug.exceptions import HTTPException
from flask_admin import Admin
from flask_admin.base import expose
from flask_admin.contrib.sqla import ModelView
from flask_admin.contrib.fileadmin import FileAdmin
from app import app, db
from app.provider import Provider
from app.application import Application
from app.admin import ProviderImportForm
from app.admin.forms import CSV_SCHEMA
# Flask-Admin instance mounted on the app with Bootstrap 3 templates.
admin = Admin(app, name="Admin", template_mode="bootstrap3")


# Intentionally shadows the imported ModelView so every admin view defined
# below inherits this HTTP basic-auth gate.
class ModelView(ModelView):
    def is_accessible(self):
        """Allow access only with the configured admin basic-auth credentials."""
        auth = request.authorization or request.environ.get(
            "REMOTE_USER"
        )  # workaround for Apache
        # NOTE(review): when REMOTE_USER yields a plain string, the attribute
        # access below would raise AttributeError — confirm the Apache path.
        if (
            not auth
            or (auth.username, auth.password) != app.config["ADMIN_CREDENTIALS"]
        ):
            # Raising with a 401 Response triggers the browser's auth prompt.
            raise HTTPException(
                "",
                Response(
                    "You have to be an administrator.",
                    401,
                    {"WWW-Authenticate": 'Basic realm="Login Required"'},
                ),
            )
        return True
class ApplicationView(ModelView):
    """Admin CRUD view for Application records, with export enabled."""
    can_export = True


class ProviderView(ModelView):
    """Admin CRUD view for Provider records, extended with CSV import."""
    can_export = True
    list_template = (
        'admin/providers.html'
    )  # Extending the list view to allow for CSV import

    @expose('/import', methods=['GET', 'POST'])
    def import_file(self):
        """Upload a CSV of providers and upsert each row into the database."""
        form = ProviderImportForm()
        if form.validate_on_submit():
            # Coerce form.file.data to a stream to read CSV data
            file_content = form.file.data.stream.read().decode('utf-8')
            with StringIO(file_content) as csv_file:
                csv_file_reader = csv.DictReader(
                    csv_file, fieldnames=CSV_SCHEMA)
                # Skip the header row; the None default prevents StopIteration
                # (and a 500 response) when an empty file is uploaded.
                next(csv_file_reader, None)
                for item in csv_file_reader:
                    record = Provider.from_dict(item)
                    # Update or insert the record into the db.
                    db.session.merge(record)
                db.session.commit()
            flash('Provider info imported successfully.')
            return redirect(url_for('provider.index_view'))
        return self.render('admin/import.html', form=form)
# Register the model views on the admin site.
# Applications
admin.add_view(ApplicationView(Application, db.session))
# Providers
admin.add_view(ProviderView(Provider, db.session))
# Static files: browse/manage files under app/static from the admin UI.
path = op.join(op.dirname(__file__), "../static")
admin.add_view(FileAdmin(path, "/static/", name="Static"))
| 2.3125 | 2 |
hackathon/migrations/0001_initial.py | natalijabujevic0708/ci-hackathon-app | 11 | 12773701 | # Generated by Django 3.1.1 on 2020-10-15 19:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema: creates the Hackathon model.

    Auto-generated by `makemigrations`; applied migrations should be treated
    as immutable, so avoid hand-editing the operations below.
    """

    initial = True

    dependencies = [
        # Needs the user model for the created_by/judges/organiser relations.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Hackathon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('display_name', models.CharField(default='', max_length=254)),
                ('description', models.TextField()),
                ('start_date', models.DateTimeField()),
                ('end_date', models.DateTimeField()),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hackathon_created', to=settings.AUTH_USER_MODEL)),
                ('judges', models.ManyToManyField(blank=True, related_name='hackathon_judges', to=settings.AUTH_USER_MODEL)),
                ('organiser', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='hackathon_organiser', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
mekpie/util.py | ejrbuss/mekpie | 0 | 12773702 | from re import sub
from os import walk, mkdir, rename, remove
from sys import stderr
from shutil import rmtree
from os.path import isfile, isdir, join, basename, splitext, exists
from filecmp import dircmp
import mekpie.debug as debug
import mekpie.messages as messages
from .cli import panic, log
# Numerical
# ---------------------------------------------------------------------------- #
def clamp(value, bottom, top):
    """Constrain *value* to the inclusive range [bottom, top]."""
    limited = min(value, top)
    return max(bottom, limited)
# Collections
# ---------------------------------------------------------------------------- #
def empty(collection):
    """True when *collection* contains no elements."""
    return not len(collection)


def first(collection):
    """First element, or None when the collection is empty."""
    if len(collection):
        return collection[0]


def rest(collection):
    """Everything after the first element, or None when empty."""
    if len(collection):
        return collection[1:]


def last(collection):
    """Final element, or None when the collection is empty."""
    if len(collection):
        return collection[-1]
def shift(collection, n=1):
    """Drop up to *n* elements from the front of *collection*, in place.

    Uses a single slice deletion instead of n ``pop(0)`` calls, each of
    which was O(len) on a list.
    """
    if n > 0:
        del collection[:n]


def flatten(collection):
    """Concatenate a sequence of lists into a single list.

    Replaces ``sum(collection, [])``, which re-copies the accumulator on
    every step (quadratic in the number of sublists).
    """
    flat = []
    for part in collection:
        flat.extend(part)
    return flat


def split(collection, item):
    """Split *collection* around the first occurrence of *item*.

    Returns (before, after). When *item* is absent, returns
    (collection, []) — note the first element is the original object,
    not a copy.
    """
    if item not in collection:
        return collection, []
    index = collection.index(item)
    return collection[:index], collection[index + 1:]
# Strings
# ---------------------------------------------------------------------------- #
def tab(string, spaces=4):
    """Prefix the string, and every line inside it, with newline + indent."""
    indent = '\n' + ' ' * spaces
    return indent + string.replace('\n', indent)


def underline(element, collection):
    """Render *collection* on one line with *element* underlined beneath it."""
    top = ' '.join(collection)
    bottom = ' '.join(underlined_collection(element, collection))
    return f'{top}\n{bottom}'


def underlined_collection(underlined_element, collection):
    """Map each item to a caret/space mask: carets under the matching element."""
    return map(
        lambda element: sub(
            r'.',
            '^' if element == underlined_element else ' ',
            str(element),
        ),
        collection,
    )
# Files
# ---------------------------------------------------------------------------- #
def smkdir(path):
    """Create the directory at *path* if it does not already exist."""
    log(f'Creating directory {path}...')
    if not exists(path):
        mkdir(path)


def srmtree(path):
    """Recursively delete *path* if it exists (no error when missing)."""
    if exists(path):
        rmtree(path)


def smv(source, destination):
    """Move *source* over *destination*, replacing it when present.

    Bug fix: the destination was previously removed unconditionally, which
    raised FileNotFoundError whenever it did not exist yet.
    """
    log(f'Moving {source} to {destination}')
    if exists(destination):
        remove(destination)
    if exists(source):
        rename(source, destination)
def list_files(path, with_filter=None, with_ext=None, recursive=False):
    """Recursively list files under *path*, optionally filtered.

    with_filter : callable(path) -> bool.
    with_ext : file-name suffix; overrides with_filter when given.
    recursive : kept for interface compatibility; the listing is always
        recursive (the flag was never honoured). TODO(review): confirm intent.
    """
    if with_ext is not None:
        predicate = lambda name: name.endswith(with_ext)
    elif with_filter is not None:
        predicate = with_filter
    else:
        predicate = lambda _: True
    return [name for name in list_all_files(path) if predicate(name)]


def list_all_files(path):
    """Return every file path below *path* (depth-first)."""
    found = []
    for root, _, names in walk(path):
        for name in names:
            found.append(join(root, name))
    return found


def list_all_dirs(path):
    """Return every directory path below *path*.

    Bug fix: subdirectory names are now joined onto the directory that
    contains them (the walk root) instead of onto the top-level *path*,
    which produced wrong paths for anything deeper than one level.
    """
    found = []
    for root, names, _ in walk(path):
        for name in names:
            found.append(join(root, name))
    return found
def filename(path):
    """Base name of *path* without its final extension."""
    base = basename(path)
    stem, _ = splitext(base)
    return stem


def file_as_str(path):
    """Return the full text contents of the file at *path*."""
    check_is_file(path)
    log(f'Reading the contents of {path}...')
    with open(path) as handle:
        contents = handle.read()
    return contents


def remove_contents(path):
    """Empty the directory at *path* by deleting and recreating it."""
    log(f'Deleting the contents of {path}...')
    srmtree(path)
    smkdir(path)


def check_is_file(path):
    """Return *path* when it names a file; otherwise abort via panic()."""
    if isfile(path):
        return path
    panic(messages.file_not_found.format(path))


def check_is_dir(path):
    """Return *path* when it names a directory; otherwise abort via panic()."""
    if isdir(path):
        return path
    panic(messages.directory_not_found.format(path))
def same_dir(dir1, dir2):
    """True when the files common to both trees compare equal.

    NOTE(review): only dircmp.diff_files is inspected, so files present in
    just one tree (left_only/right_only) do not make this return False, and
    dircmp's comparison is shallow (os.stat based) — confirm that is the
    intended notion of "same".
    """
    def recursive(dcmp):
        if dcmp.diff_files:
            return False
        # Recurse into every common subdirectory.
        return all([recursive(sub_dcmp)
            for sub_dcmp
            in dcmp.subdirs.values()
        ])
    return recursive(dircmp(dir1, dir2))


def exec_str(source, handle, ctx={}):
    """Execute *source* in context *ctx*, panicking with *handle* on error.

    SECURITY: exec() runs arbitrary code — only ever feed trusted project
    configuration through here. Note the shared mutable default *ctx*.
    """
    try:
        exec(source, ctx)
    except Exception as err:
        panic(messages.execution_error.format(handle, tab(str(err))))
    return ctx


def exec_file(path, ctx={}):
    """Execute the Python file at *path* (see exec_str's security note)."""
    return exec_str(file_as_str(path), path, ctx)
# Types
# ---------------------------------------------------------------------------- #
def type_name(x):
    """Name of the concrete class of *x* (e.g. 'int', 'list')."""
    cls = type(x)
    return cls.__name__
| 2.6875 | 3 |
backend/api/school/tests.py | jacorea/ismp | 3 | 12773703 | <reponame>jacorea/ismp
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase, APIClient
from rest_framework.views import status
from api.school.models import School
from api.school.serializers import SchoolSerializer
class BaseViewTest(APITestCase):
    """Shared client and fixtures for the school API tests."""
    client = APIClient()

    @staticmethod
    def create_school(name="", profile_picture_url="", page_description=""):
        # The former `if True:` guard (a leftover from making the picture URL
        # optional) was dead code and has been removed.
        School.objects.create(name=name,
                              profile_picture_url=profile_picture_url,
                              page_description=page_description)

    def setUp(self):
        # add test data
        self.create_school("UCSD", "", "this is UCSD")
        self.create_school("UCLA", "example.com/image.jpg", "this is UCLA")


class GetAllSchoolsTest(BaseViewTest):

    def test_get_all_schools(self):
        """
        This test ensures that all schools added in the setUp method
        exist when we make a GET request to the school-list endpoint
        """
        # hit the API endpoint
        response = self.client.get(
            reverse("school-list", kwargs={"version": "v1"})
        )
        # fetch the data from db
        expected = School.objects.all()
        serialized = SchoolSerializer(expected, many=True)
        self.assertEqual(response.data['results'], serialized.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| 2.46875 | 2 |
tools/check_jobs_documented.py | pmarcinkiewicz1/zuul-jobs | 1 | 12773704 | #!/usr/bin/env python
#
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Ensure that all jobs and roles appear in the documentation.
import os
import re
import sys
import yaml
class ZuulSafeLoader(yaml.SafeLoader):
    """SafeLoader variant that tolerates Zuul's !encrypted/* YAML tags."""
    def __init__(self, *args, **kwargs):
        super(ZuulSafeLoader, self).__init__(*args, **kwargs)
        self.add_multi_constructor('!encrypted/', self.construct_encrypted)

    @classmethod
    def construct_encrypted(cls, loader, tag_suffix, node):
        # Decryption is irrelevant here; load the payload as a plain sequence.
        return loader.construct_sequence(node)


class Layout(object):
    """Simple container for the job definitions collected from Zuul config."""
    def __init__(self):
        self.jobs = []
class ZuulConfig(object):
    """Locates and parses the repository's Zuul configuration into a Layout."""

    def find_zuul_yaml(self):
        """Walk upward from cwd until a Zuul config file or dir is found.

        NOTE(review): at the filesystem root os.path.split returns the root
        again, so a missing config would loop forever instead of reaching
        the exception below — confirm this is only ever run inside a repo.
        """
        root = os.getcwd()
        while root:
            for fn in ['zuul.yaml', '.zuul.yaml', 'zuul.d', '.zuul.d']:
                path = os.path.join(root, fn)
                if os.path.exists(path):
                    return path
            root = os.path.split(root)[0]
        raise Exception(
            "Unable to find zuul config in zuul.yaml, .zuul.yaml,"
            " zuul.d or .zuul.d")

    def parse_zuul_yaml(self, path):
        """Parse a single-file Zuul config, collecting its job entries."""
        with open(path) as f:
            data = yaml.load(f, Loader=ZuulSafeLoader)

        layout = Layout()
        for obj in data:
            if 'job' in obj:
                layout.jobs.append(obj['job'])
        return layout

    def parse_zuul_d(self, path):
        """Parse every file in a zuul.d directory, collecting job entries.

        NOTE(review): all directory entries are parsed, not just *.yaml —
        stray files in zuul.d would raise here.
        """
        layout = Layout()
        for conf in os.listdir(path):
            with open(os.path.join(path, conf)) as f:
                data = yaml.load(f, Loader=ZuulSafeLoader)
            for obj in data:
                if 'job' in obj:
                    layout.jobs.append(obj['job'])
        return layout

    def parse_zuul_layout(self):
        """Dispatch to the file or directory parser based on what was found."""
        path = self.find_zuul_yaml()
        if path.endswith('zuul.d'):
            layout = self.parse_zuul_d(path)
        else:
            layout = self.parse_zuul_yaml(path)
        return layout

    def __init__(self):
        self.layout = self.parse_zuul_layout()
class Docs(object):
    """Collects the job and role names referenced in the Sphinx docs.

    Scans every .rst file under doc/source for zuul domain directives and
    records which jobs/roles are documented, plus whether the auto-listing
    directives are in use.
    """

    # Directive patterns compiled once instead of on every scanned line.
    _JOB_RE = re.compile(r'.*\.\. zuul:job:: (.*)$')
    _AUTOJOB_RE = re.compile(r'.*\.\. zuul:autojob:: (.*)$')
    _AUTOJOBS_RE = re.compile(r'.*\.\. zuul:autojobs::.*$')
    _ROLE_RE = re.compile(r'.*\.\. zuul:role:: (.*)$')
    _AUTOROLE_RE = re.compile(r'.*\.\. zuul:autorole:: (.*)$')
    _AUTOROLES_RE = re.compile(r'.*\.\. zuul:autoroles::.*$')

    def __init__(self):
        self.jobs = set()
        self.roles = set()
        self.autojobs = False
        self.autoroles = False
        self.walk(os.path.join(os.getcwd(), 'doc', 'source'))

    def walk(self, path):
        """Scan all .rst files below *path* and record documented names."""
        for root, dirs, files in os.walk(path):
            for fn in files:
                if not fn.endswith('.rst'):
                    continue
                with open(os.path.join(root, fn)) as f:
                    for line in f:
                        self._scan_line(line)

    def _scan_line(self, line):
        # Named directives add to the documented-name sets.
        for pattern, bucket in ((self._JOB_RE, self.jobs),
                                (self._AUTOJOB_RE, self.jobs),
                                (self._ROLE_RE, self.roles),
                                (self._AUTOROLE_RE, self.roles)):
            m = pattern.match(line)
            if m:
                bucket.add(m.group(1))
        # Auto-listing directives document everything of that kind.
        if self._AUTOJOBS_RE.match(line):
            self.autojobs = True
        if self._AUTOROLES_RE.match(line):
            self.autoroles = True
class Roles(object):
    """Collects role names: children of roles/ that contain a tasks/ dir."""
    def __init__(self):
        self.roles = set()
        self.walk(os.path.join(os.getcwd(), 'roles'))

    def walk(self, path):
        """Record every entry of *path* that has a tasks/ subdirectory."""
        for entry in os.listdir(path):
            tasks_dir = os.path.join(path, entry, 'tasks')
            if os.path.isdir(tasks_dir):
                self.roles.add(entry)
# Script body: compare the Zuul layout and on-disk roles against the docs.
z = ZuulConfig()
r = Roles()
d = Docs()

ret = 0
# Every role directory must appear in the documentation tree.
for role in r.roles:
    if role not in d.roles:
        print("Role %s not included in document tree" % (role,))
        ret = 1
# Every configured job must appear in the documentation tree.
for job in [x['name'] for x in z.layout.jobs]:
    if job not in d.jobs:
        print("Job %s not included in document tree" % (job,))
        ret = 1
# Non-zero exit fails the CI job when anything is undocumented.
sys.exit(ret)
| 2.15625 | 2 |
fb5logging/loggerconstants.py | samiwilf/FAMBench | 9 | 12773705 | <reponame>samiwilf/FAMBench<gh_stars>1-10
"""
Master list of constants for logger
Mostly logger keys, but some other constants as well.
"""
# loggerkey - header
HEADER = "header"
# loggerkey - timing info
EPOCH_START = "epoch_start"
EPOCH_STOP = "epoch_stop"
RUN_START = "run_start"
RUN_STOP = "run_stop"
BATCH_START = "batch_start"
BATCH_STOP = "batch_stop"
# loggerkey - run information
NUM_BATCHES = "num_batches"
BATCH_SIZE = "batch_size"
FLOPS = "flops"
# loggerkey - model hyperparameters
LEARNING_RATE = "learning_rate"
# type of summary view saved to file
INTERMEDIATE_VIEW = "intermediate_view" # table view
RAW_VIEW = "raw_view" # json view
# available types of score metrics
EXPS = "exps" # examples/sec (throughput)
TFPS = "tfps" # teraflops/sec (floating point ops rate)
GBPS = "gbps" # gb/sec
| 1.273438 | 1 |
test/test.py | zephyrzoom/douyu | 28 | 12773706 | <reponame>zephyrzoom/douyu
import socket
import time
# Raw-socket probe of the Douyu danmaku (chat) server protocol: connect,
# send pre-captured login/info packets, then keepalives.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('172.16.17.32', 8011))
#s.sendall(b'type@=loginreq/username@=auto_qZ1JCRPZC6/password@=<PASSWORD>/ro<PASSWORD>ass@=/roomid@=265352/devid@=5DCE2FFCE0E61AB11F77D935D10E6E0B/rt@=1437381860/vk@=f78f45ff359eceeafbf1a0d2e2b9874b/ver@=20150714/')
time.sleep(4)
# Hex-encoded login request packet (length header + type 0x02b1 + payload).
s.sendall(b'e1000000e1000000b102000074797065403d6c6f67696e7265712f757365726e616d65403d6175746f5f715a314a4352505a43362f70617373776f7264403d35393661393663633762663931303863643839366633336334346165646338612f726f6f6d70617373403d2f726f6f6d6964403d3232393435372f6465766964403d35444345324646434530453631414231314637374439333544313045364530422f7274403d313433373635333132332f766b403d33653732353038666430373562643464666133666535373233336666613337322f766572403d32303135303732312f00')
time.sleep(1)
# Online gift info request and member info request packets.
s.sendall(b'3100000031000000b102000074797065403d6f6e6c696e655f676966745f696e666f5f7265712f756964403d323233363431372f00')
s.sendall(b'4200000042000000b102000074797065403d6d656d626572696e666f7265712f6c696e6b403d687474703a405340537777772e646f75797574762e636f6d40537a65656b2f00')
# Keepalive loop (once per second).
while True:
    time.sleep(1)
    s.sendall(b'5b0000005b000000b102000074797065403d6b6565706c6976652f7469636b403d313433373635333132332f766277403d302f63646e403d302f6b403d35393664653332353533373164636133653338653639306538643839356135312f00')
# NOTE(review): everything below the infinite loop is unreachable as written
# (there is no break) — presumably these lines were meant inside the loop.
time.sleep(2)
data = s.recv(5)
print('Received', repr(data))
s.close()
# public static byte[] loginReq(string username,string password,string roomid,string uuid)
# {
# long time=Scholar.Framework.Utils.Util.UNIX_TIMESTAMP();
# string salt="7oE9nPEG9xXV69phU31FYCLUagKeYtsF";
# string vk=Scholar.Framework.Utils.Util.Md5(string.Format("{0}{1}{2}",time,salt,uuid));
# string p = string.Format("type@=loginreq/username@={0}/password@={1}/roomid@=
# {2}/ct@=2/devid@={3}/ver@={4}/rt@={5}/vk@={6}/",
# username, password, roomid, uuid, 20150515,time,vk);
# byte[] bin = Encoding.UTF8.GetBytes(p);
# ByteBuffer buf = new ByteBuffer();
# buf.Put("dc000000 dc000000 b1020000 ");
# buf.Put(bin);
# buf.Put(0);
# return buf.ToByteArray();
# }
#urlgreq is [type@=loginreq/username@=auto_qZ1JCRPZC6/
# password@=<PASSWORD>96f33c44aedc8a/roompass@=/
# roomid@=265352/devid@=5DCE2FFCE0E61AB11F77D935D10E6E0B/
# rt@=1437381860/vk@=f78f45ff359eceeafbf1a0d2e2b9874b/ver@=20150714/] | 2.21875 | 2 |
modulo1/4 Trabajo y formateo - Cadenas.py | OmarGP/Python1 | 0 | 12773707 | #####################################################################
# Trabajando con Cadenas de Texto #
#####################################################################
cadena = " <NAME>!! "
print(cadena)
print(cadena[10])
print(cadena[3:])
print(cadena[:10])
print(cadena[2:6])
print(cadena[-4])
print(cadena.lower())
print(cadena.upper())
print(cadena.capitalize())
print(cadena.strip())
print(cadena.replace("o", "+"))
print(cadena.isdigit())
print(len(cadena))
print(cadena.count())
print("")
#####################################################################
# Formateando Cadenas y Número #
#####################################################################
mensaje = "Mundo"
print("Hola " + mensaje + " !!!")
print("Hola {} !!!".format(mensaje))
print("Hola {s} !!!".format(s=mensaje))
print(f"Hola {mensaje} !!!")
numero = 10 / 3
print(numero)
print("Hola {n:1.2f} !!!".format(n=numero)) | 4.1875 | 4 |
build_local_zipnum.py | ikreymer/webarchive-indexing | 35 | 12773708 | <filename>build_local_zipnum.py
import os
import glob
from argparse import ArgumentParser
from zipnumclusterjob import ZipNumClusterJob
from mrjob.launch import MRJobLauncher
import logging
import sys
log = logging.getLogger(__name__)
#=============================================================================
def run_job(input_path, output_dir, shards, parallel, lines=None):
    """Run the ZipNumClusterJob mrjob over *input_path* into *output_dir*.

    parallel=True uses mrjob's 'local' runner (multiple processes);
    otherwise the single-process 'inline' runner is used.
    """
    args = ['--no-output', '--output-dir', output_dir, '-r']
    if parallel:
        args.append('local')
    else:
        args.append('inline')

    args.append('--shards=' + str(shards))

    if lines:
        args.append('--numlines=' + str(lines))

    # Accept either a single glob/path or a list of them.
    if isinstance(input_path, list):
        args.extend(input_path)
    else:
        args.append(input_path)

    output_dir = os.path.abspath(output_dir)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # Hadoop-compat property the job reads to locate its output directory.
    os.environ['mapreduce_output_fileoutputformat_outputdir'] = output_dir

    job = ZipNumClusterJob(args)

    with job.make_runner() as runner:
        runner.run()
def build_summary_and_loc(output_dir):
    """
    Build the ZipNum 'cluster.summary' and 'cluster.loc' index files from
    the job output found in *output_dir*.

    BUG FIX: the files were opened in binary mode ('w+b'/'r+b') while str
    data was concatenated and written, which raises TypeError on Python 3;
    all files are now opened in text mode.

    Parameters
    ----------
    output_dir: str
        Directory holding the 'part-*' summary shards and 'cdx-*' blocks.

    Returns
    -------
    No return value.
    """
    # Write summary file: every line of every part file, tab-suffixed with a
    # running 1-based sequence number.
    full = os.path.join(output_dir, 'part-*')
    inputs = sorted(glob.glob(full))
    summary_file = os.path.join(output_dir, 'cluster.summary')
    print('Building Summary File: ' + summary_file)
    count = 1
    with open(summary_file, 'w') as fh:
        for filein in inputs:
            with open(filein, 'r') as partfh:
                for line in partfh:
                    line = line.rstrip()
                    line += '\t' + str(count)
                    fh.write(line + '\n')
                    count += 1
    # Write loc file: maps each cdx shard's basename to its full path.
    full = os.path.join(output_dir, 'cdx-*')
    inputs = sorted(glob.glob(full))
    loc_file = os.path.join(output_dir, 'cluster.loc')
    print('Building Loc File: ' + loc_file)
    with open(loc_file, 'w') as fh:
        for filename in inputs:
            fh.write(os.path.basename(filename) + '\t' + filename + '\n')
def main():
    """Parse the command line, run the ZipNum job and build the index files."""
    parser = ArgumentParser()
    parser.add_argument('output', help='ZipNum Cluster Output directory')
    parser.add_argument('inputs', nargs='+', help='CDX Input glob eg: /cdx/*.cdx.gz')
    parser.add_argument('-s', '--shards', default=10, type=int,
                        help='Number of ZipNum Cluster shards to create')
    parser.add_argument('-l', '--numlines', default=3000, type=int,
                        help='Number of lines per gzip block (default 3000)')
    parser.add_argument('-p', '--parallel', action='store_true',
                        help='Run in parllel (multiple maps/reducer processes)')
    opts = parser.parse_args()
    # mrjob logging: informative output, but silence the noisy compat layer.
    MRJobLauncher.set_up_logging(quiet=False, verbose=False, stream=sys.stderr)
    log.setLevel(logging.INFO)
    logging.getLogger('mrjob.compat').setLevel(logging.ERROR)
    run_job(opts.inputs, opts.output, opts.shards, opts.parallel, opts.numlines)
    build_summary_and_loc(opts.output)


if __name__ == "__main__":
    main()
| 2.34375 | 2 |
handwriting-synthesis/hw_generator_config.py | blow-the-fluff/parking | 0 | 12773709 | DO_RUN_BIAS_TEST=False
DEBUG = False
INPUT_CSV = False

# Filesystem layout for generated sample images.
ROOT_FOLDER = "/home/jupyter/forms-ocr"
OUTPUT_FOLDER = ROOT_FOLDER + "/sample_images"
GEN_FOLDER = OUTPUT_FOLDER + "/img"

LOCALE = "en_GB"
DUMMY_GENERATOR = 0

# Every form field maps to a default (x, y) offset of (0, 0).
_DEFAULT_POS = (0, 0)

FIELD_DICT_3FIELDS = {name: _DEFAULT_POS for name in ('Name', 'Tax', 'Address')}

FIELD_DICT_7FIELDS = {name: _DEFAULT_POS for name in (
    'Name', 'BusinessName', 'Tax', 'Address', 'City', 'Requester', 'Signature')}

# Nine single-digit SSN boxes: ssn1 .. ssn9.
FIELD_DICT_SSN = {'ssn%d' % i: _DEFAULT_POS for i in range(1, 10)}

# 16 fields = the 7 base fields plus the 9 SSN boxes.
FIELD_DICT_16FIELDS = {**FIELD_DICT_7FIELDS, **FIELD_DICT_SSN}

# 23 fields = the 16 fields plus Tax2..Tax7 and Date.
FIELD_DICT_23FIELDS = {
    **FIELD_DICT_16FIELDS,
    **{'Tax%d' % i: _DEFAULT_POS for i in range(2, 8)},
    'Date': _DEFAULT_POS,
}

# Full field set; the 'SocialSeciurityNumber' spelling is kept as-is because
# it is a runtime dictionary key.
FIELD_DICT = {name: _DEFAULT_POS for name in (
    'Name', 'BusinessName', 'Tax', 'Instructions', 'Exemptions',
    'ExemptionCode', 'Address', 'City', 'Requester', 'Account',
    'SocialSeciurityNumber', 'EmpIdentificationNumber')}

###############################
FAKER_GENERATOR = 1
HANDWRITING_FIXED = 0
HANDWRITING_PRESET_FLAG = 1
| 1.648438 | 2 |
Doraemon/Crawlers/google_KG.py | 131250208/Doraemon | 2 | 12773710 | <reponame>131250208/Doraemon
'''
the code is invalid @ 2020.10.09
'''
import random
import time
from bs4 import BeautifulSoup
from urllib import parse
import re
from Doraemon.Requests import requests_dora, proxies_dora
from tqdm import tqdm
import logging
# ORG_KEYWORDS = ["college", "company", "university", "school", "corporation",
# "institute", "organization", "association"]
def quote(queryStr):
    """
    Percent-encode *queryStr* for use in a URL.

    ``parse.quote`` handles str input directly; the bytes fallback is kept
    for exotic inputs, but the bare ``except:`` is narrowed to ``Exception``
    so that KeyboardInterrupt/SystemExit are no longer swallowed.

    :param queryStr: raw query string.
    :return: the percent-encoded string.
    """
    try:
        return parse.quote(queryStr)
    except Exception:
        # Last resort: drop undecodable characters and quote the bytes.
        return parse.quote(queryStr.encode('utf-8', 'ignore'))
def google_search(queryStr, get_proxies_fun=None, page=1):
    """
    Fetch one page of Google search results for *queryStr*.

    Retries (recursively) after a short random back-off when Google does
    not answer with HTTP 200.

    :param queryStr: the (already percent-encoded) query string.
    :param get_proxies_fun: optional callable returning a proxies dict.
    :param page: 1-based result page number.
    :return: raw HTML of the result page.
    """
    url = 'https://www.google.com/search?biw=1920&safe=active&hl=en&q=%s&oq=%s&start=%d' % (queryStr, queryStr, (page-1) * 10)
    response = requests_dora.try_best_2_get(url, headers=requests_dora.get_default_headers(), invoked_by="google_search", get_proxies_fun=get_proxies_fun, timeout=60)
    status = response.status_code
    if status == 200:
        return response.text
    print("status: {}, try again....".format(status))
    random.seed(time.time())
    time.sleep(3 + 5 * random.random())
    # BUG FIX: the old retry called google_search(queryStr, page, get_proxies_fun),
    # passing *page* where *get_proxies_fun* is expected (and vice versa).
    # Pass both by keyword so the retry keeps the original arguments.
    return google_search(queryStr, get_proxies_fun=get_proxies_fun, page=page)
def get_entity(query_str, get_proxies_fun, wait=1.5):
    """
    Query Google and scrape the Knowledge Graph panel for *query_str*.

    BUG FIX: removed a stray ``bar.update()`` in the crawl loop -- ``bar``
    was never defined (leftover from an old progress bar) and raised
    NameError on the first iteration; tqdm already reports progress.
    Also renamed the local list ``next`` to ``next_urls`` to stop shadowing
    the builtin.

    :param query_str: the search string (typically an organisation name).
    :param get_proxies_fun: callable returning a proxies dict for requests.
    :param wait: maximum random sleep (seconds) between successive requests.
    :return: dict with query/name/type/description/attributes/related orgs,
        or None when Google shows no Knowledge Graph panel.
    """
    rel_org_name_set = set()
    logging.warning("start crawling {}...".format(query_str))
    text = google_search(query_str, get_proxies_fun)
    random.seed(time.time())
    time.sleep(wait * random.random())
    soup = BeautifulSoup(text, "lxml")
    # is there an entity in google KG?
    div_kg_hearer = soup.select_one("div.kp-header")
    if div_kg_hearer is None:  # if there is no knowledge graph at the right, drop it
        logging.warning("no entity returned for this query")
        return None
    enti_name = div_kg_hearer.select_one("div[role=heading] span")
    enti_name = enti_name.text if enti_name is not None else None
    if enti_name is None or "..." in enti_name:
        # Fall back to a raw-HTML pattern when the heading is missing/elided.
        se = re.search('\["t-dhmk9MkDbvI",.*\[\["data",null,null,null,null,\[null,"\[\\\\"(.*)\\\\",', text)
        if se is not None:
            enti_name = se.group(1)
        else:
            logging.warning("sth went wrong when extracting the name of the entity")
            return None
    # identify the type
    span_list = div_kg_hearer.select("span")
    enti_type = span_list[-1].text if len(span_list) > 1 else "unknown"
    # description from wikipedia
    des = soup.find("h3", text="Description")
    des_info = ""
    if des is not None:
        des_span = des.parent.select_one("span")
        des_info = des_span.text if des_span is not None else ""
    # extract attributes
    attr_tags = soup.select("div.Z1hOCe")
    attr_dict = {}
    for attr in attr_tags:
        attr_str = attr.get_text()
        se = re.search("(.*?)[::](.*)", attr_str)
        if se is None:
            continue
        key_attr = se.group(1)
        val_attr = se.group(2)
        attr_dict[key_attr] = val_attr
    # relevant org name on current page
    a_reltype_list = soup.select("div.MRfBrb > a")
    for a in a_reltype_list:
        rel_org_name_set.add(a["title"].strip())
    # collect next urls e.g. : more x+
    div_list = soup.select("div.yp1CPe")
    next_urls = []
    host = "https://www.google.com"
    for div in div_list:
        a_list = div.select("a.EbH0bb")
        for a in a_list:
            if "http" not in a["href"]:
                next_urls.append("%s%s" % (host, a["href"]))
    # crawl parent org
    a_parent_org = soup.find("a", text="Parent organization")
    if a_parent_org is not None:
        parent_str = a_parent_org.parent.parent.text.strip()
        parent_org = parent_str.split(":")[1]
        rel_org_name_set.add(parent_org.strip())
    # crawl subsidiaries
    a_subsidiaries = soup.find("a", text="Subsidiaries")
    if a_subsidiaries is not None:
        href = a_subsidiaries["href"]
        if "http" not in href:
            subsidiaries_str = a_subsidiaries.parent.parent.text.strip()
            subs = subsidiaries_str.split(":")[1].split(",")
            for sub in subs:
                sub = sub.strip()
                if sub == "MORE":
                    continue
                rel_org_name_set.add(sub)
            next_urls.append("%s%s" % (host, href))
    # scrawl urls collected in next_urls
    for url in tqdm(next_urls, desc="crawling relevant org names..."):
        res = requests_dora.try_best_2_get(url, invoked_by="get_org_name", headers=requests_dora.get_default_headers(), get_proxies_fun=get_proxies_fun)
        soup = BeautifulSoup(res.text, "lxml")
        # crawl items at the top
        a_list = soup.select("a.klitem")
        for a in a_list:
            rel_org_name = a["title"]
            rel_org_name_set.add(rel_org_name.strip())
        # crawl headings under the map if any
        heading_list = soup.select("div.VkpGBb")
        for heading in heading_list:
            heading_str = heading.select_one("div[role='heading']")
            rel_org_name_set.add(heading_str.get_text())
        random.seed(time.time())
        time.sleep(wait * random.random())
    # drop single-character noise before returning
    rel_org_name_list = [org_name for org_name in rel_org_name_set if len(org_name) > 1]
    return {"query_str": query_str, "name": enti_name, "type": enti_type,
            "des": des_info, "attributes": attr_dict, "rel_org": rel_org_name_list}
if __name__ == "__main__":
    # Demo run: route requests through a local proxy and crawl one entity.
    def get_proxy():
        """Return a proxies dict pointing at a local SOCKS/HTTP forwarder."""
        proxy_str = "127.0.0.1:1080"
        return {"http": "http://%s" % proxy_str,
                "https": "http://%s" % proxy_str, }

    query_str = "阿里巴巴"
    entity = get_entity(query_str, get_proxies_fun=get_proxy)
    print(entity)
creational/abstract_factory/data/soldiers/soldier.py | Kozak24/Patterns | 0 | 12773711 | <gh_stars>0
from abc import ABC, abstractmethod
class Soldier(ABC):
    """
    Abstract base for soldier units.

    Each instance claims a unique, monotonically increasing id from the
    class-wide counter; subclasses must implement patrol() and shoot().
    """

    _ID = 0                     # next id to hand out (shared, class-wide)
    _SOLDIER_TYPE = "Soldier"   # display name used by __str__

    def __init__(self, hp, attack):
        """Store hit points and attack power, and claim a fresh id."""
        self._id = Soldier._ID
        Soldier._ID += 1
        self._hp = hp
        self._attack = attack

    @abstractmethod
    def patrol(self):
        """Walk a patrol route (subclass responsibility)."""

    @abstractmethod
    def shoot(self):
        """Fire a weapon (subclass responsibility)."""

    def __str__(self) -> str:
        return "{} with ID '{}'".format(self._SOLDIER_TYPE, self._id)
| 3.328125 | 3 |
var/spack/repos/builtin/packages/py-agate/package.py | jeanbez/spack | 0 | 12773712 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyAgate(PythonPackage):
    """agate is a Python data analysis library that is optimized for humans
    instead of machines. It is an alternative to numpy and pandas that solves
    real-world problems with readable code."""
    # Upstream documentation page and PyPI source tarball used by Spack.
    homepage = "https://agate.readthedocs.io/en/latest/"
    pypi = "agate/agate-1.6.1.tar.gz"
    # Known release, with the sha256 checksum used to verify the download.
    version('1.6.1', sha256='c93aaa500b439d71e4a5cf088d0006d2ce2c76f1950960c8843114e5f361dfd3')
    # Build-time only dependency.
    depends_on('py-setuptools', type='build')
    # Dependencies needed both to build and to run the package.
    depends_on('py-six@1.9.0:', type=('build', 'run'))
    depends_on('py-pytimeparse@1.1.5:', type=('build', 'run'))
    depends_on('py-parsedatetime@2.1:', type=('build', 'run'))
    depends_on('py-babel@2.0:', type=('build', 'run'))
    depends_on('py-isodate@0.5.4:', type=('build', 'run'))
    depends_on('py-python-slugify@1.2.1:', type=('build', 'run'))
    depends_on('py-leather@0.3.2:', type=('build', 'run'))
| 1.992188 | 2 |
lib/oci_utils/migrate/__init__.py | guidotijskens/oci-utils | 35 | 12773713 | <filename>lib/oci_utils/migrate/__init__.py<gh_stars>10-100
# oci-utils
#
# Copyright (c) 2019, 2020 Oracle and/or its affiliates. All rights reserved.
# Licensed under the Universal Permissive License v 1.0 as shown
# at http://oss.oracle.com/licenses/upl.
"""
Initialisation of the migrate package.
"""
import logging
import os
import sys
import termios
import threading
import time
import tty
from datetime import datetime
import yaml
from ..migrate import migrate_data
_logger = logging.getLogger('oci_utils.migrate')
def _getch():
    """
    Read a single keypress from stdin without waiting for Enter.

    The terminal is switched to raw mode for the duration of the read and
    always restored afterwards.

    Returns
    -------
    The resulting character.
    """
    fd = sys.stdin.fileno()
    # Save the current terminal attributes so they can be restored.
    old_settings = termios.tcgetattr(fd)
    try:
        # Raw mode: deliver each keypress immediately, no echo, no buffering.
        tty.setraw(fd)
        ch = sys.stdin.read(1)
    finally:
        # Always restore the saved attributes, even when the read fails.
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def read_yn(prompt, yn=True, waitenter=False, suppose_yes=False):
    """
    Read yes or no from stdin, No being the default.

    Parameters
    ----------
    prompt: str
        The message.
    yn: bool
        Add (y/N) to the prompt if True.
    waitenter: bool
        Wait for the enter key pressed if True, proceed immediately
        otherwise.
    suppose_yes: bool
        if True, consider the answer is yes.

    Returns
    -------
    bool: True on yes, False otherwise.
    """
    yn_prompt = prompt + ' '
    #
    # if yes is supposed, write prompt and return True.
    if suppose_yes:
        _ = sys.stdout.write(yn_prompt)
        sys.stdout.flush()
        return True
    #
    # add y/N to prompt if necessary.
    if yn:
        yn_prompt += ' (y/N) '
    #
    # if wait is set, wait for return key.
    if waitenter:
        resp_len = 0
        while resp_len == 0:
            resp = input(yn_prompt).lstrip()
            resp_len = len(resp)
        # NOTE: ``yn`` is reused here to hold the first typed character.
        yn = list(resp)[0]
    #
    # if wait is not set, proceed on any key pressed.
    else:
        _ = sys.stdout.write(yn_prompt)
        sys.stdout.flush()
        yn = _getch()
        sys.stdout.write('\n')
    # Anything other than 'y'/'Y' counts as "no".
    return bool(yn.upper() == 'Y')
def error_msg(msg=None):
    """
    Log an error, echo it to stderr and append it to the result file.

    Parameters
    ----------
    msg: str
        The error text; a generic banner is used when None.

    Returns
    -------
    No return value
    """
    _logger.error(' %s', msg)
    if msg is None:
        banner = ' *** ERROR *** Unidentified error.'
    else:
        banner = ' *** ERROR *** %s' % msg
    sys.stderr.write(banner)
    sys.stderr.flush()
    result_msg(msg=banner)
    # brief pause so the message stays visible before follow-up output
    time.sleep(1)
def exit_with_msg(msg, exit_code=1):
    """
    Write *msg* to stderr and terminate the process.

    Parameters
    ----------
    msg: str
        The exit message.
    exit_code: int
        Process exit status, 1 by default.

    Returns
    -------
    No return value.
    """
    message = '\n %s\n' % msg
    sys.stderr.write(message)
    sys.exit(exit_code)
def pause_msg(msg=None, pause_flag='_OCI_PAUSE'):
    """
    Wait for a keypress, but only when a pause environment flag is set.

    Parameters:
    ----------
    msg: str
        Optional text shown above the 'press a key' banner.
    pause_flag: str
        Name of an alternative environment variable that also triggers
        the pause.

    Returns
    -------
    No return value.
    """
    paused = os.environ.get('_OCI_PAUSE') or os.environ.get(pause_flag)
    if not paused:
        return
    banner = '\n Press a key to continue'
    if msg is not None:
        banner = '\n %s' % msg + banner
    _ = read_yn(banner, False)
def console_msg(msg=None):
    """
    Write a message to the console, 'Notification.' when none is given.

    Parameters:
    ----------
    msg: str
        The message.

    Returns:
    -------
    No return value.
    """
    text = msg if msg is not None else 'Notification.'
    sys.stdout.write('\n %s\n' % text)
def bytes_to_hex(bs):
    """
    Convert a byte string to its lowercase hexadecimal representation.

    Parameters
    ----------
    bs: bytes
        byte string

    Returns
    -------
    str: hex string
    """
    return ''.join(format(byte, '02x') for byte in bs)
def result_msg(msg, flags='a', result=False):
    """
    Write information to the log file, the result file and the console if
    the result flag is set.

    BUG FIX: the IOError handler unpacked ``e.args`` into two variables,
    which itself raises ValueError when the exception was created with a
    single argument; the ``errno``/``strerror`` attributes are used instead.

    Parameters
    ----------
    msg: str
        The message.
    flags: str
        The flags for the open file command.
    result: bool
        Flag, write to console if True.

    Returns
    -------
    No return value.
    """
    msg = ' Just mentioning I am here.' if msg is None else msg
    _logger.debug('%s', msg)
    try:
        with open(migrate_data.result_filename, flags) as f:
            f.write(' %s: %s\n' % (datetime.now().strftime('%H:%M:%S'), msg))
    except IOError as e:
        #
        # trap permission denied errors if running as non root
        # (13 == EACCES); e.errno may be None for hand-built IOErrors.
        if e.errno != 13:
            _logger.error(' Failed to write to %s: %s', migrate_data.result_filename, e.strerror or str(e))
    except Exception as e:
        _logger.error(' Failed to write to %s: %s', migrate_data.result_filename, str(e))
    if result:
        sys.stdout.write(' %s\n' % msg)
def terminal_dimension():
    """
    Collect the dimension of the terminal window.

    Returns
    -------
    tuple: (nb rows, nb colums); falls back to (80, 80) when the size
    cannot be determined (e.g. not attached to a terminal).
    """
    try:
        size = os.get_terminal_size()
    except Exception as e:
        #
        # fail to get terminal dimension, because not connected to terminal?
        # returning dummy
        _logger.debug('Failed to determine terminal dimensions: %s; falling back to 80x80', str(e))
        return 80, 80
    return size.lines, size.columns
class OciMigrateConfParam():
    """
    Context manager which reads the oci-image-migrate YAML configuration
    file and exposes one section of it, or the whole document when the
    tag is '*'.
    """
    def __init__(self, yamlconf, tag):
        """
        Initialise the configuration reader.

        Parameters:
        ----------
        yamlconf: str
            The full path of the oci-image-migrate configuration file.
        tag: str
            The configuration structure to collect; '*' selects everything.
        """
        self._conf_path = yamlconf
        self._section = tag
        self._data = dict()

    def __enter__(self):
        """Load and parse the YAML file; return self for 'with' usage."""
        with open(self._conf_path, 'r') as fd:
            self._data = yaml.load(fd, Loader=yaml.SafeLoader)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Log any exception raised inside the 'with' block (and propagate it)."""
        if exc_value is not None:
            _logger.error('Failed to read config file:%s - %s: %s', exc_type, exc_value, exc_traceback)

    def get_values(self):
        """Return the selected section, or the complete data for tag '*'."""
        return self._data if self._section == '*' else self._data[self._section]
class ProgressBar(threading.Thread):
    """
    Class to generate an indication of progress, does not actually
    measure real progress, just shows the process is not hanging.
    """
    # Symbol(s) cycled through the bar when the caller supplies none.
    _default_progress_chars = ['#']
    def __init__(self, bar_length, progress_interval, progress_chars=None):
        """
        Progressbar initialisation.

        Parameters:
        ----------
        bar_length: int
            Length of the progress bar.
        progress_interval: float
            Interval in sec of change.
        progress_chars: list
            List of char or str to use; the list is mirrored before use.
        """
        self._stopthread = threading.Event()
        threading.Thread.__init__(self)
        #
        # length of variable progress bar
        # (14 columns are reserved for the ' HH:MM:SS [' ... ']' decoration;
        # NOTE(review): confirm the reserve matches the format built in run())
        self._bar_len = bar_length - 14
        #
        # progress interval in sec
        self._prog_int = progress_interval
        if progress_chars is None:
            self._prog_chars = self._default_progress_chars
        else:
            self._prog_chars = progress_chars
        #
        # nb progress symbols
        self._nb_prog_chars = len(self._prog_chars)
        #
        # the max len of the progress symbols, should be all equal
        self._prog_len = 0
        for s in self._prog_chars:
            ls = len(s)
            if ls > self._prog_len:
                self._prog_len = ls
        #
        # nb iterations per bar
        self._cntr = self._bar_len - self._prog_len + 1
        # Polled by run(); set through stop() to end the animation loop.
        self.stop_the_progress_bar = False
    def run(self):
        """
        Execute the progress bar.

        Draws an elapsed-time stamp plus a moving symbol on one terminal
        line (via carriage return) until stop() is called.

        Returns
        -------
        No return value.
        """
        #
        # counter in progress bar symbols
        i = 0
        j = i % self._nb_prog_chars
        #
        # counter in bar
        k = 0
        sys.stdout.write('\n')
        sys.stdout.flush()
        start_time = datetime.now()
        while True:
            now_time = datetime.now()
            delta_time = now_time - start_time
            hrs, rest = divmod(delta_time.seconds, 3600)
            mins, secs = divmod(rest, 60)
            pbar = ' ' \
                   + '%02d:%02d:%02d' % (hrs, mins, secs) \
                   + ' [' \
                   + ' '*k \
                   + self._prog_chars[j] \
                   + ' ' * (self._bar_len - k - self._prog_len) \
                   + ']'
            sys.stdout.write('\r%s' % pbar)
            sys.stdout.flush()
            k += 1
            # Symbol reached the right edge: restart and switch to the next one.
            if k == self._cntr:
                k = 0
                i += 1
                j = i % self._nb_prog_chars
            time.sleep(self._prog_int)
            if self.stop_the_progress_bar:
                # Final redraw showing the elapsed time and a 'done' marker.
                now_time = datetime.now()
                delta_time = now_time - start_time
                hrs, rest = divmod(delta_time.seconds, 3600)
                mins, secs = divmod(rest, 60)
                pbar = ' ' \
                       + '%02d:%02d:%02d' % (hrs, mins, secs) \
                       + ' [ ' \
                       + ' %s' % self._prog_chars[j] \
                       + ' done ]' \
                       + (self._bar_len - self._prog_len - 5)*' '
                sys.stdout.write('\r%s\n' % pbar)
                sys.stdout.flush()
                break
    def stop(self):
        """
        Notify thread to stop the progress bar.

        Sets the stop flag and joins the thread so the final 'done' frame
        is drawn before returning.

        Returns
        -------
        No return value.
        """
        self.stop_the_progress_bar = True
        self.join()
        sys.stdout.write('\n')
        sys.stdout.flush()
    def join(self, timeout=None):
        """
        Terminate the thread.

        Parameters
        ----------
        timeout: float
            Time to wait if set.

        Returns
        -------
        No return value.
        """
        self._stopthread.set()
        threading.Thread.join(self, timeout)
| 2.53125 | 3 |
camera_calibration/camera_calibration.py | kamino410/edsdk-sample | 23 | 12773714 | #coding: UTF-8
import sys
import os
import os.path
import glob
import cv2
import numpy as np
CAPTUREDDIR = './captured'
CALIBFLAG = 0 # cv2.CALIB_FIX_K3
def calibFromImages(dirname, chess_shape, chess_block_size):
    """
    Calibrate a camera from chessboard images found in *dirname*.

    Runs cv2.findChessboardCorners on every image, feeds the detections to
    cv2.calibrateCamera, prints the results and saves them to
    'calibration.xml'.

    Parameters
    ----------
    dirname: str
        Directory containing the captured chessboard images.
    chess_shape: tuple
        Number of inner chessboard corners as passed to
        cv2.findChessboardCorners.
    chess_block_size: float
        Physical size of one chessboard square (same unit as the caller).

    Returns
    -------
    tuple (img_shape, rms, intrinsic, distortion, rvecs, tvecs),
    or None when the directory or usable images are missing.
    """
    if not os.path.exists(dirname):
        print('Directory \'' + dirname + '\' was not found')
        return None
    filenames = sorted(glob.glob(dirname + '/*'))
    if len(filenames) == 0:
        print('No image was found in \'' + dirname + '\'')
        return None
    print('=== Camera Calibration ===')
    # Planar 3D object points: a z=0 grid scaled by the block size.
    objp = np.zeros((chess_shape[0]*chess_shape[1], 3), np.float32)
    objp[:, :2] = chess_block_size * \
        np.mgrid[0:chess_shape[0], 0:chess_shape[1]].T.reshape(-1, 2)
    print('Finding chess corners in input images ...')
    objp_list = []
    imgp_list = []
    img_shape = None
    for f in filenames:
        print(' ' + f + ' : ', end='')
        img = cv2.imread(f, cv2.IMREAD_GRAYSCALE)
        # All images must share the size of the first one; others are skipped.
        if img_shape is None:
            img_shape = img.shape
        elif img_shape != img.shape:
            print('Mismatch size')
            continue
        ret, imgp = cv2.findChessboardCorners(img, chess_shape, None)
        if ret:
            print('Found')
            objp_list.append(objp)
            imgp_list.append(imgp)
        else:
            print('Not found')
    print(' ', len(objp_list), 'images are used')
    # NOTE(review): img_shape is numpy's (rows, cols); cv2.calibrateCamera
    # documents imageSize as (width, height) -- confirm the ordering is intended.
    ret, cam_int, cam_dist, rvecs, tvecs = cv2.calibrateCamera(
        objp_list, imgp_list, img_shape, None, None, None, None, CALIBFLAG
    )
    print('Image size :', img_shape)
    print('RMS :', ret)
    print('Intrinsic parameters :')
    print(cam_int)
    print('Distortion parameters :')
    print(cam_dist)
    print()
    # Convert rotation vectors to matrices for the saved report.
    rmtxs = list(map(lambda vec: cv2.Rodrigues(vec)[0], rvecs))
    fs = cv2.FileStorage('calibration.xml', cv2.FILE_STORAGE_WRITE)
    fs.write('img_shape', img_shape)
    fs.write('rms', ret)
    fs.write('intrinsic', cam_int)
    fs.write('distortion', cam_dist)
    fs.write('rotation_vectors', np.array(rvecs))
    fs.write('rotation_matrixes', np.array(rmtxs))
    fs.write('translation_vectors', np.array(tvecs))
    fs.release()
    return (img_shape, ret, cam_int, cam_dist, rvecs, tvecs)
if __name__ == '__main__':
    # Expected CLI: <corners vert> <corners hori> <block size>.
    if len(sys.argv) != 4:
        print('Usage :')
        print(' Save captured images into \'' + CAPTUREDDIR + '\'')
        print(' Run \'python3 caliblate_camera_from_images.py <num of chess corners in vert> <num of chess corners in hori> <chess block size(m or mm)>')
    else:
        corners = (int(sys.argv[1]), int(sys.argv[2]))
        block_size = float(sys.argv[3])
        calibFromImages(CAPTUREDDIR, corners, block_size)
| 2.796875 | 3 |
7KYU/largest_pair_sum.py | yaznasivasai/python_codewars | 4 | 12773715 | from typing import List
def largest_pair_sum(numbers: List[int]) -> int:
    """Return the sum of the two largest values in *numbers*.

    Uses heapq.nlargest so the whole list is not sorted (O(n) instead of
    O(n log n)); with fewer than two elements the available values (or 0
    for an empty list) are summed, matching the previous slicing behaviour.
    """
    from heapq import nlargest  # local import keeps module-level deps unchanged
    return sum(nlargest(2, numbers))
| 3.25 | 3 |
weather_comparison/weather_api1/apps.py | is-dev-djangoproject-ss-2020/project-repository | 0 | 12773716 | from django.apps import AppConfig
class WeatherApi1Config(AppConfig):
    """Django application configuration for the 'weather_api1' app."""
    name = 'weather_api1'  # dotted path Django uses to register the app
| 1.359375 | 1 |
language.py | ErrorNoInternet/Doge-Utilities | 6 | 12773717 | <reponame>ErrorNoInternet/Doge-Utilities
data = {
"en": {
"no_permission": "You do not have permission to use this command!",
"not_command_sender": "You are not the sender of that command!",
"vote_message": "Thank you for voting for Doge Utilities!",
"banned_message": "You are banned from using Doge Utilities!",
"error_message": "Oops! Doge Utilities has ran into an error...",
"use_in_server": "Please use Doge Utilities in a server for the best experience!",
"no_reminders": "You have no active reminders",
"reminders": "Reminders",
"reminder": "Reminder",
"time": "Time",
"text": "Text",
"vote_again": "Don't forget to vote for me!",
"todo_list": "To-do List",
"todo_empty": "Your to-do list is empty",
"generate_number": "Generate Number",
"number_prompt": "Your random number is",
"cpu_usage": "CPU Usage",
"ram_usage": "RAM Usage",
"thread_count": "Thread Count",
"joined_guilds": "Joined Guilds",
"active_shards": "Active Shards",
"member_count": "Member Count",
"channel_count": "Channel Count",
"command_count": "Command Count",
"disnake_version": "Disnake Version",
"bot_version": "Bot Version",
"bot_uptime": "Bot Uptime",
"latency": "Latency",
"shard": "Shard",
"members_lower": "members",
"guilds_lower": "guilds",
"i_choose": "I choose",
"shard_count": "Shard Count",
"current_shard": "Current Shard",
"reminder_removed": "That reminder has been successfully removed",
"reminder_added": "You will be reminded in **{}**",
"support_server": "Doge Utilities support server",
"user_accounts": "User accounts",
"bot_accounts": "Bot accounts",
"total_members": "Total members",
"guild_members": "Guild Members",
"todo_added": 'Successfully added **"{}"** to your to-do list',
"todo_removed": 'Successfully removed **"{}"** from your to-do list',
"uptime_description": "Doge Utilities has been running for",
"current_time": "Time",
"current_date": "Date",
"weekday": "Weekday",
"time_description": "Information for",
"weekdays": ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"],
"vote_websites": "You can vote for me on these websites",
"vote_reminder_added": "A **12 hour reminder** has been successfully added!",
"vote_reminder_exists": "A reminder already exists!",
"vote_add_reminder": "Add a reminder",
"vote_messages_enabled": "Vote messages have been successfully **enabled**",
"vote_messages_disabled": "Vote messages have been successfully **disabled**",
"enabled_lower": "enabled",
"disabled_lower": "disabled",
"vote_messages_status": "Vote messages are currently **{}**",
"language_update": "Your preferred language has been set to **{}**",
"language_name": "English",
"website_links": "Here are the links to my website",
"website": "Website",
"dashboard": "Dashboard",
"user_kicked": "**{}** has been **successfully kicked**",
"user_banned": "**{}** has been **successfully banned**",
"unable_to_kick": "Unable to kick **{}**",
"unable_to_ban": "Unable to ban **{}**",
"mention_valid_user": "Please mention a valid user!",
"user_id": "User ID",
"user_tag": "Tag",
"creation_time": "Creation Time",
"public_flags": "Public Flags",
"bot_user": "Bot User",
"system_user": "System User",
"command_cooldown": "Command Cooldown",
"command_cooldown_description": "Please wait **{}** before using the `{}` command again",
"invalid_duration": "Please enter a valid duration!",
"no_permission_kick": "You do not have permission to kick **{}**!",
"no_permission_ban": "You do not have permission to ban **{}**!",
"unable_to_mute": "Unable to mute **{}**",
"no_permission_mute": "You do not have permission to mute **{}**!",
"no_permission_unmute": "You do not have permission to unmute **{}**!",
"user_muted": "Successfully muted **{}** for **{}**",
"unable_to_unban": "Unable to unban **{}**",
"bot_error": "Bot Error",
"unable_to_unmute": "Unable to unmute **{}**",
"user_unmuted": "**{}** has been successfully unmuted",
"cannot_warn_self": "You cannot warn yourself!",
"cannot_warn_bot": "You cannot warn a bot!",
"cannot_warn_administrator": "You cannot warn an administrator!",
"no_permission_warn": "You do not have permission to warn **{}**!",
"warning": "Warning",
"warning_lower": "warning",
"warnings_lower": "warnings",
"warning_count": "You now have {0} {1} in {2}",
"user_warned": "Successfully warned **{}** (**{}**)",
"unable_to_warn": "Unable to warn **{}**",
"warnings_reset": "**{}**'s warnings have been successfully reset",
"not_specified": "Not specified",
"generate_number_limit": "You have generated **5 numbers** already. Please re-run the command to continue.",
"expression": "Expression",
"result": "Result",
"unknown_answer": "Unknown Answer",
"second": "second",
"seconds": "seconds",
"minute": "minute",
"minutes": "minutes",
"hour": "hours",
"hours": "hours",
"day": "day",
"days": "days",
"month": "months",
"months": "months",
"year": "year",
"years": "years",
"suggestion_sent": "Your suggestion has been successfully sent",
"sending_suggestion": "Sending your suggestion...",
"not_bot_owner": "You are not the owner of Doge Utilities!",
"infinity": "infinity",
"no_negative_numbers": "No negative numbers please!",
"duration_too_long": "The specified duration is too long!",
"current_language_description": "Your preferred language is set to **{}**",
"border_size_too_big": "The border size must not exceed 32!",
"qr_code": "QR Code",
"qr_create_failed": "Unable to create a QR code",
"correct_answer": "Correct answer!",
"wrong_answer": "Wrong answer... The correct answer was **{}**.",
"unable_to_clear": "Unable to clear messages",
"max_clear_messages": "You can only clear up to **1000 messages**!",
"cleared": "Successfully deleted **{} {}**",
"cleared_from": "Successfully deleted **{} {}** from **{}**",
"cleared_contains": 'Successfully deleted **{} {}** that contained **"{}"**',
"cleared_from_contains": 'Successfully deleted **{} {}** from **{}** that contained **"{}"**',
"message_lower": "message",
"messages_lower": "messages",
"bot_error_report": "Doge Utilities error report",
"joined_game": "Successfully joined the game!",
"already_joined": "You have already joined the game!",
"player_one": "Player 1",
"player_two": "Player 2",
"game_starts_in_three": "The game starts in **3 seconds**!",
"join_tictactoe": "Click to join the TicTacToe game",
"afk_removed": "Your AFK status has been removed!",
"afk_set": "Your AFK message has been set to **\"{}\"**",
"currently_afk": "**{}** is currently AFK ({}): **{}**",
"already_afk": "You are already AFK!",
"mentioned_user": "The user you mentioned",
"i_am_afk": "I am AFK",
"text_too_long": "The specified text is too long!",
"item_limit": "You can only add up to **{} items**!",
"channel_locked": "{} has been successfully locked",
"channel_unlocked": "{} has been successfully unlocked",
"unable_to_lock": "I am unable to lock {}",
"unable_to_unlock": "I am unable to unlock {}",
"wrong_turn": "It is not your turn!",
"not_in_game": "You did not join that game!",
"bot_status": "Bot Status",
"minecraft_server": "Minecraft Server",
"players": "Players",
"unknown_upper": "Unknown",
"unable_to_connect": "Unable to connect",
"nothing_here": "There is nothing here...",
"custom_embed_generated": "Your custom embed has been successfully generated!",
"no_permission_manage_role": "You do not have permission to manage this role!",
"enter_valid_message_id": "Please enter a valid message ID (that exists in this channel)",
"unable_to_add_emoji": "I am unable to add an emoji to that message!",
"reaction_role_created": "A new reaction role has been successfully created!",
"reaction_roles": "Reaction Roles",
"no_reaction_roles": "There are no reaction roles in this server",
"currency_list": "Currency List",
"invite_link": "Invite Link",
"leave_server": "Leave Server",
"leave_server_confirm": "Are you sure you want me to leave this server? Please press the button again to confirm.",
"leaving_server": "Leaving server...",
"here_is_invite_link": "Here is Doge Utilities' invite link",
"source_code": "Source Code",
"developers": "Developers",
"ideas": "Ideas",
"translators": "Translators",
"big_thanks_to": "Big thanks to {}, and a lot more awesome people!",
"currency_conversion": "Currency Conversion",
"source_code_here": "You can find my code [here]({})",
},
"zh-cn": {
"no_permission": "你没有权限使用这个指令!",
"not_command_sender": "这个指令不是你发的!",
"vote_message": "谢谢你给我投票!",
"banned_message": "你已被禁止使用Doge Utilities!",
"error_message": "很抱歉, Doge Utilities出错了。。。",
"use_in_server": "请你在服务器里使用Doge Utilities!",
"no_reminders": "你没有提醒",
"reminders": "提醒",
"reminder": "提醒",
"time": "时间",
"text": "文字",
"vote_again": "不要忘记给我投票!",
"todo_list": "待办事列表",
"todo_empty": "你的待办事列表是空的",
"generate_number": "生成数字",
"number_prompt": "你的随机数字是",
"cpu_usage": "CPU使用率",
"ram_usage": "RAM使用率",
"thread_count": "机器人程序数",
"joined_guilds": "机器人服务器",
"active_shards": "机器人碎片",
"member_count": "用户数",
"channel_count": "频道数",
"command_count": "指令数",
"disnake_version": "Disnake版本",
"bot_version": "机器人版本",
"bot_uptime": "运行时间",
"latency": "延迟",
"shard": "碎片",
"members_lower": "用户",
"guilds_lower": "服务器",
"i_choose": "我选择",
"shard_count": "碎片数",
"current_shard": "当前碎片",
"reminder_removed": "那个提醒已被成功移除",
"reminder_added": "你会在**{}**后被提醒",
"support_server": "我的服务器",
"user_accounts": "用户账号",
"bot_accounts": "机器人账号",
"total_members": "全部账号",
"guild_members": "服务器成员",
"todo_added": '我成功的把 **"{}"** 加到了你的待办事列表',
"todo_removed": '我成功的把你待办事列表里的 **"{}"** 移除了',
"uptime_description": "我已经运行了",
"current_time": "时间",
"current_date": "日期",
"weekday": "星期",
"time_description": "关于",
"weekdays": ["星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期天"],
"vote_websites": "你可以在这些网站上给我投票",
"vote_reminder_added": "我成功的给你加了一个**12小时的提醒**!",
"vote_reminder_exists": "你已经有了一个提醒!",
"vote_add_reminder": "加一个提醒",
"vote_messages_enabled": "投票通知已被成功**打开**",
"vote_messages_disabled": "投票通知已被成功**关闭**",
"enabled_lower": "打开",
"disabled_lower": "关闭",
"vote_messages_status": "投票消息现在属于**{}**状态",
"language_update": "你的语言已被换成**{}**",
"language_name": "中文",
"website_links": "这我是我的网站的链接",
"website": "网站",
"dashboard": "控制板",
"user_kicked": "**{}**已被**成功剔除**",
"user_banned": "**{}**已被**成功封锁**",
"unable_to_kick": "我无法剔除**{}**",
"unable_to_ban": "我无法封锁**{}**",
"mention_valid_user": "请@一个服务器成员!",
"user_id": "用户ID",
"user_tag": "账号",
"creation_time": "创造时间",
"public_flags": "属性",
"bot_user": "机器人用户",
"system_user": "系统用户",
"command_cooldown": "指令冷却",
"command_cooldown_description": "请在**{}**后使用`{}`",
"invalid_duration": "请输入一个正确的时间!",
"no_permission_kick": "你没有权限剔除**{}**!",
"no_permission_ban": "你没有权限封锁**{}**!",
"unable_to_mute": "我无法禁言**{}**",
"no_permission_mute": "你没有权限禁言**{}**!",
"no_permission_unmute": "你没有权限解**{}**的禁言!",
"user_muted": "我成功禁言了**{}** (**{}**)",
"unable_to_unban": "我无法解除**{}**的封锁",
"bot_error": "机器人错误",
"unable_to_unmute": "我无法解**{}**的禁言",
"user_unmuted": "**{}**的禁言已被成功解除",
"cannot_warn_self": "你不能警告你自己!",
"cannot_warn_bot": "你不能警告一个机器人!",
"cannot_warn_administrator": "你不能警告一个管理员!",
"no_permission_warn": "你没有权限警告**{}**!",
"warning": "警告",
"warning_lower": "警告",
"warnings_lower": "警告",
"warning_count": "你在{2}有{0}个{1}",
"user_warned": "成功的警告了**{}** (**{}**)",
"unable_to_warn": "我无法警告**{}**",
"warnings_reset": "**{}**的警告已被成功清除",
"not_specified": "无原因",
"generate_number_limit": "你已经生成了**五个数字**。如果你想继续, 请重新运行指令。",
"expression": "数学题",
"result": "结果",
"unknown_answer": "未知答案",
"second": "秒",
"seconds": "秒",
"minute": "分钟",
"minutes": "分钟",
"hour": "小时",
"hours": "小时",
"day": "天",
"days": "天",
"month": "月",
"months": "月",
"year": "年",
"years": "年",
"suggestion_sent": "你的建议已被成功发送",
"sending_suggestion": "正在发送你的建议。。。",
"not_bot_owner": "你不是Doge Utilities的主人!",
"infinity": "无数",
"no_negative_numbers": "请不要输入负数!",
"duration_too_long": "你提供的时间太长了!",
"current_language_description": "你的显示语言是**{}**",
"border_size_too_big": "边框大小不能超过32!",
"qr_code": "QR码",
"qr_create_failed": "我无法生成二维码",
"correct_answer": "你答对了!",
"wrong_answer": "你答错了。。。真确答案是**{}**。",
"unable_to_clear": "我无法清除消息",
"max_clear_messages": "你最多只能清除**1000个消息**!",
"cleared": "我成功清除了**{}个{}**",
"cleared_from": "我成功清除了来自**{2}**的**{0}个{1}**",
"cleared_contains": '我成功清除了含有**"{2}"**的**{0}个{1}**',
"cleared_from_contains": '我成功清除了来自**{2}**含有**"{3}"**的**{0}个{1}**',
"message_lower": "消息",
"messages_lower": "消息",
"bot_error_report": "Doge Utilities错误报告",
"joined_game": "成功加入了游戏!",
"already_joined": "你已经加入了游戏!",
"player_one": "玩家1",
"player_two": "玩家2",
"game_starts_in_three": "游戏会在**3秒**后开始!",
"join_tictactoe": "点击按钮加入TicTacToe游戏",
"afk_removed": "你的AFK已被移除!",
"afk_set": "你的AFK消息已被设成**\"{}\"**",
"currently_afk": "**{}**现在是AFK ({}): **{}**",
"already_afk": "你已经是AFK!",
"mentioned_user": "你@的用户",
"i_am_afk": "我不在",
"text_too_long": "你提供的字太长了!",
"item_limit": "你只能加**{}个**物品!",
"channel_locked": "{} 已被成功锁定",
"channel_unlocked": "{} 已被成功解锁",
"unable_to_lock": "我无法锁定 {}",
"unable_to_unlock": "我无法解锁 {}",
"wrong_turn": "这不是你玩的时候!",
"not_in_game": "你没有加入那个游戏!",
"bot_status": "机器人状态",
"minecraft_server": "我的世界服务器",
"players": "玩家",
"unknown_upper": "未知",
"unable_to_connect": "无法连接",
"nothing_here": "这里没东西。。。",
"custom_embed_generated": "你的嵌入已被成功生成!",
"no_permission_manage_role": "你没有权限管理这个身份组!",
"enter_valid_message_id": "请输入一个在这个频道里存在的消息ID!",
"unable_to_add_emoji": "我无法在那个消息上加emoji!",
"reaction_role_created": "我成功的创造了一个新的反应身分组!",
"reaction_roles": "反应身分组",
"no_reaction_roles": "这个服务器没有反应身分组",
"currency_list": "货币列表",
"invite_link": "邀请链接",
"leave_server": "退出服务器",
"leave_server_confirm": "你确定你要让我退出这个服务器吗? 如果你真的要让我退出, 请再按一次按钮。",
"leaving_server": "正在退出服务器。。。",
"here_is_invite_link": "这是我的邀请链接",
"source_code": "源代码",
"developers": "开发者",
"ideas": "主意",
"translators": "翻译",
"big_thanks_to": "很感谢 {}, 和很多其他人!",
"currency_conversion": "货币转换",
"source_code_here": "你可以在[这里]({})找到我的源代码",
},
"de": {
"no_permission": "Du hast keine Berechtigung diesen Befehl zu benutzen!",
"not_command_sender": "Du hast den Befehl nicht benutzt!",
"vote_message": "Danke dass du Doge Utilities Bewertet hast!",
"banned_message": "Du bist von Doge Utilities gesperrt",
"error_message": "Ups! Doge Utilities hatte einen Fehler...",
"shard": "Shard",
"latency": "Latenz",
"time": "Wann",
"text": "Text",
"reminder": "Erinnerung",
"reminders": "Erinnerungen",
"cpu_usage": "CPU Auslastung",
"ram_usage": "RAM Auslastung",
"thread_count": "Anzahl der Threads",
"joined_guilds": "Servern beigetreten",
"active_shards": "Aktive Shards",
"member_count": "Anzahl der Mitglieder",
"channel_count": "Anzahl der Kanäle",
"command_count": "Anzahl der Commands",
"disnake_version": "Disnake Version",
"bot_version": "Bot Version",
"bot_uptime": "Bot online seit",
"i_choose": "Ich wähle",
"members_lower": "Mitglieder",
"guilds_lower": "Server",
"current_shard": "Aktueller Shard",
"shard_count": "Shards",
"use_in_server": "Bitte nutze Doge Utilities in einem Server für das bestmögliche Ergebnis!",
"no_reminders": "Du hast keine aktiven Erinnerungen",
"vote_again": "Vergiss nicht für mich zu voten!",
"todo_list": "To-do Liste",
"todo_empty": "Deine To-do Liste ist leer",
"generate_number": "Generiere eine Nummer",
"number_prompt": "Deine generierte Nummer ist",
"reminder_removed": "Diese Erinnerung wurde erfolgreich entfernt",
"reminder_added": "Du wirst erinnert in **{}**",
"minute": "Minute",
"minutes": "Minuten",
"support_server": "Doge Utilities Support Server",
"user_accounts": "Nutzer Accounts",
"bot_accounts": "Bot Accounts",
"total_members": "Alle Mitglieder",
"guild_members": "Server Mitglieder",
"todo_added": '**"{}"** wurde erfolgreich zur To-do Liste hinzugefügt',
"todo_removed": '**"{}"** wurde erfolgreich aus der To-do Liste entfernt',
"uptime_description": "Doge Utilities ist online seit",
"current_time": "Uhrzeit",
"current_date": "Datum",
"weekday": "Wochentag",
"time_description": "Informationen für",
"weekdays": ["Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag", "Sonntag"],
"vote_websites": "Du kannst für mich auf folgenden Websites voten",
"vote_reminder_added": "Eine **12 Stunden Erinnerung** wurde erfolgreich hinzugefügt!",
"vote_reminder_exists": "Es gibt bereits eine Erinnerung!",
"vote_add_reminder": "Füge eine Erinnerung hinzu",
"vote_messages_enabled": "Vote Benachrichtigungen wurden erfolgreich **aktiviert**",
"vote_messages_disabled": "Vote Benachrichtigungen wurden erfolgreich **deaktiviert**",
"enabled_lower": "aktiviert",
"disabled_lower": "deaktiviert",
"vote_messages_status": "Vote Benachrichtigungen sind derzeit **{}**",
"language_update": "Deine Sprache wurde auf **{}** gesetzt",
"language_name": "deutsch",
"website_links": "Hier sind Links für meine Website",
"website": "Website",
"dashboard": "Dashboard",
"user_kicked": "**{}** wurde erfolgreich gekickt",
"user_banned": "**{}** wurde erfolgreich gebannt",
"unable_to_kick": "**{}** Kicken nicht möglich",
"unable_to_ban": "**{}** bannen nicht möglich",
"mention_valid_user": "Bitte erwähne einen gültigen Nutzer",
"user_id": "Nutzer ID",
"user_tag": "Tag",
"creation_time": "Zeitpunkt der Erstellung",
"public_flags": "Public Flags",
"bot_user": "Bot Nutzer",
"system_user": "System Nutzer",
"command_cooldown": "Command cool-down",
"invalid_duration": "Bitte gib einen gültigen Zeitraum an",
"no_permission_kick": "Du hast keine Berechtigungen um **{}** zu kicken",
"no_permission_ban": "Du hast keine Berechtigungen **{}** zu bannen",
"unable_to_mute": "**{}** nuten nicht möglich",
"no_permission_mute": "Du hast keine Berechtigungen **{}** zu muten",
"no_permission_unmute": "Du hast keine Berechtigungen **{}** zu unmuten",
"user_muted_permanently": "**{}** wurde erfolgreich permanent gemuted",
"user_muted_temporarily": "**{}** wurde erfolgreich für **{}** gemuted",
"unable_to_unban": "**{}** bannen nicht möglich",
"bot_error": "Bot Fehler",
"unable_to_unmute": "**{}** unmuten nicht möglich",
"user_unmuted": "**{}** erfolgreich unmuted",
"cannot_warn_self": "Du kannst dich nicht selbst verwarnen!",
"cannot_warn_bot": "Du kannst keinen Bot verwarnen!",
"cannot_warn_administrator": "Du kannst keine Administratoren warnen!",
"no_permission_warn": "Du hast keine Berechtigungen **{}** zu warnen!",
"warning": "Warnung",
"warning_lower": "Warnung",
"warnings_lower": "Warnungen",
"warning_count": "Du hast jetzt {0} {1} in {2}",
"user_warned": "**{}** wurde erfolgreich gewarnt. (**{}**)",
"unable_to_warn": "**{}** warnen nicht möglich",
"warnings_reset": "Die Warnungen von **{}** wurden erfolgreich zurückgesetzt",
"not_specified": "Nicht angegeben",
"command_cooldown_description": "Bitte warte **{}** befor du den `{}` Befehl wieder benutzen kannst",
"generate_number_limit": "Du hast bereits 5 Zahlen generiert. Bitte führe den Befehl erneut aus, um weiterzumachen.",
"expression": "Rechnung",
"result": "Ergebnis",
"unknown_answer": "Unbekannte Antwort",
"second": "Sekunde",
"seconds": "Sekunden",
"hour": "Stunde",
"hours": "Stunden",
"day": "Tag",
"days": "Tage",
"month": "Monat",
"months": "Monate",
"year": "Jahr",
"years": "Jahre",
"suggestion_sent": "Dein Vorschlag wurde erfolgreich abgesendet",
"sending_suggestion": "Dein Vorschlag wird abgeschickt...",
"not_bot_owner": "Du bist nicht der Besitzer von Doge Utilities!",
"infinity": "Unendlich",
"no_negative_numbers": "Bitte keine negativen Zahlen!",
"duration_too_long": "Die angegebene Dauer ist zu lang!",
"current_language_description": "Deine bevorzugte Sprache wurde zu **{}** geändert",
"border_size_too_big": "Die länge der Randes darf 32 nicht überschreiten!",
"qr_code": "QR Code",
"qr_create_failed": "Erstellen des QR Codes ist nicht möglich",
"correct_answer": "Richtige Antwort!",
"wrong_answer": "Falsche Antwort... die richtige Antwort war **{}**",
"unable_to_clear": "Löschen der Nachrichten nicht möglich",
"max_clear_messages": "Du kannst maximal **1000 Nachrichten** löschen",
"cleared": "Erfolgreich **{} {}** gelöscht",
"cleared_from": "Erfolgreich **{} {}** von **{}** gelöscht",
"cleared_contains": "Erfolgreich **{} {}** gelöscht, mit dem Inhalt **{}**",
"cleared_from_contains": "Erfolgreich **{} {}** von **{}** gelöscht, mit dem Inhalt **{}**",
"message_lower": "Nachricht",
"messages_lower": "Nachrichten",
"bot_error_report": "Doge Utilities Error Report",
"joined_game": "Dem Spiel erfolgreich beigetreten!",
"already_joined": "Du bist dem Spiel bereits beigetreten!",
"player_one": "Spieler 1",
"player_two": "Spieler 2",
"game_starts_in_three": "Das Spiel startet in **3 Sekunden**!",
"join_tictactoe": "Klicke, um der TicTacToe Partie beizutreten",
"afk_removed": "Dein AFK wurde entfernt!",
"afk_set": "Deine AFK Nachricht wurde auf **\"{}\"** gesetzt",
"currently_afk": "**{}** ist im Moment AFK ({}): **{}**",
"already_afk": "Du bist bereits AFK!",
"mentioned_user": "Der Nutzer, den du erwähnt hast",
"i_am_afk": "Ich bin AFK",
"text_too_long": "Der angegebene Text ist zu lang!",
"item_limit": "Du kannst maximal **{} Elemente** hinzufügen!",
"channel_locked": "{} wurde erfolgreich gesperrt",
"channel_unlocked": "{} wurde erfolgreich entsperrt",
"unable_to_lock": "Ich kann {} nicht sperren",
"unable_to_unlock": "Ich kann {} nicht entsperren",
"wrong_turn": "Du bist nicht an der Reihe!",
"not_in_game": "Du bist diesem Spiel nicht beigetreten!",
"bot_status": "Bot Status",
},
"ru": {
"no_permission": "У вас нет разрешений на использование этой команды!",
"not_command_sender": "Вы не являетесь отправителем данной команды!",
"vote_message": "Спасибо за то, что вы проголосовали за меня!",
"banned_message": "Вам запретили использовать Doge Utilities!",
"error_message": "Упс... Doge Utilities столкнулся с ошибкой...",
"use_in_server": "Пожалуйста, используйте Doge Utilities на сервере для лучшего опыта!",
"no_reminders": "У вас нет активных напоминаний",
"reminders": "Напоминания",
"time": "Время",
"text": "Текст",
"todo_list": "Список дел",
"support_server": "Сервер поддержки Doge Utilities",
"vote_again": "Не забывай голосовать за меня!",
"todo_empty": "Ваш список дел пустой",
"generate_number": "Сгенерировать число",
"number_prompt": "Ваше случайное число",
"cpu_usage": "Использование CPU",
"ram_usage": "Использование RAM",
"thread_count": "Количество треадов",
"active_shards": "Активные шарды",
"member_count": "Количество участников",
"channel_count": "Количество каналов",
"command_count": "Количество команд",
"disnake_version": "Верс<NAME>",
"bot_version": "Версия бота",
"bot_uptime": "Время работы",
"shard": "Шард",
"members_lower": "участники",
"guilds_lower": "гильдии",
"i_choose": "Я выбрал",
"shard_count": "Количество шардов",
"current_shard": "Текущий шард",
"reminder_removed": "Это напоминание было успешно удалено",
"reminder_added": "Вам напомнят в **{}**",
"minute": "минута",
"minutes": "минуты",
"latency": "Задержка",
"joined_guilds": "Гильдий",
"uptime_description": "Doge Utilities работает",
"reminder": "Напоминание",
"user_accounts": "Учетные записи пользователей",
"bot_accounts": "Аккаунты ботов",
"total_members": "Всего участников",
"guild_members": "Участники гильдии",
"todo_added": '**"{}"** был успешно добавлен к вашему списку дел',
"todo_removed": '**"{}"** {} был успешно удален с вашего списка дел',
"current_time": "Время",
"current_date": "Дата",
"weekday": "День недели",
"time_description": "Информация для",
"weekdays": ["Понедельник", "вторник", "среда", "четверг", "пятница", "суббота", "воскресенье"],
"vote_websites": "Вы можете проголосовать за меня на этих вебсайтах",
"vote_reminder_added": "**12 часовое напоминание** было успешно добавлено!",
"vote_reminder_exists": "Напоминание уже существует!",
"vote_add_reminder": "Добавить напоминание",
"vote_messages_enabled": "Сообщения голосования успешно **включены**",
"vote_messages_disabled": "Сообщения голосования успешно **отключены**",
"enabled_lower": "включены",
"disabled_lower": "отключены",
"vote_messages_status": "Сообщения о голосовании в настоящее время **{}**",
"language_update": "Ваш основной язык установлен на **{}**",
"language_name": "русский",
"website_links": "Вот ссылка на мой вебсайт",
"website": "Вебсайт",
"dashboard": "Панель управления",
"user_kicked": "**{}** успешно кикнут",
"user_banned": "**{}** успешно забанен",
"unable_to_kick": "Невозможно кикнуть **{}**",
"unable_to_ban": "Невозможно забанить **{}**",
"mention_valid_user": "Пожалуйста, укажите правильного пользователь",
"user_id": "Айди",
"user_tag": "Тег",
"creation_time": "Дата создания",
"public_flags": "Публичные флаги",
"bot_user": "Боты",
"system_user": "Системный пользователь",
"command_cooldown": "Задержка команды",
"invalid_duration": "Пожалуйста, укажите правильную задержу",
"no_permission_kick": "Вы не имеете права кикнуть **{}**",
"no_permission_ban": "Вы не имеете права забанить **{}**",
"unable_to_mute": "Невозможно замьютить **{}**",
"no_permission_mute": "Вы не имеете право замьютить **{}**",
"no_permission_unmute": "Вы не имеете права замьютить **{}**",
"user_muted_permanently": "**{}** был успешно замьючен навсегда",
"user_muted_temporarily": "**{}** был успешно замьютен на **{}**",
"unable_to_unban": "Невозможно разбанить **{}**",
"bot_error": "Ошибка бота",
"unable_to_unmute": "Невозможно размьютить **{}**",
"user_unmuted": "**{}** успешно размьючен",
"cannot_warn_self": "Вы не можете дать предупреждение самому себе!",
"cannot_warn_bot": "Вы не можете дать предупреждение боту!",
"cannot_warn_administrator": "Вы не можете дать предупреждение администатору!",
"no_permission_warn": "Вы не имеете права дать предупреждение **{}**!",
"warning": "Предупреждение",
"warning_lower": "предупреждение",
"warnings_lower": "предупреждения",
"warning_count": "Теперь у вас есть {0} {1} в {2}",
"user_warned": "**{}** успешно предупреждён (**{}**)",
"unable_to_warn": "Невозможно дать предупреждение **{}**",
"warnings_reset": "Предупреждения **{}** было успешно сняты",
"not_specified": "Не указано",
},
"sk": {
"no_permission": "Nemáš oprávnenie na použitie tohto príkazu!",
"not_command_sender": "Nie si odosielateľom toho príkazu!",
"vote_message": "Ďakujeme Ti za hlasovanie za Doge Utilities!",
"banned_message": "Máš zakázané používať Doge Utilities!",
"error_message": "Ups! Doge Utilities narazil na chybu...",
"use_in_server": "Prosím, používaj Doge Utilities na serveri pre najlepší dojem!",
"no_reminders": "Nemáš žiadne aktívne pripomienky",
"reminders": "Pripomienky",
"reminder": "Pripomienka",
"time": "Čas",
"text": "Text",
"vote_again": "Nezabudni za mňa zahlasovať!",
"todo_list": "Zoznam úloh",
"todo_empty": "Tvoj zoznam úloh je prázdny",
"generate_number": "Vygeneruj Číslo",
"number_prompt": "Tvoje náhodné číslo je",
"cpu_usage": "Využitie procesora",
"ram_usage": "Využitie pamäte RAM",
"thread_count": "Počet vlákien",
"joined_guilds": "Pripojených serverov",
"active_shards": "Aktívnych inštancií",
"member_count": "Počet používateľov",
"channel_count": "Počet kanálov",
"command_count": "Počet príkazov",
"disnake_version": "Verzia Disnake",
"bot_version": "Verzia Bot-a",
"bot_uptime": "Čas prevádzky",
"latency": "Odozva",
"shard": "inštancia",
"members_lower": "používateľov",
"guilds_lower": "serverov",
"i_choose": "Ja si vyberám",
"shard_count": "Počet inštancií",
"current_shard": "Aktuálna inštancia",
"reminder_removed": "Daná pripomienka bola úspešne odstránená",
"reminder_added": "Pripomenutie bude uskutočnené za **{}**",
"minute": "minúta",
"minutes": "minúty",
"support_server": "Server podpory Doge Utilities",
"user_accounts": "Účty používateľov",
"bot_accounts": "Účty bot-ov",
"total_members": "Celkovo používateľov",
"guild_members": "Používatelia servera",
"todo_added": "Úspešne pridané **{}** do tvojho zoznamu úloh",
"todo_removed": "Z tvojho zoznamu úloh bolo úspešne odstránené **{}**",
"uptime_description": "Doge Utilities je v prevádzke",
"current_time": "Čas",
"current_date": "Dátum",
"weekday": "Pracovný deň",
"time_description": "Informácia pre",
"weekdays": ['Pondelok', 'Utorok', 'Streda', 'Štvrtok', 'Piatok', 'Sobota', 'Nedeľa'],
"vote_websites": "Hlasovať za mňa môžeš na týchto stránkach",
"vote_reminder_added": "12 hodinová pripomienka bola úspešne pridaná!",
"vote_reminder_exists": "Pripomienka už existuje!",
"vote_add_reminder": "Pridať pripomienku",
"vote_messages_enabled": "Správy na hlasovanie boli úspešne zapnuté",
"vote_messages_disabled": "Správy na hlasovanie boli úspešne vypnuté",
"enabled_lower": "zapnuté",
"disabled_lower": "vypnuté",
"vote_messages_status": "Správy na hlasovanie sú momentálne **{}**",
"language_update": "Tvoj preferovaný jazyk bol nastavený na **{}**",
"language_name": "Angličtina",
"website_links": "Tu sú odkazy na moje web stránky",
"website": "Webová stránka",
"dashboard": "Nástenka",
"user_kicked": "**{}** bol úspešne vyhodený",
"user_banned": "**{}** bol úspešne zablokovaný",
"unable_to_kick": "Nie som schopný vyhodiť **{}**",
"unable_to_ban": "Nie som schopný zablokovať **{}**",
"mention_valid_user": "Prosím, označ platného používateľa!",
"user_id": "Používateľské ID",
"user_tag": "Označenie",
"creation_time": "Čas vytvorenia",
"public_flags": "Verejné vlajky",
"bot_user": "Účet bot-a",
"system_user": "Systémový používateľ",
"command_cooldown": "Presiahnutá rýchlosť posielania príkazov",
"invalid_duration": "Prosím zadaj platnú dĺžku!",
"no_permission_kick": "Nemáš oprávnenie vyhodiť **{}**!",
"no_permission_ban": "Nemáš oprávnenie zablokovať **{}**!",
"unable_to_mute": "Nie som schopný stlmiť **{}**",
"no_permission_mute": "Nemáš oprávnenie stlmiť **{}**!",
"no_permission_unmute": "Nemáš oprávnenie zrušiť stlmenie **{}**!",
"user_muted_permanently": "**{}** bol úspešne stlmený navždy",
"user_muted_temporarily": "**{}** bol stlmený na **{}**",
"unable_to_unban": "Nie som schopný odblokovať **{}**",
"bot_error": "Chyba bot-a",
"unable_to_unmute": "Nie som schopný zrušiť stlmenie **{}**",
"user_unmuted": "Úspešne zrušené stlmenie používateľa **{}**",
"cannot_warn_self": "Nemôžeš varovať sám seba!",
"cannot_warn_bot": "Nemôžeš varovať bot-a!",
"cannot_warn_administrator": "Nemôžeš varovať administrátora!",
"no_permission_warn": "Nemáš oprávnenie varovať **{}**!",
"warning": "Varovanie",
"warning_lower": "varovanie",
"warnings_lower": "varovania",
"warning_count": "Teraz máš {0} {1} v {2}",
"user_warned": "Používateľ **{}** bol úspešne varovaný za (**{}**)",
"unable_to_warn": "Nie som schopný varovať **{}**",
"warnings_reset": "Varovania používateľa **{}** boli resetované",
"not_specified": "Nešpecifikované",
"command_cooldown_description": "Prosím počkaj **{}** pred opakovaným použitím príkazu `{}`",
"generate_number_limit": "Už si vygeneroval 5 náhodných čísel. Ak chceš generovať ďalšie náhodné čísla, spusti znovu príkaz.",
"expression": "Výraz",
"result": "Výsledok",
"unknown_answer": "Neznáma odpoveď",
"second": "sekunda",
"seconds": "sekundy",
"hour": "hodina",
"hours": "hodiny",
"day": "deň",
"days": "dni",
"month": "mesiac",
"months": "mesiace",
"year": "rok",
"years": "roky",
"suggestion_sent": "Tvoj nápad bol úspešne zaznamenaný",
"sending_suggestion": "Odosielam tvoj nápad...",
"not_bot_owner": "Nie si majiteľom Doge Utilities!",
"infinity": "nekonečno",
"no_negative_numbers": "Nepoužívajte prosím záporné čísla!",
"duration_too_long": "Daná časová dĺžka je príliš dlhá!",
"current_language_description": "Tvoj preferovaný jazyk bol nastavený na **{}**",
"border_size_too_big": "Veľkosť hranice nesmie presiahnuť 32!",
"qr_code": "QR kód",
"qr_create_failed": "Nebolo možné vytvoriť QR kód",
"correct_answer": "Správna odpoveď!",
"wrong_answer": "Nesprávne... Správna odpoveď bola **{}**.",
"unable_to_clear": "Nebolo možné vymazať správy",
"max_clear_messages": "Môžeš vymazať maximálne 1000 správ!",
"cleared": "Úspešne vymazaných **{} {}**",
"cleared_from": "Úspešne vymazaných **{} {}** z **{}**",
"cleared_contains": "Úspešne vymazaných **{} {}**, ktoré obsahovali **{}**",
"cleared_from_contains": "Úspešne vymazaných **{} {}** od **{}**, ktoré obsahovali **{}**",
"message_lower": "správa",
"messages_lower": "správy",
"bot_error_report": "Chybové hlásenie Doge Utilities",
"joined_game": "Úspešné pripojenie k hre!",
"already_joined": "Už si pripojený k hre!",
"player_one": "Hráč 1",
"player_two": "Hráč 2",
"game_starts_in_three": "Hra začína za 3 sekundy!",
"join_tictactoe": "Klikni pre pripojenie ku hre piškôriek",
"afk_removed": "Tvoj AFK bol odstránený!",
"afk_set": "Tvoja AFK správa bola nastavená na **\"{}\"**",
"currently_afk": "**{}** je momentálne AFK ({}): **{}**",
"already_afk": "Už si AFK!",
"mentioned_user": "Používateľ, ktorého si spomenul/a",
"i_am_afk": "Som AFK",
"text_too_long": "Daný text je príliš dlhý!",
"item_limit": "Môžeš pridať maximálne **{}** vecí!",
"channel_locked": "{} bol úspešne uzamknutý",
"channel_unlocked": "{} bol úspešne odomknutý",
"unable_to_lock": "Nie som schopný zamknúť {}",
"unable_to_unlock": "Nie sok schopný odomknúť {}",
},
}
data["zh-tw"] = data["zh-cn"]
def get(language, key):
language = language.lower()
key = key.lower()
if language not in data.keys():
language = "en"
language_data = data[language]
if key not in language_data:
language_data = data["en"]
if key not in language_data:
return "?"
else:
return language_data[key]
else:
return language_data[key]
| 1.8125 | 2 |
class exercise.py | UcheOlisa312/josephcsc102 | 0 | 12773718 | <reponame>UcheOlisa312/josephcsc102
class Student:
    """A first-year computer science student and their CSC102 exam record."""

    # Shared (class-level) state.
    studentLevel = 'first year computer science 2020/2021 session'
    studentCounter = 0  # number of Student objects created so far
    registeredCourse = 'CSC102'

    def __init__(self, thename, thematricno, thesex, thehostelname, theage, thecsc102examscore):
        """Store the student's personal details and CSC102 exam score."""
        self.name = thename
        self.matricno = thematricno
        self.sex = thesex
        self.hostelname = thehostelname
        self.age = theage
        self.csc102examscore = thecsc102examscore
        # BUG FIX: was `Student.studentCounter = Student.studentCounter`,
        # a no-op that never actually counted new students.
        Student.studentCounter += 1

    @classmethod
    def registeredcourse(cls):
        """Print the course every student is registered for."""
        # BUG FIX: the original string was missing the f-prefix, so the
        # placeholder "{Student.registeredCourse}" (and the typo
        # "registerd") was printed literally.
        print(f"registered course is {cls.registeredCourse}")

    def function(self):
        """Return a sentence saying whether the student is older than 16."""
        if self.age > 16:
            return "yes, he is older than 16"
        else:
            return "no he is not older than 16"

    def getName(self):
        """Return the student's name."""
        return self.name

    def setName(self, thenewName):
        """Replace the student's name."""
        self.name = thenewName

    @staticmethod
    def PAUNanthem():
        """Print the PAU anthem line."""
        print('Pau, here we come, Pau, here we come ')
# Demonstrate the Student class.
studendt1 = Student('<NAME>', '021074', 'M', 'cooperative mall', 15, 98)
print(studendt1.getName())
studendt1.setName('<NAME>')
print(studendt1.getName())
print(studendt1.function())
# BUG FIX: was `Student.registeredCourse()`, which raises TypeError because
# `registeredCourse` is a string attribute; the classmethod is lowercase.
Student.registeredcourse()
Student.PAUNanthem()
ci/checks/commit-message.py | sineagles/Adlik | 2 | 12773719 | #!/usr/bin/env python3
# Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import re
import subprocess
def main():
    """Validate the subjects of commits not yet on origin/master.

    Prints a verdict for every subject and exits with status 1 when at
    least one subject fails the pattern.
    """
    subject_pattern = re.compile(r'[^a-z\s]\S*( \S+)*[^.]')
    log_output = subprocess.check_output(args=['git', 'log', '--format=%s', 'origin/master..'],
                                         universal_newlines=True)
    bad_subjects = []
    for subject in log_output.splitlines():
        if subject_pattern.fullmatch(subject):
            print('Valid commit message subject:', subject)
        else:
            bad_subjects.append(subject)
            print('Invalid commit message subject:', subject)
    if bad_subjects:
        exit(1)


if __name__ == "__main__":
    main()
| 2.484375 | 2 |
main.py | mattrwh-pC506/hexus-training-grounds | 1 | 12773720 | <filename>main.py
"""Train Reinforcement Learning agent.
To run:
python main.py
"""
import argparse
import sys
import matplotlib.pyplot as plt
import ujson as json
from game.hexus import Hexus
from rl.agent import Agent
def process_args():
    """Build the command-line parser and return the parsed options."""
    parser = argparse.ArgumentParser(
        description='Interactive Reinforcement Learning board game playing agent.')
    parser.add_argument('-g', '--game',
                        dest='game',
                        help='Board game choice.',
                        default='tictactoe')
    parser.add_argument('-m', '--mode',
                        dest='mode',
                        help='Mode for Agent can be ["train"].',
                        default='train')
    parser.add_argument('-bl', '--board_level',
                        dest='board_level',
                        help='Board for Agent can be ["#EB00A2", "#0D2EFF", "#00FF94", "#8700EB", "#FFFFFF", "#000000"].',
                        default='#EB00A2')
    parser.add_argument('-e', '--episodes',
                        dest='episodes',
                        help='Episodes for Agent can be [0-n].',
                        default=10000)
    return parser.parse_args()
def play_hexus(mode, episodes, board_level):
    """Run the Hexus game with an RL agent in the requested mode.

    ``train`` trains a single agent for *episodes* episodes; ``hyper``
    grid-searches epsilon/learning-rate combinations over fixed 10000-
    episode runs and reports the best-performing pair.
    """
    print('<><><><>HEXUS<><><><>')
    if mode == 'train':
        # Train an agent that plays first.
        learner = Agent(Hexus, epsilon=5e-1, learning_rate=25e-2, board_level=board_level)
        history = learner.train(episodes)
        print('After {} Episodes'.format(episodes))
    elif mode == 'hyper':
        # Hyper-parameter search: keep the pair with the highest total reward.
        best_epsilon = 0.0
        best_rate = 0.0
        best_reward = 0.0
        for eps in [1e-1, 2e-1, 9e-2, 1e-2, 9e-3]:
            for rate in [1e-1, 2e-1, 3e-1, 25e-2, 9e-2]:
                learner = Agent(Hexus, player='B', epsilon=eps, learning_rate=rate)
                run_history = learner.train(10000, history=[])
                total = run_history[1][len(run_history[1]) - 1]
                print(total)
                if total > best_reward:
                    best_reward = total
                    best_epsilon = eps
                    best_rate = rate
        print('Max e: {}'.format(best_epsilon))
        print('Max lr: {}'.format(best_rate))
        print('Max reward: {}'.format(best_reward))
    else:
        print('Mode {} is invalid.'.format(mode))
def main():
    """Entry point: dispatch to the chosen game or exit with an error."""
    options = process_args()
    if options.game != 'hexus':
        print('Game choice {} is current unsupported.'.format(options.game))
        sys.exit(1)
    play_hexus(options.mode, int(options.episodes), options.board_level)


if __name__ == '__main__':
    main()
| 3.015625 | 3 |
tests/test_commands.py | peradecki/fold-fastalike | 1 | 12773721 | <filename>tests/test_commands.py<gh_stars>1-10
import os.path
import shutil
import subprocess
import time
import pathlib
# Handle paths and prepare output: remove any stale results from a
# previous run before the test commands regenerate them.
test_folder = pathlib.Path(__file__).parent.resolve()
# BUG FIX: was 'test_outputs', but every command in test_commands() writes
# to 'tests/test_output/...', so the old directory name never matched and
# stale output was never cleaned up.
output_folder = os.path.join(test_folder, 'test_output')
if os.path.isdir(output_folder):
    shutil.rmtree(output_folder)
def test_version():
    """Report the installed fold-fastalike version."""
    print('fold-fastalike version: ')
    version_cmd = ['python', '-m', 'fold-fastalike', '--version']
    subprocess.run(version_cmd)
def test_commands():
    """Exercise the CLI once per supported option combination."""
    print('Running test commands ... ')
    time.sleep(1)
    base_cmd = ['python', '-m', 'fold-fastalike', 'tests/test_sequences.fasta', '--output']
    # (output directory, extra CLI flags) for each invocation, in order.
    runs = [
        ('tests/test_output/ff/', ['--fold']),
        ('tests/test_output/lunp1/', ['--lunp', '1']),
        ('tests/test_output/lunp10', ['--lunp', '10']),
        ('tests/test_output/lunp0', ['--lunp', '0']),
        ('tests/test_output/MEA', ['--MEA']),
    ]
    for out_dir, extra_flags in runs:
        subprocess.run(base_cmd + [out_dir] + extra_flags)
| 2.21875 | 2 |
grnglow/glow/models/comment.py | xiaokai111/green-glow | 18 | 12773722 | # -*- encoding: utf-8 -*-
'''
Created on 2012-3-22
@author: Neil
'''
from django.db import models
# from photo import Photo
from user import User
class Comment(models.Model):
    """Data model for a comment left on a photo."""
    photo_id = models.IntegerField()
    # related_name disambiguates the reverse accessors of the two User
    # foreign keys on this model.
    photo_owner = models.ForeignKey(User, related_name='comment_photo_owner')
    author = models.ForeignKey(User, related_name='author')
    content = models.TextField()
    # Do not use default=datetime.datetime.now(): the default is evaluated
    # only once, at class-definition time, so every row would share that
    # same timestamp.  auto_now_add stamps each row at creation instead.
    # date_posted = models.DateTimeField(default=datetime.datetime.now())
    date_posted = models.DateTimeField(auto_now_add=True)
    # When the photo's owner deletes this comment, it is no longer shown in
    # the photo's comment list (soft delete).
    deleted_by_photo_owner = models.BooleanField(default=False)
    # The photo this comment belongs to has been deleted.
    photo_deleted = models.BooleanField(default=False)

    def __unicode__(self):
        return str(self.id)

    class Meta:
        ordering = ['id']
        app_label = 'glow'
| 2.328125 | 2 |
tfHub_sentence_similarity/tfHub_sentence_similarity.py | jae-yong-2/awesomeScripts | 245 | 12773723 | import re
import string
import tensorflow_hub as hub
from scipy.spatial.distance import cdist
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
class SimilarityModel():
    """Sentence-similarity scorer backed by the Universal Sentence Encoder (TF Hub)."""

    def __init__(self):
        """Load the encoder from TF Hub (downloads the module on first use)."""
        print("Loading model from tf hub...")
        self.model = hub.load(module_url)
        print("module %s loaded" % module_url)

    def process_text(self, text):
        '''Clean text by removing unnecessary characters and altering the format of words.'''
        re_print = re.compile('[^%s]' % re.escape(string.printable))
        text = text.lower()
        # Expand common English contractions so the encoder sees full words.
        text = re.sub(r"i'm", "i am", text)
        text = re.sub(r"he's", "he is", text)
        text = re.sub(r"she's", "she is", text)
        text = re.sub(r"it's", "it is", text)
        text = re.sub(r"that's", "that is", text)
        # BUG FIX: "what's" previously expanded to "that is".
        text = re.sub(r"what's", "what is", text)
        text = re.sub(r"where's", "where is", text)
        text = re.sub(r"how's", "how is", text)
        text = re.sub(r"\'ll", " will", text)
        text = re.sub(r"\'ve", " have", text)
        text = re.sub(r"\'re", " are", text)
        text = re.sub(r"\'d", " would", text)
        text = re.sub(r"\'re", " are", text)
        text = re.sub(r"won't", "will not", text)
        text = re.sub(r"can't", "cannot", text)
        text = re.sub(r"n't", " not", text)
        text = re.sub(r"n'", "ng", text)
        text = re.sub(r"'bout", "about", text)
        text = re.sub(r"'til", "until", text)
        # Strip punctuation.  NOTE(review): this character class contains the
        # ranges "$-(" and "*-^"; "*-^" spans ASCII 42-94 and therefore also
        # removes digits (uppercase letters are already gone after lower()).
        # Confirm whether removing digits is intended.
        text = re.sub(r"[$-()\"#/@;:<>{}`+=~|.!?,'*-^]", "", text)
        text = text.split()
        # Drop any remaining non-printable characters from each word.
        text = [re_print.sub('', w) for w in text]
        return ' '.join(text)

    def similarity(self, sentence1, sentence2):
        """Return the cosine-distance matrix (1x1) between the two sentences.

        Lower values mean more similar; callers typically report
        ``1 - distance[0][0]`` as a similarity score.
        """
        processed_sent1 = self.process_text(sentence1)
        processed_sent2 = self.process_text(sentence2)
        sent_vector1 = self.model([processed_sent1])
        sent_vector2 = self.model([processed_sent2])
        similarities = cdist(sent_vector1, sent_vector2, metric='cosine')
        return similarities
if __name__ == "__main__":
sim_model = SimilarityModel()
sentence1 = "<NAME>"
sentence2 = "I want money"
distance = sim_model.similarity(sentence1, sentence2)
print("Similarity score is: ", 1 - distance[0][0])
| 2.65625 | 3 |
dist/book/codes/105-1.py | EManualResource/book-python-basic | 0 | 12773724 | <reponame>EManualResource/book-python-basic
#! /usr/bin/env python
#coding:utf-8
"""
请计算:19+2*4-8/2
"""
a = 19+2*4-8/2
print a
| 2.015625 | 2 |
examples/pytorch/dimenet/modules/embedding_block.py | ketyi/dgl | 9,516 | 12773725 | import numpy as np
import torch
import torch.nn as nn
from modules.envelope import Envelope
from modules.initializers import GlorotOrthogonal
class EmbeddingBlock(nn.Module):
    """DimeNet embedding block.

    Produces initial node embeddings from atomic numbers and initial edge
    ("message") embeddings from the two endpoint embeddings combined with a
    radial-basis expansion of the edge distance.
    """

    def __init__(self,
                 emb_size,
                 num_radial,
                 bessel_funcs,
                 cutoff,
                 envelope_exponent,
                 num_atom_types=95,
                 activation=None):
        """
        Parameters
        ----------
        emb_size : int
            Dimension of node and edge embeddings.
        num_radial : int
            Number of radial-basis features expected in ``edges.data['rbf']``.
        bessel_funcs : sequence of callables
            Basis functions applied to the cutoff-scaled distance to build
            ``rbf_env``.
        cutoff : float
            Distance cutoff used to scale edge distances.
        envelope_exponent : int
            Exponent of the smooth cutoff envelope.
        num_atom_types : int
            Size of the atom-type vocabulary for the embedding table.
        activation : callable or None
            Optional non-linearity applied after each dense layer.
        """
        super(EmbeddingBlock, self).__init__()
        self.bessel_funcs = bessel_funcs
        self.cutoff = cutoff
        self.activation = activation
        self.envelope = Envelope(envelope_exponent)
        # Lookup table mapping atom type Z -> learned embedding.
        self.embedding = nn.Embedding(num_atom_types, emb_size)
        self.dense_rbf = nn.Linear(num_radial, emb_size)
        # Projects [h_src ; h_dst ; rbf] (3 * emb_size) down to emb_size.
        self.dense = nn.Linear(emb_size * 3, emb_size)
        self.reset_params()

    def reset_params(self):
        """Initialize weights: uniform(+-sqrt(3)) for the embedding table,
        Glorot-orthogonal for both dense layers."""
        nn.init.uniform_(self.embedding.weight, a=-np.sqrt(3), b=np.sqrt(3))
        GlorotOrthogonal(self.dense_rbf.weight)
        GlorotOrthogonal(self.dense.weight)

    def edge_init(self, edges):
        """Per-edge UDF: compute the message embedding 'm' and the
        envelope-weighted radial basis 'rbf_env'."""
        # m: project the radial-basis features, then combine them with the
        # embeddings of both endpoint atoms through a dense layer.
        rbf = self.dense_rbf(edges.data['rbf'])
        if self.activation is not None:
            rbf = self.activation(rbf)
        m = torch.cat([edges.src['h'], edges.dst['h'], rbf], dim=-1)
        m = self.dense(m)
        if self.activation is not None:
            m = self.activation(m)
        # rbf_env: evaluate each Bessel function on the cutoff-scaled
        # distance and damp the stack with the smooth envelope.
        # NOTE(review): assumes edges.data['d'] holds edge lengths -- confirm
        # against the graph-construction code.
        d_scaled = edges.data['d'] / self.cutoff
        rbf_env = [f(d_scaled) for f in self.bessel_funcs]
        rbf_env = torch.stack(rbf_env, dim=1)
        d_cutoff = self.envelope(d_scaled)
        rbf_env = d_cutoff[:, None] * rbf_env
        return {'m': m, 'rbf_env': rbf_env}

    def forward(self, g):
        """Embed node atomic numbers ('Z' -> 'h') and initialize all edge
        features in place; returns the same graph *g*."""
        g.ndata['h'] = self.embedding(g.ndata['Z'])
        g.apply_edges(self.edge_init)
        return g
Unit 1 Variables and Statements/Lesson4.2.py | ItsMrTurtle/PythonChris | 0 | 12773726 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 19:32:39 2020
@author: abcdk
"""
# 5 basic data types covered in this lesson:
# int, float, bool (Boolean), str (string), and None (Python's null value)
| 1.484375 | 1 |
core/textinput.py | 3ldr0n/elzring | 0 | 12773727 | import pygame
from gamesettings import GameSettings as gs
class TextInput(pygame.sprite.Sprite):
    """Single-line text input widget rendered on a pygame surface."""

    def __init__(self, x, y, width, height):
        """Inputs any kind of text on screen.

        Parameters
        ----------
        x: int
            X axis position of the input bar.
        y: int
            Y axis position of the input bar.
        width: int
            Width of the input bar. Its value is multiplied by the size of
            the tile.
        height: int
            Height of the input bar. Its value is multiplied by the size of
            the tile.
        """
        # Bug fix: Sprite.__init__ must be called so this sprite can be
        # tracked by pygame sprite groups.
        super().__init__()
        self.x = x
        self.y = y
        self.width = width * gs.TILESIZE
        self.height = height * gs.TILESIZE
        self.rect = pygame.Rect(self.x, self.y, self.width, self.height)
        self.font = pygame.font.SysFont(None, self.height // 2)
        self.text = " "

    def update_text(self, events):
        """Updates text on key press.

        Parameters
        ----------
        events: list
            Events queue.

        Returns
        -------
        bool
            True when RETURN was pressed (user finished the input).
        """
        # Bug fix: the original returned after the first KEYDOWN event,
        # silently dropping any further key presses delivered in the same
        # frame.  Process every event in the queue instead.
        for event in events:
            if event.type != pygame.KEYDOWN:
                continue
            if event.key == pygame.K_RETURN:
                return True
            if event.key == pygame.K_BACKSPACE:
                self.text = self.text[:-1]
            else:
                self.text += event.unicode
        return False

    def draw(self, screen):
        """Draws the input bar and the text.

        Parameters
        ----------
        screen: pygame.Surface
            Screen object.
        """
        pygame.draw.rect(screen, gs.WHITE, self.rect)
        rendered = self.font.render(self.text, True, gs.LIGHT_RED)
        screen.blit(rendered, (self.x, (self.y) +
                               rendered.get_rect().height))

    def get_input(self):
        """Returns the text attribute, stripped of surrounding whitespace."""
        return self.text.strip()
| 3.859375 | 4 |
paleomix/common/utilities.py | MikkelSchubert/paleomix | 33 | 12773728 | #!/usr/bin/python3
#
# Copyright (c) 2012 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import copy
import itertools
from typing import (
Any,
AnyStr,
Callable,
Dict,
FrozenSet,
Hashable,
Iterable,
Iterator,
List,
Sequence,
Tuple,
TypeVar,
Union,
overload,
)
T = TypeVar("T")
def safe_coerce_to_tuple(value: Any) -> Tuple[Any, ...]:
    """Coerce *value* into a tuple.

    Strings and non-iterable values are returned as a single-element
    tuple; any other iterable is converted element-wise.
    """
    if not isinstance(value, str):
        try:
            return tuple(value)
        except TypeError:
            pass
    return (value,)
def safe_coerce_to_frozenset(value: Any) -> FrozenSet[Any]:
    """Coerce *value* into a frozenset.

    Strings and non-iterable values are returned as a single-element
    frozenset; any other iterable is converted element-wise.
    """
    if not isinstance(value, str):
        try:
            return frozenset(value)
        except TypeError:
            pass
    return frozenset((value,))
def try_cast(value: Any, cast_to: type) -> Any:
    """Return ``cast_to(value)``, or *value* unchanged if the conversion
    raises ValueError or TypeError."""
    try:
        result = cast_to(value)
    except (ValueError, TypeError):
        return value
    return result
def set_in(dictionary: Dict[Any, Any], keys: Iterable[Hashable], value: Any) -> None:
    """Assign *value* into a tree of nested dictionaries.

    Walks *dictionary* along *keys*, creating intermediate dicts as
    needed, and stores *value* under the final key.  Equivalent to
    ``d.setdefault(k0, {}).setdefault(k1, {})[k2] = value``.

    Raises ValueError when *keys* is empty.  Behaviour on
    non-dictionaries is undefined.
    """
    path = list(keys)
    if not path:
        raise ValueError("No keys passed to 'set_in'!")

    node = dictionary
    for key in path[:-1]:
        try:
            node = node[key]
        except KeyError:
            # Missing intermediate level: create it on the fly.
            child: Dict[Any, Any] = {}
            node[key] = child
            node = child
    node[path[-1]] = value
def get_in(
    dictionary: Dict[Any, Any],
    keys: Iterable[Hashable],
    default: Any = None,
) -> Any:
    """Look up a value in a tree of nested dictionaries.

    Walks *dictionary* along *keys* and returns the value stored under
    the final key; any missing key along the way yields *default*.
    Behaviour on non-dictionaries is undefined.
    """
    path = list(keys)
    node = dictionary
    for key in path[:-1]:
        try:
            node = node[key]
        except KeyError:
            return default
    return node.get(path[-1], default)
def split_before(
    iterable: Iterable[Any], pred: Callable[[Any], bool]
) -> Iterator[List[Any]]:
    """Split a sequence into chunks, starting a new chunk before every
    value for which ``pred(value)`` is true.

    For example ``split_before(range(10), lambda x: x % 2 == 0)`` yields
    [0, 1], [2, 3], [4, 5], [6, 7], [8, 9].
    """
    chunk: List[Any] = []
    for value in iterable:
        if pred(value) and chunk:
            yield chunk
            chunk = []
        chunk.append(value)
    # Flush whatever is left over (empty input yields nothing).
    if chunk:
        yield chunk
# Copied from the Python 'itertools' module documentation
def grouper(size: int, iterable: Iterable[Any], fillvalue: Any = None):
    """Collect data into fixed-length chunks:
    grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"""
    iterators = [iter(iterable)] * size
    return itertools.zip_longest(*iterators, fillvalue=fillvalue)
def group_by_pred(
    pred: Callable[[Any], bool], iterable: Iterable[Any]
) -> Tuple[List[Any], List[Any]]:
    """Partition *iterable* into two lists: items for which *pred* is
    true, and items for which it is false (in that order)."""
    matched: List[Any] = []
    unmatched: List[Any] = []
    for item in iterable:
        (matched if pred(item) else unmatched).append(item)
    return matched, unmatched
@overload
def fragment(size: int, lstlike: AnyStr) -> Iterable[AnyStr]:
    ...


@overload
def fragment(size: int, lstlike: Sequence[T]) -> Iterable[Sequence[T]]:
    ...


_Fragmentable = Union[AnyStr, Sequence[T]]


def fragment(size: int, lstlike: _Fragmentable) -> Iterable[_Fragmentable]:
    """Lazily split *lstlike* into consecutive slices of at most *size*
    items; faster alternative to ``grouper`` for lists/strings."""
    length = len(lstlike)
    return (lstlike[offset : offset + size] for offset in range(0, length, size))
def fill_dict(destination: Dict[Any, Any], source: Dict[Any, Any]) -> Dict[Any, Any]:
    """Return a deep copy of *destination* in which any keys missing at
    any level have been filled in with (copies of) the corresponding
    entries from *source*, recursively."""
    if not isinstance(destination, dict) or not isinstance(source, dict):
        raise TypeError("Non-dictionary parameters in 'fill_dict'")

    def _merge_missing(dst: Dict[Any, Any], src: Dict[Any, Any]) -> Dict[Any, Any]:
        # Recurse where both sides hold dicts; otherwise only fill gaps.
        for key, src_value in src.items():
            if isinstance(src_value, dict) and isinstance(dst.get(key), dict):
                _merge_missing(dst[key], src_value)
            elif key not in dst:
                dst[key] = src_value
        return dst

    return _merge_missing(copy.deepcopy(destination), copy.deepcopy(source))
class Immutable:
    """Mixin that freezes attributes after construction.

    Attributes are supplied as keyword arguments to ``__init__`` and can
    never be rebound or deleted afterwards.  Note that this does not make
    the attribute *values* themselves immutable.
    """

    def __init__(self, **kwargs: Any):
        object.__init__(self)
        # Bypass our own __setattr__, which always raises.
        for key in kwargs:
            object.__setattr__(self, key, kwargs[key])

    def __setattr__(self, _name: str, _value: Any) -> None:
        raise NotImplementedError("Object is immutable")

    def __delattr__(self, _name: str) -> None:
        raise NotImplementedError("Object is immutable")
class TotallyOrdered:
    """Mixin deriving the full rich-comparison interface from ``__lt__``.

    Subclasses implement ``__lt__`` and return NotImplemented for objects
    of a different type; a total order is assumed
    (http://en.wikipedia.org/wiki/Total_order).
    """

    def __lt__(self, other: Any) -> bool:
        raise NotImplementedError("__lt__ must be implemented!")

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, type(self)):
            # Under a total order, equality is "neither is smaller".
            return not (self < other or other < self)
        return NotImplemented

    def __ne__(self, other: Any) -> bool:
        if isinstance(other, type(self)):
            return not (self == other)
        return NotImplemented

    def __le__(self, other: Any) -> bool:
        if isinstance(other, type(self)):
            return not (other < self)
        return NotImplemented

    def __ge__(self, other: Any) -> bool:
        if isinstance(other, type(self)):
            return not (self < other)
        return NotImplemented

    def __gt__(self, other: Any) -> bool:
        if isinstance(other, type(self)):
            return other < self
        return NotImplemented
| 2.15625 | 2 |
shared/database/ODBC/winODBCdeclarations.py | infostreams/webindex | 1 | 12773729 | <reponame>infostreams/webindex<gh_stars>1-10
# Windows ODBC API constants, transcribed from the C headers sql.h and
# sqlext.h.  Values must match the native ODBC driver manager exactly.
# success values
# sql.h
SQL_INVALID_HANDLE = -2
SQL_ERROR = -1
SQL_SUCCESS = 0
SQL_SUCCESS_WITH_INFO = 1
SQL_STILL_EXECUTING = 2
SQL_NEED_DATA = 99
SQL_NO_DATA_FOUND = 100
# Return codes that indicate failure (used when checking call results).
sql_errors = [SQL_ERROR, SQL_INVALID_HANDLE]
# sqlext.h
SQL_FETCH_NEXT = 0x01
SQL_FETCH_FIRST = 0x02
SQL_FETCH_LAST = 0x04
SQL_FETCH_PRIOR = 0x08
SQL_FETCH_ABSOLUTE = 0x10
SQL_FETCH_RELATIVE = 0x20
SQL_FETCH_RESUME = 0x40
SQL_FETCH_BOOKMARK = 0x80
# sql types
SQL_TYPE_NULL = 0
SQL_CHAR = 1
SQL_NUMERIC = 2
SQL_DECIMAL = 3
SQL_INTEGER = 4
SQL_SMALLINT = 5
SQL_FLOAT = 6
SQL_REAL = 7
SQL_DOUBLE = 8
SQL_DATE = 9
SQL_TIME = 10
SQL_TIMESTAMP = 11
SQL_VARCHAR = 12
# SQL extended datatypes
SQL_LONGVARCHAR = -1
SQL_BINARY = -2
SQL_VARBINARY = -3
SQL_LONGVARBINARY = -4
SQL_BIGINT = -5
SQL_TINYINT = -6
SQL_BIT = -7
SQL_INTERVAL_YEAR = -80
SQL_INTERVAL_MONTH = -81
SQL_INTERVAL_YEAR_TO_MONTH = -82
SQL_INTERVAL_DAY = -83
SQL_INTERVAL_HOUR = -84
SQL_INTERVAL_MINUTE = -85
SQL_INTERVAL_SECOND = -86
SQL_INTERVAL_DAY_TO_HOUR = -87
SQL_INTERVAL_DAY_TO_MINUTE = -88
SQL_INTERVAL_DAY_TO_SECOND = -89
SQL_INTERVAL_HOUR_TO_MINUTE = -90
SQL_INTERVAL_HOUR_TO_SECOND = -91
SQL_INTERVAL_MINUTE_TO_SECOND = -92
SQL_UNICODE = -95
SQL_UNICODE_VARCHAR = -96
SQL_UNICODE_LONGVARCHAR = -97
SQL_UNICODE_CHAR = SQL_UNICODE
SQL_TYPE_DRIVER_START = SQL_INTERVAL_YEAR
SQL_TYPE_DRIVER_END = SQL_UNICODE_LONGVARCHAR
SQL_SIGNED_OFFSET = -20
SQL_UNSIGNED_OFFSET = -22
# Special length values (don't work yet)
SQL_NULL_DATA = -1
SQL_DATA_AT_EXEC = -2
SQL_NTS = -3
# SQLGetInfo type types
STRING = 's'
INT16 = 'h'
INT32 = 'l'
# C datatype to SQL datatype mapping SQL types
SQL_C_CHAR = SQL_CHAR  # CHAR, VARCHAR, DECIMAL, NUMERIC
SQL_C_LONG = SQL_INTEGER  # INTEGER
SQL_C_SHORT = SQL_SMALLINT  # SMALLINT
SQL_C_FLOAT = SQL_REAL  # REAL
SQL_C_DOUBLE = SQL_DOUBLE  # FLOAT, DOUBLE
SQL_C_DEFAULT = 99
#
SQL_C_DATE = SQL_DATE
SQL_C_TIME = SQL_TIME
SQL_C_TIMESTAMP = SQL_TIMESTAMP
SQL_C_BINARY = SQL_BINARY
SQL_C_BIT = SQL_BIT
SQL_C_TINYINT = SQL_TINYINT
SQL_C_SLONG = SQL_C_LONG+SQL_SIGNED_OFFSET  # SIGNED INTEGER
SQL_C_SSHORT = SQL_C_SHORT+SQL_SIGNED_OFFSET  # SIGNED SMALLINT
SQL_C_STINYINT = SQL_TINYINT+SQL_SIGNED_OFFSET  # SIGNED TINYINT
SQL_C_ULONG = SQL_C_LONG+SQL_UNSIGNED_OFFSET  # UNSIGNED INTEGER
SQL_C_USHORT = SQL_C_SHORT+SQL_UNSIGNED_OFFSET  # UNSIGNED SMALLINT
SQL_C_UTINYINT = SQL_TINYINT+SQL_UNSIGNED_OFFSET  # UNSIGNED TINYINT
SQL_C_BOOKMARK = SQL_C_ULONG  # BOOKMARK
# from "sql.h"
# Defines for SQLGetInfo
# NOTE: each entry below is a (info_code, result_type) tuple; the
# result_type is one of the STRING/INT16/INT32 markers defined above and
# tells the caller how to decode the buffer returned by SQLGetInfo.
SQL_ACTIVE_CONNECTIONS = 0, INT16
SQL_ACTIVE_STATEMENTS = 1, INT16
SQL_DATA_SOURCE_NAME = 2, STRING
SQL_DRIVER_HDBC = 3, INT32
SQL_DRIVER_HENV = 4, INT32
SQL_DRIVER_HSTMT = 5, INT32
SQL_DRIVER_NAME = 6, STRING
SQL_DRIVER_VER = 7, STRING
SQL_FETCH_DIRECTION = 8, INT32
SQL_ODBC_API_CONFORMANCE = 9, INT16
SQL_ODBC_VER = 10, STRING
SQL_ROW_UPDATES = 11, STRING
SQL_ODBC_SAG_CLI_CONFORMANCE = 12, INT16
SQL_SERVER_NAME = 13, STRING
SQL_SEARCH_PATTERN_ESCAPE = 14, STRING
SQL_ODBC_SQL_CONFORMANCE = 15, INT16
SQL_DATABASE_NAME = 16, STRING
SQL_DBMS_NAME = 17, STRING
SQL_DBMS_VER = 18, STRING
SQL_ACCESSIBLE_TABLES = 19, STRING
SQL_ACCESSIBLE_PROCEDURES = 20, STRING
SQL_PROCEDURES = 21, STRING
SQL_CONCAT_NULL_BEHAVIOR = 22, INT16
SQL_CURSOR_COMMIT_BEHAVIOR = 23, INT16
SQL_CURSOR_ROLLBACK_BEHAVIOR = 24, INT16
SQL_DATA_SOURCE_READ_ONLY = 25, STRING
SQL_DEFAULT_TXN_ISOLATION = 26, INT32
SQL_EXPRESSIONS_IN_ORDERBY = 27, STRING
SQL_IDENTIFIER_CASE = 28, INT16
SQL_IDENTIFIER_QUOTE_CHAR = 29, STRING
SQL_MAX_COLUMN_NAME_LEN = 30, INT16
SQL_MAX_CURSOR_NAME_LEN = 31, INT16
SQL_MAX_OWNER_NAME_LEN = 32, INT16
SQL_MAX_PROCEDURE_NAME_LEN = 33, INT16
SQL_MAX_QUALIFIER_NAME_LEN = 34, INT16
SQL_MAX_TABLE_NAME_LEN = 35, INT16
SQL_MULT_RESULT_SETS = 36, STRING
SQL_MULTIPLE_ACTIVE_TXN = 37, STRING
SQL_OUTER_JOINS = 38, STRING
SQL_OWNER_TERM = 39, STRING
SQL_PROCEDURE_TERM = 40, STRING
SQL_QUALIFIER_NAME_SEPARATOR = 41, STRING
SQL_QUALIFIER_TERM = 42, STRING
SQL_SCROLL_CONCURRENCY = 43, INT32
SQL_SCROLL_OPTIONS = 44, INT32
SQL_TABLE_TERM = 45, STRING
SQL_TXN_CAPABLE = 46, INT16
SQL_USER_NAME = 47, STRING
SQL_CONVERT_FUNCTIONS = 48, INT32
SQL_NUMERIC_FUNCTIONS = 49, INT32
SQL_STRING_FUNCTIONS = 50, INT32
SQL_SYSTEM_FUNCTIONS = 51, INT32
SQL_TIMEDATE_FUNCTIONS = 52, INT32
SQL_CONVERT_BIGINT = 53, INT32
SQL_CONVERT_BINARY = 54, INT32
SQL_CONVERT_BIT = 55, INT32
SQL_CONVERT_CHAR = 56, INT32
SQL_CONVERT_DATE = 57, INT32
SQL_CONVERT_DECIMAL = 58, INT32
SQL_CONVERT_DOUBLE = 59, INT32
SQL_CONVERT_FLOAT = 60, INT32
SQL_CONVERT_INTEGER = 61, INT32
SQL_CONVERT_LONGVARCHAR = 62, INT32
SQL_CONVERT_NUMERIC = 63, INT32
SQL_CONVERT_REAL = 64, INT32
SQL_CONVERT_SMALLINT = 65, INT32
SQL_CONVERT_TIME = 66, INT32
SQL_CONVERT_TIMESTAMP = 67, INT32
SQL_CONVERT_TINYINT = 68, INT32
SQL_CONVERT_VARBINARY = 69, INT32
SQL_CONVERT_VARCHAR = 70, INT32
SQL_CONVERT_LONGVARBINARY = 71, INT32
SQL_TXN_ISOLATION_OPTION = 72, INT32
SQL_ODBC_SQL_OPT_IEF = 73, STRING
SQL_CORRELATION_NAME = 74, INT16
SQL_NON_NULLABLE_COLUMNS = 75, INT16
SQL_DRIVER_HLIB = 76, INT32
SQL_DRIVER_ODBC_VER = 77, STRING
SQL_LOCK_TYPES = 78, INT32
SQL_POS_OPERATIONS = 79, INT32
SQL_POSITIONED_STATEMENTS = 80, INT32
SQL_GETDATA_EXTENSIONS = 81, INT32
SQL_BOOKMARK_PERSISTENCE = 82, INT32
SQL_STATIC_SENSITIVITY = 83, INT32
SQL_FILE_USAGE = 84, INT16
SQL_NULL_COLLATION = 85, INT16
SQL_ALTER_TABLE = 86, INT32
SQL_COLUMN_ALIAS = 87, STRING
SQL_GROUP_BY = 88, INT16
SQL_KEYWORDS = 89, STRING
SQL_ORDER_BY_COLUMNS_IN_SELECT = 90, STRING
SQL_OWNER_USAGE = 91, INT32
SQL_QUALIFIER_USAGE = 92, INT32
SQL_QUOTED_IDENTIFIER_CASE = 93, INT32
SQL_SPECIAL_CHARACTERS = 94, STRING
SQL_SUBQUERIES = 95, INT32
SQL_UNION = 96, INT32
SQL_MAX_COLUMNS_IN_GROUP_BY = 97, INT16
SQL_MAX_COLUMNS_IN_INDEX = 98, INT16
SQL_MAX_COLUMNS_IN_ORDER_BY = 99, INT16
SQL_MAX_COLUMNS_IN_SELECT = 100, INT16
SQL_MAX_COLUMNS_IN_TABLE = 101, INT16
SQL_MAX_INDEX_SIZE = 102, INT32
SQL_MAX_ROW_SIZE_INCLUDES_LONG = 103, STRING
SQL_MAX_ROW_SIZE = 104, INT32
SQL_MAX_STATEMENT_LEN = 105, INT32
SQL_MAX_TABLES_IN_SELECT = 106, INT16
SQL_MAX_USER_NAME_LEN = 107, INT16
SQL_MAX_CHAR_LITERAL_LEN = 108, INT32
SQL_TIMEDATE_ADD_INTERVALS = 109, INT32
SQL_TIMEDATE_DIFF_INTERVALS = 110, INT32
SQL_NEED_LONG_DATA_LEN = 111, STRING
SQL_MAX_BINARY_LITERAL_LEN = 112, INT32
SQL_LIKE_ESCAPE_CLAUSE = 113, STRING
SQL_QUALIFIER_LOCATION = 114, INT16
#
# <Phew!>
#
# Level 1 Prototypes
# <sqlext.h>
# Options for SQLDriverConnect
SQL_DRIVER_NOPROMPT = 0
SQL_DRIVER_COMPLETE = 1
SQL_DRIVER_PROMPT = 2
SQL_DRIVER_COMPLETE_REQUIRED = 3
# For SQLGetFunctions
SQL_API_ALL_FUNCTIONS = 0
# Defines for SQLBindParameter and
# SQLProcedureColumns (returned in the result set)
SQL_PARAM_TYPE_UNKNOWN = 0
SQL_PARAM_INPUT = 1
SQL_PARAM_INPUT_OUTPUT = 2
SQL_RESULT_COL = 3
SQL_PARAM_OUTPUT = 4
SQL_RETURN_VALUE = 5
# SQLFreeStmt defines
SQL_CLOSE = 0
SQL_DROP = 1
SQL_UNBIND = 2
SQL_RESET_PARAMS = 3
# Added by <NAME>:
# SQL Handles
SQL_HANDLE_ENV = 1
SQL_HANDLE_DBC = 2
SQL_HANDLE_STMT = 3
SQL_HANDLE_DESC = 4
# SQLGetEnvAttr - Attributes
SQL_ATTR_ODBC_VERSION = 200
SQL_ATTR_CONNECTION_POOLING = 201
SQL_ATTR_CP_MATCH = 202
# SQLGetEnvAttr - SQL_ATTR_ODBC_VERSION
# NOTE(review): these are (value, result_type) tuples like the SQLGetInfo
# entries above — confirm callers expect the tuple rather than a bare int.
SQL_OV_ODBC2 = 2, INT32
SQL_OV_ODBC3 = 3, INT32
# # SQLGetEnvAttr - SQL_ATTR_CONNECTION_POOLING
# SQL_CP_OFF 0UL
# SQL_CP_ONE_PER_DRIVER 1UL
# SQL_CP_ONE_PER_HENV 2UL
# SQL_CP_DEFAULT SQL_CP_OFF
#
#
# # SQLGetEnvAttr - SQL_ATTR_CP_MATCH
# SQL_CP_STRICT_MATCH 0UL
# SQL_CP_RELAXED_MATCH 1UL
# SQL_CP_MATCH_DEFAULT SQL_CP_STRICT_MATCH
SQL_NO_DATA = 100
SQL_NULL_HENV = 0
SQL_NULL_HDBC = 0
SQL_NULL_HSTMT = 0
# Don't know if this works:
SQL_IS_POINTER = -4
SQL_IS_UINTEGER = -5
SQL_IS_INTEGER = -6
SQL_IS_USMALLINT = -7
SQL_IS_SMALLINT = -8
| 1.40625 | 1 |
Django/permission_demo/rbac/models.py | taoyan/python | 1 | 12773730 | <reponame>taoyan/python<filename>Django/permission_demo/rbac/models.py
from django.db import models
# Create your models here.
class User(models.Model):
    """Account holder; may be granted any number of roles."""

    name = models.CharField(max_length=32)
    pwd = models.CharField(max_length=32)
    roles = models.ManyToManyField(to="Role")

    def __str__(self):
        return self.name
class Role(models.Model):
    """Named role bundling a set of permissions."""

    title = models.CharField(max_length=32)
    permissions = models.ManyToManyField(to="Permission")

    def __str__(self):
        return self.title
class Permission(models.Model):
    """Single permission: a display title plus the URL it grants."""

    title = models.CharField(max_length=32)
    url = models.CharField(max_length=32)

    def __str__(self):
        return self.title
21/amicable.py | bobismijnnaam/bobe-euler | 0 | 12773731 | <reponame>bobismijnnaam/bobe-euler<gh_stars>0
import Utils
pf = Utils.NumberJuggler()

# Project Euler 21: collect all amicable numbers below 10000.
# NOTE(review): indentation reconstructed; assumes getDivisors returns the
# proper divisors of its argument — confirm against Utils.NumberJuggler.
total = []
for i in range(1, 10000):
    iDiv = pf.getDivisors(i);
    j = sum(iDiv);
    jDiv = pf.getDivisors(j);
    k = sum(jDiv);
    # i and j are an amicable pair when d(i) == j, d(j) == i and i != j.
    if i == k and i != j:
        print(str(i) + " <=> " + str(j))
        if i not in total:
            total.append(i)
        if j not in total:
            total.append(j)

print("All amicable numbers < 10000: " + str(total))
print("The sum: " + str(sum(total)))
| 3.28125 | 3 |
Ec_candidates_tweets_sentiment_an.py | sayalaruano/Sentiment-analysis-of-Ecuadorian-political-tweets-from-2021election-with-Natural-Language-Processing | 1 | 12773732 | <gh_stars>1-10
# imports
import pandas as pd
import nltk
from keras.preprocessing.text import Tokenizer
from nltk.corpus import stopwords, words
from matplotlib import pyplot as plt
from keras.utils import to_categorical
import numpy as np
from sklearn.model_selection import train_test_split
from keras import models, layers
from collections import Counter
# Function for build the one hot matrix
def build_corpus(tweets):
    """Build a frequency-ranked vocabulary (at most 7000 words) from *tweets*.

    Tokens are kept when they are longer than 3 characters, are not
    mentions ('@') or URLs ('http'), and are not Spanish stopwords.
    Returns the surviving words ordered from most to least frequent.
    """
    # corpora downloads for nltk (no-op when already present)
    nltk.download('stopwords')
    # dictionary size
    dictionarySize = 7000
    # tokenize and get frequency
    tok = Tokenizer(filters='!"#$%&()*+,-./:;<=>?[\\]^_`{|}~\t\n')
    tok.fit_on_texts(tweets)
    topUniqueWords = sorted(tok.word_counts.items(), key=lambda x: x[1], reverse=True)
    # Perf fix: fetch the stopword list once (the original called
    # stopwords.words('spanish') for every token) and use sets for O(1)
    # membership instead of O(n) list scans.
    spanish_stopwords = set(stopwords.words('spanish'))
    topUniqueWordsFiltered = []
    seen = set()
    for word, _ in topUniqueWords:
        if (len(word) > 3 and '@' not in word and 'http' not in word
                and word not in spanish_stopwords and word not in seen):
            seen.add(word)
            topUniqueWordsFiltered.append(word)
    return topUniqueWordsFiltered[:dictionarySize]
# Function to represent tweets as numerical vectors considering the corpus of TASS and candidate datsets as reference
def buildWordVectorMatrix(tweetsVect, corpusW):
    """One-hot encode each tweet against the vocabulary *corpusW*.

    Returns a (len(tweetsVect), len(corpusW)) numpy array in which entry
    [i, j] is 1 when word j of the vocabulary occurs in tweet i.
    """
    # Bug fix: the original read the *global* corpusDictionary instead of
    # the corpusW parameter.  Also precompute a word->column map so each
    # lookup is O(1) instead of a list.index() scan; setdefault keeps the
    # FIRST occurrence, matching list.index semantics.
    column_of = {}
    for idx, word in enumerate(corpusW):
        column_of.setdefault(word, idx)
    # empty numpy matrix of necessary size
    wordVectorMatrix = np.zeros((len(tweetsVect), len(corpusW)))
    # fill matrix with a binary representation of each tweet's words
    for pos, tweetInPos in enumerate(tweetsVect):
        for word in tweetInPos.lower().split():
            # only consider words that are part of the vocabulary
            col = column_of.get(word)
            if col is not None:
                wordVectorMatrix[pos, col] = 1
    return wordVectorMatrix
# Load joined TASS dataset
tassDf = pd.read_csv("Datasets/ALL_TassDF.csv", encoding='utf8').reset_index(drop=True)[['Text', 'Tag']]
# Select tweets with tag of positive, negative or neutral
tassDf = tassDf.loc[(tassDf.Tag == 'P') | (tassDf.Tag == 'N') | (tassDf.Tag == 'NEU')]
# Verify the final dataset - 57454 tweets
print(tassDf, '\n\n', tassDf.shape)
print(tassDf.columns.values)
print(tassDf.values)
# Load replies to tweets from two Ecuadorian presidential candidates
candidDf = pd.read_csv( 'Datasets/ALL_candidates.csv', encoding='utf8')
# Merge TASS and candidate datasets to create the corpus
joinedDfTexts = candidDf['text'].append(tassDf['Text'], ignore_index=True)  # continuous idxs
print(joinedDfTexts, '\n\n', joinedDfTexts.shape)
print(joinedDfTexts.values)
# Build the one hot matrix considering TASS and candidate datasets
corpusDictionary = build_corpus(joinedDfTexts)
print (len(corpusDictionary), corpusDictionary)
# Observe Tass dataset balance, to see if we apply any method for balancing
# histogram to see dataset balance
plt.hist(tassDf['Tag'])
# Transform target names (P, N, NEU) of TASS dataset into integer representation
# 0 = negative, 1 = neutral, 2 = positive
tassDf.loc[tassDf['Tag'] == 'N', 'Tag'] = 0
tassDf.loc[tassDf['Tag'] == 'NEU', 'Tag'] = 1
tassDf.loc[tassDf['Tag'] == 'P', 'Tag'] = 2
# Visualize transformation to numerical classes
plt.hist(tassDf['Tag'])
# Represent TASS dataset as vectors considering the corpus created before
X_data = np.array(buildWordVectorMatrix(tassDf['Text'], corpusDictionary))
# Select the y (target) of all data
y_data = np.array(tassDf['Tag'])
# Divide TASS dataset into training (70%), validation (15%) and test (15%)
# NOTE(review): test_size=0.50 yields a 50/25/25 split, not 70/15/15 as
# the comment above claims — confirm which split was intended.
# Training and validation-test
X_train, X_other, y_train, y_other = train_test_split(X_data, y_data, test_size=0.50, random_state=1, stratify=y_data)
# divide into validation and test datasets
X_validation, X_test, y_validation, y_test = train_test_split(X_other, y_other, test_size=0.50, random_state=1, stratify=y_other)
# Convert tags to binary representation using to_categorical function from Keras
y_train_bin = to_categorical(y_train, num_classes=3)
y_test_bin = to_categorical(y_test, num_classes=3)
y_validation_bin = to_categorical(y_validation, num_classes=3)
# Create the neural network model with keras Sequential class and add layers
# with the given activation functions
model = models.Sequential()
model.add(layers.Dense(1200, activation='relu', input_shape=(len(corpusDictionary),)))
model.add(layers.Dense(300, activation='relu'))
model.add(layers.Dense(50, activation='relu'))
model.add(layers.Dense(3, activation='softmax'))
# Associate the metrics to the model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['acc', 'AUC'])
# Run the model with processed data and selected metrics
print('xTest test', X_train.sum(axis=1))
print('X_test', X_train, X_train.shape)
print('y_train_bin', y_train_bin)
train_log = model.fit(X_train, y_train_bin,
                      epochs=10, batch_size=512,
                      validation_data=(X_validation, y_validation_bin))
# Model evaluation considering the accuracy of training and validation datasets
acc = train_log.history['acc']
val_acc = train_log.history['val_acc']
loss = train_log.history['loss']
val_loss = train_log.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# Calculate the accuracy considering test dataset
test_accuracy = model.evaluate(X_test, y_test_bin)
print(test_accuracy)
# Import data from replies to tweets of ecuadorian candidates
lassoPath = 'Datasets/replies_random1000_lasso_Diciembre.csv'
arauzPath = 'Datasets/replies_random1000_arauz_Diciembre.csv'
lassoDf = pd.read_csv(lassoPath, encoding='utf8').reset_index(drop=True)
arauzDf = pd.read_csv(arauzPath, encoding='utf8').reset_index(drop=True)
# Transform candidate tweets into numerical values by one-hot encoding
X_lasso = np.array(buildWordVectorMatrix(lassoDf['text'], corpusDictionary))
X_arauz = np.array(buildWordVectorMatrix(arauzDf['text'], corpusDictionary))
# Predict sentiment analysis of candidate tweets using our trained model
# predict_classes will be deprecated; for multi-class use
# np.argmax(model.predict(X_lasso), axis=-1) in the future
classResultsLasso = model.predict_classes(X_lasso)
lassoPredDistribution = Counter(classResultsLasso).most_common()
print(lassoPredDistribution)
classResultsArauz = model.predict_classes(X_arauz)  # predict_classes will be deprecated
arauzPredDistribution = Counter(classResultsArauz).most_common()
print(arauzPredDistribution)
# Candidate results
# (0 Negative, 1 neutral, 2 positive)
# NOTE(review): most_common() orders entries by frequency, not by class
# id, so indexing [0]/[1]/[2] does not guarantee NEGATIVE/POSITIVE/NEUTRAL
# — the labels below (and in the pie charts) may be mismatched; verify.
print('\nReplies to Lasso tweets results: ')
print(f'NEGATIVES: {lassoPredDistribution[0][1]}')
print(f'POSITIVES: {lassoPredDistribution[1][1]}')
print(f'NEUTRAL: {lassoPredDistribution[2][1]}')
print('\nReplies to Arauz tweets results:')
print(f'NEGATIVES: {arauzPredDistribution[0][1]}')
print(f'POSITIVES: {arauzPredDistribution[1][1]}')
print(f'NEUTRAL: {arauzPredDistribution[2][1]}')
# Pie charts
# Lasso
valuesLasso = [freq for word, freq in lassoPredDistribution]
labelsLasso = ['NEGATIVE', 'POSITIVE', 'NEUTRAL']
plt.figure('Guillermo Lasso')
figLasso1, axLasso1 = plt.subplots()
axLasso1.set_title('Guillermo Laso')
axLasso1.pie(valuesLasso, labels=labelsLasso, autopct='%1.1f%%', shadow=True, startangle=90)
plt.show()
# Arauz
valuesArauz = [freq for word, freq in arauzPredDistribution]
labelsArauz= ['NEGATIVE', 'POSITIVE', 'NEUTRAL']
plt.figure('Andres Arauz')
figArauz1, axArauz1 = plt.subplots()
axArauz1.set_title('<NAME>')
axArauz1.pie(valuesArauz, labels=labelsArauz, autopct='%1.1f%%', shadow=True, startangle=90)
plt.show()
| 3.109375 | 3 |
sksurgerysurfacematch/utils/registration_utils.py | UCL/scikit-surgerysurfacematch | 1 | 12773733 | # -*- coding: utf-8 -*-
""" Various registration routines to reduce duplication. """
import numpy as np
import sksurgerycore.transforms.matrix as mt
import sksurgerysurfacematch.interfaces.rigid_registration as rr
def do_rigid_registration(reconstructed_cloud,
                          reference_cloud,
                          rigid_registration: rr.RigidRegistration,
                          initial_ref2recon: np.ndarray = None,
                          ):
    """
    Triggers a rigid body registration using rigid_registration.

    :param reconstructed_cloud: [Nx3] point cloud, e.g. from video.
    :param reference_cloud: [Mx3] point cloud, e.g. from CT/MR
    :param rigid_registration: Object that implements a rigid registration.
    :param initial_ref2recon: [4x4] ndarray representing an initial \
        estimate of the reference-to-reconstruction transform.
    :return: residual (float), [4x4] transform
    """
    # Pre-apply the initial estimate: rotate (top-left 3x3) and translate
    # (right column) the reference cloud into the reconstruction frame.
    if initial_ref2recon is not None:
        reference_cloud = \
            np.matmul(
                initial_ref2recon[0:3, 0:3], np.transpose(reference_cloud)) \
            + initial_ref2recon[0:3, 3].reshape((3, 1))
        reference_cloud = np.transpose(reference_cloud)

    # Do registration. Best to register recon points to
    # the provided model (likely from CT or MR), and then invert.
    residual, transform = \
        rigid_registration.register(reconstructed_cloud,
                                    reference_cloud
                                    )
    transform = np.linalg.inv(transform)

    # Combine with the initial estimate, if we have one, so the returned
    # transform maps the original (un-shifted) reference cloud.
    if initial_ref2recon is not None:
        init_mat = \
            mt.construct_rigid_transformation(
                initial_ref2recon[0:3, 0:3],
                initial_ref2recon[0:3, 3]
            )
        transform = np.matmul(transform, init_mat)

    return residual, transform
| 2.421875 | 2 |
wolfpack/lib/alpha_addr.py | s1na/wolfpack | 1 | 12773734 | import socket
import nmap
from wolfpack.beta.settings import PORT as alpha_port
def get_network_ip():
    '''Get the local (LAN) IP address of this host.

    Opens a UDP socket towards an external host — UDP connect() sends no
    traffic, it only selects the outgoing interface — and reads the
    locally bound address.
    '''
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('google.com', 0))
        return s.getsockname()[0]
    finally:
        # Bug fix: the socket was previously never closed (leaked fd).
        # try/finally (rather than a with-statement) keeps this working on
        # Python 2, which the module targets (see the raise syntax below).
        s.close()
def get_alpha_addr():
    '''Find & get IP address of alpha in the local network.

    Scans this host's /24 subnet for machines with alpha's TCP port open
    and returns the first match as an (ip, port) tuple.
    '''
    alpha_ip = ''
    nm = nmap.PortScanner()
    # it will probably take a while: sweeps all 256 addresses of the /24.
    nm.scan('%s/24' % get_network_ip(), str(alpha_port))
    for host in nm.all_hosts():
        if nm[host].state() == 'up' and nm[host]['tcp'][alpha_port]['state'] == 'open':
            alpha_ip = host
            break
    if not alpha_ip:
        # NOTE: Python 2 raise syntax — this module targets Python 2.
        raise Exception, 'Alpha not found'
    return (alpha_ip, alpha_port)
| 2.828125 | 3 |
source_hunter/__main__.py | pyeprog/source_hunter | 2 | 12773735 | <reponame>pyeprog/source_hunter
from source_hunter.hunter import hunt
if __name__ == '__main__':
    # Command-line entry point: delegate to the package's hunt() CLI.
    hunt()
| 1.1875 | 1 |
utilities.py | Iammuratc/semiconductorGrowth | 1 | 12773736 | <reponame>Iammuratc/semiconductorGrowth
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 8 23:39:10 2019
@author: Admin
"""
from datetime import datetime
def isTimeFormat(input):
    """Return True when *input* is an 'MM:SS' or 'HH:MM:SS' string whose
    fields are all digits, False otherwise.

    Note: the parameter name shadows the builtin ``input``; kept for
    backward compatibility with existing callers.
    """
    # Bug fix: the original returned None (not False) for three-field
    # values with non-digit parts (e.g. 'ab:cd:ef'); this version always
    # returns a proper bool.
    parts = input.split(':')
    if len(parts) in (2, 3):
        return all(part.isdigit() for part in parts)
    return False
def get_sec(time_str):
    """Convert an 'MM:SS' or 'HH:MM:SS' string to total seconds;
    return False when *time_str* does not match either format."""
    fields = time_str.split(':')
    try:
        if len(fields) == 2:
            minutes, seconds = fields
            return int(minutes) * 60 + int(seconds)
        hours, minutes, seconds = fields
        return int(hours) * 3600 + int(minutes) * 60 + int(seconds)
    except ValueError:
        return False
def oliverEditor(file):
    """Expand 'loop N { ... }' blocks in an Oliver .epi recipe.

    Writes the contents of the local 'oliverSubfunction' file followed by
    the recipe, with each loop body repeated N times, into
    '<name>_edited.epi' and returns that filename.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source — the placement of the two break statements in particular
    should be verified against the original file.
    """
    edited_file = file.replace('.EPI','').replace('.epi','') + '_edited.epi'
    with open(file, 'r') as f:
        lines = f.readlines()
    with open(edited_file, 'w') as fw:
        # Prepend the shared subfunction preamble verbatim.
        with open("oliverSubfunction", 'r') as subfunction:
            for sub_line in subfunction:
                fw.write(sub_line)
            subfunction.close()  # redundant inside the with-block
        for line_index, line in enumerate(lines):
            if 'loop' in line:
                words = line.strip().split()
                for i, word in enumerate(words):
                    loop_counter = 0
                    # 'loop <count> {' header: remember count and position.
                    if word == 'loop' and words[i+2] == '{':
                        loop_time = words[i+1]
                        loop_start = line_index
                        # Scan forward for the matching '}' line.
                        for loop_index, item in enumerate(lines[line_index+1:]):
                            loop_words = item.strip().split()
                            for i, loop_word in enumerate(loop_words):
                                if loop_word == '}' and loop_counter == 0:
                                    loop_finish = line_index + loop_index + 1
                                    # Repeat the whole header..'}' span count times.
                                    loop = [lines[loop_start:loop_finish+1] * (int(loop_time))]
                                    loop_counter += 1
                                    fw.write(''.join(loop[0]))
                                    break
                            break
            else:
                fw.write(line)
    return fw.name
def martinEditor(input_file):
    '''Create the "_edited" files for Martin input files.

    Normalises each token (strips commas/semicolons, converts leading
    second counts to M:S, rewrites open/close and ReactorTemp syntax,
    drops redirection arrows), then expands 'loop' blocks while writing
    '<name>_edited.epi'; returns that filename.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; also note `if ',' or ';' in word:` is always true (a non-empty
    string literal is truthy), and `del split_line[i]` while enumerating
    skips the following token — both look like latent bugs to confirm.
    '''
    with open(input_file, 'r') as f:
        lines = []
        for line in f:
            split_line = line.split()
            for i, word in enumerate(split_line):
                first_word = split_line[0]
                if ',' or ';' in word:
                    split_line[i] = word.replace(',','').replace(';','')
                if first_word.isdigit():
                    # Leading duration in seconds -> "M:S".
                    m = int(int(first_word) / 60)
                    s = int(first_word) - m * 60
                    split_line[0] = word.replace(first_word, str(m)+':'+str(s))
                if split_line[i] == 'open' or split_line[i] == 'close':
                    split_line[i] = split_line[i].replace('open','= open').replace('close','= close')
                if 'ReactorTemp' in split_line[i]:
                    split_line[i] = word.replace(',','').replace(';','').replace('=', ' = ')
                if word == '>' or word == '>>':
                    del split_line[i]
            lines.append(' '.join(split_line) + '\n')
        del lines[-1]
    edited_file = input_file.replace('.EPI','').replace('.epi','') + '_edited.epi'
    with open(edited_file, 'w') as fw:
        for line in lines:
            # NOTE(review): re-reads the whole input file once per line
            # (O(n^2)); the result is loop-invariant and could be hoisted.
            if 'loop' in open(input_file).read():
                if 'loop' in line:
                    fw_split_line = line.split()
                    loop_time = fw_split_line[1]
                    loop_start = lines.index(line)
                elif '}' in line:
                    loop_finish = lines.index(line)
                    # Write count-1 extra copies of the loop span.
                    loop = [''.join(lines[loop_start:loop_finish+1] * (int(loop_time)-1))]
                    fw.write(loop[0])
                    continue
            fw.write(line)
    f.close()
    fw.close()
    return fw.name
def compound_writer(compounds):
    """Derive a material label (e.g. 'GaN', 'InGaN (Mg)') from precursor names.

    Element extraction, e.g. 'TMGa' -> 'Ga', 'NH3' -> 'N', 'MCp2Mg' -> 'Mg'.
    Returns the label string, 0 if only group-V precursors are present, the
    "missing" message when group III+V are present but no known alloy
    matches, and None when no group-V precursor is present at all.
    """
    group_3 = set()
    group_5 = set()
    doping = set()
    for compound in compounds:
        if compound.startswith('T'):
            group_3.add(compound[2:4])        # e.g. 'TMGa'[2:4] -> 'Ga'
        elif compound.startswith('N'):
            group_5.add(compound[0])          # e.g. 'NH3' -> 'N'
        elif compound.startswith('Si'):
            # NOTE(review): keeps three characters ('SiH4' -> 'SiH'), which
            # appears verbatim in the "(dopant)" suffix -- TODO confirm
            # whether 'Si' alone was intended.
            doping.add(compound[0:3])
        elif compound.startswith('MCp'):
            doping.add('Mg')
    my_compounds = set().union(group_3, group_5, doping)

    def _with_dopant(base):
        # Append "(dopant)" when a dopant precursor was seen.  NOTE(review):
        # list(doping)[0] picks an arbitrary dopant when several are present.
        if doping:
            return '{} ({})'.format(base, str(list(doping)[0]))
        return base

    if group_3 and group_5:
        # First matching alloy wins; order goes from most to least specific.
        alloy_table = (
            (('In', 'Ga', 'N', 'Al'), 'InAlGaN'),
            (('Al', 'Ga', 'N'), 'AlGaN'),
            (('In', 'Ga', 'N'), 'InGaN'),
            (('Ga', 'N'), 'GaN'),
            (('Al', 'N'), 'AlN'),
            (('In', 'N'), 'InN'),
        )
        for required, base in alloy_table:
            if all(element in my_compounds for element in required):
                return _with_dopant(base)
        return "{} is missing, please consider adding the necessary compound".format(compounds)
    elif group_5:
        return 0
| 2.71875 | 3 |
class Solution:
    # @param A : list of integers
    # @return an integer
    def perfectPeak(self, A):
        """Return 1 if A contains a "perfect peak": an element strictly
        greater than every element before it and strictly less than every
        element after it.  Otherwise return 0.  O(n) time, O(n) space.
        """
        n = len(A)
        # Bug fix: the original indexed A[0]/A[n-1] unconditionally and
        # crashed on an empty list.  A peak needs a neighbour on each side.
        if n < 3:
            return 0
        left = [0] * n    # left[i]  = max(A[0..i])
        right = [0] * n   # right[i] = min(A[i..n-1])
        left[0] = A[0]
        right[n-1] = A[n-1]
        for i in range(1, n) :
            left[i] = max(left[i-1], A[i])
        for i in range(n-2, -1, -1) :
            right[i] = min(right[i+1], A[i])
        for i in range(1, n-1) :
            if A[i] > left[i-1] and A[i] < right[i+1] :
                return 1
        return 0
if __name__ == "__main__":
    # Guard added: the original ran (and printed) on import as a module
    # side effect.  Behaviour when executed as a script is unchanged.
    A = [ 5706, 26963, 24465, 29359, 16828, 26501, 28146, 18468, 9962, 2996, 492, 11479, 23282, 19170, 15725, 6335 ]
    ob = Solution()
    print(ob.perfectPeak(A))
| 2.953125 | 3 |
utils/plot_networks.py | LiwenxuanNJU/TVpgGLM | 1 | 12773738 | # From https://groups.google.com/forum/#!topic/networkx-discuss/FwYk0ixLDuY
# Plot weighted directed positive/negative network graph
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch, Circle
import numpy as np
def draw_curvy_network(G, pos, ax, node_radius=0.02, node_color='b', node_edge_color='b', node_alpha=0.5, edge_color=None, edge_alpha=0.5, edge_width=None):
    """Draw graph *G* on matplotlib axes *ax* with circular nodes and
    curved FancyArrowPatch edges.

    Scalar style arguments are broadcast to per-node / per-edge lists.
    Parallel edges between the same ordered node pair get increasing,
    alternating curvature so they stay visually distinct.

    Returns the last edge patch added, or None if G has no edges.
    """
    assert isinstance(G, nx.Graph), "G must be a NetworkX graph!"

    # Broadcast a scalar to a list of length N; lists pass through unchanged.
    def _to_list(x, N):
        if isinstance(x, list):
            assert len(x) == N
            return x
        else:
            return [x] * N

    node_radius = _to_list(node_radius, len(G.nodes()))
    node_color = _to_list(node_color, len(G.nodes()))
    node_edge_color = _to_list(node_edge_color, len(G.nodes()))
    node_alpha = _to_list(node_alpha, len(G.nodes()))

    if edge_color is None:
        edge_color = _to_list('k', len(G.edges()))
    edge_alpha = _to_list(edge_alpha, len(G.edges()))
    # if user specify edge-width it is not the same
    if edge_width is None:
        edge_width = 2
    edge_width = _to_list(edge_width, len(G.edges()))

    # Plot the nodes, stashing each patch on the node so edges can anchor
    # on it.  NOTE(review): G.node[...] is the pre-networkx-2.4 API --
    # confirm the pinned networkx version before changing to G.nodes[...].
    for n, r, a, fc, ec in zip(G, node_radius, node_alpha, node_color, node_edge_color):
        c = Circle(pos[n], radius=r, alpha=a, fc=fc, ec=ec)
        ax.add_patch(c)
        G.node[n]['patch'] = c

    # Plot the edges
    seen = {}
    e = None  # bug fix: previously unbound -> NameError on edgeless graphs
    for (u, v, d), a, lw, ec in zip(G.edges(data=True), edge_alpha, edge_width, edge_color):
        n1 = G.node[u]['patch']
        n2 = G.node[v]['patch']
        rad = -0.1
        if (u, v) in seen:
            # Bend each additional parallel edge further, flipping sides.
            rad = seen.get((u, v))
            rad = (rad + np.sign(rad) * 0.1) * -1
        e = FancyArrowPatch(n1.center, n2.center, patchA=n1, patchB=n2, arrowstyle='-|>',
                            connectionstyle='arc3,rad=%s' % rad, mutation_scale=10.0, lw=lw, alpha=a, color=ec)
        seen[(u, v)] = rad
        ax.add_patch(e)
    return e
if __name__ == "__main__":
    from hips.plotting.colormaps import harvard_colors

    # Small demo graph with a self-loop and parallel edges in both directions.
    palette = harvard_colors()[0:10]
    demo_edges = [(1, 1), (1, 2), (2, 1), (2, 3), (3, 4), (2, 4), (3, 2)]
    G = nx.MultiDiGraph(demo_edges)
    pos = nx.spring_layout(G)
    ax = plt.gca()

    # One width/color per edge, in demo_edges order.
    widths = [5, 0.9, 0.8, 2, 2, 1, 5]
    colors = [palette[0]] * 4 + [palette[1]] * 3
    draw_curvy_network(G, pos, ax, node_color='k', node_edge_color='k',
                       edge_width=widths, edge_color=colors)

    ax.autoscale()
    plt.axis('equal')
    plt.axis('off')
    # plt.savefig("graph.pdf")
    plt.show()
| 3.25 | 3 |
app/editor/animation_editor/animation_properties.py | zerorock1312/lt-maker-master | 0 | 12773739 | <reponame>zerorock1312/lt-maker-master
import time
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, \
QSizePolicy, QFrame, QSplitter, QRadioButton, QLineEdit, QLabel, QSpinBox, \
QStyle, QToolButton
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap, QPainter, QImage, QColor, QPen
from app.extensions.custom_gui import PropertyBox
from app.extensions.spinbox_xy import SpinBoxXY
from app.utilities import utils, str_utils
from app.editor import timer
from app.editor.icon_editor.icon_view import IconView
class SpeedSpecification(QWidget):
    """Widget for editing an animation's playback speed.

    Two mutually exclusive modes: a constant delay in milliseconds
    (spinbox) or a variable per-frame tick-count list entered as
    comma-separated integers, with ``N*K`` shorthand for K repeats of N.
    The chosen value is written straight into ``self.window.current.speed``
    (int for constant mode, list of ints for variable mode).
    """

    def __init__(self, parent):
        super().__init__()
        self.window = parent.window

        self.layout = QVBoxLayout()

        self.int_speed = QRadioButton("Constant (ms)", self)
        self.int_speed.toggled.connect(self.int_speed_toggled)
        self.list_speed = QRadioButton("Variable (#frames)", self)

        self.int_speed_box = QSpinBox(self)
        self.int_speed_box.setRange(1, 1024)
        self.int_speed_box.valueChanged.connect(self.change_spinbox)

        self.list_speed_box = QLineEdit(self)
        self.list_speed_box.setPlaceholderText("Enter integers separated by commas")
        self.list_speed_box.textChanged.connect(self.change_text)
        self.list_speed_box.editingFinished.connect(self.check_text)
        self.list_speed_box.setEnabled(False)
        # Shows a check/cross icon reflecting whether the list text is valid.
        self.list_speed_label = QLabel(self)

        top_layout = QHBoxLayout()
        top_layout.addWidget(self.int_speed)
        top_layout.addWidget(self.int_speed_box)
        bottom_layout = QHBoxLayout()
        bottom_layout.addWidget(self.list_speed)
        bottom_layout.addWidget(self.list_speed_box)
        bottom_layout.addWidget(self.list_speed_label)
        self.layout.addLayout(top_layout)
        self.layout.addLayout(bottom_layout)
        self.setLayout(self.layout)

    def set_current(self, speed):
        """Load *speed* (int or list of ints) into the widgets."""
        if str_utils.is_int(speed):
            self.int_speed_box.setValue(speed)
            self.int_speed.setChecked(True)
            self.int_speed_toggled(True)
        else:
            self.list_speed_box.setText(self.set_speed(speed))
            # NOTE(review): the list_speed radio is never setChecked(True)
            # here, leaving both radios unchecked -- TODO confirm intended.
            self.int_speed.setChecked(False)
            self.int_speed_toggled(False)

    def int_speed_toggled(self, checked):
        """Enable the widgets for the selected mode and sync current.speed."""
        if checked:
            self.int_speed_box.setEnabled(True)
            self.list_speed_box.setEnabled(False)
            self.list_speed_label.setPixmap(QPixmap())
            if self.window.current:
                self.window.current.speed = int(self.int_speed_box.value())
        else:
            self.int_speed_box.setEnabled(False)
            self.list_speed_box.setEnabled(True)
            self.check_text()

    def change_spinbox(self, val):
        """Constant-speed spinbox changed: push the new value to the model."""
        if self.window.current:
            self.window.current.speed = int(val)

    def change_text(self, text):
        """Variable-speed text changed: revalidate on every keystroke."""
        self.check_text()

    def check_text(self):
        """Validate the variable-speed text; update the status icon and model.

        Invalid or empty text falls back to the constant spinbox value so
        the model always holds something usable.
        """
        text = self.list_speed_box.text()
        if text:
            good = self.text_valid(text)
            if good:
                icon = self.style().standardIcon(QStyle.SP_DialogApplyButton)
                self.list_speed_label.setPixmap(icon.pixmap(32, 32))
                if self.window.current:
                    self.window.current.speed = self.get_speed(text)
            else:
                icon = self.style().standardIcon(QStyle.SP_DialogCancelButton)
                self.list_speed_label.setPixmap(icon.pixmap(32, 32))
                if self.window.current:
                    self.window.current.speed = int(self.int_speed_box.value())
        else:
            self.list_speed_label.setPixmap(QPixmap())
            if self.window.current:
                self.window.current.speed = int(self.int_speed_box.value())

    def get_speed(self, text):
        """Parse '1,2*3,4' style text into a list of ints: [1, 2, 2, 2, 4]."""
        frame_numbers = text.replace(' ', '').split(',')
        # Expand the 'N*K' repeat shorthand.
        new_frame_numbers = []
        for num in frame_numbers:
            if '*' in num:
                a, b = num.split('*')
                new_frame_numbers += [a] * int(b)
            else:
                new_frame_numbers.append(num)
        frame_numbers = [int(_) for _ in new_frame_numbers]
        return frame_numbers

    def set_speed(self, speed_list):
        """Inverse of get_speed: render a list of ints as compact 'N*K' text."""
        a = []
        current_speed_int = speed_list[0]
        total = 1
        for speed_int in speed_list[1:]:
            if speed_int == current_speed_int:
                total += 1
            elif total > 1:
                a.append(str(current_speed_int) + '*' + str(total))
                total = 1
            else:
                a.append(str(current_speed_int))
                total = 1
            current_speed_int = speed_int
        # Flush the final run.
        if total > 1:
            a.append(str(current_speed_int) + '*' + str(total))
        else:
            a.append(str(current_speed_int))
        return ','.join(a)

    def text_valid(self, text):
        """True iff *text* parses to positive ints, one per animation frame."""
        try:
            frame_numbers = self.get_speed(text)
            return all(i > 0 for i in frame_numbers) and len(frame_numbers) == self.window.current.num_frames
        except Exception:
            # Bug fix: narrowed from a bare ``except:`` so KeyboardInterrupt
            # and SystemExit still propagate; parse errors return False.
            return False
class AnimationProperties(QWidget):
    """Editor pane for a sprite-sheet animation resource.

    Left side: a playback preview with play/loop buttons.  Right side:
    controls for the sheet grid (frames in x/y), the total frame count,
    and the playback speed.  Below: the raw sheet with grid lines drawn
    over it.  All edits write straight into ``self.current``.
    """

    def __init__(self, parent, current=None):
        QWidget.__init__(self, parent)
        self.window = parent
        self._data = self.window._data
        self.setMaximumHeight(720)
        # Populate resources
        for resource in self._data:
            resource.pixmap = QPixmap(resource.full_path)
        self.current = current
        self.playing = False
        self.loop = False
        self.last_update = 0      # ms timestamp of playback start (constant-speed mode)
        self.counter = 0          # current frame index (variable-speed mode)
        self.frames_passed = 0    # ticks spent on the current frame (variable-speed mode)
        left_section = QVBoxLayout()
        self.frame_view = IconView(self)
        self.frame_view.scene.setBackgroundBrush(QColor(200, 200, 200))
        left_section.addWidget(self.frame_view)
        button_section = QHBoxLayout()
        button_section.setAlignment(Qt.AlignTop)
        self.play_button = QToolButton(self)
        self.play_button.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        self.play_button.clicked.connect(self.play_clicked)
        self.loop_button = QToolButton(self)
        self.loop_button.setIcon(self.style().standardIcon(QStyle.SP_BrowserReload))
        self.loop_button.clicked.connect(self.loop_clicked)
        self.loop_button.setCheckable(True)
        button_section.addWidget(self.play_button)
        button_section.addWidget(self.loop_button)
        left_section.addLayout(button_section)
        right_section = QVBoxLayout()
        self.frame_box = PropertyBox("Frames", SpinBoxXY, self)
        self.frame_box.edit.coordsChanged.connect(self.frames_changed)
        self.frame_box.edit.setMinimum(1)
        right_section.addWidget(self.frame_box)
        self.total_num_box = PropertyBox("Total Frames", QSpinBox, self)
        self.total_num_box.edit.valueChanged.connect(self.num_frames_changed)
        right_section.addWidget(self.total_num_box)
        self.speed_box = PropertyBox("Speed", SpeedSpecification, self)
        right_section.addWidget(self.speed_box)
        left_frame = QFrame(self)
        left_frame.setLayout(left_section)
        right_frame = QFrame(self)
        right_frame.setLayout(right_section)
        top_splitter = QSplitter(self)
        top_splitter.setChildrenCollapsible(False)
        top_splitter.addWidget(left_frame)
        top_splitter.addWidget(right_frame)
        self.raw_view = PropertyBox("Raw Sprite", IconView, self)
        self.raw_view.edit.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        final_splitter = QSplitter(self)
        final_splitter.setOrientation(Qt.Vertical)
        final_splitter.setChildrenCollapsible(False)
        final_splitter.addWidget(top_splitter)
        final_splitter.addWidget(self.raw_view)
        final_section = QHBoxLayout()
        self.setLayout(final_section)
        final_section.addWidget(final_splitter)
        # Redraw both views on every timer tick (drives playback).
        timer.get_timer().tick_elapsed.connect(self.tick)

    def tick(self):
        """Timer callback: refresh the raw sheet and the preview frame."""
        if self.current:
            self.draw_raw()
            self.draw_frame()

    def set_current(self, current):
        """Switch the pane to edit *current* and sync all widgets to it."""
        self.current = current
        # Captured before frame_box fires coordsChanged, which can clamp
        # current.num_frames via frames_changed.
        old_num_frames = self.current.num_frames
        self.frame_box.edit.set_current(current.frame_x, current.frame_y)
        self.total_num_box.edit.setValue(old_num_frames)
        self.speed_box.edit.set_current(current.speed)
        self.draw_raw()
        self.draw_frame()

    def draw_raw(self):
        """Render the full sprite sheet with dashed frame-grid lines on top."""
        pixmap = self.current.pixmap
        base_image = QImage(pixmap.width(), pixmap.height(), QImage.Format_ARGB32)
        base_image.fill(QColor(0, 0, 0, 0))
        painter = QPainter()
        painter.begin(base_image)
        painter.drawImage(0, 0, self.current.pixmap.toImage())
        # Draw grid lines
        painter.setPen(QPen(Qt.black, 1, Qt.DashLine))
        width = self.current.pixmap.width() // self.current.frame_x
        height = self.current.pixmap.height() // self.current.frame_y
        for x in range(self.current.frame_x + 1):
            painter.drawLine(x * width, 0, x * width, self.current.pixmap.height())
        for y in range(self.current.frame_y + 1):
            painter.drawLine(0, y * height, self.current.pixmap.width(), y * height)
        painter.end()
        self.raw_view.edit.set_image(QPixmap.fromImage(base_image))
        self.raw_view.edit.show_image()

    def draw_frame(self):
        """Render the current preview frame.

        Constant speed (int): frame index derived from wall-clock time.
        Variable speed (list): self.counter advances after
        speed[counter] ticks have elapsed on the current frame.
        When not playing, frame 0 is shown.
        """
        if self.playing:
            if str_utils.is_int(self.current.speed):
                num = int(time.time() * 1000 - self.last_update) // self.current.speed
                if num >= self.current.num_frames and not self.loop:
                    num = 0
                    self.stop()
                else:
                    num %= self.current.num_frames
            else:
                self.frames_passed += 1
                if self.frames_passed > self.current.speed[self.counter]:
                    self.counter += 1
                    self.frames_passed = 0
                if self.counter >= len(self.current.speed):
                    if not self.loop:
                        self.stop()
                    self.counter = 0
                num = self.counter
        else:
            num = 0
        # Map the linear frame index onto the sheet grid (row-major).
        width = self.current.pixmap.width() // self.current.frame_x
        height = self.current.pixmap.height() // self.current.frame_y
        left = (num % self.current.frame_x) * width
        top = (num // self.current.frame_x) * height
        base_image = self.current.pixmap.copy(left, top, width, height)
        self.frame_view.set_image(base_image)
        self.frame_view.show_image()

    def stop(self):
        """Halt playback and restore the play icon."""
        self.playing = False
        self.play_button.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))

    def play_clicked(self):
        """Toggle playback; restarting resets all playback counters."""
        if self.playing:
            self.stop()
        else:
            self.playing = True
            self.last_update = time.time() * 1000
            self.counter = 0
            self.frames_passed = 0
            self.play_button.setIcon(self.style().standardIcon(QStyle.SP_MediaStop))

    def loop_clicked(self, val):
        """Track the checkable loop button's state."""
        if val:
            self.loop = True
        else:
            self.loop = False

    def frames_changed(self, x, y):
        """Grid size changed: store it and clamp the total-frame range.

        The total must fill all but the last row's tail: between
        x*y - x + 1 and x*y frames.
        """
        if self.current:
            self.current.frame_x = x
            self.current.frame_y = y
            minim = x * y - x + 1
            self.total_num_box.edit.setRange(minim, x * y)
            self.total_num_box.edit.setValue(utils.clamp(self.current.num_frames, minim, x * y))

    def num_frames_changed(self, val):
        """Total-frames spinbox changed: push the new count to the model."""
        self.current.num_frames = val
| 2.15625 | 2 |
presqt/targets/zenodo/utilities/helpers/upload_helper.py | djordjetrajkovic/presqt | 3 | 12773740 | import json
import requests
from rest_framework import status
from presqt.utilities import PresQTResponseException
def zenodo_upload_helper(auth_parameter, project_title=None):
    """
    Initialize a new project on Zenodo.

    Creates an empty deposition, then fills in its metadata (title,
    upload type, description, creator) with a follow-up PUT.

    Parameters
    ----------
    auth_parameter : str
        The Authentication parameter expected by Zenodo.
    project_title : str
        Title to record in the new deposition's metadata.

    Returns
    -------
    The new Project ID.
    """
    headers = {"Content-Type": "application/json"}

    creation_response = requests.post(
        'https://zenodo.org/api/deposit/depositions',
        params=auth_parameter, json={}, headers=headers)
    if creation_response.status_code != 201:
        raise PresQTResponseException(
            "Zenodo returned a {} status code while trying to create the project.".format(
                creation_response.status_code), status.HTTP_400_BAD_REQUEST)

    deposition = creation_response.json()
    project_id = deposition['id']

    # Attach the descriptive metadata to the freshly created deposition.
    metadata = {
        'metadata': {
            'title': project_title,
            'upload_type': 'other',
            'description': 'PresQT Upload',
            'creators': [{'name': str(deposition['owner'])}]}}
    requests.put('https://zenodo.org/api/deposit/depositions/{}'.format(project_id),
                 params=auth_parameter, data=json.dumps(metadata), headers=headers)

    return project_id
| 2.640625 | 3 |
malspider_django/dashboard/management/commands/del_alerts.py | andrewhenke/malspider | 453 | 12773741 | #
# Copyright (c) 2016-present, Cisco Systems, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#
from django.core.management.base import BaseCommand, CommandError
from dashboard.models import Alert
from django.db.models import Q
class Command(BaseCommand):
    """Management command that deletes every Alert row.  Irreversible."""
    help = 'Removes ALL alerts.'
    def handle(self, *args, **options):
        """Entry point invoked by ``manage.py``; wipes the Alert table."""
        Alert.objects.all().delete()
| 1.6875 | 2 |
Graph.py | bitsPleaseHacked22/UofaPathfinder | 0 | 12773742 | <filename>Graph.py<gh_stars>0
import sys
import numpy as np
class Graph(object):
    """Undirected weighted graph loaded from CSV assets, with Dijkstra search.

    ``self.nodes`` holds the node names from assets/nodes.csv and
    ``self.graph`` maps node -> {neighbour: weight}, built symmetrically
    from the adjacency matrix in assets/AM.csv.
    """

    def __init__(self):
        # nodes.csv: node names.  AM.csv: adjacency matrix where missing
        # entries are filled with the 1000 "no edge" sentinel.
        nodes = np.loadtxt("assets/nodes.csv", dtype=str, delimiter=',')
        matrix = np.genfromtxt("assets/AM.csv", delimiter=',', filling_values=1000)
        init_graph = {node: {} for node in nodes}
        for i in range(len(nodes)):
            for j in range(len(nodes)):
                weight = matrix[i][j]
                if weight != 1000:
                    # Non-unit weights are doubled -- TODO confirm this
                    # project convention.
                    init_graph[nodes[i]][nodes[j]] = weight if weight == 1 else weight * 2
        self.nodes = nodes
        self.graph = self.construct_graph(nodes, init_graph)

    def construct_graph(self, nodes, init_graph):
        '''
        Return a symmetric copy of init_graph: if there's a path from node A
        to B with a value V, ensure there is a path from B to A with value V.
        '''
        graph = {node: {} for node in nodes}
        graph.update(init_graph)
        for node, edges in graph.items():
            for adjacent_node, value in edges.items():
                # Bug fix: was ``.get(node, False) == False`` which treated a
                # legitimate 0-weight edge as missing (0 == False in Python).
                if node not in graph[adjacent_node]:
                    graph[adjacent_node][node] = value
        # (Removed a leftover debug print of graph.keys().)
        return graph

    def get_nodes(self):
        "Returns the nodes of the graph."
        return self.nodes

    def get_outgoing_edges(self, node):
        "Returns the neighbors of a node, in self.nodes order."
        connections = []
        for out_node in self.nodes:
            # Bug fix: membership test instead of ``.get(...) != False`` so
            # 0-weight edges are not silently dropped.
            if out_node in self.graph[node]:
                connections.append(out_node)
        return connections

    def value(self, node1, node2):
        "Returns the value of the edge between two nodes."
        return self.graph[node1][node2]

    def dijkstra_algorithm(self, start_node):
        """Single-source shortest paths from *start_node*.

        Returns (previous_nodes, shortest_path): a predecessor map for path
        reconstruction, and the best known cost per node (sys.maxsize for
        unreachable nodes).
        """
        unvisited_nodes = list(self.get_nodes())
        # Cost of visiting each node, updated as we move along the graph.
        shortest_path = {}
        # Best known predecessor of each node found so far.
        previous_nodes = {}
        max_value = sys.maxsize  # stands in for "infinity"
        for node in unvisited_nodes:
            shortest_path[node] = max_value
        shortest_path[start_node] = 0
        while unvisited_nodes:
            # Pick the unvisited node with the lowest tentative cost.
            current_min_node = None
            for node in unvisited_nodes:
                if current_min_node is None:
                    current_min_node = node
                elif shortest_path[node] < shortest_path[current_min_node]:
                    current_min_node = node
            # Relax every edge leaving that node.
            neighbors = self.get_outgoing_edges(current_min_node)
            for neighbor in neighbors:
                tentative_value = shortest_path[current_min_node] + self.value(current_min_node, neighbor)
                if tentative_value < shortest_path[neighbor]:
                    shortest_path[neighbor] = tentative_value
                    previous_nodes[neighbor] = current_min_node
            # Mark the node as visited.
            unvisited_nodes.remove(current_min_node)
        return previous_nodes, shortest_path

    def print_result(self, start_node, target_node):
        """Print the cost of the best path and return it as a node list."""
        path = []
        node = target_node
        previous_nodes, shortest_path = self.dijkstra_algorithm(start_node)
        while node != start_node:
            path.append(node)
            node = previous_nodes[node]
        # Add the start node manually; it never appears in previous_nodes.
        path.append(start_node)
        print("We found the following best path with a value of {}.".format(shortest_path[target_node]))
        path.reverse()
        return path
| 3.203125 | 3 |
material/assignment2/code/movements.py | mpambasange/MachineLearning | 16 | 12773743 | <gh_stars>10-100
#!/usr/bin/env Python3
'''
This file will read in data and start your mlp network.
You can leave this file mostly untouched and do your
mlp implementation in mlp.py.
'''
# Feel free to use numpy in your MLP if you like to.
import numpy as np

import mlp

filename = '../data/movements_day1-3.dat'
movements = np.loadtxt(filename, delimiter='\t')

# Subtract arithmetic mean for each sensor. We only care about how it varies:
movements[:, :40] = movements[:, :40] - movements[:, :40].mean(axis=0)

# Find maximum absolute value per column (over max and |min|):
imax = np.concatenate((movements.max(axis=0) * np.ones((1, 41)),
                       np.abs(movements.min(axis=0) * np.ones((1, 41)))),
                      axis=0).max(axis=0)

# Divide by imax, sensor values should now be between -1 and 1
movements[:, :40] = movements[:, :40] / imax[:40]

# Generate one-hot target vectors for all inputs: class 2 -> [0,1,0,0,0,0,0,0]
target = np.zeros((np.shape(movements)[0], 8))
for x in range(1, 9):
    indices = np.where(movements[:, 40] == x)
    target[indices, x - 1] = 1

# Randomly order the data
order = list(range(np.shape(movements)[0]))
np.random.shuffle(order)
movements = movements[order, :]
target = target[order, :]

# Split data into 3 sets:
# Training (every 2nd row) updates the weights and improves the network
train = movements[::2, 0:40]
train_targets = target[::2]
# Validation (rows 1,5,9,...) checks performance and decides when to stop
valid = movements[1::4, 0:40]
valid_targets = target[1::4]
# Test data (rows 3,7,11,...) evaluates the fully trained network.
# Bug fix: this slice had been corrupted into an IPv6-like token
# (movements[fc00:...]); restored to 3::4 to mirror target[3::4] below.
test = movements[3::4, 0:40]
test_targets = target[3::4]

# Try networks with different numbers of hidden nodes:
hidden = 12

# Initialize the network:
net = mlp.mlp(train, train_targets, hidden)

# Run training (earlystopping monitors the validation set):
net.earlystopping(train, train_targets, valid, valid_targets)

# Check how well the network performed:
net.confusion(test, test_targets)
| 3.328125 | 3 |
ros/nodes/simple_run.py | asukiaaa/kagotos | 0 | 12773744 | #!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
def set_motor_speed(left, right):
    """Translate left/right wheel commands into a Twist and publish it.

    Positive speeds on both sides drive forward, opposite or stopped
    sides turn in place (rolling forward when one side is exactly zero),
    and negative speeds on both sides reverse.
    """
    twist = Twist()
    if left > 0.0:
        if right > 0.0:
            twist.linear.x = 1.0
        else:
            # Right side stopped or reversed: turn right, still advancing
            # when the right side is exactly zero.
            twist.angular.z = -1.0
            if right == 0.0:
                twist.linear.x = 1.0
    elif right > 0.0:
        # Left side stopped or reversed: turn left, still advancing when
        # the left side is exactly zero.
        twist.angular.z = 1.0
        if left == 0.0:
            twist.linear.x = 1.0
    elif left < 0.0 and right < 0.0:
        twist.linear.x = -1.0
    pub.publish(twist)
def laser_scan_callback(data):
    """LaserScan callback: steer away from the nearest forward obstacle.

    Scans the forward sector of the range array and picks a motor command
    based on the closest valid reading.
    """
    # Single-argument print() calls behave identically under Python 2 and 3.
    print("get laser info")
    print(len(data.ranges))
    min_range = 10  # "no obstacle" upper bound -- presumably metres, TODO confirm
    # Bug fix: the loop variable used to shadow the builtin ``range``.
    # Readings of 0.0 are invalid and ignored; only the forward sector
    # [270:450] is considered.
    for distance in data.ranges[270:450]:
        if distance != 0.0 and distance < min_range:
            min_range = distance
    if min_range < 0.4:
        set_motor_speed(100, -100)   # very close: spin in place
    elif min_range < 0.6:
        set_motor_speed(100, 0)      # close: veer away
    else:
        set_motor_speed(100, 100)    # clear: drive straight
if __name__ == '__main__':
    # Register the node, subscribe to the lidar topic, then hand control
    # to ROS until shutdown.
    rospy.init_node('sample_listener')
    rospy.Subscriber("/scan", LaserScan, laser_scan_callback)
    print "set subscriber"
    rospy.spin()
| 2.515625 | 3 |
trace-engine/__main__.py | ameli/trace | 0 | 12773745 | print('trace!')
| 1.265625 | 1 |
moto/cloudwatch/responses.py | EvaSDK/moto | 0 | 12773746 | from moto.core.responses import BaseResponse
from .models import cloudwatch_backend
class CloudWatchResponse(BaseResponse):
    """Maps CloudWatch API requests onto the mock backend and renders XML."""

    def put_metric_alarm(self):
        """Create or update a metric alarm from the request parameters."""
        dimensions = self._get_list_prefix('Dimensions.member')
        alarm_actions = self._get_multi_param('AlarmActions.member')
        ok_actions = self._get_multi_param('OKActions.member')
        insufficient_data_actions = self._get_multi_param("InsufficientDataActions.member")
        alarm = cloudwatch_backend.put_metric_alarm(
            self._get_param('AlarmName'),
            self._get_param('ComparisonOperator'),
            self._get_param('EvaluationPeriods'),
            self._get_param('Period'),
            self._get_param('Threshold'),
            self._get_param('Statistic'),
            self._get_param('AlarmDescription'),
            dimensions,
            alarm_actions,
            ok_actions,
            insufficient_data_actions,
            self._get_param('Unit'))
        return self.response_template(PUT_METRIC_ALARM_TEMPLATE).render(alarm=alarm)

    def describe_alarms(self):
        """Render every alarm known to the backend."""
        template = self.response_template(DESCRIBE_ALARMS_TEMPLATE)
        return template.render(alarms=cloudwatch_backend.get_all_alarms())

    def delete_alarms(self):
        """Delete the alarms named in ``AlarmNames.member``."""
        names = self._get_multi_param('AlarmNames.member')
        cloudwatch_backend.delete_alarms(names)
        return self.response_template(DELETE_METRIC_ALARMS_TEMPLATE).render()
PUT_METRIC_ALARM_TEMPLATE = """<PutMetricAlarmResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<ResponseMetadata>
<RequestId>
2690d7eb-ed86-11dd-9877-6fad448a8419
</RequestId>
</ResponseMetadata>
</PutMetricAlarmResponse>"""
DESCRIBE_ALARMS_TEMPLATE = """<DescribeAlarmsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<MetricAlarms>
{% for alarm in alarms %}
<member>
<ActionsEnabled>{{ alarm.actions_enabled }}</ActionsEnabled>
<AlarmActions>
{% for action in alarm.alarm_actions %}
<member>{{ action }}</member>
{% endfor %}
</AlarmActions>
<AlarmArn>{{ alarm.arn }}</AlarmArn>
<AlarmConfigurationUpdatedTimestamp>{{ alarm.configuration_updated_timestamp }}</AlarmConfigurationUpdatedTimestamp>
<AlarmDescription>{{ alarm.description }}</AlarmDescription>
<AlarmName>{{ alarm.name }}</AlarmName>
<ComparisonOperator>{{ alarm.comparison_operator }}</ComparisonOperator>
<Dimensions>
{% for dimension in alarm.dimensions %}
<member>
<Name>{{ dimension.name }}</Name>
<Value>{{ dimension.value }}</Value>
</member>
{% endfor %}
</Dimensions>
<EvaluationPeriods>{{ alarm.evaluation_periods }}</EvaluationPeriods>
<InsufficientDataActions>
{% for action in alarm.insufficient_data_actions %}
<member>{{ action }}</member>
{% endfor %}
</InsufficientDataActions>
<MetricName>{{ alarm.metric_name }}</MetricName>
<Namespace>{{ alarm.namespace }}</Namespace>
<OKActions>
{% for action in alarm.ok_actions %}
<member>{{ action }}</member>
{% endfor %}
</OKActions>
<Period>{{ alarm.period }}</Period>
<StateReason>{{ alarm.state_reason }}</StateReason>
<StateReasonData>{{ alarm.state_reason_data }}</StateReasonData>
<StateUpdatedTimestamp>{{ alarm.state_updated_timestamp }}</StateUpdatedTimestamp>
<StateValue>{{ alarm.state_value }}</StateValue>
<Statistic>{{ alarm.statistic }}</Statistic>
<Threshold>{{ alarm.threshold }}</Threshold>
<Unit>{{ alarm.unit }}</Unit>
</member>
{% endfor %}
</MetricAlarms>
</DescribeAlarmsResponse>"""
DELETE_METRIC_ALARMS_TEMPLATE = """<DeleteMetricAlarmResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<ResponseMetadata>
<RequestId>
2690d7eb-ed86-11dd-9877-6fad448a8419
</RequestId>
</ResponseMetadata>
</DeleteMetricAlarmResponse>"""
| 2.09375 | 2 |
Build.py | Carreau/all-jupyter | 0 | 12773747 | <reponame>Carreau/all-jupyter
import os
from pathlib import Path
ld = os.listdir('public/ecosystem')
p = Path('public/ecosystem')
import yaml
data =[]
for sub in p.iterdir():
if not sub.is_dir():
continue
y = (sub / 'data.yaml')
if not y.exists():
raise ValueError(f'{y} should exists')
with y.open() as f:
dt = yaml.safe_load(f.read())
dt['path'] = sub.parts[-1]
if (sub / 'logo.svg').exists():
dt['logo'] = 'svg'
elif (sub / 'logo.png').exists():
dt['logo'] = 'png'
elif (sub / 'logo.jpg').exists():
dt['logo'] = 'jpg'
data.append(dt)
tpl= """
export var data = {};
export default data;
"""
import json
with open('src/data.js', 'w') as f:
f.write(tpl.format(json.dumps(data, indent=2))) | 2.5 | 2 |
rearrangement/evaluation/model_eval.py | ylabbe/rearrangement-planning | 40 | 12773748 | <reponame>ylabbe/rearrangement-planning<filename>rearrangement/evaluation/model_eval.py<gh_stars>10-100
from pathlib import Path
import numpy as np
from ..dataset.lmdb import LMDBDataset
from ..dataset.real_scene_dataset import RealSceneDataset
from ..datasets_cfg import SYNTHETIC_DATASETS, REAL_DATASETS, DS_DIR
class ModelEvaluation:
    """Builds the evaluation scene dataset for a given dataset name/config."""

    def __init__(self, dataset, config):
        self.dataset = dataset
        self.config = config
        if self.dataset in REAL_DATASETS:
            if dataset == 'real-cubes-1to6':
                # 'real-cubes-1to6' is served from the 1to12 capture,
                # restricted to scenes with 1..6 objects.
                self.dataset, n_objects = 'real-cubes-1to12', np.arange(1,7)
            else:
                n_objects = None
            scene_ds = RealSceneDataset(Path(DS_DIR) / self.dataset, n_objects=n_objects)
        elif self.dataset in SYNTHETIC_DATASETS:
            # Synthetic datasets are evaluated on a fixed held-out frame count.
            n_eval_frames = 300
            scene_ds = LMDBDataset(db_dir=str(Path(DS_DIR) / self.dataset),
                                   train=False, n_frames=n_eval_frames)
        else:
            raise ValueError('unknown dataset')
        self.scene_ds = scene_ds
| 2.34375 | 2 |
locmoss/kgram.py | jm-begon/locmoss | 0 | 12773749 |
from hashlib import sha1
class Buffer(object):
def __init__(self, capacity):
self.circ = [None for _ in range(capacity)]
self.top = 0
def __iter__(self):
size = len(self.circ)
for i in range(size):
o = self.circ[(self.top + i) % size]
if o is None:
raise StopIteration()
yield o
def put(self, o):
self.circ[self.top % len(self.circ)] = o
self.top += 1
def is_full(self):
return self.top >= len(self.circ)
class KGrams(object):
@classmethod
def default_hash_fn(cls, s):
hashval = sha1(s.encode("utf-8"))
hashval = hashval.hexdigest()[-4:]
hashval = int(hashval, 16) # using last 16 bits of sha-1 digest
return hashval
@classmethod
def kgramify(cls, token_iterator, k=5):
buffer = Buffer(k)
for token in token_iterator:
buffer.put(token)
if buffer.is_full():
tokens = list(buffer)
yield tokens[0].location, cls([x.symbol for x in tokens])
def __init__(self, symbols):
self.symbols = ''.join(symbols)
self.hash_val = self.__class__.default_hash_fn(self.symbols)
def __len__(self):
return len(self.symbols)
def __hash__(self):
return self.hash_val
def __eq__(self, other):
return isinstance(other, KGrams) and other.symbols == self.symbols
def __str__(self):
return self.symbols
def __repr__(self):
return "{}({})".format(self.__class__.__name__,
repr(self.symbols))
| 2.703125 | 3 |
tests/models/test_worklist.py | thtroyer/simple-text-generator | 1 | 12773750 | <reponame>thtroyer/simple-text-generator
from simpletextgenerator.models.worklist import WorkItem, WorkList, TrainingWorkItem, GeneratingWorkItem
def test_trainingWorkItem():
    """A training item's time estimate is its size divided by training_rate."""
    training_work_item = TrainingWorkItem(50)
    assert (training_work_item.get_time_estimate(training_rate=50)) == 1
    assert (training_work_item.get_time_estimate(training_rate=1.0)) == 50
    assert (training_work_item.get_time_estimate(training_rate=1.2)) == 50 / 1.2
def test_generatingWorkItem():
    """A generating item's time estimate is its size divided by generation_rate."""
    generating_work_item = GeneratingWorkItem(50)
    assert (generating_work_item.get_time_estimate(generation_rate=50)) == 1
    assert (generating_work_item.get_time_estimate(generation_rate=1.0)) == 50
    assert (generating_work_item.get_time_estimate(generation_rate=1.2)) == 50 / 1.2
def test_Worklist_get_time_estimate():
    """A worklist's estimate sums its items' estimates at the configured rates."""
    workList = WorkList((
        GeneratingWorkItem(50),
        TrainingWorkItem(200)
    ))
    workList.set_training_rate(2)
    workList.set_generating_rate(1)
    # 50/1 generating + 200/2 training = 150
    assert workList.get_time_estimate() == 150
def test_Worklist_get_time_estimate_progress():
    """Progress on the current item reduces the remaining-time estimate."""
    workList = WorkList((
        TrainingWorkItem(200),
        GeneratingWorkItem(50)
    ))
    workList.set_training_rate(2)
    workList.set_generating_rate(1)
    assert workList.get_time_estimate() == 150
    # 50% through the 100-unit training item leaves 50 + 50 = 100.
    workList.set_progress_on_current_work_item(0.5)
    assert workList.get_time_estimate() == 100
    workList.set_progress_on_current_work_item(0.75)
    assert workList.get_time_estimate() == 75
def test_Worklist_advance_index():
    """Advancing drops completed items from the estimate; advancing past the
    end stays at zero rather than failing."""
    workList = WorkList((
        GeneratingWorkItem(50),
        GeneratingWorkItem(50),
        TrainingWorkItem(100),
        TrainingWorkItem(100)
    ))
    workList.set_training_rate(1)
    workList.set_generating_rate(1)
    assert workList.get_time_estimate() == 300
    workList.advance_index()
    assert workList.get_time_estimate() == 250
    workList.advance_index()
    assert workList.get_time_estimate() == 200
    workList.advance_index()
    assert workList.get_time_estimate() == 100
    workList.advance_index()
    assert workList.get_time_estimate() == 0
    # make sure advancing too far doesn't cause problems
    workList.advance_index()
    assert workList.get_time_estimate() == 0
| 2.546875 | 3 |