max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
tests/test_main.py | greggles/cutadapt | 375 | 12762251 | import pytest
from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging
def test_help():
    """--help must terminate via SystemExit with exit status 0."""
    with pytest.raises(SystemExit) as exc_info:
        main(["--help"])
    assert exc_info.value.args[0] == 0
def test_parse_cutoffs():
    """parse_cutoffs: '5' -> (0, 5); '6,7' -> (6, 7); bad input raises.

    Fix: the original tested "a,7" twice; the duplicate is removed and the
    invalid cases are driven by a loop.
    """
    assert parse_cutoffs("5") == (0, 5)
    assert parse_cutoffs("6,7") == (6, 7)
    # Every malformed value must raise CommandLineError.
    for invalid in ("a,7", "a", "1,2,3"):
        with pytest.raises(CommandLineError):
            parse_cutoffs(invalid)
def test_parse_lengths():
    """parse_lengths: single value, open/closed ranges; bad input raises."""
    assert parse_lengths("25") == (25, )
    assert parse_lengths("17:25") == (17, 25)
    assert parse_lengths("25:") == (25, None)
    assert parse_lengths(":25") == (None, 25)
    # Same invalid inputs as before, checked in the same order.
    for invalid in ("1:2:3", "a:2", "a", "2:a", ":"):
        with pytest.raises(CommandLineError):
            parse_lengths(invalid)
def test_setup_logging():
    # Smoke test: setup_logging must accept these flag combinations without
    # raising.  No assertions are made about the emitted log records.
    import logging
    logger = logging.getLogger(__name__)
    setup_logging(logger, log_to_stderr=False, quiet=False, minimal=False, debug=False)
    logger.info("Log message")
    # debug accepts a truthy int as well as a bool.
    setup_logging(logger, log_to_stderr=False, debug=1)
    setup_logging(logger, log_to_stderr=False, quiet=True)
    setup_logging(logger, log_to_stderr=False, minimal=True)
| 2.40625 | 2 |
BarcodeGen.py | guillempp/BoardingPassGenerator | 0 | 12762252 | from pdf417 import encode, render_image, render_svg
import io
class BarcodeGen():
    """Generate PDF417 barcodes from text using the pdf417 library."""

    def _render(self, text):
        # Shared encode/render step; both public methods previously
        # duplicated these two lines.
        codes = encode(text, columns=7, security_level=4)
        return render_image(codes, scale=4, ratio=3, fg_color="black", bg_color="#FFFFFF")

    def generateBarcode(self, text):
        """Render the barcode for *text* and open it in the image viewer."""
        self._render(text).show()

    def generateBarcodeForWeb(self, text):
        """Render the barcode for *text* and return the PIL image object."""
        return self._render(text)
| 2.765625 | 3 |
02_linearregression/linreg.py | Ajay-Chaudhary/ML-Algorithmns | 54 | 12762253 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

plt.style.use('seaborn-deep')

# Importing the dataset
# Expects Salary_Data.csv in the working directory; column 0 is years of
# experience, column 1 is salary — TODO confirm against the CSV header.
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values  # feature matrix (years of experience)
y = dataset.iloc[:, 1].values    # target vector (salary)

# Training/testing
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3,
                                                    random_state=0)

# Regressor
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)  # held-out predictions (not plotted below)

# Visualisation
plt.scatter(X_train, y_train, color = '#31b254')
plt.plot(X_train, regressor.predict(X_train))
plt.title("Relationship between years of experience and salary")
plt.xlabel("Years of experience")
plt.ylabel("Salary")
plt.show()
| 3.71875 | 4 |
core/cogs/Streams/__init__.py | py815-dev/delta-beats | 1 | 12762254 | <reponame>py815-dev/delta-beats<filename>core/cogs/Streams/__init__.py<gh_stars>1-10
import asyncio
from aiohttp import ClientSession
from discord.ext import commands, tasks
from discord.ext.commands import slash_command
class Streams(commands.Cog):
    """Cog that polls YouTube channel URLs and posts a message when one
    goes live.

    self.streams maps a Discord channel id -> [youtube_channel_url,
    message_text].  State is in-memory only and lost on restart.
    """

    def __init__(self, bot):
        self.bot = bot
        self.description = """A cog to send you a youtube notification when a youtube channel is live."""
        self.streams = {}
        # Fix: the polling loop was defined but never started.
        self.stream_loop.start()

    async def cog_command_error(self, ctx, error):
        if isinstance(error, commands.CommandOnCooldown):
            await ctx.send(f"Sorry, this command is on cooldown and can be used again in {round(error.retry_after, 2)} seconds.")
        else:
            await ctx.send(error.original)
            self.bot.log(error.original, "error")

    @tasks.loop(seconds=10)
    async def stream_loop(self):
        # Iterate a snapshot so commands may mutate self.streams meanwhile.
        for channel_id, entry in list(self.streams.items()):
            # entry[0] is the stream url, entry[1] is the message to send.
            async with ClientSession() as session:
                # Fix: aiohttp follows redirects by default, so a 302 would
                # never be observed; disable redirect-following.
                async with session.get(entry[0] + "/live",
                                       allow_redirects=False) as resp:
                    if resp.status == 302:
                        # Fix: Bot.get_channel is synchronous in discord.py;
                        # awaiting it raised a TypeError.
                        channel_obj = self.bot.get_channel(channel_id)
                        await channel_obj.send(entry[1])
            await asyncio.sleep(0.1)  # Prevent busy looping.

    @commands.guild_only()
    @commands.group(name="streamalert", aliases=["alert"])
    async def streamalert(self, ctx: commands.Context):
        """
        Command group for stream commands
        """
        pass

    @commands.cooldown(1, 60, commands.BucketType.guild)
    @streamalert.command(name="youtube", aliases=["yt"])
    async def set(self, ctx, channel: str = None):
        # Fix: `self.streams[ctx.channel.id]` raised KeyError for unknown
        # channels; use a membership test and stop after replying.
        if ctx.channel.id in self.streams:
            return await ctx.send("You are already set to recieve notifications for a channel, only one notification is allowed per channel.")
        if not channel:
            return await ctx.send("Please provide a channel url")
        if not channel.startswith("https://www.youtube.com/channel/"):
            return await ctx.send("Invalid channel, please ensure it is a url.")
        await ctx.send("what message would you like me to send when this user is live?")
        try:
            message = await self.bot.wait_for("message", check=lambda m: m.author == ctx.author, timeout=30)
        except asyncio.TimeoutError:
            return await ctx.send("You took too long to respond, please run the command again.")
        await ctx.send(f"OK, I will now alert you when this user is live on YouTube! Please note that if the bot restarts, you will not be notified.")
        # Fix: `channel` is a URL string (no .id), and sending a Message
        # object fails; key on the Discord channel id and store the url
        # together with the reply text.
        self.streams[ctx.channel.id] = [channel, message.content]

    @streamalert.command(name="cancel", aliases=["c"])
    async def cancel(self, ctx):
        if self.streams.get(ctx.channel.id):
            del self.streams[ctx.channel.id]
            await ctx.send("OK, I will no longer alert you when this channel is live.")
        else:
            await ctx.send("You are not currently being notified of any streams.")
def setup(bot):
    # Standard discord.py extension entry point: register the cog.
    bot.add_cog(Streams(bot))
| 2.6875 | 3 |
model/RetinaNet.py | DrMMZ/RetinaNet | 7 | 12762255 | <filename>model/RetinaNet.py
"""
@author: <NAME>, <EMAIL>
RetinaNet
"""
import os, datetime, time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import retinanet_model, anchors, resnet_fpn, utils
class RetinaNet(object):
    """
    Defines a class based on RetinaNet, including training (can use synchronized
    multi-gpu training), detecting and evaluation.
    """

    def __init__(self, mode, config):
        """
        A constructor.

        Parameters
        ----------
        mode : string
            The mode of building a retinanet in {'training', 'inference'}.
        config : class
            A custom configuration, see config.Config().

        Returns
        -------
        None.
        """
        assert mode in ['training', 'inference']
        self.mode = mode
        self.config = config
        # In training mode the model is built lazily inside train() so it can
        # be created under a tf.distribute strategy scope.
        if mode == 'inference':
            self.model = self.build_retinanet(mode, config)
            if config.checkpoint_path is not None:
                print('\nLoading checkpoint:\n%s\n' % config.checkpoint_path)
                self.model.load_weights(config.checkpoint_path, by_name=False)

    def build_retinanet(self, mode, config):
        """
        Builds a RetinaNet.

        Parameters
        ----------
        mode : string
            The mode of building a retinanet in {'training', 'inference'}.
        config : class
            A custom configuration, see config.Config().

        Returns
        -------
        model : tf keras model
            A retinanet based on the given config.
        """
        model = retinanet_model.retinanet(
            mode,
            config.offsets_mean,
            config.offsets_std,
            config.architecture,
            config.train_bn,
            config.channels_fmap,
            config.num_anchors_per_pixel,
            config.num_object_classes,
            config.pi,
            config.alpha,
            config.gamma,
            config.confidence_threshold,
            config.num_top_scoring,
            config.batch_size_per_gpu,
            config.max_objects_per_class_per_img,
            config.iou_threshold,
            output_top_scoring=False)
        return model

    def compile_model(
            self,
            model,
            lr,
            momentum,
            beta_2,
            l2,
            loss_names=['cls_loss', 'reg_loss']
            ):
        """
        Add Adam optimizer, loss(es) and L2-regularization to the model.

        Parameters
        ----------
        model : tf keras model
            The built retinanet.
        lr : float
            A learning rate.
        momentum : float
            A scalar in Adam controlling moving average of the gradients decay.
        beta_2 : float
            A scalar in Adam controlling moving average of the squared gradients
            decay.
        l2 : float
            A scalar in L2-regularization controlling the strength of
            regularization.
        loss_names : list, optional
            The name(s) of loss function(s) in the model. The default is
            ['cls_loss', 'reg_loss'], i.e., focal (classification) and
            smooth-L1 (regression) losses defined in losses.ClsLoss() and
            losses.RegLoss(), respectively.

        Returns
        -------
        None.
        """
        # optimizer
        # NOTE(review): `lr=` is the legacy alias of `learning_rate=` in
        # tf.keras Adam; kept as-is for compatibility with the TF version
        # this project pins — confirm before upgrading TF.
        optimizer = tf.keras.optimizers.Adam(
            lr=lr,
            beta_1=momentum,
            beta_2=beta_2,
            epsilon=1e-7)

        # losses
        # The losses are computed inside named layers of the graph; pull each
        # layer's output and register it both as a loss and a metric.
        for name in loss_names:
            layer = model.get_layer(name)
            loss = layer.output
            model.add_loss(loss)
            model.add_metric(loss, name=name)

        # l2-regularization, exclude batch norm weights
        # Each weight's penalty is normalized by its element count.
        reg_losses = []
        for w in model.trainable_weights:
            if 'gamma' not in w.name and 'beta' not in w.name:
                reg_losses.append(
                    tf.math.divide(
                        tf.keras.regularizers.L2(l2)(w),
                        tf.cast(tf.size(w), w.dtype)))
        model.add_loss(lambda: tf.math.add_n(reg_losses))

        # compile the model
        model.compile(
            optimizer=optimizer,
            loss=[None] * len(model.outputs))

    def train(self,
              train_generator,
              val_generator=None,
              plot_training=True
              ):
        """
        Trains the RetinaNet.

        Parameters
        ----------
        train_generator : python generator
            Described in data_gen.data_generator().
        val_generator : python generator, optional
            Described in data_gen.data_generator(). The default is None.
        plot_training : boolean, optional
            Whether to plot the learning curves. The default is True.

        Returns
        -------
        The trained model, training log and plot if needed.
        """
        assert self.mode == 'training', \
            'Need to create an instance in training mode.'

        # Use synchronized multi-GPU training when configured and available;
        # otherwise fall back to the default (single-device) strategy.
        if self.config.num_gpus > 1 and \
                len(tf.config.list_physical_devices('GPU')) > 1:
            strategy = tf.distribute.MirroredStrategy(
                cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
        else:
            strategy = tf.distribute.get_strategy()

        with strategy.scope():
            self.model = self.build_retinanet(self.mode, self.config)
            if self.config.checkpoint_path is not None:
                print('\nLoading checkpoint:\n%s\n' \
                      % self.config.checkpoint_path)
                self.model.load_weights(
                    self.config.checkpoint_path, by_name=False)
            if self.config.resnet_weights_path is not None:
                print('\nLoading resnet:\n%s\n' \
                      % self.config.resnet_weights_path)
                self.model.load_weights(
                    self.config.resnet_weights_path, by_name=True)
            # if need to freeze resnet, uncomment the following
            # self.config.train_bn = False
            # for i in range(len(self.model.layers)):
            #     layer = self.model.layers[i]
            #     if layer.name == 'fpn_c5p5':
            #         assert self.model.layers[i-1].name == 'res5c_out'
            #         break
            #     layer.trainable = False
            self.compile_model(
                self.model,
                self.config.lr,
                self.config.momentum,
                self.config.beta_2,
                self.config.l2)
            # assign a learning rate after loading a checkpoint; otherwise it
            # will continue on the last learning rate in the checkpoint
            self.model.optimizer.lr.assign(self.config.lr)
            print('\nlearning rate:', self.model.optimizer.lr.numpy(), '\n')

        # callbacks, including CSVLogger, ModelCheckpoint, ReduceLROnPlateau,
        # and EarlyStopping
        callbacks = []
        # Checkpoints and logs go under ./checkpoints/<timestamp>/.
        ROOT_DIR = os.getcwd()
        log_dir = os.path.join(ROOT_DIR, 'checkpoints')
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        checkpoint_dir = os.path.join(log_dir, current_time)
        if not os.path.exists(checkpoint_dir):
            os.mkdir(checkpoint_dir)
        if self.config.save_weights:
            self.checkpoint_path = os.path.join(checkpoint_dir, 'checkpoint')
            cp_callback = tf.keras.callbacks.ModelCheckpoint(
                self.checkpoint_path,
                save_weights_only=True)
            callbacks.append(cp_callback)
        if self.config.reduce_lr:
            reduce_lr_callback = tf.keras.callbacks.ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.1,
                patience=10)
            callbacks.append(reduce_lr_callback)
        if self.config.early_stopping:
            early_stopping_callback = tf.keras.callbacks.EarlyStopping(
                monitor='val_loss',
                patience=10,
                restore_best_weights=True)
            callbacks.append(early_stopping_callback)
        log_filename = os.path.join(checkpoint_dir, '%s.csv' % current_time)
        log_callback = tf.keras.callbacks.CSVLogger(
            log_filename,
            append=False)
        callbacks.append(log_callback)

        # train
        history = self.model.fit(
            train_generator,
            epochs=self.config.epochs,
            steps_per_epoch=self.config.steps_per_epoch,
            callbacks=callbacks,
            validation_data=val_generator,
            validation_steps=self.config.validation_steps,
            validation_freq=self.config.validation_freq)

        # learning curves, saved to checkpoint_dir
        if plot_training:
            # Split history keys into train/val loss series.
            loss_names = []
            for x in history.history.keys():
                if 'loss' in x:
                    loss_names.append(x)
                else:
                    print(x, history.history[x])
            train_loss_names, val_loss_names = [], []
            for x in loss_names:
                if 'val' in x:
                    val_loss_names.append(x)
                else:
                    train_loss_names.append(x)
            train_losses = []
            for name in train_loss_names:
                train_losses.append(history.history[name])
            val_losses = []
            for name in val_loss_names:
                val_losses.append(history.history[name])
            # One subplot per loss (total, cls, reg) — assumes at most 3.
            plt.figure(figsize=(10, 10))
            for i in range(len(train_loss_names)):
                plt.subplot(3, 1, i+1)
                plt.plot(train_losses[i], label='train')
                plt.plot(val_losses[i], label='val')
                plt.title(train_loss_names[i])
                plt.legend()
            plt.savefig(os.path.join(checkpoint_dir, '%s.png' % current_time))
            plt.show()

    def detect(self, images, verbose=False):
        """
        Detects a set of images.

        Parameters
        ----------
        images : numpy array, [batch_size, height, width, 3]
            The given batch of raw images, i.e., not required normalized by 255,
            centered (substracting mean pixel per-channel) nor standardized
            (centered and divided by standard deviation pixel per-channel).
        verbose : boolean, optional
            Whether to display the detection time.

        Returns
        -------
        boxes_batch : list
            Each element is the detected bounding boxes of an image, of shape
            [num_boxes, 4] where 4 is the corner coordinates.
        class_ids_batch : list
            Each element is the detected class ids of bounding boxes, of shape
            [num_boxes, ].
        scores_batch : list
            Each element is the detected scores of bounding boxes, of shape
            [num_boxes, ].
        t : float
            The detection time in seconds.
        """
        assert self.mode == 'inference', \
            'Need to create an instance in inference mode.'
        assert images.shape[0] == self.config.batch_size_per_gpu, \
            'The number of images has to match with the batch size per gpu.'

        self.config.image_shape = images.shape[1:]
        # Window is the full image extent (y1, x1, y2, x2) used for clipping.
        window = (0,0) + images.shape[1:3]
        self.config.fmap_sizes = resnet_fpn.compute_fmap(images.shape[1:])

        # generate a list of anchors, each is at different level of FPN of
        # shape [batch_size, h_i, w_i]
        anchors_fpn = anchors.anchors_from_fpn(
            self.config.scales,
            self.config.ratios,
            self.config.fmap_sizes,
            self.config.fmap_strides,
            self.config.denser)
        anchors_fpn_batches = []
        for i in range(len(anchors_fpn)):
            a_i = np.broadcast_to(
                anchors_fpn[i],
                (self.config.batch_size_per_gpu,) + anchors_fpn[i].shape)
            anchors_fpn_batches.append(a_i)
        # print('anchors', np.concatenate(anchors_fpn_batches, axis=1).shape)

        # standardize images
        # Priority: standardize (mean and std) > center (mean only) >
        # scale by std only > plain [0, 1] scaling.
        if self.config.channels_mean is not None \
                and self.config.channels_std is not None:
            images = (
                images - self.config.channels_mean
                ) / self.config.channels_std
        elif self.config.channels_mean is not None:
            images = images - self.config.channels_mean
        elif self.config.channels_std is not None:
            images = images / self.config.channels_std
        else:
            images = images / 255.0

        window = tf.expand_dims(tf.constant(window), axis=0)
        inputs = [images, anchors_fpn_batches, window]
        t1 = time.time()
        boxes_batch, class_ids_batch, scores_batch = self.model(inputs)
        t2 = time.time()
        t = t2 - t1
        if verbose: print('\ndetection time: %fs\n' %t)
        return boxes_batch, class_ids_batch, scores_batch, t

    def evaluate(self, dataset, verbose=False):
        """
        Evaluates the trained RetinaNet for a given dataset using mAP metric,
        in particular, the images in dataset have differen shapes.

        Parameters
        ----------
        dataset : class
            Described in utils.Dataset().
        verbose : boolean, optional
            Whether to display mAP for each image. The default is False.

        Returns
        -------
        mAP : float
            The mAP result for the dataset.
        output : numpy array
            Records AP and detection time for each image, [num_images, 3] where
            3 is (image_name, AP, time).
        """
        image_names, APs, times = [], [], []
        # loop if images in dataset have different shapes
        # t1 = time.time()
        for image_id in dataset.image_ids:
            image_name = dataset.get_image_info(image_id)['id']
            image_names.append(image_name)
            if verbose: print('\n---%d, %s' % (image_id, image_name))
            image, boxes, class_ids, cache = dataset.load_data(
                image_id,
                shortest_side=self.config.shortest_side,
                mode=self.config.resize_mode)
            # Detect a single-image batch.
            image1 = image[np.newaxis,...]
            boxes_batch, class_ids_batch, scores_batch, t = self.detect(
                image1, verbose=verbose)
            times.append(t)
            i = 0
            pred_boxes = boxes_batch[i]
            # refine pred_boxes by clipping them to the window
            window = cache[1]
            pred_boxes = utils.clip_boxes(pred_boxes, window)
            pred_boxes = pred_boxes.numpy()
            # since detected classes are 0-base and objects only
            # (not background 0), increase 1
            pred_class_ids = class_ids_batch[i].numpy() + 1
            pred_scores = scores_batch[i].numpy()
            x = utils.compute_mAP(
                boxes,
                class_ids,
                pred_boxes,
                pred_class_ids,
                pred_scores,
                verbose=verbose)
            APs.append(x)
        # t2 = time.time()
        # print('\nevaluation time: %fs\n' %(t2-t1))
        mAP = np.array(APs).mean()
        print('\nDataset mAP:', mAP)
        output = np.stack([image_names, APs, times], axis=1)
        return mAP, output
| 2.6875 | 3 |
quasitools/commands/cmd_aacoverage.py | hivdb/quasitools | 0 | 12762256 | <gh_stars>0
"""
Copyright Government of Canada 2017
Written by: <NAME>, National Microbiology Laboratory,
Public Health Agency of Canada
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this work except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import click
from quasitools.cli import pass_context
from quasitools.aa_census import AACensus
from quasitools.parsers.reference_parser import parse_references_from_fasta
from quasitools.parsers.mapped_read_parser import parse_mapped_reads_from_bam
from quasitools.parsers.genes_file_parser import parse_genes_file
@click.command('aa_coverage',
               short_help='Builds an amino acid census and returns its '
               'coverage.')
@click.argument('bam', required=True,
                type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument('reference', required=True,
                type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument('genes_file', required=True,
                type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.option('-o', '--output', type=click.File('w'))
@pass_context
def cli(ctx, bam, reference, genes_file, output):
    """Build an amino-acid census from BAM/reference/genes inputs and emit
    its coverage, either to the given output file or to stdout."""
    references = parse_references_from_fasta(reference)

    # One MappedReadCollection per reference sequence in the FASTA.
    mapped_reads = [parse_mapped_reads_from_bam(ref, bam)
                    for ref in references]

    # Genes are defined relative to the first reference sequence.
    genes = parse_genes_file(genes_file, references[0].name)

    # Collect the set of reading frames the genes occupy.
    frames = {genes[gene]["frame"] for gene in genes}

    # Build the census and report its coverage.
    census = AACensus(reference, mapped_reads, genes, frames)
    coverage = census.coverage(frames)
    if output:
        output.write(coverage)
        output.close()
    else:
        click.echo(coverage)
| 1.757813 | 2 |
scrape_mars.py | Hong-webport/Georgia-Tech-Challenge-WebScraping | 0 | 12762257 | <reponame>Hong-webport/Georgia-Tech-Challenge-WebScraping
from bs4 import BeautifulSoup
import requests
from splinter import Browser
import pandas as pd
import time
# app = Flask(__name__)
def init_browser():
    # Launch a visible (non-headless) Chrome via Splinter; requires
    # chromedriver.exe next to the script or on PATH (Windows-style name).
    executable_path = {'executable_path': 'chromedriver.exe'}
    return Browser('chrome', **executable_path, headless=False)
def scrape():
    """Scrape NASA Mars pages and return a dict with the latest news,
    featured image url, facts table (HTML) and hemisphere image urls.

    Drives a live browser; results depend on the current page contents.
    """
    browser = init_browser()
    # --- Latest news title and teaser from the NASA Mars news feed ---
    url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
    browser.visit(url)
    browser.is_element_present_by_css('.content_title', wait_time=1000) # using wait_time
    time.sleep(1)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    # news_title = soup.find('div', class_='list_text').find('div', class_='content_title').text
    news_title_contatiner = soup.find('div', class_='list_text')
    news_title =news_title_contatiner.find('div', class_='content_title').text
    news_p = soup.find('div', class_='list_text').find('div', class_='article_teaser_body').text
    # news_p
    # --- Featured image: extracted from an inline CSS background-image ---
    sec_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    browser.visit(sec_url)
    browser.is_element_present_by_css('.carousel_item', wait_time=1000) # using wait_time
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    #the link that we have came in <article alt="Daybreak at Gale Crater" class="carousel_item" style="background-image: url('/spaceimages/images/wallpaper/PIA14293-1920x1200.jpg');">
    featured_image_url = soup.find('article', class_='carousel_item')['style']
    #the result of ['style'] is "background-image: url('/spaceimages/images/wallpaper/PIA14293-1920x1200.jpg');"
    featured_image_url =featured_image_url.replace('background-image: url(', '').replace(');', '')
    # "'/spaceimages/images/wallpaper/PIA16021-1920x1200.jpg'" is the result of the replace function
    featured_image_url=featured_image_url.replace("'","")
    # '/spaceimages/images/wallpaper/PIA16021-1920x1200.jpg'is the string result we want
    featured_image_url='https://www.jpl.nasa.gov/'+featured_image_url
    #'https://www.jpl.nasa.gov/' is the name of the website assuming the url is correct it gaves the correct after test it out
    # featured_image_url
    # mars Fact
    # pandas read_html returns all tables on the page; the first is the facts table.
    thr_url = 'https://space-facts.com/mars/'
    tables = pd.read_html(thr_url)
    mars_df=tables[0]
    # mars_df
    # --- Hemisphere images: click through each of the 4 product links ---
    frt_url='https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    # browser.is_element_present_by_css('.itemLink product-item', wait_time=10) # using wait_time
    # html = browser.html
    # soup = BeautifulSoup(html, 'html.parser')
    # # .find bring only the FIRST RESULT. .Find_all bring out a list
    # imgpage_url = soup.find_all('a', class_='itemLink product-item')
    img_ls=[]
    title_ls=[]
    hemisphere_image_urls=[]
    for i in range(0, 4):
        # to reinitialize similar to y=0
        # Revisit the results page each iteration so the link list is fresh.
        browser.visit(frt_url)
        mars_clicks=browser.find_by_css('a.product-item h3', wait_time=1000) # using wait_time
        mars_clicks[i].click()
        html = browser.html
        soup = BeautifulSoup(html, 'html.parser')
        titles_hem =soup.find('h2', class_='title').text
        title_ls.append(titles_hem)
        imgpage_url =soup.find('img', class_='wide-image')['src']
        img_ls.append('https://astrogeology.usgs.gov'+imgpage_url)
        mars_dict = dict(title=title_ls[i], img_url=img_ls[i])
        hemisphere_image_urls.append(mars_dict)
    print(hemisphere_image_urls)
    # Assemble the payload consumed by the Flask/Mongo front end.
    mars_datatalists ={}
    mars_datatalists = {"news_title": news_title,"news_content":news_p,
                        "image_url":featured_image_url,
                        "mars_table":mars_df.to_html(classes = 'table table-striped', index=False, index_names=False),
                        "hemisphere_image_urls": hemisphere_image_urls}
    return mars_datatalists
# print(mars_datatalists)
# from flask_pymongo import PyMongo
# import scrape_craigslist
# @app.route("/")
# def index():
# listings = mongo.db.listings.find_one()
# return render_template("index.html", listings=listings)
# # @app.route("/scrape")
# # def scrap():
# # listings = mongo.db.listings
# # listings_data = scrape_craigslist.scrape()
# # listings.update({}, listings_data, upsert=True)
# # return redirect("/", code=302)
# if __name__ == "__main__":
# app.run(debug=True) | 3.109375 | 3 |
datasets/dataloader.py | tienthegainz/Object_Detection_Model_Zoo_PyTorch | 0 | 12762258 | <gh_stars>0
from __future__ import print_function, division
import sys
import os
import torch
import numpy as np
import random
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
# import skimage.io
# import skimage.transform
# import skimage.color
# import skimage
import cv2
import traceback
# VOC
from PIL import Image
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
# Object-class names for this VOC-style dataset; tuple index is the
# contiguous 0-based label id.
DATASET_CLASSES = ("keystone", "oval", "round", "taper")
# Reverse lookup: class name -> 0-based label id.
CLASSES_TO_IDS = dict(zip(DATASET_CLASSES, range(len(DATASET_CLASSES))))
def get_list_ids(path):
    """
    path: image_path or xml_path
    output: list of [image, xml] item name

    Collects each directory entry's name with its last extension removed;
    entries without a '.' are reported and skipped.
    """
    ids = []
    for entry in os.listdir(path):
        stem, sep, _ext = entry.rpartition('.')
        if sep:
            ids.append(stem)
        else:
            print("Skip ", entry)
    return ids
class VOCDataset(Dataset):
    """
    VOC dataset: images plus one Pascal-VOC XML annotation file per image.

    Expects <root>/train_image and <root>/train_xml (the 'val' mode
    currently also reads the training directories — see comment below).
    """

    def __init__(self, root, mode='train', transform=None):
        """
        root: dataset root directory.
        mode: 'train' or 'val'.
        transform: optional callable applied to each {'img', 'annot'} sample.
        """
        self.root = root
        self.transform = transform
        if mode == 'train':
            self._annopath = os.path.join(self.root, 'train_xml', '%s.xml')
            self._imgpath = os.path.join(self.root, 'train_image', '%s')
            self.image_ids = get_list_ids(
                os.path.join(self.root, 'train_image'))
        elif mode == 'val':
            # NOTE(review): 'val' deliberately falls back to the training
            # split here (val directories are commented out) — confirm this
            # is still intended.
            # self._annopath = os.path.join(self.root, 'val_xml', '%s.xml')
            # self._imgpath = os.path.join(self.root, 'val_image', '%s')
            # self.image_ids = get_list_ids(os.path.join(self.root, 'val_image'))
            self._annopath = os.path.join(self.root, 'train_xml', '%s.xml')
            self._imgpath = os.path.join(self.root, 'train_image', '%s')
            self.image_ids = get_list_ids(
                os.path.join(self.root, 'train_image'))
        else:
            print('%s not supported. Exitting\n' % mode)
            exit(-1)

    def __getitem__(self, idx):
        """Return {'img', 'annot'} for index idx, transformed if configured."""
        img = self.load_image(idx)
        annot = self.load_annotations(idx)
        sample = {'img': img, 'annot': annot}
        if self.transform:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        return len(self.image_ids)

    def load_image(self, idx):
        """Load image idx as an RGB float array scaled to [0, 1].

        The image filename extension is taken from the XML's <filename> tag.
        Returns None (implicitly) if loading fails.
        """
        try:
            img_id = self.image_ids[idx]
            xml_name = self._annopath % img_id
            anno_file = ET.parse(xml_name).getroot()
            file_postfix = anno_file.find('./filename').text.split('.')[-1]
            image_name = img_id + '.' + file_postfix
            image = Image.open(self._imgpath % image_name).convert('RGB')
            return np.array(image)/255.00
        except Exception as e:
            print("Err image: {} - idx: {}".format(self.image_ids[idx], idx))
            traceback.print_exc()

    def load_annotations(self, idx):
        """Parse the XML for image idx into [[xmin, ymin, xmax, ymax, label], ...].

        Objects whose class is not in DATASET_CLASSES are skipped.
        """
        img_id = self.image_ids[idx]
        xml_name = self._annopath % img_id
        anno_file = ET.parse(xml_name).getroot()
        annot = []
        for obj in anno_file.iter('object'):
            name = obj.find('name').text.lower().strip()
            if name not in DATASET_CLASSES:
                continue
            bbox = obj.find('bndbox')
            pts = ['xmin', 'ymin', 'xmax', 'ymax']
            bndbox = []
            for i, pt in enumerate(pts):
                cur_pt = float(bbox.find(pt).text)
                bndbox.append(cur_pt)
            label_idx = CLASSES_TO_IDS[name]
            bndbox.append(label_idx)
            annot.append(bndbox)  # [xmin, ymin, xmax, ymax, label_ind]
        return np.array(annot)  # [[xmin, ymin, xmax, ymax, label_ind], ... ]

    def image_aspect_ratio(self, image_index):
        """Return width/height for image image_index (used by the sampler).

        Fix: the original body referenced undefined names `idx` and `name`
        (NameError on every call); it now uses `image_index` and the parsed
        XML path, mirroring load_image().
        """
        img_id = self.image_ids[image_index]
        xml_name = self._annopath % img_id
        anno_file = ET.parse(xml_name).getroot()
        file_postfix = anno_file.find('./filename').text.split('.')[-1]
        image_name = img_id + '.' + file_postfix
        image = Image.open(self._imgpath % image_name).convert('RGB')
        return float(image.width) / float(image.height)

    def num_classes(self):
        return len(DATASET_CLASSES)

    def label_to_name(self, label):
        return DATASET_CLASSES[label]
def collater(data):
    """Collate a list of {'img', 'annot', 'scale'} samples into one batch.

    Images are zero-padded to the largest height/width in the batch and
    returned channels-first; annotations are padded with -1 rows to the
    largest per-image count.

    Fix: removed a redundant nested `if max_num_annots > 0` check.
    """
    imgs = [s['img'] for s in data]
    annots = [s['annot'] for s in data]
    scales = [s['scale'] for s in data]

    batch_size = len(imgs)
    max_width = max(int(img.shape[0]) for img in imgs)
    max_height = max(int(img.shape[1]) for img in imgs)

    # Pad each image into the top-left corner of a zeroed canvas.
    padded_imgs = torch.zeros(batch_size, max_width, max_height, 3)
    for i, img in enumerate(imgs):
        padded_imgs[i, :int(img.shape[0]), :int(img.shape[1]), :] = img

    # Pad annotations with -1 rows; -1 marks "no object" downstream.
    max_num_annots = max(annot.shape[0] for annot in annots)
    if max_num_annots > 0:
        annot_padded = torch.ones((len(annots), max_num_annots, 5)) * -1
        for idx, annot in enumerate(annots):
            if annot.shape[0] > 0:
                annot_padded[idx, :annot.shape[0], :] = annot
    else:
        annot_padded = torch.ones((len(annots), 1, 5)) * -1

    # NHWC -> NCHW for the network.
    padded_imgs = padded_imgs.permute(0, 3, 1, 2)

    return {'img': padded_imgs, 'annot': annot_padded, 'scale': scales}
class Resizer(object):
    """Rescale a sample so its longer side equals common_size, then
    zero-pad to a common_size x common_size square; boxes are scaled too."""

    def __call__(self, sample, common_size=512):
        image, annots = sample['img'], sample['annot']
        height, width, _ = image.shape

        # Scale so the longer side becomes exactly common_size.
        if height > width:
            scale = common_size / height
            new_h, new_w = common_size, int(width * scale)
        else:
            scale = common_size / width
            new_h, new_w = int(height * scale), common_size

        image = cv2.resize(image, (new_w, new_h))

        # Paste the resized image into the top-left of a square canvas.
        canvas = np.zeros((common_size, common_size, 3))
        canvas[0:new_h, 0:new_w] = image

        # Box coordinates scale with the image.
        if annots.shape[0] != 0:
            annots[:, :4] *= scale

        return {'img': torch.from_numpy(canvas),
                'annot': torch.from_numpy(annots),
                'scale': scale}
class Augmenter(object):
    """Randomly mirror a sample horizontally with probability flip_x."""

    def __call__(self, sample, flip_x=0.5):
        # Single draw decides the flip; no flip -> sample passes through.
        if np.random.rand() >= flip_x:
            return sample

        image, annots = sample['img'], sample['annot']
        image = image[:, ::-1, :]

        rows, cols, channels = image.shape
        if annots.shape[0] != 0:
            # Mirror x coords: new_x1 = W - old_x2, new_x2 = W - old_x1.
            x1 = annots[:, 0].copy()
            x2 = annots[:, 2].copy()
            annots[:, 0] = cols - x2
            annots[:, 2] = cols - x1

        return {'img': image, 'annot': annots}
class Normalizer(object):
    """Standardize image channels with the usual ImageNet mean/std."""

    def __init__(self):
        # Shapes (1, 1, 3) so they broadcast over (H, W, 3) images.
        self.mean = np.array([[[0.485, 0.456, 0.406]]])
        self.std = np.array([[[0.229, 0.224, 0.225]]])

    def __call__(self, sample):
        image, annots = sample['img'], sample['annot']
        standardized = (image.astype(np.float32) - self.mean) / self.std
        return {'img': standardized, 'annot': annots}
class UnNormalizer(object):
    """Invert Normalizer: per channel, t = t * std + mean (in place)."""

    def __init__(self, mean=None, std=None):
        # Fix: compare against None with `is`, not `==` (avoids surprising
        # __eq__ behaviour and is the idiomatic check).
        self.mean = [0.485, 0.456, 0.406] if mean is None else mean
        self.std = [0.229, 0.224, 0.225] if std is None else std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        Returns:
            Tensor: Normalized image (same tensor, modified in place).
        """
        for t, m, s in zip(tensor, self.mean, self.std):
            t.mul_(s).add_(m)
        return tensor
class AspectRatioBasedSampler(Sampler):
    """Yield batches of dataset indices grouped by image aspect ratio, so
    each batch holds similarly shaped images (less padding in collater)."""

    def __init__(self, data_source, batch_size, drop_last):
        self.data_source = data_source
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.groups = self.group_images()

    def __iter__(self):
        # Shuffle batch order, not batch contents.
        random.shuffle(self.groups)
        for group in self.groups:
            yield group

    def __len__(self):
        n = len(self.data_source)
        if self.drop_last:
            return n // self.batch_size
        return (n + self.batch_size - 1) // self.batch_size

    def group_images(self):
        # determine the order of the images: sort indices by aspect ratio.
        order = sorted(range(len(self.data_source)),
                       key=self.data_source.image_aspect_ratio)
        # divide into groups, one group = one batch; indices wrap around so
        # the final batch is always full.
        return [[order[idx % len(order)]
                 for idx in range(start, start + self.batch_size)]
                for start in range(0, len(order), self.batch_size)]
if __name__ == "__main__":
    # Smoke test: load the first two samples from a local 'test_data'
    # VOC-style directory and print their shapes/annotations.
    dataset_train = VOCDataset('test_data',
                               transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
    # dataset_train = VOCDataset('test_data')
    for i in range(2):
        data = dataset_train.__getitem__(i)
        print(data['img'].shape)
        print(data['annot'])
| 2.078125 | 2 |
src/plugins/dc/NMDC Parser/main.py | nsk89/nodeforge | 0 | 12762259 | <reponame>nsk89/nodeforge<filename>src/plugins/dc/NMDC Parser/main.py
from nodeforge.PluginUtils import *
from twisted.internet.task import LoopingCall
# Hub login password; the value here is an anonymised placeholder from the
# published copy of the source — supply a real password before use.
password = '<PASSWORD>'
# $MyINFO client tag advertised to the DC hub (client version, mode, hubs,
# slots, connection speed fields).
tag = "http://code.google.com/p/nodeforge/<++ V:0.707,M:A,H:1/0/0,S:4>$ $0.005 $No Email$0$"
class Main(Plugin):
    # NMDC (Direct Connect) protocol parser plugin.  Python 2 source:
    # uses `unicode`, print statements and `except Exception, e`.
    priority = Priority.parser

    def onConnect(self):
        # keepalive message every minute.
        LoopingCall(self.tell, self.nick, 'a').start(60)

    def onData(self, raw):
        '''
        First we will parse the stream and save it to self.context
        Then we will do the login procedure.
        '''
        self.parse(raw)
        # load the parse information here.
        context = self.context
        # start processing
        if context.infotype == 'Lock':
            # NMDC handshake: answer the $Lock challenge, then validate nick.
            # NOTE(review): lock2key is not defined in this file — presumably
            # provided by the PluginUtils star-import; verify.
            lock = context.split[1]
            self.send('$Key '+lock2key(lock))
            self.send('$ValidateNick %s' % self.nick)
        elif context.infotype == 'Hello' and context.split[1] == self.nick:
            self.send('$Version 1,0091')
            self.send('$MyINFO $ALL %s %s' % (self.nick, tag))
            self.send('$GetNickList')
        elif context.infotype == 'GetPass':
            self.send('$MyPass %s' % password)

    def onLoad(self):
        # NMDC messages are pipe-delimited.
        self.core.delimiter = '|'
        if not hasattr(self.core, 'nick'):
            self.nick = 'AnonymousPerson'
        else:
            self.nick = self.core.nick

    def parse(self, raw):
        """
        Perform parsing on a string and save results to self.context.

        Parsing Guide
        what: Either 'chat' for a public chat message or 'info' for anything else.
        infotype: Specifies the kind of message for 'info'.
            Ex. $Exit --> infotype == 'Exit'
        sender: the person that sent the message. None if the server sent it without
            a name.
        msg:
        """
        split = raw.split(' ')
        what = infotype = sender = msg = cmd = args = None
        if raw[0] == '<':
            # Public chat line: "<nick> message ..."
            what = 'chat'
            sender = split[0][1:-1]
            msg = raw[raw.find('>')+1:]
            temp = raw.count(' ')
            if temp == 1:
                cmd = raw.split(' ', 2)[1]
            else:
                temp = raw.split(' ', 3)
                cmd = temp[1]
                args = temp[2]
            reply = lambda msg: self.chat(msg)
            tell = lambda msg: self.tell(sender, msg)
        elif raw[0] == '$':
            # Protocol command, e.g. "$To: ..." for a private message.
            what = 'info'
            infotype = split[0][1:]
            if infotype == 'To:':
                sender = split[3]
                try:
                    msg = ' '.join(split[5:])
                    cmd = split[5]
                    args = ' '.join(split[6:])
                except IndexError:
                    pass
                reply = lambda msg: self.tell(sender, msg)
                tell = lambda msg: self.tell(sender, msg)
        # copy the current parse context into a variable
        self.context = Context(locals())

    def send(self, string):
        """
        TODO: thread safe
        """
        if isinstance(string, unicode):
            string = string.encode('raw_unicode_escape')
        # NOTE(review): these replace() calls look garbled — replacing '|'
        # with '|' is a no-op and the second call would corrupt every pipe.
        # The original likely escaped the NMDC reserved characters '|' and
        # '$' as '&#124;' / '&#36;' — verify against the upstream source.
        string = string.replace('|','|').replace('|','$')
        try:
            self.core.sendLine(string)
            print '>>> %s' % string
        except Exception, e:
            print e

    def tell(self, target, msg):
        # Private message via the NMDC $To: command.
        self.send('$To: %s From: %s $<%s> %s' % (target, self.nick, self.nick, msg))

    def chat(self, msg):
        # Public chat message.
        self.send('<%s> %s'% (self.nick, msg))

    def emote(self, msg):
        # NOTE(review): '+me' looks like a typo for the usual '/me' or '!me'
        # emote prefix — confirm against the hub software.
        self.send('<%s> +me %s' % (self.nick, msg) )
class Context(object):
    """Attribute-style view over a dictionary.

    Turns ``d['key']`` access into ``ctx.key`` access; used by the parser
    to snapshot its local variables (``Context(locals())``).
    """

    def __init__(self, dic):
        for key, value in dic.items():
            setattr(self, key, value)
# WARNING This algorithm doesn't work with YnHub 1.036 all the time.
import array
def lock2key(lock):
    """Generate the $Key response to an NMDC $Lock challenge.

    Borrowed from free sourcecode online.  WARNING: this algorithm doesn't
    work with YnHub 1.036 all the time (see comment above in the file).
    Python 2 only (``xrange``, ``array('B', str)``).
    """
    lock = array.array('B', lock)  # byte values of the lock string
    ll = len(lock)
    key = list('0'*ll)
    # Each key byte (except the first) is the XOR of adjacent lock bytes.
    for n in xrange(1,ll):
        key[n] = lock[n]^lock[n-1]
    # First key byte mixes the first and last two lock bytes with a magic 5.
    key[0] = lock[0] ^ lock[-1] ^ lock[-2] ^ 5
    # Swap the high and low nibbles of every byte.
    for n in xrange(ll):
        key[n] = ((key[n] << 4) | (key[n] >> 4)) & 255
    result = ""
    for c in key:
        # Protocol-reserved byte values must be escaped as /%DCNnnn%/.
        if c in (0, 5, 36, 96, 124, 126):
            result += "/%%DCN%.3i%%/" % c
        else:
            result += chr(c)
    return result
lib/googlecloudsdk/third_party/apis/firestore/v1beta1/firestore_v1beta1_client.py | kustodian/google-cloud-sdk | 0 | 12762260 | """Generated client library for firestore version v1beta1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.firestore.v1beta1 import firestore_v1beta1_messages as messages
class FirestoreV1beta1(base_api.BaseApiClient):
"""Generated client library for service firestore version v1beta1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://firestore.googleapis.com/'
_PACKAGE = u'firestore'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/datastore']
_VERSION = u'v1beta1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'FirestoreV1beta1'
_URL_VERSION = u'v1beta1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new firestore handle."""
url = url or self.BASE_URL
super(FirestoreV1beta1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_databases_documents = self.ProjectsDatabasesDocumentsService(self)
self.projects_databases_indexes = self.ProjectsDatabasesIndexesService(self)
self.projects_databases = self.ProjectsDatabasesService(self)
self.projects = self.ProjectsService(self)
class ProjectsDatabasesDocumentsService(base_api.BaseApiService):
"""Service class for the projects_databases_documents resource."""
_NAME = u'projects_databases_documents'
def __init__(self, client):
super(FirestoreV1beta1.ProjectsDatabasesDocumentsService, self).__init__(client)
self._upload_configs = {
}
def BatchGet(self, request, global_params=None):
r"""Gets multiple documents.
Documents returned by this method are not guaranteed to be returned in the
same order that they were requested.
Args:
request: (FirestoreProjectsDatabasesDocumentsBatchGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BatchGetDocumentsResponse) The response message.
"""
config = self.GetMethodConfig('BatchGet')
return self._RunMethod(
config, request, global_params=global_params)
BatchGet.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:batchGet',
http_method=u'POST',
method_id=u'firestore.projects.databases.documents.batchGet',
ordered_params=[u'database'],
path_params=[u'database'],
query_params=[],
relative_path=u'v1beta1/{+database}/documents:batchGet',
request_field=u'batchGetDocumentsRequest',
request_type_name=u'FirestoreProjectsDatabasesDocumentsBatchGetRequest',
response_type_name=u'BatchGetDocumentsResponse',
supports_download=False,
)
def BeginTransaction(self, request, global_params=None):
r"""Starts a new transaction.
Args:
request: (FirestoreProjectsDatabasesDocumentsBeginTransactionRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BeginTransactionResponse) The response message.
"""
config = self.GetMethodConfig('BeginTransaction')
return self._RunMethod(
config, request, global_params=global_params)
BeginTransaction.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:beginTransaction',
http_method=u'POST',
method_id=u'firestore.projects.databases.documents.beginTransaction',
ordered_params=[u'database'],
path_params=[u'database'],
query_params=[],
relative_path=u'v1beta1/{+database}/documents:beginTransaction',
request_field=u'beginTransactionRequest',
request_type_name=u'FirestoreProjectsDatabasesDocumentsBeginTransactionRequest',
response_type_name=u'BeginTransactionResponse',
supports_download=False,
)
def Commit(self, request, global_params=None):
r"""Commits a transaction, while optionally updating documents.
Args:
request: (FirestoreProjectsDatabasesDocumentsCommitRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(CommitResponse) The response message.
"""
config = self.GetMethodConfig('Commit')
return self._RunMethod(
config, request, global_params=global_params)
Commit.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:commit',
http_method=u'POST',
method_id=u'firestore.projects.databases.documents.commit',
ordered_params=[u'database'],
path_params=[u'database'],
query_params=[],
relative_path=u'v1beta1/{+database}/documents:commit',
request_field=u'commitRequest',
request_type_name=u'FirestoreProjectsDatabasesDocumentsCommitRequest',
response_type_name=u'CommitResponse',
supports_download=False,
)
def CreateDocument(self, request, global_params=None):
r"""Creates a new document.
Args:
request: (FirestoreProjectsDatabasesDocumentsCreateDocumentRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Document) The response message.
"""
config = self.GetMethodConfig('CreateDocument')
return self._RunMethod(
config, request, global_params=global_params)
CreateDocument.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{collectionId}',
http_method=u'POST',
method_id=u'firestore.projects.databases.documents.createDocument',
ordered_params=[u'parent', u'collectionId'],
path_params=[u'collectionId', u'parent'],
query_params=[u'documentId', u'mask_fieldPaths'],
relative_path=u'v1beta1/{+parent}/{collectionId}',
request_field=u'document',
request_type_name=u'FirestoreProjectsDatabasesDocumentsCreateDocumentRequest',
response_type_name=u'Document',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a document.
Args:
request: (FirestoreProjectsDatabasesDocumentsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
http_method=u'DELETE',
method_id=u'firestore.projects.databases.documents.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'currentDocument_exists', u'currentDocument_updateTime'],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'FirestoreProjectsDatabasesDocumentsDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets a single document.
Args:
request: (FirestoreProjectsDatabasesDocumentsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Document) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
http_method=u'GET',
method_id=u'firestore.projects.databases.documents.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'mask_fieldPaths', u'readTime', u'transaction'],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'FirestoreProjectsDatabasesDocumentsGetRequest',
response_type_name=u'Document',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists documents.
Args:
request: (FirestoreProjectsDatabasesDocumentsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDocumentsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}/{collectionId}',
http_method=u'GET',
method_id=u'firestore.projects.databases.documents.list',
ordered_params=[u'parent', u'collectionId'],
path_params=[u'collectionId', u'parent'],
query_params=[u'mask_fieldPaths', u'orderBy', u'pageSize', u'pageToken', u'readTime', u'showMissing', u'transaction'],
relative_path=u'v1beta1/{+parent}/{collectionId}',
request_field='',
request_type_name=u'FirestoreProjectsDatabasesDocumentsListRequest',
response_type_name=u'ListDocumentsResponse',
supports_download=False,
)
def ListCollectionIds(self, request, global_params=None):
r"""Lists all the collection IDs underneath a document.
Args:
request: (FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListCollectionIdsResponse) The response message.
"""
config = self.GetMethodConfig('ListCollectionIds')
return self._RunMethod(
config, request, global_params=global_params)
ListCollectionIds.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:listCollectionIds',
http_method=u'POST',
method_id=u'firestore.projects.databases.documents.listCollectionIds',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[],
relative_path=u'v1beta1/{+parent}:listCollectionIds',
request_field=u'listCollectionIdsRequest',
request_type_name=u'FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest',
response_type_name=u'ListCollectionIdsResponse',
supports_download=False,
)
def Listen(self, request, global_params=None):
r"""Listens to changes.
Args:
request: (FirestoreProjectsDatabasesDocumentsListenRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListenResponse) The response message.
"""
config = self.GetMethodConfig('Listen')
return self._RunMethod(
config, request, global_params=global_params)
Listen.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:listen',
http_method=u'POST',
method_id=u'firestore.projects.databases.documents.listen',
ordered_params=[u'database'],
path_params=[u'database'],
query_params=[],
relative_path=u'v1beta1/{+database}/documents:listen',
request_field=u'listenRequest',
request_type_name=u'FirestoreProjectsDatabasesDocumentsListenRequest',
response_type_name=u'ListenResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates or inserts a document.
Args:
request: (FirestoreProjectsDatabasesDocumentsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Document) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
http_method=u'PATCH',
method_id=u'firestore.projects.databases.documents.patch',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'currentDocument_exists', u'currentDocument_updateTime', u'mask_fieldPaths', u'updateMask_fieldPaths'],
relative_path=u'v1beta1/{+name}',
request_field=u'document',
request_type_name=u'FirestoreProjectsDatabasesDocumentsPatchRequest',
response_type_name=u'Document',
supports_download=False,
)
def Rollback(self, request, global_params=None):
r"""Rolls back a transaction.
Args:
request: (FirestoreProjectsDatabasesDocumentsRollbackRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Rollback')
return self._RunMethod(
config, request, global_params=global_params)
Rollback.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:rollback',
http_method=u'POST',
method_id=u'firestore.projects.databases.documents.rollback',
ordered_params=[u'database'],
path_params=[u'database'],
query_params=[],
relative_path=u'v1beta1/{+database}/documents:rollback',
request_field=u'rollbackRequest',
request_type_name=u'FirestoreProjectsDatabasesDocumentsRollbackRequest',
response_type_name=u'Empty',
supports_download=False,
)
def RunQuery(self, request, global_params=None):
r"""Runs a query.
Args:
request: (FirestoreProjectsDatabasesDocumentsRunQueryRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(RunQueryResponse) The response message.
"""
config = self.GetMethodConfig('RunQuery')
return self._RunMethod(
config, request, global_params=global_params)
RunQuery.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:runQuery',
http_method=u'POST',
method_id=u'firestore.projects.databases.documents.runQuery',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[],
relative_path=u'v1beta1/{+parent}:runQuery',
request_field=u'runQueryRequest',
request_type_name=u'FirestoreProjectsDatabasesDocumentsRunQueryRequest',
response_type_name=u'RunQueryResponse',
supports_download=False,
)
def Write(self, request, global_params=None):
r"""Streams batches of document updates and deletes, in order.
Args:
request: (FirestoreProjectsDatabasesDocumentsWriteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(WriteResponse) The response message.
"""
config = self.GetMethodConfig('Write')
return self._RunMethod(
config, request, global_params=global_params)
Write.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:write',
http_method=u'POST',
method_id=u'firestore.projects.databases.documents.write',
ordered_params=[u'database'],
path_params=[u'database'],
query_params=[],
relative_path=u'v1beta1/{+database}/documents:write',
request_field=u'writeRequest',
request_type_name=u'FirestoreProjectsDatabasesDocumentsWriteRequest',
response_type_name=u'WriteResponse',
supports_download=False,
)
class ProjectsDatabasesIndexesService(base_api.BaseApiService):
"""Service class for the projects_databases_indexes resource."""
_NAME = u'projects_databases_indexes'
def __init__(self, client):
super(FirestoreV1beta1.ProjectsDatabasesIndexesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates the specified index.
A newly created index's initial state is `CREATING`. On completion of the
returned google.longrunning.Operation, the state will be `READY`.
If the index already exists, the call will return an `ALREADY_EXISTS`
status.
During creation, the process could result in an error, in which case the
index will move to the `ERROR` state. The process can be recovered by
fixing the data that caused the error, removing the index with
delete, then re-creating the index with
create.
Indexes with a single field cannot be created.
Args:
request: (FirestoreProjectsDatabasesIndexesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes',
http_method=u'POST',
method_id=u'firestore.projects.databases.indexes.create',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[],
relative_path=u'v1beta1/{+parent}/indexes',
request_field=u'googleFirestoreAdminV1beta1Index',
request_type_name=u'FirestoreProjectsDatabasesIndexesCreateRequest',
response_type_name=u'GoogleLongrunningOperation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes an index.
Args:
request: (FirestoreProjectsDatabasesIndexesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes/{indexesId}',
http_method=u'DELETE',
method_id=u'firestore.projects.databases.indexes.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'FirestoreProjectsDatabasesIndexesDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets an index.
Args:
request: (FirestoreProjectsDatabasesIndexesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleFirestoreAdminV1beta1Index) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes/{indexesId}',
http_method=u'GET',
method_id=u'firestore.projects.databases.indexes.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'FirestoreProjectsDatabasesIndexesGetRequest',
response_type_name=u'GoogleFirestoreAdminV1beta1Index',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists the indexes that match the specified filters.
Args:
request: (FirestoreProjectsDatabasesIndexesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleFirestoreAdminV1beta1ListIndexesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes',
http_method=u'GET',
method_id=u'firestore.projects.databases.indexes.list',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'filter', u'pageSize', u'pageToken'],
relative_path=u'v1beta1/{+parent}/indexes',
request_field='',
request_type_name=u'FirestoreProjectsDatabasesIndexesListRequest',
response_type_name=u'GoogleFirestoreAdminV1beta1ListIndexesResponse',
supports_download=False,
)
class ProjectsDatabasesService(base_api.BaseApiService):
"""Service class for the projects_databases resource."""
_NAME = u'projects_databases'
def __init__(self, client):
super(FirestoreV1beta1.ProjectsDatabasesService, self).__init__(client)
self._upload_configs = {
}
def ExportDocuments(self, request, global_params=None):
r"""Exports a copy of all or a subset of documents from Google Cloud Firestore.
to another storage system, such as Google Cloud Storage. Recent updates to
documents may not be reflected in the export. The export occurs in the
background and its progress can be monitored and managed via the
Operation resource that is created. The output of an export may only be
used once the associated operation is done. If an export operation is
cancelled before completion it may leave partial data behind in Google
Cloud Storage.
Args:
request: (FirestoreProjectsDatabasesExportDocumentsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('ExportDocuments')
return self._RunMethod(
config, request, global_params=global_params)
ExportDocuments.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}:exportDocuments',
http_method=u'POST',
method_id=u'firestore.projects.databases.exportDocuments',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}:exportDocuments',
request_field=u'googleFirestoreAdminV1beta1ExportDocumentsRequest',
request_type_name=u'FirestoreProjectsDatabasesExportDocumentsRequest',
response_type_name=u'GoogleLongrunningOperation',
supports_download=False,
)
def ImportDocuments(self, request, global_params=None):
r"""Imports documents into Google Cloud Firestore. Existing documents with the.
same name are overwritten. The import occurs in the background and its
progress can be monitored and managed via the Operation resource that is
created. If an ImportDocuments operation is cancelled, it is possible
that a subset of the data has already been imported to Cloud Firestore.
Args:
request: (FirestoreProjectsDatabasesImportDocumentsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('ImportDocuments')
return self._RunMethod(
config, request, global_params=global_params)
ImportDocuments.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}:importDocuments',
http_method=u'POST',
method_id=u'firestore.projects.databases.importDocuments',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}:importDocuments',
request_field=u'googleFirestoreAdminV1beta1ImportDocumentsRequest',
request_type_name=u'FirestoreProjectsDatabasesImportDocumentsRequest',
response_type_name=u'GoogleLongrunningOperation',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = u'projects'
def __init__(self, client):
super(FirestoreV1beta1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
| 1.921875 | 2 |
tests/bindings/test_string.py | DARSHIT006/godot-python | 0 | 12762261 | <filename>tests/bindings/test_string.py<gh_stars>0
import sys
import pytest
from godot import GDString
def test_base():
    """Smoke-test assorted GDString operations."""
    assert GDString().empty()
    # Todo later: GDString creation from GD types: Vector2/3, Transform, Plane, Quat, AABB, Color, ...
    one_two = GDString("12")
    assert one_two.begins_with(GDString("1"))
    assert one_two.ends_with(GDString("2"))
    assert one_two.bigrams().size() == 1
    assert GDString("\ta").dedent() == GDString("a")
    mutated = GDString("abc")
    mutated.erase(1, 1)
    assert mutated == GDString("ac")
    assert GDString("abc").capitalize() == GDString("Abc")
    assert GDString("abc").find(GDString("b")) == 1
    assert GDString("abc").length() == 3
    assert GDString("file.ext").get_extension() == GDString("ext")
    assert GDString("127.0.0.1").is_valid_ip_address()
    assert not GDString("127.0.0.1.xxx").is_valid_ip_address()
    assert GDString("3.14").to_float() == pytest.approx(3.14)
    assert GDString("42").to_int() == 42
@pytest.mark.parametrize("char", ["e", "é", "€", "蛇", "🐍"])
def test_unicode(char):
    """GDString must round-trip arbitrary unicode characters.

    Godot stores strings as UCS2 (16-bit) on Windows and UCS4 elsewhere,
    so on Windows only characters outside the Basic Multilingual Plane
    (code point > 0xFFFF, e.g. emoji) are unrepresentable.
    """
    # Bug fix: the old check skipped any char whose UTF-8 encoding exceeded
    # 2 bytes, which wrongly skipped BMP characters such as "€" (U+20AC)
    # and "蛇" (U+86C7) that UCS2 can represent.  UCS2 covers exactly the
    # code points <= 0xFFFF.
    if ord(char) > 0xFFFF and sys.platform == "win32":
        pytest.skip("Windows only supports UCS2")
    gdchar = GDString(char)
    assert str(gdchar) == char
    assert gdchar.length() == len(char)
vcweb/core/subjectpool/urls.py | Takato0120/social_ecological_system | 3 | 12762262 | from django.conf.urls import url
from .views import (subjectpool_index, manage_experiment_session, get_session_events, manage_participant_attendance,
send_invitations, get_invitations_count, invite_email_preview, experiment_session_signup,
submit_experiment_session_signup, cancel_experiment_session_signup, download_experiment_session,
add_participant)
# URL routes for the subject-pool app (sessions, invitations, signups).
urlpatterns = [
    # Dashboard.
    url(r'^$', subjectpool_index, name='subjectpool_index'),
    # Experiment-session management (the regex allows a negative pk).
    url(r'^session/manage/(?P<pk>\-?\d+)$', manage_experiment_session, name='manage_experiment_session'),
    url(r'^session/detail/event/(?P<pk>\d+)$', manage_participant_attendance, name='session_event_detail'),
    url(r'^session/(?P<pk>\d+)/participant/add/$', add_participant, name='add_participant'),
    # Invitation workflow.
    url(r'^session/invite$', send_invitations, name='send_invites'),
    url(r'^session/invite/count$', get_invitations_count, name='get_invitations_count'),
    url(r'^session/email-preview$', invite_email_preview, name='invite_email_preview'),
    url(r'^session/(?P<pk>\d+)/download/$', download_experiment_session, name='download_experiment_session'),
    url(r'^session/events$', get_session_events, name='session_events'),
    # Participant signup / cancellation.
    url(r'^signup/$', experiment_session_signup, name='experiment_session_signup'),
    url(r'^signup/submit/$', submit_experiment_session_signup, name='submit_experiment_session_signup'),
    url(r'^signup/cancel/$', cancel_experiment_session_signup, name='cancel_experiment_session_signup'),
]
| 1.828125 | 2 |
tools.py | Mart100/Mork | 0 | 12762263 | import math
from vectors import Vector2
from vectors import Vector3
def get_car_facing_vector(car):
    """2D vector of the direction the car's nose points (pitch/yaw only)."""
    pitch = float(car.rotation.pitch)
    yaw = float(car.rotation.yaw)
    cos_pitch = math.cos(pitch)
    return Vector2(cos_pitch * math.cos(yaw), cos_pitch * math.sin(yaw))
def get_own_goal(agent):
    """Location (Vector3) of the goal this bot defends."""
    field_info = agent.get_field_info()
    own_team = agent.car.team
    goal = field_info.goals[0]
    if goal.team_num != own_team:
        goal = field_info.goals[1]
    return Vector3(goal.location)
def get_opponents_goal(agent):
    """Location (Vector3) of the goal this bot attacks.

    Fix: removed a leftover unused local (``goal = Vector2(0, 0)``) and the
    redundant ``car`` alias; behavior is unchanged.
    """
    field_info = agent.get_field_info()
    team = 1
    if field_info.goals[team].team_num == agent.car.team:
        team = 0
    return Vector3(field_info.goals[team].location)
def time_needed_for_car(agent, car_to):
    """Seconds for the car to reach `car_to` at its current ground speed."""
    distance = (agent.car.pos - car_to).magnitude()
    # Guard against a stationary car: fall back to a tiny epsilon so the
    # division never raises (same epsilon as before).
    speed = get_xy_speed(agent) or 0.00000000000000001
    return distance / speed
def predict_time_needed_for_car(agent, car_to, speed=1500):
    """Estimate seconds for the car to reach `car_to` at an assumed speed.

    Unlike time_needed_for_car this ignores the current velocity and assumes
    a constant travel speed.

    Improvements: the assumed speed is now a parameter (backward-compatible
    default 1500, the value previously hard-coded), and the unreachable
    ``speed == 0`` guard that followed the constant assignment was removed.
    """
    distance = (agent.car.pos - car_to).magnitude()
    return distance / speed
def own_color(self, packet):
    """Renderer color matching this bot's team (orange or blue)."""
    # get right color
    if packet.game_cars[self.index].team:
        return self.renderer.create_color(255, 255, 127, 80)
    return self.renderer.create_color(255, 22, 138, 255)
def get_xy_speed(agent):
    """Magnitude of the car's velocity projected onto the ground (XY) plane."""
    return Vector3(agent.car.velocity).to_2d().magnitude()
def difference_angles(angle1, angle2):
    """Smallest absolute difference between two angles (radians in, radians out).

    Bug fix: the previous version only wrapped *positive* differences, so it
    was asymmetric -- difference_angles(350deg, 10deg) gave 20deg but
    difference_angles(10deg, 350deg) gave -340deg.  The result is now
    symmetric and always in [0, pi].
    """
    diff = abs(math.degrees(angle1) - math.degrees(angle2)) % 360
    if diff > 180:
        diff = 360 - diff
    return math.radians(diff)
def normalize_angle(angle):
    """Map an angle in degrees onto the range [0, 360)."""
    return angle % 360
def get_car_speed(self, packet):
    # NOTE(review): incomplete stub -- it fetches the car but computes
    # nothing and implicitly returns None.  No caller is visible in this
    # file; either finish it (e.g. a velocity magnitude) or delete it.
    my_car = packet.game_cars[self.index]
def aim_to(agent, to, plus=0):
    """Steer (and, for high targets, boost toward) the point `to`.

    `plus` is an extra bias added to the steering correction.  Mutates
    agent.controller_state and agent.jumps in place; returns nothing.
    """
    car = agent.car
    car_direction = get_car_facing_vector(car)
    magnitude = Vector3(car.pos - to).magnitude()  # NOTE(review): unused
    # Horizontal steering error toward the target.
    steer_correction = car_direction.correction_to(to.to_2d() - car.pos.to_2d())
    # Vertical angle between car and target.
    z_correction = Vector3(car.pos - to).angle('z')
    draw_text(agent, str(math.degrees(z_correction)), 100)  # debug overlay
    # Gain of -5; presumably flips sign to match controller steer
    # convention -- TODO confirm against correction_to's sign.
    steer_correction *= -5
    steer_correction += plus
    # aerial: target well above the car, enough boost, and not dribbling
    if to.z - car.pos.z > 500 and car.boost > 50 and agent.car_status != 'dribble':
        if math.degrees(z_correction) > 10 and to.z - car.pos.z > 500:
            # jump if still on ground (z below ~resting ride height)
            if car.pos.z < 17.1:
                agent.jumps.append(1)
                print(car.pos.x, car.pos.y)  # NOTE(review): debug print
            # enable boost
            agent.controller_state.boost = True
            # sigmoid and correct: clamp pitch input into [-1, 1]
            agent.controller_state.pitch = cap_num((z_correction-car.rotation.pitch)+0.9, -1, 1)
        # if close to going to fly stop boost
        elif math.degrees(z_correction) > 4:
            agent.controller_state.boost = False
    # Drift if needs to steer much
    if abs(steer_correction) > 7:
        agent.controller_state.handbrake = True
    agent.controller_state.steer = cap_num(steer_correction, -1, 1)
def double_jump(self):
    """Queue an immediate jump (1) and a follow-up jump (3); return `self`."""
    self.jumps.extend((1, 3))
    return self
def more_colors(agent, color):
    """Translate a color name into an [r, g, b] list.

    Unrecognized values pass through unchanged.  'own' maps to the bot's
    team color (orange or blue).

    Bug fix: 'black' and 'white' were swapped ('black' returned
    [255, 255, 255] and 'white' returned [0, 0, 0]), which contradicted the
    black()/white() helpers below in this module.
    """
    if color == 'black':
        color = [0, 0, 0]
    elif color == 'white':
        color = [255, 255, 255]
    elif color == 'red':
        color = [255, 0, 0]
    elif color == 'blue':
        color = [0, 0, 255]
    elif color == 'green':
        color = [0, 255, 0]
    elif color == 'own':
        if agent.car.team:
            color = [255, 127, 80]
        else:
            color = [22, 138, 255]
    return color
def draw_text(agent, text, y):
    """Render white debug text at x=0 and the given y, scale 1x1."""
    color = agent.renderer.create_color(255, 255, 255, 255)
    agent.renderer.draw_string_2d(0, y, 1, 1, text, color)
def sigmoid(x):
    """Logistic sigmoid rescaled to (-1, 1); equal to 2/(1+e^-x) - 1, i.e. tanh(x/2).

    Fix: computed in a numerically stable form so large-magnitude negative
    inputs no longer raise OverflowError from math.exp(-x); they saturate
    at -1.0 instead.
    """
    if x >= 0:
        t = math.exp(-x)
        return (1.0 - t) / (1.0 + t)
    t = math.exp(x)
    return (t - 1.0) / (t + 1.0)
def cap_num(x, mini, maxi):
    """Clamp x into the closed interval [mini, maxi]."""
    return max(mini, min(x, maxi))
def black(agent):
    """Opaque black renderer color."""
    return agent.renderer.create_color(255, 0, 0, 0)
def white(agent):
    """Opaque white renderer color."""
    return agent.renderer.create_color(255, 255, 255, 255)
| 3.25 | 3 |
screlp/backend/files.py | LiterallyElvis/WebScrelp | 0 | 12762264 | <filename>screlp/backend/files.py
"""
files.py is a component of Screlp that handles file writing.
Specifically, it has functions to write a results file and a debug file. The
debug file is just the raw JSON output in a .txt file, and the true results
file is a CSV file.
"""
import json
import csv
METERS_PER_MILE = 1609 # number of meters in one mile.
def write_raw_result(api_result, path="raw_output.txt"):
    """
    Writes raw JSON data to a local file, for debugging purposes.

    If for some reason the script returns an empty CSV file, or complete
    garbage, it could be useful to have this file generated. Such a file would
    reveal any Oauth errors, for example.

    :param api_result: JSON-serializable object returned by the API call.
    :param path: destination file; default preserved for backward
                 compatibility with existing callers.
    """
    with open(path, "w") as outfile:
        json.dump(api_result, outfile, sort_keys=True, indent=4)
def write_csv_file(items, path="results.csv"):
    """
    Writes CSV file of returned data from list of Business objects.

    :param items: iterable of row sequences, one row per business.
    :param path: destination file; default preserved for backward
                 compatibility with existing callers.
    """
    # newline="" is required when passing a file object to csv.writer;
    # without it the writer's \r\n row terminators are translated again on
    # Windows, producing blank lines between rows.
    with open(path, "w", newline="") as csvout:
        output = csv.writer(csvout)
        output.writerow(["Rank", "ID", "Name", "Address", "City", "State",
                         "Zip", "Rating", "Review Count", "Category", "URL"])
        output.writerows(items)
| 3.203125 | 3 |
SerialFace/Face.py | KenleyArai/SerialFace | 0 | 12762265 | import cv2
import os
import numpy as np
from PIL import Image
import picamera.array
from picamera import PiCamera
class Face(object):
    """Face-recognition "password" helper backed by a Raspberry Pi camera.

    Reference face crops are stored as .bmp files and matched against a live
    capture using an LBPH recognizer (OpenCV contrib module).
    """

    # Number of reference images captured when enrolling a new face password.
    training_count = 5
    # LBPH confidence threshold; predictions *below* this value count as a
    # match (lower confidence value = closer match in OpenCV's LBPH API).
    threshold = 30

    def __init__(self, casc_path, path="./passwords", camera_port=0):
        """
        :param casc_path: path to the Haar cascade XML used for detection.
        :param path: directory holding the enrolled password images.
        :param camera_port: stored but unused for capture (PiCamera is used).
        """
        self.path = path
        self._cascade = cv2.CascadeClassifier(casc_path)
        self._port = camera_port

    def __del__(self):
        # Best-effort cleanup of any OpenCV windows on garbage collection.
        cv2.destroyAllWindows()

    def _capture_image(self):
        """
        Throw away frames so we can let the camera adjust
        :return: list(list())
        """
        with PiCamera() as camera:
            with picamera.array.PiRGBArray(camera) as stream:
                camera.resolution = (640, 480)
                camera.capture(stream, 'bgr', use_video_port=True)
                # NOTE(review): cv2.cv.* is the legacy OpenCV 2.x API; modern
                # builds spell this cv2.cvtColor -- confirm installed version.
                return cv2.cv.cvtColor(stream.array, cv2.COLOR_BGR2GRAY)

    def _get_faces_and_frames(self):
        # Detect faces in one grayscale capture; returns (detections, frame).
        frame = self._capture_image()
        faces = self._cascade.detectMultiScale(
            frame,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        return faces, frame

    def _get_training_faf(self):
        """
        :yield: faces, frame

        Gets all images required for training.

        * Note
            Won't stop getting images unless there is only one face per image.
        """
        count = 0
        error_count = 0
        while count < self.training_count:  # Ensures that we get at least self.training_count images
            # NOTE(review): error_count increments on every capture, including
            # successful ones, so enrollment can abort after 10 captures total
            # even when progress is being made -- confirm this is intended.
            error_count += 1
            faces, frame = self._get_faces_and_frames()
            if len(faces) == 1:
                yield faces, frame
                count += 1
            elif error_count >= 10:
                break

    def can_unlock(self):
        """
        Will return false under the following conditions:
            1. More than one face in the image
            2. No images in password file
            3. Face is not recognized
        :return: True if face is recognized False if face is not recognized
        """
        face, frame = self._get_faces_and_frames()

        # Don't allow more than 1 face in the image
        if len(face) != 1:
            return False

        # Crop the frame down to the single detected face region.
        x, y, w, h = face[0]
        face = frame[y: y + h, x: x + w]

        recognizer = cv2.face.createLBPHFaceRecognizer()
        paths = [os.path.join(self.path, f) for f in os.listdir(self.path) if f.endswith("bmp")]

        if not paths:
            return False  # Return since there are no images saved as a password

        # images will contains face images
        images = []
        # labels will contains the label that is assigned to the image
        labels = []
        nbr = 0
        for image_path in paths:
            # Read the image
            image_pil = Image.open(image_path)
            # Convert the image format into numpy array
            image = np.array(image_pil, 'uint8')
            images.append(image)
            labels.append(nbr)
            nbr += 1
        cv2.destroyAllWindows()

        # Perform the tranining
        recognizer.train(images, np.array(labels))
        nbr_predicted, conf = recognizer.predict(face)
        # Lower LBPH confidence means a closer match.
        if conf < self.threshold:
            return True
        return False

    def new_pass(self):
        """Capture training images and save the cropped faces as the new
        password set.

        NOTE(review): writes to the literal 'passwords/' directory rather
        than self.path -- confirm whether these should agree.
        """
        count = 0
        for face, frame in self._get_training_faf():
            filename = "".join(["passwords/", str(count), ".bmp"])
            x, y, w, h = face[0]
            frame = frame[y: y + h, x: x + w]
            count += 1
            cv2.imwrite(filename, frame)

    def secure_new_pass(self):
        # Only allow re-enrollment when the current face already unlocks.
        if self.can_unlock():
            self.new_pass()
plugin.video.mrknowtv/resources/lib/sources/pierwsza.py | mrknow/filmkodi | 105 | 12762266 | # -*- coding: utf-8 -*-
'''
Mrknow TV Add-on
Copyright (C) 2016 mrknow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urlparse,base64,urllib
import re, time, datetime
import json
from resources.lib.lib import control
from resources.lib.lib import client
from resources.lib.lib import stale
def get(url, params=None):
    """GET a pierwsza.tv API endpoint with the add-on's API credentials.

    :param url: endpoint path, joined onto http://pierwsza.tv.
    :param params: optional dict of query parameters; api_id/checksum are
                   added automatically.
    :return: response body string, or None on any failure (best-effort).
    """
    try:
        # Copy into a fresh dict: the original signature used a mutable
        # default (params={}) that was mutated on every call, leaking
        # api_id/checksum between calls and into callers' dicts.
        params = dict(params) if params else {}
        params['api_id'] = stale.pierwszatv_apiid
        params['checksum'] = stale.pierwszatv_checksum
        url = urlparse.urljoin('http://pierwsza.tv', url)
        url = url + '?' + urllib.urlencode(params)
        headers = {'Content-Type': 'application/json'}
        # First try with error reporting enabled; accept anything that is not
        # an auth (401) or method (405) failure.
        result = client.request(url, headers=headers, output='response', error=True)
        if not (result[0] == '401' or result[0] == '405'): return result[1]
        # Fall back to a plain request (lets client.request negotiate).
        result = client.request(url, headers=headers)
        return result
    except Exception:
        # Best-effort: any failure yields an implicit None return.
        pass
def getstream(id):
    """Create a pierwsza.tv stream for channel `id` and return its URL.

    Side effects: clears then persists server/stream/token settings for the
    later streamrefresh() call, pops Kodi dialogs when credentials are
    missing or the API reports an error, and blocks (sleeps) while polling
    the stream status.  Returns the tokenized stream URL, or None.
    """
    try:
        # Clear any stale session state before creating a new stream.
        control.set_setting('pierwszatv.tokenExpireIn', '')
        control.set_setting('pierwszatv.serverId', '')
        control.set_setting('pierwszatv.streamId', '')
        control.set_setting('pierwszatv.token', '')

        # No credentials configured: offer to open the settings dialog, then
        # abort via the outer exception handler.
        if getPierwszaCredentialsInfo() == False:
            if control.yesnoDialog(control.lang(40003).encode('utf-8'), control.lang(30481).encode('utf-8'), '', 'Trakt', control.lang(30483).encode('utf-8'), control.lang(30482).encode('utf-8')):
                control.set_setting('pierwszatv.user', '')
                control.set_setting('pierwszatv.password', '')
                control.openSettings('1.4')
            raise Exception()

        url = '/api/stream/create'
        params = {}
        params['id'] =id
        params['user'] =control.setting('pierwszatv.user').strip()
        # urllib.quote_plus is the Python 2 spelling (urllib.parse in py3).
        params['password'] = urllib.quote_plus(control.setting('pierwszatv.password'))
        result = get(url, params)
        control.log('x1x1x1: %s' % result)
        result = json.loads(result)
        if result['status'] == 'ok':
            #time.sleep(1)
            # Remember the token with a 25% safety margin on its lifetime so
            # streamrefresh() renews it before the server expires it.
            expirein = int(int(result['tokenExpireIn'])*0.75)
            expirewhen = datetime.datetime.now() + datetime.timedelta(seconds=expirein)
            control.set_setting('pierwszatv.tokenExpireIn', str(int(time.mktime(expirewhen.timetuple()))))
            control.set_setting('pierwszatv.serverId', result['serverId'])
            control.set_setting('pierwszatv.streamId', result['streamId'])
            control.set_setting('pierwszatv.token', result['token'])
            # Poll the stream status up to 5 times, 3s apart.
            for i in range(0, 5):
                try:
                    r = get('/api/stream/status', {'serverId': result['serverId'] , 'streamId': result['streamId'], 'token': result['token']})
                    r = json.loads(r)
                    if r['status'] == 'ok':
                        #control.infoDialog(control.lang(30489).encode('utf-8'), time=6000)
                        # Probe the stream URL for up to ~20s until it answers.
                        for j in range(0, 20):
                            time.sleep(1)
                            control.infoDialog(control.lang(30489).encode('utf-8'), time=500)
                            try:
                                result2 = client.request(r['source']+'?token='+result['token'],safe=True, timeout='2')
                                control.log('Pierwsza link check nr: %s: result:%s' % (j,result2))
                                if result2 == None: raise Exception()
                                else: return r['source']+'?token='+result['token']
                            except:
                                pass
                        # Probes never answered; return the URL anyway.
                        return r['source']+'?token='+result['token']
                    time.sleep(3)
                except:
                    pass
        if result['status'] == 'error':
            # Surface the API error message to the user.
            control.infoDialog('%s' % result['message'].encode('utf-8'))
            control.dialog.ok(control.addonInfo('name'), result['message'].encode('utf-8'), '')
        return None
    except Exception as e:
        control.log('Error pierwsza.getstream %s' % e )
def getPierwszaCredentialsInfo():
    """Return True when both pierwsza.tv credentials are configured."""
    user = control.setting('pierwszatv.user').strip()
    password = control.setting('pierwszatv.password')
    return user != '' and password != ''
def streamrefresh():
    """Renew the pierwsza.tv stream token when it is close to expiring.

    Reads the serverId/streamId/token persisted by getstream(), calls the
    refresh endpoint, and stores the new expiry time (with the same 25%
    safety margin).  Re-raises a bare Exception on any failure so the caller
    can stop playback.
    """
    try:
        #mynow = int(datetime.datetime.now().strftime('%s'))
        # Current wall-clock time as a unix timestamp.
        mynow = int(str(int(time.mktime(datetime.datetime.now().timetuple()))))
        expired = int(control.get_setting('pierwszatv.tokenExpireIn'))
        #control.log('XXXX Exp:%s Now:%s' % (expired, mynow))
        # Only hit the API once the stored expiry has passed.
        if mynow>expired:
            control.log('Pierwsza refresh')
            url = '/api/stream/refresh'
            params = {}
            params['serverId'] =control.get_setting('pierwszatv.serverId')
            params['streamId'] =control.get_setting('pierwszatv.streamId')
            params['token'] = control.get_setting('pierwszatv.token')
            result = get(url, params)
            result = json.loads(result)
            # Persist the next expiry with a 25% safety margin.
            expirein = int(int(result['tokenExpireIn'])*0.75)
            expirewhen = datetime.datetime.now() + datetime.timedelta(seconds=expirein)
            control.set_setting('pierwszatv.tokenExpireIn', str(int(time.mktime(expirewhen.timetuple()))))
    except Exception as e:
        control.log('Error pierwsza.refresh %s' % e )
        raise Exception()
def chanels():
    """Fetch the channel list from the API; returns [] on hard failure.

    Falls back to returning the whole parsed payload when no 'channels'
    entries were collected (preserves the original behavior).
    """
    channels = []
    try:
        payload = json.loads(get('/api/channels'))
        for entry in payload['channels']:
            try:
                channels.append(entry)
            except:
                pass
        if len(channels) == 0:
            channels = payload
    except:
        control.log('Error pierwsza.chanels')
        pass
    return channels
| 2.078125 | 2 |
lists/forms.py | morion4000/ophion | 0 | 12762267 | from django import forms
from lists.models import List
class ListForm(forms.ModelForm):
    """ModelForm exposing the user-editable fields of a List."""

    class Meta:
        model = List
        # Only name and description are editable through this form.
        fields = ('name', 'description')
| 2.015625 | 2 |
houm/properties/migrations/0001_initial.py | marcelodavid/houm | 0 | 12762268 | # Generated by Django 3.2.11 on 2022-02-02 01:04
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.11; avoid hand-editing once applied.

    # First migration for this app.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Property',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('direction', models.CharField(max_length=180)),
                ('number', models.PositiveIntegerField(blank=True, null=True)),
                # GeoDjango point in WGS 84 (srid=4326); optional.
                ('location', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326)),
            ],
        ),
    ]
| 1.6875 | 2 |
tensorkit/backend/init.py | lizeyan/tensorkit | 0 | 12762269 | <reponame>lizeyan/tensorkit
from ..settings_ import settings

# Select the backend implementation at import time based on settings.
if settings.backend == 'PyTorch':
    from .pytorch_ import init
    from .pytorch_.init import *
else:
    # Fixed: the original constructed the RuntimeError without raising it,
    # so an unsupported backend fell through to a NameError on `init` below.
    raise RuntimeError(f'Backend {settings.backend} not supported.')

__all__ = init.__all__
| 1.484375 | 1 |
ci-scripts/infra-setup/roles/rrcockpit/files/telegraf/skipped_promotions.py | amolkahat/ci-config | 0 | 12762270 | #!/usr/bin/env python
import requests
import re
import sys
import dlrnapi_client
import influxdb_utils
import json
from promoter_utils import get_dlrn_instance_for_release
from diskcache import Cache
# Disk-backed cache of job-status lookups, shared across script runs;
# expire() evicts any entries whose TTL has already elapsed.
cache = Cache('/tmp/skipped_promotions_cache')
cache.expire()

# Matches promoter log lines announcing a skipped promotion; groups capture
# the promotion dict, source label, target label and the failing-job list.
promoter_skipping_regex = re.compile(
    ('.*promoter Skipping promotion of (.*) from (.*) to (.*), '
     'missing successful jobs: (.*)')
)

# HTML anchor template: first placeholder is the href, second the link text.
html_link = "<a href='{}' target='_blank' >{}</a>"
def get_failing_jobs_html(dlrn_hashes, release_name):
    """Build an HTML snippet listing the failing DLRN jobs for a promotion.

    :param dlrn_hashes: dict with at least 'commit_hash' and 'distro_hash'.
    :param release_name: release whose DLRN instance should be queried.
    :return: (in_progress, html) -- in_progress is True when any failing job
             is still running; html lists job links, or a warning when the
             expected periodic jobs have not run at all.
    """
    failing_jobs_html = ""
    # If any of the jobs is still in progress
    in_progress = False
    try:
        dlrn = get_dlrn_instance_for_release(release_name)
        if dlrn:
            # Query only unsuccessful job results for this commit/distro pair.
            params = dlrnapi_client.Params2()
            params.commit_hash = dlrn_hashes['commit_hash']
            params.distro_hash = dlrn_hashes['distro_hash']
            params.success = str(False)
            failing_jobs = dlrn.api_repo_status_get(params)
            if len(failing_jobs) > 0:
                for i, failing_job in enumerate(failing_jobs):
                    if failing_job.in_progress:
                        in_progress = True
                    failing_job_ln = html_link.format(
                        failing_job.url, failing_job.job_id)
                    # NOTE(review): the <br> is appended *after* every link
                    # past the first, not between links -- confirm intended.
                    if i > 0:
                        failing_job_ln += "<br>"
                    failing_jobs_html += failing_job_ln
            else:
                failing_jobs_html = ("<font color='red'>WARNING</font> "
                                     "expected perodic jobs have not run")
    except Exception as e:
        # Best-effort: log and fall through with whatever was built so far.
        print(e)
        pass
    return (in_progress, failing_jobs_html)
# FIXME: Use a decorator ?
def get_cached_failing_jobs_html(dlrn_hashes, release_name):
    """Cached wrapper around get_failing_jobs_html().

    Results are cached for 3 days (259200 s) keyed by the promotion's
    timestamp and repo hash, but only once every job has finished; results
    with jobs still in progress are returned without being cached.

    Fixed: the original read `cache[cache_key]` unconditionally, which
    raised KeyError whenever the jobs were still in progress and nothing
    had been stored under the key yet.
    """
    cache_key = "failing_jobs_html_{timestamp}_{repo_hash}".format(
        **dlrn_hashes)
    if cache_key in cache:
        return cache[cache_key]
    in_progress, failing_jobs_html = get_failing_jobs_html(
        dlrn_hashes, release_name)
    # Only cache if jobs have finished
    if not in_progress:
        cache.add(cache_key, failing_jobs_html, expire=259200)
    return failing_jobs_html
def parse_skipped_promotions(release_name):
    """Scrape the promoter log for a release and collect skipped promotions.

    Downloads http://38.145.34.55/<release>.log and matches every
    "Skipping promotion" line, returning a list of dicts with repo_hash,
    from/to names, failing jobs, timestamp and release.

    NOTE(review): under Python 3, requests' iter_lines() yields bytes while
    promoter_skipping_regex is a str pattern -- confirm this script runs
    under Python 2 or decode the lines first.
    """
    skipped_promotions = []
    promoter_logs = requests.get(
        "http://38.145.34.55/{}.log".format(release_name))

    def get_log_time(log_line):
        # "<date> <time>,<millis> ..." -> "<date> <time>" (millis dropped).
        log_line_splitted = log_line.split()
        log_time = "{} {}".format(log_line_splitted[0], log_line_splitted[1])
        log_time = log_time.split(',')[0]
        return log_time

    for log_line in promoter_logs.iter_lines():
        matched_regex = promoter_skipping_regex.match(log_line)
        if matched_regex:
            # Group 1 is a Python-repr dict; swap quotes so json can parse it.
            promotion = json.loads(matched_regex.group(1).replace("'", '"'))
            repo_hash = promotion['full_hash']
            failing_jobs = matched_regex.group(3)
            skipped_promotion = {
                'repo_hash': repo_hash,
                'from_name': matched_regex.group(2),
                # NOTE(review): to_name/failing_jobs both use group(3);
                # group(4) (the job list) looks like the intended value for
                # failing_jobs -- confirm against the log format.
                'to_name': matched_regex.group(3),
                'failing_jobs': failing_jobs,
                'timestamp': get_log_time(log_line),
                'release': release_name
            }
            skipped_promotions.append(skipped_promotion)
    return skipped_promotions
def to_influxdb(skipped_promotions):
    """Render skipped promotions as InfluxDB line-protocol strings.

    NOTE: rewrites each promotion's 'timestamp' in place (same side effect
    as before) before formatting it into the line template.
    """
    template = (
        "skipped-promotions,repo_hash={repo_hash}"
        ",release={release},from_name={from_name},"
        "to_name={to_name} failing_jobs=\"{failing_jobs}\" "
        "{timestamp}")
    lines = []
    for promotion in skipped_promotions:
        promotion['timestamp'] = influxdb_utils.format_ts_from_str(
            promotion['timestamp'])
        lines.append(template.format(**promotion))
    return lines
def main():
    """CLI entry point: print influx lines for the release named in argv[1]."""
    promotions = parse_skipped_promotions(sys.argv[1])
    print('\n'.join(to_influxdb(promotions)))
if __name__ == '__main__':
main()
| 2.25 | 2 |
WiretapView.py | CBSDigital/Hiero-Wiretap | 5 | 12762271 | <reponame>CBSDigital/Hiero-Wiretap
"""Extended Qt widgets and methods for displaying hierarchical Wiretap nodes.
@author <NAME>
@date May 2014
@defgroup modWiretapView WiretapView
@{
"""
import sys
from PySide import QtCore, QtGui
from wiretap import WireTapException
from WiretapTools import WiretapManager
class NodeTreeView(QtGui.QTreeView):
    """A tree view tailored for displaying Wiretap nodes.

    @todo   Some of the methods in this class pertain more to the model than
            the view. Implement a \c{QAbstractItemModel} and move those
            methods there.
    """
    def __init__(self, parent=None):
        """Initializes a node tree view for displaying NodeItem%s.

        @param[in] parent \c{(QtGui.QWidget)} The parent widget for this
                          dialog.
        """
        super(NodeTreeView, self).__init__(parent)

        ## \c{(bool)} Whether this tree view should exclude Wiretap clip,
        #  setup, and other content-related nodes.
        self.excludeContent = True

        ## \c{(\ref WiretapTools.WiretapManager "WiretapManager")} The Wiretap
        #  server manager for keeping track of connections.
        self.manager = WiretapManager(WiretapManager.PRODUCT_IFFFS)

        self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)

        # Signal-slot pairs
        self.expanded.connect(self.LoadChildren)
        # Use Refresh buttons to update children instead
        #self.collapsed.connect(self.ResetChildren)
        # Auto-resizing of columns (may be visually annoying)
        #self.expanded.connect(lambda index: self.FitColumnToContents(0))
        #self.collapsed.connect(lambda index: self.FitColumnToContents(0))

    def Populate(self):
        """Populates the tree model with NodeItem%s for each Wiretap server
        discovered.
        """
        model = QtGui.QStandardItemModel()
        model.setHorizontalHeaderLabels(['Name', 'Type'])
        self.setModel(model)
        self.setUniformRowHeights(True)  # enable view optimizations
        self.setColumnWidth(0, 300)  # works only after view initialized
        self.setColumnWidth(1, 100)
        # Populate the view with a list of servers
        for hn in self.manager.hostnames:
            item = NodeItem(self.manager, hn)
            NestItem(self.model(), item)

    def GoTo(self, path, useNodeID):
        """Navigates to the specified Wiretap node in the tree view and
        selects it.

        @param[in] path \c{(str)} The Wiretap server hostname concatenated with
                        the node ID or node display path.
        @param[in] useNodeID \c{(bool)} Whether to interpret the path as a
                             Wiretap node ID or a series of slash-separated
                             node display names (reels and clips have display
                             names that differ from their node IDs).
        """
        HOST = 0
        depth = 0
        item = None
        segments = [seg for seg in path.split('/') if seg]
        # Account for unnamed REELs and CLIPs (displayed as "Unnamed" but are
        # actually an empty string)
        if len(segments) > 1 and path[-1] == '/':
            segments.append('')  # unnamed REEL/CLIP
        # Open each node item and search for a match
        for depth, segment in enumerate(segments):
            # Match top-level item's hostname
            if depth == HOST:
                items = Query(self.model(), 'hostname', segment)
            # Match item with same node ID up to the current depth
            elif useNodeID:
                nodeID = '/' + '/'.join(segments[1:depth+1])
                items = Query(item, 'nodeID', nodeID)
            # Match first item with the same displayName
            else:
                items = Query(item, 'displayName', segment)
            # Expand the first matched item's parent and select item in view
            if items:
                item = items[0]
                if depth > HOST:
                    self.setExpanded(item.parent().index(), True)
                self.setCurrentIndex(item.index())

    #--------------------------------------------------------------------------
    # SLOTS
    #--------------------------------------------------------------------------
    def Reset(self):
        """Resets the Wiretap connection manager and clears the item model."""
        self.manager = WiretapManager(WiretapManager.PRODUCT_IFFFS)
        self.model().clear()
        # NOTE(review): reconnecting here may stack a duplicate connection of
        # expanded -> LoadChildren on repeated Reset() calls -- confirm.
        self.expanded.connect(self.LoadChildren)
        self.Populate()

    def ResetChildren(self, index):
        """Removes the children of the NodeItem associated with the given
        index.

        @details The children may be reloaded by closing and reopening the
                 item in the tree view.

        @param[in] index \c{(QtCore.QModelIndex)} The index of the parent item
                         whose children will be reset.
        """
        model = index.model()
        item = model.itemFromIndex(index)
        try:
            item.ResetChildren()
        except:
            # Plain QStandardItems have no ResetChildren(); just drop rows.
            item.removeRows(0, item.rowCount())

    def LoadChildren(self, index):
        """Loads the children of the NodeItem associated with the given index.

        @param[in] index \c{(QtCore.QModelIndex)} The index of the parent item
                         whose children will be loaded.
        """
        model = index.model()
        item = model.itemFromIndex(index)
        try:
            item.LoadChildren(self.excludeContent)
        except WireTapException as why:
            print why
            self.clearSelection()

    def FitColumnToContents(self, column, margin=10):
        """Resizes the specified column to fit its contents with a margin.

        @param[in] column \c{(int)} The zero-based column index to fit.
        @param[in] margin \c{(int)} Additional buffer added to the column
                          width.
        """
        self.resizeColumnToContents(column)
        self.setColumnWidth(column, self.columnWidth(column) + margin)
class NodeItem(QtGui.QStandardItem):
    """Represents a Wiretap server or node handle in a tree view."""

    ## \c{(tuple)} The names of the Wiretap container node types (hosts,
    #  volumes, projects, libraries, and reels).
    CONTAINERS = ('HOST', 'VOLUME', 'PROJECT', 'LIBRARY', 'REEL')

    ## \c{(tuple)} The names of the Wiretap container types that can store
    #  clip nodes (libraries and reels).
    CLIP_CONTAINERS = ('LIBRARY', 'REEL')

    def __init__(self, manager, hostname, nodeID='/'):
        """Initializes a Wiretap node item for use with a
        \c{QStandardItemModel}.

        @param[in] manager \c{(\ref WiretapTools.WiretapManager
                           "WiretapManager")} A reference to the Wiretap
                           connection manager providing data for the tree view.
        @param[in] hostname \c{(str)} The Wiretap server hostname associated
                            with this NodeItem. The hostname includes the
                            colon-separated product type (usually "IFFFS").
        @param[in] nodeID \c{(str)} The Wiretap node ID that this item will
                          represent.
        """
        ## \c{(\ref WiretapTools.WiretapManager "WiretapManager")} A reference
        #  to the Wiretap connection manager providing data for the tree view.
        self.manager = manager

        ## \c{(str)} The Wiretap server hostname associated with this node
        #  item (includes the colon-separated product type, usually "IFFFS").
        self.hostname = hostname

        ## \c{(str)} The Wiretap node ID that this item will represent.
        self.nodeID = nodeID

        ## \c{(str)} The Wiretap node type.
        self.nodeType = manager.GetNodeTypeStr(hostname, nodeID)

        ## \c{(bool)} Whether the children of this node have been listed (the
        #  node was expanded in the tree view).
        self.__hasCachedChildren = False

        ## \c{(function)} Nests other NodeItem%s under this node.
        self.NestItem = lambda item: NestItem(self, item)

        ## \c{(str)} The Wiretap node display name (usually).
        #
        #  @details The text property is not necessarily the same as the
        #           display name, especially with server items, which may
        #           append the artists' names.
        self.displayName = manager.GetDisplayName(hostname, nodeID)

        super(NodeItem, self).__init__(self.displayName)
        self.setEditable(False)
        # Nodes with an empty display name are shown as an italic "Unnamed".
        if not self.text():
            self.setText('Unnamed')
            font = self.font()
            font.setItalic(True)
            self.setFont(font)
        # Containers get a placeholder child so they appear expandable.
        if self.nodeType in NodeItem.CONTAINERS:
            self.appendRow(CreateDummyItem())

    def LoadChildren(self, excludeContent):
        """Creates a NodeItem for every immediate child of the associated
        Wiretap node.

        @param[in] excludeContent \c{(bool)} Whether or not clips, setups, and
                                  other non-container nodes should be excluded
                                  from the hierarchy.
        """
        if not self.__hasCachedChildren:
            try:
                children = self.manager.GetChildren(self.hostname, self.nodeID)
            except WireTapException:
                self.RemoveChildren()
                self.DisableRow()
                raise WireTapException("Failed to load children from node "
                                       "path: " + self.nodePath)
            # Drop the "Loading..." placeholder before inserting real rows.
            self.removeRows(0, self.rowCount())
            for node in children:
                nodeID = node.getNodeId().id()
                nodeType = self.manager.GetNodeTypeStr(self.hostname, nodeID)
                # Exclude SETUP and CLIP nodes
                if excludeContent and nodeType not in NodeItem.CONTAINERS:
                    continue
                self.NestItem(NodeItem(self.manager, self.hostname, nodeID))
            self.__hasCachedChildren = True

    def ResetChildren(self):
        """Replaces this item's children with a placeholder item."""
        self.RemoveChildren()
        self.appendRow(CreateDummyItem())

    def RemoveChildren(self):
        """Resets this item to an uncached state by removing all child rows."""
        self.removeRows(0, self.rowCount())
        self.__hasCachedChildren = False

    def DisableRow(self):
        """Disable all columns in this item's row.

        @note Each item can manipulate the states of its child columns. In
              order to disable the current item's row, you must access its
              parent and then the correct row beneath it.
        """
        parent = self.parent()
        if not parent:  # top-level item has null parent
            parent = self.model()
        # Disable every column in this item's row (w.r.t. the parent)
        for ii in xrange(parent.columnCount()):
            try:  # parent = QStandardItem
                column = parent.child(self.row(), ii)
            except AttributeError:  # parent type is QStandardItemModel
                column = parent.item(self.row(), ii)
            if column:
                column.setEnabled(False)

    #--------------------------------------------------------------------------
    # PROPERTIES
    #--------------------------------------------------------------------------
    def __GetDisplayPath(self):
        """Getter for #displayPath.

        @return \c{(str)} The Wiretap hostname concatenated with the node
                display path.
        """
        MAX_DEPTH = 10  # IFFFS hierarchies typically less than 7 nodes deep
        depth = 0  # safety feature in case top-level node is not of type HOST
        displayNames = []
        nodeItem = self
        # Walk up to the HOST node, collecting display names along the way.
        while nodeItem.nodeType != 'HOST' and depth < MAX_DEPTH:
            displayNames.insert(0, nodeItem.displayName)
            nodeItem = nodeItem.parent()
            depth += 1
        return self.hostname + '/' + '/'.join(displayNames)

    def __GetNodePath(self):
        """Getter for #nodePath.

        @return \c{(str)} The Wiretap hostname concatenated with the node ID.
        """
        return self.hostname + self.nodeID

    ## \c{(str)} Gets the Wiretap hostname concatenated with the node display
    #  path.
    #
    #  @details Implemented by __GetDisplayPath().
    displayPath = property(fget=__GetDisplayPath)

    ## \c{(str)} Gets the Wiretap hostname concatenated with the node ID.
    #
    #  @details Implemented by __GetNodePath().
    nodePath = property(fget=__GetNodePath)
def CreateDummyItem():
    """Builds the read-only 'Loading...' placeholder row that lets a parent
    item be expanded before its real children have been fetched.

    @return \c{(QtGui.QStandardItem)} The placeholder item that was created.
    """
    placeholder = QtGui.QStandardItem('Loading...')
    placeholder.setEditable(False)
    return placeholder
def NestItem(parent, item):
    """Adds the given item to a (multi-column) row beneath the parent model
    or item.

    @details Wiretap items get a second, read-only column showing their node
             type; plain \c{QStandardItem}s (no nodeType attribute) are
             appended as a single-column row.

    @param[in] parent \c{(QStandardItemModel/QStandardItem/QWiretapItem)} The
                      parent model or item that will host the given item in a
                      new row.
    @param[in] item \c{(QStandardItem/QWiretapItem)} The item to be nested
                    under the parent.
    """
    try:
        typeColumn = QtGui.QStandardItem(item.nodeType)
        typeColumn.setEditable(False)
        parent.appendRow([item, typeColumn])
    except AttributeError:  # item.nodeType doesn't exist (not a WiretapItem)
        parent.appendRow(item)
def Query(parent, attribute, value):
    """Searches the immediate children of the parent for items containing the
    attribute value that matches the given parameters.

    @param[in] parent \c{(QStandardItemModel/QStandardItem/QWiretapItem)} The
                      parent widget whose child items will be queried.
    @param[in] attribute \c{(str)} The name of the child item's attribute to be
                         checked.
    @param[in] value The value of the child item's attribute.

    @return \c{(list)} The matching items, if any, that were found.
    """
    matched = []
    # Models expose item(row); items expose child(row); anything else has no
    # queryable children.
    if hasattr(parent, 'item'):  # model
        GetItem = lambda row: parent.item(row)
    elif hasattr(parent, 'child'):  # item
        GetItem = lambda row: parent.child(row)
    else:
        return matched
    # xrange: this module targets Python 2.
    for ii in xrange(parent.rowCount()):
        item = GetItem(ii)
        try:
            if value == getattr(item, attribute):
                matched.append(item)
        except AttributeError:
            # Child lacks the attribute (e.g. placeholder item); skip it.
            continue
    return matched
## @}
| 2.109375 | 2 |
train_pointcutmix_k.py | DbrRoxane/PointCutMix | 33 | 12762272 | from __future__ import print_function
import argparse
import os
import csv
import numpy as np
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from data_utils.data_util import PointcloudScaleAndTranslate
from data_utils.ModelNetDataLoader import ModelNetDataLoader
from models.pointnet import PointNetCls, feature_transform_regularizer
from models.pointnet2 import PointNet2ClsMsg
from models.dgcnn import DGCNN
from models.pointcnn import PointCNNCls
from utils import progress_bar, log_row
import sys
sys.path.append("./emd/")
import emd_module as emd
def gen_train_log(args):
    """Append a header block describing this training run to
    logs_train/<data>_<model>_<name>.csv (separated from any previous run
    by two blank rows)."""
    if not os.path.isdir('logs_train'):
        os.mkdir('logs_train')
    logname = ('logs_train/%s_%s_%s.csv' % (args.data, args.model, args.name))
    if os.path.exists(logname):
        # Separate this run from earlier content with two blank rows.
        with open(logname, 'a') as logfile:
            log_row(logname, [''])
            log_row(logname, [''])

    with open(logname, 'a') as logfile:
        writer = csv.writer(logfile, delimiter=',')
        writer.writerow(['model type', 'data set', 'seed', 'train batch size',
                         'number of points in one batch', 'number of epochs', 'optimizer',
                         'learning rate', 'resume checkpoint path',
                         'feature transform', 'lambda for feature transform regularizer', 'data augment'])
        writer.writerow([args.model, args.data, args.seed, args.batch_size, args.num_points,
                         args.epochs, args.optimizer, args.lr, args.resume,
                         args.feature_transform, args.lambda_ft, args.augment])
        writer.writerow(['Note', args.note])
        writer.writerow([''])
def save_ckpt(args, epoch, model, optimizer, acc_list):
    """Persist model/optimizer state to checkpoints/<data>_<model>_<name>/best.pth,
    but only when the newest accuracy beats every previous entry in acc_list."""
    if not os.path.isdir('checkpoints'):
        os.mkdir('checkpoints')
    run_dir = 'checkpoints/%s_%s_%s' % (args.data, args.model, args.name)
    if not os.path.isdir(run_dir):
        os.mkdir(run_dir)

    # Save only on a strict improvement over all earlier accuracies.
    if acc_list[-1] > max(acc_list[:-1]):
        print('=====> Saving checkpoint...')
        print('the best test acc is', acc_list[-1])
        state = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'args': args,
            'acc_list': acc_list,
        }
        torch.save(state, run_dir + '/best.pth')
        print('Successfully save checkpoint at epoch %d' % epoch)
def cal_loss(pred, gold, smoothing=True):
    ''' Calculate cross entropy loss, apply label smoothing if needed. '''
    # pred: (B, C) raw logits; gold: class indices, flattened to (B,).
    gold = gold.contiguous().view(-1)

    if not smoothing:
        return F.cross_entropy(pred, gold, reduction='mean')

    eps = 0.2
    n_class = pred.size(1)

    # Smoothed one-hot targets: 1-eps on the true class, eps/(C-1) elsewhere.
    one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
    one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)

    log_prb = F.log_softmax(pred, dim=1)
    return -(one_hot * log_prb).sum(dim=1).mean()
def test(model, test_loader, criterion):
    """Evaluate `model` over `test_loader`; returns (loss metric, accuracy %).

    NOTE(review): relies on module-level globals `args` and `device` (and,
    for the rscnn model, `pointnet2_utils`); runs without torch.no_grad(),
    and the returned loss is the *last* batch's loss divided by the batch
    count rather than an accumulated average -- confirm whether intended.
    """
    model.eval()
    correct = 0
    total = 0
    for j, data in enumerate(test_loader, 0):
        points, label = data
        # label arrives as (B, 1); take the first column as the class index.
        points, label = points.to(device), label.to(device)[:, 0]
        if args.model == 'rscnn_kcutmix':
            # Re-sample a fixed point count with furthest-point sampling.
            fps_idx = pointnet2_utils.furthest_point_sample(points, args.num_points)  # (B, npoint)
            points = pointnet2_utils.gather_operation(points.transpose(1, 2).contiguous(), fps_idx).transpose(1,
                                                                                                              2).contiguous()  # (B, N, 3)
        points = points.transpose(2, 1)  # to be shape batch_size*3*N

        pred, trans_feat = model(points)
        loss = criterion(pred, label.long())

        pred_choice = pred.data.max(1)[1]
        correct += pred_choice.eq(label.data).cpu().sum()
        total += label.size(0)
        progress_bar(j, len(test_loader), 'Test Loss: %.3f | Test Acc: %.3f%% (%d/%d)'
                     % (loss.item() / (j + 1), 100. * correct.item() / total, correct, total))

    return loss.item() / (j + 1), 100. * correct.item() / total
if __name__ == '__main__':
########################################
## Set hypeparameters
########################################
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='pointnet', help='choose model type')
parser.add_argument('--data', type=str, default='modelnet40', help='choose data set')
parser.add_argument('--seed', type=int, default=0, help='manual random seed')
parser.add_argument('--batch_size', type=int, default=16, help='input batch size')
parser.add_argument('--num_points', type=int, default=1024, help='input batch size')
parser.add_argument('--epochs', type=int, default=300, help='number of epochs to train for')
parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate in training')
parser.add_argument('--resume', type=str, default='/', help='resume path')
parser.add_argument('--feature_transform', type=int, default=1, help="use feature transform")
parser.add_argument('--lambda_ft', type=float, default=0.001, help="lambda for feature transform")
parser.add_argument('--augment', type=int, default=1, help='data argment to increase robustness')
parser.add_argument('--name', type=str, default='train', help='name of the experiment')
parser.add_argument('--note', type=str, default='', help='notation of the experiment')
parser.add_argument('--normal', action='store_true', default=False,
help='Whether to use normal information [default: False]')
parser.add_argument('--beta', default=1, type=float, help='hyperparameter beta')
parser.add_argument('--cutmix_prob', default=0.5, type=float, help='cutmix probability')
args = parser.parse_args()
args.feature_transform, args.augment = bool(args.feature_transform), bool(args.augment)
### Set random seed
args.seed = args.seed if args.seed > 0 else random.randint(1, 10000)
# dataset path
DATA_PATH = './data/modelnet40_normal_resampled/'
########################################
## Intiate model
########################################
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_classes = 40
if args.model == 'dgcnn_kcutmix':
model = DGCNN(num_classes)
model = model.to(device)
model = nn.DataParallel(model)
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr * 100,
momentum=0.9, weight_decay=1e-4)
scheduler_c = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 250,
eta_min=1e-3)
else:
if args.model == 'pointnet_kcutmix':
model = PointNetCls(num_classes, args.feature_transform)
model = model.to(device)
elif args.model == 'pointnet2_kcutmix':
model = PointNet2ClsMsg(num_classes)
model = model.to(device)
model = nn.DataParallel(model)
elif args.model == 'rscnn_kcutmix':
from models.rscnn import RSCNN
import models.rscnn_utils.pointnet2_utils as pointnet2_utils
model = RSCNN(num_classes)
model = model.to(device)
model = nn.DataParallel(model)
optimizer = torch.optim.Adam(
model.parameters(),
lr=args.lr,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=1e-4
)
scheduler_c = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
if len(args.resume) > 1:
print('=====> Loading from checkpoint...')
checkpoint = torch.load('./checkpoints/%s.pth' % args.resume)
args = checkpoint['args']
torch.manual_seed(args.seed)
print("Random Seed: ", args.seed)
"""if args.optimizer == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
elif args.optimizer == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))"""
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
START_EPOCH = checkpoint['epoch'] + 1
acc_list = checkpoint['acc_list']
if args.model == 'dgcnn_kcutmix':
scheduler_c = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 250, eta_min=1e-3)
else:
scheduler_c = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
print('Successfully resumed!')
else:
print('=====> Building new model...')
torch.manual_seed(args.seed)
print("Random Seed: ", args.seed)
START_EPOCH = 0
acc_list = [0]
print('Successfully built!')
########################################
## Load data
########################################
print('======> Loading data')
TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_points, split='train',
normal_channel=args.normal)
TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_points, split='test',
normal_channel=args.normal)
train_loader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True,
num_workers=4, drop_last=True)
test_loader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False,
num_workers=4, drop_last=False)
PointcloudScaleAndTranslate = PointcloudScaleAndTranslate()
print('======> Successfully loaded!')
gen_train_log(args)
logname = ('logs_train/%s_%s_%s.csv' % (args.data, args.model, args.name))
########################################
## Train
########################################
if args.model == 'dgcnn_kcutmix':
criterion = cal_loss
else:
criterion = F.cross_entropy # nn.CrossEntropyLoss()
if args.resume == '/':
log_row(logname, ['Epoch', 'Train Loss', 'Train Acc', 'Test Loss', 'Test Acc', 'learning Rate'])
for epoch in range(START_EPOCH, args.epochs):
print('\nEpoch: %d' % epoch)
scheduler_c.step(epoch)
model.train()
correct = 0
total = 0
for i, data in enumerate(train_loader, 0):
points, target = data
points, target = points.to(device), target.to(device)[:, 0]
points = PointcloudScaleAndTranslate(points)
if args.model == 'rscnn_kcutmix':
fps_idx = pointnet2_utils.furthest_point_sample(points, args.num_points) # (B, npoint)
fps_idx = fps_idx[:, np.random.choice(args.num_points, args.num_points, False)]
points = pointnet2_utils.gather_operation(points.transpose(1, 2).contiguous(), fps_idx).transpose(1,2).contiguous() # (B, N, 3)
# cutmix
optimizer.zero_grad()
r = np.random.rand(1)
if args.beta > 0 and r < args.cutmix_prob:
lam = np.random.beta(args.beta, args.beta)
B = points.size()[0]
rand_index = torch.randperm(B).cuda()
target_a = target
target_b = target[rand_index]
point_a = torch.zeros(B, 1024, 3)
point_b = torch.zeros(B, 1024, 3)
point_c = torch.zeros(B, 1024, 3)
point_a = points
point_b = points[rand_index]
point_c = points[rand_index]
point_a, point_b, point_c = point_a.to(device), point_b.to(device), point_c.to(device)
remd = emd.emdModule()
remd = remd.cuda()
dis, ind = remd(point_a, point_b, 0.005, 300)
for ass in range(B):
point_c[ass, :, :] = point_c[ass, ind[ass].long(), :]
int_lam = int(args.num_points * lam)
int_lam = max(1, int_lam)
random_point = torch.from_numpy(np.random.choice(1024, B, replace=False, p=None))
# kNN
ind1 = torch.tensor(range(B))
query = point_a[ind1, random_point].view(B, 1, 3)
dist = torch.sqrt(torch.sum((point_a - query.repeat(1, args.num_points, 1)) ** 2, 2))
idxs = dist.topk(int_lam, dim=1, largest=False, sorted=True).indices
for i2 in range(B):
points[i2, idxs[i2], :] = point_c[i2, idxs[i2], :]
# adjust lambda to exactly match point ratio
lam = int_lam * 1.0 / args.num_points
points = points.transpose(2, 1)
pred, trans_feat = model(points)
loss = criterion(pred, target_a.long()) * (1. - lam) + criterion(pred, target_b.long()) * lam
else:
points = points.transpose(2, 1)
pred, trans_feat = model(points)
loss = criterion(pred, target.long())
if args.feature_transform and args.model == 'pointnet_kcutmix':
loss += feature_transform_regularizer(trans_feat) * args.lambda_ft
loss.backward()
optimizer.step()
pred_choice = pred.data.max(1)[1]
correct += pred_choice.eq(target.data).cpu().sum()
total += target.size(0)
progress_bar(i, len(train_loader), 'Train Loss: %.3f | Train Acc: %.3f%% (%d/%d)'
% (loss.item() / (i + 1), 100. * correct.item() / total, correct, total))
train_loss, train_acc = loss.item() / (i + 1), 100. * correct.item() / total
### Test in batch
test_loss, test_acc = test(model, test_loader, criterion)
acc_list.append(test_acc)
print('the best test acc is', max(acc_list))
### Keep tracing
log_row(logname, [epoch, train_loss, train_acc, test_loss, test_acc,
optimizer.param_groups[0]['lr'], max(acc_list), np.argmax(acc_list) - 1])
save_ckpt(args, epoch, model, optimizer, acc_list)
| 2.234375 | 2 |
lib/ruuvitag/tracker.py | rroemhild/ruuvitag-ttn | 6 | 12762273 | <filename>lib/ruuvitag/tracker.py
from ruuvitag.format import RuuviTagRAW
from ubinascii import hexlify
from ruuvitag.core import RuuviTag
class RuuviTagTracker(RuuviTag):
    """Continuously scan for RuuviTags and report decoded sensor data.

    Every advertisement that decodes to a ``RuuviTagRAW`` is handed to a
    user-supplied callback.  Devices whose data cannot be decoded are
    assumed not to be RuuviTags and land on the blacklist, after which
    they are ignored until the device is reset.
    """

    def __init__(self, whitelist=None, antenna=None):
        super().__init__(whitelist, antenna)

    def track_ruuvitags(self, callback):
        """Scan indefinitely, invoking *callback* for each decoded tag."""
        scanner = self.bluetooth
        scanner.start_scan(-1)  # -1: scan until stopped externally
        allowed = self._whitelist
        while scanner.isscanning():
            advert = scanner.get_adv()
            if not advert:
                continue
            mac = hexlify(advert.mac, ":")
            # A configured whitelist takes precedence over the blacklist.
            if allowed is not None:
                if mac not in allowed:
                    continue
            elif mac in self._blacklist:
                continue
            decoded = self.get_tag(mac, advert)
            if isinstance(decoded, RuuviTagRAW):
                callback(decoded)
| 2.71875 | 3 |
07_calculation_game_on_web/01_multiplication.py | shakiyam/python4kids | 0 | 12762274 | import random
from flask import Flask
# Flask application instance for this single-route multiplication app.
app = Flask(__name__)
@app.route('/')
def index():
    """Serve a random single-digit multiplication fact as plain text."""
    first = random.randrange(1, 10)
    second = random.randrange(1, 10)
    product = first * second
    return '{} * {} = {}'.format(first, second, product)
| 2.75 | 3 |
study/vowel_summary.py | Kshitiz-Bansal/wavetorch | 470 | 12762275 | """Generate a summary of a previously trained vowel recognition model.
"""
import torch
import wavetorch
import argparse
import yaml
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
# Enable LaTeX text rendering in matplotlib when the optional helpers
# package is available; degrade gracefully otherwise.
try:
    from helpers.plot import mpl_set_latex
    mpl_set_latex()
except ImportError:
    import warnings
    warnings.warn('The helpers package is unavailable', ImportWarning)
# Plot colours for the training / testing curves.
COL_TRAIN = "#1f77b4"
COL_TEST = "#2ca02c"
# CLI: path to a trained model plus figure controls.
parser = argparse.ArgumentParser()
parser.add_argument('filename', type=str)  # trained model file to summarize
parser.add_argument('--vmin', type=float, default=1e-3)  # field colour scale min
parser.add_argument('--vmax', type=float, default=1.0)  # field colour scale max
parser.add_argument('--fig', type=str, default=None)  # explicit output figure path
parser.add_argument('--title_off', action='store_true')
parser.add_argument('--labels', action='store_true')  # add panel labels (a, b, ...)
parser.add_argument('--vowel_samples', nargs='+', type=int, default=None)  # sample index per class
if __name__ == '__main__':
    args = parser.parse_args()
    # Load the trained model together with its training history and config.
    model, history, history_state, cfg = wavetorch.io.load_model(args.filename)
    # NOTE(review): bare except silently ignores a missing 'seed' key.
    try:
        if cfg['seed'] is not None:
            torch.manual_seed(cfg['seed'])
    except:
        pass
    print("Configuration for model in %s is:" % args.filename)
    print(yaml.dump(cfg, default_flow_style=False))
    sr = cfg['data']['sr']
    gender = cfg['data']['gender']
    vowels = cfg['data']['vowels']
    N_classes = len(vowels)
    # Layout: confusion matrices and loss/accuracy curves on the left,
    # one total-field panel per vowel class (plus a colorbar row) on the right.
    fig = plt.figure( figsize=(7, 4.75), constrained_layout=True)
    gs = fig.add_gridspec(1, 2, width_ratios=[1, 0.4])
    gs_left = gs[0].subgridspec(3, 2)
    gs_right = gs[1].subgridspec(N_classes+1, 1, height_ratios=[1 for i in range(0,N_classes)] + [0.05])
    gs_bot = gs_left[2,:].subgridspec(1, 2)
    ax_cm_train0 = fig.add_subplot(gs_left[0,0])
    ax_cm_test0 = fig.add_subplot(gs_left[0,1])
    ax_cm_train1 = fig.add_subplot(gs_left[1,0])
    ax_cm_test1 = fig.add_subplot(gs_left[1,1])
    ax_loss = fig.add_subplot(gs_bot[0])
    ax_acc = fig.add_subplot(gs_bot[1])
    ax_fields = [fig.add_subplot(gs_right[i]) for i in range(0, N_classes+1)]
    # Per-epoch mean and spread over the (possibly multiple) history rows.
    history_mean = history.groupby('epoch').mean()
    history_std = history.groupby('epoch').std()
    epochs = history_mean.index.values
    # Loss curves with +/- one standard deviation bands.
    ax_loss.fill_between(epochs,
                         history_mean['loss_train'].values-history_std['loss_train'].values,
                         history_mean['loss_train'].values+history_std['loss_train'].values, color=COL_TRAIN, alpha=0.15)
    ax_loss.plot(epochs, history_mean['loss_train'].values, "-", label="Training dataset", ms=4, color=COL_TRAIN)
    ax_loss.fill_between(epochs,
                         history_mean['loss_test'].values-history_std['loss_test'].values,
                         history_mean['loss_test'].values+history_std['loss_test'].values, color=COL_TEST, alpha=0.15)
    ax_loss.plot(epochs, history_mean['loss_test'].values, "-", label="Testing dataset", ms=4, color=COL_TEST)
    ax_loss.set_ylabel('Loss')
    ax_loss.set_xlabel('Training epoch \#')
    # Accuracy curves (in percent) with the same deviation bands.
    ax_acc.plot(epochs, history_mean['acc_train'].values*100, "-", label="Training dataset", ms=4, color=COL_TRAIN)
    ax_acc.fill_between(epochs,
                        history_mean['acc_train'].values*100-history_std['acc_train'].values*100,
                        history_mean['acc_train'].values*100+history_std['acc_train'].values*100, color=COL_TRAIN, alpha=0.15)
    ax_acc.plot(epochs, history_mean['acc_test'].values*100, "-", label="Testing dataset", ms=4, color=COL_TEST)
    ax_acc.fill_between(epochs,
                        history_mean['acc_test'].values*100-history_std['acc_test'].values*100,
                        history_mean['acc_test'].values*100+history_std['acc_test'].values*100, color=COL_TEST, alpha=0.15)
    ax_acc.set_xlabel('Training epoch \#')
    ax_acc.set_ylabel('Accuracy')
    ax_acc.yaxis.set_major_locator(mpl.ticker.MultipleLocator(base=10))
    # ax_acc.set_ylim([20,100])
    ax_loss.yaxis.set_major_locator(mpl.ticker.MultipleLocator(base=0.1))
    # ax_loss.set_ylim([0.7,1.2])
    ax_acc.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0f\%%'))
    ax_loss.legend(fontsize='small')
    # ax_acc.annotate("%.1f%% training set accuracy" % (history_mean['acc_train'].tail(1).iloc[0]*100), xy=(0.1,0.1), xytext=(0,10), textcoords="offset points", xycoords="axes fraction", ha="left", va="bottom", color=COL_TRAIN)
    # ax_acc.annotate("%.1f%% testing set accuracy" % (history_mean['acc_test'].tail(1).iloc[0]*100), xy=(0.1,0.1), xycoords="axes fraction", ha="left", va="bottom", color=COL_TEST)
    # Annotate the final train/test accuracies next to the curve endpoints.
    ax_acc.annotate('%.1f\%%' % (history_mean['acc_train'].tail(1).iloc[0]*100),
                    xy=(epochs[-1], history_mean['acc_train'].tail(1).iloc[0]*100), xycoords='data',
                    xytext=(-1, 5), textcoords='offset points', ha='left', va='center', fontsize='small',
                    color=COL_TRAIN, bbox=wavetorch.plot.bbox_white)
    ax_acc.annotate('%.1f\%%' % (history_mean['acc_test'].tail(1).iloc[0]*100),
                    xy=(epochs[-1], history_mean['acc_test'].tail(1).iloc[0]*100), xycoords='data',
                    xytext=(-1, -5), textcoords='offset points', ha='left', va='center', fontsize='small',
                    color=COL_TEST, bbox=wavetorch.plot.bbox_white)
    print('Accuracy (train): %.1f%% +/- %.1f%%' % (history_mean['acc_train'].tail(1).iloc[0]*100, history_std['acc_train'].tail(1).iloc[0]*100))
    print('Accuracy (test): %.1f%% +/- %.1f%%' % (history_mean['acc_test'].tail(1).iloc[0]*100, history_std['acc_test'].tail(1).iloc[0]*100))
    # Confusion matrices: first epoch (top row of panels) ...
    cm_train = history.groupby('epoch')['cm_train'].apply(np.mean).head(1).iloc[0]
    cm_test = history.groupby('epoch')['cm_test'].apply(np.mean).head(1).iloc[0]
    wavetorch.plot.confusion_matrix(cm_train, title="Training dataset", normalize=True, ax=ax_cm_train0, labels=vowels)
    wavetorch.plot.confusion_matrix(cm_test, title="Testing dataset", normalize=True, ax=ax_cm_test0, labels=vowels)
    # ... and final epoch (second row of panels).
    cm_train = history.groupby('epoch')['cm_train'].apply(np.mean).tail(1).iloc[0]
    cm_test = history.groupby('epoch')['cm_test'].apply(np.mean).tail(1).iloc[0]
    wavetorch.plot.confusion_matrix(cm_train, title="Training dataset", normalize=True, ax=ax_cm_train1, labels=vowels)
    wavetorch.plot.confusion_matrix(cm_test, title="Testing dataset", normalize=True, ax=ax_cm_test1, labels=vowels)
    # One propagated total-field panel per vowel class.
    X, Y, F = wavetorch.data.load_all_vowels(vowels, gender='both', sr=sr, random_state=0)
    # model.load_state_dict(history_state[cfg['training']['N_epochs']])
    for i in range(N_classes):
        xb, yb = wavetorch.data.select_vowel_sample(X, Y, F, i, ind=args.vowel_samples[i] if args.vowel_samples is not None else None)
        with torch.no_grad():
            field_dist = model(xb, output_fields=True)
        wavetorch.plot.total_field(model, field_dist, yb, ax=ax_fields[yb.argmax().item()], cbar=True, cax=ax_fields[-1], vmin=args.vmin, vmax=args.vmax)
    # Optional panel labels (a, b, c, ...) via the helpers package.
    if args.labels:
        try:
            from helpers.plot import apply_panel_labels
            apply_panel_labels([ax_cm_train0, ax_cm_test0, ax_cm_train1, ax_cm_test1, ax_loss, ax_acc] + ax_fields[0:-1],
                               xy=[(-35,0), (-35,0), (-35,0), (-35,0), (-25,0), (-40,0), (8,-6), (8,-6), (8,-6)],
                               color=['k', 'k', 'k', 'k', 'k', 'k', 'w', 'w', 'w'],
                               case='upper')
        except ImportError:
            import warnings
            warnings.warn('The helpers package is unavailable', ImportWarning)
    plt.show()
    # Save next to the model file unless an explicit path was supplied.
    if args.fig is not None:
        fig.savefig(args.fig, dpi=300)
    else:
        fig.savefig(os.path.splitext(args.filename)[0]+"_summary.png", dpi=300)
| 2.59375 | 3 |
tests/test_list_shortcut.py | jacebrowning/yorm | 20 | 12762276 | <gh_stars>10-100
# pylint: disable=redefined-outer-name,expression-not-assigned,attribute-defined-outside-init,no-member
from expecter import expect
import yorm
from yorm.types import List, Float
from . import strip
@yorm.attr(things=List.of_type(Float))
@yorm.sync("tmp/example.yml")
class Example:
    """An example class mapping a list using the shortened syntax.

    The ``things`` attribute is persisted to ``tmp/example.yml`` as a
    list of floats via the ``List.of_type(Float)`` shortcut.
    """
def test_list_mapping_using_shortened_syntax():
    """Values assigned to ``things`` are serialized as floats in the YAML."""
    obj = Example()
    # Mixed int/float/str input; List.of_type(Float) normalises all of it.
    obj.things = [1, 2.0, "3"]
    # The mapper's serialized text shows every element coerced to a float.
    expect(obj.__mapper__.text) == strip("""
    things:
    - 1.0
    - 2.0
    - 3.0
    """)
| 1.96875 | 2 |
HW1/deeprl_hw1_src/setup.py | sssssdzxc/CMU10703_Assignments | 1 | 12762277 | <gh_stars>1-10
#!/usr/bin/env python
from distutils.core import setup
# Package metadata for the 10-703 homework library.
metadata = dict(
    name='DeepRL Homework 1',
    version='1.0',
    description='Library for 10-703 Homework 1',
    packages=['deeprl_hw1'],
)
setup(**metadata)
| 1.078125 | 1 |
preprocess.py | NNDEV1/ImageCaptioning | 3 | 12762278 | <reponame>NNDEV1/ImageCaptioning
import numpy as np
import os
import matplotlib.pyplot as plt
import pandas as pd
import shutil
import pickle
from keras.applications import ResNet50
from keras.applications.resnet50 import preprocess_input, decode_predictions
from keras.models import Model,Sequential,load_model
from keras.layers import *
import re
import json
import collections
import time
from keras.utils import to_categorical
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing import image
def load_doc(path):
    """Read and return the full contents of a UTF-8 text file at *path*."""
    with open(path, encoding='utf8') as handle:
        return handle.read()
# Load the Flickr8k token file: one "<image>.jpg#<k>\t<caption>" per line.
text = load_doc("/content/all_captions/Flickr8k.token.txt")
captions = text.split('\n')[:-1] #last line is empty
# Notebook-style sanity checks on the first record (no-op expression kept).
captions[0]
cap = captions[0].split('\t')
img_id = cap[0].split('.')[0]
caption = cap[1]
def get_img_caption_mapping(text):
    """Build a mapping from image id to its list of captions.

    Each element of *text* is a Flickr8k token line of the form
    ``<image>.jpg#<k>\t<caption>``; the image id is the file name with
    its extension stripped.

    Fix: the original implementation ignored its ``text`` argument and
    iterated the module-level ``captions`` list instead, which broke any
    call with a different argument.

    :param text: iterable of caption lines (e.g. ``captions``).
    :return: dict mapping image id -> list of raw caption strings.
    """
    description = {}
    for line in text:
        parts = line.split('\t')
        img_id = parts[0].split('.')[0]
        cap = parts[1]
        # setdefault replaces the original "create-if-missing" branch.
        description.setdefault(img_id, []).append(cap)
    return description
# Map every image id to its list of raw captions.
descriptions = get_img_caption_mapping(captions)
IMG_path = "/content/all_images/Flicker8k_Dataset/"
# Quick visual sanity check of one dataset image, scaled to [0, 1].
img = image.load_img(IMG_path+"1000268201_693b08cb0e.jpg",target_size=(299, 299, 3))
img = image.img_to_array(img)/255.
plt.imshow(img)
def cleaned_captions(caption):
    """Normalise a caption for vocabulary building.

    Lowercases the text, replaces every non-alphabetic run with a single
    space, drops one-character tokens, and rejoins with single spaces.

    Fix: the original returned the undefined name ``sentence``
    (NameError); it now returns the cleaned ``caption``.

    :param caption: raw caption string.
    :return: cleaned, space-joined caption string.
    """
    caption = caption.lower()
    caption = re.sub("[^a-z]+", " ", caption)
    words = [w for w in caption.split() if len(w) > 1]
    return " ".join(words)


# Backwards-compatible alias: later code in this script calls
# ``clean_captions``, which was previously undefined (NameError).
clean_captions = cleaned_captions
# Clean every caption in place.
# NOTE(review): ``clean_captions`` is not defined above -- the function is
# named ``cleaned_captions`` -- so this loop raises NameError as written.
for img_id,caption_list in descriptions.items():
    for i in range(len(caption_list)):
        caption_list[i] = clean_captions(caption_list[i])
# Persist the cleaned mapping as the repr of a dict ...
with open("description.txt",'w',encoding='utf8') as f:
    f.write(str(descriptions))
description = None
with open("description.txt",'r') as f:
    description = f.read()
# ... and re-parse it by converting single quotes to JSON double quotes.
# NOTE(review): this round-trip breaks if any caption contains a quote.
json_acceptable_string = description.replace("'","\"")
description = json.loads(json_acceptable_string)
# Build the vocabulary (unique words) and the flat list of all words.
vocab = set()
total_words = []
for key in description.keys():
    [vocab.update(sentence.split()) for sentence in description[key]]
    [total_words.append(i) for des in description[key] for i in des.split()]
print(len(total_words),len(vocab))
# Word frequencies; keep only words occurring more than `threshold` times.
counter = collections.Counter(total_words)
freq_count = dict(counter)
print(freq_count['the'])
sorted_freq_cnt = sorted(freq_count.items(),reverse=True,key = lambda x:x[1])
threshold = 10
sorted_freq_cnt = [x for x in sorted_freq_cnt if x[1]>threshold]
total_words = [x[0] for x in sorted_freq_cnt]
print(len(total_words))
# Official Flickr8k train/test image-id splits.
train = load_doc("/content/all_captions/Flickr_8k.trainImages.txt")
test = load_doc("/content/all_captions/Flickr_8k.testImages.txt")
FCUnet.py | maxiangyueytu/FCUnet | 0 | 12762279 | import math
import numpy as np
import torch
import torch.nn as nn
import itertools
class FuzzyLayer(nn.Module):
    """Fuzzy membership layer.

    The input is squeezed to a single channel, each pixel's Gaussian
    membership against ``fuzzynum`` learned (mu, sigma) pairs is
    computed, and the result is expanded back to ``channel`` channels.

    Fixes vs. the original implementation:
    * the membership is computed with a vectorised tensor expression
      instead of a Python loop over every pixel (same math, orders of
      magnitude faster);
    * the intermediate tensor lives on the input's device, removing the
      reference to an undefined global ``device`` (NameError).
    """

    def __init__(self, fuzzynum, channel):
        super(FuzzyLayer, self).__init__()
        self.n = fuzzynum
        self.channel = channel
        # Squeeze to one channel, then restore the channel count.
        self.conv1 = nn.Conv2d(self.channel, 1, 3, padding=1)
        self.conv2 = nn.Conv2d(1, self.channel, 3, padding=1)
        # Learned Gaussian membership parameters, one (mu, sigma) row
        # per channel and one column per fuzzy set.
        self.mu = nn.Parameter(torch.randn((self.channel, self.n)))
        self.sigma = nn.Parameter(torch.randn((self.channel, self.n)))
        self.bn1 = nn.BatchNorm2d(1, affine=True)
        self.bn2 = nn.BatchNorm2d(self.channel, affine=True)

    def forward(self, x):
        x = self.conv1(x)  # (B, C, H, W); C == 1 after conv1
        c = x.size(1)
        # Broadcast over a trailing fuzzy dimension: for each pixel
        # accumulate -sum_f ((x - mu_f) / sigma_f) ** 2, exactly
        # matching the original per-pixel loop.
        mu = self.mu[:c].view(1, c, 1, 1, self.n)
        sigma = self.sigma[:c].view(1, c, 1, 1, self.n)
        tmp = -(((x.unsqueeze(-1) - mu) / sigma) ** 2).sum(-1)
        fNeural = self.bn2(self.conv2(self.bn1(torch.exp(tmp))))
        return fNeural
class FuzzyNet(nn.Module):
    """VGG-style encoder with fuzzy side branches and a deconv decoder.

    Four conv/pool stages (halving resolution each time) are followed by
    four transposed-convolution upsampling stages.  A FuzzyLayer output
    is computed after every pooling stage.

    NOTE(review): as written, ``forward`` references an undefined name
    ``g`` (NameError at runtime) and the fuzzy features f1..f4 are never
    consumed -- see the notes inside ``forward``.
    """
    def __init__(self, n_class=2, testing=False):
        # NOTE(review): ``n_class`` is unused; deconv4 hard-codes 6
        # output channels -- confirm the intended class count.
        super(FuzzyNet, self).__init__()
        # Fuzzy branches, one per encoder stage (64..512 channels).
        self.fuzzy_4 = FuzzyLayer(fuzzynum=1,channel=512)
        self.fuzzy_3 = FuzzyLayer(fuzzynum=1,channel=256)
        self.fuzzy_2 = FuzzyLayer(fuzzynum=1,channel=128)
        self.fuzzy_1 = FuzzyLayer(fuzzynum=1, channel=64)
        self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
        self.relu1_1 = nn.ReLU(inplace=True)
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
        self.relu1_2 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # 1/2
        self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
        self.relu2_1 = nn.ReLU(inplace=True)
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
        self.relu2_2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # 1/4
        self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
        self.relu3_1 = nn.ReLU(inplace=True)
        self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
        self.relu3_2 = nn.ReLU(inplace=True)
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
        self.relu3_3 = nn.ReLU(inplace=True)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # 1/8
        self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu4_2 = nn.ReLU(inplace=True)
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu4_3 = nn.ReLU(inplace=True)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # 1/16
        # Decoder: doubles the resolution at each stage.
        self.deconv1 = nn.ConvTranspose2d(in_channels=512,out_channels=256,kernel_size=(2,2),stride=(2,2),bias=False)
        self.deconv2 = nn.ConvTranspose2d(in_channels=256,out_channels=128,kernel_size=(2,2),stride=(2,2),bias=False)
        self.deconv3 = nn.ConvTranspose2d(in_channels=128,out_channels=64,kernel_size=(2,2),stride=(2,2),bias=False)
        self.deconv4 = nn.ConvTranspose2d(in_channels=64,out_channels=6,kernel_size=(2,2),stride=(2,2),bias=False) #
        # Batch norms for the fuzzy branches ...
        self.fbn1 = nn.BatchNorm2d(64, affine=True)
        self.fbn2 = nn.BatchNorm2d(128, affine=True)
        self.fbn3 = nn.BatchNorm2d(256, affine=True)
        self.fbn4 = nn.BatchNorm2d(512, affine=True)
        # ... and for the decoder stages.
        self.bn1 = nn.BatchNorm2d(512, affine=True)
        self.bn2 = nn.BatchNorm2d(256, affine=True)
        self.bn3 = nn.BatchNorm2d(128, affine=True)
        self.bn4 = nn.BatchNorm2d(64, affine=True)
        self.testing = testing
        self._initialize_weights()
    def _initialize_weights(self):
        """He-style normal init for conv/deconv weights; zero biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                #m.weight.data.zero_()
                if m.bias is not None:
                    m.bias.data.zero_()
            if isinstance(m, nn.ConvTranspose2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels*m.in_channels
                m.weight.data.normal_(0,math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            if isinstance(m,nn.Linear):
                m.weight.data.normal_(0, 1)
        for param in self.parameters():
            param.requires_grad = True
    def forward(self, x):
        h = x
        h = self.relu1_1(self.conv1_1(h))
        h = self.relu1_2(self.conv1_2(h))
        h = self.pool1(h)
        c1 = h
        # Fuzzy residual at 1/2 resolution (64 channels).
        f1 = self.fbn1(self.fuzzy_1(c1))+c1
        h = self.relu2_1(self.conv2_1(h))
        h = self.relu2_2(self.conv2_2(h))
        # NOTE(review): ``g`` is never defined in this method -- this line
        # raises NameError.  Presumably a fuzzy feature map was meant to
        # be added here, but f1 has 64 channels vs h's 128, so the
        # intended operand needs confirming with the author.
        h = self.pool2(h+g)
        c2 = h
        f2 = self.fbn2(self.fuzzy_2(c2))+c2
        h = self.relu3_1(self.conv3_1(h))
        h = self.relu3_2(self.conv3_2(h))
        h = self.relu3_3(self.conv3_3(h))
        # NOTE(review): same undefined ``g`` as above.
        h = self.pool3(h+g)
        c3 = h
        f3 = self.fbn3(self.fuzzy_3(c3))+c3
        h = self.relu4_1(self.conv4_1(h))
        h = self.relu4_2(self.conv4_2(h))
        h = self.relu4_3(self.conv4_3(h))
        # NOTE(review): same undefined ``g`` as above.
        h = self.pool4(h+g)
        c4 = h
        # NOTE(review): f1..f4 are computed but never consumed below.
        f4 = self.fbn4(self.fuzzy_4(c4))
        h = self.bn1(h)
        h = self.bn2(self.deconv1(h))
        h = self.bn3(self.deconv2(h))
        h = self.bn4(self.deconv3(h))
        h = self.deconv4(h)
        return h
| 2.46875 | 2 |
tes/data.py | Leo-am/tespackage | 0 | 12762280 | <reponame>Leo-am/tespackage<gh_stars>0
"""
Functions:
1) FpgaStats
2) capture
3) read_mca
4) av_trace
5) _memmap_data
Classes:
1) FpgaStats
2) CaptureResult
3) CaptureData
"""
import logging
from collections import namedtuple
import zmq
import numpy as np
from numpy import logical_and as and_l, logical_or as or_l
from .mca import Distribution
import os
from tes.base import (
fidx_dt,
tidx_dt,
tick_dt,
pulse_fmt,
payload_ref_dt,
Payload,
rise_fmt,
area_fmt,
av_trace_fmt,
dot_product_fmt,
pulse_rise_fmt,
)
_logger = logging.getLogger(__name__)
class FpgaStats(
    namedtuple("FpgaOutputStats", "frames dropped bad tick mca trace event types")
):
    """Immutable snapshot of FPGA ethernet output statistics."""

    __slots__ = ()

    def __repr__(self):
        summary = "\n".join([
            f"Ethernet frames:{self.frames} - dropped:{self.dropped}, invalid:{self.bad}",
            f"Ticks:{self.tick}, MCA headers:{self.mca}, Trace headers:{self.trace}",
            f"Events:{self.event}",
        ])
        if self.event != 0:
            # Decode the bitmask of payload types seen in the stream.
            active = [Payload(n) for n in range(8) if self.types & (1 << n)]
            summary += f", {active!r}"
        return summary
def fpga_stats(time=1.0):
    """
    Diagnostic statistics for the FPGA ethernet output.

    Sends a single REQ/REP request to the capture daemon and parses the
    reply frames into an :class:`FpgaStats`.

    :param float time: time to capture statistics for
    :return: FpgaStats object that subclasses namedtuple.
    :raises RuntimeError: if the daemon reports a malformed request.
    """
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.REQ)
    sock.connect("tcp://smp-qtlab11.instrument.net.uq.edu.au:55554")
    # Request: one frame holding the capture duration as UTF-8 text.
    sock.send_multipart(
        [bytes("{}".format(time), encoding="utf8"),]
    )
    r = sock.recv_multipart()
    # r[0] is a status code; 1 means the daemon rejected the request.
    if int(r[0]) == 1:
        print(r)
        raise RuntimeError("Malformed request")
    # Remaining frames map positionally onto the FpgaStats fields.
    # NOTE(review): the REQ socket is never closed; fine for a short
    # session but worth confirming for long-running processes.
    return FpgaStats(
        frames=int(r[1]),
        dropped=int(r[2]),
        bad=int(r[3]),
        tick=int(r[4]),
        mca=int(r[5]),
        trace=int(r[6]),
        event=int(r[7]),
        types=int(r[8]),
    )
class CaptureResult(
    namedtuple("CaptureResult", "ticks events traces mca frames dropped invalid")
):
    """Immutable summary of a completed capture run."""

    __slots__ = ()

    def __repr__(self):
        return (
            f"Frames captured:{self.frames} - dropped:{self.dropped}, "
            f"invalid:{self.invalid}\n"
            f"Ticks:{self.ticks}, "
            f"MCA histograms:{self.mca}, "
            f"Traces:{self.traces}, "
            f"Events:{self.events}"
        )
def capture(
    filename,
    measurement,
    use_existing=1,
    access_mode=1,
    ticks=10,
    events=0,
    conversion_mode=0,
):
    """
    Capture FPGA output as a collection of data and index files.

    Sends a REQ/REP request to the capture daemon and blocks until the
    capture (and any requested conversion) completes.

    :param filename: capture file name on the daemon side.
    :param measurement: measurement name stored with the capture.
    :param use_existing: reuse an existing file when nonzero (presumably;
        confirm against the daemon's protocol).
    :param access_mode: file access mode flag passed to the daemon.
    :param ticks: number of tick events to capture.
    :param events: number of events to capture.
    :param conversion_mode: post-capture conversion flag (e.g. to hdf5).
    :return: CaptureResult summarising the completed capture.
    :raises RuntimeError: if the daemon reports an error status.
    """
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.REQ)
    sock.connect("tcp://smp-qtlab11.instrument.net.uq.edu.au:55555")
    # All request parameters are sent as UTF-8 text frames.
    sock.send_multipart(
        [
            bytes(str(filename), encoding="utf8"),
            bytes(str(measurement), encoding="utf8"),
            bytes("{}".format(use_existing), encoding="utf8"),
            bytes("{}".format(access_mode), encoding="utf8"),
            bytes("{}".format(ticks), encoding="utf8"),
            bytes("{}".format(events), encoding="utf8"),
            bytes("{}".format(conversion_mode), encoding="utf8"),
        ]
    )
    r = sock.recv_multipart()
    # r[0] is the daemon's status code; nonzero means failure.
    if int(r[0]) != 0:
        print(int(r[0]))
        if int(r[0]) == 1:
            msg = "Malformed request"
        elif int(r[0]) == 2:
            if ticks == 0 and events == 0:
                msg = "file error: status requested and file does not exist"
            else:
                msg = "file error: file exists and overwiting not enabled"
        elif int(r[0]) == 3:
            msg = "bad path to file"
        elif int(r[0]) == 4:
            msg = "initialisation error durining capture or conversion"
        elif int(r[0]) == 5:
            msg = "error while writing file"
        elif int(r[0]) == 6:
            msg = "error while converting to hdf5"
        elif int(r[0]) == 7:
            msg = "error writing stats or deleting files after conversion"
        else:
            msg = "unknown error"
        raise RuntimeError(msg)
    # r[6] is the dropped-frame count; warn but do not fail.
    if int(r[6]) != 0:
        print("WARNING {} frames dropped".format(int(r[6])))
    # Remaining frames map positionally onto the CaptureResult fields.
    return CaptureResult(
        ticks=int(r[1]),
        events=int(r[2]),
        traces=int(r[3]),
        mca=int(r[4]),
        frames=int(r[5]),
        dropped=int(r[6]),
        invalid=int(r[7]),
    )
def read_mca(n):
    """
    Capture MCA histograms.

    Subscribes to the daemon's PUB socket and blocks until *n*
    histogram messages have been received.

    :param int n: number of histograms to capture
    :return: List of tes.mca.Distributions.
    """
    context = zmq.Context.instance()
    sock = context.socket(zmq.SUB)
    sock.connect("tcp://smp-qtlab11.instrument.net.uq.edu.au:55565")
    sock.subscribe("")  # empty prefix: receive every published message
    dists = []
    for i in range(n):
        dists.append(Distribution(sock.recv()))
    return dists
def av_trace(timeout=30):
    """
    Capture an average trace.

    :param timeout: Timeout value in seconds.
    :return: raw average-trace reply frame (bytes) from the daemon.
    :raises RuntimeError: on timeout (status 2) or invalid request (1).
    """
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.REQ)
    sock.connect("tcp://smp-qtlab11.instrument.net.uq.edu.au:55556")
    # Request: one frame holding the timeout as UTF-8 text.
    sock.send_multipart(
        [bytes("{}".format(timeout), encoding="utf8"),]
    )
    r = sock.recv_multipart()
    # r[0] is a status code: 0 success, 1 invalid request, 2 timeout.
    if int(r[0]) == 2:
        raise RuntimeError("Time out while waiting for average trace.")
    if int(r[0]) == 1:
        raise RuntimeError("Invalid request.")
    return r[1]
def _memmap_data(path, file, dtype=np.uint8):
fpath = os.path.join(path, file)
if os.stat(fpath).st_size == 0:
return None
else:
return np.memmap(fpath, dtype=dtype)
# File stems used by CaptureData when persisting extracted arrays (.npz).
index_name = "nonhomogeneous"
samples_name = "extracted_samples"
class CaptureData:
def __init__(self, path):
if not os.path.isdir(path):
raise AttributeError("{} is not a directory".format(path))
self.path = path
# memmap the files from the capture server
self.edat = _memmap_data(path, "edat", dtype=np.uint8)
self.fidx = _memmap_data(path, "fidx", dtype=fidx_dt)
self.tidx = _memmap_data(path, "tidx", dtype=tidx_dt)
self.tdat = _memmap_data(path, "tdat", dtype=tick_dt)
self.midx = _memmap_data(path, "midx", dtype=payload_ref_dt)
self.mdat = _memmap_data(path, "mdat", dtype=np.uint8)
self.ridx = _memmap_data(path, "ridx", dtype=payload_ref_dt)
self.bdat = _memmap_data(path, "bdat", dtype=np.uint8)
self._frame_payload_types = self.fidx["type"] & 0x0F
self.event_types = set(self._frame_payload_types[self._frame_payload_types < 7])
self._sequence_error_mask = self.fidx["type"] & 0x80 != 0
self.sequence_errors = len(self._sequence_error_mask.nonzero()[0])
self.has_frame_errors = (
len((self._frame_payload_types == Payload.bad_frame).nonzero()[0]) != 0
or self._sequence_error_mask[0] != 0
)
self.has_traces = self.ridx is not None and len(self.ridx) != 0
# mask for fidx indicating frames carrying multiple events
pf_mask = or_l(self._frame_payload_types < 3, self._frame_payload_types == 5)
pf = self.fidx[pf_mask]
pf_lengths = pf["length"] // (pf["event_size"] * 8)
event_count = np.sum(pf_lengths, 0)
if self.has_traces:
event_count += np.uint64(len(self.ridx))
self.event_count = event_count
# find max_rises
pulse_mask = self.fidx["type"] == Payload.pulse
pulse_sizes = set(self.fidx["event_size"][pulse_mask])
max_rises = 0
for s in pulse_sizes:
max_rises = max(max_rises, s - 2)
"""
Homogeneous data sets are generated when each channel produces events
of the same type and size, in this case edat is a contiguous array.
Otherwise the transport frames need to be iterated over to extract the
fields.
"""
event_fields = {}
tick_idx = np.zeros(len(self.tidx), np.uint64)
if len(self.fidx["changed"].nonzero()[0]) == 0:
# The event stream is homogeneous, edat has a single dtype
event_frame_idxs = np.where(self._frame_payload_types < 7)[0]
self._homogeneous = self._dtype_from_frame(
event_frame_idxs[0], full=True
) # The single dtype for edat
data = self.edat.view(self._homogeneous)
# record fields to the event_fields dict
for field in self._homogeneous.names:
if field == "time":
# use copy of time field to it can be adjusted for ticks
# without changing the original data
event_fields[field] = np.array(data[field], copy=True)
else:
event_fields[field] = data[field]
# create tick_idx that contains the indices in edat, viewed as the
# homogeneous type, that immediately follow tick events.
for t in range(0, len(self.tidx)):
f_start = self.tidx[t]["start"] # first frame in tick
f_last = self.tidx[t]["stop"] # last frame in tick
p_start = self.fidx[f_start]["payload"] # start payload range
p_stop = p_start + self.fidx[f_last]["length"]
if self.has_traces: # homogeneous, must be all traces
# indices of traces in this payload range
# FIXME breaks when there are no traces between ticks
i = np.where(
and_l(
self.ridx["start"] >= p_start, self.ridx["start"] < p_stop
)
)[0]
if t < len(tick_idx) - 1:
tick_idx[t] = i[-1] + 1
else:
tick_idx[t] = p_start // self._homogeneous.itemsize
self._fields = event_fields
self._tick_idx = tick_idx
self._retime()
else:
self._homogeneous = None
# extract common fields from frames to form contiguous arrays
# extract rises as contiguous array with max_rises per event
# TODO generalise to more than two channels and optimise performance
print("Non homogeneous capture.")
if (path / index_name).with_suffix(".npz").exists():
print("Loading data extracted previously.")
nh_data = np.load((path / index_name).with_suffix(".npz"))
self._idx = nh_data["idx"]
self._trace_mask = nh_data["trace_mask"]
self._fields = nh_data["event_fields"][()]
else:
print("Extracting data from transport frames.")
type_fields = [self._event_fields(p) for p in self.event_types]
common_fields = set(type_fields[0].keys())
for field in type_fields[:-1]:
common_fields = common_fields & set(field.keys())
# allocate contiguous arrays for collating the fields
for field in common_fields:
# TODO
"""
Add better handling when the number of rises is different
for each channel.
"""
if field == "rise":
event_fields[field] = np.zeros(
(event_count, max_rises), np.dtype(pulse_rise_fmt)
)
else:
for t in type_fields:
if field in t:
event_fields[field] = np.zeros(event_count, t[field][0])
break
# trace_mask True means idx points to ridx else fidx
trace_mask = np.zeros(self.event_count, np.bool)
idx = np.zeros(self.event_count, np.uint64)
t_i = 0
e_i = 0
r_i = 0
# adjust time field to account for tick events
for f in range(len(self.fidx)):
f_type = self.fidx[f]["type"] & 0x0F
if f_type == Payload.tick:
if t_i < len(tick_idx):
tick_idx[t_i] = e_i if e_i < event_count else event_count
t_i += 1
elif f_type in [0, 1, 2, 5]:
# frame carrying multiple events
f_data = self._frame_event_data(f)
stop = e_i + len(f_data)
for field in event_fields:
event_fields[field][e_i:stop] = f_data[field]
idx[e_i:stop] = f
e_i += len(f_data)
elif f_type in [3, 4, 6] and self.fidx[f]["type"] & 0x40:
# trace_header
if self.ridx[r_i][0] == self.fidx[f][0]: # good trace
f_data = self._frame_event_data(f)
for field in event_fields:
event_fields[field][e_i] = f_data[field]
trace_mask[e_i] = True
idx[e_i] = r_i
e_i += 1
r_i += 1
if self.has_traces:
# add samples field but only extract on first request.
event_fields["samples"] = None
self._idx = idx
self._trace_mask = trace_mask
self._fields = event_fields
self._tick_idx = tick_idx
self._retime()
np.savez(
path / index_name,
idx=idx,
trace_mask=trace_mask,
event_fields=event_fields,
)
    def _extract_samples(self):
        """
        Extract samples from a non homogeneous dataset.
        :return: ndarray of samples
        :notes: Assumes that all traces in the data set are the same shape.
        """
        # Size the output from the first trace: (length - 8 * offset) // 2
        # converts the payload byte length to a count of 16-bit samples
        # (offset is presumably counted in 8-byte words -- TODO confirm).
        offset, length, trace_type = self._trace_specs_from_ridx(0)
        samples = np.zeros((len(self.ridx), (length - 8 * offset) // 2))
        # Copy each trace's samples row by row; every trace must match the
        # shape derived from the first one (see :notes: above).
        for r in range(len(self.ridx)):
            samples[r, :] = self._trace(self.ridx[r])["samples"]
        return samples
    def _retime(self):
        """
        Adjust time field by compensating for relative time in each tick frame.
        :return: None
        """
        # The first event's time is pinned to the uint16 maximum (sentinel).
        self._fields["time"][0] = 2 ** 16 - 1
        # NOTE(review): the loop bound uses self.tidx but tick times are read
        # from self.tdat -- assumed to be parallel arrays, confirm.
        for t in range(1, len(self.tidx)):
            if self._tick_idx[t] < len(self._fields["time"]):
                # Add the tick's time to the event's relative time, saturating
                # at the uint16 maximum instead of wrapping around.
                time0 = min(
                    int(self.tdat["time"][t]) + self._fields["time"][self._tick_idx[t]],
                    2 ** 16 - 1,
                )
                self._fields["time"][self._tick_idx[t]] = time0
def _is_event_field(self, field):
if "_fields" in self.__dict__.keys():
return field in self.__dict__["_fields"]
else:
return False
    def __getattr__(self, item):
        """Expose collated event fields as attributes.

        Called only when normal attribute lookup fails, so real attributes
        are never shadowed.  The "samples" field is loaded lazily: read from
        the on-disk .npz cache when present, otherwise extracted from the raw
        traces and written to the cache for subsequent runs.
        """
        if self._is_event_field(item):
            if item == "samples":
                fields = self.__dict__["_fields"]
                if fields[item] is None:
                    # First access: prefer the on-disk cache over extraction.
                    npzfile = (self.path / samples_name).with_suffix(".npz")
                    if npzfile.exists():
                        data = np.load(npzfile)
                        fields[item] = data["samples"]
                    else:
                        print("Extracting samples.")
                        samples = self._extract_samples()
                        # Cache to disk so extraction only ever happens once.
                        np.savez(
                            (self.path / samples_name).with_suffix(".npz"),
                            samples=samples,
                        )
                        fields[item] = samples
            return self.__dict__["_fields"][item]
        else:
            raise AttributeError("no attribute or field {}".format(item))
def __setattr__(self, key, value):
if self._is_event_field(key):
raise AttributeError("event fields are not writable")
else:
super().__setattr__(key, value)
    def __repr__(self):
        # TODO(review): placeholder repr -- consider summarising the event
        # count and source path instead of a fixed string.
        return "some data"
    def _frame_type(self, frame):
        """
        Decode the type byte of the frame at index *frame*.

        :param int frame: index
        :return: (Payload, sequence_error, is_header) -- the low nibble of the
            type byte selects the payload type, bit 0x80 flags a sequence
            error and bit 0x40 marks a trace header frame.
        """
        return (
            Payload(self._frame_payload_types[frame]),
            self.fidx[frame]["type"] & 0x80 != 0,
            self.fidx[frame]["type"] & 0x40 != 0,
        )
    def _trace_specs_from_payload(self, payload):
        """Return (offset, byte_length, Payload) for the trace header at byte
        offset *payload* in ``self.edat``.

        :raises RuntimeError: if the header does not describe a trace that
            carries samples.
        """
        # Event flags are the low nibble of byte 5 of the header.
        eflags = self.edat[payload + np.uint64(5) : payload + np.uint64(6)][0] & 0x0F
        if (eflags & 0x0C) >> 2 != 3:
            raise RuntimeError("payload does not represent a trace carrying samples")
        # NOTE(review): eflags was already masked with 0x0F above, so
        # eflags & 0xC0 is always 0 -- trace_type is therefore always 0 and
        # the trace_type == 2 branch below is unreachable.  Confirm whether
        # the trace type should be read from the unmasked byte instead.
        trace_type = (eflags & 0xC0) >> 6
        if trace_type == 2:
            raise RuntimeError("payload does not represent a trace carrying samples")
        # Low nibble of byte 3 is the sample offset; bytes 0-1 the byte length.
        offset = self.edat[payload + np.uint64(3) : payload + np.uint64(4)][0] & 0x0F
        byte_length = self.edat[payload : payload + np.uint64(2)].view(np.uint16)[0]
        return offset, byte_length, Payload(trace_type + 3)
@staticmethod
def _trace_header_fmt(offset, length, payload_type):
sample_fmt = [("samples", "({},)i2".format((length - offset * 8) // 2))]
if payload_type == Payload.single_trace:
return pulse_fmt(offset - 2) + sample_fmt
if payload_type == Payload.average_trace:
return av_trace_fmt + sample_fmt
if payload_type == Payload.dot_product_trace:
return pulse_fmt(offset - 2) + dot_product_fmt + sample_fmt
def _trace_specs_from_frame(self, frame, full=True):
payload = self.fidx[frame]["payload"]
offset, full_length, payload_type = self._trace_specs_from_payload(payload)
if full:
return offset, full_length, payload_type
else:
return offset, self.fidx[frame]["length"], payload_type
    def _trace_specs_from_ridx(self, ridx):
        """
        get trace specs from an entry in ridx
        :param int ridx: the ridx entry
        :return: offset, length, payload_type
        """
        # ridx["start"] is the byte offset of the trace header in self.edat.
        return self._trace_specs_from_payload(self.ridx[ridx]["start"])
def _trace_dtype_from_payload(self, payload):
offset, length, trace_payload = self._trace_specs_from_payload(payload)
return np.dtype(self._trace_header_fmt(offset, length, trace_payload))
def _trace_dtype_from_frame(self, frame, full=True):
"""
The compound numpy.dtype for the trace in frame.
:param int frame: index of frame
:param bool full: return dtype for full trace else just the current
frame.
:return: numpy.dtype
:raises: AttributeError when Full is True and frame is not a header.
"""
if self.fidx[frame]["type"] & 0x40 != 0: # header frame
offset, length, trace_payload = self._trace_specs_from_frame(
frame, full=full
)
return np.dtype(self._trace_header_fmt(offset, length, trace_payload))
else:
if full:
raise AttributeError("Frame at index:{} is not a header".format(frame))
return np.dtype(np.int16)
    def _trace(self, ridx_entry):
        """Return the structured trace record for a ridx entry.

        ridx_entry[0] is the byte offset of the trace header in self.edat and
        ridx_entry[1] its byte length.
        """
        offset, length, trace_payload = self._trace_specs_from_payload(ridx_entry[0])
        dtype = self._trace_header_fmt(offset, length, trace_payload)
        return self.edat[ridx_entry[0] : ridx_entry[0] + ridx_entry[1]].view(dtype)[0]
    def _dtype_from_frame(self, frame, full=False):
        """Numpy dtype describing the events carried by *frame*.

        :param int frame: frame index.
        :param bool full: for trace frames, describe the full trace rather
            than just this frame's slice.
        :raises NotImplementedError: for payload types without a dtype.
        """
        p = Payload(self.fidx[frame]["type"] & 0x0F)
        # NOTE(review): compares a Payload member against plain ints --
        # assumes Payload is an IntEnum (confirm).  3, 4, 6 are the
        # trace-carrying payload types.
        if p in [3, 4, 6]:
            return self._trace_dtype_from_frame(frame, full=full)
        if p == Payload.rise:
            return np.dtype(rise_fmt)
        if p == Payload.area:
            return np.dtype(area_fmt)
        if p == Payload.pulse:
            # pulse_fmt presumably takes event_size - 2 as the rise count --
            # TODO confirm.
            return np.dtype(pulse_fmt(self.fidx[frame]["event_size"] - 2))
        if p == Payload.dot_product:
            return np.dtype(
                pulse_fmt(self.fidx[frame]["event_size"] - 2) + dot_product_fmt
            )
        raise NotImplementedError("payload type {} not yet implemented".format(p))
def _frame_event_data(self, frame):
"""
frame return frame payload with correct view.
:param frame: frame index.
:return: ndarray with appropriate view.
"""
f_type = self.fidx[frame]["type"] & 0x0F
# print(frame, Payload(f_type))
if f_type > 7:
raise NotImplementedError(
"Not implemented for {!r} frames.", Payload(f_type)
)
start = self.fidx[frame]["payload"]
stop = self.fidx[frame]["payload"] + self.fidx[frame]["length"]
if f_type == Payload.tick:
return self.tdat.view(np.uint8)[start:stop].view(tick_dt)
return self.edat[start:stop].view(self._dtype_from_frame(frame, full=False))
    @property
    def homogeneous(self):
        # True when a homogeneous view of the data exists -- presumably a
        # single payload type across all frames (TODO confirm _homogeneous
        # semantics; it is set outside this view).
        return self._homogeneous is not None
@staticmethod
def _event_fields(payload):
if payload == Payload.rise:
return np.dtype(rise_fmt).fields
if payload == Payload.area:
return np.dtype(area_fmt).fields
if payload == Payload.pulse:
return np.dtype(pulse_fmt(1)).fields
if payload == Payload.single_trace:
return np.dtype(pulse_fmt(1)).fields
if payload == Payload.average_trace:
return np.dtype(av_trace_fmt).fields
if payload == Payload.average_trace:
return np.dtype(av_trace_fmt).fields
if payload == Payload.dot_product:
return np.dtype(pulse_fmt(1) + dot_product_fmt).fields
if payload == Payload.dot_product:
return np.dtype(pulse_fmt(1) + dot_product_fmt).fields
raise NotImplementedError("Not implemented for {}".format(Payload(payload)))
def mask(self, channel):
return self.eflags[:, 0] & 0x07 == channel
| 2.21875 | 2 |
tests/runners/test_runners_allocation.py | Maciuch/workload-collocation-agent | 0 | 12762281 | <gh_stars>0
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock, patch
import pytest
from wca import storage
from wca.allocators import AllocationType, RDTAllocation, Allocator
from wca.mesos import MesosNode
from wca.runners.allocation import AllocationRunner
from wca.runners.measurement import MeasurementRunner
from tests.testing import redis_task_with_default_labels,\
prepare_runner_patches, assert_subdict, assert_metric,\
platform_mock
# Patch Container get_allocations (simulate allocations read from OS filesystem)
# These values stand in for what would be read back from cgroups/resctrl.
_os_tasks_allocations = {
    AllocationType.QUOTA: 1.,
    AllocationType.RDT: RDTAllocation(name='', l3='L3:0=fffff', mb='MB:0=50')
}
@prepare_runner_patches
@patch('wca.containers.Container.get_allocations', return_value=_os_tasks_allocations)
@patch('wca.containers.ContainerSet.get_allocations', return_value=_os_tasks_allocations)
@patch('wca.platforms.collect_platform_information', return_value=(platform_mock, [], {}))
@pytest.mark.parametrize('subcgroups', ([], ['/T/c1'], ['/T/c1', '/T/c2']))
def test_allocation_runner(
        _get_allocations_mock, _get_allocations_mock_, platform_mock, subcgroups):
    """ Low level system calls are not mocked - but higher level objects and functions:
    Cgroup, Resgroup, Platform, etc. Thus the test do not cover the full usage scenario
    (such tests would be much harder to write).
    """
    # NOTE(review): @patch decorators inject mocks bottom-up, so the first
    # parameter actually receives the collect_platform_information mock and
    # the third shadows the imported platform_mock fixture -- the parameter
    # names are misleading but harmless since none of them is used.
    # Tasks mock
    t1 = redis_task_with_default_labels('t1', subcgroups)
    t2 = redis_task_with_default_labels('t2', subcgroups)

    # Allocator mock (lower the quota and number of cache ways in dedicated group).
    # Patch some of the functions of AllocationRunner.
    runner = AllocationRunner(
        measurement_runner=MeasurementRunner(
            node=Mock(spec=MesosNode, get_tasks=Mock(return_value=[])),
            metrics_storage=Mock(spec=storage.Storage, store=Mock()),
            rdt_enabled=True,
            gather_hw_mm_topology=False,
            extra_labels=dict(extra_labels='extra_value'),
        ),
        anomalies_storage=Mock(spec=storage.Storage, store=Mock()),
        allocations_storage=Mock(spec=storage.Storage, store=Mock()),
        rdt_mb_control_required=True,
        rdt_cache_control_required=True,
        allocator=Mock(spec=Allocator, allocate=Mock(return_value=({}, [], []))))

    runner._measurement_runner._wait = Mock()
    runner._measurement_runner._initialize()

    ############
    # First run (one task, one allocation).
    runner._measurement_runner._node.get_tasks.return_value = [t1]
    runner._allocator.allocate.return_value = (
        {t1.task_id: {AllocationType.QUOTA: .5,
                      AllocationType.RDT: RDTAllocation(name=None, l3='L3:0=0000f')}},
        [], []
    )
    runner._measurement_runner._iterate()

    # Check that allocator.allocate was called with proper arguments.
    assert runner._allocator.allocate.call_count == 1
    (_, tasks_data) = runner._allocator.allocate.mock_calls[0][1]
    # tasks_allocations are always read back from the (mocked) OS filesystem.
    assert_subdict(tasks_data[t1.task_id].allocations, _os_tasks_allocations)

    # Check allocation metrics ...
    got_allocations_metrics = runner._allocations_storage.store.call_args[0][0]

    # ... generic allocation metrics ...
    assert_metric(got_allocations_metrics,
                  'allocations_count',
                  dict(extra_labels='extra_value'),
                  expected_metric_value=1)
    assert_metric(got_allocations_metrics,
                  'allocations_errors',
                  dict(extra_labels='extra_value'),
                  expected_metric_value=0)
    assert_metric(got_allocations_metrics,
                  'allocation_duration',
                  dict(extra_labels='extra_value'))
    # ... and allocation metrics for task t1.
    assert_metric(got_allocations_metrics,
                  'allocation_cpu_quota',
                  dict(task=t1.task_id,
                       extra_labels='extra_value'), 0.5)
    assert_metric(got_allocations_metrics,
                  'allocation_rdt_l3_cache_ways',
                  dict(task=t1.task_id, extra_labels='extra_value'), 4)
    assert_metric(got_allocations_metrics,
                  'allocation_rdt_l3_mask',
                  dict(task=t1.task_id, extra_labels='extra_value'), 15)

    ############################
    # Second run (two tasks, one allocation)
    runner._measurement_runner._node.get_tasks.return_value = [t1, t2]
    first_run_t1_task_allocations = {
        t1.task_id: {AllocationType.QUOTA: .5,
                     AllocationType.RDT: RDTAllocation(name=None, l3='L3:0=0000f')}
    }
    runner._allocator.allocate.return_value = (first_run_t1_task_allocations, [], [])
    runner._measurement_runner._iterate()

    # Check allocation metrics...
    got_allocations_metrics = runner._allocations_storage.store.call_args[0][0]

    # ... generic allocation metrics ...
    assert_metric(got_allocations_metrics, 'allocations_count', expected_metric_value=2)
    assert_metric(got_allocations_metrics, 'allocations_errors', expected_metric_value=0)
    assert_metric(got_allocations_metrics, 'allocation_duration')

    # ... and metrics for task t1 ...
    assert_metric(got_allocations_metrics, 'allocation_cpu_quota', dict(task=t1.task_id), 0.5)
    assert_metric(got_allocations_metrics, 'allocation_rdt_l3_cache_ways', dict(task=t1.task_id), 4)
    assert_metric(got_allocations_metrics, 'allocation_rdt_l3_mask', dict(task=t1.task_id), 15)

    # Check allocate call.
    (_, tasks_data) = runner._allocator.allocate.mock_calls[1][1]

    # (note: tasks_allocations are always read from filesystem)
    assert_subdict(tasks_data[t1.task_id].allocations, _os_tasks_allocations)
    assert_subdict(tasks_data[t2.task_id].allocations, _os_tasks_allocations)

    ############
    # Third run (two tasks, two allocations) - modify L3 cache and put in the same group
    runner._measurement_runner._node.get_tasks.return_value = [t1, t2]
    runner._allocator.allocate.return_value = \
        {
            t1.task_id: {
                AllocationType.QUOTA: 0.7,
                AllocationType.RDT: RDTAllocation(name='one_group', l3='L3:0=00fff')
            },
            t2.task_id: {
                AllocationType.QUOTA: 0.8,
                AllocationType.RDT: RDTAllocation(name='one_group', l3='L3:0=00fff')
            }
        }, [], []

    runner._measurement_runner._iterate()

    got_allocations_metrics = runner._allocations_storage.store.call_args[0][0]

    assert_metric(got_allocations_metrics, 'allocations_count', expected_metric_value=4)

    # ... and metrics for task t1 ...
    assert_metric(got_allocations_metrics, 'allocation_cpu_quota', dict(task=t1.task_id), 0.7)
    assert_metric(got_allocations_metrics, 'allocation_cpu_quota', dict(task=t2.task_id), 0.8)
    assert_metric(got_allocations_metrics, 'allocation_rdt_l3_cache_ways',
                  dict(task=t1.task_id, group_name='one_group'), 12)  # 00fff=12
    assert_metric(got_allocations_metrics, 'allocation_rdt_l3_cache_ways',
                  dict(task=t1.task_id, group_name='one_group'), 12)  # 00fff=12
| 1.710938 | 2 |
crudlib/__init__.py | zxyle/TinyCRUD | 2 | 12762282 | from .mysql import MySQL
from .sqlite import SQLite
from .mariadb import MariaDB
| 1.085938 | 1 |
nl2type/nl2type.py | rs-malik/nl2type | 1 | 12762283 | import argparse
import json
from gensim.models import Word2Vec
from tensorflow_core.python.keras.models import load_model
import convert
import extract
import predict
import vectorize
from annotation import annotate
def main(input_file: str, output_file: str):
extracted_jsdoc = extract.extract_from_file(input_file)
df = convert.convert_func_to_df(extracted_jsdoc)
word2vec_code = Word2Vec.load('data/word_vecs/word2vec_model_code.bin')
word2vec_lang = Word2Vec.load('data/word_vecs/word2vec_model_language.bin')
vectors = vectorize.df_to_vec(df, word2vec_lang, word2vec_code)
model = load_model('data/model.h5')
with open("data/types.json") as f:
types_map = json.load(f)
predictions = predict.predict(model, vectors, types_map)
annotate.annotate(df, predictions, input_file, output_file)
if __name__ == '__main__':
    # CLI entry point: nl2type.py <input_file_path> <output_file_path>
    parser = argparse.ArgumentParser()
    parser.add_argument("input_file_path", type=str, help="Path of the input file")
    parser.add_argument("output_file_path", type=str, help="Path of the output file")
    args = parser.parse_args()
    main(args.input_file_path, args.output_file_path)
| 2.75 | 3 |
June 2021/Reverse Linked List II.py | parikshitgupta1/leetcode | 0 | 12762284 | <reponame>parikshitgupta1/leetcode
class Solution:
    def reverseBetween(self, head: ListNode,
                       left: int, right: int) -> ListNode:
        """Reverse the sublist between positions *left* and *right*
        (1-indexed, inclusive) and return the head of the list.

        O(n) time, O(1) extra space, using head insertion.
        """
        # Base case scenario
        if left == right:
            return head

        node = ptr = ListNode()  # Dummy node before actual linked list
        node.next = head

        # First traverse to node before reversing starts
        for _ in range(1, left):
            ptr = ptr.next

        # Start reversing from next node using three pointer approach:
        # repeatedly unlink current_node's successor and re-insert it right
        # after ptr (the node before the reversed section).
        current_node = ptr.next
        while left < right:
            temp_node = current_node.next
            current_node.next = temp_node.next
            temp_node.next = ptr.next
            ptr.next = temp_node
            left += 1

        return node.next
| 3.859375 | 4 |
adjutant/api/utils.py | knikolla/adjutant | 21 | 12762285 | <gh_stars>10-100
# Copyright (C) 2015 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import time
import sys
from decorator import decorator
from rest_framework.response import Response
def require_roles(roles, func, *args, **kwargs):
    """
    endpoints setup with this decorator require the defined roles.

    Returns 401 when the request is unauthenticated, 403 when the user holds
    none of *roles*, otherwise calls *func* and returns its result.
    """
    # args[0] is the view instance; args[1] is the incoming request.
    request = args[1]
    req_roles = set(roles)
    if not request.keystone_user.get("authenticated", False):
        return Response({"errors": ["Credentials incorrect or none given."]}, 401)

    roles = set(request.keystone_user.get("roles", []))
    # Any overlap between user roles and required roles is sufficient.
    if roles & req_roles:
        return func(*args, **kwargs)

    return Response(
        {"errors": ["Must have one of the following roles: %s" % list(req_roles)]}, 403
    )
@decorator
def mod_or_admin(func, *args, **kwargs):
    """
    Require project_mod or project_admin.
    Admin is allowed everything, so is also included.
    """
    # 401/403 handling is delegated to require_roles.
    return require_roles(
        {"project_admin", "project_mod", "admin"}, func, *args, **kwargs
    )
@decorator
def project_admin(func, *args, **kwargs):
    """
    endpoints setup with this decorator require the admin/project admin role.
    """
    allowed = {"project_admin", "admin"}
    return require_roles(allowed, func, *args, **kwargs)
@decorator
def admin(func, *args, **kwargs):
    """
    endpoints setup with this decorator require the admin role.
    """
    allowed = {"admin"}
    return require_roles(allowed, func, *args, **kwargs)
@decorator
def authenticated(func, *args, **kwargs):
    """
    endpoints setup with this decorator require the user to be signed in

    Returns 401 for unauthenticated requests, otherwise calls the endpoint.
    """
    # args[0] is the view instance; args[1] is the incoming request.
    request = args[1]
    if not request.keystone_user.get("authenticated", False):
        return Response({"errors": ["Credentials incorrect or none given."]}, 401)

    return func(*args, **kwargs)
@decorator
def minimal_duration(func, min_time=1, *args, **kwargs):
    """
    Make a function (or API call) take at least some time.

    Pads fast responses up to *min_time* seconds -- presumably to mask
    response-time differences (e.g. user-exists vs. not) from callers.
    """
    # doesn't apply during tests
    if "test" in sys.argv:
        return func(*args, **kwargs)

    start = datetime.utcnow()
    return_val = func(*args, **kwargs)
    end = datetime.utcnow()
    duration = end - start
    # Sleep for the remainder so total elapsed time is at least min_time.
    if duration.total_seconds() < min_time:
        time.sleep(min_time - duration.total_seconds())
    return return_val
| 1.960938 | 2 |
tools/evo-plot/evo/core/trajectory.py | jiexuan/evaluation_tools | 12 | 12762286 | <reponame>jiexuan/evaluation_tools
# -*- coding: UTF8 -*-
"""
some functions for trajectories
author: <NAME>
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import copy
import logging
import numpy as np
import evo.core.transformations as tr
import evo.core.geometry as geometry
from evo.core import lie_algebra as lie
logger = logging.getLogger(__name__)
class TrajectoryException(Exception):
    """Raised for invalid trajectory data or arguments."""
    pass
class PosePath3D(object):
    """
    just a path, no temporal information
    also: base class for real trajectory

    Internally keeps two interchangeable representations -- (positions,
    quaternions) and SE(3) matrices -- and derives the missing one lazily.
    """

    def __init__(self, positions_xyz=None, orientations_quat_wxyz=None, poses_se3=None, meta=None):
        """
        :param positions_xyz: nx3 list of x,y,z positions
        :param orientations_quat_wxyz: nx4 list of quaternions (w,x,y,z format)
        :param poses_se3: list of SE(3) poses
        :param meta: optional metadata
        """
        # At least one complete representation must be supplied.
        if (positions_xyz is None or orientations_quat_wxyz is None) and poses_se3 is None:
            raise TrajectoryException("must provide at least positions_xyz "
                                      "& orientations_quat_wxyz or poses_se3")
        if positions_xyz is not None:
            self._positions_xyz = np.array(positions_xyz)
        if orientations_quat_wxyz is not None:
            self._orientations_quat_wxyz = np.array(orientations_quat_wxyz)
        if poses_se3 is not None:
            self._poses_se3 = poses_se3
        self.meta = {} if meta is None else meta

    def __str__(self):
        return "{} poses, {:.3f}m path length".format(self.num_poses, self.path_length())

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        equal = True
        equal &= all([np.array_equal(p1, p2) for p1, p2 in zip(self.poses_se3, other.poses_se3)])
        equal &= np.array_equal(self.orientations_quat_wxyz, other.orientations_quat_wxyz)
        equal &= np.array_equal(self.positions_xyz, other.positions_xyz)
        return equal

    def __ne__(self, other):
        return not self == other

    @property
    def positions_xyz(self):
        # Lazily derived from the SE(3) poses (translation column) if needed.
        if not hasattr(self, "_positions_xyz"):
            assert hasattr(self, "_poses_se3")
            self._positions_xyz = np.array([p[:3, 3] for p in self._poses_se3])
        return self._positions_xyz

    @property
    def orientations_quat_wxyz(self):
        # Lazily derived from the SE(3) poses if needed.
        if not hasattr(self, "_orientations_quat_wxyz"):
            assert hasattr(self, "_poses_se3")
            self._orientations_quat_wxyz \
                = np.array([tr.quaternion_from_matrix(p) for p in self._poses_se3])
        return self._orientations_quat_wxyz

    @property
    def orientations_euler(self):
        # Derived from whichever representation is available (sxyz axes).
        if not hasattr(self, "_orientations_euler"):
            if hasattr(self, "_poses_se3"):
                self._orientations_euler \
                    = np.array([tr.euler_from_matrix(p, axes="sxyz") for p in self._poses_se3])
            elif hasattr(self, "_orientations_quat_wxyz"):
                self._orientations_euler \
                    = np.array([tr.euler_from_quaternion(q, axes="sxyz")
                                for q in self._orientations_quat_wxyz])
        return self._orientations_euler

    @property
    def poses_se3(self):
        # Lazily built from positions + quaternions if needed.
        if not hasattr(self, "_poses_se3"):
            assert hasattr(self, "_positions_xyz")
            assert hasattr(self, "_orientations_quat_wxyz")
            self._poses_se3 \
                = xyz_quat_wxyz_to_se3_poses(self.positions_xyz, self.orientations_quat_wxyz)
        return self._poses_se3

    @property
    def num_poses(self):
        if hasattr(self, "_poses_se3"):
            return len(self._poses_se3)
        else:
            return self.positions_xyz.shape[0]

    def path_length(self, ids=None):
        """
        calculates the path length (arc-length)
        :param ids: optional start and end index as tuple (start, end)
        :return: path length in meters
        """
        if ids is not None:
            if len(ids) != 2 or not all(type(i) is int for i in ids):
                raise TrajectoryException("ids must be a tuple of positive integers")
            return float(geometry.arc_len(self.positions_xyz[ids[0]:ids[1]]))
        else:
            return float(geometry.arc_len(self.positions_xyz))

    def transform(self, t, right_mul=False):
        """
        apply a left or right multiplicative SE(3) transformation to the whole path
        :param t: a valid SE(3) matrix
        :param right_mul: whether to apply it right-multiplicative or not
        """
        if not lie.is_se3(t):
            raise TrajectoryException("transformation is not a valid SE(3) matrix")
        if right_mul:
            self._poses_se3 = [np.dot(p, t) for p in self.poses_se3]
        else:
            self._poses_se3 = [np.dot(t, p) for p in self.poses_se3]
        # Keep the secondary representation in sync with the new poses.
        self._positions_xyz, self._orientations_quat_wxyz \
            = se3_poses_to_xyz_quat_wxyz(self.poses_se3)

    def scale(self, s):
        """
        apply a scaling to the whole path
        :param s: scale factor
        """
        # Only the translations are scaled; rotations are left untouched.
        if hasattr(self, "_poses_se3"):
            self._poses_se3 = [lie.se3(p[:3, :3], s*p[:3, 3]) for p in self._poses_se3]
        if hasattr(self, "_positions_xyz"):
            self._positions_xyz = s * self._positions_xyz

    def reduce_to_ids(self, ids):
        """
        reduce the elements to the ones specified in ids
        :param ids: list of integer indices
        """
        if hasattr(self, "_positions_xyz"):
            self._positions_xyz = self._positions_xyz[ids]
        if hasattr(self, "_orientations_quat_wxyz"):
            self._orientations_quat_wxyz = self._orientations_quat_wxyz[ids]
        if hasattr(self, "_poses_se3"):
            self._poses_se3 = [self._poses_se3[idx] for idx in ids]

    def check(self):
        """
        checks if the data is valid
        :return: True/False, dictionary with some detailed infos
        """
        same_len = self.positions_xyz.shape[0] \
            == self.orientations_quat_wxyz.shape[0] \
            == len(self.poses_se3)
        se3_valid = all([lie.is_se3(p) for p in self.poses_se3])
        norms = np.linalg.norm(self.orientations_quat_wxyz, axis=1)
        quat_normed = np.allclose(norms, np.ones(norms.shape))
        valid = same_len and se3_valid and quat_normed
        details = {
            "array shapes": "ok" if same_len else "invalid (lists must have same length)",
            "SE(3) conform": "yes" if se3_valid else "no (poses are not valid SE(3) matrices)",
            "quaternions": "ok" if quat_normed else "invalid (must be unit quaternions)"
        }
        return valid, details

    def get_infos(self):
        """
        :return: dictionary with some infos about the path
        """
        return {
            "nr. of poses": self.num_poses,
            "path length (m)": self.path_length(),
            "pos_start (m)": self.positions_xyz[0],
            "pos_end (m)": self.positions_xyz[-1]
        }

    def get_statistics(self):
        # Base paths carry no statistics; subclasses extend this dict.
        return {}  # no idea yet
class PoseTrajectory3D(PosePath3D, object):
    """
    a PosePath with temporal information

    Adds an ascending timestamp per pose plus duration/speed statistics.
    """

    def __init__(self, positions_xyz=None, orientations_quat_wxyz=None,
                 timestamps=None, poses_se3=None, meta=None):
        """
        :param timestamps: optional nx1 list of timestamps
        """
        super(PoseTrajectory3D, self).__init__(positions_xyz, orientations_quat_wxyz,
                                               poses_se3, meta)
        # this is a bit ugly...
        if timestamps is None:
            raise TrajectoryException("no timestamps provided")
        self.timestamps = np.array(timestamps)

    def __str__(self):
        s = super(PoseTrajectory3D, self).__str__()
        return s + ", {:.3f}s duration".format(self.timestamps[-1] - self.timestamps[0])

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        equal = super(PoseTrajectory3D, self).__eq__(other)
        equal &= np.array_equal(self.timestamps, other.timestamps)
        return equal

    def __ne__(self, other):
        return not self == other

    def reduce_to_ids(self, ids):
        """Reduce poses and timestamps to the given integer indices."""
        super(PoseTrajectory3D, self).reduce_to_ids(ids)
        self.timestamps = self.timestamps[ids]

    def check(self):
        """
        checks if the data is valid
        :return: True/False, dictionary with some detailed infos
        """
        valid, details = super(PoseTrajectory3D, self).check()
        len_stamps_valid = (len(self.timestamps) == len(self.positions_xyz))
        valid &= len_stamps_valid
        details["nr. of stamps"] = "ok" if len_stamps_valid else "wrong"
        # Fix: np.alltrue was deprecated and later removed from NumPy;
        # np.all is the documented, equivalent replacement.
        stamps_ascending = np.all(np.sort(self.timestamps) == self.timestamps)
        stamps_ascending &= np.unique(self.timestamps).size == len(self.timestamps)
        valid &= stamps_ascending
        details["timestamps"] = "ok" if stamps_ascending else "wrong, not ascending or duplicates"
        return valid, details

    def get_infos(self):
        """
        :return: dictionary with some infos about the trajectory
        """
        infos = super(PoseTrajectory3D, self).get_infos()
        infos["duration (s)"] = self.timestamps[-1] - self.timestamps[0]
        infos["t_start (s)"] = self.timestamps[0]
        infos["t_end (s)"] = self.timestamps[-1]
        return infos

    def get_statistics(self):
        """
        :return: dictionary with some statistics of the trajectory
        """
        stats = super(PoseTrajectory3D, self).get_statistics()
        # NOTE(review): raises ValueError on trajectories with fewer than two
        # poses (max()/min() of an empty sequence) -- confirm callers always
        # provide at least two poses.
        speeds = [calc_speed(self.positions_xyz[i], self.positions_xyz[i + 1],
                             self.timestamps[i], self.timestamps[i + 1])
                  for i in range(len(self.positions_xyz) - 1)]
        vmax = max(speeds)
        vmin = min(speeds)
        vmean = np.mean(speeds)
        stats.update({
            "v_max (m/s)": vmax,
            "v_min (m/s)": vmin,
            "v_avg (m/s)": vmean,
            "v_max (km/h)": vmax * 3.6,
            "v_min (km/h)": vmin * 3.6,
            "v_avg (km/h)": vmean * 3.6
        })
        return stats
class Trajectory(PoseTrajectory3D):
    # Backwards-compatibility alias for the old class name.
    pass  # TODO compat
def calc_speed(xyz_1, xyz_2, t_1, t_2):
    """
    Linear speed between two positions.

    :param xyz_1: position at timestamp 1
    :param xyz_2: position at timestamp 2
    :param t_1: timestamp 1
    :param t_2: timestamp 2
    :return: speed in m/s
    :raises TrajectoryException: if the timestamps are not strictly increasing
    """
    dt = t_2 - t_1
    if dt <= 0:
        raise TrajectoryException("bad timestamps: " + str(t_1) + " & " + str(t_2))
    return np.linalg.norm(xyz_2 - xyz_1) / dt
def calc_angular_speed(p_1, p_2, t_1, t_2, degrees=False):
    """
    Angular speed between two SE(3) poses.

    :param p_1: pose at timestamp 1
    :param p_2: pose at timestamp 2
    :param t_1: timestamp 1
    :param t_2: timestamp 2
    :param degrees: set to True to return deg/s
    :return: speed in rad/s
    :raises TrajectoryException: if the timestamps are not strictly increasing
    """
    if (t_2 - t_1) <= 0:
        raise TrajectoryException("bad timestamps: " + str(t_1) + " & " + str(t_2))
    # so3_log of the 3x3 rotation block gives each pose's rotation angle.
    if degrees:
        angle_1 = lie.so3_log(p_1[:3, :3]) * 180 / np.pi
        angle_2 = lie.so3_log(p_2[:3, :3]) * 180 / np.pi
    else:
        angle_1 = lie.so3_log(p_1[:3, :3])
        angle_2 = lie.so3_log(p_2[:3, :3])
    return (angle_2 - angle_1) / (t_2 - t_1)
def xyz_quat_wxyz_to_se3_poses(xyz, quat):
    """Build SE(3) pose matrices from positions and w-x-y-z quaternions."""
    # Local names q/p avoid the original's shadowing of the parameters.
    return [
        lie.se3(lie.so3_from_se3(tr.quaternion_matrix(q)), p)
        for q, p in zip(quat, xyz)
    ]
def se3_poses_to_xyz_quat_wxyz(poses):
    """Split SE(3) poses into an nx3 position array and nx4 w-x-y-z quaternions."""
    xyz = np.array([pose[:3, 3] for pose in poses])
    quat_wxyz = np.array([tr.quaternion_from_matrix(pose) for pose in poses])
    return xyz, quat_wxyz
def align_trajectory(traj, traj_ref, correct_scale=False, correct_only_scale=False, n=-1):
    """
    align a trajectory to a reference using Umeyama alignment
    :param traj: the trajectory to align
    :param traj_ref: reference trajectory
    :param correct_scale: set to True to adjust also the scale
    :param correct_only_scale: set to True to correct the scale, but not the pose
    :param n: the number of poses to use, counted from the start (default: all)
    :return: the aligned trajectory
    """
    traj_aligned = copy.deepcopy(traj)  # otherwise np arrays will be references and mess up stuff
    with_scale = correct_scale or correct_only_scale
    if correct_only_scale:
        logger.debug("Correcting scale...")
    else:
        logger.debug("Aligning using Umeyama's method..."
                     + (" (with scale correction)" if with_scale else ""))
    # Estimate rotation r_a, translation t_a and scale s from either the
    # whole trajectory or only the first n poses.
    if n == -1:
        r_a, t_a, s = geometry.umeyama_alignment(traj_aligned.positions_xyz.T,
                                                 traj_ref.positions_xyz.T, with_scale)
    else:
        r_a, t_a, s = geometry.umeyama_alignment(traj_aligned.positions_xyz[:n, :].T,
                                                 traj_ref.positions_xyz[:n, :].T, with_scale)
    if not correct_only_scale:
        logger.debug("Rotation of alignment:\n{}"
                     "\nTranslation of alignment:\n{}".format(r_a, t_a))
    logger.debug("Scale correction: {}".format(s))
    # Apply scale and/or rigid transformation depending on the flags; the
    # scale must be applied before the SE(3) transform.
    if correct_only_scale:
        traj_aligned.scale(s)
    elif correct_scale:
        traj_aligned.scale(s)
        traj_aligned.transform(lie.se3(r_a, t_a))
    else:
        traj_aligned.transform(lie.se3(r_a, t_a))
    return traj_aligned
| 2.03125 | 2 |
lofka-python-utils/test.py | zigeertech/lofka | 73 | 12762287 | # coding=utf8
"""
方便调试使用
"""
from lofka import LofkaHandler,LofkaAsyncHandler
import logging
import traceback
handler = LofkaAsyncHandler()
logger = logging.getLogger('test')
# Route this logger's records through the asynchronous Lofka handler.
logger.addHandler(handler)
def __debug_method():
    """Raise and log a test exception to exercise the Lofka handler."""
    try:
        raise Exception("TestException")
    except Exception:
        # logger.exception already captures the active traceback, so the
        # original's discarded traceback.format_exc() call was dead code.
        logger.exception("ErrorTitle")
if __name__ == "__main__":
__debug_method()
| 2.59375 | 3 |
Aula 07/ex05.py | rafa-santana/Curso-Python | 1 | 12762288 | n = int(input('Digite um número e descubra o seu sucessor e antecesor: '))
print ('O seu sucessor é igual a {} e o antecessor é igual a {}'.format(n+1,n-1)) | 4.15625 | 4 |
main.py | AhmadWajid/Ebay-Bot | 2 | 12762289 | from bs4 import BeautifulSoup
import requests
#from webdriver import keep_alive
import discord
import time
from discord.ext import commands
bot = commands.Bot(command_prefix='!')
# Remove the built-in help command so the prefix stays free for custom use.
bot.remove_command("help")
@bot.event
async def on_ready():
    """Set the bot's presence once connected and log the login."""
    await bot.change_presence(status=discord.Status.online, activity=discord.Activity(type=discord.ActivityType.watching, name=""))
    print(f'Logged in as {bot.user.name}')
@commands.command(name="ebay")
async def ebay(ctx):
message = ctx.message.content.split('!ebay ')
url = message[1]
pname = str(url)
url = url.replace(' ', '+')
url ='https://www.ebay.com/sch/i.html?_from=R40&_nkw={}&_sacat=0&_ipg=200'.format(url)
print(url)
pname=str(pname)
embeder = discord.Embed(title='Processing...', description='Looking for `{}`.\n\nThis may take some time...'.format(pname), color=0x00bfff)
sendd1 = await ctx.send(embed=embeder)
x = requests.get(url)
time.sleep(2)
embeder = discord.Embed(title='Processing...', description='Looking for `{}`.\n\nThis may take some time...\n\n`Scraping Prices...`'.format(pname), color=0x00bfff)
embeder.set_thumbnail(url='https://cdn0.iconfinder.com/data/icons/big-file-flat/32/02_Cloud_Computing_computer_internet_file_data-512.png')
sendd = await sendd1.edit(embed=embeder)
soup = BeautifulSoup(x.content, 'html.parser')
together = []
allitems = soup.find_all('span', class_='s-item__price')
for price in allitems:
price = price.text
price = price.replace('$', '')
price = price.replace(',', '')
if 'to' in price:
pass
else:
price = int(float(price))
together.append(price)
embeder = discord.Embed(title='Processing...', description='Looking for `{}`.\n\nThis may take some time...\n\n`Doing math...`'.format(pname), color=0x00bfff)
embeder.set_thumbnail(url='https://www.pngmart.com/files/7/Calculator-PNG-Picture.png')
new = await sendd1.edit(embed=embeder)
time.sleep(4)
items = len(together)
total = sum(together)
average = total/items
average = round(average, 2)
average = str(average)
items = str(items)
embeder = discord.Embed(title='Done', description='Here is what I found.\n\nItem : `{}`\n\nAverage Price : `${}`\n\nTotal number of items counted : `{}`'.format(pname,average,items), color=0x00bfff)
embeder.set_thumbnail(url='https://assets.stickpng.com/thumbs/5aa78e207603fc558cffbf19.png')
sendd = await sendd1.edit(embed=embeder)
bot.add_command(ebay)
#keep_alive()
# TODO(review): load the token from an environment variable instead of
# hard-coding a placeholder in source.
bot.run('bot-token')
octavious/parallelizer/celery.py | metglobal/octavious | 1 | 12762290 | <filename>octavious/parallelizer/celery.py
from __future__ import absolute_import
import celery
from octavious.parallelizer import Parallelizer
@celery.task(name='parallelizer_task')
def parallelizer_task(processor, input, callback):
    """Celery task that applies ``processor`` to ``input``.

    If ``callback`` is truthy it is invoked with the result before returning.

    Fix: the original registered the task name positionally
    (``@celery.task('parallelizer_task')``), but Celery's task decorator only
    accepts options as keyword arguments — a positional non-callable raises
    TypeError, so the name must be passed as ``name=...``.
    """
    output = processor(input)
    if callback:
        callback(output)
    return output
class CeleryParallelizer(Parallelizer):
    """Parallelizer backed by a message queue via the celery library.

    Each processor becomes a celery subtask; the group is dispatched
    asynchronously and the call blocks until all results are collected.
    """

    def parallelize(self, processors, input=None, callback=None):
        """Fan ``input`` out to every processor as a celery group and wait.

        Returns the list of processor outputs (``GroupResult.get()``).
        """
        signatures = [
            parallelizer_task.s(proc, input, callback) for proc in processors
        ]
        group_result = celery.group(signatures).apply_async()
        return group_result.get()
# Module-level alias: lets callers import the backend generically as
# ``from octavious.parallelizer.celery import parallelizer``.
parallelizer = CeleryParallelizer
| 2.796875 | 3 |
testdocker/cli/__init__.py | sip-li/testdocker | 0 | 12762291 | <reponame>sip-li/testdocker
"""
testdocker.cli
~~~~~~~~~~~~~~
CLI interface package for testdocker.
:copyright: (c) 2017 by <NAME>.
:license: Apache2.
"""
from . import main
| 1.015625 | 1 |
nni/common/nas_utils.py | dutxubo/nni | 9,680 | 12762292 | <gh_stars>1000+
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import functools
import logging
from .. import trial
_logger = logging.getLogger(__name__)
# Prefix used to flatten mutable-layer keys into the general search-space format.
_MUTABLE_LAYER_SPACE_PREFIX = "_mutable_layer"
# Registries populated by enas_mode: layer/input names and the tf signal
# variables, both keyed by "<mutable_id>_<mutable_layer_id>".
_namespace = {}
_tf_variables = {}
# Architecture logits collected by darts_mode; trained in darts_training.
_arch_logits_list = []
# Lazily-created optimizer/train-op for DARTS architecture updates.
_optimizer = None
_train_op = None
def classic_mode(
        mutable_id,
        mutable_layer_id,
        funcs,
        funcs_args,
        fixed_inputs,
        optional_inputs,
        optional_input_size):
    '''Execute the chosen function and inputs directly.
    In this mode, the trial code only runs the chosen subgraph (the chosen
    ops and inputs) and never touches the full model graph.'''
    # Make sure a subgraph choice has been fetched from the tuner.
    if trial.get_current_parameter() is None:
        trial.get_next_parameter()
    layer_name, input_names = _get_layer_and_inputs_from_tuner(
        mutable_id, mutable_layer_id, list(optional_inputs.keys()))
    selected_inputs = [optional_inputs[name] for name in input_names]
    return funcs[layer_name]([fixed_inputs, selected_inputs], **funcs_args[layer_name])
def enas_mode(
        mutable_id,
        mutable_layer_id,
        funcs,
        funcs_args,
        fixed_inputs,
        optional_inputs,
        optional_input_size,
        tf):
    '''For enas mode, we build the full model graph in trial but only run a subgraph.
    This is implemented by masking inputs and branching ops.
    Specifically, based on the received subgraph (through nni.get_next_parameter),
    it can be known which inputs should be masked and which op should be executed.

    Fix: the original built the tf.case branch lambdas as ``lambda: func_output``,
    which late-binds the loop variable — every branch returned the *last* op's
    output. The output is now bound per-branch via a default argument.'''
    name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
    # store namespace
    _namespace[mutable_id] = True
    _namespace[name_prefix] = dict()
    _namespace[name_prefix]['funcs'] = list(funcs)
    _namespace[name_prefix]['optional_inputs'] = list(optional_inputs)
    # create tensorflow variables as 1/0 signals used to form subgraph
    name_for_optional_inputs = name_prefix + '_optional_inputs'
    name_for_funcs = name_prefix + '_funcs'
    _tf_variables[name_prefix] = dict()
    _tf_variables[name_prefix]['optional_inputs'] = tf.get_variable(
        name_for_optional_inputs,
        [len(optional_inputs)],
        dtype=tf.bool,
        trainable=False
    )
    _tf_variables[name_prefix]['funcs'] = tf.get_variable(
        name_for_funcs, [], dtype=tf.int64, trainable=False)
    # get real values using their variable names
    real_optional_inputs_value = [optional_inputs[name]
                                  for name in _namespace[name_prefix]['optional_inputs']]
    real_func_value = [funcs[name]
                       for name in _namespace[name_prefix]['funcs']]
    real_funcs_args = [funcs_args[name]
                       for name in _namespace[name_prefix]['funcs']]
    # build tensorflow graph of getting chosen inputs by masking
    real_chosen_inputs = tf.boolean_mask(
        real_optional_inputs_value, _tf_variables[name_prefix]['optional_inputs'])
    # build tensorflow graph of different branches by using tf.case
    branches = dict()
    func_output = None
    for func_id in range(len(funcs)):
        func_output = real_func_value[func_id]([fixed_inputs, real_chosen_inputs], **real_funcs_args[func_id])
        # Bind the current tensor at definition time; a bare ``lambda: func_output``
        # would late-bind and make all branches return the final op's output.
        branches[tf.equal(_tf_variables[name_prefix]['funcs'], func_id)] = lambda out=func_output: out
    layer_out = tf.case(branches, exclusive=True, default=lambda: func_output)
    return layer_out
def oneshot_mode(
        mutable_id,
        mutable_layer_id,
        funcs,
        funcs_args,
        fixed_inputs,
        optional_inputs,
        optional_input_size,
        tf):
    '''Similar to enas mode, oneshot mode also builds the full model graph.
    The difference is that oneshot mode does not receive a subgraph.
    Instead, it uses dropout to randomly drop out inputs and ops, so the
    trial trains a weight-sharing "supergraph" rather than a sampled path.'''
    # NNI requires get_next_parameter before reporting a result, but the
    # parameter itself is not used in this mode.
    if trial.get_current_parameter() is None:
        trial.get_next_parameter()
    optional_inputs = list(optional_inputs.values())
    inputs_num = len(optional_inputs)
    # Calculate dropout rate according to the formula r^(1/k), where r is a
    # hyper-parameter (0.01 here) and k is the number of inputs.
    if inputs_num > 0:
        rate = 0.01 ** (1 / inputs_num)
        # noise_shape drops whole inputs at once (one mask entry per input,
        # broadcast across each input's own dimensions).
        noise_shape = [inputs_num] + [1] * len(optional_inputs[0].get_shape())
        optional_inputs = tf.nn.dropout(
            optional_inputs, rate=rate, noise_shape=noise_shape)
        optional_inputs = [optional_inputs[idx] for idx in range(inputs_num)]
    # Run every candidate op on the (dropout-masked) inputs.
    layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name])
                  for func_name, func in funcs.items()]
    output_num = len(layer_outs)
    # Same r^(1/k) dropout applied over the candidate op outputs.
    rate = 0.01 ** (1 / output_num)
    noise_shape = [output_num] + [1] * len(layer_outs[0].get_shape())
    layer_outs = tf.nn.dropout(layer_outs, rate=rate, noise_shape=noise_shape)
    # Surviving op outputs are summed into the layer output.
    layer_out = tf.reduce_sum(layer_outs, axis=0)
    return layer_out
def darts_mode(
        mutable_id,
        mutable_layer_id,
        funcs,
        funcs_args,
        fixed_inputs,
        optional_inputs,
        optional_input_size,
        tf):
    '''DARTS mode: run every candidate op and mix their outputs with a
    softmax over learnable architecture logits. The logits are registered in
    the module-level ``_arch_logits_list`` and updated by ``darts_training``
    (they are created with ``trainable=False`` so the regular weight
    optimizer does not touch them).'''
    optional_inputs = list(optional_inputs.values())
    layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name])
                  for func_name, func in funcs.items()]
    # Create architecture weights for every func(op)
    var_name = "{}_{}_arch_weights".format(mutable_id, mutable_layer_id)
    arch_logits = tf.get_variable(var_name, shape=[len(funcs)], trainable=False)
    _arch_logits_list.append(arch_logits)
    arch_weights = tf.nn.softmax(arch_logits)
    # Weighted sum of all candidate op outputs.
    layer_out = tf.add_n([arch_weights[idx] * out for idx, out in enumerate(layer_outs)])
    return layer_out
def reload_tensorflow_variables(tf, session):
    '''In ENAS mode, reload every signal variable created in ``enas_mode`` so
    the full tensorflow graph collapses onto the subgraph received from the
    tuner.
    ---------------
    session: the tensorflow session created by users
    tf: tensorflow module
    '''
    subgraph_from_tuner = trial.get_next_parameter()
    mutable_layers = set()
    # Collect the (mutable_id, mutable_layer_id) pairs present in the sample.
    for subgraph_key in subgraph_from_tuner:
        if "/" in subgraph_key:
            # has to remove the last component, could be layer_choice or whatever
            mutable_id, mutable_layer_id = _decompose_general_key(subgraph_key[:subgraph_key.rfind("/")])
            if mutable_id is not None:
                mutable_layers.add((mutable_id, mutable_layer_id))
    mutable_layers = sorted(list(mutable_layers))
    for mutable_id, mutable_layer_id in mutable_layers:
        if mutable_id not in _namespace:
            _logger.warning("%s not found in name space", mutable_id)
            continue
        name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
        # get optional inputs names
        optional_inputs = _namespace[name_prefix]['optional_inputs']
        # extract layer information from the subgraph sampled by tuner
        chosen_layer, chosen_inputs = _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inputs)
        # Translate the chosen layer name into its index and the chosen inputs
        # into a 1/0 mask matching the boolean signal variable.
        chosen_layer = _namespace[name_prefix]['funcs'].index(chosen_layer)
        chosen_inputs = [1 if inp in chosen_inputs else 0 for inp in optional_inputs]
        # load this information into the pre-defined tensorflow variables
        _tf_variables[name_prefix]['funcs'].load(chosen_layer, session)
        _tf_variables[name_prefix]['optional_inputs'].load(
            chosen_inputs, session)
def _construct_general_key(mutable_id, mutable_layer_id):
    """Build a mutable-layer key in the general (search space) format,
    i.e. ``prefix/mutable_id/mutable_layer_id``."""
    return "/".join((_MUTABLE_LAYER_SPACE_PREFIX, mutable_id, mutable_layer_id))
def _decompose_general_key(key):
    """Inverse of ``_construct_general_key``; returns ``(None, None)`` when
    *key* is not in the general mutable-layer format."""
    if key.startswith(_MUTABLE_LAYER_SPACE_PREFIX):
        _, mutable_id, mutable_layer_id = key.split("/", maxsplit=2)
        return mutable_id, mutable_layer_id
    return None, None
def darts_training(tf, session, loss, feed_dict):
    """Run one architecture-weight update step for DARTS.

    args:
        tf: tensorflow module
        session: the tensorflow session created by the user
        loss: loss tensor differentiated w.r.t. the collected arch logits
        feed_dict: feed dict supplied to ``session.run`` for the update step
    """
    global _optimizer, _train_op
    if _optimizer is None:
        # Fix: the original called ``tf.MomentumOptimizer`` — the class lives
        # under ``tf.train`` and its ``momentum`` argument is required.
        # 0.9 is the conventional default; tune if needed.
        _optimizer = tf.train.MomentumOptimizer(learning_rate=0.025, momentum=0.9)
        # TODO: Calculate loss
        grads_and_vars = _optimizer.compute_gradients(loss, _arch_logits_list)
        _train_op = _optimizer.apply_gradients(grads_and_vars)
    # Fix: ``feed_dict`` was accepted but never used; pass it through so the
    # loss's placeholders can actually be fed (``None`` is a valid value).
    session.run(_train_op, feed_dict=feed_dict)
def training_update(nas_mode, tf=None, session=None, loss=None, feed_dict=None):
    """Dispatch the per-step update required by the given NAS mode.

    ``'darts_mode'`` performs an architecture-weight gradient step;
    ``'enas_mode'`` reloads the sampled subgraph into the signal variables;
    any other mode is a no-op.
    """
    handlers = {
        'darts_mode': lambda: darts_training(tf, session, loss, feed_dict),
        'enas_mode': lambda: reload_tensorflow_variables(tf, session),
    }
    handler = handlers.get(nas_mode)
    if handler is not None:
        handler()
def _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inputs):
    """Resolve the tuner's choice of layer and inputs for one mutable layer.

    ``optional_inputs`` should be the name(key)s of the optional inputs.
    Returns ``(chosen_layer, chosen_inputs)``.
    """
    try:
        mutable_block = trial.get_current_parameter(mutable_id)
        # There is a NAS tuner: it provides the choices directly.
        chosen_layer = mutable_block[mutable_layer_id]["chosen_layer"]
        chosen_inputs = mutable_block[mutable_layer_id]["chosen_inputs"]
    except KeyError:
        # Fall back to the converted (flattened) NAS parameters produced by
        # convert_nas_search_space for ordinary HPO tuners.
        params = trial.get_current_parameter()
        expected_prefix = _construct_general_key(mutable_id, mutable_layer_id)
        chosen_layer = params[expected_prefix + "/layer_choice"]
        # find how many to choose
        optional_input_size = int(params[expected_prefix + "/optional_input_size"]) # convert uniform to randint
        # find who to choose, can duplicate
        optional_input_state = params[expected_prefix + "/optional_input_chosen_state"]
        chosen_inputs = []
        # make sure dict -> list produces a stable result by sorting
        optional_inputs_keys = sorted(optional_inputs)
        # Decode the single integer state as base-len(optional_inputs) digits,
        # one digit per chosen input (duplicates allowed).
        for _ in range(optional_input_size):
            chosen_inputs.append(optional_inputs_keys[optional_input_state % len(optional_inputs)])
            optional_input_state //= len(optional_inputs)
    _logger.info("%s_%s: layer: %s, optional inputs: %s", mutable_id, mutable_layer_id, chosen_layer, chosen_inputs)
    return chosen_layer, chosen_inputs
def convert_nas_search_space(search_space):
    """Convert ``mutable_layer`` entries of a NAS search space into plain
    ``choice``/``randint`` parameters so ordinary HPO tuners can sample them.

    Args:
        param search_space: raw search space
        return: the new search space, mutable_layers will be converted into choice
    """
    if not isinstance(search_space, dict):
        return search_space
    ret = dict()
    for k, v in search_space.items():
        if "_type" not in v:
            # this should not happen
            _logger.warning("There is no _type in one of your search space values with key '%s'"
                            ". Please check your search space", k)
            ret[k] = v
        elif v["_type"] != "mutable_layer":
            # Non-NAS parameters pass through unchanged.
            ret[k] = v
        else:
            _logger.info("Converting mutable_layer search space with key '%s'", k)
            # v["_value"] looks like {'mutable_layer_1': {'layer_choice': ...} ...}
            values = v["_value"]
            for layer_name, layer_data in values.items():
                # there should be at most layer_choice, optional_inputs, optional_input_size in layer_data
                # add "_mutable_layer" as prefix so that they can be recovered later
                layer_key = _construct_general_key(k, layer_name)
                if layer_data.get("layer_choice"):  # filter out empty choice and no choice
                    layer_choice = layer_data["layer_choice"]
                else:
                    raise ValueError("No layer choice found in %s" % layer_key)
                if layer_data.get("optional_input_size"):
                    input_size = layer_data["optional_input_size"]
                    # A scalar means "exactly this many inputs".
                    if isinstance(input_size, int):
                        input_size = [input_size, input_size]
                    if input_size[0] > input_size[1] or input_size[0] < 0:
                        _logger.error("Might not be able to handle optional_input_size < 0, please double check")
                    # randint's upper bound is exclusive, so widen by one.
                    input_size[1] += 1
                else:
                    _logger.info("Optional input choices are set to empty by default in %s", layer_key)
                    input_size = [0, 1]
                if layer_data.get("optional_inputs"):
                    # Every multiset of up to (input_size[1]-1) inputs is encoded
                    # as one integer state; see _get_layer_and_inputs_from_tuner.
                    total_state_size = len(layer_data["optional_inputs"]) ** (input_size[1] - 1)
                else:
                    _logger.info("Optional inputs not found in %s", layer_key)
                    total_state_size = 1
                converted = {
                    layer_key + "/layer_choice": {
                        "_type": "choice", "_value": layer_choice
                    },
                    layer_key + "/optional_input_size": {
                        "_type": "randint", "_value": input_size
                    },
                    layer_key + "/optional_input_chosen_state": {
                        "_type": "randint", "_value": [0, total_state_size]
                    }
                }
                _logger.info(converted)
                ret.update(converted)
    return ret
def rewrite_nas_space(func):
    """Decorator: convert mutable_layer entries of the incoming search space
    before handing it to *func* (typically a tuner's update_search_space)."""
    @functools.wraps(func)
    def wrapped(self, search_space):
        return func(self, convert_nas_search_space(search_space))
    return wrapped
| 2.140625 | 2 |
py_learning/python_demo/30/func_test4.py | flylei009/python_learning | 1 | 12762293 | import time
# print( time.time())
def timmer(func):
    """Decorator that prints how long *func* takes to run (in seconds).

    Fixes over the original:
    - the wrapper forwards positional/keyword arguments (previously only
      zero-argument functions could be decorated);
    - the wrapped function's return value is propagated (previously dropped);
    - ``functools.wraps`` preserves the wrapped function's metadata.
    """
    from functools import wraps  # stdlib; local import keeps the block self-contained

    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        stop_time = time.time()
        print("运行时间是 %s 秒 " % (stop_time - start_time))
        return result
    return wrapper
@timmer
def i_can_sleep():
    # Stand-in workload: sleep 3 seconds so the decorator has something to time.
    time.sleep(3)
# The commented-out lines below show the manual timing the decorator replaces
# (the Chinese print translates to "the function ran for %s seconds").
# start_time = time.time()
i_can_sleep()
# stop_time = time.time()
# print('函数运行了 %s 秒' %(stop_time-start_time))
| 3.234375 | 3 |
PYCV2IP/cv2IPApp.py | TsungYen0412/Visual_Sensing_Technology_Homework | 0 | 12762294 | #!/usr/bin/python3
import cv2
import cv2IP
if __name__ == '__main__':
    # Demo entry point: load a test image and display it until a key is pressed.
    IP = cv2IP.BaseIP()
    img = IP.ImRead("img/test.jpg")  # NOTE(review): relative path — assumes cwd is the project root
    IP.ImWindow("foreGround")
    IP.ImShow("foreGround", img)
    cv2.waitKey(0)  # block until any key press
    del IP
| 2.328125 | 2 |
model.py | jrzech/cxr-generalize | 3 | 12762295 | from __future__ import print_function, division
import torch
import os
import pandas as pd
from skimage import io, transform
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import sklearn
import sklearn.metrics as sklm
import csv
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import pickle
import random
from shutil import copyfile
from shutil import rmtree
import torchvision
# Module-level flag consumed by train_model/train_one; training requires a GPU.
use_gpu = torch.cuda.is_available()
print("We see GPU:")
print(use_gpu)
print("Let's use", torch.cuda.device_count(), "GPUs!")
from PIL import Image
import CXRDataset as CXR
import Eval as E
from importlib import reload
# reload() supports interactive/notebook development where these project
# modules are edited while this module stays imported.
reload(CXR)
reload(E)
def checkpoint(model_ft, best_acc, best_loss, epoch, PRED_LABEL, LR, RESULT_PATH):
    """
    save checkpoint
    args:
        model_ft: torchvision model
        best_acc: best accuracy achieved so far in training
        best_loss: best loss achieved so far in training
        epoch: last epoch of training
        PRED_LABEL: what we're predicting; expect format ["Pneumonia"] or ["Pneumonia","Opacity"]... etc
        LR: learning rate
        RESULT_PATH: path to save this to
    returns:
        nothing (saves file)
    """
    print('Saving..')
    # Bundle everything needed to resume training; the RNG state keeps
    # shuffling/augmentation reproducible after a restart.
    snapshot = {
        'model_ft': model_ft,
        'best_acc': best_acc,
        'best_loss': best_loss,
        'epoch': epoch,
        'rng_state': torch.get_rng_state(),
        'LR': LR,
    }
    torch.save(snapshot, RESULT_PATH + 'checkpoint_' + PRED_LABEL)
def train_model(model, criterion, optimizer, LR, num_epochs=5,dataloaders="x",dataset_sizes="x", PRED_LABEL="x", start_epoch=1,MULTILABEL=True,FOLD_OVERRIDE="",TRAIN_FILTER="",RESULT_PATH="results/",MULTICLASS=False):
    """
    performs torchvision model training
    args:
        model: model to fine tune
        criterion: pytorch optimization criteria
        optimizer: pytorch optimizer
        LR: learning rate
        num_epochs: stop after this many epochs
        dataloaders: torchvision dataloader
        dataset_sizes: length of train/val datasets
        PRED_LABEL: targets we're predicting in list format ["PNA","Opacity"] etc
        start_epoch: in case of loading saved model; not currently used
        MULTILABEL: should be removed - always True - everything is trained using multilabel list format now even single labels ["Pneumonia"]
        FOLD_OVERRIDE: columns of scalars with train/val/test split
        TRAIN_FILTER: list of data we're training on, used for labeling results
        RESULT_PATH= path at which results are saved, recommend leaving default to use with other scripts
        MULTICLASS: if training on single multiclass n>2 target; currently only implemented for single multiclass target.
    returns:
        model: trained torchvision model
        best_epoch: epoch on which best model was achieved

    NOTE(review): the validation-loss bookkeeping, LR decay, checkpointing and
    logging below all run *inside* the per-batch loop (epoch_loss is a running
    average over the full dataset size), so "best" is evaluated per batch, not
    once per epoch — intentional-looking but worth confirming before changing.
    """
    since = time.time()
    best_acc = 0.0
    best_loss=999999
    best_epoch=-1
    last_train_acc=-1
    last_train_loss=-1
    for epoch in range(start_epoch,num_epochs+1):
        print('Epoch {}/{}'.format(epoch, num_epochs))
        print('-' * 10)
        #small_data flag used to decide on how to decay
        small_data=False
        if dataset_sizes['train']<=10000: small_data=True
        iter_at_lr=0
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train(True)  # Set model to training mode
            else:
                model.train(False)  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            i=0
            total_done=0
            for data in dataloaders[phase]:
                i+=1
                # get the inputs
                inputs, labels = data
                batch_size= inputs.shape[0]
                if use_gpu:
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                else:
                    inputs, labels = Variable(inputs), Variable(labels)
                # zero the parameter gradients
                optimizer.zero_grad()
                #needed for multilabel training which uses different loss (BCE) and expects floats
                if not MULTICLASS:
                    labels = labels.float()
                outputs = model(inputs)
                _, preds = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels)
                # backward + optimize only if in training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                # statistics
                if MULTICLASS: # need to fix this for multilabel
                    running_corrects += torch.sum(preds == labels.long().data)
                # NOTE(review): loss.data[0] is pre-0.4 PyTorch API (loss.item() in modern versions)
                running_loss += loss.data[0]*batch_size
                # Running averages over the *full* dataset size — only correct at the end of the phase.
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects / dataset_sizes[phase]
                if phase=='train':
                    last_train_acc=epoch_acc
                    last_train_loss=epoch_loss
                print(phase+' epoch {}:loss {:.4f} acc: {:.4f} with data size {}'.format(
                    epoch, epoch_loss, epoch_acc, dataset_sizes[phase]))
                #decay if not best
                if phase == 'val' and epoch_loss > best_loss:
                    #normally we just decay if no improvement in val loss in epoch, but not ideal with small datasets
                    #so 'small_data' condition that insists on 5 passes at lr if dataset size <=10k
                    if small_data==False or iter_at_lr>=4:
                        print("decay loss from "+str(LR)+" to "+str(LR/10)+" as not seeing improvement in val loss")
                        LR = LR / 10
                        #making a new optimizer zeros out momentum
                        optimizer = optim.SGD(filter(lambda p:p.requires_grad, model.parameters()), lr = LR, momentum=0.9, weight_decay=1e-4)
                        iter_at_lr=0
                    else:
                        iter_at_lr+=1
                #below is used for labeling results
                trainstring = str(TRAIN_FILTER).replace("_","").replace("[","").replace(",","_").replace("]","").replace(" ","").replace("'","")
                # deep copy the model
                if phase == 'val' and epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_model_wts = model.state_dict()
                if phase == 'val' and epoch_loss < best_loss:
                    best_loss = epoch_loss
                    best_epoch = epoch
                    #save stuff if we have a best model
                    # NOTE(review): the str(PRED_LABEL) assignment is immediately overwritten — dead code kept as-is.
                    write_label = str(PRED_LABEL)
                    write_label = "Multilabel"
                    # NOTE(review): the PRED_LABEL argument here embeds RESULT_PATH, producing
                    # "<RESULT_PATH>checkpoint_<RESULT_PATH>Multilabel_..."; the reload at the
                    # bottom only matches when RESULT_PATH == "results/" — fragile coupling.
                    checkpoint(model, best_acc, best_loss, epoch, RESULT_PATH+write_label+"_train_"+trainstring+"_"+FOLD_OVERRIDE,LR,RESULT_PATH=RESULT_PATH)
                write_label = "multilabel_" + trainstring + "_" + FOLD_OVERRIDE
                if phase== 'val':
                    with open(RESULT_PATH+"log_train_"+write_label, 'a') as logfile:
                        logwriter = csv.writer(logfile, delimiter=',')
                        logwriter.writerow([write_label, epoch, last_train_loss, last_train_acc, epoch_loss, epoch_acc])
                total_done+=batch_size
                if(total_done % (100*batch_size) == 0): print("completed "+str(total_done)+" so far in epoch")
        #quit if 3 epochs no improvement (15 for small datasets)
        if ((epoch-best_epoch)>=3 and small_data==False) or ((epoch-best_epoch)>=15 and small_data==True):
            print("no improvement in 3 epochs, break")
            break
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights and return them
    checkpoint_best = torch.load(RESULT_PATH+"checkpoint_results/Multilabel_train_"+trainstring+"_"+FOLD_OVERRIDE)
    model = checkpoint_best['model_ft']
    return model, best_epoch
def give_mean_var(LABEL_PATH, PRED_LABEL, BALANCE_MODE, TRAIN_FILTER, MULTILABEL, FOLD_OVERRIDE, BATCH_SIZE, MULTICLASS=False):
    """
    Compute per-channel mean and std of the training images.
    args:
        LABEL_PATH: path to the scalars file
        PRED_LABEL: list of targets we're predicting
        BALANCE_MODE: deprecated
        TRAIN_FILTER: list of dataset we're training on, needed for dataloader
        MULTILABEL: deprecated, always true
        FOLD_OVERRIDE: train/val/test split column name in scalars
        BATCH_SIZE: passes batch for dataloader
        MULTICLASS: forwarded to the dataset; defaults to False. (Fix: the
            original body referenced an undefined name MULTICLASS and raised
            NameError whenever this function ran.)
    returns:
        mean: rgb channel means np array 3x1
        std: rgb channel std np array 3x1
    """
    # create set of transforms (no normalization — that's what we're computing)
    data_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Scale(224),
        transforms.CenterCrop(224),  # needed to get 224x224
        transforms.ToTensor()
    ])
    # make dataloader
    transformed_dataset = CXR.CXRDataset(csv_file=LABEL_PATH, fold='train', PRED_LABEL=PRED_LABEL, transform=data_transform, balance_classes=BALANCE_MODE, FILTER=TRAIN_FILTER, MULTILABEL=MULTILABEL, FOLD_OVERRIDE=FOLD_OVERRIDE, SAMPLE=0, TRAIN_FILTER=TRAIN_FILTER, RESULT_PATH="ignore", MULTICLASS=MULTICLASS)
    dataloader = torch.utils.data.DataLoader(transformed_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=8)
    # Allocate one row per image (rounded up to whole batches); rows left
    # untouched by the short final batch stay NaN and are ignored via nanmean.
    x = len(dataloader) * BATCH_SIZE
    print("len dataloader for give_mean_var:" + str(x))
    means = np.empty((x, 3))
    stds = np.empty((x, 3))
    means[:, :] = np.nan
    stds[:, :] = np.nan
    # Fix: the original indexed every batch from row 0, so earlier batches were
    # overwritten and the statistics reflected only the final batch. Track a
    # running row offset instead.
    row = 0
    for data in dataloader:
        inputs, _ = data
        inputs = inputs.numpy()
        for i in range(0, inputs.shape[0]):
            for j in range(0, 3):
                means[row + i, j] = np.mean(inputs[i, j, :, :])
                stds[row + i, j] = np.std(inputs[i, j, :, :])
        row += inputs.shape[0]
    # Average the per-image channel statistics, skipping NaN padding rows.
    mean = np.zeros(3)
    std = np.zeros(3)
    for j in range(0, 3):
        mean[j] = np.nanmean(means[:, j])
        std[j] = np.nanmean(stds[:, j])
    return mean, std
def train_one(PRED_LABEL,LR,BATCH_SIZE,LABEL_PATH,RESULT_PATH,BALANCE_MODE,FREEZE_LAYERS, NUM_EPOCHS,TRAIN_FILTER,PRED_FILTER,MULTILABEL,FOLD_OVERRIDE,TRAIN_SAMPLE,PRED_SAMPLE,CUSTOM_NORMALIZE, NET_TYPE,MULTICLASS,OUTPUT1024):
    """
    make dataloader, instantiates torchvision model, calls training function, returns results
    args:
        PRED_LABEL: list of labels to predict ["pna","opacity"] etc
        LR: learning rate
        BATCH_SIZE: batch size for dataloader; too big and won't fit on gpu
        LABEL_PATH: path to scalars
        RESULT_PATH: path to write results
        BALANCE_MODE: deprecated
        FREEZE_LAYERS: deprecated
        NUM_EPOCHS: max number of epochs to train for; may quit sooner if not improving
        TRAIN_FILTER: list of sites we're training on
        PRED_FILTER: list of sites we're predicting
        MULTILABEL: deprecated
        FOLD_OVERRIDE: train/val/test split column in scalars
        TRAIN_SAMPLE: sample training data to get limited sample (for testing)
        PRED_SAMPLE: sample test data to get limited sample (for testing)
        CUSTOM_NORMALIZE: use normalization mean, std based on data not imagenet
        NET_TYPE: deprecated
        MULTICLASS: train to single multiclass n>2 target (not implemented for multilabel multiclass)
        OUTPUT1024: if True, skip the 15-unit bottleneck before the classifier head
    returns:
        x: df with predictions
    """
    # if we were using custom normalization and not imagenet, do this; it didn't help vs imagenet normalization
    if CUSTOM_NORMALIZE:
        mean, std = give_mean_var(LABEL_PATH, PRED_LABEL,BALANCE_MODE, TRAIN_FILTER,MULTILABEL, FOLD_OVERRIDE, BATCH_SIZE)
        print(mean)
        print(std)
    elif not CUSTOM_NORMALIZE:
        # ImageNet channel statistics (standard for pretrained torchvision models).
        mean= [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    #torchvision transforms
    df = pd.read_csv(LABEL_PATH,index_col=0)
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.Scale(224), #244
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ]),
        'val': transforms.Compose([
            transforms.Scale(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ]),
    }
    #make dataloader
    transformed_datasets={}
    transformed_datasets['train'] =CXR.CXRDataset(csv_file=LABEL_PATH, fold='train', PRED_LABEL=PRED_LABEL, transform=data_transforms['train'], balance_classes=BALANCE_MODE, FILTER=TRAIN_FILTER,MULTILABEL=MULTILABEL,FOLD_OVERRIDE=FOLD_OVERRIDE,SAMPLE=TRAIN_SAMPLE,TRAIN_FILTER=TRAIN_FILTER,RESULT_PATH=RESULT_PATH,MULTICLASS=MULTICLASS)
    transformed_datasets['val'] =CXR.CXRDataset(csv_file=LABEL_PATH, fold='val', PRED_LABEL=PRED_LABEL, transform=data_transforms['val'], balance_classes=BALANCE_MODE, FILTER=TRAIN_FILTER,MULTILABEL=MULTILABEL,FOLD_OVERRIDE=FOLD_OVERRIDE,SAMPLE=TRAIN_SAMPLE,TRAIN_FILTER=TRAIN_FILTER,RESULT_PATH=RESULT_PATH,MULTICLASS=MULTICLASS)
    dataloaders={}
    dataloaders['train'] = torch.utils.data.DataLoader(transformed_datasets['train'], batch_size=BATCH_SIZE, shuffle=True,num_workers=8)#, sampler=sampler)
    dataloaders['val'] = torch.utils.data.DataLoader(transformed_datasets['val'], batch_size=BATCH_SIZE, shuffle=True, num_workers=8)
    #instantiate model
    if not use_gpu: raise ValueError("Error, requires GPU")
    print('==> Building model..')
    if(NET_TYPE=="densenet121"):
        print("using densenet121")
        model_ft = models.densenet121(pretrained=True)
        num_ftrs = model_ft.classifier.in_features
        if(OUTPUT1024==False):
            print("adding bottleneck=15 features")
            # if multiclass, needs a different output head than regular training to a list of binary targets
            if not MULTICLASS:
                # 15-unit bottleneck then sigmoid per binary label
                model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, 15), nn.Linear(15, len(PRED_LABEL)),nn.Sigmoid())
            elif MULTICLASS:
                # logits per class; softmax handled by CrossEntropyLoss below
                model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, 15), nn.Linear(15, transformed_datasets['train'].n_class))
                print("n_class "+str(transformed_datasets['train'].n_class))
        elif(OUTPUT1024==True):
            print("NOT adding bottleneck=15 features")
            # if multiclass, needs a different output head than regular training to a list of binary targets
            if not MULTICLASS:
                model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, len(PRED_LABEL)),nn.Sigmoid())
            elif MULTICLASS:
                model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, transformed_datasets['train'].n_class))
                print("n_class "+str(transformed_datasets['train'].n_class))
    start_epoch = 1
    print("loading model_ft onto gpu")
    model_ft = model_ft.cuda()
    if NET_TYPE=="densenet121":
        if(MULTICLASS==False):
            criterion = nn.BCELoss()
        else:
            criterion = nn.CrossEntropyLoss() # only using this for predicting site, department
    optimizer_ft = optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()), lr=LR, momentum=0.9, weight_decay=1e-4)
    dataset_sizes = {x: len(transformed_datasets[x]) for x in ['train', 'val']}
    #train
    model_ft , best_epoch = train_model(model_ft, criterion, optimizer_ft, LR, num_epochs=NUM_EPOCHS,dataloaders=dataloaders,dataset_sizes=dataset_sizes, PRED_LABEL=PRED_LABEL, start_epoch=start_epoch,MULTILABEL=MULTILABEL,FOLD_OVERRIDE=FOLD_OVERRIDE,TRAIN_FILTER=TRAIN_FILTER,RESULT_PATH=RESULT_PATH,MULTICLASS=MULTICLASS)
    #make preds on test
    x = E.make_pred_multilabel(data_transforms,model_ft,"pred_"+str(PRED_LABEL), LABEL_PATH,RESULT_PATH,PRED_LABEL,TRAIN_FILTER,PRED_FILTER,FOLD_OVERRIDE,PRED_SAMPLE,MULTICLASS,OUTPUT1024)
    return x
def train_cnn(LABEL_PATH, PRED_LABEL,TRAIN_FILTER,PRED_FILTER,BALANCE_MODE,FOLD_OVERRIDE,MULTICLASS=False,OUTPUT1024=False):
    """
    main function that gets called externally to train
    args:
        LABEL_PATH: path to scalars
        PRED_LABEL: targets to predict; list ["pna","opacity"] etc as in scalars file
        TRAIN_FILTER: list of sites we're training to ["nih","msh"]
        PRED_FILTER: list of sites we're predicting ["nih","iu"]
        BALANCE_MODE: deprecated
        FOLD_OVERRIDE: the column of scalars we use for train val test split
        MULTICLASS: train to single multiclass n>2 target
        OUTPUT1024: if True, skip the 15-unit bottleneck in the classifier head
    returns:
        y: results (predictions merged back onto img_id, also written to CSV)
    """
    # Fixed hyperparameters / configuration used for all runs.
    NUM_EPOCHS=50
    BATCH_SIZE=16
    LR = 0.01
    # NOTE(review): train_model's checkpoint-reload path assumes this exact
    # "results/" value — change with care.
    RESULT_PATH="results/"
    FREEZE_LAYERS="no"
    # A single string target is treated as single-label; lists as multilabel.
    MULTILABEL = not isinstance(PRED_LABEL, str)
    TRAIN_SAMPLE=0
    PRED_SAMPLE =0
    CUSTOM_NORMALIZE=False
    NET_TYPE="densenet121"
    if not os.path.exists(RESULT_PATH):
        os.makedirs(RESULT_PATH)
    if not os.path.exists(RESULT_PATH+"checkpoint_results/"):
        os.makedirs(RESULT_PATH+"checkpoint_results/")
    x = train_one(PRED_LABEL,LR,BATCH_SIZE,LABEL_PATH,RESULT_PATH,BALANCE_MODE,"layer4",NUM_EPOCHS,TRAIN_FILTER,PRED_FILTER,MULTILABEL,FOLD_OVERRIDE,TRAIN_SAMPLE,PRED_SAMPLE,CUSTOM_NORMALIZE, NET_TYPE, MULTICLASS,OUTPUT1024)
    # Merge predictions back onto the scalar file's image ids.
    y = pd.read_csv(LABEL_PATH)
    y=y[['img_id']]
    y = y.merge(x,on="img_id",how="inner")
    # Sanitize the train-site list into a filename-friendly string.
    trainlist=str(TRAIN_FILTER).replace("_","").replace("[","").replace(",","_").replace("]","").replace(" ","").replace("'","")
    y.to_csv(RESULT_PATH+"preds_train_"+trainlist+"_"+FOLD_OVERRIDE+".csv",index=False)
    return y
| 2.40625 | 2 |
lattpy/utils.py | dylanljones/lattpy | 11 | 12762296 | # coding: utf-8
#
# This code is part of lattpy.
#
# Copyright (c) 2021, <NAME>
#
# This code is licensed under the MIT License. The copyright notice in the
# LICENSE file in the root directory and this permission notice shall
# be included in all copies or substantial portions of the Software.
"""Contains miscellaneous utility methods."""
import logging
from typing import Iterable, List, Sequence, Optional, Union, Tuple
import time
import numpy as np
__all__ = [
"ArrayLike", "logger", "LatticeError", "ConfigurationError", "SiteOccupiedError",
"NoAtomsError", "NoBaseNeighborsError", "NotBuiltError", "Timer",
"min_dtype", "chain", "create_lookup_table", "frmt_num", "frmt_bytes", "frmt_time",
]
# define type for numpy `array_like` types
ArrayLike = Union[int, float, Iterable, np.ndarray]
# Configure package logger
logger = logging.getLogger("lattpy")
_CH = logging.StreamHandler()
_CH.setLevel(logging.DEBUG)
_FRMT_STR = "[%(asctime)s] %(levelname)-8s - %(name)-15s - %(funcName)-25s - %(message)s"
_FRMT = logging.Formatter(_FRMT_STR, datefmt='%H:%M:%S')
_CH.setFormatter(_FRMT) # Add formatter to stream handler
logger.addHandler(_CH) # Add stream handler to package logger
logger.setLevel(logging.WARNING) # Set initial logging level
class LatticeError(Exception):
    """Base class for all lattpy exceptions."""
    pass


class ConfigurationError(LatticeError):
    """Raised when the lattice is used in an invalid configuration state.

    ``args[0]`` is the message; an optional ``args[1]`` is a hint for fixing it.
    Fix: the original ``__str__`` and ``hint`` unconditionally unpacked two
    args and crashed (ValueError/IndexError) when only a message was given.
    """

    @property
    def msg(self):
        """The main error message."""
        return self.args[0]

    @property
    def hint(self):
        """The optional hint; empty string if none was given."""
        return self.args[1] if len(self.args) > 1 else ""

    def __str__(self):
        # Append the hint (if any) to the message in parentheses.
        msg = self.args[0]
        hint = self.args[1] if len(self.args) > 1 else ""
        if hint:
            msg += f" ({hint})"
        return msg
class SiteOccupiedError(ConfigurationError):
    """Raised when adding an atom at a position that is already occupied."""

    def __init__(self, atom, pos):
        # Fix: pass an explicit empty hint. ConfigurationError.__str__ unpacks
        # exactly two args, so constructing this with a lone message made
        # str(exc) raise ValueError instead of printing the error.
        super().__init__(f"Can't add {atom} to lattice, position {pos} already occupied!", "")
class NoAtomsError(ConfigurationError):
    """Raised when an operation requires atoms but none have been added."""

    def __init__(self):
        message = "lattice doesn't contain any atoms"
        advice = "use 'add_atom' to add an 'Atom'-object"
        super().__init__(message, advice)
class NoBaseNeighborsError(ConfigurationError):
    """Raised when base-neighbor data is required but was never configured."""

    def __init__(self):
        super().__init__(
            "base neighbors not configured",
            "call 'set_num_neighbors' after adding atoms or "
            "use the 'neighbors' keyword of 'add_atom'",
        )
class NotBuiltError(ConfigurationError):
    """Raised when a finite lattice model is required but not yet built."""

    def __init__(self):
        super().__init__(
            "lattice has not been built",
            "use the 'build' method to construct a finite size lattice model",
        )
def create_lookup_table(array: ArrayLike, dtype: Optional[Union[str, np.dtype]] = np.uint8) \
        -> Tuple[np.ndarray, np.ndarray]:
    """Converts the given array to an array of indices linked to the unique values.

    Parameters
    ----------
    array : array_like
        The input data.
    dtype : int or np.dtype, optional
        Optional data-type for storing the indices of the unique values.
        By default `np.uint8` is used, since it is assumed that the
        input-array has only a few unique values.

    Returns
    -------
    values : np.ndarray
        The unique values occurring in the input-array (sorted ascending).
    indices : np.ndarray
        The corresponding indices in the same shape as the input-array.
    """
    # np.unique already returns sorted values; return_inverse yields, for
    # every element, the index of its value in `values` — replacing the
    # original O(n * n_unique) masking loop with a single pass.
    values, inverse = np.unique(array, return_inverse=True)
    # The shape of `inverse` for n-d input differs between numpy versions,
    # so reshape explicitly to match the input.
    indices = inverse.reshape(np.shape(array)).astype(dtype)
    return values, indices
def min_dtype(a: Union[int, float, np.ndarray, Iterable],
              signed: Optional[bool] = True) -> np.dtype:
    """Returns the minimum required dtype to store the given values.

    Parameters
    ----------
    a : array_like
        One or more values for determining the dtype.
        Should contain the maximal expected values.
    signed : bool, optional
        If `True` the dtype is forced to be signed. The default is `True`.

    Returns
    -------
    dtype : dtype
        The required dtype.
    """
    if signed:
        # Force a signed result: probe with a negative value whose magnitude
        # exceeds the largest absolute value in `a`.
        probe = -np.max(np.abs(a)) - 1
    else:
        lo, hi = np.min(a), np.max(a)
        if lo >= 0:
            # All values non-negative: an unsigned dtype may suffice.
            probe = hi
        else:
            # Negatives present: pick whichever bound needs more room.
            probe = -hi - 1 if abs(lo) <= hi else lo
    return np.dtype(np.min_scalar_type(probe))
def chain(items: Sequence, cycle: bool = False) -> List:
    """Creates a chain between items

    Parameters
    ----------
    items : Sequence
        items to join to chain
    cycle : bool, optional
        cycle to the start of the chain if True, default: False

    Returns
    -------
    chain: list
        chain of items

    Example
    -------
    >>> print(chain(["x", "y", "z"]))
    [['x', 'y'], ['y', 'z']]

    >>> print(chain(["x", "y", "z"], True))
    [['x', 'y'], ['y', 'z'], ['z', 'x']]
    """
    # Pair each item with its successor.
    linked = [[first, second] for first, second in zip(items, items[1:])]
    if cycle:
        # Close the chain by linking the last item back to the first.
        linked.append([items[-1], items[0]])
    return linked
def frmt_num(num: float, dec: Optional[int] = 1, unit: Optional[str] = '',
             div: Optional[float] = 1000.) -> str:
    """Returns a formatted string of a number.

    Parameters
    ----------
    num : float
        The number to format.
    dec : int, optional
        Number of decimals. The default is 1.
    unit : str, optional
        Optional unit suffix. By default no unit-string is used.
    div : float, optional
        The divider used for units. The default is 1000.

    Returns
    -------
    num_str: str
    """
    prefixes = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z')
    for prefix in prefixes:
        if abs(num) < div:
            break
        num /= div
    else:
        # Magnitude exceeds all listed prefixes: fall back to 'Y' (yotta).
        return f"{num:.{dec}f}Y{unit}"
    return f"{num:.{dec}f}{prefix}{unit}"
def frmt_bytes(num: float, dec: Optional[int] = 1) -> str:
    """Returns a formatted string of the number of bytes.

    Delegates to `frmt_num` using binary (1024-based) unit prefixes with
    an 'iB' suffix.
    """
    return frmt_num(num, dec=dec, unit="iB", div=1024)
def frmt_time(seconds: float, short: bool = False, width: int = 0) -> str:
    """Returns a formated string for a given time in seconds.

    Parameters
    ----------
    seconds : float
        Time value to format
    short : bool, optional
        Flag if short representation should be used.
    width : int, optional
        Optional minimum length of the returned string.

    Returns
    -------
    time_str: str
    """
    if short:
        # Compact MM:SS (or HH:MMh) representation; non-positive -> "00:00".
        string = "00:00"
        if seconds > 0:
            mins, secs = divmod(seconds, 60)
            if mins > 60:
                hours, mins = divmod(mins, 60)
                string = f"{hours:02.0f}:{mins:02.0f}h"
            else:
                string = f"{mins:02.0f}:{secs:02.0f}"
    elif seconds < 1e-3:
        string = f"{1e6 * seconds:.0f}\u03BCs"
    elif seconds < 1:
        string = f"{1000 * seconds:.1f}ms"
    elif seconds < 60:
        string = f"{seconds:.1f}s"
    else:
        mins, seconds = divmod(seconds, 60)
        if mins < 60:
            string = f"{mins:.0f}:{seconds:04.1f}min"
        else:
            hours, mins = divmod(mins, 60)
            string = f"{hours:.0f}:{mins:02.0f}:{seconds:02.0f}h"
    # Right-align to the requested minimum width.
    return f"{string:>{width}}" if width > 0 else string
class Timer:
    """Timer object for easy time measuring.

    The time source defaults to ``time.perf_counter`` but can be replaced
    via the ``method`` argument (e.g. with a fake clock for testing).
    """

    __slots__ = ["_time", "_t0"]

    def __init__(self, method=None):
        # `method` must be a zero-argument callable returning a timestamp.
        self._time = method or time.perf_counter
        self._t0 = 0
        self.start()

    @property
    def seconds(self) -> float:
        """Returns the time since the timer has been started in seconds."""
        return self.time() - self._t0

    @property
    def millis(self) -> float:
        """Returns the time since the timer has been started in milliseconds."""
        return 1000 * (self.time() - self._t0)

    def time(self) -> float:
        """Returns the current time as a timestamp."""
        return self._time()

    def start(self) -> None:
        """Start (or restart) the timer."""
        self._t0 = self._time()

    def eta(self, progress: float) -> float:
        """Approximates the time left for a task.

        Parameters
        ----------
        progress: float
            Progress fraction of task (0 < progress <= 1).

        Returns
        -------
        eta: float
            Approximation of time left in seconds.
        """
        if not progress:
            return 0.0
        # Bugfix: scale by the *elapsed* time, not the raw clock timestamp
        # returned by `self.time()`. With `t_total = elapsed / progress`, the
        # remaining time is `t_total - elapsed = (1 / progress - 1) * elapsed`.
        return (1 / progress - 1) * self.seconds

    def strfrmt(self, short: bool = False, width: int = 0) -> str:
        """Formats the time since the timer has been started."""
        return frmt_time(self.seconds, short, width)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.strfrmt(short=True)})'

    def __str__(self) -> str:
        return self.strfrmt(short=True)
| 2.359375 | 2 |
Python/textbook_track/chapter01/ba1n.py | ghostrider77/RosalindProblems | 0 | 12762297 | <gh_stars>0
# Generate the d-Neighborhood of a String
import sys
NUCLEOTIDES = ('A', 'C', 'G', 'T')


def calc_immediate_neighbours(pattern):
    """Return every string that differs from `pattern` in exactly one position."""
    neighbours = set()
    for position, original in enumerate(pattern):
        for base in NUCLEOTIDES:
            if base == original:
                continue
            # Substitute a single nucleotide via slicing.
            neighbours.add(pattern[:position] + base + pattern[position + 1:])
    return neighbours
def generate_neighbourhood(text, d):
    """Return all strings within Hamming distance `d` of `text` (inclusive)."""
    neighbourhood = {text}
    # Each round widens the neighbourhood by one additional mismatch.
    for _ in range(d):
        expansion = set()
        for candidate in neighbourhood:
            expansion.update(calc_immediate_neighbours(candidate))
        neighbourhood.update(expansion)
    return neighbourhood
def main():
    """Read a pattern and a distance from stdin, print its d-neighbourhood."""
    lines = sys.stdin.read().splitlines()
    pattern, d = lines[0], int(lines[1])
    for neighbour in generate_neighbourhood(pattern, d):
        print(neighbour)


if __name__ == '__main__':
    main()
| 3.484375 | 3 |
gimp_be/image/save.py | J216/gimp_be | 3 | 12762298 | from string import letters
from gimpfu import pdb, CLIP_TO_IMAGE, gimp
import datetime
import re
import os
import random
def saveJPG(fn, comment=""):
    """Export the first open image as a JPEG file.

    Merges the visible layers of a duplicate so the original image stays
    untouched. If no comment is given, a timestamped default is embedded.
    Returns a tuple (success, filename, comment).
    """
    time_stamp = datetime.datetime.now()
    image = gimp.image_list()[0]
    new_image = pdb.gimp_image_duplicate(image)
    layer = pdb.gimp_image_merge_visible_layers(new_image, CLIP_TO_IMAGE)
    if comment == "":
        comment = "Digital Art - " + str(time_stamp)
    try:
        pdb.file_jpeg_save(new_image, layer, fn, fn, .65, 0, 0, 0, comment, 2, 1, 0, 0)
        return (True, fn, comment)
    except:
        return (False, fn, comment)
    finally:
        # Bugfix: the duplicate image used to leak when the export raised;
        # delete it on both the success and the failure path.
        pdb.gimp_image_delete(new_image)
def saveXCFProject(fn):
    """Save a project file (XCF) of the first open image.

    Returns a tuple (success, filename).
    """
    image = gimp.image_list()[0]
    drawable = pdb.gimp_image_active_drawable(image)
    try:
        pdb.gimp_xcf_save(1, image, drawable, fn, fn)
    except:
        return (False, fn)
    return (True, fn)
def savePNG(fn, comment=""):
    """Export the first open image as a PNG file.

    Merges the visible layers of a duplicate (leaving the original image
    untouched) and exports that duplicate, mirroring `saveJPG`.
    Returns a tuple (success, filename).

    NOTE(review): `file_png_save2` takes no comment parameter, so `comment`
    is accepted only for API symmetry with the other save helpers.
    """
    image = gimp.image_list()[0]
    # Normalize Windows-style path separators.
    fn = fn.replace('\\', '/')
    new_image = pdb.gimp_image_duplicate(image)
    layer = pdb.gimp_image_merge_visible_layers(new_image, CLIP_TO_IMAGE)
    try:
        # Bugfix: previously the *original* image/drawable was exported while
        # the merged duplicate was created and deleted unused - export the
        # merged duplicate instead, consistent with `saveJPG`.
        pdb.file_png_save2(new_image, layer, fn, fn, 0, 7, 0, 0, 0, 0, 0, 0, 0)
        return (True, fn)
    except:
        return (False, fn)
    finally:
        pdb.gimp_image_delete(new_image)
def saveGIF(fn,delay=100):
image = gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
fn=fn.replace('\\','/')
export_fn = fn
try:
pdb.gimp_convert_indexed(image, 2, 2, 256, 1, 0, "Computer Jones' Magic Palette")
except:
print "already indexed"
try:
pdb.file_gif_save(image, drawable, fn, fn, 0, 1, delay, 0)
return (True,fn,comment)
except:
return (False,fn)
| 2.71875 | 3 |
url_shorter/models.py | narnikgamarnikus/django-url-shorter | 0 | 12762299 | # -*- coding: utf-8 -*-
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from model_utils.models import TimeStampedModel
from annoying.fields import JSONField
from geolite2 import geolite2
from .utils import string_generator
class URL(TimeStampedModel):
    """A shortened URL, optionally owned by a user.

    `short_url` is a random slug generated on first save; `count` tracks
    the number of recorded redirects.
    """
    # NOTE(review): TimeStampedModel (django-model-utils) already provides
    # `created`/`modified` fields, so the extra `created` field below is
    # redundant - consider removing it (requires a migration).
    user = models.ForeignKey(get_user_model(), null=True, on_delete=models.CASCADE)
    short_url = models.SlugField(max_length=6, primary_key=True, unique=True)
    long_url = models.URLField(max_length=100, unique=True)
    # Bugfix: `auto_now=True` updates the value on *every* save, turning a
    # creation timestamp into a last-modified timestamp; `auto_now_add=True`
    # sets it exactly once when the row is created.
    created = models.DateTimeField(auto_now_add=True)
    count = models.IntegerField(default=0)

    def __str__(self):
        return self.long_url

    def get_absolute_url(self):
        """Return the canonical detail URL for this short link."""
        return reverse(
            'url_shorter:detail',
            kwargs={'short_url': self.short_url}
        )

    def save(self, *args, **kwargs):
        # Lazily assign a random slug the first time the object is saved.
        if not self.short_url:
            self.short_url = string_generator()
        return super(URL, self).save(*args, **kwargs)
class Hit(TimeStampedModel):
    """A single visit to a shortened URL, with optional GeoIP data."""
    url = models.ForeignKey(URL, on_delete=models.CASCADE)
    ip = models.CharField(default=None, null=True, max_length=50)
    # Raw geolite2 lookup record for `ip`, if a lookup was possible.
    data = JSONField(default=None, null=True)

    class Meta:
        pass

    def save(self, *args, **kwargs):
        if self.ip:
            reader = geolite2.reader()
            try:
                # May be None for addresses not present in the database.
                self.data = reader.get(self.ip)
            finally:
                # Bugfix: always release the geolite2 database, even when
                # the lookup raises (e.g. on a malformed address).
                geolite2.close()
        return super(Hit, self).save(*args, **kwargs)
| 2.21875 | 2 |
src/input_parser.py | DalyaG/Sphinx185 | 11 | 12762300 | <gh_stars>10-100
def input_parser(m):
"""
Given length of sequences, load input data corresponding to this length.
:param m: Length of sequences in the input.
:return: tuple, containing:
1. sequences_list: List of sequences in the input, parsed such that \
sequences_list[i] holds a list of integers that are the colors of this sequence.
2. n_correct_vertices_list: List holding the number of correct vertices in each sequence in sequences_list.
.. note:: This function assumes the existence of 'data/input_m.txt'
.. todo:: find a better name for n_correct_vertices_list
"""
print "Loading input..."
with open('data/input_{}.txt'.format(m), 'r') as f:
data = [line.rstrip() for line in f]
sequences_list = [[int(i) for i in item[:m]] for item in data]
n_correct_vertices_list = [int(item[m + 2]) for item in data]
return sequences_list, n_correct_vertices_list
| 3.703125 | 4 |
competitive_programming/programming_contests/interfatecs/1_2018/f.py | LeandroTk/Algorithms | 205 | 12762301 | codigo_set = set()
# Codes that have already been seen leaving; `codigo_set` (defined above)
# collects the registered attendee codes.
codido_set_saiu = set()

input()  # number of registered codes - value not needed
for registered_code in input().split(' '):
    codigo_set.add(registered_code)

input()  # number of exit events - value not needed
A = 0
I = 0
R = 0
for exit_code in input().split(' '):
    if exit_code in codido_set_saiu:
        # Any code exiting a second time counts as repeated.
        R += 1
    else:
        codido_set_saiu.add(exit_code)
        if exit_code in codigo_set:
            A += 1  # authorized: registered code, first exit
        else:
            I += 1  # intruder: unregistered code, first exit

print('%d A' % A)
print('%d I' % I)
print('%d R' % R)
| 3.328125 | 3 |
kinda/objects/dnaobjects.py | uwmisl/KinDA | 0 | 12762302 | <filename>kinda/objects/dnaobjects.py
from .domain import Domain, ComplementaryDomain
from .strand import Strand, ComplementaryStrand
from .complex import Complex
from .restingset import RestingSet
from .macrostate import Macrostate
from .reaction import Reaction, RestingSetReaction
from .sequence import Sequence
from .structure import Structure
from . import utils
from . import io_PIL, io_Multistrand, io_Peppercorn
| 1.023438 | 1 |
src/workflows/transport/common_transport.py | rjgildea/python-workflows | 0 | 12762303 | from __future__ import annotations
import decimal
import logging
from typing import Any, Callable, Dict, Mapping, Optional, Set
import workflows
MessageCallback = Callable[[Mapping[str, Any], Any], None]
class CommonTransport:
    """A common transport class, containing e.g. the logic to manage
    subscriptions and transactions."""

    # Optional wrapper applied to every subscription callback; see
    # subscription_callback_set_intercept().
    __callback_interceptor = None
    # NOTE(review): these are mutable *class-level* attributes, so the
    # subscription/transaction tables are shared across all instances of
    # this class, while `+= 1` on the id counters creates per-instance
    # shadow copies. This looks intended for a single transport instance
    # per process - confirm before instantiating several transports.
    __subscriptions: Dict[int, Dict[str, Any]] = {}
    __subscription_id: int = 0
    __transactions: Set[int] = set()
    __transaction_id: int = 0

    log = logging.getLogger("workflows.transport")

    #
    # -- High level communication calls ----------------------------------------
    #

    @classmethod
    def add_command_line_options(cls, parser):
        """Function to inject command line parameters."""
        pass

    def connect(self) -> bool:
        """Connect the transport class. This function must be overridden.
        :return: True-like value when connection successful,
                 False-like value otherwise."""
        return False

    def is_connected(self) -> bool:
        """Returns the current connection status. This function must be overridden.
        :return: True-like value when connection is available,
                 False-like value otherwise."""
        return False

    def disconnect(self):
        """Gracefully disconnect the transport class. This function should be
        overridden."""

    def subscribe(self, channel, callback, **kwargs) -> int:
        """Listen to a queue, notify via callback function.
        :param channel: Queue name to subscribe to
        :param callback: Function to be called when messages are received.
                         The callback will pass two arguments, the header as a
                         dictionary structure, and the message.
        :param **kwargs: Further parameters for the transport layer. For example
               disable_mangling: Receive messages as unprocessed strings.
               exclusive: Attempt to become exclusive subscriber to the queue.
               acknowledgement: If true receipt of each message needs to be
                                acknowledged.
        :return: A unique subscription ID
        """
        self.__subscription_id += 1

        # Wrap the user callback so each message is unmangled (e.g.
        # deserialized) before it is delivered.
        def mangled_callback(header, message):
            return callback(header, self._mangle_for_receiving(message))

        if "disable_mangling" in kwargs:
            if kwargs["disable_mangling"]:
                mangled_callback = callback # noqa:F811
            del kwargs["disable_mangling"]
        self.__subscriptions[self.__subscription_id] = {
            "channel": channel,
            "callback": mangled_callback,
            "ack": kwargs.get("acknowledgement"),
            "unsubscribed": False,
        }
        self.log.debug("Subscribing to %s with ID %d", channel, self.__subscription_id)
        self._subscribe(self.__subscription_id, channel, mangled_callback, **kwargs)
        return self.__subscription_id

    def unsubscribe(self, subscription: int, drop_callback_reference=False, **kwargs):
        """Stop listening to a queue or a broadcast
        :param subscription: Subscription ID to cancel
        :param drop_callback_reference: Drop the reference to the registered
                                        callback function immediately. This
                                        means any buffered messages still in
                                        flight will not arrive at the intended
                                        destination and cause exceptions to be
                                        raised instead.
        :param **kwargs: Further parameters for the transport layer.
        """
        if subscription not in self.__subscriptions:
            raise workflows.Error("Attempting to unsubscribe unknown subscription")
        if self.__subscriptions[subscription]["unsubscribed"]:
            raise workflows.Error(
                "Attempting to unsubscribe already unsubscribed subscription"
            )
        self._unsubscribe(subscription, **kwargs)
        # Keep the record around (marked unsubscribed) so in-flight messages
        # can still find their callback unless the caller opts out below.
        self.__subscriptions[subscription]["unsubscribed"] = True
        if drop_callback_reference:
            self.drop_callback_reference(subscription)

    def drop_callback_reference(self, subscription: int):
        """Drop reference to the callback function after unsubscribing.
        Any future messages arriving for that subscription will result in
        exceptions being raised.
        :param subscription: Subscription ID to delete callback reference for.
        """
        if subscription not in self.__subscriptions:
            raise workflows.Error(
                "Attempting to drop callback reference for unknown subscription"
            )
        if not self.__subscriptions[subscription]["unsubscribed"]:
            raise workflows.Error(
                "Attempting to drop callback reference for live subscription"
            )
        del self.__subscriptions[subscription]

    def subscribe_broadcast(self, channel, callback, **kwargs) -> int:
        """Listen to a broadcast topic, notify via callback function.
        :param channel: Topic name to subscribe to
        :param callback: Function to be called when messages are received.
                         The callback will pass two arguments, the header as a
                         dictionary structure, and the message.
        :param **kwargs: Further parameters for the transport layer. For example
               disable_mangling: Receive messages as unprocessed strings.
               retroactive: Ask broker to send old messages if possible
        :return: A unique subscription ID
        """
        self.__subscription_id += 1

        # Wrap the user callback so each message is unmangled (e.g.
        # deserialized) before it is delivered.
        def mangled_callback(header, message):
            return callback(header, self._mangle_for_receiving(message))

        if "disable_mangling" in kwargs:
            if kwargs["disable_mangling"]:
                mangled_callback = callback # noqa:F811
            del kwargs["disable_mangling"]
        self.__subscriptions[self.__subscription_id] = {
            "channel": channel,
            "callback": mangled_callback,
            "ack": False,
            "unsubscribed": False,
        }
        self.log.debug(
            "Subscribing to broadcasts on %s with ID %d",
            channel,
            self.__subscription_id,
        )
        self._subscribe_broadcast(
            self.__subscription_id, channel, mangled_callback, **kwargs
        )
        return self.__subscription_id

    def subscription_callback(self, subscription: int) -> MessageCallback:
        """Retrieve the callback function for a subscription. Raise a
        workflows.Error if the subscription does not exist.
        All transport callbacks can be intercepted by setting an
        interceptor function with subscription_callback_intercept().
        :param subscription: Subscription ID to look up
        :return: Callback function
        """
        subscription_record = self.__subscriptions.get(subscription)
        if not subscription_record:
            raise workflows.Error("Attempting to callback on unknown subscription")
        callback = subscription_record["callback"]
        if self.__callback_interceptor:
            return self.__callback_interceptor(callback)
        return callback

    def subscription_callback_set_intercept(self, interceptor):
        """Set a function to intercept all callbacks. This is useful to, for
        example, keep a thread barrier between the transport related functions
        and processing functions.
        :param interceptor: A function that takes the original callback function
                            and returns a modified callback function. Or None to
                            disable interception.
        """
        self.__callback_interceptor = interceptor

    def send(self, destination, message, **kwargs):
        """Send a message to a queue.
        :param destination: Queue name to send to
        :param message: Either a string or a serializable object to be sent
        :param **kwargs: Further parameters for the transport layer. For example
               delay: Delay transport of message by this many seconds
               headers: Optional dictionary of header entries
               expiration: Optional expiration time, relative to sending time
               transaction: Transaction ID if message should be part of a
                            transaction
        """
        message = self._mangle_for_sending(message)
        self._send(destination, message, **kwargs)

    def raw_send(self, destination, message, **kwargs):
        """Send a raw (unmangled) message to a queue.
        This may cause errors if the receiver expects a mangled message.
        :param destination: Queue name to send to
        :param message: Either a string or a serializable object to be sent
        :param **kwargs: Further parameters for the transport layer. For example
               delay: Delay transport of message by this many seconds
               headers: Optional dictionary of header entries
               expiration: Optional expiration time, relative to sending time
               transaction: Transaction ID if message should be part of a
                            transaction
        """
        self._send(destination, message, **kwargs)

    def broadcast(self, destination, message, **kwargs):
        """Broadcast a message.
        :param destination: Topic name to send to
        :param message: Either a string or a serializable object to be sent
        :param **kwargs: Further parameters for the transport layer. For example
               delay: Delay transport of message by this many seconds
               headers: Optional dictionary of header entries
               expiration: Optional expiration time, relative to sending time
               transaction: Transaction ID if message should be part of a
                            transaction
        """
        message = self._mangle_for_sending(message)
        self._broadcast(destination, message, **kwargs)

    def raw_broadcast(self, destination, message, **kwargs):
        """Broadcast a raw (unmangled) message.
        This may cause errors if the receiver expects a mangled message.
        :param destination: Topic name to send to
        :param message: Either a string or a serializable object to be sent
        :param **kwargs: Further parameters for the transport layer. For example
               delay: Delay transport of message by this many seconds
               headers: Optional dictionary of header entries
               expiration: Optional expiration time, relative to sending time
               transaction: Transaction ID if message should be part of a
                            transaction
        """
        self._broadcast(destination, message, **kwargs)

    def ack(self, message, subscription_id: Optional[int] = None, **kwargs):
        """Acknowledge receipt of a message. This only makes sense when the
        'acknowledgement' flag was set for the relevant subscription.
        :param message: ID of the message to be acknowledged, OR a dictionary
                        containing a field 'message-id'.
        :param subscription_id: ID of the associated subscription. Optional when
                                a dictionary is passed as first parameter and
                                that dictionary contains field 'subscription'.
        :param **kwargs: Further parameters for the transport layer. For example
               transaction: Transaction ID if acknowledgement should be part of
                            a transaction
        """
        # Accept either a bare message ID or a full header dictionary.
        if isinstance(message, dict):
            message_id = message.get("message-id")
            if not subscription_id:
                subscription_id = message.get("subscription")
        else:
            message_id = message
        if not message_id:
            raise workflows.Error("Cannot acknowledge message without message ID")
        if not subscription_id:
            raise workflows.Error("Cannot acknowledge message without subscription ID")
        self.log.debug(
            "Acknowledging message %s on subscription %s", message_id, subscription_id
        )
        self._ack(message_id, subscription_id=subscription_id, **kwargs)

    def nack(self, message, subscription_id: Optional[int] = None, **kwargs):
        """Reject receipt of a message. This only makes sense when the
        'acknowledgement' flag was set for the relevant subscription.
        :param message: ID of the message to be rejected, OR a dictionary
                        containing a field 'message-id'.
        :param subscription_id: ID of the associated subscription. Optional when
                                a dictionary is passed as first parameter and
                                that dictionary contains field 'subscription'.
        :param **kwargs: Further parameters for the transport layer. For example
               transaction: Transaction ID if rejection should be part of a
                            transaction
        """
        # Accept either a bare message ID or a full header dictionary.
        if isinstance(message, dict):
            message_id = message.get("message-id")
            if not subscription_id:
                subscription_id = message.get("subscription")
        else:
            message_id = message
        if not message_id:
            raise workflows.Error("Cannot reject message without message ID")
        if not subscription_id:
            raise workflows.Error("Cannot reject message without subscription ID")
        self.log.debug(
            "Rejecting message %s on subscription %d", message_id, subscription_id
        )
        self._nack(message_id, subscription_id=subscription_id, **kwargs)

    def transaction_begin(self, subscription_id: Optional[int] = None, **kwargs) -> int:
        """Start a new transaction.
        :param **kwargs: Further parameters for the transport layer.
        :return: A transaction ID that can be passed to other functions.
        """
        self.__transaction_id += 1
        self.__transactions.add(self.__transaction_id)
        if subscription_id:
            self.log.debug(
                "Starting transaction with ID %d on subscription %d",
                self.__transaction_id,
                subscription_id,
            )
        else:
            self.log.debug("Starting transaction with ID %d", self.__transaction_id)
        self._transaction_begin(
            self.__transaction_id, subscription_id=subscription_id, **kwargs
        )
        return self.__transaction_id

    def transaction_abort(self, transaction_id: int, **kwargs):
        """Abort a transaction and roll back all operations.
        :param transaction_id: ID of transaction to be aborted.
        :param **kwargs: Further parameters for the transport layer.
        """
        if transaction_id not in self.__transactions:
            raise workflows.Error("Attempting to abort unknown transaction")
        self.log.debug("Aborting transaction %s", transaction_id)
        self.__transactions.remove(transaction_id)
        self._transaction_abort(transaction_id, **kwargs)

    def transaction_commit(self, transaction_id: int, **kwargs):
        """Commit a transaction.
        :param transaction_id: ID of transaction to be committed.
        :param **kwargs: Further parameters for the transport layer.
        """
        if transaction_id not in self.__transactions:
            raise workflows.Error("Attempting to commit unknown transaction")
        self.log.debug("Committing transaction %s", transaction_id)
        self.__transactions.remove(transaction_id)
        self._transaction_commit(transaction_id, **kwargs)

    @property
    def is_reconnectable(self):
        """Check if the transport object is in a status where reconnecting is
        supported. There must not be any active subscriptions or transactions."""
        return not self.__subscriptions and not self.__transactions

    #
    # -- Low level communication calls to be implemented by subclass -----------
    #

    def _subscribe(self, sub_id: int, channel, callback, **kwargs):
        """Listen to a queue, notify via callback function.
        :param sub_id: ID for this subscription in the transport layer
        :param channel: Queue name to subscribe to
        :param callback: Function to be called when messages are received
        :param **kwargs: Further parameters for the transport layer. For example
               exclusive: Attempt to become exclusive subscriber to the queue.
               acknowledgement: If true receipt of each message needs to be
                                acknowledged.
        """
        raise NotImplementedError("Transport interface not implemented")

    def _subscribe_broadcast(self, sub_id: int, channel, callback, **kwargs):
        """Listen to a broadcast topic, notify via callback function.
        :param sub_id: ID for this subscription in the transport layer
        :param channel: Topic name to subscribe to
        :param callback: Function to be called when messages are received
        :param **kwargs: Further parameters for the transport layer. For example
               retroactive: Ask broker to send old messages if possible
        """
        raise NotImplementedError("Transport interface not implemented")

    def _unsubscribe(self, sub_id: int, **kwargs):
        """Stop listening to a queue or a broadcast
        :param sub_id: ID for this subscription in the transport layer
        """
        raise NotImplementedError("Transport interface not implemented")

    def _send(self, destination, message, **kwargs):
        """Send a message to a queue.
        :param destination: Queue name to send to
        :param message: A string to be sent
        :param **kwargs: Further parameters for the transport layer. For example
               headers: Optional dictionary of header entries
               expiration: Optional expiration time, relative to sending time
               transaction: Transaction ID if message should be part of a
                            transaction
        """
        raise NotImplementedError("Transport interface not implemented")

    def _broadcast(self, destination, message, **kwargs):
        """Broadcast a message.
        :param destination: Topic name to send to
        :param message: A string to be broadcast
        :param **kwargs: Further parameters for the transport layer. For example
               headers: Optional dictionary of header entries
               expiration: Optional expiration time, relative to sending time
               transaction: Transaction ID if message should be part of a
                            transaction
        """
        raise NotImplementedError("Transport interface not implemented")

    def _ack(self, message_id, subscription_id, **kwargs):
        """Acknowledge receipt of a message. This only makes sense when the
        'acknowledgement' flag was set for the relevant subscription.
        :param message_id: ID of the message to be acknowledged.
        :param subscription_id: ID of the associated subscription.
        :param **kwargs: Further parameters for the transport layer. For example
               transaction: Transaction ID if acknowledgement should be part of
                            a transaction
        """
        raise NotImplementedError("Transport interface not implemented")

    def _nack(self, message_id, subscription_id, **kwargs):
        """Reject receipt of a message. This only makes sense when the
        'acknowledgement' flag was set for the relevant subscription.
        :param message_id: ID of the message to be rejected.
        :param subscription_id: ID of the associated subscription.
        :param **kwargs: Further parameters for the transport layer. For example
               transaction: Transaction ID if rejection should be part of a
                            transaction
        """
        raise NotImplementedError("Transport interface not implemented")

    def _transaction_begin(
        self, transaction_id: int, *, subscription_id: Optional[int] = None, **kwargs
    ) -> None:
        """Start a new transaction.
        :param transaction_id: ID for this transaction in the transport layer.
        :param **kwargs: Further parameters for the transport layer.
        """
        raise NotImplementedError("Transport interface not implemented")

    def _transaction_abort(self, transaction_id: int, **kwargs) -> None:
        """Abort a transaction and roll back all operations.
        :param transaction_id: ID of transaction to be aborted.
        :param **kwargs: Further parameters for the transport layer.
        """
        raise NotImplementedError("Transport interface not implemented")

    def _transaction_commit(self, transaction_id: int, **kwargs) -> None:
        """Commit a transaction.
        :param transaction_id: ID of transaction to be committed.
        :param **kwargs: Further parameters for the transport layer.
        """
        raise NotImplementedError("Transport interface not implemented")

    #
    # -- Internal message mangling functions -----------------------------------
    #

    # Some transport mechanisms will not be able to work with arbitrary objects,
    # so these functions are used to prepare a message for sending/receiving.
    # The canonical example is serialization/deserialization, see stomp_transport

    @staticmethod
    def _mangle_for_sending(message):
        """Function that any message will pass through before it being forwarded to
        the actual _send* functions."""
        return message

    @staticmethod
    def _mangle_for_receiving(message):
        """Function that any message will pass through before it being forwarded to
        the receiving subscribed callback functions."""
        return message
return message
def json_serializer(obj):
    """A helper function for JSON serialization, where it can be used as
    the default= argument. This function helps the serializer to translate
    objects that otherwise would not be understood. Note that this is
    one-way only - these objects are not restored on the receiving end."""
    if not isinstance(obj, decimal.Decimal):
        raise TypeError(repr(obj) + " is not JSON serializable")
    # Decimals are degraded to floats for transport.
    return float(obj)
| 2.625 | 3 |
experiments/prob_vishull.py | matheusgadelha/ShapeRecShapePrior | 23 | 12762304 | import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import skimage.io as io
import argparse
import os
import sys
import time
# Allow python3 to search for modules outside of this directory
sys.path.append("../")
from models.skip import skip3d
from volumetocube import write_bin_from_array
from volumetocube import write_obj_from_array
import binvox_rw
from tools.Ops import radon
from tools.Ops import tvloss
from tools.Ops import tvloss3d
from tools.Ops import load_binvox
from tools.Ops import volume_proj
from tools.Ops import rotate_volume
from tools.Ops import inv_rotate_volume
from skimage.measure import compare_ssim as ssim
# Command-line interface: reconstruction prior, input volume, projection
# settings, and noise/regularization parameters for the experiment below.
# Bugfix: corrected the typo "Reconstruciton" in the user-facing help text.
parser = argparse.ArgumentParser(description='Reconstruction using deep prior.')
parser.add_argument("-m", "--method", type=str, help="Prior to be used in the reconstruction (deep | tv | carve)", default="deep")
parser.add_argument("-b", "--binvox", type=str, help="Path to the binvox file.", default="../data/bunny.binvox")
parser.add_argument("-p", "--projection", type=str, help="Type of projection to be used (depth | binary)", default="depth")
parser.add_argument("-n", "--nproj", type=int, help="Number of projections.", default=8)
parser.add_argument("-s", "--sigma", type=float, help="Amount of variance in the gaussian noise.", default=0.0)
parser.add_argument("-k", "--kappa", type=float, help="Dispersion rate of Von Mises noise.", default=4.0)
parser.add_argument("-v", "--viewWeight", type=float, help="Weight of the viewpoint regularization.", default=1.0)
def add_gaussian_noise(img, sigma=1.0):
    """Return `img` plus zero-mean Gaussian noise with standard deviation `sigma`.

    The noise tensor is allocated on the GPU, so `img` is expected to be a
    CUDA tensor.
    """
    noise = torch.randn(img.shape).cuda()
    return img + sigma * noise
if __name__ == '__main__':
    args = parser.parse_args()

    # Reconstruction method: 'deep' optimises a deep-image-prior network,
    # 'tv' optimises the voxel grid directly with total-variation
    # regularisation, and 'carve' runs the space-carving baseline (below).
    use_tv = args.method == 'tv'
    use_dp = args.method == 'deep'
    kappa = args.kappa
    view_weight = args.viewWeight

    # Experiment name encodes every hyper-parameter so result folders of
    # different runs do not collide.
    binvoxname = args.binvox.split('/')[-1].split('.')[0]
    fullname = "prob_{}_{}_{}_{}_{}_vw{}_k{}".format(binvoxname, args.method, args.projection,
                                                     args.nproj, args.sigma, view_weight, kappa)

    # Fixed random code tensor fed to the deep-image-prior network.
    input_depth = 3
    input_noise = torch.randn(1, input_depth, 128, 128, 128).cuda()
    net = skip3d(
            input_depth, 1,
            num_channels_down = [8, 16, 32, 64, 128],
            num_channels_up   = [8, 16, 32, 64, 128],
            num_channels_skip = [0, 0, 0, 4, 4],
            upsample_mode='trilinear',
            need_sigmoid=True, need_bias=True, pad='zero', act_fun='LeakyReLU')
    net.cuda()
    net(input_noise)

    # Directly-optimised volume, only used by the 'tv' method.
    out_volume = torch.zeros(1, 1, 128, 128, 128).cuda()
    out_volume.requires_grad = True

    nviews = args.nproj
    method = args.projection

    # Ground-truth viewpoints, their Von Mises-perturbed observations, and
    # the learnable viewpoint estimates initialised at the noisy values.
    views = torch.FloatTensor(np.random.rand(nviews, 3) * 2*np.pi)
    noisy_views = torch.FloatTensor(np.random.vonmises(views, kappa, size=(nviews,3)))
    pred_views = nn.Parameter(noisy_views.detach().clone())

    if use_dp:
        optimizer = optim.Adam(list(net.parameters()) + [pred_views], lr=0.01)
    elif use_tv:
        optimizer = optim.Adam([out_volume] + [pred_views], lr=0.01)

    # Load the ground-truth volume (padded to leave room for rotations) and
    # render its reference projections.
    padder = nn.ConstantPad3d(10, 0.0)
    volume = padder(load_binvox(args.binvox).cuda())
    gtprojs = volume_proj(volume, method=method, views=views).cuda()
    noisyprojs = gtprojs.detach().clone()
    noisyprojs.requires_grad = False

    results_dir = os.path.join("results", fullname)
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)

    mse = nn.L1Loss()
    sigmoid = nn.Sigmoid()

    # Space-carving baseline: zero the voxels that project onto background
    # in every (noisy) view, save the carved volume and exit.
    if args.method == 'carve':
        gtprojs = volume_proj(volume, method=method, views=views).cuda()
        gtprojs.requires_grad = False
        noisyprojs = gtprojs.clone()
        noisyprojs.requires_grad = False
        carve = torch.ones(*(volume.size())).cuda()
        for i in range(nviews):
            # Rotate the candidate volume into the view frame, carve along
            # the projection axis, then rotate back.
            carve = rotate_volume(carve, x=noisy_views[i,0], y=noisy_views[i,1], z=noisy_views[i,2])
            p = gtprojs[:, :, i] < 1e-2
            coords = np.argwhere(p)
            carve[coords[0, :], :, coords[1, :]] = 0.0
            carve = inv_rotate_volume(carve, x=noisy_views[i,0], y=noisy_views[i,1], z=noisy_views[i,2])
        projs = volume_proj(carve, method=method, views=views).cuda()
        for i in range(noisyprojs.size()[2]):
            io.imsave(results_dir+"/carve{}.png".format(i), torch.clamp(projs[:, :, i], -1, 1))
            io.imsave(results_dir+"/carvegt{}.png".format(i), torch.clamp(gtprojs[:, :, i], -1, 1))
        write_bin_from_array("results/{}/data.npy".format(fullname), carve.data.cpu().numpy())
        exit(0)

    # Optimisation loop shared by the 'deep' and 'tv' methods.
    gt_curve = []
    noisygt_curve = []
    n_iter = 500
    out_rec = None
    out_projs = None
    pred_views_log = []
    noisy_views_log = []
    gt_views_log = []
    print('EXPERIMENT {}'.format(fullname))
    for i in range(n_iter):
        optimizer.zero_grad()
        if use_dp:
            out_rec = net(input_noise)[0, 0, :, :, :]
            out_projs = volume_proj(out_rec, method=method, views=pred_views)
            loss = mse(out_projs, noisyprojs)
            # Viewpoint regularisation: keep the predicted views close to
            # the observed (noisy) ones.
            loss -= view_weight * torch.cos(pred_views - noisy_views).mean().cuda()
        elif use_tv:
            out_rec = sigmoid(out_volume[0, 0, :, :, :])
            out_projs = volume_proj(out_rec, method=method, views=views)
            loss = mse(out_projs, noisyprojs) + tvloss3d(out_rec, weight=1e-7)
        else:
            # BUG FIX: typo "Unkown" corrected.
            raise ValueError("Unknown method")
        pred_views_log.append(pred_views.data.detach().cpu().numpy())
        noisy_views_log.append(noisy_views.data.detach().cpu().numpy())
        gt_views_log.append(views.data.detach().cpu().numpy())
        predloss = mse(out_projs, noisyprojs)
        gtloss = torch.abs(out_projs - gtprojs).mean()
        noisyloss = torch.abs(noisyprojs - gtprojs).mean()
        # BUG FIX: the original passed gt_curve.append(...) as a second
        # positional argument to print(), logging a spurious "None" every
        # iteration; the append is now a statement of its own.
        print("\r({}/{}) Pred->Noisy: {} | Pred->GT: {} | Noisy->GT: {}".format(
            str(i).zfill(4), n_iter, predloss.item(), gtloss.item(), noisyloss.item()))
        gt_curve.append(gtloss.item())
        noisygt_curve.append(noisyloss.item())
        loss.backward()
        optimizer.step()

    # Save the reconstruction, its projections and the viewpoint logs.
    # (results_dir was already created above; the original re-created it here.)
    write_bin_from_array("results/{}/databin.npy".format(fullname),
                         out_rec.data.cpu().detach().numpy())
    np.save("results/{}/data.npy".format(fullname),
            out_rec.data.cpu().detach().numpy())
    for i in range(out_projs.size()[2]):
        print("Saved {}".format("results/{}/proj{}".format(fullname, i)))
        io.imsave("results/{}/proj{}.png".format(fullname, i),
                  out_projs.data.cpu().detach().numpy()[:, :, i])
        io.imsave("results/{}/gt{}.png".format(fullname, i),
                  torch.clamp(gtprojs[:, :, i], -1, 1).data.cpu().detach().numpy())
    np.save("results/{}/gtviews.npy".format(fullname), np.array(gt_views_log))
    np.save("results/{}/noisyviews.npy".format(fullname), np.array(noisy_views_log))
    np.save("results/{}/predviews.npy".format(fullname), np.array(pred_views_log))
| 1.828125 | 2 |
fastseq/logging/logging_utils.py | nttcs-ds/fastseq | 346 | 12762305 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Logging related module."""
import os
import logging
from logging import _checkLevel
from fastseq.config import FASTSEQ_DEFAULT_LOG_LEVEL, FASTSEQ_LOG_LEVEL, FASTSEQ_LOG_FORMAT
def set_default_log_level():
    """Configure the root logger from the FASTSEQ_LOG_LEVEL environment setting."""
    try:
        level = _checkLevel(FASTSEQ_LOG_LEVEL)
    except (ValueError, TypeError) as e:
        # An unparseable level is a configuration error: report it and abort.
        logging.error(
            "Please input a valid value for FASTSEQ_LOG_LEVEL (e.g. "
            "'DEBUG', 'INFO'): {}".format(e))
        raise
    logging.basicConfig(level=level, format=FASTSEQ_LOG_FORMAT)
def get_logger(name=None, level=logging.INFO):
    """
    Return a logger with the specific name, creating it if necessary.

    If no name is specified, return the root logger.

    Args:
        name (str, optional): logger name. Defaults to None.
        level (int/str, optional): log level. Defaults to logging.INFO.

    Returns:
        Logger : the specified logger.
    """
    effective_level = _checkLevel(level)
    # A non-default FASTSEQ_LOG_LEVEL environment setting overrides the
    # caller-supplied level.
    if FASTSEQ_LOG_LEVEL != FASTSEQ_DEFAULT_LOG_LEVEL:
        try:
            effective_level = _checkLevel(FASTSEQ_LOG_LEVEL)
        except (ValueError, TypeError) as e:
            logging.error(
                "Please input a valid value for FASTSEQ_LOG_LEVEL (e.g. "
                "'DEBUG', 'INFO'): {}".format(e))
            raise
    result = logging.getLogger(name)
    result.setLevel(effective_level)
    return result
def update_all_log_level(level=logging.INFO):
    """
    Update all the loggers to use the specified level.

    Args:
        level (int/str, optional): the log level. Defaults to logging.INFO.
    """
    # Every logger ever created is registered in the root manager's dict.
    for logger_name in logging.root.manager.loggerDict:
        logging.getLogger(logger_name).setLevel(level)
| 2.890625 | 3 |
numpy_to_mp7.py | anomalyHackathon/hls4ml-ipbb | 5 | 12762306 | #!/usr/bin/env python
import argparse
from ast import parse
import numpy as np
import bitstring
def to_fixed(x, args):
    """Quantise *x* to fixed point; args.fixed_point_bits is (word_bits, integer_bits)."""
    frac_bits = args.fixed_point_bits[0] - args.fixed_point_bits[1]
    return np.round(x * 2 ** frac_bits)
def to_float(x, args):
    """Inverse of to_fixed: scale a fixed-point value back to floating point."""
    frac_bits = args.fixed_point_bits[0] - args.fixed_point_bits[1]
    return x * 2 ** -frac_bits
def vals_to_hex(vals, args):
    """Pack the signed values of *vals* into one link word and return it as hex."""
    nb = args.fixed_point_bits[0]    # bits occupied by one value
    total_bits = len(vals) * nb      # bits occupied by all N values
    assert args.link_bitwidth >= total_bits, \
        "Attempting to pack {} x {} bits ({} bits) into {}".format(
            len(vals), nb, total_bits, args.link_bitwidth)
    # Zero-pad the unused most-significant bits of the link word.
    padding = args.link_bitwidth - total_bits
    fields = ['uint:{}'.format(padding)] + ['int:{}'.format(nb)] * len(vals)
    return bitstring.pack(','.join(fields), 0, *vals).hex
def row_to_hex(row, args):
    """Convert one row of fixed-point values into an array of link hex words."""
    # Packing factor: how many values share one link word.
    pf = args.link_bitwidth // args.fixed_point_bits[0] if args.pack_links else 1
    n_words = int(np.ceil(len(row) / pf))
    # Each word packs pf consecutive values, most-significant first.
    words = [vals_to_hex(np.flip(row[k*pf:(k+1)*pf]), args)
             for k in range(n_words)]
    return np.array(words)
def main():
    """Convert a .npy file of float features into an MP7 board pattern text file.

    All configuration comes from the command line; the output file has the
    Board / Quad/Chan / Link header followed by one ``Frame`` line per row.
    """
    parser = argparse.ArgumentParser(
        description='Parse numpy file to FPGA testing for MP7 board')
    parser.add_argument('--board_name', type=str,
                        help='A string representing the name of the board')
    # NOTE(review): argparse's type=bool does not parse 'False' as False --
    # any non-empty string is truthy. Consider action='store_true' instead.
    parser.add_argument('--generate_float_from_fix', type=bool,
                        help='Specify if you want to obtain the .npy file '
                        'obtained via to_float(to_fix(input)). It is useful to '
                        'feed to avoid casting mismatches using '
                        'hls_model.predict()')
    parser.add_argument('--link_range', choices=range(0,96), type=int, nargs=2,
                        metavar=('start','stop'), help='Start and stop values '
                        'for the range related to links')
    parser.add_argument('--link_bitwidth', choices=[32,64], type=int,
                        help='Word size in bits of each link')
    parser.add_argument('--invalid_rows', type=int,
                        help='The number of invalid that will be generate at '
                        'the beginning of the test')
    parser.add_argument('--fixed_point_bits', type=int, nargs=2,
                        metavar=('word_bits', 'integer_bits'),
                        help='The number of invalid that will be generate at '
                        'the beginning of the test')
    # NOTE(review): same type=bool caveat applies to --pack_links.
    parser.add_argument('--pack_links', type=bool, help='Whether to pack '
                        'multiple values into one link where possible')
    parser.add_argument('--link_map', type=int, nargs='*', help='The link map')
    parser.add_argument('--input_data_path', type=str,
                        help='The path of the numpy file containing data in '
                        'floating point')
    parser.add_argument('--output_data_path', type=str,
                        help='The path of the produced .txt file containing '
                        'data in fixed point')
    args = parser.parse_args()
    fp32_data = np.load(args.input_data_path)
    # packing factor: number of fixed-point values that fit in one link word.
    pf = args.link_bitwidth // args.fixed_point_bits[0] if args.pack_links else 1
    link_width = args.link_range[1] - args.link_range[0] + 1
    # Sanity check: every feature of a row must fit in the available links.
    if fp32_data.shape[1] > link_width * pf:
        raise Exception(
            'Trying to fit {} features into {} links with packing factor {}'
            .format(fp32_data.shape[1],link_width,pf))
    # The pattern format holds at most 1024 frames; extra rows are dropped.
    if fp32_data.shape[0] > 1024:
        print('The system expect no more than 1024 rows; the original file will '
              'be truncated, keeping the first 1024 rows')
        fp32_data = fp32_data[:1024]
    output_file = open(args.output_data_path, 'w')
    # Quantise; optionally dump the float round-trip for comparison against
    # hls_model.predict() (avoids casting mismatches).
    fixed_data = to_fixed(fp32_data, args)
    if args.generate_float_from_fix:
        float_back_data = to_float(fixed_data, args)
        np.save('float_from_fix.npy', float_back_data)
    # One hex word per link for each row.
    fixed_data = np.array([row_to_hex(row, args) for row in fixed_data])
    # Default link map is the contiguous range; a custom map must match its length.
    link_map = list(range(args.link_range[0], args.link_range[1] + 1)) \
        if args.link_map is None else args.link_map
    assert len(link_map) == link_width, \
        'Link map length ({}) does not match link range ({})'.format(
            len(link_map), link_width)
    # board section
    board_string = 'Board {}\n'.format(args.board_name)
    # channel section: each link i lives on quad i//4, channel i%4.
    quad_chan_string = ' Quad/Chan : '
    for i in link_map:
        if args.link_bitwidth == 32:
            quad_chan_string += ' q{:02d}c{} '.format(i // 4, i % 4)
        else:
            quad_chan_string += ' q{:02d}c{} '.format(i // 4, i % 4)
        if i != link_map[-1]:
            quad_chan_string += ' '
        else:
            quad_chan_string += '\n'
    # link section
    link_string = ' Link : '
    for i in link_map:
        if args.link_bitwidth == 32:
            link_string += ' {:02d} '.format(i)
        else:
            link_string += ' {:02d} '.format(i)
        if i != link_map[-1]:
            link_string += ' '
        else:
            link_string += '\n'
    # frame section: leading invalid ('0v...') frames first, then data frames.
    frame_start = 'Frame {:04d} : '
    frame = ''
    if args.invalid_rows > 0:
        for i in range(0,args.invalid_rows):
            frame += frame_start.format(i)
            for j in range(0, args.link_range[1] - args.link_range[0] + 1):
                if args.link_bitwidth == 32:
                    frame += '0v00000000'
                else:
                    frame += '0v0000000000000000'
                if j != args.link_range[1] - args.link_range[0]:
                    frame += ' '
                else:
                    frame += '\n'
    # NOTE(review): dummy_cols omits the +1 used for link_width above --
    # confirm this off-by-one is intended for the padding columns.
    dummy_cols = args.link_range[1] - args.link_range[0] - fp32_data.shape[1]
    for i, v in enumerate(fixed_data):
        frame += frame_start.format(i + args.invalid_rows)
        # Valid data words are prefixed with '1v'.
        for j, k in enumerate(v):
            frame += '1v' + k
            frame += ' '
        # Pad the remaining (unused) links with invalid words.
        if dummy_cols > 0:
            for s in range(0, dummy_cols + 1):
                if args.link_bitwidth == 32:
                    frame += '0v00000000'
                else:
                    frame += '0v0000000000000000'
                if s + j != args.link_range[1] - args.link_range[0] - 1:
                    frame += ' '
        frame += '\n'
    l = [board_string, quad_chan_string, link_string, frame]
    output_file.writelines(l)
    output_file.close()
    print('Done!')
| 2.96875 | 3 |
src/image.py | luiscarlosgph/semi-synthetic | 5 | 12762307 | """
@brief This file holds classes that store information about the endoscopic images that are
going to be segmented.
@author <NAME> (<EMAIL>).
@date 25 Aug 2015.
"""
import numpy as np
import os
import cv2
# import caffe
import sys
import random
import matplotlib.pyplot as plt
import scipy.misc
import imutils
import geometry
import tempfile
import PIL
import skimage.morphology
import skimage.util
# My imports
import common
#
# @brief Perlin noise generator.
#
def perlin(x, y, seed):
    """Evaluate 2-D Perlin noise at the coordinate grids *x*, *y* (seeded)."""
    # Build the doubled permutation table from the given seed.
    np.random.seed(seed)
    perm = np.arange(256, dtype = int)
    np.random.shuffle(perm)
    perm = np.stack([perm, perm]).flatten()
    # Integer cell coordinates (top-left corner of each cell).
    xi = x.astype(int)
    yi = y.astype(int)
    # Fractional position inside the cell.
    xf = x - xi
    yf = y - yi
    # Smoothstep interpolation weights.
    u = fade(xf)
    v = fade(yf)
    # Gradient dot products at the four cell corners.
    n00 = gradient(perm[perm[xi] + yi], xf, yf)
    n01 = gradient(perm[perm[xi] + yi + 1], xf, yf - 1)
    n11 = gradient(perm[perm[xi + 1] + yi + 1], xf - 1, yf - 1)
    n10 = gradient(perm[perm[xi + 1] + yi], xf - 1, yf)
    # Bilinear blend of the corner contributions.
    bottom = lerp(n00, n10, u)
    top = lerp(n01, n11, u)
    return lerp(bottom, top, v)
#
# @brief Linear interpolation.
#
def lerp(a, b, x):
    """Linearly interpolate from *a* to *b* with weight *x*."""
    return a + (b - a) * x
#
# @brief 6t^5 - 15t^4 + 10t^3.
#
def fade(t):
    """Quintic Perlin smoothstep: 6t^5 - 15t^4 + 10t^3 (factored form)."""
    return t ** 3 * (t * (6 * t - 15) + 10)
#
# @brief Grad converts h to the right gradient vector and return the dot product with (x, y).
#
def gradient(h, x, y):
    """Dot product of (x, y) with one of four axis-aligned gradients selected by h % 4."""
    directions = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])
    g = directions[h % 4]
    return x * g[:, :, 0] + y * g[:, :, 1]
#
# @brief Perlin noise image.
#
# @param[in] height Height of the output image.
# @param[in] width Width of the output image.
# @param[in] scale Higher means smaller blobs.
# @param[in] minval The minimum noise value.
# @param[in] maxval The maximum noise value.
#
# @returns a 2D numpy array.
def perlin2d_smooth(height, width, scale, minval = 0.0, maxval = 1.0, seed = None):
    """Generate a (height, width) Perlin-noise image rescaled to [minval, maxval].

    Larger *scale* values produce smaller blobs.
    """
    rows = np.linspace(0, scale, height, endpoint = False)
    cols = np.linspace(0, scale, width, endpoint = False)
    grid_x, grid_y = np.meshgrid(cols, rows)
    noise = perlin(grid_x, grid_y, seed)
    # Normalise to [0, 1], then stretch to the requested output range.
    lo = np.min(noise)
    hi = np.max(noise)
    normalised = np.clip((noise - lo) / (hi - lo), 0.0, 1.0)
    return (normalised * (maxval - minval)) + minval
#
# @brief Given a set of 2D points it finds the center and radius of a circle.
#
# @param[in] x List or array of x coordinates.
# @param[in] y List or array of y coordinates.
#
# @returns (xc, yc, radius).
def fit_circle(x, y):
    """Least-squares (algebraic / Kasa-style) circle fit to a set of 2D points.

    @param[in] x List or array of x coordinates.
    @param[in] y List or array of y coordinates.

    @returns (xc, yc, radius).
    """
    # FIX: accept plain Python lists as advertised in the docstring (the
    # original crashed on 'x - x_m' when given lists).
    x = np.asarray(x, dtype = float)
    y = np.asarray(y, dtype = float)

    # Coordinates of the barycenter.
    x_m = np.mean(x)
    y_m = np.mean(y)

    # Reduced coordinates (centred on the barycenter for stability).
    u = x - x_m
    v = y - y_m

    # Linear system defining the center in reduced coordinates (uc, vc):
    #    Suu * uc + Suv * vc = (Suuu + Suvv)/2
    #    Suv * uc + Svv * vc = (Suuv + Svvv)/2
    Suv = np.sum(u * v)
    Suu = np.sum(u ** 2)
    Svv = np.sum(v ** 2)
    Suuv = np.sum(u ** 2 * v)
    Suvv = np.sum(u * v ** 2)
    Suuu = np.sum(u ** 3)
    Svvv = np.sum(v ** 3)

    # Solving the linear system.
    A = np.array([[Suu, Suv], [Suv, Svv]])
    B = np.array([Suuu + Suvv, Svvv + Suuv]) / 2.0
    uc, vc = np.linalg.solve(A, B)
    xc = x_m + uc
    yc = y_m + vc

    # Radius is the mean distance of the points to the fitted centre.
    # (The original also computed two residual sums that were never used;
    # they have been removed.)
    radius = np.mean(np.sqrt((x - xc) ** 2 + (y - yc) ** 2))
    return xc, yc, radius
#
# @brief Zero parameter Canny edge detector.
#
def auto_canny(image, sigma = 0.33):
    """Canny edge detection with thresholds derived from the median intensity."""
    med = np.median(image)
    lower_thresh = int(max(0, (1.0 - sigma) * med))
    upper_thresh = int(min(255, (1.0 + sigma) * med))
    return cv2.Canny(image, lower_thresh, upper_thresh)
#
# @brief Abstract image class. This is not meant to be instantiated and it refers to a general
# multidimensional image or label.
#
class CaffeinatedAbstract(object):
#
# @brief Every image must have at least data and name. We ensure of that with this abstract
# constructor that will be called by all the children.
#
# @param[in] raw_frame Multidimensional image, at least H x W.
# @param[in] name String with the name of the image. It can also be the frame number
# of a video, but it will be converted to string.
#
def __init__(self, raw_frame, name):
# Assert that the frame has data
if len(raw_frame.shape) <= 1 or raw_frame.shape[0] <= 0 or raw_frame.shape[1] <= 0:
raise RuntimeError('[CaffeinatedAbstract.__init__], the image provided ' \
'does not have data.')
# Assert that the name is valid
if not name:
raise ValueError('[CaffeinatedAbstract.__init__] Error, every caffeinated ' \
'abstract child must have a name.')
# Store attributes in class
self._raw_frame = raw_frame
self._name = str(name)
#
# @brief Access to a copy of the internal BGR image.
#
# @returns a copy of the internal frame, whatever it is, image or label.
def raw_copy(self):
return self._raw_frame.copy()
#
# @brief Saves image to file.
#
# @param[in] path Destination path.
# @param[in] flags Flags that will be passed to OpenCV.
#
def save(self, path, flags):
# Assert that the destination path does not exist
if common.path_exists(path):
raise ValueError('[CaffeinatedImage.save] Error, destination path ' \
+ str(path) + ' already exists.')
if flags:
return cv2.imwrite(path, self._raw_frame, flags)
else:
return cv2.imwrite(path, self._raw_frame)
#
# @brief Crops an image in a rectangular fashion, including both corner pixels in the image.
#
# @param[in] tlx Integer that represents the top left corner column.
# @param[in] tly Integer that represents the top left corner row.
# @param[in] brx Integer that represents the bottom right corner column.
# @param[in] bry Integer that represents the bottom right corner row.
#
# @returns nothing.
def crop(self, tlx, tly, brx, bry):
assert(isinstance(tlx, type(0)) and isinstance(tly, type(1)) and isinstance(brx, type(1)) \
and isinstance(bry, type(1)))
assert(tlx <= brx)
assert(tly <= bry)
self._raw_frame = self._raw_frame[tly:bry + 1, tlx:brx + 1]
    def resize_to_width(self, new_w, interp):
        # Resize the stored frame in place so that its width becomes new_w,
        # delegating to the class helper (aspect ratio presumably preserved --
        # TODO confirm against CaffeinatedAbstract.resize_width).
        self._raw_frame = CaffeinatedAbstract.resize_width(self._raw_frame, new_w, interp)
#
# @brief Convert binary mask into just the mask of its boundary.
#
# @param[in] mask Input mask.
# @param[in] thickness Thickness of the border.
#
# @returns the boundary mask.
@staticmethod
def mask2border(mask, thickness):
# Find the contour of the mask
cnts = cv2.findContours(mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]
# Create a new image with just the contour
new_mask = np.zeros_like(mask)
new_mask = cv2.drawContours(new_mask, cnts, -1, 255, thickness)
return new_mask
#
# @brief Histogram equalisation (CLAHE).
#
# @param[in] im Input image.
# @param[in] clip_limit Contrast limit.
#
# @returns the equalised image.
@staticmethod
def clahe(im, clip_limit = 2.0):
lab = cv2.cvtColor(im, cv2.COLOR_BGR2Lab)
clahe_engine = cv2.createCLAHE(clipLimit = clip_limit, tileGridSize = (8, 8))
lab[:,:, 0] = clahe_engine.apply(lab[:,:, 0])
return cv2.cvtColor(lab, cv2.COLOR_Lab2BGR)
#
# @brief Flip left-right.
#
# @returns the flipped image.
@staticmethod
def fliplr(im):
return np.fliplr(im)
#
# @brief Flip up-down.
#
# @returns the flipped image.
@staticmethod
def flipud(im):
return np.flipud(im)
#
# @brief Thresholds a grayscale image.
#
# @param[in] img Input grayscale image.
# @param[in] level Greater than this level will be set to maxval. Default value is 127.
# @param[in] maxval Th values greater than level will be set to maxval.
# Default value is 255.
#
# @returns the thresholded image.
@staticmethod
def bin_thresh(im, level = 127, maxval = 255):
assert(len(im.shape) == 2 or (len(im.shape) == 3 and im.shape[2] == 1))
_, thresh = cv2.threshold(np.squeeze(im), level, maxval, cv2.THRESH_BINARY)
return thresh
#
# @brief Random crop, both dimensions should be equal or smaller than the original size.
# @details If a list is given, all the images must be larger than the desired new height and
# width.
#
# @param[in] img Ndarray with the image, shape (height, width) or
# (height, width, channels).
# @param[in] new_height Height of the cropped image.
# @param[in] new_width Width of the cropped image.
#
# @returns a cropped patch.
@staticmethod
def random_crop(im, new_height, new_width):
assert(isinstance(im, np.ndarray))
assert(new_height > 0 and new_height <= im.shape[0])
assert(new_width > 0 and new_width <= im.shape[1])
# Choose random coordinates for crop
height_border = im.shape[0] - new_height
width_border = im.shape[1] - new_width
top_y = random.randint(0, height_border - 1) if height_border > 0 else 0
top_x = random.randint(0, width_border - 1) if width_border > 0 else 0
# Crop image
new_im = im[top_y:top_y + new_height, top_x:top_x + new_width].copy()
assert(new_im.shape[0] == new_height)
assert(new_im.shape[1] == new_width)
return new_im
#
# @brief Performs a random crop. New height and width is decided independently, this
# function changes the form factor.
#
# @param[in] img Input image, numpy array.
# @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be
# minimum half of the original.
#
# @returns the new image.
@staticmethod
def random_crop_factor(im, delta):
assert(isinstance(im, np.ndarray))
min_scale = 1.0 - delta
max_scale = 1.0
new_scale = random.uniform(min_scale, max_scale)
new_height = int(round(im.shape[0] * new_scale))
new_width = int(round(im.shape[1] * new_scale))
new_im = CaffeinatedAbstract.random_crop(im, new_height, new_width)
return new_im
#
# @brief Performs a random crop. New height and width is decided independently, this
# function changes the form factor.
#
# @param[in] img Input image, numpy array.
# @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be
# minimum half of the original.
#
# @returns the new image.
@staticmethod
def random_crop_no_factor(im, delta):
assert(isinstance(im, np.ndarray))
min_scale = 1.0 - delta
max_scale = 1.0
new_height = int(round(im.shape[0] * random.uniform(min_scale, max_scale)))
new_width = int(round(im.shape[1] * random.uniform(min_scale, max_scale)))
new_im = CaffeinatedAbstract.random_crop(im, new_height, new_width)
return new_im
#
# @brief Random crop of a list of images. The crops will be performed in different locations
# for the different images of the list, but all the output images will have the same
# size.
#
# @param[in] im_list List of images to be cropped.
# @param[in] new_height Height of the cropped image.
# @param[in] new_width Width of the cropped image.
#
# @returns a list of cropped images to the desired size.
@staticmethod
def random_crop_list(im_list, new_height, new_width):
assert(isinstance(im_list, list))
assert(len(im_list) > 0)
new_im_list = [ CaffeinatedAbstract.random_crop(im, new_height, new_width) \
for im in im_list ]
return new_im_list
#
# @brief Random crop all the images of the list in the same coordinates for all of them.
# All the input images MUST have the same size.
#
# @param[in] im_list List of images to be cropped.
# @param[in] new_height Height of the cropped image.
# @param[in] new_width Width of the cropped image.
#
# @returns a list of cropped images to the desired size.
@staticmethod
def random_crop_same_coord_list(im_list, new_height, new_width):
assert(isinstance(im_list, list))
assert(len(im_list) > 0)
# Choose random coordinates for crop
height_border = im_list[0].shape[0] - new_height
width_border = im_list[0].shape[1] - new_width
top_y = random.randint(0, height_border - 1) if height_border > 0 else 0
top_x = random.randint(0, width_border - 1) if width_border > 0 else 0
# Crop all the images in the list
new_im_list = [ im[top_y:top_y + new_height, top_x:top_x + new_width].copy() \
for im in im_list ]
return new_im_list
#
# @brief Random crop all the images of the list in the same coordinates for all of them.
# All the images MUST have the same size. The output images will have the same form
# factor.
#
# @param[in] im_list List of images to be cropped.
# @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be
# minimum half of the original.
#
# @returns a list of cropped images to the desired size.
@staticmethod
def random_crop_same_coord_list_factor(im_list, delta):
assert(isinstance(im_list, list))
assert(len(im_list) > 0)
# Get the dimensions of the new images
min_scale = 1.0 - delta
max_scale = 1.0
new_scale = random.uniform(min_scale, max_scale)
new_height = int(round(im.shape[0] * new_scale))
new_width = int(round(im.shape[1] * new_scale))
return CaffeinatedAbstract.random_crop_same_coord_list(im_list, new_height, new_width)
#
# @brief Random crop all the images of the list in the same coordinates for all of them.
# All the images MUST have the same size. The output images will not have the same
# form factor.
#
# @param[in] im_list List of images to be cropped.
# @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be
# minimum half of the original.
#
# @returns a list of cropped images to the desired size.
@staticmethod
def random_crop_same_coord_list_no_factor(im_list, delta):
assert(isinstance(im_list, list))
assert(len(im_list) > 0)
# Get the dimensions of the new images
min_scale = 1.0 - delta
max_scale = 1.0
new_height = int(round(im_list[0].shape[0] * random.uniform(min_scale, max_scale)))
new_width = int(round(im_list[0].shape[1] * random.uniform(min_scale, max_scale)))
return CaffeinatedAbstract.random_crop_same_coord_list(im_list, new_height, new_width)
#
# @brief Scale an image keeping original size, that is, the output image will have the
# size of the input.
#
# @details If the scale factor is smaller than 1.0, the output image will be padded.
# Otherwise it will be cropped.
#
# @param[in] im Input image or list of images.
# @param[in] scale_factor If 1.0, the image stays as it is.
# @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos.
# @param[in] boder_value Border value. Used when the image is downsized and padded.
# @param[in] clip_sides List of sides to crop out. Used only in case the scaling factor
# is lower than 1.0.
#
# @returns the scaled image.
    @staticmethod
    def scale_keeping_size(im, scale_factor, interp, border_value, clip_sides = None):
        """Scale *im* by *scale_factor* while keeping the original output size:
        upscaled images are centre-cropped, downscaled images are centre-padded
        with *border_value*. clip_sides optionally shifts the downscaled content
        towards the listed image borders ('top', 'left', 'bottom', 'right').
        """
        if clip_sides is None:
            clip_sides = []
        # Resize image to the desired new scale
        new_im = CaffeinatedAbstract.resize_factor(im, scale_factor, interp)

        # If the new image is larger, we crop it
        if new_im.shape[0] > im.shape[0]:
            new_im = CaffeinatedAbstract.crop_center(new_im, im.shape[1], im.shape[0])
        # If the new image is smaller, we pad it
        elif new_im.shape[0] < im.shape[0]:
            padded = np.full_like(im, border_value)
            start_row = (padded.shape[0] // 2) - (new_im.shape[0] // 2)
            start_col = (padded.shape[1] // 2) - (new_im.shape[1] // 2)
            end_row = start_row + new_im.shape[0]
            end_col = start_col + new_im.shape[1]
            padded[start_row:end_row, start_col:end_col] = new_im
            new_im = padded

        # Move the image to the desired sides (used to downscale tools and still keep them
        # attached to the border of the image)
        # NOTE(review): 'start_row', 'start_col' and 'padded' are only defined
        # in the downscale branch above -- using clip_sides with
        # scale_factor >= 1.0 would raise a NameError. Confirm callers only
        # pass clip_sides when downscaling.
        # NOTE(review): cv2.warpAffine's fourth positional parameter is 'dst',
        # not 'flags' -- 'interp' may not be applied as the interpolation flag
        # here; confirm against the OpenCV API.
        if 'top' in clip_sides:
            M = np.float32([[1, 0, 0], [0, 1, -start_row]])
            new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]),
                interp, cv2.BORDER_CONSTANT, border_value)
        if 'left' in clip_sides:
            M = np.float32([[1, 0, -start_col], [0, 1, 0]])
            new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]),
                interp, cv2.BORDER_CONSTANT, border_value)
        if 'bottom' in clip_sides:
            M = np.float32([[1, 0, 0], [0, 1, start_row]])
            new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]),
                interp, cv2.BORDER_CONSTANT, border_value)
        if 'right' in clip_sides:
            M = np.float32([[1, 0, start_col], [0, 1, 0]])
            new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]),
                interp, cv2.BORDER_CONSTANT, border_value)

        return new_im
#
# @brief Could flip the image or not.
#
# @param[in] im Image or list of images. If list, all images are either flipped or not.
#
# @returns the image (maybe flipped) maybe just the original one.
@staticmethod
def random_fliplr(im, not_used = None):
if common.randbin():
if isinstance(im, list):
return [ CaffeinatedAbstract.fliplr(i) for i in im ]
else:
return CaffeinatedAbstract.fliplr(im)
else:
return im
#
# @brief Could flip the image or not.
#
# @param[in] im Image or list of images. If list, all images are either flipped or not.
#
# @returns the image (maybe flipped) maybe just the original one.
@staticmethod
def random_flipud(im, not_used = None):
if common.randbin():
if isinstance(im, list):
return [ CaffeinatedAbstract.flipud(i) for i in im ]
else:
return CaffeinatedAbstract.flipud(im)
else:
return im
#
# @brief Add motion blur in a specific direction.
#
# @param[in] im Input image.
# @param[in] mask Pass a foreground mask if you wanna apply the motion just in the
# foreground.
# @param[in] apply_on Either 'bg', 'fg' or 'both'.
# @param[in] ks Size of the convolution kernel to be applied.
# @param[in] phi_deg Angle of rotation in degrees. Default is zero, so the motion will be
# horizontal.
#
# @returns the blured images.
@staticmethod
def directional_motion_blur(im, phi_deg = 0, ks = 15):
# Generating the kernel
kernel = np.zeros((ks, ks))
kernel[int((ks - 1) / 2),:] = np.ones(ks) / ks
# Rotate image if the user wants to simulate motion in a particular direction
# rot_im = CaffeinatedAbstract.rotate_bound(im, phi_deg, cv2.INTER_CUBIC)
# rot_im_blur = cv2.filter2D(rot_im, -1, kernel)
# new_im = CaffeinatedAbstract.rotate_bound(rot_im_blur, -phi_deg, cv2.INTER_CUBIC)
# tly = (new_im.shape[0] - im.shape[0]) // 2
# tlx = (new_im.shape[1] - im.shape[1]) // 2
# new_im = new_im[tly:tly + im.shape[0], tlx:tlx + im.shape[1]]
# FIXME: We keep just horizontal motion to investigate drop in performance
new_im = cv2.filter2D(im, -1, kernel)
return new_im
#
# @brief Random motion blur. Both foreground and background images must have the same size.
#
# @param[in] im Input image.
# @param[in] mask Mask of the foreground object that will appear blurred within the
# image.
# @param[in] rho Magnitude in pixels of the foreground motion vector.
# @param[in] phi_deg Angle in degrees of the motion vector.
# @param[in] interlaced Random interlacing will be added. Some lines of the foreground will
# move and others will not.
# @param[in] alpha Weight for the weighted sum. Default value is 0.5.
#
# @returns the blurred image.
    @staticmethod
    def weighted_sum_motion_blur(im, mask, rho, phi_deg, interlaced = False,
            alpha = 0.5):
        """Blend the image with a translated copy of itself (inside the mask)
        to simulate motion blur of the masked object; optionally interlace.
        """
        assert(im.shape[0] == mask.shape[0])
        assert(im.shape[1] == mask.shape[1])

        # Compute random motion vector
        # Motion vector (tx, ty) from polar coordinates (rho, phi).
        phi = common.deg_to_rad(phi_deg)
        tx = rho * np.cos(phi)
        ty = rho * np.sin(phi)

        # Translation matrix
        trans_mat = np.eye(3)
        trans_mat[0, 2] = tx
        trans_mat[1, 2] = ty
        mat = trans_mat[:2, :3]

        # Warp current image and mask according to the motion vector
        im_warped = cv2.warpAffine(im, mat, (im.shape[1], im.shape[0]), flags = cv2.INTER_CUBIC)
        mask_warped = cv2.warpAffine(mask, mat, (im.shape[1], im.shape[0]),
            flags = cv2.INTER_NEAREST)

        # Interlacing: randomly remove some of the mask rows so that only a
        # subset of the foreground lines receives the displaced copy.
        if interlaced:
            mask_warped_orig = mask_warped.copy()
            lines_with_mask = np.unique(np.nonzero(mask_warped)[0]).tolist()
            if lines_with_mask:
                num_lines_to_remove = np.random.randint(len(lines_with_mask))
                random.shuffle(lines_with_mask)
                lines_with_mask = lines_with_mask[:num_lines_to_remove]
                for i in lines_with_mask:
                    mask_warped[i,:] = 0

        # Combine both images: weighted sum of the original and the warped
        # copy inside the (possibly thinned) warped mask.
        new_im = im.copy()
        new_im[mask_warped > 0] = np.round(
            alpha * im[mask_warped > 0] + (1. - alpha) * im_warped[mask_warped > 0]
        ).astype(np.uint8)

        # Blur if interlaced: smooth over the full warped-mask area to soften
        # the line artefacts introduced by the interlacing.
        if interlaced:
            ksize = 3
            blurred = cv2.GaussianBlur(new_im, (ksize, ksize), 0)
            new_im[mask_warped_orig > 0] = blurred[mask_warped_orig > 0]
        return new_im
#
# @brief Adds or subtracts intensity in different parts of the image using Perlin noise.
#
# @param[in] im Input image.
#
# @returns the augmented image.
@staticmethod
def random_local_brightness_augmentation(im, intensity_start = 50., intensity_stop = 200.,
intensity_step = 50., shape_start = 1., shape_stop = 5., shape_step = 1.):
# Generate random illumination change range
intensity_options = np.arange(intensity_start, intensity_stop + intensity_step, intensity_step)
change_choice = np.random.choice(intensity_options)
# Generate Perlin blob size, larger numbers mean smaller blobs
shape_options = np.arange(shape_start, shape_stop + shape_step, shape_step)
shape_choice = np.random.choice(shape_options)
# Generate Perlin additive noise mask
pn = perlin2d_smooth(im.shape[0], im.shape[1], shape_choice) * change_choice \
- .5 * change_choice
pn = np.dstack((pn, pn, pn))
# Modify the image: HSV option
# hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV).astype(np.float64)
# hsv[:, :, 2] = np.round(np.clip(hsv[:, :, 2] + pn, 0, 255))
# augmented = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
# Additive value on BGR
augmented = np.round(np.clip(im.astype(np.float64) + pn, 0., 255.)).astype(np.uint8)
return augmented
#
# @brief Adds or subtracts intensity in different parts of the image using Perlin noise.
#
# @param[in] im Input image.
#
# @returns the augmented image.
@staticmethod
def random_local_contrast_augmentation(im, shape_start = 1., shape_stop = 5., shape_step = 1.):
# Choose minimum and maximum contrast randomly
contrast_min = random.choice([0.5, 0.6, 0.7, 0.8])
contrast_max = random.choice([1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0])
# Generate Perlin blob size, larger numbers mean smaller blobs
shape_options = np.arange(shape_start, shape_stop + shape_step, shape_step)
shape_choice = np.random.choice(shape_options)
# Generate Perlin additive noise mask
pn = perlin2d_smooth(im.shape[0], im.shape[1], shape_choice, minval = contrast_min,
maxval = contrast_max)
pn = np.dstack((pn, pn, pn))
# Modify the image
augmented = np.round(np.clip(np.multiply(im.astype(np.float64), pn), 0, 255)).astype(np.uint8)
return augmented
#
# @brief Global (as in same additive value added to all pixels) brightness augmentation.
#
# @param[in] im Input image.
#
# @returns the augmented image.
@staticmethod
def random_global_brightness_augmentation(im, intensity_start = -50, intensity_stop = 50,
intensity_step = 10):
# Generate random illumination change
intensity_options = np.arange(intensity_start, intensity_stop + intensity_step,
intensity_step)
change_choice = np.random.choice(intensity_options)
# Additive change on Value of HSV
# hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV).astype(np.float64)
# hsv[:, :, 2] = np.round(np.clip(hsv[:, :, 2] + change_choice, 0., 255.))
# hsv = hsv.astype(np.uint8)
# augmented = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
# Additive change on all channels of BGR
augmented = np.clip(im.astype(np.float64) + change_choice, 0, 255).astype(np.uint8)
return augmented
#
# @brief Global contrast (multiplicative) augmentation.
#
# @param[in] im Input image.
#
# @returns the augmented image.
@staticmethod
def random_global_contrast_augmentation(im):
contrast_choice = random.choice([0.5, 0.6, 0.7, 0.8, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0])
augmented = np.round(np.clip(np.multiply(im.astype(np.float64),
contrast_choice), 0, 255)).astype(np.uint8)
return augmented
    #
    # @brief Motion blur with randomly sampled magnitude, direction and interlacing flag.
    #
    # @param[in] im      Image or list of images.
    # @param[in] mask    Mask of the moving object (or list of masks matching im).
    # @param[in] max_mag Maximum amount of pixels of displacement (exclusive upper bound).
    # @param[in] max_ang Maximum angle of the motion vector (exclusive upper bound).
    #                    Default is 360, i.e. the object can move in any direction.
    #
    # @returns the image(s) blurred with the randomly sampled motion parameters.
    @staticmethod
    def random_weighted_sum_motion_blur(im, mask, max_mag = 32, max_ang = 360):
        # Magnitude in [0, max_mag) and angle in [0, max_ang), both sampled uniformly
        rho = np.random.randint(max_mag)
        phi_deg = np.random.randint(max_ang)
        # Randomly decide whether the blur simulates an interlaced capture
        interlaced = common.randbin()
        if isinstance(im, list):
            return [ CaffeinatedAbstract.weighted_sum_motion_blur(i, m, rho, phi_deg,
                interlaced) for i, m in zip(im, mask) ]
        else:
            return CaffeinatedAbstract.weighted_sum_motion_blur(im, mask, rho, phi_deg,
                interlaced)
#
# @brief Converts an image from BGR to BRG.
#
# @param[in] im BGR image.
#
# @returns an image converted to BRG.
@staticmethod
def bgr2brg(im):
return im[..., [0, 2, 1]]
#
# @brief Bernoulli BGR to BRG swapping.
#
# @param[in] im Image or list of images.
#
# @returns the image with the green-red channels swapped with a probability of 0.5.
@staticmethod
def random_brg(im):
if common.randbin():
if isinstance(im, list):
return [ CaffeinatedAbstract.bgr2brg(i) for i in im ]
else:
return CaffeinatedAbstract.bgr2brg(im)
else:
return im
#
# @brief Rotates the image over itself a random number of degrees.
#
# @param[in] im Input image, numpy array.
# @param[in] deg_delta The range of possible rotation is +- deg_delta.
# @param[in] interp Interpolation method: lanczos, linear, cubic, nearest.
#
# @returns the rotated image.
@staticmethod
def random_rotation(im, deg_delta, interp):
max_ang = deg_delta
min_ang = -1. * max_ang
ang = random.uniform(min_ang, max_ang)
new_im = None
if isinstance(im, list):
new_im = [ CaffeinatedAbstract.rotate_and_crop(i, ang, interp) for i in im ]
else:
new_im = CaffeinatedAbstract.rotate_and_crop(im, ang, interp)
return new_im
#
# @brief Resizes an imaged to the desired width while keeping proportions.
#
# @param[in] im Image to be resized.
# @param[in] new_w New width.
# @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos.
#
# @returns a resized image.
@staticmethod
def resize_width(im, new_w, interp = None):
assert(im.dtype == np.uint8)
# If no interpolation method is chosen we select the most convenient depending on whether
# the user is upsampling or downsampling the image
if interp is None:
interp = cv2.INTER_AREA if new_w < im.shape[1] else cv2.INTER_LANCZOS4
ratio = float(im.shape[0]) / float(im.shape[1])
new_h = int(round(new_w * ratio))
new_im = cv2.resize(im, (new_w, new_h), interpolation=interp)
return new_im
#
# @brief Resizes an imaged to the desired width while keeping proportions.
#
# @param[in] im Image to be resized.
# @param[in] new_h New height.
# @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos.
#
# @returns a resized image.
@staticmethod
def resize_height(im, new_h, interp):
assert(im.dtype == np.uint8)
ratio = float(im.shape[0]) / float(im.shape[1])
new_w = int(round(new_h / ratio))
# imethod = PIL_interp_method[interp]
# new_im = np.array(PIL.Image.fromarray(im).resize((new_w, new_h), imethod))
new_im = cv2.resize(im, (new_w, new_h), interpolation=interp)
return new_im
#
# @brief Scales an image to a desired factor of the original one.
#
# @param[in] im Image to be resized.
# @param[in] scale_factor Factor to scale up or down the image.
# @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos.
#
# @returns a resized image.
@staticmethod
def resize_factor(im, scale_factor, interp):
new_w = int(round(im.shape[1] * scale_factor))
return CaffeinatedAbstract.resize_width(im, new_w, interp)
#
# @brief Scales an image to a desired factor of the original one.
#
# @param[in] im Image to be resized.
# @param[in] new_w New width.
# @param[in] new_h New width.
# @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos.
#
# @returns a resized image.
@staticmethod
def resize(im, new_w, new_h, interp):
# imethod = PIL_interp_method[interp]
# new_im = scipy.misc.imresize(im, (new_h, new_w), interp = interp).astype(im.dtype)
# return np.array(PIL.Image.fromarray(im).resize((new_w, new_h), imethod),
# dtype = im.dtype)
new_im = cv2.resize(im, (new_w, new_h), interpolation=interp)
return new_im
#
# @returns a crop of shape (new_h, new_w).
#
@staticmethod
def crop_center(im, new_w, new_h):
start_x = im.shape[1] // 2 - (new_w // 2)
start_y = im.shape[0] // 2 - (new_h // 2)
return im[start_y:start_y + new_h, start_x:start_x + new_w].copy()
    #
    # @brief Rotatation of an image with black bounds around it, as it would be
    #        expected. A positive rotation angle results in a clockwise rotation.
    #
    # @param[in] image  Numpy ndarray.
    # @param[in] angle  Angle in degrees.
    # @param[in] interp OpenCV interpolation flag forwarded to cv2.warpAffine.
    #
    # @returns the rotated image, with the canvas enlarged so no content is lost.
    @staticmethod
    def rotate_bound(image, angle, interp):
        # Grab the dimensions of the image and then determine the center
        (h, w) = image.shape[:2]
        (cX, cY) = (w / 2, h / 2)
        # Grab the rotation matrix (applying the negative of the
        # angle to rotate clockwise), then grab the sine and cosine
        # (i.e., the rotation components of the matrix)
        M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
        cos = np.abs(M[0, 0])
        sin = np.abs(M[0, 1])
        # Compute the new bounding dimensions of the image
        nW = int((h * sin) + (w * cos))
        nH = int((h * cos) + (w * sin))
        # Adjust the rotation matrix to take into account translation
        M[0, 2] += (nW / 2) - cX
        M[1, 2] += (nH / 2) - cY
        # Perform the actual rotation and return the image
        return cv2.warpAffine(image, M, (nW, nH), flags = interp)
#
# brief Rotates an image over a centre point given and leaves the whole
# image inside. Clockwise rotation of the image.
#
# @param[in] im Numpy ndarray.
# @param[in] centre (x, y) in image coordinates.
# @param[in] angle Angle in degrees.
# @param[in] interp OpenCV interpolation method.
@staticmethod
def rotate_bound_centre(im, centre, deg, interp):
cm_x = centre[0]
cm_y = centre[1]
# Build the rotation matrix
rot_mat = cv2.getRotationMatrix2D((cm_y, cm_x), -deg, 1.0)
rot_mat_hom = np.zeros((3, 3))
rot_mat_hom[:2,:] = rot_mat
rot_mat_hom[2, 2] = 1
# Find the coordinates of the corners in the rotated image
h = im.shape[0]
w = im.shape[1]
tl = np.array([0, 0, 1]).reshape((3, 1))
tr = np.array([w - 1, 0, 1]).reshape((3, 1))
bl = np.array([0, h - 1, 1]).reshape((3, 1))
br = np.array([w - 1, h - 1, 1]).reshape((3, 1))
tl_rot = np.round(np.dot(rot_mat_hom, tl)).astype(np.int)
tr_rot = np.round(np.dot(rot_mat_hom, tr)).astype(np.int)
bl_rot = np.round(np.dot(rot_mat_hom, bl)).astype(np.int)
br_rot = np.round(np.dot(rot_mat_hom, br)).astype(np.int)
# Compute the size of the new image from the coordinates of the rotated one so that
# we add black bounds around the rotated one
min_x = min([tl_rot[0], tr_rot[0], bl_rot[0], br_rot[0]])
max_x = max([tl_rot[0], tr_rot[0], bl_rot[0], br_rot[0]])
min_y = min([tl_rot[1], tr_rot[1], bl_rot[1], br_rot[1]])
max_y = max([tl_rot[1], tr_rot[1], bl_rot[1], br_rot[1]])
new_w = max_x + 1 - min_x
new_h = max_y + 1 - min_y
# Correct the translation so that the rotated image lies inside the window
rot_mat[0, 2] -= min_x
rot_mat[1, 2] -= min_y
return cv2.warpAffine(im, rot_mat, (new_w[0], new_h[0]), flags = interp)
    #
    # @brief Clockwise rotation plus crop (so that there is no extra added black background).
    #
    # @details The crop is based on the axis-aligned rectangle of maximal area that fits
    #          inside the rotated region.
    #
    # @param[in] im     Numpy ndarray image. Shape (h, w, 3) or (h, w).
    # @param[in] ang    Angle in degrees.
    # @param[in] interp Interpolation method: lanczos, linear, cubic, nearest.
    #
    # @returns the rotated and centre-cropped image.
    @staticmethod
    def rotate_and_crop(im, ang, interp):
        # Rotate image
        rotated = CaffeinatedAbstract.rotate_bound(im, ang, interp)
        # Calculate cropping area
        wr, hr = geometry.rotated_rect_with_max_area(im.shape[1],
            im.shape[0], common.deg_to_rad(ang))
        wr = int(np.floor(wr))
        hr = int(np.floor(hr))
        # Centre crop
        rotated = CaffeinatedAbstract.crop_center(rotated, wr, hr)
        return rotated
    #
    # @brief Deinterlaces an image by shelling out to ffmpeg (yadif filter).
    #
    # @param[in] im  Numpy ndarray image. Shape (h, w, 3) or (h, w).
    # @param[in] ext Extension (and hence format) of the temporary files.
    #
    # @returns the deinterlaced image.
    #          NOTE(review): if the ffmpeg call fails, cv2.imread returns None — confirm
    #          that callers handle that case.
    @staticmethod
    def deinterlace(im, ext = '.png'):
        input_path = tempfile.gettempdir() + '/' + common.gen_rand_str() + ext
        output_path = tempfile.gettempdir() + '/' + common.gen_rand_str() + ext
        # Save image in a temporary folder
        cv2.imwrite(input_path, im)
        # Deinterlace using ffmpeg
        common.shell('ffmpeg -i ' + input_path + ' -vf yadif ' + output_path)
        # Read deinterlaced image
        dei = cv2.imread(output_path)
        # Remove image from temporary folder
        common.rm(input_path)
        common.rm(output_path)
        return dei
@staticmethod
def gaussian_noise(im, mean=0, std=20):
noise = np.random.normal(mean, std, im.shape)
return np.round(np.clip(im.astype(np.float64) + noise, 0, 255)).astype(np.uint8)
#
# @rteurns a gamma corrected image.
#
@staticmethod
def adjust_gamma(im, gamma = 1.0):
inv_gamma = 1.0 / gamma
table = np.array([((i / 255.0) ** inv_gamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(im, table)
#
# @brief Draws an horizontal gradient image.
#
# @returns the image of the gradient.
@staticmethod
def draw_grad_lr(height, width, left_colour, right_colour):
return (np.ones((height, width)) * np.linspace(left_colour, right_colour,
width)).astype(np.uint8)
#
# @brief Draws an horizontal gradient image.
#
# @returns the image of the gradient.
@staticmethod
def draw_grad_ud(height, width, left_colour, right_colour):
return (np.ones((height, width)) * np.linspace(left_colour, right_colour,
width)).astype(np.uint8).T
    #
    # @brief Detects the bounding box of the circular endoscopic field of view.
    #        FIXME: does not work properly when the image is dark.
    #
    # @param[in] im BGR endoscopic image.
    #
    # @returns the (x, y, w, h) integer bounding box of the detected circle.
    @staticmethod
    def detect_endoscopic_circle_bbox(im):
        # Threshold out the (near) black background that surrounds the endoscopic circle
        max_black_intensity = 10
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        kernel = np.ones((3, 3), np.uint8)
        dilation = cv2.dilate(gray, kernel, iterations = 1)
        _, thresh = cv2.threshold(dilation, max_black_intensity, 255, cv2.THRESH_BINARY)
        # Detect contour of largest area
        # NOTE(review): the 3-value unpacking matches OpenCV 3.x; 2.x/4.x return two
        # values — confirm the OpenCV version in use.
        _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnt = max(contours, key = cv2.contourArea)
        # Bounding box of the minimum enclosing circle of the largest contour
        ((xc, yc), radius) = cv2.minEnclosingCircle(cnt)
        x = xc - radius
        y = yc - radius
        w = 2 * radius
        h = 2 * radius
        # x, y, w, h = cv2.boundingRect(cnt)
        return int(x), int(y), int(w), int(h)
@staticmethod
def crop_endoscopic_circle(im):
# Detect endoscopic circle
has_circle = True
# TODO
if not has_circle:
return im
x, y, w, h = CaffeinatedAbstract.detect_endoscopic_circle_bbox(im)
cropped = im[y:y + h, x:x + w].copy()
return cropped
    #
    # @brief Placeholder for adding specular reflections to an image.
    #
    # TODO: not implemented yet — this stub takes no arguments and does nothing.
    #
    @staticmethod
    def add_specular_noise():
        pass
    #
    # @brief Skeletonisation of a binary image.
    #
    # @param[in] im Input binary image, shape (h, w). Binary means some values are zero
    #               and the rest non-zero (e.g. 1 or 255).
    #
    # @returns the skeleton as produced by skimage.morphology.skeletonize_3d
    #          (presumably uint8 with values 0/255 — confirm against the skimage version).
    @staticmethod
    def skeleton(im):
        assert(len(im.shape) == 2)
        sk = skimage.morphology.skeletonize_3d(im.astype(bool))
        return sk
#
# @brief Pads and image with extra pixels according to a newly specified size.
#
# @param[in] tlx Integer that represents the top left corner column.
# @param[in] tly Integer that represents the top left corner row.
# @param[in] brx Integer that represents the bottom right corner column.
# @param[in] bry Integer that represents the bottom right corner row.
# @param[in] width Width of the new image.
# @param[in] height Height of the new image.
# @param[in] intensity Integer of the padding pixels.
#
# @returns nothing.
def pad(self, tlx, tly, brx, bry, width, height, intensity):
assert(isinstance(tlx, type(0)) and isinstance(tly, type(1)) and isinstance(brx, type(1)) \
and isinstance(bry, type(1)))
assert(tlx <= brx)
assert(tly <= bry)
assert(width >= self.width)
assert(height >= self.height)
assert(isinstance(intensity, type(1)))
# Create image of the new size
new_raw_frame = None
new_pixel = None
if len(self._raw_frame.shape) == 2:
new_raw_frame = np.empty((height, width), dtype=self._raw_frame.dtype)
new_pixel = intensity
elif len(self._raw_frame.shape) == 3:
new_raw_frame = np.empty((height, width, self._raw_frame.shape[2]),
dtype=self._raw_frame.dtype)
new_pixel = np.empty((self._raw_frame.shape[2],), dtype=self._raw_frame.dtype)
new_pixel.fill(intensity)
else:
raise ValueError('[image.CaffeinatedAbstract.pad] Error, image dimension ' \
+ str(self._raw_frame.shape) + ' not supported.')
new_raw_frame[:,:] = new_pixel
# Insert the previous image in the right place
new_raw_frame[tly:bry + 1, tlx:brx + 1] = self._raw_frame
self._raw_frame = new_raw_frame
    #
    # @brief Converts the internal image into a distance transform (L2 norm) to the edges.
    #
    # @param[in] mask_size Integer size of the Sobel filter kernel (also used for the
    #                      dilation kernel).
    #
    # @returns nothing.
    def shape_transform(self, mask_size):
        assert(isinstance(mask_size, type(0)))
        # Sobel filter: combined absolute horizontal and vertical gradients
        sobel_x_64f = np.absolute(cv2.Sobel(self._raw_frame, cv2.CV_64F, 1, 0, ksize=mask_size))
        sobel_y_64f = np.absolute(cv2.Sobel(self._raw_frame, cv2.CV_64F, 0, 1, ksize=mask_size))
        sobel_64f = (sobel_x_64f + sobel_y_64f)
        scaled_sobel = np.uint8(255 * sobel_64f / np.max(sobel_64f))
        # Dilate borders
        kernel = np.ones((mask_size, mask_size), np.uint8)
        dilated = cv2.dilate(scaled_sobel, kernel, iterations=1)
        # Threshold
        _, thresh = cv2.threshold(dilated, 1, 255, cv2.THRESH_BINARY)
        # Distance transform (inverted: far from an edge means a lower value)
        dist = 255 - (cv2.distanceTransform(255 - thresh, cv2.DIST_L2, maskSize=0))
        # Remove background
        dist[self._raw_frame == 0] = 0
        self._raw_frame = dist
#
# @brief Converts image to single channel.
#
# @returns nothing.
def convert_to_single_chan(self):
assert(len(self._raw_frame.shape) == 3)
# Sanity check: assert that all the pixels of the image have the same intensity value in all the
# channels
for channel in range(1, self._raw_frame.shape[2]):
if not np.array_equal(self._raw_frame[:,:, channel], self._raw_frame[:,:, 0]):
raise RuntimeError('[CaffeinatedAbstract] Error, the image ' + self._name + ' has ' \
+ 'channels that are different from each other so it is not clear ' \
+ 'how to convert it to a proper single channel image.')
self._raw_frame = self._raw_frame[:,:, 0]
#
# @brief Changes the intensity of all the pixels in all the channels to zero.
#
# @returns nothing.
def convert_to_black(self):
self._raw_frame.fill(0)
#
# @brief Filter image with ground truth label, background pixels on the ground truth will be blacked.
#
# @param[in] caffe_label CaffeinatedLabel.
#
def filter_with_gt(self, caffe_label):
self._raw_frame[caffe_label.raw == 0] = 0
    #
    # @brief Builds an object of this class from an image file.
    #
    # @param[in] path Path to the image file, read with cv2.IMREAD_UNCHANGED so channel
    #                 count and bit depth are preserved.
    # @param[in] args Extra positional arguments forwarded to the class constructor.
    #
    @classmethod
    def from_file(cls, path, *args):
        # return cls(cv2.imread(path, cv2.IMREAD_COLOR), *args)
        return cls(cv2.imread(path, cv2.IMREAD_UNCHANGED), *args)
    #
    # @returns the height (number of rows) of the image.
    #
    @property
    def height(self):
        return self._raw_frame.shape[0]
    #
    # @returns the width (number of columns) of the image.
    #
    @property
    def width(self):
        return self._raw_frame.shape[1]
    #
    # @returns the name (identifier) of the image.
    #
    @property
    def name(self):
        return self._name
    #
    # @returns the raw internal numpy frame (not a copy).
    #
    @property
    def raw(self):
        return self._raw_frame
    #
    # @returns the numpy data type of the internal frame.
    #
    @property
    def dtype(self):
        return self._raw_frame.dtype
#
# @class CaffeinatedImage represents an image that will be used by Caffe, so this class
#        provides methods to adapt the original image to the type of input Caffe expects.
#
class CaffeinatedImage(CaffeinatedAbstract):
    #
    # @brief Saves the colour image as an attribute of the class.
    #
    # @param[in] raw_frame Numpy array with an image, shape (h, w) or (h, w, c). A 2D
    #                      image is expanded to shape (h, w, 1) before being stored.
    # @param[in] name      Id of the image, either the name or the frame number.
    # @param[in] label     Integer id of the class the image belongs to. Only used when
    #                      the image is used for classification. Default value is None.
    #
    def __init__(self, raw_frame, name, label = None):
        # Assert that the image has at least two dimensions
        dim = len(raw_frame.shape)
        if dim < 2:
            raise RuntimeError('[CaffeinatedImage.__init__], the image provided has [' + \
                str(dim) + '] dimensions, only (H x W x C) and (H x W) are supported.')
        # Assert that the type of label is correct (i.e. integer) when it is not None
        if label is not None:
            assert(isinstance(label, type(0)))
        self._label = label
        # Call CaffeinatedAbstract constructor, expanding (h, w) -> (h, w, 1) if needed
        super(CaffeinatedImage, self).__init__(raw_frame if dim > 2 else np.expand_dims(raw_frame, axis = 2),
            name)
    #
    # @brief Convert the image to a Caffe test input, transposing it to the Caffe format
    #        (C x H x W) and subtracting the training mean.
    #
    # @details The mean needs to be subtracted because there is no transform_param section
    #          in the input layer of the test network.
    #
    # @param[in] mean_values Numpy ndarray with the per channel mean of the training set.
    #                        Shape (channels,).
    #
    # @returns a float32 array of shape (c, h, w) ready to be processed by Caffe.
    def convert_to_caffe_input(self, mean_values):
        # Sanity check: one mean value per channel of the input image
        dim = len(self._raw_frame.shape)
        no_mean_values = mean_values.shape[0]
        if dim < 3: # 1D or 2D images should have only one channel mean
            if no_mean_values != 1:
                raise ValueError('[convert_to_caffe_input] Error, [' + str(no_mean_values) + '] mean ' + \
                    ' values provided, but the image is only 1D or 2D, so only one mean value is required.')
        elif dim == 3:
            channels = self._raw_frame.shape[-1]
            if channels != no_mean_values:
                raise ValueError('[convert_to_caffe_input] Error, [' + str(no_mean_values) \
                    + '] mean values have been provided but the given image has [' + str(channels) \
                    + '] channels.')
        else:
            raise ValueError('[convert_to_caffe_input] Error, high dimensional image not supported.')
        return np.transpose(self._raw_frame.astype(np.float32) - mean_values, (2, 0, 1))
    #
    # @brief Resize the image to the desired new width and height.
    #
    # NOTE(review): the (new_h, new_w) parameter order differs from the static
    # CaffeinatedAbstract.resize(im, new_w, new_h, interp) — kept as-is for existing
    # callers; confirm call sites when refactoring.
    #
    # @param[in] new_h New height.
    # @param[in] new_w New width.
    #
    # @returns nothing.
    def resize(self, new_h, new_w):
        self._raw_frame = cv2.resize(self._raw_frame, (new_w, new_h))
    #
    # @brief Resize the image keeping the original aspect ratio, padding with black
    #        borders when required.
    #
    # @param[in] new_h Height of the new image.
    # @param[in] new_w Width of the new image.
    #
    # @returns nothing.
    def resize_keeping_aspect(self, new_h, new_w):
        # Aspect ratio, width and height of the current frame
        w = self.width
        h = self.height
        ar = float(w) / float(h)
        # Create the new frame with the desired dimensions, initialised to black
        new_frame = np.zeros((new_h, new_w, self._raw_frame.shape[2]), self._raw_frame.dtype)
        # Scale the larger side of the image and adapt the other one to the aspect ratio
        y_start = 0
        x_start = 0
        if w >= h:
            temp_w = new_w
            temp_h = int(temp_w / ar)
            y_start = int((new_h - temp_h) / 2.0)
        else:
            temp_h = new_h
            temp_w = int(temp_h * ar)
            x_start = int((new_w - temp_w) / 2.0)
        # Paste the resized image centred on the black canvas
        new_frame[y_start:temp_h + y_start, x_start:temp_w + x_start] = cv2.resize(self._raw_frame,
            (temp_w, temp_h))
        # Copy the final image to the internal buffer that will be displayed
        self._raw_frame = new_frame
    #
    # @brief Converts the BGR image to a Caffe datum with shape (C x H x W), encoding the
    #        pixels as JPEG.
    #
    # @param[in] jpeg_quality JPEG quality used to encode the image, in [0, 100].
    #
    # @returns the Caffe datum serialised as a string.
    def serialise_to_string(self, jpeg_quality=100):
        assert(self._raw_frame.dtype == np.uint8)
        import caffe
        # Convert image to Caffe datum
        datum = caffe.proto.caffe_pb2.Datum()
        datum.height, datum.width, datum.channels = self._raw_frame.shape
        flags = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality]
        # ndarray.tostring() was removed in NumPy 2.0; tobytes() is its exact replacement
        datum.data = cv2.imencode('.jpg', self._raw_frame, flags)[1].tobytes()
        # If the image has a label, it must be an integer
        if self._label is not None:
            assert(isinstance(self._label, type(0)))
            datum.label = self._label
        return datum.SerializeToString()
    #
    # @brief Converts the internal image from uint16 to uint8, rescaling the full range.
    #
    # @returns nothing.
    def uint16_to_uint8(self):
        self._raw_frame = np.round((self._raw_frame.astype(np.float32) / 65535.0) * 255.0).astype(np.uint8)
    #
    # @brief Adds Gaussian noise to the internal image.
    #
    # @param[in] mean Mean of the noise. Default value is 0.
    # @param[in] std  Standard deviation of the noise. Default value is 10.
    #
    # @returns nothing.
    def add_gaussian_noise(self, mean = 0, std = 10):
        # Get image dimensions
        row, col, ch = self._raw_frame.shape
        # Gaussian noise with the same shape as the internal image
        gauss = np.random.normal(mean, std, (row, col, ch)).reshape(row, col, ch)
        # Clip before casting back to uint8: previously out-of-range values wrapped
        # around (the static gaussian_noise helper already clipped)
        self._raw_frame = np.round(np.clip(self._raw_frame.astype(np.float64) + gauss,
            0, 255)).astype(np.uint8)
    #
    # @brief Converts a green screen image with tools to grayscale, adding a bit of noise
    #        so that the B, G, R values are not kept exactly equal.
    #
    # @param[in] im          BGR image (uint8 ndarray).
    # @param[in] noise_delta Noise is sampled uniformly in [-noise_delta, noise_delta].
    #
    # @returns the noisy grayscale (but still 3-channel) image.
    @classmethod
    def gray_tools(cls, im, noise_delta=3):
        assert(isinstance(im, np.ndarray))
        new_im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        new_im = cv2.cvtColor(new_im, cv2.COLOR_GRAY2BGR)
        noise = np.random.randint(-noise_delta,
            noise_delta + 1, size=new_im.shape)
        new_im = np.clip(new_im + noise, 0, 255).astype(np.uint8)
        return new_im
    #
    # @brief Converts the internal frame to a noisy grayscale image.
    #
    # @returns nothing.
    def noisy_gray(self, noise_delta=3):
        self._raw_frame = CaffeinatedImage.gray_tools(self._raw_frame, noise_delta)
    #
    # @brief Random spatial crop of the internal frame (delegates to the parent helper).
    #
    # @returns nothing.
    def random_crop(self, height, width):
        self._raw_frame = CaffeinatedAbstract.random_crop(self._raw_frame, height, width)
    #
    # @returns the shape of the internal frame.
    @property
    def shape(self):
        return self._raw_frame.shape
#
# @class Caffeinated8UC3Image represents a colour (H x W x 3) uint8 CaffeinatedImage.
#
class Caffeinated8UC3Image(CaffeinatedImage):
    #
    # @brief Validates and stores a 3-channel uint8 BGR image.
    #
    # @param[in] frame_bgr Numpy array with a BGR image, shape (h, w, 3), dtype uint8.
    # @param[in] name      Id of the image.
    def __init__(self, frame_bgr, name):
        # Check that it is a 3-channel BGR image
        EXPECTED_DIM = 3
        EXPECTED_CHANNELS = 3
        if len(frame_bgr.shape) != EXPECTED_DIM or frame_bgr.shape[EXPECTED_DIM - 1] != EXPECTED_CHANNELS:
            raise RuntimeError('[Caffeinated8UC3Image] Error, the image provided has a shape of ' + \
                str(frame_bgr.shape) + '. We expect an image of shape (H x W x ' + \
                str(EXPECTED_CHANNELS) + ').')
        # Check that the image is uint8
        EXPECTED_TYPE = np.uint8
        if frame_bgr.dtype != EXPECTED_TYPE:
            raise RuntimeError('[Caffeinated8UC3Image] Error, the image provided has a type of ' + \
                str(frame_bgr.dtype) + ' and we expect ' + str(EXPECTED_TYPE) + '.')
        # super(self.__class__, ...) would recurse infinitely if this class were ever
        # subclassed; name the class explicitly instead
        super(Caffeinated8UC3Image, self).__init__(frame_bgr, name)
#
# @class CaffeinatedLabel represents a segmentation label that will be used by Caffe so this
# class should provide methods to adapt the original image to the type of
# input Caffe is expecting.
#
# @details This class does not support labels that are not grayscale or colour images, that is,
# the images provided must be (H x W) or (H x W x C). In case that you provide a label
# with shape (H x W x C) this class will make sure that all the channels C have the same
# values. This is because a priori it does not make any sense for a pixel to belong to
# different classes.
class CaffeinatedLabel(CaffeinatedAbstract):
#
# @brief Stores the label and checks that both dimensions and type are correct for a label.
# @details To make a safe conversion to single channel this method will check that all the
# pixels of the image have exactly the same intensity value in all the BGR channels.
# If this does not happen an exception will be raised.
#
# @param[in] label_image Single channel OpenCV/Numpy image. Shape (H x W) or (H x W x C).
# @param[in] name Name of the label, usually stores the id of the related image.
# @param[in] classes Integer that represents the maximum number of classes in the labels,
# used for both validation purposes and to convert back/forth to Caffe
# input.
# @param[in] class_map Integer (pixel intensity) -> Integer (class, [0, K - 1]),
# where K is the maximum number of classes.
# @param[in] proba_map Probability maps for all the classes, shape (c, h, w).
#
def __init__(self, label_image, name, classes, class_map, proba_map = None):
# This is 2 because we expect the image to be of shape (H x W) and the intensity of the
# pixel to indicate the class that the pixel belongs to
EXPECTED_DIM = 2
EXPECTED_LABEL_TYPE = np.uint8
# Store the maximum number of classes after validating that it is in the range [2, 256]
assert(isinstance(classes, type(0)) and classes >= 2 and classes <= 256)
self._classes = classes
# Store the dictionary for class mappings after validating it
classes_present = [False] * classes
assert(len(class_map.keys()) == classes)
for k, v in class_map.items():
assert(isinstance(k, type(0)))
assert(isinstance(v, type(0)))
assert(k >= 0 and k <= 255)
assert(v >= 0 and v < self._classes)
classes_present[v] = True
assert(all(classes_present))
self._class_map = class_map
# Sanity check: labels that are neither (H x W) nor (H x W x C) are not supported
dim = len(label_image.shape)
if not (dim == 2 or dim == 3):
raise RuntimeError('[CaffeinatedLabel] Error, the label provided has a dimension of ' + \
str(dim) + ', which is not supported. Only (H x W) and (H x W x C) are supported.')
# Sanity check: if the label provided is multiple-channel, assert that all the pixels of the image
# have the same intensity value in all the channels
if dim > EXPECTED_DIM:
for channel in range(1, label_image.shape[2]):
if not np.array_equal(label_image[:,:, channel], label_image[:,:, 0]):
raise RuntimeError('[CaffeinatedLabel] Error, the label provided in ' + name + ' has channels that are ' + \
'different from each other so it is not clear how to convert it to a proper ' + \
'single channel label in which the intensity defines the pixel class.')
# Sanity check: the image must be uint8, this essentially means that there is a maximum of 256 labels
if label_image.dtype != EXPECTED_LABEL_TYPE:
raise RuntimeError('[CaffeinatedLabel] Error, a label must be ' + str(EXPECTED_LABEL_TYPE) + '.')
# If the image has several channels, we just get one (we already know that all the channels have the
# same values
if dim == EXPECTED_DIM:
raw_label = label_image
else:
raw_label = label_image[:,:, 0]
# Assert that there are no more unique labels than classes
unique_classes = np.unique(raw_label)
if unique_classes.shape[0] > self._classes:
raise ValueError('[CaffeinatedLabel] Error, label ' + str(name) + ' is said to have ' \
+ str(self._classes) + ' classes but there are more unique values in it, exactly: ' \
+ str(unique_classes))
# Assert thate the intensities in the label are all present in the class_map dictionary
for i in unique_classes:
if not i in self._class_map:
raise ValueError('[CaffeinatedLabel] Error, label ' + str(name) + ' has a pixel with ' \
+ 'intensity ' + str(i) + ' but this intensity is not present in the class map.')
# Store probability map if provided
if proba_map is not None:
assert(len(proba_map.shape) == 3)
assert(proba_map.shape[0] == classes)
assert(proba_map.shape[1] == raw_label.shape[0])
assert(proba_map.shape[2] == raw_label.shape[1])
self._predicted_map = proba_map
# Call CaffeinatedAbstract constructor
super(CaffeinatedLabel, self).__init__(raw_label, name)
#
# @brief Builds an object of type CaffeinatedLabel from an image file.
#
# @param[in] fmaps array_like, shape (c, h, w).
#
# @param[in] classes Integer that represents the maximum number of classes in the labels, used for
# both validation purposes and to convert back/forth to Caffe input.
#
# @param[in] class_map Integer (pixel intensity) -> Integer (class, [0, K - 1]), where K is the
# maximum number of classes.
#
@classmethod
def from_network_output(cls, fmaps, name, classes, class_map):
label_image = fmaps.argmax(axis=0).astype(np.uint8)
for k, v in class_map.items():
label_image[label_image == v] = k
return cls(label_image, name, classes, class_map, fmaps)
#
# @brief Convert label to CaffeinatedImage for displaying purposes.
#
# @param[in] cn Channels of the new image. The labels will be replicated across channels.
#
# @returns the label converted into a cn-channel CaffeinatedImage.
def to_image(self, cn = 3):
new_image = np.ndarray((self._raw_frame.shape[0], self._raw_frame.shape[1], cn),
self._raw_frame.dtype)
for k in range(cn):
new_image[:,:, k] = self._raw_frame
return CaffeinatedImage(new_image, self._name)
#
# @brief Converts the label to a Caffe datum.
#
# @returns a Caffe datum label serialised to string.
def serialise_to_string(self):
# Sanity check: assert that the type of the label is correct
import caffe
assert(self._raw_frame.dtype == np.uint8)
# Create Caffe datum
datum = caffe.proto.caffe_pb2.Datum()
datum.height, datum.width = self._raw_frame.shape
# if self._classes == 2:
# Convert (h, w) -> (1, h, w)
# caffe_label = np.expand_dims(self._raw_frame, axis = 0)
# caffe_label = self._raw_frame
# else:
# Create ndarray of binary maps
fmaps = np.zeros([self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]],
dtype = np.uint8)
# k is intensity
# v is the class number
for k, v in self._class_map.items():
fmaps[v, self._raw_frame == k] = 1
# if self._classes == 2:
# Binary case, only one feature map
# datum.channels = 1
# caffe_label = np.expand_dims(fmaps[1], axis = 0)
# else:
# Multi-class case, one feature map per class
# datum.channels = self._classes
# caffe_label = fmaps
# Multi-class case, one feature map per class
datum.channels = self._classes
caffe_label = fmaps
# Convert label[s] to string
datum.data = caffe_label.tostring()
return datum.SerializeToString()
    #
    # @brief Binarises the label so that only 0/maxval values remain.
    #
    # @param[in] thresh Pixels strictly greater than 'thresh' are set to 'maxval'
    #                   (cv2.THRESH_BINARY semantics); the rest become 0.
    # @param[in] maxval Integer assigned to the pixels above 'thresh'.
    #
    # @returns nothing.
    def binarise(self, thresh = 10, maxval = 1):
        _, self._raw_frame = cv2.threshold(self._raw_frame, thresh, maxval, cv2.THRESH_BINARY)
#
# @brief Convert intensity-based labels into proper class-index labels.
#
# @returns an array_like, shape (h, w).
def to_classes(self):
class_index_frame = self._raw_frame.copy()
for k, v in self._class_map.items():
class_index_frame[self._raw_frame == k] = v
return class_index_frame
    #
    # @brief Maps a pixel intensity [0, 255] to its class index [0, K - 1] using the
    #        class map provided at construction time.
    #
    # @param[in] intensity Pixel intensity, typically an integer in [0, 255].
    #
    # @returns the class index of the given pixel intensity. Raises KeyError for
    #          intensities absent from the class map.
    def map_intensity_to_class(self, intensity):
        return self._class_map[intensity]
#
# @brief Maps between classes and JSON intensities.
#
# @param[in] class_id Id of the class whose intensity you want to retrieve.
#
# @returns the intensity corresponding to the given class.
def map_class_to_intensity(self, class_id):
return {v: k for k, v in self._class_map.items()}[class_id]
#
# @brief Retrieves a normalised probability map for a particular class.
#
# @param[in] class_id Id of the class whose probability map you want to retrieve.
#
# @returns an array_like probability map, shape (h, w).
def softmax_predicted_map(self, class_id):
assert(self._predicted_map)
pmap = np.exp(self._predicted_map - np.amax(self._predicted_map, axis = 0))
pmap /= np.sum(pmap, axis = 0)
return pmap[class_id, ...]
#
# @brief Converts all the feature maps to contour images.
#
# @param[in] pixel_width Thickness of the border in pixels.
#
# @returns nothing.
def convert_to_contours(self, pixel_width = 5):
new_raw_frame = np.zeros_like(self._raw_frame)
# If self._predicted_map does not exist, we create it, shape (c, h, w)
if not self._predicted_map:
self._predicted_map = np.zeros((self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]),
dtype=np.uint8)
for k in range(self._classes):
self._predicted_map[k,:,:][self._raw_frame == self.map_class_to_intensity(k)] = 1
# Draw contours in the new raw frame
for k in range(self._classes):
(_, cnts, _) = cv2.findContours(self._predicted_map[k], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
# cv2.drawContours(new_raw_frame, [c], -1, (self.map_class_to_intensity(k)), pixel_width)
cv2.drawContours(new_raw_frame, [c], -1, self.map_class_to_intensity(k), pixel_width)
self._raw_frame = new_raw_frame
    def random_crop(self, height, width):
        """Randomly crop the raw frame in place to (height, width).

        Delegates to CaffeinatedAbstract.random_crop (defined elsewhere in this
        file) — presumably a static helper; confirm its crop semantics there.
        """
        self._raw_frame = CaffeinatedAbstract.random_crop(self._raw_frame, height, width)
#
# @brief Calculates the number of classes in the frame, that is the quantity of unique labels.
#
# @returns an integer that indicates the number of different pixel labels.
    @property
    def classes(self):
        # Number of classes according to the stored class count (not the
        # intensities actually present in this frame — see the commented-out
        # alternative below, kept for reference).
        return self._classes
        # return np.unique(self._raw_frame).shape[0]
#
# @returns the unnormalised predicted map for all the classes (class_id, height, width).
#
    @property
    def predicted_map(self):
        # Unnormalised predicted map for all classes, shape (class_id, height, width),
        # or None/unset until a prediction has been stored.
        return self._predicted_map
    @property
    def class_map(self):
        # Mapping from pixel intensity -> class index, as provided at construction.
        return self._class_map
#
# @class CaffeinatedBinaryLabel behaves as a CaffeinatedLabel but makes sure that the label images provided
# only contain two different types or labels. Furthermore, it makes them 0's
# and 1's (np.uint8) in case that they are different from these two values.
# Say that you provide an image with 0's and 255's as typical ground truth
# images, this class will make it 0's and 1's.
#
class CaffeinatedBinaryLabel(CaffeinatedLabel):
    """A CaffeinatedLabel that guarantees binary labels.

    Ensures the provided label image only contains two different values and
    normalises them to 0/1 (np.uint8). E.g. a typical 0/255 ground truth
    image becomes 0/1.
    """

    def __init__(self, label_image, name, thresh = 10, maxval = 1):
        """Store the label and force it to be binary.

        If the label has more than two unique values it is thresholded with
        cv2.THRESH_BINARY so that only 0/maxval values remain.

        @param[in] label_image Single channel OpenCV/Numpy image. Shape (H x W) or (H x W x C).
        @param[in] name        String that identifies the label, usually a frame number.
        @param[in] thresh      Pixels strictly greater than 'thresh' become 'maxval'.
        @param[in] maxval      Value given to the pixels above 'thresh'.

        @returns nothing.
        """
        # Call CaffeinatedLabel constructor.
        # BUG FIX: the original used super(self.__class__, self), which causes
        # infinite recursion if this class is ever subclassed; name the class
        # explicitly instead.
        super(CaffeinatedBinaryLabel, self).__init__(label_image, name)
        # If the label image is not binary (i.e. it has more than two unique
        # values), threshold it so that only two values remain.
        EXPECTED_NO_UNIQUE_VALUES = 2 # As we expect a binary label
        no_unique_values = np.unique(self._raw_frame).shape[0]
        if no_unique_values > EXPECTED_NO_UNIQUE_VALUES:
            _, self._raw_frame = cv2.threshold(self._raw_frame, thresh, maxval, cv2.THRESH_BINARY)

    # @returns the number of foreground (non-zero) pixels.
    @property
    def count_fg_pixels(self):
        return np.count_nonzero(self._raw_frame)

    # @returns the number of background (zero) pixels.
    @property
    def count_bg_pixels(self):
        return np.count_nonzero(self._raw_frame == 0)
#
# @class CaffeinatedImagePair represents a pair of consecutive frames that will be used by Caffe so this
# class should provide methods to adapt the original images to the type of
# input Caffe is expecting.
#
class CaffeinatedImagePair(object):
    """Pair of consecutive BGR video frames prepared as Caffe input.

    The two frames are stacked into a single 6-channel image (previous frame
    in channels 0-2, next frame in channels 3-5), either as a test-time input
    (convert_to_caffe_input) or as a serialised training datum
    (serialise_to_string).
    """

    def __init__(self, frame_bgr_prev, frame_bgr_next):
        """Validate and store the two consecutive frames.

        @param[in] frame_bgr_prev Numpy array with the previous BGR image in the video sequence, shape (h, w, c).
        @param[in] frame_bgr_next Numpy array with the current BGR image in the video sequence, shape (h, w, c).

        Raises RuntimeError if either frame is not (H x W x 3) or if the two
        frames differ in height/width.
        """
        # Sanity check: both images must have 3 dimensions (h, w, c)
        if len(frame_bgr_prev.shape) != 3 or len(frame_bgr_next.shape) != 3:
            raise RuntimeError('[CaffeinatedImagePair.__init__] The images provided must have ' + \
                'three dimensions (i.e. H x W x C).')
        # Sanity check: both images must have 3 channels
        if frame_bgr_prev.shape[2] != 3 or frame_bgr_next.shape[2] != 3:
            raise RuntimeError('[CaffeinatedImagePair.__init__] The images provided must have three ' + \
                'channels (i. e. BGR)')
        # Sanity check: both images must have the same height and width
        # (typo fix: the message previously read "The imaged provided").
        if frame_bgr_prev.shape[0] != frame_bgr_next.shape[0] or \
                frame_bgr_prev.shape[1] != frame_bgr_next.shape[1]:
            raise RuntimeError('[CaffeinatedImagePair.__init__] The images provided must have the same ' + \
                'dimensions (i.e. height and width).')
        self._frame_bgr_prev = frame_bgr_prev
        self._frame_bgr_next = frame_bgr_next

    @classmethod
    def from_file(cls, path_prev, path_next):
        """Build a CaffeinatedImagePair from two image files.

        Only 3-channel colour images are supported; anything else fails the
        constructor's sanity checks.

        @param[in] path_prev Path to the previous image file.
        @param[in] path_next Path to the next image file.
        """
        return cls(cv2.imread(path_prev), cv2.imread(path_next))

    def convert_to_caffe_input(self, mean_values):
        """Convert the pair to a Caffe test-time input.

        Subtracts the per-channel training means (needed because the test
        network has no transform_param section) and transposes the stacked
        6-channel image to Caffe's (C x H x W) layout.

        @param[in] mean_values Numpy ndarray with the per channel training means, shape (6,):
                               first three entries for the previous frame, last three for the next.

        @returns a float32 array of shape (6, h, w) ready to be processed by Caffe.
        """
        no_mean_values = mean_values.shape[0]
        # Sanity check: one mean per channel of the stacked input image.
        if no_mean_values != 6:
            raise ValueError('[CaffeinatedImagePair.convert_to_caffe_input()] Error, six means are required.')
        # Subtract mean values from previous frame
        norm_prev = self._frame_bgr_prev.astype(np.float32) - mean_values[:3]
        # Subtract mean values from next frame
        norm_next = self._frame_bgr_next.astype(np.float32) - mean_values[3:]
        # Sanity checks: both images must have the same shape and be of the same datatype
        assert(norm_prev.shape[0] == norm_next.shape[0])
        assert(norm_prev.shape[1] == norm_next.shape[1])
        assert(norm_prev.shape[2] == norm_next.shape[2])
        assert(norm_prev.dtype == norm_next.dtype)
        # Combine both images in a 6-channel image
        combined_image = np.empty((norm_prev.shape[0], norm_prev.shape[1], 6), dtype = norm_prev.dtype)
        combined_image[:,:, 0:3] = norm_prev
        combined_image[:,:, 3:6] = norm_next
        # Transpose to channel-first Caffe style
        combined_transposed = np.transpose(combined_image, (2, 0, 1))
        return combined_transposed

    @property
    def serialise_to_string(self):
        """Serialise the pair as a Caffe datum string, shape (6 x H x W).

        The training mean is NOT subtracted here: Caffe does it automatically
        for the training data layer (see its transform_param section).
        """
        # Sanity checks: both images must have the same shape and be of the same datatype
        import caffe
        assert(self._frame_bgr_prev.shape[0] == self._frame_bgr_next.shape[0])
        assert(self._frame_bgr_prev.shape[1] == self._frame_bgr_next.shape[1])
        assert(self._frame_bgr_prev.shape[2] == self._frame_bgr_next.shape[2])
        assert(self._frame_bgr_prev.dtype == self._frame_bgr_next.dtype)
        # Combine the two images in a single 6-channel image
        channels = 6
        combined_image = np.empty((self._frame_bgr_prev.shape[0], self._frame_bgr_prev.shape[1], channels), \
            dtype = self._frame_bgr_prev.dtype)
        combined_image[:,:, 0:3] = self._frame_bgr_prev
        combined_image[:,:, 3:6] = self._frame_bgr_next
        caffe_image = combined_image.astype(np.float32)
        # Convert image to Caffe datum
        datum = caffe.proto.caffe_pb2.Datum()
        datum.height, datum.width, _ = caffe_image.shape
        datum.channels = channels
        datum.data = caffe_image.tostring()
        return datum.SerializeToString()

    @property
    def height(self):
        """Height of the frames in pixels."""
        return self._frame_bgr_prev.shape[0]

    @property
    def width(self):
        """Width of the frames in pixels."""
        return self._frame_bgr_prev.shape[1]
#
# @class CaffeinatedImagePrevSeg represents a BGR image with a fourth channel that contains the segmentation of the
# previous frame in the video sequence.
#
class CaffeinatedImagePrevSeg(object):
    """BGR frame plus the segmentation of the previous frame as a 4th channel.

    The colour image occupies channels 0-2 and the previous segmentation mask
    channel 3, producing the 4-channel input the network expects.
    """

    def __init__(self, prev_seg, frame_bgr):
        """Validate and store the current frame and the previous segmentation.

        @param[in] prev_seg  Numpy array with the predicted segmentation of the previous
                             frame in the sequence, shape (h, w).
        @param[in] frame_bgr Numpy array with a BGR image, shape (h, w, 3).

        Raises RuntimeError on any shape mismatch.
        """
        # NOTE: the error messages previously referred to a non-existent class
        # 'CaffeinatedImagePlusPrevSeg'; they now use the real class name.
        # Sanity check: the image must have three dimensions (h, w, c) and three channels (c = 3)
        if len(frame_bgr.shape) != 3 or frame_bgr.shape[2] != 3:
            raise RuntimeError('[CaffeinatedImagePrevSeg.__init__] Error, the image provided must ' + \
                'have three dimensions (i.e. H x W x 3).')
        # Sanity check: the previous mask must have a dimension of two
        if len(prev_seg.shape) != 2:
            raise RuntimeError('[CaffeinatedImagePrevSeg.__init__] Error, the previous mask must have ' + \
                'two dimensions.')
        # Sanity check: the frame and the previous mask must have the same dimensions
        if frame_bgr.shape[0] != prev_seg.shape[0] or frame_bgr.shape[1] != prev_seg.shape[1]:
            raise RuntimeError('[CaffeinatedImagePrevSeg.__init__] Error, the current image and the ' + \
                'previous segmentation must have the same height and width.')
        self._prev_seg = prev_seg
        self._frame_bgr = frame_bgr

    @classmethod
    def from_file(cls, path_prev_seg, path_frame_bgr):
        """Build from files: the previous segmentation label and the current BGR frame.

        Only 3-channel colour images are supported for the frame.
        """
        caffeinated_prev_label = CaffeinatedBinaryLabel.from_file(path_prev_seg)
        return cls(caffeinated_prev_label.single_channel_label_copy(), cv2.imread(path_frame_bgr))

    def convert_to_caffe_input(self, mean_values):
        """Convert to a Caffe test-time input.

        Subtracts the per-channel training means from the colour channels
        (needed because the test network has no transform_param section),
        appends the previous segmentation as channel 3, and transposes to
        Caffe's (C x H x W) layout.

        @param[in] mean_values Numpy ndarray with the per channel training means, shape (3,).

        @returns a float32 array of shape (4, h, w) ready to be processed by Caffe.
        """
        colour_channels = 3
        no_mean_values = mean_values.shape[0]
        # Sanity check: one mean per colour channel of the input image.
        if no_mean_values != colour_channels:
            raise ValueError('[CaffeinatedImagePrevSeg.convert_to_caffe_input] Error, three means are ' + \
                'required.')
        # Subtract mean values from the current frame
        norm_frame_bgr = self._frame_bgr.astype(np.float32) - mean_values
        # Convert previous segmentation to float
        norm_prev_seg = self._prev_seg.astype(np.float32)
        # Sanity check: the normalised image and the segmentation mask must have
        # the same spatial shape and datatype.
        total_channels = colour_channels + 1
        assert(norm_frame_bgr.shape[0] == norm_prev_seg.shape[0])
        assert(norm_frame_bgr.shape[1] == norm_prev_seg.shape[1])
        assert(norm_frame_bgr.shape[2] == colour_channels)
        assert(norm_frame_bgr.dtype == norm_prev_seg.dtype)
        # Combine the current frame with the previous segmentation in a 4-channel image
        combined_image = np.empty((norm_frame_bgr.shape[0], norm_frame_bgr.shape[1], total_channels),
                                  dtype = norm_frame_bgr.dtype)
        combined_image[:,:, :colour_channels] = norm_frame_bgr
        combined_image[:,:, colour_channels] = norm_prev_seg
        # Transpose to channel-first Caffe style
        combined_transposed = np.transpose(combined_image, (2, 0, 1))
        return combined_transposed

    @property
    def serialise_to_string(self):
        """Serialise as a (4 x H x W) Caffe datum string.

        The training mean is NOT subtracted here: Caffe does it automatically
        for the training data layer (see its transform_param section).
        """
        # Sanity checks: frame and mask must match spatially and in datatype.
        import caffe
        assert(self._frame_bgr.shape[0] == self._prev_seg.shape[0])
        assert(self._frame_bgr.shape[1] == self._prev_seg.shape[1])
        assert(self._frame_bgr.dtype == self._prev_seg.dtype)
        # Combine the current image and the previous segmentation in a single 4-channel image
        colour_channels = 3
        total_channels = colour_channels + 1
        combined_image = np.empty((self._frame_bgr.shape[0], self._frame_bgr.shape[1], total_channels), \
            dtype = self._frame_bgr.dtype)
        combined_image[:,:, :colour_channels] = self._frame_bgr
        combined_image[:,:, colour_channels] = self._prev_seg
        caffe_image = combined_image.astype(np.float32)
        # Convert image to Caffe datum
        datum = caffe.proto.caffe_pb2.Datum()
        datum.height, datum.width, _ = caffe_image.shape
        datum.channels = total_channels
        datum.data = caffe_image.tostring()
        return datum.SerializeToString()

    @property
    def height(self):
        """Height of the image in pixels."""
        return self._frame_bgr.shape[0]

    @property
    def width(self):
        """Width of the image in pixels."""
        return self._frame_bgr.shape[1]
#
# @brief Convert a binary probability map into a beautiful image.
#
# @param[in] probmap 2D floating point probability map, shape (height, width).
#
# @returns a fancy BGR image.
def make_it_pretty(probmap, vmin = 0, vmax = 1, colourmap = 'plasma', eps = 1e-3):
    """Render a 2D probability map as a colour-mapped BGR image of the same size.

    Plots the map with matplotlib, rasterises the figure, trims the uniform
    white border matplotlib adds around the axes, resizes back to the original
    (height, width) and converts RGB -> BGR.

    @param[in] probmap   2D floating point probability map, shape (height, width),
                         with values in [vmin - eps, vmax + eps].
    @param[in] vmin/vmax Colour map limits.
    @param[in] colourmap Matplotlib colour map name.
    @param[in] eps       Tolerance for the min/max sanity checks.

    @returns a BGR uint8 image, shape (height, width, 3).
    """
    assert(len(probmap.shape) == 2)
    assert(np.max(probmap) < vmax + eps)
    assert(np.min(probmap) > vmin - eps)
    height = probmap.shape[0]
    width = probmap.shape[1]
    # Create figure without axes
    fig = plt.figure(frameon = False)
    ax = fig.add_axes([0, 0, 1, 1])
    ax.axis('off')
    # Plot figure
    plt.imshow(probmap, cmap = colourmap, vmin = vmin, vmax = vmax) # vmin/vmax adjust thesholds
    fig.canvas.draw()
    # Convert plot to numpy array.
    # NOTE(review): np.fromstring is deprecated in modern numpy in favour of
    # np.frombuffer — confirm the numpy version this runs against.
    data = np.fromstring(fig.canvas.tostring_rgb(), dtype = np.uint8, sep = '')
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
    # Remove left/right borders: grow the offset while both border columns are
    # uniform white (intensity 255) on the red channel.
    left_right_offset = 0
    i = 0
    left_intensity = data[0, left_right_offset, 0]
    right_intensity = data[0, -1, 0]
    min_intensity = 255
    # Assert that the values for all the rows are equal for the columns 'offset' and '-offset'.
    # NOTE(review): when the offset is 0, `-left_right_offset` is also 0, so the
    # first "right side" check actually inspects the LEFT column — presumably
    # harmless because both borders are white, but worth confirming.
    left_side_equal = True if np.unique(data[:, left_right_offset, 0]).shape[0] == 1 else False
    right_side_equal = True if np.unique(data[:, -left_right_offset, 0]).shape[0] == 1 else False
    while left_intensity == right_intensity and left_intensity >= min_intensity and left_side_equal and right_side_equal:
        left_right_offset += 1
        left_intensity = data[0, left_right_offset, 0]
        right_intensity = data[0, -left_right_offset - 1, 0]
        left_side_equal = True if np.unique(data[:, left_right_offset, 0]).shape[0] == 1 else False
        right_side_equal = True if np.unique(data[:, -left_right_offset, 0]).shape[0] == 1 else False
    # Remove top/bottom borders with the same uniform-white criterion.
    top_bottom_offset = 0
    i = 0
    top_intensity = data[top_bottom_offset, 0, 0]
    bottom_intensity = data[-1, 0, 0]
    min_intensity = 255
    # Assert that the values for all the rows are equal for the columns 'offset' and '-offset'
    top_side_equal = True if np.unique(data[top_bottom_offset,:, 0]).shape[0] == 1 else False
    bottom_side_equal = True if np.unique(data[-top_bottom_offset,:, 0]).shape[0] == 1 else False
    while top_intensity == bottom_intensity and top_intensity >= min_intensity and top_side_equal and bottom_side_equal:
        top_bottom_offset += 1
        top_intensity = data[top_bottom_offset, 0, 0]
        bottom_intensity = data[-top_bottom_offset - 1, 0, 0]
        top_side_equal = True if np.unique(data[top_bottom_offset,:, 0]).shape[0] == 1 else False
        bottom_side_equal = True if np.unique(data[-top_bottom_offset,:, 0]).shape[0] == 1 else False
    # Note: 1 is added to 'left_right_offset' because matplotlib tends to leave a border on the left one
    # pixel thicker than on the right
    cropped_image = data[top_bottom_offset:data.shape[0] - top_bottom_offset,
                         left_right_offset + 1:data.shape[1] - left_right_offset]
    # Resize to original size
    resized_image = cv2.resize(cropped_image, (width, height))
    assert(resized_image.shape[0] == height)
    assert(resized_image.shape[1] == width)
    assert(resized_image.shape[2] == 3)
    # Convert RGB to BGR
    final_image = cv2.cvtColor(resized_image, cv2.COLOR_RGB2BGR)
    return final_image
# This module cannot be executed as a script because it is not a script :)
if __name__ == "__main__":
    # sys.stderr.write works under both Python 2 and 3, unlike the original
    # Python-2-only `print >> sys.stderr` statement.
    sys.stderr.write('Error, this module is not supposed to be executed by itself.\n')
    sys.exit(1)
| 2.140625 | 2 |
{{cookiecutter.project_slug}}/{{cookiecutter.main_app}}/models.py | huogerac/cookiecutter-djangofloppyforms | 3 | 12762308 | from django.db import models
# NOTE: this file is a cookiecutter template — the class name is rendered from
# the `main_model` template variable when the project is generated.
class {{ cookiecutter.main_model }}(models.Model):
    # Short human-readable description of the task.
    description = models.CharField(max_length=264)
    # Deadline for the task.
    due_to = models.DateTimeField()
    # Whether the task has been completed.
    done = models.BooleanField(default=False)
    @property
    def status(self):
        """Return 'done' or 'pending' depending on the `done` flag."""
        return 'done' if self.done else 'pending'
| 2.078125 | 2 |
submissions/!EVALUATION/indicator.py | SoumithThumma/TTF | 3 | 12762309 | <gh_stars>1-10
import abc
import numpy as np
class Indicator:
    """Base class for indicators computed over a 2D objective matrix F."""

    def __init__(self):
        # Value returned by calc() when F contains no data (zero columns).
        self.default_if_empty = 0.0

    def calc(self, F):
        """Normalise F to 2D, short-circuit the empty case, then delegate to _calc."""
        # Promote a 1D vector to a single-row matrix.
        if F.ndim == 1:
            F = np.array([F])
        # No data at all: fall back to the default value.
        if F.shape[1] == 0:
            return self.default_if_empty
        return self._calc(F)

    @abc.abstractmethod
    def _calc(self, F):
        """Subclasses implement the actual indicator computation here."""
        return
| 3.140625 | 3 |
bookclubs/meeting_link.py | hihi-itsann/SEG_Major_Group | 6 | 12762310 | <filename>bookclubs/meeting_link.py
import datetime
import time
import jwt
import requests
from faker import Faker
# create a function to generate a token using the pyjwt library
def generateToken():
    """Generate a short-lived Zoom API JWT signed with HS256.

    SECURITY: the Zoom API key ("iss") and the signing secret are hard-coded
    below; they should be moved to environment variables / a secrets store
    and rotated.
    """
    token = jwt.encode(
        # Create a payload of the token containing API Key & expiration time
        {"iss": "Z8KPddIlSg-N9LTbgh5jnQ", "exp": time.time() + 5000},
        # Secret used to generate token signature
        "c18QSFhXlaHbG6gP7HI9XGhWyaM6FQTKgYfl",
        # Specify the hashing alg
        algorithm='HS256'
        # Convert token to utf-8
    )
    return token
def create_zoom_meeting(date, time_start, duration):
    """Create a Zoom meeting and store its join/start URLs in module globals.

    @param date       Meeting date, formatted by str() as YYYY-MM-DD.
    @param time_start Start time string 'HH:MM'.
    @param duration   Meeting duration in minutes.

    Side effects: sets the module-level globals `join_link` and `start_link`.
    SECURITY: the account email and the meeting password are hard-coded;
    move them to configuration. NOTE(review): time.tzname[0] may not be the
    IANA timezone name Zoom expects — confirm.
    """
    email = "<EMAIL>"
    headers = {'authorization': 'Bearer %s' % generateToken(),
               'content-type': 'application/json'}
    url = "https://api.zoom.us/v2/users/{}/meetings".format(email)
    # Zoom expects an ISO-8601 start_time, e.g. '2022-01-31T14:30:00'.
    date = str(date) + "T" + str(time_start) + ":00"
    obj = {"topic": "Book Club", "start_time": date, "duration": duration, "password": "<PASSWORD>",
           "timezone": (time.tzname)[0]}
    create_meeting = requests.post(url, json=obj, headers=headers)
    response_data = create_meeting.json()
    # Expose the meeting URLs to the accessor functions below.
    global join_link, start_link
    join_link = response_data["join_url"]
    start_link = response_data["start_url"]
def get_join_link():
    """Return the join URL of the meeting created by the last create_zoom_meeting call.

    Raises NameError if create_zoom_meeting has not been called yet (the
    module-level global is only defined there).
    """
    return join_link
def get_start_link():
    """Return the host start URL of the meeting created by the last create_zoom_meeting call.

    Raises NameError if create_zoom_meeting has not been called yet (the
    module-level global is only defined there).
    """
    return start_link
def delete_zoom_meeting():
    """Delete every scheduled meeting of the account.

    Repeatedly lists the account's meetings and deletes each one until the
    listing comes back empty (handles Zoom's paginated listing by re-fetching).
    """
    email = "<EMAIL>"
    headers = {'authorization': 'Bearer %s' % generateToken(),
               'content-type': 'application/json'}
    url = "https://api.zoom.us/v2/users/{}/meetings".format(email)
    delete_meeting = requests.get(url, headers=headers)
    response_data = delete_meeting.json()
    # Keep going while the listing still reports meetings.
    while (len(response_data['meetings'])):
        # Re-fetch the (possibly shorter) meeting list on every pass, with a
        # fresh token in case the previous one expired.
        email = "<EMAIL>"
        headers = {'authorization': 'Bearer %s' % generateToken(),
                   'content-type': 'application/json'}
        url = "https://api.zoom.us/v2/users/{}/meetings".format(email)
        delete_meeting = requests.get(url, headers=headers)
        response_data = delete_meeting.json()
        for meeting in response_data['meetings']:
            meetingId = meeting['id']
            url = "https://api.zoom.us/v2/meetings/" + str(meetingId)
            requests.delete(url, headers=headers)
| 2.875 | 3 |
rllib/algorithms/appo/__init__.py | jianoaix/ray | 0 | 12762311 | from ray.rllib.algorithms.appo.appo import APPO, APPOConfig, DEFAULT_CONFIG
from ray.rllib.algorithms.appo.appo_tf_policy import APPOTF1Policy, APPOTF2Policy
from ray.rllib.algorithms.appo.appo_torch_policy import APPOTorchPolicy
# Names re-exported as the public API of the APPO algorithm package
# (consumed by `from ray.rllib.algorithms.appo import *`).
__all__ = [
    "APPO",
    "APPOConfig",
    "APPOTF1Policy",
    "APPOTF2Policy",
    "APPOTorchPolicy",
    "DEFAULT_CONFIG",
]
| 1.257813 | 1 |
Translator.py | jessedeveloperinvestor/Multiple-Jesse-Projects | 0 | 12762312 | <filename>Translator.py<gh_stars>0
#pip install translate
from translate import Translator
print('Hello, write something in English and hit enter to get it translated')
# Build the translator once: the source/target languages never change.
# (The original recreated it on every line, shadowed the builtin `set`, and
# used an obfuscated infinite loop: `z = z - 1` under `while z < 1` can never
# terminate. The intent — an endless translate loop — is made explicit here.)
translator = Translator(from_lang='english', to_lang='pt-br')
while True:
    # Read one English line from the user and echo the Portuguese translation.
    text = input()
    print(translator.translate(text))
| 3.375 | 3 |
command.py | nshepperd/tracebuild | 0 | 12762313 | #!/usr/bin/python2
import sys, os
# Build-tracing shim: this script is installed under the names of common
# tools (cp, mv, ...). It logs the invocation to TRACE_LOG_LOCATION and then
# execs the real tool found on the (self-excluded) PATH.
op = os.path.basename(sys.argv[0])
mypath = os.path.abspath(os.path.dirname(sys.argv[0]))
PATH = os.getenv('PATH').split(':')
if op == 'mv':
    # copy much cleaner than move in a build (immutable inputs)
    op = 'cp'
# Delete ourselves from the PATH so the execvp below resolves the real binary
# instead of recursing into this shim.
if mypath in PATH:
    del PATH[PATH.index(mypath)]
os.environ['PATH'] = ':'.join(PATH)
# Log a command entry: (pid, cwd, argv) as a repr'd tuple, one per line.
# NOTE(review): appends from concurrent processes are assumed to be atomic
# for these short lines — confirm for the target filesystem.
LOGFILE = os.getenv('TRACE_LOG_LOCATION')
with open(LOGFILE, 'a') as file:
    file.write(repr((os.getpid(), os.getcwd(), [op] + sys.argv[1:])) + '\n')
# Create a process group for this command
os.setpgid(0, 0)
# Execute the real tool in place of this process (execvp does not return).
command = list(sys.argv)
command[0] = op
os.execvp(op, command)
# STRACE_FILE = '{}.strace.{}'.format(LOGFILE, os.getpid())
# os.execvp('strace', ['strace', '-o', STRACE_FILE, '-f', '-e', 'open,chdir'] + command)
| 2.265625 | 2 |
students/d33101/wangyixin/Lr2/homework/views.py | losepower/ITMO_ICT_WebDevelopment_2021-2022_D3310 | 0 | 12762314 | <reponame>losepower/ITMO_ICT_WebDevelopment_2021-2022_D3310
from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from django.views.generic import View
from django.contrib.auth import authenticate, login
from .models import Student, Studenttopic, Homework
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView
from django.http import Http404
# Create your views here.
def index(request):
    """Render the homework app landing page."""
    return render(request, 'homework/index.html')
class Login(View):
    """User login view: POST authenticates, GET renders the login form."""

    def post(self, request):
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        prompt = "Вход пользователя в систему успешный"
        user = authenticate(username=username, password=password)
        # Check whether `user` is valid; None means authentication failed.
        if not user:
            prompt = "пароль пользователя неверен!"
        else:
            # Call login() to create a session for the authenticated user.
            login(request, user)
            return redirect('/index')
        # Re-render the login page with the failure prompt.
        return render(request, "homework/login.html", {"prompt": prompt})

    def get(self, request):
        # For a GET request, just render the login page.
        return render(request, "homework/login.html")
class Register(View):
    """User registration view: POST creates the account, GET renders the form."""

    def post(self, request):
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        password_ = request.POST.get('password_', '')
        email = request.POST.get('email', '')
        prompt = "Успешная регистрация пользователя"
        # Reject blank credentials or a password/confirmation mismatch.
        if not username or not password or password != password_:
            prompt = "Логин или пароль пользователя неверен"
        else:
            # Use the User model manager to create the account.
            # NOTE(review): there is no duplicate-username check here, so
            # registering an existing username will raise an IntegrityError —
            # confirm and handle.
            user = User.objects.create_user(username, email, password)
            user.save()
            return redirect('/')
        # Re-render the registration page with the failure prompt.
        return render(request, "homework/register.html", {"prompt": prompt})

    def get(self, request):
        # For a GET request, just render the registration page.
        return render(request, "homework/register.html")
class HomeworkView(ListView):
    # Generic list view over all Homework objects (Django-default template/context names).
    model = Homework
class HomeworkCreateView(CreateView):
    """Form view for submitting a homework answer.

    NOTE(review): despite the name, the model is Studenttopic (a student's
    answer to a homework), not Homework — confirm this is intentional.
    """
    model = Studenttopic
    template_name = 'homework_create_view.html'
    fields = ['homework', 'user', 'answer']
    success_url = '/homework'
class StudentCreateView(CreateView):
    """Form view for registering a Student record.

    Redirects to the homework list on success.
    """
    model = Student
    template_name = 'student_create_view.html'
    fields = ['name', 'Class', 'email', 'birthday']
    success_url = '/homework'
def work(request, Homework_id):
    """Render the detail page for a single Homework.

    Raises Http404 when no Homework with the given primary key exists.
    """
    try:
        p = Homework.objects.get(pk=Homework_id)
    except Homework.DoesNotExist:
        # BUG FIX: the original caught Studenttopic.DoesNotExist, which never
        # matches the exception raised by Homework.objects.get — a missing
        # Homework therefore produced a 500 instead of the intended 404.
        raise Http404("Homework does not exist")
    return render(request, 'work.html', {'work': p})
class TableView(ListView):
    # Generic list view over all Studenttopic (answer) objects.
    model = Studenttopic
| 2.328125 | 2 |
sec/train.py | pygongnlp/gramcorrector | 5 | 12762315 | <gh_stars>1-10
import argparse
import time
import os
from tqdm import tqdm
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader, BatchSampler, RandomSampler, DistributedSampler
from transformers import AutoConfig, AutoTokenizer, AutoModelForMaskedLM
from utils import set_seed, compute_model_size, load_data, write_to_file, epoch_time
from data import SpellGECDataset
from metric import compute_metrics
from collactor import DataCollactorForSpellGEC
def train(model, data_loader, optimizer, tokenizer, device, step=500):
    """Run one training epoch of the masked-LM spell-correction model.

    Args:
        model: HuggingFace masked-LM model, already on `device`.
        data_loader: yields (src, labels, src_tok, trg_tok) batches, where src
            is a tokenizer encoding and labels are target token ids.
        optimizer: torch optimizer over the model parameters.
        tokenizer: used to map predicted token ids back to token strings.
        device: torch device the batches are moved to.
        step: print running loss/metrics every `step` batches.

    Returns:
        (mean loss over the epoch, metrics dict from compute_metrics).
    """
    model.train()
    epoch_loss = 0
    results = []
    for i, (src, labels, src_tok, trg_tok) in enumerate(data_loader):
        src["labels"] = labels
        src = src.to(device)
        optimizer.zero_grad()
        outputs = model(**src)
        loss = outputs.loss
        loss.backward()
        epoch_loss += loss.item()
        optimizer.step()
        predictions = outputs.logits.argmax(-1).tolist()
        labels = labels.tolist()
        for s, t, label, predict in zip(src_tok, trg_tok, labels, predictions):
            # -100 marks label positions ignored by the loss (special tokens /
            # padding); drop them before detokenising the prediction.
            predict = tokenizer.convert_ids_to_tokens([p for p_id, p in enumerate(predict) if label[p_id] != -100])
            assert len(s) == len(t) == len(predict), f"{s} {t} {predict} {len(s)}/{len(t)}/{len(predict)}"
            results.append([s, t, predict])
        if (i + 1) % step == 0:
            # Periodic progress report: running loss and metrics so far.
            metrics = compute_metrics(results)
            print(f"Step {i + 1}, loss={epoch_loss / (i + 1):.4f}, "
                  f"{', '.join([f'{key}={value:.4f}' for key, value in metrics.items()])}")
    return epoch_loss / len(data_loader), compute_metrics(results)
def valid(model, data_loader, tokenizer, device, step=500):
    """Evaluate the spell-correction model on a validation set.

    Same batch layout as train(), but runs in eval mode without gradients and
    additionally returns the per-sentence results.

    Returns:
        (mean loss, metrics dict, list of [src_tokens, trg_tokens, predicted_tokens]).
    """
    model.eval()
    epoch_loss = 0
    results = []
    # Inference only: no gradient tracking.
    with torch.no_grad():
        for i, (src, labels, src_tok, trg_tok) in enumerate(data_loader):
            src["labels"] = labels
            src = src.to(device)
            outputs = model(**src)
            loss = outputs.loss
            epoch_loss += loss.item()
            predictions = outputs.logits.argmax(-1).tolist()
            labels = labels.tolist()
            for s, t, label, predict in zip(src_tok, trg_tok, labels, predictions):
                # -100 marks label positions ignored by the loss; drop them
                # before detokenising the prediction.
                predict = tokenizer.convert_ids_to_tokens([p for p_id, p in enumerate(predict) if label[p_id] != -100])
                assert len(s) == len(t) == len(predict), f"{s} {t} {predict} {len(s)}/{len(t)}/{len(predict)}"
                results.append([s, t, predict])
            if (i + 1) % step == 0:
                # Periodic progress report: running loss and metrics so far.
                metrics = compute_metrics(results)
                print(f"Step {i + 1}, loss={epoch_loss / (i + 1):.4f}, "
                      f"{', '.join([f'{key}={value:.4f}' for key, value in metrics.items()])}")
    return epoch_loss / len(data_loader), compute_metrics(results), results
if __name__ == "__main__":
    # ---- Command-line arguments ----
    parser = argparse.ArgumentParser("Spell Error Correction")
    parser.add_argument("--model_name_or_path", default="bert-base-chinese", type=str)
    parser.add_argument("--train_file", default="data/sighan/train.json", type=str)
    parser.add_argument("--valid_file", default="data/sighan/dev.json", type=str)
    parser.add_argument("--save_path", default="sec/checkpoints/bert", type=str)
    parser.add_argument("--train_batch_size", default=8, type=int)
    parser.add_argument("--valid_batch_size", default=8, type=int)
    parser.add_argument("--max_length", default=512, type=int)
    parser.add_argument("--seed", default=42, type=int)
    parser.add_argument("--epochs", default=100, type=int)
    parser.add_argument("--patience", default=3, type=int)
    parser.add_argument("--step", default=2, type=int)
    parser.add_argument("--lr", default=3e-5, type=float)
    parser.add_argument("--weight_decay", default=0.0, type=float)
    args = parser.parse_args()
    print(f"Params={args}")
    # ---- Environment: seed, device, output directory ----
    set_seed(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    # ---- Model and data ----
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    model = AutoModelForMaskedLM.from_pretrained(args.model_name_or_path)
    model = model.to(device)
    compute_model_size(model)
    train_dataset = SpellGECDataset(file_path=args.train_file, mode="train")
    valid_dataset = SpellGECDataset(file_path=args.valid_file, mode="valid")
    collactor = DataCollactorForSpellGEC(tokenizer=tokenizer, max_length=args.max_length)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=args.train_batch_size, collate_fn=collactor)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=args.valid_batch_size, collate_fn=collactor)
    # ---- Optimizer: no weight decay on biases and LayerNorm weights ----
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = optim.AdamW(optimizer_grouped_parameters, lr=args.lr)
    patience = 0
    # ---- Baseline validation before any training ----
    print("Start valid before training...")
    valid_loss, valid_metrics, _ = valid(model, valid_dataloader, tokenizer, device, args.step)
    store_metrics = {
        "valid_metrics": valid_metrics
    }
    print(f"Before training, valid_loss={valid_loss:.4f}, {', '.join([f'{key}={value:.4f}' for key, value in valid_metrics.items()])}")
    # ---- Training loop with early stopping on valid cor_f1 ----
    all_start_time = time.time()
    for epoch in range(args.epochs):
        print(f"Start train {epoch + 1}th epochs")
        start_time = time.time()
        train_loss, train_metrics = train(model, train_dataloader, optimizer, tokenizer, device, args.step)
        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        print(f"Epoch {epoch + 1}th: time={epoch_mins}m{epoch_secs}s, "
              f"train_loss={train_loss:.4f}, {', '.join([f'{key}={value:.4f}' for key, value in train_metrics.items()])}")
        print(f"Start valid {epoch + 1}th epochs")
        start_time = time.time()
        valid_loss, valid_metrics, results = valid(model, valid_dataloader, tokenizer, device, args.step)
        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        print(f"Epoch {epoch + 1}th: time={epoch_mins}m{epoch_secs}s, "
              f"valid_loss={valid_loss:.4f}, {', '.join([f'{key}={value:.4f}' for key, value in valid_metrics.items()])}")
        # Improvement on correction F1: checkpoint the model and reset patience.
        if valid_metrics["cor_f1"] > store_metrics["valid_metrics"]["cor_f1"]:
            store_metrics["train_metrics"] = train_metrics
            store_metrics["valid_metrics"] = valid_metrics
            patience = 0
            torch.save({
                "config": args,
                "epoch": epoch + 1,
                "model_state_dict": model.state_dict(),
                "valid_metrics": valid_metrics,
                "train_metrics": train_metrics,
                "train_loss": train_loss,
                "valid_loss": valid_loss,
            }, os.path.join(args.save_path, "model.tar"))
            print(f"save model to {args.save_path}")
            write_to_file(os.path.join(args.save_path, "result_valid.json"), results)
            print(f"write result to {os.path.join(args.save_path, 'result_valid.json')}")
        else:
            patience += 1
            print(f"patience up to {patience}")
            # Early stop once patience is exhausted.
            # NOTE(review): if NO epoch ever improves over the pre-training
            # baseline, store_metrics has no 'train_metrics' key and the print
            # below raises KeyError — confirm and guard if needed.
            if patience == args.patience:
                all_end_time = time.time()
                epoch_mins, epoch_secs = epoch_time(all_start_time, all_end_time)
                print("Training Over!")
                print(f"All time={epoch_mins}m{epoch_secs}s")
                print(
                    f"Best train_metrics=({', '.join([f'{key}={value:.4f}' for key, value in store_metrics['train_metrics'].items()])})"
                    f"valid_metrics=({', '.join([f'{key}={value:.4f}' for key, value in store_metrics['valid_metrics'].items()])})")
                break
    # Final summary when training ran to completion (patience never exhausted).
    if patience < args.patience:
        all_end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(all_start_time, all_end_time)
        print("Training Over!")
        print(f"All time={epoch_mins}m{epoch_secs}s")
        print(
            f"Best train_metrics=({', '.join([f'{key}={value:.4f}' for key, value in store_metrics['train_metrics'].items()])})"
            f"valid_metrics=({', '.join([f'{key}={value:.4f}' for key, value in store_metrics['valid_metrics'].items()])})")
| 2.09375 | 2 |
01.DataStructure/Stack&Queue/B1966-M.py | SP2021-2/Algorithm | 1 | 12762316 | <filename>01.DataStructure/Stack&Queue/B1966-M.py
# BOJ 1966 "Printer Queue": for each test case, determine in which order the
# tracked document (initially at index check[1]) gets printed when the printer
# always prints the highest-priority document at the front of the queue and
# re-queues the front document if anything behind it has higher priority.
num = 0
num = (int)(input ())
for i in range(num):
    # check = [number of documents, index of the document we are tracking]
    check = [int(x) for x in input().split()]
    # arr = priorities of the queued documents, front of queue first
    arr = [int(x) for x in input().split()]
    checkingP = check[1]
    popNum = 0
    while(True):
        existPop = False
        # Compare the priority of the front document with every one behind it.
        for j in range(len(arr)-1):
            if(arr[0] >= arr[j+1]):
                continue
            else:
                # A higher-priority document exists: move the front to the back
                # and shift the tracked index accordingly (wrapping at 0).
                arr.append(arr[0])
                arr.pop(0)
                checkingP -= 1
                if(checkingP < 0):
                    checkingP = len(arr)-1
                existPop = True
                break
        if(not existPop):
            # The front document has the highest priority: print it.
            arr.pop(0)
            popNum += 1
            if(checkingP == 0):
                # The tracked document was just printed; report its print order.
                print(popNum)
                break
            else:
                checkingP -= 1
| 3.203125 | 3 |
AutomatedTesting/Gem/PythonTests/Terrain/EditorScripts/TerrainPhysicsCollider_ChangesSizeWithAxisAlignedBoxShapeChanges.py | LB-KatarzynaDylska/o3de | 1 | 12762317 | <filename>AutomatedTesting/Gem/PythonTests/Terrain/EditorScripts/TerrainPhysicsCollider_ChangesSizeWithAxisAlignedBoxShapeChanges.py
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
#fmt: off
class Tests:
    """Each attribute is a (success_message, failure_message) pair consumed by
    Report.result(...) when the corresponding editor check passes or fails."""
    create_test_entity = ("Entity created successfully", "Failed to create Entity")
    add_axis_aligned_box_shape = ("Axis Aligned Box Shape component added", "Failed to add Axis Aligned Box Shape component")
    add_terrain_collider = ("Terrain Physics Heightfield Collider component added", "Failed to add a Terrain Physics Heightfield Collider component")
    box_dimensions_changed = ("Aabb dimensions changed successfully", "Failed change Aabb dimensions")
    configuration_changed = ("Terrain size changed successfully", "Failed terrain size change")
#fmt: on
def TerrainPhysicsCollider_ChangesSizeWithAxisAlignedBoxShapeChanges() -> None:
    """
    Summary:
    Verifies that the Terrain Physics Heightfield Collider resizes when the
    Axis Aligned Box Shape dimensions change, via the BehaviorContext and the
    Property Tree.
    Expected Behavior:
    The Editor is stable, there are no warnings or errors.
    Test Steps:
    1) Load the base level
    2) Create test entity
    3) Start the Tracer to catch any errors and warnings
    4) Add the Axis Aligned Box Shape and Terrain Physics Heightfield Collider components
    5) Change the Axis Aligned Box Shape dimensions
    6) Check the Heightfield provider is returning the correct size
    7) Verify there are no errors and warnings in the logs
    :return: None
    """
    import editor_python_test_tools.hydra_editor_utils as hydra
    from editor_python_test_tools.editor_entity_utils import EditorEntity
    from editor_python_test_tools.utils import TestHelper as helper
    from editor_python_test_tools.utils import Report, Tracer
    import azlmbr.physics as physics
    import azlmbr.math as azmath
    import azlmbr.bus as bus
    import math
    SET_BOX_X_SIZE = 5.0
    SET_BOX_Y_SIZE = 6.0
    # The test expects the heightfield grid to have one more vertex than the
    # box size along each axis (N units -> N+1 grid points).
    EXPECTED_COLUMN_SIZE = SET_BOX_X_SIZE + 1
    EXPECTED_ROW_SIZE = SET_BOX_Y_SIZE + 1
    # 1) Load the level
    hydra.open_base_level()
    # 2) Create test entity
    test_entity = EditorEntity.create_editor_entity("TestEntity")
    Report.result(Tests.create_test_entity, test_entity.id.IsValid())
    # 3) Start the Tracer to catch any errors and warnings
    with Tracer() as section_tracer:
        # 4) Add the Axis Aligned Box Shape and Terrain Physics Heightfield Collider components
        aaBoxShape_component = test_entity.add_component("Axis Aligned Box Shape")
        Report.result(Tests.add_axis_aligned_box_shape, test_entity.has_component("Axis Aligned Box Shape"))
        terrainPhysics_component = test_entity.add_component("Terrain Physics Heightfield Collider")
        Report.result(Tests.add_terrain_collider, test_entity.has_component("Terrain Physics Heightfield Collider"))
        # 5) Change the Axis Aligned Box Shape dimensions
        aaBoxShape_component.set_component_property_value("Axis Aligned Box Shape|Box Configuration|Dimensions", azmath.Vector3(SET_BOX_X_SIZE, SET_BOX_Y_SIZE, 1.0))
        add_check = aaBoxShape_component.get_component_property_value("Axis Aligned Box Shape|Box Configuration|Dimensions") == azmath.Vector3(SET_BOX_X_SIZE, SET_BOX_Y_SIZE, 1.0)
        Report.result(Tests.box_dimensions_changed, add_check)
        # 6) Check the Heightfield provider is returning the correct size
        columns = physics.HeightfieldProviderRequestsBus(bus.Broadcast, "GetHeightfieldGridColumns")
        rows = physics.HeightfieldProviderRequestsBus(bus.Broadcast, "GetHeightfieldGridRows")
        Report.result(Tests.configuration_changed, math.isclose(columns, EXPECTED_COLUMN_SIZE) and math.isclose(rows, EXPECTED_ROW_SIZE))
    # 7) Give the tracer a moment to collect any errors/asserts, then report them.
    helper.wait_for_condition(lambda: section_tracer.has_errors or section_tracer.has_asserts, 1.0)
    for error_info in section_tracer.errors:
        Report.info(f"Error: {error_info.filename} {error_info.function} | {error_info.message}")
    for assert_info in section_tracer.asserts:
        Report.info(f"Assert: {assert_info.filename} {assert_info.function} | {assert_info.message}")
if __name__ == "__main__":
    # Entry point when launched by the O3DE editor test runner.
    from editor_python_test_tools.utils import Report
    Report.start_test(TerrainPhysicsCollider_ChangesSizeWithAxisAlignedBoxShapeChanges)
| 1.96875 | 2 |
Assets/StreamingAssets/.q/Lib/site-packages/docplex/cli.py | hennlo/Q-shall-not-pass | 15 | 12762318 | <gh_stars>10-100
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2018
# --------------------------------------------------------------------------
from __future__ import print_function
import argparse
import datetime
import json
import os.path
from os.path import basename
import shlex
import sys
from six import iteritems
from docloud.job import JobClient
from docplex.mp.context import Context
from docloud.status import JobExecutionStatus
pd = None
try:
import pandas as pd
import numpy as np
except ImportError:
pass
# This will be the ipython context if any.  IPython support is optional: if the
# import fails we fall back to plain CLI behavior and leave `ip` as None.
ip = None
try:
    from IPython.core.magic import Magics, magics_class, line_magic
    from IPython.core.display import display
    @magics_class
    class DocplexCliMagics(Magics):
        """IPython line magics wrapping the docplex CLI.

        Provides %docplex_cli (run a CLI command), %docplex_url and
        %docplex_key (set the DOcplexcloud credentials used by the CLI).
        """
        def __init__(self, shell):
            super(DocplexCliMagics, self).__init__(shell)
            # Default credentials come from the environment, if present.
            self.url = os.environ.get('DOCPLEXCLOUD_URL')
            self.key = os.environ.get('DOCPLEXCLOUD_KEY')
        @line_magic
        def docplex_cli(self, line):
            "The docplex CLI magics"
            args = shlex.split(line)
            try:
                return run_command('docplex_cli', args, url=self.url, key=self.key)
            except SystemExit:
                # argparse exits on --help / bad args; swallow it in a notebook.
                pass
        @line_magic
        def docplex_url(self, line):
            # Remember the DOcplexcloud URL for subsequent %docplex_cli calls.
            self.url = line
            return None
        @line_magic
        def docplex_key(self, line):
            # Remember the DOcplexcloud API key for subsequent %docplex_cli calls.
            self.key = line
            return None
    # register the magics
    try:
        ip = get_ipython()  # @UndefinedVariable
        ip.register_magics(DocplexCliMagics)
    except NameError:
        # get_ipython not found -> we are not in a notebook
        pass
except ImportError:
    # ipython is not available
    print("Could not import ipython things")
    pass
class ProgramResults(object):
    """Holds the exit code and collected textual output of one CLI invocation.

    repr() shows the joined output lines on success, or "Exit <code>" when
    the return code is non-zero.
    """

    def __init__(self):
        self.return_code = 0
        self.output = []

    def __repr__(self):
        if self.return_code != 0:
            return "Exit %s" % self.return_code
        return "\n".join(self.output) if self.output else ""

    def add_output(self, m):
        """Append one line of output to be shown to the user."""
        self.output.append(m)
class list_with_html_repr(list):
    """A list whose Jupyter rich representation is an HTML bullet list.

    Fixes the malformed closing tag in the original implementation, which
    emitted ``</lu`` instead of ``</li>`` and therefore produced broken HTML.
    (The no-op ``__init__`` override has been dropped; ``list()`` behaves
    identically.)
    """

    def _repr_html_(self):
        # One <li> element per item; %s matches the original formatting.
        items = "".join("<li>%s</li>" % item for item in self)
        return "<ul>%s</ul>" % items
def ls_jobs(client, program_result, quiet=False, selected_jobs=None):
    """List jobs, either as a pandas DataFrame (in IPython) or as text.

    :param client: the DOcplexcloud JobClient used to query jobs/attachments.
    :param program_result: ProgramResults collecting the textual output.
    :param quiet: when True (non-IPython mode only), print bare job ids.
    :param selected_jobs: optional pre-filtered job list; defaults to all jobs.
    """
    jobs = selected_jobs if selected_jobs else client.get_all_jobs()
    if ip:
        # In a notebook, accumulate rows for a DataFrame display.
        result = []
    for i, j in enumerate(jobs):
        jobid = j["_id"]
        # 'createdAt' is a timestamp in milliseconds; convert to seconds.
        date = datetime.datetime.fromtimestamp(j['createdAt'] / 1e3)
        if ip:
            in_att = list_with_html_repr()
            out_att = list_with_html_repr()
            attachments = client.get_job_attachments(jobid)
            for a in attachments:
                desc = "%s (%s bytes)" % (a['name'], a['length'])
                if a['type'] == 'INPUT_ATTACHMENT':
                    in_att.append(desc)
                else:
                    out_att.append(desc)
            row = [jobid, j["executionStatus"], date, in_att, out_att]
            result.append(row)
        else:
            if not quiet:
                # Verbose text row, followed by one line per attachment.
                m = ("   [{0}] id={1} status={2} created={3}".format(i, jobid, j["executionStatus"],
                                                                     date))
                attachments = client.get_job_attachments(jobid)
                for a in attachments:
                    m += ("\n      %s: %s (%s bytes)" % (a['type'], a['name'], a['length']))
            else:
                # Quiet mode: ids only, printed directly (not stored).
                m = None
                print('%s' % jobid)
            if m:
                program_result.add_output(m)
    if ip:
        # Render the accumulated rows as a DataFrame in the notebook.
        ar = np.array(result)
        result_df = pd.DataFrame(ar, index=range(len(jobs)), columns=['id', 'status', 'created', 'input attachments', 'output attachments'])
        with pd.option_context("display.max_colwidth", -1):
            display(result_df)
def rm_job(client, arguments, verbose=False):
    """Delete each job id listed in *arguments*.

    A single argument 'all' expands to every job known to the client.
    Failures are printed but never raised.
    """
    if len(arguments) == 1 and arguments[0] == 'all':
        arguments = [job["_id"] for job in client.get_all_jobs()]
    for job_id in arguments:
        try:
            if verbose:
                print("Deleting %s" % job_id)
            if not client.delete_job(job_id):
                print("Could not delete job %s" % job_id)
        except Exception as exc:
            print(exc)
# Timestamp of the most recently printed solve-details header (module state,
# so repeated polls only print a new header when the job was updated).
last_updated_job = None


def print_job_info(info):
    """Pretty-print the solve details contained in *info*.

    A dated "--- Solve details ---" header is emitted whenever the job's
    ``updatedAt`` timestamp differs from the one printed last; the detail
    key/value pairs are then printed with aligned columns.
    """
    global last_updated_job
    if 'details' not in info:
        return
    stamp = info.get('updatedAt', None)
    if stamp:
        # 'updatedAt' is milliseconds since the epoch.
        when = datetime.datetime.fromtimestamp(float(stamp) / 1000.0).strftime('%Y-%m-%d %H:%M:%S %Z')
    else:
        when = None
    if when != last_updated_job:
        print('--- Solve details - %s ---' % when)
        last_updated_job = when
    # Align the ':' separators on the longest detail name.
    details = info['details']
    width = max(len(name) for name in details)
    row_format = '  %-' + str(width) + 's : %s'
    for key, value in iteritems(details):
        print(row_format % (key, value))
def execute_job(client, inputs, verbose, details, nodelete):
    """Submit a job, wait for it, print its logs and download its outputs.

    :param client: DOcplexcloud JobClient.
    :param inputs: list of {'name': ..., 'filename': ...} input attachments.
    :param verbose: print progress messages when True.
    :param details: poll and print solve details (via print_job_info) when True.
    :param nodelete: keep the job on the service after execution when True.
    """
    # The continuous logs feature is not available in version <= 1.0.257
    # we'll check TypeError when calling execute() and adjust parameters
    # if not available while waiting for the latest version to be out
    continuous_logs_available = False
    response = None
    try:
        if verbose:
            print("Executing")
        xkwargs = {'input': inputs,
                   'delete_on_completion': False}
        if details:
            xkwargs['info_cb'] = print_job_info
        try:
            # First try with continuous log streaming; fall back if the
            # installed client is too old to accept the keyword.
            xkwargs['continuous_logs'] = True
            try:
                response = client.execute(log=sys.stdout, **xkwargs)
                continuous_logs_available = True
            except TypeError as cla:
                if 'execute() got an unexpected keyword argument \'continuous_logs\'' in str(cla):
                    del xkwargs['continuous_logs']
                    response = client.execute(**xkwargs)
                else:
                    raise
        except TypeError as te:
            # Same dance for the solve-details callback keyword.
            if 'execute() got an unexpected keyword argument \'info_cb\'' in str(te):
                print('Your version of docplexcloud client does not support details polling (--details option). Please update')
                return(-1)
            else:
                raise
        if response.execution_status != JobExecutionStatus.PROCESSED:
            print("Execution failed.\nDetails:\n%s" % json.dumps(response.job_info, sort_keys=True, indent=4))
        log_items = client.get_log_items(response.jobid)
        if not continuous_logs_available:
            # download and print logs if the continuous log feature was not available
            for log in log_items:
                for record in log["records"]:
                    print(record["message"])
        # Save every output attachment to the current directory.
        attachments = client.get_job_attachments(response.jobid)
        for a in attachments:
            if a['type'] == 'OUTPUT_ATTACHMENT':
                if verbose:
                    print("Downloading attachment %s" % a["name"])
                data = client.download_job_attachment(response.jobid, a["name"])
                with open(a["name"], "w+b") as f:
                    f.write(data)
    finally:
        # Cleanup: jobs are kept only when --no-delete was requested.
        if response and not nodelete:
            if verbose:
                print("Deleting job %s (cleanup)" % response.jobid)
            client.delete_job(response.jobid)
def run_command(prog, argv, url=None, key=None):
    """Parse *argv* and dispatch one DOcplexcloud CLI command.

    :param prog: program name used in help output.
    :param argv: argument list (without the program name).
    :param url: optional DOcplexcloud URL overriding the docplex config.
    :param key: optional DOcplexcloud API key overriding the docplex config.
    :return: a ProgramResults with return_code and any collected output.

    Bug fix: the ``download`` command previously passed the *builtin*
    ``id`` function to ``client.download_job_attachment`` instead of the
    current job id ``jid`` (the ``id`` variable only exists in the ``info``
    branch), so downloads never targeted the requested job.
    """
    description = '''Command line client for DOcplexcloud.'''
    epilog = '''Command details:
  info           Get and display information for the jobs which ids are
                     specified as ARG.
  download       Download the attachment to the the current directory.
  rm             Delete the jobs which ids are specfied as ARG.
  rm all         Delete all jobs.
  logs           Download and display the logs for the jobs which id are specified.
  ls             Lists the jobs.'''
    epilog_cli = '''
  execute        Submit a job and wait for end of execution. Each ARG that
                     is a file is uploaded as the job input. Example:
                     Example: python run.py execute model.py model.data -v
                     executes a job which input files are model.py and
                     model.dada, in verbose mode.
    '''
    filter_help = '''
   Within filters, the following variables are defined:
      now: current date and time as timestamp in millisec
      minute: 60 sec in millisec
      hour: 60 minutes in millisec
      day: 24 hour in millisec
      job: The current job being filtered
   Example filter usage:
      Delete all jobs older than 3 hour
         python -m docplex.cli --filter "now-job['startedAt'] > 3*hour " rm
    '''
    # 'execute' is only advertised outside notebooks (the magics handle it).
    if ip is None:
        epilog += epilog_cli
    epilog += filter_help
    parser = argparse.ArgumentParser(prog=prog, description=description, epilog=epilog,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('command',
                        metavar='COMMAND',
                        help='DOcplexcloud command')
    parser.add_argument('arguments', metavar='ARG', nargs='*',
                        help='Arguments for the command')
    parser.add_argument('--no-delete', action='store_true', default=False,
                        dest='nodelete',
                        help="If specified, jobs are not deleted after execution")
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Verbose mode')
    parser.add_argument('--as', nargs=1, metavar='HOST',
                        dest="host_config", default=None,
                        help="'as host' - use the cplex_config_<HOST>.py configuration file found in PYTHONPATH")
    parser.add_argument('--url', metavar='URL',
                        dest="url", default=None,
                        help="The DOcplexcloud connection URL. If not specified, will use those found in docplex config files")
    parser.add_argument('--key', metavar='API_KEY',
                        dest="key", default=None,
                        help="The DOcplexcloud connection key. If not specified, will use those found in docplex config files")
    parser.add_argument('--details', action='store_true', default=False,
                        help='Display solve details as they are available')
    parser.add_argument('--filter', metavar='FILTER', default=None,
                        help='filter on job. Example: --filter "True if (now-job.createdAt) > 3600"')
    parser.add_argument('--quiet', '-q', action='store_true', default=False,
                        help='Only show numeric IDs as output')
    args = parser.parse_args(argv)
    program_result = ProgramResults()
    # Get the context here so that we have some credentials at hand
    context = Context.make_default_context()
    if args.host_config is not None:
        # Look for cplex_config_<HOST>.py anywhere on sys.path.
        config_name = "cplex_config_%s.py" % args.host_config[0]
        config_file = list(filter(os.path.isfile, [os.path.join(x, config_name) for x in sys.path]))
        if len(config_file) == 0:
            print("Could not find config file for host: %s" % args.host_config[0])
            program_result.return_code = -1
            return(program_result)
        if args.verbose:
            print("Overriding host config with: %s" % config_file[0])
        context.read_settings(config_file[0])
    # use credentials in context unless they are given to this function
    client_url = context.solver.docloud.url if url is None else url
    client_key = context.solver.docloud.key if key is None else key
    # but if there are some credentials in arguments (--url, --key), use them
    if args.url:
        client_url = args.url
    if args.key:
        client_key = args.key
    if args.verbose:
        print('**** Connecting to %s with key %s' % (client_url, client_key))
        print('Will send command %s' % args.command)
        print('Arguments:')
        for i in args.arguments:
            print('  -> %s' % i)
        print('verbose = %s' % args.verbose)
    client = JobClient(client_url, client_key)
    # When --filter is given, evaluate the expression against each job and
    # keep the matching ones; commands then operate on that selection.
    target_jobs = []
    if args.filter:
        jobs = client.get_all_jobs()
        now = (datetime.datetime.now() - datetime.datetime(1970,1,1)).total_seconds() * 1000.0
        minute = 60 * 1000
        hour = 60 * minute
        day = 24 * hour
        context = {'now': now,
                   'minute': minute,
                   'hour': hour,
                   'day': day,
                   }
        for j in jobs:
            context['job'] = j
            keep = False
            try:
                keep = eval(args.filter, globals(), context)
            except KeyError:  # if a key was not found, just assume expression is false
                keep = False
            if keep:
                target_jobs.append(j)
        if target_jobs:
            for i in target_jobs:
                print('applying to %s' % i['_id'])
    if args.command == 'ls':
        ls_jobs(client, program_result, quiet=args.quiet, selected_jobs=target_jobs)
    elif args.command == 'info':
        if target_jobs:
            args.arguments = [x["_id"] for x in target_jobs]
        elif len(args.arguments) == 1 and args.arguments[0] == 'all':
            args.arguments = [x["_id"] for x in client.get_all_jobs()]
        for job_id in args.arguments:  # renamed from 'id' (shadowed the builtin)
            info_text = "NOT FOUND"
            try:
                job = client.get_job(job_id)
                info_text = json.dumps(job, indent=3)
            except Exception:  # narrowed from a bare except
                pass
            print("%s:\n%s" % (job_id, info_text))
    elif args.command == 'rm':
        if target_jobs:
            joblist = [x["_id"] for x in target_jobs]
        elif args.arguments:
            joblist = args.arguments
        else:
            # No ids given: read whitespace-separated ids from stdin.
            joblist = shlex.split(sys.stdin.read())
        rm_job(client, joblist, verbose=args.verbose)
    elif args.command == 'logs':
        if target_jobs:
            if len(target_jobs) != 1:
                print('Logs can only be retrieved when filter select one job (actual selection count = %s)' % len(target_jobs))
                program_result.return_code = -1
                return(program_result)
            args.arguments = [x["_id"] for x in target_jobs]
        if not args.arguments:
            print('Please specify job list in arguments or using filter.')
            program_result.return_code = -1
            return(program_result)
        for jid in args.arguments:
            log_items = client.get_log_items(jid)
            for log in log_items:
                for record in log["records"]:
                    print(record["message"])
    elif args.command == 'download':
        if target_jobs:
            if len(target_jobs) != 1:
                print('Jobs can only be downloaded when filter select one job (actual selection count = %s)' % len(target_jobs))
                program_result.return_code = -1
                return(program_result)
            args.arguments = [x["_id"] for x in target_jobs]
        for jid in args.arguments:
            job = client.get_job(jid)
            for attachment in job['attachments']:
                print('downloading %s' % attachment['name'])
                with open(attachment['name'], 'wb') as f:
                    # BUG FIX: was download_job_attachment(id, ...) — the
                    # builtin id() function, not the current job id.
                    f.write(client.download_job_attachment(jid, attachment['name']))
    elif args.command == 'execute':
        if target_jobs:
            print('Execute command does not support job filtering')
            program_result.return_code = -1
            return(program_result)
        inputs = [{'name': basename(a), 'filename': a} for a in args.arguments]
        if args.verbose:
            for i in inputs:
                print("Uploading %s as attachment name %s" % (i['filename'], i['name']))
        execute_job(client, inputs, args.verbose, args.details, args.nodelete)
    else:
        print("Unknown command: %s" % args.command)
        program_result.return_code = -1
        return(program_result)
    return(program_result)
if __name__ == '__main__':
    # Exit with the collected output as the message when there is any,
    # otherwise with the numeric return code.
    program_result = run_command(sys.argv[0], sys.argv[1:])
    if program_result.output:
        exit(program_result)
    else:
        exit(program_result.return_code)
| 2.140625 | 2 |
tensorflow/compiler/plugin/poplar/tests/arg_min_max_test.py | pierricklee/tensorflow | 0 | 12762319 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from test_utils import ReportJSON
from tensorflow.compiler.tests import xla_test
from tensorflow.python.platform import googletest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# Dtypes exercised by the parameterized tests below; each becomes one named
# test case keyed by its numpy dtype name.
TYPES = (np.float16, np.float32, np.int32)
TESTCASES = [{"testcase_name": np.dtype(x).name, "dtype": x} for x in TYPES]
def _get_random_input(dtype, shape):
if np.issubdtype(dtype, np.integer):
info_fn = np.iinfo
random_fn = np.random.random_integers
else:
info_fn = np.finfo
random_fn = np.random.uniform
return random_fn(info_fn(dtype).min, info_fn(dtype).max,
size=shape).astype(dtype)
class ArgMinMax(xla_test.XLATestCase, parameterized.TestCase):
  """Tests argmax/argmin lowering on the IPU across dtypes, ranks and axes.

  Each test configures an IPU model device, builds a small graph around
  math_ops.argmax/argmin and compares against the numpy reference.  Tests
  that create a ReportJSON additionally assert on the number of IPU events.
  """
  @parameterized.named_parameters(*TESTCASES)
  def testArgMaxBasic(self, dtype):
    """argmax over axis 0 of a rank-3 tensor, with event-count check."""
    cfg = IPUConfig()
    cfg._profiling.enable_ipu_events = True  # pylint: disable=protected-access
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    def model(a):
      return math_ops.argmax(a, output_type=dtypes.int32)
    with self.session() as sess:
      report_json = ReportJSON(self, sess)
      report_json.reset()
      with ops.device('cpu'):
        pa = array_ops.placeholder(dtype, [3, 5, 2])
      with ops.device("/device:IPU:0"):
        out = model(pa)
      input = _get_random_input(dtype, (3, 5, 2))
      fd = {pa: input}
      result = sess.run(out, fd)
      self.assertAllClose(result, np.argmax(input, axis=0))
      report_json.parse_log(assert_len=4)
  @parameterized.named_parameters(*TESTCASES)
  def testArgMaxHalf(self, dtype):
    # NOTE(review): despite the name this runs for every dtype in TESTCASES,
    # not only half precision — same graph as testArgMaxBasic, no profiling.
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    def model(a):
      return math_ops.argmax(a, output_type=dtypes.int32)
    with self.session() as sess:
      with ops.device('cpu'):
        pa = array_ops.placeholder(dtype, [3, 5, 2])
      with ops.device("/device:IPU:0"):
        out = model(pa)
      input = _get_random_input(dtype, (3, 5, 2))
      fd = {pa: input}
      result = sess.run(out, fd)
      self.assertAllClose(result, np.argmax(input, axis=0))
  @parameterized.named_parameters(*TESTCASES)
  def testArgMaxMultiDimensional(self, dtype):
    """argmax over every axis of a rank-6 tensor, axis fed as a placeholder."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    def model(a, axis):
      return math_ops.argmax(a, axis=axis, output_type=dtypes.int32)
    for axis in range(6):
      with self.session() as sess:
        with ops.device('cpu'):
          pa = array_ops.placeholder(dtype, [1, 2, 3, 4, 5, 6])
          p_axis = array_ops.placeholder(np.int32, shape=())
        with ops.device("/device:IPU:0"):
          out = model(pa, p_axis)
        input = _get_random_input(dtype, (1, 2, 3, 4, 5, 6))
        fd = {pa: input, p_axis: axis}
        result = sess.run(out, fd)
        self.assertAllClose(result, np.argmax(input, axis=axis))
  @parameterized.named_parameters(*TESTCASES)
  def testArgMinBasic(self, dtype):
    """argmin over axis 0 of a rank-3 tensor, with event-count check."""
    cfg = IPUConfig()
    cfg._profiling.enable_ipu_events = True  # pylint: disable=protected-access
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    def model(a):
      return math_ops.argmin(a, output_type=dtypes.int32)
    with self.session() as sess:
      report_json = ReportJSON(self, sess)
      with ops.device('cpu'):
        pa = array_ops.placeholder(dtype, [3, 5, 2])
      with ops.device("/device:IPU:0"):
        out = model(pa)
      report_json.reset()
      input = _get_random_input(dtype, (3, 5, 2))
      fd = {pa: input}
      result = sess.run(out, fd)
      self.assertAllClose(result, np.argmin(input, axis=0))
      report_json.parse_log(assert_len=4)
  @parameterized.named_parameters(*TESTCASES)
  def testArgMinHalf(self, dtype):
    # NOTE(review): runs for every dtype in TESTCASES, like testArgMaxHalf.
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    def model(a):
      return math_ops.argmin(a, output_type=dtypes.int32)
    with self.session() as sess:
      with ops.device('cpu'):
        pa = array_ops.placeholder(dtype, [3, 5, 2])
      with ops.device("/device:IPU:0"):
        out = model(pa)
      input = _get_random_input(dtype, (3, 5, 2))
      fd = {pa: input}
      result = sess.run(out, fd)
      self.assertAllClose(result, np.argmin(input, axis=0))
  @parameterized.named_parameters(*TESTCASES)
  def testArgMinMultiDimensional(self, dtype):
    """argmin over every axis of a rank-6 tensor, axis fed as a placeholder."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    def model(a, axis):
      return math_ops.argmin(a, axis=axis, output_type=dtypes.int32)
    for axis in range(6):
      with self.session() as sess:
        with ops.device('cpu'):
          pa = array_ops.placeholder(dtype, [1, 2, 3, 4, 5, 6])
          p_axis = array_ops.placeholder(np.int32, shape=())
        with ops.device("/device:IPU:0"):
          out = model(pa, p_axis)
        input = _get_random_input(dtype, (1, 2, 3, 4, 5, 6))
        fd = {pa: input, p_axis: axis}
        result = sess.run(out, fd)
        self.assertAllClose(result, np.argmin(input, axis=axis))
  @parameterized.named_parameters(*TESTCASES)
  def testArgMaxNegativeDim(self, dtype):
    """argmax with a negative axis (-1 selects the innermost dimension)."""
    cfg = IPUConfig()
    cfg._profiling.enable_ipu_events = True  # pylint: disable=protected-access
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    def model(a):
      return math_ops.argmax(a, axis=-1, output_type=dtypes.int32)
    with self.session() as sess:
      report_json = ReportJSON(self, sess)
      report_json.reset()
      with ops.device('cpu'):
        pa = array_ops.placeholder(dtype, [3, 5, 2])
      with ops.device("/device:IPU:0"):
        out = model(pa)
      input = _get_random_input(dtype, (3, 5, 2))
      fd = {pa: input}
      result = sess.run(out, fd)
      self.assertAllClose(result, np.argmax(input, axis=-1))
      report_json.parse_log(assert_len=4)
  @parameterized.named_parameters(*TESTCASES)
  def testArgMaxVector(self, dtype):
    """argmax of a rank-1 tensor (scalar result)."""
    cfg = IPUConfig()
    cfg._profiling.enable_ipu_events = True  # pylint: disable=protected-access
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    def model(a):
      return math_ops.argmax(a, axis=0, output_type=dtypes.int32)
    with self.session() as sess:
      report_json = ReportJSON(self, sess)
      report_json.reset()
      with ops.device('cpu'):
        pa = array_ops.placeholder(dtype, [3])
      with ops.device("/device:IPU:0"):
        out = model(pa)
      input = _get_random_input(dtype, (3))
      fd = {pa: input}
      result = sess.run(out, fd)
      self.assertAllClose(result, np.argmax(input))
      report_json.parse_log(assert_len=4)
if __name__ == "__main__":
  # Force XLA to cluster even single ops so the IPU backend sees everything.
  os.environ['TF_XLA_FLAGS'] = ('--tf_xla_min_cluster_size=1 ' +
                                os.environ.get('TF_XLA_FLAGS', ''))
  googletest.main()
| 2.15625 | 2 |
parking_project/helpers/file_io_helper.py | abhinavsri000/squad_activity | 0 | 12762320 | """
This is a Utility to parse a file.
"""
def parse_file(input_file=""):
    """Read *input_file* and return a list of its lines.

    Trailing whitespace (including the newline) is stripped from each line.

    :param input_file: path to the file to read.
    :return: list of stripped lines, or None if the file cannot be read.
    """
    try:
        with open(input_file, 'r') as file:
            return [line.rstrip() for line in file]
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt
    # and SystemExit); callers still receive None on any read failure.
    except OSError:
        return None
| 3.40625 | 3 |
DPopt.py | mikiec84/Python-DP-Means-Clustering | 31 | 12762321 | #!/usr/bin/env python
import cluster
import sys
import csv
import math
import random
import scipy.optimize
# function to minimize
def g(l, returnObject=False):
    """Objective for the lambda search: run dp-means `iters` times at
    penalty `l` and return the smallest cross-validation error seen.

    When returnObject is True, also return the clustering object that
    achieved that error.  (Bug fix: the original returned the *last* run
    `k1` instead of the best run `kmin`.)
    """
    minError = sys.maxint
    kmin = None
    for i in range(0, iters):
        k1 = cluster.dpmeans(res, l, xVal)
        err, xerr = k1.run()
        if xerr < minError:
            minError = xerr
            kmin = k1
    if returnObject:
        return minError, kmin
    return minError
#parameters
iters = 8 # iterations in search for min
maxClusters = 12 # used for setting minimum lambda
xValFrac = 0.2 # 20% of data for xVal
# Read data from standard in
res = []
for row in csv.reader(sys.stdin):
    res.append([float(x) for x in row])
# Per-feature minima and maxima over the whole data set.
nFeatures = len(res[0])
minx, maxx = [sys.maxint for i in range(0,nFeatures)], [-sys.maxint for i in range(0,nFeatures)]
for r in res:
    idx = 0
    for i in minx:
        if r[idx] < i:
            minx[idx] = r[idx]
        idx += 1
    idx = 0
    for i in maxx:
        if r[idx] > i:
            maxx[idx] = r[idx]
        idx += 1
# Largest per-feature range: used to bound the lambda search bracket.
dataSpread = max([abs(x - y) for x, y in zip(maxx, minx)])
# quick and dirty min scale is average dist if data was along dataspread
dataGrain = dataSpread/maxClusters
# make sure data is in random order
random.shuffle(res)
# set aside for cross-validation
xVal = int(xValFrac*len(res))
# Brent's method minimizes g over lambda within the bracket
# (1/dataSpread, 1/dataGrain).
optLambda = scipy.optimize.brent(g,
                                 brack=(1./dataSpread, 1./dataGrain),
                                 tol=1e-4,
                                 full_output=0,
                                 maxiter=100)
# Re-run at the optimum to obtain the error and the clustering object.
e,k = g(optLambda, returnObject=True)
# Write cluster assignments and per-iteration errors as CSV.
wrtr = csv.writer(open("./output/opt_result.csv","wb"))
for x in k.getOutput():
    wrtr.writerow(x)
eWrtr = csv.writer(open("./output/opt_error.csv","wb"))
for x in k.getErrors():
    eWrtr.writerow(x)
print "lambda: %2.5f\n with error: %2.5f\n"%(optLambda,e)
| 2.421875 | 2 |
quantum_solver/custom_gates/hhl4x4.py | nakul-shahdadpuri/Minor_Project | 0 | 12762322 | <reponame>nakul-shahdadpuri/Minor_Project<filename>quantum_solver/custom_gates/hhl4x4.py
"""This module contains functions to apply a controlled-Hamiltonian.
"""
from typing import Tuple, Union, List
from qiskit import QuantumCircuit, QuantumRegister, CompositeGate
from hhl4x4.custom_gates import comment, ccz, crx, csqtrx, crzz
QubitType = Tuple[QuantumRegister, int]
class Hamiltonian4x4Gate(CompositeGate):
    """Controlled application of the 4x4 Hamiltonian simulation used by HHL.

    Built entirely from controlled primitives (ccz, crx, csqrtX, crzz, ccx)
    so the whole sequence acts only when *ctrl* is set.
    """
    def __init__(self, ctrl: QubitType, targets: Tuple[QubitType],
                 params: List[float] = None, circuit: QuantumCircuit = None):
        """Initialize the Hamiltonian4x4Gate class.
        :param ctrl: The control qubit used to control the Hamiltonian gate.
        :param targets: 2 qubits used to apply the Hamiltonian.
        :param params: floating point parameters (5 rotation angles); defaults
            to the angles for the plain Hamiltonian (no powers).
        :param circuit: The associated quantum circuit.
        """
        if params is None:
            # Default parameters for a simple Hamiltonian (no powers)
            params = [0.19634953, 0.37900987, 0.9817477, 1.87900984, 0.58904862]
        used_qubits = [ctrl, targets[0], targets[1]]
        super().__init__(self.__class__.__name__, # name
                         [], # parameters
                         used_qubits, # qubits
                         circuit) # circuit
        # Fixed gate sequence; comments below mark the decomposition steps.
        self.comment("[HS] Start.")
        self.ccz(ctrl, targets[0], targets[1])
        self.crx(params[0], ctrl, targets[1])
        # Inverse of the controlled sqrt(X) gate on targets[1].
        self._attach(csqtrx.CsqrtX(ctrl, targets[1], self).inverse())
        self.crzz(params[1], ctrl, targets[1])
        self.crx(params[2], ctrl, targets[0])
        self.crzz(params[3], ctrl, targets[0])
        self.ccx(ctrl, targets[0], targets[1])
        self.crx(params[4], ctrl, targets[0])
        self.ccx(ctrl, targets[0], targets[1])
        self.ccz(ctrl, targets[0], targets[1])
        self.comment("[HS] End.")
# Adding the method to the QuantumCircuit and CompositeGate classes.
def hamiltonian4x4(self, ctrl: QubitType, targets: Tuple[QubitType],
                   params: List[float] = None) -> Hamiltonian4x4Gate:
    """Attach a Hamiltonian4x4Gate to this circuit/composite gate.

    :param ctrl: control qubit.
    :param targets: the 2 target qubits.
    :param params: optional rotation angles forwarded to the gate.
    :return: the attached Hamiltonian4x4Gate instance.
    """
    self._check_qubit(ctrl)
    self._check_qubit(targets[0])
    self._check_qubit(targets[1])
    self._check_dups([ctrl, targets[0], targets[1]])
    return self._attach(Hamiltonian4x4Gate(ctrl, targets, params, self))
# Expose the helper as a method on both circuit classes.
QuantumCircuit.hamiltonian4x4 = hamiltonian4x4
CompositeGate.hamiltonian4x4 = hamiltonian4x4
| 2.65625 | 3 |
core/api/urls.py | kiae-grid/panda-bigmon-core | 0 | 12762323 | <reponame>kiae-grid/panda-bigmon-core
"""
api.urls
"""
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
### #FIXME admin.autodiscover()
# URL routes shared by all API applications; each sub-application brings its
# own urls module mounted under its prefix.
common_patterns = patterns('',
    ### Applications
    url(r'^htcondor', include('core.api.htcondorapi.urls')),
    url(r'^jedi', include('core.api.jedi.urls')),
    url(r'^user', include('core.api.user.urls')),
)
# Final URLconf: start empty, then append the common application routes.
urlpatterns = patterns('',)
urlpatterns += common_patterns
| 1.882813 | 2 |
docs/conf.py | jazzband/pathlib2 | 4 | 12762324 | # Configuration file for the Sphinx documentation builder.
# -- Project information -----------------------------------------------------
project = 'pathlib2'
copyright = '2012-2014 <NAME> and contributors; 2014-2021, <NAME> and contributors'
author = '<NAME>'
# The full version, including alpha/beta/rc tags
# (read from the VERSION file one directory above docs/).
with open("../VERSION", "r") as version_file:
    release = version_file.read().strip()
# -- General configuration ---------------------------------------------------
extensions = []
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
| 1.1875 | 1 |
game1.py | Athenian-ComputerScience-Fall2020/guessing-game-eskinders17 | 0 | 12762325 | <gh_stars>0
#collaboraters - https://www.youtube.com/watch?v=KdMAj8Et4xk
# <NAME>
from random import randint
def game():
    """Play the guessing game until the player wins.

    Reads a numeric range, picks a random number in it, and gives the
    player up to 5 counted attempts; on loss a fresh round starts
    automatically (the original restarted via recursion).

    Bug fix: the original called game() recursively inside its `except`
    branches and then *fell through* into code using variables that were
    never assigned (NameError on `number`/`x`).  Retry loops replace the
    recursion, and the bare `except` clauses are narrowed to ValueError.
    """
    guess_limit = 5  # maximum number of counted attempts per round
    while True:  # replay on loss
        # --- read a valid range and pick the secret number ---
        while True:
            try:
                low = int(input("Enter minimum number: "))
                high = int(input("Enter maximum number: "))
                # randint raises ValueError when low > high, which also
                # re-prompts, matching the original's catch-all restart.
                number = randint(low, high)
                break
            except ValueError:
                print()
                print("Invalid Input, Please start again")
                print()
        print("Guess a number between", low, " and", high)
        # --- collect up to guess_limit valid guesses ---
        guesses = 0
        won = False
        while guesses < guess_limit:
            try:
                guess = int(input("Guess Here: "))
            except ValueError:
                print()
                print("Invalid Input, please try again")
                print()
                continue  # invalid input does not consume an attempt
            guesses += 1
            if guess == number:
                won = True
                break
            if guesses < guess_limit:
                if guess < number:
                    print("Your number is too low.")
                else:
                    print("Your number is too high.")
        if won:
            print()
            print("Congrats, you guessed the number! ")
            print("It only took you", guesses, "guesses!")
            return
        print()
        print("Sorry, you are out of guesses!!")
        print()
        # loop repeats -> new round, like the original recursive restart
#if out_of_guesses:
# print("Game over, you are out of guesses")
# game()
game()
while True:
print("Would do you like to play again?: ")
a = input("Enter 'yes' or 'no': ")
if a == 'yes':
game()
else:
print("good bye")
break
| 3.90625 | 4 |
play_sop.py | Fable67/Streamlined-Off-Policy-Learning | 5 | 12762326 | <reponame>Fable67/Streamlined-Off-Policy-Learning
import argparse
import gym
from lib import model
from lib.Hyperparameters import *
import numpy as np
import torch
import torch.nn as nn
try:
import roboschool
except:
print("A problem occured when trying to import roboschool. Maybe not installed?")
try:
import pybullet_envs
except:
print("A problem occured when trying to import pybullet_envs. Maybe not installed?")
if __name__ == "__main__":
    # Command-line interface: the model checkpoint is required; environment
    # name, recording directory and evaluation mode are optional.
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", required=True, help="Model file to load")
    parser.add_argument("-e", "--env", default=ENV_ID,
                        help="Environment name to use, default=" + ENV_ID)
    parser.add_argument(
        "-r", "--record", help="If specified, sets the recording dir, default=Disabled")
    parser.add_argument("--eval", default=False, action='store_true', help='Evaluates Agent')
    args = parser.parse_args()
    # Two separate environment instances: one for the 100-episode evaluation,
    # one for the (optionally recorded) demo rollout.
    reward_eval_env = gym.make(args.env)
    env = gym.make(args.env)
    if args.record:
        env = gym.wrappers.Monitor(env, args.record, force=True)
    # Restore the trained actor network and wrap it in an agent that can
    # produce deterministic (mean) actions.
    net = model.ModelActor(env.observation_space.shape[0], env.action_space.shape[0],
                           HID_SIZE, ACTF)
    net.load_state_dict(torch.load(args.model))
    agent = model.Agent(net, FIXED_SIGMA_VALUE, BETA)
    if args.eval:
        # Average the deterministic-policy reward over 100 consecutive episodes.
        print("Evaluating Agent...")
        rewards = 0.0
        steps = 0
        for _ in range(100):
            obs = reward_eval_env.reset()
            while True:
                obs_v = torch.FloatTensor([obs])
                mu_v = agent.get_actions_deterministic(obs_v)
                action = mu_v.squeeze(dim=0).data.cpu().numpy()
                obs, reward, done, _ = reward_eval_env.step(action)
                rewards += reward
                steps += 1
                if done:
                    break
        print("The Agent was able to reach an average reward of %.3f over 100 consecutive episodes" %
              (rewards / 100))
    # Single demo episode; rendered on screen unless it is being recorded.
    obs = env.reset()
    total_reward = 0.0
    total_steps = 0
    while True:
        obs_v = torch.FloatTensor([obs])
        mu_v = agent.get_actions_deterministic(obs_v)
        action = mu_v.squeeze(dim=0).data.cpu().numpy()
        obs, reward, done, _ = env.step(action)
        total_reward += reward
        total_steps += 1
        if done:
            break
        if args.record is None:
            env.render()
    print("In %d steps we got %.3f reward" % (total_steps, total_reward))
    if args.record is None:
        env.close()
| 2.28125 | 2 |
chap6/bbox_labeling/detection_anno_bbox2voc.py | wang420349864/dlcv_for_beginners | 1,424 | 12762327 | <reponame>wang420349864/dlcv_for_beginners<filename>chap6/bbox_labeling/detection_anno_bbox2voc.py
import os
import sys
import xml.etree.ElementTree as ET
#import xml.dom.minidom as minidom
import cv2
from bbox_labeling import SimpleBBoxLabeling
# Convert every "<image>.bbox" annotation in the given directory into a
# Pascal-VOC style XML file placed next to the image.
input_dir = sys.argv[1].rstrip(os.sep)
bbox_filenames = [x for x in os.listdir(input_dir) if x.endswith('.bbox')]
for bbox_filename in bbox_filenames:
    bbox_filepath = os.sep.join([input_dir, bbox_filename])
    # The image path is the .bbox path with the ".bbox" suffix stripped.
    jpg_filepath = bbox_filepath[:-5]
    if not os.path.exists(jpg_filepath):
        print('Something is wrong with {}!'.format(bbox_filepath))
        # Skip this annotation instead of aborting the whole batch
        # (the original `break` stopped processing all remaining files).
        continue
    root = ET.Element('annotation')
    filename = ET.SubElement(root, 'filename')
    jpg_filename = jpg_filepath.split(os.sep)[-1]
    filename.text = jpg_filename
    # Image size (height, width, channels) read from the image itself.
    img = cv2.imread(jpg_filepath)
    h, w, c = img.shape
    size = ET.SubElement(root, 'size')
    width = ET.SubElement(size, 'width')
    width.text = str(w)
    height = ET.SubElement(size, 'height')
    height.text = str(h)
    depth = ET.SubElement(size, 'depth')
    depth.text = str(c)
    # One <object> element per labeled bounding box.
    bboxes = SimpleBBoxLabeling.load_bbox(bbox_filepath)
    for obj_name, coord in bboxes:
        obj = ET.SubElement(root, 'object')
        name = ET.SubElement(obj, 'name')
        name.text = obj_name
        bndbox = ET.SubElement(obj, 'bndbox')
        xmin = ET.SubElement(bndbox, 'xmin')
        xmax = ET.SubElement(bndbox, 'xmax')
        ymin = ET.SubElement(bndbox, 'ymin')
        ymax = ET.SubElement(bndbox, 'ymax')
        (left, top), (right, bottom) = coord
        xmin.text = str(left)
        xmax.text = str(right)
        ymin.text = str(top)
        ymax.text = str(bottom)
    xml_filepath = jpg_filepath[:jpg_filepath.rfind('.')] + '.xml'
    # ET.tostring() returns bytes, so the file must be opened in binary mode;
    # the original text-mode open raised TypeError on Python 3.
    with open(xml_filepath, 'wb') as f:
        anno_xmlstr = ET.tostring(root)
        # In case a nicely formatted xml is needed
        # uncomment the following 2 lines and minidom import
        #anno_xml = minidom.parseString(anno_xmlstr)
        #anno_xmlstr = anno_xml.toprettyxml().encode('utf-8')
        f.write(anno_xmlstr)
| 2.703125 | 3 |
BOJ_Solved/BOJ-11279.py | CodingLeeSeungHoon/Python_Algorithm_TeamNote | 7 | 12762328 | # coding=utf-8
"""
백준 11279번 : 최대 힙
"""
import heapq
import sys
# Process N operations: "0" pops and prints the current maximum
# (or 0 when empty); any other number is inserted into the heap.
N = int(sys.stdin.readline())
max_heap = []
for _ in range(N):
    value = int(sys.stdin.readline())
    if value == 0:
        if max_heap:
            print(-heapq.heappop(max_heap))
        else:
            print(0)
    else:
        # heapq is a min-heap; store negated values to simulate a max-heap.
        heapq.heappush(max_heap, -value)
| 3.546875 | 4 |
configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py | fengyouliang/wheat_detection | 0 | 12762329 | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
rpn_head=dict(
anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5),
bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', out_size=7, sample_num=2, aligned=False)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', out_size=14, sample_num=2, aligned=False)),
bbox_head=dict(
bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
rpn_proposal=dict(nms_post=2000, max_num=2000),
rcnn=dict(assigner=dict(match_low_quality=True)))
| 1.429688 | 1 |
example-tests/example_FDTD_periodical.py | sbastrakov/pyHiChi | 1 | 12762330 | import pyHiChi as pfc
import numpy as np
import math as ma
def valueEx(x, y, z):
    """Initial Ex field component; identically zero for this example."""
    # A z-propagating variant would return np.sin(z) instead.
    return 0
def valueEy(x, y, z):
    """Initial Ey field component: sine wave along the (x - z) diagonal."""
    phase = x - z
    return np.sin(phase)
def valueEz(x, y, z):
    """Initial Ez field component; identically zero for this example."""
    # A y-propagating variant would return np.sin(y) instead.
    return 0
def valueBx(x, y, z):
    """Initial Bx component for the xz-diagonal wave (Ey amplitude / sqrt(2))."""
    phase = x - z
    return np.sin(phase) / np.sqrt(2.0)
def valueBy(x, y, z):
    """Initial By field component; identically zero for this example."""
    # A z-propagating variant would return np.sin(z) instead.
    return 0
def valueBz(x, y, z):
    """Initial Bz component for the xz-diagonal wave (Ey amplitude / sqrt(2))."""
    phase = x - z
    return np.sin(phase) / np.sqrt(2.0)
def step(minCoords, maxCoords, gridSize):
    """Return the per-axis spatial step (cell size) for the given grid.

    All arguments are pfc.vector3d-like objects with .x/.y/.z attributes.
    """
    steps = pfc.vector3d(1, 1, 1)  # component values are placeholders, overwritten below
    steps.x = (maxCoords.x - minCoords.x)/(gridSize.x)
    steps.y = (maxCoords.y - minCoords.y)/(gridSize.y)
    steps.z = (maxCoords.z - minCoords.z)/(gridSize.z)
    return steps
# Simulation domain: a 20x20x20 Yee grid spanning [0, 2*pi]^3.
gridSize = pfc.vector3d(20, 20, 20)
minCoords = pfc.vector3d(0.0, 0.0, 0.0)
maxCoords = pfc.vector3d(2*ma.pi, 2*ma.pi, 2*ma.pi)
stepsGrid = step(minCoords, maxCoords, gridSize)
timeStep = 1e-14
grid = pfc.YeeGrid(gridSize, timeStep, minCoords, stepsGrid)
# Initialise the E and B fields with the analytic profiles defined above.
grid.setE(valueEx, valueEy, valueEz)
grid.setB(valueBx, valueBy, valueBz)
fieldSolver = pfc.FDTD(grid)
fieldSolver.setPML(0, 0, 0)  # no absorbing layers
periodicalBC = pfc.PeriodicalBC(fieldSolver)  # periodic boundaries on all sides
#show
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# N x N sampling lattice used only for visualisation.
N = 50
eps = 0.0
x = np.arange(eps, 2*ma.pi - eps, 2*(ma.pi-eps)/N)
z = np.arange(eps, 2*ma.pi - eps, 2*(ma.pi-eps)/N)
def getFields():
    """Sample all six field components of the global ``grid`` on the N x N
    visualisation lattice (the y = 0 plane for the xz configuration).

    Returns six (N, N) numpy arrays: Ex, Ey, Ez, Bx, By, Bz.
    """
    global grid, x, z, N
    #print(grid)
    Ex = np.zeros(shape=(N,N))
    Ey = np.zeros(shape=(N,N))
    Ez = np.zeros(shape=(N,N))
    Bx = np.zeros(shape=(N,N))
    By = np.zeros(shape=(N,N))
    Bz = np.zeros(shape=(N,N))
    for ix in range(N):
        for iy in range(N):
            # Probe point in the y = 0 plane.
            coordXZ = pfc.vector3d(x[ix], 0.0, z[iy]) #for x or z or xz
            #coordXZ = pfc.vector3d(x[ix], z[iy], 0.0) #for y or x
            E = grid.getE(coordXZ)
            Ex[ix, iy] = E.x
            Ey[ix, iy] = E.y
            Ez[ix, iy] = E.z
            B = grid.getB(coordXZ)
            Bx[ix, iy] = B.x
            By[ix, iy] = B.y
            Bz[ix, iy] = B.z
    return Ex, Ey, Ez, Bx, By, Bz
def updateData():
    """Advance the global FDTD solver by 1000 time steps."""
    for _ in range(1000):
        fieldSolver.updateFields()
# Initial field snapshot used to create the six image plots.
(Ex, Ey, Ez, Bx, By, Bz) = getFields()
fig, axes = plt.subplots(ncols=3, nrows=2)
im11 = axes[0, 0].imshow(Ex, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im11, ax=axes[0, 0])
im12 = axes[0, 1].imshow(Ey, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im12, ax=axes[0, 1])
im13 = axes[0, 2].imshow(Ez, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im13, ax=axes[0, 2])
im21 = axes[1, 0].imshow(Bx, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im21, ax=axes[1, 0])
im22 = axes[1, 1].imshow(By, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im22, ax=axes[1, 1])
im23 = axes[1, 2].imshow(Bz, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im23, ax=axes[1, 2])
# Frame counter (kept for debugging; not used by the drawing itself).
i = 0
def updatefig(*args):
    """Animation callback: advance the simulation and refresh all six images."""
    global i
    updateData()
    (Ex, Ey, Ez, Bx, By, Bz) = getFields()
    im11.set_array(Ex)
    im12.set_array(Ey)
    im13.set_array(Ez)
    im21.set_array(Bx)
    im22.set_array(By)
    im23.set_array(Bz)
    i = i + 1
    return im11, im12, im13, im21, im22, im23,
ani = animation.FuncAnimation(fig, updatefig, interval=50, blit=True)
plt.show()
| 2.46875 | 2 |
sparse_operation_kit/Deprecated/samples/sample_with_fprop_v4.py | marsmiao/HugeCTR | 0 | 12762331 | """
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
"""
This script is only a demonstration of fprop_v4.
This version will be deprecated in the near future; please update to fprop or fprop_experimental.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
from tensorflow.python.distribute.values import PerReplica
import hugectr_tf_ops
""" 1. Define DNN model with fprop_v4, whole DNN model should be split into two sub-models."""
# define sparse model which contains embedding layer(s)
class PluginSparseModel(tf.keras.models.Model):
    """Sparse sub-model of the demo DNN: wraps the HugeCTR embedding plugin.

    The whole DNN is split in two; this part owns the embedding table and
    performs its forward pass through ``hugectr_tf_ops.fprop_v4``.
    """

    def __init__(self,
                 gpus,
                 batch_size,
                 embedding_type,
                 vocabulary_size,
                 slot_num,
                 embedding_vec_size,
                 optimizer,
                 opt_hparam,
                 update_type,
                 atomic_update,
                 max_feature_num,
                 max_nnz,
                 combiner,
                 gpu_count,
                 initializer=False,
                 name='plugin_embedding'):
        # NOTE: the original signature listed ``embedding_type`` twice (a
        # SyntaxError) and referenced ``optimizer``, ``initializer`` and
        # ``name`` without defining them; they are now proper parameters.
        super(PluginSparseModel, self).__init__()
        self.batch_size = batch_size  # used by call() to build the output shape
        self.vocabulary_size_each_gpu = (vocabulary_size // gpu_count) + 1
        self.slot_num = slot_num
        self.embedding_vec_size = embedding_vec_size
        self.embedding_type = embedding_type
        self.optimizer_type = optimizer
        self.opt_hparam = opt_hparam
        self.update_type = update_type
        self.atomic_update = atomic_update
        self.max_feature_num = max_feature_num
        self.max_nnz = max_nnz
        self.combiner = combiner
        self.gpu_count = gpu_count

        # Make use init() only be called once. It will create resource manager for embedding_plugin.
        hugectr_tf_ops.init(visiable_gpus=gpus, seed=123, key_type='int64', value_type='float',
                            batch_size=batch_size, batch_size_eval=len(gpus))

        # create one embedding layer, and its embedding_name will be unique if there are more than one embedding layer.
        self.embedding_name = hugectr_tf_ops.create_embedding(initializer, name_=name, embedding_type=self.embedding_type,
                                                              optimizer_type=self.optimizer_type,
                                                              max_vocabulary_size_per_gpu=self.vocabulary_size_each_gpu,
                                                              opt_hparams=self.opt_hparam, update_type=self.update_type,
                                                              atomic_update=self.atomic_update, slot_num=self.slot_num,
                                                              max_nnz=self.max_nnz, max_feature_num=self.max_feature_num,
                                                              embedding_vec_size=self.embedding_vec_size,
                                                              combiner=self.combiner)

    def build(self, _):
        # Dummy trainable variable; gradients w.r.t. this weight trigger the
        # plugin's backward pass (see fprop_v4's bp_trigger argument).
        self.bp_trigger = self.add_weight(name='bp_trigger', shape=(1,), dtype=tf.float32, trainable=True)

    @tf.function
    def call(self, row_indices, values, training=True):
        """Forward propagation of the embedding layer.

        Returns a dense tensor of shape [batch_size, slot_num, embedding_vec_size].
        """
        return hugectr_tf_ops.fprop_v4(embedding_name=self.embedding_name, row_indices=row_indices,
                                       values=values, bp_trigger=self.bp_trigger, is_training=training,
                                       output_shape=[self.batch_size, self.slot_num, self.embedding_vec_size])
# define dense model which contains other parts of the DNN model
class DenseModel(tf.keras.models.Model):
    """Dense sub-model of the demo DNN: an MLP over the flattened embedding
    output ending in a single sigmoid unit (binary classification)."""
    def __init__(self, num_layers):
        # num_layers: total layer count; (num_layers - 1) hidden ReLU layers
        # of 1024 units, followed by the sigmoid output layer.
        super(DenseModel, self).__init__()
        self.num_layers = num_layers
        self.dense_layers = []
        for _ in range(num_layers - 1):
            self.dense_layers.append(tf.keras.layers.Dense(units=1024, activation='relu'))
        self.out_layer = tf.keras.layers.Dense(units=1, activation='sigmoid', use_bias=True,
                                               kernel_initializer='glorot_normal',
                                               bias_initializer='glorot_normal')

    @tf.function
    def call(self, inputs, training=True):
        # Flatten [batch, 26 slots, 32-dim vectors] -> [batch, 832].
        hidden = tf.reshape(inputs, [tf.shape(inputs)[0], 26 * 32]) # [batchsize, slot_num * embedding_vec_size]
        for i in range(self.num_layers - 1):
            hidden = self.dense_layers[i](hidden)
        result = self.out_layer(hidden)
        return result
""" 2.Define training loop with the model mentioned above """
def main():
    """Skeleton training loop showing how the sparse (embedding plugin) and
    dense sub-models cooperate under MirroredStrategy.

    NOTE: the ``PluginSparseModel(...)``, ``DenseModel(...)`` and
    ``dataset = ...`` placeholders must be filled in before this runs.
    """
    # create MirroredStrategy with specified GPUs.
    strategy = tf.distribute.MirroredStrategy(devices=["/GPU:" + str(i) for i in range(gpu_count)])
    # create sparse model outside the scope of MirroredStrategy
    sparse_model = PluginSparseModel(...)
    sparse_opt = tf.keras.optimizers.SGD()
    # create dense model inside the scope of MirroredStrategy
    with strategy.scope():
        dense_model = DenseModel(...)
        dense_opt = tf.keras.optimizers.SGD()
    # define loss function for each replica
    loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE)
    def _replica_loss(labels, logits):
        # Per-replica loss, averaged against the *global* batch size.
        loss_value = loss_fn(labels, logits)
        return tf.nn.compute_average_loss(loss_value, global_batch_size=batch_size)
    # define dense model train step
    @tf.function
    def dense_train_step(dense_inputs, labels):
        with tf.GradientTape() as tape:
            # should watch inputs, in order to obtain gradients later
            tape.watch(dense_inputs)
            logits = dense_model(dense_inputs)
            replica_loss = _replica_loss(labels, logits)
        # Gradients w.r.t. both the dense weights and the embedding inputs;
        # the input gradients are fed back to the sparse model below.
        grads, input_grads = tape.gradient(replica_loss, [dense_model.trainable_weights, dense_inputs])
        dense_opt.apply_gradients(zip(grads, dense_model.trainable_weights))
        return replica_loss, input_grads
    # define whole model train step
    @tf.function
    def total_train_step(row_indices, values, labels):
        with tf.GradientTape() as tape:
            # do embedding fprop
            embedding_results = sparse_model(row_indices, values)
            # convert to PerReplica
            dense_inputs = tf.split(embedding_results, num_or_size_splits=gpu_count)
            dense_inputs = PerReplica(dense_inputs)
            labels = tf.expand_dims(labels, axis=1)
            labels = tf.split(labels, num_or_size_splits=gpu_count)
            labels = PerReplica(labels)
            replica_loss, input_grads = strategy.run(dense_train_step, args=(dense_inputs, labels))
        # gather all grads from dense replicas
        all_grads = tf.concat(input_grads.values, axis=0)
        # do embedding backward
        embedding_grads = tape.gradient(embedding_results, sparse_model.trainable_weights, output_gradients=all_grads)
        sparse_opt.apply_gradients(zip(embedding_grads, sparse_model.trainable_weights))
        return strategy.reduce(tf.distribute.ReduceOp.SUM, replica_loss, axis=None)
    # create a tf.data.Dataset to read data
    dataset = ...
    # training loop
    for step, (row_indices, values, labels) in enumerate(dataset):
        total_loss = total_train_step(row_indices, values, labels)
        # you can save model, print loss or do sth. else.
pandas_ml_utils/model/fitting/splitting.py | KIC/pandas_utils | 3 | 12762332 | <filename>pandas_ml_utils/model/fitting/splitting.py
from __future__ import annotations
import logging
from typing import Tuple
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split as sk_train_test_split
_log = logging.getLogger(__name__)
def train_test_split(index: pd.Index,
                     test_size: float = 0.4,
                     youngest_size: float = None,
                     seed: int = 42) -> Tuple[pd.Index, pd.Index]:
    """Split *index* into (train, test) indices.

    With ``seed='youngest'`` the split is deterministic: the oldest rows
    become the train set and the youngest ``test_size`` fraction the test
    set.  Otherwise a randomized sklearn split is used; when
    ``youngest_size`` is given, that share of the test set is always taken
    from the youngest rows.
    """
    # Work on the underlying numpy array.
    values = index.values
    n = len(values)

    if test_size <= 0:
        # No test set requested: all rows train, empty test.
        train_part, test_part = values, values[:0]
    elif seed == 'youngest':
        split_at = int(n - n * test_size)
        train_part, test_part = values[:split_at], values[split_at:]
    else:
        random_test_size = test_size if youngest_size is None else test_size * (1 - youngest_size)
        sample_pool_size = int(n - n * (test_size - random_test_size))

        if sample_pool_size < n:
            _log.warning(f"keeping youngest {n - sample_pool_size} elements in test set")

            # Randomize only the older portion, then append the reserved
            # youngest rows to the test set.
            train_part, test_part = \
                sk_train_test_split(values[:sample_pool_size],
                                    test_size=random_test_size, random_state=seed)
            test_part = np.hstack([test_part, values[sample_pool_size:]])  # index is 1D
        else:
            train_part, test_part = sk_train_test_split(values, test_size=random_test_size, random_state=seed)

    return pd.Index(train_part), pd.Index(test_part)
| 2.921875 | 3 |
jemp.py | ReggieCodes/tacklebox | 6 | 12762333 | import json
import http.client
# Query the radio.co public status endpoint for the JEMP station and print
# the currently playing track plus the (reconstructed) show date.
conn = http.client.HTTPSConnection("public.radio.co")
station = 'stations/sd71de59b3/status'
payload = "{}"
conn.request("GET", station, payload)
res = conn.getresponse()
data = res.read()
json_string = data.decode("utf-8")
now_playing = json.loads(json_string)
print("JEMP is currently playing: " + now_playing["current_track"]["title"])
song = now_playing["current_track"]["title"]
# Titles are assumed to look like "Phish - Song (M-D-YY)" -- TODO confirm
# against the station's actual naming scheme.
artist = song[0:7]
# Text between the first pair of parentheses, expected to be an M-D-YY date.
showdate = song[song.find("(")+1:song.find(")")]
if artist == 'Phish -':
    print("Phish is playing!")
    # Two-digit year pivot: 00-79 -> 2000s, 80-99 -> 1900s.
    if int(showdate[-2:]) < 80:
        year = str(int(showdate[-2:]) + 2000)
    else:
        year = str(int(showdate[-2:]) + 1900)
    month = showdate[:-3]
    month = "000" + month[:month.find("-")]
    month = month[-2:]  # zero-pad the month to two digits
    day = showdate[:-3]
    day = "000" + day[day.find("-")+1:]
    day = day[-2:]  # zero-pad the day to two digits
    show = year + "-" + month + "-" + day
else:
    print("Phish is not playing :(")
    show = "1900-01-01"  # sentinel date when no Phish show is on air
print(show)
setup.py | WarrenWeckesser/voronoiz | 1 | 12762334 | from setuptools import setup
# Distribution metadata for the voronoiz package.
setup(
    name='voronoiz',
    version='0.1.0',
    author='<NAME>',
    description="Functions for generating Voronoi diagrams with "
                "alternate metrics.",
    license="MIT",
    url="https://github.com/WarrenWeckesser/voronoiz",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    keywords="voronoi",
    packages=['voronoiz'],
    # Runtime dependencies.
    install_requires=['scipy', 'shapely']
)
| 1.0625 | 1 |
test-data/unit/fixtures/module_all.py | ddfisher/mypy | 0 | 12762335 | <gh_stars>0
from typing import Generic, Sequence, TypeVar
# Minimal builtins stub used as a mypy test fixture. Definitions are
# intentionally incomplete: only the members the test cases exercise exist.
_T = TypeVar('_T')
class object:
    def __init__(self) -> None: pass
class module: pass
class type: pass
class function: pass
class int: pass
class str: pass
class list(Generic[_T], Sequence[_T]):
    def append(self, x: _T): pass
    def extend(self, x: Sequence[_T]): pass
    def __add__(self, rhs: Sequence[_T]) -> list[_T]: pass
class tuple: pass
| 3.046875 | 3 |
dcl/tradegecko/fixerio/exceptions.py | OlamideD/zutron | 16 | 12762336 | class FixerioException(BaseException):
""" Common base class for all fixerio exceptions. """
pass
| 1.34375 | 1 |
pyreds/__init__.py | 7anshuai/pyreds | 9 | 12762337 | from pyreds.reds import (
set_client,
create_client,
create_search,
Query,
Search
)
# Semantic version of the pyreds package.
__version__ = '0.1.4'
# Tuple form (major, minor, patch) for programmatic comparison.
VERSION = tuple(int(piece) for piece in __version__.split('.'))

# Names re-exported as the public API of the package.
__all__ = [
    'set_client',
    'create_client',
    'create_search',
    'Query',
    'Search',
]
| 1.742188 | 2 |
pwncat/modules/linux/enumerate/user/__init__.py | Mitul16/pwncat | 1,454 | 12762338 | <reponame>Mitul16/pwncat<filename>pwncat/modules/linux/enumerate/user/__init__.py<gh_stars>1000+
#!/usr/bin/env python3
import pwncat
from pwncat.modules import Status, ModuleFailed
from pwncat.facts.linux import LinuxUser
from pwncat.platform.linux import Linux
from pwncat.modules.enumerate import Schedule, EnumerateModule
class Module(EnumerateModule):
    """Enumerate local users on a Linux target by parsing ``/etc/passwd``
    and, when readable, merging the password/aging fields from
    ``/etc/shadow``."""

    PROVIDES = ["user"]
    PLATFORM = [Linux]
    SCHEDULE = Schedule.ONCE

    def enumerate(self, session: "pwncat.manager.Session"):
        """Yield a ``LinuxUser`` fact for every account found on the target.

        ``/etc/passwd`` is required; failing to read it raises
        ``ModuleFailed``.  ``/etc/shadow`` is best-effort: it is normally
        root-only, so permission errors are silently ignored.
        """

        passwd = session.platform.Path("/etc/passwd")
        shadow = session.platform.Path("/etc/shadow")

        users = {}

        # /etc/passwd: "name:hash:uid:gid:comment:home:shell" per line.
        try:
            with passwd.open("r") as filp:
                for user_info in filp:
                    try:
                        (
                            name,
                            hash,
                            uid,
                            gid,
                            comment,
                            home,
                            shell,
                        ) = user_info.split(":")

                        user = LinuxUser(
                            self.name,
                            name,
                            hash,
                            int(uid),
                            int(gid),
                            comment,
                            home,
                            shell,
                        )

                        users[name] = user
                        # Report progress while parsing continues.
                        yield Status(user)
                    except Exception:
                        # Malformed passwd line; skip it.
                        continue
        except (FileNotFoundError, PermissionError) as exc:
            raise ModuleFailed(str(exc)) from exc

        # /etc/shadow: merge hashes and password-aging info (best-effort).
        try:
            with shadow.open("r") as filp:
                for user_info in filp:
                    try:
                        (
                            name,
                            hash,
                            last_change,
                            min_age,
                            max_age,
                            warn_period,
                            inactive_period,
                            expir_date,
                            reserved,
                        ) = user_info.split(":")

                        if users[name].hash is None:
                            users[name].hash = hash if hash != "" else None
                        if users[name].password is None and hash == "":
                            # An empty hash field means a password-less account.
                            users[name].password = ""
                        users[name].last_change = int(last_change)
                        users[name].min_age = int(min_age)
                        users[name].max_age = int(max_age)
                        users[name].warn_period = int(warn_period)
                        users[name].inactive_period = int(inactive_period)
                        users[name].expiration = int(expir_date)
                        users[name].reserved = reserved
                    except (KeyError, ValueError, IndexError):
                        # KeyError: a shadow entry with no matching passwd user
                        # (previously uncaught, which failed the whole module);
                        # ValueError/IndexError: malformed or empty fields.
                        continue
        except (FileNotFoundError, PermissionError):
            # Shadow is normally unreadable for non-root users.
            pass
        except Exception as exc:
            raise ModuleFailed(str(exc)) from exc

        # Yield all the known users after attempting to parse /etc/shadow
        yield from users.values()
| 2.453125 | 2 |
scripts/pyPlotM.py | srinivas32/mirtop | 0 | 12762339 | <gh_stars>0
import matplotlib.pyplot as plt
def makePlots(tsvFileN, pdfFileN, show):
    """Render per-entry count-comparison bar charts from a TSV file.

    Parameters
    ----------
    tsvFileN : str
        Input TSV with columns: plot-index, name, <unused>, count, tool
        (first line is a header).
    pdfFileN : str
        Output path for the figure (written as PDF).
    show : int
        When 1, also display the figure interactively.
    """
    # Read the whole file into a list of tab-split field lists.
    with open(tsvFileN, "r") as ins:
        lines = [line.split('\t') for line in ins]

    # The first column is the subplot index; find how many subplots we need.
    maxPlots = 0
    for x in range(1, len(lines)):
        idc = int(lines[x][0]) + 1
        if maxPlots < idc:
            maxPlots = idc

    # Grid layout: 3 columns, enough rows for all plots.
    # Integer ceiling division -- the original used "/" which yields a float
    # on Python 3 and breaks both plt.subplots() and the 3-digit subplot
    # index arithmetic below.
    cols = 3
    rows = -(-maxPlots // cols)
    plt.subplots(rows, cols, figsize=(8, 6), sharex=True)

    # array[i] = [bcbio, mirge, synthetic, <unused>, name] for subplot i.
    array = [[0, 0, 0, 0, 0] for _ in range(maxPlots)]

    # Fill the per-subplot counts from the data rows (skip the header).
    for x in range(1, len(lines)):
        idc = int(lines[x][0])
        cnt = int(lines[x][3])
        typ = lines[x][4].strip()
        nam = lines[x][1].strip()
        pos = 3
        if typ == 'synthetic':
            pos = 2
        if typ == 'bcbio':
            pos = 0
        if typ == 'mirge':
            pos = 1
        array[idc][pos] = cnt
        array[idc][4] = nam

    # Draw one bar chart per subplot.
    plt.figure(1)
    for i in range(maxPlots):
        counts = array[i][0:3]
        name = array[i][4]
        # 3-digit subplot position: RCP (rows, cols, plot number).
        pcd = rows * 100 + cols * 10 + 1 + i
        plt.subplot(pcd)
        ax = plt.gca()
        ax.set_facecolor('lightgray')
        plt.xticks([1, 2, 3], ('bcbio', 'mirge', 'synthetic'))
        plt.yticks([0, 10, 20, 30, 40, 50])
        plt.tick_params(axis='both', which='major', labelsize=8)
        plt.tick_params(axis='both', which='minor', labelsize=8)
        plt.bar([1, 2, 3], counts, color='gray')
        plt.title(name)
        # Annotate each bar with its numeric value at the baseline.
        for j, v in enumerate(counts):
            plt.text(j + 0.9, 0, str(v), color='black', fontsize='8', fontweight='bold')

    plt.subplots_adjust(top=0.92, bottom=0.10, left=0.10, right=0.95, hspace=0.50, wspace=0.35)
    plt.savefig(pdfFileN, format="pdf")
    if show == 1:
        plt.show()
makePlots("../data/examples/plot/example_count.tsv", "kk.pdf", 1)
| 2.671875 | 3 |
venv/lib/python3.7/site-packages/scapy/arch/pcapdnet.py | nicholasadamou/python-proxy | 0 | 12762340 | <gh_stars>0
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
"""
Packet sending and receiving with libdnet and libpcap/WinPcap.
"""
import os
import platform
import socket
import struct
import time
from ctypes import c_ubyte, cast
from scapy.data import MTU, ETH_P_ALL, ARPHDR_ETHER, ARPHDR_LOOPBACK
from scapy.compat import raw, plain_str, chb
from scapy.config import conf
from scapy.consts import WINDOWS
from scapy.utils import mac2str
from scapy.supersocket import SuperSocket
from scapy.error import Scapy_Exception, log_loading, warning
from scapy.pton_ntop import inet_ntop
from scapy.automaton import SelectableObject
import scapy.consts
if not scapy.consts.WINDOWS:
from fcntl import ioctl
############
# COMMON #
############
# From BSD net/bpf.h
# BIOCIMMEDIATE = 0x80044270
BIOCIMMEDIATE = -2147204496
class PcapTimeoutElapsed(Scapy_Exception):
pass
class _L2pcapdnetSocket(SuperSocket, SelectableObject):
    """Common layer-2 pcap socket base: turns raw pcap captures into
    dissected packets and adapts pcap's polling model to SuperSocket."""
    read_allowed_exceptions = (PcapTimeoutElapsed,)

    def check_recv(self):
        # SelectableObject hook: pcap handles are always reported readable.
        return True

    def recv_raw(self, x=MTU):
        """Receives a packet, then returns a tuple containing (cls, pkt_data, time)"""  # noqa: E501
        # Map the pcap datalink type to the matching scapy layer-2 class.
        ll = self.ins.datalink()
        if ll in conf.l2types:
            cls = conf.l2types[ll]
        else:
            cls = conf.default_l2
            warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s",  # noqa: E501
                    self.iface, ll, cls.name)

        # Poll until a packet arrives; on Windows a timed-out read must raise
        # instead of spinning, because pcap handles cannot block forever there.
        pkt = None
        while pkt is None:
            pkt = self.ins.next()
            if pkt is not None:
                ts, pkt = pkt
            if pkt is None and scapy.consts.WINDOWS:
                raise PcapTimeoutElapsed  # To understand this behavior, have a look at L2pcapListenSocket's note  # noqa: E501
        if pkt is None:
            return None, None, None
        return cls, pkt, ts

    def nonblock_recv(self):
        """Receives and dissect a packet in non-blocking mode.
        Note: on Windows, this won't do anything."""
        self.ins.setnonblock(1)
        p = self.recv(MTU)
        self.ins.setnonblock(0)
        return p

    @staticmethod
    def select(sockets, remain=None):
        """This function is called during sendrecv() routine to select
        the available sockets.
        """
        # pcap sockets aren't selectable, so we return all of them
        # and ask the selecting functions to use nonblock_recv instead of recv
        def _sleep_nonblock_recv(self):
            try:
                res = self.nonblock_recv()
                if res is None:
                    # Throttle the polling loop to avoid busy-waiting.
                    time.sleep(conf.recv_poll_rate)
                return res
            except PcapTimeoutElapsed:
                return None

        return sockets, _sleep_nonblock_recv
###################
# WINPCAP/NPCAP #
###################
if conf.use_winpcapy:
NPCAP_PATH = os.environ["WINDIR"] + "\\System32\\Npcap"
# Part of the Winpcapy integration was inspired by phaethon/scapy
# but he destroyed the commit history, so there is no link to that
try:
from scapy.modules.winpcapy import PCAP_ERRBUF_SIZE, pcap_if_t, \
sockaddr_in, sockaddr_in6, pcap_findalldevs, pcap_freealldevs, \
pcap_lib_version, pcap_create, pcap_close, pcap_set_snaplen, \
pcap_set_promisc, pcap_set_timeout, pcap_set_rfmon, \
pcap_activate, pcap_open_live, pcap_setmintocopy, pcap_pkthdr, \
pcap_next_ex, pcap_datalink, \
pcap_compile, pcap_setfilter, pcap_setnonblock, pcap_sendpacket, \
bpf_program as winpcapy_bpf_program
        def load_winpcapy():
            """This function calls the Winpcap/Npcap pcap_findalldevs function,
            and extracts and parses all the data scapy will need to use it:
             - the Interface List
             - the IPv4 addresses
             - the IPv6 addresses
            This data is stored in their respective conf.cache_* subfields:
            conf.cache_iflist
            conf.cache_ipaddrs
            conf.cache_in6_getifaddr
            """
            err = create_string_buffer(PCAP_ERRBUF_SIZE)
            devs = POINTER(pcap_if_t)()
            if_list = []
            ip_addresses = {}
            ip6_addresses = []
            # pcap_findalldevs fills `devs` with a C linked list of interfaces.
            if pcap_findalldevs(byref(devs), err) < 0:
                return
            try:
                p = devs
                # Iterate through the different interfaces
                while p:
                    if_list.append(plain_str(p.contents.name))
                    # Walk the per-interface linked list of addresses.
                    a = p.contents.addresses
                    while a:
                        # IPv4 address
                        if a.contents.addr.contents.sa_family == socket.AF_INET:  # noqa: E501
                            ap = a.contents.addr
                            val = cast(ap, POINTER(sockaddr_in))
                            if_raw_addr = b"".join(chb(x) for x in val.contents.sin_addr[:4])  # noqa: E501
                            if if_raw_addr != b'\x00\x00\x00\x00':
                                ip_addresses[plain_str(p.contents.name)] = if_raw_addr  # noqa: E501
                        # IPv6 address
                        if a.contents.addr.contents.sa_family == socket.AF_INET6:  # noqa: E501
                            ap = a.contents.addr
                            val = cast(ap, POINTER(sockaddr_in6))
                            addr = inet_ntop(socket.AF_INET6, b"".join(chb(x) for x in val.contents.sin6_addr[:]))  # noqa: E501
                            scope = scapy.utils6.in6_getscope(addr)
                            ip6_addresses.append((addr, scope, plain_str(p.contents.name)))  # noqa: E501
                        a = a.contents.next
                    p = p.contents.next
                conf.cache_iflist = if_list
                conf.cache_ipaddrs = ip_addresses
                conf.cache_in6_getifaddr = ip6_addresses
            except Exception:
                raise
            finally:
                # Always release the C linked list allocated by libpcap.
                pcap_freealldevs(devs)
# Detect Pcap version
version = pcap_lib_version()
if b"winpcap" in version.lower():
if os.path.exists(NPCAP_PATH + "\\wpcap.dll"):
warning("Winpcap is installed over Npcap. Will use Winpcap (see 'Winpcap/Npcap conflicts' in scapy's docs)") # noqa: E501
elif platform.release() != "XP":
warning("WinPcap is now deprecated (not maintened). Please use Npcap instead") # noqa: E501
elif b"npcap" in version.lower():
conf.use_npcap = True
LOOPBACK_NAME = scapy.consts.LOOPBACK_NAME = "Npcap Loopback Adapter" # noqa: E501
except OSError:
conf.use_winpcapy = False
if conf.interactive:
log_loading.warning("wpcap.dll is not installed. You won't be able to send/receive packets. Visit the scapy's doc to install it") # noqa: E501
if conf.use_winpcapy:
        def get_if_raw_addr(iff):  # noqa: F811
            """Returns the raw ip address corresponding to the NetworkInterface."""  # noqa: E501
            # The caches are populated lazily by load_winpcapy().
            if not conf.cache_ipaddrs:
                load_winpcapy()
            return conf.cache_ipaddrs.get(iff.pcap_name, None)

        def get_if_list():
            """Returns all pcap names"""
            if not conf.cache_iflist:
                load_winpcapy()
            return conf.cache_iflist

        def in6_getifaddr_raw():
            """Returns all available IPv6 on the computer, read from winpcap."""  # noqa: E501
            if not conf.cache_in6_getifaddr:
                load_winpcapy()
            return conf.cache_in6_getifaddr
else:
get_if_raw_addr = lambda x: None
get_if_list = lambda: []
in6_getifaddr_raw = lambda: []
from ctypes import POINTER, byref, create_string_buffer
class _PcapWrapper_winpcap: # noqa: F811
"""Wrapper for the WinPcap calls"""
def __init__(self, device, snaplen, promisc, to_ms, monitor=None):
self.errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
self.iface = create_string_buffer(device.encode("utf8"))
if monitor:
self.pcap = pcap_create(self.iface, self.errbuf)
pcap_set_snaplen(self.pcap, snaplen)
pcap_set_promisc(self.pcap, promisc)
pcap_set_timeout(self.pcap, to_ms)
if pcap_set_rfmon(self.pcap, 1) != 0:
warning("Could not set monitor mode")
if pcap_activate(self.pcap) != 0:
raise OSError("Could not activate the pcap handler")
else:
self.pcap = pcap_open_live(self.iface, snaplen, promisc, to_ms, self.errbuf) # noqa: E501
# Winpcap/Npcap exclusive: make every packet to be instantly
# returned, and not buffered within Winpcap/Npcap
pcap_setmintocopy(self.pcap, 0)
self.header = POINTER(pcap_pkthdr)()
self.pkt_data = POINTER(c_ubyte)()
self.bpf_program = winpcapy_bpf_program()
def next(self):
c = pcap_next_ex(self.pcap, byref(self.header), byref(self.pkt_data)) # noqa: E501
if not c > 0:
return
ts = self.header.contents.ts.tv_sec + float(self.header.contents.ts.tv_usec) / 1000000 # noqa: E501
pkt = b"".join(chb(i) for i in self.pkt_data[:self.header.contents.len]) # noqa: E501
return ts, pkt
__next__ = next
def datalink(self):
return pcap_datalink(self.pcap)
def fileno(self):
if WINDOWS:
log_loading.error("Cannot get selectable PCAP fd on Windows")
return 0
else:
# This does not exist under Windows
from scapy.modules.winpcapy import pcap_get_selectable_fd
return pcap_get_selectable_fd(self.pcap)
def setfilter(self, f):
filter_exp = create_string_buffer(f.encode("utf8"))
if pcap_compile(self.pcap, byref(self.bpf_program), filter_exp, 0, -1) == -1: # noqa: E501
log_loading.error("Could not compile filter expression %s", f)
return False
else:
if pcap_setfilter(self.pcap, byref(self.bpf_program)) == -1:
log_loading.error("Could not install filter %s", f)
return False
return True
def setnonblock(self, i):
pcap_setnonblock(self.pcap, i, self.errbuf)
def send(self, x):
pcap_sendpacket(self.pcap, x, len(x))
        def close(self):
            """Release the underlying pcap handle."""
            pcap_close(self.pcap)
open_pcap = lambda *args, **kargs: _PcapWrapper_winpcap(*args, **kargs)
################
# PCAP/PCAPY #
################
# Pick a pcap binding, trying the most battle-tested module first; fall
# back gracefully (or raise in non-interactive mode) when none is present.
if conf.use_pcap:
    # We try from most to less tested/used
    try:
        import pcapy as pcap  # python-pcapy
        _PCAP_MODE = "pcapy"
    except ImportError as e:
        try:
            import pcap  # python-pypcap
            _PCAP_MODE = "pypcap"
        except ImportError as e2:
            try:
                # This is our last chance, but we don't really
                # recommand it as very little tested
                import libpcap as pcap  # python-libpcap
                _PCAP_MODE = "libpcap"
            except ImportError:
                if conf.interactive:
                    log_loading.error(
                        "Unable to import any of the pcap "
                        "modules: %s/%s", e, e2
                    )
                    conf.use_pcap = False
                else:
                    raise
if conf.use_pcap:
    if _PCAP_MODE == "pypcap":  # python-pypcap
        class _PcapWrapper_pypcap:  # noqa: F811
            """Adapter giving python-pypcap the common wrapper interface
            (next/send/setnonblock/close) expected by the socket classes."""
            def __init__(self, device, snaplen, promisc,
                         to_ms, monitor=False):
                try:
                    self.pcap = pcap.pcap(device, snaplen, promisc, immediate=1, timeout_ms=to_ms, rfmon=monitor)  # noqa: E501
                except TypeError:
                    try:
                        if monitor:
                            warning("Your pypcap version is too old to support monitor mode, Please use pypcap 1.2.1+ !")  # noqa: E501
                        self.pcap = pcap.pcap(device, snaplen, promisc, immediate=1, timeout_ms=to_ms)  # noqa: E501
                    except TypeError:
                        # Even older pypcap versions do not support the timeout_ms argument  # noqa: E501
                        self.pcap = pcap.pcap(device, snaplen, promisc, immediate=1)  # noqa: E501
            def __getattr__(self, attr):
                # Delegate everything else to the underlying pcap object.
                return getattr(self.pcap, attr)
            def setnonblock(self, i):
                self.pcap.setnonblock(i)
            def close(self):
                try:
                    self.pcap.close()
                except AttributeError:
                    warning("close(): don't know how to close the file "
                            "descriptor. Bugs ahead! Please use python-pypcap 1.2.1+")  # noqa: E501
            def send(self, x):
                self.pcap.sendpacket(x)
            def next(self):
                """Return (timestamp, raw_packet) or None if nothing read."""
                c = self.pcap.next()
                if c is None:
                    return
                ts, pkt = c
                return ts, raw(pkt)
            __next__ = next
        open_pcap = lambda *args, **kargs: _PcapWrapper_pypcap(*args, **kargs)  # noqa: E501
    elif _PCAP_MODE == "libpcap":  # python-libpcap
        class _PcapWrapper_libpcap:
            """Adapter giving python-libpcap the common wrapper interface."""
            def __init__(self, device, snaplen, promisc, to_ms, monitor=False):  # noqa: E501
                self.errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
                if monitor:
                    # pcap_create path is required to request rfmon mode.
                    self.pcap = pcap.pcap_create(device, self.errbuf)
                    pcap.pcap_set_snaplen(self.pcap, snaplen)
                    pcap.pcap_set_promisc(self.pcap, promisc)
                    pcap.pcap_set_timeout(self.pcap, to_ms)
                    if pcap.pcap_set_rfmon(self.pcap, 1) != 0:
                        warning("Could not set monitor mode")
                    if pcap.pcap_activate(self.pcap) != 0:
                        raise OSError("Could not activate the pcap handler")  # noqa: E501
                else:
                    self.pcap = pcap.open_live(device, snaplen, promisc, to_ms)  # noqa: E501
            def setfilter(self, filter):
                self.pcap.setfilter(filter, 0, 0)
            def next(self):
                """Return (timestamp, packet) or None when nothing was read."""
                c = self.pcap.next()
                if c is None:
                    return
                l, pkt, ts = c
                return ts, pkt
            __next__ = next
            def setnonblock(self, i):
                pcap.pcap_setnonblock(self.pcap, i, self.errbuf)
            def __getattr__(self, attr):
                # Delegate everything else to the pcap handle.
                return getattr(self.pcap, attr)
            def send(self, x):
                pcap.pcap_sendpacket(self.pcap, x, len(x))
            def close(self):
                pcap.close(self.pcap)
        open_pcap = lambda *args, **kargs: _PcapWrapper_libpcap(*args, **kargs)  # noqa: E501
    elif _PCAP_MODE == "pcapy":  # python-pcapy
        class _PcapWrapper_pcapy:
            """Adapter giving python-pcapy the common wrapper interface."""
            def __init__(self, device, snaplen, promisc, to_ms, monitor=False):  # noqa: E501
                if monitor:
                    try:
                        # pcap.create path: required to request rfmon mode
                        # before activation.
                        self.pcap = pcap.create(device)
                        self.pcap.set_snaplen(snaplen)
                        self.pcap.set_promisc(promisc)
                        self.pcap.set_timeout(to_ms)
                        if self.pcap.set_rfmon(1) != 0:
                            warning("Could not set monitor mode")
                        if self.pcap.activate() != 0:
                            raise OSError("Could not activate the pcap handler")  # noqa: E501
                    except AttributeError:
                        raise OSError("Your pcapy version does not support"
                                      "monitor mode ! Use pcapy 0.11.4+")
                else:
                    self.pcap = pcap.open_live(device, snaplen, promisc, to_ms)  # noqa: E501
            def next(self):
                """Return (timestamp, packet) or None on timeout/error."""
                try:
                    c = self.pcap.next()
                except pcap.PcapError:
                    return None
                else:
                    h, p = c
                    if h is None:
                        return
                    s, us = h.getts()
                    return (s + 0.000001 * us), p
            __next__ = next
            def fileno(self):
                try:
                    return self.pcap.getfd()
                except AttributeError:
                    warning("fileno: getfd() does not exist. Please use "
                            "pcapy 0.11.3+ !")
            def setnonblock(self, i):
                self.pcap.setnonblock(i)
            def __getattr__(self, attr):
                # Delegate everything else to the pcapy reader.
                return getattr(self.pcap, attr)
            def send(self, x):
                self.pcap.sendpacket(x)
            def close(self):
                try:
                    self.pcap.close()
                except AttributeError:
                    warning("close(): don't know how to close the file "
                            "descriptor. Bugs ahead! Please update pcapy!")
        open_pcap = lambda *args, **kargs: _PcapWrapper_pcapy(*args, **kargs)  # noqa: E501
#################
# PCAP/WINPCAPY #
#################
if conf.use_pcap or conf.use_winpcapy:
    class L2pcapListenSocket(_L2pcapdnetSocket):
        """Read-only layer-2 socket backed by libpcap (sniffing)."""
        desc = "read packets at layer 2 using libpcap"
        def __init__(self, iface=None, type=ETH_P_ALL, promisc=None, filter=None, monitor=None):  # noqa: E501
            self.type = type
            self.outs = None
            self.iface = iface
            if iface is None:
                iface = conf.iface
            if promisc is None:
                promisc = conf.sniff_promisc
            self.promisc = promisc
            # Note: Timeout with Winpcap/Npcap
            # The 4th argument of open_pcap corresponds to timeout. In an ideal world, we would  # noqa: E501
            # set it to 0 ==> blocking pcap_next_ex.
            # However, the way it is handled is very poor, and result in a jerky packet stream.  # noqa: E501
            # To fix this, we set 100 and the implementation under windows is slightly different, as  # noqa: E501
            # everything is always received as non-blocking
            self.ins = open_pcap(iface, MTU, self.promisc, 100, monitor=monitor)  # noqa: E501
            try:
                # Best effort: request immediate packet delivery (BPF);
                # silently ignored where unsupported.
                ioctl(self.ins.fileno(), BIOCIMMEDIATE, struct.pack("I", 1))
            except Exception:
                pass
            if type == ETH_P_ALL:  # Do not apply any filter if Ethernet type is given  # noqa: E501
                if conf.except_filter:
                    if filter:
                        filter = "(%s) and not (%s)" % (filter, conf.except_filter)  # noqa: E501
                    else:
                        filter = "not (%s)" % conf.except_filter
                if filter:
                    self.ins.setfilter(filter)
        def send(self, x):
            """Listen-only socket: sending is not supported."""
            raise Scapy_Exception("Can't send anything with L2pcapListenSocket")  # noqa: E501
class L2pcapSocket(_L2pcapdnetSocket):
desc = "read/write packets at layer 2 using only libpcap"
def __init__(self, iface=None, type=ETH_P_ALL, promisc=None, filter=None, nofilter=0, # noqa: E501
monitor=None):
if iface is None:
iface = conf.iface
self.iface = iface
if promisc is None:
promisc = 0
self.promisc = promisc
# See L2pcapListenSocket for infos about this line
self.ins = open_pcap(iface, MTU, self.promisc, 100, monitor=monitor) # noqa: E501
# We need to have a different interface open because of an
# access violation in Npcap that occurs in multi-threading
# (see https://github.com/nmap/nmap/issues/982)
self.outs = open_pcap(iface, MTU, self.promisc, 100)
try:
ioctl(self.ins.fileno(), BIOCIMMEDIATE, struct.pack("I", 1))
except Exception:
pass
if nofilter:
if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap # noqa: E501
filter = "ether proto %i" % type
else:
filter = None
else:
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter) # noqa: E501
else:
filter = "not (%s)" % conf.except_filter
if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap # noqa: E501
if filter:
filter = "(ether proto %i) and (%s)" % (type, filter)
else:
filter = "ether proto %i" % type
if filter:
self.ins.setfilter(filter)
def send(self, x):
sx = raw(x)
if hasattr(x, "sent_time"):
x.sent_time = time.time()
return self.outs.send(sx)
def close(self):
if not self.closed:
ins = getattr(self, "ins", None)
out = getattr(self, "out", None)
if ins:
self.ins.close()
if out and out != ins:
self.outs.close()
self.closed = True
    class L3pcapSocket(L2pcapSocket):
        """Layer-3 variant: strips/prepends the link-layer header."""
        desc = "read/write packets at layer 3 using only libpcap"
        # def __init__(self, iface = None, type = ETH_P_ALL, filter=None, nofilter=0):  # noqa: E501
        #     L2pcapSocket.__init__(self, iface, type, filter, nofilter)
        def recv(self, x=MTU):
            """Receive a layer-2 packet and return its layer-3 payload."""
            r = L2pcapSocket.recv(self, x)
            if r:
                return r.payload
            else:
                return
        def send(self, x):
            """Prepend a link-layer header guessed from the pcap datalink
            type, then inject the frame.

            NOTE(review): this sends on self.ins (not self.outs) -- confirm
            that is intentional for this backend.
            """
            # Makes send detects when it should add Loopback(), Dot11... instead of Ether()  # noqa: E501
            ll = self.ins.datalink()
            if ll in conf.l2types:
                cls = conf.l2types[ll]
            else:
                cls = conf.default_l2
                warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s", self.iface, ll, cls.name)  # noqa: E501
            sx = raw(cls() / x)
            if hasattr(x, "sent_time"):
                x.sent_time = time.time()
            return self.ins.send(sx)
##########
# DNET #
##########
# DEPRECATED
if conf.use_dnet:
    warning("dnet usage with scapy is deprecated, and will be removed in a future version.")  # noqa: E501
    try:
        try:
            # First try to import dnet
            import dnet
        except ImportError:
            # Then, try to import dumbnet as dnet
            import dumbnet as dnet
    except ImportError as e:
        if conf.interactive:
            log_loading.error("Unable to import dnet module: %s", e)
            conf.use_dnet = False
            # Interactive fallback: no-op stubs so the API keeps working.
            def get_if_raw_hwaddr(iff):
                "dummy"
                return (0, b"\0\0\0\0\0\0")
            def get_if_raw_addr(iff):  # noqa: F811
                "dummy"
                return b"\0\0\0\0"
            def get_if_list():
                "dummy"
                return []
        else:
            raise
    else:
        def get_if_raw_hwaddr(iff):
            """Return a tuple containing the link type and the raw hardware
            address corresponding to the interface 'iff'"""
            if iff == scapy.arch.LOOPBACK_NAME:
                return (ARPHDR_LOOPBACK, b'\x00' * 6)
            # Retrieve interface information
            try:
                tmp_intf = dnet.intf().get(iff)
                link_addr = tmp_intf["link_addr"]
            except Exception:
                raise Scapy_Exception("Error in attempting to get hw address"
                                      " for interface [%s]" % iff)
            if hasattr(link_addr, "type"):
                # Legacy dnet module
                return link_addr.type, link_addr.data
            else:
                # dumbnet module
                mac = mac2str(str(link_addr))
                # Adjust the link type
                if tmp_intf["type"] == 6:  # INTF_TYPE_ETH from dnet
                    return (ARPHDR_ETHER, mac)
                return (tmp_intf["type"], mac)
        def get_if_raw_addr(ifname):  # noqa: F811
            """Return the raw (packed) network address of `ifname`.

            NOTE(review): the warning text says "MAC address" but this
            returns the interface *network* address -- message predates
            this code; kept for compatibility.
            """
            i = dnet.intf()
            try:
                return i.get(ifname)["addr"].data
            except (OSError, KeyError):
                warning("No MAC address found on %s !" % ifname)
                return b"\0\0\0\0"
        def get_if_list():
            """List the names of all interfaces known to dnet."""
            return [i.get("name", None) for i in dnet.intf()]
        def get_working_if():
            """Returns the first interface than can be used with dnet"""
            if_iter = iter(dnet.intf())
            try:
                intf = next(if_iter)
            except (StopIteration, RuntimeError):
                return scapy.consts.LOOPBACK_NAME
            return intf.get("name", scapy.consts.LOOPBACK_NAME)
| 2.171875 | 2 |
BD_simulator.py | trstn9598/ORIE7590 | 0 | 12762341 | # ORIE 7590
import numpy as np
from bd_sim_cython import discrete_bessel_sim, discrete_laguerre_sim, cmeixner
from scipy.special import jv, laguerre, poch, eval_laguerre, j0
from scipy.integrate import quad
from math import comb, factorial, exp, sqrt, log
import hankel
def bd_simulator(t, x0, num_paths, method='bessel', num_threads=4):
    """
    :param t: terminal time, double
    :param x0: initial state, callable or int
    :param num_paths: number of paths
    :param method: method of simulating birth-death chain, currently support 'bessel' and 'laguerre'
    :param num_threads: number of threads for multiprocessing
    :return: ndarray of simulated result at terminal time
    """
    # Build the array of initial states: either replicate the constant or
    # sample once per path from the supplied callable.
    if isinstance(x0, int):
        start_states = np.full(num_paths, x0, dtype=np.int64)
    else:
        start_states = np.fromiter(
            (x0() for _ in range(num_paths)), dtype=np.int64, count=num_paths)
    result = np.zeros(num_paths, dtype=np.int64)
    # Dispatch to the matching cython kernel; it fills `result` in place.
    kernel = discrete_bessel_sim if method == 'bessel' else discrete_laguerre_sim
    kernel(t, start_states, num_paths, result, int(num_threads))
    return result
def MC_BESQ_gateway(N = 10**6, t = 0, x0 = 0, test = 'bessel', method = 'bessel', args = None, num_decimal = 4):
    """
    Monte Carlo estimator of expected BESQ using dBESQ simulation or dLaguerre simulation
    :param N: int, Number of simulations
    :param T: positive float, Simulation horizon
    :param x0: initial value of X (Poisson intensity of the discrete start state)
    :param method: simulation method, currently support {'bessel', 'laguerre', 'bessel-delay', 'laguerre-delay'}
    :param test: defines test function ('bessel' or 'custom')
    :param args: arguments to define test function (args[0] is the custom callable)
    :raises ValueError: for an unsupported (method, test) combination
    """
    # Avoid a mutable default argument; args is only read below.
    args = [] if args is None else args
    f = None
    s = None
    if method == 'bessel':
        if test == 'bessel':
            f = lambda n : eval_laguerre(n, 1)
            s = t
        elif test == 'custom':
            # Bug fix: was `arg[0]` (undefined name -> NameError).
            f = args[0]
            s = t
    elif method == 'laguerre':
        if test == 'bessel':
            f = lambda n : eval_laguerre(n, 1+t)
            s = log(t + 1)
    elif method == 'bessel-delay':
        method = 'bessel'
        if test == 'bessel':
            f = lambda n : j0(2*np.sqrt(np.random.gamma(n+1)))
            s = t - 1
        elif test == 'custom':
            f = lambda n : args[0](np.random.gamma(n + 1))
            s = t - 1
    elif method == 'laguerre-delay':
        method = 'laguerre'
        if test == 'bessel':
            f = lambda n : j0(2*np.sqrt(np.random.gamma(n+1) * (t/2 + 1/2)))
            s = log(t/2 + 1/2)
    if f is None or s is None:
        # Previously an unsupported combination raised UnboundLocalError.
        raise ValueError("unsupported method/test combination: %r / %r" % (method, test))
    def poisson_x0():
        return np.random.poisson(x0)
    xt_array = bd_simulator(s, x0=poisson_x0, num_paths=N, method=method, num_threads=4)
    return np.mean(f(xt_array)).round(num_decimal)
def MC_Laguerre_gateway(N = 10**6, t = 0, x0 = 0, test = 'laguerre', method = 'laguerre', args = None, num_decimal = 4):
    """
    Monte Carlo estimator of expected Laguerre using dLaguerre simulation
    :param N: int, Number of simulations
    :param T: positive float, Simulation horizon
    :param x0: initial value of X (Poisson intensity of the discrete start state)
    :param method: simulation method, currently support {'laguerre', 'laguerre-delay'}
    :param test: defines test function
    :param args: dict of test-function arguments (args['n'] is the polynomial order)
    :raises ValueError: for an unsupported (method, test) combination
    """
    # Avoid a mutable default argument; args is only read below.
    args = {} if args is None else args
    f = None
    s = None
    if method == 'laguerre':
        if test == 'laguerre':
            f = lambda m : eval_meixner(args['n'], m)
            s = t
    elif method == 'laguerre-delay':
        if test == 'laguerre':
            f = lambda m : eval_laguerre(args['n'], np.random.gamma(m+1)/2)
            s = t - log(2)
        elif test == 'relu':
            f = lambda m : np.maximum(0, np.random.gamma(m+1)/2)
            s = t - log(2)
    if f is None or s is None:
        # Previously an unsupported combination raised UnboundLocalError.
        raise ValueError("unsupported method/test combination: %r / %r" % (method, test))
    def poisson_x0():
        return np.random.poisson(x0)
    xt_array = bd_simulator(s, x0=poisson_x0, num_paths=N, method='laguerre', num_threads=4)
    return np.mean(f(xt_array)).round(num_decimal)
def MC_Laguerre(N = 10**6, t = 0, x0 = 0, test = 'laguerre', args = [], num_decimal = 4):
    """
    Monte Carlo estimator of expected Laguerre using Brownian motion simulation
    :param N: int, Number of simulations
    :param T: positive float, Simulation horizon
    :param x0: initial value of X
    :param test: defines test function
    :args: arguments to define test function
    """
    if test == 'laguerre':
        def f(x):
            return eval_laguerre(args['n'], x)
    elif test == 'relu':
        def f(x):
            return np.maximum(0, x)
    # Squared-Gaussian representation: X_t is exp(-t)/2 times the squared
    # norm of a 2-d Gaussian with variance exp(t)-1, shifted by sqrt(x0).
    variance = exp(t) - 1
    gaussians = np.random.multivariate_normal(np.zeros(2), variance * np.eye(2), size=N)
    shifted = gaussians + np.sqrt(x0) * np.ones((N, 2))
    xt_array = exp(-t) / 2 * np.sum(np.square(shifted), axis=1)
    return np.mean(f(xt_array)).round(num_decimal)
def MC_dBESQ_gateway(N = 10**6, t = 0, n0 = 0, test = 'laguerre', method = 'laguerre', args = [], num_decimal = 4):
    """
    Monte Carlo estimator of expected dBESQ using birth-death simulation, exact BESQ solution, dLaguerre simulation
    or PDE systems.
    :param N: int, Number of simulations
    :param T: positive float, Simulation horizon
    :param x0: initial value of X
    :param method: simulation method, currently support {'birth-death', 'exact-besq', 'laguerre', 'pde'}
    :param test: defines test function
    :args: arguments to define test function
    """
    # NOTE(review): only the (method, test) pairs handled below return a
    # value; any other combination (including 'pde', which is documented
    # but not implemented) falls through and returns None.
    if method == 'birth-death':
        if test == 'laguerre':
            f = lambda n : eval_laguerre(n, 1)
        # Simulate the discrete BESQ chain directly from state n0.
        xt_array = bd_simulator(t, x0=n0, num_paths=N, method='bessel', num_threads=4)
        return np.mean(f(xt_array)).round(num_decimal)
    elif method == 'exact-besq':
        if test == 'laguerre':
            # Closed-form via the Bessel-function representation with a
            # gamma-randomized argument.
            return np.mean(exp(-t+1)*jv(0, 2*np.sqrt(np.random.gamma(n0+1)))).round(num_decimal)
    elif method == 'laguerre':
        if test == 'laguerre':
            f = lambda n : eval_laguerre(n, 1)
            # Time change from dBESQ to dLaguerre clock.
            s = log(t / 2)
            def poisson_x0():
                # Poisson-gamma mixture start state.
                return np.random.poisson(np.random.gamma(n0+1))
            xt_array = bd_simulator(s, x0=poisson_x0, num_paths=N, method='laguerre', num_threads=4)
            return np.mean(f(np.random.poisson(t/2 *np.random.gamma(xt_array+1)))).round(num_decimal)
def MC_BESQ_hankel(N = 10**6, t = 0, x0 = 0, test = 'custom', function = lambda x : 0, args = [], num_decimal = 4):
    """
    Monte Carlo estimator of expected BESQ using Hankel transform and Exponential r.v.
    :param N: int, Number of simulations
    :param T: positive float, Simulation horizon
    :param x0: initial value of X
    :param test: defines test function
    :param function: custom test function
    :args: arguments to define test function
    """
    # Reparametrized order-0 Bessel function used as the transform kernel.
    bessel0 = lambda x: jv(0, 2 * np.sqrt(x))
    if test == 'bessel':
        f = bessel0
    elif test == 'poly':
        if len(args) < 1:
            print('No coefficients provided')
            coefficients = []
        else:
            coefficients = args[0]
        f = lambda x: np.polyval(coefficients, x)
    else:
        f = function
    # One exponential draw per sample; each estimate is an unbiased
    # one-point evaluation of the Hankel-transform representation.
    samples = [
        bessel0(x0 * z) * hankel_reparam(z, f) / t
        for z in (np.random.exponential(1 / t) for _ in range(N))
    ]
    return np.mean(np.asarray(samples)).round(num_decimal)
def discrete_poly(n, coef):
    """Evaluate the "discrete polynomial" sum_i coef[i] * (n-i+1)_i,
    where (a)_i is the Pochhammer symbol; terms with i > n are skipped."""
    total = 0
    for i, c in enumerate(coef):
        if i <= n:
            total += c * poch(n - i + 1, i)
    return total
def exact_BESQ(t = 0, x0 = 0, num_decimal = 4):
    """Closed-form value exp(-t) * J_0(2*sqrt(x0)), rounded to
    `num_decimal` decimals (exact BESQ eigenfunction decay)."""
    value = jv(0, 2 * np.sqrt(x0)) * exp(-t)
    return value.round(num_decimal)
def exact_Laguerre(t = 0, x0 = 0, n = 0, num_decimal = 4):
    """Closed-form value exp(-n*t) * L_n(x0) (Laguerre eigenfunction
    decay), rounded to `num_decimal` decimals."""
    decay = exp(-t * n)
    return (decay * eval_laguerre(n, x0)).round(num_decimal)
def eval_meixner(n, m):
    """Evaluate, via the cython kernel `cmeixner`, the order-n Meixner
    values at each entry of the integer array `m`.

    NOTE(review): the output buffer is int64, so results are stored as
    integers -- confirm truncation is intended for these values.
    """
    output = np.zeros(dtype=np.int64, shape=len(m))
    cmeixner(n, m, len(m), output)
    return output
def hankel_reparam(z, f):
    """
    Monte Carlo estimator of expected BESQ using Hankel transform and Exponential r.v.
    Based on <NAME> and <NAME>, "hankel: A Python library for performing simple and accurate Hankel transformations", Journal of Open Source Software, 4(37), 1397, https://doi.org/10.21105/joss.01397
    :param z: positive float
    :param f: function in L^2(R_+)
    """
    # NOTE(review): despite the docstring, this helper simply evaluates the
    # order-0 Hankel transform of x -> f(x**2) at 2*sqrt(z), scaled by 2
    # (the squared-argument reparametrization used throughout this module).
    ht = hankel.HankelTransform(
        nu= 0, # The order of the bessel function
        N = 120, # Number of steps in the integration
        h = 0.03 # Proxy for "size" of steps in integration
    )
    return 2*ht.transform(lambda x: f(x**2), 2*np.sqrt(z), ret_err = False)
# exp = np.random.exponential
# def bd_one_path(t, x0):
# """
# simulate a birth-death proecss X at time t.
#
# :param t: float, terminal time
# :param x0: initial value of X
# :return: one realization of X_t
# """
#
# s = 0
# state = x0
#
# while True:
# birth_rate = state + 1
# death_rate = state
# arrival_rate = birth_rate + death_rate
# time_to_arrival = exp(1/arrival_rate)
# s += time_to_arrival
# # stop and return when exceeds target time
# if s > t:
# return state
# # update
# if np.random.rand() < death_rate / arrival_rate:
# state -= 1
# else:
# state += 1
#
#
# def bd_simulator(t, x0):
# """
# :param t: terminal time
# :param x0: list of initial values from certain distribution
# :return: list of simulated X_t
# """
#
# num_iter = len(x0)
# result = np.zeros(num_iter, dtype = np.int64)
#
# for i in range(num_iter):
# result[i] = bd_one_path(t, x0[i])
#
# return result | 2.296875 | 2 |
contatos/views.py | LuanFaria/Agenda | 1 | 12762342 | from django.shortcuts import render, get_object_or_404
from .models import Contato
from django.http import Http404
def index(request):
    """Render the contact list page with every stored contact."""
    todos_contatos = Contato.objects.all()
    context = {'contatos': todos_contatos}
    return render(request, 'contatos/index.html', context)
def ver_contato(request, contato_id):
    """Render one contact's detail page; raise 404 for an unknown id."""
    contato = get_object_or_404(Contato, id=contato_id)
    context = {'contatos': contato}
    return render(request, 'contatos/ver_contato.html', context)
| 2.046875 | 2 |
read_xls/read_xls.py | ono-kojiro/learning_python | 0 | 12762343 | #!/usr/bin/python3
import os
import sys
import getopt
import json
import re
import xlrd
import openpyxl
from openpyxl.utils import get_column_letter
from pprint import pprint
def usage():
    """Print a one-line usage banner naming the invoked script."""
    print(f"Usage : {sys.argv[0]}")
def main():
    """Dump .xls/.xlsx workbooks to a tab-separated text file.

    Command line: -o/--output FILE (required), -h/--help, -v; remaining
    arguments are workbook paths. Hidden rows and columns are skipped.
    Exits with status 1 when -o is missing, 2 on bad options.
    """
    ret = 0
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], "hvo:", ["help", "version", "output="])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(2)

    output = None
    for o, a in opts:
        if o == "-v":
            # NOTE: "-v" is declared as "version" in the long options but
            # has always printed the usage banner; behaviour kept.
            usage()
            sys.exit(0)
        elif o in ("-h", "--help"):
            usage()
            sys.exit(0)
        elif o in ("-o", "--output"):
            output = a
        else:
            assert False, "unknown option"

    if output is None:
        print("no output option")
        ret += 1

    if ret != 0:
        sys.exit(1)

    # `with` guarantees the dump file is closed even if parsing fails.
    with open(output, mode='w', encoding='utf-8') as fp:
        for filepath in args:
            print("arg : {0}".format(filepath))
            fp.write("# file : {0}\n".format(filepath))
            _, ext = os.path.splitext(filepath)
            if ext == ".xls":
                _dump_xls(filepath, fp)
            elif ext == ".xlsx":
                _dump_xlsx(filepath, fp)


def _dump_xls(filepath, fp):
    """Dump one legacy .xls workbook (via xlrd) to fp, sheet by sheet."""
    book = xlrd.open_workbook(filepath, formatting_info=True)
    pprint(book)
    for sheet in book.sheets():
        pprint(sheet)
        fp.write("# sheet : {0}\n".format(sheet.name))
        pprint(sheet.colinfo_map)
        pprint(sheet.rowinfo_map)
        for row in range(sheet.nrows):
            # Skip rows explicitly marked hidden in the row info map.
            if row in sheet.rowinfo_map and sheet.rowinfo_map[row].hidden == 1:
                continue
            fp.write(" ")
            for col in range(sheet.ncols):
                if col in sheet.colinfo_map and sheet.colinfo_map[col].hidden == 1:
                    continue
                val = sheet.cell(row, col).value
                if col != 0:
                    fp.write("\t")
                fp.write("{0}".format(val))
            fp.write("\n")


def _dump_xlsx(filepath, fp):
    """Dump one .xlsx workbook (via openpyxl) to fp, sheet by sheet."""
    wb = openpyxl.load_workbook(filename=filepath)
    for sheet_name in wb.sheetnames:
        sheet = wb[sheet_name]
        fp.write("# sheet : {0}\n".format(sheet_name))
        # Column letters / 1-based row numbers of hidden dimensions.
        hidden_cols = {letter for letter, dim in sheet.column_dimensions.items()
                       if dim.hidden}
        hidden_rows = {idx for idx, dim in sheet.row_dimensions.items()
                       if dim.hidden}
        rows = len(tuple(sheet.rows))
        cols = len(tuple(sheet.columns))
        for row in range(rows):
            # Bug fix: row_dimensions is keyed by 1-based integer row
            # numbers, but the old code compared str(0-based index), so
            # hidden rows were never actually skipped.
            if (row + 1) in hidden_rows:
                continue
            fp.write(" ")
            for col in range(cols):
                if get_column_letter(col + 1) in hidden_cols:
                    continue
                val = sheet.cell(row=row + 1, column=col + 1).value
                if col != 0:
                    fp.write("\t")
                fp.write("{0}".format(val if val is not None else ''))
            fp.write("\n")
        pprint(sheet)
| 2.953125 | 3 |
VAE/utils.py | NeKoSaNnn/Neko-ML | 0 | 12762344 | import os.path as osp
import sys
import numpy as np
import torch
from matplotlib import pyplot as plt
from scipy.stats import norm
sys.path.append(osp.dirname(sys.path[0]))
from neko import neko_utils
class utils(neko_utils.neko_utils):
    """VAE helper utilities; extends the project-wide `neko_utils` base
    class (which supplies `get_now_time` and `divide_line`)."""

    def __init__(self):
        super(utils, self).__init__()

    def plot_latent_image(self, model, latent_dim, patch_count, patch_side_size):
        """Decode a `patch_count` x `patch_count` grid of latent codes and
        save the tiled decodings as a single grayscale image.

        :param model: VAE whose `.decoder` accepts a CUDA tensor of shape
            (batch, latent_dim). NOTE(review): assumes a CUDA device is
            available (`.cuda()` below) -- confirm.
        :param latent_dim: latent-space dimension; the 2-d grid point (x, y)
            is tiled to fill all `latent_dim` coordinates. NOTE(review):
            the tile/reshape below only works when 2*latent_dim is a
            multiple of latent_dim (e.g. latent_dim == 2) -- confirm.
        :param patch_count: number of grid points per axis
        :param patch_side_size: side length in pixels of one decoded patch
        """
        # Two-sigma-style rule: span the grid over the central 90% of the
        # standard normal via the inverse CDF. (Original comment: 2σ原则)
        xs = norm.ppf(np.linspace(0.05, 0.95, patch_count))
        ys = norm.ppf(np.linspace(0.05, 0.95, patch_count))
        image_size = [patch_count * patch_side_size, patch_count * patch_side_size]
        image = np.zeros(image_size)
        for x_index, x in enumerate(xs):
            for y_index, y in enumerate(ys):
                # Tile the 2-d grid point across the latent dimensions and
                # decode; only the first decoded sample is used.
                z = np.tile(np.array([[x, y]]), latent_dim).reshape(-1, latent_dim)
                z = torch.Tensor(z).cuda()
                decoder_image = model.decoder(z)
                decoder_image = decoder_image.reshape(-1, patch_side_size, patch_side_size)
                image[x_index * patch_side_size:(x_index + 1) * patch_side_size,
                      y_index * patch_side_size:(y_index + 1) * patch_side_size] = decoder_image[0].cpu().detach().numpy()
        plt.figure(figsize=(10, 10))
        plt.imshow(image, cmap="gray")
        plt.savefig("latent-{}_space_image_{}.png".format(latent_dim, self.get_now_time()))
        self.divide_line("save latent space images !")
        plt.show()
| 2 | 2 |
examples/providers/factory_init_injections_underlying.py | whysage/python-dependency-injector | 1,997 | 12762345 | <reponame>whysage/python-dependency-injector<gh_stars>1000+
"""`Factory` provider - passing injections to the underlying providers example."""
from dependency_injector import containers, providers
class Regularizer:
    """Value object holding the regularization strength."""

    def __init__(self, alpha: float) -> None:
        # Regularization coefficient injected by the container.
        self.alpha = alpha
class Loss:
    """Loss that carries its regularizer as an injected dependency."""

    def __init__(self, regularizer: Regularizer) -> None:
        # Injected Regularizer instance.
        self.regularizer = regularizer
class ClassificationTask:
    """Task that carries its loss as an injected dependency."""

    def __init__(self, loss: Loss) -> None:
        # Injected Loss instance.
        self.loss = loss
class Algorithm:
    """Top-level object of the example dependency chain."""

    def __init__(self, task: ClassificationTask) -> None:
        # Injected ClassificationTask instance.
        self.task = task
class Container(containers.DeclarativeContainer):
    """IoC container wiring Algorithm -> ClassificationTask -> Loss ->
    Regularizer as nested Factory providers; injections for the underlying
    providers can be overridden at call time via double-underscore paths
    (e.g. ``task__loss__regularizer__alpha``)."""

    algorithm_factory = providers.Factory(
        Algorithm,
        task=providers.Factory(
            ClassificationTask,
            loss=providers.Factory(
                Loss,
                regularizer=providers.Factory(
                    Regularizer,
                ),
            ),
        ),
    )
if __name__ == '__main__':
    container = Container()

    # The `task__loss__regularizer__alpha` keyword traverses the nested
    # factories and injects `alpha` into the innermost Regularizer.
    algorithm_1 = container.algorithm_factory(
        task__loss__regularizer__alpha=0.5,
    )
    assert algorithm_1.task.loss.regularizer.alpha == 0.5

    # Each call builds a fresh object graph with its own override.
    algorithm_2 = container.algorithm_factory(
        task__loss__regularizer__alpha=0.7,
    )
    assert algorithm_2.task.loss.regularizer.alpha == 0.7
| 3.203125 | 3 |
servomoteur_api/script/servo.py | stimulee/rpi_samples | 0 | 12762346 | <gh_stars>0
#!/usr/bin/python
# Servo control script for a Raspberry Pi (Python 2: uses print statements).
# Drives a hobby servo on BCM pin 5 via software PWM to simulate pressing
# an "up"/"down" button, selected with -s ouverture|fermeture.
import RPi.GPIO as GPIO
import time,sys,getopt
# Servo Control
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(5, GPIO.OUT)
p = GPIO.PWM(5,50)   # 50 Hz PWM (standard hobby-servo frequency)
p.start(7.5)         # 7.5% duty cycle = neutral position (see pos90 below)
#GPIO.setup(11, GPIO.OUT)
#p1 = GPIO.PWM(11,50)
#p1.start(7.5)
# Command-line handling: -s <status> selects the action, -m <mode> an int.
# (Original comment: Gestion des parametres en entree)
statu="neutre"
mode = 0
try:
    opts,args = getopt.getopt(sys.argv[1:],'hs:m:')
except getopt.GetoptError:
    print "error ARg"
    sys.exit(1)
for opt,arg in opts:
    if opt == '-s':
        statu=arg
    elif opt == '-m':
        mode=int(arg)
delay_period=0.5    # seconds the "pressed" duty cycle is held
delay_period2=0.05  # settle delay after returning to neutral
# Reference duty cycles at 50 Hz: 2.5% ~ 0 deg, 7.5% ~ 90 deg, 12.5% ~ 180 deg
pos90=7.5
pos180=12.5
pos0=2.5
print "mode="
print mode
i = 0
#if mode > 0:
#    while i <= mode:
#        print "Appui sur mode | "
#        p1.ChangeDutyCycle(12.5)
#        time.sleep(delay_period)
#        p1.ChangeDutyCycle(7.5)
#        time.sleep(delay_period2)
#        i =i + 1
#    print "sortie de boucle"
if statu == "fermeture":
    # "Close": swing toward 0 deg, then back to neutral.
    print "Appui bouton bas ..."
    p.ChangeDutyCycle(2)
    time.sleep(delay_period)
    p.ChangeDutyCycle(7.5)
    time.sleep(delay_period2)
    print "Fermeture en cours"
elif statu == "ouverture":
    # "Open": swing toward 180 deg, then back to neutral.
    print "Appui bouton haut..."
    p.ChangeDutyCycle(12.5)
    time.sleep(delay_period)
    p.ChangeDutyCycle(7.5)
    time.sleep(delay_period2)
    print "Ouverture en cours"
else:
    print "ordre inconnu ou absent"
    print statu
p.stop()
#p1.stop()
GPIO.cleanup()
| 2.71875 | 3 |
flaskapp/blueprints/users/__init__.py | crockmitnic/question-paper-generator | 6 | 12762347 | <reponame>crockmitnic/question-paper-generator<filename>flaskapp/blueprints/users/__init__.py
"""users blueprint"""
| 1.0625 | 1 |
losses.py | breuckelen/torf | 14 | 12762348 | import tensorflow as tf
import numpy as np
def img2mse(x, y):
    """Mean squared error between two images/tensors."""
    diff = x - y
    return tf.reduce_mean(tf.square(diff))
def mse2psnr(x):
    """Convert an MSE value to PSNR in dB: -10 * log10(mse)."""
    ln10 = tf.math.log(10.)
    return -10. * tf.math.log(x) / ln10
def variance_weighted_loss(tof, gt, c=1.):
    """Negative-log-likelihood-style loss for ToF predictions with a
    per-element predicted standard deviation.

    :param tof: prediction [..., 3]: two ToF channels + one std channel
    :param gt: ground truth; only the first two channels are used
    :param c: weight of the log-std regularizer term
    """
    # Bug fix: the original first line was `tof = outputs['tof_map']`, but
    # `outputs` is not defined in this scope (copy-paste from
    # tof_loss_variance) and raised a NameError on every call.
    tof_std = tof[..., -1:]
    tof = tof[..., :2]
    gt = gt[..., :2]
    mse = tf.reduce_mean(tf.square(tof - gt) / (2 * tf.square(tof_std)))
    return (mse + c * tf.reduce_mean(tf.math.log(tof_std)))
def tof_loss_variance(target_tof, outputs, tof_weight):
    """Variance-weighted ToF loss for the fine (and optional coarse) map."""
    fine = variance_weighted_loss(outputs['tof_map'], target_tof) * tof_weight
    coarse = 0.0
    if 'tof_map0' in outputs:
        coarse = variance_weighted_loss(outputs['tof_map0'], target_tof) * tof_weight
    return fine, coarse
def tof_loss_default(target_tof, outputs, tof_weight):
    """Weighted MSE on the first two ToF channels, fine + optional coarse."""
    target = target_tof[..., :2]
    fine = img2mse(outputs['tof_map'][..., :2], target) * tof_weight
    coarse = 0.0
    if 'tof_map0' in outputs:
        coarse = img2mse(outputs['tof_map0'][..., :2], target) * tof_weight
    return fine, coarse
def color_loss_default(target_color, outputs, color_weight):
    """Weighted MSE between predicted and target color maps."""
    fine = img2mse(outputs['color_map'], target_color) * color_weight
    if 'color_map0' in outputs:
        coarse = img2mse(outputs['color_map0'], target_color) * color_weight
    else:
        coarse = 0.0
    return fine, coarse
def disparity_loss_default(target_depth, outputs, disp_weight, near, far):
    """Weighted MSE on disparity (inverse of depth clipped to [near, far])
    for the fine and optional coarse maps.

    :param near: lower depth clipping bound
    :param far: upper depth clipping bound
    """
    target_disp = 1. / np.clip(target_depth, near, far)
    # Bug fix: the original body contained a stray bare statement `target`
    # (an undefined name) which raised a NameError on every call.
    img_loss = img2mse(outputs['disp_map'], target_disp) * disp_weight
    img_loss0 = 0.0
    if 'disp_map0' in outputs:
        img_loss0 = img2mse(outputs['disp_map0'], target_disp) * disp_weight
    return img_loss, img_loss0
def depth_loss_default(target_depth, outputs, depth_weight):
    """Weighted MSE between predicted and target depth maps."""
    fine = img2mse(outputs['depth_map'], target_depth) * depth_weight
    if 'depth_map0' in outputs:
        coarse = img2mse(outputs['depth_map0'], target_depth) * depth_weight
    else:
        coarse = 0.0
    return fine, coarse
def empty_space_loss(outputs):
    """L1 penalty on accumulated opacity (fine + optional coarse),
    encouraging the model to leave empty space empty."""
    total = tf.reduce_mean(tf.abs(outputs['acc_map']))
    if 'acc_map0' in outputs:
        total = total + tf.reduce_mean(tf.abs(outputs['acc_map0']))
    return total
def make_pose_loss(model, key):
    """Build a pose-smoothness loss: mean squared difference between
    consecutive poses stored in ``model.poses[key]``.

    The returned callable ignores its argument (loss-function signature
    compatibility) and closes over `model` and `key`.
    """
    def loss_fn(_):
        return tf.reduce_mean(tf.square(
            tf.abs(model.poses[key][1:] - model.poses[key][:-1])
        ))
    return loss_fn
| 2.390625 | 2 |
ykdl/extractors/yizhibo.py | 592767809/ykdl | 136 | 12762349 | # -*- coding: utf-8 -*-
from ._common import *
class Yizhibo(Extractor):
    """Extractor for yizhibo.com (一直播) live streams."""

    name = 'Yizhibo (一直播)'

    def prepare(self):
        """Query the H5 live-info API and build the stream MediaInfo."""
        info = MediaInfo(self.name)
        info.live = True
        # Room id: last URL path component, without any extension.
        self.vid = self.url[self.url.rfind('/')+1:].split('.')[0]
        data = get_response(
            'http://www.yizhibo.com/live/h5api/get_basic_live_info',
            params={'scid': self.vid}).json()
        # Bug fix: the original asserted on the undefined name `content`
        # (NameError); the decoded JSON is bound to `data`. str() also
        # avoids a TypeError when the result code is an int.
        assert data['result'] == 1, 'Error : ' + str(data['result'])
        data = data['data']
        info.title = data['live_title']
        info.artist = data['nickname']
        info.streams['current'] = {
            'container': 'm3u8',
            'video_profile': 'current',
            'src' : [data['play_url']],
            'size': float('inf')
        }
        return info

site = Yizhibo()
| 2.484375 | 2 |
folder_compiler/processors/base_processors.py | d-krupke/folder_compiler | 0 | 12762350 | <filename>folder_compiler/processors/base_processors.py<gh_stars>0
import os
from ..context import CompilerContext
from .utils.pattern_filter import PatternFilter
from .utils.processor_utils import ProcessorUtils
class Processor:
    """
    A base class for a processor. It provides some basic utils and the general interface.
    A processor processes a file in the input directory (e.g., "./content").
    The compiler iterates through all files and calls all processors in the given
    order until the first one returns True. If include/exclude is used, the processor
    will return automatically if the file does not match. Otherwise, the method 'match'
    is called.
    A very simple Processor that simply copies all .pdf files could look like this:
    ```
    class PdfCopyProcessor(Processor):
        def __init__(self):
            super().__init__()
            self.add_include(".*\\.pdf") # only .pdf files
        def process(self, source, utils):
            utils.copy_file(source, source) # simply copy the file
            return True # mark file as processed
    ```
    Note that for this case, the FileCopyProcessor could also be used.
    """
    def __init__(self, includes: list = None, excludes: list = None):
        """
        :param includes: List of patterns to include
        :param excludes: List of patterns to exclude
        """
        # Include/exclude matching is delegated to a reusable pattern filter.
        self._inclusion_pattern_checker = PatternFilter(includes=includes,
                                                        excludes=excludes)
    def add_include(self, pattern: str):
        """
        Add a pattern that is included even if it fits an exclude pattern.
        If there is no prior exclude pattern, a generic ".*" exclude is added (an include
        without an exclude is useless).
        Use before compiling.
        :param pattern: A regex pattern for `re.match`. See https://docs.python.org/3/library/re.html
        :return: Itself to allow concatenation
        """
        self._inclusion_pattern_checker.add_include_pattern(pattern)
        return self
    def add_exclude(self, pattern: str):
        """
        Add a pattern that is excluded.
        Use before compiling.
        :param pattern: A regex pattern for `re.match`. See https://docs.python.org/3/library/re.html
        :return: Itself to allow concatenation
        """
        self._inclusion_pattern_checker.add_exclude_pattern(pattern)
        return self
    def __call__(self, source, context: CompilerContext):
        """Compiler entry point: dispatch `source` to the folder or file
        handler, but only when it passes the include/exclude filter.

        :return: True if this processor handled `source`, else False
        """
        utils = ProcessorUtils(context, self)
        if self._inclusion_pattern_checker.is_included(source):
            # Folders and files are handled by separate overridable hooks.
            if os.path.isdir(utils.get_full_source_path(source)):
                return self.process_folder(source, utils)
            else:
                return self.process_file(source, utils)
        return False
    def process_folder(self, path: str, utils: ProcessorUtils):
        """
        Overwrite this method in your custom processor (optionally).
        You can do the same as in process_file but the source is a folder.
        If you return True, no file or folder in this folder will be processed.
        It is called before processing any of the folder's content.
        :param path: Relative path of the folder (relative to input directory)
        :param utils: Utils for reading/writing/copying files.
        :return: True if folder is (completely) processed (including content).
        """
        return False
    def process_file(self, source: str, utils: ProcessorUtils):
        """
        Overwrite this method in your custom processor.
        :param source: Path in the input directory to the source
        :param utils: Utils for reading/writing/copying files
        :return: True if processed, False if not responsible.
        """
        raise NotImplementedError()
    def __repr__(self):
        # e.g. "PdfCopyProcessor(<filter repr>)" -- handy in compiler logs.
        return self.__class__.__name__+"("+str(self._inclusion_pattern_checker)+")"
| 2.984375 | 3 |