max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
python/code_challenges/left_join/tests/test_left_join.py | Awonkhrais/data-structures-and-algorithms | 0 | 12761651 | <gh_stars>0
from left_join import __version__
from left_join.leftjoin import *
from left_join.hashtabel import *
def test_version():
    # Pin the package version declared in pyproject.
    assert __version__ == '0.1.0'

def test_one():
    # Key present in both tables: values from both are merged into one row.
    data1=HashTable()
    data1.add('fond','enamored')
    data2=HashTable()
    data2.add('fond','averse')
    assert left_join(data1,data2)==[['fond', 'enamored', 'averse']]

def test_two():
    # Key only in the left table: the right-hand value is None.
    data1=HashTable()
    data1.add('fond','enamored')
    data2=HashTable()
    data2.add('warth','anger')
    assert left_join(data1,data2)==[['fond', 'enamored', None]]

def test_three():
    # Empty left table joins to an empty result regardless of the right table.
    data1=HashTable()
    data2=HashTable()
    data2.add('warth','anger')
    assert left_join(data1,data2)==[]
| 2.234375 | 2 |
cmake_targets/autotests/tools/configure_cots_bandrich_ue.py | t0930198/OAI_nb_IoT | 2 | 12761652 | <filename>cmake_targets/autotests/tools/configure_cots_bandrich_ue.py
#!/usr/bin/python
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.0 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * <EMAIL>
# */
# \author <NAME>, <NAME>
import time
import serial
import os
from socket import AF_INET
from pyroute2 import IPRoute
import sys
import re
import threading
import signal
import traceback
import os
import commands
# Configure the serial connection; the parameters differ per attached device.
# First we find an open port to work with.
serial_port=''
ser=serial.Serial()
openair_dir = os.environ.get('OPENAIR_DIR')
if openair_dir == None:
    print "Error getting OPENAIR_DIR environment variable"
    sys.exit(1)
# Make lib_autotest (find_usb_path, ...) importable before the star import below.
sys.path.append(os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/tools/'))
from lib_autotest import *
def find_open_port():
    # Scan /dev/ttyUSB0..99 for the modem's serial device; if none is found,
    # power-cycle the modem via reset_ue() and scan again.
    # NOTE(review): inconsistent returns -- the early path returns the port
    # string, the success path below returns None; callers only use the
    # globals, so this seems benign, but confirm.
    global serial_port, ser
    max_ports=100  # NOTE(review): unused; the scan below hard-codes range(0, 100)
    serial_port=''
    while True:
        # A previous iteration (after reset_ue) may already have found a port.
        if os.path.exists(serial_port) == True:
            return serial_port
        for port in range(0,100):
            serial_port_tmp = '/dev/ttyUSB'+str(port)
            if os.path.exists(serial_port_tmp) == True:
                print 'New Serial Port : ' + serial_port_tmp
                serial_port = serial_port_tmp
                break
        if serial_port == '':
            print" Not able to detect valid serial ports. Resetting the modem now..."
            reset_ue()
        else :
            # Open the detected port and publish the handle via the global.
            ser = serial.Serial(port=serial_port)
            return
#serial_port = '/dev/ttyUSB2'
# wvdial configuration used to bring up the ppp link.
bandrich_ppd_config = os.environ.get('OPENAIR_DIR') + '/cmake_targets/autotests/tools/wdial.bandrich.conf'
exit_flag=0

def signal_handler(signal, frame):
    # Ctrl+C handler: detach the UE (CGATT=0) before exiting.
    # NOTE(review): the parameter name shadows the 'signal' module, and
    # 'exit_flag=1' binds a *local* (no 'global exit_flag'), so the ppp
    # thread never observes the flag -- confirm intent.
    print('You pressed Ctrl+C!')
    print('Resetting the UE to detached state')
    timeout=10
    exit_flag=1
    send_command('AT+CGATT=0' , 'OK' , timeout)
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)
#ser.open()
#ser.isOpen()
class pppThread (threading.Thread):
    # Worker thread that repeatedly (re)attaches the UE and runs wvdial to
    # keep the ppp link alive; wvdial sometimes terminates on its own.
    def __init__(self, threadID, name, counter,port):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter
        self.port=port  # serial device passed to wvdial's Modem = line
    def run(self):
        print "Starting " + self.name
        #Here we keep running pppd thread in indefinite loop as this script terminates sometimes
        #while 1:
        while 1:
            time.sleep(5) #Hard coded, do not reduce this number!
            print "Starting wvdial now..."
            print 'exit_flag = ' + str(exit_flag)
            send_command('AT+CGATT=1','OK', 300)
            # Point the wvdial config at the serial port detected at startup.
            cmd="sed -i \"s%Modem = .*%Modem = " + self.port + "%g\" " + bandrich_ppd_config
            os.system(cmd)
            os.system('wvdial -C ' + bandrich_ppd_config + '' )
            # NOTE(review): no break/return follows, so the loop never exits
            # even when exit_flag is set -- confirm a 'break' was intended.
            if exit_flag == 1:
                print "Exit flag set to true. Exiting pppThread now"
                print "Terminating wvdial now..."
def send_command (cmd, response, timeout):
    # Write an AT command to the modem and poll the serial buffer until the
    # regex 'response' matches or roughly 'timeout' seconds elapse.
    # Failures (e.g. port vanished during a modem reset) are logged and retried.
    count=0
    sleep_duration = 1
    print 'In function: send_command: cmd = <' + cmd + '> response: <' + response + '> \n'
    global serial_port, ser
    while count <= timeout:
        try:
            #Sometimes the port does not exist coz of reset in modem.
            #In that case, we need to search for this port again
            if os.path.exists(serial_port) == False:
                find_open_port()
            ser.write (cmd + '\r\n')
            out = ''
            time.sleep(sleep_duration)
            count = count + sleep_duration
            # Drain whatever the modem has queued so far.
            while ser.inWaiting() > 0:
                out += ser.read(1)
            print 'out = <' + out + '> response = <' + response + '> \n'
            if re.search(response, out):
                break
        except Exception, e:
            error = ' cmd : ' + cmd + ' response : ' + response
            error = error + ' In function: ' + sys._getframe().f_code.co_name + ': *** Caught exception: ' + str(e.__class__) + " : " + str( e)
            error = error + traceback.format_exc()
            print error
            time.sleep(1)
def start_ue () :
    # Attach the UE, start the wvdial keep-alive thread, then loop forever
    # adding a host route to the gateway via the ppp interface and pinging it.
    #print 'Enter your commands below.\r\nInsert "exit" to leave the application.'
    global serial_port
    timeout=60 #timeout in seconds
    send_command('AT', 'OK' , timeout)
    send_command('AT+CFUN=1' , 'OK' , timeout)
    #send_command('AT+CGATT=0' , 'OK' , timeout)
    send_command('AT+CGATT=1','OK', 300)
    #os.system('wvdial -C ' + bandrich_ppd_config + ' &' )
    thread_ppp = pppThread(1, "ppp_thread", 1,port=serial_port)
    thread_ppp.start()
    #iface='ppp0'
    while 1:
        time.sleep ( 2)
        iface=''
        #Now we check if ppp0 interface is up and running
        try:
            if exit_flag == 1:
                break
            # Grab the first ppp* interface name from ifconfig output.
            cmd="ifconfig -a | sed 's/[ \t].*//;/^$/d' | grep ppp"
            status, out = commands.getstatusoutput(cmd)
            iface=out
            ip = IPRoute()
            idx = ip.link_lookup(ifname=iface)[0]
            print "iface = " + iface
            print " Setting route now..."
            #os.system("status=1; while [ \"$status\" -ne \"0\" ]; do route add -host " + gw + ' ' + iface + " ; status=$? ;sleep 1; echo \"status = $status\" ; sleep 2; done ")
            # 'gw' is the module-level gateway parsed from -gw (defined below).
            os.system ('route add -host ' + gw + ' ' + iface + ' 2> /dev/null')
            #ip.route('add', dst=gw, oif=iface)
            os.system('sleep 5')
            #print "Starting ping now..."
            os.system ('ping -c 1 ' + gw)
            #break
        except Exception, e:
            # Most commonly: no ppp interface yet -- log and retry.
            error = ' Interface ' + iface + 'does not exist...'
            error = error + ' In function: ' + sys._getframe().f_code.co_name + ': *** Caught exception: ' + str(e.__class__) + " : " + str( e)
            error = error + traceback.format_exc()
            print error
    thread_ppp.join()
def stop_ue():
    # Detach the UE: kill wvdial, send DETACH (CGATT=0), then airplane mode (CFUN=4).
    timeout=60
    os.system('killall wvdial')
    send_command('AT', 'OK' , timeout)
    send_command('AT+CGATT=0' , 'OK|ERROR' , timeout)
    send_command('AT+CFUN=4' , 'OK' , timeout)
#reset the USB BUS of Bandrich UE
def reset_ue():
    # Emulate an unplug/replug by toggling the sysfs 'authorized' flag of the
    # Bandrich adapter's USB device, then rediscover the serial port.
    stringIdBandrich='BandRich, Inc. 4G LTE adapter'
    status, out = commands.getstatusoutput('lsusb | grep -i \'' + stringIdBandrich + '\'')
    if (out == '') :
        print "Bandrich 4G LTE Adapter not found. Exiting now..."
        sys.exit()
    # Parse "Bus XXX Device YYY: ID vvvv:pppp" from the lsusb line.
    p=re.compile('Bus\s*(\w+)\s*Device\s*(\w+):\s*ID\s*(\w+):(\w+)')
    res=p.findall(out)
    BusId=res[0][0]
    DeviceId=res[0][1]
    VendorId=res[0][2]
    ProductId=res[0][3]
    usb_dir= find_usb_path(VendorId, ProductId)  # from lib_autotest (star import)
    print "Bandrich 4G LTE Adapter found in..." + usb_dir
    print "Sleeping now for 45 seconds...please wait..."
    cmd = "sudo sh -c \"echo 0 > " + usb_dir + "/authorized\""
    os.system(cmd + " ; sleep 15" )
    cmd = "sudo sh -c \"echo 1 > " + usb_dir + "/authorized\""
    os.system(cmd + " ; sleep 30" )
    find_open_port()
# Always start from a detached state, then process the command-line actions.
stop_ue()

i=1
gw='172.16.58.3'  # default gateway; overridable with -gw
while i < len(sys.argv):
    arg=sys.argv[i]
    if arg == '--start-ue' :
        print "Turning on UE..."
        find_open_port()
        print 'Using Serial port : ' + serial_port
        start_ue()
    elif arg == '--stop-ue' :
        print "Turning off UE..."
        find_open_port()
        print 'Using Serial port : ' + serial_port
        stop_ue()
    elif arg == '--reset-ue' :
        print "Resetting UE..."
        find_open_port()
        reset_ue()
    elif arg == '-gw' :
        gw = sys.argv[i+1]
        i=i+1  # consume the value argument as well
    elif arg == '-h' :
        print "--reset-ue: Reset the UE on USB Bus. Similar to unplugging and plugging the UE"
        print "--stop-ue: Stop the UE. Send DETACH command"
        print "--start-ue: Start the UE. Send ATTACH command"
        print "-gw: Specify the default gw as sometimes the gateway/route arguments are not set properly via wvdial"
    else :
        print " Script called with wrong arguments, arg = " + arg
        sys.exit()
    i = i +1
| 1.609375 | 2 |
lino_xl/lib/blogs/__init__.py | khchine5/xl | 1 | 12761653 | # -*- coding: UTF-8 -*-
# Copyright 2013-2016 <NAME>
#
# License: BSD (see file COPYING for details)
"""
.. autosummary::
:toctree:
models
"""
from lino.ad import Plugin
from django.utils.translation import ugettext_lazy as _
class Plugin(Plugin):
    # Lino plugin descriptor for the blogs app: registers menu entries and a
    # dashboard item.  (Intentionally shadows the imported base class name,
    # following Lino's plugin convention.)
    verbose_name = _("Blog")
    needs_plugins = ['lino_xl.lib.topics']

    def setup_main_menu(self, site, user_type, m):
        # Blog entries live under the shared "office" menu group.
        # mg = self.get_menu_group()
        mg = site.plugins.office
        m = m.add_menu(mg.app_label, mg.verbose_name)
        m.add_action('blogs.MyEntries')

    def setup_config_menu(self, site, user_type, m):
        # Configuration: manage the available entry types.
        mg = self.get_menu_group()
        m = m.add_menu(mg.app_label, mg.verbose_name)
        m.add_action('blogs.EntryTypes')

    def setup_explorer_menu(self, site, user_type, m):
        # Explorer: raw listing of all entries.
        mg = self.get_menu_group()
        m = m.add_menu(mg.app_label, mg.verbose_name)
        m.add_action('blogs.AllEntries')
        # m.add_action('blogs.AllTaggings')

    def get_dashboard_items(self, user):
        # Show the latest entries on the dashboard without a header.
        from lino.core.dashboard import ActorItem
        yield ActorItem(
            self.site.models.blogs.LatestEntries, header_level=None)
        # yield CustomItem(
        #     'blogs.Entry.latest_entries',
        #     self.models.blogs.Entry.latest_entries, max_num=10)
| 1.992188 | 2 |
utils/weaponsConverter.py | makennedy626/Destiny2AutoTagger | 0 | 12761654 | import pandas as pd
import numpy as np
def combineData(df_list):
    """Concatenate a list of DataFrames into one, forward-filling missing
    values and dropping duplicate rows.

    Parameters:
        df_list: iterable of pandas.DataFrame objects.

    Returns:
        A single DataFrame; empty when df_list is empty (matching the old
        append-based behavior).
    """
    if not df_list:
        return pd.DataFrame()
    # DataFrame.append was removed in pandas 2.0; pd.concat preserves the
    # original per-frame indices just like sequential append did.
    combined = pd.concat(df_list)
    # .ffill() replaces the deprecated fillna(method='ffill') spelling.
    return combined.ffill().drop_duplicates()
def modifyWeapons(df):
    """Reduce a weapons frame to a Name column plus a comma-joined perk list.

    NOTE(review): the "Perks 1" column is skipped here, matching the original
    behavior -- confirm whether that is intentional.
    """
    trailing_perks = ["Perks 2", "Perks 3", "Perks 4", "Perks 5", "Perks 6"]
    result = pd.DataFrame()
    result["Name"] = df["Name"]
    result['perksList'] = df["Perks 0"].str.cat(df[trailing_perks], sep=', ')
    return result
| 3.390625 | 3 |
home/scripts/memory/genld.py | ParksProjets/Mips-Applications | 1 | 12761655 | <reponame>ParksProjets/Mips-Applications
"""
Generate the LD script.
Copyright (C) 2018, <NAME>
License MIT
"""
import io, os.path as path
import argparse
import configparser
import json
import re
from getobjsizes import get_sections_size_prefixed
from sectionsutils import (split_sections, get_mem_sections as refine_sections,
sort_sections, get_memory_usages)
def read_ini(filename):
    """Parse a section-less INI file and return its settings.

    The file's contents are wrapped in a synthetic [DEFAULT] section so that
    configparser accepts bare key/value pairs.
    """
    parser = configparser.ConfigParser()
    with open(filename) as handle:
        contents = handle.read()
    parser.read_string("[DEFAULT]\n%s" % contents)
    return parser["DEFAULT"]
def read_config(filename):
    """Load and return the JSON configuration stored at *filename*."""
    with open(filename) as handle:
        return json.load(handle)
def gen_MEMORY(out, memories):
    """Write the MEMORY region table of the LD script to *out*.

    Each entry of *memories* is a dict with "name", "origin" and "length".
    """
    region_lines = [
        " %s : ORIGIN = 0x%0X, LENGTH = 0x%0X\n" % (entry["name"],
                                                    entry["origin"],
                                                    entry["length"])
        for entry in memories
    ]
    out.write("MEMORY\n{\n")
    out.writelines(region_lines)
    out.write("}\n")
def gen_SECTIONS(out, memories, sections, usages):
    "Generate 'SECTIONS' part of the LD script."
    # memories, sections and usages are parallel lists, one entry per region:
    # a region dict, its list of section directives, and its (bytes, percent)
    # usage tuple.
    out.write("\nSECTIONS\n{\n")
    so = io.StringIO()
    for mem, secs, (usage, perc) in zip(memories, sections, usages):
        so.write("\n /* %s memory data (%dB used (%d%%)) */\n" %
                 (mem["name"], usage, perc))
        so.write(" . = 0x%0X;\n" % mem["origin"])
        # Regions may override the output section name via "sections".
        so.write(" %s : {\n" % mem.get("sections", "%s.text" % mem["name"]))
        for symbol in secs:
            so.write(" %s\n" % symbol)
        so.write(" } > %s\n" % mem["name"])
    # [1:] drops the spurious leading newline written before the first region.
    out.write(so.getvalue()[1:])
    out.write("}\n")
def get_sections(apps, memconf):
    "Gets the sections in each memories from the apps."
    # Collect prefixed section sizes for every app plus the fixed "home" and
    # "startup" components, then partition them across the memory regions.
    sections = {}
    for name in apps:
        sections.update(get_sections_size_prefixed(name))
    for name in ("home", "startup"):
        sections.update(get_sections_size_prefixed(name))
    # mems: per-region capacities; spe: per-region (prefix, suffix) directives.
    mems, spe = refine_sections(sections, memconf)
    memsecs = split_sections(sections, mems)
    assert memsecs, "Not enought memory space for storing all the apps"
    usages = get_memory_usages(memconf, mems, memsecs, sections)
    sort_sections(memsecs)
    # Wrap each region's sorted sections with its special prefix/suffix lists.
    secs = [spe[i][0] + secs + spe[i][1] for i, secs in enumerate(memsecs)]
    return (secs, usages)
def genld(inname, outname, folder):
    "Generate the LD script."
    # inname: memory JSON config; outname: LD script to write; folder: the
    # apps directory containing #all-apps.ini.
    memconf = read_config(inname)
    appconf = read_ini(path.join(folder, "#all-apps.ini"))
    # The "apps" setting is a YAML-like "- name" list; append the lock app.
    apps = re.findall("^\\s*-\\s*(.*)$", appconf.get("apps").strip(), re.M)
    apps.append(appconf.get("lock"))
    sections, usages = get_sections(apps, memconf)
    out = open(outname, "w")
    out.write("/*\n * This file has been generated automatically by genld.py\n")
    out.write(" * Configuration file: %s\n */\n\n" % path.basename(inname))
    gen_MEMORY(out, memconf)
    gen_SECTIONS(out, memconf, sections, usages)
    out.close()
def main():
    "Entry point of the application."
    parser = argparse.ArgumentParser(prog="genld",
                                     description="Generate the LD script.")
    parser.add_argument("folder", default="apps", nargs="?",
                        help="folder containing the apps, from root (default=apps)")
    parser.add_argument("-o", default="ldscript.ld", metavar="outname",
                        help="output filename, in root folder (default=ldscript.ld)")
    parser.add_argument("-c", default="memory.json", metavar="config",
                        help="configuration file (default=memory.json)")
    args = parser.parse_args()
    # Resolve everything relative to this script: the config lives next to
    # it, the output and apps folder live two levels up at the repo root.
    here = path.dirname(__file__)
    configf = path.abspath(path.join(here, args.c))
    outf = path.abspath(path.join(here, '../..', args.o))
    folder = path.abspath(path.join(here, '../..', args.folder))
    genld(configf, outf, folder)

if __name__ == "__main__":
    main()
| 2.375 | 2 |
student/migrations/0001_initial.py | n0ss4/trobafeina | 0 | 12761656 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-11 14:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration: creates the Student model with a one-to-one link to
    # the auth user.  Auto-generated by Django 1.11 -- do not hand-edit field
    # definitions; create a follow-up migration instead.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom', models.CharField(default=b'', max_length=100, verbose_name=b'Nom')),
                ('cognom', models.CharField(default=b'', max_length=100, verbose_name=b'Cognom')),
                ('dni', models.CharField(default=b'', max_length=100, unique=True, verbose_name=b'DNI')),
                ('adreca', models.CharField(default=b'', max_length=100, verbose_name=b'Adre\xc3\xa7a')),
                ('poblacio', models.CharField(default=b'', max_length=100, verbose_name=b'Poblaci\xc3\xb3')),
                ('codi_postal', models.CharField(default=b'', max_length=100, verbose_name=b'Codi Postal')),
                ('telefon', models.CharField(default=b'', max_length=100, verbose_name=b'Tel\xc3\xa8fon')),
                ('correu_electronic', models.CharField(default=b'', max_length=100, verbose_name=b'Correu electronic')),
                ('edat', models.CharField(default=b'', max_length=100, verbose_name=b'Edat')),
                ('estudis', models.CharField(default=b'', max_length=1000, verbose_name=b'Estudis')),
                ('experiencia', models.CharField(default=b'', max_length=1000, verbose_name=b'Experiencia')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name=b'Nom del usuari')),
            ],
        ),
    ]
| 1.648438 | 2 |
src/scoreboard.py | dhvip9/SnakeGame-Package | 0 | 12761657 | <filename>src/scoreboard.py
from turtle import Turtle
ALIGNMENT = "center"
FONT = ('Arial', 20, 'normal')
class Scoreboard(Turtle):
    """Turtle-based score display for the snake game.

    Shows the current score and the persistent high score (read from and
    written back to data.txt in the working directory).
    """

    def __init__(self):
        super().__init__()
        self.hideturtle()
        self.color("white")
        self.penup()
        self.score = 0
        # Persisted high score; data.txt must exist and contain an integer.
        with open("data.txt") as data:
            self.high_score = int(data.read())
        self.scoreboard_coordinates = (0.00, 270)  # top-center of the screen
        self.scoreboard_body(coordinate=self.scoreboard_coordinates)

    def scoreboard_body(self, alignment=ALIGNMENT,
                        font=FONT,
                        coordinate=None):
        """Scoreboard Body"""
        # Redraw the banner from scratch at the given coordinate.
        self.clear()
        self.setposition(coordinate)
        self.write(arg=f"[ Score : {self.score} HighScore : {self.high_score} ]", align=alignment, font=font)

    def increase_score(self):
        """Increase ScoreBoard"""
        self.score += 1
        self.scoreboard_body(coordinate=self.scoreboard_coordinates)

    def reset(self):
        """Reset ScoreBoard"""
        # Persist a new high score before zeroing the current score.
        if self.score > self.high_score:
            self.high_score = self.score
        with open("data.txt", mode="w") as data:
            data.write(f"{self.high_score}")
        self.score = 0
        self.scoreboard_body(coordinate=self.scoreboard_coordinates)

    def game_over(self, text="GameOver!",
                  alignment="center",
                  font=('Arial', 20, 'normal')):
        """GameOver Message"""
        # Move to the origin and print the game-over banner there.
        self.home()
        self.write(arg=text, align=alignment, font=font)
| 3.78125 | 4 |
benchmark/megatron/benchmark_gpt_bert_one_case.py | yf225/alpa | 114 | 12761658 | <reponame>yf225/alpa
import argparse
import gc
from functools import partial
import os
import sys
import time
import numpy as np
from megatron.utils import average_losses_across_data_parallel_group
from megatron.model import BertModel, GPTModel
from megatron.model import ModelType
from megatron import mpu, initialize_megatron, get_args, get_timers
from megatron.training import train_step, setup_model_and_optimizer
import torch
from util import write_tsv, benchmark_func,\
compute_gpt_tflops, compute_gpt_parameter_count
GB = 1024 ** 3
def get_gpt_functions():
    # Build the three callables Megatron's train loop needs for a GPT model:
    # model_provider, loss_func and forward_step.  Inputs are synthetic
    # all-ones tensors (this is a throughput benchmark, not training).
    args = get_args()
    micro_batch_size = args.micro_batch_size
    seq_len = args.encoder_seq_length

    def model_provider(pre_process=True, post_process=True):
        model = GPTModel(
            num_tokentypes=0,
            parallel_output=True,
            pre_process=pre_process,
            post_process=post_process
        )
        return model

    def loss_func(loss_mask, output_tensor):
        losses = output_tensor.float()
        loss_mask = loss_mask.view(-1).float()
        loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
        # Reduce loss for logging.
        # Cross-group loss averaging is skipped to avoid extra communication
        # during benchmarking; the reported "lm loss" is a placeholder zero.
        #averaged_loss = average_losses_across_data_parallel_group([loss])
        averaged_loss = [0]
        return loss, {'lm loss': averaged_loss[0]}

    # Static synthetic batch reused for every step.
    tokens = torch.ones((micro_batch_size, seq_len)).cuda().long()
    labels = torch.ones((micro_batch_size, seq_len)).cuda().long()
    loss_mask = torch.ones((micro_batch_size, seq_len)).cuda().int()
    attention_mask = \
        torch.ones(micro_batch_size, 1, seq_len, seq_len).cuda().bool()
    position_ids = torch.ones((micro_batch_size, seq_len)).cuda().long()

    def forward_step(data_iterator, model):
        # data_iterator is ignored; the synthetic batch above is used instead.
        output_tensor = model(tokens, position_ids, attention_mask,
                              labels=labels)
        return output_tensor, partial(loss_func, loss_mask)

    return model_provider, loss_func, forward_step
def get_bert_functions():
    # BERT counterpart of get_gpt_functions: returns model_provider,
    # loss_func and forward_step over a synthetic all-ones batch.
    args = get_args()
    micro_batch_size = args.micro_batch_size
    seq_len = args.encoder_seq_length

    def model_provider(pre_process=True, post_process=True):
        num_tokentypes = 2 if args.bert_binary_head else 0
        model = BertModel(
            num_tokentypes=num_tokentypes,
            add_binary_head=args.bert_binary_head,
            parallel_output=True,
            pre_process=pre_process,
            post_process=post_process)
        return model

    def loss_func(loss_mask, sentence_order, output_tensor):
        lm_loss_, sop_logits = output_tensor
        lm_loss_ = lm_loss_.float()
        loss_mask = loss_mask.float()
        lm_loss = torch.sum(
            lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
        if sop_logits is not None:
            # NOTE(review): 'F' (torch.nn.functional) is not imported in this
            # file, so this branch would raise NameError.  It is unreachable
            # here because the benchmark passes --bert-no-binary-head, but it
            # should be fixed before enabling the SOP head.
            sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(),
                                       sentence_order.view(-1),
                                       ignore_index=-1)
            sop_loss = sop_loss.float()
            loss = lm_loss + sop_loss
            # Cross-group averaging skipped for benchmarking; placeholder zeros.
            #averaged_losses = average_losses_across_data_parallel_group(
            #    [lm_loss, sop_loss])
            averaged_losses = [0, 0]
            return loss, {'lm loss': averaged_losses[0],
                          'sop loss': averaged_losses[1]}
        else:
            loss = lm_loss
            #averaged_losses = average_losses_across_data_parallel_group(
            #    [lm_loss])
            averaged_losses = [0]
            return loss, {'lm loss': averaged_losses[0]}

    # Static synthetic batch reused for every step.
    tokens = torch.ones((micro_batch_size, seq_len)).cuda().long()
    padding_mask = \
        torch.ones(micro_batch_size, seq_len).cuda().bool()
    types = torch.ones((micro_batch_size, seq_len)).cuda().long()
    lm_labels = torch.ones((micro_batch_size, seq_len)).cuda().long()
    loss_mask = torch.ones((micro_batch_size, seq_len)).cuda().int()
    sentence_order = None

    def forward_step(data_iterator, model):
        if not args.bert_binary_head:
            # NOTE(review): this assigns a *local* 'types', shadowing the
            # tensor above only inside this call -- confirm intent.
            types = None
        output_tensor = model(tokens, padding_mask, tokentype_ids=types,
                              lm_labels=lm_labels)
        return output_tensor, partial(loss_func, loss_mask, sentence_order)

    return model_provider, loss_func, forward_step
def benchmark_gpt_bert_one_case(benchmark_case, output_file_name):
    # Run one GPT/BERT throughput measurement under Megatron with the given
    # (model, size, parallelism) configuration and append the results as a
    # TSV row.  Must be launched via torch.distributed across all ranks.
    # Model configs
    (model_type, global_batch_size, seq_len, hidden_size, num_layers, num_heads,
     vocab_size, num_micro_batches, parallel_mode, parallel_args) = benchmark_case
    assert parallel_mode == "manual"
    (prefer_reduce_scatter, use_remat, (dp, op, pp), force_batch_dim_mapping) = parallel_args
    dp_size, tensor_mp_size, pipeline_mp_size = dp, op, pp
    checkpoint_activations = use_remat

    num_gpus = dp_size * tensor_mp_size * pipeline_mp_size
    assert global_batch_size % (dp_size * num_micro_batches) == 0
    micro_batch_size = global_batch_size // dp_size // num_micro_batches

    # always use local DDP
    ddp_impl = True

    # Parallel configs
    # Initialize megatron by injecting its CLI flags into sys.argv.
    sys.argv += ["--micro-batch-size", str(micro_batch_size)]
    sys.argv += ["--tensor-model-parallel-size", str(tensor_mp_size)]
    sys.argv += ["--pipeline-model-parallel-size", str(pipeline_mp_size)]
    sys.argv += ["--global-batch-size", str(global_batch_size)]
    sys.argv += ["--num-layers", str(num_layers)]
    sys.argv += ["--hidden-size", str(hidden_size)]
    sys.argv += ["--num-attention-heads", str(num_heads)]
    sys.argv += ["--seq-length", str(seq_len)]
    sys.argv += ["--max-position-embeddings", str(seq_len)]
    sys.argv += ["--optimizer", "adam"]
    sys.argv += ["--train-iters", "100"]
    sys.argv += ["--lr", "0.00015"]
    sys.argv += ["--bert-no-binary-head"]
    sys.argv += ["--DDP-impl", "local" if ddp_impl else "torch"]
    sys.argv += ["--fp16"]
    sys.argv += ["--loss-scale", "8"]
    if checkpoint_activations:
        sys.argv += ["--checkpoint-activations"]
    # sys.argv += ["--no-masked-softmax-fusion"]
    # sys.argv += ["--no-async-tensor-model-parallel-allreduce"]
    # sys.argv += ["--no-scatter-gather-tensors-in-pipeline"]
    initialize_megatron()
    args = get_args()
    args.padded_vocab_size = vocab_size
    rank = torch.distributed.get_rank()

    # Check initialization: Megatron's process groups must match the request.
    assert dp_size == mpu.get_data_parallel_world_size()
    assert tensor_mp_size == mpu.get_tensor_model_parallel_world_size()
    assert pipeline_mp_size == mpu.get_pipeline_model_parallel_world_size()

    # Build model
    if model_type == "gpt":
        model_provider, loss_func, forward_step = get_gpt_functions()
    elif model_type == "bert":
        model_provider, loss_func, forward_step = get_bert_functions()

    model, optimizer, lr_scheduler = setup_model_and_optimizer(model_provider,
        model_type=ModelType.encoder_or_decoder)
    parameter_count = compute_gpt_parameter_count(
        num_layers, hidden_size, vocab_size)

    def run_func():
        train_step(forward_step, None, model, optimizer, lr_scheduler)

    # Warmup and reset timers
    run_func()
    timers = get_timers()
    names = list(timers.timers.keys())
    for name in names:
        timers(name).reset()

    # Benchmark step time
    repeat = 2
    number = 1
    costs = benchmark_func(run_func, sync_func=None,
                           warmup=0, repeat=repeat, number=number)
    timers.log(names, normalizer=repeat * number)

    # Print results (rank 0 only writes the TSV row).
    if rank == 0:
        peak_mem = torch.cuda.max_memory_allocated(0)
        tflops = compute_gpt_tflops(global_batch_size, seq_len, num_layers,
                                    hidden_size, vocab_size,
                                    torch.distributed.get_world_size(),
                                    np.mean(costs))
        # Same metric, counting recomputation from activation checkpointing.
        tflops_ckpt = compute_gpt_tflops(global_batch_size, seq_len, num_layers,
                                         hidden_size, vocab_size,
                                         torch.distributed.get_world_size(),
                                         np.mean(costs), True)
        heads = ["Type", "Model Config", "Parallel Config", "P-mesh shape", "#Microbatch",
                 "Force DP", "Remat", "Mean Time", "Std Time", "#Params", "TFLOPs", "TFLOPs (ckpt)",
                 "Peak Mem"]
        values = [model_type, str(benchmark_case[1:6]),
                  str((dp_size, tensor_mp_size, pipeline_mp_size)),
                  "N/A", str(num_micro_batches), "N/A",
                  str(checkpoint_activations), f"{np.mean(costs):.3f}", f"{np.std(costs):.3f}",
                  f"{parameter_count/1e9:.3f}", f"{tflops:.2f}", f"{tflops_ckpt:.2f}",
                  f"{peak_mem/GB:5.3f}"]
        write_tsv(heads, values, f"{model_type}_megatron_{output_file_name}_rank{rank}.tsv")

    # Let NCCL/driver state settle between back-to-back cases.
    print("Sleeping for 30 seconds before starting the next case. ")
    time.sleep(30)
if __name__ == "__main__":
    # Called as: script <megatron args...> "<benchmark_case tuple>" <output name>.
    # The last two argv entries are consumed here; the rest is left for
    # Megatron's own argument parser.
    case = eval(sys.argv[-2])
    output_file_name = sys.argv[-1]
    del sys.argv[-1]
    del sys.argv[-1]
    benchmark_gpt_bert_one_case(case, output_file_name)
| 1.898438 | 2 |
python/strings/lsd_string_sort.py | rcanepa/cs-fundamentals | 0 | 12761659 | """LSD (least significant digit) string sort algorithm.
This algorithm is based on the Key-indexed counting sorting algorithm. The
main difference is that LSD run the same operation for W characters instead
of just 1 integer. It is assumed that strings are fixed length and that W
is the length of them.
Characteristics:
- Stable (preserves original order).
- Linear time: O(N * W).
E.g.:
Given the strings:
[
"4PGC938",
"2IYE230",
"3CI0720"
]
The algorithm is going to sort them through 7 iterations (W = 7). The process
is going to sort them char by char starting from right to left (from the least
significant 'digit').
After the first iteration, the result is going to be:
[
"2IYE230",
"3CI0720"
"4PGC938",
]
After the second:
[
"3CI0720"
"2IYE230",
"4PGC938",
]
And so on until the end:
[
"2IYE230",
"3CI0720"
"4PGC938",
]
"""
def _initialize_list(size, default_value):
return [default_value] * size
def lsd_sort(strings, key_length=None):
    """Sort a list of fixed-length strings with LSD radix sort.

    Runs one stable key-indexed counting pass per character position, from
    the rightmost position to the leftmost, so the final order is fully
    sorted.  *key_length* defaults to the length of the first string.
    Returns a new sorted list; raises on non-list input.
    """
    if not isinstance(strings, list):
        raise Exception("A {} was provided instead of a list of strings.".format(type(strings)))
    if key_length is None:
        key_length = len(strings[0])
    radix = 256
    for position in reversed(range(key_length)):
        # Tally character frequencies, offset by one slot for the prefix sum.
        counts = [0] * (radix + 1)
        for word in strings:
            counts[ord(word[position]) + 1] += 1
        # Convert frequencies into starting indices.
        for code in range(radix - 1):
            counts[code + 1] += counts[code]
        # Stable distribution pass into a fresh list.
        reordered = [None] * len(strings)
        for word in strings:
            code = ord(word[position])
            reordered[counts[code]] = word
            counts[code] += 1
        strings = reordered
    return strings
if __name__ == "__main__":
    # Demo: sort the license-plate example from Sedgewick's string sorts.
    licenses = [
        "4PGC938",
        "2IYE230",
        "3CI0720",
        "1ICK750",
        "1OHV845",
        "4JZY524",
        "1ICK750",
        "3CI0720",
        "1OHV845",
        "1OHV845",
        "2RLA629",
        "2RLA629",
        "3ATW723"
    ]
    print(lsd_sort(licenses))
| 4.21875 | 4 |
tests/__init__.py | kajyuuen/pytorch-partial-crf | 22 | 12761660 | <filename>tests/__init__.py
__version__ = '0.1.2'

# Re-export the CRF implementations at package level.
from pytorch_partial_crf.partial_crf import PartialCRF
from pytorch_partial_crf.crf import CRF
| 1.335938 | 1 |
tests/test_package.py | smisra87/ScopeReaders | 0 | 12761661 | from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import sys
# Make the package importable when running from the tests/ directory.
sys.path.append("../ScopeReaders/")

class TestImport(unittest.TestCase):
    # Smoke test: the package imports and exposes a version string.
    def test_basic(self):
        import ScopeReaders as sr
        print(sr.__version__)
        self.assertTrue(True)
| 2.140625 | 2 |
attending/mimetypes.py | ivanov/attending | 2 | 12761662 | <reponame>ivanov/attending
from pathlib import Path
from zipfile import ZipFile
from urllib.parse import urlparse
from http.client import HTTPResponse
import tarfile
def extract_zip(doc_location: Path, file: Path) -> None:
    """Extract the zip archive *file* into *doc_location*, then delete it.

    Raises zipfile.BadZipFile when *file* is not a valid zip archive.
    """
    # Context manager closes the archive handle; the original leaked it.
    with ZipFile(file) as archive:
        archive.extractall(path=doc_location)
    file.unlink()
def extract_tars(doc_location: Path, file: Path) -> None:
    """Extract the tar archive *file* into *doc_location*, then delete it.

    Raises tarfile.ReadError when *file* is not a valid tar archive.
    """
    # Context manager closes the archive even if extractall raises; the
    # original's explicit close() was skipped on error.
    # SECURITY: extractall trusts member paths; do not call this on archives
    # from untrusted sources without a path filter.
    with tarfile.open(file) as archive:
        archive.extractall(path=doc_location)
    file.unlink()
def get_mapping(mime_type):
    """Map a MIME type to its file extension, defaulting to "txt"."""
    known_extensions = {
        "application/pdf": "pdf",
        "application/zip": "zip",
        "text/html": "html",
        "text/plain": "txt",
    }
    return known_extensions.get(mime_type, "txt")
def get_extractor(file_extension):
    """Return the post-download extractor for *file_extension*, or None."""
    if file_extension == ".zip":
        return extract_zip
    if file_extension in (".tar", ".tar.gz"):
        return extract_tars
    return None
def get_filename(connection: HTTPResponse) -> str:
    """Best-effort filename for a downloaded resource.

    Prefers the Content-Disposition filename when the server sends one,
    otherwise falls back to the last component of the final URL path
    (which may be '' for a bare '/path/to/' URL).
    """
    content_disposition = connection.getheader('Content-Disposition')
    if content_disposition is not None:
        # !!!! I have yet to find a server that returns this for a file download. I have not been able to test this on
        # a proper response. I have made mock responses to test this.
        # Based off of https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
        # NOTE(review): a quoted filename ("name.ext") would be returned with
        # its quotes intact here -- confirm whether callers expect that.
        dispositions = content_disposition.strip().split("; ")
        for disposition in dispositions:
            if "=" in disposition:
                key, value = disposition.split("=")
                if key == 'filename':
                    return value
    # Will return '' if the path is '/path/to/something/' (trailing slash).
    return Path(urlparse(connection.geturl()).path).name
| 3.046875 | 3 |
mason/examples/operators/table/summarize/__init__.py | kyprifog/mason | 4 | 12761663 | from typing import Union, Optional
from mason.clients.response import Response
from mason.configurations.config import Config
from mason.engines.execution.models.jobs import ExecutedJob, InvalidJob
from mason.engines.execution.models.jobs.summary_job import SummaryJob
from mason.engines.metastore.models.credentials import MetastoreCredentials, InvalidCredentials
from mason.engines.metastore.models.table.table import Table
from mason.engines.storage.models.path import Path
from mason.operators.operator_definition import OperatorDefinition
from mason.operators.operator_response import OperatorResponse, DelayedOperatorResponse
from mason.parameters.validated_parameters import ValidatedParameters
from mason.util.environment import MasonEnvironment
class TableSummarize(OperatorDefinition):
    """Operator that produces a summary of a metastore table.

    'run' summarizes synchronously via the metastore; 'run_async' submits a
    SummaryJob to the execution engine and returns a delayed response.
    """

    def run(self, env: MasonEnvironment, config: Config, parameters: ValidatedParameters, response: Response) -> OperatorResponse:
        database_name: str = parameters.get_required("database_name")
        table_name: str = parameters.get_required("table_name")
        # Presence of the flag (any string) means "treat first row as headers".
        read_headers: bool = isinstance(parameters.get_optional("read_headers"), str)
        options = {"read_headers": read_headers}
        table, response = config.metastore().get_table(database_name, table_name, options, response)
        if isinstance(table, Table):
            summary, response = config.metastore().summarize_table(table, options, response)
        else:
            # Lookup failed: propagate the InvalidTables value as the result.
            summary = table
        return OperatorResponse(response, summary)

    def run_async(self, env: MasonEnvironment, config: Config, parameters: ValidatedParameters, response: Response) -> DelayedOperatorResponse:
        database_name: str = parameters.get_required("database_name")
        table_name: str = parameters.get_required("table_name")
        read_headers: bool = isinstance(parameters.get_optional("read_headers"), str)
        out_path: Optional[str] = parameters.get_optional("output_path")
        input_path: Path = config.storage().table_path(database_name, table_name)
        if out_path:
            output_path: Path = config.storage().path(out_path)
            credentials: Union[MetastoreCredentials, InvalidCredentials] = config.metastore().credentials()
            if isinstance(credentials, MetastoreCredentials):
                job = SummaryJob(input_path, output_path, credentials, read_headers)
                run, response = config.execution().run_job(job)
            else:
                run = InvalidJob("Invalid Metastore Credentials")
        else:
            run = InvalidJob("Must specify output_path for asynchronous execution client")
        return DelayedOperatorResponse(run, response)
| 1.976563 | 2 |
datasets/wiki_auto/wiki_auto.py | MitchellTesla/datasets | 10,608 | 12761664 | <reponame>MitchellTesla/datasets
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WikiAuto dataset for Text Simplification"""
import json
import datasets
_CITATION = """\
@inproceedings{acl/JiangMLZX20,
author = {<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME>},
editor = {<NAME> and
<NAME> and
<NAME> and
<NAME>},
title = {Neural {CRF} Model for Sentence Alignment in Text Simplification},
booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational
Linguistics, {ACL} 2020, Online, July 5-10, 2020},
pages = {7943--7960},
publisher = {Association for Computational Linguistics},
year = {2020},
url = {https://www.aclweb.org/anthology/2020.acl-main.709/}
}
"""
# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
WikiAuto provides a set of aligned sentences from English Wikipedia and Simple English Wikipedia
as a resource to train sentence simplification systems. The authors first crowd-sourced a set of manual alignments
between sentences in a subset of the Simple English Wikipedia and their corresponding versions in English Wikipedia
(this corresponds to the `manual` config), then trained a neural CRF system to predict these alignments.
The trained model was then applied to the other articles in Simple English Wikipedia with an English counterpart to
create a larger corpus of aligned sentences (corresponding to the `auto`, `auto_acl`, `auto_full_no_split`, and `auto_full_with_split` configs here).
"""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = "CC-BY-SA 3.0"
# TODO: Add link to the official dataset URLs here
# The HuggingFace dataset library don't host the datasets but only point to the original files
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLs = {
"manual": {
"train": "https://www.dropbox.com/sh/ohqaw41v48c7e5p/AACdl4UPKtu7CMMa-CJhz4G7a/wiki-manual/train.tsv?dl=1",
"dev": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/dev.tsv",
"test": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/test.tsv",
},
"auto_acl": {
"normal": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/ACL2020/train.src",
"simple": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/ACL2020/train.dst",
},
"auto_full_no_split": {
"normal": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_no_split/train.src",
"simple": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_no_split/train.dst",
},
"auto_full_with_split": {
"normal": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/train.src",
"simple": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/train.dst",
},
"auto": {
"part_1": "https://www.dropbox.com/sh/ohqaw41v48c7e5p/AAATBDhU1zpdcT5x5WgO8DMaa/wiki-auto-all-data/wiki-auto-part-1-data.json?dl=1",
"part_2": "https://www.dropbox.com/sh/ohqaw41v48c7e5p/AAATgPkjo_tPt9z12vZxJ3MRa/wiki-auto-all-data/wiki-auto-part-2-data.json?dl=1",
},
}
# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
class WikiAuto(datasets.GeneratorBasedBuilder):
    """WikiAuto dataset builder for sentence simplification.

    Configurations:
      - ``manual``: crowd-sourced sentence alignments (TSV; train/dev/test).
      - ``auto_acl`` / ``auto_full_no_split`` / ``auto_full_with_split``:
        parallel normal/simple sentence files (single ``full`` split).
      - ``auto`` (default): full automatically-aligned article pairs (JSON,
        two parts used as splits).
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="manual",
            version=VERSION,
            description="A set of 10K Wikipedia sentence pairs aligned by crowd workers.",
        ),
        datasets.BuilderConfig(
            name="auto_acl",
            version=VERSION,
            description="Automatically aligned and filtered sentence pairs used to train the ACL2020 system.",
        ),
        datasets.BuilderConfig(
            name="auto_full_no_split",
            version=VERSION,
            description="All automatically aligned sentence pairs without sentence splitting.",
        ),
        datasets.BuilderConfig(
            name="auto_full_with_split",
            version=VERSION,
            description="All automatically aligned sentence pairs with sentence splitting.",
        ),
        datasets.BuilderConfig(
            name="auto", version=VERSION, description="A large set of automatically aligned sentence pairs."
        ),
    ]

    DEFAULT_CONFIG_NAME = "auto"

    # Configurations that ship as a pair of parallel sentence files; kept in
    # one place so _info/_split_generators/_generate_examples stay in sync.
    _SENTENCE_PAIR_CONFIGS = ("auto_acl", "auto_full_no_split", "auto_full_with_split")

    def _info(self):
        """Return dataset metadata; the feature schema depends on the config."""
        if self.config.name == "manual":
            features = datasets.Features(
                {
                    "alignment_label": datasets.ClassLabel(names=["notAligned", "aligned", "partialAligned"]),
                    "normal_sentence_id": datasets.Value("string"),
                    "simple_sentence_id": datasets.Value("string"),
                    "normal_sentence": datasets.Value("string"),
                    "simple_sentence": datasets.Value("string"),
                    "gleu_score": datasets.Value("float32"),
                }
            )
        elif self.config.name in self._SENTENCE_PAIR_CONFIGS:
            features = datasets.Features(
                {
                    "normal_sentence": datasets.Value("string"),
                    "simple_sentence": datasets.Value("string"),
                }
            )
        else:
            # "auto": full article pairs plus paragraph/sentence alignments.
            features = datasets.Features(
                {
                    "example_id": datasets.Value("string"),
                    "normal": {
                        "normal_article_id": datasets.Value("int32"),
                        "normal_article_title": datasets.Value("string"),
                        "normal_article_url": datasets.Value("string"),
                        "normal_article_content": datasets.Sequence(
                            {
                                "normal_sentence_id": datasets.Value("string"),
                                "normal_sentence": datasets.Value("string"),
                            }
                        ),
                    },
                    "simple": {
                        "simple_article_id": datasets.Value("int32"),
                        "simple_article_title": datasets.Value("string"),
                        "simple_article_url": datasets.Value("string"),
                        "simple_article_content": datasets.Sequence(
                            {
                                "simple_sentence_id": datasets.Value("string"),
                                "simple_sentence": datasets.Value("string"),
                            }
                        ),
                    },
                    "paragraph_alignment": datasets.Sequence(
                        {
                            "normal_paragraph_id": datasets.Value("string"),
                            "simple_paragraph_id": datasets.Value("string"),
                        }
                    ),
                    "sentence_alignment": datasets.Sequence(
                        {
                            "normal_sentence_id": datasets.Value("string"),
                            "simple_sentence_id": datasets.Value("string"),
                        }
                    ),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="https://github.com/chaojiang06/wiki-auto",
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the config's files and map them to dataset splits."""
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)
        if self.config.name in ["manual", "auto"]:
            # One split per downloaded file (train/dev/test or part_1/part_2).
            return [
                datasets.SplitGenerator(
                    name=spl,
                    gen_kwargs={
                        "filepaths": data_dir,
                        "split": spl,
                    },
                )
                for spl in data_dir
            ]
        else:
            # Sentence-pair configs ship as a single unsplit corpus.
            return [
                datasets.SplitGenerator(
                    name="full",
                    gen_kwargs={"filepaths": data_dir, "split": "full"},
                )
            ]

    @staticmethod
    def _content_features(article, prefix):
        """Flatten an article's ordered {sentence_id: text} mapping into the
        two parallel lists expected by the ``Sequence`` feature."""
        content = article["content"]
        return {
            f"{prefix}_sentence_id": list(content.keys()),
            f"{prefix}_sentence": list(content.values()),
        }

    @staticmethod
    def _alignment_features(pairs, kind):
        """Split a list of (simple_id, normal_id) pairs into parallel lists."""
        return {
            f"normal_{kind}_id": [norm_id for _simp_id, norm_id in pairs],
            f"simple_{kind}_id": [simp_id for simp_id, _norm_id in pairs],
        }

    def _generate_examples(self, filepaths, split):
        """Yield (key, example) pairs for the requested split."""
        if self.config.name == "manual":
            # TSV column names, in file order.
            keys = [
                "alignment_label",
                "simple_sentence_id",
                "normal_sentence_id",
                "simple_sentence",
                "normal_sentence",
                "gleu_score",
            ]
            with open(filepaths[split], encoding="utf-8") as f:
                for id_, line in enumerate(f):
                    values = line.strip().split("\t")
                    # Explicit check instead of `assert`: asserts are stripped
                    # under `python -O` and this validates external data.
                    if len(values) != 6:
                        raise ValueError(f"Not enough fields in ---- {line} --- {values}")
                    yield id_, {
                        k: float(val) if k == "gleu_score" else val
                        for k, val in zip(keys, values)
                    }
        elif self.config.name in self._SENTENCE_PAIR_CONFIGS:
            # Two parallel line-aligned files; lines keep their trailing
            # newline, as in the original release.
            with open(filepaths["normal"], encoding="utf-8") as fi, open(
                filepaths["simple"], encoding="utf-8"
            ) as fo:
                for id_, (norm_se, simp_se) in enumerate(zip(fi, fo)):
                    yield id_, {
                        "normal_sentence": norm_se,
                        "simple_sentence": simp_se,
                    }
        else:
            # "auto": one large JSON mapping example_id -> article pair.
            # Context manager fixes the file-handle leak of the original
            # `json.load(open(...))`.
            with open(filepaths[split], encoding="utf-8") as f:
                dataset_dict = json.load(f)
            for id_, (eid, example_dict) in enumerate(dataset_dict.items()):
                res = {
                    "example_id": eid,
                    "normal": {
                        "normal_article_id": example_dict["normal"]["id"],
                        "normal_article_title": example_dict["normal"]["title"],
                        "normal_article_url": example_dict["normal"]["url"],
                        "normal_article_content": self._content_features(
                            example_dict["normal"], "normal"
                        ),
                    },
                    "simple": {
                        "simple_article_id": example_dict["simple"]["id"],
                        "simple_article_title": example_dict["simple"]["title"],
                        "simple_article_url": example_dict["simple"]["url"],
                        "simple_article_content": self._content_features(
                            example_dict["simple"], "simple"
                        ),
                    },
                    "paragraph_alignment": self._alignment_features(
                        example_dict.get("paragraph_alignment", []), "paragraph"
                    ),
                    "sentence_alignment": self._alignment_features(
                        example_dict.get("sentence_alignment", []), "sentence"
                    ),
                }
                yield id_, res
| 2.09375 | 2 |
rlkit/torch/irl/encoders/ant_lin_class_mlp_encoder.py | yifan-you-37/rl_swiss | 56 | 12761665 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from rlkit.torch.core import PyTorchModule
from rlkit.torch.networks import Mlp
from rlkit.torch import pytorch_util as ptu
from rlkit.torch.torch_meta_irl_algorithm import np_to_pytorch_batch
from rlkit.torch.irl.encoders.aggregators import sum_aggregator_unmasked, tanh_sum_aggregator_unmasked
from rlkit.torch.irl.encoders.aggregators import sum_aggregator, tanh_sum_aggregator
from rlkit.torch.distributions import ReparamMultivariateNormalDiag
class TrivialR2ZMap(PyTorchModule):
    """Small MLP mapping an aggregated context embedding ``r`` to the mean
    and log-std of a diagonal Gaussian posterior over the latent ``z``."""
    def __init__(
        self,
        r_dim,
        z_dim,
        hid_dim,
        # this makes it be closer to deterministic, makes it easier to train
        # before we turn on the KL regularization
        LOG_STD_SUBTRACT_VALUE=2.0
    ):
        # NOTE: locals() must be captured before creating any other local
        # names — rlkit uses the saved params to re-instantiate the module.
        self.save_init_params(locals())
        super().__init__()

        # Shared trunk. BatchNorm1d implies batched input is expected and
        # train/eval modes behave differently.
        self.trunk = nn.Sequential(
            nn.Linear(r_dim, hid_dim),
            nn.BatchNorm1d(hid_dim),
            nn.ReLU(),
            nn.Linear(hid_dim, hid_dim),
            nn.BatchNorm1d(hid_dim),
            nn.ReLU()
        )
        self.mean_fc = nn.Linear(hid_dim, z_dim)
        self.log_sig_fc = nn.Linear(hid_dim, z_dim)
        self.LOG_STD_SUBTRACT_VALUE = LOG_STD_SUBTRACT_VALUE
        print('LOG STD SUBTRACT VALUE IS FOR APPROX POSTERIOR IS %f' % LOG_STD_SUBTRACT_VALUE)

    def forward(self, r):
        """Return ``(mean, log_sigma)`` of the approximate posterior for ``r``."""
        trunk_output = self.trunk(r)
        mean = self.mean_fc(trunk_output)
        # Constant shift keeps the initial std small (near-deterministic).
        log_sig = self.log_sig_fc(trunk_output) - self.LOG_STD_SUBTRACT_VALUE
        return mean, log_sig
class TimestepBasedEncoder(PyTorchModule):
    """Permutation-invariant context encoder.

    Embeds each (s, a, s') timestep (or (s, s') when ``state_only``) with an
    MLP, aggregates within each trajectory (sum or mean) and across
    trajectories (sum aggregator), then maps the aggregate ``r`` to a
    diagonal-Gaussian posterior over the latent ``z``.
    """
    def __init__(
        self,
        input_dim, #(s,a,s') or (s,s') depending on state-only
        r_dim,
        z_dim,
        enc_hid_dim,
        r2z_hid_dim,
        num_enc_layer_blocks,
        hid_act='relu',
        use_bn=True,
        within_traj_agg='sum', # 'sum' or 'mean',
        state_only=False # if state-only, we only condition on the states and not actions
    ):
        # NOTE: locals() must be captured first (rlkit serialization
        # convention) — do not introduce locals above this line.
        self.save_init_params(locals())
        super().__init__()

        if hid_act == 'relu':
            hid_act_class = nn.ReLU
        elif hid_act == 'tanh':
            hid_act_class = nn.Tanh
        else:
            raise NotImplementedError()

        self.r_dim, self.z_dim = r_dim, z_dim

        # build the timestep encoder: num_enc_layer_blocks of
        # Linear (+ optional BatchNorm) + activation, then a Linear to r_dim.
        mod_list = nn.ModuleList([nn.Linear(input_dim, enc_hid_dim)])
        if use_bn: mod_list.append(nn.BatchNorm1d(enc_hid_dim))
        mod_list.append(hid_act_class())

        for i in range(num_enc_layer_blocks - 1):
            mod_list.append(nn.Linear(enc_hid_dim, enc_hid_dim))
            if use_bn: mod_list.append(nn.BatchNorm1d(enc_hid_dim))
            mod_list.append(hid_act_class())

        mod_list.append(nn.Linear(enc_hid_dim, r_dim))
        self.timestep_encoder = nn.Sequential(*mod_list)

        assert within_traj_agg in ['sum', 'mean']
        self.use_sum_for_traj_agg = within_traj_agg == 'sum'
        print('\nWITHIN TRAJ AGG IS SUM: {}'.format(self.use_sum_for_traj_agg))

        # aggregator across trajectories (masked variant used when a mask
        # is supplied to forward())
        self.agg = sum_aggregator_unmasked
        self.agg_masked = sum_aggregator

        # build the r to z map
        self.r2z_map = TrivialR2ZMap(r_dim, z_dim, r2z_hid_dim)

        self.state_only = state_only
        print('STATE-ONLY ENCODER: {}'.format(self.state_only))

    def forward(self, context=None, mask=None, r=None):
        """Encode ``context`` (or a precomputed ``r``) into a posterior.

        ``context`` is a list (per task) of lists (per trajectory) of dicts
        holding 'observations', 'actions', and 'next_observations' arrays —
        assumed equal-length across trajectories (TODO confirm with callers).
        If ``r`` is given, the embedding/aggregation stage is skipped.
        """
        if r is None:
            obs = np.array([[d['observations'] for d in task_trajs] for task_trajs in context])
            next_obs = np.array([[d['next_observations'] for d in task_trajs] for task_trajs in context])
            if not self.state_only:
                acts = np.array([[d['actions'] for d in task_trajs] for task_trajs in context])
                all_timesteps = np.concatenate([obs, acts, next_obs], axis=-1)
            else:
                all_timesteps = np.concatenate([obs, next_obs], axis=-1)

            # FOR DEBUGGING THE ENCODER
            # all_timesteps = all_timesteps[:,:,-1:,:]

            all_timesteps = Variable(ptu.from_numpy(all_timesteps), requires_grad=False)

            # N_tasks x N_trajs x Len x Dim
            N_tasks, N_trajs, Len, Dim = all_timesteps.size(0), all_timesteps.size(1), all_timesteps.size(2), all_timesteps.size(3)
            all_timesteps = all_timesteps.view(-1, Dim)
            embeddings = self.timestep_encoder(all_timesteps)
            embeddings = embeddings.view(N_tasks, N_trajs, Len, self.r_dim)
            # Aggregate over the time dimension within each trajectory.
            if self.use_sum_for_traj_agg:
                traj_embeddings = torch.sum(embeddings, dim=2)
            else:
                traj_embeddings = torch.mean(embeddings, dim=2)

            # get r
            if mask is None:
                r = self.agg(traj_embeddings)
            else:
                r = self.agg_masked(traj_embeddings, mask)

        post_mean, post_log_sig_diag = self.r2z_map(r)
        return ReparamMultivariateNormalDiag(post_mean, post_log_sig_diag)
| 2.40625 | 2 |
mast/activities/utils.py | Matfyz-Developer-Student-Club/matfyz-activity-sport-tracker | 5 | 12761666 | <filename>mast/activities/utils.py
def ordinal(number):
    """Return *number* as an English ordinal string ('1st', '2nd', '3rd', ...).

    Non-positive numbers yield the sentinel string 'none'.
    """
    if number <= 0:
        return 'none'
    remainder = number % 100
    # 11-13 (and x11-x13) always take 'th'; only reduce to the last digit
    # outside that teens band.
    if remainder >= 20:
        remainder %= 10
    suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(remainder, 'th')
    return f"{number}{suffix}"
| 3.0625 | 3 |
tests/test_encoding.py | mschneider/mango-explorer | 1 | 12761667 | from .context import mango
def test_decode_binary():
    # "SGVsbG8gV29ybGQ=" is base64 for the 11-byte ASCII string below.
    data = mango.decode_binary(["SGVsbG8gV29ybGQ=", "base64"])  # "Hello World"
    assert len(data) == 11
| 2.21875 | 2 |
test/models/Group.py | cofepy/torm | 2 | 12761668 | import wpath
from torm import Model
from torm import f
class Group(Model):
    """torm model for a mail group, stored in the "mongo" backend."""
    __config__ = "mongo"
    # Member e-mail addresses of the group.
    group = f.EmailList()
    # Hash identifying the group — derivation not shown here; verify upstream.
    group_hash = f.Str()
    display_name = f.Str()
    update_at = f.Timestamp()
    create_at = f.Timestamp()
# NOTE(review): module-level smoke test — this runs a live query at import
# time; consider guarding with `if __name__ == "__main__":`.
user_email = '<EMAIL>'
groups = Group.FindMany({"group": user_email})
print(groups)
| 2.34375 | 2 |
chainer/_environment_check.py | zjzh/chainer | 3,705 | 12761669 | <reponame>zjzh/chainer
from __future__ import absolute_import
import os
import sys
import warnings
import numpy.distutils.system_info
import pkg_resources
import chainer
def _check_python_350():
if sys.version_info[:3] == (3, 5, 0):
if not int(os.getenv('CHAINER_PYTHON_350_FORCE', '0')):
msg = """
Chainer does not work with Python 3.5.0.
We strongly recommend to use another version of Python.
If you want to use Chainer with Python 3.5.0 at your own risk,
set 1 to CHAINER_PYTHON_350_FORCE environment variable."""
raise Exception(msg)
def _check_osx_numpy_backend():
if sys.platform != 'darwin':
return
blas_opt_info = numpy.distutils.system_info.get_info('blas_opt')
if blas_opt_info:
extra_link_args = blas_opt_info.get('extra_link_args')
if extra_link_args and '-Wl,Accelerate' in extra_link_args:
warnings.warn('''\
Accelerate has been detected as a NumPy backend library.
vecLib, which is a part of Accelerate, is known not to work correctly with Chainer.
We recommend using other BLAS libraries such as OpenBLAS.
For details of the issue, please see
https://docs.chainer.org/en/stable/tips.html#mnist-example-does-not-converge-in-cpu-mode-on-mac-os-x.
Please be aware that Mac OS X is not an officially supported OS.
''') # NOQA
def _check_optional_dependencies():
    # Validate versions of Chainer's optional dependencies (declared in
    # chainer._version), warning on version mismatches and on duplicate
    # installations of alternative package flavours.
    for dep in chainer._version._optional_dependencies:
        name = dep['name']
        pkgs = dep['packages']
        spec = dep['specifier']
        help = dep['help']
        installed = False
        # Iterate over ALL flavours (no early exit on success) so multiple
        # concurrently-installed flavours can be detected below.
        for pkg in pkgs:
            found = False
            requirement = pkg
            # Version pinning check is on by default; opt out via env var.
            if os.environ.get('CHAINER_WARN_VERSION_MISMATCH', '1') == '1':
                requirement = '{}{}'.format(pkg, spec)
            try:
                pkg_resources.require(requirement)
                found = True
            except pkg_resources.DistributionNotFound:
                continue
            except pkg_resources.VersionConflict:
                # Installed but version outside the supported range: warn,
                # still count it as found/installed.
                msg = '''
--------------------------------------------------------------------------------
{name} ({pkg}) version {version} may not be compatible with this version of Chainer.
Please consider installing the supported version by running:
$ pip install '{requirement}'
See the following page for more details:
{help}
--------------------------------------------------------------------------------
'''  # NOQA
                warnings.warn(msg.format(
                    name=name, pkg=pkg,
                    version=pkg_resources.get_distribution(pkg).version,
                    requirement=requirement, help=help))
                found = True
            except Exception:
                warnings.warn(
                    'Failed to check requirement: {}'.format(requirement))
                # Unexpected failure: stop probing further flavours.
                break
            if found:
                if installed:
                    # NOTE(review): "from from" typo in the message below is
                    # preserved as-is (runtime string).
                    warnings.warn('''
--------------------------------------------------------------------------------
Multiple installations of {name} package has been detected.
You should select only one package from from {pkgs}.
Follow these steps to resolve this issue:
1. `pip list` to list {name} packages installed
2. `pip uninstall <package name>` to uninstall all {name} packages
3. `pip install <package name>` to install the proper one
--------------------------------------------------------------------------------
'''.format(name=name, pkgs=pkgs))
                installed = True
def check():
    """Run all environment sanity checks: Python version, macOS NumPy
    backend, and optional dependency versions (in that order)."""
    for checker in (_check_python_350,
                    _check_osx_numpy_backend,
                    _check_optional_dependencies):
        checker()
| 2.25 | 2 |
pypeit/spectrographs/magellan_fire.py | mcoughlin/PypeIt | 0 | 12761670 | """
Module for Magellan/FIRE specific methods.
Important Notes:
    - If you are reducing old FIRE data (taken before the instrument
      breakage in 2016), please change the ord_spat_pos array (see lines
      from ~220 to ~230)
.. include:: ../include/links.rst
"""
from pkg_resources import resource_filename
import numpy as np
from pypeit import msgs
from pypeit import telescopes
from pypeit.core import framematch
from pypeit.spectrographs import spectrograph
from pypeit.images import detector_container
class MagellanFIRESpectrograph(spectrograph.Spectrograph):
    """
    Child to handle Magellan/FIRE specific code

    .. note::
        For FIRE Echelle, we usually use high gain and SUTR read mode.
        The exposure time is usually around 900s. The detector
        parameters below are based on such mode. Standard star and
        calibrations are usually use Fowler 1 read mode in which case
        the read noise is ~20 electron.

    """
    # Single-detector instrument.
    ndet = 1
    telescope = telescopes.MagellanTelescopePar()

    def init_meta(self):
        """
        Define how metadata are derived from the spectrograph files.

        That is, this associates the ``PypeIt``-specific metadata keywords
        with the instrument-specific header cards using :attr:`meta`.
        """
        self.meta = {}
        # Required (core)
        self.meta['ra'] = dict(ext=0, card='RA')
        self.meta['dec'] = dict(ext=0, card='DEC')
        self.meta['target'] = dict(ext=0, card='OBJECT')
        # FIRE headers carry no decker/dichroic/binning cards, so use
        # fixed defaults.
        self.meta['decker'] = dict(ext=0, card=None, default='default')
        self.meta['dichroic'] = dict(ext=0, card=None, default='default')
        self.meta['binning'] = dict(ext=0, card=None, default='1,1')

        self.meta['mjd'] = dict(ext=0, card='ACQTIME')
        self.meta['exptime'] = dict(ext=0, card='EXPTIME')
        self.meta['airmass'] = dict(ext=0, card='AIRMASS')
        # Extras for config and frametyping
        self.meta['dispname'] = dict(ext=0, card='GRISM')
        self.meta['idname'] = dict(ext=0, card='OBSTYPE')
class MagellanFIREEchelleSpectrograph(MagellanFIRESpectrograph):
    """
    Child to handle Magellan/FIRE Echelle data

    .. note::
        For FIRE Echelle, we usually use high gain and SUTR read mode.
        The exposure time is usually around 900s. The detector
        parameters below are based on such mode. Standard star and
        calibrations are usually use Fowler 1 read mode in which case
        the read noise is ~20 electron.

    """
    name = 'magellan_fire'
    camera = 'FIRE'
    pypeline = 'Echelle'
    supported = True
    comment = 'Magellan/FIRE in echelle mode'

    def get_detector_par(self, hdu, det):
        """
        Return metadata for the selected detector.

        Args:
            hdu (`astropy.io.fits.HDUList`_):
                The open fits file with the raw image of interest.
            det (:obj:`int`):
                1-indexed detector number.

        Returns:
            :class:`~pypeit.images.detector_container.DetectorContainer`:
            Object with the detector metadata.
        """
        # Detector 1
        detector_dict = dict(
            binning         = '1,1',
            det             = 1,
            dataext         = 0,
            specaxis        = 1,
            specflip        = True,
            spatflip        = False,
            platescale      = 0.18,
            darkcurr        = 0.01,
            #saturation = 20000., # high gain is 20000 ADU, low gain is 32000 ADU
            saturation      = 100000., # This is an arbitrary value.
            nonlinear       = 1.0, # high gain mode, low gain is 0.875
            mincounts       = -1e10,
            numamplifiers   = 1,
            gain            = np.atleast_1d(1.2), # high gain mode, low gain is 3.8 e-/DN
            ronoise         = np.atleast_1d(5.0), # for high gain mode and SUTR read modes with exptime ~ 900s
            datasec         = np.atleast_1d('[5:2044,5:2044]'),
            oscansec        = np.atleast_1d('[5:2044,:5]')
        )
        return detector_container.DetectorContainer(**detector_dict)

    @classmethod
    def default_pypeit_par(cls):
        """
        Return the default parameters to use for this instrument.

        Returns:
            :class:`~pypeit.par.pypeitpar.PypeItPar`: Parameters required by
            all of ``PypeIt`` methods.
        """
        par = super().default_pypeit_par()

        # Wavelengths
        # 1D wavelength solution with OH lines
        par['calibrations']['wavelengths']['rms_threshold'] = 1.0
        # Per-order detection thresholds / final fit orders (21 orders).
        par['calibrations']['wavelengths']['sigdetect']=[5,10,10,10,10,20,30,30,30,30,30,10,30,30,60,30,30,10,20,30,10]
        par['calibrations']['wavelengths']['n_first']=2
        par['calibrations']['wavelengths']['n_final']=[3,3,3,2,4,4,4,3,4,4,4,3,4,4,4,4,4,4,6,6,4]
        par['calibrations']['wavelengths']['lamps'] = ['OH_FIRE_Echelle']
        #par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']
        par['calibrations']['wavelengths']['method'] = 'reidentify'
        par['calibrations']['wavelengths']['cc_thresh'] = 0.35
        par['calibrations']['wavelengths']['reid_arxiv'] = 'magellan_fire_echelle.fits'
        par['calibrations']['wavelengths']['match_toler']=30.0

        # Echelle parameters
        par['calibrations']['wavelengths']['echelle'] = True
        par['calibrations']['wavelengths']['ech_fix_format'] = True
        par['calibrations']['wavelengths']['ech_nspec_coeff'] = 4
        par['calibrations']['wavelengths']['ech_norder_coeff'] = 6
        par['calibrations']['wavelengths']['ech_sigrej'] = 3.0

        # Always correct for flexure, starting with default parameters
        par['scienceframe']['process']['sigclip'] = 20.0
        par['scienceframe']['process']['satpix'] ='nothing'

        # Set slits and tilts parameters
        par['calibrations']['tilts']['tracethresh'] = 5
        par['calibrations']['slitedges']['edge_thresh'] = 10.
        par['calibrations']['slitedges']['trace_thresh'] = 10.
        par['calibrations']['slitedges']['fit_order'] = 5
        par['calibrations']['slitedges']['max_shift_adj'] = 0.5
        par['calibrations']['slitedges']['fit_min_spec_length'] = 0.5
        par['calibrations']['slitedges']['left_right_pca'] = True
        par['calibrations']['slitedges']['pca_order'] = 3

        # Model entire slit
        par['reduce']['extraction']['model_full_slit'] = True # local sky subtraction operates on entire slit

        # Processing steps
        turn_off = dict(use_illumflat=False, use_biasimage=False, use_overscan=False, use_darkimage=False)
        par.reset_all_processimages_par(**turn_off)

        # Do not correct for flexure
        par['flexure']['spec_method'] = 'skip'

        # Set the default exposure time ranges for the frame typing
        par['calibrations']['standardframe']['exprng'] = [None, 60]
        par['calibrations']['arcframe']['exprng'] = [20, None]
        par['calibrations']['darkframe']['exprng'] = [20, None]
        par['scienceframe']['exprng'] = [20, None]

        # Sensitivity function parameters
        par['sensfunc']['algorithm'] = 'IR'
        par['sensfunc']['polyorder'] = 8
        # place holder for telgrid file
        par['sensfunc']['IR']['telgridfile'] \
                = resource_filename('pypeit',
                                    '/data/telluric/TelFit_LasCampanas_3100_26100_R20000.fits')
        return par

    def check_frame_type(self, ftype, fitstbl, exprng=None):
        """
        Check for frames of the provided type.

        Args:
            ftype (:obj:`str`):
                Type of frame to check. Must be a valid frame type; see
                frame-type :ref:`frame_type_defs`.
            fitstbl (`astropy.table.Table`_):
                The table with the metadata for one or more frames to check.
            exprng (:obj:`list`, optional):
                Range in the allowed exposure time for a frame of type
                ``ftype``. See
                :func:`pypeit.core.framematch.check_frame_exptime`.

        Returns:
            `numpy.ndarray`_: Boolean array with the flags selecting the
            exposures in ``fitstbl`` that are ``ftype`` type frames.
        """
        good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)
        if ftype in ['pinhole', 'bias']:
            # No pinhole or bias frames
            return np.zeros(len(fitstbl), dtype=bool)
        if ftype in ['pixelflat', 'trace']:
            return good_exp & (fitstbl['idname'] == 'PixFlat')
        if ftype == 'standard':
            return good_exp & (fitstbl['idname'] == 'Telluric')
        if ftype == 'science':
            return good_exp & (fitstbl['idname'] == 'Science')
        if ftype in ['arc', 'tilt']:
            # Sky OH lines in science frames serve as the wavelength reference.
            return good_exp & (fitstbl['idname'] == 'Science')
        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)

    @property
    def norders(self):
        """
        Number of orders for this spectograph. Should only defined for
        echelle spectrographs, and it is undefined for the base class.
        """
        return 21

    @property
    def order_spat_pos(self):
        """
        Return the expected spatial position of each echelle order.
        """
        # ToDo: We somehow need to automate this.
        ## For OLD data, i.e. before 2017
        #ord_spat_pos = np.array([0.06054688, 0.14160156, 0.17089844, 0.22753906, 0.27539062,
        #                         0.32128906, 0.36474609, 0.40673828, 0.45019531, 0.48974609,
        #                         0.52978516, 0.56054688, 0.59814453, 0.63378906, 0.66503906,
        #                         0.70019531, 0.7421875 , 0.77978516, 0.82763672, 0.87109375,
        #                         0.9296875])
        ## For NEW data
        ord_spat_pos = np.array([0.078125, 0.13769531, 0.19189453, 0.24414062, 0.29296875,
                                 0.34179688, 0.38330078, 0.42724609, 0.46582031, 0.50439453,
                                 0.54199219, 0.57763672, 0.61279297, 0.6484375 , 0.68457031,
                                 0.71875 , 0.75439453, 0.79443359, 0.83789062, 0.88671875,
                                 0.94091797])
        return ord_spat_pos

    @property
    def orders(self):
        """
        Return the order number for each echelle order.
        """
        # Physical order numbers 31 down to 11.
        return np.arange(31, 10, -1, dtype=int)

    @property
    def spec_min_max(self):
        """
        Return the minimum and maximum spectral pixel expected for the
        spectral range of each order.
        """
        # Only the bluest order (first entry) is truncated at pixel 500.
        spec_max = np.asarray([2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,
                               2048,2048,2048,2048,2048])
        spec_min = np.asarray([ 500,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
                                  0,   0,   0,   0,   0])
        return np.vstack((spec_min, spec_max))

    def order_platescale(self, order_vec, binning=None):
        """
        Return the platescale for each echelle order.

        Note that FIRE has no binning.

        Args:
            order_vec (`numpy.ndarray`_):
                The vector providing the order numbers.
            binning (:obj:`str`, optional):
                The string defining the spectral and spatial binning. **This
                is always ignored.**

        Returns:
            `numpy.ndarray`_: An array with the platescale for each order
            provided by ``order``.
        """
        return np.full(order_vec.size, 0.15)

    @property
    def dloglam(self):
        """
        Return the logarithmic step in wavelength for output spectra.
        """
        # This number was determined using the resolution and sampling quoted on the FIRE website
        R = 6000.0 * 2.7
        dloglam = 1.0 / R / np.log(10.0)
        return dloglam

    @property
    def loglam_minmax(self):
        """
        Return the base-10 logarithm of the first and last wavelength for
        ouput spectra.
        """
        # 8000 - 25700 Angstrom.
        return np.log10(8000.0), np.log10(25700)
class MagellanFIRELONGSpectrograph(MagellanFIRESpectrograph):
    """
    Child to handle Magellan/FIRE high-throughput data

    .. note::
        For FIRE longslit, science data are usually taken with SUTR readout
        mode with ~600s exposure (at least for quasar hunting people) and the
        readout noise is ~6 e-

    """
    name = 'magellan_fire_long'
    camera = 'FIRE'
    supported = True
    comment = 'Magellan/FIRE in long-slit/high-throughput mode'

    def get_detector_par(self, hdu, det):
        """
        Return metadata for the selected detector.

        Args:
            hdu (`astropy.io.fits.HDUList`_):
                The open fits file with the raw image of interest.
            det (:obj:`int`):
                1-indexed detector number.

        Returns:
            :class:`~pypeit.images.detector_container.DetectorContainer`:
            Object with the detector metadata.
        """
        # Detector 1
        detector_dict = dict(
            binning         = '1,1',
            det             = 1,
            dataext         = 0,
            specaxis        = 0,
            specflip        = False,
            spatflip        = False,
            platescale      = 0.15,
            darkcurr        = 0.01,
            saturation      = 320000., #32000 for low gain, I set to a higher value to keep data in K-band
            nonlinear       = 0.875,
            mincounts       = -1e10,
            numamplifiers   = 1,
            gain            = np.atleast_1d(3.8),
            ronoise         = np.atleast_1d(6.0), # SUTR readout mode with exposure~600s
            datasec         = np.atleast_1d('[5:2044, 900:1250]'),
            oscansec        = np.atleast_1d('[:5, 900:1250]')
        )
        return detector_container.DetectorContainer(**detector_dict)

    @classmethod
    def default_pypeit_par(cls):
        """
        Return the default parameters to use for this instrument.

        Returns:
            :class:`~pypeit.par.pypeitpar.PypeItPar`: Parameters required by
            all of ``PypeIt`` methods.
        """
        par = super().default_pypeit_par()

        # Wavelengths
        # 1D wavelength solution with arc lines
        par['calibrations']['wavelengths']['rms_threshold'] = 1.0
        par['calibrations']['wavelengths']['sigdetect']=3
        par['calibrations']['wavelengths']['fwhm'] = 20
        par['calibrations']['wavelengths']['n_first']=2
        par['calibrations']['wavelengths']['n_final']=4
        par['calibrations']['wavelengths']['lamps'] = ['ArI', 'ArII', 'ThAr', 'NeI']
        #par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']
        par['calibrations']['wavelengths']['method'] = 'full_template'
        par['calibrations']['wavelengths']['reid_arxiv'] = 'magellan_fire_long.fits'
        par['calibrations']['wavelengths']['match_toler']=5.0

        # Set slits and tilts parameters
        par['calibrations']['tilts']['tracethresh'] = 5
        par['calibrations']['slitedges']['trace_thresh'] = 10.
        par['calibrations']['slitedges']['sync_predict'] = 'nearest'

        # Processing steps
        turn_off = dict(use_illumflat=False, use_biasimage=False, use_overscan=False,
                        use_darkimage=False)
        par.reset_all_processimages_par(**turn_off)

        # Scienceimage parameters
        par['reduce']['findobj']['sig_thresh'] = 5
        #par['reduce']['maxnumber'] = 2
        par['reduce']['findobj']['find_trim_edge'] = [50,50]
        # Do not correct for flexure.
        par['flexure']['spec_method'] = 'skip'

        par['sensfunc']['IR']['telgridfile'] \
                = resource_filename('pypeit',
                                    '/data/telluric/TelFit_LasCampanas_3100_26100_R20000.fits')

        # Set the default exposure time ranges for the frame typing
        par['calibrations']['standardframe']['exprng'] = [None, 60]
        par['calibrations']['arcframe']['exprng'] = [1, 50]
        par['calibrations']['darkframe']['exprng'] = [20, None]
        par['scienceframe']['exprng'] = [20, None]
        return par

    def check_frame_type(self, ftype, fitstbl, exprng=None):
        """
        Check for frames of the provided type.

        Args:
            ftype (:obj:`str`):
                Type of frame to check. Must be a valid frame type; see
                frame-type :ref:`frame_type_defs`.
            fitstbl (`astropy.table.Table`_):
                The table with the metadata for one or more frames to check.
            exprng (:obj:`list`, optional):
                Range in the allowed exposure time for a frame of type
                ``ftype``. See
                :func:`pypeit.core.framematch.check_frame_exptime`.

        Returns:
            `numpy.ndarray`_: Boolean array with the flags selecting the
            exposures in ``fitstbl`` that are ``ftype`` type frames.
        """
        good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)
        if ftype in ['pinhole', 'bias']:
            # No pinhole or bias frames
            return np.zeros(len(fitstbl), dtype=bool)
        if ftype in ['pixelflat', 'trace']:
            return good_exp & (fitstbl['idname'] == 'PixFlat')
        if ftype == 'standard':
            return good_exp & (fitstbl['idname'] == 'Telluric')
        if ftype == 'science':
            return good_exp & (fitstbl['idname'] == 'Science')
        if ftype in ['arc', 'tilt']:
            # Unlike echelle mode, long-slit uses dedicated arc frames.
            return good_exp & (fitstbl['idname'] == 'Arc')
        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
| 2.265625 | 2 |
contrib/share_driver_hooks/zaqar_notification_example_consumer.py | kpawar89/manila | 159 | 12761671 | #!/usr/bin/env python
#
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import signal
import sys
import time
import netaddr
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import timeutils
import six
# Configuration options for this consumer script; registered under the
# [zaqar] option group in main().
opts = [
    cfg.IntOpt(
        "consume_interval",
        default=5,
        deprecated_name="sleep_between_consume_attempts",
        help=("Time that script will sleep between requests for consuming "
              "Zaqar messages in seconds."),
    ),
    cfg.StrOpt(
        "mount_dir",
        default="/tmp",
        help="Directory that will contain all mounted shares."
    ),
    cfg.ListOpt(
        "expected_ip_addresses",
        default=[],
        help=("List of IP addresses that are expected to be found in access "
              "rules to trigger [un]mount operation for a share.")
    ),
]

# Global oslo.config handle shared by the rest of the module.
CONF = cfg.CONF
def print_with_time(data):
    """Print *data* prefixed with the current UTC timestamp."""
    print("%s %s" % (six.text_type(timeutils.utcnow()), six.text_type(data)))
def print_pretty_dict(d):
    """Pretty-print dictionary *d* to stdout."""
    pprint.pprint(d)
def pop_zaqar_messages(client, queues_names):
    """Pop all pending messages from the given Zaqar queue(s).

    :param client: a zaqarclient instance (auth options are read from its
        configuration for logging purposes only).
    :param queues_names: one queue name or a list/set/tuple of names.
    :return: list of message bodies (as text); empty list on any error.
    """
    if not isinstance(queues_names, (list, set, tuple)):
        queues_names = (queues_names, )
    try:
        auth_options = client.conf['auth_opts']['options']
        user = auth_options['os_username']
        project = auth_options['os_project_name']
        messages = []
        for queue_name in queues_names:
            popped = client.queue(queue_name).pop()
            messages.extend(six.text_type(m.body) for m in popped)
            print_with_time(
                "Received %(len)s message[s] from '%(q)s' "
                "queue using '%(u)s' user and '%(p)s' project." % {
                    'len': len(messages),
                    'q': queue_name,
                    'u': user,
                    'p': project,
                }
            )
        return messages
    except Exception as e:
        print_with_time("Caught exception - %s" % e)
        return []
def signal_handler(signal, frame):
    """SIGINT handler: announce shutdown and exit the consumer cleanly."""
    print("")
    print_with_time("Ctrl+C was pressed. Shutting down consumer.")
    sys.exit(0)
def parse_str_to_dict(string):
    """Parse a queue message body into a Python object.

    Non-string inputs are returned unchanged.  String bodies are expected to
    be Python-literal dict reprs and are parsed with ``ast.literal_eval``,
    which - unlike the bare ``eval()`` used previously - cannot execute
    arbitrary code embedded in a (potentially untrusted) queue message.
    """
    import ast
    if not isinstance(string, six.string_types):
        return string
    return ast.literal_eval(string)
def handle_message(data):
    """Handle one consumed notification.

    The payload (optionally nested under a top-level 'data' key) describes a
    share access-rule change, e.g.::

        {'data': {
            'access_rules': [
                {
                    'access_id': u'b28268b9-36c6-40d3-a485-22534077328f',
                    'access_instance_id':
                        u'd137b2cb-f549-4141-9dd7-36b2789fb973',
                    'access_level': u'rw',
                    'access_state': u'active',
                    'access_to': u'7.7.7.7',
                    'access_type': u'ip',
                }
            ],
            'availability_zone': u'nova',
            'export_locations': [u'127.0.0.1:/path/to/nfs/share'],
            'is_allow_operation': True,
            'share_id': u'053eae9a-726f-4f7e-8502-49d7b1adf290',
            'share_instance_id': u'dc33e554-e0b9-40f5-9046-c198716d73a0',
            'share_proto': u'NFS'
        }}

    Only single-rule, IP-based NFS notifications trigger a [un]mount.
    """
    data = data.get('data', data)
    rules = data.get('access_rules', [])
    valid_access = (
        len(rules) == 1 and
        rules[0].get('access_type', '?').lower() == 'ip' and
        data.get('share_proto', '?').lower() == 'nfs'
    )
    if not valid_access:
        print_with_time('Do nothing with above message.')
        return
    export_location = data['export_locations'][0]
    # NOTE(review): 'access_to' is read from the top level of the payload,
    # although the documented structure nests it inside access_rules[0] --
    # confirm against the notifier before changing.
    if data['is_allow_operation']:
        mount_share(export_location, data['access_to'])
    else:
        unmount_share(export_location, data['access_to'])
def execute(cmd):
    """Run shell command *cmd* via oslo processutils.

    :param cmd: command line as a single string; split on whitespace.
    :return: (stdout, stderr) on execution, or (False, True) when the
        command could not be executed at all.
    """
    try:
        print_with_time('Executing following command: \n%s' % cmd)
        stdout, stderr = processutils.execute(*cmd.split())
        if stderr:
            print_with_time('Got error: %s' % stderr)
        return stdout, stderr
    except Exception as e:
        print_with_time('Got following error: %s' % e)
        return False, True
def is_share_mounted(mount_point):
    """Return True if *mount_point* appears in the output of `mount`."""
    mount_output, _ = execute('mount')
    return mount_point in mount_output
def rule_affects_me(ip_or_cidr):
    """Check whether an access rule's address targets this host.

    :param ip_or_cidr: a single IP address or a CIDR network string.
    :return: True when one of CONF.zaqar.expected_ip_addresses matches.
    """
    my_ips = CONF.zaqar.expected_ip_addresses
    if '/' not in ip_or_cidr:
        # Plain IP address: simple string equality against our addresses.
        return ip_or_cidr in my_ips
    net = netaddr.IPNetwork(ip_or_cidr)
    return any(netaddr.IPAddress(my_ip) in net for my_ip in my_ips)
def mount_share(export_location, access_to):
    """Mount an NFS share locally when the access rule targets this host.

    The share is mounted under CONF.zaqar.mount_dir in a directory named
    after the last path component of the export location.  Nothing happens
    if the rule does not affect this host or the share is already mounted.
    """
    data = {
        'mount_point': os.path.join(CONF.zaqar.mount_dir,
                                    export_location.split('/')[-1]),
        'export_location': export_location,
    }
    if (rule_affects_me(access_to) and
            not is_share_mounted(data['mount_point'])):
        # Bug fix: the '% data' interpolation was missing, so the raw
        # template string was printed instead of the actual paths.
        print_with_time(
            "Mounting '%(export_location)s' share to %(mount_point)s." % data)
        execute('sudo mkdir -p %(mount_point)s' % data)
        stdout, stderr = execute(
            'sudo mount.nfs %(export_location)s %(mount_point)s' % data)
        if stderr:
            print_with_time("Mount operation failed.")
        else:
            print_with_time("Mount operation went OK.")
def unmount_share(export_location, access_to):
    """Unmount a share when the (deny) access rule targets this host."""
    if rule_affects_me(access_to) and is_share_mounted(export_location):
        # Bug fix: the interpolation of the export location was missing, so
        # the raw template string was printed.
        print_with_time("Unmounting '%(export_location)s' share." %
                        {'export_location': export_location})
        stdout, stderr = execute('sudo umount %s' % export_location)
        if stderr:
            print_with_time("Unmount operation failed.")
        else:
            print_with_time("Unmount operation went OK.")
def main():
    """Entry point: init config, install SIGINT handler, consume forever."""
    # Make sibling modules importable regardless of the CWD.
    sys.path.append(os.path.join(os.path.dirname(__file__)))

    # Init configuration
    CONF(sys.argv[1:], project="manila_notifier", version=1.0)
    CONF.register_opts(opts, group="zaqar")

    # Imported after CONF is initialized: the wrapper reads config and
    # creates the Zaqar client at import time.
    import zaqarclientwrapper

    # Handle SIGINT
    signal.signal(signal.SIGINT, signal_handler)

    # Run consumer
    print_with_time("Consumer was successfully run.")
    while True:
        messages = pop_zaqar_messages(
            zaqarclientwrapper.ZAQARCLIENT, CONF.zaqar.zaqar_queues)
        if messages:
            print_with_time("Got following messages:")
            for raw_message in messages:
                parsed = parse_str_to_dict(raw_message)
                print_pretty_dict(parsed)
                handle_message(parsed)
        else:
            print_with_time("No new messages in '%s' queue[s] "
                            "found." % ','.join(CONF.zaqar.zaqar_queues))
        time.sleep(CONF.zaqar.consume_interval)
# Run the consumer only when executed as a script.
if __name__ == '__main__':
    main()
| 2.1875 | 2 |
quapy/data/preprocessing.py | valgur/QuaPy | 0 | 12761672 | <reponame>valgur/QuaPy
import numpy as np
from scipy.sparse import spmatrix
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import quapy as qp
from quapy.data.base import Dataset
from quapy.util import map_parallel
from .base import LabelledCollection
def text2tfidf(dataset:Dataset, min_df=3, sublinear_tf=True, inplace=False, **kwargs):
    """
    Transforms a :class:`quapy.data.base.Dataset` of textual instances into a :class:`quapy.data.base.Dataset` of
    tfidf weighted sparse vectors

    :param dataset: a :class:`quapy.data.base.Dataset` where the instances of training and test collections are
        lists of str
    :param min_df: minimum number of occurrences for a word to be considered as part of the vocabulary (default 3)
    :param sublinear_tf: whether or not to apply the log scalling to the tf counters (default True)
    :param inplace: whether or not to apply the transformation inplace (True), or to a new copy (False, default)
    :param kwargs: the rest of parameters of the transformation (as for sklearn's TfidfVectorizer)
    :return: a new :class:`quapy.data.base.Dataset` in `csr_matrix` format (if inplace=False) or a reference to the
        current Dataset (if inplace=True) where the instances are stored in a `csr_matrix` of real-valued tfidf scores
    """
    __check_type(dataset.training.instances, np.ndarray, str)
    __check_type(dataset.test.instances, np.ndarray, str)

    vectorizer = TfidfVectorizer(min_df=min_df, sublinear_tf=sublinear_tf, **kwargs)
    train_X = vectorizer.fit_transform(dataset.training.instances)
    test_X = vectorizer.transform(dataset.test.instances)

    if not inplace:
        # Build fresh labelled collections over copies of the labels.
        training = LabelledCollection(train_X, dataset.training.labels.copy(), dataset.classes_)
        test = LabelledCollection(test_X, dataset.test.labels.copy(), dataset.classes_)
        return Dataset(training, test, vectorizer.vocabulary_)

    dataset.training = LabelledCollection(train_X, dataset.training.labels, dataset.classes_)
    dataset.test = LabelledCollection(test_X, dataset.test.labels, dataset.classes_)
    dataset.vocabulary = vectorizer.vocabulary_
    return dataset
def reduce_columns(dataset: Dataset, min_df=5, inplace=False):
    """
    Reduces the dimensionality of sparse training/test instances by removing
    the columns of words which are not present in at least `min_df` instances
    of the *training* set.

    :param dataset: a :class:`quapy.data.base.Dataset` in which instances are represented in sparse format (any
        subtype of scipy.sparse.spmatrix)
    :param min_df: integer, minimum number of instances below which the columns are removed
    :param inplace: whether or not to apply the transformation inplace (True), or to a new copy (False, default)
    :return: a new :class:`quapy.data.base.Dataset` (if inplace=False) or a reference to the current one
        (inplace=True) where infrequent-term dimensions have been removed
    """
    __check_type(dataset.training.instances, spmatrix)
    __check_type(dataset.test.instances, spmatrix)
    assert dataset.training.instances.shape[1] == dataset.test.instances.shape[1], 'unaligned vector spaces'

    # Column selection is decided on the training matrix and applied to both.
    occurrences = np.asarray((dataset.training.instances > 0).sum(axis=0)).flatten()
    keep = occurrences >= min_df
    Xtr = dataset.training.instances[:, keep]
    Xte = dataset.test.instances[:, keep]

    if not inplace:
        training = LabelledCollection(Xtr, dataset.training.labels.copy(), dataset.classes_)
        test = LabelledCollection(Xte, dataset.test.labels.copy(), dataset.classes_)
        return Dataset(training, test)

    dataset.training.instances = Xtr
    dataset.test.instances = Xte
    return dataset
def standardize(dataset: Dataset, inplace=False):
    """
    Standardizes the real-valued columns of a :class:`quapy.data.base.Dataset`.
    Standardization, aka z-scoring, of a variable `X` comes down to subtracting the average and normalizing by the
    standard deviation.

    :param dataset: a :class:`quapy.data.base.Dataset` object
    :param inplace: set to True if the transformation is to be applied inplace, or to False (default) if a new
        :class:`quapy.data.base.Dataset` is to be returned
    :return: the standardized Dataset
    """
    s = StandardScaler(copy=not inplace)
    training = s.fit_transform(dataset.training.instances)
    test = s.transform(dataset.test.instances)
    if inplace:
        return dataset
    # Consistency fix: like text2tfidf/reduce_columns/index, wrap the
    # transformed matrices in LabelledCollections; previously the raw arrays
    # were handed to Dataset, which expects LabelledCollection members.
    training = LabelledCollection(training, dataset.training.labels.copy(), dataset.classes_)
    test = LabelledCollection(test, dataset.test.labels.copy(), dataset.classes_)
    return Dataset(training, test, dataset.vocabulary, dataset.name)
def index(dataset: Dataset, min_df=5, inplace=False, **kwargs):
    """
    Indexes the tokens of a textual :class:`quapy.data.base.Dataset` of string documents.
    To index a document means to replace each different token by a unique numerical index.
    Rare words (i.e., words occurring less than `min_df` times) are replaced by a special token `UNK`

    :param dataset: a :class:`quapy.data.base.Dataset` object where the instances of training and test documents
        are lists of str
    :param min_df: minimum number of occurrences below which the term is replaced by a `UNK` index
    :param inplace: whether or not to apply the transformation inplace (True), or to a new copy (False, default)
    :param kwargs: the rest of parameters of the transformation (as for sklearn's CountVectorizer)
    :return: a new :class:`quapy.data.base.Dataset` (if inplace=False) or a reference to the current
        :class:`quapy.data.base.Dataset` (inplace=True) consisting of lists of integer values representing indices.
    """
    __check_type(dataset.training.instances, np.ndarray, str)
    __check_type(dataset.test.instances, np.ndarray, str)

    indexer = IndexTransformer(min_df=min_df, **kwargs)
    train_idx = indexer.fit_transform(dataset.training.instances)
    test_idx = indexer.transform(dataset.test.instances)

    if not inplace:
        training = LabelledCollection(train_idx, dataset.training.labels.copy(), dataset.classes_)
        test = LabelledCollection(test_idx, dataset.test.labels.copy(), dataset.classes_)
        return Dataset(training, test, indexer.vocabulary_)

    dataset.training = LabelledCollection(train_idx, dataset.training.labels, dataset.classes_)
    dataset.test = LabelledCollection(test_idx, dataset.test.labels, dataset.classes_)
    dataset.vocabulary = indexer.vocabulary_
    return dataset
def __check_type(container, container_type=None, element_type=None):
    """Assert that *container* (and optionally its first element) has the expected type.

    :param container: the object to check
    :param container_type: expected type of the container itself (skipped if None)
    :param element_type: expected type of the container's first element (skipped if None)
    """
    if container_type:
        assert isinstance(container, container_type), \
            f'unexpected type of container (expected {container_type}, found {type(container)})'
    if element_type:
        # Bug fix: the message previously reported the *container* type and
        # expectation instead of the element's.
        assert isinstance(container[0], element_type), \
            f'unexpected type of element (expected {element_type}, found {type(container[0])})'
class IndexTransformer:
    """
    Sklearn-style transformer that indexes text as numerical ids for the tokens it
    contains, using sklearn's CountVectorizer to build the vocabulary and analyzer.

    :param kwargs: keyworded arguments forwarded to CountVectorizer
    """

    def __init__(self, **kwargs):
        self.vect = CountVectorizer(**kwargs)
        self.unk = -1  # a valid index is assigned after fit
        self.pad = -2  # a valid index is assigned after fit

    def fit(self, X):
        """
        Fits the transformer, i.e., decides on the vocabulary, given a list of strings.

        :param X: a list of strings
        :return: self
        """
        self.vect.fit(X)
        self.analyzer = self.vect.build_analyzer()
        self.vocabulary_ = self.vect.vocabulary_
        self.unk = self.add_word(qp.environ['UNK_TOKEN'], qp.environ['UNK_INDEX'])
        self.pad = self.add_word(qp.environ['PAD_TOKEN'], qp.environ['PAD_INDEX'])
        return self

    def transform(self, X, n_jobs=-1):
        """
        Transforms the strings in `X` as lists of numerical ids

        :param X: a list of strings
        :param n_jobs: the number of parallel workers to carry out this task
        :return: a `np.ndarray` of numerical ids
        """
        assert self.unk != -1, 'transform called before fit'
        indexed = map_parallel(func=self._index, args=X, n_jobs=n_jobs)
        return np.asarray(indexed)

    def _index(self, documents):
        vocab = self.vocabulary_.copy()
        # Bug fix: dicts have no `prevalence` method; unknown tokens must
        # fall back to the UNK index via dict.get.
        return [[vocab.get(word, self.unk) for word in self.analyzer(doc)]
                for doc in tqdm(documents, 'indexing')]

    def fit_transform(self, X, n_jobs=-1):
        """
        Fits the transform on `X` and transforms it.

        :param X: a list of strings
        :param n_jobs: the number of parallel workers to carry out this task
        :return: a `np.ndarray` of numerical ids
        """
        return self.fit(X).transform(X, n_jobs=n_jobs)

    def vocabulary_size(self):
        """
        Gets the length of the vocabulary according to which the document tokens have been indexed

        :return: integer
        """
        return len(self.vocabulary_)

    def add_word(self, word, id=None, nogaps=True):
        """
        Adds a new token (regardless of whether it has been found in the text or not), with dedicated id.
        Useful to define special tokens for codifying unknown words, or padding tokens.

        :param word: string, surface form of the token
        :param id: integer, numerical value to assign to the token (leave as None for indicating the next valid id,
            default)
        :param nogaps: if set to True (default) asserts that the id indicated leads to no numerical gaps with
            precedent ids stored so far
        :return: integer, the numerical id for the new token
        """
        if word in self.vocabulary_:
            raise ValueError(f'word {word} already in dictionary')
        if id is None:
            # add the word with the next id
            self.vocabulary_[word] = len(self.vocabulary_)
        else:
            id2word = {id_: word_ for word_, id_ in self.vocabulary_.items()}
            if id in id2word:
                # The id is taken: give it to the new word and re-add the old
                # word with the next free id.
                old_word = id2word[id]
                self.vocabulary_[word] = id
                del self.vocabulary_[old_word]
                self.add_word(old_word)
            else:
                if nogaps and id > self.vocabulary_size() + 1:
                    raise ValueError(f'word {word} added with id {id}, while the current vocabulary size '
                                     f'is of {self.vocabulary_size()}, and id gaps are not allowed')
                # Bug fix: on this path the word was previously never stored,
                # so the final lookup raised a KeyError.
                self.vocabulary_[word] = id
        return self.vocabulary_[word]
| 2.5 | 2 |
zenodo_gitlab/objects.py | tuw-eeg/zenodo-gitlab | 0 | 12761673 | from enum import Enum
class ArchiveFormat(str, Enum):
    """Archive formats accepted for repository snapshots.

    Mixing in ``str`` makes members compare equal to their plain string
    values, e.g. ``ArchiveFormat.ZIP == 'zip'``.
    """

    ZIP = 'zip'
    TAR_GZ = 'tar.gz'
    TAR_BZ2 = 'tar.bz2'
    TAR = 'tar'
| 2.78125 | 3 |
setup.py | jpereiran/chana | 0 | 12761674 | <filename>setup.py
from setuptools import setup, find_packages
from codecs import open
from os import path
# Data-file globs bundled inside the `chana` package.
files = ["files/*"]

setup(
    # Distribution identity.
    name='chana',
    version='0.1.8.dev1',
    description='A module of NLP tools for the shipibo-konibo language',
    url='https://github.com/jpereiran/chana',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    # Trove classifiers: maturity, license and supported Python versions.
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='nlp shipibo development',
    packages=['chana'],
    # Ship the data files listed above together with the package.
    package_data={'chana': files},
    include_package_data=True,
    # Extra project links rendered on PyPI.
    project_urls={
        'Main Page': 'https://chana.inf.pucp.edu.pe',
    },
)
tools/process_raw_output.py | TRex22/picam | 1 | 12761675 | <filename>tools/process_raw_output.py
# TODO: Commandline inputs
# Converts JPEGs with bayer EXIFdata into DNGs with camera profile applied
VERSION = "0.0.2"
import sys
sys.path.insert(1, '../src/')
sys.path.insert(1, 'src/')
import time
import glob
import re
from io import BytesIO
from pydng.core import RPICAM2DNG
# Modules
import document_handler
# Constants
# Input folder of bayer-JPEGs and output root for the generated DNGs.
# original_files_path = "/mnt/g/tmp/original"
original_files_path = "/mnt/g/tmp/784 Waxing Gibbons/raw/original"
# raw_file_save_path = "/mnt/g/tmp/raw"
raw_file_save_path = "/mnt/g/tmp/784 Waxing Gibbons/raw/"
# Extension given to converted RAW output files.
filetype = '.dng'

# TODO: List them all
# Colour profiles:
# colour_profile_path = "../Colour_Profiles/imx477/Raspberry Pi High Quality Camera Lumariver 2860k-5960k Neutral Look.json"
# colour_profile_path = "../Colour_Profiles/imx477/Raspberry Pi High Quality Camera Lumariver 2860k-5960k Skin+Sky Look.json"
# colour_profile_path = "../Colour_Profiles/imx477/PyDNG_profile"

# Paths and short display names of the available DNG camera profiles;
# "selected_colour_profile" picks which one (or "all") is applied.
config = {
    "neutral_colour_profile": "../Colour_Profiles/imx477/Raspberry Pi High Quality Camera Lumariver 2860k-5960k Neutral Look.json",
    "neutral_colour_profile_name": "neutral_colour",
    "skin_tone_colour_profile": "../Colour_Profiles/imx477/Raspberry Pi High Quality Camera Lumariver 2860k-5960k Skin+Sky Look.json",
    "skin_tone_colour_profile_name": "skin_tone",
    "pydng_colour_profile": "../Colour_Profiles/imx477/PyDNG_profile.json",
    "pydng_colour_profile_name": "pydng",
    "selected_colour_profile": "neutral_colour_profile" #"all" # can be all or neutral_colour_profile, skin_tone_colour_profile, pydng_colour_profile ... others to be added later
}
def generate_filename(original_files_path, raw_file_save_path, f, config, colour_profile_name):
    """Build (and ensure the directory for) the output DNG path of file *f*.

    The original path prefix is swapped for `<raw_file_save_path>/<profile>`
    and the .jpg/.jpeg extension is replaced by the DNG filetype.
    """
    raw_file_save_path_with_profile = f'{raw_file_save_path}/{colour_profile_name}'
    document_handler.detect_or_create_folder(raw_file_save_path_with_profile)

    # Bug fix: the previous implementation used re.sub with unescaped
    # patterns ('.' matched any character, and the path prefix was treated
    # as a regex), so special characters in paths could corrupt the result.
    filename = f.replace(original_files_path, raw_file_save_path_with_profile, 1)
    filename = re.sub(r'\.jpe?g$', filetype, filename)
    return filename
def convert_file(f, filename, config, colour_profile_name):
    """Convert one bayer-JPEG *f* into a DNG at *filename* with the profile.

    Prints the source path and, on success, the elapsed time; failures are
    reported and the file is skipped.
    """
    print(f'{f} -> (unknown)', end='')
    start_time = time.time()

    json_colour_profile = document_handler.load_colour_profile(
        {"colour_profile_path": config[colour_profile_name]})

    try:
        with open(f, 'rb') as original_f_stream:
            stream = BytesIO(original_f_stream.read())
        try:
            output = RPICAM2DNG().convert(stream, json_camera_profile=json_colour_profile)
        finally:
            # Always release the in-memory buffer, even if conversion fails.
            stream.close()
        with open(filename, 'wb') as raw_f_stream:
            raw_f_stream.write(output)
        # Completed file conversion
        print(f' ({(time.time() - start_time)} seconds)')
    except Exception:
        # Bug fix: previously a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Best-effort skip is intentional.
        print(' ... failed, skipping file.')
print(f'Starting to convert original images to RAW with colour profile (Version: {VERSION})...')
print(f'original_files_path: {original_files_path}')
print(f'raw_file_save_path: {raw_file_save_path}\n')

document_handler.detect_or_create_folder(original_files_path)
document_handler.detect_or_create_folder(raw_file_save_path)

original_files = glob.glob(f'{original_files_path}/*')
print(f'{len(original_files)} files to be processed.\n')

global_start_time = time.time()
colour_profile_name = config["selected_colour_profile"]

if colour_profile_name in ('all', 'ALL'):
    print("Converting files to all colour profiles...")
    # Deduplication: the previous version repeated this identical loop once
    # per profile; the order of profiles is preserved.
    for profile in ("neutral_colour_profile", "skin_tone_colour_profile",
                    "pydng_colour_profile"):
        profile_start_time = time.time()
        for f in original_files:
            filename = generate_filename(original_files_path, raw_file_save_path,
                                         f, config, profile)
            convert_file(f, filename, config, profile)
        print(f'--- {(time.time() - profile_start_time)} total profile seconds ---\n')
else:
    print(f'Converting files to {colour_profile_name}...')
    for f in original_files:
        filename = generate_filename(original_files_path, raw_file_save_path,
                                     f, config, colour_profile_name)
        convert_file(f, filename, config, colour_profile_name)

total_time = (time.time() - global_start_time)
# Robustness: avoid ZeroDivisionError when no input files were found.
average_time = total_time / len(original_files) if original_files else 0.0
print(f'\n--- {total_time} total seconds ---')
print(f'--- {average_time} average seconds ---')
print('Much Success!')
| 2.375 | 2 |
2021/day-06/python/main.py | cdrowley/advent-of-code | 0 | 12761676 | <reponame>cdrowley/advent-of-code
def load_parse_data(path: str) -> list:
    """Read a comma-separated file of integers and return them as a list."""
    with open(path, mode='r', encoding="utf-8") as handle:
        return [int(token) for token in handle.read().split(',')]
def school_by_age(data: list) -> list:
    """Bucket individual fish ages (0-8) into a 9-element count list."""
    return [data.count(age) for age in range(9)]
def model_school_growth(school_by_age: list, days: int = 80) -> list:
    """Simulate lanternfish growth over *days* on an age-bucketed school.

    The list is mutated in place: each day, fish at age 0 spawn (new fish
    enter at age 8) and the spawners reset to age 6.
    """
    for _ in range(days):
        spawning = school_by_age.pop(0)
        school_by_age.append(spawning)   # offspring enter at age 8
        school_by_age[6] += spawning     # parents restart their 7-day cycle
    return school_by_age
def puzzle_one(data: list) -> int:
    """Total number of lanternfish after the default 80-day simulation."""
    population = model_school_growth(school_by_age(data))
    return sum(population)
def puzzle_two(data: str) -> int:
    """Total number of lanternfish after a 256-day simulation."""
    population = model_school_growth(school_by_age(data), days=256)
    return sum(population)
if __name__ == "__main__":
    # Run both puzzles against the shared input file.
    for label, solver in (('Puzzle One', puzzle_one), ('Puzzle Two', puzzle_two)):
        print('--------------', label, '--------------', end='\n')
        print(solver(load_parse_data("../puzzle_input.txt")))
| 3.625 | 4 |
bestflags/task.py | jingpad-bsp/android_external_toolchain-utils | 0 | 12761677 | <gh_stars>0
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A reproducing entity.
Part of the Chrome build flags optimization.
The Task class is used by different modules. Each module fills in the
corresponding information into a Task instance. Class Task contains the bit set
representing the flags selection. The builder module is responsible for filling
the image and the checksum field of a Task. The executor module will put the
execution output to the execution field.
"""
__author__ = '<EMAIL> (<NAME>)'
import os
import subprocess
import sys
from uuid import uuid4
# Stage identifiers used throughout this module.
BUILD_STAGE = 1
TEST_STAGE = 2

# Message indicating that the build or test failed.
ERROR_STRING = 'error'

# The maximum number of tries a build can have. Some compilations may fail due
# to unexpected environment circumstance. This variable defines how many tries
# the build should attempt before giving up.
BUILD_TRIES = 3

# The maximum number of tries a test can have. Some tests may fail due to
# unexpected environment circumstance. This variable defines how many tries the
# test should attempt before giving up.
TEST_TRIES = 3
def _CreateDirectory(file_name):
  """Ensure that the directory containing file_name exists."""
  parent = os.path.dirname(file_name)
  if not os.path.exists(parent):
    os.makedirs(parent)
class Task(object):
  """A single reproducing entity.

  A single test of performance with a particular set of flags. It records the
  flag set, the image, the check sum of the image and the cost.
  """

  # The command that will be used in the build stage to compile the tasks.
  BUILD_COMMAND = None
  # The command that will be used in the test stage to test the tasks.
  TEST_COMMAND = None
  # The directory to log the compilation and test results.
  LOG_DIRECTORY = None
@staticmethod
def InitLogCommand(build_command, test_command, log_directory):
"""Set up the build and test command for the task and the log directory.
This framework is generic. It lets the client specify application specific
compile and test methods by passing different build_command and
test_command.
Args:
build_command: The command that will be used in the build stage to compile
this task.
test_command: The command that will be used in the test stage to test this
task.
log_directory: The directory to log the compilation and test results.
"""
Task.BUILD_COMMAND = build_command
Task.TEST_COMMAND = test_command
Task.LOG_DIRECTORY = log_directory
  def __init__(self, flag_set):
    """Set up the optimization flag selection for this task.

    Args:
      flag_set: The optimization flag set that is encapsulated by this task.
    """

    self._flag_set = flag_set

    # A unique identifier that distinguishes this task from other tasks; also
    # used so concurrent builds/tests can write to distinct scratch files.
    self._task_identifier = uuid4()

    self._log_path = (Task.LOG_DIRECTORY, self._task_identifier)

    # Initiate the hash value. The hash value is used so as not to recompute
    # it every time the hash method is called (computed lazily in __hash__).
    self._hash_value = None

    # None indicates that the corresponding stage has not been run yet.
    self._build_cost = None
    self._exe_cost = None
    self._checksum = None
    self._image = None
    self._file_length = None
    self._text_length = None
def __eq__(self, other):
"""Test whether two tasks are equal.
Two tasks are equal if their flag_set are equal.
Args:
other: The other task with which this task is tested equality.
Returns:
True if the encapsulated flag sets are equal.
"""
if isinstance(other, Task):
return self.GetFlags() == other.GetFlags()
return False
  def __hash__(self):
    """Hash of the underlying flag set (computed once, then cached)."""
    if self._hash_value is None:
      # Cache the hash value of the flags, so as not to recompute them.
      self._hash_value = hash(self._flag_set)
    return self._hash_value
def GetIdentifier(self, stage):
"""Get the identifier of the task in the stage.
The flag set uniquely identifies a task in the build stage. The checksum of
the image of the task uniquely identifies the task in the test stage.
Args:
stage: The stage (build/test) in which this method is called.
Returns:
Return the flag set in build stage and return the checksum in test stage.
"""
# Define the dictionary for different stage function lookup.
get_identifier_functions = {BUILD_STAGE: self.FormattedFlags,
TEST_STAGE: self.__GetCheckSum}
assert stage in get_identifier_functions
return get_identifier_functions[stage]()
def GetResult(self, stage):
"""Get the performance results of the task in the stage.
Args:
stage: The stage (build/test) in which this method is called.
Returns:
Performance results.
"""
# Define the dictionary for different stage function lookup.
get_result_functions = {BUILD_STAGE: self.__GetBuildResult,
TEST_STAGE: self.GetTestResult}
assert stage in get_result_functions
return get_result_functions[stage]()
def SetResult(self, stage, result):
"""Set the performance results of the task in the stage.
This method is called by the pipeling_worker to set the results for
duplicated tasks.
Args:
stage: The stage (build/test) in which this method is called.
result: The performance results of the stage.
"""
# Define the dictionary for different stage function lookup.
set_result_functions = {BUILD_STAGE: self.__SetBuildResult,
TEST_STAGE: self.__SetTestResult}
assert stage in set_result_functions
set_result_functions[stage](result)
def Done(self, stage):
"""Check whether the stage is done.
Args:
stage: The stage to be checked, build or test.
Returns:
True if the stage is done.
"""
# Define the dictionary for different result string lookup.
done_string = {BUILD_STAGE: self._build_cost, TEST_STAGE: self._exe_cost}
assert stage in done_string
return done_string[stage] is not None
def Work(self, stage):
"""Perform the task.
Args:
stage: The stage in which the task is performed, compile or test.
"""
# Define the dictionary for different stage function lookup.
work_functions = {BUILD_STAGE: self.__Compile, TEST_STAGE: self.__Test}
assert stage in work_functions
work_functions[stage]()
  def FormattedFlags(self):
    """Format the optimization flag set of this task.

    Returns:
      The formatted optimization flag set that is encapsulated by this task,
      as a string.
    """
    return str(self._flag_set.FormattedForUse())
  def GetFlags(self):
    """Get the optimization flag set of this task.

    Returns:
      The optimization flag set that is encapsulated by this task.
    """
    return self._flag_set
  def __GetCheckSum(self):
    """Get the compilation image checksum of this task.

    Returns:
      The compilation image checksum of this task.
    """
    # The checksum should be computed before this method is called.
    assert self._checksum is not None
    return self._checksum
def __Compile(self):
"""Run a compile.
This method compile an image using the present flags, get the image,
test the existent of the image and gathers monitoring information, and sets
the internal cost (fitness) for this set of flags.
"""
# Format the flags as a string as input to compile command. The unique
# identifier is passed to the compile command. If concurrent processes are
# used to compile different tasks, these processes can use the identifier to
# write to different file.
flags = self._flag_set.FormattedForUse()
command = '%s %s %s' % (Task.BUILD_COMMAND, ' '.join(flags),
self._task_identifier)
# Try BUILD_TRIES number of times before confirming that the build fails.
for _ in range(BUILD_TRIES):
try:
# Execute the command and get the execution status/results.
p = subprocess.Popen(command.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
if out:
out = out.strip()
if out != ERROR_STRING:
# Each build results contains the checksum of the result image, the
# performance cost of the build, the compilation image, the length
# of the build, and the length of the text section of the build.
(checksum, cost, image, file_length, text_length) = out.split()
# Build successfully.
break
# Build failed.
cost = ERROR_STRING
except _:
# If there is exception getting the cost information of the build, the
# build failed.
cost = ERROR_STRING
# Convert the build cost from String to integer. The build cost is used to
# compare a task with another task. Set the build cost of the failing task
# to the max integer. The for loop will keep trying until either there is a
# success or BUILD_TRIES number of tries have been conducted.
self._build_cost = sys.maxint if cost == ERROR_STRING else float(cost)
self._checksum = checksum
self._file_length = file_length
self._text_length = text_length
self._image = image
self.__LogBuildCost(err)
def __Test(self):
    """__Test the task against benchmark(s) using the input test command.

    Requires a prior successful call to __Compile (self._image set). Runs
    Task.TEST_COMMAND up to TEST_TRIES times; on success the execution cost
    is parsed from stdout, otherwise it is set to sys.maxint (Python 2).
    """
    # Ensure that the task is compiled before being tested.
    assert self._image is not None
    # If the task does not compile, no need to test.
    if self._image == ERROR_STRING:
        self._exe_cost = ERROR_STRING
        return
    # The unique identifier is passed to the test command. If concurrent
    # processes are used to compile different tasks, these processes can use the
    # identifier to write to different file.
    command = '%s %s %s' % (Task.TEST_COMMAND, self._image,
                            self._task_identifier)
    # Try TEST_TRIES number of times before confirming that the build fails.
    for _ in range(TEST_TRIES):
        try:
            p = subprocess.Popen(command.split(),
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            (out, err) = p.communicate()
            if out:
                out = out.strip()
                if out != ERROR_STRING:
                    # The test results contains the performance cost of the test.
                    cost = out
                    # Test successfully.
                    break
            # Test failed.
            cost = ERROR_STRING
        except _:
            # NOTE(review): 'except _:' only catches something if '_' is bound
            # to an exception class; as written it raises NameError instead --
            # confirm the intended exception type.
            # If there is exception getting the cost information of the test, the
            # test failed. The for loop will keep trying until either there is a
            # success or TEST_TRIES number of tries have been conducted.
            cost = ERROR_STRING
    self._exe_cost = sys.maxint if (cost == ERROR_STRING) else float(cost)
    self.__LogTestCost(err)
def __SetBuildResult(self, (checksum, build_cost, image, file_length,
                            text_length)):
    # Restore a build result previously produced by __GetBuildResult.
    # (Python 2 tuple-parameter unpacking; not valid Python 3 syntax.)
    self._checksum = checksum
    self._build_cost = build_cost
    self._image = image
    self._file_length = file_length
    self._text_length = text_length

def __GetBuildResult(self):
    # Snapshot of all build outputs, mirroring __SetBuildResult's input tuple.
    return (self._checksum, self._build_cost, self._image, self._file_length,
            self._text_length)

def GetTestResult(self):
    # Execution cost measured by __Test; sys.maxint when the test failed.
    return self._exe_cost

def __SetTestResult(self, exe_cost):
    # Inject a previously-measured execution cost.
    self._exe_cost = exe_cost
def LogSteeringCost(self):
    """Log the performance results for the task.

    This method is called by the steering stage and this method writes the
    results out to a file. The results include the build and the test results.
    """
    # NOTE(review): the two '%s' placeholders imply self._log_path is a
    # 2-tuple -- confirm against the constructor.
    steering_log = '%s/%s/steering.txt' % self._log_path
    _CreateDirectory(steering_log)
    with open(steering_log, 'w') as out_file:
        # Include the build and the test results.
        steering_result = (self._flag_set, self._checksum, self._build_cost,
                           self._image, self._file_length, self._text_length,
                           self._exe_cost)
        # Write out the result in the comma-separated format (CSV).
        out_file.write('%s,%s,%s,%s,%s,%s,%s\n' % steering_result)
def __LogBuildCost(self, log):
    """Log the build results for the task.

    The build results include the compilation time of the build, the result
    image, the checksum, the file length and the text length of the image.
    The file length of the image includes the length of the file of the image.
    The text length only includes the length of the text section of the image.

    Args:
        log: The build log of this task.
    """
    # Structured (CSV) summary of what the build produced.
    build_result_log = '%s/%s/build.txt' % self._log_path
    _CreateDirectory(build_result_log)
    with open(build_result_log, 'w') as out_file:
        build_result = (self._flag_set, self._build_cost, self._image,
                        self._checksum, self._file_length, self._text_length)
        # Write out the result in the comma-separated format (CSV).
        out_file.write('%s,%s,%s,%s,%s,%s\n' % build_result)
    # The build information about running the build.
    build_run_log = '%s/%s/build_log.txt' % self._log_path
    _CreateDirectory(build_run_log)
    with open(build_run_log, 'w') as out_log_file:
        # Write out the execution information.
        out_log_file.write('%s' % log)
def __LogTestCost(self, log):
    """Log the test results for the task.

    The test results include the runtime execution time of the test.

    Args:
        log: The test log of this task.
    """
    # Structured (CSV) summary of the measured execution cost.
    test_log = '%s/%s/test.txt' % self._log_path
    _CreateDirectory(test_log)
    with open(test_log, 'w') as out_file:
        test_result = (self._flag_set, self._checksum, self._exe_cost)
        # Write out the result in the comma-separated format (CSV).
        out_file.write('%s,%s,%s\n' % test_result)
    # The execution information about running the test.
    test_run_log = '%s/%s/test_log.txt' % self._log_path
    _CreateDirectory(test_run_log)
    with open(test_run_log, 'w') as out_log_file:
        # Append the test log information.
        out_log_file.write('%s' % log)
def IsImproved(self, other):
    """Compare the current task with another task.

    Args:
        other: The other task against which the current task is compared.

    Returns:
        True if this task has improvement upon the other task.
    """
    # The execution costs must have been initiated.
    assert self._exe_cost is not None
    assert other.GetTestResult() is not None
    # Lower execution cost means a better-performing build.
    return self._exe_cost < other.GetTestResult()
| 2.625 | 3 |
Samples/TipCalculator/Python_Export/TipMain.py | Embarcadero/Delphi4PythonExporter | 18 | 12761678 | <reponame>Embarcadero/Delphi4PythonExporter<filename>Samples/TipCalculator/Python_Export/TipMain.py<gh_stars>10-100
import os
from delphifmx import *
class Main_Window(Form):
    """Tip-calculator main window; its layout is read from TipMain.pyfmx."""

    # Designer-managed components, in declaration order; each starts as None
    # and is populated when LoadProps reads the .pyfmx layout file.
    _COMPONENTS = (
        'styleRuby', 'styleLight', 'ListBox1', 'ListBoxItem1', 'editTotal',
        'Label6', 'ListBoxItem2', 'Label7', 'editTip', 'ListBoxItem3',
        'trackTip', 'ListBoxItem4', 'editPeople', 'Label3', 'ListBoxItem5',
        'trackPeople', 'ListBoxItem6', 'Layout2', 'ListBoxItem7',
        'per_person_share', 'Label1', 'ListBoxItem8', 'bill_plus_tip',
        'Label5', 'ListBoxItem9', 'gold_style_btn', 'ruby_style_btn',
        'light_style_btn', 'default_style', 'styleGold',
    )

    def __init__(self, owner):
        for component_name in self._COMPONENTS:
            setattr(self, component_name, None)
        layout_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   "TipMain.pyfmx")
        self.LoadProps(layout_path)

    def editTipChange(self, Sender):
        """Event handler stub; intentionally does nothing."""

    def trackTipChange(self, Sender):
        """Event handler stub; intentionally does nothing."""

    def editPeopleChange(self, Sender):
        """Event handler stub; intentionally does nothing."""

    def trackPeopleChange(self, Sender):
        """Event handler stub; intentionally does nothing."""

    def gold_style_btnClick(self, Sender):
        """Event handler stub; intentionally does nothing."""

    def ruby_style_btnClick(self, Sender):
        """Event handler stub; intentionally does nothing."""

    def light_style_btnClick(self, Sender):
        """Event handler stub; intentionally does nothing."""

    def default_styleClick(self, Sender):
        """Event handler stub; intentionally does nothing."""
evaluate_streams.py | adityac-view/two-stream-action-recognition | 0 | 12761679 | """
********************************
* Created by mohammed-alaa *
********************************
Evaluate motion and spatial streams
"""
import frame_dataloader
from evaluation import legacy_load_model, get_batch_size
from evaluation.evaluation import *
from utils.drive_manager import DriveManager
"""
Evaluate spatial stream
"""
# download: fetch the pre-trained spatial-stream weights from Google Drive.
drive_manager = DriveManager("spa-xception-adam-5e-06-imnet")
drive_manager.download_file('1djGzpxAYFvNX-UaQ7ONqDHGgnzc8clBK', "spatial.zip")
# load into ram: restore the Keras model with its custom loss/metric objects.
print("Spatial stream")
spatial_model_restored = legacy_load_model(filepath="spatial.h5", custom_objects={'sparse_categorical_cross_entropy_loss': sparse_categorical_cross_entropy_loss, "acc_top_1": acc_top_1, "acc_top_5": acc_top_5})
spatial_model_restored.summary()
# evaluate: input width/height are taken from the restored model's input shape;
# 19 frames are sampled per test video.
_, spatial_test_loader, test_video_level_label = frame_dataloader.SpatialDataLoader(
    num_workers=workers,
    width=int(spatial_model_restored.inputs[0].shape[1]), height=int(spatial_model_restored.inputs[0].shape[2])
    , use_multiprocessing=False, batch_size=get_batch_size(spatial_model_restored, spatial=True), testing_samples_per_video=19
).run()
video_level_loss, video_level_accuracy_1, video_level_accuracy_5, test_video_level_preds = eval_model(spatial_model_restored, spatial_test_loader, test_video_level_label, 19)
print("Spatial Model validation", "prec@1", video_level_accuracy_1, "prec@5", video_level_accuracy_5, "loss", video_level_loss)
"""
Evaluate motion stream
"""
# download: fetch the pre-trained motion-stream weights from Google Drive.
drive_manager = DriveManager("heavy-mot-xception-adam-1e-05-imnet")
drive_manager.download_file('1kvslNL8zmZYaHRmhgAM6-l_pNDDA0EKZ', "motion.zip")  # the id of the zip file contains my network
# load into ram
print("Motion stream")
motion_model_restored = legacy_load_model(filepath="motion.h5", custom_objects={'sparse_categorical_cross_entropy_loss': sparse_categorical_cross_entropy_loss, "acc_top_1": acc_top_1, "acc_top_5": acc_top_5})
motion_model_restored.summary()
# evaluate: same protocol as the spatial stream (19 samples per video).
_, motion_test_loader, test_video_level_label = frame_dataloader.MotionDataLoader(
    num_workers=workers,
    width=int(motion_model_restored.inputs[0].shape[1]), height=int(motion_model_restored.inputs[0].shape[2])
    , use_multiprocessing=False,
    batch_size=get_batch_size(motion_model_restored, spatial=True)
    , testing_samples_per_video=19).run()
video_level_loss, video_level_accuracy_1, video_level_accuracy_5, test_video_level_preds = eval_model(motion_model_restored, motion_test_loader, test_video_level_label, 19)
print("Motion Model validation", "prec@1", video_level_accuracy_1, "prec@5", video_level_accuracy_5, "loss", video_level_loss)
| 2.234375 | 2 |
python_exceptions_improved/debug_exception.py | sk-/python-exceptions-improved | 0 | 12761680 | <reponame>sk-/python-exceptions-improved
# Copyright 2013-2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import imp
import ast
import marshal
import difflib
import itertools
import re
import asm
class ModuleImporter(object):
    """
    Class that allows to patch modules.

    It is both a finder (find_module) and a loader (load_module).
    See PEP 302 (http://www.python.org/dev/peps/pep-0302/) for further details.
    """
    def __init__(self):
        # Register this importer as soon as it is constructed.
        self.install()

    def install(self):
        """Install this importer before all others."""
        if self not in sys.meta_path:
            sys.meta_path.insert(0, self)

    def uninstall(self):
        """Removes this importer from the global importers."""
        if self in sys.meta_path:
            sys.meta_path.remove(self)

    def get_module_from_package(self, name, file, file_path):
        """Load a package by delegating to its __init__.pyc or __init__.py."""
        if os.path.exists(os.path.join(file_path, '__init__.pyc')):
            return self.get_module_from_pyc(
                name, None, os.path.join(file_path, '__init__.pyc'))
        elif os.path.exists(os.path.join(file_path, '__init__.py')):
            return self.get_module_from_source(
                name, None, os.path.join(file_path, '__init__.py'))

    def get_module_from_source(self, name, file, file_path):
        """Parse and compile a .py file, then load it as a patched module."""
        try:
            if not file:
                file = open(file_path, 'U')
            code_tree = ast.parse(file.read())
            return self.get_module_from_code(
                name, compile(code_tree, file_path, 'exec'))
        finally:
            file.close()

    def get_module_from_pyc(self, name, file, file_path):
        """Unmarshal a .pyc file, then load it as a patched module."""
        try:
            if not file:
                file = open(file_path, 'rb')
            # Skip the .pyc header before the marshalled code object.
            file.read(8)
            return self.get_module_from_code(name, marshal.load(file))
        finally:
            file.close()

    def get_module_from_code(self, module_name, module_code):
        """Patch a code object via asm.patch_code and execute it as a module."""
        module_code = asm.patch_code(module_code)
        mod = sys.modules.setdefault(module_name, imp.new_module(module_name))
        # The following two fields are required by PEP 302
        mod.__file__ = module_code.co_filename
        mod.__loader__ = self
        is_package = os.path.basename(mod.__file__) in ('__init__.py',
                                                        '__init__.pyc')
        if is_package:
            mod.__path__ = [os.path.dirname(mod.__file__)]
        package = get_package(module_name, is_package)
        if package:
            mod.__package__ = package
        # Python 2 exec-in-namespace syntax.
        exec module_code in mod.__dict__
        return mod

    def get_module(self, name, file, file_path, description):
        """Dispatch to the right loader based on imp.find_module's description."""
        try:
            if description[2] == imp.PKG_DIRECTORY:
                return self.get_module_from_package(name, file, file_path)
            elif description[2] == imp.PY_SOURCE:
                return self.get_module_from_source(name, file, file_path)
            elif description[2] == imp.PY_COMPILED:
                return self.get_module_from_pyc(name, file, file_path)
        finally:
            if file:
                file.close()

    def find_module(self, module_name, path=None):  # pylint: disable=W0613
        """Returns self when the module registered is requested."""
        self.module_name = module_name
        if path:
            # Drop the leading components of the dotted name that already
            # match the tail of the search path, since imp.find_module only
            # accepts the final (relative) module name.
            path_component = path[0].split('/')[::-1]
            module_component = module_name.split('.')
            for i in xrange(min(len(module_component), len(path_component))):
                if module_component[i] != path_component[i]:
                    break
            else:
                i += 1
            module_name = '.'.join(module_component[i:])
        result = imp.find_module(module_name, path)
        if result[2][2] in (imp.PKG_DIRECTORY, imp.PY_SOURCE, imp.PY_COMPILED):
            self.result = result
            return self

    def load_module(self, module_name):
        """Loads the registered module."""
        if self.module_name == module_name and self.result:
            return self.get_module(module_name, *self.result)
        else:
            raise ImportError('Module not found')
def get_package(module_name, is_package):
    """Returns a string representing the package to which the file belongs.

    A package belongs to itself; a plain module belongs to its parent
    package (everything before the last dot, '' for a top-level module).
    """
    if is_package:
        return module_name
    # rpartition('.')[0] is '' when there is no dot at all.
    return module_name.rpartition('.')[0]
ATTRIBUTE_ERROR_MESSAGE_PATTERN = r"(')?(?P<type>[a-zA-Z0-9_]*)(')? (.*) has no attribute '(?P<attribute>[a-zA-Z0-9_]*)'"
ATTRIBUTE_ERROR_DELETE_MESSAGE_PATTERN = r"(?P<attribute>[a-zA-Z0-9_]*)"
NAME_ERROR_MESSAGE_PATTERN = r"global name '(?P<name>[a-zA-Z0-9_]*)' is not defined"
# TODO(skreft): Fix it for modules.
def name_to_class(class_name):
    """Search every loaded module for a class named *class_name*.

    Returns the first matching class found, or None when no module defines
    one. sys.modules iteration order is arbitrary, so the result is
    ambiguous when several modules define a class with the same name.
    """
    matches = []
    for m in sys.modules.values():
        if hasattr(m, class_name):
            class_type = getattr(m, class_name)
            # Only actual classes count; skip same-named functions/values.
            if isinstance(class_type, type):
                matches.append(class_type)
    if matches:
        return matches[0]
def is_similar_attribute(attribute, x):
    """Case- and underscore-insensitive fuzzy name match (ratio >= 0.75)."""
    # N.B. foo and fox are not similar according to this
    normalized_attribute = attribute.lower().replace('_', '')
    normalized_candidate = x.lower().replace('_', '')
    matcher = difflib.SequenceMatcher(a=normalized_attribute,
                                      b=normalized_candidate)
    return matcher.ratio() >= 0.75
def get_similar_attributes(type, attribute):
    """Lazily yield the attributes of *type* whose names resemble *attribute*."""
    for candidate in dir(type):
        if is_similar_attribute(attribute, candidate):
            yield candidate
def get_similar_variables(name, variables):
    """Lazily yield the names in *variables* that resemble *name*."""
    return (candidate for candidate in variables
            if is_similar_attribute(name, candidate))
def get_debug_vars(tb):
    """Extract the debug globals planted for the failing operation.

    Walks the traceback chain looking for the '_s_attr' (object being
    accessed) and '_s_index' (subscript/key) globals, removing both from
    the frame's globals so they do not leak into later error reports.

    Args:
        tb: A traceback object, or None.

    Returns:
        A tuple (attr, index, attr_set, index_set); the *_set booleans
        indicate whether the corresponding value was found.
    """
    attr = None
    index = None
    attr_set = False
    index_set = False
    while tb:
        frame_globals = tb.tb_frame.f_globals
        if '_s_attr' in frame_globals:
            attr = frame_globals['_s_attr']
            attr_set = True
            del frame_globals['_s_attr']
        if '_s_index' in frame_globals:
            index = frame_globals['_s_index']
            index_set = True
            # Bug fix: the original merely evaluated this global without
            # 'del', so '_s_index' leaked into subsequent error reports
            # (the '_s_attr' branch above does delete its global).
            del frame_globals['_s_index']
        if attr_set or index_set:
            return attr, index, attr_set, index_set
        tb = tb.tb_next
    return None, None, False, False
class KeyError_(KeyError):
    """KeyError variant whose str() renders its argument plainly.

    KeyError.__str__ repr()s its single argument; this subclass shows the
    bare value instead (or the full args tuple when there are several).
    """

    def __str__(self):
        args = self.args
        return str(args if len(args) > 1 else args[0])
def debug_exceptions(f):
    """Decorator that enriches exceptions raised by *f* with debug info.

    IndexError/KeyError get the offending object and index/key (from the
    '_s_attr'/'_s_index' globals planted by the patched bytecode);
    AttributeError and NameError additionally get "Did you mean ...?"
    suggestions. The exception is re-raised with the original traceback
    minus the wrapper frame (Python 2 three-expression raise syntax).
    """
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except:
            et, ei, tb = sys.exc_info()
            msg = str(ei)
            if isinstance(ei, IndexError):
                # Attach the indexed object, its length, and the bad index.
                attr, index, attr_set, index_set = get_debug_vars(tb.tb_next)
                if attr_set and index_set:
                    msg = msg + "\nDebug info:\n\tObject: %s\n\tObject len: %s\n\tIndex: %s" % (attr, len(attr), index)
            elif isinstance(ei, KeyError):
                # Attach the mapping and the missing key; swap in KeyError_
                # so the message is not repr-quoted by KeyError.__str__.
                attr, index, attr_set, index_set = get_debug_vars(tb.tb_next)
                if attr_set and index_set:
                    msg = msg + "\nDebug info:\n\tObject: %s\n\tKey: %s" % (attr, repr(index))
                et = KeyError_
                ei = KeyError_(msg)
            elif isinstance(ei, AttributeError):
                # Try to recover the owning type either from the error
                # message (get/set) or the planted debug globals (delete).
                match = re.match(ATTRIBUTE_ERROR_MESSAGE_PATTERN, msg)
                field_type = None
                if match:
                    field_type = name_to_class(match.group('type'))
                    attribute = match.group('attribute')
                else:
                    match = re.match(ATTRIBUTE_ERROR_DELETE_MESSAGE_PATTERN, msg)
                    if match:
                        attribute = match.group('attribute')
                attr, index, attr_set, index_set = get_debug_vars(tb.tb_next)
                if attr_set:
                    field_type = attr
                    debug_info = "\nDebug info:\n\tObject: %s\n\tType: %s\n\tAttributes: %s" % (repr(field_type), type(field_type), dir(field_type))
                elif field_type:
                    debug_info = "\nDebug info:\n\tType: %s\n\tAttributes: %s" % (field_type, dir(field_type))
                # Suggest similarly-named attributes of the resolved type.
                proposals = list(get_similar_attributes(field_type, attribute))
                if proposals:
                    msg += '. Did you mean %s?' % ', '.join(["'%s'" %a for a in proposals])
                msg = msg + debug_info
            elif isinstance(ei, NameError):
                # Suggest similarly-named locals/globals from the failing frame.
                match = re.match(NAME_ERROR_MESSAGE_PATTERN, msg)
                name = match.group('name')
                proposals = list(get_similar_variables(name, tb.tb_next.tb_frame.f_locals.keys() + tb.tb_next.tb_frame.f_globals.keys()))
                if proposals:
                    msg += '. Did you mean %s?' % ', '.join(["'%s'" %a for a in proposals])
            # Re-raise with the caller's traceback (Python 2 syntax).
            raise et(msg), None, tb.tb_next
    return wrapper
def decorate(f):
    """Decorator for functions that return a list of method names.

    After calling *f*, every method name it returned is looked up on the
    object passed as the second positional argument (args[1]) and wrapped
    with debug_exceptions, so failures inside those methods get enriched
    error messages.
    """
    def wrapper(*args, **kwargs):
        result = f(*args, **kwargs)
        # args[1] is the object owning the methods listed in 'result'.
        for method in result:
            setattr(args[1], method, debug_exceptions(getattr(args[1], method)))
        return result
    return wrapper
| 2.046875 | 2 |
src/utils/logger.py | gdevos010/ml_supervised_learning | 0 | 12761681 | <gh_stars>0
import inspect
import logging
import os
from datetime import datetime
logger = logging.getLogger(__name__)
def info(msg):
    """Log *msg* at INFO level, prefixed with the caller's file/line/function."""
    level = "INFO"
    # Frame [1] is our immediate caller; FrameInfo fields are
    # (frame, filename, lineno, function, code_context, index).
    caller = inspect.getouterframes(inspect.currentframe())[1]
    filename, line_number, function_name = caller[1], caller[2], caller[3]
    source_line = caller[4][0]
    # Mirror the caller's indentation depth in the log output.
    indentation_level = source_line.find(source_line.lstrip())
    now = datetime.now().time().strftime("%H:%M:%S")
    location = f'[{now} {os.path.basename(filename)}:{line_number} - {function_name}'
    prefix = location.ljust(45) + f'] {level}:'
    indent = ' ' * max(0, indentation_level - 8)
    logger.info('{prefix}{i}\t{m}'.format(prefix=prefix, i=indent, m=msg))
def debug(msg):
    """Log *msg* at DEBUG level, prefixed with the caller's file/line/function.

    Bug fix: the original tagged the line "DEBUG" but emitted it through
    logger.info(), so debug messages were recorded at INFO severity and
    could not be filtered out by level.
    """
    level = "DEBUG"
    frame, filename, line_number, function_name, lines, index = inspect.getouterframes(
        inspect.currentframe())[1]
    line = lines[0]
    # Mirror the caller's indentation depth in the log output.
    indentation_level = line.find(line.lstrip())
    now = datetime.now().time().strftime("%H:%M:%S")
    prefix = f'[{now} {os.path.basename(filename)}:{line_number} - {function_name}'.ljust(45) + f'] {level}:'
    logger.debug('{prefix}{i}\t{m}'.format(
        prefix=prefix,
        i=' ' * max(0, indentation_level - 8),
        m=msg
    ))
def error(msg):
    """Log *msg* at ERROR level, prefixed with the caller's file/line/function.

    Bug fix: the original tagged the line "ERROR" but emitted it through
    logger.info(), so error messages were recorded at INFO severity and
    could not be filtered or alerted on by level.
    """
    level = "ERROR"
    frame, filename, line_number, function_name, lines, index = inspect.getouterframes(
        inspect.currentframe())[1]
    line = lines[0]
    # Mirror the caller's indentation depth in the log output.
    indentation_level = line.find(line.lstrip())
    now = datetime.now().time().strftime("%H:%M:%S")
    prefix = f'[{now} {os.path.basename(filename)}:{line_number} - {function_name}'.ljust(45) + f'] {level}:'
    logger.error('{prefix}{i}\t{m}'.format(
        prefix=prefix,
        i=' ' * max(0, indentation_level - 8),
        m=msg
    ))
def init_logger():
    """Send root logging to ml_supervised.log and stderr; module logger at DEBUG."""
    handlers = [
        logging.FileHandler("ml_supervised.log", 'a'),
        logging.StreamHandler(),
    ]
    logging.basicConfig(format="", handlers=handlers)
    logging.getLogger(__name__).setLevel(logging.DEBUG)
| 2.828125 | 3 |
# Each of the 5 contestants gets one line of judge points; their score is
# the sum of those points.
scores = [sum(int(points) for points in line.split())
          for line in (input() for _ in range(5))]

# max()/index() picks the first contestant with the highest total. This also
# fixes the original's crash path: it started topscore at 0, so when no sum
# exceeded 0 (and 0 was absent), scores.index(0) raised ValueError.
topscore = max(scores)
index = scores.index(topscore) + 1
print(str(index) + " " + str(topscore))
| 3.359375 | 3 |
01_Python_Basico_Intermediario/Aula040/aula40.py | Joao-Inacio/Curso-de-Python3 | 1 | 12761683 | <filename>01_Python_Basico_Intermediario/Aula040/aula40.py
"""
Dictionary Comprehension em Python - (Compreensão de
dicionários)
"""
lista = [
('chave', 'valor'),
('chave2', 'valor2'),
]
# d1 = {x: y*2 for x, y in lista}
d2 = {f'chave_{x}': x**2 for x in range(5)}
print(d2)
| 3.828125 | 4 |
lib/ansiblelint/rules/MetaMainHasInfoRule.py | cyper85/ansible-lint | 1 | 12761684 | <reponame>cyper85/ansible-lint<gh_stars>1-10
# Copyright (c) 2016, <NAME> and contributors
# Copyright (c) 2018, Ansible Project
from ansiblelint.rules import AnsibleLintRule
META_STR_INFO = (
'author',
'description'
)
META_INFO = tuple(list(META_STR_INFO) + [
'license',
'min_ansible_version',
'platforms',
])
def _platform_info_errors_itr(platforms):
if not isinstance(platforms, list):
yield 'Platforms should be a list of dictionaries'
return
for platform in platforms:
if not isinstance(platform, dict):
yield 'Platforms should be a list of dictionaries'
elif 'name' not in platform:
yield 'Platform should contain name'
def _galaxy_info_errors_itr(galaxy_info,
                            info_list=META_INFO,
                            str_info_list=META_STR_INFO):
    """Yield error messages for missing or mistyped ``galaxy_info`` entries."""
    for info in info_list:
        value = galaxy_info.get(info, False)
        if not value:
            # Missing (or falsy) entry.
            yield 'Role info should contain {info}'.format(info=info)
            continue
        if info in str_info_list and not isinstance(value, str):
            yield '{info} should be a string'.format(info=info)
        elif info == 'platforms':
            # Platforms have their own structural checks.
            for error in _platform_info_errors_itr(value):
                yield error
class MetaMainHasInfoRule(AnsibleLintRule):
    """Flag meta/main.yml files whose galaxy_info is missing or incomplete."""

    id = '701'
    shortdesc = 'meta/main.yml should contain relevant info'
    str_info = META_STR_INFO
    info = META_INFO
    description = (
        'meta/main.yml should contain: ``{}``'.format(', '.join(info))
    )
    severity = 'HIGH'
    tags = ['metadata']
    version_added = 'v4.0.0'

    def matchplay(self, file, data):
        # Only meta files are relevant to this rule.
        if file['type'] != 'meta':
            return False

        meta = {'meta/main.yml': data}
        galaxy_info = data.get('galaxy_info', False)
        if not galaxy_info:
            return [(meta, "No 'galaxy_info' found")]
        return [(meta, error)
                for error in _galaxy_info_errors_itr(galaxy_info)]
| 2.25 | 2 |
lstm_deep_name_generator.py | Ladvien/gan_name_maker | 0 | 12761685 | <filename>lstm_deep_name_generator.py
# -*- coding: utf-8 -*-
"""deep_name_generator.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Y_WqpnCdnBFm3142rtp6zlZQaZNkF3xi
# Deep Name Generator
This project is meant to be a proof-of-concept, showing "organic" first names can be generated using a [Generative Adversarial Network](https://en.wikipedia.org/wiki/Generative_adversarial_network). We are using a found dataset provided by [<NAME>](http://hadley.nz/) at RStudio.
The goal will be to vectorize each of the names in the following format:
| a_0 | b_0 | c_0 | ... | z_9 | a_10 | etc |
|-----|-----|-----|-----|-----|------|-----|
| 1 | 0 | 0 | ... | 1 | 0 | 0 |
| 0 | 0 | 1 | ... | 0 | 0 | 0 |
Where the letter is the one-hot encoded representation of a character and the number the placeholder in string.
For example, the name `Abby` would be represented with the following vector.
| a_0 | ... | b_1 | ... | b_2 | ... | y_3 |
|-----|-----|-----|-----|-----|-----|-----|
| 1 | ... | 1 | ... | 1 | ... | 1 |
Given Wickham's dataset also includes:
* `year`
* `percent_[popularity]`
* `sex`
It may be interesting to add these as additional features to allow the model to learn first name contexts.
# GAN
Working off the following Keras GAN Example:
https://towardsdatascience.com/gan-by-example-using-keras-on-tensorflow-backend-1a6d515a60d0
Another good article on GANS and text generation
https://becominghuman.ai/generative-adversarial-networks-for-text-generation-part-1-2b886c8cab10
And a good one on transformers (Attention is All You Need)
https://medium.com/inside-machine-learning/what-is-a-transformer-d07dd1fbec04
https://medium.com/datadriveninvestor/generative-adversarial-network-gan-using-keras-ce1c05cfdfd3
https://machinelearningmastery.com/practical-guide-to-gan-failure-modes/
https://towardsdatascience.com/my-first-encounter-with-gans-6c0114f60cd7
# Preventing Mode Collapse
https://towardsdatascience.com/10-lessons-i-learned-training-generative-adversarial-networks-gans-for-a-year-c9071159628?
https://medium.com/@jonathan_hui/gan-unrolled-gan-how-to-reduce-mode-collapse-af5f2f7b51cd
**Good TF GAN build**
https://towardsdatascience.com/gan-introduction-and-implementation-part1-implement-a-simple-gan-in-tf-for-mnist-handwritten-de00a759ae5c
# Training Parameters
"""
# Engineering parameters.
data_set = '93k' # "6k" or "93
pad_character = '~'
allowed_chars = f'abcdefghijklmnopqrstuvwxyz{pad_character}'
len_allow_chars = len(allowed_chars)
max_name_length = 10
# Parameters
optimizer_name = 'adagrad'
g_learning_rate = 0.0001
d_learning_rate = 0.0004
gan_learning_rate = 0.0001
epochs = 45000
batch_size = 128
num_samples = 8
g_dropout = 0.2
d_dropout = 0.2
generator_inputs = 270
g_width_modifier = 0.8 # Discriminator deep-neuron multiplier.
d_width_modifier = 0.5 # Generator deep-neuron multiplier.
label_smoothing = 0.1
g_h_activation = 'lrelu' # Activation function for hidden layers.
d_h_activation = 'lrelu'
generator_activation = 'sigmoid'
g_batchnorm = True
d_batchnorm = True
# Discriminator accuracy threshold for retraining.
d_accuracy_threshold = 0.98 # 1.1 == always retrain
params = {
'epochs': epochs,
'batch_size': batch_size,
'g_learning_rate': g_learning_rate,
'd_learning_rate': d_learning_rate,
'gan_learning_rate': gan_learning_rate,
'optimizer_name': optimizer_name,
'generator_inputs': generator_inputs,
'num_samples_per_step': num_samples,
'allowed_chars': allowed_chars,
'max_name_length': max_name_length,
'g_h_activation': g_h_activation,
'd_h_activation': d_h_activation,
'g_dropout': g_dropout,
'd_dropout': d_dropout,
'd_width_modifier': d_width_modifier,
'g_width_modifier': g_width_modifier,
'd_accuracy_threshold': d_accuracy_threshold,
'label_smoothing': label_smoothing,
'g_batchnorm': g_batchnorm,
'd_batchnorm': d_batchnorm,
'generator_activation': generator_activation
}
"""# Prepared Data
If you'd like to skip to the fun part, I've vectorized the names already.
But, if you want to grind it out, here's the code:
* [deep_name_prep_data](https://github.com/Ladvien/gan_name_maker/blob/master/deep_name_prep_data.ipynb)
## Load the Data
"""
import pandas as pd
import numpy as np
# !git clone https://github.com/Ladvien/gan_name_maker
if data_set == '6k':
# ~6k names
df = pd.read_csv('./gan_name_maker/vectorized_names_6k.csv')
elif data_set == '93k':
# ~93k names
df = pd.read_csv('./gan_name_maker/vectorized_names_93k.csv')
df = df.rename(columns = {'Name':'name'})
else:
print('Please select data_set')
params['data_set'] = data_set
cols = list(df)
# Move the name column to the beginning.
cols.insert(0, cols.pop(cols.index('name')))
df = df.loc[:, cols]
# Drop the yucky columns.
df.drop('Unnamed: 0', axis = 1, inplace = True)
# Sort values by name
df.sort_values(by = 'name', ascending = True, inplace = True)
print(f'Vectorized data has {df.shape[0]} samples and {df.shape[1]} features.')
df.head()
# Randomize
df = df.sample(df.shape[0])
"""# Libraries"""
import tensorflow as tf
from keras.layers import Dense, Dropout, Activation, Input, LeakyReLU,\
BatchNormalization, ReLU, Embedding,\
Lambda, LSTM, TimeDistributed, Flatten
from keras import Sequential
from keras.models import Model
from keras.callbacks import History
from keras import backend
# Personal tools.
# !pip install git+https://github.com/Ladvien/ladvien_ml.git
from ladvien_ml import FeatureModel
fm = FeatureModel()
"""# Setup Weights and Biases"""
# !pip install --upgrade wandb
# !wandb login <PASSWORD>
import wandb
wandb.init(project = 'deep_name_generator',
config = params)
"""# Discriminator"""
def label_smooth_sigmoid(y_true, y_pred):
    """Discriminator loss: sigmoid cross-entropy with label smoothing.

    Uses the module-level ``label_smoothing`` constant and the TF1
    ``tf.losses`` API.
    """
    return tf.losses.sigmoid_cross_entropy(y_true, y_pred, label_smoothing = label_smoothing)
def discriminator(input_shape, optimizer, d_activation, d_batchnorm, dropout = 0.1, width_modifier = 0.5):
    """Build and compile the dense discriminator (real vs. generated names).

    Args:
        input_shape: Length of the flattened one-hot name vector.
        optimizer: Keras optimizer used to compile the model.
        d_activation: Activation layer added after each Dense layer.
        d_batchnorm: Whether to insert BatchNormalization after activations.
        dropout: Dropout rate applied after each layer group.
        width_modifier: Hidden-layer width as a fraction of input_shape.

    Returns:
        The compiled Sequential model (single sigmoid output, trained with
        label-smoothed sigmoid cross-entropy).
    """
    # NOTE(review): the same d_activation layer *instance* is re-added after
    # every Dense layer; Keras layers are normally one instance per position
    # in a Sequential model -- confirm this builds as intended.
    D = Sequential()
    # Input layer
    input_layer_width = input_shape
    D.add(Dense(input_layer_width, input_shape = (input_layer_width,)))
    D.add(d_activation)
    if d_batchnorm:
        D.add(BatchNormalization())
    D.add(Dropout(dropout))
    # First Hidden Layer
    first_layer_width = int(input_shape * width_modifier)
    D.add(Dense(first_layer_width))
    D.add(d_activation)
    if d_batchnorm:
        D.add(BatchNormalization())
    D.add(Dropout(dropout))
    # Second Hidden Layer
    second_layer_width = int(input_shape * width_modifier)
    D.add(Dense(second_layer_width))
    D.add(d_activation)
    if d_batchnorm:
        D.add(BatchNormalization())
    D.add(Dropout(dropout))
    # Third Hidden Layer
    third_layer_width = int(input_shape * width_modifier)
    D.add(Dense(third_layer_width))
    D.add(d_activation)
    if d_batchnorm:
        D.add(BatchNormalization())
    D.add(Dropout(dropout))
    # Output
    D.add(Dense(1, activation = 'sigmoid'))
    D._name = 'discriminator'
    D.compile(optimizer = optimizer, loss = label_smooth_sigmoid, metrics = ['accuracy'])
    D.summary()
    return D
"""# Generator"""
def generator(num_inputs, output_shape, optimizer, g_activation, g_batchnorm,
              generator_activation, dropout=0.1, width_modifier=0.5):
    """Build and compile the LSTM generator.

    Args:
        num_inputs: Size of the GAN noise vector (kept for interface
            compatibility; the layers here are sized from ``output_shape``).
        output_shape: Length of the one-hot name vector to generate.
        optimizer: Keras optimizer used to compile the model.
        g_activation: Activation layer applied after the final Dense layer.
        g_batchnorm: Whether to insert BatchNormalization after the LSTM.
        generator_activation: Unused here; retained for interface
            compatibility with the training configuration.
        dropout: Dropout rate for the Embedding and LSTM layers
            (Keras 1.x-style ``dropout``/``dropout_U``/``dropout_W`` kwargs).
        width_modifier: Unused here; retained for interface compatibility.

    Returns:
        The compiled Keras Sequential model.
    """
    G = Sequential()
    G.add(Embedding(output_shape, output_shape, input_length=output_shape, dropout=dropout))
    G.add(LSTM(output_shape, dropout_U=dropout, dropout_W=dropout))
    if g_batchnorm:
        G.add(BatchNormalization())
    G.add(Dense(output_shape))
    # Bug fix: use the generator's own activation parameter. The original
    # called G.add(d_activation), reaching into the module-level
    # discriminator activation -- ignoring the g_activation argument and
    # sharing one layer instance between both models.
    G.add(g_activation)
    G.compile(optimizer=optimizer, loss='categorical_crossentropy')
    G.summary()
    return G
"""# GAN"""
def create_gan(D, G, g_inputs):
    """Chain the generator and frozen discriminator into one trainable model.

    Args:
        D: Compiled discriminator; its weights are frozen here so training
           the combined model only updates the generator.
        G: Compiled generator.
        g_inputs: Length of the noise vector fed to the generator.

    Returns:
        An uncompiled Keras Model mapping noise -> D(G(noise)).
    """
    D.trainable = False
    gan_input = Input(shape = (g_inputs,))
    x = G(gan_input)
    gan_output = D(x)
    gan = Model(inputs = gan_input, outputs = gan_output)
    return gan
"""# Compile"""
# implementation of wasserstein loss
def wasserstein_loss(y_true, y_pred):
    """Wasserstein (earth-mover) loss: mean of label * prediction."""
    return backend.mean(y_true * y_pred)
names_master = df['name'].tolist()
names_master = list(filter(lambda x: type(x) == str, names_master))
df = df.drop('name', axis = 1)
# Input shape will be the number of possible characters times
# the maximum name length allowed.
vectorized_name_length = df.shape[1]
import keras
g_optimizer = keras.optimizers.Adam(g_learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False)
d_optimizer = keras.optimizers.Adam(d_learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False)
gan_optimizer = keras.optimizers.Adam(gan_learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False)
## Select optimizer.
#g_optimizer = fm.select_optimizer(optimizer_name, g_learning_rate)
#d_optimizer = fm.select_optimizer(optimizer_name, d_learning_rate)
#gan_optimizer = fm.select_optimizer(optimizer_name, gan_learning_rate)
# Select activation function for hidden layers.
if g_h_activation == 'relu':
g_activation = ReLU()
elif g_h_activation == 'lrelu':
g_activation = LeakyReLU()
if d_h_activation == 'relu':
d_activation = ReLU()
elif d_h_activation == 'lrelu':
d_activation = LeakyReLU()
# Generator
G = generator(generator_inputs, vectorized_name_length, g_optimizer, g_activation, g_batchnorm, generator_activation, dropout = g_dropout, width_modifier = g_width_modifier)
# Discriminator
D = discriminator(vectorized_name_length, d_optimizer, d_activation, d_batchnorm, dropout = d_dropout, width_modifier = d_width_modifier)
# Build GAN
GAN = create_gan(D, G, generator_inputs)
GAN._name = 'GAN'
GAN.compile(loss = wasserstein_loss, optimizer = gan_optimizer, metrics=['accuracy'])
GAN.summary()
#wandb.save('model.h5')
"""## Prepare Data"""
# Randomize inputs.
df = df.sample(df.shape[0])
# Create target label.
df['real'] = 1
# Drop the 'name' and 'real' columns.
X = df.iloc[:,0:-1]
# Get target
y = df.iloc[:,-1:]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
column_names = df.iloc[:,0:-1].columns.tolist()
"""# Evaluation Method"""
def retrieve_names_from_sparse_matrix(generated_names, pad_character):
    """Decode one-hot-style name vectors back into strings.

    Each row is split into windows of ``len_allow_chars`` scores; every
    window is argmax-decoded into a character via the module-level
    ``allowed_chars`` alphabet. Decoding stops at the first
    ``pad_character`` (up to ``max_name_length`` characters per name).
    """
    decoded_names = []
    for row in generated_names:
        name_chars = []
        for position in range(max_name_length):
            start = position * len_allow_chars
            window = list(row[start:start + len_allow_chars])
            letter = allowed_chars[window.index(max(window))]
            if letter == pad_character:
                break
            name_chars.append(letter)
        decoded_names.append(''.join(name_chars))
    return decoded_names
# !pip install pyjarowinkler
from pyjarowinkler import distance
# Calculate the generated names similarity to the
# real names using Jara-Winkler Distance
def get_jw_similarity_score(generated_names, real_names):
    """Mean Jaro-Winkler similarity of each generated name vs. all real names.

    Args:
        generated_names: Decoded candidate name strings.
        real_names: Reference (real) name strings; None entries are skipped.

    Returns:
        One mean-similarity float per generated name. Names that cannot be
        scored fall back to the small constant 0.00001.
    """
    real_name_jw_scores = []
    for generated_name in generated_names:
        values = []
        for real_name in real_names:
            if real_name is None:
                continue
            try:
                values.append(distance.get_jaro_distance(real_name, generated_name, winkler=True, scaling=0.1))
            except:
                # If empty string, set a low value.
                # NOTE(review): this bare 'except' also hides unrelated errors
                # from pyjarowinkler -- consider narrowing it.
                values.append(0.00001)
        try:
            real_name_jw_scores.append(sum(values) / len(values))
        except ZeroDivisionError:
            # No scorable real names at all.
            real_name_jw_scores.append(0.00001)
    return real_name_jw_scores
"""# Training"""
# Loading the data
batch_count = x_train.shape[0] / batch_size
d_accuracy = 0
for e in range(1, epochs + 1):
print(f'Epoch: {e}')
for step in range(batch_size):
# Generate noise as input to initialize generator.
noise = np.random.normal(0, 1, [batch_size, generator_inputs])
# Generate fake names from noise.
generated_names = G.predict(noise)
# Get a random set of real names.
real_names = x_train.iloc[np.random.randint(low = 0, high = x_train.shape[0], size = batch_size),:]
#Construct different batches of real and fake data
X = np.concatenate([real_names, generated_names])
# Labels for generated and real data (first four rows are real)
y_labels = np.zeros(2 * batch_size)
y_labels[:batch_size] = 1
if d_accuracy < d_accuracy_threshold:
# Pre-train discriminator on fake and real data before starting the GAN.
D.trainable = True
D.train_on_batch(X, y_labels)
# During the training of GAN, the weights of discriminator should be
# fixed. We can enforce that by setting the trainable flag.
D.trainable = False
# Tricking the noised input of the Generator as real data
noise = np.random.normal(0, 1, [batch_size, generator_inputs])
y_gen = np.ones(batch_size)
# Train the GAN by alternating the training of the Discriminator
# and training the chained GAN model with Discriminator’s weights
# frozen.
GAN_score = GAN.train_on_batch(noise, y_gen)
D_score = D.evaluate(X, y_labels, verbose = 0)
d_accuracy = D_score[1]
# End of an epoch.
print(f'GAN loss: {GAN_score[0]}')
print(f'Disc. loss: {D_score[0]}')
# Make Generator inputs.
noise = np.random.normal(0, 1, [num_samples, generator_inputs])
# Generate fake names from noise.
generated_names = G.predict(noise)
retrieved_names = retrieve_names_from_sparse_matrix(generated_names, pad_character)
# Get get Jara-Winkler similarity.
retrieved_name_similarity_scores = get_jw_similarity_score(retrieved_names, names_master)
try:
batch_similarity_score = (sum(retrieved_name_similarity_scores) / len(retrieved_name_similarity_scores))
except ZeroDivisionError:
batch_similarity_score = 0
print('Division by zero')
# Save generated names.
table = wandb.Table(columns=['Name', 'Epoch'])
for name in retrieved_names:
table.add_data(name, e)
# Log sample of generated names.
wandb.log({"generated_names": table})
# Log to Weights and Biases
wandb.log({'GAN Loss': GAN_score[0],
'epoch': e,
'discriminator_loss': D_score[0],
'discriminator_accuracy': D_score[1],
'num_of_names': len(list(set(retrieved_names))),
'batch_similarity_score': batch_similarity_score
})
| 2.90625 | 3 |
main.py | mgadel/Text_Browser | 0 | 12761686 | <reponame>mgadel/Text_Browser
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 29 16:18:43 2021
@author: mgadel
"""
#from configs.config import CONFIG
from utils.load_config import configuration
from dataloader.data_loader import dataloader
from model.k_neighbours import model_knn
from model.cosine_similarity import model_cosine_sim
from model.gloves import model_glove
from sys import exit
PATH = r'configs\config.json'  # configuration file, relative to the working dir
def init():
    """Load the JSON configuration and the text corpus into module globals."""
    global config, data
    # Set Configuration Parameters
    config = configuration.load_json(PATH)
    # Load Data corpus
    data = dataloader(config.data_path)
def build_model():
    """Prompt for a model choice and build it into the module-global `model`.

    Recognized choices: 'knn', 'cosine sim', 'glove'.  'Q' returns to the
    main menu; any other input re-prompts.

    Bug fix: the original's `else` branch prompted a second time and the
    answer was immediately discarded by the loop's own prompt, forcing the
    user to type a valid choice twice.
    """
    global model
    while True :
        model_choice =input('Select Model : knn, cosine sim, glove (to quit press Q): \n')
        if model_choice == 'knn':
            model = model_knn(data,config.model_hyperparameter['knn']['n_voisins'])
            break
        elif model_choice == 'cosine sim':
            model = model_cosine_sim(data)
            break
        elif model_choice == 'glove':
            model = model_glove(data,config.model_hyperparameter['embeddings'])
            break
        elif model_choice == 'Q':
            print('\n...............\n')
            menu()
        else :
            # Invalid entry: report it; the loop re-prompts on its own.
            print('Selection error')
def ask_questions():
    """Prompt for keywords, query the current model, and print its answers.

    The first three answers are always printed.  Before showing more, the
    user is asked once whether to print the remaining answers.

    Bug fix: the original attached the ``i >= 3`` branches as ``elif`` of
    ``if i == 3``, so the fourth answer was never printed even when the
    user answered 'yes'.
    """
    query = input('Enter keyworlds: \n')
    print('\n')
    answers = model.n_answer(query)
    other_ans = 'yes'
    for i, a in enumerate(answers):
        if i < 3:
            print('Answer' + str(i+1) + '\n')
            print(a)
            print('\n')
            continue
        if i == 3:
            # Ask once whether to continue past the first three answers.
            other_ans = input('Print all answers (yes/no) ? \n')
            while other_ans != 'no' and other_ans != 'yes':
                print('Wrong input ! Try again \n')
                other_ans = input('Print all answers (yes/no) ? \n')
        if other_ans == 'no':
            break
        print('Answer' + str(i+1) + '\n')
        print(a)
        print('\n')
def menu():
    """Main interactive loop: build models, run requests, or exit.

    Builds a model first if none exists yet.  Exits the process via
    `exit()` once the user confirms quitting.

    Bug fix: the original's `else` branch issued a second `input()` (with a
    typo'd prompt) whose answer was immediately discarded by the loop's own
    prompt.
    """
    if 'model' not in globals():
        print('First, you should select a model to start searching ! \n')
        build_model()
    while True :
        print('\n --- Menu ---')
        menu_selection = input('New Model (Press M), New Request (Press R), Exit (Press Q): \n')
        if menu_selection == 'M':
            build_model()
        elif menu_selection == 'R':
            ask_questions()
        elif menu_selection == 'Q':
            ans =input('Are you sure to quit (yes/no): \n')
            if ans =='no':
                menu()
            elif ans =='yes':
                break
        else :
            # Invalid entry: report it; the loop re-prompts on its own.
            print('Selection error !')
    print('\n Good Bye ! \n')
    exit()
def run_main():
    """Application entry point: print the loading banner, initialise the
    configuration and corpus, then hand control to the interactive menu."""
    for banner_line in ('\n', 'Loading', '...............\n'):
        print(banner_line)
    init()
    menu()
# Script entry point.
if __name__=='__main__':
    print('\n --- TEXT SEARCH --- \n')
    run_main()
| 2.609375 | 3 |
networkapi/api_pools/tests/sanity/test_pool_put_spec.py | vinicius-marinho/GloboNetworkAPI | 73 | 12761687 | # -*- coding: utf-8 -*-
import json
import logging
from django.core.management import call_command
from django.test.client import Client
from networkapi.test.test_case import NetworkApiTestCase
log = logging.getLogger(__name__)
def setup():
    """Module-level test setup: load every fixture these tests rely on.

    Runs `manage.py loaddata` once for the environment, pool, user/group,
    permission and healthcheck fixtures before the tests in this module.
    """
    call_command(
        'loaddata',
        'networkapi/system/fixtures/initial_variables.json',
        'networkapi/api_pools/fixtures/initial_optionspool.json',
        'networkapi/requisicaovips/fixtures/initial_optionsvip.json',
        'networkapi/healthcheckexpect/fixtures/initial_healthcheck.json',
        'networkapi/usuario/fixtures/initial_usuario.json',
        'networkapi/grupo/fixtures/initial_ugrupo.json',
        'networkapi/usuario/fixtures/initial_usuariogrupo.json',
        'networkapi/api_ogp/fixtures/initial_objecttype.json',
        'networkapi/api_ogp/fixtures/initial_objectgrouppermissiongeneral.json',
        'networkapi/grupo/fixtures/initial_permissions.json',
        'networkapi/grupo/fixtures/initial_permissoes_administrativas.json',
        'networkapi/api_pools/fixtures/initial_base.json',
        'networkapi/api_pools/fixtures/initial_pools_1.json',
        verbosity=0
    )
class PoolPutSpecTestCase(NetworkApiTestCase):
    """Sanity tests for input validation of PUT /api/v3/pool/1/.

    Invalid payload fixtures must be rejected with HTTP 400; valid ones
    must return 200 and be readable back unchanged via GET.

    Fixes over the original: the final test asserted status 400 with an
    error message claiming "should be 500", and mixed assertEqual with the
    compare_status helper used everywhere else -- it now uses
    compare_status consistently.
    """

    maxDiff = None

    def setUp(self):
        self.client = Client()

    def tearDown(self):
        pass

    def execute_some_put_verify_error(self, name_file):
        """PUT the payload in `name_file` and expect a 400 validation error."""
        response = self.client.put(
            '/api/v3/pool/1/',
            data=json.dumps(self.load_json_file(name_file)),
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))

        self.compare_status(400, response.status_code)

    def execute_some_put_verify_success(self, name_file):
        """PUT the payload in `name_file`, expect 200, then GET the pool
        back and verify it matches the payload."""
        response = self.client.put(
            '/api/v3/pool/1/',
            data=json.dumps(self.load_json_file(name_file)),
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))

        self.compare_status(200, response.status_code)

        # get datas updated
        response = self.client.get(
            '/api/v3/pool/1/',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))

        self.compare_status(200, response.status_code)

        self.compare_json(name_file, response.data)

    def test_put_valid_file(self):
        """ test_put_valid_file"""
        self.execute_some_put_verify_success(
            'api_pools/tests/sanity/json/put/test_pool_put_valid_file.json')

    def test_put_out_of_range_port(self):
        """ test_put_out_of_range_port"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_out_of_range_port.json')

    def test_put_negative_port(self):
        """ test_put_negative_port"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_negative_port.json')

    def test_put_float_port(self):
        """ test_put_float_port"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_float_port.json')

    def test_put_zero_port(self):
        """ test_put_zero_port"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_zero_port.json')

    def test_put_string_port(self):
        """ test_put_string_port"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_string_port.json')

    def test_put_float_environment(self):
        """ test_put_float_environment"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_float_environment.json')

    def test_put_string_environment(self):
        """ test_put_string_environment"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_string_environment.json')

    def test_put_zero_environment(self):
        """ test_put_zero_environment"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_zero_environment.json')

    def test_put_negative_environment(self):
        """ test_put_negative_environment"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_negative_environment.json')

    def test_put_integer_name_servicedownaction(self):
        """ test_put_integer_name_servicedownaction"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_integer_name_servicedownaction.json')

    def test_put_invalid_healthcheck_type(self):
        """ test_put_invalid_healthcheck_type"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_invalid_healthcheck_type.json')

    def test_put_invalid_destination(self):
        """ test_put_invalid_destination"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_invalid_destination.json')

    def test_put_negative_default_limit(self):
        """ test_put_negative_default_limit"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_negative_default_limit.json')

    def test_put_integer_lb_method(self):
        """ test_put_integer_lb_method"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_integer_lb_method.json')

    def test_put_string_id_servicedownaction(self):
        """ test_put_string_id_servicedownaction"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_string_id_servicedownaction.json')

    def test_put_zero_id_servicedownaction(self):
        """ test_put_zero_id_servicedownaction"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_zero_id_servicedownaction.json')

    def test_put_negative_id_servicedownaction(self):
        """ test_put_negative_id_servicedownaction"""
        self.execute_some_put_verify_error(
            'api_pools/tests/sanity/json/put/test_pool_put_negative_id_servicedownaction.json')

    def test_valid_post_after_equals_valid_put(self):
        """ test_valid_post_after_equals_valid_put"""

        # The pool must already exist before we PUT over it.
        response = self.client.get(
            '/api/v3/pool/1/',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))

        self.compare_status(200, response.status_code)

        response = self.client.put(
            '/api/v3/pool/1/',
            data=json.dumps(self.load_json_file(
                'api_pools/tests/sanity/json/test_pool_put_and_post.json')),
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))

        self.compare_status(200, response.status_code)

        # Re-POSTing the same identifiers must be rejected as a duplicate.
        response = self.client.post(
            '/api/v3/pool/',
            data=json.dumps(self.load_json_file(
                'api_pools/tests/sanity/json/test_pool_put_and_post.json')),
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))

        self.compare_status(400, response.status_code)
| 2.0625 | 2 |
LegoLib/legoMotor.py | rphuang/IotDevicesPy | 0 | 12761688 | from threading import RLock
from pylgbst.peripherals import EncodedMotor
from IotLib.pyUtils import startThread
from IotLib.log import Log
from IotLib.iotMotor import IotMotor
from IotLib.iotEncodedMotor import IotEncodedMotor
from .legoNode import SendCommand
# todo: LegoMotor inherits both IotMotor and IotSteering.
class LegoMotor(IotEncodedMotor):
    """ the class encapsulates a lego encoded motor based on pylgbst.Motor

    Supports single or dual (synchronized) encoded motors, optional
    subscription to speed/angle telemetry, and serializes motor commands
    behind a re-entrant lock so an emergencyStop() issued from another
    thread cannot interleave with a command in flight.

    Bug fixes: the lock was previously "used" via bare attribute references
    (`self._motorControlLock.acquire` / `.release`) which never actually
    acquired or released it -- every command is now wrapped in a `with`
    block.  A duplicated dead `outspd = 0` assignment in `_run` was removed.
    """
    # subscribe data options for the `data` constructor argument
    NoData = 0      # do not subscribe data
    SpeedData = 1   # subscribe speed data
    AngleData = 2   # subscribe angle data

    def __init__(self, name, parent, motor, data=0, minMovingSpeed=5, maxPower=1.0):
        """ construct a LegoMotor
        name: the name of the node
        parent: parent IotNode object. None for root node.
        motor: an instance of pylgbst.Motor
        data: which data to subscribe (NoData, SpeedData, or AngleData)
        minMovingSpeed: the minimum valid moving absolute speed
        maxPower: max power allowed for the motor
        """
        super(LegoMotor, self).__init__(name, parent, minMovingSpeed=minMovingSpeed)
        self.motor = motor
        self.data = data
        self.maxPower = maxPower
        # lock is required in case of emergencyStop() been called in separate thread
        self._motorControlLock = RLock()

    def stop(self):
        """ stop the motor and return the (zeroed) speed """
        self._stop()
        return self.speed

    def run(self, speed, speed2=None):
        """ run the motor with specified speed
        speed: the speed for the motor, speed2: the speed for the secondary motor
        speed > 0 run forward max 100
        speed < 0 run reverse max -100
        speed = 0 stop
        return the running speed
        """
        self._requestedSpeed = speed
        self._requestedSpeed2 = speed2
        Log.info('Request %s to run at speed %i, %s' %(self.name, speed, str(speed2)))
        self._run(speed, speed2)
        return self.speed

    def runAngle(self, angle, speed, speed2 = None):
        """ move the motor by specified angle for encoded single or dual encoded motor
        angle is in degree (360 is one rotation)
        speed controls the direction ranges from -100 to 100
        """
        outspd = float(IotMotor._clampSpeed(speed)) / 100.0
        outspd2 = speed2
        if speed2 is not None:
            outspd2 = float(IotMotor._clampSpeed(speed2)) / 100.0
        Log.info('MoveAngle %s by %i degrees at speed %f, %s' %(self.name, angle, outspd, str(outspd2)))
        with self._motorControlLock:
            SendCommand(self.motor, self.motor.angled, degrees=angle, speed_primary=outspd, speed_secondary=outspd2, max_power=self.maxPower)

    def runAngleAsync(self, angle, speed, speed2 = None):
        """ launch a thread to move the motor by specified angle for encoded single or dual motor
        angle is in degree (360 is one rotation)
        speed controls the direction ranges from -100 to 100
        """
        startThread('%s.moveAngle' %self.name, target=self.runAngle, front=True, args=(angle, speed, speed2))

    def goToPosition(self, position, position2 = None, speed = 100):
        """ run the motor to specified positions for encoded single or dual motor
        positions are in degrees range from int.min to int.max
        speed controls the direction ranges from -100 to 100
        """
        outspd = float(IotMotor._clampSpeed(speed)) / 100.0
        Log.info('GoToPosition %s to (%i, %s) at speed %f' %(self.name, position, str(position2), outspd))
        with self._motorControlLock:
            SendCommand(self.motor, self.motor.goto_position, degrees_primary=position, degrees_secondary=position2, speed=outspd, max_power=self.maxPower)

    def goToPositionAsync(self, position, position2 = None, speed = 100):
        """ launch a thread to run the motor to specified positions for encoded single or dual motor
        positions are in degrees range from int.min to int.max
        speed controls the direction ranges from -100 to 100
        """
        startThread('%s.goToPosition' %self.name, target=self.goToPosition, front=True, args=(position, position2, speed))

    def extraSpeed(self, deltaSpeed):
        """ request extra speed in addition to the run speed by run(speed) """
        self._extraSpeed = deltaSpeed
        # dual-motor case with differing speeds is steering-controlled; skip
        if self.speed2 is not None and self.speed2 != self.speed:
            return
        # nothing to adjust while stopped
        if self._requestedSpeed == 0 or self.speed == 0:
            return
        extraSpeed = self._extraSpeed + self._extraSteeringSpeed
        absRunSpeed = abs(self._requestedSpeed) + extraSpeed
        if absRunSpeed != abs(self.speed):
            # keep the direction of the originally requested speed
            if self._requestedSpeed > 0:
                self._run(absRunSpeed)
            else:
                self._run(-absRunSpeed)

    def _stop(self):
        """ internal method to stop the motor """
        self._requestedSpeed = 0
        self._requestedSpeed2 = 0
        Log.info('Stop %s' %self.name)
        with self._motorControlLock:
            SendCommand(self.motor, self.motor.start_power, power_primary=0, power_secondary=0)
        self.speed = 0
        self.speed2 = 0
        return self.speed

    def _run(self, speed, speed2=None):
        """ internal method to run the motor with specified speed
        speed > 0 run forward max 100
        speed < 0 run reverse max -100
        speed = 0 stop
        return the running speed
        """
        if abs(speed) < self._minMovingSpeed:
            # below the usable range: cut power first, then continue so a
            # secondary speed (if any) is still dispatched below
            self._stop()
            outspd = 0
        else:
            outspd = float(IotMotor._clampSpeed(speed)) / 100.0
        outspd2 = speed2
        if speed2 is not None:
            outspd2 = float(IotMotor._clampSpeed(speed2)) / 100.0
        Log.info('Run %s at speed %f, %s' %(self.name, outspd, str(outspd2)))
        with self._motorControlLock:
            SendCommand(self.motor, self.motor.start_speed, speed_primary=outspd, speed_secondary=outspd2, max_power=self.maxPower)
        # without telemetry subscription, track the commanded speed ourselves
        if self.data == LegoMotor.NoData:
            self.speed = outspd
            self.speed2 = outspd2

    def _callbackSpeed(self, param1):
        """ telemetry callback: record the reported speed """
        Log.debug("Motor %s speed %s" %(self.name, str(param1)))
        self.speed = param1

    def _callbackAngle(self, param1):
        """ telemetry callback: record the reported angle """
        Log.debug("Motor %s angle %s" %(self.name, str(param1)))
        self.angle = param1

    def startUp(self):
        """ override to subscribe the data from lego sensor """
        if self.data == LegoMotor.SpeedData:
            self.motor.subscribe(self._callbackSpeed, mode=EncodedMotor.SENSOR_SPEED, granularity=1)
        elif self.data == LegoMotor.AngleData:
            self.motor.subscribe(self._callbackAngle, mode=EncodedMotor.SENSOR_ANGLE, granularity=1)

    def shutDown(self):
        """ override to unsubscribe the data """
        if self.data == LegoMotor.SpeedData:
            self.motor.unsubscribe(self._callbackSpeed)
        elif self.data == LegoMotor.AngleData:
            self.motor.unsubscribe(self._callbackAngle)
exercicios-turtle/.history/conversor_temp_20210624131757.py | Aleff13/poo-ufsc | 1 | 12761689 | <reponame>Aleff13/poo-ufsc<filename>exercicios-turtle/.history/conversor_temp_20210624131757.py<gh_stars>1-10
print("Abaixo digite o valor da temperatura em graus para saber sua equivalencia em farhent") | 1.523438 | 2 |
MixNotes/myDIR/2.py | nickliqian/ralph_doc_to_chinese | 8 | 12761690 | <reponame>nickliqian/ralph_doc_to_chinese
import matplotlib.pyplot as plt
from ising import *
import numpy as np
# Temperatures below, near, and above the 2D Ising critical point
# (Tc ~ 2.27 in units of J/k_B).
temperatures = [0.5, 2.27, 5.0]
for T in temperatures:
    # NOTE(review): assumes ising() returns (lattice, per-step energies,
    # per-step total spins) -- confirm against the ising module.
    lattice, energies, spins = ising(n=20, nsteps = 500000, T=T)
    # average spin per site on the 20x20 lattice
    spins = np.array(spins) / 20. ** 2
    plt.plot(range(len(spins)), spins, label = 'T = {0}'.format(T))
plt.legend(loc = 'best')
plt.xlabel('nSteps')
plt.ylabel('Average Spin')
# magnetization per site is bounded by +/-1; pad the axis slightly
plt.ylim(-1.2, 1.2)
plt.savefig('images/average-spin.png')
plt.show()
asap/point_match_optimization/schemas.py | AllenInstitute/render-modules | 6 | 12761691 | <filename>asap/point_match_optimization/schemas.py<gh_stars>1-10
from argschema.fields import Bool, Int, Nested, Str, OutputDir, List, InputFile
from argschema.schemas import DefaultSchema
from marshmallow import post_load
from marshmallow import fields
from asap.module.render_module import RenderParameters
class url_options(DefaultSchema):
    """Render URL flags controlling how tile canvases are rendered
    before point matching."""
    normalizeForMatching = Bool(
        required=False,
        default=True,
        missing=True,
        description='normalize for matching')
    renderWithFilter = Bool(
        required=False,
        default=True,
        missing=True,
        description='Render with Filter')
    renderWithoutMask = Bool(
        required=False,
        default=False,
        missing=False,
        description='Render without mask')
    excludeAllTransforms = Bool(
        required=False,
        default=False,
        missing=False,
        description="Exclude all transforms")
    excludeFirstTransformAndAllAfter = Bool(
        required=False,
        default=False,
        missing=False,
        description="Exclude first transfrom and all after")
    excludeTransformsAfterLast = Bool(
        required=False,
        default=False,
        missing=False,
        description="Exclude transforms after last")
class SIFT_options(DefaultSchema):
    """SIFT feature-extraction and match-filter settings.

    Every field is a *list* of candidate values so that the optimization
    modules can sweep the cross-product of parameter combinations.
    """
    SIFTfdSize = List(
        fields.Int,
        required=False,
        cli_as_single_argument=True,
        default=[8],
        missing=[8],
        description=('SIFT feature descriptor size: '
                     'how many samples per row and column'))
    SIFTmaxScale = List(
        fields.Float,
        required=False,
        cli_as_single_argument=True,
        default=[0.85],
        missing=[0.85],
        description=('SIFT maximum scale: minSize * minScale '
                     '< size < maxSize * maxScale'))
    SIFTminScale = List(
        fields.Float,
        required=False,
        cli_as_single_argument=True,
        default=[0.5],
        missing=[0.5],
        description=('SIFT minimum scale: minSize * minScale '
                     '< size < maxSize * maxScale'))
    SIFTsteps = List(
        fields.Int,
        required=False,
        cli_as_single_argument=True,
        default=[3],
        missing=[3],
        description='SIFT steps per scale octave')
    matchIterations = List(
        fields.Int,
        required=False,
        default=[1000],
        missing=[1000],
        cli_as_single_argument=True,
        description='Match filter iterations')
    matchMaxEpsilon = List(
        fields.Float,
        required=False,
        cli_as_single_argument=True,
        default=[20.0],
        missing=[20.0],
        description='Minimal allowed transfer error for match filtering')
    matchMaxNumInliers = List(
        fields.Int,
        required=False,
        default=[500],
        missing=[500],
        cli_as_single_argument=True,
        description='Maximum number of inliers for match filtering')
    matchMaxTrust = List(
        fields.Float,
        required=False,
        cli_as_single_argument=True,
        default=[3.0],
        missing=[3.0],
        description=('Reject match candidates with a cost larger '
                     'than maxTrust * median cost'))
    matchMinInlierRatio = List(
        fields.Float,
        required=False,
        cli_as_single_argument=True,
        default=[0.0],
        missing=[0.0],
        description=(
            'Minimal ratio of inliers to candidates for match filtering'))
    matchMinNumInliers = List(
        fields.Int,
        required=False,
        default=[10],
        missing=[10],
        cli_as_single_argument=True,
        description='Minimal absolute number of inliers for match filtering')
    matchModelType = List(
        fields.String,
        required=False,
        default=['AFFINE'],
        missing=['AFFINE'],
        cli_as_single_argument=True,
        description=('Type of model for match filtering Possible Values: '
                     '[TRANSLATION, RIGID, SIMILARITY, AFFINE]'))
    matchRod = List(
        fields.Float,
        required=False,
        cli_as_single_argument=True,
        default=[0.92],
        missing=[0.92],
        description='Ratio of distances for matches')
    renderScale = List(
        fields.Float,
        required=False,
        cli_as_single_argument=True,
        default=[0.35],
        missing=[0.35],
        description='Render canvases at this scale')
class PtMatchOptimizationParameters(RenderParameters):
    """Input schema for point-match parameter optimization over a
    tile-pair file (multiple pairs sampled from the stack)."""
    stack = Str(
        required=True,
        description=(
            'Name of the stack containing the tile pair (not the base stack)'))
    tile_stack = Str(
        required=False,
        default=None,
        missing=None,
        description='Name of the stack that will hold these two tiles')
    tilepair_file = InputFile(
        required=True,
        description='Tile pair file')
    no_tilepairs_to_test = Int(
        required=False,
        default=10,
        missing=10,
        description=('Number of tilepairs to be tested for '
                     'optimization - default = 10'))
    filter_tilepairs = Bool(
        required=False,
        default=False,
        missing=False,
        description=("Do you want filter the tilpair file for pairs "
                     "that overlap? - default = False"))
    max_tilepairs_with_matches = Int(
        required=False,
        default=0,
        missing=0,
        description=('How many tilepairs with matches required for '
                     'selection of optimized parameter set'))
    numberOfThreads = Int(
        required=False,
        default=5,
        missing=5,
        description='Number of threads to run point matching job')
    SIFT_options = Nested(SIFT_options, required=True)
    outputDirectory = OutputDir(
        required=True,
        description=(
            'Parent directory in which subdirectories will be '
            'created to store images and point-match results from SIFT'))
    url_options = Nested(url_options, required=True)
    pool_size = Int(
        required=False,
        default=10,
        missing=10,
        description='Pool size for parallel processing')

    @post_load
    def validate_data(self, data):
        # Default max_tilepairs_with_matches (0 = unset) to the number of
        # tilepairs under test.
        # NOTE(review): mutates `data` in place and returns None, and takes
        # no **kwargs -- verify this matches the marshmallow version pinned
        # by argschema (marshmallow 3 changed both conventions).
        if data['max_tilepairs_with_matches'] == 0:
            data['max_tilepairs_with_matches'] = data['no_tilepairs_to_test']
class PtMatchOptimizationParametersOutput(DefaultSchema):
    """Output schema: path to the generated results report."""
    output_html = Str(
        required=True,
        description=(
            'Output html file that shows all the tilepair plot and results'))
class PointMatchOptimizationParameters(RenderParameters):
    """Input schema for point-match parameter optimization on a single,
    explicitly identified tile pair."""
    stack = Str(
        required=True,
        description='Name of the stack containing the tile pair')
    tile_stack = Str(
        required=False,
        default=None,
        missing=None,
        description='Name of the stack that will hold these two tiles')
    tileId1 = Str(
        required=True,
        description='tileId of the first tile in the tile pair')
    tileId2 = Str(
        required=True,
        description='tileId of the second tile in the tile pair')
    pool_size = Int(
        required=False,
        default=10,
        missing=10,
        description='Pool size for parallel processing')
    SIFT_options = Nested(SIFT_options, required=True)
    outputDirectory = OutputDir(
        required=True,
        description=(
            'Parent directory in which subdirectories will be '
            'created to store images and point-match results from SIFT'))
    url_options = Nested(url_options, required=True)
class PointMatchOptimizationParametersOutput(DefaultSchema):
    """Output schema: path to the generated results report."""
    output_html = Str(
        required=True,
        description=(
            'Output html file that shows all the tilepair plot and results'))
| 2.171875 | 2 |
gridworld_vav/data_analysis/plot_grid.py | dsbrown1331/vav-icml | 0 | 12761692 | <filename>gridworld_vav/data_analysis/plot_grid.py
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
import sys
def plot_dashed_arrow(state, width, ax, direction, arrow_color='k'):
    """Draw a dashed arrow (dashed shaft, then solid head) in grid cell `state`.

    state: linear cell index (row-major, (0,0) top-left); width: number of
    grid columns; ax: matplotlib axes (anything with an .arrow method);
    direction: one of 'up'/'down'/'left'/'right' (anything else prints an
    error and draws nothing).

    Bug fix: direction was compared with `is` against string literals,
    which only works by CPython interning accident; now uses `==`.
    """
    print("plotting dashed arrow", direction)
    h_length = 0.15      # arrow-head length
    shaft_length = 0.4   # total arrow length (shaft + head)
    #convert state to coords where (0,0) is top left
    x_coord = state % width
    y_coord = state // width
    print(x_coord, y_coord)
    if direction == 'down':
        x_end = 0
        y_end = shaft_length - h_length
    elif direction == 'up':
        x_end = 0
        y_end = -shaft_length + h_length
    elif direction == 'left':
        x_end = -shaft_length + h_length
        y_end = 0
    elif direction == 'right':
        x_end = shaft_length - h_length
        y_end = 0
    else:
        print("ERROR: ", direction, " is not a valid action")
        return
    print(x_end, y_end)
    # dashed shaft (no head)
    ax.arrow(x_coord, y_coord, x_end, y_end, head_width=None, head_length=None, fc=arrow_color, ec=arrow_color,linewidth=4, linestyle=':',fill=False)
    #convert state to coords where (0,0) is top left
    x_coord = state % width
    y_coord = state // width
    print(x_coord, y_coord)
    if direction == 'down':
        x_end = 0
        y_end = h_length
        y_coord += shaft_length - h_length
    elif direction == 'up':
        x_end = 0
        y_end = -h_length
        y_coord += -shaft_length + h_length
    elif direction == 'left':
        x_end = -h_length
        y_end = 0
        x_coord += -shaft_length + h_length
    elif direction == 'right':
        x_end = h_length
        y_end = 0
        x_coord += shaft_length - h_length
    else:
        print("ERROR: ", direction, " is not a valid action")
        return
    print(x_end, y_end)
    # solid head at the shaft's far end
    ax.arrow(x_coord, y_coord, x_end, y_end, head_width=0.2, head_length=h_length, fc=arrow_color, ec=arrow_color,linewidth=4, fill=False,length_includes_head = True)
def plot_arrow(state, width, ax, direction, arrow_color='k'):
    """Draw a solid arrow in grid cell `state` pointing in `direction`.

    state: linear cell index (row-major, (0,0) top-left); width: number of
    grid columns; ax: matplotlib axes (anything with an .arrow method);
    direction: one of 'up'/'down'/'left'/'right' (anything else prints an
    error and draws nothing).

    Bug fix: direction was compared with `is` against string literals,
    which only works by CPython interning accident; now uses `==`.
    """
    print("plotting arrow", direction)
    h_length = 0.15      # arrow-head length
    shaft_length = 0.4   # total arrow length (shaft + head)
    #convert state to coords where (0,0) is top left
    x_coord = state % width
    y_coord = state // width
    print(x_coord, y_coord)
    if direction == 'down':
        x_end = 0
        y_end = shaft_length - h_length
    elif direction == 'up':
        x_end = 0
        y_end = -shaft_length + h_length
    elif direction == 'left':
        x_end = -shaft_length + h_length
        y_end = 0
    elif direction == 'right':
        x_end = shaft_length - h_length
        y_end = 0
    else:
        print("ERROR: ", direction, " is not a valid action")
        return
    print(x_end, y_end)
    ax.arrow(x_coord, y_coord, x_end, y_end, head_width=0.2, head_length=h_length, fc=arrow_color, ec=arrow_color,linewidth=4)
def plot_dot(state, width, ax):
    """Mark grid cell `state` with a black dot (row-major indexing)."""
    col = state % width
    row = state // width
    ax.plot(col, row, 'ko', markersize=10)
def plot_questionmark(state, width, ax):
    """Mark grid cell `state` with a large '?' glyph (row-major indexing)."""
    col = state % width
    row = state // width
    ax.plot(col, row, 'k', marker=r'$?$', markersize=40)
def plot_optimal_policy(pi, feature_mat):
    """Render a gridworld policy `pi` over a colored feature map.

    pi: 2-d grid of per-cell action sets; each element of a cell may be a
    character ('^', 'v', '>', '<', '.', 'w') or an action tuple such as
    (-1, 0).  feature_mat: 2-d grid of one-hot feature vectors (None = wall).

    Bug fix: characters were compared with `is` against string literals
    ('^', 'v', 'w', ...), which only works by CPython interning accident;
    now uses `==`.
    NOTE(review): tuple actions are matched per-cell-element, but iterating
    a tuple yields its ints, so `char == (-1,0)` can only match when the
    cell holds a list of tuples -- confirm the expected `pi` format.
    """
    plt.figure()
    ax = plt.axes()
    count = 0
    print(pi)
    rows,cols = len(pi), len(pi[0])
    for line in pi:
        for el in line:
            print("optimal action", el)
            # could be a stochastic policy with more than one optimal action
            for char in el:
                print(char)
                if char == "^" or char == (-1,0):
                    plot_arrow(count, cols, ax, "up")
                elif char == "v" or char == (1,0):
                    plot_arrow(count, cols, ax, "down")
                elif char == ">" or char == (0,1):
                    plot_arrow(count, cols, ax, "right")
                elif char == "<" or char == (0,-1):
                    plot_arrow(count, cols, ax, "left")
                elif char == ".":
                    plot_dot(count, cols, ax)
                elif el == "w":
                    #wall
                    pass
                else:
                    print("error in policy format")
                    sys.exit()
            count += 1
    # map each cell to a color index: 0 = wall (None), else feature id + 1
    mat = [[0 if fvec is None else fvec.index(1)+1 for fvec in row] for row in feature_mat]
    #convert feature_mat into colors
    cmap = colors.ListedColormap(['black','white','tab:red', 'tab:blue','tab:green','tab:purple', 'tab:orange', 'tab:gray', 'tab:cyan'])
    im = plt.imshow(mat, cmap=cmap, interpolation='none', aspect='equal')
    ax = plt.gca()
    ax.set_xticks(np.arange(-.5, cols, 1), minor=True);
    ax.set_yticks(np.arange(-.5, rows, 1), minor=True);
    # Gridlines based on minor ticks
    ax.grid(which='minor', color='k', linestyle='-', linewidth=5)
    # hide all tick labels and tick marks
    ax.xaxis.set_major_formatter(plt.NullFormatter())
    ax.yaxis.set_major_formatter(plt.NullFormatter())
    ax.yaxis.set_major_locator(plt.NullLocator())
    ax.xaxis.set_major_locator(plt.NullLocator())
    plt.show()
def plot_optimal_policy_vav(pi, feature_mat, walls=False, filename=False, show=False, arrow_color='k', feature_colors = None):
    """Render a gridworld policy over a colored feature map.

    pi: dict mapping (row, col) -> iterable of optimal actions, where an
    action is an (dr, dc) tuple, None (terminal dot), or 'w' (wall).
    feature_mat: 2-d grid of one-hot feature vectors; None marks a wall.
    walls/filename/show control output; arrow_color and feature_colors
    customize rendering.

    NOTE(review): `count` (the linear cell index handed to plot_arrow) is
    only advanced inside the `if feature_mat[r][c]:` body as written here --
    if any cell is None, later arrows would be drawn in the wrong cells.
    Verify against grids that actually contain walls.
    """
    #takes a dictionary of policy optimal actions
    #takes a 2d array of feature vectors
    plt.figure()
    ax = plt.axes()
    count = 0
    print(pi)
    rows,cols = len(feature_mat), len(feature_mat[0])
    for r in range(rows):
        for c in range(cols):
            if feature_mat[r][c]:
                opt_actions = pi[(r,c)]
                for a in opt_actions:
                    print("optimal action", a)
                    # could be a stochastic policy with more than one optimal action
                    if a is None:
                        plot_dot(count, cols, ax)
                    else:
                        if a == (-1,0):
                            plot_arrow(count, cols, ax, "up", arrow_color)
                        elif a == (1,0):
                            plot_arrow(count, cols, ax, "down", arrow_color)
                        elif a == (0,1):
                            plot_arrow(count, cols, ax, "right", arrow_color)
                        elif a == (0,-1):
                            plot_arrow(count, cols, ax, "left", arrow_color)
                        elif a is None:
                            plot_dot(count, cols, ax)
                        elif a is "w":
                            #wall
                            pass
                        else:
                            print("error in policy format")
                            #sys.exit()
                count += 1
    print(feature_mat)
    #use for wall states
    #if walls:
    # map each cell to a color index: 0 = wall (None), else feature id + 1
    mat = [[0 if fvec is None else fvec.index(1)+1 for fvec in row] for row in feature_mat]
    #mat =[[0,0],[2,2]]
    feature_set = set()
    for mrow in mat:
        for m in mrow:
            feature_set.add(m)
    num_features = len(feature_set)
    print(mat)
    if feature_colors is None:
        all_colors = ['black','white','tab:red','tab:blue','tab:gray','tab:green','tab:purple', 'tab:orange', 'tab:cyan']
    else:
        all_colors = feature_colors
    # keep only the colors for feature ids actually present, in order
    colors_to_use = []
    for f in range(9):#hard coded to only have 9 features right now
        if f in feature_set:
            colors_to_use.append(all_colors[f])
    cmap = colors.ListedColormap(colors_to_use)
    # else:
    #     mat = [[fvec.index(1) for fvec in row] for row in feature_mat]
    #     cmap = colors.ListedColormap(['white','tab:red','tab:blue','tab:green','tab:purple', 'tab:orange', 'tab:gray', 'tab:cyan'])
    #input()
    #convert feature_mat into colors
    #heatmap = plt.imshow(mat, cmap="Reds", interpolation='none', aspect='equal')
    im = plt.imshow(mat, cmap=cmap, interpolation='none', aspect='equal')
    ax = plt.gca()
    ax.set_xticks(np.arange(-.5, cols, 1), minor=True);
    ax.set_yticks(np.arange(-.5, rows, 1), minor=True);
    #ax.grid(which='minor', axis='both', linestyle='-', linewidth=5, color='k')
    # Gridlines based on minor ticks
    ax.grid(which='minor', color='k', linestyle='-', linewidth=5)
    ax.xaxis.set_major_formatter(plt.NullFormatter())
    ax.yaxis.set_major_formatter(plt.NullFormatter())
    ax.yaxis.set_major_locator(plt.NullLocator())
    ax.xaxis.set_major_locator(plt.NullLocator())
    #cbar = plt.colorbar(heatmap)
    #cbar.ax.tick_params(labelsize=20)
    plt.tight_layout()
    if filename:
        plt.savefig(filename)
    elif show:
        plt.show()
def plot_test_questions(question_list, feature_mat, walls=False, filename=False, show=False, arrow_color='k', feature_colors = None):
    """Render a gridworld and draw a question mark on every queried (state, action).

    Args:
        question_list: iterable of (state, action) pairs where state is an (r, c)
            tuple; action may be a single action, a list of actions, or None.
        feature_mat: 2d array of one-hot feature vectors; None marks a wall cell.
        walls: unused here (kept for interface symmetry with the sibling plotters).
        filename: if truthy, save the figure to this path instead of showing it.
        show: if True (and no filename), display the figure interactively.
        arrow_color: unused — arrows are commented out; only question marks drawn.
        feature_colors: optional list of 9 color names; defaults to the hard-coded
            palette below (index 0 = wall/black, 1 = feature 0/white, ...).
    """
    #takes a dictionary of policy optimal actions
    #takes a 2d array of feature vectors
    plt.figure()
    ax = plt.axes()
    # count is the flat cell index (row-major), used by the plot_* helpers
    count = 0
    rows,cols = len(feature_mat), len(feature_mat[0])
    for r in range(rows):
        for c in range(cols):
            if feature_mat[r][c]:
                for (s,a) in question_list:
                    if s == (r,c):
                        # normalize to a list so single actions and lists plot alike
                        if type(a) is list:
                            opt_actions = a
                        else:
                            opt_actions = [a]
                        for a in opt_actions:
                            print("optimal action", a)
                            # could be a stochastic policy with more than one optimal action
                            if a is None:
                                #plot_dot(count, cols, ax)
                                continue # don't plot anything at terminal no choice there anyways
                            else:
                                # if a == (-1,0):
                                #     plot_arrow(count, cols, ax, "up", arrow_color)
                                # elif a == (1,0):
                                #     plot_arrow(count, cols, ax, "down", arrow_color)
                                # elif a == (0,1):
                                #     plot_arrow(count, cols, ax, "right", arrow_color)
                                # elif a == (0,-1):
                                #     plot_arrow(count, cols, ax, "left", arrow_color)
                                # elif a is None:
                                plot_questionmark(count, cols, ax)
                                # elif a is "w":
                                #     #wall
                                #     pass
                                # else:
                                #     print("error in policy format")
                                #     #sys.exit()
            count += 1
    print(feature_mat)
    #use for wall states
    #if walls:
    # 0 encodes a wall (None); feature index i becomes color index i+1
    mat = [[0 if fvec is None else fvec.index(1)+1 for fvec in row] for row in feature_mat]
    #mat =[[0,0],[2,2]]
    feature_set = set()
    for mrow in mat:
        for m in mrow:
            feature_set.add(m)
    num_features = len(feature_set)
    print(mat)
    if feature_colors is None:
        all_colors = ['black','white','tab:red','tab:blue','tab:gray','tab:green','tab:purple', 'tab:orange', 'tab:cyan']
    else:
        all_colors = feature_colors
    # only pass colors for features that actually occur, keeping palette order
    colors_to_use = []
    for f in range(9):#hard coded to only have 9 features right now
        if f in feature_set:
            colors_to_use.append(all_colors[f])
    cmap = colors.ListedColormap(colors_to_use)
    # else:
    #     mat = [[fvec.index(1) for fvec in row] for row in feature_mat]
    #     cmap = colors.ListedColormap(['white','tab:red','tab:blue','tab:green','tab:purple', 'tab:orange', 'tab:gray', 'tab:cyan'])
    #input()
    #convert feature_mat into colors
    #heatmap = plt.imshow(mat, cmap="Reds", interpolation='none', aspect='equal')
    im = plt.imshow(mat, cmap=cmap, interpolation='none', aspect='equal')
    ax = plt.gca()
    # minor ticks at cell boundaries so the grid lines land between cells
    ax.set_xticks(np.arange(-.5, cols, 1), minor=True);
    ax.set_yticks(np.arange(-.5, rows, 1), minor=True);
    #ax.grid(which='minor', axis='both', linestyle='-', linewidth=5, color='k')
    # Gridlines based on minor ticks
    ax.grid(which='minor', color='k', linestyle='-', linewidth=5)
    ax.xaxis.set_major_formatter(plt.NullFormatter())
    ax.yaxis.set_major_formatter(plt.NullFormatter())
    ax.yaxis.set_major_locator(plt.NullLocator())
    ax.xaxis.set_major_locator(plt.NullLocator())
    #cbar = plt.colorbar(heatmap)
    #cbar.ax.tick_params(labelsize=20)
    plt.tight_layout()
    if filename:
        plt.savefig(filename)
    elif show:
        plt.show()
def _plot_traj_arrows(traj, feature_mat, ax, rows, cols, arrow_fn, arrow_color):
    """Draw one trajectory's actions onto *ax*.

    Args:
        traj: iterable of (state, action) pairs; state is an (r, c) tuple and
            action is a single action, a list of actions, or None (terminal).
        feature_mat: 2d array of feature vectors; None marks a wall cell.
        ax: matplotlib axes to draw on.
        rows, cols: grid dimensions.
        arrow_fn: plot_arrow for the preferred trajectory, plot_dashed_arrow
            for the dispreferred one.
        arrow_color: color passed through to arrow_fn.
    """
    count = 0  # flat cell index (row-major), used by the plot_* helpers
    for r in range(rows):
        for c in range(cols):
            if feature_mat[r][c]:
                for (s, a) in traj:
                    if s == (r, c):
                        # normalize so single actions and lists plot alike
                        opt_actions = a if type(a) is list else [a]
                        for a in opt_actions:
                            print("optimal action", a)
                            # could be a stochastic policy with more than one optimal action
                            if a is None:
                                # terminal state: mark with a dot
                                plot_dot(count, cols, ax)
                            elif a == (-1,0):
                                arrow_fn(count, cols, ax, "up", arrow_color)
                            elif a == (1,0):
                                arrow_fn(count, cols, ax, "down", arrow_color)
                            elif a == (0,1):
                                arrow_fn(count, cols, ax, "right", arrow_color)
                            elif a == (0,-1):
                                arrow_fn(count, cols, ax, "left", arrow_color)
                            elif a == "w":
                                # wall marker: nothing to draw
                                # (bug fix: was `a is "w"`, a fragile identity
                                # comparison that only works via CPython interning)
                                pass
                            else:
                                print("error in policy format")
            count += 1
def plot_preference_query(good_traj, bad_traj, feature_mat, walls=False, filename=False, show=False,
                            good_arrow_color='b', bad_arrow_color='r', feature_colors = None):
    """Plot a pairwise preference query: *good_traj* with solid arrows and
    *bad_traj* with dashed arrows on top of the feature-colored grid.

    Fixes over the previous version: the two near-identical trajectory loops
    are factored into _plot_traj_arrows, the unreachable duplicate
    ``elif a is None`` branch was removed, and ``a is "w"`` became ``a == "w"``.

    Args:
        good_traj, bad_traj: iterables of (state, action) pairs.
        feature_mat: 2d array of one-hot feature vectors; None marks a wall.
        walls: unused (kept for interface compatibility).
        filename: if truthy, save the figure to this path.
        show: if True, also display the figure.
        good_arrow_color, bad_arrow_color: arrow colors for the two trajectories.
        feature_colors: optional 9-entry palette; defaults to the list below.
    """
    plt.figure()
    ax = plt.axes()
    rows,cols = len(feature_mat), len(feature_mat[0])
    # preferred trajectory: solid arrows
    _plot_traj_arrows(good_traj, feature_mat, ax, rows, cols, plot_arrow, good_arrow_color)
    # dispreferred trajectory: dashed arrows
    _plot_traj_arrows(bad_traj, feature_mat, ax, rows, cols, plot_dashed_arrow, bad_arrow_color)
    # 0 encodes a wall (None); feature index i becomes color index i+1
    mat = [[0 if fvec is None else fvec.index(1)+1 for fvec in row] for row in feature_mat]
    feature_set = set()
    for mrow in mat:
        for m in mrow:
            feature_set.add(m)
    num_features = len(feature_set)
    print(mat)
    if feature_colors is None:
        all_colors = ['black','white','tab:red','tab:blue','tab:gray','tab:green','tab:purple', 'tab:orange', 'tab:cyan']
    else:
        all_colors = feature_colors
    # only pass colors for features that actually occur, keeping palette order
    colors_to_use = []
    for f in range(9):#hard coded to only have 9 features right now
        if f in feature_set:
            colors_to_use.append(all_colors[f])
    cmap = colors.ListedColormap(colors_to_use)
    im = plt.imshow(mat, cmap=cmap, interpolation='none', aspect='equal')
    ax = plt.gca()
    # minor ticks at cell boundaries so grid lines land between cells
    ax.set_xticks(np.arange(-.5, cols, 1), minor=True)
    ax.set_yticks(np.arange(-.5, rows, 1), minor=True)
    # Gridlines based on minor ticks
    ax.grid(which='minor', color='k', linestyle='-', linewidth=5)
    ax.xaxis.set_major_formatter(plt.NullFormatter())
    ax.yaxis.set_major_formatter(plt.NullFormatter())
    ax.yaxis.set_major_locator(plt.NullLocator())
    ax.xaxis.set_major_locator(plt.NullLocator())
    plt.tight_layout()
    if filename:
        plt.savefig(filename)
    if show:
        plt.show()
def plot_optimal_policy_vav_grid(pis, feature_mats, g_rows, g_cols, walls=False, filename=False):
    """Plot several optimal policies side by side, one subplot per policy.

    Fixes over the previous version: removed the unreachable duplicate
    ``elif a is None`` branch (a None action is already handled above), and
    replaced the fragile ``a is "w"`` identity comparison with ``a == "w"``.

    Args:
        pis: list of policies; each maps an (r, c) state to a list of actions,
            where an action is a direction tuple, None (terminal), or "w" (wall).
        feature_mats: list of 2d feature-vector grids, parallel to *pis*.
        g_rows, g_cols: subplot grid layout passed to plt.subplots.
        walls: unused (kept for interface compatibility).
        filename: if truthy, save the figure to this path; otherwise show it.
    """
    fig, axs = plt.subplots(g_rows, g_cols)
    cnt = 0  # which policy/feature_mat pair the current subplot shows
    for ax in axs:
        pi = pis[cnt]
        feature_mat = feature_mats[cnt]
        cnt += 1
        count = 0  # flat cell index (row-major) for the plot_* helpers
        rows,cols = len(feature_mat), len(feature_mat[0])
        for r in range(rows):
            for c in range(cols):
                opt_actions = pi[(r,c)]
                for a in opt_actions:
                    # could be a stochastic policy with more than one optimal action
                    if a is None:
                        plot_dot(count, cols, ax)
                    else:
                        if a == (-1,0):
                            plot_arrow(count, cols, ax, "up")
                        elif a == (1,0):
                            plot_arrow(count, cols, ax, "down")
                        elif a == (0,1):
                            plot_arrow(count, cols, ax, "right")
                        elif a == (0,-1):
                            plot_arrow(count, cols, ax, "left")
                        elif a == "w":
                            #wall
                            pass
                        else:
                            print("error in policy format")
                            sys.exit()
                count += 1
        # 0 encodes a wall (None); feature index i becomes color index i+1
        mat = [[0 if fvec is None else fvec.index(1)+1 for fvec in row] for row in feature_mat]
        feature_set = set()
        for mrow in mat:
            for m in mrow:
                feature_set.add(m)
        num_features = len(feature_set)
        all_colors = ['black','white','tab:red','tab:blue','tab:green','tab:purple', 'tab:orange', 'tab:gray', 'tab:cyan']
        # only pass colors for features that actually occur, keeping palette order
        colors_to_use = []
        for f in range(9):#hard coded to only have 9 features right now
            if f in feature_set:
                colors_to_use.append(all_colors[f])
        cmap = colors.ListedColormap(colors_to_use)
        ax.imshow(mat, cmap=cmap, interpolation='none', aspect='equal')
        # minor ticks at cell boundaries so grid lines land between cells
        ax.set_xticks(np.arange(-.5, cols, 1), minor=True)
        ax.set_yticks(np.arange(-.5, rows, 1), minor=True)
        # Gridlines based on minor ticks
        ax.grid(which='minor', color='k', linestyle='-', linewidth=5)
        ax.xaxis.set_major_formatter(plt.NullFormatter())
        ax.yaxis.set_major_formatter(plt.NullFormatter())
        ax.yaxis.set_major_locator(plt.NullLocator())
        ax.xaxis.set_major_locator(plt.NullLocator())
    plt.tight_layout()
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def plot_test_query(state, better_action, worse_action, feature_mat, equal_pref = False):
    """Plot a single test query: the better action as a solid arrow and the
    worse action dashed (or solid too when both are equally preferred).

    Fix: the previous version compared actions with ``is "^"`` etc. — identity
    comparison against string literals, which only works via CPython string
    interning and raises SyntaxWarning on Python 3.8+. Replaced with a lookup
    table and ``in`` / ``==`` comparisons; unknown symbols still draw nothing.

    Args:
        state: flat cell index (row-major, starting top-left).
        better_action: one of "^", "v", ">", "<".
        worse_action: one of "^", "v", ">", "<".
        feature_mat: 2d array of one-hot feature vectors; None marks a wall.
        equal_pref: if True, draw both actions as solid arrows.
    """
    plt.figure()
    ax = plt.axes()
    count = 0
    rows,cols = len(feature_mat), len(feature_mat[0])
    # map the action symbols onto the direction names the helpers expect
    direction_of = {"^": "up", "v": "down", ">": "right", "<": "left"}
    if better_action in direction_of:
        plot_arrow(state, cols, ax, direction_of[better_action])
    if worse_action in direction_of:
        if equal_pref:
            # equally good: draw solid, same as the better action
            plot_arrow(state, cols, ax, direction_of[worse_action])
        else:
            plot_dashed_arrow(state, cols, ax, direction_of[worse_action])
    # 0 encodes a wall (None); feature index i becomes color index i+1
    mat = [[0 if fvec is None else fvec.index(1)+1 for fvec in row] for row in feature_mat]
    cmap = colors.ListedColormap(['black','white','tab:blue','tab:red','tab:green','tab:purple', 'tab:orange', 'tab:gray', 'tab:cyan'])
    plt.imshow(mat, cmap=cmap, interpolation='none', aspect='equal')
    # Add the grid
    ax = plt.gca()
    # Minor ticks at cell boundaries so grid lines land between cells
    ax.set_xticks(np.arange(-.5, cols, 1), minor=True)
    ax.set_yticks(np.arange(-.5, rows, 1), minor=True)
    ax.grid(which='minor', axis='both', linestyle='-', linewidth=5, color='k')
    #remove ticks
    plt.tick_params(
        axis='both',          # changes apply to the x-axis
        which='both',         # both major and minor ticks are affected
        bottom='off',         # ticks along the bottom edge are off
        top='off',            # ticks along the top edge are off
        left='off',
        right='off',
        labelbottom='off',
        labelleft='off')      # labels along the bottom edge are off
    plt.show()
if __name__=="__main__":
    # Demo: a 3x3 grid with one distinct feature per cell; the bottom-right
    # cell is a wall (None). Policy symbols: ^ v < > are actions, '.' terminal.
    pi = [['v', '^><','.'],['<>v','<','>'],['<>^v','v' ,'^']]
    feature_mat = [[(1,0,0),(0,1,0),(0,0,1)],[(0,0,0,1),(0,0,0,0,1),(0,0,0,0,0,1)],[(0,0,0,0,0,0,1), (0,0,0,0,0,0,0,1),None] ]
    plot_optimal_policy(pi, feature_mat)
    state = 3 #the integer value of state starting from top left and reading left to right, top to bottom.
    better_action = "v"
    worse_action = "<"
    #plot the optimal test query, where the right answer is bolded (add equal_pref=True argument if both are equally good)
    plot_test_query(state, better_action, worse_action, feature_mat)
    state = 4 #the integer value of state starting from top left and reading left to right, top to bottom.
    better_action = "v"
    worse_action = "<"
    # same query but with both actions marked as equally preferred
    plot_test_query(state, better_action, worse_action, feature_mat, equal_pref = True)
| 3.28125 | 3 |
vidsz/opencv/writer/__init__.py | BlueMirrors/vidsz | 10 | 12761693 | """Implements vidsz's Writer for Opencv Backend
"""
from .base_writer import Writer
| 1.257813 | 1 |
ex048.py | EduotavioFonseca/ProgramasPython | 0 | 12761694 | <filename>ex048.py
# Read five weights and report the smallest and the largest.
pesos = [float(input('Digite o seu peso em kg: ')) for _ in range(5)]
menor = min(pesos)
maior = max(pesos)
print()
print('O menor peso é {} kg e o maior peso vale {} kg.'.format(menor, maior))
| 3.953125 | 4 |
tests/test_util.py | nozyh/maf | 0 | 12761695 | <reponame>nozyh/maf
# Copyright (c) 2013, Preferred Infrastructure, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from maflib.util import *
from maflib.core import Parameter
import unittest
class TestProduct(unittest.TestCase):
    """Tests for maflib.util.product (cartesian product of parameter values)."""

    def test_empty_input(self):
        # An empty parameter space yields exactly one empty combination.
        self.assertEqual([{}], product({}))

    def test_single_key(self):
        actual = product({ 'key': [0, 1, 2] })
        expected = [{ 'key': v } for v in (0, 1, 2)]
        self.assertListEqual(expected, actual)

    def test_two_keys(self):
        actual = product({ 'a': [0, 1, 2], 'b': ['x', 'y'] })
        expected = [{ 'a': a, 'b': b } for a in (0, 1, 2) for b in ('x', 'y')]
        # Order of combinations is unspecified, so compare as sets of Parameters.
        self.assertSetEqual(set(Parameter(e) for e in expected),
                            set(Parameter(p) for p in actual))

    def test_empty_value_for_some_key(self):
        # Any key with an empty value list empties the whole product.
        self.assertEqual([], product({ 'a': [0, 1], 'b': ['x', 'y'], 'c': [] }))
class TestSample(unittest.TestCase):
    """Tests for maflib.util.sample (random parameter sampling)."""

    def test_zero_sample(self):
        # Requesting zero samples yields an empty list.
        self.assertEqual([], sample(0, { 'key': [0, 1] }))

    def test_empty_distribution(self):
        self.assertListEqual([{}], sample(1, {}))

    def test_sample_from_interval(self):
        # A (low, high) tuple means "draw from the open interval".
        for param in sample(100, { 'key': (-2, 3) }):
            self.assertGreater(param['key'], -2)
            self.assertLess(param['key'], 3)

    def test_sample_from_list(self):
        choices = set(('a', 'b', 'c', 'x', 'y', 'z'))
        for param in sample(100, { 'key': list(choices) }):
            self.assertIn(param['key'], choices)

    def test_sample_from_function(self):
        # A callable value is invoked once per sample.
        state = {'calls': 0}
        def gen():
            state['calls'] += 1
            return state['calls'] % 3
        expects = [1, 2, 0, 1, 2, 0]
        for param, expect in zip(sample(6, { 'key': gen }), expects):
            self.assertEqual(expect, param['key'])
| 1.773438 | 2 |
setup.py | EpicScizor/climatenet | 0 | 12761696 | #
# Cython build script: compiles this project's .py modules into C extensions.
# Build in place with:  python setup.py build_ext --inplace
from setuptools import setup
from Cython.Build import cythonize
setup(
    name = "Climate",
    # cythonize every .py file; intermediate C sources go to ./output
    ext_modules = cythonize(["*.py"],
                        build_dir="output"),
)
#python setup.py build_ext --inplace
hyapi/auth.py | kimtree/hanyang-univ-api | 6 | 12761697 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import urllib
import requests
from error import HYAuthError
class HYAuthHandler(object):
    """OAuth authorization-code flow handler for the Hanyang University API."""

    OAUTH_HOST = 'api.hanyang.ac.kr'
    OAUTH_ROOT = '/oauth/'

    def __init__(self, client_id, client_secret, scope=None, callback=None):
        self.client_id = client_id
        self.client_secret = client_secret
        self.scope = scope
        self.code = None
        self.access_token = None
        self.callback = callback

    def _get_oauth_url(self, endpoint):
        # Assemble https://<host><root><endpoint>
        return 'https://{0}{1}{2}'.format(self.OAUTH_HOST, self.OAUTH_ROOT, endpoint)

    def set_access_token(self, access_token):
        self.access_token = access_token

    def set_code(self, code):
        self.code = code

    def get_code_url(self):
        """Build the URL the user visits to grant an authorization code."""
        params = {
            'client_id': self.client_id,
            'response_type': 'code',
            'redirect_uri': self.callback,
            'scope': self.scope
        }
        return self._get_oauth_url('authorize') + '?' + urllib.urlencode(params)

    def get_access_token(self):
        """Exchange the stored authorization code for an access token.

        Raises HYAuthError when the server reports an error instead of a token.
        """
        params = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'code': self.code,
            'scope': self.scope,
            'redirect_uri': self.callback,
            'grant_type': 'authorization_code'
        }
        response = requests.get(self._get_oauth_url('token'), params=params)
        result = response.json()
        token = result.get('access_token')
        if not token:
            raise HYAuthError(result.get('error_description'))
        self.access_token = token
        return token
| 2.828125 | 3 |
2020/24/solution.py | Rexcantor/advent-of-code | 1 | 12761698 | lines = [line.strip() for line in open("input.txt", 'r') if line.strip() != ""]
tiles = []
# Cube coordinates for the six hex directions:
# e - 0; se - 1; sw - 2; w - 3; nw - 4; ne - 5
directions = ((1, -1, 0), (0, -1, 1), (-1, 0, 1),
              (-1, 1, 0), (0, 1, -1), (1, 0, -1))
# Tokenize each input line into a list of direction indices. Two-letter
# moves (se/sw/nw/ne) consume two characters; e/w consume one.
for line in lines:
    tile = []
    while len(line) > 0:
        if line[0] == 's':
            tile.append(1 if line[1] == 'e' else 2)
            line = line[2:]
        elif line[0] == 'n':
            tile.append(4 if line[1] == 'w' else 5)
            line = line[2:]
        elif line[0] == 'e':
            tile.append(0)
            line = line[1:]
        elif line[0] == 'w':
            tile.append(3)
            line = line[1:]
    tiles.append(tile)
##########################################
# PART 1 #
##########################################
def part1(tiles):
    """Walk each move list from the origin and toggle the tile it ends on.

    Returns the set of cube coordinates that end up black (flipped an odd
    number of times).
    """
    flipped = set()
    for moves in tiles:
        x, y, z = 0, 0, 0
        for move in moves:
            dx, dy, dz = directions[move]
            x, y, z = x + dx, y + dy, z + dz
        # symmetric difference toggles: white -> black, black -> white
        flipped ^= {(x, y, z)}
    return flipped
# Part 1: flip tiles per the puzzle input and count how many end up black.
blackTiles = part1(tiles)
print('Answer to part 1 is', len(blackTiles))
##########################################
# PART 2 #
##########################################
def get_adjacent(pos):
    """Return the six hex neighbours of *pos* as a tuple of cube coordinates,
    in the same order as the module-level ``directions`` table."""
    return tuple(
        tuple(p + d for p, d in zip(pos, vector))
        for vector in directions
    )
def part2(hexMap):
    """Run 100 days of the hex cellular automaton on *hexMap* (a set of black
    tiles, mutated in place) and return the final black-tile count.

    Rules: a black tile with 0 or more than 2 black neighbours flips white;
    a white tile with exactly 2 black neighbours flips black.
    """
    for i in range(100):
        print('Day {}: {}'.format(i, len(hexMap)))
        # snapshot of this generation; hexMap is mutated below
        frozen_map = set(hexMap)
        # white tiles adjacent to at least one black tile — the only white
        # tiles that can possibly turn black
        to_check = set()
        for tile in frozen_map:
            black_adj = 0
            for adj in get_adjacent(tile):
                if adj not in frozen_map:
                    to_check.add(adj)
                else:
                    black_adj += 1
            # black tile dies with 0 or >2 black neighbours
            if black_adj == 0 or black_adj > 2:
                hexMap.remove(tile)
        for tile in to_check:
            black_adj = 0
            for adj in get_adjacent(tile):
                if adj in frozen_map:
                    black_adj += 1
            # white tile becomes black with exactly 2 black neighbours
            if black_adj == 2:
                hexMap.add(tile)
    return len(hexMap)
# Part 2: evolve part 1's black tiles for 100 days and report the count.
print('Answer to part 2 is', part2(blackTiles))
| 3.3125 | 3 |
tests/__init__.py | ye-yu/aaapi | 1 | 12761699 | <gh_stars>1-10
"""Unit test package for aaapi."""
| 0.941406 | 1 |
jp.atcoder/abc144/abc144_a/8525603.py | kagemeka/atcoder-submissions | 1 | 12761700 | # 2019-11-19 19:43:48(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# import re
# import heapq
# import array
# from scipy.misc import comb # (default: exact=False)
# import numpy as np
def main():
    """AtCoder ABC144 A: print a*b when both factors are single digits,
    otherwise print -1 (Takahashi only knows the 1..9 times table)."""
    a, b = map(int, sys.stdin.readline().split())
    if a < 10 and b < 10:
        print(a * b)
    else:
        print(-1)


if __name__ == "__main__":
    main()
| 2.640625 | 3 |
packages/grid/backend/grid/db/session.py | pculliton/PySyft | 2 | 12761701 | <gh_stars>1-10
# third party
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
# grid absolute
from grid.core.config import settings
from grid.db.base import Base
def get_db_engine(db_uri: str = str(settings.SQLALCHEMY_DATABASE_URI)) -> Engine:
    """Create a SQLAlchemy engine for *db_uri*.

    For sqlite URIs the schema is created immediately (no alembic migration
    support for the in-memory store); for other databases pool_pre_ping
    validates pooled connections before use.
    """
    if db_uri.startswith("sqlite://"):
        db_engine = create_engine(db_uri, echo=False)
        # TODO change to use alembic properly with the sqlite memory store:
        # https://stackoverflow.com/questions/31406359/use-alembic-to-upgrade-in-memory-sqlite3-database
        Base.metadata.create_all(db_engine)
    else:
        db_engine = create_engine(db_uri, pool_pre_ping=True)
        # Base.metadata.create_all(db_engine)
    return db_engine
def get_db_session(db_uri: str = str(settings.SQLALCHEMY_DATABASE_URI)) -> Session:
    """Return a new ORM session bound to an engine for *db_uri*."""
    session_factory = sessionmaker(
        autocommit=False, autoflush=False, bind=get_db_engine(db_uri=db_uri)
    )
    return session_factory()
| 2.234375 | 2 |
python/koheron/cli.py | Koheron/koheron-sdk | 77 | 12761702 | <filename>python/koheron/cli.py
import click
# --------------------------------------------
# Call koheron-server
# --------------------------------------------
class ConnectionType(object):
    """Connection settings shared by the CLI commands via the click context.

    Args:
        host: IP address or hostname of the koheron server.
        unixsock: path of a Unix domain socket, if used instead of TCP.
    """
    def __init__(self, host="", unixsock=""):
        self.host = host
        # Bug fix: `unixsock` was accepted but silently discarded; store it so
        # commands can actually use a Unix-socket connection.
        self.unixsock = unixsock
@click.group()
@click.option('--host', default='', help='Host ip address', envvar='HOST')
@click.pass_context
def cli(ctx, host):
    """Root command group; stores the connection settings on the click context
    so subcommands can retrieve them via @click.pass_obj."""
    if host != "":
        ctx.obj = ConnectionType(host=str(host))
@cli.command()
def version():
    ''' Get the version of koheron python library '''
    # imported lazily so the CLI starts quickly
    from .version import __version__
    click.echo(__version__)
@cli.command()
@click.pass_obj
def devices(conn_type):
    ''' Get the list of devices '''
    # connect to the server and print its device-name -> index mapping
    from .koheron import KoheronClient
    client = KoheronClient(host=conn_type.host)
    click.echo(client.devices_idx)
@cli.command()
@click.pass_obj
@click.option('--device', default=None)
def commands(conn_type, device):
    ''' Get the list of commands for a specified device '''
    from .koheron import KoheronClient
    client = KoheronClient(host=conn_type.host)
    if device is None:
        # no device given: print the command table for all devices
        click.echo(client.commands)
    else:
        device_idx = client.devices_idx[device]
        click.echo(client.commands[device_idx])
# --------------------------------------------
# Call HTTP API
# --------------------------------------------
@cli.command()
@click.pass_obj
@click.argument('instrument_zip')
@click.option('--run', is_flag=True)
def upload(conn_type, instrument_zip, run):
    ''' Upload instrument.zip '''
    # uses the HTTP API rather than the koheron-server socket protocol
    from .koheron import upload_instrument
    upload_instrument(conn_type.host, instrument_zip, run=run)
@cli.command()
@click.pass_obj
@click.argument('instrument_name', required=False)
@click.option('--restart', is_flag=True)
def run(conn_type, instrument_name, restart):
    ''' Run a given instrument '''
    # uses the HTTP API; --restart forces a restart of a running instrument
    from .koheron import run_instrument
    run_instrument(conn_type.host, instrument_name, restart=restart)
| 2.375 | 2 |
Heap/545.Top k Largest Numbers II/Solution.py | Zhenye-Na/LxxxCode | 12 | 12761703 | <reponame>Zhenye-Na/LxxxCode
import heapq
class Solution:
    """Streaming "top k largest numbers" tracker.

    Improvement over the previous version: instead of pushing every number
    onto one ever-growing heap (O(n) memory), only the k largest elements
    seen so far are kept in a min-heap. `add` is O(log k) and `topk` is
    O(k log k); the results are identical.
    """

    def __init__(self, k):
        """
        @param: k: An integer, how many of the largest numbers to report
        """
        self.k = k
        # min-heap holding at most k elements: the k largest seen so far,
        # with the smallest of them at pq[0]
        self.pq = []

    def add(self, num):
        """
        @param: num: Number to be added
        @return: nothing
        """
        if self.k <= 0:
            # nothing to track; topk() will always return []
            return
        if len(self.pq) < self.k:
            heapq.heappush(self.pq, num)
        elif num > self.pq[0]:
            # num beats the smallest retained element: swap it in
            heapq.heapreplace(self.pq, num)

    def topk(self):
        """
        @return: Top k elements, largest first
        """
        return sorted(self.pq, reverse=True)
| 3.109375 | 3 |
wd/gsimport.py | WolfgangFahl/pyOnlineSpreadSheetEditing | 0 | 12761704 | import justpy as jp
from spreadsheet.googlesheet import GoogleSheet
from lodstorage.lod import LOD
from lodstorage.sparql import SPARQL
from markupsafe import Markup
import copy
import datetime
import re
import os
import pprint
import sys
import traceback
from jpwidgets.widgets import LodGrid,MenuButton, MenuLink, QAlert,QPasswordDialog
from spreadsheet.version import Version
from spreadsheet.wikidata import Wikidata
from spreadsheet.wbquery import WikibaseQuery
DEBUG = 0
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
class WikidataGrid():
    '''
    the tabular data to work with: a list of dicts (lod) plus the
    WikibaseQuery metadata describing how columns map to Wikidata properties
    '''
    def __init__(self,wbQueries):
        '''
        constructor

        Args:
            wbQueries(dict): the WikibaseQueries by entity/sheet name
        '''
        self.wbQueries=wbQueries
    def setLodFromDataFrame(self,df):
        '''
        set my List of Dicts from the given data frame

        Args:
            df(Dataframe): the dataframe to set my list of dicts from
        '''
        lod=df.to_dict('records')
        self.setLod(lod)
    def setLod(self,lod:list):
        '''
        set my list of dicts; derives the column names from the first row,
        tags every row with its index and keeps a deep copy for the view

        Args:
            lod(list): a list of dicts to work with

        Raises:
            Exception: if the list is empty (columns could not be derived)
        '''
        self.lod=lod
        if len(lod)<1:
            raise Exception("Empty List of dicts is not valid")
        self.columns=self.lod[0].keys()
        # remember each row's position so query results can be mapped back
        for index,row in enumerate(self.lod):
            row["lodRowIndex"]=index
        # viewLod is mutated with html links; lod stays the raw data
        self.viewLod=copy.deepcopy(self.lod)
    def getColumnTypeAndVarname(self,entityName,propName):
        '''
        slightly modified getter to account for "item" special case

        Args:
            entityName(str): the name of the entity/sheet
            propName(str): the property name (or "item")

        Returns:
            tuple: (wbQuery, column, propType, varName)
        '''
        wbQuery=self.wbQueries[entityName]
        if propName=="item":
            # "item" is not a declared property; it maps onto itself
            column="item"
            propType=""
            varName="item"
        else:
            column,propType,varName=wbQuery.getColumnTypeAndVarname(propName)
        return wbQuery,column,propType,varName
    def getHtmlColums(self,entityName):
        '''
        get the indices of columns that have html content(links) for the given entityName

        Args:
            entityName(str): the name of the entity

        Returns:
            list: column indices (column 0 / item is always included)
        '''
        htmlColumns=[0]
        # loop over columns of dataframe
        wbQuery=self.wbQueries[entityName]
        for columnIndex,column in enumerate(self.columns):
            # check whether there is metadata for the column
            if column in wbQuery.propertiesByColumn:
                propRow=wbQuery.propertiesByColumn[column]
                propType=propRow["Type"]
                # item (empty type), external ids and urls are rendered as links
                if not propType or propType=="extid" or propType=="url":
                    htmlColumns.append(columnIndex)
        return htmlColumns
    def createLink(self,url,text):
        '''
        create an html link from the given url and text

        Args:
            url(str): the url to create a link for
            text(str): the text to add for the link

        Returns:
            str: the html anchor markup
        '''
        link=f"<a href='{url}' style='color:blue'>{text}</a>"
        return link
    def linkWikidataItems(self,viewLod,itemColumn:str="item"):
        '''
        link the wikidata entries in the given item column if containing Q values

        Args:
            viewLod(list): the list of dicts for the view (modified in place)
            itemColumn(str): the name of the column to handle
        '''
        for row in viewLod:
            if itemColumn in row:
                item=row[itemColumn]
                # only plain Q identifiers are converted to links
                if re.match(r"Q[0-9]+",item):
                    itemLink=self.createLink(f"https://www.wikidata.org/wiki/{item}", item)
                    row[itemColumn]=itemLink
class GridSync():
    '''
    allow syncing the grid with data from wikibase: queries the wikibase
    instance for the grid's primary-key values and marks matching rows
    '''
    def __init__(self,wdgrid,sheetName,pk,debug:bool=False):
        '''
        constructor

        Args:
            wdgrid(WikidataGrid): the grid holding the tabular data
            sheetName(str): the name of the sheet/entity
            pk(str): the primary key property used to match wikibase items
            debug(bool): if True print debug information
        '''
        self.wdgrid=wdgrid
        self.sheetName=sheetName
        self.pk=pk
        self.debug=debug
        self.itemRows=wdgrid.lod
        self.wbQuery,self.pkColumn,self.pkType,self.pkProp=wdgrid.getColumnTypeAndVarname(sheetName,pk)
        # lookup of grid rows by their primary key value (duplicates dropped)
        self.itemsByPk,_dup=LOD.getLookup(self.itemRows,self.pkColumn)
        if self.debug:
            print(self.itemsByPk.keys())
    def query(self,sparql):
        '''
        query the wikibase instance based on the list of dicts; stores the
        generated SPARQL in self.sparqlQuery and the results in self.wbRows

        Args:
            sparql(SPARQL): the endpoint wrapper to run the query against
        '''
        # text-typed primary keys need a language tag for matching
        lang="en" if self.pkType =="text" else None
        valuesClause=self.wbQuery.getValuesClause(self.itemsByPk.keys(),self.pkProp,propType=self.pkType,lang=lang)
        self.sparqlQuery=self.wbQuery.asSparql(filterClause=valuesClause,orderClause=f"ORDER BY ?{self.pkProp}",pk=self.pk)
        if self.debug:
            print(self.sparqlQuery)
        self.wbRows=sparql.queryAsListOfDicts(self.sparqlQuery)
        if self.debug:
            pprint.pprint(self.wbRows)
    def checkCell(self,viewLodRow,column,value,propVarname,propType,propLabel,propUrl:str=None):
        '''
        update the cell value for the given view row with the wikibase value

        Args:
            viewLodRow(dict): the row to modify
            column(str): the column/cell to check
            value(object): the value to set for the cell
            propVarname(str): the name of the property Variable set in the SPARQL statement
            propType(str): the abbreviation for the property Type
            propLabel(str): the propertyLabel (if any)
            propUrl(str): the propertyUrl (if any)
        '''
        cellValue=viewLodRow[column]
        valueType=type(value)
        print(f"{column}({propVarname})={value}({propLabel}:{propUrl}:{valueType})⮂{cellValue}")
        # overwrite empty cells
        overwrite=not cellValue
        if cellValue:
            # overwrite values with links
            if propUrl and cellValue==value:
                overwrite=True
        if overwrite and value:
            doadd=True
            # create links for item properties
            if not propType:
                value=self.wdgrid.createLink(value, propLabel)
            elif propType=="extid":
                value=self.wdgrid.createLink(propUrl,value)
            # only strings and datetimes are written back; other types are skipped
            if valueType==str:
                pass
            elif valueType==datetime.datetime:
                value=value.strftime('%Y-%m-%d')
            else:
                doadd=False
                print(f"{valueType} not added")
            if doadd:
                viewLodRow[column]=value
    def markViewLod(self,viewLod):
        '''
        mark the view rows that match wikibase query results with item links
        and cell values

        Args:
            viewLod(list): a list of dict for the mark result (modified in place)
        '''
        # now check the rows
        for wbRow in self.wbRows:
            # get the primary key value
            pkValue=wbRow[self.pkProp]
            # item-typed pks come back as full entity URIs; reduce to the Q id
            pkValue=re.sub(r"http://www.wikidata.org/entity/(Q[0-9]+)", r"\1",pkValue)
            # if we have the primary key then we mark the whole row
            if pkValue in self.itemsByPk:
                if self.debug:
                    print(pkValue)
                # https://stackoverflow.com/questions/14538885/how-to-get-the-index-with-the-key-in-a-dictionary
                lodRow=self.itemsByPk[pkValue]
                rowIndex=lodRow["lodRowIndex"]
                viewLodRow=viewLod[rowIndex]
                itemLink=self.wdgrid.createLink(wbRow["item"],wbRow["itemLabel"])
                viewLodRow["item"]=itemLink
                itemDescription=wbRow.get("itemDescription","")
                self.checkCell(viewLodRow,"description",itemDescription,propVarname="itemDescription",propType="string",propLabel="")
                # loop over the result items
                for propVarname,value in wbRow.items():
                    # remap the property variable name to the original property description
                    if propVarname in self.wbQuery.propertiesByVarname:
                        propRow=self.wbQuery.propertiesByVarname[propVarname]
                        column=propRow["Column"]
                        propType=propRow["Type"]
                        if not propType:
                            propLabel=wbRow[f"{propVarname}Label"]
                        else:
                            propLabel=""
                        if propType=="extid":
                            propUrl=wbRow[f"{propVarname}Url"]
                        else:
                            propUrl=""
                        # Linked Or: an entity URI with a label is shown by its label
                        if type(value)==str and value.startswith("http://www.wikidata.org/entity/") and f"{propVarname}Label" in wbRow:
                            propUrl=value
                            propLabel=wbRow[f"{propVarname}Label"]
                            value=propLabel
                        if column in lodRow:
                            self.checkCell(viewLodRow,column,value,propVarname,propType,propLabel,propUrl)
class GoogleSheetWikidataImport():
'''
reactive google sheet display to be used for wikidata import of the content
'''
    def __init__(self,url,sheetNames:list,pk:str,endpoint:str,lang:str="en",debug:bool=False):
        '''
        constructor

        Args:
            url(str): the url of the google spreadsheet
            sheetNames(list): the name of the sheets to import data from
            pk(str): the primary key property to use for wikidata queries
            endpoint(str): the url of the endpoint to use
            lang(str): the language to use for labels
            debug(bool): if True show debug information
        '''
        self.debug=debug
        self.url=url
        self.sheetNames=sheetNames
        # the first sheet is shown initially
        self.sheetName=sheetNames[0]
        self.pk=pk
        self.endpoint=endpoint
        self.sparql=SPARQL(self.endpoint)
        self.lang=lang
        # @TODO make configurable
        self.metaDataSheetName="WikidataMetadata"
        self.wd=Wikidata("https://www.wikidata.org",debug=True)
        # created lazily on first reload
        self.agGrid=None
        self.wdgrid=None
        # when True no writes are performed against wikidata
        self.dryRun=True
    def clearErrors(self):
        '''
        clear the error display by emptying its html content
        '''
        self.errors.inner_html=""
    def handleException(self,ex):
        '''
        handle the given exception: show it in the error display (with a
        traceback when debugging) and echo it to the console

        Args:
            ex(Exception): the exception to handle
        '''
        errorMsg=str(ex)
        trace=""
        if self.debug:
            trace=traceback.format_exc()
        # NOTE(review): errorMsg/trace are inserted unescaped into the page
        # html — consider escaping if exception text can contain markup
        errorMsgHtml=f"{errorMsg}<pre>{trace}</pre>"
        self.errors.inner_html=errorMsgHtml
        print(errorMsg)
        if self.debug:
            print(trace)
    def load(self,url:str,sheetName:str,metaDataSheetName="WikidataMetadata"):
        '''
        load my googlesheet, wikibaseQueries and grid data

        Args:
            url(str): the url to load the spreadsheet from
            sheetName(str): the sheetName of the sheet/tab to load
            metaDataSheetName(str): the sheet holding the wikidata property metadata
        '''
        # property metadata first — it drives how the grid columns are interpreted
        wbQueries=WikibaseQuery.ofGoogleSheet(url, metaDataSheetName, debug=self.debug)
        self.wdgrid=WikidataGrid(wbQueries)
        self.gs=GoogleSheet(url)
        self.gs.open([sheetName])
        self.wdgrid.setLod(self.gs.asListOfDicts(sheetName))
    def onCheckWikidata(self,msg=None):
        '''
        check clicked - check the wikidata content against the grid rows
        and mark matching rows/cells in the view

        Args:
            msg(dict): the justpy message
        '''
        if self.debug:
            print(msg)
        try:
            self.clearErrors()
            # prepare syncing the table results with the wikibase query result
            gridSync=GridSync(self.wdgrid,self.sheetName,self.pk,debug=self.debug)
            # query based on table content
            gridSync.query(self.sparql)
            # get the view copy to insert result as html statements
            viewLod=self.wdgrid.viewLod
            gridSync.markViewLod(viewLod)
            # reload the AG Grid with the html enriched content
            self.reloadAgGrid(viewLod)
        except Exception as ex:
            self.handleException(ex)
    def reloadAgGrid(self,viewLod:list,showLimit=10):
        '''
        reload the agGrid with the given list of dicts and refresh its settings

        Args:
            viewLod(list): the list of dicts for the current view
            showLimit(int): how many rows to print when debugging
        '''
        self.agGrid.load_lod(viewLod)
        if self.debug:
            pprint.pprint(viewLod[:showLimit])
        self.refreshGridSettings()
    def refreshGridSettings(self):
        '''
        refresh the ag grid settings e.g. enable the row selection event handler
        enable row selection event handler
        '''
        self.agGrid.on('rowSelected', self.onRowSelected)
        # first column carries the selection checkbox
        self.agGrid.options.columnDefs[0].checkboxSelection = True
        # set html columns according to types that have links
        self.agGrid.html_columns = self.wdgrid.getHtmlColums(self.sheetName)
def reload(self,_msg=None,clearErrors=True):
    '''
    reload the table content from my url and sheet name

    Args:
        _msg(dict): ignored justpy event message (allows direct use as a click handler)
        clearErrors(bool): if True, reset the error display before reloading
    '''
    if clearErrors:
        self.clearErrors()
    self.load(self.url,self.sheetName,self.metaDataSheetName)
    # is there already agrid?
    if self.agGrid is None:
        self.agGrid = LodGrid(a=self.container)
    viewLod=self.wdgrid.viewLod
    self.wdgrid.linkWikidataItems(viewLod)
    self.reloadAgGrid(viewLod)
    # set up the primary key selector: "item" plus one option per mapped column
    self.pkSelect.delete_components()
    self.pkSelect.add(jp.Option(value="item",text="item"))
    wbQuery=self.wdgrid.wbQueries[self.sheetName]
    for propertyName,row in wbQuery.propertiesByName.items():
        columnName=row["Column"]
        if columnName:
            self.pkSelect.add(jp.Option(value=propertyName,text=columnName))
def onChangeSheet(self, msg:dict):
    '''
    switch the active sheet/tab to the one the user picked

    Args:
        msg(dict): justpy event message; msg.value holds the selected sheet name
    '''
    if self.debug:
        print(msg)
    selectedSheet = msg.value
    self.sheetName = selectedSheet
    try:
        # refresh grid and selectors for the newly chosen sheet
        self.reload()
    except Exception as ex:
        self.handleException(ex)
def onChangePk(self, msg:dict):
    '''
    switch the primary key used for wikidata syncing

    Args:
        msg(dict): justpy event message; msg.value holds the selected property
    '''
    if self.debug:
        print(msg)
    selectedPk = msg.value
    self.pk = selectedPk
    try:
        # refresh the grid so the new primary key takes effect
        self.reload()
    except Exception as ex:
        self.handleException(ex)
def onChangeUrl(self,msg:dict):
    '''
    handle selection of a different url

    Args:
        msg(dict): the justpy message; msg.value holds the new spreadsheet url
    '''
    if self.debug:
        print(msg)
    self.url=msg.value
    # keep the header hyperlink in sync with the new url
    self.gsheetUrl.href=self.url
    self.gsheetUrl.text=self.url
    try:
        self.reload()
    except Exception as ex:
        self.handleException(ex)
def onChangeDryRun(self,msg:dict):
    '''
    handle change of DryRun setting

    Args:
        msg(dict): the justpy message; msg.value is the new boolean toggle state
    '''
    self.dryRun=msg.value
def loginUser(self,user):
    '''
    switch the UI to the logged-in state for the given user

    Args:
        user(str): the name of the user that just logged in
    '''
    self.loginButton.text=f"logout {user}"
    self.loginButton.icon="chevron_left"
    # only a logged-in user may switch off dry-run mode
    self.dryRunButton.disable=False
def onloginViaDialog(self,_msg):
    '''
    handle login via dialog

    Args:
        _msg: ignored justpy event message
    '''
    user=self.passwordDialog.userInput.value
    password=self.passwordDialog.passwordInput.value
    self.wd.loginWithCredentials(user, password)
    # only switch the UI to logged-in state if the login actually succeeded
    if self.wd.user is not None:
        self.loginUser(self.wd.user)
def onLogin(self,msg:dict):
    '''
    handle Login/Logout button clicks

    Args:
        msg(dict): the justpy message
    '''
    if self.debug:
        print(msg)
    try:
        self.clearErrors()
        if self.wd.user is None:
            # try a non-interactive login first (e.g. stored credentials)
            self.wd.loginWithCredentials()
            if self.wd.user is None:
                # fall back to asking for credentials via the password dialog
                self.passwordDialog.loginButton.on("click",self.onloginViaDialog)
                self.passwordDialog.value=True
            else:
                self.loginUser(self.wd.user)
        else:
            # already logged in: log out and force dry-run mode back on
            self.wd.logout()
            self.dryRunButton.value=True
            self.dryRunButton.disable=True
            self.loginButton.text="login"
            self.loginButton.icon="chevron_right"
    except Exception as ex:
        self.handleException(ex)
def onRowSelected(self, msg):
    '''
    row selection event handler - push the selected row to Wikidata,
    or show a preview dialog when in dry-run mode

    Args:
        msg(dict): row selection information
    '''
    if self.debug:
        print(msg)
    self.clearErrors()
    if msg.selected:
        self.rowSelected = msg.rowIndex
        # only actually write to wikidata when dry-run is off
        write=not self.dryRun
        label=msg.data["label"]
        try:
            # map the row columns to wikidata property ids and create the item
            mapDict=self.wdgrid.wbQueries[self.sheetName].propertiesById
            qid,errors=self.wd.addDict(msg.data, mapDict,write=write)
            if qid is not None:
                # set item link
                link=self.wdgrid.createLink(f"https://www.wikidata.org/wiki/{qid}", f"{label}")
                self.wdgrid.viewLod[msg.rowIndex]["item"]=link
                self.agGrid.load_lod(self.wdgrid.viewLod)
                self.refreshGridSettings()
            if len(errors)>0:
                self.errors.text=errors
                print(errors)
            if self.dryRun:
                # show what would have been written to wikidata
                prettyData=pprint.pformat(msg.data)
                html=Markup(f"<pre>{prettyData}</pre>")
                self.alertDialog.alertContent.inner_html=html
                self.alertDialog.alertTitle.text=f"Dry Run for {label}"
                self.alertDialog.value=True
        except Exception as ex:
            self.handleException(ex)
def gridForDataFrame(self):
    '''
    build the justpy web page: toolbar, url/sheet/pk selectors and the
    ag-grid showing the spreadsheet content

    Returns:
        jp.QuasarPage: the page to be served by justpy
    '''
    self.wp = jp.QuasarPage()
    self.container=jp.Div(a=self.wp)
    self.header=jp.Div(a=self.container)
    self.toolbar=jp.QToolbar(a=self.header)
    # for icons see https://quasar.dev/vue-components/icon
    # see justpy/templates/local/materialdesignicons/iconfont/codepoints for available icons
    self.reloadButton=MenuButton(a=self.toolbar,text='reload',icon="refresh",click=self.reload)
    self.checkButton=MenuButton(a=self.toolbar,text='check',icon='check_box',click=self.onCheckWikidata)
    MenuLink(a=self.toolbar,text="docs",icon="description",href='https://wiki.bitplan.com/index.php/PyOnlineSpreadSheetEditing')
    MenuLink(a=self.toolbar,text='github',icon='forum', href="https://github.com/WolfgangFahl/pyOnlineSpreadSheetEditing")
    self.loginButton=MenuButton(a=self.toolbar,icon='chevron_right',text="login",click=self.onLogin)
    self.passwordDialog=QPasswordDialog(a=self.wp)
    self.alertDialog=QAlert(a=self.wp)
    #jp.Br(a=self.header)
    # url
    urlLabelText="Google Spreadsheet Url"
    self.gsheetUrl=jp.A(a=self.header,href=self.url,target="_blank",title=urlLabelText)
    self.linkIcon=jp.QIcon(a=self.gsheetUrl,name="link",size="md")
    self.urlInput=jp.Input(a=self.header,placeholder=urlLabelText,size=80,value=self.url,change=self.onChangeUrl)
    # dry run starts enabled and locked; unlocked after a successful login
    self.dryRunButton=jp.QToggle(a=self.header,text="dry run",value=True,disable=True)
    self.dryRunButton.on("input",self.onChangeDryRun)
    jp.Br(a=self.header)
    # link to the wikidata item currently imported
    selectorClasses='w-32 m-4 p-2 bg-white'
    # select for sheets
    self.sheetSelect = jp.Select(classes=selectorClasses, a=self.header, value=self.sheetName,
        change=self.onChangeSheet)
    for sheetName in self.sheetNames:
        self.sheetSelect.add(jp.Option(value=sheetName, text=sheetName))
    # selector for column/property
    self.pkSelect=jp.Select(classes=selectorClasses,a=self.header,value=self.pk,
        change=self.onChangePk)
    jp.Br(a=self.header)
    self.errors=jp.Span(a=self.container,style='color:red')
    try:
        self.reload()
    except Exception as ex:
        self.handleException(ex)
    return self.wp
def start(self):
    '''
    start the reactive justpy webserver (blocking call)
    '''
    jp.justpy(self.gridForDataFrame)
def main(argv=None): # IGNORE:C0111
    '''main program - parse the command line and start the web server.'''
    if argv is None:
        argv=sys.argv[1:]
    program_name = os.path.basename(__file__)
    program_version = "v%s" % Version.version
    program_build_date = str(Version.updated)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    program_shortdesc = "Wikidata Import from google spreadsheet"
    user_name="<NAME>"
    program_license = '''%s
Created by %s on %s.
Copyright 2022 contributors. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc,user_name, str(Version.date))
    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument("-d", "--debug", dest="debug", action="store_true", help="set debug [default: %(default)s]")
        parser.add_argument('-V', '--version', action='version', version=program_version_message)
        parser.add_argument('--endpoint',help="the endpoint to use [default: %(default)s]",default="https://query.wikidata.org/sparql")
        #parser.add_argument('--dryrun', action="store_true", dest='dryrun', help="dry run only")
        parser.add_argument('--url')
        parser.add_argument('--sheets',nargs="+",required=True)
        parser.add_argument('--pk')
        args = parser.parse_args(argv)
        gswdi=GoogleSheetWikidataImport(args.url,args.sheets,pk=args.pk,endpoint=args.endpoint,debug=args.debug)
        gswdi.start()
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 1
    except Exception as e:
        # in DEBUG mode re-raise so the full traceback surfaces immediately
        if DEBUG:
            raise(e)
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + " for help use --help")
        print(traceback.format_exc())
        return 2
# script entry point: force debug mode when the module-level DEBUG flag is set
if __name__ == "__main__":
    if DEBUG:
        sys.argv.append("-d")
    sys.exit(main())
mcazurerm/amsrp.py | pjshi23/mcazurerm | 0 | 12761705 | # amsrp.py - azurerm functions for the Microsoft.Media resource provider
from .restfns import do_get, do_post, do_put, do_delete
from .settings import azure_rm_endpoint, MEDIA_API
# check_name_availability of a media service name(access_token, subscription_id, rgname)
# check the media service name availability in a rgname and msname
def check_media_service_name_availability(access_token, subscription_id, name):
    '''Ask ARM whether the given media service name is still available.

    Returns the do_post() result of the CheckNameAvailability call.
    '''
    url_fragments = [azure_rm_endpoint,
                     '/subscriptions/', subscription_id,
                     '/providers/microsoft.media/CheckNameAvailability?api-version=', MEDIA_API]
    endpoint = ''.join(url_fragments)
    payload = '{"name": "' + name + '", "type":"mediaservices"}'
    return do_post(endpoint, payload, access_token)
# create_media_service_rg(access_token, subscription_id, rgname)
# create the media service in a rgname
def create_media_service_rg(access_token, subscription_id, rgname, location, stoname, name):
    '''Create a media service in the given resource group.

    Args:
        access_token: bearer token for Azure Resource Manager.
        subscription_id: Azure subscription id.
        rgname: resource group name.
        location: Azure region for the new media service.
        stoname: name of an existing storage account attached as primary.
        name: name of the media service to create.
    '''
    endpoint = ''.join([azure_rm_endpoint,
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', rgname,
                        '/providers/microsoft.media/mediaservices/' + name + '?api-version=', MEDIA_API])
    # NOTE(review): the JSON body is assembled by string concatenation without
    # escaping - assumes name/location/stoname are plain Azure resource names
    body = '{"name":"' + name + '", "location":"' + location + '", "properties":{ "storageAccounts":[ { "id":"/subscriptions/' + subscription_id + '/resourceGroups/' + rgname + '/providers/Microsoft.Storage/storageAccounts/' + stoname + '", "isPrimary":true } ] } }'
    return do_put(endpoint, body, access_token)
# delete_media_service_rg(access_token, subscription_id, rgname)
# delete the media service in a rgname
def delete_media_service_rg(access_token, subscription_id, rgname, location, stoname, name):
    '''Delete the named media service in a resource group.

    NOTE(review): location and stoname are accepted but never used; they are
    kept only for signature symmetry with create_media_service_rg.
    '''
    endpoint = ''.join([azure_rm_endpoint,
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', rgname,
                        '/providers/microsoft.media/mediaservices/' + name + '?api-version=', MEDIA_API])
    return do_delete(endpoint, access_token)
# list_media_endpoint_keys in a resrouce group(access_token, subscription_id, rgname, msname)
# list the media endpoint keys in a rgname and msname
def list_media_endpoint_keys(access_token, subscription_id, rgname, msname):
    '''List the access keys of a media service within a resource group.'''
    path_fragments = [azure_rm_endpoint,
                      '/subscriptions/', subscription_id,
                      '/resourceGroups/', rgname,
                      '/providers/microsoft.media/',
                      '/mediaservices/', msname,
                      '/listKeys?api-version=', MEDIA_API]
    # the joined path deliberately mirrors the original URL fragments
    endpoint = ''.join(path_fragments)
    return do_get(endpoint, access_token)
# list_media_services(access_token, subscription_id)
# list the media services in a subscription_id
def list_media_services(access_token, subscription_id):
    '''List all media services in the subscription.'''
    endpoint = (azure_rm_endpoint
                + '/subscriptions/' + subscription_id
                + '/providers/microsoft.media/mediaservices?api-version=' + MEDIA_API)
    return do_get(endpoint, access_token)
# list_media_services_rg in a resrouce group(access_token, subscription_id, rgname)
# list the media services in a rgname
def list_media_services_rg(access_token, subscription_id, rgname):
    '''List the media services contained in a single resource group.'''
    endpoint = (azure_rm_endpoint
                + '/subscriptions/' + subscription_id
                + '/resourceGroups/' + rgname
                + '/providers/microsoft.media/mediaservices?api-version=' + MEDIA_API)
    return do_get(endpoint, access_token)
| 2.140625 | 2 |
tests/test_remote_debug.py | codelv/enaml-native | 237 | 12761706 | """
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on Oct 4, 2017
@author: jrm
"""
import sh
import sys
def main():
    """Create and start an AndroidApplication in remote debugging mode."""
    # Make sure instance is cleared
    from enaml.application import Application
    Application._instance = None
    from enamlnative.android.app import AndroidApplication
    app = AndroidApplication(
        debug=True,
        dev='remote', # "10.0.2.2" # or 'server'
        load_view=load_view
    )
    # after 5 seconds start injecting adb input gestures
    app.timed_call(5000, run_gestures, app)
    app.start()
def run_gestures(app):
    """Drive the app via adb input events: one swipe per page plus a few taps.

    Schedules 30 page swipes (one every 2 seconds) and four taps after each
    swipe, then stops the app after two minutes.
    """
    for i in range(30):
        #: Swipe to next page
        t = i*2000
        app.timed_call(t,
            sh.adb, *'shell input swipe 250 300 -800 300'.split(), _bg=True)
        #: Tap a few places, staggered 200ms apart
        # BUG FIX: the stagger used the outer loop variable i, so all four
        # taps fired at the same timestamp; use the inner counter j instead
        for j in range(4):
            app.timed_call(t+j*200,
                sh.adb, *'shell input tap 500 150'.split(), _bg=True)
    app.timed_call(120000, app.stop)
def load_view(app):
    """(Re)load the enaml view module and show it as the app's content view."""
    import enaml
    try:
        # BUG FIX: reload() is no longer a builtin on Python 3
        from importlib import reload
    except ImportError:
        pass  # Python 2: the builtin reload is already available
    #: For debug purposes only!
    app.widget.resetBridgeStats()
    app.widget.resetBridgeCache()
    with enaml.imports():
        import view
        if app.view:
            reload(view)
    app.view = view.ContentView()
    #: Time how long it takes
    app.show_view()
def test_remote_debug():
    """End-to-end check: launch via the enaml-native CLI with remote debugging,
    hook up the remote nativehooks implementation and run main()."""
    #sh.pip('install tornado --user'.split())
    enaml_native = sh.Command('enaml-native')
    enaml_native('start', '--remote-debugging', _bg=True)
    #: Add the app sources to the import path
    sys.path.append('src/apps/')
    sys.path.append('src/')
    #: Init remote nativehooks implementation
    from enamlnative.core import remotehooks
    remotehooks.init()
    main()
| 1.914063 | 2 |
old_agents/qtd_agent.py | ajmcastro/quantum-reinforcement-learning | 1 | 12761707 | <gh_stars>1-10
import math
import numpy as np
from itertools import count
import ipywidgets as widgets
from IPython.display import display
from qiskit import QuantumRegister, QuantumCircuit, execute
from qiskit.extensions import Initialize
from qiskit.quantum_info import Statevector
from qiskit.circuit.library import GroverOperator
from stats import EpisodeStats
# from . import BaseAgent
# from circuit_builder import CircuitBuilder
from utils import prob_to_angles, R_amplify, R_deamplify
class QTDAgent:
    """Quantum temporal-difference learning agent.

    Keeps a per-state tuple (value estimate, action probability vector) in
    self.memory; actions are chosen by preparing a quantum circuit whose
    amplitudes encode the action probabilities and measuring it once.
    """
    def __init__(self, backend, alpha, gamma, k):
        # backend: qiskit backend used to execute the action-selection circuits
        # alpha: TD learning rate, gamma: discount factor
        # k: scaling factor for the number of amplitude (de)amplification steps
        self.backend = backend
        self.alpha = alpha
        self.gamma = gamma
        self.k = k
        # maps state -> (value estimate, np.array of action probabilities)
        self.memory = dict()

    def train(self, env, num_episodes):
        """Train the agent on env for num_episodes episodes.

        Args:
            env: environment providing reset()/state()/actions()/step()/is_over/winner
            num_episodes(int): number of episodes to play

        Returns:
            EpisodeStats: per-episode results, step counts and reward totals
        """
        # ipywidgets progress bar shown in the notebook while training
        learning_progress = widgets.IntProgress(
            min=0, max=num_episodes, step=1,
            description='0', bar_style='success',
            orientation='horizontal',
            display='flex', flex_flow='column', align_items='stretch',
            layout=widgets.Layout(width='auto', height='auto')
        )
        display(learning_progress)
        stats = EpisodeStats(
            episode_results=np.empty(num_episodes, dtype=str),
            episode_steps=np.empty(num_episodes),
            episode_rewards=np.empty(num_episodes)
        )
        for i_episode in range(num_episodes):
            env.reset()
            total_rewards = 0.0
            state = env.state()
            actions = env.actions()
            for t in count():
                num_actions = len(actions)
                if state not in self.memory:
                    # unseen state: zero value and a uniform action distribution
                    self.memory[state] = 0.0, np.array([1 / num_actions] * num_actions)
                if num_actions == 1:
                    action = 0
                else:
                    # encode the action probabilities as amplitudes on
                    # ceil(log2(num_actions)) qubits, padding with zeros
                    num_qubits = math.ceil(math.log(num_actions, 2))
                    amplitudes = np.sqrt(self.memory[state][1])
                    if len(amplitudes) != 2**num_qubits:
                        amplitudes = np.append(amplitudes, [0] * (2**num_qubits - len(amplitudes)))
                    U = Initialize(amplitudes).gates_to_uncompute().inverse().copy(name='A')
                    qreg = QuantumRegister(num_qubits)
                    circ = QuantumCircuit(qreg)
                    circ.append(U.to_instruction(), qreg)
                    # amplitudes = np.array([
                    #     1 / math.sqrt(num_actions) * complex(1, 0) if i < num_actions else 0 for i in range(2**num_qubits)
                    # ])
                    # U = Initialize(amplitudes).gates_to_uncompute().inverse().copy(name='A')
                    # qreg = QuantumRegister(num_qubits)
                    # circ = QuantumCircuit(qreg)
                    # circ.append(U.to_instruction(), qreg)
                    # if self.memory[state][1] is not None:
                    #     a, L = self.memory[state][1]
                    #     angle = math.asin(math.sqrt(1 / num_actions))
                    #     max_L = math.floor(math.pi / (4 * angle) - 0.5)
                    #     L = L if L <= max_L else max_L
                    #     grover = GroverOperator(
                    #         oracle=Statevector.from_label(np.binary_repr(a, width=num_qubits)),
                    #         state_preparation=U
                    #     ).repeat(L)
                    #     circ.append(grover.to_instruction(), qreg)
                    circ.measure_all()
                    # single shot: the measured bitstring is the chosen action index
                    result = execute(circ, backend=self.backend, shots=1).result()
                    counts = result.get_counts(circ)
                    action = int(max(counts, key=counts.get), 2)
                next_state, next_actions, reward = env.step(actions[action])
                # Update state value estimate
                old_v, prob = self.memory[state]
                if next_state in self.memory:
                    next_v = self.memory[next_state][0]
                else:
                    next_v = 0.0
                new_v = old_v + self.alpha * (reward + self.gamma * next_v - old_v)
                # number of amplification steps, capped so the rotation
                # does not overshoot the target amplitude
                L = self.k * (reward + next_v)
                max_L = math.floor(math.pi / (4 * math.asin(math.sqrt(prob[action]))))
                L = L if L <= max_L else max_L
                for i in range(int(L)):
                    amp_ratio = R_amplify(prob[action], math.pi, math.pi)
                    deamp_ratio = R_deamplify(prob[action], math.pi, math.pi)
                    # NOTE(review): the comprehension's i shadows the loop
                    # counter above; here it is the enumerate index
                    prob = np.array([p * amp_ratio if i == action else p * deamp_ratio for i,p in enumerate(prob)])
                self.memory[state] = new_v, prob
                # self.memory[state] = new_v, (action, int(self.k * (reward + next_v)))
                total_rewards += reward
                if env.is_over:
                    stats.episode_results[i_episode] = env.winner
                    stats.episode_steps[i_episode] = t + 1
                    stats.episode_rewards[i_episode] = total_rewards
                    break
                state, actions = next_state, next_actions
            learning_progress.description = str(i_episode + 1)
            learning_progress.value += 1
        return stats

    def compute_angles(self, prob, reward):
        """Map a probability/reward pair to a pair of rotation angles by
        interpolating between pre-computed angle estimations.

        Args:
            prob(float): current action probability in (0, 1)
            reward(float): reward used to scale the step along the estimation line

        Returns:
            tuple(float, float): the two rotation angles (0.0, 0.0 when prob
            exceeds all tabulated thresholds)
        """
        # probability threshold -> (max point, min point, base point) tuples
        estimations = {
            0.1: ([5.17294298, 1.11024233], [3.14159265, 3.14159265], [4.6010479674466795, 1.6821373425388433]),
            0.2: ([5.09678577, 1.18639955], [3.14159265, 3.14159265], [4.459708731543855, 1.823476588238218]),
            0.3: ([5.00214072, 1.28104463], [2.30052385, 2.30052382], [3.973121599843598, 1.669354158691014]),
            0.4: ([4.87983706, 1.40334824], [1.82347659, 1.82347658], [3.542721084843427, 1.5871486685387224]),
            0.5: ([4.71238898, 1.57079633], [1.57079631, 1.57079632], [3.1415926535897936, 1.5707963234699533]),
            0.6: ([4.45970872, 1.82347658], [1.40334827, 1.40334823], [2.7404642285137673, 1.5871486527193748]),
            0.7: ([3.98266133, 2.30052398], [1.28104463, 1.28104454], [2.3100636806158654, 1.6693541946745145]),
            0.8: ([3.14159267, 3.14159265], [1.18639965, 1.18639955], [1.8234765877090218, 1.823476576164929]),
            0.9: ([3.14159268, 3.14159265], [1.11024236, 1.11024248], [1.6821373510225015, 1.6821373312492471])
        }
        # pick the first tabulated threshold >= prob
        max_p, min_p, b = None, None, None
        for p, v in estimations.items():
            if prob <= p:
                max_p, min_p, b = v
                break
        if max_p is None and min_p is None and b is None:
            return 0.0, 0.0
        # Slope & magnitude
        slope = (max_p[1] - min_p[1]) / (max_p[0] - min_p[0])
        angle = np.arctan(slope)
        # NOTE(review): magnitude is computed but never used below
        magnitude = math.sqrt((max_p[0] - min_p[0])**2 + (max_p[1] - min_p[1])**2)
        # Ascending or descending, depending if min->max is left->right or right->left
        asc = 1 if max_p[0] > min_p[0] else -1
        distance_min = math.sqrt((b[0] - min_p[0])**2 + (b[1] - min_p[1])**2)
        distance_max = math.sqrt((b[0] - max_p[0])**2 + (b[1] - max_p[1])**2)
        lower_bound = -distance_min
        upper_bound = distance_max
        def reward_to_magnitude(reward):
            # logistic mapping of reward into [lower_bound, upper_bound]
            Q = - upper_bound / lower_bound
            growth_rate = 0.35
            return lower_bound + (upper_bound - lower_bound) / (1 + Q * math.e**(-growth_rate * reward))
        theta1 = asc * reward_to_magnitude(reward) * math.cos(angle) + b[0]
        theta2 = asc * reward_to_magnitude(reward) * math.sin(angle) + b[1]
        return theta1, theta2
# class QTDAgent(BaseAgent):
# def __init__(self, backend, alpha, gamma, k):
# super().__init__()
# self.backend = backend
# self.alpha = alpha
# self.gamma = gamma
# self.k = k
# self.memory = dict()
# def add_to_memory(self, state, actions):
# num_actions = len(actions)
# if state not in self.memory:
# self.memory[state] = np.random.uniform(0, 1), np.array([1 / num_actions for i in range(num_actions)])
# def select_action(self, state, actions):
# self.add_to_memory(state, actions)
# _, prob = self.memory[state]
# num_qubits = 1 if len(prob) == 1 else math.ceil(math.log2(len(prob)))
# if len(prob) != 2**num_qubits:
# prob = np.append(prob, [0.0] * (2**num_qubits - len(prob)))
# qreg = QuantumRegister(num_qubits)
# circ = QuantumCircuit(qreg)
# U = CircuitBuilder(self.backend).get_U(num_qubits, prob_to_angles(prob)).to_instruction()
# circ.append(U, qreg)
# circ.measure_all()
# result = execute(circ, backend=self.backend, shots=1).result()
# counts = result.get_counts(circ)
# action = max(counts, key=counts.get)
# return int(action, 2)
# def learn(self, state, action, next_state, reward, terminal=False):
# old_v, prob = self.memory[state]
# new_v = old_v
# if terminal:
# new_v += self.alpha * reward
# else:
# next_v, _ = self.memory[next_state]
# new_v += self.alpha * (reward + self.gamma * next_v - old_v)
# if prob is not None:
# epsilon = prob[action]
# R_amp = R_amplify(epsilon, math.pi, math.pi)
# R_deamp = R_deamplify(epsilon, math.pi, math.pi)
# L = self.k * reward if terminal else self.k * (reward + self.memory[next_state][0])
# angle = math.asin(math.sqrt(epsilon))
# max_L = math.floor(math.pi / (4 * angle) - 0.5)
# if L < 0.0:
# L = 0.0
# elif L >= max_L:
# L = max_L
# for i in range(int(L)):
# epsilon = prob[action]
# R_amp = R_amplify(epsilon, math.pi, math.pi)
# R_deamp = R_deamplify(epsilon, math.pi, math.pi)
# prob = np.array([round(p * R_amp, 6) if i == action else round(p * R_deamp, 6) for i,p in enumerate(prob)])
# self.memory[state] = new_v, prob
# def best_action(self, state, actions):
# if state in self.memory:
# return np.argmax(self.memory[state])
# else:
# return np.random.choice(range(len(actions))) | 2.140625 | 2 |
src/dcs/oscilloscopeRead/gw_lan.py | TenilleLori/ALICE-Project | 0 | 12761708 | <reponame>TenilleLori/ALICE-Project
# -*- coding: utf-8 -*-
"""
Module name: gw_lan
Copyright:
----------------------------------------------------------------------
gw_lan is Copyright (c) 2014 Good Will Instrument Co., Ltd All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under the terms
of the GNU Lesser General Public License as published by the Free Software Foundation;
either version 2.1 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You can receive a copy of the GNU Lesser General Public License from
http://www.gnu.org/
Note:
gw_lan uses third party software which is copyrighted by its respective copyright holder.
For details see the copyright notice of the individual package.
----------------------------------------------------------------------
Description:
gw_lan is a python Ethernet interface module used to connect and read/write data from/to DSO.
Version: 1.00
Created on JUN 28 2018
Author: <NAME>
"""
import socket
class lan:
    """Thin TCP socket wrapper used to talk to a GW Instek DSO over Ethernet."""
    def __init__(self, str):
        # NOTE(review): the parameter shadows the builtin str; expected format
        # is "host" or "host:port" (default port 3001)
        if ':' in str:
            host,port = str.split(':')
        else:
            host = str
            port = 3001
        self.IO = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.IO.settimeout(1) #Set timeout
        try:
            self.IO.connect((host,int(port)))
        except socket.error as e:
            # connection errors are only printed; the object is still created
            print ("__init__(), socket error: %s" % e)

    def write(self, str):
        # send the full command string; encoded to bytes for Python 3 sockets
        try:
            self.IO.sendall(str.encode())
        except socket.error as e:
            print ("write(), socket error: %s" % e)

    def read(self):
        # NOTE(review): despite the while-loop this returns after the first
        # successful recv() (the per-byte/newline logic is commented out), so
        # callers get at most one chunk of up to 2048 bytes per call
        line_buf=b''
        while True:
            try:
                # a=self.IO.recv(1)
                a=self.IO.recv(2048)
            except socket.error as e:
                print ("read(), socket error: %s" % e)
                return line_buf
            line_buf += a
            # if(a==b'\n'):
            return line_buf

    def readBytes(self, length):
        # NOTE(review): returns bytes on success but the initial/error value is
        # a text string '' - callers see str on a socket error, bytes otherwise
        str=''
        try:
            str=self.IO.recv(length)
        except socket.error as e:
            print ("readBytes(), socket error: %s" % e)
        return str

    def clearBuf(self):
        # placeholder: no input buffer flushing implemented for LAN transport
        pass

    def closeIO(self):
        self.IO.close()

    @classmethod
    def connection_test(self, str):
        # parse "a.b.c.d:port"; return the original string when a TCP connect
        # succeeds, '' otherwise
        # NOTE(review): raises IndexError when no ':' is present - callers are
        # expected to pass a full "ip:port" string
        ip_str=str.split(':')
        ip=ip_str[0].split('.')
        if(ip_str[1].isdigit() and ip[0].isdigit() and ip[1].isdigit() and ip[2].isdigit() and ip[3].isdigit()):
            __port = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            __port.settimeout(2) #2 Second Timeout
            try:
                __port.connect((ip_str[0], int(ip_str[1])))
            except socket.error as e:
                print ("Socket error: %s" % e)
                return ''
            __port.close()
            return str
        else:
            return ''
| 2.328125 | 2 |
src/layers/FFN.py | ThanThoai/Visual-Question-Answering_Vietnamese | 10 | 12761709 | <filename>src/layers/FFN.py
import torch
from .MLP import MLP
class FFN(torch.nn.Module):
    """Position-wise feed-forward block: HIDDEN -> FF -> HIDDEN MLP with ReLU
    and dropout, as used inside transformer-style layers."""
    def __init__(self, __C):
        # __C: config object providing HIDDEN_SIZE, FF_SIZE and DROPOUT_R
        super(FFN, self).__init__()
        self.mlp = MLP(
            in_size = __C.HIDDEN_SIZE,
            hidden_size = __C.FF_SIZE,
            out_size = __C.HIDDEN_SIZE,
            dropout_r = __C.DROPOUT_R,
            use_relu = True
        )
    def forward(self, x):
        # applied element/token-wise; output shape matches the input's
        # trailing HIDDEN_SIZE dimension
        return self.mlp(x)
c6_1101_Bintree_map.py | Julia-Run/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python | 0 | 12761710 | # 二叉搜索树,实现映射抽象数据类型。(之前用散列实现过)
# Map() / put(key,value) / del amap[key] / get(key) / len() / in
# Binary search tree: for any node, keys smaller than the node's key live in
# the left subtree and larger keys in the right subtree (the BST property).
# We must be able to handle/create an empty tree, so the implementation uses
# two coupled classes (a tree class and a node class).
# put(): a newly inserted key always ends up at a leaf position, regardless of its size.
class TreeNode(object):
    """A binary-search-tree node holding a key/value pair plus links to its
    left child (lc), right child (rc) and parent (par)."""

    def __init__(self, key, val, lc=None, rc=None, par=None):
        self.key = key
        self.val = val
        self.lc = lc
        self.rc = rc
        self.par = par

    def has_lc(self):
        """return the left child (None when absent)"""
        return self.lc

    def has_rc(self):
        """return the right child (None when absent)"""
        return self.rc

    def has_child(self):
        """truthy when the node has at least one child"""
        return self.lc or self.rc

    def has_2child(self):
        """truthy when the node has both children"""
        return self.lc and self.rc

    def is_root(self):
        return self.par is None

    def is_lc(self):
        """True when this node is its parent's left child"""
        return self.par is not None and self.par.lc is self

    def is_rc(self):
        """True when this node is its parent's right child"""
        return self.par is not None and self.par.rc is self

    def is_leaf(self):
        return self.lc is None and self.rc is None

    def set_lc(self, lc):
        self.lc = lc

    def set_rc(self, rc):
        # BUG FIX: previously assigned to self.lc, clobbering the left child
        self.rc = rc

    def change_data(self, key, val, new_lc, new_rc):
        """Replace this node's payload and children.

        The new children's parent pointers are re-aimed at this node so
        upward navigation (is_lc/is_rc/par) stays consistent.
        """
        self.key = key
        self.val = val
        self.lc = new_lc
        self.rc = new_rc
        if self.has_lc():
            self.lc.par = self
        if self.has_rc():
            self.rc.par = self
class SearchTree(object):
    """A map implemented as a binary search tree of TreeNode objects.

    Supports put/get, bracket access, len() and in-order iteration over keys.
    """

    def __init__(self):
        self.root = None
        self.size = 0

    def length(self):
        return self.size

    def __len__(self):
        return self.size

    def __iter__(self):
        # BUG FIX: previously delegated to TreeNode.__iter__ which does not
        # exist; iterate the keys in ascending (in-order) order instead
        return self._inorder(self.root)

    def _inorder(self, node):
        """generator yielding the keys of the subtree rooted at node in order"""
        if node is not None:
            yield from self._inorder(node.lc)
            yield node.key
            yield from self._inorder(node.rc)

    def put(self, key, val):
        """insert key/val, replacing the value when the key already exists"""
        if self.root:
            self._put(key, val, self.root)
        else:
            self.root = TreeNode(key, val)
            self.size = self.size + 1

    def _put(self, key, val, currentNode):
        if key < currentNode.key:
            if currentNode.has_lc():
                self._put(key, val, currentNode.lc)
            else:
                currentNode.lc = TreeNode(key, val, par=currentNode)
                # BUG FIX: size was only counted for the root node
                self.size = self.size + 1
        elif key > currentNode.key:
            if currentNode.has_rc():
                self._put(key, val, currentNode.rc)
            else:
                currentNode.rc = TreeNode(key, val, par=currentNode)
                self.size = self.size + 1
        else:
            # key already present: replace the value, keep the children
            currentNode.change_data(key, val, currentNode.lc, currentNode.rc)

    def __setitem__(self, key, val):
        return self.put(key, val)

    def get(self, key):
        """return the value stored for key, or None when absent"""
        if self.root:
            res = self._get(key, self.root)
            if res:
                return res.val
            else:
                return None
        else:
            return None

    def _get(self, key, current):
        # recursive lookup; None means "fell off the tree" - key absent
        if current is None:
            return None
        elif key < current.key:
            return self._get(key, current.lc)
        elif key > current.key:
            return self._get(key, current.rc)
        else:
            return current

    def __getitem__(self, key):
        return self.get(key)

    def __contains__(self, key):
        return self._get(key, self.root) is not None

    def successor(self, nodenow):
        """return the node holding the smallest key in nodenow's right subtree.

        BUG FIX: previously returned the key, but delnode needs the node
        (it reads .key/.val and recurses into it).
        """
        successor = nodenow.rc
        while successor.lc:
            successor = successor.lc
        return successor

    def precessor(self, nodenow):
        """return the node holding the largest key in nodenow's left subtree"""
        precessor = nodenow.lc
        while precessor.rc:
            precessor = precessor.rc
        return precessor

    def delete(self, key):
        """remove key from the tree; prints a message when the key is absent"""
        if self.root:
            pos = self._get(key, self.root)
            if pos:
                self.delnode(pos)
                self.size = self.size - 1
            else:
                print('key is not in this data!')
        else:
            return None

    def delnode(self, pos):
        """Remove the node pos from the tree.

        Leaves are unlinked from their parent (BUG FIX: `pos = None` only
        rebound the local name). Inner nodes copy the key/val of their
        in-order successor (or predecessor) and recursively delete that node.
        """
        if pos.is_leaf():  # BUG FIX: method is is_leaf, not isLeaf
            if pos.is_lc():
                pos.par.lc = None
            elif pos.is_rc():
                pos.par.rc = None
            else:
                # deleting the last remaining node
                self.root = None
        else:
            if pos.has_rc():
                new_pos = self.successor(pos)
            else:
                new_pos = self.precessor(pos)
            pos.key = new_pos.key
            pos.val = new_pos.val
            self.delnode(new_pos)
# quick smoke test of the map operations: insert three keys and read them back
a = SearchTree()
a.put(1, 'A')
a.put(10, 'B')
a.put(-1, '[[[C]]]')
print(a.size)
print(a.get(10))
print(a.get(1))
print(a[-1])
349/__init__.py | sc4599/LeetCode | 0 | 12761711 | class Solution(object):
def intersection(self, nums1, nums2):
    """Return the unique elements present in both lists.

    :type nums1: List[int]
    :type nums2: List[int]
    :rtype: List[int]

    BUG FIX: when both lists had equal length, the old code picked nums2 for
    both the "larger" and the "smaller" role and intersected nums2 with
    itself. Also uses a set for O(1) membership instead of scanning a list.
    """
    # iterate the smaller list, test membership against a set of the larger
    if len(nums1) > len(nums2):
        small_nums, large_nums = nums2, nums1
    else:
        small_nums, large_nums = nums1, nums2
    large_set = set(large_nums)
    r_nums = set()
    for i in small_nums:
        if i in large_set:
            r_nums.add(i)
    return list(r_nums)
def intersection_b(self, nums1, nums2):
    """Set-based variant: unique elements common to both lists."""
    common = set(nums1).intersection(nums2)
    return list(common)
| 3.640625 | 4 |
ytelapi/controllers/recording_controller.py | Ytel-Inc/YtelAPI-Python | 0 | 12761712 | <filename>ytelapi/controllers/recording_controller.py
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
from .base_controller import BaseController
from ..api_helper import APIHelper
from ..configuration import Configuration
from ..http.auth.basic_auth import BasicAuth
class RecordingController(BaseController):
    """A Controller to access the /recording Endpoints in the ytelapi API.

    Every endpoint here has the same shape - an authenticated POST of form
    parameters that returns the raw response body - so the shared plumbing
    lives in the private _post_form helper.
    """

    def _post_form(self, path, form_parameters):
        """POST the given form parameters to an API path and return the body.

        Args:
            path (string): API path relative to the configured base URI.
            form_parameters (dict): the form fields to send.

        Returns:
            string: raw response body from the API.

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API.
        """
        _query_url = APIHelper.clean_url(Configuration.base_uri + path)
        _request = self.http_client.post(_query_url, parameters=form_parameters)
        BasicAuth.apply(_request)
        _context = self.execute_request(_request)
        self.validate_response(_context)
        return _context.response.raw_body

    def create_delete_recording(self,
                                recordingsid):
        """Does a POST request to /recording/deleterecording.json.

        Remove a recording from your Ytel account.

        Args:
            recordingsid (string): The unique identifier for a recording.

        Returns:
            string: Response from the API.

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.

        """
        return self._post_form('/recording/deleterecording.json',
                               {'recordingsid': recordingsid})

    def create_list_recordings(self,
                               page=None,
                               pagesize=None,
                               datecreated=None,
                               callsid=None):
        """Does a POST request to /recording/listrecording.json.

        Retrieve a list of recording objects.

        Args:
            page (int, optional): The page count to retrieve from the total
                results in the collection. Page indexing starts at 1.
            pagesize (int, optional): The count of objects to return per
                page.
            datecreated (string, optional): Filter results by creation date
            callsid (string, optional): The unique identifier for a call.

        Returns:
            string: Response from the API.

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.

        """
        return self._post_form('/recording/listrecording.json',
                               {'page': page,
                                'pagesize': pagesize,
                                'Datecreated': datecreated,
                                'callsid': callsid})

    def create_view_recording(self,
                              recordingsid):
        """Does a POST request to /recording/viewrecording.json.

        Retrieve the recording of a call by its RecordingSid. This resource
        will return information regarding the call such as start time, end
        time, duration, and so forth.

        Args:
            recordingsid (string): The unique identifier for the recording.

        Returns:
            string: Response from the API.

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.

        """
        return self._post_form('/recording/viewrecording.json',
                               {'recordingsid': recordingsid})
| 2.671875 | 3 |
data/studio21_generated/introductory/3729/starter_code.py | vijaykumawat256/Prompt-Summarization | 0 | 12761713 | def count_zeros_n_double_fact(n):
| 1.367188 | 1 |
comment/tests/test_api/test_serializers.py | KedarisettiSreeVamsi/Comment | 0 | 12761714 | <filename>comment/tests/test_api/test_serializers.py
from unittest.mock import patch
from django.core import mail
from django.test import RequestFactory
from comment.conf import settings
from comment.models import Comment, Follower
from comment.api.serializers import get_profile_model, get_user_fields, UserSerializerDAB, CommentCreateSerializer, \
CommentSerializer
from comment.tests.test_api.test_views import APIBaseTest
class APICommentSerializers(APIBaseTest):
    """Tests for the helpers and serializers in ``comment.api.serializers``."""

    def setUp(self):
        """Snapshot comment counts for ``post_1`` so tests can diff against them."""
        super().setUp()
        self.parent_count = Comment.objects.filter_parents_by_object(self.post_1).count()
        self.all_count = Comment.objects.all().count()

    def increase_count(self, parent=False):
        """Bump the expected totals after a comment has been created."""
        if parent:
            self.parent_count += 1
        self.all_count += 1

    def comment_count_test(self):
        """Assert the database counts still match the expected totals."""
        self.assertEqual(Comment.objects.filter_parents_by_object(self.post_1).count(), self.parent_count)
        self.assertEqual(Comment.objects.all().count(), self.all_count)

    def test_get_profile_model(self):
        """``get_profile_model`` honours the PROFILE_APP_NAME setting."""
        # missing settings attrs
        with patch.object(settings, 'PROFILE_APP_NAME', None):
            profile = get_profile_model()
            self.assertIsNone(profile)
        # providing wrong attribute value, an exception is raised
        with patch.object(settings, 'PROFILE_APP_NAME', 'wrong'):
            self.assertRaises(LookupError, get_profile_model)
        # attribute value is None
        with patch.object(settings, 'PROFILE_APP_NAME', None):
            profile = get_profile_model()
            self.assertIsNone(profile)
        # success
        with patch.object(settings, 'PROFILE_APP_NAME', 'user_profile'):
            profile = get_profile_model()
            self.assertIsNotNone(profile)

    def test_get_user_fields(self):
        """``get_user_fields`` appends 'logentry' when ``hasattr`` (mocked) reports it."""
        fields = get_user_fields()
        self.assertEqual(fields, ('id', 'username', 'email', 'profile'))
        # NOTE(review): the patch is started but never stopped, so the mocked
        # ``hasattr`` can leak into later tests — consider self.addCleanup(patch.stopall).
        mocked_hasattr = patch('comment.api.serializers.hasattr').start()
        mocked_hasattr.return_value = True
        fields = get_user_fields()
        self.assertEqual(fields, ('id', 'username', 'email', 'profile', 'logentry'))

    def test_user_serializer(self):
        """``UserSerializerDAB.get_profile`` depends on the PROFILE_MODEL_NAME setting."""
        # PROFILE_MODEL_NAME not provided
        with patch.object(settings, 'PROFILE_MODEL_NAME', None):
            profile = UserSerializerDAB.get_profile(self.user_1)
            self.assertIsNone(profile)
        # PROFILE_MODEL_NAME is wrong
        with patch.object(settings, 'PROFILE_MODEL_NAME', 'wrong'):
            profile = UserSerializerDAB.get_profile(self.user_1)
            self.assertIsNone(profile)
        # success
        with patch.object(settings, 'PROFILE_MODEL_NAME', 'userprofile'):
            profile = UserSerializerDAB.get_profile(self.user_1)
            self.assertIsNotNone(profile)

    @patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', False)
    @patch.object(settings, 'COMMENT_ALLOW_ANONYMOUS', False)
    def test_create_parent_comment_serializer(self):
        """Creating a top-level comment increases both parent and total counts."""
        self.assertEqual(self.parent_count, 3)
        self.assertEqual(self.all_count, 8)
        factory = RequestFactory()
        request = factory.get('/')
        request.user = self.user_1
        data = {
            'model_obj': self.post_1,
            'parent_comment': None,
            'request': request
        }
        serializer = CommentCreateSerializer(context=data)
        # Authenticated users get no 'email' field.
        self.assertIsNone(serializer.fields.get('email'))
        comment = serializer.create(validated_data={'content': 'test'})
        self.increase_count(parent=True)
        self.comment_count_test()
        self.assertIsNotNone(comment)
        # get parent
        parent_id = serializer.get_parent(comment)
        self.assertIsNone(parent_id)
        # get replies
        replies = serializer.get_replies(comment)
        reply_count = serializer.get_reply_count(comment)
        self.assertEqual(replies, [])
        self.assertEqual(reply_count, 0)

    @patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', False)
    @patch.object(settings, 'COMMENT_ALLOW_ANONYMOUS', False)
    def test_create_child_comment_serializer(self):
        """Creating a reply bumps only the total count and links to its parent."""
        self.assertEqual(self.parent_count, 3)
        self.assertEqual(self.all_count, 8)
        factory = RequestFactory()
        request = factory.get('/')
        request.user = self.user_1
        data = {
            'model_obj': self.post_1,
            'request': request,
            'parent_comment': self.comment_1
        }
        serializer = CommentCreateSerializer(context=data)
        comment = serializer.create(validated_data={'content': 'test'})
        self.increase_count()
        self.comment_count_test()
        self.assertIsNotNone(comment)
        # get parent
        parent_id = CommentCreateSerializer.get_parent(comment)
        self.assertEqual(parent_id, data['parent_comment'].id)
        replies = serializer.get_replies(self.comment_1)
        reply_count = serializer.get_reply_count(self.comment_1)
        self.assertIsNotNone(replies)
        self.assertEqual(reply_count, 2)
        replies = serializer.get_replies(self.comment_4)
        reply_count = serializer.get_reply_count(self.comment_4)
        self.assertEqual(replies, [])
        self.assertEqual(reply_count, 0)

    @patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', True)
    @patch.object(settings, 'COMMENT_ALLOW_ANONYMOUS', False)
    def test_send_notification(self):
        """Followers of the parent thread get exactly one email on reply."""
        factory = RequestFactory()
        request = factory.get('/')
        request.user = self.user_1
        data = {
            'model_obj': self.post_1,
            'request': request,
            'parent_comment': self.comment_1
        }
        Follower.objects.follow('<EMAIL>', 'testUser', self.comment_1)
        serializer = CommentCreateSerializer(context=data)
        comment = serializer.create(validated_data={'content': 'test'})
        # NOTE(review): ``is_alive`` is referenced without being called, so this
        # asserts the truthiness of the bound method (always True).  The intent
        # was probably ``is_alive()``; calling it could be flaky, so it is only
        # flagged here.
        self.assertTrue(serializer.email_service._email_thread.is_alive)
        self.assertIsNotNone(comment)
        self.assertIsNotNone(serializer.email_service._email_thread)
        # Wait for the background email thread before inspecting the outbox.
        serializer.email_service._email_thread.join()
        self.assertEqual(len(mail.outbox), 1)

    @patch.object(settings, 'COMMENT_ALLOW_ANONYMOUS', True)
    def test_create_comment_serializer_for_anonymous(self):
        """Anonymous comments require an email and are held until verified."""
        from django.contrib.auth.models import AnonymousUser
        factory = RequestFactory()
        request = factory.get('/')
        request.user = AnonymousUser()
        data = {
            'model_obj': self.post_1,
            'parent_comment': None,
            'request': request
        }
        serializer = CommentCreateSerializer(context=data)
        self.assertIsNotNone(serializer.fields['email'])
        comment = serializer.create(validated_data={
            'content': 'anonymous posting',
            'email': '<EMAIL>'
        })
        # no creation occurs until comment is verified
        self.comment_count_test()
        self.assertIsNotNone(comment)
        # confirmation email is sent
        self.assertIsNotNone(serializer.email_service._email_thread)
        serializer.email_service._email_thread.join()
        self.assertEqual(len(mail.outbox), 1)

    def test_passing_context_to_serializer(self):
        """``content`` becomes read-only under reaction/flag update contexts."""
        serializer = CommentSerializer(self.comment_1)
        self.assertFalse(serializer.fields['content'].read_only)
        serializer = CommentSerializer(self.comment_1, context={'reaction_update': True})
        self.assertTrue(serializer.fields['content'].read_only)
        serializer = CommentSerializer(self.comment_1, context={'flag_update': True})
        self.assertTrue(serializer.fields['content'].read_only)
| 2.390625 | 2 |
# Expose ``quote_plus`` under a single name regardless of interpreter version.
try:
    # Python 3
    from urllib.parse import quote_plus
except ImportError:
    # Python 2
    from urllib import quote_plus
| 1.453125 | 1 |
autopandas_v2/cloud/utils.py | chyanju/autopandas | 16 | 12761716 | <filename>autopandas_v2/cloud/utils.py
import json
import os
import pandas as pd
import subprocess
import sys
import time
from io import StringIO
from autopandas_v2.utils import logger
from autopandas_v2.utils.cli import ArgNamespace
class GDriveRunner:
    """Thin wrapper around the ``gdrive`` CLI binary.

    Adds bounded retries for rate-limited/failed invocations and keeps an
    on-disk cache (``<home_dir>/.gdrive/path_cache.json``) that maps Drive
    paths to Drive file IDs.
    """

    def __init__(self, home_dir, cmd_args: ArgNamespace):
        self.home_dir = home_dir
        self.cmd_args = cmd_args
        # Upper bound on how often a gdrive invocation is retried.
        self.max_gdrive_retries = cmd_args.max_gdrive_retries
        # Seed cache of well-known Drive path -> file ID mappings.
        self.path_cache = {
            '/Data': '1hlg3OcR3uPiqJQRVPuLqJeeeB6R4ESyY',
            '/Data/Raw': '1vYcDRjSSzi6oIPpvOvW6PZOMBKzt7UdG',
            '/Data/Functions': '1uDf8Udvtz_F4aSpXZyouCDmIwZr2xxnW',
            '/Data/Generators': '1gonGvuvyPSu5LlLWWMutmb2-fZfdJHXo',
            '/Data/Raw/Pandas': '1rYbvhHqzH9FEAKRwHlJx2Sw_rrVPon8V',
            '/Data/Functions/Pandas': '1T1GBdH4AOL4Gl64A5ZXFxQxsX26sq3z6',
            '/Data/Generators/Pandas': '1JtF8lBIhZSzalgST1hEYlldjvs9WrCUc',
        }
        # Create the persisted cache on first run, otherwise merge it in
        # (entries from disk take precedence over the seed values above).
        if not os.path.exists(home_dir + '/.gdrive/path_cache.json'):
            self.save_path_cache()
        else:
            with open(home_dir + '/.gdrive/path_cache.json', 'r') as f:
                self.path_cache.update(json.load(f))

    def save_path_cache(self):
        """Persist the path -> file ID cache to disk as JSON."""
        with open(self.home_dir + '/.gdrive/path_cache.json', 'w') as f:
            json.dump(self.path_cache, f)

    def get_id(self, path):
        """Resolve *path* to a Drive file ID, querying gdrive on a cache miss.

        Raises:
            Exception: If no listed entry's name ends with *path*.
        """
        if path in self.path_cache:
            return self.path_cache[path]
        # Query gdrive by basename, then filter results by full-path suffix.
        basename = os.path.basename(path)
        cmd = "{home_dir}/gdrive list --name-width 0 " \
              "--absolute --query \"trashed = false and name contains '{path}'\"".format(home_dir=self.home_dir,
                                                                                         path=basename)
        listing = self.get_output(cmd)
        # NOTE(review): a regex delimiter (runs of 2+ spaces) makes pandas use
        # the slower python engine — presumably to parse gdrive's aligned
        # table output; confirm the output format stays column-aligned.
        listing = pd.read_csv(StringIO(listing), delimiter=r'\s\s+')
        for g_id, g_name in zip(listing.Id, listing.Name):
            if g_name.endswith(path):
                # Cache the resolved ID for future lookups.
                self.path_cache[path] = g_id
                self.save_path_cache()
                return g_id
        raise Exception("Could not find path {path}".format(path=path))

    def get_output(self, cmd: str):
        """Run *cmd* in a shell and return its stdout as text.

        Retries with increasing sleeps (5s steps, capped at 20s) while the
        failure output contains 'rateLimitExceeded'.  Any other failure, or
        exhausting the retry budget, terminates the process via sys.exit(1).
        """
        attempts = 0
        sleep_time = 5
        max_sleep_time = 20
        while True:
            attempts += 1
            try:
                out = subprocess.check_output(cmd, shell=True)
                return out.decode("utf-8")
            except subprocess.CalledProcessError as e:
                # e.output is bytes; stringify it so the substring test works.
                e.output = str(e.output)
                if 'rateLimitExceeded' in e.output and attempts <= self.max_gdrive_retries:
                    logger.info("Rate Limit Exceeded. Waiting {sleep} seconds...".format(sleep=sleep_time))
                    time.sleep(sleep_time)
                    sleep_time = min(sleep_time + 5, max_sleep_time)
                    continue
                logger.err("Command {cmd} failed with exit code {code} "
                           "and output {output}".format(cmd=cmd, code=e.returncode, output=e.output))
                sys.exit(1)

    def run(self, cmd: str):
        """Run *cmd* via os.system, retrying non-zero exits with backoff.

        Exits the process (sys.exit(1)) once the retry budget is exhausted.
        """
        attempts = 0
        sleep_time = 5
        max_sleep_time = 20
        code = os.system(cmd)
        while code != 0:
            attempts += 1
            if attempts <= self.max_gdrive_retries:
                logger.info("Retrying after {sleep} seconds...".format(sleep=sleep_time))
                time.sleep(sleep_time)
                sleep_time = min(sleep_time + 5, max_sleep_time)
                code = os.system(cmd)
                continue
            logger.err("Command {cmd} failed with exit code {code}".format(cmd=cmd, code=code))
            sys.exit(1)
| 2.125 | 2 |
partners/views/partner_views.py | pyladiesghana/PyLadies-Website | 2 | 12761717 | # Django imports
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
# Partners app imports
from partners.models.partner_models import Partner
def partner_list(request):
    """Render a paginated listing (10 per page) of all partners."""
    paginator = Paginator(Partner.objects.all(), 10)
    requested_page = request.GET.get('page', 1)
    try:
        page_of_partners = paginator.page(requested_page)
    except PageNotAnInteger:
        # Non-numeric ?page= values fall back to the first page.
        page_of_partners = paginator.page(1)
    except EmptyPage:
        # Out-of-range pages clamp to the last available page.
        page_of_partners = paginator.page(paginator.num_pages)
    return render(
        request,
        "partners/partners_list.html",
        {"partners": page_of_partners},
    )
| 2.140625 | 2 |
nuitka/tree/Operations.py | sthagen/Nuitka-Nuitka | 0 | 12761718 | # Copyright 2022, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Operations on the tree.
This is mostly for the different kinds of visits that the node tree can have.
You can visit a scope, a tree (module), or every scope of a tree (module).
"""
from nuitka.containers.oset import OrderedSet
from nuitka.Tracing import general
def visitTree(tree, visitor):
    """Recursively visit *tree* depth-first.

    ``visitor.onEnterNode`` fires before a node's children are walked and
    ``visitor.onLeaveNode`` after all of them have been processed.
    """
    visitor.onEnterNode(tree)

    for child in tree.getVisitableNodes():
        if child is None:
            # A None child indicates a malformed tree; fail loudly.
            raise AssertionError("'None' child encountered", tree, tree.source_ref)

        visitTree(child, visitor)

    visitor.onLeaveNode(tree)
def visitFunction(function, visitor):
    """Visit a single function node; unlike visitTree, children are not walked."""
    visitor.onEnterNode(function)
    visitor.onLeaveNode(function)
def visitModule(module, visitor):
    """Visit a single module node; unlike visitTree, children are not walked."""
    visitor.onEnterNode(module)
    visitor.onLeaveNode(module)
class VisitorNoopMixin(object):
    """Base visitor with no-op hooks; subclasses override what they need."""

    def onEnterNode(self, node):
        """Overloaded for operation before the node children were done."""

    def onLeaveNode(self, node):
        """Overloaded for operation after the node children were done."""
class DetectUsedModules(VisitorNoopMixin):
    """Visitor that collects every module used by import nodes in a tree."""

    def __init__(self):
        # Ordered, de-duplicated set of
        # (module_name, module_filename, finding, level, source_ref) tuples.
        self.used_modules = OrderedSet()

    def onEnterNode(self, node):
        """Delegate to _onEnterNode, logging the offending node on any error."""
        try:
            self._onEnterNode(node)
        except Exception:
            general.my_print(
                "Problem with %r at %s"
                % (node, node.getSourceReference().getAsString())
            )
            raise

    def _onEnterNode(self, node):
        # Builtin __import__ nodes may reference several modules at once.
        if node.isExpressionBuiltinImport():
            for (
                used_module_name,
                used_module_filename,
                finding,
                level,
            ) in node.getUsedModules():
                self.used_modules.add(
                    (
                        used_module_name,
                        used_module_filename,
                        finding,
                        level,
                        node.source_ref,
                    )
                )
        # Hard/fixed import nodes reference exactly one module; these carry
        # no relative-import level, so 0 is recorded.
        elif (
            node.isExpressionImportModuleHard()
            or node.isExpressionImportModuleNameHard()
            or node.isExpressionImportModuleFixed()
        ):
            used_module_name, used_module_filename, finding = node.getUsedModule()
            self.used_modules.add(
                (used_module_name, used_module_filename, finding, 0, node.source_ref)
            )

    def getUsedModules(self):
        """Return the collected module usage tuples."""
        return self.used_modules
| 2.109375 | 2 |
mixify_django/playlists/views.py | sethmenghi/mixify | 1 | 12761719 | <filename>mixify_django/playlists/views.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from django.shortcuts import redirect
from braces.views import LoginRequiredMixin
# from .models import Playlist
from ..users.models import User
from .models import Playlist
class PlaylistListAllView(LoginRequiredMixin, ListView):
    """List every playlist using a dedicated 'all playlists' template."""

    model = Playlist
    template_name = 'playlists/playlist_all.html'
    # Index object lookups by the playlist's slug, not the primary key.
    slug_field = "slug"
    slug_url_kwarg = "slug"
class PlaylistListView(LoginRequiredMixin, ListView):
    """List playlists with the default ListView template."""

    model = Playlist
    # Index object lookups by the playlist's slug, not the primary key.
    # (The original comment said "username", which does not match these fields.)
    slug_field = "slug"
    slug_url_kwarg = "slug"
class PlaylistDetailView(LoginRequiredMixin, DetailView):
    """Show a single playlist."""

    model = Playlist
    # Index object lookups by the playlist's slug, not the primary key.
    # (The original comment said "username", which does not match these fields.)
    slug_field = "slug"
    slug_url_kwarg = "slug"
class PlaylistRedirectView(LoginRequiredMixin, RedirectView):
    """Temporary (302) redirect to the playlist list page."""

    permanent = False

    def get_redirect_url(self):
        return reverse("playlists:list")
class PlaylistUpdateView(LoginRequiredMixin, UpdateView):
    """Allow the owner of a playlist (looked up by URL kwarg ``id``) to edit it."""

    model = Playlist

    def get_success_url(self):
        """Redirect to the updated playlist's detail page.

        BUG FIX: the original called ``.first()`` on the instance returned by
        ``Playlist.objects.get(...)``; model instances have no ``first``
        method, so every successful update raised AttributeError.
        """
        playlist = Playlist.objects.get(id=self.kwargs['id'])
        return reverse("playlists:detail",
                       kwargs={"slug": playlist.slug})

    def get_owner(self):
        """Return the User record of the user making the request."""
        return User.objects.get(username=self.request.user.username)

    def get_object(self):
        """Restrict edits to playlists owned by the requesting user."""
        return Playlist.objects.get(id=self.kwargs['id'],
                                    owner=self.get_owner())
def load_playlists(request):
    """Trigger the user's playlist import (see ``User.load_playlists``), then show the list."""
    request.user.load_playlists()
    return redirect("playlists:list")
| 2.328125 | 2 |
mezzanine/core/apps.py | arundalal/mezzanine-blog | 3 | 12761720 | from __future__ import unicode_literals
from django import VERSION as DJANGO_VERSION
from django.apps import AppConfig
class CoreConfig(AppConfig):
    """App configuration for ``mezzanine.core`` with version-dependent setup."""

    name = 'mezzanine.core'

    def ready(self):
        # Importing the module registers Django system checks as a side effect.
        from . import checks  # noqa
        if DJANGO_VERSION < (1, 9):
            # add_to_builtins was removed in 1.9 and replaced with a
            # documented public API configured by the TEMPLATES setting.
            from django.template.base import add_to_builtins
            add_to_builtins("mezzanine.template.loader_tags")
| 1.75 | 2 |
spacy/lang/nb/morph_rules.py | algteam/spacy_zh_model | 5 | 12761721 | <gh_stars>1-10
# encoding: utf8
from __future__ import unicode_literals
from ...symbols import LEMMA, PRON_LEMMA
"""
This dict includes all the PRON and DET tag combinations found in the
dataset developed by Schibsted, Nasjonalbiblioteket and LTG (to be published
autumn 2018) and the rarely used polite form.
"""
# Mapping from fine-grained POS tag (UD features encoded into the tag string)
# to {word form -> attribute dict}, used for rule-based morphology lookup.
#
# NOTE(review): this literal contains many DUPLICATE keys, both at the top
# level (e.g. "DET__Gender=Masc|Number=Sing", "DET__Number=Plur",
# "PRON__Number=Plur|Person=3|PronType=Prs") and inside some inner dicts
# (e.g. "deres", "seg").  Python keeps only the LAST occurrence of a key in a
# dict display, so the earlier entries flagged below are silently discarded
# at import time — the intended tags should be confirmed and the data merged
# or re-tagged.
MORPH_RULES = {
    "PRON__Animacy=Anim|Case=Nom|Number=Sing|Person=1|PronType=Prs": {
        "jeg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Nom"}
    },
    "PRON__Animacy=Anim|Case=Nom|Number=Sing|Person=2|PronType=Prs": {
        "du": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Nom"},
        # polite form, not sure about the tag
        "De": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Nom", "Polite": "Form"}
    },
    "PRON__Animacy=Anim|Case=Nom|Gender=Fem|Number=Sing|Person=3|PronType=Prs": {
        "hun": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Nom"}
    },
    "PRON__Animacy=Anim|Case=Nom|Gender=Masc|Number=Sing|Person=3|PronType=Prs": {
        "han": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Nom"}
    },
    # NOTE(review): duplicate key — shadowed by a later occurrence below.
    "PRON__Gender=Neut|Number=Sing|Person=3|PronType=Prs": {
        "det": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"},
        "alt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"},
        "intet": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"}
    },
    # NOTE(review): duplicate key — shadowed by a later occurrence below.
    "PRON__Gender=Fem,Masc|Number=Sing|Person=3|PronType=Prs": {
        "den": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": ("Fem", "Masc")}
    },
    "PRON__Animacy=Anim|Case=Nom|Number=Plur|Person=1|PronType=Prs": {
        "vi": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Nom"}
    },
    "PRON__Animacy=Anim|Case=Nom|Number=Plur|Person=2|PronType=Prs": {
        "dere": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Case": "Nom"}
    },
    "PRON__Case=Nom|Number=Plur|Person=3|PronType=Prs": {
        "de": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Nom"}
    },
    "PRON__Animacy=Anim|Case=Acc|Number=Sing|Person=1|PronType=Prs": {
        "meg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Acc"}
    },
    "PRON__Animacy=Anim|Case=Acc|Number=Sing|Person=2|PronType=Prs": {
        "deg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Acc"},
        # polite form, not sure about the tag
        "Dem": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Acc", "Polite": "Form"}
    },
    "PRON__Animacy=Anim|Case=Acc|Gender=Fem|Number=Sing|Person=3|PronType=Prs": {
        "henne": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Acc"}
    },
    "PRON__Animacy=Anim|Case=Acc|Gender=Masc|Number=Sing|Person=3|PronType=Prs": {
        "ham": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Acc"},
        "han": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Acc"}
    },
    "PRON__Animacy=Anim|Case=Acc|Number=Plur|Person=1|PronType=Prs": {
        "oss": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Acc"}
    },
    "PRON__Animacy=Anim|Case=Acc|Number=Plur|Person=2|PronType=Prs": {
        "dere": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Case": "Acc"}
    },
    "PRON__Case=Acc|Number=Plur|Person=3|PronType=Prs": {
        "dem": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Acc"}
    },
    "PRON__Case=Acc|Reflex=Yes": {
        # NOTE(review): duplicate inner key "seg" — only the Plur entry
        # survives; the Sing variant is discarded.
        "seg": {LEMMA: PRON_LEMMA, "Person": "Three", "Number": "Sing", "Reflex": "Yes"},
        "seg": {LEMMA: PRON_LEMMA, "Person": "Three", "Number": "Plur", "Reflex": "Yes"}
    },
    "PRON__Animacy=Anim|Case=Nom|Number=Sing|PronType=Prs": {
        "man": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Case": "Nom"}
    },
    # NOTE(review): duplicate key "DET__Gender=Masc|Number=Sing|Poss=Yes"
    # does not recur, but the inner "deres" key is duplicated here and in
    # the three Poss dicts below — only the Person=Three entry survives.
    "DET__Gender=Masc|Number=Sing|Poss=Yes": {
        "min": {LEMMA: "min", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
        "din": {LEMMA: "din", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
        "hennes": {LEMMA: "hennes", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
        "hans": {LEMMA: "hans", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
        "sin": {LEMMA: "sin", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Masc", "Reflex": "Yes"},
        "vår": {LEMMA: "vår", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
        "deres": {LEMMA: "deres", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
        "deres": {LEMMA: "deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
        # polite form, not sure about the tag
        "Deres": {LEMMA: "Deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Masc", "Polite": "Form"}
    },
    "DET__Gender=Fem|Number=Sing|Poss=Yes": {
        "mi": {LEMMA: "min", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
        "di": {LEMMA: "din", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
        "hennes": {LEMMA: "hennes", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
        "hans": {LEMMA: "hans", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
        "si": {LEMMA: "sin", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Fem", "Reflex": "Yes"},
        "vår": {LEMMA: "vår", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
        "deres": {LEMMA: "deres", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
        "deres": {LEMMA: "deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
        # polite form, not sure about the tag
        "Deres": {LEMMA: "Deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Fem", "Polite": "Form"}
    },
    "DET__Gender=Neut|Number=Sing|Poss=Yes": {
        "mitt": {LEMMA: "min", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
        "ditt": {LEMMA: "din", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
        "hennes": {LEMMA: "hennes", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
        "hans": {LEMMA: "hans", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
        "sitt": {LEMMA: "sin", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut", "Reflex": "Yes"},
        "vårt": {LEMMA: "vår", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
        "deres": {LEMMA: "deres", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
        "deres": {LEMMA: "deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
        # polite form, not sure about the tag
        "Deres": {LEMMA: "Deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut", "Polite": "Form"}
    },
    "DET__Number=Plur|Poss=Yes": {
        "mine": {LEMMA: "min", "Person": "One", "Number": "Plur", "Poss": "Yes"},
        "dine": {LEMMA: "din", "Person": "Two", "Number": "Plur", "Poss": "Yes"},
        "hennes": {LEMMA: "hennes", "Person": "Three", "Number": "Plur", "Poss": "Yes"},
        "hans": {LEMMA: "hans", "Person": "Three", "Number": "Plur", "Poss": "Yes"},
        "sine": {LEMMA: "sin", "Person": "Three", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
        "våre": {LEMMA: "vår", "Person": "One", "Number": "Plur", "Poss": "Yes"},
        "deres": {LEMMA: "deres", "Person": "Two", "Number": "Plur", "Poss": "Yes"},
        "deres": {LEMMA: "deres", "Person": "Three", "Number": "Plur", "Poss": "Yes"}
    },
    "PRON__Animacy=Anim|Number=Plur|PronType=Rcp": {
        "hverandre": {LEMMA: PRON_LEMMA, "PronType": "Rcp", "Number": "Plur"}
    },
    "DET__Number=Plur|Poss=Yes|PronType=Rcp": {
        "hverandres": {LEMMA: "hverandres", "PronType": "Rcp", "Number": "Plur", "Poss": "Yes"}
    },
    "PRON___": {
        "som": {LEMMA: PRON_LEMMA},
        "ikkenoe": {LEMMA: PRON_LEMMA}
    },
    "PRON__PronType=Int": {
        "hva": {LEMMA: PRON_LEMMA, "PronType": "Int"}
    },
    "PRON__Animacy=Anim|PronType=Int": {
        "hvem": {LEMMA: PRON_LEMMA, "PronType": "Int"}
    },
    "PRON__Animacy=Anim|Poss=Yes|PronType=Int": {
        "hvis": {LEMMA: PRON_LEMMA, "PronType": "Int", "Poss": "Yes"}
    },
    # NOTE(review): duplicate key — shadowed by later occurrences below.
    "PRON__Number=Plur|Person=3|PronType=Prs": {
        "noen": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Plur", "Person": "Three"}
    },
    # NOTE(review): duplicate key — shadowed by a later occurrence below.
    "PRON__Gender=Fem,Masc|Number=Sing|Person=3|PronType=Prs": {
        "noen": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Person": "Three", "Gender": ("Fem", "Masc")},
        "den": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Person": "Three", "Gender": ("Fem", "Masc")}
    },
    # Last occurrence of this key — this is the mapping that survives.
    "PRON__Gender=Neut|Number=Sing|Person=3|PronType=Prs": {
        "noe": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Person": "Three", "Gender": "Neut"},
        "det": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Person": "Three", "Gender": "Neut"}
    },
    # Last occurrence of this key — this is the mapping that survives.
    "PRON__Gender=Fem,Masc|Number=Sing|Person=3|PronType=Prs": {
        "ingen": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Person": "Three", "Gender": ("Fem", "Masc"), "Polarity": "Neg"}
    },
    # NOTE(review): duplicate key — shadowed by a later occurrence below.
    "PRON__Number=Plur|Person=3|PronType=Prs": {
        "ingen": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Plur", "Person": "Three"}
    },
    "PRON__Number=Sing": {
        "ingenting": {LEMMA: PRON_LEMMA, "Number": "Sing"}
    },
    # NOTE(review): duplicate key — shadowed by a later occurrence below.
    "PRON__Number=Plur|Person=3|PronType=Prs": {
        "alle": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Plur", "Person": "Three"}
    },
    "PRON__Animacy=Anim|Number=Sing|PronType=Prs": {
        "en": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Sing"}
    },
    "PRON__Animacy=Anim|Case=Gen,Nom|Number=Sing|PronType=Prs": {
        "ens": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Case": ("Gen", "Nom")}
    },
    "PRON__Animacy=Anim|Case=Gen|Number=Sing|PronType=Prs": {
        "ens": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Case": "Gen"}
    },
    "DET__Case=Gen|Gender=Masc|Number=Sing": {
        "ens": {LEMMA: "en", "Number": "Sing", "Case": "Gen"}
    },
    # NOTE(review): duplicate key — shadowed by later occurrences below.
    "DET__Gender=Masc|Number=Sing": {
        "enhver": {LEMMA: "enhver", "Number": "Sing", "Gender": "Masc"},
        "all": {LEMMA: "all", "Number": "Sing", "Gender": "Masc"},
        "hver": {LEMMA: "hver", "Number": "Sing", "Gender": "Masc"}
    },
    # NOTE(review): duplicate key — shadowed by later occurrences below.
    "DET__Gender=Fem|Number=Sing": {
        "enhver": {LEMMA: "enhver", "Number": "Sing", "Gender": "Fem"},
        "all": {LEMMA: "all", "Number": "Sing", "Gender": "Fem"},
        "hver": {LEMMA: "hver", "Number": "Sing", "Gender": "Fem"}
    },
    # NOTE(review): duplicate key — shadowed by later occurrences below.
    "DET__Gender=Neut|Number=Sing": {
        "ethvert": {LEMMA: "enhver", "Number": "Sing", "Gender": "Neut"},
        "alt": {LEMMA: "all", "Number": "Sing", "Gender": "Neut"},
        "hvert": {LEMMA: "hver", "Number": "Sing", "Gender": "Neut"},
    },
    # NOTE(review): duplicate key — shadowed by later occurrences below.
    "DET__Gender=Masc|Number=Sing": {
        "noen": {LEMMA: "noen", "Gender": "Masc", "Number": "Sing"},
        "noe": {LEMMA: "noen", "Gender": "Masc", "Number": "Sing"}
    },
    # NOTE(review): duplicate key — shadowed by a later occurrence below.
    "DET__Gender=Fem|Number=Sing": {
        "noen": {LEMMA: "noen", "Gender": "Fem", "Number": "Sing"},
        "noe": {LEMMA: "noen", "Gender": "Fem", "Number": "Sing"}
    },
    # NOTE(review): duplicate key — shadowed by later occurrences below.
    "DET__Gender=Neut|Number=Sing": {
        "noe": {LEMMA: "noen", "Number": "Sing", "Gender": "Neut"}
    },
    # NOTE(review): duplicate key — shadowed by later occurrences below.
    "DET__Number=Plur": {
        "noen": {LEMMA: "noen", "Number": "Plur"}
    },
    # NOTE(review): duplicate key — shadowed by a later occurrence below.
    "DET__Gender=Neut|Number=Sing": {
        "intet": {LEMMA: "ingen", "Gender": "Neut", "Number": "Sing"}
    },
    # NOTE(review): duplicate key — shadowed by a later occurrence below; the
    # value's Gender "Neut" under a Masc key also looks wrong — confirm.
    "DET__Gender=Masc|Number=Sing": {
        "en": {LEMMA: "en", "Number": "Sing", "Gender": "Neut"}
    },
    # Last occurrence of this key — this is the mapping that survives.
    "DET__Gender=Fem|Number=Sing": {
        "ei": {LEMMA: "en", "Number": "Sing", "Gender": "Fem"}
    },
    # Last occurrence of this key — this is the mapping that survives.
    "DET__Gender=Neut|Number=Sing": {
        "et": {LEMMA: "en", "Number": "Sing", "Gender": "Neut"}
    },
    "DET__Gender=Neut|Number=Sing|PronType=Int": {
        "hvilket": {LEMMA: "hvilken", "PronType": "Int", "Number": "Sing", "Gender": "Neut"}
    },
    "DET__Gender=Fem|Number=Sing|PronType=Int": {
        "hvilken": {LEMMA: "hvilken", "PronType": "Int", "Number": "Sing", "Gender": "Fem"}
    },
    "DET__Gender=Masc|Number=Sing|PronType=Int": {
        "hvilken": {LEMMA: "hvilken", "PronType": "Int", "Number": "Sing", "Gender": "Masc"}
    },
    "DET__Number=Plur|PronType=Int": {
        "hvilke": {LEMMA: "hvilken", "PronType": "Int", "Number": "Plur"}
    },
    # NOTE(review): duplicate key — shadowed by later occurrences below.
    "DET__Number=Plur": {
        "alle": {LEMMA: "all", "Number": "Plur"}
    },
    # Last occurrence of this key — this is the mapping that survives.
    "PRON__Number=Plur|Person=3|PronType=Prs": {
        "alle": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Plur", "Person": "Three"}
    },
    "DET__Gender=Masc|Number=Sing|PronType=Dem": {
        "den": {LEMMA: "den", "PronType": "Dem", "Number": "Sing", "Gender": "Masc"},
        "slik": {LEMMA: "slik", "PronType": "Dem", "Number": "Sing", "Gender": "Masc"},
        "denne": {LEMMA: "denne", "PronType": "Dem", "Number": "Sing", "Gender": "Masc"}
    },
    "DET__Gender=Fem|Number=Sing|PronType=Dem": {
        "den": {LEMMA: "den", "PronType": "Dem", "Number": "Sing", "Gender": "Fem"},
        "slik": {LEMMA: "slik", "PronType": "Dem", "Number": "Sing", "Gender": "Fem"},
        "denne": {LEMMA: "denne", "PronType": "Dem", "Number": "Sing", "Gender": "Fem"}
    },
    "DET__Gender=Neut|Number=Sing|PronType=Dem": {
        "det": {LEMMA: "det", "PronType": "Dem", "Number": "Sing", "Gender": "Neut"},
        "slikt": {LEMMA: "slik", "PronType": "Dem", "Number": "Sing", "Gender": "Neut"},
        "dette": {LEMMA: "dette", "PronType": "Dem", "Number": "Sing", "Gender": "Neut"}
    },
    "DET__Number=Plur|PronType=Dem": {
        "disse": {LEMMA: "disse", "PronType": "Dem", "Number": "Plur"},
        "andre": {LEMMA: "annen", "PronType": "Dem", "Number": "Plur"},
        "de": {LEMMA: "de", "PronType": "Dem", "Number": "Plur"},
        "slike": {LEMMA: "slik", "PronType": "Dem", "Number": "Plur"}
    },
    "DET__Definite=Ind|Gender=Masc|Number=Sing|PronType=Dem": {
        "annen": {LEMMA: "annen", "PronType": "Dem", "Number": "Sing", "Gender": "Masc"}
    },
    "DET__Definite=Ind|Gender=Fem|Number=Sing|PronType=Dem": {
        "annen": {LEMMA: "annen", "PronType": "Dem", "Number": "Sing", "Gender": "Fem"}
    },
    "DET__Definite=Ind|Gender=Neut|Number=Sing|PronType=Dem": {
        "annet": {LEMMA: "annen", "PronType": "Dem", "Number": "Sing", "Gender": "Neut"}
    },
    "DET__Case=Gen|Definite=Ind|Gender=Masc|Number=Sing|PronType=Dem": {
        # NOTE(review): lemma "annnen" (triple n) looks like a typo for "annen".
        "annens": {LEMMA: "annnen", "PronType": "Dem", "Number": "Sing", "Gender": "Masc", "Case": "Gen"}
    },
    "DET__Case=Gen|Number=Plur|PronType=Dem": {
        "andres": {LEMMA: "annen", "PronType": "Dem", "Number": "Plur", "Case": "Gen"}
    },
    "DET__Case=Gen|Gender=Fem|Number=Sing|PronType=Dem": {
        "dens": {LEMMA: "den", "PronType": "Dem", "Number": "Sing", "Gender": "Fem", "Case": "Gen"}
    },
    "DET__Case=Gen|Gender=Masc|Number=Sing|PronType=Dem": {
        "hvis": {LEMMA: "hvis", "PronType": "Dem", "Number": "Sing", "Gender": "Masc", "Case": "Gen"},
        "dens": {LEMMA: "den", "PronType": "Dem", "Number": "Sing", "Gender": "Masc", "Case": "Gen"}
    },
    "DET__Case=Gen|Gender=Neut|Number=Sing|PronType=Dem": {
        "dets": {LEMMA: "det", "PronType": "Dem", "Number": "Sing", "Gender": "Neut", "Case": "Gen"}
    },
    "DET__Case=Gen|Number=Plur": {
        "alles": {LEMMA: "all", "Number": "Plur", "Case": "Gen"}
    },
    "DET__Definite=Def|Number=Sing|PronType=Dem": {
        "andre": {LEMMA: "annen", "Number": "Sing", "PronType": "Dem"}
    },
    "DET__Definite=Def|PronType=Dem": {
        "samme": {LEMMA: "samme", "PronType": "Dem"},
        "forrige": {LEMMA: "forrige", "PronType": "Dem"},
        "neste": {LEMMA: "neste", "PronType": "Dem"},
    },
    "DET__Definite=Def": {
        "selve": {LEMMA: "selve"},
        "selveste": {LEMMA: "selveste"},
    },
    "DET___": {
        "selv": {LEMMA: "selv"},
        "endel": {LEMMA: "endel"}
    },
    "DET__Definite=Ind|Gender=Fem|Number=Sing": {
        "egen": {LEMMA: "egen", "Gender": "Fem", "Number": "Sing"}
    },
    "DET__Definite=Ind|Gender=Masc|Number=Sing": {
        "egen": {LEMMA: "egen", "Gender": "Masc", "Number": "Sing"}
    },
    "DET__Definite=Ind|Gender=Neut|Number=Sing": {
        "eget": {LEMMA: "egen", "Gender": "Neut", "Number": "Sing"}
    },
    # NOTE(review): duplicate key — shadowed by a later occurrence below.
    "DET__Number=Plur": {
        "egne": {LEMMA: "egen", "Number": "Plur"}
    },
    # Last occurrence of this key — this is the mapping that survives.
    "DET__Gender=Masc|Number=Sing": {
        "ingen": {LEMMA: "ingen", "Gender": "Masc", "Number": "Sing"}
    },
    # Last occurrence of this key — this is the mapping that survives.
    "DET__Number=Plur": {
        "ingen": {LEMMA: "ingen", "Number": "Plur"}
    },
    # same wordform and pos (verb), have to specify the exact features in order to not mix them up
    "VERB__Mood=Ind|Tense=Pres|VerbForm=Fin": {
        "så": {LEMMA: "så", "VerbForm": "Fin", "Tense": "Pres", "Mood": "Ind"}
    },
    "VERB__Mood=Ind|Tense=Past|VerbForm=Fin": {
        "så": {LEMMA: "se", "VerbForm": "Fin", "Tense": "Past", "Mood": "Ind"}
    }
}
# copied from the English morph_rules.py
# Also register a Title-cased variant of every word form (e.g. sentence-
# initial "Jeg") pointing at the same attribute dict.  Iterating over a
# dict() copy avoids mutating each rules mapping while it is traversed.
for tag, rules in MORPH_RULES.items():
    for key, attrs in dict(rules).items():
        rules[key.title()] = attrs
| 2.34375 | 2 |
bigstore/backends/rackspace.py | dubois/git-bigstore | 0 | 12761722 | try:
import cloudfiles
except ImportError:
pass
class RackspaceBackend(object):
    """Storage backend that keeps objects in a Rackspace Cloud Files container."""

    def __init__(self, username, api_key, container_name):
        self.username = username
        self.api_key = api_key
        self.conn = cloudfiles.Connection(username=username, api_key=api_key)
        self.container = cloudfiles.Container(self.conn, name=container_name)

    @property
    def name(self):
        # Identifier used to select this backend.
        return "cloudfiles"

    def key(self, hash):
        # Objects are sharded by the first two hash characters, e.g. "ab/cdef...".
        # NOTE(review): the parameter shadows the builtin ``hash``; renaming it
        # would break keyword callers, so it is left as-is.
        return cloudfiles.Object(container=self.container, name="{}/{}".format(hash[:2], hash[2:]))

    def push(self, file, hash, cb=None):
        """Upload the local *file* under *hash*; *cb* is passed through as the cloudfiles callback."""
        self.key(hash).load_from_filename(file.name, callback=cb)

    def pull(self, file, hash, cb=None):
        """Download the object stored for *hash* into the local *file*."""
        self.key(hash).save_to_filename(file.name, callback=cb)

    def exists(self, hash):
        """Return True if an object for *hash* is present (has an etag)."""
        return self.key(hash).etag is not None
| 2.40625 | 2 |
.dev_scripts/benchmark/gather_train_benchmark_metric.py | kevin3314/mmtracking | 2,226 | 12761723 | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import json
import os.path as osp
import mmcv
try:
import xlrd
except ImportError:
xlrd = None
try:
import xlutils
from xlutils.copy import copy
except ImportError:
xlutils = None
def parse_args():
    """Parse command-line options for the metric-gathering script."""
    ap = argparse.ArgumentParser(
        description='Gather benchmarked models metric')
    ap.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    ap.add_argument(
        'txt_path', type=str, help='txt path output by benchmark_filter')
    ap.add_argument(
        '--excel', type=str, help='input path of excel to be recorded')
    ap.add_argument(
        '--ncol', type=int, help='Number of column to be modified or appended')
    return ap.parse_args()
if __name__ == '__main__':
    args = parse_args()
    if args.excel:
        assert args.ncol, 'Please specify "--excel" and "--ncol" ' \
                          'at the same time'
        if xlrd is None:
            raise RuntimeError(
                'xlrd is not installed,'
                'Please use “pip install xlrd==1.2.0” to install')
        if xlutils is None:
            raise RuntimeError(
                'xlutils is not installed,'
                'Please use “pip install xlutils==2.0.0” to install')
        readbook = xlrd.open_workbook(args.excel)

    root_path = args.root
    all_results_dict = {}
    with open(args.txt_path, 'r') as f:
        model_cfgs = f.readlines()
        model_cfgs = [_ for _ in model_cfgs if 'configs' in _]
        for i, config in enumerate(model_cfgs):
            config = config.strip()
            if len(config) == 0:
                continue

            config_name = osp.split(config)[-1]
            config_name = osp.splitext(config_name)[0]
            result_path = osp.join(root_path, config_name)
            if osp.exists(result_path):
                # 1 read config and excel
                cfg = mmcv.Config.fromfile(config)
                total_epochs = cfg.total_epochs

                # the first metric will be used to find the best ckpt
                has_final_ckpt = True
                if 'vid' in config:
                    eval_metrics = ['bbox_mAP_50']
                elif 'mot' in config:
                    eval_metrics = ['MOTA', 'IDF1']
                    # tracktor and deepsort don't have ckpt.
                    has_final_ckpt = False
                elif 'sot' in config:
                    eval_metrics = ['success', 'norm_precision', 'precision']
                else:
                    raise NotImplementedError(
                        f'Not supported config: {config}')

                if args.excel:
                    xlrw = copy(readbook)
                    if 'vid' in config:
                        sheet = readbook.sheet_by_name('vid')
                        table = xlrw.get_sheet('vid')
                    elif 'mot' in config:
                        sheet = readbook.sheet_by_name('mot')
                        table = xlrw.get_sheet('mot')
                    elif 'sot' in config:
                        sheet = readbook.sheet_by_name('sot')
                        table = xlrw.get_sheet('sot')
                    sheet_info = {}
                    # Fix: the row loop previously reused (and clobbered)
                    # the outer enumerate variable `i`.
                    for row in range(6, sheet.nrows):
                        sheet_info[sheet.row_values(row)[0]] = row

                # 2 determine whether total_epochs ckpt exists
                ckpt_path = f'epoch_{total_epochs}.pth'
                if osp.exists(osp.join(result_path, ckpt_path)) or \
                        not has_final_ckpt:
                    log_json_path = list(
                        sorted(glob.glob(osp.join(result_path,
                                                  '*.log.json'))))[-1]

                    # 3 read metric
                    # Fix: renamed the inner file handle; it previously
                    # shadowed the outer `f` (the model-list file).
                    result_dict = dict()
                    with open(log_json_path, 'r') as json_file:
                        for line in json_file.readlines():
                            log_line = json.loads(line)
                            if 'mode' not in log_line.keys():
                                continue

                            if log_line['mode'] == 'val' or \
                                    log_line['mode'] == 'test':
                                result_dict[f"epoch_{log_line['epoch']}"] = {
                                    key: log_line[key]
                                    for key in eval_metrics if key in log_line
                                }

                    # 4 find the best ckpt (judged on the first metric)
                    best_epoch_results = dict()
                    for epoch in result_dict:
                        if len(best_epoch_results) == 0:
                            best_epoch_results = result_dict[epoch]
                        else:
                            if best_epoch_results[eval_metrics[
                                    0]] < result_dict[epoch][eval_metrics[0]]:
                                best_epoch_results = result_dict[epoch]

                    # SOT metrics are already percentages; the others are
                    # fractions and get scaled by 100.
                    for metric in best_epoch_results:
                        if 'success' in best_epoch_results:
                            performance = round(best_epoch_results[metric],
                                                1)
                        else:
                            performance = round(
                                best_epoch_results[metric] * 100, 1)
                        best_epoch_results[metric] = performance
                    all_results_dict[config] = best_epoch_results

                    # update and append excel content
                    if args.excel:
                        performance = ''
                        for metric in best_epoch_results:
                            performance += f'{best_epoch_results[metric]}/'

                        row_num = sheet_info.get(config, None)
                        if row_num:
                            table.write(row_num, args.ncol, performance)
                        else:
                            table.write(sheet.nrows, 0, config)
                            table.write(sheet.nrows, args.ncol, performance)
                        # Fix: save next to the input workbook; the computed
                        # root name was previously unused and a bogus
                        # literal was written instead.
                        filename, suffix = osp.splitext(args.excel)
                        xlrw.save(f'{filename}_o{suffix}')
                        readbook = xlrd.open_workbook(f'{filename}_o{suffix}')
                else:
                    print(f'{config} not exist: {ckpt_path}')
            else:
                print(f'not exist: {config}')

    # 5 print gathered results
    print('===================================')
    for config_name, metrics in all_results_dict.items():
        print(config_name, metrics)
    print('===================================')
    if args.excel:
        print(f'>>> Output {filename}_o{suffix}')
| 2.421875 | 2 |
custom_components/keymaster/helpers.py | ccsliinc/keymaster | 0 | 12761724 | <filename>custom_components/keymaster/helpers.py
"""Helpers for keymaster."""
from datetime import timedelta
import logging
import os
from typing import Dict, List, Optional, Union
from openzwavemqtt.const import ATTR_CODE_SLOT
from homeassistant.components.input_boolean import DOMAIN as IN_BOOL_DOMAIN
from homeassistant.components.input_datetime import DOMAIN as IN_DT_DOMAIN
from homeassistant.components.input_number import DOMAIN as IN_NUM_DOMAIN
from homeassistant.components.input_select import DOMAIN as IN_SELECT_DOMAIN
from homeassistant.components.input_text import DOMAIN as IN_TXT_DOMAIN
from homeassistant.components.ozw import DOMAIN as OZW_DOMAIN
from homeassistant.components.timer import DOMAIN as TIMER_DOMAIN
from homeassistant.components.zwave.const import DATA_ZWAVE_CONFIG
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_STATE, STATE_LOCKED, STATE_UNLOCKED
from homeassistant.core import HomeAssistant, State
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.util import dt
from homeassistant.util.yaml.loader import load_yaml
from .const import (
ACCESS_CONTROL,
ACTION_MAP,
ALARM_TYPE,
ATTR_ACTION_CODE,
ATTR_ACTION_TEXT,
ATTR_CODE_SLOT_NAME,
ATTR_NAME,
ATTR_NODE_ID,
CONF_PATH,
DOMAIN,
EVENT_KEYMASTER_LOCK_STATE_CHANGED,
LOCK_STATE_MAP,
PRIMARY_LOCK,
)
from .lock import KeymasterLock
_LOGGER = logging.getLogger(__name__)
def using_ozw(hass: HomeAssistant) -> bool:
    """Returns whether the ozw integration is configured."""
    # The ozw integration registers itself under its domain key in hass.data.
    return OZW_DOMAIN in hass.data
def using_zwave(hass: HomeAssistant) -> bool:
    """Returns whether the zwave integration is configured."""
    # Legacy zwave stores its config under DATA_ZWAVE_CONFIG in hass.data.
    return DATA_ZWAVE_CONFIG in hass.data
def get_node_id(hass: HomeAssistant, entity_id: str) -> Optional[str]:
    """Get node ID from entity, or None when the entity has no state."""
    state = hass.states.get(entity_id)
    return state.attributes[ATTR_NODE_ID] if state else None
def output_to_file_from_template(
    input_path: str,
    input_filename: str,
    output_path: str,
    output_filename: str,
    replacements_dict: Dict[str, str],
    write_mode: str,
) -> None:
    """Generate file output from input templates while replacing string references."""
    _LOGGER.debug("Starting generation of %s from %s", output_filename, input_filename)
    src_file = os.path.join(input_path, input_filename)
    dst_file = os.path.join(output_path, output_filename)
    with open(src_file, "r") as infile, open(dst_file, write_mode) as outfile:
        for template_line in infile:
            # Apply every placeholder substitution to the current line.
            for needle, replacement in replacements_dict.items():
                template_line = template_line.replace(needle, replacement)
            outfile.write(template_line)
    _LOGGER.debug("Completed generation of %s from %s", output_filename, input_filename)
def _get_entities_to_remove(
    lock_name: str,
    file_path: str,
    code_slots_to_remove: Union[List[int], range],
    remove_common_file: bool,
) -> List[str]:
    """Gets list of entities to remove.

    Scans the generated package YAML files for the given lock (one file per
    code slot, plus optionally the shared "common" file) and collects the
    entity IDs of every helper defined in them.
    """
    output_path = os.path.join(file_path, lock_name)
    # One package file per code slot: <lock>_keymaster_<slot>.yaml
    filenames = [f"{lock_name}_keymaster_{x}.yaml" for x in code_slots_to_remove]
    if remove_common_file:
        filenames.append(f"{lock_name}_keymaster_common.yaml")

    entities = []
    for filename in filenames:
        file_dict = load_yaml(os.path.join(output_path, filename))
        # get all entities from all helper domains that exist in package files
        for domain in (
            IN_BOOL_DOMAIN,
            IN_DT_DOMAIN,
            IN_NUM_DOMAIN,
            IN_SELECT_DOMAIN,
            IN_TXT_DOMAIN,
            TIMER_DOMAIN,
        ):
            # Keys under each domain section are the object IDs; prefix with
            # the domain to form full entity IDs.
            entities.extend(
                [f"{domain}.{ent_id}" for ent_id in file_dict.get(domain, {})]
            )
    return entities
async def remove_generated_entities(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    code_slots_to_remove: Union[List[int], range],
    remove_common_file: bool,
) -> List[str]:
    """Remove entities and return removed list.

    Parses the generated package files in an executor (file I/O must not
    block the event loop) and then deregisters each found entity from the
    entity registry.
    """
    ent_reg = await async_get_registry(hass)
    lock: KeymasterLock = hass.data[DOMAIN][config_entry.entry_id][PRIMARY_LOCK]
    entities_to_remove = await hass.async_add_executor_job(
        _get_entities_to_remove,
        lock.lock_name,
        os.path.join(hass.config.path(), config_entry.data[CONF_PATH]),
        code_slots_to_remove,
        remove_common_file,
    )

    for entity_id in entities_to_remove:
        # Only remove entities that are actually registered.
        if ent_reg.async_get(entity_id):
            ent_reg.async_remove(entity_id)

    return entities_to_remove
def delete_lock_and_base_folder(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
    """Delete packages folder for lock and base keymaster folder if empty."""
    lock: KeymasterLock = hass.data[DOMAIN][config_entry.entry_id][PRIMARY_LOCK]
    base_path = os.path.join(hass.config.path(), config_entry.data[CONF_PATH])

    # Remove this lock's package folder first, then the shared base folder
    # if no other lock still has files in it.
    delete_folder(base_path, lock.lock_name)
    if not os.listdir(base_path):
        os.rmdir(base_path)
def delete_folder(absolute_path: str, *relative_paths: str) -> None:
    """Recursively delete folder and all children files and folders (depth first)."""
    target = os.path.join(absolute_path, *relative_paths)
    if os.path.isfile(target):
        os.remove(target)
        return
    # Depth first: empty out the directory before removing it.
    for child in os.listdir(target):
        delete_folder(target, child)
    os.rmdir(target)
def handle_state_change(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    changed_entity: str,
    old_state: State,
    new_state: State,
) -> None:
    """Listener to track state changes to lock entities.

    Translates a locked/unlocked transition plus the lock's Z-Wave
    alarm/access-control sensor values into a keymaster lock-state-changed
    event, including the code slot and slot name when a usercode was used.
    """
    primary_lock: KeymasterLock = hass.data[DOMAIN][config_entry.entry_id][PRIMARY_LOCK]

    # If listener was called for entity that is not for this entry,
    # or lock state is coming from or going to a weird state, ignore
    if (
        changed_entity != primary_lock.lock_entity_id
        or new_state is None
        or new_state.state not in (STATE_LOCKED, STATE_UNLOCKED)
        or old_state.state not in (STATE_LOCKED, STATE_UNLOCKED)
    ):
        return

    # Determine action type to set appropriate action text using ACTION_MAP
    # (the configured entity id contains either the alarm_type or the
    # access_control sensor name).
    action_type = ""
    if ALARM_TYPE in primary_lock.alarm_type_or_access_control_entity_id:
        action_type = ALARM_TYPE
    if ACCESS_CONTROL in primary_lock.alarm_type_or_access_control_entity_id:
        action_type = ACCESS_CONTROL

    # Get alarm_level/usercode and alarm_type/access_control states
    alarm_level_state = hass.states.get(primary_lock.alarm_level_or_user_code_entity_id)
    alarm_level_value = int(alarm_level_state.state) if alarm_level_state else None

    alarm_type_state = hass.states.get(
        primary_lock.alarm_type_or_access_control_entity_id
    )
    alarm_type_value = int(alarm_type_state.state) if alarm_type_state else None

    # If lock has changed state but alarm_type/access_control state hasn't changed in a while
    # set action_value to RF lock/unlock
    # NOTE(review): alarm_type_state is dereferenced here without a None
    # check (only alarm_level_state is guarded) — confirm it is always
    # available when alarm_level_state is.
    if (
        alarm_level_state is not None
        and int(alarm_level_state.state) == 0
        and dt.utcnow() - dt.as_utc(alarm_type_state.last_changed)
        > timedelta(seconds=5)
        and action_type in LOCK_STATE_MAP
    ):
        alarm_type_value = LOCK_STATE_MAP[action_type][new_state.state]

    # Lookup action text based on alarm type value
    action_text = (
        ACTION_MAP.get(action_type, {}).get(
            alarm_type_value, "Unknown Alarm Type Value"
        )
        if alarm_type_value is not None
        else None
    )

    # Lookup name for usercode (the generated input_text helper holds the
    # user-assigned name for the slot number in alarm_level_value).
    code_slot_name_state = hass.states.get(
        f"input_text.{primary_lock.lock_name}_name_{alarm_level_value}"
    )
    code_slot_name = (
        code_slot_name_state.state if code_slot_name_state is not None else None
    )

    # Get lock state to provide as part of event data
    lock_state = hass.states.get(primary_lock.lock_entity_id)

    # Fire state change event
    hass.bus.async_fire(
        EVENT_KEYMASTER_LOCK_STATE_CHANGED,
        event_data={
            ATTR_NAME: primary_lock.lock_name,
            ATTR_STATE: lock_state.state if lock_state else None,
            ATTR_ACTION_CODE: alarm_type_value,
            ATTR_ACTION_TEXT: action_text,
            ATTR_CODE_SLOT: alarm_level_value,
            ATTR_CODE_SLOT_NAME: code_slot_name,
        },
    )
| 1.8125 | 2 |
eda5/nadzornaplosca/migrations/0001_initial.py | vasjapavlovic/eda5 | 0 | 12761725 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema: control-panel units (NadzornaEnota) belonging to a
    # monitoring system (NadzorniSistem). Auto-generated — do not edit by hand.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='NadzornaEnota',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('oznaka', models.CharField(max_length=50)),
                ('naziv', models.CharField(max_length=255)),
                ('ip_naslov', models.CharField(max_length=255)),
                ('opis', models.TextField()),
            ],
            options={
                'verbose_name_plural': 'nadzorne enote',
                'verbose_name': 'nadzorna enota',
                'ordering': ['oznaka'],
            },
        ),
        migrations.CreateModel(
            name='NadzorniSistem',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('oznaka', models.CharField(max_length=50)),
                ('naziv', models.CharField(max_length=255)),
            ],
            options={
                'verbose_name_plural': 'nadzorni sistemi',
                'verbose_name': 'nadzorni sistem',
            },
        ),
        # FK added after both models exist to avoid a forward reference.
        migrations.AddField(
            model_name='nadzornaenota',
            name='nadzorni_sistem',
            field=models.ForeignKey(to='nadzornaplosca.NadzorniSistem'),
        ),
    ]
| 1.773438 | 2 |
helpers/pagination.py | oliver-ni/helper-bot | 37 | 12761726 | import asyncio
import discord
from discord.ext import commands, menus
class AsyncEmbedCodeBlockTablePageSource(menus.AsyncIteratorPageSource):
    """Paginates an async iterator into embeds containing an aligned,
    monospaced (code-block) table, 20 rows per page.

    format_item must return a tuple of column strings for each entry;
    format_embed can mutate the built embed (e.g. add an author line).
    """

    def __init__(
        self,
        data,
        title=None,
        count=None,
        show_index=False,
        format_embed=lambda x: None,
        format_item=str,
    ):
        super().__init__(data, per_page=20)
        self.title = title
        self.show_index = show_index
        self.format_embed = format_embed
        self.format_item = format_item
        # Optional known total; used only for the footer text.
        self.count = count

    def justify(self, s, width):
        # Numeric cells are right-aligned, text cells left-aligned.
        if s.isdigit():
            return s.rjust(width)
        else:
            return s.ljust(width)

    async def format_page(self, menu, entries):
        start = menu.current_page * self.per_page
        # Optionally prepend a 1-based index column to each row tuple.
        table = [
            (f"{i+1}.", *self.format_item(x)) if self.show_index else self.format_item(x)
            for i, x in enumerate(entries, start=menu.current_page * self.per_page)
        ]
        # Pad every column to its widest cell so columns line up.
        col_lens = [max(len(x) for x in col) for col in zip(*table)]
        lines = [
            " ".join(self.justify(x, col_lens[i]) for i, x in enumerate(line)).rstrip()
            for line in table
        ]
        embed = discord.Embed(
            title=self.title,
            color=discord.Color.blurple(),
            description="```" + f"\n".join(lines) + "```",
        )
        self.format_embed(embed)

        footer = f"Showing entries {start + 1}–{start + len(lines)}"
        if self.count is not None:
            footer += f" out of {self.count}"
        embed.set_footer(text=footer)

        return embed
class EmbedListPageSource(menus.ListPageSource):
    """Paginates a pre-built list into embed pages of 20 lines."""

    def __init__(self, data, title=None, show_index=False, format_item=str):
        super().__init__(data, per_page=20)
        self.title = title
        self.show_index = show_index
        self.format_item = format_item

    async def format_page(self, menu, entries):
        offset = menu.current_page * self.per_page
        rendered = []
        for idx, entry in enumerate(entries, start=offset):
            text = self.format_item(entry)
            if self.show_index:
                # 1-based global index prefix.
                text = f"{idx+1}. {text}"
            rendered.append(text)
        return discord.Embed(
            title=self.title,
            color=discord.Color.blurple(),
            description="\n".join(rendered),
        )
class AsyncEmbedListPageSource(menus.AsyncIteratorPageSource):
    """Paginates an async iterator into embed pages of 20 lines.

    ``count``, when given, is the known total number of entries and is
    only used for the footer text.
    """

    def __init__(self, data, title=None, count=None, show_index=False, format_item=str):
        super().__init__(data, per_page=20)
        self.title = title or discord.Embed.Empty
        self.show_index = show_index
        self.format_item = format_item
        self.count = count

    async def format_page(self, menu, entries):
        start = menu.current_page * self.per_page
        lines = [
            f"{i+1}. {self.format_item(x)}" if self.show_index else self.format_item(x)
            for i, x in enumerate(entries, start=start)
        ]
        embed = discord.Embed(
            title=self.title,
            color=discord.Color.blurple(),
            description="\n".join(lines),
        )
        # Fix: the last visible entry is start + len(lines), not one past it
        # (matches AsyncEmbedCodeBlockTablePageSource's footer).
        footer = f"Showing entries {start + 1}–{start + len(lines)}"
        if self.count is not None:
            footer += f" out of {self.count}"
        embed.set_footer(text=footer)

        return embed
class AsyncEmbedFieldsPageSource(menus.AsyncIteratorPageSource):
    """Paginates an async iterator into embed pages of 5 fields each.

    ``format_item(i, x)`` must return kwargs for ``Embed.add_field``.
    """

    def __init__(self, data, title=None, count=None, format_item=lambda i, x: (i, x)):
        super().__init__(data, per_page=5)
        self.title = title
        self.format_item = format_item
        self.count = count

    async def format_page(self, menu, entries):
        embed = discord.Embed(
            title=self.title,
            color=discord.Color.blurple(),
        )
        start = menu.current_page * self.per_page
        entries = list(entries)
        for i, x in enumerate(entries, start=start):
            embed.add_field(**self.format_item(i, x))
        # Fix: don't read the loop variable after the loop — it is undefined
        # (NameError) when the page has no entries.
        footer = f"Showing entries {start+1}–{start + len(entries)}"
        if self.count is not None:
            footer += f" out of {self.count}"
        embed.set_footer(text=footer)
        return embed
class Paginator:
    """Reaction-driven paginator around an async page factory.

    ``get_page(pidx)`` must return the embed for page ``pidx``;
    ``num_pages`` is the total number of pages.
    """

    def __init__(self, get_page, num_pages):
        self.num_pages = num_pages
        self.get_page = get_page

    async def send(self, ctx: commands.Context, pidx: int = 0):
        """Send page ``pidx`` and run the reaction control loop."""
        embed = await self.get_page(pidx)
        message = await ctx.send(embed=embed)

        if self.num_pages <= 1:
            return

        await message.add_reaction("⏮️")
        await message.add_reaction("◀")
        await message.add_reaction("▶")
        await message.add_reaction("⏭️")
        await message.add_reaction("🔢")
        await message.add_reaction("⏹")

        try:
            while True:
                reaction, user = await ctx.bot.wait_for(
                    "reaction_add",
                    check=lambda r, u: r.message.id == message.id and u.id == ctx.author.id,
                    timeout=120,
                )
                try:
                    await reaction.remove(user)
                except Exception:
                    # Best effort: lacking "Manage Messages" is fine.
                    pass

                if reaction.emoji == "⏹":
                    await message.delete()
                    return
                elif reaction.emoji == "🔢":
                    ask_message = await ctx.send("What page would you like to go to?")
                    # Fix: the user's reply previously overwrote `message`,
                    # so the final edit (and subsequent reaction checks)
                    # targeted the deleted reply instead of the paginator.
                    response = await ctx.bot.wait_for(
                        "message",
                        check=lambda m: m.author == ctx.author and m.channel == ctx.channel,
                        timeout=30,
                    )
                    try:
                        pidx = (int(response.content) - 1) % self.num_pages
                    except ValueError:
                        await ctx.send("That's not a valid page number!")
                        continue
                    ctx.bot.loop.create_task(ask_message.delete())
                    ctx.bot.loop.create_task(response.delete())
                else:
                    pidx = {
                        "⏮️": 0,
                        "◀": pidx - 1,
                        "▶": pidx + 1,
                        "⏭️": self.num_pages - 1,
                    }[reaction.emoji] % self.num_pages

                embed = await self.get_page(pidx)
                await message.edit(embed=embed)
        except asyncio.TimeoutError:
            await message.add_reaction("❌")
| 2.65625 | 3 |
Simulations/Drive_Amp_(Passive_Model).py | jatinchowdhury18/Aphex_Exciter | 7 | 12761727 | #%%
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
#%%
N = 1000
n = np.arange (N)
f = 100
fs = 44100
x = (1.58 * 0.3125) * np.sin (2 * np.pi * n * f / fs)
#%%
e_s_plus = 72
e_s_minus = -72
V_cm = (e_s_plus + e_s_minus) / 2
V_dm = (e_s_plus - e_s_minus) / 2
R_p = 0 # 50000
G = (R_p + 150 + 20000) / (R_p + 150)
print (V_cm)
print (V_dm)
#%%
y = np.zeros (N)
y_1 = 0
for i in range (N):
inner = (G*x[i] - V_cm) / V_dm
sat = inner
if sat < -1:
sat = -1
elif sat >1:
sat = 1
y[i] = V_cm + V_dm * sat
#%%
plt.figure()
plt.plot (n, x)
plt.plot (n, y)
plt.axhline (66)
#%%
| 2.40625 | 2 |
agate/tableset/from_json.py | timgates42/agate | 4 | 12761728 | #!/usr/bin/env python
from collections import OrderedDict
from decimal import Decimal
from glob import glob
import json
import os
import six
from agate.table import Table
@classmethod
def from_json(cls, path, column_names=None, column_types=None, keys=None, **kwargs):
    """
    Create a new :class:`TableSet` from a directory of JSON files or a
    single JSON object with key value (Table key and list of row objects)
    pairs for each :class:`Table`.

    See :meth:`.Table.from_json` for additional details.

    :param path:
        Path to a directory containing JSON files or filepath/file-like
        object of nested JSON file.
    :param keys:
        A list of keys of the top-level dictionaries for each file. If
        specified, length must be equal to number of JSON files in path.
    :param column_types:
        See :meth:`Table.__init__`.
    """
    from agate.tableset import TableSet

    if isinstance(path, six.string_types) and not os.path.isdir(path) and not os.path.isfile(path):
        raise IOError('Specified path doesn\'t exist.')

    tables = OrderedDict()

    if isinstance(path, six.string_types) and os.path.isdir(path):
        filepaths = glob(os.path.join(path, '*.json'))

        if keys is not None and len(keys) != len(filepaths):
            raise ValueError('If specified, keys must have length equal to number of JSON files')

        for i, filepath in enumerate(filepaths):
            # Fix: str.strip('.json') removes any of the characters
            # ".", "j", "s", "o", "n" from BOTH ends of the name (e.g.
            # "sons.json" -> ""), mangling table names. Use splitext to
            # drop only the extension.
            name = os.path.splitext(os.path.split(filepath)[1])[0]

            if keys is not None:
                tables[name] = Table.from_json(filepath, keys[i], column_types=column_types, **kwargs)
            else:
                tables[name] = Table.from_json(filepath, column_types=column_types, **kwargs)

    else:
        if hasattr(path, 'read'):
            js = json.load(path, object_pairs_hook=OrderedDict, parse_float=Decimal, **kwargs)
        else:
            with open(path, 'r') as f:
                js = json.load(f, object_pairs_hook=OrderedDict, parse_float=Decimal, **kwargs)

        for key, value in js.items():
            tables[key] = Table.from_object(value, column_types=column_types, **kwargs)

    return TableSet(tables.values(), tables.keys())
| 2.828125 | 3 |
backend/web.py | thep0y/WFCleanupTool | 6 | 12761729 | <filename>backend/web.py
from typing import List
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from backend.settings import TEMPLATES_DIR, STATIC_DIR
from backend.handlers.file_handler import delete_files
app = FastAPI()
app.mount("/static", StaticFiles(directory=STATIC_DIR), name='static')
templates = Jinja2Templates(directory=TEMPLATES_DIR)
class CleanModel(BaseModel):
    """Request body for /clean/: which WeChat folders to purge."""

    work_dir: str  # root directory that holds the WeChat files
    wx_id: str  # WeChat account id whose folders are cleaned
    folders: List[str]  # sub-folder names to delete
@app.get("/", response_class=HTMLResponse)
async def index(request: Request):
return templates.TemplateResponse('index.html', {"request": request})
@app.post('/clean/')
async def clean(item: CleanModel):
    """Delete the requested folders; respond 500 with the error text on failure."""
    try:
        delete_files(item.work_dir, item.wx_id, item.folders)
        return {"status": "ok"}
    except Exception as e:
        # Fix: e.args may be empty (e.g. a bare Exception()), which made
        # e.args[0] raise IndexError inside the handler; str(e) is safe.
        raise HTTPException(status_code=500, detail=str(e))
| 2.375 | 2 |
scripts/calling_threshold.py | maojanlin/gAIRRsuite | 3 | 12761730 | import argparse
def parse_args():
    """Parse the command line: -dp/--fn_depth_report names the report file."""
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '-dp', '--fn_depth_report',
        help='input read-depth calling report'
    )
    return ap.parse_args()
def find_thresh(list_depth):
    """Pick a read-depth cutoff separating called alleles from noise.

    ``list_depth`` is a list of (allele_name, depth) pairs sorted by
    descending depth; returns the chosen depth threshold.
    """
    # Rebuild the depth list without the TRAV8-5*01 outlier entries.
    depths = [d for name, d in list_depth if 'TRAV8-5*01' not in name]
    # Pad both ends so every index has a full 3-before / 2-after window.
    depths = [depths[0]] * 3 + depths + [depths[-1]] * 2

    # Score each position: ratio of the weighted window ahead to the
    # weighted window behind (a sharp drop gives a small score).
    scores = []
    for idx in range(3, len(depths) - 2):
        ahead = depths[idx] + depths[idx + 1] * 0.25 + depths[idx + 2] * 0.1
        behind = depths[idx - 3] * 0.1 + depths[idx - 2] * 0.25 + depths[idx - 1]
        score = ((ahead + 2) / (behind + 2)) * ((ahead + 2) / (ahead + 0.5))
        scores.append((score, idx))

    ranked = sorted(scores)
    if ranked[0][0] * 2 < ranked[1][0]:
        # One candidate drop clearly dominates: take it.
        cut_idx = ranked[0][1]
    else:
        # Ambiguous: fall back to the first >27% relative drop.
        cut_idx = -1
        for idx in range(3, len(depths) - 2):
            if depths[idx] / depths[idx - 1] < 0.73:
                cut_idx = idx
                break
    return depths[cut_idx] + 1
def thresh_divide(list_depth, thresh):
    """Print alleles at/above ``thresh`` and count them.

    Returns (total_called, novel_called). Stops as soon as a zero-depth
    entry is seen (the report's terminating sentinel).
    """
    total_num, novel_num = 0, 0
    printed_separator = False
    p_depth = 1  # retained for the (commented-out) depth-ratio debug print
    for allele_name, depth in list_depth:
        if depth >= thresh:
            total_num += 1
            if 'novel' in allele_name:
                novel_num += 1
            print(allele_name, depth)  # , depth/p_depth, sep='\t\t'
            p_depth = depth
        elif not printed_separator:
            # Mark where the below-threshold region begins (once).
            printed_separator = True
            print("------------- thresh: " + str(thresh) + " ----------------")
        if depth == 0:
            break
    return total_num, novel_num
if __name__ == '__main__':
    args = parse_args()
    fn_depth_report = args.fn_depth_report

    # Read (allele_name, depth) pairs; the file is sorted by depth and a
    # zero-depth line marks the end of useful entries.
    f_n = open(fn_depth_report, 'r')
    list_depth = []  # list_depth = [(name1,depth1), (name2,depth2), ... ]
    for line in f_n:
        fields = line.split()
        allele_name = fields[0]
        depth = float(fields[1])
        list_depth.append((allele_name, depth))
        if depth == 0:
            break
    f_n.close()

    # Choose the cutoff, then report everything above it.
    thresh = find_thresh(list_depth)
    total_num, novel_num = thresh_divide(list_depth, thresh)
    print("\n========= Summary ===========")
    print("Total AIRRCall alleles:", total_num)
    print("Novel AIRRCall alleles:", novel_num)
ocellaris_post/readers/iso_surfaces.py | TormodLandet/Ocellaris | 1 | 12761731 | # Copyright (C) 2018-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
import numpy
def read_surfaces(res):
    """Populate ``res.surfaces`` with an IsoSurfaces reader per enabled probe."""
    inp = res.input
    res.surfaces = {}
    if 'probes' not in inp:
        return

    for probe in inp['probes']:
        # Only enabled IsoSurface probes are of interest here.
        enabled = probe.get('enabled', True)
        if not (enabled and probe.get('type', '') == 'IsoSurface'):
            continue
        file_name = res.get_file_path(probe['file_name'])
        res.surfaces[probe['name']] = IsoSurfaces(
            probe['name'], probe['field'], probe['value'], file_name
        )
class IsoSurfaces(object):
    """Cached reader for an Ocellaris iso-surface probe output file."""

    def __init__(self, name, field_name, value, file_name):
        self.name = name
        self.field_name = field_name  # field the iso-surface was taken from
        self.value = value  # iso value the probe was configured with
        self.file_name = file_name
        self._cache = None  # memoised result of get_surfaces()

    def reload(self):
        # Drop the cached parse so the file is re-read on next access.
        self._cache = None

    def get_surfaces(self, cache=True):
        """Parse the file; returns (description, value, dim, timesteps, data).

        ``data[t]`` is a list of surfaces at timestep t, each a tuple of
        (xvals, yvals, zvals) coordinate lists.
        """
        if cache and self._cache is not None:
            return self._cache

        timesteps = []
        data = []
        with open(self.file_name, 'rt') as f:
            # Header: description line, then "... value" and "... dim" lines
            # where the last whitespace-separated token holds the number.
            description = f.readline()[1:].strip()
            value = float(f.readline().split()[-1])
            dim = int(f.readline().split()[-1])
            line = f.readline()
            while line:
                wds = line.split()
                try:
                    # Timestep header: token 1 is the time, token 3 the
                    # number of surfaces that follow.
                    time = float(wds[1])
                    nsurf = int(wds[3])
                except Exception:
                    # Malformed/truncated header: stop parsing.
                    break
                if nsurf == 0:
                    timesteps.append(time)
                    data.append([])
                    line = f.readline()
                    continue
                # Each surface occupies 3 lines: x-, y- and z-coordinates.
                datalines = [f.readline() for _ in range(nsurf * 3)]
                if not datalines[-1]:
                    # File ended mid-record: discard the partial timestep.
                    break
                timesteps.append(time)
                data.append([])
                for i in range(nsurf):
                    xvals = [float(v) for v in datalines[i * 3 + 0].split()]
                    yvals = [float(v) for v in datalines[i * 3 + 1].split()]
                    zvals = [float(v) for v in datalines[i * 3 + 2].split()]
                    data[-1].append((xvals, yvals, zvals))

                line = f.readline()

        res = (description, value, dim, numpy.array(timesteps), data)
        if cache:
            self._cache = res
        return res
| 2.515625 | 3 |
gravity/migrations/0003_tiltbridge_mdns_id.py | fossabot/fermentrack | 114 | 12761732 | <reponame>fossabot/fermentrack
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-03-18 23:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
    # Replaces TiltBridge's api_key with an mdns_id primary key. Written as
    # RemoveField/AddField (not AlterField) to work around SQLite FK
    # constraint handling in Django 2.0+ (see comment below).

    dependencies = [
        ('gravity', '0002_tilt_refactor'),
    ]

    operations = [
        # Converting from AlterField to RemoveField/AddField because of issues with Django 2.0+ migration:
        # https://docs.djangoproject.com/en/3.0/releases/2.0/#foreign-key-constraints-are-now-enabled-on-sqlite
        migrations.RemoveField(
            model_name='tiltbridge',
            name='api_key',
        ),
        migrations.AddField(
            model_name='tiltbridge',
            name='mdns_id',
            field=models.CharField(help_text="mDNS ID used by the TiltBridge to identify itself both on your network and to Fermentrack. NOTE - Prefix only - do not include '.local'", max_length=64, primary_key=True, serialize=False, validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z0-9]+$')]),
        ),
        # Second pass sets a concrete default so existing rows get a value.
        migrations.AlterField(
            model_name='tiltbridge',
            name='mdns_id',
            field=models.CharField(default='tiltbridge', help_text="mDNS ID used by the TiltBridge to identify itself both on your network and to Fermentrack. NOTE - Prefix only - do not include '.local'", max_length=64, primary_key=True, serialize=False),
            preserve_default=False,
        ),
    ]
| 1.859375 | 2 |
tiaApp/models.py | cxcarvaj/DjangoServer | 0 | 12761733 | from django.db import models
class DepartmentsF(models.Model):
    """Lookup table of store departments (maps an existing DB table)."""

    department_id = models.AutoField(primary_key=True)
    department = models.CharField(max_length=100)

    class Meta:
        managed = False  # table exists already; Django must not migrate it
        db_table = 'departments_f'
class OrdersF(models.Model):
    """A customer order (maps an existing DB table)."""

    order_id = models.AutoField(primary_key=True)
    order_hour_of_day = models.PositiveIntegerField(blank=True, null=True)

    class Meta:
        managed = False  # unmanaged: schema owned by the external DB
        db_table = 'orders_f'
class ProductsF(models.Model):
    """A sellable product belonging to a department (maps an existing DB table)."""

    product_id = models.AutoField(primary_key=True)
    product_name = models.CharField(max_length=200)
    department = models.ForeignKey(DepartmentsF, models.DO_NOTHING)
    price = models.FloatField()
    margin = models.FloatField()

    class Meta:
        managed = False  # unmanaged: schema owned by the external DB
        db_table = 'products_f'
class OrderProductsF(models.Model):
    """Junction table linking orders to their products (maps an existing table)."""

    # NOTE(review): Django lacks composite primary keys, so only `order` is
    # declared as the PK although (order, product) looks like the real key —
    # confirm uniqueness is enforced at the database level.
    order = models.ForeignKey('OrdersF', models.DO_NOTHING, blank=True, null=False, primary_key=True)
    product = models.ForeignKey('ProductsF', models.DO_NOTHING, blank=True, null=False)
    quantity = models.PositiveIntegerField(blank=True, null=True)

    class Meta:
        managed = False  # unmanaged: schema owned by the external DB
        db_table = 'order_products_f'
get_trending.py | CoderCYLee/CoderCYLee.github.io | 1 | 12761734 | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib import request
import os
import subprocess
import sys
import json
import yaml
import codecs
import requests
from collections import OrderedDict
from pyquery import PyQuery as pq
# the treading
url_str = 'http://trending.codehub-app.com/v2/trending/'
langs_str = 'https://raw.githubusercontent.com/github/linguist/master/lib/linguist/languages.yml'
foldername = "json"
filename = "trending.json"
# folder_path = "./" + foldername + "/"
file_path = "./" + filename
def git_pull():
    """Fetch and merge the latest changes from the remote."""
    print("prepare to do 'git pull'")
    proc = subprocess.Popen(['git', 'pull'], cwd="./")
    proc.wait()
def git_add():
    """Stage every change in the working tree."""
    print("prepare to do 'git add'")
    proc = subprocess.Popen(['git', 'add', '.'], cwd="./")
    proc.wait()
def git_commit():
    """Commit the staged changes with a fixed refresh message."""
    print("prepare to do 'git commit'")
    centext = "'refresh git trending'"
    proc = subprocess.Popen(['git', 'commit', '-m', centext], cwd="./")
    proc.wait()
def git_push():
    """Push the current branch to origin/master."""
    print("prepare to do 'git push'")
    proc = subprocess.Popen(['git', 'push', '-u', 'origin', 'master'], cwd="./")
    proc.wait()
def file_handle():
    """Publish the refreshed data: pull, stage, commit, push — in that order."""
    for step in (git_pull, git_add, git_commit, git_push):
        step()
def url_open(url):
    """Fetch ``url`` and return the response body as bytes.

    A scheme is prepended when missing so bare hosts like "example.com"
    still resolve.
    """
    # Fix: the old check `'http' in url` matched the substring anywhere
    # (e.g. "example.com/http-page" got no scheme and urlopen failed);
    # test the actual prefix instead.
    if not url.startswith(('http://', 'https://')):
        url = 'http://' + url
    print('url is :' + url)
    req = request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:36.0) Gecko/20100101 Firefox/36.0')
    response = request.urlopen(req)
    return response.read()
def scrape(language, file_path):
    """Scrape github.com/trending/<language> and dump the repos as JSON.

    Writes a list of GitHub-API-shaped dicts (name, owner.login, star and
    fork counts, language, description) to ``file_path``.
    """
    HEADERS = {
        'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
        'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding' : 'gzip,deflate,sdch',
        'Accept-Language' : 'zh-CN,zh;q=0.8'
    }
    print("begin request")
    url = 'https://github.com/trending/{language}'.format(language=language)
    r = requests.get(url, headers=HEADERS)
    assert r.status_code == 200

    # Parse the trending list items out of the HTML.
    # NOTE(review): the selectors below depend on GitHub's page markup and
    # will silently yield nothing if the markup changes.
    d = pq(r.content)
    items = d('ol.repo-list li')

    # codecs to solve the problem utf-8 codec like chinese
    with codecs.open(file_path, "w", encoding='utf-8') as f:
        arr = []
        for item in items:
            i = pq(item)
            title = i("h3 a").text()
            language = i("div.f6 span[itemprop='programmingLanguage']").text()
            star = i("div.f6 svg.octicon-star").closest("a").text()
            fork = i("div.f6 svg.octicon-repo-forked").closest("a").text()
            description = i("p.col-9").text()
            # href is "/<login>/<name>"; split it into its parts.
            hrefurl = i("h3 a").attr("href")
            urllist = hrefurl.split('/')
            login = urllist[1]
            name = urllist[2]
            full_name = login + '/' + name
            url = "https://github.com" + hrefurl
            # ownerImg = i("p.repo-list-meta a img").attr("src")
            # print(ownerImg)
            data = {}
            data["name"] = name
            data["owner"] = {
                "login":login
            }
            data["full_name"] = full_name
            # Counts come formatted like "1,234"; store them comma-free.
            data["forks_count"] = fork.replace(',', '')
            data["stargazers_count"] = star.replace(',', '')
            if (len(language) > 0):
                data["language"] = language
            data["repositoryDescription"] = description
            arr.append(data)
        print("get json data, ready write to file 'trending.json'")
        f.write(json.dumps(arr, indent=4, ensure_ascii=False))
def save_file():
    # Scrape the overall trending page, refresh colors.json, then git-publish.
    scrape("", file_path)
    getColors()
    file_handle()
def trending():
    """Entry point: prepare the output folder and refresh all data files."""
    if not os.path.exists(foldername):
        os.mkdir(foldername)
        print('create folder success', foldername)
    # NOTE(review): chdir changes the process CWD, so the relative
    # file_path written by save_file() lands inside `foldername` — confirm
    # that is intended before reusing these helpers elsewhere.
    os.chdir(foldername)
    folder_top = os.getcwd()
    print(folder_top)
    save_file()
# ----------------------------------------------
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python Ordered Dictionary.
    """
    # Subclass the given loader so the mapping constructor override does
    # not leak into the global yaml.Loader.
    class OrderedLoader(Loader):
        pass
    # Build every YAML mapping through object_pairs_hook (OrderedDict by
    # default) instead of a plain dict, preserving key order.
    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        lambda loader, node: object_pairs_hook(loader.construct_pairs(node)))
    return yaml.load(stream, OrderedLoader)
def order_by_keys(dict):
    """
    Sort a dictionary by keys, case insensitive ie [ Ada, eC, Fortran ]
    Default ordering, or using json.dump with sort_keys=True, produces
    [ Ada, Fortran, eC ]
    """
    # The module already imports OrderedDict at the top; the previous
    # function-local re-import was redundant.
    # NOTE: the parameter is named `dict` (shadowing the builtin); kept
    # for interface compatibility.
    return OrderedDict(sorted(dict.items(), key=lambda s: s[0].lower()))
def getFile(url):
    """
    Return the URL body (bytes), or False if the server answers non-200.
    Exits the process with a diagnostic on any request failure.

    Keyword arguments:
    url -- url to parse
    """
    try:
        r = request.urlopen(url)
    except Exception:
        # A bare ``except:`` also trapped KeyboardInterrupt/SystemExit;
        # Exception keeps Ctrl-C working while still catching URLError,
        # HTTPError and friends.
        sys.exit("Request fatal error : %s" % sys.exc_info()[1])
    if r.status != 200:
        return False
    return r.read()
def write_json(text, filename='colors.json'):
    """
    Serialize *text* (a dictionary) to *filename* as indented JSON,
    terminated by a single trailing newline.
    """
    payload = json.dumps(text, indent=4) + '\n'
    with open(filename, 'w') as f:
        f.write(payload)
def getColors():
    """Download GitHub's languages.yml (from module-level ``langs_str``),
    extract per-language colors and trending URLs, and write them to
    colors.json via write_json()."""
    print("geting list of language")
    yml = getFile(langs_str)
    langs_yml = ordered_load(yml)
    langs_yml = order_by_keys(langs_yml)
    # List construction done, count keys
    lang_count = len(langs_yml)
    print("Found %d languages" % lang_count)
    # Construct the wanted list
    langs = OrderedDict()
    for lang in langs_yml.keys():
        # Keep a language when it has no "type", declares a color, or is a
        # programming language; typed data/markup entries without a color
        # are skipped.
        if ("type" not in langs_yml[lang] or
            "color" in langs_yml[lang] or
                langs_yml[lang]["type"] == "programming"):
            print(" Parsing the color for '%s' ..." % (lang))
            langs[lang] = OrderedDict()
            # A color may legitimately be missing; store None in that case.
            langs[lang]["color"] = langs_yml[lang]["color"] if "color" in langs_yml[lang] else None
            langs[lang]["url"] = "https://github.com/trending?l=" + (
                langs_yml[lang]["search_term"] if "search_term" in langs_yml[lang] else lang)
            # GitHub trending slugs use '-' for spaces and 'sharp' for '#'.
            langs[lang]["url"] = langs[lang]["url"].replace(' ', '-').replace('#', 'sharp')
    print("Writing a new JSON file ...")
    write_json(langs)
    print("All done!")
if __name__ == '__main__':
    # Script entry point: scrape GitHub trending and refresh the JSON files.
    trending()
| 2.46875 | 2 |
loader/models.py | alvinMemphis/greenmill_backend | 0 | 12761735 | <reponame>alvinMemphis/greenmill_backend<gh_stars>0
from django.db import models
from hubmanager.models import HubManager, LogicHub
from person.models import GreenUser
# Create your models here.
class HubLoader(models.Model):
    """Profile tying a GreenUser account to the HubManager and LogicHub
    it loads for."""

    # One loader profile per user account.
    user = models.OneToOneField(GreenUser, on_delete=models.CASCADE, related_name="greenloaders")
    # Manager this loader reports to; deleting the manager removes the loader.
    manager = models.ForeignKey(HubManager, on_delete=models.CASCADE, related_name="hmanagers")
    # Hub assignment; nullable, defaults to pk=1 -- NOTE(review): confirm a
    # LogicHub with pk=1 is guaranteed to exist (fixture or data migration).
    his_hub = models.ForeignKey(LogicHub, on_delete=models.CASCADE, null=True, related_name="hubs", default=1)

    def __str__(self):
        # Human-readable label for the admin/shell.
        return "Loader: " + self.user.user_name
scripts/0_downloadData.py | shadrackkiprotich/MachineLearningSamples-ImageClassificationUsingCntk | 4 | 12761736 | <filename>scripts/0_downloadData.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import sys, os
sys.path.append(".")
sys.path.append("..")
sys.path.append("libraries")
sys.path.append("../libraries")
from helpers import *
from PARAMETERS import *
#locals().update(importlib.import_module("PARAMETERS").__dict__)
####################################
# Parameter
####################################
downloadTopNImages = sys.maxsize #set to e.g. 50 to only download the first 50 of the 428 images
# NOTE(review): downloadTopNImages is never referenced below -- the loop
# always walks the full url list; confirm whether a cap was intended.
maxSize = 1000  # images wider/taller than this many pixels are shrunk

####################################
# Main
####################################
# rootDir / imgOrigDir / imgUrlsPath come from PARAMETERS (star-imported above).
makeDirectory(rootDir)
makeDirectory(imgOrigDir)
print("Directory used to read and write model/image files: " + rootDir)
amlLogger = getAmlLogger()
if amlLogger != []:
    amlLogger.log("amlrealworld.ImageClassificationUsingCntk.0_downloadData", "true")

# Read image urls -- fall back to the parent dir when run from scripts/.
if os.path.exists(imgUrlsPath):
    imgUrls = readTable(imgUrlsPath)
else:
    imgUrls = readTable("../" + imgUrlsPath)
imgUrls = randomizeList(imgUrls)

# Download provided fashion images
counter = 0
for index, (label, url) in enumerate(imgUrls):
    # Skip image if was already downloaded
    outImgPath = pathJoin(imgOrigDir, label, str(index) + ".jpg")
    if pathExists(outImgPath):
        counter += 1
        continue

    # Download image
    print("Downloading image {} of {}: label={}, url={}".format(index, len(imgUrls), label, url))
    data = downloadFromUrl(url)
    if len(data) > 0:
        makeDirectory(pathJoin(imgOrigDir, label))
        writeBinaryFile(outImgPath, data)

    # Sanity check: delete image if it is corrupted
    # Otherwise, resize if above given pixel width/height
    try:
        img = imread(outImgPath)
        if max(imWidthHeight(img)) > maxSize:
            img, _ = imresizeMaxDim(img, maxSize)
            imwrite(img, outImgPath)
        counter += 1
    except:
        # NOTE(review): bare except also hides e.g. KeyboardInterrupt here;
        # kept as-is since any decode/resize failure is treated as corruption.
        print("Removing corrupted image {}, url={}".format(outImgPath, url))
        os.remove(outImgPath)
print("Successfully downloaded {} of the {} image urls.".format(counter, len(imgUrls)))
print("DONE.")
| 2.828125 | 3 |
saleor/menu/migrations/0009_remove_menu_json_content.py | elwoodxblues/saleor | 15,337 | 12761737 | # Generated by Django 2.0.8 on 2018-09-13 13:38
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the legacy ``json_content`` field from Menu -- presumably after
    its data was migrated into ``json_content_new`` in 0008 (see the
    dependency below); confirm against migration 0008 before squashing."""

    dependencies = [("menu", "0008_menu_json_content_new")]

    operations = [migrations.RemoveField(model_name="menu", name="json_content")]
| 1.359375 | 1 |
src/tools/_predict.py | TensorFX/tensorfx | 204 | 12761738 | # Copyright 2016 TensorLab. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# _predict.py
# Implements PredictCommand.
import json
import os
import sys
import tensorflow as tf
import tensorfx as tfx
class PredictCommand(object):
  """Implements the tfx predict command to use a model to produce predictions.

  Reads instances from --input (or stdin), runs them through a previously
  trained model in batches of --batch-size, and writes one JSON prediction
  per line to --output (or stdout).
  """
  name = 'predict'
  help = 'Produces predictions using a model.'
  extra = False

  @staticmethod
  def build_parser(parser):
    """Registers the predict sub-command's arguments on *parser*."""
    parser.add_argument('--model', metavar='path', type=str, required=True,
                        help='The path to a previously trained model.')
    parser.add_argument('--input', metavar='path', type=str,
                        help='The path to a file with input instances. Uses stdin by default.')
    parser.add_argument('--output', metavar='path', type=str,
                        help='The path to a file to write outputs to. Uses stdout by default.')
    parser.add_argument('--batch-size', metavar='instances', type=int, default=10,
                        help='The number of instances to predict per batch.')

  @staticmethod
  def run(args):
    """Loads the model and streams batched predictions from input to output."""
    # TODO: Figure out where to do JSON and TF initialization in more common way.
    json.encoder.FLOAT_REPR = lambda f: ('%.5f' % f)  # fixed 5-decimal floats in output
    tf.logging.set_verbosity(tf.logging.ERROR)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(tf.logging.ERROR)

    model = tfx.prediction.Model.load(args.model)
    with TextSource(args.input, args.batch_size) as source, TextSink(args.output) as sink:
      for instances in source:
        predictions = model.predict(instances)
        # One JSON object per prediction, keys sorted for stable output.
        lines = map(lambda p: json.dumps(p, sort_keys=True), predictions)
        sink.write(lines)
class TextSource(object):
  """Iterable source of instance batches read from a file or stdin.

  Yields lists of stripped lines, at most *batch_size* per batch.
  """

  def __init__(self, file=None, batch_size=1):
    # file: path to read; None means stdin (which is never closed).
    self._file = file
    self._batch_size = batch_size

  def __enter__(self):
    self._stream = open(self._file, 'r') if self._file else sys.stdin
    return self

  def __exit__(self, type, value, traceback):
    # Only close streams we opened ourselves; never close stdin.
    if self._stream and self._file:
      self._stream.close()

  def __iter__(self):
    instances = []
    while True:
      line = self._stream.readline()
      if not line:
        # True EOF (readline returns ''). BUGFIX: a blank line used to be
        # stripped to '' and mistaken for EOF, silently truncating input.
        break
      instance = line.strip()
      if not instance:
        # Skip blank lines instead of stopping.
        continue
      instances.append(instance)
      if len(instances) == self._batch_size:
        # A desired batch of instances is available
        yield instances
        instances = []
    if instances:
      yield instances
class TextSink(object):
  """Line-oriented sink writing to a file or, by default, stdout."""

  def __init__(self, file=None):
    self._file = file

  def __enter__(self):
    self._stream = open(self._file, 'w') if self._file else sys.stdout
    return self

  def __exit__(self, type, value, traceback):
    # Close only file streams we opened; leave stdout untouched.
    if self._stream and self._file:
      self._stream.close()

  def write(self, lines):
    # One writelines call instead of a per-line write loop.
    self._stream.writelines(line + '\n' for line in lines)
| 2.296875 | 2 |
4_main.py | yafun92386/Thesis_Code | 0 | 12761739 | import os, torch, argparse
from torch import optim
from torch.utils.data import DataLoader
from pytorch_pretrained_bert import BertTokenizer
from _preprocessing import Vocabulary, DRCDDataset
from _preprocessing import build_emb, create_bert_batch, create_jieba_batch
from _model import EncoderRNN, BertEncoder
from _model import DecoderRNN, LuongAttnDecoderRNN
from _train import trainEpochs
from _evaluation import train_evaluation, test_evaluation
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
def main(args):
    """End-to-end driver: build the data pipeline and seq2seq model,
    optionally restore checkpoints, then train and/or evaluate.

    args -- argparse.Namespace built in __main__ below; args.data_mode
    selects the jieba word-vector pipeline or the BERT pipeline.
    """
    print("Data preprocessing ...")
    if args.data_mode == "jieba":
        vocab = Vocabulary(args.data_set, args.vec_min)
        tokenizer = None
        data_transformer = DRCDDataset(args.data_set, args.data_sel, args.data_mode, args.with_ans, vocab, tokenizer)
        data_loader = DataLoader(data_transformer, batch_size=args.batch_size, shuffle=True, collate_fn=create_jieba_batch)
        embedding = build_emb(args.save_dir, args.vec_path, vocab, args.emb_size, args.loadEmbedding)
        embedding = embedding.to(device)
    elif args.data_mode == "bert":
        vocab = None
        tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
        data_transformer = DRCDDataset(args.data_set, args.data_sel, args.data_mode, args.with_ans, vocab, tokenizer)
        data_loader = DataLoader(data_transformer, batch_size=args.batch_size, shuffle=True, collate_fn=create_bert_batch)
        # BERT supplies its own embedding table (taken from the encoder below).
        embedding = None

    print('Building encoder and decoder ...')
    if args.data_mode == "jieba":
        encoder = EncoderRNN(embedding, args.hidden_size, args.transfer_layer, args.encoder_n_layers, args.dropout)
        vocab_size = vocab.num_words
    elif args.data_mode == "bert":
        encoder = BertEncoder(args.transfer_layer)
        embedding = encoder.embedding
        vocab_size = encoder.vocab_size
    if args.attn_model == 'none':
        decoder = DecoderRNN(embedding, args.hidden_size, vocab_size, args.decoder_n_layers, args.dropout)
    else:
        decoder = LuongAttnDecoderRNN(args.attn_model, embedding, args.hidden_size, vocab_size, args.decoder_n_layers, args.dropout)

    # Load model if a loadFilename is provided
    if args.loadEncoder:
        print("Loading pretrained Encoder ...")
        checkpoint = torch.load(args.loadEncoder)
        prencoder_sd = checkpoint['en']
        encoder_sd = encoder.state_dict()
        # NOTE(review): this comprehension iterates encoder_sd (the *current*
        # weights) and keeps its values, so the subsequent update() is a
        # no-op -- the checkpoint weights are never copied in.  The intended
        # partial restore is likely {k: v for k, v in prencoder_sd.items()
        # if k in encoder_sd}.  Flagged only; fixing would change training
        # results.
        prencoder_sd = {k: v for k, v in encoder_sd.items() if k in prencoder_sd}
        encoder_sd.update(prencoder_sd)
        encoder.load_state_dict(encoder_sd)
        if args.fixed_enc:
            # Freeze everything except the output projection.
            for param in encoder.parameters():
                param.requires_grad = False
            encoder.out.weight.requires_grad = True
            encoder.out.bias.requires_grad = True
    if args.loadDecoder:
        print("Loading pretrained Decoder ...")
        checkpoint = torch.load(args.loadDecoder)
        decoder_sd = checkpoint['de']
        decoder.load_state_dict(decoder_sd)
    if args.loadFilename:
        print("Loading pretrained Model ...")
        checkpoint = torch.load(args.loadFilename)
        encoder_sd = checkpoint['en']
        encoder.load_state_dict(encoder_sd)
        decoder_sd = checkpoint['de']
        decoder.load_state_dict(decoder_sd)

    # Use appropriate device
    encoder = encoder.to(device)
    decoder = decoder.to(device)
    # Ensure dropout layers are in train mode
    encoder.train()
    decoder.train()

    if args.training_flag:
        print('Building optimizers ...')
        if args.fixed_enc:
            encoder_optimizer = optim.Adam(filter(lambda p: p.requires_grad, encoder.parameters()), lr=args.encoder_op_lr)
        else:
            encoder_optimizer = optim.Adam(encoder.parameters(), lr=args.encoder_op_lr)
        decoder_optimizer = optim.Adam(decoder.parameters(), lr=args.decoder_op_lr)
        if args.loadEncoder:
            checkpoint = torch.load(args.loadEncoder)
            prencoder_optimizer_sd = checkpoint['en_opt']
            encoder_optimizer_sd = encoder_optimizer.state_dict()
            # NOTE(review): same reversed-merge pattern as the weight restore
            # above -- the checkpoint optimizer state is effectively ignored.
            prencoder_optimizer_sd = {k: v for k, v in encoder_optimizer_sd.items() if k in prencoder_optimizer_sd}
            encoder_optimizer_sd.update(prencoder_optimizer_sd)
            encoder_optimizer.load_state_dict(encoder_optimizer_sd)
        if args.loadDecoder:
            checkpoint = torch.load(args.loadDecoder)
            decoder_optimizer_sd = checkpoint['de_opt']
            decoder_optimizer.load_state_dict(decoder_optimizer_sd)
        if args.loadFilename:
            checkpoint = torch.load(args.loadFilename)
            prencoder_optimizer_sd = checkpoint['en_opt']
            encoder_optimizer_sd = encoder_optimizer.state_dict()
            prencoder_optimizer_sd = {k: v for k, v in encoder_optimizer_sd.items() if k in prencoder_optimizer_sd}
            encoder_optimizer_sd.update(prencoder_optimizer_sd)
            encoder_optimizer.load_state_dict(encoder_optimizer_sd)
            decoder_optimizer_sd = checkpoint['de_opt']
            decoder_optimizer.load_state_dict(decoder_optimizer_sd)

        # If you have cuda, configure cuda to call
        # (restored optimizer tensors come back on CPU; move them to GPU)
        for state in encoder_optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.cuda()
        for state in decoder_optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.cuda()

        print("Starting training!")
        trainEpochs(args.save_dir, args.data_mode, data_loader, vocab, tokenizer,
                    args.attn_model, encoder, decoder, encoder_optimizer, decoder_optimizer, args.it_percent,
                    args.checkpoint_epoch, args.num_epochs, args.teacher_forcing_ratio, args.data_sel)

    # Set dropout layers to eval mode
    encoder.eval()
    decoder.eval()

    if args.dev_flag:
        dev_transformer = DRCDDataset(args.dev_set, args.data_sel, args.data_mode, args.with_ans, vocab, tokenizer)
        if args.data_mode == "jieba":
            dev_loader = DataLoader(dev_transformer, batch_size=args.batch_size, shuffle=True, collate_fn=create_jieba_batch)
        elif args.data_mode == "bert":
            dev_loader = DataLoader(dev_transformer, batch_size=args.batch_size, shuffle=True , collate_fn=create_bert_batch)
        print("Starting evaluation!")
        test_evaluation(args.eval_flag, args.data_mode, args.data_sel, dev_loader, args.attn_model, vocab, tokenizer,
                        encoder, decoder, args.max_length, args.save_dir, args.checkpoint_epoch)
    if args.eval_flag:
        eval_transformer = DRCDDataset(args.eval_set, args.data_sel, args.data_mode, args.with_ans, vocab, tokenizer)
        if args.data_mode == "jieba":
            eval_loader = DataLoader(eval_transformer, batch_size=args.batch_size, shuffle=False, collate_fn=create_jieba_batch)
        elif args.data_mode == "bert":
            eval_loader = DataLoader(eval_transformer, batch_size=args.batch_size, shuffle=False, collate_fn=create_bert_batch)
        print("Starting evaluation!")
        test_evaluation(args.eval_flag, args.data_mode, args.data_sel, eval_loader, args.attn_model, vocab, tokenizer,
                        encoder, decoder, args.max_length, args.save_dir, args.checkpoint_epoch)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# for data
parser.add_argument('--vec_min', type=int, default=5)
parser.add_argument('--vec_path', type=str, default='cc.zh.300.vec') #cc.zh.300.vec
parser.add_argument('--emb_size', type=int, default=300)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--data_set', type=str, default="sS200_train")
parser.add_argument('--dev_set', type=str, default="sS200_dev")
parser.add_argument('--data_sel', type=str, default="sq")
parser.add_argument('--data_mode', type=str, default="bert")
# for model
parser.add_argument('--model_name', type=str, default='BERT_model') # ignore
parser.add_argument('--attn_model', type=str, default='general') #dot #general #concat
parser.add_argument('--with_ans', type=bool, default=False)
parser.add_argument('--fixed_enc', type=bool, default=False)
parser.add_argument('--transfer_layer', type=bool, default=False)
parser.add_argument('--hidden_size', type=int, default=300)
parser.add_argument('--encoder_n_layers', type=int, default=1)
parser.add_argument('--decoder_n_layers', type=int, default=1)
parser.add_argument('--dropout', type=float, default=0.1)
# for training
parser.add_argument('--training_flag', type=bool, default=True)
parser.add_argument('--it_percent', type=int, default=0.5)
parser.add_argument('--teacher_forcing_ratio', type=float, default=0.5)
parser.add_argument('--encoder_op_lr', type=float, default=1e-4)
parser.add_argument('--decoder_op_lr', type=float, default=1e-4)
parser.add_argument('--num_epochs', type=int, default=300)
# for loading
save_dir = 'BERT_SQ'
check_epoch = 0
encoder_epoch = 0
decoder_epoch = 0
loadFilename = os.path.join(save_dir, 'E{}_checkpoint.tar'.format(check_epoch))
loadEncodername = os.path.join(save_dir, 'SS_E{}_checkpoint.tar'.format(encoder_epoch))
loadDecodername = os.path.join(save_dir, 'QQ_E{}_checkpoint.tar'.format(decoder_epoch))
loadEmbedding = os.path.join(save_dir, 'emb_matrix.tar')
if not os.path.exists(loadFilename):
loadFilename = None
if not os.path.exists(loadEncodername):
loadEncodername = None
if not os.path.exists(loadDecodername):
loadDecodername = None
if not os.path.exists(loadEmbedding):
loadEmbedding = None
parser.add_argument('--loadFilename', default=loadFilename)
parser.add_argument('--loadEncoder', default=loadEncodername)
parser.add_argument('--loadDecoder', default=loadDecodername)
parser.add_argument('--loadEmbedding', default=loadEmbedding)
parser.add_argument('--save_dir', type=str, default=save_dir)
parser.add_argument('--checkpoint_epoch', type=int, default=check_epoch)
# for evaluation
parser.add_argument('--dev_flag', type=bool, default=False)
parser.add_argument('--eval_flag', type=bool, default=False)
parser.add_argument('--eval_set', type=str, default="sQ30_test")
parser.add_argument('--max_length', type=int, default=50)
args = parser.parse_args(args=[])
print("[["+args.model_name+"]]")
main(args)
| 2.375 | 2 |
about.py | Geekid812/discord-rpc-customizer | 0 | 12761740 | from PyQt5 import QtCore, QtGui, QtWidgets
version = '1.0.0'  # shown in the dialog's info label


class AboutDialog(object):
    """Qt-Designer-style UI definition for the "About" dialog.

    Instantiate, then call setupUi(dialog) with a QDialog; the dialog is
    fixed at 390x110 px.  Generated-code structure is preserved on purpose.
    """
    def setupUi(self, Dialog):
        """Create and position the labels and the repository button."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(390, 110)
        Dialog.setFixedSize(390, 110)
        self.appNameLabel = QtWidgets.QLabel(Dialog)
        self.appNameLabel.setGeometry(QtCore.QRect(10, 10, 381, 20))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.appNameLabel.setFont(font)
        self.appNameLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.appNameLabel.setObjectName("appNameLabel")
        self.appInfoLabel = QtWidgets.QLabel(Dialog)
        self.appInfoLabel.setGeometry(QtCore.QRect(0, 40, 391, 20))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.appInfoLabel.setFont(font)
        self.appInfoLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.appInfoLabel.setObjectName("appInfoLabel")
        self.repoButton = QtWidgets.QPushButton(Dialog)
        self.repoButton.setGeometry(QtCore.QRect(150, 70, 101, 23))
        self.repoButton.setObjectName("repoButton")

        self.retranslateUi(Dialog)
        self.bind_signals()
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set all user-visible strings (kept separate for translatability)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "About Discord RPC"))
        self.appNameLabel.setText(_translate("Dialog", "Discord Rich Presence Customizer"))
        self.appInfoLabel.setText(_translate("Dialog", "Made by Geekid812 - Version " + version))
        self.repoButton.setText(_translate("Dialog", "GitHub Repository"))

    def bind_signals(self):
        """Open the project page in the default browser on button click."""
        self.repoButton.clicked.connect(lambda: QtGui.QDesktopServices.openUrl(QtCore.QUrl("https://github.com/geekid812/discord-rpc-customizer")))
| 2.46875 | 2 |
RS-recall/swing.py | LianShuaiLong/Codebook | 0 | 12761741 | <filename>RS-recall/swing.py
#swing召回算法,代码借鉴 https://blog.csdn.net/Gamer_gyt/article/details/115678598
import json
import argparse
import pandas as pd
from itertools import combinations
from collections import defaultdict
import os
alpha = 0.5
topk=20
def parse_parser():
    """Build and parse the command-line options for the Swing recall job."""
    cli = argparse.ArgumentParser()
    for flag, default in (
        ('--train_data_path', './swing-data/ml-100k/ua.base'),
        ('--test_data_path', './swing-data/ml-100k/ua.test'),
        ('--model_path', './swing-data/swing.json'),
    ):
        cli.add_argument(flag, type=str, default=default)
    return cli.parse_args()
def load_data(train_data_path: str, test_data_path: str):
    """Load the tab-separated, headerless rating files.

    Returns (train_df, test_df) with columns
    userid / movieid / rate / EventTimeStamp.
    """
    columns = ['userid', 'movieid', 'rate', 'EventTimeStamp']
    train_data = pd.read_csv(train_data_path, sep='\t', engine='python', names=columns)
    test_data = pd.read_csv(test_data_path, sep='\t', engine='python', names=columns)
    print('train data example:\n', train_data.head(5))
    print('test data example:\n', test_data.head(5))
    return train_data, test_data
def get_iusers_uitems(train_data):
    """Build the inverted indexes used by Swing.

    Returns (i_users, u_items) where i_users maps movieid -> set of userids
    and u_items maps userid -> set of movieids (keys and members as str).
    """
    i_users = defaultdict(set)
    u_items = defaultdict(set)
    # Iterating the two columns directly is far faster than
    # DataFrame.iterrows(), which materialises a Series object per row.
    for userid, movieid in zip(train_data['userid'], train_data['movieid']):
        i_users[str(movieid)].add(str(userid))
        u_items[str(userid)].add(str(movieid))
    print('item个数:', len(i_users.keys()))
    print('user个数:', len(u_items.keys()))
    return i_users, u_items
def train(i_users: dict, u_items: dict):
    """Compute Swing item-item similarities.

    For every item pair, sums 1/(alpha + |co-clicked items|) over all pairs
    of users that interacted with both items, then keeps the ``topk`` most
    similar neighbours per item (alpha/topk are module-level constants).
    Complexity is O(|items|^2 * |common users|^2) -- fine only at ml-100k
    scale.  Pairs with no common users are kept with similarity 0, matching
    the original output shape.
    """
    item_pairs = list(combinations(i_users.keys(), r=2))
    print('item pairs:\n', len(item_pairs))
    item_sim_dict = defaultdict(dict)
    item_pair = 0
    for item_i, item_j in item_pairs:
        print('当前item pair:', item_pair)
        result = 0
        common_users = i_users[item_i] & i_users[item_j]
        user_pairs = list(combinations(common_users, r=2))
        for user_i, user_j in user_pairs:
            result += 1 / (alpha + len(u_items[user_i] & u_items[user_j]))
        item_sim_dict[item_i][item_j] = result
        item_pair += 1
    # Keep only the topk strongest neighbours per item.  BUGFIX(readability):
    # the sort key's lambda previously shadowed the loop variable ``k``.
    for k, v in item_sim_dict.items():
        item_sim_dict[k] = dict(sorted(item_sim_dict[k].items(), key=lambda kv: kv[1], reverse=True)[:topk])
    return item_sim_dict
def save_model(item_sim_dict: dict, model_path: str):
    """Persist the similarity table to *model_path* as indented JSON."""
    serialized = json.dumps(item_sim_dict, ensure_ascii=True, indent=4)
    with open(model_path, 'w') as f:
        f.write(serialized)
    print('模型{}-{}保存完成'.format(model_path, topk))
if __name__=='__main__':
    # Pipeline: parse CLI -> load ratings -> build inverted indexes ->
    # compute Swing similarities -> persist top-k neighbours as JSON.
    args = parse_parser()
    train_data_path = args.train_data_path
    test_data_path = args.test_data_path
    model_path = args.model_path
    train_data,test_data = load_data(train_data_path,test_data_path)
    # NOTE(review): test_data is loaded but never evaluated against here.
    i_users,u_items = get_iusers_uitems(train_data=train_data)
    item_sim_dict = train(i_users=i_users,u_items=u_items)
    save_model(item_sim_dict=item_sim_dict,model_path=model_path)
| 2.90625 | 3 |
lib/__init__.py | rubbieKelvin/courier | 9 | 12761742 | import os
import sys
import socket
import logging
from uuid import uuid4
from datetime import datetime
from .paths import Path
from PySide2.QtCore import QStandardPaths
PORT = 8977  # TCP port Courier listens on
LOG_TO_FILE = False  # when True, logger also routes records through logging
# True when running from a frozen bundle (PyInstaller sets sys.frozen).
RUNNING_BUNDLE = getattr(sys, 'frozen', False)

# Log file lives in the OS temp dir and is truncated on every start.
# noinspection SpellCheckingInspection
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(name)-8s %(levelname)-8s %(message)s',
    datefmt='%m-%d %H:%M',
    filename=os.path.join(QStandardPaths.writableLocation(QStandardPaths.TempLocation), 'courier.log'),
    filemode="w")
def is_valid_ip(ip: str) -> bool:
    """Return True when *ip* is a string that inet_aton accepts as IPv4."""
    try:
        socket.inet_aton(ip)
    except socket.error:
        return False
    return True
# noinspection PyPep8Naming
class logger:
    """Thin logging facade: echoes records to stdout when running from
    source (not a frozen bundle) and, when LOG_TO_FILE is set, forwards
    them to the stdlib logging function passed as *mode*."""

    @staticmethod
    def _log(*args, mode=logging.info):
        # Derive the printed level tag from the logging function used.
        MODE = "INFO"
        if mode == logging.debug:
            MODE = "DEBUG"
        elif mode == logging.error:
            MODE = "ERROR"
        elif mode == logging.warning:
            # BUGFIX: warn() below passes logging.warning, but this branch
            # previously compared against logging.warn -- a distinct,
            # deprecated wrapper function -- so warnings printed "INFO:".
            MODE = "WARN"

        if not RUNNING_BUNDLE:
            print(f"{MODE}: ", *args)
        if LOG_TO_FILE:
            mode(" ".join([str(i) for i in args]))

    @staticmethod
    def log(*args):
        logger._log(*args)

    @staticmethod
    def debug(*args):
        logger._log(*args, mode=logging.debug)

    @staticmethod
    def error(*args):
        logger._log(*args, mode=logging.error)

    @staticmethod
    def warn(*args):
        logger._log(*args, mode=logging.warning)
def getUniqueId() -> str:
    """ creates a unique id for this device.
    the id will be used for unique identification in chats.
    if there's no unique id, a new one will be created
    """
    filedir = Path().UUID_FILE
    if os.path.exists(filedir):
        # An id was generated previously; reuse it verbatim.
        with open(filedir) as file:
            return file.read()
    # First run: derive a fresh id and persist it for later sessions.
    fresh_id = f"{uuid4()}-{datetime.now()}"
    with open(filedir, "w") as file:
        file.write(fresh_id)
    return fresh_id
def username(name: str=None) -> str:
    """ returns the hostname if client has not set a username
    if client has a username, just return it then.
    if the name argument is passed, just set if as a new username
    """
    path = Path()
    filedir = path.USERNAME_FILE

    if not name:
        if os.path.exists(filedir):
            with open(filedir) as file:
                stored = file.read().splitlines(keepends=False)
            # BUGFIX: an empty/corrupt file previously raised IndexError on
            # [0]; now we fall through and regenerate the username instead.
            if stored:
                return stored[0]

    # create new (or explicitly requested) username and persist it
    username = name or socket.gethostname()
    with open(filedir, "w") as file:
        file.write(username)
    return username
| 2.53125 | 3 |
modules/func.py | harrypen1996/coding-task | 0 | 12761743 | import urllib.request as request
import json
wFactor = [11,10,9,8,7,6,5,4,3] ## Weighted array for wfCalc function
def identDig(inpt): ## Task 1 check if all digits are identical function
    """Return True when every element of *inpt* equals the first.

    Empty input returns True (vacuously identical) -- same as the original
    if/else form, since the generator never evaluates inpt[0] then.
    """
    # all() short-circuits on the first mismatch; returning it directly
    # replaces the redundant ``if ...: return True else: return False``.
    return all(i == inpt[0] for i in inpt)
def wfCalc(inpt): ## Task 2 check digit verification using weighted array function
    """Validate the 10-digit identifier *inpt* via its weighted check digit.

    The first nine digits are multiplied by the weights 11..3, summed, and
    the sum's remainder mod 12 is subtracted from 12.  The identifier is
    valid when that value equals the tenth digit, or when it is 11 and the
    tenth digit is 0.  (10 or 12 can never match a single digit, so such
    identifiers are simply invalid -- no special-casing needed.)
    """
    weights = [11, 10, 9, 8, 7, 6, 5, 4, 3]  # mirrors module-level wFactor
    # Sum directly instead of building an intermediate list first.
    total = sum(int(inpt[i]) * weights[i] for i in range(len(inpt) - 1))
    remainder = 12 - (total % 12)
    return remainder == int(inpt[9]) or (remainder == 11 and int(inpt[9]) == 0)
def importJson(): ## Task 3 import json file from URL function
    """Fetch the identifiers JSON from S3 and return the parsed array."""
    url = 'https://s3.amazonaws.com/cognisant-interview-resources/identifiers.json'
    with request.urlopen(url) as response:
        return json.loads(response.read())
| 3.0625 | 3 |
backend/models/promoCodes.py | salah-walid/Ecommerce-Shopping-App | 1 | 12761744 | from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
class promoCode(models.Model):
    """Discount code: a uniquely-titled code granting a non-negative promo
    value."""

    # Display name, unique across all codes.
    title = models.CharField(max_length=40, unique=True, blank=False, null=False)
    # The code string customers enter; not unique -- NOTE(review): confirm
    # whether duplicate code values across different titles are intended.
    code = models.CharField(max_length=40, blank=False, null=False)
    # Discount value (amount vs. percentage semantics not shown here); >= 0.
    promo = models.FloatField(default=0, validators=[MinValueValidator(0)])

    def __str__(self):
        return self.title
| 2.375 | 2 |
utils/spec_tokenizers.py | ArinaBelova/MASK_public | 0 | 12761745 | """
Copyright 2020 ICES, University of Manchester, Evenset Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#Code by <NAME>
import nltk
nltk.download('punkt')
from nltk.tokenize.util import align_tokens
from nltk.tokenize.treebank import TreebankWordTokenizer
import re
import tensorflow_hub as hub
#from bert.tokenization import FullTokenizer
import tensorflow as tf
# Eagerly-created TF1-style session -- NOTE(review): appears unused in this
# module; kept because other code may import it from here.
sess = tf.compat.v1.Session()
# Shared Treebank tokenizer instance reused by custom_word_tokenize().
_treebank_word_tokenizer = TreebankWordTokenizer()
def tokenize_to_seq(documents):
    """Tokenize annotated documents into (token, tag) sequences.

    documents -- iterable of dicts with "text", "id" and "tags"; each tag is
    a dict with character offsets "start"/"end" and a label "tag".
    Sequences are split at sentence-final punctuation (./?/!), and document
    boundaries also start a new sequence.  Tokens not fully covered by any
    tag span are labelled "O".
    """
    sequences = []
    sequence = []
    for doc in documents:
        # Flush trailing tokens of the previous document.
        if len(sequence) > 0:
            sequences.append(sequence)
            sequence = []
        text = doc["text"]
        file = doc["id"]  # unused; retained for its implicit "id"-present check
        # Normalise quote characters before tokenization so spans are
        # computed on the same text that is indexed below.
        text = text.replace("\"", "'")
        text = text.replace("`", "'")
        text = text.replace("``", "")
        text = text.replace("''", "")
        tokens = custom_span_tokenize(text)
        for token in tokens:
            token_txt = text[token[0]:token[1]]
            # A token takes a tag when it lies fully inside the tag's span;
            # if several spans cover it, the last matching tag wins.
            found = False
            for tag in doc["tags"]:
                if int(tag["start"]) <= token[0] and int(tag["end"]) >= token[1]:
                    token_tag = tag["tag"]
                    found = True
            if found == False:
                token_tag = "O"
            sequence.append((token_txt, token_tag))
            if token_txt == "." or token_txt == "?" or token_txt == "!":
                sequences.append(sequence)
                sequence = []
    if sequence:
        # BUGFIX: previously an empty trailing list was appended whenever
        # the input ended with sentence punctuation (or was empty).
        sequences.append(sequence)
    return sequences
def tokenize_fa(documents):
    """
    Tokenize raw texts into (token, "O") sequences.

    :param documents: list of plain-text strings
    :type documents: list
    Sequences are split at sentence-final punctuation (./?/!), and each
    input string also starts a new sequence.  Since there are no gold
    annotations here, every token gets the "O" label (the original
    ``found`` flag was dead code that always yielded "O").
    """
    sequences = []
    sequence = []
    for doc in documents:
        if len(sequence) > 0:
            sequences.append(sequence)
            sequence = []
        text = doc
        # Same quote normalisation as tokenize_to_seq().
        text = text.replace("\"", "'")
        text = text.replace("`", "'")
        text = text.replace("``", "")
        text = text.replace("''", "")
        tokens = custom_span_tokenize(text)
        for token in tokens:
            token_txt = text[token[0]:token[1]]
            sequence.append((token_txt, "O"))
            if token_txt == "." or token_txt == "?" or token_txt == "!":
                sequences.append(sequence)
                sequence = []
    if sequence:
        # BUGFIX: avoid appending an empty trailing sequence when the last
        # token was sentence-final punctuation (or the input was empty).
        sequences.append(sequence)
    return sequences
def custom_span_tokenize(text, language='english', preserve_line=True):
    """
    Returns a spans of tokens in text.

    :param text: text to split into words
    :param language: the model name in the Punkt corpus
    :type language: str
    :param preserve_line: An option to keep the preserve the sentence and not sentence tokenize it.
    :type preserve_line: bool

    NOTE(review): ``language`` and ``preserve_line`` are accepted but never
    forwarded -- custom_word_tokenize() below is called with its own
    defaults ('english', preserve_line=False).  Forwarding them would
    change existing behaviour, so this is only flagged here.
    """
    tokens = custom_word_tokenize(text)
    # align_tokens needs the original token text; undo the PTB-style quote
    # substitution first.
    tokens = ['"' if tok in ['``', "''"] else tok for tok in tokens]
    return align_tokens(tokens, text)
def custom_word_tokenize(text, language='english', preserve_line=False):
    """
    Return a tokenized copy of *text* using NLTK's recommended word
    tokenizer (TreebankWordTokenizer after Punkt sentence splitting),
    additionally splitting number-dash-word tokens like "10-year" into
    "10", "-", "year".

    :param text: text to split into words
    :type text: str
    :param language: the model name in the Punkt corpus
    :type language: str
    :param preserve_line: An option to keep the preserve the sentence and not sentence tokenize it.
    :type preserve_line: bool
    """
    tokens = []
    sentences = [text] if preserve_line else nltk.sent_tokenize(text, language)
    # Hoisted out of the loops: re.compile per token was wasted work.
    # BUGFIX: the class was previously [a-zA-z-]; the A-z range also matched
    # the punctuation characters [ \ ] ^ _ ` -- [a-zA-Z-] is the intent.
    number_dash_word = re.compile(r"(\d+)(-)([a-zA-Z-]+)")
    for sent in sentences:
        for token in _treebank_word_tokenizer.tokenize(sent):
            if "-" in token:
                g = number_dash_word.match(token)
                if g:
                    for group in g.groups():
                        tokens.append(group)
                else:
                    tokens.append(token)
            else:
                tokens.append(token)
    return tokens
def shape(self, word):
    """Return the orthographic shape of *word*: digits -> 'd', uppercase
    letters -> 'W', lowercase letters -> 'w', everything else unchanged.

    NOTE(review): defined at module level yet takes an unused ``self``;
    the parameter is kept so existing call sites keep working.
    """
    # Collect pieces and join once -- repeated string concatenation in the
    # loop was quadratic in the word length.
    parts = []
    for letter in word:
        if letter.isdigit():
            parts.append("d")
        elif letter.isalpha():
            parts.append("W" if letter.isupper() else "w")
        else:
            parts.append(letter)
    return "".join(parts)
| 2.515625 | 3 |
unit_tests/test_training_agents.py | ihopethiswillfi/ElegantRL-1 | 0 | 12761746 | <reponame>ihopethiswillfi/ElegantRL-1
"""
This script tests whether or not each of the agents is able to train, depending
on whether or not it receives an environment with a discrete or continuous
action space.
"""
import gym
import unittest
from elegantrl.agents import *
from elegantrl.train.config import Arguments
from elegantrl.envs.Gym import get_gym_env_args
from elegantrl.train.run import train_and_evaluate
class TestAgents(unittest.TestCase):
def setUp(self):
self.discrete_agents = [
AgentDQN,
AgentDuelingDQN,
AgentDoubleDQN,
AgentD3QN,
AgentDiscretePPO,
]
self.continuous_agents = [
AgentDDPG,
AgentTD3,
AgentSAC,
AgentModSAC,
AgentREDqSAC,
AgentPPO,
AgentPPO_H,
AgentSAC_H,
]
self.discrete_env_args = get_gym_env_args(
gym.make("LunarLander-v2"), if_print=False
)
self.continuous_env_args = get_gym_env_args(
gym.make("BipedalWalker-v3"), if_print=False
)
def test_should_create_arguments_for_each_agent(self):
for agent in self.discrete_agents:
Arguments(agent, env_func=gym.make, env_args=self.discrete_env_args)
for agent in self.continuous_agents:
Arguments(agent, env_func=gym.make, env_args=self.continuous_env_args)
def train_on(self, args: Arguments):
args.eval_times = 2**4
args.break_step = 1
train_and_evaluate(args)
def train_discrete(self, agent: AgentBase):
args = Arguments(agent, env_func=gym.make, env_args=self.discrete_env_args)
self.train_on(args)
def train_continuous(self, agent: AgentBase):
args = Arguments(agent, env_func=gym.make, env_args=self.continuous_env_args)
self.train_on(args)
# first, test discrete agents
def test_should_train_DQN_on_discrete_action_space(self):
self.train_discrete(AgentDQN)
def test_should_not_train_DQN_on_continuous_action_space(self):
self.assertRaises(Exception, self.train_continuous, AgentDQN)
def test_should_train_DuelingDQN_on_discrete_action_space(self):
self.train_discrete(AgentDuelingDQN)
def test_should_not_train_DuelingDQN_on_continuous_action_space(self):
self.assertRaises(Exception, self.train_continuous, AgentDuelingDQN)
def test_should_train_DoubleDQN_on_discrete_action_space(self):
self.train_discrete(AgentDoubleDQN)
def test_should_not_train_DoubleDQN_on_continuous_action_space(self):
self.assertRaises(Exception, self.train_continuous, AgentDoubleDQN)
def test_should_train_D3QN_on_discrete_action_space(self):
self.train_discrete(AgentD3QN)
def test_should_not_train_D3QN_on_continuous_action_space(self):
self.assertRaises(Exception, self.train_continuous, AgentD3QN)
    def test_should_train_DiscretePPO_on_discrete_action_space(self):
        """AgentDiscretePPO trains without error on the discrete env."""
        self.train_discrete(AgentDiscretePPO)
def test_should_not_train_DiscretePPO_on_continuous_action_space(self):
self.assertRaises(Exception, self.train_continuous, AgentDiscretePPO)
# next, test continuous agents
    def test_should_train_DDPG_on_continuous_action_space(self):
        """AgentDDPG trains without error on the continuous (BipedalWalker) env."""
        self.train_continuous(AgentDDPG)
def test_should_not_train_DDPG_on_discrete_action_space(self):
self.assertRaises(Exception, self.train_discrete, AgentDDPG)
    def test_should_train_TD3_on_continuous_action_space(self):
        """AgentTD3 trains without error on the continuous env."""
        self.train_continuous(AgentTD3)
def test_should_not_train_TD3_on_discrete_action_space(self):
self.assertRaises(Exception, self.train_discrete, AgentTD3)
    def test_should_train_SAC_on_continuous_action_space(self):
        """AgentSAC trains without error on the continuous env."""
        self.train_continuous(AgentSAC)
def test_should_not_train_SAC_on_discrete_action_space(self):
self.assertRaises(Exception, self.train_discrete, AgentSAC)
    def test_should_train_ModSAC_on_continuous_action_space(self):
        """AgentModSAC trains without error on the continuous env."""
        self.train_continuous(AgentModSAC)
def test_should_not_train_ModSAC_on_discrete_action_space(self):
self.assertRaises(Exception, self.train_discrete, AgentModSAC)
    def test_should_train_REDqSAC_on_continuous_action_space(self):
        """AgentREDqSAC trains without error on the continuous env."""
        self.train_continuous(AgentREDqSAC)
def test_should_not_train_REDqSAC_on_discrete_action_space(self):
self.assertRaises(Exception, self.train_discrete, AgentREDqSAC)
    def test_should_train_PPO_on_continuous_action_space(self):
        """AgentPPO trains without error on the continuous env."""
        self.train_continuous(AgentPPO)
def test_should_not_train_PPO_on_discrete_action_space(self):
self.assertRaises(Exception, self.train_discrete, AgentPPO)
    def test_should_train_PPO_H_on_continuous_action_space(self):
        """AgentPPO_H trains without error on the continuous env."""
        self.train_continuous(AgentPPO_H)
def test_should_not_train_PPO_H_on_discrete_action_space(self):
self.assertRaises(Exception, self.train_discrete, AgentPPO_H)
    def test_should_train_SAC_H_on_continuous_action_space(self):
        """AgentSAC_H trains without error on the continuous env."""
        self.train_continuous(AgentSAC_H)
def test_should_not_train_SAC_H_on_discrete_action_space(self):
self.assertRaises(Exception, self.train_discrete, AgentSAC_H)
if __name__ == "__main__":
unittest.main()
| 2.71875 | 3 |
src/sdios/api/sharing/driver.py | hyperqube-io/sdios-api-sdk | 0 | 12761747 | """SharingDriver class object"""
from typing import Optional
from sdios.api.base_driver import BaseDriver
from sdios.api.driver import APIDriver
from sdios.api.driver import APIResponse
from sdios.settings.urls import APICategory
class SharingDriver(BaseDriver):
    """Driver for the sharing endpoints: performs the network API calls."""

    _category = APICategory.SHARING

    def __init__(self, api_driver: APIDriver) -> None:
        """Create the driver.

        :param api_driver: Allows SharingDriver to communicate with SDI OS
        :type api_driver: APIDriver class object
        """
        super().__init__(api_driver)
        # Cached default user pk used by get_user_shared when none is given.
        self.user_pk: Optional[int] = None

    def clear(self) -> None:
        """Reset the cached pks."""
        self.user_pk = None

    def get_all_shared(self) -> APIResponse:
        """Return the response listing users/groups shared networks."""
        return self._get("network_list")

    def get_all_users_shared(self) -> APIResponse:
        """Return the response listing every user's shared networks."""
        return self._get("user_list")

    def get_user_shared(self, user_pk: int = None) -> APIResponse:
        """Return one user's shared networks.

        :param user_pk: Pk of user to look up shared networks.
            Defaults to the cached self.user_pk.
        :type user_pk: int
        """
        if user_pk is None:
            user_pk = self.user_pk
        return self._get("user_detail", {"pk": user_pk})

    def get_all_groups_shared(self) -> APIResponse:
        """Return the response listing every group's shared networks."""
        return self._get("group_list")

    def get_group_shared(self, group_pk: int) -> APIResponse:
        """Return one group's shared networks."""
        return self._get("group_detail", {"group_pk": group_pk})
| 2.765625 | 3 |
sources_non_forked/vim-visual-multi/test/tests/oO/commands.py | doitsu2014/vimrc | 2,083 | 12761748 | <filename>sources_non_forked/vim-visual-multi/test/tests/oO/commands.py<gh_stars>1000+
# insert CR, insert line above
# NOTE(review): the annotations below are inferred from standard vim
# keybindings and vim-visual-multi defaults — confirm against the plugin docs.
keys(':setf vim\<CR>jw')      # set buffer filetype to vim, then move cursor (j, w)
keys('4\<C-Down>')            # presumably VM: add cursors on the 4 lines below
keys('Ea')                    # end of WORD, enter insert mode after it
keys('\<CR>')                 # insert a carriage return at each cursor
keys('CARRYING OVER ')        # type literal text at every cursor
keys('\<Esc>A')               # back to normal mode, append at end of line
keys('\<CR>')                 # CR while appending -> break the line at EOL
keys('CR at EOL')             # type literal text
keys('\<Esc>k')               # normal mode, move up one line
keys('O')                     # open a new line above at each cursor
keys('above CR')              # type literal text on the opened line
keys('\<Esc>\<Esc>')          # leave insert mode, then exit VM
| 1.25 | 1 |
OutputAlert_module.py | ec500-software-engineering/exercise-1-modularity-JasonZ95 | 0 | 12761749 | <reponame>ec500-software-engineering/exercise-1-modularity-JasonZ95
# from AI_module import AI_module
def receive_basic_iuput_data(Singal_Loss, Shock_Alert, Oxygen_Supply, Fever, Hypotension, Hypertension):
    """Convert the raw monitor input flags into an alert-result mapping.

    Receives data from the input module; each truthy flag marks the
    corresponding alert as active.

    Bug fix: the original wrote the key 'Signal Loss' (with a space) instead
    of updating the pre-initialised 'Signal_Loss' entry, so a signal loss was
    never reported under the expected key and a stray 7th key was added.

    :param Singal_Loss: truthy when the sensor signal has been lost
    :param Shock_Alert: truthy when a shock alert fired
    :param Oxygen_Supply: truthy when the oxygen supply needs attention
    :param Fever: truthy when a fever is detected
    :param Hypotension: truthy when blood pressure is too low
    :param Hypertension: truthy when blood pressure is too high
    :return: dict mapping alert name -> bool (True means the alert fires)
    """
    # bool() normalises the flags so callers may pass any truthy value.
    return {
        'Signal_Loss': bool(Singal_Loss),
        'Shock_Alert': bool(Shock_Alert),
        'Oxygen_Supply': bool(Oxygen_Supply),
        'Fever': bool(Fever),
        'Hypotension': bool(Hypotension),
        'Hypertension': bool(Hypertension),
    }
# def send_basic_input_data(BasicResult, BasicData):
# Receive the result and show it on terminal or web page
# sentData = analyze(BasicResult)
# return sentData, BasicData
# def display_AI_iuput_data():
# # Recevie AI data from input module, then analyze it using some judge functions to generate boolean result
# # Paramter is boolean
# # If paramter is True, means it should be alerted, then add it to the array
# AI_module.AI_Module(Blood_oxygen, Blood_pressure, Pulses)
# print('blood pressure prediction:')
# print(pressure_predict_result)
# print('blood oxygen prediction:')
# print(oxygen_predict_result)
# print('Pulse_predict_result:')
# print(Pulse_predict_result)
# def send_AI_input_data(AIResult):
# Receive the result and show it on terminal or web page
# sentData = analyze(AIResult)
# return sentData
| 3.171875 | 3 |
Bound_Estimation/parameter_deviation_data_extractor.py | unoyan16/elec491 | 0 | 12761750 | import os
import pickle
import numpy as np
def deviation_from_actual_value(array):
    """
    Calculates standard deviation for the parameters.

    The deviation is taken across the iteration axis (axis 0), producing one
    std value per point/parameter combination.

    :param array: either (num_iters, num_points_in_sim, [n] params) or
        (num_iters, num_points_in_sim, [n*m] params)
    :return: np.ndarray of standard deviations with shape array.shape[1:]
    :raises ValueError: if array is not 3- or 4-dimensional
    """
    if array.ndim not in (3, 4):
        raise ValueError("Wrong num of dimensions")
    # Vectorised equivalent of looping over every element and calling
    # np.std on the slice across iterations.
    return np.std(array, axis=0)
def main():
    """Load the raw parameter arrays, compute their deviations, and save them."""
    # Directory of directory of file; path construction kept verbatim.
    directory_path = os.path.dirname(
        os.path.dirname(os.path.join(os.getcwd(), os.listdir(os.getcwd())[0])))
    pickle_dir = directory_path + '/Bound_Estimation/Parameter_Deviation/'

    # Load theta, rtof and tdoa arrays produced by parameter_deviation_calculator.py.
    source_files = ('theta.pkl', 'rtof_dist.pkl', 'tdoa_dist.pkl')
    raw_arrays = []
    for fname in source_files:
        with open(pickle_dir + fname, 'rb') as f:
            raw_arrays.append(pickle.load(f))

    # Compute the per-element deviations for each loaded array.
    deviations = [deviation_from_actual_value(arr) for arr in raw_arrays]

    # Persist each result as deviation_<original name>.pkl.
    for fname, dev in zip(source_files, deviations):
        with open(pickle_dir + 'deviation_' + fname, 'wb') as f:
            pickle.dump(dev, f)
# Script entry point.
if __name__ == '__main__':
    main()
| 2.9375 | 3 |