Dataset schema (39 columns, one row per Python source file; ⌀ marks nullable columns):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
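A minimal sketch of how rows with this schema can be consumed, assuming this dump corresponds to a Hugging Face-style `datasets` table; the dataset identifier below is a placeholder and the filter thresholds are arbitrary examples, not part of this dump:

```python
from datasets import load_dataset

# Placeholder identifier: substitute the actual dataset path for this dump.
ds = load_dataset("org/python-source-subset", split="train", streaming=True)

for row in ds:
    stars = row["max_stars_count"] or 0  # nullable int64, so treat None as 0
    # 'content' holds the raw file text; the score_* columns are in [0, 1].
    if stars >= 1 and row["score_documentation"] > 0.2:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"])
        print(row["content"][:120])
        break
```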
hexsha: c6e9c16512d69ea6fa5eab9288773894d5292bcf
size: 102
ext: py
lang: Python
max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: garage/utils/LED-on.py
max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: 1337DS/SmartGarage
max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: 1be4ad010653fc358e59417a26cd34e2146bdbf7
max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: ["Apache-2.0"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2022-02-09T10:36:43.000Z
max_stars_repo_stars_event_max_datetime: 2022-02-09T10:36:43.000Z
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(26, GPIO.OUT)
GPIO.output(26, GPIO.HIGH)
avg_line_length: 12.75
max_line_length: 26
alphanum_fraction: 0.735294
count_classes: 0, score_classes: 0
count_generators: 0, score_generators: 0
count_decorators: 0, score_decorators: 0
count_async_functions: 0, score_async_functions: 0
count_documentation: 0, score_documentation: 0
hexsha: c6eb3b19d050576ce9764d0276a806ecdcc82b5f
size: 2,456
ext: py
lang: Python
max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: experiments/bayesopt/run_direct_surrogate.py
max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: lebrice/RoBO
max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: 0cb58a1622d3a540f7714b239f0cedf048b6fd9f
max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: ["BSD-3-Clause"]
max_stars_count: 455
max_stars_repo_stars_event_min_datetime: 2015-04-02T06:12:13.000Z
max_stars_repo_stars_event_max_datetime: 2022-02-28T10:54:29.000Z
max_issues_count: 66
max_issues_repo_issues_event_min_datetime: 2015-04-07T15:20:55.000Z
max_issues_repo_issues_event_max_datetime: 2021-06-04T16:40:46.000Z
max_forks_count: 188
max_forks_repo_forks_event_min_datetime: 2015-04-14T09:42:34.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-31T21:04:53.000Z
content:
import os
import sys
import DIRECT
import json
import numpy as np
from hpolib.benchmarks.ml.surrogate_svm import SurrogateSVM
from hpolib.benchmarks.ml.surrogate_cnn import SurrogateCNN
from hpolib.benchmarks.ml.surrogate_fcnet import SurrogateFCNet

run_id = int(sys.argv[1])
benchmark = sys.argv[2]
n_iters = 50
n_init = 2
output_path = "./experiments/RoBO/surrogates"

if benchmark == "svm_mnist":
    b = SurrogateSVM(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
elif benchmark == "cnn_cifar10":
    b = SurrogateCNN(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
elif benchmark == "fcnet_mnist":
    b = SurrogateFCNet(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")

info = b.get_meta_information()

X = []
y = []

def wrapper(x, user_data):
    X.append(x.tolist())
    y_ = b.objective_function(x)['function_value']
    y.append(y_)
    return y_, 0

# Dimension and bounds of the function
bounds = b.get_meta_information()['bounds']
dimensions = len(bounds)
lower = np.array([i[0] for i in bounds])
upper = np.array([i[1] for i in bounds])
start_point = (upper-lower)/2

x, _, _ = DIRECT.solve(wrapper,
                       l=[lower],
                       u=[upper],
                       maxT=n_iters*2,
                       maxf=n_iters)

X = X[:n_iters]
y = y[:n_iters]
fvals = np.array(y)

incs = []
incumbent_val = []
curr_inc_val = sys.float_info.max
inc = None
for i, f in enumerate(fvals):
    if curr_inc_val > f:
        curr_inc_val = f
        inc = X[i]
    incumbent_val.append(curr_inc_val)
    incs.append(inc)

# Offline Evaluation
test_error = []
runtime = []
cum_cost = 0
results = dict()
for i, inc in enumerate(incs):
    y = b.objective_function_test(np.array(inc))["function_value"]
    test_error.append(y)
    # Compute the time it would have taken to evaluate this configuration
    c = b.objective_function(np.array(X[i]))["cost"]
    cum_cost += c
    runtime.append(cum_cost)

# Estimate the runtime as the optimization overhead + estimated cost
results["runtime"] = runtime
results["test_error"] = test_error
results["method"] = "direct"
results["benchmark"] = benchmark
results["run_id"] = run_id
results["incumbents"] = incs
results["incumbent_values"] = incumbent_val
results["X"] = X
results["y"] = y

p = os.path.join(output_path, benchmark, "direct")
os.makedirs(p, exist_ok=True)
fh = open(os.path.join(p, '%s_run_%d.json' % (benchmark, run_id)), 'w')
json.dump(results, fh)
avg_line_length: 24.078431
max_line_length: 74
alphanum_fraction: 0.678339
count_classes: 0, score_classes: 0
count_generators: 0, score_generators: 0
count_decorators: 0, score_decorators: 0
count_async_functions: 0, score_async_functions: 0
count_documentation: 563, score_documentation: 0.229235
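The trailing columns are per-file statistics derived from `content`. The numbers in these rows are consistent with the rough definitions sketched below (for this row, 563 / 2,456 matches score_documentation = 0.229235), but the counting logic behind the count_* columns themselves is not shown in this dump, so treat the sketch as an approximation:

```python
# Rough reconstruction of the derived columns, inferred from the rows in this dump.
# Only the ratios can be checked against the listed values; the tokenisation behind
# count_classes, count_documentation, etc. is not part of this dump.
def derived_stats(content: str) -> dict:
    lines = content.splitlines()
    return {
        "avg_line_length": len(content) / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
    }

def score(count: int, size: int) -> float:
    # score_* appears to be count_* divided by the file size in bytes,
    # e.g. 563 / 2456 = 0.2292 for this row's score_documentation.
    return count / size if size else 0.0
```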
hexsha: c6eb612c8a8c4eac0f2f977fa8c04f601c65f1a7
size: 1,197
ext: py
lang: Python
max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: calls/delete_call_feedback_summary.py
max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: mickstevens/python3-twilio-sdkv6-examples
max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: aac0403533b35fec4e8483de18d8fde2d783cfb2
max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: ["MIT"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2018-11-23T20:11:27.000Z
max_stars_repo_stars_event_max_datetime: 2018-11-23T20:11:27.000Z
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# *** Delete Call Feedback Summary ***
# Code based on https://www.twilio.com/docs/voice/api/call-quality-feedback
# Download Python 3 from https://www.python.org/downloads/
# Download the Twilio helper library from https://www.twilio.com/docs/python/install
import os
from twilio.rest import Client
# from datetime import datetime | not required for this example
import logging
#write requests & responses from Twilio to log file, useful for debugging:
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s',
                    filename='/usr/local/twilio/python3/sdkv6x/calls/logs/call_feedback.log',
                    filemode='a')
# Your Account Sid and Auth Token from twilio.com/console & stored in Mac OS ~/.bash_profile in this example
account_sid = os.environ.get('$TWILIO_ACCOUNT_SID')
auth_token = os.environ.get('$TWILIO_AUTH_TOKEN')
client = Client(account_sid, auth_token)
# A list of call feedback summary parameters & their permissable values, comment out (#) those lines not required:
# FSe6b77c80b547957f8ab7329b5c0b556c
client.calls \
      .feedback_summaries("FSxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") \
      .delete()
avg_line_length: 44.333333
max_line_length: 114
alphanum_fraction: 0.734336
count_classes: 0, score_classes: 0
count_generators: 0, score_generators: 0
count_decorators: 0, score_decorators: 0
count_async_functions: 0, score_async_functions: 0
count_documentation: 833, score_documentation: 0.695906
hexsha: c6f0d37f8bd7df7e6ea000ba0009d2402adc88b8
size: 8,523
ext: py
lang: Python
max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: z42/z42/web/boot/css_js.py
max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: jumploop/collection_python
max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: f66f18dc5ae50fce95679e0f4aee5e28b2543432
max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# coding:utf-8
import _env
from os.path import join, dirname, abspath, exists, splitext
from os import walk, mkdir, remove, makedirs
from collections import defaultdict
from hashlib import md5
from glob import glob
from base64 import urlsafe_b64encode
import envoy
import os
from tempfile import mktemp
from json import dumps
from z42.web.lib.qbox.uploader import QINIU
import re
from z42.config import QINIU as _QINIU, DEBUG
from extract import extract_map
def css_remove_background_url(path, css):
dirpath = dirname(path[len(_env.PREFIX):])
def _(img_url):
if 'data:image' in img_url or img_url.strip('\'")').endswith('.css'):
return img_url
img_url = img_url.replace("'", '').replace('"', '')
img_url = img_url[4:-1].strip()
if not (img_url.startswith('https://') or img_url.startswith('http://')):
if not img_url.startswith('/'):
img_url = join(dirpath, img_url)
if img_url in CSS_IMG2URL:
print img_url, CSS_IMG2URL[img_url]
img_url = CSS_IMG2URL[img_url]
elif img_url.startswith('//'):
pass
elif not exists(join(BULID, img_url)):
raise Exception('css error : %s\n%s not exist' % (path, img_url))
return 'url(%s)' % img_url
css = extract_map('url(', ')', css, _)
css = extract_map("url(\"", "\")", css, _)
css = extract_map("url('", "')", css, _)
return css
# for k, v in CSS_IMG2URL.iteritems():
# txt = txt.replace(k, v)
BULID = '/tmp/%s'%_QINIU.HOST
BULID_EXIST = set(glob(BULID + '/*'))
PATH2HASH = {}
if not exists(BULID):
mkdir(BULID)
os.chmod(BULID, 0777)
#with open(join(_env.PREFIX, 'js/_lib/google_analytics.js'), 'w') as google_analytics:
# google_analytics.write(
# """_gaq=[['_setAccount', '%s'],['_trackPageview']];""" % GOOGLE_ANALYTICS)
CSS_IMG2URL = {}
def dirwalk(dirname):
base = join(_env.PREFIX, dirname)
merge = []
file = []
suffix = '.%s' % dirname
for dirpath, dirnames, filenames in walk(base, followlinks=True):
for i in filenames:
path = abspath(join(dirpath, i))
if i == 'merge.conf':
merge.append((path, merge_conf(path, base)))
if i.endswith(suffix):
file.append(path)
elif dirname == 'css':
filesuffix = splitext(path)[-1][1:]
if filesuffix not in ('py', 'pyc', 'orig', 'swp', 'conf', 'txt', 'rst', 'html'):
url = path[len(_env.PREFIX):]
with open(path, 'rb') as infile:
filemd5 = urlsafe_b64encode(
md5(infile.read()).digest()).rstrip('=')
CSS_IMG2URL[url] = filemd5
cache_path = join(BULID, filemd5)
if not exists(cache_path) and not DEBUG:
print 'upload %s > //%s/%s'% (url, _QINIU.HOST, filemd5)
r = QINIU.upload( filemd5, path)
_hash = r.get('hash', None)
if _hash:
with open(cache_path, 'w') as c:
c.write(_hash)
else:
print r
return file, merge
def merge_conf(file, base):
ft = defaultdict(list)
p = None
dirpath = dirname(file)
with open(file) as f:
for line in f:
line = line.strip()
if line.startswith('#') or not line:
continue
if line[0] == '/':
path = base + line
else:
path = join(dirpath, line)
if line.endswith(':'):
p = path[:-1].strip()
elif line and p:
ft[p].append(path)
return ft
#@import url(ctrl/main.css);
#@import url(ctrl/zsite.css);
#@import url(ctrl/feed.css);
def merge_css(src_list):
result = []
for i in src_list:
result.append("""@import url(/css%s);""" % (i[len(_env.PREFIX) + 4:]))
return result
def merge_js(src_list):
result = [
'''function LOAD(js){ document.write('<script src="'+js+'"></'+"script>") }'''
]
for i in src_list:
result.append("""LOAD('/js%s')""" % (
i[len(_env.PREFIX) + 3:]))
return result
def run(suffix):
file_list, merge_list = dirwalk(suffix)
file_set = set(file_list)
to_merge = defaultdict(list)
for merge_conf, merge in merge_list:
for to_file, src_list in merge.iteritems():
if to_file in file_set:
file_set.remove(to_file)
for i in src_list:
if exists(i):
to_merge[to_file].append(i)
else:
print merge_conf, 'ERROR'
print '\t', i, 'NOT EXIST'
if suffix == 'css':
merger = merge_css
cmd = 'java -jar %s --charset=utf-8 --type css -o %%s %%s' % join(
_env.PREFIX, 'static/yuicompressor.jar')
else:
merger = merge_js
#cmd = 'uglifyjs -b -o %s %s '
cmd = 'uglifyjs -c -o %s %s '
for i in file_set:
base = join(_env.PREFIX, suffix)
with open(i) as infile:
hash = hash_name(infile.read(), i)
path = join(BULID, hash) + '.' + suffix
if path not in BULID_EXIST:
envoy_run(hash, cmd, path, i)
for to_file, src_list, in to_merge.iteritems():
dirpath = dirname(to_file)
if not exists(dirpath):
makedirs(dirpath)
r = merger(src_list)
with open(to_file, 'w') as to:
r = '\n'.join(r)
to.write(r)
r = []
for i in src_list:
build = join(BULID, PATH2HASH[i] + '.' + suffix)
if exists(build):
with open(build) as t:
r.append(t.read())
r = '\n'.join(r)
hash = hash_name(r, to_file)
path = join(BULID, hash) + '.' + suffix
# print path
if path not in BULID_EXIST:
tmp = mktemp()
with open(tmp, 'w') as f:
f.write(r)
envoy_run(hash, cmd, path, tmp)
def envoy_run(hash, cmd, path, tmp):
if DEBUG:
return
if exists(path):
return
t = cmd % (path, tmp)
print t
envoy.run(t)
suffix = path.rsplit('.', 1)[-1]
if suffix == 'css':
content_type = 'text/css'
elif suffix == 'js':
content_type = 'application/javascript'
path = '%s/%s.%s' % (BULID, hash, suffix)
if suffix == 'css':
with open(path) as css:
txt = css.read()
remove(path)
txt = css_remove_background_url(tmp, txt)
with open(path, 'w') as css:
css.write(txt)
QINIU.upload(hash, path, content_type)
def hash_name(content, path):
hash = urlsafe_b64encode(md5(content).digest()).rstrip('=')
PATH2HASH[path] = hash
return hash
run('css')
run('js')
# for i in BULID_EXIST - set(BULID + '/' + i for i in PATH2HASH.itervalues()):
# if i.endswith('.css') or i.endswith('.js'):
# print 'remove', i
# remove(i)
init = defaultdict(list)
for file_name, hash in PATH2HASH.iteritems():
dirname, file_name = file_name[len(_env.PREFIX) + 1:].split('/', 1)
init[dirname].append((file_name.rsplit('.', 1)[0], hash))
for suffix, flist in init.iteritems():
with open(join(_env.PREFIX, suffix, '_hash_.py'), 'w') as h:
h.write("""#coding:utf-8\n
import _env
__HASH__ = {
""")
for name, hash in flist:
h.write(
""" "%s" : '%s', #%s\n""" % (
name,
hash,
name.rsplit('.', 1)[0].replace(
'.', '_').replace('-', '_').replace('/', '_')
)
)
h.write('}')
h.write("""
from z42.config import DEBUG, HOST, QINIU
from os.path import dirname,basename,abspath
__vars__ = vars()
def _():
for file_name, hash in __HASH__.iteritems():
if DEBUG:
suffix = basename(dirname(__file__))
value = "/%s/%s.%s"%(suffix, file_name, suffix)
else:
value = "//%s/%s"%(QINIU.HOST, hash)
name = file_name.replace('.', '_').replace('-', '_').replace('/', '_')
__vars__[name] = value
_()
del __vars__["_"]
""")
avg_line_length: 30.010563
max_line_length: 96
alphanum_fraction: 0.516837
count_classes: 0, score_classes: 0
count_generators: 0, score_generators: 0
count_decorators: 0, score_decorators: 0
count_async_functions: 0, score_async_functions: 0
count_documentation: 1,804, score_documentation: 0.211663
hexsha: c6f1e3f027d95fbea317bf8aa4166e874befc948
size: 5,693
ext: py
lang: Python
max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: controllers/transactions_controller.py
max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: JeremyCodeClan/spentrack_project
max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: 455074446b5b335ea77933c80c43745fcad1171c
max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from flask import Blueprint, Flask, render_template, request, redirect
from models.transaction import Transaction
import repositories.transaction_repository as transaction_repo
import repositories.merchant_repository as merchant_repo
import repositories.tag_repository as tag_repo

transactions_blueprint = Blueprint("transactions", __name__)

@transactions_blueprint.route("/jeremy_e51")
def transactions():
    order = 'order_date_desc'
    transactions = transaction_repo.select_all()
    total = transaction_repo.total_amount(transactions)
    return render_template(
        "transactions/index.html",
        transactions = transactions, total = total, login = 1, order = order
    )

@transactions_blueprint.route("/jeremy_e51/new")
def new():
    transactions = transaction_repo.select_all()
    total = transaction_repo.total_amount(transactions)
    return render_template(
        "transactions/new.html",
        transactions = transactions, total = total, login = 1, new_cancel = 1
    )

@transactions_blueprint.route("/jeremy_e51/<id>")
def transaction_show(id):
    order = 'order_date_desc'
    show_one = transaction_repo.select(id)
    merchant = None
    tag = None
    if show_one.merchant: merchant = merchant_repo.select(show_one.merchant)
    if show_one.tag: tag = tag_repo.select(show_one.tag)
    transactions = transaction_repo.select_all()
    total = transaction_repo.total_amount(transactions)
    return render_template(
        "transactions/show.html",
        transactions = transactions, show_one = show_one, merchant = merchant, tag = tag, total = total, login = 1, order = order
    )

@transactions_blueprint.route("/jeremy_e51", methods=['POST'])
def add_transaction():
    name = request.form['name']
    description = request.form['description']
    amount = request.form['amount']
    date = request.form['date']
    transaction = Transaction(name, description, amount, date)
    transaction_repo.save(transaction)
    return redirect('/jeremy_e51')

@transactions_blueprint.route("/jeremy_e51/<id>/edit")
def edit_transaction(id):
    transactions = transaction_repo.select_all()
    total = transaction_repo.total_amount(transactions)
    merchants = merchant_repo.select_all()
    tags = tag_repo.select_all()
    return render_template(
        'transactions/edit.html',
        transactions = transactions, merchants = merchants, tags = tags, id = int(id), total = total, login = 1
    )

@transactions_blueprint.route("/jeremy_e51/<id>", methods=['POST'])
def update_transaction(id):
    transaction = transaction_repo.select(id)
    if "tag_id" in request.form:
        if request.form["tag_id"] != "None":
            tag_id = request.form["tag_id"]
            tag = tag_repo.select(tag_id)
            transaction.tag = tag
    if "merchant_id" in request.form:
        if request.form["merchant_id"] != "None":
            merchant_id = request.form["merchant_id"]
            merchant = merchant_repo.select(merchant_id)
            transaction.merchant = merchant
    transaction_repo.update(transaction)
    return redirect('/jeremy_e51')

@transactions_blueprint.route("/jeremy_e51/order")
def transactions_by_order():
    order_date = request.args['order_date']
    order_amount = request.args['order_amount']
    order_name = request.args['order_name']
    if order_date:
        if order_date == 'desc':
            order = 'order_date_desc'
            transactions = transaction_repo.select_all()
            total = transaction_repo.total_amount(transactions)
            return render_template(
                "transactions/index.html",
                transactions = transactions, total = total, login = 1, order = order
            )
        if order_date == 'asc':
            order = 'order_date_asc'
            transactions = transaction_repo.select_all_asc()
            total = transaction_repo.total_amount(transactions)
            return render_template(
                "transactions/index.html",
                transactions = transactions, total = total, login = 1, order = order
            )
    if order_amount:
        if order_amount == 'desc':
            order = 'order_amount_desc'
            transactions = transaction_repo.order_by_price_desc()
            total = transaction_repo.total_amount(transactions)
            return render_template(
                "transactions/index.html",
                transactions = transactions, total = total, login = 1, order = order
            )
        if order_amount == 'asc':
            order = 'order_amount_asc'
            transactions = transaction_repo.order_by_price_asc()
            total = transaction_repo.total_amount(transactions)
            return render_template(
                "transactions/index.html",
                transactions = transactions, total = total, login = 1, order = order
            )
    if order_name:
        if order_name == 'desc':
            order = 'order_name_desc'
            transactions = transaction_repo.order_by_name_desc()
            total = transaction_repo.total_amount(transactions)
            return render_template(
                "transactions/index.html",
                transactions = transactions, total = total, login = 1, order = order
            )
        if order_name == 'asc':
            order = 'order_name_asc'
            transactions = transaction_repo.order_by_name_asc()
            total = transaction_repo.total_amount(transactions)
            return render_template(
                "transactions/index.html",
                transactions = transactions, total = total, login = 1, order = order
            )
    return redirect('/jeremy_e51')
avg_line_length: 40.664286
max_line_length: 129
alphanum_fraction: 0.657298
count_classes: 0, score_classes: 0
count_generators: 0, score_generators: 0
count_decorators: 5,336, score_decorators: 0.937291
count_async_functions: 0, score_async_functions: 0
count_documentation: 748, score_documentation: 0.131389
hexsha: c6f1fc0edc1a1464fe8ec814304b412c4369a1d8
size: 86,261
ext: py
lang: Python
max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: Welcomer 6.20/modules/core.py
max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: TheRockettek/Welcomer
max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: 60706b4d6eec7d4f2500b3acc37530e42d846532
max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: ["MIT"]
max_stars_count: 12
max_stars_repo_stars_event_min_datetime: 2019-09-10T21:31:51.000Z
max_stars_repo_stars_event_max_datetime: 2022-01-21T14:31:05.000Z
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2021-09-17T09:03:54.000Z
max_forks_repo_forks_event_max_datetime: 2021-09-17T09:03:54.000Z
content:
import asyncio
import copy
import csv
import io
import math
from math import inf
import os
import sys
import time
import traceback
import logging
from importlib import reload
from datetime import datetime
import logging
import aiohttp
import discord
import requests
import json
import ujson
from discord.ext import commands
from rockutils import rockutils
import uuid
import handling
def canint(val):
try:
int(val)
return True
except BaseException:
return False
class NoPermission(Exception):
pass
class NoDonator(Exception):
pass
class WelcomerCore(commands.Cog):
def __init__(self, bot):
self.bot = bot
def maketimestamp(
self,
timestamp=0,
lang=[
"second",
"minute",
"hour",
"day",
"and",
"ago",
"year"],
allow_secs=False,
include_ago=True):
if not timestamp:
timestamp = 0
_y, _d, _h, _m, _s = rockutils.parse_unix(
datetime.utcnow().timestamp() - timestamp)
# message = ""
# if _y > 0:
# message += f"{str(_y)} {lang[6]}{'s' if _y > 1 else ''} "
# if _d > 0:
# if _h < 0:
# message += f"{lang[4]} "
# elif len(message) > 1:
# message += ", "
# message += f"{str(_d)} {lang[3]}{'s' if _d > 1 else ''} "
# if _h > 0:
# if _m < 0:
# message += f"{lang[4]} "
# elif len(message) > 1:
# message += ", "
# message += f"{str(_h)} {lang[2]}{'s' if _h > 1 else ''} "
# # if we dont allow seconds, round the minutes up
# if not allow_secs and _s > 0:
# _m += 1
# if _m > 0:
# if _h > 0 or _d > 0:
# message += f"{lang[4]} "
# message += f"{str(_m)} {lang[1]}{'s' if _m > 1 else ''} "
# if allow_secs:
# if _h > 0 or _d > 0 or _m > 0:
# message += f"{lang[4]} "
# message += f"{str(_s)} {lang[0]}{'s' if _s > 1 else ''} "
# if include_ago:
# message += lang[5]
# return message
message = ""
if _y > 0:
message += f"{_y} year{'s' if _y != 1 else ''}"
if _d > 0:
if _h < 0:
message += " and "
elif len(message) > 1:
message += ", "
message += f"{_d} day{'s' if _d != 1 else ''}"
if _h > 0:
if _m < 0:
message += " and "
elif len(message) > 1:
message += ", "
message += f"{_h} hour{'s' if _h != 1 else ''}"
if _m > 0:
if _s < 0 if allow_secs else (_h > 0 or _d > 0):
message += " and "
elif len(message) > 1:
message += ", "
message += f"{_m} minute{'s' if _m != 1 else ''}"
if allow_secs:
if _h > 0 or _d > 0 or _m > 0:
message += " and "
message += f"{_s} second{'s' if _s != 1 else ''}"
if include_ago:
message += " ago"
return message
async def get_value(self, table, key, default=None):
# print("FETCH", table, key)
async with self.bot.connection.acquire() as connection:
value = await connection.fetchrow(
f"SELECT * FROM {table} WHERE id = $1",
key
)
if value:
print("FETCH", table, key, "OK")
try:
return ujson.loads(value["value"])
except ValueError:
return json.loads(value["value"])
else:
print("FETCH", table, key, "FAIL")
return default
async def set_value(self, table, key, value):
if key is None:
key = str(uuid.uuid4())
print("SET", table, key)
try:
async with self.bot.connection.acquire() as connection:
await connection.execute(
f"INSERT INTO {table}(id, value) VALUES($1, $2) ON CONFLICT (id) DO UPDATE SET value = $2",
key, ujson.dumps(value)
)
except Exception as e:
print("Failed to set value", table, ":", key, e)
# return False
else:
# return True
return {
"generated_keys": [key],
"inserted": 1
}
async def get_guild_info(self, id, refer="", reload_data=True, create_cache=True, direct=False, request_invites=True):
# rockutils.prefix_print(
# f"{f'[Refer: {refer}] ' if refer != '' else ''}Getting information for G:{id}",
# prefix="Guild Info:Get",
# prefix_colour="light green")
guild_info = await self.get_value("guilds", str(id))
# guild_info = await r.table("guilds").get(str(id)).run(self.bot.connection)
if not direct:
new_data = True if not isinstance(
guild_info, dict) else not bool(guild_info)
has_updated = True if new_data else False
guild = self.bot.get_guild(int(id))
_guild_info = self.bot.serialiser.guild(guild)
_time = time.time()
default_data = copy.deepcopy(self.bot.default_guild)
latest_version = default_data['d']['dv']
if new_data and guild:
# try:
# old_info = await r.db("welcomer5").table("guilds").get(str(id)).run(self.bot.connection)
# if old_info:
# default_data['a']['e'] = old_info['analytics']['enabled']
# default_data['ar']['e'] = old_info['autorole']['enabled']
# default_data['ar']['r'] = list(
# map(str, old_info['autorole']['role_ids']))
# for donation in old_info['donations']:
# default_data['d']['de'].append(donation['id'])
# default_data['d']['b']['hb'] = True
# default_data['l']['e'] = old_info['leaver']['enabled']
# if isinstance(old_info['leaver']['channel'], str):
# default_data['l']['c'] = old_info['leaver']['channel']
# default_data['l']['t'] = old_info['leaver']['text']
# if "prefix" in old_info:
# default_data['d']['b']['p'] = old_info['prefix']
# default_data['r']['e'] = old_info['rules']['enabled']
# default_data['r']['r'] = old_info['rules']['rules']
# default_data['d']['b']['ai'] = old_info['settings']['allow_invite']
# default_data['d']['b']['d'] = old_info['settings']['description']
# default_data['d']['b']['ss'] = old_info['settings']['show_staff']
# default_data['st']['ap'] = old_info['staff']['allow_ping']
# for staff_id, allow_ping in old_info['staff']['staff_ids'].items():
# default_data['st']['u'].append(
# [staff_id, allow_ping])
# # for channel_id, stat in old_info['stats']['channels']:
# # stats = {}
# # stats['c'] = channel_id
# # stats['t'] = stat['type']
# # stats['t'] = stat['text']
# # default_data['s']['c'].append(stat)
# default_data['s']['c'] = old_info['stats']['channels']
# if isinstance(old_info['stats']['enabled'], str):
# default_data['s']['e'] = old_info['stats']['enabled']
# default_data['s']['ca'] = old_info['stats']['category']
# default_data['tc']['e'] = old_info['tempchannels']['enabled']
# if isinstance(old_info['tempchannels']['category'], str):
# default_data['tc']['c'] = old_info['tempchannels']['category']
# default_data['tc']['ap'] = old_info['tempchannels']['autopurge']
# if isinstance(old_info['welcomer']['channel'], str):
# default_data['w']['c'] = old_info['welcomer']['channel']
# default_data['w']['e'] = old_info['welcomer']['enable_embed']
# default_data['w']['b'] = old_info['welcomer']['text']['badges']
# default_data['w']['iv'] = old_info['welcomer']['text']['invited']
# default_data['w']['i']['e'] = old_info['welcomer']['images']['enabled']
# default_data['w']['i']['bg'] = old_info['welcomer']['images']['background']
# # default_data['w']['i']['c']['bo'] = old_info['welcomer']['images']['colour']['border']
# # default_data['w']['i']['c']['b'] = old_info['welcomer']['images']['colour']['text']
# # default_data['w']['i']['c']['pb'] = old_info['welcomer']['images']['colour']['profile']
# default_data['w']['i']['m'] = old_info['welcomer']['images']['message']
# default_data['w']['t']['e'] = old_info['welcomer']['text']['enabled']
# default_data['w']['t']['m'] = old_info['welcomer']['text']['message']
# default_data['w']['dm']['e'] = old_info['welcomer']['dm']['enabled']
# default_data['w']['dm']['m'] = old_info['welcomer']['text']['message']
# if "namepurge" in old_info['welcomer']:
# default_data['np']['e'] = old_info['welcomer']['namepurge']['enabled']
# default_data['np']['f'] = list(map(lambda o: o.replace(
# "\n", ""), old_info['welcomer']['namepurge']['filter']))
# except BaseException:
# exc_info = sys.exc_info()
# traceback.print_exception(*exc_info)
guild_info = default_data
origional_guild_info = copy.deepcopy(guild_info)
guild_info['d']['b']['c'] = self.bot.cluster_id
guild_info['id'] = str(id)
if self.bot.donator:
guild_info['d']['b']['hd'] = True
elif guild:
if not guild.get_member(498519480985583636):
guild_info['d']['b']['hd'] = False
if guild:
if new_data:
guild_info['d']['g']['ga'] = math.ceil(_time)
guild_info['d']['g']['gc'] = math.ceil(
guild.created_at.timestamp())
if request_invites:
try:
guild_info['d']['i'] = await self.bot.serialiser.invites(guild)
except BaseException:
pass
guild_info['d']['g']['i'] = _guild_info['icons']
guild_info['d']['g']['ic'] = _guild_info['icon']
guild_info['d']['g']['n'] = _guild_info['name']
guild_info['d']['b']['r'] = _guild_info['region']
guild_info['d']['b']['sh'] = guild.shard_id
if guild.owner or guild.owner_id:
try:
owner_id = guild.owner.id
except:
owner_id = guild.owner_id
user = self.bot.get_user(owner_id)
if user:
guild_info['d']['g']['o'] = self.bot.serialiser.user(
user)
if _time - guild_info['d']['m']['u'] > 600:
guild_info['d']['m'] = {
"b": _guild_info['bots'],
"m": _guild_info['users'] - _guild_info['bots'],
"a": _guild_info['users'],
"u": _time
}
# if _time - guild_info['d']['d']['u'] > 600:
# _guild_detailed = self.bot.serialiser.guild_detailed(
# guild)
# guild_info['d']['d'] = {
# "s": _guild_detailed['streaming'],
# "o": _guild_detailed['online'],
# "i": _guild_detailed['idle'],
# "d": _guild_detailed['dnd'],
# "of": _guild_detailed['offline'],
# "u": _time
# }
if _time - guild_info['d']['c']['u'] > 600:
_channels = self.bot.serialiser.channels(guild)
guild_info['d']['c'] = {
"c": _channels['categories'],
"v": _channels['voice'],
"t": _channels['text'],
"u": _time
}
if "r" not in guild_info['d'] or (
_time - guild_info['d']['r']['u'] > 600):
_roles = self.bot.serialiser.roles(guild)
guild_info['d']['r'] = {
"r": _roles,
"u": _time
}
has_updated = True if guild_info != origional_guild_info else has_updated
if latest_version != guild_info['d']['dv']:
default_data.update(guild_info)
guild_info = default_data
_version = guild_info['d']['dv']
if _version == 0:
# example hardcoded data overwrite
pass
if "sw" not in guild_info['d']['b']:
guild_info['d']['b']['sw'] = True
guild_info['d']['dv'] = default_data['d']['dv']
has_updated = True
if not isinstance(guild_info['s']['c'], list):
print("Emptying channel list")
guild_info['s']['c'] = []
def normalize_colour(string):
if string.startswith("RGBA|"):
return string
elif string.startswith("RGB|"):
return string
else:
try:
_hex = str(hex(int(string)))[2:]
if len(_hex) >= 8:
return f"RGBA|{str(hex(string))[:8]}"
elif len(_hex) >= 6:
return f"RGB|{str(hex(string))[:6]}"
except BaseException:
pass
return f"RGB|FFFFFF"
keys = ['w.i.c.b', 'w.i.c.b', 'w.i.c.pb', 'w.i.c.ib']
for key in keys:
value = rockutils.getvalue(key, guild_info)
value = str(value)
if not value.startswith("R"):
newvalue = normalize_colour(value)
rockutils.setvalue(key, guild_info, newvalue)
# print("create cache", create_cache)
if create_cache:
guild = self.bot.get_guild(int(id))
if guild:
await self.create_guild_cache(guild_info, guild, force=True)
else:
rockutils.prefix_print(
f"Wanted to make cache for {id} but no guild object", prefix="createcache", prefix_colour="red", text_colour="light red")
create_cache = False
if has_updated or new_data:
if new_data:
# rockutils.prefix_print(
# f"{f'[Refer: {refer}] ' if refer != '' else ''}Creating information for G:{id}",
# prefix="Guild Info:Get",
# prefix_colour="light green")
# await r.table("guilds").insert(guild_info).run(self.bot.connection)
await self.set_value("guilds", guild_info["id"], guild_info)
else:
await self.update_guild_info(id, guild_info, refer="getguildinfo:" + (refer or "?"))
# print("create cache", create_cache)
if create_cache:
guild = self.bot.get_guild(int(id))
if guild:
await self.create_guild_cache(guild_info, guild, force=True)
else:
rockutils.prefix_print(
f"Wanted to make cache for {id} but no guild object", prefix="createcache", prefix_colour="red", text_colour="light red")
return guild_info
async def update_info(self, ctx, data):
guilddata = copy.copy(ctx.guildinfo)
if data:
if isinstance(data[0], list):
for key, value in data:
if rockutils.hasvalue(key, guilddata):
rockutils.setvalue(key, guilddata, value)
else:
rockutils.prefix_print(
f"Could not find key {key} in guildinfo",
prefix="Update Info",
prefix_colour="red",
text_colour="light red")
else:
# Table not nested (only one key value pair)
key, value = data[0], data[1]
if rockutils.hasvalue(key, guilddata):
rockutils.setvalue(key, guilddata, value)
else:
rockutils.prefix_print(
f"Could not find key {key} in guildinfo",
prefix="Update Info",
prefix_colour="red",
text_colour="light red")
await self.bot.create_guild_cache(guilddata, guild=ctx.guild, force=True)
return await self.update_guild_info(ctx.guild.id, guilddata, refer="updateinfo")
async def update_info_key(self, guildinfo, data, refer=""):
if isinstance(guildinfo, int):
guildinfo = await self.bot.get_guild_info(guildinfo, refer=f"Update Info Key:{refer}")
if len(data) > 0:
if isinstance(data[0], list):
# print(list(map(lambda o: o[0], data)))
for key, value in data:
if rockutils.hasvalue(key, guildinfo):
rockutils.setvalue(key, guildinfo, value)
else:
rockutils.prefix_print(
f"Could not find key {key} in guildinfo",
prefix="Update Info",
prefix_colour="red",
text_colour="light red")
else:
# print(data[0])
# Table not nested (only one key value pair)
key, value = data[0], data[1]
if rockutils.hasvalue(key, guildinfo):
rockutils.setvalue(key, guildinfo, value)
else:
rockutils.prefix_print(
f"Could not find key {key} in guildinfo",
prefix="Update Info",
prefix_colour="red",
text_colour="light red")
guild = self.bot.get_guild(int(guildinfo['id']))
await self.bot.create_guild_cache(guildinfo, guild=guild, force=True)
return await self.update_guild_info(guildinfo['id'], guildinfo, refer=f"Update Info Key:{refer}")
async def update_guild_info(self, id, data, forceupdate=False, refer=""):
try:
# rockutils.prefix_print(
# f"{f'[Refer: {refer}] ' if refer != '' else ''}Updating information for G:{id}",
# prefix="Guild Info:Update",
# prefix_colour="light green")
t = time.time()
res = await self.set_value("guilds", str(id), data)
# if forceupdate:
# res = await r.table("guilds").get(str(id)).update(data).run(self.bot.connection)
# else:
# res = await r.table("guilds").get(str(id)).replace(data).run(self.bot.connection)
te = time.time()
if te - t > 1:
rockutils.prefix_print(
f"{f'[Refer: {refer}] ' if refer != '' else ''}Updating guild info took {math.floor((te-t)*1000)}ms",
prefix="Guild Info:Update",
prefix_colour="red",
text_colour="light red")
return res
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"{f'[Refer: {refer}] ' if refer != '' else ''}Error occured whilst updating info for G:{id}. {e}",
prefix="Guild Info:Update",
prefix_colour="red",
text_colour="light red")
return False
async def get_user_info(self, id, refer="", reload_data=True, direct=False):
# rockutils.prefix_print(
# f"{f'[Refer: {refer}] ' if refer != '' else ''}Getting information for U:{id}",
# prefix="User Info:Get",
# prefix_colour="light green")
# user_info = await r.table("users").get(str(id)).run(self.bot.connection)
user_info = await self.get_value("users", str(id))
if not direct:
new_data = True if not isinstance(
user_info, dict) else not bool(user_info)
has_updated = True if new_data else False
user = self.bot.get_user(int(id))
_user_info = self.bot.serialiser.user(user)
_time = time.time()
default_data = copy.deepcopy(self.bot.default_user)
latest_version = default_data['g']['dv']
if new_data and user:
# try:
# old_info = await r.db("welcomer5").table("guilds").get(str(id)).run(self.bot.connection)
# if old_info:
# if (old_info['membership']['exte'] or
# old_info['membership']['plus'] or
# old_info['membership']['pro']):
# default_data['m']['5']['h'] = True
# default_data['m']['5']['p'] = (old_info['membership']['exte_patr'] or
# old_info['membership']['plus_patr'] or
# old_info['membership']['pro_patr'])
# default_data['m']['5']['u'] = max(
# old_info['membership']['exte_since'],
# old_info['membership']['plus_since'],
# old_info['membership']['pro_since']) + 2592000
# default_data['m']['p'] = old_info['membership']['partner']
# default_data['m']['s'] = list(
# map(lambda o: o['id'], old_info['membership']['servers']))
# default_data['r']['r'] = old_info['reputation']
# default_data['r']['l'] = old_info['last_reputation']
# default_data['g']['b']['pd'] = old_info['prefer_dms']
# except BaseException:
# exc_info = sys.exc_info()
# traceback.print_exception(*exc_info)
user_info = default_data
origional_user_info = copy.deepcopy(user_info)
user_info['id'] = str(id)
if user:
if new_data:
user_info['g']['g']['ua'] = math.ceil(_time)
user_info['g']['g']['uc'] = math.ceil(
user.created_at.timestamp())
if "avatar" in _user_info:
user_info['g']['g']['a'] = _user_info['avatar']
user_info['g']['g']['n'] = _user_info['name']
user_info['g']['g']['d'] = _user_info['discriminator']
user_info['g']['g']['u'] = _time
# if _time - user_info['g']['g']['m']['u'].get(
# self.bot.cluster_id, 0) > 900 and not user.bot:
# user_info['g']['g']['m']['c'][
# self.bot.cluster_id] = self.bot.serialiser.mutualguilds(user)
# user_info['g']['g']['m']['u'][self.bot.cluster_id] = _time
expired = []
renewed = []
changes = []
for membership_type, v in user_info['m'].items():
if isinstance(v, dict):
# print(_time, user_info['m'][membership_type]['u'])
# print(user_info['m'][membership_type]['u'])
if user_info['m'][membership_type]['h'] and user_info['m'][membership_type]['u'] and ((_time > user_info['m'][membership_type]['u'])):
user_info['m'][membership_type]['h'] = False
if user_info['m'][membership_type]['p']:
user_info['m'][membership_type]['h'] = True
user_info['m'][membership_type]['u'] = _time + 2592000
renewed.append("Welcomer x" + membership_type)
else:
expired.append("Welcomer x" + membership_type)
if len(expired) > 0 or len(renewed) > 0:
url = "https://[removed]"
await rockutils.send_webhook(url, f"User: `{id}` <@{id}> membership expired. Expired: `{expired}` Renewed: `{renewed}`")
message = rockutils._(
"Some of your memberships have expired and may have renewed if you have paid using patreon.\n\n__Expired memberships:__**\n{expired}**\n__Renewed memberships:__\n**{renewed}**\n\nYou are able to renew memberships automatically by donating with patreon. Find out more at **{url}**",
user_info).format(
expired=", ".join(expired),
renewed=", ".join(renewed),
url="https://welcomer.gg/donate")
try:
await user.send(message)
except BaseException:
pass
if not user.bot:
user_info['b'] = sorted(
self.bot.serialiser.badges(
user, user_info), key=lambda o: o[0])
has_updated = True if user_info != origional_user_info else has_updated
if latest_version != user_info['g']['dv']:
user_info = default_data.update(user_info)
_version = user_info['g']['dv']
if _version == 0:
# example hardcoded data overwrite
pass
user_info['g']['dv'] = default_data['g']['dv']
has_updated = True
if has_updated or new_data:
if new_data:
rockutils.prefix_print(
f"{f'[Refer: {refer}] ' if refer != '' else ''}Creating information for G:{id}",
prefix="User Info:Get",
prefix_colour="light green")
# await r.table("users").insert(user_info).run(self.bot.connection)
await self.set_value("users", user_info["id"], user_info)
else:
await self.update_user_info(id, user_info)
return user_info
async def update_user_info(self, id, data, forceupdate=False, refer=""):
try:
# rockutils.prefix_print(
# f"{f'[Refer: {refer}] ' if refer != '' else ''}Updating information for U:{id}",
# prefix="User Info:Update",
# prefix_colour="light green")
t = time.time()
await self.set_value("users", str(id), data)
# if forceupdate:
# await r.table("users").get(str(id)).update(data).run(self.bot.connection)
# else:
# await r.table("users").get(str(id)).replace(data).run(self.bot.connection)
te = time.time()
if te - t > 1:
rockutils.prefix_print(
f"{f'[Refer: {refer}] ' if refer != '' else ''}Updating guild info took {math.floor((te-t)*1000)}ms",
prefix="User Info:Update",
prefix_colour="red",
text_colour="light red")
return True
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Error occured whilst updating info for U:{id}. {e}",
prefix="User Info:Update",
prefix_colour="red",
text_colour="light red")
return False
@ commands.Cog.listener()
async def on_shard_connect(self, shard_id):
await self.push_ipc({"o": "SHARD_UPDATE", "d": [0, shard_id]})
@ commands.Cog.listener()
async def on_shard_ready(self, shard_id):
await self.push_ipc({"o": "SHARD_UPDATE", "d": [1, shard_id]})
@ commands.Cog.listener()
async def on_shard_resumed(self, shard_id):
await self.push_ipc({"o": "SHARD_UPDATE", "d": [4, shard_id]})
@ commands.Cog.listener()
async def on_connect(self):
if self.bot.ranonconnect:
return
self.bot.ranonconnect = True
rockutils.prefix_print("Bot is now connecting", prefix_colour="green")
await self.push_ipc({"o": "STATUS_UPDATE", "d": 0})
game = discord.Game("Getting Ready...")
await self.bot.change_presence(status=discord.Status.idle, activity=game)
@ commands.Cog.listener()
async def on_ready(self):
rockutils.prefix_print("Bot is fully ready", prefix_colour="green")
await self.push_ipc({"o": "STATUS_UPDATE", "d": 1})
game = discord.Game("welcomer.gg | +help")
await self.bot.change_presence(status=discord.Status.online, activity=game)
@ commands.Cog.listener()
async def on_resume(self):
rockutils.prefix_print("Bot is now resuming", prefix_colour="green")
await self.push_ipc({"o": "STATUS_UPDATE", "d": 4})
async def sync_task(self):
# ws = self.bot.ipc_ws
rockutils.prefix_print("Starting sync task", prefix="Sync Task")
while True:
try:
await self.sync_handle()
except asyncio.CancelledError:
raise asyncio.CancelledError
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"{type(e)} {str(e)}",
prefix="Sync Task",
prefix_colour="light red",
text_colour="red")
await asyncio.sleep(1)
async def sync_receiver(self):
ws = self.bot.ipc_ws
rockutils.prefix_print("Yielding sync receiver", prefix="Sync Handler")
while not self.bot.is_ready():
await asyncio.sleep(1)
rockutils.prefix_print("Starting sync receiver", prefix="Sync Handler")
while True:
try:
print("Waiting for json")
jobs = await ws.receive_json(loads=ujson.loads)
except ValueError:
pass
except asyncio.CancelledError:
raise asyncio.CancelledError
else:
if len(jobs) > 0:
try:
f = open("handling.py", "r")
file_content = f.read()
f.close()
compile(file_content + "\n", "handling.py", "exec")
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Could not update handling: {str(e)}",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
for job in jobs:
print(f"Running job {job} in task")
self.bot.loop.create_task(self.process_job(job))
async def process_job(self, job):
try:
opcode = job['o'].lower()
try:
args = ujson.loads(job['a'])
except BaseException:
args = job['a']
key = job['k']
if canint(args):
args = int(args)
if hasattr(handling, opcode):
try:
result = await asyncio.wait_for(getattr(handling, opcode)(self, opcode, args), timeout=60)
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
result = result = {
"success": False, "error": "Exception",
"exception": str(type(e))}
rockutils.prefix_print(
f"Could not process job. {opcode}:{args}. {str(e)}",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
else:
result = {
"success": False, "error": "InvalidOPCode"}
_payload = {
"o": "SUBMIT",
"k": key,
"r": self.bot.cluster_id,
"d": result
}
domain = f"http://{self.bot.config['ipc']['host']}:{self.bot.config['ipc']['port']}/api/ipc_submit/{self.bot.cluster_id}/{self.bot.config['ipc']['auth_key']}"
async with aiohttp.ClientSession() as _session:
await _session.post(domain, json=_payload)
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Could not process jobs: {str(e)}",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
async def sync_send(self, _payload):
try:
_payload['o'] = _payload['o'].upper()
await self.bot.ipc_ws.send_json(_payload, dumps=ujson.dumps)
except asyncio.CancelledError:
raise asyncio.CancelledError
except OverflowError:
# If we have overflowed in a ping, and more than half the
# shards are broken, kill the bot.
if _payload["o"] == "SUBMIT" and "ping" in _payload["k"]:
total = round(len(_payload["d"]["latencies"])/2)
tinf = 0
for i in _payload["d"]["latencies"]:
if i[1] == inf:
tinf += 1
if tinf >= total:
self.bot.logout()
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Could not send payload. {_payload}. {str(e)}",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
async def sync_handle(self):
rockutils.prefix_print("Starting sync handler", prefix="Sync Handler")
try:
domain = f"http://{self.bot.config['ipc']['host']}:{self.bot.config['ipc']['port']}/api/ipc/{self.bot.cluster_id}/{self.bot.config['ipc']['auth_key']}"
rockutils.prefix_print(f"Connecting to WS via {domain}")
session = aiohttp.ClientSession()
self.bot.ipc_ws = await session.ws_connect(domain)
rockutils.prefix_print(
"Connected to websocket",
prefix="Sync Handler")
self.bot.sync_receiver_task = self.bot.loop.create_task(
self.sync_receiver())
while True:
await asyncio.sleep(1)
if self.bot.sync_receiver_task.done():
rockutils.prefix_print(
"Closing sync", prefix="Sync Handler", text_colour="red")
try:
self.bot.sync_receiver_task.cancel()
except asyncio.CancelledError:
raise asyncio.CancelledError
except BaseException:
pass
await session.close()
return
except aiohttp.client_exceptions.ClientConnectionError:
await session.close()
rockutils.prefix_print(
"Encountered connection error with IPC",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
await asyncio.sleep(2)
except asyncio.CancelledError:
raise asyncio.CancelledError
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"{type(e)} {str(e)}",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
async def push_ipc(self, _payload):
if _payload.get("o", "") != "":
await self.bot.sync_send(_payload)
return True
else:
return False
async def has_guild_donated(self, guild, guild_info, donation=False,
partner=True):
if guild and isinstance(guild, discord.Guild):
_time = time.time()
if partner:
try:
owner_id = guild.owner.id
except:
owner_id = guild.owner_id
_userinfo = await self.bot.get_user_info(owner_id)
if _userinfo and _userinfo['m']['p']:
return True
for id in guild_info['d']['de']:
id = int(id)
try:
_user = self.bot.get_user(id)
if _user:
if await self.bot.has_special_permission(_user, support=True, developer=True, admin=True, trusted=True):
return True
_userinfo = await self.bot.get_user_info(id)
if _userinfo:
if donation:
if _userinfo['m']['1']['h'] and (
_time < (_userinfo['m']['1'].get('u', 0) or 0) or
_userinfo['m']['1']['p']):
return True
if _userinfo['m']['3']['h'] and (
_time < (_userinfo['m']['3'].get('u', 0) or 0) or
_userinfo['m']['3']['p']):
return True
if _userinfo['m']['5']['h'] and (
_time < (_userinfo['m']['5'].get('u', 0) or 0) or
_userinfo['m']['5']['p']):
return True
except BaseException:
pass
return False
async def has_special_permission(self, user, support=False,
developer=False, admin=False,
trusted=False):
_config = rockutils.load_json("cfg/config.json")
if _config != self.bot.config:
self.bot.config = copy.deepcopy(_config)
if user and type(user) in [discord.User, discord.Member]:
if support and user.id in _config['roles']['support']:
return True
if developer and user.id in _config['roles']['developer']:
return True
if admin and user.id in _config['roles']['admins']:
return True
if trusted and user.id in _config['roles']['trusted']:
return True
return False
async def walk_help(self, ctx, group):
message = ""
command_list = []
briefs = {}
for command in group.commands:
key = command.description.split('|')[0]
if key not in briefs:
briefs[key] = []
briefs[key].append(command)
for key, value in briefs.items():
_sorted = sorted(value, key=lambda o: o.name)
briefs[key] = _sorted
for key in sorted(briefs.keys()):
for command in briefs[key]:
command_list.append(command)
for command in command_list:
sub_message = f"**{command.full_parent_name} {command.name} {command.description.split('|')[0]}** | {command.description.split('|')[1]}\n"
if len(message) + len(sub_message) > 2048:
await self.bot.send_data(ctx, message, ctx.userinfo, title=f"{ctx.command.name[0].upper()}{ctx.command.name[1:].lower()} usage")
message = ""
message += sub_message
await self.bot.send_data(ctx, message, ctx.userinfo, title=f"{ctx.command.name[0].upper()}{ctx.command.name[1:].lower()} usage")
async def send_user_data(self, user, message,
title="", footer="", raw=False):
message_kwargs = {}
extra = ""
if raw:
message_kwargs['content'] = message[:2048]
if len(message) > 2048:
extra = message[2048:]
else:
embed_kwargs = {}
embed_kwargs['description'] = message[:2048]
if len(message) > 2048:
extra = message[2048:]
embed_kwargs['timestamp'] = datetime.utcfromtimestamp(
math.ceil(time.time()))
if title:
embed_kwargs['title'] = title
embed = discord.Embed(colour=3553599, **embed_kwargs)
embed.set_footer(text=footer)
message_kwargs['embed'] = embed
try:
await user.send(**message_kwargs)
except BaseException:
try:
await user.send(message[:2048])
except BaseException:
return
if len(extra) > 0:
return await self.send_user_data(user, message, title, footer, raw)
async def send_data(self, ctx, message, userinfo={}, prefer_dms=False,
force_guild=False, force_dm=False, alert=True,
title="", footer="", raw=False):
if force_dm and force_guild:
force_dm, force_guild = False, False
if userinfo.get("g"):
use_guild = not userinfo['g']['b']['pd']
if force_dm:
use_guild = False
if force_guild:
use_guild = True
if not getattr(ctx, "guild", False):
use_guild = False
message_kwargs = {}
extra = ""
if raw:
message_kwargs['content'] = message[:2048]
if len(message) > 2048:
extra = message[2048:]
else:
embed_kwargs = {}
embed_kwargs['description'] = message[:2048]
if len(message) > 2048:
extra = message[2048:]
embed_kwargs['timestamp'] = datetime.utcfromtimestamp(
math.ceil(time.time()))
if title:
embed_kwargs['title'] = title
embed = discord.Embed(colour=3553599, **embed_kwargs)
embed.set_footer(text=footer)
message_kwargs['embed'] = embed
if use_guild:
try:
await ctx.send(**message_kwargs)
except BaseException:
try:
await ctx.send(message[:2048])
except BaseException:
return
else:
try:
await ctx.author.send(**message_kwargs)
if alert and getattr(ctx, "guild", False):
try:
_message = rockutils._(
"Help has been sent to your direct messages", ctx)
await ctx.send(":mailbox_with_mail: | " + _message)
except BaseException:
pass
except BaseException:
try:
await ctx.send(**message_kwargs)
except BaseException:
try:
await ctx.send(message[:2048])
except BaseException:
return
if len(extra) > 0:
return await self.send_data(ctx, extra, userinfo, prefer_dms,
force_guild, force_dm, alert,
title, footer, raw)
def reload_data(self, filename, key=None):
if not key:
_, key = os.path.split(filename)
key = key[:key.find(".")]
if os.path.exists(filename):
data = rockutils.load_json(filename)
setattr(self.bot, key, data)
return True, key
else:
return False, key
def should_cache(self, guildinfo):
return guildinfo['a']['e'] or len(
guildinfo['rr']) > 0 or guildinfo['tr']['e'] or guildinfo['am'][
'e'] or guildinfo['s']['e']
async def create_guild_cache(self, guildinfo, guild=None, cache_filter=[],
force=False):
cached = False
force = True
if not guild:
guild = await self.bot.get_guild(int(guildinfo['id']))
_id = None
if guild:
_id = guild.id
else:
_id = int(guildinfo['id'])
if guildinfo and _id:
c = self.bot.cache
# print(f"Creating cache for {_id}")
if (_id not in c['prefix'] or force) and (
"prefix" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['prefix'][_id] = guildinfo['d']['b']['p']
if (_id not in c['guilddetails'] or force) and (
"guilddetails" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['guilddetails'][_id] = guildinfo['d']['b']
if (_id not in c['rules'] or force) and (
"rules" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['rules'][_id] = guildinfo['r']
# if (_id not in c['channels'] or force) and (
# "channels" in cache_filter if len(cache_filter) > 0 else True):
# c['channels'][_id] = guildinfo['ch']
# if (_id not in c['serverlock'] or force) and (
# "serverlock" in cache_filter if len(cache_filter) > 0 else True):
# c['serverlock'][_id] = guildinfo['sl']
if (_id not in c['staff'] or force) and (
"staff" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['staff'][_id] = guildinfo['st']
if (_id not in c['tempchannel'] or force) and (
"tempchannel" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['tempchannel'][_id] = guildinfo['tc']
if (_id not in c['autorole'] or force) and (
"autorole" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['autorole'][_id] = guildinfo['ar']
# if (_id not in c['rolereact'] or force) and (
# "rolereact" in cache_filter if len(cache_filter) > 0 else True):
# c['rolereact'][_id] = guildinfo['rr']
if (_id not in c['leaver'] or force) and (
"leaver" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['leaver'][_id] = guildinfo['l']
if (_id not in c['freerole'] or force) and (
"freerole" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['freerole'][_id] = guildinfo['fr']
if (_id not in c['timeroles'] or force) and (
"timeroles" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['timeroles'][_id] = guildinfo['tr']
if (_id not in c['namepurge'] or force) and (
"namepurge" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['namepurge'][_id] = guildinfo['np']
if (_id not in c['welcomer'] or force) and (
"welcomer" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['welcomer'][_id] = guildinfo['w']
if (_id not in c['stats'] or force) and (
"stats" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['stats'][_id] = guildinfo['s']
if (_id not in c['automod'] or force) and (
"automod" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['automod'][_id] = guildinfo['am']
if (_id not in c['borderwall'] or force) and (
"borderwall" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['borderwall'][_id] = guildinfo['bw']
# if (_id not in c['customcommands'] or force) and (
# "customcommands" in cache_filter if len(cache_filter) > 0 else True):
# c['customcommands'][_id] = guildinfo['cc']
# if (_id not in c['music'] or force) and (
# "music" in cache_filter if len(cache_filter) > 0 else True):
# c['music'][_id] = guildinfo['m']
# if (_id not in c['polls'] or force) and (
# "polls" in cache_filter if len(cache_filter) > 0 else True):
# c['polls'][_id] = guildinfo['p']
# if (_id not in c['logging'] or force) and (
# "logging" in cache_filter if len(cache_filter) > 0 else True):
# c['logging'][_id] = guildinfo['lo']
if (_id not in c['moderation'] or force) and (
"moderation" in cache_filter if len(cache_filter) > 0 else True):
self.bot.cache['moderation'][_id] = guildinfo['m']
if (_id not in c['activepunishments'] or force) and (
"activepunishments" in cache_filter if len(cache_filter) > 0 else True):
punishments = []
if os.path.exists(f"punishments/{_id}.csv"):
with open(f"punishments/{_id}.csv") as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
if row[8].lower() == "false":
punishments.append({
"userid": int(row[0]),
"type": row[4],
"endtime": int(row[6]) + int(row[7])
})
self.bot.cache['activepunishments'][_id] = punishments
# "analytics",
else:
print(f"Skipped cache as missing arg")
return cached
async def has_elevation(self, guild, guildinfo, user):
if await self.bot.has_special_permission(user, developer=True):
return True
if hasattr(guild, "owner") or hasattr(guild, "owner_id"):
try:
owner_id = guild.owner.id
except:
owner_id = guild.owner_id
if owner_id == user.id:
return True
if guildinfo:
if guildinfo.get("st"):
for staff in guildinfo['st']['u']:
if str(user.id) == staff[0]:
return True
if guild:
member = guild.get_member(user.id)
if member and await self.bot.has_permission_node(member, ["manage_guild", "ban_members"]):
return True
return False
async def get_prefix(self, message, return_prefixes=False):
if message.guild:
if message.guild.id not in self.bot.cache['prefix']:
guild_info = await self.bot.get_guild_info(message.guild.id, refer="get_prefix")
self.bot.cache['prefix'][
message.guild.id] = guild_info['d']['b']['p'] or "+"
prefix = self.bot.cache['prefix'][message.guild.id]
else:
prefix = "+"
prefix = prefix
if type(prefix) != str:
print(message.guild.id, "does not have string prefix!!!",
type(prefix), prefix)
if return_prefixes:
return prefix
else:
return commands.when_mentioned_or(prefix)(self.bot, message)
async def has_permission_node(self, target, check_for=[], return_has=False):
permissions = discord.Permissions.all()
my_permissions = {}
for key in list(
node.upper() for node in dir(permissions) if isinstance(
getattr(
permissions,
node),
bool)):
my_permissions[key] = False
for role in target.roles:
for node in my_permissions:
if getattr(role.permissions, node.lower()):
my_permissions[node] = True
if len(check_for) > 0:
my_permissions = list(
node for node,
val in my_permissions.items() if val)
if "ADMINISTRATOR" in my_permissions:
return True
for node in check_for:
if node.upper() in my_permissions:
return True, my_permissions
return False
elif return_has:
return list(node for node, val in my_permissions.items() if val)
else:
return False
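# Usage sketch (mirrors the call in has_elevation above):
#   await self.bot.has_permission_node(member, ["manage_guild", "ban_members"])
# With check_for given, this returns truthy as soon as ADMINISTRATOR or any
# requested node is present on the member's roles; with return_has=True and an
# empty check_for it instead returns the list of permission nodes the member holds.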
def get_emote(self, name, fallback=":grey_question:"):
if getattr(self.bot, "emotes", None) is None:
try:
data = rockutils.load_json("cfg/emotes.json")
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Failed to retrieve emotes.json: {e}",
prefix_colour="light red")
if not data:
guild = self.bot.get_guild(
self.bot.config['bot']['emote_server'])
if guild:
emotes = self.bot.serialiser.emotes(guild)
if emotes[0]:
emotelist = {}
for emote in emotes:
emotelist[emote['name']] = emote['str']
rockutils.save_json("cfg/emotes.json", emotelist)
else:
self.bot.blocking_broadcast(
"emotesdump", "*", args="", timeout=10)
data = None
while data is None:
try:
data = rockutils.load_json("cfg/emotes.json")
except BaseException:
pass
setattr(self.bot, "emotes", data)
else:
setattr(self.bot, "emotes", data)
# # sometimes will save it as a list with a table inside, precaution
# if type(self.bot.emotes) == list:
# setattr(self.bot, "emotes", self.bot.emotes[0])
return self.bot.emotes.get(name, fallback)
async def broadcast(self, opcode, recepients, args="", timeout=10):
payload = {
"op": opcode,
"args": ujson.dumps(args),
"recep": recepients,
"timeout": str(timeout),
}
domain = f"http://{self.bot.config['ipc']['host']}:{self.bot.config['ipc']['port']}/api/job/{self.bot.config['ipc']['auth_key']}"
timeout = aiohttp.ClientTimeout(total=timeout + 2)
async with aiohttp.ClientSession(timeout=timeout) as session:
async with session.post(domain, headers=payload) as resp:
return await resp.json()
def blocking_broadcast(self, opcode, recepients, args="", timeout=10):
payload = {
"op": opcode,
"args": ujson.dumps(args),
"recep": recepients,
"timeout": str(timeout),
}
domain = f"http://{self.bot.config['ipc']['host']}:{self.bot.config['ipc']['port']}/api/job/{self.bot.config['ipc']['auth_key']}"
timeout = timeout + 2
with requests.post(domain, headers=payload, timeout=timeout) as resp:
return resp.json()
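# Example (matching the emotes fallback in get_emote above): ask every cluster to
# dump its emotes and give the IPC service up to 10 seconds to respond.
#   self.bot.blocking_broadcast("emotesdump", "*", args="", timeout=10)
#   await self.bot.broadcast("emotesdump", "*", args="", timeout=10)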
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
# if isinstance(error, self.NoPermission):
# message = rockutils._("You do not have permission to use this command")
# return await ctx.send(f"{self.bot.get_emote('alert')} | " + message)
# if isinstance(error, self.NoDonator):
# message = rockutils._("This command is for donators only. Do +membership to find out more")
# return await ctx.send(f"{self.bot.get_emote('alert')} | " + message)
if isinstance(error, discord.ext.commands.NoPrivateMessage):
message = rockutils._(
"This command cannot be ran in a private message", ctx)
return await ctx.send(f"{self.bot.get_emote('alert')} | " + message)
if isinstance(error, (discord.ext.commands.UnexpectedQuoteError,
discord.ext.commands.InvalidEndOfQuotedStringError)):
message = rockutils._(
"Your message provided has an unexpected quotations and could not be executed", ctx)
return await ctx.send(f"{self.bot.get_emote('alert')} | " + message)
if isinstance(error, discord.ext.commands.BotMissingPermissions):
message = rockutils._(
"The bot is unable to run this command as it is missing permissions: {permissions}",
ctx).format(
permissions=",".join(map(lambda o: o.upper(), error.missing_perms)))
return await ctx.send(f"{self.bot.get_emote('alert')} | " + message)
if isinstance(error, discord.errors.Forbidden):
return
if isinstance(error, discord.ext.commands.CheckFailure):
return
_traceback = traceback.format_exception(
type(error), error, error.__traceback__)
_error = {
"name": str(error),
"type": str(type(error)),
"tb": _traceback,
"status": "not handled",
"occurance": str(datetime.now()),
"timestamp": str(time.time()),
"version": ctx.bot.version,
"gname": getattr(ctx.guild, "name", "Direct Message"),
"gid": str(getattr(ctx.guild, "id", "Direct Message")),
"aname": str(ctx.author),
"aid": str(ctx.author.id),
"mc": getattr(ctx.message, "content", ""),
"command": str(ctx.command),
"cog": str(getattr(ctx.command, "cog", ""))
}
try:
# response = await r.table("errors").insert(_error).run(self.bot.connection)
response = await self.set_value("errors", None, _error)
except BaseException:
response = {"inserted": 0}
if response['inserted'] > 0:
_id = response['generated_keys'][0]
embed = discord.Embed(
title="Uh oh, something bad just happened",
description=f"We tried executing your command but something very unexpected happened. Either a bug or a tiger escaped the zoo but im pretty sure it was a bug. I have alerted my higher ups that this has occured and it should be fixed soon. [Track Issue](https://welcomer.fun/errors/{_id})\n\n`{_error['name']}`")
await ctx.send(embed=embed)
else:
embed = discord.Embed(
title="Uh oh, something bad just happened",
description=f"We tried executing your command but something extremely unexpected happened. I was unable to contact my higher ups at this moment in time and this could be very bad. Please head to the support server and give them my memo")
await ctx.send(embed=embed, file=discord.File(io.StringIO(ujson.dumps(_error)), "memo.json"))
@commands.command(
name="help",
description="|Returns list of all commands with their usage and description")
async def custom_help(self, ctx, module=""):
message = ""
modules = dict()
modules['misc'] = []
is_developer = await ctx.bot.has_special_permission(ctx.author,
developer=True)
is_admin = await ctx.bot.has_special_permission(ctx.author,
developer=True,
admin=True)
is_support = await ctx.bot.has_special_permission(ctx.author,
developer=True,
admin=True,
support=True)
for command in self.bot.commands:
if isinstance(command, discord.ext.commands.core.Group):
if (
is_developer if "developer" in (
command.brief or "") else True) and (
is_support if "support" in (
command.brief or "") else True) and (
is_admin if "admin" in (
command.brief or "") else True):
modules[command.name.lower()] = command
else:
modules['misc'].append(command)
if module == "":
message = rockutils._(
"Please specify a module that you would like to look up",
ctx) + "\n\n"
for k in sorted(modules.keys()):
if k == "misc":
message += f"{self.bot.get_emote('dotshorizontal')} **MISC** - `Helpful commands for general use`\n"
c = self.bot.get_command(k)
if c:
message += f"{self.bot.get_emote(c.description.split('|')[0])} **{c.name.upper()}** - "
message += f"`{c.description.split('|')[1]}`\n"
return await self.send_data(ctx, message, ctx.userinfo,
prefer_dms=True, raw=False,
force_guild=False, force_dm=False,
alert=True)
if module != "":
if module.lower() in modules.keys():
modules = {
module.lower(): modules[module.lower()]
}
else:
message = rockutils._(
"Could not find a module with the name: **{modulename}**",
ctx).format(
modulename=module)
message += "\n\n" + rockutils._("Modules", ctx) + ":\n\n"
message += ", ".join(f"**{k}**" for k in modules.keys())
return await self.send_data(ctx, message, ctx.userinfo, prefer_dms=True, raw=False, force_guild=False, force_dm=False, alert=True)
for cog, cog_obj in modules.items():
if cog.lower() in ['misc']:
message = ""
message += f"\n**{self.bot.get_emote('dotshorizontal')} MISC**\n\n"
for command in sorted(
cog_obj, key=lambda o: f"{o.full_parent_name} {o.name}"):
if len(command.description.split("|")) >= 2:
sub_message = f"**{command.full_parent_name} {command.name} {command.description.split('|')[0]}** | {command.description.split('|')[1]}\n"
else:
sub_message = f"**{command.full_parent_name} {command.name}** | {command.description}\n"
if len(message) + len(sub_message) > 2048:
await self.send_data(ctx, message, ctx.userinfo, prefer_dms=True, raw=False, force_guild=False, force_dm=False, alert=True)
message = ""
message += sub_message
else:
message = ""
message += f"\n**{self.bot.get_emote(cog_obj.description.split('|')[0])} {cog.upper()}**\n\n"
for command in sorted(
cog_obj.commands,
key=lambda o: f"{o.full_parent_name} {o.name}"):
if len(command.description.split("|")) >= 2:
sub_message = f"**{command.full_parent_name} {command.name} {command.description.split('|')[0]}** | {command.description.split('|')[1]}\n"
else:
sub_message = f"**{command.full_parent_name} {command.name}** | {command.description}\n"
if len(message) + len(sub_message) > 2048:
await self.send_data(ctx, message, ctx.userinfo, prefer_dms=True, raw=False, force_guild=False, force_dm=False, alert=True)
message = ""
sub_message = ""
message += sub_message
await self.send_data(ctx, message, ctx.userinfo, prefer_dms=True, raw=False, force_guild=False, force_dm=False, alert=True)
async def chunk_guild(self, guild):
if guild.chunked:
return
a = time.time()
await guild.chunk(cache=True)
if math.ceil((time.time()-a)*1000) >= 10000:
await rockutils.send_webhook(
"https://discord.com/api/webhooks/8[removed]",
f"{'<@143090142360371200>' if math.ceil((time.time()-a)*1000) > 60000 else ''}Chunked {guild.id} in {math.ceil((time.time()-a)*1000)}ms Shard: {self.bot.shard_id} Cluster: {self.bot.cluster_id}")
rockutils.prefix_print(
f"Chunked {guild.id} in {math.ceil((time.time()-a)*1000)}ms", prefix_colour="light yellow", prefix="Core:ProcessMessage")
# try:
# a = time.time()
# since = self.bot.chunkcache.get(guild.id, 0)
# cond = self.bot.lockcache.get(guild.id)
# if not cond:
# self.bot.lockcache[guild.id] = asyncio.Condition()
# cond = self.bot.lockcache[guild.id]
# if type(since) != float:
# self.bot.chunkcache[guild.id] = 0
# since = 0
# if a-since > 60:
# rockutils.prefix_print(
# f"Chunking {guild.id}", prefix_colour="light yellow", prefix="Core:ProcessMessage")
# self.bot.chunkcache[guild.id] = a
# await cond.acquire()
# await guild.chunk(cache=True)
# cond.notify_all()
# if math.ceil((time.time()-a)*1000) >= 1000:
# await rockutils.send_webhook(
# "https://discord.com/api/webhooks/[removed]",
# f"{'<@143090142360371200>' if math.ceil((time.time()-a)*1000) > 60000 else ''}Chunked {guild.id} in {math.ceil((time.time()-a)*1000)}ms Shard: {self.bot.shard_id} Cluster: {self.bot.cluster_id}")
# rockutils.prefix_print(
# f"Chunked {guild.id} in {math.ceil((time.time()-a)*1000)}ms", prefix_colour="light yellow", prefix="Core:ProcessMessage")
# elif cond:
# rockutils.prefix_print(
# f"Waiting for chunk lock on {guild.id}", prefix_colour="light yellow", prefix="Core:ProcessMessage")
# await cond.wait()
# rockutils.prefix_print(
# f"Finished waiting for chunk lock for {guild.id}", prefix_colour="light yellow", prefix="Core:ProcessMessage")
# # wait on lock
# except Exception as e:
# rockutils.prefix_print(
# f"Failed to chunk guild: {e.id}", prefix_colour="red", prefix="Core:ProcessMessage")
async def process_message(self, message):
prefixes = (await self.get_prefix(message, return_prefixes=True), f"<@{self.bot.user.id}>", f"<@!{self.bot.user.id}>")
if not message.content.startswith(prefixes):
return
ctx = await self.bot.get_context(message)
if ctx.command is None:
if ctx.guild and ctx.guild.me in ctx.message.mentions:
message.content = f"{prefixes[0]}prefix"
ctx = await self.bot.get_context(message)
else:
return
if ctx.guild:
try:
await asyncio.wait_for(self.bot.chunk_guild(ctx.guild), timeout=10)
except asyncio.TimeoutError:
await rockutils.send_webhook(
"https://discord.com/api/webhooks/[removed]",
f"Failed to chunk guild `{ctx.guild}` ID: {ctx.guild.id} Shard: {self.bot.shard_id} Cluster: {self.bot.cluster_id}")
return await ctx.send(f"{self.bot.get_emote('alert')} | " + "I am having problems chunking this guild. Try again later. Keep getting this issue? Try the other bot: http://welcomer.gg/invitebot/fallback")
ctx.userinfo = await self.bot.get_user_info(ctx.author.id, refer="process_commands")
if isinstance(message.guild, discord.guild.Guild):
ctx.guildinfo = await self.bot.get_guild_info(ctx.guild.id, refer="process_commands")
else:
ctx.guildinfo = copy.deepcopy(
rockutils.load_json("cfg/default_guild.json"))
ctx.prefix = ctx.guildinfo['d']['b']['p']
rockutils.prefix_print(
ctx.message.content,
prefix=ctx.author.__str__())
# black and whitelist
if self.bot.donator:
if ctx.guild:
has_donated = await self.bot.has_guild_donated(ctx.guild, ctx.guildinfo, donation=True, partner=True)
if not has_donated:
if ctx.command.name not in [
'help', 'donate', 'prefix', 'membership']:
message = rockutils._(
"A membership is required to use the donator bot. You can find out more at **{website}** or by doing `{donatecommand}`. If you have donated, do `{membershipcommand}` to be able to manage servers you have a membership on".format(
website="https://welcomer.fun/donate",
donatecommand="+donate",
membershipcommand="+membership"))
try:
await ctx.send(
f"{self.bot.get_emote('cross')} | " + message)
except BaseException:
pass
elif ctx.guild:
if ctx.command.name not in [
'help', 'donate', 'prefix', 'membership']:
message = rockutils._(
"A membership is required to use the donator bot. You can find out more at **{website}** or by doing `{donatecommand}`. If you have donated, do `{membershipcommand}` to be able to manage servers you have a membership on".format(
website="https://welcomer.fun/donate",
donatecommand="+donate",
membershipcommand="+membership"))
try:
await ctx.send(
f"{self.bot.get_emote('cross')} | " + message)
except BaseException:
pass
else:
if ctx.guild and ctx.guild.get_member(
498519480985583636) and not self.bot.debug:
# If this is normal bot and sees donator welcomer, do not
# respond to messages
return
if self.bot.user.id == 330416853971107840 and ctx.guild and ctx.guild.get_member(824435160593727518):
# Do not process commands if i am the main bot and see bcomer
return
await self.bot.invoke(ctx)
class DataSerialiser:
def __init__(self, bot):
self.bot = bot
# def guild_detailed(self, guild):
# detailed = {
# "streaming": 0,
# "online": 0,
# "idle": 0,
# "dnd": 0,
# "offline": 0,
# "bots": 0,
# "members": 0,
# }
# if guild and isinstance(guild, discord.Guild):
# for member in guild.members:
# detailed["bots" if member.bot else "members"] += 1
# if hasattr(member, "status"):
# detailed[str(member.status)] += 1
# if hasattr(member, "activities"):
# for activity in member.activities:
# if isinstance(
# activity, discord.Streaming):
# detailed['streaming'] += 1
# elif hasattr(member, "activity") and isinstance(member.activity, discord.Streaming):
# detailed['streaming'] += 1
# return detailed
def guild(self, guild):
guild_info = {}
if guild and isinstance(guild, discord.Guild):
guild_info = {
"name": guild.name,
"id": str(guild.id),
"owner": {
"id": "0",
"name": "?"
},
"region": str(guild.region),
"users": guild.member_count,
"bots": sum(1 for m in guild.members if m.bot),
"creation": guild.created_at.timestamp(),
"icon": str(guild.icon),
"icons": [
str(guild.icon_url_as(format="jpeg", size=64)),
str(guild.icon_url_as(format="png", size=256))
]
}
if guild.owner or guild.owner_id:
try:
owner_id = guild.owner.id
except:
owner_id = guild.owner_id
guild_info["owner"]["id"] = str(guild.owner_id)
guild_info["owner"]["name"] = str(guild.owner)
return guild_info
async def guildelevation(self, guild, guildinfo=None, member=None):
guild_info = {}
if guild and isinstance(guild, discord.Guild):
guild_info = {
"name": guild.name,
"id": str(guild.id),
"owner": {
"id": str(getattr(guild.owner, "id", guild.owner_id)),
"name": str(guild.owner),
},
"users": guild.member_count,
"bots": sum(1 for m in guild.members if m.bot),
"icon": str(guild.icon),
"icons": [
str(guild.icon_url_as(format="jpeg", size=64)),
str(guild.icon_url_as(format="png", size=256))
]
}
if member and guildinfo:
member = guild.get_member(member.id)
if member:
guild_info['elevated'] = await self.bot.has_elevation(guild, guildinfo, member)
return guild_info
def roles(self, guild):
roles = []
for role in guild.roles:
roles.append({
"name": role.name,
"id": str(role.id),
"position": str(role.position),
"higher": role > guild.me.top_role,
})
return roles
def channels(self, guild):
channels = {
"categories": [],
"voice": [],
"text": []
}
if guild and isinstance(guild, discord.Guild):
for channel in guild.channels:
if isinstance(channel, discord.TextChannel):
channels['text'].append({
"name": channel.name,
"id": str(channel.id),
"position": channel.position,
"category": str(getattr(channel, "category_id")),
"topic": channel.topic,
"nsfw": channel.is_nsfw()
})
if isinstance(channel, discord.VoiceChannel):
channels['voice'].append({
"name": channel.name,
"id": str(channel.id),
"position": channel.position,
"category": str(getattr(channel, "category_id")),
"bitrate": channel.bitrate,
"user_limit": channel.user_limit
})
if isinstance(channel, discord.CategoryChannel):
channels['categories'].append({
"name": channel.name,
"id": str(channel.id),
"position": channel.position,
"nsfw": channel.is_nsfw()
})
return channels
def emotes(self, guild):
emotes = []
if guild and isinstance(guild, discord.Guild):
for emote in guild.emojis:
emotes.append({
"str": str(emote),
"id": str(emote.id),
"name": emote.name,
"gif": emote.animated,
"url": str(emote.url)
})
return emotes
async def invites(self, guild):
ginvites = []
if guild and isinstance(guild, discord.Guild):
try:
for invite in await guild.invites():
try:
ginvites.append(
{"code": invite.code, "created_at": math.ceil(
invite.created_at.timestamp()),
"temp": invite.temporary, "uses": invite.uses,
"max": invite.max_uses,
"inviter": str(invite.inviter.id)
if invite.inviter else "Unknown",
"inviter_str": str(invite.inviter)
if invite.inviter else "Unknown",
"channel": str(invite.channel.id),
"channel_str": str(invite.channel),
"duration": str(invite.max_age), })
except AttributeError as e:
print("Issue when handling invite", invite.code, "on guild", guild.id, e)
except Exception as e:
raise e
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Failed to retrieve invites: {e}",
prefix_colour="light red")
return []
return ginvites
def user(self, user):
userinfo = {}
if user and type(user) in [discord.User, discord.ClientUser]:
userinfo = {
"name": user.name,
"bot": user.bot,
"id": str(user.id),
"discriminator": user.discriminator,
"display": str(user.name),
"icon": str(user.avatar),
"creation": user.created_at.timestamp(),
"avatar": [
str(user.default_avatar_url),
str(user.avatar_url_as(format="jpeg", size=64)),
str(user.avatar_url_as(format="png", size=256))
]
}
return userinfo
def mutualguildsid(self, _id):
guilds = []
for guild in self.bot.guilds:
member = guild.get_member(_id)
if member is None:
continue
if member.bot:
return []
guilds.append(self.guild(guild))
return guilds
def mutualguilds(self, user):
guilds = []
if user.bot:
return guilds
for guild in self.bot.guilds:
if guild.get_member(user.id):
guilds.append(self.guild(guild))
return guilds
def badges(self, user, userinfo):
_time = time.time()
badges = []
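# Donator badge: membership tiers 1, 3 and 5 appear to count when they are held
# ('h') and either still within their expiry timestamp ('u') or flagged as
# permanent ('p'); key meanings are inferred from usage here, not confirmed.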
if (userinfo['m']['1']['h'] and (
_time < (userinfo['m']['1'].get('u', 0) or 0) or userinfo['m']['1']['p'])) or \
(userinfo['m']['3']['h'] and (
_time < (userinfo['m']['3'].get('u', 0) or 0) or userinfo['m']['3']['p'])) or \
(userinfo['m']['5']['h'] and (
_time < (userinfo['m']['5'].get('u', 0) or 0) or userinfo['m']['5']['p'])):
badges.append([
self.bot.get_emote("gift"),
"Donator",
"This user supports welcomer",
"202225"
])
if userinfo['m']['p']:
badges.append([
self.bot.get_emote("starbox"),
"Welcomer Partner",
"Currently a Welcomer partner",
"2D103F"
])
all_guilds = rockutils.merge_embeded_lists(
userinfo['g']['g']['m']['c'])
tops = {}
for guild in all_guilds:
if guild['owner']['id'] == str(user.id):
if guild['users'] > 250:
if not guild['id'] in tops:
tops[guild['id']] = guild
if guild['users'] > tops[guild['id']]['users']:
tops[guild['id']] = guild
for guild in tops.values():
badges.append([
self.bot.get_emote("packagevariantclosed"),
"Server Owner",
f"Owner of server with {guild['users']} members",
"202225"
])
if user.id in self.bot.config['roles']['support']:
badges.append([
self.bot.get_emote("gavel"),
"Welcomer Support",
"Official Welcomer support member",
"202225"
])
if user.id in self.bot.config['roles']['trusted']:
badges.append([
self.bot.get_emote("accountstar"),
"Trusted user",
"User that Welcomer recognises as trustworthy",
"202225"
])
if user.id in self.bot.config['roles']['admins']:
badges.append([
self.bot.get_emote("wrench"),
"Welcomer Administrator",
"Official Welcomer administrator",
"202225"
])
if user.id in self.bot.config['roles']['developer']:
badges.append([
self.bot.get_emote("cogs"),
"Welcomer Developer",
"These people made the bot :)",
"202225"
])
return badges
def setup(bot):
def existingdict(subject, key, data):
if not subject.get(key):
subject[key] = data
caches = [
"prefix",
"guilddetails",
"rules",
"analytics",
"channels",
"serverlock",
"staff",
"tempchannel",
"autorole",
"rolereact",
"leaver",
"freerole",
"timeroles",
"namepurge",
"welcomer",
"stats",
"automod",
"borderwall",
"customcommands",
"music",
"polls",
"logging",
"moderation",
"activepunishments"
]
for name in caches:
existingdict(bot.cache, name, {})
core = WelcomerCore(bot)
for key in dir(core):
if not ("on_" in key[:3] and key != "on_message_handle"):
value = getattr(core, key)
if callable(value) and "_" not in key[0]:
setattr(bot, key, value)
if not hasattr(bot, key):
print(f"I called set for {key} but its not set now")
bot.remove_command("help")
bot.add_cog(core)
if not hasattr(bot, "chunkcache"):
setattr(bot, "chunkcache", {})
if not hasattr(bot, "lockcache"):
setattr(bot, "lockcache", {})
setattr(bot, "ranonconnect", False)
setattr(bot, "cachemutex", False)
setattr(bot, "serialiser", DataSerialiser(bot))
setattr(bot, "emotes", rockutils.load_json("cfg/emotes.json"))
default_data = rockutils.load_json("cfg/default_user.json")
setattr(bot, "default_user", default_data)
default_data = rockutils.load_json("cfg/default_guild.json")
setattr(bot, "default_guild", default_data)
bot.reload_data("cfg/config.json", "config")
reload(handling)
| 42.222712
| 327
| 0.48417
| 83,971
| 0.973453
| 0
| 0
| 10,273
| 0.119092
| 69,497
| 0.80566
| 25,247
| 0.292682
|
c6f49b93679334772aa9bf531c4d72e0b150e6e1
| 1,225
|
py
|
Python
|
evalml/tests/data_checks_tests/test_utils.py
|
Mahesh1822/evalml
|
aa0ec2379aeba12bbd0dcaaa000f9a2a62064169
|
[
"BSD-3-Clause"
] | null | null | null |
evalml/tests/data_checks_tests/test_utils.py
|
Mahesh1822/evalml
|
aa0ec2379aeba12bbd0dcaaa000f9a2a62064169
|
[
"BSD-3-Clause"
] | 1
|
2022-02-19T12:59:09.000Z
|
2022-02-19T12:59:09.000Z
|
evalml/tests/data_checks_tests/test_utils.py
|
Mahesh1822/evalml
|
aa0ec2379aeba12bbd0dcaaa000f9a2a62064169
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from evalml.data_checks import DataCheckActionCode
from evalml.data_checks.utils import handle_data_check_action_code
from evalml.problem_types import ProblemTypes
def test_handle_action_code_errors():
with pytest.raises(KeyError, match="Action code 'dropping cols' does not"):
handle_data_check_action_code("dropping cols")
with pytest.raises(
ValueError,
match="`handle_data_check_action_code` was not passed a str or DataCheckActionCode object",
):
handle_data_check_action_code(None)
with pytest.raises(
ValueError,
match="`handle_data_check_action_code` was not passed a str or DataCheckActionCode object",
):
handle_data_check_action_code(ProblemTypes.BINARY)
@pytest.mark.parametrize(
"action_code_str,expected_enum",
[
("drop_rows", DataCheckActionCode.DROP_ROWS),
("Drop_col", DataCheckActionCode.DROP_COL),
("TRANSFORM_TARGET", DataCheckActionCode.TRANSFORM_TARGET),
(DataCheckActionCode.IMPUTE_COL, DataCheckActionCode.IMPUTE_COL),
],
)
def test_handle_action_code(action_code_str, expected_enum):
assert handle_data_check_action_code(action_code_str) == expected_enum
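# Taken together, these tests document the expected contract: strings map to
# DataCheckActionCode members case-insensitively, enum members pass through
# unchanged, unknown strings raise KeyError, and non-str/non-enum inputs raise
# ValueError.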
| 34.027778
| 99
| 0.755102
| 0
| 0
| 0
| 0
| 461
| 0.376327
| 0
| 0
| 291
| 0.237551
|
c6f503162b0ef4701efc6276ebdf2a288cdafb1f
| 3,480
|
py
|
Python
|
figures/bothspectra.py
|
DanielAndreasen/Paper-updated-nir-linelist
|
a4094a1d73a58c1ee1597c6df8a11b0b9ce17777
|
[
"MIT"
] | null | null | null |
figures/bothspectra.py
|
DanielAndreasen/Paper-updated-nir-linelist
|
a4094a1d73a58c1ee1597c6df8a11b0b9ce17777
|
[
"MIT"
] | null | null | null |
figures/bothspectra.py
|
DanielAndreasen/Paper-updated-nir-linelist
|
a4094a1d73a58c1ee1597c6df8a11b0b9ce17777
|
[
"MIT"
] | null | null | null |
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
sns.set_context('paper', font_scale=1.7)
from plot_fits import get_wavelength, dopplerShift
from scipy.interpolate import interp1d
plt.rcParams['xtick.direction'] = 'in'
"""
Compare the spectrum of Arcturus with 10 Leo, plus have some Fe lines
identified.
"""
def get_ymin(center, d1, d2):
w1, f1 = d1
i1 = np.argmin(abs(w1-center))
v1 = f1[i1]
w2, f2 = d2
i2 = np.argmin(abs(w2-center))
v2 = f2[i2]
return min([v1, v2])
if __name__ == '__main__':
regions = [[10000, 10100], [10130, 10230], [12200, 12300]]
lines = np.loadtxt('Felines.moog', usecols=(0,))
wArcturus = get_wavelength(fits.getheader('ArcturusSummer.fits'))
fArcturus = fits.getdata('ArcturusSummer.fits')
w10Leo1 = get_wavelength(fits.getheader('10LeoYJ.fits'))
f10Leo1 = fits.getdata('10LeoYJ.fits')
w10Leo2 = get_wavelength(fits.getheader('10LeoH.fits'))
f10Leo2 = fits.getdata('10LeoH.fits')
w10Leo3 = get_wavelength(fits.getheader('10LeoK.fits'))
f10Leo3 = fits.getdata('10LeoK.fits')
f10Leo1, w10Leo1 = dopplerShift(w10Leo1, f10Leo1, -82.53)
f10Leo2, w10Leo2 = dopplerShift(w10Leo2, f10Leo2, -81.82)
f10Leo3, w10Leo3 = dopplerShift(w10Leo3, f10Leo3, -81.37)
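# The per-band velocities (-82.53, -81.82 and -81.37, presumably km/s) shift the
# 10 Leo YJ/H/K spectra to the rest frame so their lines align with Arcturus.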
for i, region in enumerate(regions):
if i != 1:
continue
if (w10Leo1[0] <= region[0]) and (w10Leo1[-1] >= region[1]):
w10Leo = w10Leo1
f10Leo = f10Leo1
elif (w10Leo2[0] <= region[0]) and (w10Leo2[-1] >= region[1]):
w10Leo = w10Leo2
f10Leo = f10Leo2
elif (w10Leo3[0] <= region[0]) and (w10Leo3[-1] >= region[1]):
w10Leo = w10Leo3
f10Leo = f10Leo3
else:
continue
i1 = (region[0] <= wArcturus) & (wArcturus <= region[1])
i2 = (region[0] <= w10Leo) & (w10Leo <= region[1])
i3 = (region[0] <= lines) & (lines <= region[1])
w1, f1 = wArcturus[i1], fArcturus[i1]
w2, f2 = w10Leo[i2], f10Leo[i2]
plines = lines[i3]
w0 = w1[0] if w1[0] != min((w1[0], w2[0])) else w2[0]
wn = w1[-1] if w1[-1] != max((w1[-1], w2[-1])) else w2[-1]
interp1 = interp1d(w1, f1, kind='linear')
interp2 = interp1d(w2, f2, kind='linear')
w = np.linspace(w0, wn, len(w1))
f1 = interp1(w)
f2 = interp2(w)
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.tick_params('y', labelcolor='w', left='off')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.plot(w, f1, label='Arcturus')
ax.plot(w, f2-0.15, label='10 Leo')
ax.plot(w, f1-f2+0.15, label='Difference')
for j, line in enumerate(plines):
if j%2 == 0:
dy = -0.02
else:
dy = 0.02
if j == 6:
dy = 0.02
elif j == 7:
dy = -0.02
ymin = get_ymin(line, (w1, f1), (w2, f2))
plt.vlines(line, ymin, 1.04+dy, linestyles='dashed')
plt.text(line, 1.04+dy, 'Fe')
ax.set_xlabel(r'Wavelength [$\AA$]')
ax.set_ylabel('Normalized flux')
y1, _ = plt.ylim()
plt.ylim(y1, 1.15)
plt.legend(loc='best', frameon=False)
plt.tight_layout()
# plt.savefig('bothspectra.pdf')
plt.show()
| 31.926606
| 70
| 0.561494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 439
| 0.126149
|
c6f5b57e9157f7c17bb6f3082af0b5d89d425e82
| 298
|
py
|
Python
|
main.py
|
pesikj/DataAnalysisUsingPython
|
00269a7a7b5388fbbdcf3ddadd951a80a07f9c3a
|
[
"MIT"
] | null | null | null |
main.py
|
pesikj/DataAnalysisUsingPython
|
00269a7a7b5388fbbdcf3ddadd951a80a07f9c3a
|
[
"MIT"
] | null | null | null |
main.py
|
pesikj/DataAnalysisUsingPython
|
00269a7a7b5388fbbdcf3ddadd951a80a07f9c3a
|
[
"MIT"
] | null | null | null |
from statistical_hypothesis_testing.plots import plots_z_test
from statistical_hypothesis_testing.tails import Tail
#plots_z_test.create_critical_region_plot(alphas=[0.1, 0.05, 0.01], tails=Tail.RIGHT_TAILED)
plots_z_test.create_p_value_plot(0.5109,alpha=0.05,lang='cs', tails=Tail.RIGHT_TAILED)
| 42.571429
| 92
| 0.842282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 96
| 0.322148
|
c6f5b6dd280b07a2399dbf6e91ec39c3acaaae3c
| 3,471
|
py
|
Python
|
projects/migrations/0001_initial.py
|
Zefarak/illidius_plan
|
78dd9cc4da374ff88fc507e4870712d87e9ff6c3
|
[
"MIT"
] | 1
|
2019-02-18T14:31:57.000Z
|
2019-02-18T14:31:57.000Z
|
projects/migrations/0001_initial.py
|
Zefarak/illidius_plan
|
78dd9cc4da374ff88fc507e4870712d87e9ff6c3
|
[
"MIT"
] | null | null | null |
projects/migrations/0001_initial.py
|
Zefarak/illidius_plan
|
78dd9cc4da374ff88fc507e4870712d87e9ff6c3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-21 04:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ImageProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=60)),
('alt', models.CharField(blank=True, max_length=60, null=True)),
('image', models.ImageField(upload_to='')),
('text', models.TextField(blank=True, null=True, verbose_name='Optional description.')),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='ProjectCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=70)),
('title_eng', models.CharField(max_length=70)),
],
),
migrations.CreateModel(
name='Projects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=True)),
('active_eng', models.BooleanField(default=True)),
('title', models.CharField(max_length=255)),
('short_description', models.CharField(help_text='The text appears on homepage', max_length=255)),
('description', models.TextField()),
('seo_description', models.CharField(blank=True, max_length=255, null=True)),
('seo_keywords', models.CharField(blank=True, max_length=255, null=True)),
('slug', models.SlugField(allow_unicode=True, blank=True, null=True)),
('title_eng', models.CharField(default='Insert Text', max_length=255)),
('short_description_eng', models.CharField(default='Insert Text', help_text='The text appears on homepage', max_length=255)),
('description_eng', models.TextField(default='Insert Text')),
('seo_description_eng', models.CharField(blank=True, default='Insert Text', max_length=255, null=True)),
('seo_keywords_eng', models.CharField(blank=True, default='Insert Text', max_length=255, null=True)),
('image', models.ImageField(upload_to='')),
('day_added', models.DateField(auto_now_add=True)),
('href', models.CharField(max_length=255)),
('demo', models.BooleanField(default=False)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.ProjectCategory')),
],
options={
'verbose_name_plural': 'Project',
},
managers=[
('my_query', django.db.models.manager.Manager()),
],
),
migrations.AddField(
model_name='imageproject',
name='project_related',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Projects'),
),
]
| 47.547945
| 141
| 0.589455
| 3,250
| 0.93633
| 0
| 0
| 0
| 0
| 0
| 0
| 679
| 0.195621
|
c6f6ce9055d1d8634c3084a055d492122c9b4918
| 1,818
|
py
|
Python
|
EnumLasso/paper/paper_thaliana.py
|
t-basa/LassoVariants
|
ead33ac83de19865a9553dbdda9a28aa5c781e44
|
[
"MIT"
] | 12
|
2016-11-30T04:39:18.000Z
|
2021-09-11T13:57:37.000Z
|
EnumLasso/paper/paper_thaliana.py
|
t-basa/LassoVariants
|
ead33ac83de19865a9553dbdda9a28aa5c781e44
|
[
"MIT"
] | 2
|
2018-03-05T19:01:09.000Z
|
2019-10-10T00:30:55.000Z
|
EnumLasso/paper/paper_thaliana.py
|
t-basa/LassoVariants
|
ead33ac83de19865a9553dbdda9a28aa5c781e44
|
[
"MIT"
] | 6
|
2017-08-19T17:49:51.000Z
|
2022-01-09T07:41:22.000Z
|
# -*- coding: utf-8 -*-
"""
@author: satohara
"""
import sys
sys.path.append('../')
import codecs
import numpy as np
import pandas as pd
from EnumerateLinearModel import EnumLasso
# data - x
fn = './data/call_method_32.b'
df = pd.read_csv(fn, sep=',', header=None)
data_id_x = np.array([int(v) for v in df.ix[1, 2:]])
gene_id = df.ix[2:, :1].values
gene_id = np.array([[int(v[0]), int(v[1])] for v in gene_id])
data = df.ix[2:, 2:].values
data[data=='-'] = 0
data[data=='A'] = 1
data[data=='T'] = 2
data[data=='G'] = 3
data[data=='C'] = 4
count = np.c_[np.sum(data == 1, axis=1), np.sum(data == 2, axis=1), np.sum(data == 3, axis=1), np.sum(data == 4, axis=1)]
c = np.argmax(count, axis=1) + 1
x = data.copy()
for i in range(data.shape[1]):
x[:, i] = 1 - (data[:, i] - c == 0)
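# x is now a binary genotype matrix: 1 where a sample differs from the per-SNP
# majority allele (missing '-' calls count as differing), 0 where it matches.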
# data - y
fn = './data/phenotype_published_raw.tsv'
with codecs.open(fn, 'r', 'Shift-JIS', 'ignore') as file:
df = pd.read_table(file, delimiter='\t')
y = df.ix[:, 41].values
# data - reordering, remove nan
idx = np.argsort(data_id_x)
x = x[:, idx]
idx = ~np.isnan(y)
x = x[:, idx].T
y = y[idx]
# data - training & test split
seed = 0
r = 0.8
np.random.seed(seed)
idx = np.random.permutation(x.shape[0])
m = int(np.round(x.shape[0] * r))
xte = x[idx[m:], :]
yte = y[idx[m:]]
x = x[idx[:m], :]
y = y[idx[:m]]
# EnumLasso
rho = 0.1
delta = 0.05
mdl = EnumLasso(rho=rho, warm_start=True, enumtype='k', k=50, delta=delta, save='paper_thaliana.npy', modeltype='regression', verbose=True)
mdl.fit(x, y)
print()
print('--- Enumerated Solutions ---')
print(mdl)
# evaluate
print('--- Mean Square Error / # of Non-zeros ---')
for i in range(len(mdl.obj_)):
a = mdl.a_[i]
b = mdl.b_[i]
z = xte.dot(a) + b
mse = np.mean((z - yte)**2)
print('Solution %3d: MSE = %f / NNZ = %d' % (i+1, mse, a.nonzero()[0].size))
| 24.90411
| 139
| 0.593509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 404
| 0.222222
|
c6f74625e459f6cfa2aca2f74b48bf8881d4641b
| 8,309
|
py
|
Python
|
lib/backup_service_client/models/bucket.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 14
|
2015-02-06T02:47:57.000Z
|
2020-03-14T15:06:05.000Z
|
lib/backup_service_client/models/bucket.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 3
|
2019-02-27T19:29:11.000Z
|
2021-06-02T02:14:27.000Z
|
lib/backup_service_client/models/bucket.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 155
|
2018-11-13T14:57:07.000Z
|
2022-03-28T11:53:22.000Z
|
# coding: utf-8
"""
Couchbase Backup Service API
This REST API allows users to remotely schedule and run backups, restores and merges, as well as to explore various archives for all their Couchbase Clusters. # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Bucket(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'size': 'int',
'items': 'int',
'mutations': 'int',
'tombstones': 'int',
'views_count': 'int',
'fts_count': 'int',
'index_count': 'int',
'analytics_count': 'int'
}
attribute_map = {
'name': 'name',
'size': 'size',
'items': 'items',
'mutations': 'mutations',
'tombstones': 'tombstones',
'views_count': 'views_count',
'fts_count': 'fts_count',
'index_count': 'index_count',
'analytics_count': 'analytics_count'
}
def __init__(self, name=None, size=None, items=None, mutations=None, tombstones=None, views_count=None, fts_count=None, index_count=None, analytics_count=None): # noqa: E501
"""Bucket - a model defined in Swagger""" # noqa: E501
self._name = None
self._size = None
self._items = None
self._mutations = None
self._tombstones = None
self._views_count = None
self._fts_count = None
self._index_count = None
self._analytics_count = None
self.discriminator = None
if name is not None:
self.name = name
if size is not None:
self.size = size
if items is not None:
self.items = items
if mutations is not None:
self.mutations = mutations
if tombstones is not None:
self.tombstones = tombstones
if views_count is not None:
self.views_count = views_count
if fts_count is not None:
self.fts_count = fts_count
if index_count is not None:
self.index_count = index_count
if analytics_count is not None:
self.analytics_count = analytics_count
@property
def name(self):
"""Gets the name of this Bucket. # noqa: E501
:return: The name of this Bucket. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Bucket.
:param name: The name of this Bucket. # noqa: E501
:type: str
"""
self._name = name
@property
def size(self):
"""Gets the size of this Bucket. # noqa: E501
:return: The size of this Bucket. # noqa: E501
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this Bucket.
:param size: The size of this Bucket. # noqa: E501
:type: int
"""
self._size = size
@property
def items(self):
"""Gets the items of this Bucket. # noqa: E501
:return: The items of this Bucket. # noqa: E501
:rtype: int
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this Bucket.
:param items: The items of this Bucket. # noqa: E501
:type: int
"""
self._items = items
@property
def mutations(self):
"""Gets the mutations of this Bucket. # noqa: E501
:return: The mutations of this Bucket. # noqa: E501
:rtype: int
"""
return self._mutations
@mutations.setter
def mutations(self, mutations):
"""Sets the mutations of this Bucket.
:param mutations: The mutations of this Bucket. # noqa: E501
:type: int
"""
self._mutations = mutations
@property
def tombstones(self):
"""Gets the tombstones of this Bucket. # noqa: E501
:return: The tombstones of this Bucket. # noqa: E501
:rtype: int
"""
return self._tombstones
@tombstones.setter
def tombstones(self, tombstones):
"""Sets the tombstones of this Bucket.
:param tombstones: The tombstones of this Bucket. # noqa: E501
:type: int
"""
self._tombstones = tombstones
@property
def views_count(self):
"""Gets the views_count of this Bucket. # noqa: E501
:return: The views_count of this Bucket. # noqa: E501
:rtype: int
"""
return self._views_count
@views_count.setter
def views_count(self, views_count):
"""Sets the views_count of this Bucket.
:param views_count: The views_count of this Bucket. # noqa: E501
:type: int
"""
self._views_count = views_count
@property
def fts_count(self):
"""Gets the fts_count of this Bucket. # noqa: E501
:return: The fts_count of this Bucket. # noqa: E501
:rtype: int
"""
return self._fts_count
@fts_count.setter
def fts_count(self, fts_count):
"""Sets the fts_count of this Bucket.
:param fts_count: The fts_count of this Bucket. # noqa: E501
:type: int
"""
self._fts_count = fts_count
@property
def index_count(self):
"""Gets the index_count of this Bucket. # noqa: E501
:return: The index_count of this Bucket. # noqa: E501
:rtype: int
"""
return self._index_count
@index_count.setter
def index_count(self, index_count):
"""Sets the index_count of this Bucket.
:param index_count: The index_count of this Bucket. # noqa: E501
:type: int
"""
self._index_count = index_count
@property
def analytics_count(self):
"""Gets the analytics_count of this Bucket. # noqa: E501
:return: The analytics_count of this Bucket. # noqa: E501
:rtype: int
"""
return self._analytics_count
@analytics_count.setter
def analytics_count(self, analytics_count):
"""Sets the analytics_count of this Bucket.
:param analytics_count: The analytics_count of this Bucket. # noqa: E501
:type: int
"""
self._analytics_count = analytics_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Bucket, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Bucket):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.965625
| 178
| 0.563846
| 7,910
| 0.95198
| 0
| 0
| 4,095
| 0.492839
| 0
| 0
| 3,942
| 0.474425
|
c6f93b1caf13cee134c81078e57fec4a501c2e10
| 1,618
|
py
|
Python
|
funciones/app.py
|
christophermontero/estima-tu-proyecto
|
19f533be203c9ac2c4383ded5a1664dd1d05d679
|
[
"MIT"
] | 2
|
2021-05-29T16:57:17.000Z
|
2021-06-13T18:39:24.000Z
|
funciones/app.py
|
christophermontero/estima-tu-proyecto
|
19f533be203c9ac2c4383ded5a1664dd1d05d679
|
[
"MIT"
] | 22
|
2021-05-22T18:23:40.000Z
|
2021-12-18T21:09:59.000Z
|
funciones/app.py
|
christophermontero/estima-tu-proyecto
|
19f533be203c9ac2c4383ded5a1664dd1d05d679
|
[
"MIT"
] | null | null | null |
from flask import Flask, jsonify, request
from db import db_session, init_db
from model import Funcion
app = Flask(__name__)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = False
init_db()
@app.route("/funciones", methods=["POST"])
def create_funcion():
data = request.json
if data["nombreFuncion"] is None:
return jsonify({"mensaje": "error"}), 400
funcion = Funcion.create(
data["idFuncion"], data["nombreFuncion"], data["numCampos"], data["numObjetos"],
data["complejidad"], data["modulo_id"],
)
return jsonify({"funcion": funcion.toJson()})
@app.route("/funciones", methods=["GET"])
def get_funciones():
funciones = [funcion.toJson() for funcion in Funcion.query.all()]
return jsonify({"funciones": funciones})
@app.route("/funciones/<idFuncion>", methods=["GET"])
def get_funcion(idFuncion):
funcion = Funcion.query.filter_by(idFuncion=idFuncion).first()
if funcion is None:
return jsonify({"message": "La función no existe"}), 404
return jsonify({"funcion": funcion.toJson()})
@app.route("/funciones/porModulo/<idModule>", methods=["GET"])
def get_funcion_byModule(idModule):
m = [function.toJson() for function in Funcion.query.filter_by(modulo_id=idModule).all()]
return jsonify({"funcion": m})
@app.route("/funciones/<idFuncion>", methods=["DELETE"])
def delete_funcion(idFuncion):
function = Funcion.query.filter_by(idFuncion=idFuncion).first()
confirmation = Funcion.delete(function)
return jsonify({"modulos": confirmation})
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| 25.68254
| 93
| 0.685414
| 0
| 0
| 0
| 0
| 1,346
| 0.831377
| 0
| 0
| 365
| 0.225448
|
c6f9a9602db33208c1f896b22af13200b9be42d9
| 309
|
py
|
Python
|
onnx_script/check_onnx_model.py
|
abyssss52/pytorch-image-models
|
6ed4124c610a73fc849e7e9567bc36cf5bf38ceb
|
[
"Apache-2.0"
] | null | null | null |
onnx_script/check_onnx_model.py
|
abyssss52/pytorch-image-models
|
6ed4124c610a73fc849e7e9567bc36cf5bf38ceb
|
[
"Apache-2.0"
] | null | null | null |
onnx_script/check_onnx_model.py
|
abyssss52/pytorch-image-models
|
6ed4124c610a73fc849e7e9567bc36cf5bf38ceb
|
[
"Apache-2.0"
] | null | null | null |
import onnx
# Load the ONNX model
model = onnx.load("./mobilenetv2_new.onnx")
# model = onnx.load("../FaceAnti-Spoofing.onnx")
# Check that the IR is well formed
onnx.checker.check_model(model)
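# check_model returns None on success and raises a validation error (e.g.
# onnx.checker.ValidationError) if the IR is malformed, so reaching the next
# line means the model passed the check.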
# Print a human readable representation of the graph
print(onnx.helper.printable_graph(model.graph))
print(model.graph)
| 25.75
| 52
| 0.76699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 179
| 0.579288
|
c6fa00680fcfe377a498032a4d31cbf4682bc376
| 1,071
|
py
|
Python
|
2015/07/puzzle2.py
|
jsvennevid/adventofcode
|
c6d5e3e3a166ffad5e8a7cc829599f49607a1efe
|
[
"MIT"
] | null | null | null |
2015/07/puzzle2.py
|
jsvennevid/adventofcode
|
c6d5e3e3a166ffad5e8a7cc829599f49607a1efe
|
[
"MIT"
] | null | null | null |
2015/07/puzzle2.py
|
jsvennevid/adventofcode
|
c6d5e3e3a166ffad5e8a7cc829599f49607a1efe
|
[
"MIT"
] | null | null | null |
import re
wires = {}
for i in open('day7.txt'):
set = re.match(r'([a-z0-9]+) -> ([a-z]+)',i)
if set:
wires[set.group(2)] = set.group(1)
op1 = re.match(r'(NOT) ([a-z0-9]+) -> ([a-z]+)',i)
if op1:
wires[op1.group(3)] = [op1.group(1), op1.group(2)]
op2 = re.match(r'([a-z0-9]+) (AND|OR|LSHIFT|RSHIFT) ([a-z0-9]+) -> ([a-z]+)',i)
if op2:
wires[op2.group(4)] = [op2.group(2), op2.group(1), op2.group(3)]
def visit(wire,results):
if re.match(r'[0-9]+',wire):
return int(wire)
if results.has_key(wire):
return results[wire]
data = wires[wire]
if not isinstance(data, list):
return visit(data, results)
value = {
'NOT': lambda d: (~visit(d[1],results)) & 65535,
'AND': lambda d: visit(d[1],results) & visit(d[2],results),
'OR': lambda d: visit(d[1],results) | visit(d[2],results),
'RSHIFT': lambda d: (visit(d[1],results) >> visit(d[2],results)) & 65535,
'LSHIFT': lambda d: (visit(d[1],results) << visit(d[2],results)) & 65535
}[data[0]](data)
results[wire] = value
return value
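# Part 2: override wire 'b' with part 1's answer for 'a', then re-evaluate 'a'
# with a fresh memo dict so every wire is recomputed from scratch.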
wires['b'] = str(visit('a', {}))
print 'a:', visit('a', {})
| 31.5
| 80
| 0.5845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 181
| 0.169001
|
c6fa99e51df1893798f6cb4d6c3cbd2091fbf05a
| 7,167
|
py
|
Python
|
src/visualization/plot_grid.py
|
davimnz/boa
|
0546ad4df0ecabec1fd3beb1264cd0930dce13a9
|
[
"MIT"
] | null | null | null |
src/visualization/plot_grid.py
|
davimnz/boa
|
0546ad4df0ecabec1fd3beb1264cd0930dce13a9
|
[
"MIT"
] | null | null | null |
src/visualization/plot_grid.py
|
davimnz/boa
|
0546ad4df0ecabec1fd3beb1264cd0930dce13a9
|
[
"MIT"
] | null | null | null |
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from math import cos, radians
def shift_position(pos, x_shift, y_shift) -> dict:
"""
Moves nodes' position by (x_shift, y_shift)
"""
return {n: (x + x_shift, y + y_shift) for n, (x, y) in pos.items()}
def convert_to_2d(latitude, longitude, center_latitude=50.0):
"""
Converts (lat, long) to (x, y) using approximation for small areas.
"""
earth_radius = 6373.0 # unit : km
aspect_ratio = radians(center_latitude)
x = earth_radius * longitude * cos(aspect_ratio)
y = earth_radius * latitude
return x, y
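# Note: latitude and longitude are used in degrees here (not radians), so x and y
# come out in arbitrary degree-scaled units rather than true km. Only relative
# positions matter for plotting, and cos(center_latitude) keeps the east-west /
# north-south aspect ratio roughly correct.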
def plot_stock_grid(data, position, supply_site_code,
sku_code, balance=False) -> None:
"""
Plots a map containing the amount of stock in each location of a given
grid: Hub, Depot or Distributor.
"""
grid_table = data[(data['Supply Site Code'] == supply_site_code)]
grid_table = grid_table[(grid_table['SKU'] == sku_code)]
stock_mean = []
positions = {}
labels = {}
colors = []
color_dict = {"DEP": "#3f60e1",
"DIST": "#60e13f",
"HUB": "#e13f60",
"DEPOT": '#3f60e1'}
location_index = grid_table.columns.to_list().index('Location Code')
if balance:
stock_index = grid_table.columns.to_list().index('x_opt')
else:
stock_index = grid_table.columns.to_list().index('Closing Stock')
type_index = grid_table.columns.to_list().index('Location Type')
reorder_index = grid_table.columns.to_list().index('Reorder Point (Hl)')
for row in grid_table.itertuples():
location_code = row[location_index + 1]
stock = round(100 * row[stock_index + 1]
/ row[reorder_index + 1]) / 100
stock_mean.append(stock)
type = row[type_index + 1]
if location_code == supply_site_code:
color = color_dict["HUB"]
colors.append(color)
else:
color = color_dict[type]
colors.append(color)
position_row = position[position['code'] == location_code]
latitude = position_row['latitude']
longitude = position_row['longitude']
position_2d = convert_to_2d(latitude, longitude)
positions[location_code] = position_2d
labels[location_code] = stock
positions_nodes = shift_position(positions, 0, 500)
print(np.mean(stock_mean))
grid = nx.Graph()
for key, value in labels.items():
grid.add_node(key, stock=value)
nx.draw_networkx(grid, pos=positions, with_labels=False,
node_size=350, node_color=colors)
nx.draw_networkx_labels(grid, pos=positions_nodes,
labels=labels, font_size=16)
ylim = plt.ylim()
plt.ylim(0.99 * ylim[0], 1.01 * ylim[1])
dep_legend = mpatches.Patch(color=color_dict["DEP"], label='Depósito')
dist_legend = mpatches.Patch(color=color_dict["DIST"], label='CDD')
hub_legend = mpatches.Patch(color=color_dict["HUB"], label="Hub")
plt.legend(handles=[dep_legend, dist_legend, hub_legend], fontsize=20)
plt.axis('off')
plt.show()
def plot_exchange_map(data, exchange, position,
supply_site_code, sku_code) -> None:
"""
Plots the optimal exchange map for a given grid.
"""
exchange_table = exchange[(
exchange['Supply Site Code'] == supply_site_code)]
exchange_table = exchange_table[(exchange_table['SKU'] == sku_code)]
grid_table = data[(data['Supply Site Code'] == supply_site_code)]
grid_table = grid_table[(grid_table['SKU'] == sku_code)]
labels = {'Hub': 'Hub'}
colors = {}
color_dict = {"DEP": "#3f60e1", "DIST": "#60e13f", "HUB": "#e13f60"}
location_index = grid_table.columns.to_list().index('Location Code')
type_index = grid_table.columns.to_list().index('Location Type')
for row in grid_table.itertuples():
location_code = row[location_index + 1]
type = row[type_index + 1]
if location_code == supply_site_code:
color = color_dict["HUB"]
colors[location_code] = color
else:
color = color_dict[type]
colors[location_code] = color
labels[location_code] = location_code
grid = nx.DiGraph()
for key, value in labels.items():
grid.add_node(key, stock=value)
nodes_with_edges = []
origin_index = exchange_table.columns.to_list().index('Origin')
destiny_index = exchange_table.columns.to_list().index('Destiny')
amount_index = exchange_table.columns.to_list().index('Amount')
for row in exchange_table.itertuples():
origin = row[origin_index + 1]
destiny = row[destiny_index + 1]
amount = round(row[amount_index + 1])
if origin == "Available":
origin = supply_site_code
if destiny == supply_site_code:
destiny = 'Hub'
colors['Hub'] = colors[supply_site_code]
grid.add_edge(origin, destiny, weight=amount)
nodes_with_edges.append(origin)
nodes_with_edges.append(destiny)
layout = nx.planar_layout(grid)
layout_label = shift_position(layout, -0.03, 0.03)
nodes_with_edges = list(set(nodes_with_edges))
nodes_colors = []
nodes_labels = {}
for node in nodes_with_edges:
nodes_colors.append(colors[node])
nodes_labels[node] = labels[node]
nx.draw_networkx(grid, layout, node_color=nodes_colors,
nodelist=nodes_with_edges, with_labels=False,
arrowsize=20, node_size=400)
grid_edge_labels = nx.get_edge_attributes(grid, 'weight')
nx.draw_networkx_edge_labels(grid, layout,
edge_labels=grid_edge_labels)
nx.draw_networkx_labels(grid, pos=layout_label, labels=nodes_labels)
dep_legend = mpatches.Patch(color=color_dict["DEP"], label='Depósito')
dist_legend = mpatches.Patch(color=color_dict["DIST"], label='CDD')
hub_legend = mpatches.Patch(color=color_dict["HUB"], label="Hub")
plt.legend(handles=[dep_legend, dist_legend, hub_legend], fontsize=20)
plt.axis('off')
plt.show()
if __name__ == "__main__":
unbalanced = pd.read_csv('data/data.csv', delimiter=';', decimal=',')
balanced = pd.read_csv('output/distribution_output_cvxopt.csv',
delimiter=';', decimal=',')
position = pd.read_csv('data/geopositioning.csv',
delimiter=';', decimal=',')
exchange = pd.read_csv('output/exchanges_output.csv',
delimiter=';', decimal=',')
# choose which grid to plot. The grid cannot be scenario 0
supply_site_code = 'PL-1721'
sku_code = 85023
# plots unbalanced grid, balanced grid, and exchange map
plot_stock_grid(unbalanced, position, supply_site_code, sku_code)
plot_stock_grid(balanced, position, supply_site_code,
sku_code, balance=True)
plot_exchange_map(unbalanced, exchange, position,
supply_site_code, sku_code)
| 34.960976
| 76
| 0.635412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,063
| 0.148277
|
c6fb2216661678548d14f34f7328e08d3f4c59ba
| 1,254
|
py
|
Python
|
my_project/urls.py
|
stripathi669/codepal-sample-login
|
f553cc7f7794dd20197b1df336ed7953ac7a62dc
|
[
"MIT"
] | 2
|
2017-04-23T08:54:09.000Z
|
2017-12-19T17:51:38.000Z
|
my_project/urls.py
|
stripathi669/codepal-sample-login
|
f553cc7f7794dd20197b1df336ed7953ac7a62dc
|
[
"MIT"
] | null | null | null |
my_project/urls.py
|
stripathi669/codepal-sample-login
|
f553cc7f7794dd20197b1df336ed7953ac7a62dc
|
[
"MIT"
] | 1
|
2019-10-01T17:51:13.000Z
|
2019-10-01T17:51:13.000Z
|
"""my_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework_jwt.views import obtain_jwt_token
from registration.views import register_user_via_facebook, get_user_details
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api-token-auth/', obtain_jwt_token),
# Url for facebook signup
url(r'^api/v1/user/register/facebook', register_user_via_facebook),
# Url to fetch user details
url(r'^api/v1/user/get/account', get_user_details),
url(r'^$', TemplateView.as_view(template_name='home.html')),
]
| 32.153846
| 79
| 0.725678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 800
| 0.637959
|
c6fb42ccff41d5e02e75ca92305085547bd5ee39
| 3,870
|
py
|
Python
|
datascripts/make_placescsv.py
|
NCI-NAACCR-Zone-Design/Louisiana
|
42fb1d05c47ae01401ee3ac3cc68ff5e4f5d5c07
|
[
"MIT"
] | null | null | null |
datascripts/make_placescsv.py
|
NCI-NAACCR-Zone-Design/Louisiana
|
42fb1d05c47ae01401ee3ac3cc68ff5e4f5d5c07
|
[
"MIT"
] | 1
|
2020-03-05T23:20:38.000Z
|
2020-03-10T18:03:31.000Z
|
datascripts/make_placescsv.py
|
NCI-NAACCR-Zone-Design/Louisiana
|
42fb1d05c47ae01401ee3ac3cc68ff5e4f5d5c07
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
from osgeo import ogr
import os
import csv
import settings
class PlacesIntersector:
def run(self):
print("PlacesIntersector")
self.reproject(settings.INPUT_ZONESFILE, settings.REPROJECTED_ZONESFILE, settings.CTAZONES_SHAPEFILE_IDFIELD, settings.CTAZONES_SHAPEFILE_NAMEFIELD)
self.reproject(settings.INPUT_CITYBOUNDS_SHP, settings.REPROJECTED_CITY_SHP, settings.CITYBOUNDS_IDFIELD, settings.CITYBOUNDS_NAMEFIELD)
self.reproject(settings.INPUT_COUNTYBOUNDS_SHP, settings.REPROJECTED_COUNTY_SHP, settings.COUNTYBOUNDS_IDFIELD, settings.COUNTYBOUNDS_NAMEFIELD)
self.findplaces(settings.REPROJECTED_CITY_SHP, settings.OUTPUT_CITYCSV, 'City')
self.findplaces(settings.REPROJECTED_COUNTY_SHP, settings.OUTPUT_COUNTYCSV, 'County')
def reproject(self, inputshp, outputshp, idfield, namefield):
# reproject the shapefile to an Albers so we can do area calculations in findplaces()
# and to standardize on there being only one attribute: name
print(" Reproject {} => {}".format(inputshp, outputshp))
command = "{} {} -proj {} -filter-fields {} -rename-fields name={},id={} -o {} -quiet".format(
settings.MAPSHAPER_CLI,
inputshp,
settings.PLANAR_SRS,
','.join([idfield, namefield]),
namefield, idfield,
outputshp
)
# print(command)
os.system(command)
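# With the default settings this expands to roughly the following (paths and
# field names depend on settings.py and are shown only as an illustration):
#   mapshaper <input>.shp -proj <albers srs> -filter-fields <ID>,<NAME>
#       -rename-fields name=<NAME>,id=<ID> -o <output>.shp -quiet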
def findplaces(self, placesdataset, csvfilename, placecolumnname):
print(" Calculating {} => {}".format(placesdataset, csvfilename))
outfh = open(csvfilename, 'w')
csvfh = csv.writer(outfh)
csvfh.writerow(['Zone', placecolumnname])
ctads = ogr.Open(settings.REPROJECTED_ZONESFILE, False)
ctalayer = ctads.GetLayer(0)
for cta in ctalayer:
ctaid = cta.GetField('id')
ctageom = cta.GetGeometryRef()
places = []
ds = ogr.Open(placesdataset, False)
layer = ds.GetLayer(0)
layer.SetSpatialFilter(ctageom)
for thisplace in layer:
# work around twitchy hands making false intersections
# "% of CTA area" strategy doesn't work: small towns in large rural CTAs = small percentage
# but a town sliver over X acres, well, that should count as intersecting the town.
#
# also work around boundary datasets that are so precisely snapped,
# that we get zero-area intersection as the overlapping boundary linestring of two areas
# this leads to harmless but scary "non-surface geometry" warnings
#
# also, note that we collect names here and unique-ify them in a second step
# multipolygon datasets means that a CTA may intersect the same place more than once!
geom = thisplace.GetGeometryRef()
intersection = geom.Intersection(ctageom)
iacres = 0
if intersection.GetGeometryName() in ('POLYGON', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION'):
iacres = intersection.Area() * settings.SQMETERS_TO_ACRES
if iacres < 2000:
continue
name = thisplace.GetField('name')
# print(" {}".format(name))
places.append(name)
ds = None # close places dataset, will reopen at next CTA
# done collecting: unique-ify the list, write the CSV rows
places = list(set(places))
for name in places:
csvfh.writerow([ctaid, name])
# done CTA loop, close geo fh and CSV fh
ctads = None
outfh.close()
if __name__ == '__main__':
PlacesIntersector().run()
print("DONE")
| 39.896907
| 156
| 0.62093
| 3,710
| 0.958656
| 0
| 0
| 0
| 0
| 0
| 0
| 1,217
| 0.31447
|
c6fd01691eb418ac4d1818fca0bd68461092ddaa
| 580
|
py
|
Python
|
Google/google_organic_results/google_organic_ads/google_regular_ads/serpapi_scrape_google_ads.py
|
dimitryzub/blog-posts-archive
|
0978aaa0c9f0142d6f996b81ce391930c5e3be35
|
[
"CC0-1.0"
] | null | null | null |
Google/google_organic_results/google_organic_ads/google_regular_ads/serpapi_scrape_google_ads.py
|
dimitryzub/blog-posts-archive
|
0978aaa0c9f0142d6f996b81ce391930c5e3be35
|
[
"CC0-1.0"
] | null | null | null |
Google/google_organic_results/google_organic_ads/google_regular_ads/serpapi_scrape_google_ads.py
|
dimitryzub/blog-posts-archive
|
0978aaa0c9f0142d6f996b81ce391930c5e3be35
|
[
"CC0-1.0"
] | null | null | null |
# scrapes both regular and shopping ads (top, right blocks)
from serpapi import GoogleSearch
import json, os
params = {
"api_key": os.getenv("API_KEY"),
"engine": "google",
"q": "buy coffee",
"gl": "us",
"hl": "en"
}
search = GoogleSearch(params)
results = search.get_dict()
if results.get("ads", []):
for ad in results["ads"]:
print(json.dumps(ad, indent=2))
if results.get("shopping_results", []):
for shopping_ad in results["shopping_results"]:
print(json.dumps(shopping_ad, indent=2))
else:
print("no shopping ads found.")
| 22.307692
| 59
| 0.639655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 194
| 0.334483
|
c6fd244b6ad93e904d3cfe0db3dd28977bc63c93
| 3,316
|
py
|
Python
|
tomomibot/commands/start.py
|
adzialocha/tomomibot
|
ed3964223bd63340f28d36daa014865f61aaf571
|
[
"MIT"
] | 28
|
2018-07-26T09:47:32.000Z
|
2022-01-24T10:38:13.000Z
|
tomomibot/commands/start.py
|
adzialocha/tomomibot
|
ed3964223bd63340f28d36daa014865f61aaf571
|
[
"MIT"
] | null | null | null |
tomomibot/commands/start.py
|
adzialocha/tomomibot
|
ed3964223bd63340f28d36daa014865f61aaf571
|
[
"MIT"
] | 5
|
2018-08-11T08:07:23.000Z
|
2021-12-23T14:47:40.000Z
|
import click
from tomomibot.cli import pass_context
from tomomibot.runtime import Runtime
from tomomibot.utils import check_valid_voice, check_valid_model
from tomomibot.const import (INTERVAL_SEC, INPUT_DEVICE, OUTPUT_CHANNEL,
INPUT_CHANNEL, OUTPUT_DEVICE, SAMPLE_RATE,
THRESHOLD_DB, NUM_CLASSES_SOUNDS,
SEQ_LEN, TEMPERATURE,
PENALTY, VOLUME, OSC_ADDRESS, OSC_PORT)
@click.command('start', short_help='Start a live session')
@click.option('--interval',
default=INTERVAL_SEC,
help='Interval (in seconds) of analyzing incoming live signal')
@click.option('--input_device',
default=INPUT_DEVICE,
help='Index of audio device for incoming signal')
@click.option('--output_device',
default=OUTPUT_DEVICE,
help='Index of audio device for outgoing signal')
@click.option('--input_channel',
default=INPUT_CHANNEL,
help='Index of channel for incoming signal')
@click.option('--output_channel',
default=OUTPUT_CHANNEL,
help='Index of channel for outgoing signal')
@click.option('--samplerate',
default=SAMPLE_RATE,
help='Sample rate of audio signals')
@click.option('--threshold',
default=THRESHOLD_DB,
help='Ignore audio events under this db value')
@click.option('--num_classes',
default=NUM_CLASSES_SOUNDS,
help='Number of k-means classes')
@click.option('--dynamics/--no_dynamics',
default=False,
help='Use dynamics (volume) classes')
@click.option('--durations/--no_durations',
default=False,
help='Use duration classes (length of sound events)')
@click.option('--seq_len',
default=SEQ_LEN,
help='How long is the sequence the model needs to predict')
@click.option('--temperature',
default=TEMPERATURE,
help='Softmax reweighting temperature')
@click.option('--penalty',
default=PENALTY,
help='Multiple of seq_len to be reached for cutting sequence')
@click.option('--reference',
default=None,
help='Use this voice as a reference for PCA and k-means')
@click.option('--volume',
default=VOLUME,
type=float,
help='Volume of the audio output')
@click.option('--osc_address',
default=OSC_ADDRESS,
type=str,
help='Address of OSC server')
@click.option('--osc_port',
default=OSC_PORT,
type=int,
help='Port of OSC server')
@click.argument('voice')
@click.argument('model')
@pass_context
def cli(ctx, voice, model, **kwargs):
"""Start a live session with tomomibot."""
try:
check_valid_model(model)
except FileNotFoundError as err:
ctx.elog('Model "{}" is invalid: {}'.format(model, err))
else:
try:
check_valid_voice(voice)
except FileNotFoundError as err:
ctx.elog('Voice "{}" is invalid: {}'.format(voice, err))
else:
runtime = Runtime(ctx, voice, model, **kwargs)
runtime.initialize()
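# Example invocation (hypothetical voice/model names; every option falls back to the
# defaults imported from tomomibot.const):
#   tomomibot start my-voice my-model --interval 2 --threshold=-48 --volume 0.8
# Click collects the two positional arguments as `voice` and `model`, and every option
# above reaches cli() through **kwargs.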
| 39.011765
| 77
| 0.596803
| 0
| 0
| 0
| 0
| 2,829
| 0.853136
| 0
| 0
| 1,061
| 0.319964
|
059afd391bdb4d5d0ce5e8f183cba9cadeed7065
| 3,451
|
py
|
Python
|
state/GameState.py
|
philippehenri-gosselin/tankgame
|
ceabbee7c348bfd4c95d2ee2ae0015d6d761154b
|
[
"X11"
] | 4
|
2020-09-15T02:00:39.000Z
|
2021-05-11T17:23:28.000Z
|
state/GameState.py
|
philippehenri-gosselin/tankgame
|
ceabbee7c348bfd4c95d2ee2ae0015d6d761154b
|
[
"X11"
] | null | null | null |
state/GameState.py
|
philippehenri-gosselin/tankgame
|
ceabbee7c348bfd4c95d2ee2ae0015d6d761154b
|
[
"X11"
] | null | null | null |
"""
MIT License
Copyrights © 2020, Philippe-Henri Gosselin.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the “Software”), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The Software is provided “as is”, without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability, fitness
for a particular purpose and noninfringement. In no event shall the authors or
copyright holders be liable for any claim, damages or other liability, whether
in an action of contract, tort or otherwise, arising from, out of or in
connection with the software or the use or other dealings in the Software.
Except as contained in this notice, the name of Philippe-Henri Gosselin shall
not be used in advertising or otherwise to promote the sale, use or other
dealings in this Software without prior written authorization from
Philippe-Henri Gosselin.
"""
from .Unit import Unit
from pygame.math import Vector2
class GameState():
def __init__(self):
self.epoch = 0
self.worldSize = Vector2(16,10)
        # build each row independently: multiplying the outer list would alias one row
        # object ten times, so a write to walls[y][x] would leak into every other row
        self.ground = [ [ Vector2(5,1) for _ in range(16) ] for _ in range(10) ]
        self.walls = [ [ None ] * 16 for _ in range(10) ]
self.units = [ Unit(self,Vector2(8,9),Vector2(1,0)) ]
self.bullets = [ ]
self.bulletSpeed = 0.1
self.bulletRange = 4
self.bulletDelay = 5
self.observers = [ ]
@property
def worldWidth(self):
"""
Returns the world width as an integer
"""
return int(self.worldSize.x)
@property
def worldHeight(self):
"""
Returns the world height as an integer
"""
return int(self.worldSize.y)
def isInside(self,position):
"""
        Returns true if position is inside the world
"""
return position.x >= 0 and position.x < self.worldWidth \
and position.y >= 0 and position.y < self.worldHeight
def findUnit(self,position):
"""
        Returns the first unit found at position, otherwise None.
"""
for unit in self.units:
if int(unit.position.x) == int(position.x) \
and int(unit.position.y) == int(position.y):
return unit
return None
def findLiveUnit(self,position):
"""
        Returns the first live unit found at position, otherwise None.
"""
unit = self.findUnit(position)
if unit is None or unit.status != "alive":
return None
return unit
def addObserver(self,observer):
"""
Add a game state observer.
        Every observer is notified when something happens (see GameStateObserver class)
"""
self.observers.append(observer)
def notifyUnitDestroyed(self,unit):
for observer in self.observers:
observer.unitDestroyed(unit)
def notifyBulletFired(self,unit):
for observer in self.observers:
observer.bulletFired(unit)
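# Minimal usage sketch (kept as a comment because the relative import above requires
# running from the package context; the values follow from the defaults in __init__):
#     state = GameState()
#     state.worldWidth, state.worldHeight        -> 16, 10
#     state.isInside(Vector2(3, 4))              -> True
#     state.findUnit(Vector2(8, 9)) is not None  -> True (the starting tank)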
| 34.51
| 85
| 0.654593
| 2,044
| 0.590751
| 0
| 0
| 286
| 0.082659
| 0
| 0
| 1,873
| 0.541329
|
059b0412d51d78feb8e9b2b1008cb427fb6c0e11
| 5,516
|
py
|
Python
|
Bot/commands_handling/group_commands.py
|
DogsonPl/bot_for_messenger
|
2d6664b52b59696dc82efb3d361b7700ebb3960b
|
[
"MIT"
] | 19
|
2021-03-11T12:59:00.000Z
|
2022-02-12T18:50:58.000Z
|
Bot/commands_handling/group_commands.py
|
DogsonPl/bot_for_messenger
|
2d6664b52b59696dc82efb3d361b7700ebb3960b
|
[
"MIT"
] | null | null | null |
Bot/commands_handling/group_commands.py
|
DogsonPl/bot_for_messenger
|
2d6664b52b59696dc82efb3d361b7700ebb3960b
|
[
"MIT"
] | 4
|
2021-03-10T23:07:13.000Z
|
2021-09-28T18:55:30.000Z
|
import fbchat
import random as rd
from .logger import logger
from ..bot_actions import BotActions
from ..sql import handling_group_sql
BOT_WELCOME_MESSAGE = """👋 Witajcie, jestem botem 🤖
❓ Jeśli chcesz zobaczyć moje komendy napisz !help"""
def check_admin_permission(function):
async def wrapper(self, event, group_info):
if event.author.id not in group_info.admins:
return await self.send_text_message(event, "🚫 Tylko administartor grupy może używać tej funkcji")
return await function(self, event, group_info)
return wrapper
def check_group_instance(function):
async def wrapper(self, event):
if not isinstance(event.thread, fbchat.Group):
return await self.send_text_message(event, "🚫 To komenda tylko dla grup")
group_info = await self.get_thread_info(event.thread.id)
return await function(self, event, group_info)
return wrapper
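# Decorators apply bottom-up, so a handler stacked as
#   @logger / @check_group_instance / @check_admin_permission
# runs through logger first, then check_group_instance (which resolves group_info),
# then check_admin_permission (which checks the author), and only then the handler.
# check_admin_permission therefore has to sit below check_group_instance, because it
# expects the resolved group_info as its third argument.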
class GroupCommands(BotActions):
def __init__(self, loop, bot_id, client):
super().__init__(loop, bot_id, client)
@logger
@check_group_instance
@check_admin_permission
async def delete_random_person(self, event, group_info):
member_to_kick = rd.choice(group_info.participants).id
if member_to_kick in group_info.admins:
await self.send_text_message(event, "🚫 Wylosowalo admina. Nie moge go usunąć")
elif member_to_kick == self.bot_id:
await self.send_text_message(event, "🚫 Wylosowało mnie")
else:
try:
await event.thread.remove_participant(member_to_kick)
except fbchat.InvalidParameters:
await self.send_text_message(event, "🚫 Żeby działała ta funkcja na grupie, muszę mieć admina")
@logger
@check_group_instance
@check_admin_permission
async def set_welcome_message(self, event, group_info):
if event.message.text.lower() == "!powitanie":
message = "🚫 Po !powitanie ustaw treść powitania"
else:
await handling_group_sql.set_welcome_message(event)
message = "✅ Powitanie zostało zmienione :)"
await self.send_text_message(event, message)
@logger
@check_group_instance
@check_admin_permission
async def set_new_group_regulations(self, event, group_info):
if event.message.text.lower() == "!nowyregulamin":
message = "🚫 Po !nowyregulamin ustaw treść regulaminu"
else:
await handling_group_sql.set_group_regulations(event)
message = "✅ Regulamin został zmieniony :) Użyj komendy !regulamin by go zobaczyć"
await self.send_text_message(event, message)
@logger
@check_group_instance
async def get_group_regulations(self, event, group_info):
group_regulations = await handling_group_sql.fetch_group_regulations(event)
if group_regulations is None:
group_regulations = "📜 Grupa nie ma regulaminu. Aby go ustawić użyj komendy\n!nowyregulamin 'treść'"
await self.send_text_message(event, group_regulations)
@logger
@check_group_instance
@check_admin_permission
async def mention_everyone(self, event, group_info):
mentions = [fbchat.Mention(thread_id=participant.id, offset=0, length=12) for participant in group_info.participants]
await self.send_text_message_with_mentions(event, "💬 ELUWA ALL", mentions)
@logger
@check_group_instance
async def send_message_with_random_mention(self, event, group_info):
lucky_member = rd.choice(group_info.participants).id
mention = [fbchat.Mention(thread_id=lucky_member, offset=0, length=12)]
await self.send_text_message_with_mentions(event, "🎆 Zwycięzca", mention)
@logger
@check_group_instance
async def send_love_message(self, event, group_info):
try:
first_person, second_person = event.message.mentions
except ValueError:
await self.send_text_message(event, "💡 Po !kocha oznacz dwie osoby, np !kocha @nick1 @nick2")
else:
love_percent = rd.randint(0, 100)
if love_percent <= 25:
emoji = "💔"
elif love_percent <= 50:
emoji = "💛"
elif love_percent <= 75:
emoji = "❤"
else:
emoji = "💝💘"
first_person_name = event.message.text[8:first_person.length+7]
second_person_name = event.message.text[9+first_person.length:8+first_person.length+second_person.length]
await self.send_text_message(event, f"{emoji} {first_person_name} kocha {second_person_name} w {love_percent} procentach")
@logger
async def reply_on_person_removed(self, event):
if self.bot_id != event.removed.id:
            # if the bot itself was removed from the group, it cannot send this farewell message
await self.send_text_message(event, "🥂 Jakaś kurwa opusciła grupe")
@logger
async def send_message_on_person_added(self, event):
for user in event.added:
if user.id == self.bot_id:
await self.send_text_message(event, BOT_WELCOME_MESSAGE)
break
else:
message = await handling_group_sql.fetch_welcome_message(event)
if message is None:
message = """🥂 Witaj w grupie! Jeśli chcesz zobaczyć moje funkcje napisz !help
Jeśli chesz ustawić wiadomość powitalną użyj komendy !powitanie"""
await self.send_text_message(event, message)
| 41.787879
| 134
| 0.676215
| 4,673
| 0.831939
| 0
| 0
| 4,494
| 0.800071
| 4,654
| 0.828556
| 1,087
| 0.19352
|
059f84fb457661f2a82136d2fab085f6c614dd8f
| 1,100
|
py
|
Python
|
util/file_parsing.py
|
LindaSt/BT-graph-creation
|
a6aa4d0ca42db4744150f11f17aea7e98d391319
|
[
"MIT"
] | 1
|
2022-03-09T07:28:14.000Z
|
2022-03-09T07:28:14.000Z
|
util/file_parsing.py
|
LindaSt/BT-graph-creation
|
a6aa4d0ca42db4744150f11f17aea7e98d391319
|
[
"MIT"
] | null | null | null |
util/file_parsing.py
|
LindaSt/BT-graph-creation
|
a6aa4d0ca42db4744150f11f17aea7e98d391319
|
[
"MIT"
] | null | null | null |
import os
import xml.etree.ElementTree as ET
def parse_xml(file_path) -> dict:
tree = ET.parse(file_path)
root = tree.getroot()
groups_colours = {i.attrib['Name']: i.attrib['Color'] for i in root.iter('Group')}
groups = ['hotspot', 'lymphocytes', 'tumorbuds', 'lymphocytesR', 'tumorbudsR']
annotations_elements = {g: [] for g in groups}
for i in root.iter('Annotation'):
annotations_elements[i.attrib['PartOfGroup']].append(i)
annotations = {g: [] for g in groups}
for group, element_list in annotations_elements.items():
for element in element_list:
if element.attrib['Type'] == 'Dot':
annotations[group].append(
[[float(i.attrib['X']), float(i.attrib['Y'])] for i in element.iter('Coordinate')][0])
else:
if group in ['lymphocytes', 'tumorbuds']:
group = 'rectangles_' + group
                # the renamed 'rectangles_*' keys are not pre-initialised above, so create them lazily
                annotations.setdefault(group, []).append(
                    [[float(i.attrib['X']), float(i.attrib['Y'])] for i in element.iter('Coordinate')])
return annotations
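# Usage sketch (hypothetical file name; expects an ASAP-style annotation XML):
#     annotations = parse_xml("patient_001_annotations.xml")
#     print({group: len(coords) for group, coords in annotations.items()})
# Besides the five predefined groups, 'rectangles_lymphocytes' / 'rectangles_tumorbuds'
# keys appear whenever those groups contain non-Dot (rectangle) annotations.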
| 36.666667
| 106
| 0.59
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 188
| 0.170909
|
05a1b225db67c9294be8ffcb48b01e142b5fd38c
| 51,802
|
py
|
Python
|
python source files/trainer.py
|
barneyga/A-Recurrent-Model-of-Approximate-Enumeration
|
8a0ca5094a2e180939c25e55f376f30dfa1095bd
|
[
"MIT"
] | null | null | null |
python source files/trainer.py
|
barneyga/A-Recurrent-Model-of-Approximate-Enumeration
|
8a0ca5094a2e180939c25e55f376f30dfa1095bd
|
[
"MIT"
] | 1
|
2021-12-08T00:52:53.000Z
|
2021-12-08T00:52:53.000Z
|
python source files/trainer.py
|
barneyga/A-Recurrent-Model-of-Approximate-Enumeration
|
8a0ca5094a2e180939c25e55f376f30dfa1095bd
|
[
"MIT"
] | null | null | null |
import os
import time
import shutil
import pickle
import torch
import torch.nn.functional as F
from tqdm import tqdm
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tensorboard_logger import configure, log_value
import pandas as pd
from model import RecurrentAttention
from stop_model import StopRecurrentAttention
from utils import AverageMeter
class Trainer:
"""A Recurrent Attention Model trainer.
All hyperparameters are provided by the user in the
config file.
"""
def __init__(self, config, data_loader):
"""
Construct a new Trainer instance.
Args:
config: object containing command line arguments.
data_loader: A data iterator.
"""
self.config = config
if config.use_gpu and torch.cuda.is_available():
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
# glimpse network params
self.patch_size = config.patch_size
self.glimpse_scale = config.glimpse_scale
self.num_patches = config.num_patches
self.loc_hidden = config.loc_hidden
self.glimpse_hidden = config.glimpse_hidden
# core network params
self.num_glimpses = config.num_glimpses
self.hidden_size = config.hidden_size
self.include_stop = config.include_stop
# reinforce params
self.std = config.std
self.M = config.M
# data params
if config.is_train:
self.train_loader = data_loader[0]
self.valid_loader = data_loader[1]
self.num_train = len(self.train_loader.sampler.indices)
self.num_valid = len(self.valid_loader.sampler.indices)
else:
self.test_loader = data_loader
self.num_test = len(self.test_loader.dataset)
self.num_classes = config.num_classes
self.num_channels = config.num_channels
# training params
self.epochs = config.epochs
self.start_epoch = 0
self.momentum = config.momentum
self.lr = config.init_lr
self.hesitation_penalty = config.hesitation_penalty
# misc params
self.best = config.best
self.logs_dir = config.logs_dir
self.best_valid_acc = 0.0
self.counter = 0
self.lr_patience = config.lr_patience
self.train_patience = config.train_patience
self.use_tensorboard = config.use_tensorboard
self.resume = config.resume
self.print_freq = config.print_freq
self.plot_freq = config.plot_freq
self.model_name = config.model_name
self.model_dir = config.model_dir
self.plot_dir = config.plot_dir
# configure tensorboard logging
if self.use_tensorboard:
tensorboard_dir = self.logs_dir + self.model_name
print("[*] Saving tensorboard logs to {}".format(tensorboard_dir))
if not os.path.exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
configure(tensorboard_dir)
# build RAM model
if self.include_stop:
self.model = StopRecurrentAttention(
self.patch_size,
self.num_patches,
self.glimpse_scale,
self.num_channels,
self.loc_hidden,
self.glimpse_hidden,
self.std,
self.hidden_size,
self.num_classes,
)
else:
self.model = RecurrentAttention(
self.patch_size,
self.num_patches,
self.glimpse_scale,
self.num_channels,
self.loc_hidden,
self.glimpse_hidden,
self.std,
self.hidden_size,
self.num_classes,
)
self.model.to(self.device)
# initialize optimizer and scheduler
self.optimizer = torch.optim.Adam(
self.model.parameters(), lr=self.config.init_lr
)
self.scheduler = ReduceLROnPlateau(
self.optimizer, "min", patience=self.lr_patience
)
def reset(self):
h_t = torch.zeros(
self.batch_size,
self.hidden_size,
dtype=torch.float,
device=self.device,
requires_grad=True,
)
l_t = torch.zeros(
self.batch_size,
2,
dtype=torch.float,
device=self.device,
requires_grad=True,
)
l_t.requires_grad = True
if not self.include_stop:
return h_t, l_t
s_t = torch.ones(
self.batch_size,
1,
dtype=torch.float,
device=self.device,
requires_grad=True,
)
return h_t, l_t, s_t
def train(self):
"""Train the model on the training set.
A checkpoint of the model is saved after each epoch
and if the validation accuracy is improved upon,
a separate ckpt is created for use on the test set.
"""
# load the most recent checkpoint
if self.resume:
self.load_checkpoint(best=False)
print(
"\n[*] Train on {} samples, validate on {} samples".format(
self.num_train, self.num_valid
)
)
for epoch in range(self.start_epoch, self.epochs):
print(
"\nEpoch: {}/{} - LR: {:.6f}".format(
epoch + 1, self.epochs, self.optimizer.param_groups[0]["lr"]
)
)
# train for 1 epoch
if self.include_stop:
train_loss, train_acc = self.train_one_epoch_stop(epoch)
else:
train_loss, train_acc = self.train_one_epoch(epoch)
# evaluate on validation set
if self.include_stop:
                valid_loss, valid_acc = self.validate_stop(epoch)
else:
valid_loss, valid_acc = self.validate(epoch)
# # reduce lr if validation loss plateaus
self.scheduler.step(-valid_acc)
is_best = valid_acc > self.best_valid_acc
msg1 = "train loss: {:.3f} - train acc: {:.3f} "
msg2 = "- val loss: {:.3f} - val acc: {:.3f} - val err: {:.3f}"
if is_best:
self.counter = 0
msg2 += " [*]"
msg = msg1 + msg2
print(
msg.format(
train_loss, train_acc, valid_loss, valid_acc, 100 - valid_acc
)
)
# check for improvement
if not is_best:
self.counter += 1
if self.counter > self.train_patience:
print("[!] No improvement in a while, stopping training.")
return
self.best_valid_acc = max(valid_acc, self.best_valid_acc)
self.save_checkpoint(
{
"epoch": epoch + 1,
"model_state": self.model.state_dict(),
"optim_state": self.optimizer.state_dict(),
"best_valid_acc": self.best_valid_acc,
},
is_best,
)
def train_one_epoch_stop(self, epoch):
"""
Train the model for 1 epoch of the training set.
An epoch corresponds to one full pass through the entire
training set in successive mini-batches.
This is used by train() and should not be called manually.
"""
self.model.train()
batch_time = AverageMeter()
losses = AverageMeter()
accs = AverageMeter()
tic = time.time()
with tqdm(total=self.num_train) as pbar:
for i, (x, y) in enumerate(self.train_loader):
self.optimizer.zero_grad()
x, y = x.to(self.device), y.to(self.device, dtype=torch.int64)
plot = False
if (epoch % self.plot_freq == 0) and (i == 0):
plot = True
# initialize location vector and hidden state
self.batch_size = x.shape[0]
h_t, l_t, s_t = self.reset()
# save images
imgs = []
imgs.append(x[0:9])
# extract the glimpses
locs = []
l_log_pi = []
s_log_pi = []
baselines = []
log_probas = []
stop_signals = []
for t in range(self.num_glimpses):
# forward pass through model
h_t, l_t, s_t, b_t, log_ps, l_p, s_p = self.model(x, l_t, h_t, s_t, t == self.num_glimpses - 1)
# store
locs.append(l_t[0:9])
baselines.append(b_t)
l_log_pi.append(l_p)
s_log_pi.append(s_p)
log_probas.append(log_ps)
stop_signals.append(s_t)
# # last iteration
# h_t, l_t, b_t, log_probas, p = self.model(x, l_t, h_t, last=True)
# log_pi.append(p)
# baselines.append(b_t)
# locs.append(l_t[0:9])
# convert list to tensors and reshape
baselines = torch.stack(baselines).transpose(1, 0)
l_log_pi = torch.stack(l_log_pi).transpose(1, 0)
s_log_pi = torch.stack(s_log_pi).transpose(1, 0)
log_probas = torch.stack(log_probas).transpose(1, 0)
stop_signals = torch.stack(stop_signals).transpose(1, 0).squeeze(2)
#process stop signals
up_through_stop = stop_signals
count = torch.arange(self.batch_size)
num_steps = torch.sum(stop_signals, dim=1).long()
up_through_stop[count,num_steps] += 1
#extract log_probas at first stop signal
log_probas = log_probas[count,num_steps,:]
#clip histories after stop signal
baselines = baselines * up_through_stop
l_log_pi = l_log_pi * up_through_stop
s_log_pi = s_log_pi * up_through_stop
# calculate reward
predicted = torch.max(log_probas, 1)[1]
R = (predicted.detach() == y).float()
R = R.unsqueeze(1).repeat(1, self.num_glimpses)
mask = (torch.arange(R.size(1), device=num_steps.device)==num_steps.unsqueeze(1))
R = mask*R #Reward of 1 at first stop signal
R = R - stop_signals * self.hesitation_penalty
# compute losses for differentiable modules
loss_action = F.nll_loss(log_probas, y)
loss_baseline = F.mse_loss(baselines, R)
# compute reinforce loss
# summed over timesteps and averaged across batch
adjusted_reward = R - baselines.detach()
loss_reinforce = torch.sum(-l_log_pi * adjusted_reward, dim=1) + torch.sum(-s_log_pi * adjusted_reward, dim=1)
loss_reinforce = torch.mean(loss_reinforce, dim=0)
# sum up into a hybrid loss
loss = loss_action + loss_baseline + loss_reinforce * 0.01
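                # Hybrid objective, as in the recurrent-attention setup this trainer follows:
                # NLL on the classification head, MSE pulling the baseline towards the reward,
                # and the REINFORCE term for the non-differentiable location/stop policies,
                # scaled by 0.01 so it does not dominate the supervised terms.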
# compute accuracy
correct = (predicted == y).float()
acc = 100 * (correct.sum() / len(y))
# store
losses.update(loss.item(), x.size()[0])
accs.update(acc.item(), x.size()[0])
# compute gradients and update SGD
loss.backward()
self.optimizer.step()
# measure elapsed time
toc = time.time()
batch_time.update(toc - tic)
pbar.set_description(
(
"{:.1f}s - loss: {:.3f} - acc: {:.3f}".format(
(toc - tic), loss.item(), acc.item()
)
)
)
pbar.update(self.batch_size)
# dump the glimpses and locs
if plot:
imgs = [g.cpu().data.numpy().squeeze() for g in imgs]
locs = [l.cpu().data.numpy() for l in locs]
pickle.dump(
imgs, open(self.plot_dir + "g_{}.p".format(epoch + 1), "wb")
)
pickle.dump(
locs, open(self.plot_dir + "l_{}.p".format(epoch + 1), "wb")
)
# log to tensorboard
if self.use_tensorboard:
iteration = epoch * len(self.train_loader) + i
log_value("train_loss", losses.avg, iteration)
log_value("train_acc", accs.avg, iteration)
return losses.avg, accs.avg
@torch.no_grad()
def validate_stop(self, epoch):
"""Evaluate the RAM model on the validation set.
"""
losses = AverageMeter()
accs = AverageMeter()
for i, (x, y) in enumerate(self.valid_loader):
x, y = x.to(self.device), y.to(self.device, dtype=torch.int64)
# duplicate M times
x = x.repeat(self.M, 1, 1, 1)
# initialize location vector and hidden state
self.batch_size = x.shape[0]
h_t, l_t, s_t = self.reset()
# extract the glimpses
l_log_pi = []
s_log_pi = []
baselines = []
log_probas = []
stop_signals = []
for t in range(self.num_glimpses):
# forward pass through model
h_t, l_t, s_t, b_t, log_ps, l_p, s_p = self.model(x, l_t, h_t, s_t)
# store
baselines.append(b_t)
l_log_pi.append(l_p)
s_log_pi.append(s_p)
log_probas.append(log_ps)
stop_signals.append(s_t)
# convert list to tensors and reshape
baselines = torch.stack(baselines).transpose(1, 0)
l_log_pi = torch.stack(l_log_pi).transpose(1, 0)
s_log_pi = torch.stack(s_log_pi).transpose(1, 0)
log_probas = torch.stack(log_probas).transpose(1, 0)
stop_signals = torch.stack(stop_signals).transpose(1, 0).squeeze(2)
#process stop signals
up_through_stop = stop_signals
count = torch.arange(self.batch_size)
num_steps = torch.sum(stop_signals, dim=1).long()
up_through_stop[count,num_steps] += 1
#extract log_probas at first stop signal
log_probas = log_probas[count,num_steps,:]
#clip histories after stop signal
baselines = baselines * up_through_stop
l_log_pi = l_log_pi * up_through_stop
s_log_pi = s_log_pi * up_through_stop
# average
log_probas = log_probas.view(self.M, -1, log_probas.shape[-1])
log_probas = torch.mean(log_probas, dim=0)
baselines = baselines.contiguous().view(self.M, -1, baselines.shape[-1])
baselines = torch.mean(baselines, dim=0)
l_log_pi = l_log_pi.contiguous().view(self.M, -1, l_log_pi.shape[-1])
l_log_pi = torch.mean(l_log_pi, dim=0)
s_log_pi = s_log_pi.contiguous().view(self.M, -1, s_log_pi.shape[-1])
s_log_pi = torch.mean(s_log_pi, dim=0)
# calculate reward
predicted = torch.max(log_probas, 1)[1]
R = (predicted.detach() == y).float()
R = R.unsqueeze(1).repeat(1, self.num_glimpses)
mask = (torch.arange(R.size(1), device=num_steps.device)==num_steps.unsqueeze(1))
R = mask*R
R = R - stop_signals * self.hesitation_penalty
# compute losses for differentiable modules
loss_action = F.nll_loss(log_probas, y)
loss_baseline = F.mse_loss(baselines, R)
# compute reinforce loss
adjusted_reward = R - baselines.detach()
loss_reinforce = torch.sum(-l_log_pi * adjusted_reward, dim=1) + torch.sum(-s_log_pi * adjusted_reward, dim=1)
loss_reinforce = torch.mean(loss_reinforce, dim=0)
# sum up into a hybrid loss
loss = loss_action + loss_baseline + loss_reinforce * 0.01
# compute accuracy
correct = (predicted == y).float()
acc = 100 * (correct.sum() / len(y))
# store
losses.update(loss.item(), x.size()[0])
accs.update(acc.item(), x.size()[0])
# log to tensorboard
if self.use_tensorboard:
iteration = epoch * len(self.valid_loader) + i
log_value("valid_loss", losses.avg, iteration)
log_value("valid_acc", accs.avg, iteration)
return losses.avg, accs.avg
@torch.no_grad()
def test_stop(self):
"""Test the RAM model.
This function should only be called at the very
end once the model has finished training.
"""
correct = 0
# load the best checkpoint
self.load_checkpoint(best=self.best)
# removed image, final_softmax, hidden layer, softmax, final_persistant_softmax
cols = ['image_id', 'timestep', 'num_dots', 'area',
'num_steps', 'final_prediction', 'next_location', 'prediction',
'final_persistant_absolute_error', 'final_persistant_prediction',
'absolute_error', 'stop_signal', 'stop_probability']
# changed naming to SMALL
filename = self.model_name + "_SMALL.csv"
test_path = os.path.join(self.model_dir, filename)
for i, (x, y, a) in enumerate(self.test_loader):
df_dict = {column_name : [] for column_name in cols}
batch_size = x.shape[0]
df_dict['image_id'].extend(sum([[image_id]*self.num_glimpses for image_id in range(i*batch_size, (i+1)*batch_size)], []))
df_dict['timestep'].extend(sum([list(range(self.num_glimpses)) for image_id in range(i*batch_size, (i+1)*batch_size)], []))
# df_dict['image'].extend(x.repeat_interleave(self.num_glimpses, dim=0).cpu().tolist())
repeat_y = y.repeat_interleave(self.num_glimpses)
df_dict['num_dots'].extend(repeat_y.cpu().tolist())
df_dict['area'].extend(a.repeat_interleave(self.num_glimpses).cpu().tolist())
x, y = x.to(self.device), y.to(self.device, dtype=torch.int64)
# initialize location vector and hidden state
self.batch_size = x.shape[0]
h_t, l_t, s_t = self.reset()
# extract the glimpses
l_ts = []
h_ts = []
output_log_probas = []
stop_signals = []
stop_log_probs = []
for t in range(self.num_glimpses):
# forward pass through model
h_t, l_t, s_t, b_t, log_ps, l_p, s_p = self.model(x, l_t, h_t, s_t)
# store
h_ts.append(h_t)
l_ts.append(l_t)
output_log_probas.append(log_ps)
stop_signals.append(s_t)
stop_log_probs.append(s_p)
# convert list to tensors and reshape
output_log_probas = torch.stack(output_log_probas).transpose(1, 0)
h_ts = torch.stack(h_ts).transpose(1, 0)
l_ts = torch.stack(l_ts).transpose(1, 0)
stop_log_probs = torch.stack(stop_log_probs).transpose(1, 0)
stop_signals = torch.stack(stop_signals).transpose(1, 0)
stretched_output_log_probas = output_log_probas.reshape(batch_size*self.num_glimpses, -1)
stretched_h_ts = h_ts.reshape(batch_size * self.num_glimpses, -1)
stretched_l_ts = l_ts.reshape(batch_size * self.num_glimpses, -1)
stretched_stop_log_probs = stop_log_probs.reshape(batch_size * self.num_glimpses, -1)
stretched_stop_signals = stop_signals.reshape(batch_size * self.num_glimpses, -1)
softmaxes = torch.exp(output_log_probas)
stretched_softmaxes = softmaxes.reshape(batch_size * self.num_glimpses, -1)
stretched_stop_probs = torch.exp(stretched_stop_log_probs)
# df_dict['softmax'].extend(stretched_softmaxes.cpu().tolist())
df_dict['stop_probability'].extend(stretched_stop_probs.squeeze(1).cpu().tolist())
# df_dict['hidden_layer'].extend(stretched_h_ts.cpu().tolist())
df_dict['next_location'].extend(stretched_l_ts.cpu().tolist())
stop_signals = stop_signals.squeeze(2)
df_dict['stop_signal'].extend(stretched_stop_signals.squeeze(1).cpu().tolist())
#process stop signals
count = torch.arange(batch_size)
num_steps = torch.sum(stop_signals, dim=1).long()
#print(f"num steps: {num_steps}")
df_dict['num_steps'].extend(num_steps.repeat_interleave(self.num_glimpses).cpu().tolist())
up_through_stop = stop_signals
up_through_stop[count,num_steps] += 1
final_persistant_mask = (up_through_stop == 0)
#print(f"mask shape: {final_persistant_mask.shape}")
#print(f"mask: {final_persistant_mask}")
#extract output_log_probas at first stop signal
final_softmax = softmaxes[count,num_steps,:]
#print(f"final soft shape: {final_softmax.shape}")
#print(f"final soft: {final_softmax}")
# df_dict['final_softmax'].extend(final_softmax.repeat_interleave(self.num_glimpses, dim = 0).cpu().tolist())
unsqueezed_final_persistant_mask = final_persistant_mask.unsqueeze(2)
repeated_final_softmax = final_softmax.unsqueeze(1).repeat(1,self.num_glimpses,1)
final_persistant_softmaxes = torch.where(unsqueezed_final_persistant_mask, repeated_final_softmax, softmaxes)
# df_dict['final_persistant_softmax'].extend(final_persistant_softmaxes.reshape(batch_size*self.num_glimpses, -1).cpu().tolist())
final_pred = final_softmax.data.max(1, keepdim=True)[1]
#print(f"final pred: {final_pred}")
df_dict['final_prediction'].extend(final_pred.repeat_interleave(self.num_glimpses).cpu().tolist())
correct += final_pred.eq(y.data.view_as(final_pred)).cpu().sum()
stretched_predictions = stretched_softmaxes.data.max(1, keepdim=True)[1].squeeze(1)
df_dict['prediction'].extend(stretched_predictions.cpu().tolist())
predictions = stretched_predictions.reshape(batch_size, self.num_glimpses)
repeated_final_pred = final_pred.repeat(1, self.num_glimpses)
final_persistant_predictions = torch.where(final_persistant_mask, repeated_final_pred, predictions)
stretched_final_persistant_predictions = final_persistant_predictions.reshape(batch_size*self.num_glimpses, -1)
#print(f"stretched_final_persistant_predictions shape: {stretched_final_persistant_predictions.shape}")
#print(f"stretched_final_persistant_predictions: {stretched_final_persistant_predictions}")
df_dict['final_persistant_prediction'].extend(stretched_final_persistant_predictions.squeeze(1).cpu().tolist())
#print(f"stretched_pred/y devices: {stretched_predictions.device}, {repeat_y.device}")
stretched_error = torch.abs(stretched_predictions - repeat_y.cuda())
df_dict['absolute_error'].extend(stretched_error.cpu().tolist())
#print(f"error : {df_dict['absolute_error']}")
final_error = torch.abs(final_pred - y.unsqueeze(1))
error = stretched_error.reshape(batch_size, self.num_glimpses)
repeated_final_error = final_error.repeat(1, self.num_glimpses)
# print(f"shapes: {final_persistant_mask.shape}, {repeated_final_error.shape}, {error.shape}")
final_persistant_error = torch.where(final_persistant_mask, repeated_final_error, error.long())
stretched_final_persistant_error = final_persistant_error.reshape(batch_size*self.num_glimpses, -1)
#print(f"stretched_final_persistant_error shape: {stretched_final_persistant_error.shape}")
#print(f"stretched_final_persistant_error: {stretched_final_persistant_error}")
df_dict['final_persistant_absolute_error'].extend(stretched_final_persistant_error.squeeze(1).cpu().tolist())
df = pd.DataFrame(df_dict)
df.to_csv(test_path, mode='a', header=not os.path.exists(test_path))
perc = (100.0 * correct) / (self.num_test)
error = 100 - perc
print(
"[*] Test Acc: {}/{} ({:.2f}% - {:.2f}%)".format(
correct, self.num_test, perc, error
)
)
def save_checkpoint(self, state, is_best):
"""Saves a checkpoint of the model.
If this model has reached the best validation accuracy thus
        far, a separate file with the suffix `best` is created.
"""
filename = self.model_name + "_ckpt.pth.tar"
model_path = os.path.join(self.model_dir, filename)
torch.save(state, model_path)
if is_best:
filename = self.model_name + "_model_best.pth.tar"
shutil.copyfile(model_path, os.path.join(self.model_dir, filename))
def load_checkpoint(self, best=False):
"""Load the best copy of a model.
This is useful for 2 cases:
- Resuming training with the most recent model checkpoint.
- Loading the best validation model to evaluate on the test data.
Args:
best: if set to True, loads the best model.
Use this if you want to evaluate your model
on the test data. Else, set to False in which
case the most recent version of the checkpoint
is used.
"""
print("[*] Loading model from {}".format(self.model_dir))
filename = self.model_name + "_ckpt.pth.tar"
if best:
filename = self.model_name + "_model_best.pth.tar"
model_path = os.path.join(self.model_dir, filename)
model = torch.load(model_path)
# load variables from checkpoint
self.start_epoch = model["epoch"]
self.best_valid_acc = model["best_valid_acc"]
self.model.load_state_dict(model["model_state"])
self.optimizer.load_state_dict(model["optim_state"])
if best:
print(
"[*] Loaded {} checkpoint @ epoch {} "
"with best valid acc of {:.3f}".format(
filename, model["epoch"], model["best_valid_acc"]
)
)
else:
print("[*] Loaded {} checkpoint @ epoch {}".format(filename, model["epoch"]))
def save_checkpoint(self, state, is_best):
"""Saves a checkpoint of the model.
If this model has reached the best validation accuracy thus
        far, a separate file with the suffix `best` is created.
"""
filename = self.model_name + "_ckpt.pth.tar"
model_path = os.path.join(self.model_dir, filename)
torch.save(state, model_path)
if is_best:
filename = self.model_name + "_model_best.pth.tar"
shutil.copyfile(model_path, os.path.join(self.model_dir, filename))
def load_checkpoint(self, best=False):
"""Load the best copy of a model.
This is useful for 2 cases:
- Resuming training with the most recent model checkpoint.
- Loading the best validation model to evaluate on the test data.
Args:
best: if set to True, loads the best model.
Use this if you want to evaluate your model
on the test data. Else, set to False in which
case the most recent version of the checkpoint
is used.
"""
print("[*] Loading model from {}".format(self.model_dir))
filename = self.model_name + "_ckpt.pth.tar"
if best:
filename = self.model_name + "_model_best.pth.tar"
model_path = os.path.join(self.model_dir, filename)
model = torch.load(model_path)
# load variables from checkpoint
self.start_epoch = model["epoch"]
self.best_valid_acc = model["best_valid_acc"]
self.model.load_state_dict(model["model_state"])
self.optimizer.load_state_dict(model["optim_state"])
if best:
print(
"[*] Loaded {} checkpoint @ epoch {} "
"with best valid acc of {:.3f}".format(
filename, model["epoch"], model["best_valid_acc"]
)
)
else:
print("[*] Loaded {} checkpoint @ epoch {}".format(filename, model["epoch"]))
def train_one_epoch(self, epoch):
"""
Train the model for 1 epoch of the training set.
An epoch corresponds to one full pass through the entire
training set in successive mini-batches.
This is used by train() and should not be called manually.
"""
self.model.train()
batch_time = AverageMeter()
losses = AverageMeter()
accs = AverageMeter()
tic = time.time()
with tqdm(total=self.num_train) as pbar:
for i, (x, y) in enumerate(self.train_loader):
self.optimizer.zero_grad()
x, y = x.to(self.device), y.to(self.device, dtype=torch.int64)
plot = False
if (epoch % self.plot_freq == 0) and (i == 0):
plot = True
# initialize location vector and hidden state
self.batch_size = x.shape[0]
#h_t, l_t, s_t = self.reset()
h_t, l_t = self.reset()
# save images
imgs = []
imgs.append(x[0:9])
# extract the glimpses
locs = []
l_log_pi = []
#s_log_pi = []
baselines = []
log_probas = []
#stop_signals = []
for t in range(self.num_glimpses):
# forward pass through model
#h_t, l_t, s_t, b_t, log_ps, l_p, s_p = self.model(x, l_t, h_t, s_t, t == self.num_glimpses - 1)
h_t, l_t, b_t, log_ps, l_p = self.model(x, l_t, h_t, t == self.num_glimpses - 1)
# store
locs.append(l_t[0:9])
baselines.append(b_t)
l_log_pi.append(l_p)
#s_log_pi.append(s_p)
log_probas.append(log_ps)
#stop_signals.append(s_t)
# # last iteration
# h_t, l_t, b_t, log_probas, p = self.model(x, l_t, h_t, last=True)
# log_pi.append(p)
# baselines.append(b_t)
# locs.append(l_t[0:9])
# convert list to tensors and reshape
baselines = torch.stack(baselines).transpose(1, 0)
l_log_pi = torch.stack(l_log_pi).transpose(1, 0)
#s_log_pi = torch.stack(s_log_pi).transpose(1, 0)
log_probas = torch.stack(log_probas).transpose(1, 0)
#stop_signals = torch.stack(stop_signals).transpose(1, 0).squeeze(2)
#process stop signals
#up_through_stop = stop_signals
#count = torch.arange(self.batch_size)
#num_steps = torch.sum(stop_signals, dim=1).long()
#up_through_stop[count,num_steps] += 1
#extract log_probas at first stop signal
#log_probas = log_probas[count,num_steps,:]
#clip histories after stop signal
#baselines = baselines * up_through_stop
#l_log_pi = l_log_pi * up_through_stop
#s_log_pi = s_log_pi * up_through_stop
# calculate reward
predicted = torch.max(log_probas, 2)[1]
repeat_y = y.unsqueeze(1).repeat(1, self.num_glimpses)
R = (predicted.detach() == repeat_y).float()
#R = R.unsqueeze(1).repeat(1, self.num_glimpses)
#mask = (torch.arange(R.size(1), device=num_steps.device)==num_steps.unsqueeze(1))
#R = mask*R #Reward of 1 at first stop signal
#R = R - stop_signals * self.hesitation_penalty
# compute losses for differentiable modules
#loss_action = F.nll_loss(log_probas, y)
loss_action = F.nll_loss(log_probas.reshape(self.batch_size * self.num_glimpses, -1), repeat_y.reshape(self.batch_size*self.num_glimpses))
loss_baseline = F.mse_loss(baselines, R)
# compute reinforce loss
# summed over timesteps and averaged across batch
adjusted_reward = R - baselines.detach()
loss_reinforce = torch.sum(-l_log_pi * adjusted_reward, dim=1) #+ torch.sum(-s_log_pi * adjusted_reward, dim=1)
loss_reinforce = torch.mean(loss_reinforce, dim=0)
# sum up into a hybrid loss
loss = loss_action + loss_baseline + loss_reinforce * 0.01
# compute accuracy
correct = (predicted[:,-1] == y).float()
acc = 100 * (correct.sum() / len(y))
# store
losses.update(loss.item(), x.size()[0])
accs.update(acc.item(), x.size()[0])
# compute gradients and update SGD
loss.backward()
self.optimizer.step()
# measure elapsed time
toc = time.time()
batch_time.update(toc - tic)
pbar.set_description(
(
"{:.1f}s - loss: {:.3f} - acc: {:.3f}".format(
(toc - tic), loss.item(), acc.item()
)
)
)
pbar.update(self.batch_size)
# dump the glimpses and locs
if plot:
imgs = [g.cpu().data.numpy().squeeze() for g in imgs]
locs = [l.cpu().data.numpy() for l in locs]
pickle.dump(
imgs, open(self.plot_dir + "g_{}.p".format(epoch + 1), "wb")
)
pickle.dump(
locs, open(self.plot_dir + "l_{}.p".format(epoch + 1), "wb")
)
# log to tensorboard
if self.use_tensorboard:
iteration = epoch * len(self.train_loader) + i
log_value("train_loss", losses.avg, iteration)
log_value("train_acc", accs.avg, iteration)
return losses.avg, accs.avg
@torch.no_grad()
def validate(self, epoch):
"""Evaluate the RAM model on the validation set.
"""
losses = AverageMeter()
accs = AverageMeter()
for i, (x, y) in enumerate(self.valid_loader):
x, y = x.to(self.device), y.to(self.device, dtype=torch.int64)
# duplicate M times
x = x.repeat(self.M, 1, 1, 1)
# initialize location vector and hidden state
self.batch_size = x.shape[0]
#h_t, l_t, s_t = self.reset()
h_t, l_t = self.reset()
# extract the glimpses
l_log_pi = []
#s_log_pi = []
baselines = []
log_probas = []
#stop_signals = []
for t in range(self.num_glimpses):
# forward pass through model
#h_t, l_t, s_t, b_t, log_ps, l_p, s_p = self.model(x, l_t, h_t, s_t)
h_t, l_t, b_t, log_ps, l_p = self.model(x, l_t, h_t)
# store
baselines.append(b_t)
l_log_pi.append(l_p)
#s_log_pi.append(s_p)
log_probas.append(log_ps)
#stop_signals.append(s_t)
# convert list to tensors and reshape
baselines = torch.stack(baselines).transpose(1, 0)
l_log_pi = torch.stack(l_log_pi).transpose(1, 0)
#s_log_pi = torch.stack(s_log_pi).transpose(1, 0)
log_probas = torch.stack(log_probas).transpose(1, 0)
#stop_signals = torch.stack(stop_signals).transpose(1, 0).squeeze(2)
#process stop signals
#up_through_stop = stop_signals
#count = torch.arange(self.batch_size)
#num_steps = torch.sum(stop_signals, dim=1).long()
#up_through_stop[count,num_steps] += 1
#extract log_probas at first stop signal
#log_probas = log_probas[count,num_steps,:]
#clip histories after stop signal
#baselines = baselines * up_through_stop
#l_log_pi = l_log_pi * up_through_stop
#s_log_pi = s_log_pi * up_through_stop
# average
log_probas = log_probas.contiguous().view(self.M, -1, log_probas.shape[-2], log_probas.shape[-1])
log_probas = torch.mean(log_probas, dim=0)
baselines = baselines.contiguous().view(self.M, -1, baselines.shape[-1])
baselines = torch.mean(baselines, dim=0)
l_log_pi = l_log_pi.contiguous().view(self.M, -1, l_log_pi.shape[-1])
l_log_pi = torch.mean(l_log_pi, dim=0)
#s_log_pi = s_log_pi.contiguous().view(self.M, -1, s_log_pi.shape[-1])
#s_log_pi = torch.mean(s_log_pi, dim=0)
# calculate reward
repeat_y = y.unsqueeze(1).repeat(1, self.num_glimpses)
predicted = torch.max(log_probas, 2)[1]
R = (predicted.detach() == repeat_y).float()
#R = R.unsqueeze(1).repeat(1, self.num_glimpses)
#mask = (torch.arange(R.size(1), device=num_steps.device)==num_steps.unsqueeze(1))
#R = mask*R
#R = R - stop_signals * self.hesitation_penalty
# compute losses for differentiable modules
loss_action = F.nll_loss(log_probas.reshape(self.batch_size * self.num_glimpses, -1), repeat_y.reshape(self.batch_size * self.num_glimpses))
loss_baseline = F.mse_loss(baselines, R)
# compute reinforce loss
adjusted_reward = R - baselines.detach()
loss_reinforce = torch.sum(-l_log_pi * adjusted_reward, dim=1)# + torch.sum(-s_log_pi * adjusted_reward, dim=1)
loss_reinforce = torch.mean(loss_reinforce, dim=0)
# sum up into a hybrid loss
loss = loss_action + loss_baseline + loss_reinforce * 0.01
# compute accuracy
correct = (predicted[:,-1] == y).float()
acc = 100 * (correct.sum() / len(y))
# store
losses.update(loss.item(), x.size()[0])
accs.update(acc.item(), x.size()[0])
# log to tensorboard
if self.use_tensorboard:
iteration = epoch * len(self.valid_loader) + i
log_value("valid_loss", losses.avg, iteration)
log_value("valid_acc", accs.avg, iteration)
return losses.avg, accs.avg
@torch.no_grad()
def test(self):
"""Test the RAM model.
This function should only be called at the very
end once the model has finished training.
"""
correct = 0
# load the best checkpoint
self.load_checkpoint(best=self.best)
# removed image, final_softmax, hidden layer, softmax, final_persistant_softmax
cols = ['image_id', 'image', 'timestep', 'num_dots', 'area',
'next_location', 'prediction',
'absolute_error']
# changed naming to SMALL
filename = self.model_name + "_SMALL.csv"
test_path = os.path.join(self.model_dir, filename)
for i, (x, y, a) in enumerate(self.test_loader):
df_dict = {column_name : [] for column_name in cols}
batch_size = x.shape[0]
df_dict['image_id'].extend(sum([[image_id]*self.num_glimpses for image_id in range(i*batch_size, (i+1)*batch_size)], []))
df_dict['timestep'].extend(sum([list(range(self.num_glimpses)) for image_id in range(i*batch_size, (i+1)*batch_size)], []))
df_dict['image'].extend(x.repeat_interleave(self.num_glimpses, dim=0).cpu().tolist())
repeat_y = y.repeat_interleave(self.num_glimpses)
df_dict['num_dots'].extend(repeat_y.cpu().tolist())
df_dict['area'].extend(a.repeat_interleave(self.num_glimpses).cpu().tolist())
x, y = x.to(self.device), y.to(self.device, dtype=torch.int64)
# initialize location vector and hidden state
self.batch_size = x.shape[0]
#h_t, l_t, s_t = self.reset()
h_t, l_t = self.reset()
# extract the glimpses
l_ts = []
h_ts = []
output_log_probas = []
#stop_signals = []
#stop_log_probs = []
for t in range(self.num_glimpses):
# forward pass through model
#h_t, l_t, s_t, b_t, log_ps, l_p, s_p = self.model(x, l_t, h_t, s_t)
h_t, l_t, b_t, log_ps, l_p = self.model(x, l_t, h_t)
# store
h_ts.append(h_t)
l_ts.append(l_t)
output_log_probas.append(log_ps)
#stop_signals.append(s_t)
#stop_log_probs.append(s_p)
# convert list to tensors and reshape
output_log_probas = torch.stack(output_log_probas).transpose(1, 0)
h_ts = torch.stack(h_ts).transpose(1, 0)
l_ts = torch.stack(l_ts).transpose(1, 0)
#stop_log_probs = torch.stack(stop_log_probs).transpose(1, 0)
#stop_signals = torch.stack(stop_signals).transpose(1, 0)
stretched_output_log_probas = output_log_probas.reshape(batch_size*self.num_glimpses, -1)
stretched_h_ts = h_ts.reshape(batch_size * self.num_glimpses, -1)
stretched_l_ts = l_ts.reshape(batch_size * self.num_glimpses, -1)
#stretched_stop_log_probs = stop_log_probs.reshape(batch_size * self.num_glimpses, -1)
#stretched_stop_signals = stop_signals.reshape(batch_size * self.num_glimpses, -1)
softmaxes = torch.exp(output_log_probas)
stretched_softmaxes = softmaxes.reshape(batch_size * self.num_glimpses, -1)
#stretched_stop_probs = torch.exp(stretched_stop_log_probs)
# df_dict['softmax'].extend(stretched_softmaxes.cpu().tolist())
#df_dict['stop_probability'].extend(stretched_stop_probs.squeeze(1).cpu().tolist())
# df_dict['hidden_layer'].extend(stretched_h_ts.cpu().tolist())
df_dict['next_location'].extend(stretched_l_ts.cpu().tolist())
#stop_signals = stop_signals.squeeze(2)
#df_dict['stop_signal'].extend(stretched_stop_signals.squeeze(1).cpu().tolist())
#process stop signals
#count = torch.arange(batch_size)
#num_steps = torch.sum(stop_signals, dim=1).long()
#print(f"num steps: {num_steps}")
#df_dict['num_steps'].extend(num_steps.repeat_interleave(self.num_glimpses).cpu().tolist())
#up_through_stop = stop_signals
#up_through_stop[count,num_steps] += 1
#final_persistant_mask = (up_through_stop == 0)
#print(f"mask shape: {final_persistant_mask.shape}")
#print(f"mask: {final_persistant_mask}")
#extract output_log_probas at first stop signal
#final_softmax = softmaxes[count,num_steps,:]
#print(f"final soft shape: {final_softmax.shape}")
#print(f"final soft: {final_softmax}")
# df_dict['final_softmax'].extend(final_softmax.repeat_interleave(self.num_glimpses, dim = 0).cpu().tolist())
#unsqueezed_final_persistant_mask = final_persistant_mask.unsqueeze(2)
#repeated_final_softmax = final_softmax.unsqueeze(1).repeat(1,self.num_glimpses,1)
#final_persistant_softmaxes = torch.where(unsqueezed_final_persistant_mask, repeated_final_softmax, softmaxes)
# df_dict['final_persistant_softmax'].extend(final_persistant_softmaxes.reshape(batch_size*self.num_glimpses, -1).cpu().tolist())
final_pred = output_log_probas[:,-1,:].data.max(1, keepdim=True)[1]
#print(f"final pred: {final_pred}")
#df_dict['final_prediction'].extend(final_pred.repeat_interleave(self.num_glimpses).cpu().tolist())
correct += final_pred.eq(y.data.view_as(final_pred)).cpu().sum()
stretched_predictions = stretched_output_log_probas.data.max(1, keepdim=True)[1].squeeze(1)
df_dict['prediction'].extend(stretched_predictions.cpu().tolist())
predictions = stretched_predictions.reshape(batch_size, self.num_glimpses)
#repeated_final_pred = final_pred.repeat(1, self.num_glimpses)
#final_persistant_predictions = torch.where(final_persistant_mask, repeated_final_pred, predictions)
#stretched_final_persistant_predictions = final_persistant_predictions.reshape(batch_size*self.num_glimpses, -1)
#print(f"stretched_final_persistant_predictions shape: {stretched_final_persistant_predictions.shape}")
#print(f"stretched_final_persistant_predictions: {stretched_final_persistant_predictions}")
#df_dict['final_persistant_prediction'].extend(stretched_final_persistant_predictions.squeeze(1).cpu().tolist())
#print(f"stretched_pred/y devices: {stretched_predictions.device}, {repeat_y.device}")
stretched_error = torch.abs(stretched_predictions - repeat_y.cuda())
df_dict['absolute_error'].extend(stretched_error.cpu().tolist())
#print(f"error : {df_dict['absolute_error']}")
#final_error = torch.abs(final_pred - y.unsqueeze(1))
#error = stretched_error.reshape(batch_size, self.num_glimpses)
#repeated_final_error = final_error.repeat(1, self.num_glimpses)
# print(f"shapes: {final_persistant_mask.shape}, {repeated_final_error.shape}, {error.shape}")
#final_persistant_error = torch.where(final_persistant_mask, repeated_final_error, error.long())
#stretched_final_persistant_error = final_persistant_error.reshape(batch_size*self.num_glimpses, -1)
#print(f"stretched_final_persistant_error shape: {stretched_final_persistant_error.shape}")
#print(f"stretched_final_persistant_error: {stretched_final_persistant_error}")
#df_dict['final_persistant_absolute_error'].extend(stretched_final_persistant_error.squeeze(1).cpu().tolist())
df = pd.DataFrame(df_dict)
df.to_csv(test_path, mode='a', header=not os.path.exists(test_path))
perc = (100.0 * correct) / (self.num_test)
error = 100 - perc
print(
"[*] Test Acc: {}/{} ({:.2f}% - {:.2f}%)".format(
correct, self.num_test, perc, error
)
)
def save_checkpoint(self, state, is_best):
"""Saves a checkpoint of the model.
If this model has reached the best validation accuracy thus
        far, a separate file with the suffix `best` is created.
"""
filename = self.model_name + "_ckpt.pth.tar"
model_path = os.path.join(self.model_dir, filename)
torch.save(state, model_path)
if is_best:
filename = self.model_name + "_model_best.pth.tar"
shutil.copyfile(model_path, os.path.join(self.model_dir, filename))
def load_checkpoint(self, best=False):
"""Load the best copy of a model.
This is useful for 2 cases:
- Resuming training with the most recent model checkpoint.
- Loading the best validation model to evaluate on the test data.
Args:
best: if set to True, loads the best model.
Use this if you want to evaluate your model
on the test data. Else, set to False in which
case the most recent version of the checkpoint
is used.
"""
print("[*] Loading model from {}".format(self.model_dir))
filename = self.model_name + "_ckpt.pth.tar"
if best:
filename = self.model_name + "_model_best.pth.tar"
model_path = os.path.join(self.model_dir, filename)
model = torch.load(model_path)
# load variables from checkpoint
self.start_epoch = model["epoch"]
self.best_valid_acc = model["best_valid_acc"]
self.model.load_state_dict(model["model_state"])
self.optimizer.load_state_dict(model["optim_state"])
if best:
print(
"[*] Loaded {} checkpoint @ epoch {} "
"with best valid acc of {:.3f}".format(
filename, model["epoch"], model["best_valid_acc"]
)
)
else:
print("[*] Loaded {} checkpoint @ epoch {}".format(filename, model["epoch"]))
def save_checkpoint(self, state, is_best):
"""Saves a checkpoint of the model.
If this model has reached the best validation accuracy thus
        far, a separate file with the suffix `best` is created.
"""
filename = self.model_name + "_ckpt.pth.tar"
model_path = os.path.join(self.model_dir, filename)
torch.save(state, model_path)
if is_best:
filename = self.model_name + "_model_best.pth.tar"
shutil.copyfile(model_path, os.path.join(self.model_dir, filename))
def load_checkpoint(self, best=False):
"""Load the best copy of a model.
This is useful for 2 cases:
- Resuming training with the most recent model checkpoint.
- Loading the best validation model to evaluate on the test data.
Args:
best: if set to True, loads the best model.
Use this if you want to evaluate your model
on the test data. Else, set to False in which
case the most recent version of the checkpoint
is used.
"""
print("[*] Loading model from {}".format(self.model_dir))
filename = self.model_name + "_ckpt.pth.tar"
if best:
filename = self.model_name + "_model_best.pth.tar"
model_path = os.path.join(self.model_dir, filename)
model = torch.load(model_path)
# load variables from checkpoint
self.start_epoch = model["epoch"]
self.best_valid_acc = model["best_valid_acc"]
self.model.load_state_dict(model["model_state"])
self.optimizer.load_state_dict(model["optim_state"])
if best:
print(
"[*] Loaded {} checkpoint @ epoch {} "
"with best valid acc of {:.3f}".format(
filename, model["epoch"], model["best_valid_acc"]
)
)
else:
print("[*] Loaded {} checkpoint @ epoch {}".format(filename, model["epoch"]))
| 40.032457
| 154
| 0.572661
| 51,437
| 0.992954
| 0
| 0
| 24,540
| 0.473727
| 0
| 0
| 16,618
| 0.320798
|
05a68fa246d27153d4fabeb9ddac94a69fd17785
| 392
|
py
|
Python
|
src/apps/shop/serializers.py
|
brainfukk/fiuread
|
7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8
|
[
"Apache-2.0"
] | null | null | null |
src/apps/shop/serializers.py
|
brainfukk/fiuread
|
7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8
|
[
"Apache-2.0"
] | null | null | null |
src/apps/shop/serializers.py
|
brainfukk/fiuread
|
7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import serializers
from .models import ShopItem
class ShopItemSerializer(serializers.ModelSerializer):
buy_method = serializers.SerializerMethodField()
class Meta:
model = ShopItem
fields = ("id", "name", "cost", "source", "buy_method")
def get_buy_method(self, obj):
return {"is_coin_available": True, "is_level_available": True}
| 26.133333
| 70
| 0.706633
| 320
| 0.816327
| 0
| 0
| 0
| 0
| 0
| 0
| 75
| 0.191327
|
05a722d6a74837776cdd4f147e146b4674a0d013
| 2,205
|
py
|
Python
|
app.py
|
limjierui/money-goose-telebot
|
bf048e27598b9ff6da580ee62309c4ca33eae0c5
|
[
"MIT"
] | null | null | null |
app.py
|
limjierui/money-goose-telebot
|
bf048e27598b9ff6da580ee62309c4ca33eae0c5
|
[
"MIT"
] | null | null | null |
app.py
|
limjierui/money-goose-telebot
|
bf048e27598b9ff6da580ee62309c4ca33eae0c5
|
[
"MIT"
] | 3
|
2020-12-21T16:21:45.000Z
|
2020-12-24T16:21:28.000Z
|
from flask import Flask, request
import telegram
from moneyGooseBot.master_mind import mainCommandHandler
from moneyGooseBot.credentials import URL, reset_key, bot_token, bot_user_name
from web_server import create_app
# https://api.telegram.org/bot1359229669:AAEm8MG26qbA9XjJyojVKvPI7jAdMVqAkc8/getMe
bot = telegram.Bot(token=bot_token)
app = create_app()
@app.route('/{}'.format(bot_token), methods=['POST'])
def respond():
# retrieve the message in JSON and then transform it to the Telegram object
print("Received message")
    # for overwhelming updates, clear the update attempt (this line below)
    # and have the method return 1 to clear all pending updates
    try:
        update = telegram.Update.de_json(request.get_json(force=True), bot)
    except Exception:
        print("some error has occurred internally")
        return 'ok'
    if update.message:
        mainCommandHandler(incoming_message=update.message, telebot_instance=bot)
return 'ok'
@app.route('/{}'.format(reset_key), methods=['POST'])
def reset():
return 'ok'
@app.route('/setwebhook', methods=['GET', 'POST'])
def set_webhook():
# we use the bot object to link the bot to our app which live
# in the link provided by URL
s = bot.setWebhook('{URL}{HOOK}'.format(URL=URL, HOOK=bot_token))
# something to let us know things work
if s:
return "webhook setup ok"
else:
return "webhook setup failed"
@app.route('/resetupdate', methods=['GET','POST'])
def reset_update():
"""
    Really a temporary method to keep the updates from flooding
"""
s = bot.setWebhook('{URL}{RESET}'.format(URL=URL, RESET=reset_key))
if s:
return "reset hook setup ok"
else:
return "reset hook setup failed"
@app.route('/dropwebhook', methods=['GET'])
def drop_webhook():
"""
Stops the webhook from polling the server and drops all pending requests
"""
s = bot.deleteWebhook(drop_pending_updates=True)
if s:
return "web hook delete success"
else:
return "web hook delete failure"
if __name__ == '__main__':
    # note the threaded arg which allows
# your app to have more than one thread
app.run(threaded=True, debug=True)
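# Illustrative note (not part of the original app): once /setwebhook has been
# visited, Telegram POSTs each update to URL + bot_token, which is handled by
# respond() above. A minimal local smoke test could look like the sketch below;
# the port number and the `requests` dependency are assumptions for the example.
#
#   import requests
#   fake_update = {"update_id": 1,
#                  "message": {"message_id": 1, "date": 0, "text": "hi",
#                              "chat": {"id": 1, "type": "private"}}}
#   requests.post("http://localhost:5000/" + bot_token, json=fake_update)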
| 30.625
| 86
| 0.686168
| 0
| 0
| 0
| 0
| 1,683
| 0.763265
| 0
| 0
| 974
| 0.441723
|
05aa26976885770e54982447eb4735e665e02cf2
| 3,061
|
py
|
Python
|
final/software_tutorial/tutorial/libopencm3/scripts/data/lpc43xx/yaml_odict.py
|
mmwvh/ce
|
162064eeb6668896410c9d176fe75531cd3493fb
|
[
"MIT"
] | 28
|
2021-04-08T15:59:56.000Z
|
2022-03-12T20:42:16.000Z
|
final/software_tutorial/tutorial/libopencm3/scripts/data/lpc43xx/yaml_odict.py
|
mmwvh/ce
|
162064eeb6668896410c9d176fe75531cd3493fb
|
[
"MIT"
] | 7
|
2020-08-25T07:58:01.000Z
|
2020-09-12T20:44:12.000Z
|
final/software_tutorial/tutorial/libopencm3/scripts/data/lpc43xx/yaml_odict.py
|
mmwvh/ce
|
162064eeb6668896410c9d176fe75531cd3493fb
|
[
"MIT"
] | 13
|
2020-02-13T18:25:57.000Z
|
2022-03-01T11:27:12.000Z
|
import yaml
from collections import OrderedDict
def construct_odict(load, node):
"""This is the same as SafeConstructor.construct_yaml_omap(),
except the data type is changed to OrderedDict() and setitem is
used instead of append in the loop.
>>> yaml.load('''
... !!omap
... - foo: bar
... - mumble: quux
... - baz: gorp
... ''')
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''')
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
"""
omap = OrderedDict()
yield omap
if not isinstance(node, yaml.SequenceNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map",
node.start_mark,
"expected a sequence, but found %s" % node.id, node.start_mark
)
for subnode in node.value:
if not isinstance(subnode, yaml.MappingNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
"expected a mapping of length 1, but found %s" % subnode.id,
subnode.start_mark
)
if len(subnode.value) != 1:
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
"expected a single mapping item, but found %d items" % len(subnode.value),
subnode.start_mark
)
key_node, value_node = subnode.value[0]
key = load.construct_object(key_node)
value = load.construct_object(value_node)
omap[key] = value
yaml.add_constructor(u'tag:yaml.org,2002:omap', construct_odict)
def repr_pairs(dump, tag, sequence, flow_style=None):
"""This is the same code as BaseRepresenter.represent_sequence(),
but the value passed to dump.represent_data() in the loop is a
dictionary instead of a tuple."""
value = []
node = yaml.SequenceNode(tag, value, flow_style=flow_style)
if dump.alias_key is not None:
dump.represented_objects[dump.alias_key] = node
best_style = True
for (key, val) in sequence:
item = dump.represent_data({key: val})
if not (isinstance(item, yaml.ScalarNode) and not item.style):
best_style = False
value.append(item)
if flow_style is None:
if dump.default_flow_style is not None:
node.flow_style = dump.default_flow_style
else:
node.flow_style = best_style
return node
def repr_odict(dumper, data):
"""
>>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.dump(data, default_flow_style=False)
'!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n'
>>> yaml.dump(data, default_flow_style=True)
'!!omap [foo: bar, mumble: quux, baz: gorp]\\n'
"""
    return repr_pairs(dumper, u'tag:yaml.org,2002:omap', data.items())
yaml.add_representer(OrderedDict, repr_odict)
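# Illustrative round-trip sketch (not part of the original module). It uses only
# the constructor/representer registered above; the sample data is invented.
#
#   data = OrderedDict([("foo", "bar"), ("mumble", "quux")])
#   text = yaml.dump(data, default_flow_style=False)  # emitted as a !!omap sequence
#   assert yaml.load(text) == data                    # construct_odict restores the order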
| 37.329268
| 90
| 0.613525
| 0
| 0
| 1,648
| 0.538386
| 0
| 0
| 0
| 0
| 1,268
| 0.414244
|
05ac654490e3084f2724bef66dfbbee9d64e72f4
| 10,609
|
py
|
Python
|
app.py
|
isabella232/arrested-development
|
ac53eb71a4cacc3793d51ff2c2c3c51a7c384dea
|
[
"FSFAP"
] | 1
|
2015-03-16T21:22:58.000Z
|
2015-03-16T21:22:58.000Z
|
app.py
|
nprapps/arrested-development
|
ac53eb71a4cacc3793d51ff2c2c3c51a7c384dea
|
[
"FSFAP"
] | 1
|
2021-02-24T06:08:41.000Z
|
2021-02-24T06:08:41.000Z
|
app.py
|
isabella232/arrested-development
|
ac53eb71a4cacc3793d51ff2c2c3c51a7c384dea
|
[
"FSFAP"
] | 2
|
2015-02-22T23:39:11.000Z
|
2021-02-23T10:45:05.000Z
|
#!/usr/bin/env python
import json
from mimetypes import guess_type
import urllib
import envoy
from flask import Flask, Markup, abort, render_template, redirect, Response
import app_config
from models import Joke, Episode, EpisodeJoke, JokeConnection
from render_utils import flatten_app_config, make_context
app = Flask(app_config.PROJECT_NAME)
def _all_seasons():
output = []
SEASONS = [1, 2, 3]
if app_config.IMPORT_NEW_SEASON is True:
SEASONS.append(4)
for season in SEASONS:
season_dict = {}
season_dict['season'] = season
season_dict['episodes'] = []
for episode in Episode.select().where(Episode.season == season):
season_dict['episodes'].append({
'url': 'episode-%s.html' % episode.code,
'text': '%s: %s' % (episode.episode, episode.title),
'episode': episode.episode,
'code': episode.code
})
season_dict['episodes'] = sorted(season_dict['episodes'], key=lambda episode: episode['episode'])
output.append(season_dict)
return output
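# Illustrative shape of the value returned by _all_seasons() (not part of the
# original app; episode codes such as 's01e01' and titles are assumed for the
# example):
#
#   [{'season': 1,
#     'episodes': [{'url': 'episode-s01e01.html', 'text': '1: Pilot',
#                   'episode': 1, 'code': 's01e01'}, ...]},
#    {'season': 2, 'episodes': [...]},
#    ...]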
@app.route('/episode-<episode_code>.html')
def _episode_detail(episode_code):
context = make_context()
context['episode'] = Episode.get(Episode.code == episode_code)
context['jokes'] = {}
context['joke_count'] = 0
for joke in EpisodeJoke.select().where(EpisodeJoke.episode == context['episode']):
group = joke.joke.primary_character
if group not in app_config.PRIMARY_CHARACTER_LIST:
group = 'Miscellaneous'
if group not in context['jokes']:
context['jokes'][group] = []
context['jokes'][group].append(joke)
context['joke_count'] += 1
context['seasons'] = _all_seasons()
context['group_order'] = [g for g in app_config.PRIMARY_CHARACTER_LIST if g in context['jokes']]
try:
context['next'] = Episode.get(number=context['episode'].number + 1)
except Episode.DoesNotExist:
context['next'] = None
try:
context['prev'] = Episode.get(number=context['episode'].number - 1)
except Episode.DoesNotExist:
context['prev'] = None
return render_template('episode_detail.html', **context)
@app.route('/joke-<joke_code>.html')
def _joke_detail(joke_code):
context = make_context()
context['joke'] = Joke.get(Joke.code == int(joke_code))
context['episodejokes'] = EpisodeJoke.select().where(EpisodeJoke.joke == context['joke'])
context['episodejokes'] = sorted(context['episodejokes'], key=lambda ej: ej.episode.code)
context['seasons'] = _all_seasons()
with open('www/live-data/jokes.json') as f:
data = json.load(f)
group_order = data['group_order']
joke_data = data['jokes']
connections = data['connections']
connected_joke_codes = [int(joke_code)]
def filter_connections(c):
if c['joke1_code'] == int(joke_code) or c['joke2_code'] == int(joke_code):
connected_joke_codes.append(c['joke1_code'])
connected_joke_codes.append(c['joke2_code'])
return True
return False
connections = filter(filter_connections, connections)
def filter_jokes(c):
return c['code'] in connected_joke_codes
for group, jokes in joke_data.items():
joke_data[group] = filter(filter_jokes, jokes)
if len(joke_data[group]) == 0:
del joke_data[group]
group_order.remove(group)
context['group_order'] = Markup(json.dumps(group_order))
context['joke_data'] = Markup(json.dumps(joke_data))
context['connection_data'] = Markup(json.dumps(connections))
context['episodes'] = Markup(json.dumps(data['episodes']))
group = context['joke'].primary_character
if group not in app_config.PRIMARY_CHARACTER_LIST:
group = 'Miscellaneous'
context['group'] = group
return render_template('joke_detail.html', **context)
@app.route('/')
def index():
context = make_context()
context['jokes'] = []
for joke in Joke.select():
context['jokes'].append(joke)
context['jokes'] = sorted(context['jokes'], key=lambda joke: joke.code)
context['seasons'] = _all_seasons()
with open('www/live-data/jokes.json') as f:
data = json.load(f)
context['group_order'] = Markup(json.dumps(data['group_order']))
context['joke_data'] = Markup(json.dumps(data['jokes']))
context['connection_data'] = Markup(json.dumps(data['connections']))
context['episodes'] = Markup(json.dumps(data['episodes']))
return render_template('viz.html', **context)
@app.route('/admin/episodes/<episode_code>/jokeconnection/<joke_connection_id>/delete/', methods=['DELETE'])
def _admin_jokeconnection_delete(episode_code, joke_connection_id):
from flask import request
if request.method == 'DELETE':
JokeConnection.delete().where(JokeConnection.id == int(joke_connection_id)).execute()
return joke_connection_id
@app.route('/admin/episodes/<episode_code>/episodejoke/<episode_joke_id>/delete/', methods=['DELETE'])
def _admin_episodejokes_delete(episode_code, episode_joke_id):
from flask import request
if request.method == 'DELETE':
EpisodeJoke.delete().where(EpisodeJoke.id == int(episode_joke_id)).execute()
return episode_joke_id
@app.route('/admin/episodes/<episode_code>/episodejoke/', methods=['PUT', 'POST'])
def _admin_episodejokes(episode_code):
from flask import request
details = request.form.get('details', None)
if request.method == 'POST':
episode_joke_id = request.form.get('episode_joke_id', None)
ej = EpisodeJoke.get(id=int(episode_joke_id))
ej.details = details
ej.save()
return '%s' % ej.id
if request.method == 'PUT':
joke_code = request.form.get('joke_code', None)
joke_type = request.form.get('type', None)
joke = Joke.get(code=int(joke_code))
episode = Episode.get(code=episode_code)
code = 's%se%sj%s' % (
str(episode.season).zfill(2),
str(episode.episode).zfill(2),
joke.code
)
context = {}
context['ej'] = EpisodeJoke(joke=joke, episode=episode, joke_type=joke_type, details=details, code=code)
context['ej'].save()
return render_template('_episodejoke_form_row.html', **context)
@app.route('/admin/episodes/<episode_code>/jokeconnection/', methods=['PUT'])
def _admin_jokeconnections(episode_code):
from flask import request
if request.method == 'POST':
pass
if request.method == 'PUT':
payload = {}
ej = EpisodeJoke.get(id=int(request.form.get('episode_joke_id')))
payload['joke1'] = ej.joke
payload['joke2'] = Joke.get(code=int(request.form.get('joke_code')))
payload['episode'] = ej.episode
j = JokeConnection(**payload)
j.save()
return("""
<br/>
<a class="related kill-related" href="#" data-jc-id="%s">×</a>
<a class="related" href="#joke-%s">%s →</a>""" % (j.id, j.joke2.code, j.joke2.text))
@app.route('/admin/episodes/')
def _admin_episodes_nocode():
return redirect('/admin/episodes/s04e01/')
@app.route('/admin/episodes/<episode_code>/', methods=['GET', 'PUT'])
def _admin_episodes(episode_code):
from flask import request
if request.method == 'GET':
context = {}
context['episode'] = Episode.get(code=episode_code)
context['episodejokes'] = EpisodeJoke.select().join(Episode).where(Episode.code == episode_code)
context['jokes'] = Joke.select().order_by(Joke.primary_character)
context['seasons'] = _all_seasons()
try:
context['next'] = Episode.get(number=context['episode'].number + 1)
except Episode.DoesNotExist:
context['next'] = None
try:
context['prev'] = Episode.get(number=context['episode'].number - 1)
except Episode.DoesNotExist:
context['prev'] = None
return render_template('admin_episode_detail.html', **context)
if request.method == 'PUT':
e = Episode.get(code=episode_code)
e.blurb = request.form.get('blurb', None)
e.save()
return '%s' % e.id
@app.route('/admin/output/')
def _admin_output():
output = {}
output['joke_main'] = ''
output['joke_details'] = ''
output['joke_connections'] = ''
for joke in Joke.select():
for episode in Episode.select().where(Episode.season == 4).order_by(Episode.number):
try:
ej = EpisodeJoke.get(episode=episode, joke=joke)
output['joke_main'] += '%s\t' % ej.joke_type
output['joke_details'] += '\'%s\t' % ej.details
if ej.connections():
output['joke_connections'] += '\'%s\t' % ej.connections()[0]['text']
else:
output['joke_connections'] += '\t'
except EpisodeJoke.DoesNotExist:
output['joke_main'] += '\t'
output['joke_details'] += '\t'
output['joke_connections'] += '\t'
output['joke_main'] += '\n'
output['joke_details'] += '\n'
output['joke_connections'] += '\n'
return render_template('_output.html', **output)
# Render LESS files on-demand
@app.route('/less/<string:filename>')
def _less(filename):
try:
with open('less/%s' % filename) as f:
less = f.read()
except IOError:
abort(404)
r = envoy.run('%s/lessc -' % app_config.APPS_NODE_PATH, data=less)
return r.std_out, 200, {'Content-Type': 'text/css'}
# Render JST templates on-demand
@app.route('/js/templates.js')
def _templates_js():
r = envoy.run('%s/jst --template underscore jst' % app_config.APPS_NODE_PATH)
return r.std_out, 200, {'Content-Type': 'application/javascript'}
# Render application configuration
@app.route('/js/app_config.js')
def _app_config_js():
config = flatten_app_config()
js = 'window.APP_CONFIG = ' + json.dumps(config)
return js, 200, {'Content-Type': 'application/javascript'}
# Serve arbitrary static files on-demand
@app.route('/<path:path>')
def _static(path):
try:
with open('www/%s' % path) as f:
return f.read(), 200, {'Content-Type': guess_type(path)[0]}
except IOError:
abort(404)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000, debug=app_config.DEBUG)
| 33.153125
| 112
| 0.624658
| 0
| 0
| 0
| 0
| 9,226
| 0.869639
| 0
| 0
| 2,360
| 0.222453
|
05ae582a0fb6d75889c4d858419450e634ed3a1d
| 12,129
|
py
|
Python
|
json_modify.py
|
Enacero/yaml-patch
|
7270d431447c82d665622cc316f0941214e7eee2
|
[
"MIT"
] | 2
|
2020-04-21T08:49:39.000Z
|
2020-12-21T07:28:43.000Z
|
json_modify.py
|
Enacero/json_modify
|
7270d431447c82d665622cc316f0941214e7eee2
|
[
"MIT"
] | null | null | null |
json_modify.py
|
Enacero/json_modify
|
7270d431447c82d665622cc316f0941214e7eee2
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2020 Oleksii Petrenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from copy import deepcopy
import json
import typing
import os
import yaml
__version__ = "1.0.1"
__license__ = "MIT"
__all__ = (
"apply_actions",
"apply_to_list",
"apply_to_dict",
"validate_action",
"validate_marker",
"apply_action",
"get_path",
"get_section",
"get_reader",
"find_section_in_list",
)
def get_reader(
file_name: str,
) -> typing.Callable[[typing.Any], typing.Iterable[typing.Any]]:
"""
Determine reader for file.
:param file_name: name of the file with source data
:return: function to read data from file
"""
ext = os.path.splitext(file_name)[-1]
    if ext in [".yaml", ".yml"]:
return yaml.safe_load
elif ext == ".json":
return json.load
raise ValueError("Cant determine reader for {} extension".format(ext))
def find_section_in_list(
section: typing.List[typing.Any], action: typing.Dict[str, typing.Any], key: str
) -> int:
"""
Find index of section in list
:param section: list, where we want to search
:param action: action dictionary
:param key: the key marker
:return: index of searched section
"""
key = key[1:]
if key.isdigit():
return int(key)
if key not in action:
raise KeyError("Action {}: marker {} not found in action".format(action, key))
compares = action[key]
    for index, item in enumerate(section):
        if all(item[compare["key"]] == compare["value"] for compare in compares):
return index
raise IndexError(
"Action {}: Value with {} filters not found".format(action, compares)
)
def get_path(action: typing.Dict[str, typing.Any], path_delim: str) -> typing.List[str]:
"""
Get path from action
:param action: action object
:param path_delim: delimiter to be used to split path into keys.
(Not used when path is list)
:return: list of keys
"""
path = action["path"]
if isinstance(path, str):
keys = [str(key) for key in action["path"].split(path_delim)]
return keys
elif isinstance(path, typing.List) and all(isinstance(key, str) for key in path):
return path
else:
raise TypeError(
"Action {}: path should be str or list of strings".format(action)
)
def get_section(
source_data: typing.Iterable[typing.Any],
action: typing.Dict[str, typing.Any],
path_delim: str,
) -> typing.Iterable[typing.Any]:
"""
    Get section described by action's path.
:param source_data: source data where to search
:param action: action object
:param path_delim: delimiter to be used to split path into keys.
(Not used when path is list)
:return: section from source_data described by path
"""
section = source_data
path = get_path(action, path_delim)
if not action["action"] == "add":
path = path[:-1]
for key in path:
key = key.strip()
if key.startswith("$"):
if not isinstance(section, typing.List):
raise TypeError(
"Action {}: section {} is not list".format(action, section)
)
section_index = find_section_in_list(section, action, key)
section = section[section_index]
else:
if not isinstance(section, typing.Dict):
raise TypeError(
"Action {}: section {} is not dict".format(action, section)
)
section = section[key]
return section
def apply_to_dict(
section: typing.Dict[str, typing.Any],
action: typing.Dict[str, typing.Any],
path_delim: str,
) -> None:
"""
Apply action to dictionary.
:param section: section on which action should be applied
:param action: action object that should be applied
:param path_delim: delimiter
"""
action_name = action["action"]
value = action.get("value")
if action_name == "add":
if isinstance(value, typing.Dict):
section.update(value)
else:
raise TypeError(
"Action {}: value for add operation on dict should "
"be of type dict".format(action)
)
else:
path = get_path(action, path_delim)
key = path[-1].strip()
if action_name == "replace":
section[key] = value
elif action_name == "delete":
if key not in section:
raise KeyError("Action {}: no such key {}".format(action, key))
del section[key]
elif action_name == "rename":
if key not in section:
raise KeyError("Action {}: no such key {}".format(action, key))
elif isinstance(value, str):
section[value] = section[key]
del section[key]
else:
raise TypeError(
"Action {}: for rename action on dict value "
"should be string".format(action)
)
def apply_to_list(
section: typing.List[typing.Any],
action: typing.Dict[str, typing.Any],
path_delim: str,
) -> None:
"""
Apply action to list.
:param section: section on which action should be applied
:param action: action object that should be applied
:param path_delim: delimiter
"""
action_name = action["action"]
value = action.get("value")
if action_name == "add":
if isinstance(value, list):
section.extend(value)
else:
raise TypeError(
"Action {}: value for add operation on list should "
"be of type list".format(action)
)
else:
path = get_path(action, path_delim)
key = path[-1].strip()
section_index = find_section_in_list(section, action, key)
if action_name == "replace":
section[section_index] = value
elif action_name == "delete":
section.pop(section_index)
def apply_action(
section: typing.Iterable[typing.Any],
action: typing.Dict[str, typing.Any],
path_delim: str,
) -> None:
"""
Apply action to selected section.
:param section: section to be modified
:param action: action object
:param path_delim: path delimiter. default is '/'
"""
if isinstance(section, typing.Dict):
apply_to_dict(section, action, path_delim)
elif isinstance(section, typing.List):
apply_to_list(section, action, path_delim)
else:
raise TypeError(
"Action {}: Section {} is not of type dict or list".format(action, section)
)
def validate_marker(action: typing.Dict[str, typing.Any], key: str) -> None:
"""
Validate marker from action's path.
:param action: action object
:param key: key that is used as marker
"""
key = key[1:]
marker = action.get(key)
if not marker:
raise KeyError(
"Action {}: marker {} should be defined in action".format(action, key)
)
if not isinstance(marker, typing.List):
raise TypeError(
"Action {}: marker {} should be of type list".format(action, key)
)
for search_filter in marker:
if not isinstance(search_filter, typing.Dict):
raise TypeError(
"Action {}: marker {} filters should be of type dict".format(
action, key
)
)
filter_key = search_filter.get("key")
filter_value = search_filter.get("value")
if not filter_key or not filter_value:
raise KeyError(
"Action {}: for marker {} key and value should be specified".format(
action, key
)
)
def validate_action(action: typing.Dict[str, typing.Any], path_delim: str) -> None:
"""
Validate action.
:param action: action object
:param path_delim: path delimiter
"""
action_name = action.get("action")
if not action_name:
raise KeyError("Action {}: key action is required".format(action))
path = action.get("path")
if not path:
raise KeyError("Action {}: key path is required".format(action))
path = get_path(action, path_delim)
for key in path:
if key.startswith("$") and not key[1:].isdigit():
validate_marker(action, key)
value = action.get("value")
if action_name in ["add", "replace", "rename"] and not value:
raise KeyError(
"Action {}: for {} action key value is required".format(action, action_name)
)
if action_name == "add":
key = path[-1]
if key.startswith("$") and not isinstance(value, typing.List):
raise TypeError(
"Action {}: for add action on list value should be list".format(action)
)
elif not isinstance(value, typing.Dict):
raise TypeError(
"Action {}: for add action on dict value should be dict".format(action)
)
elif action_name == "rename":
if not isinstance(value, str):
raise TypeError(
"Action {}: for rename action on dict value should be string".format(
action
)
)
def apply_actions(
source: typing.Union[typing.Dict[str, typing.Any], str],
actions: typing.Union[typing.List[typing.Dict[str, typing.Any]], str],
copy: bool = False,
path_delim: str = "/",
) -> typing.Iterable[typing.Any]:
"""
Apply actions on source_data.
:param source: dictionary or json/yaml file with data that should be modified
:param actions: list or json/yaml file with actions, that should be applied to
source
:param copy: should source be copied before modification or changed in place
(works only when source is dictionary not file). default is False
:param path_delim: path delimiter. default is '/'
:return: source modified after applying actions
"""
if isinstance(source, str):
reader = get_reader(source)
with open(source, "r") as f:
source_data = reader(f)
elif isinstance(source, typing.Dict):
if copy:
source_data = deepcopy(source)
else:
source_data = source
else:
raise TypeError("source should be data dictionary or file_name with data")
if isinstance(actions, str):
reader = get_reader(actions)
with open(actions, "r") as f:
actions_data = reader(f)
elif isinstance(actions, typing.List):
actions_data = actions
else:
raise TypeError(
"actions should be data dictionary or file_name with actions list"
)
for action in actions_data:
validate_action(action, path_delim)
for action in actions_data:
section = get_section(source_data, action, path_delim)
apply_action(section, action, path_delim)
return source_data
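# Illustrative usage sketch (not part of the original module). The sample data
# and the 'server' marker name below are invented for the example; only
# apply_actions() itself is defined above.
#
#   source = {"servers": [{"name": "web1", "port": 80}]}
#   actions = [
#       {"action": "replace",
#        "path": "servers/$server/port",
#        "server": [{"key": "name", "value": "web1"}],
#        "value": 8080},
#   ]
#   modified = apply_actions(source, actions, copy=True)
#   # modified["servers"][0]["port"] == 8080, while `source` is left untouched
#   # because copy=True.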
| 32.692722
| 88
| 0.612252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,806
| 0.39624
|
05aed2b7bdb2d62afb387bf3fa03ff50f51651b0
| 43,958
|
py
|
Python
|
serial_scripts/vm_regression/test_vm_serial.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | 1
|
2017-06-13T04:42:34.000Z
|
2017-06-13T04:42:34.000Z
|
serial_scripts/vm_regression/test_vm_serial.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | null | null | null |
serial_scripts/vm_regression/test_vm_serial.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | null | null | null |
import traffic_tests
from vn_test import *
from vm_test import *
from floating_ip import *
from policy_test import *
from compute_node_test import ComputeNodeFixture
from user_test import UserFixture
from multiple_vn_vm_test import *
from tcutils.wrappers import preposttest_wrapper
sys.path.append(os.path.realpath('tcutils/pkgs/Traffic'))
from traffic.core.stream import Stream
from traffic.core.profile import create, ContinuousProfile
from traffic.core.helpers import Host
from traffic.core.helpers import Sender, Receiver
from base import BaseVnVmTest
from common import isolated_creds
import inspect
from tcutils.util import skip_because
from tcutils.tcpdump_utils import start_tcpdump_for_intf,\
stop_tcpdump_for_intf, verify_tcpdump_count
import test
from tcutils.contrail_status_check import ContrailStatusChecker
class TestBasicVMVN0(BaseVnVmTest):
@classmethod
def setUpClass(cls):
super(TestBasicVMVN0, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestBasicVMVN0, cls).tearDownClass()
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_bring_up_vm_with_control_node_down(self):
'''
        Description: Create VMs when there is no active control node. Verify the VMs come up fine when all control nodes are back
Test steps:
1. Create a VN.
2. Shutdown the control node and spawn some VMs.
            3. The VM info should get deleted from the agents after 2 minutes.
4. The Tap intf corresponding to the VM should go to ERROR state.
5. Bring up the control nodes.
Pass criteria: The VMs should be back to ACTIVE state, so should the Tap interfaces.
Maintainer : ganeshahv@juniper.net
'''
if len(set(self.inputs.bgp_ips)) < 2:
raise self.skipTest(
"Skipping Test. At least 2 control node required to run the test")
result = True
vn1_name = get_random_name('vn30')
vn1_subnets = ['30.1.1.0/24']
# Collecting all the control node details
controller_list = []
for entry in self.inputs.compute_ips:
inspect_h = self.agent_inspect[entry]
agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
for entry in agent_xmpp_status:
controller_list.append(entry['controller_ip'])
controller_list = set(controller_list)
# Stop all the control node
for entry in controller_list:
            self.logger.info('Stopping the Control service in %s' % (entry))
self.inputs.stop_service('contrail-control', [entry])
self.addCleanup(self.inputs.start_service,
'contrail-control', [entry])
sleep(30)
vn1_vm1_name = get_random_name('vm1')
vn1_vm2_name = get_random_name('vm2')
vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
vm1_fixture.verify_vm_launched()
vm2_fixture.verify_vm_launched()
vm1_node_ip = self.inputs.host_data[
self.nova_h.get_nova_host_of_vm(vm1_fixture.vm_obj)]['host_ip']
vm2_node_ip = self.inputs.host_data[
self.nova_h.get_nova_host_of_vm(vm2_fixture.vm_obj)]['host_ip']
inspect_h1 = self.agent_inspect[vm1_node_ip]
inspect_h2 = self.agent_inspect[vm2_node_ip]
self.logger.info(
            'Checking that TAP interfaces are created for all VMs and are in Error state')
vm1_tap_intf = None
vm2_tap_intf = None
vm1_tap_intf = inspect_h1.get_vna_tap_interface_by_ip(
vm1_fixture.vm_ip)
        if not vm1_tap_intf:
self.logger.error('TAP interface is not created for VM %s' %
(vn1_vm1_name))
result = result and False
else:
if vm1_tap_intf[0]['vrf_name'] != '--ERROR--':
self.logger.error(
                    'TAP interface VRF info should be Error. But currently in %s' %
(vm1_tap_intf[0]['vrf_name']))
result = result and False
vm2_tap_intf = inspect_h2.get_vna_tap_interface_by_ip(
vm2_fixture.vm_ip)
        if not vm2_tap_intf:
self.logger.error('TAP interface is not created for VM %s' %
(vn1_vm2_name))
result = result and False
else:
if vm2_tap_intf[0]['vrf_name'] != '--ERROR--':
self.logger.error(
                    'TAP interface VRF info should be Error. But currently in %s' %
(vm2_tap_intf[0]['vrf_name']))
result = result and False
self.logger.info('Waiting for 120 sec for cleanup to begin')
sleep(120)
# Check agent should not have any VN info
for entry in self.inputs.compute_ips:
inspect_h = self.agent_inspect[entry]
self.logger.info('Checking VN info in agent %s.' % (entry))
if inspect_h.get_vna_vn(domain=self.project.domain_name,
project=self.project.project_name,
vn_name=vn1_fixture.vn_name):
self.logger.error(
'Agent should not have any VN info present when control node is down')
result = result and False
# Start all the control node
for entry in controller_list:
self.logger.info('Starting the Control service in %s' % (entry))
self.inputs.start_service('contrail-control', [entry])
sleep(10)
self.logger.info('Checking the VM came up properly or not')
assert vn1_fixture.verify_on_setup()
assert vm2_fixture.verify_on_setup()
assert vm1_fixture.verify_on_setup()
# Check ping between VM
assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
if not result:
self.logger.error(
'Test to verify cleanup of agent after control nodes stop Failed')
assert result
return True
# end test_bring_up_vm_with_control_node_down
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_ipam_persistence_across_restart_reboots(self):
'''
Description: Test to validate IPAM persistence across restarts and reboots of nodes.
Test steps:
1. Create a IPAM.
2. Create a VN and launch VMs in it.
3. Restart the contrail-vrouter-agent and contrail-control services.
Pass criteria: The VMs should be back to ACTIVE state and the ping between them should PASS.
Maintainer : ganeshahv@juniper.net
'''
ipam_obj=self.useFixture( IPAMFixture(project_obj= self.project, name = get_random_name('my-ipam')))
assert ipam_obj.verify_on_setup()
ts = time.time()
vn_name = get_random_name('vn')
vn_fixture=self.useFixture( VNFixture(project_name= self.project.project_name, connections= self.connections,
vn_name= vn_name, inputs= self.inputs, subnets=['22.1.1.0/24'], ipam_fq_name = ipam_obj.fq_name))
assert vn_fixture.verify_on_setup()
vm1_fixture = self.useFixture(VMFixture(connections=self.connections,project_name = self.inputs.project_name,
vn_obj=vn_fixture.obj, vm_name = get_random_name('vm1')))
vm2_fixture = self.useFixture(VMFixture(connections=self.connections,project_name = self.inputs.project_name,
vn_obj=vn_fixture.obj, vm_name = get_random_name('vm2')))
assert vm1_fixture.verify_on_setup()
assert vm2_fixture.verify_on_setup()
self.nova_h.wait_till_vm_is_up( vm1_fixture.vm_obj )
self.nova_h.wait_till_vm_is_up( vm2_fixture.vm_obj )
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
self.logger.info('Will restart the services now')
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter-agent', [compute_ip])
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip])
cluster_status, error_nodes = ContrailStatusChecker().wait_till_contrail_cluster_stable()
assert cluster_status, 'Cluster is not stable after restart'
self.logger.info('Will check if the ipam persists and ping b/w VMs is still successful')
assert ipam_obj.verify_on_setup()
msg = 'VM verification failed after process restarts'
assert vm1_fixture.verify_on_setup(), msg
assert vm2_fixture.verify_on_setup(), msg
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
return True
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_multistep_vm_add_delete_with_stop_start_service(self):
'''
Description: Test to validate VMs addition deletion after service restarts.
Test steps:
1. Create a VN and launch a VM in it.
2. Stop the contrail-vrouter-agent service and check the VM's status.
3. Launch one more VM.
4. Start the contrail-vrouter-agent service.
Pass criteria: The VMs should be in ACTIVE state after the contrail-vrouter-agent service is UP.
Maintainer : ganeshahv@juniper.net
'''
vn_name = get_random_name('vn1')
vn_subnets = ['10.10.10.0/24']
vn_fixture = self.useFixture(
VNFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_name=vn_name, inputs=self.inputs, subnets=vn_subnets))
assert vn_fixture.verify_on_setup()
vn_obj = vn_fixture.obj
self.logger.info('Launching VM')
vm1_fixture = VMFixture(connections=self.connections,
vn_obj=vn_obj, vm_name=get_random_name('vm1') , project_name=self.inputs.project_name)
vm1_fixture.setUp()
assert vm1_fixture.verify_vm_launched()
        self.logger.info('vm1 launched successfully. Stopping vrouter service')
for compute_ip in self.inputs.compute_ips:
self.inputs.stop_service('contrail-vrouter-agent', [compute_ip])
self.addCleanup(self.inputs.start_service,
'contrail-vrouter-agent', [compute_ip])
self.logger.info('Trying to delete vm1')
assert not vm1_fixture.cleanUp()
self.logger.info(
            'vm1 is not deleted as expected. Trying to launch a new VM vm2')
vm2_fixture = self.useFixture(VMFixture(connections=self.connections,
vn_obj=vn_obj, vm_name = get_random_name ('vm2'), project_name=self.inputs.project_name))
assert vm2_fixture.verify_vm_launched()
self.logger.info('Checking if vm2 has booted up')
assert not self.nova_h.wait_till_vm_is_up(vm2_fixture.vm_obj)
self.logger.info(
            'vm2 has not booted up as expected. Starting vrouter service')
for compute_ip in self.inputs.compute_ips:
self.inputs.start_service('contrail-vrouter-agent', [compute_ip])
vm2_fixture.wait_till_vm_is_up()
self.logger.info('vm2 is up now as expected')
assert vm2_fixture.verify_on_setup()
return True
# end test_multistep_vm_add_delete_with_stop_start_service
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_multistep_vm_delete_with_stop_start_service(self):
'''
Description: Test to validate VM's deletion attempt fails when the contrail-vrouter-agent service is down.
Test steps:
1. Create a VN and launch a VM in it.
2. Stop the contrail-vrouter-agent service and check the VM's status.
3. Try deleting the VM.
4. Start the contrail-vrouter-agent service.
Pass criteria: The VM's deletion should fail and it should come back to ACTIVE state after the contrail-vrouter-agent service is UP.
Maintainer : ganeshahv@juniper.net
'''
vn_name = get_random_name('vn1')
vn_subnets = ['10.10.10.0/24']
vn_fixture = self.useFixture(
VNFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_name=vn_name, inputs=self.inputs, subnets=vn_subnets))
assert vn_fixture.verify_on_setup()
vn_obj = vn_fixture.obj
self.logger.info('Launching VM')
vm1_fixture = VMFixture(connections=self.connections,
vn_obj=vn_obj, vm_name = get_random_name('vm1'), project_name=self.inputs.project_name)
vm1_fixture.setUp()
vm1_fixture.verify_vm_launched()
        self.logger.info('VM launched successfully. Stopping vrouter service')
for compute_ip in self.inputs.compute_ips:
self.inputs.stop_service('contrail-vrouter-agent', [compute_ip])
# self.addCleanup( sleep(10))
self.addCleanup(self.inputs.start_service,
'contrail-vrouter-agent', [compute_ip])
self.logger.info('Trying to delete the VM')
assert not vm1_fixture.cleanUp()
self.logger.info('VM is not deleted as expected')
for compute_ip in self.inputs.compute_ips:
self.logger.info('Starting Vrouter Service')
self.inputs.start_service('contrail-vrouter-agent', [compute_ip])
sleep(10)
return True
# end test_multistep_vm_delete_with_stop_start_service
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter')
def test_nova_com_sch_restart_with_multiple_vn_vm(self):
'''
Description: Test to validate that multiple VM creation and deletion after service restarts.
Test steps:
1. Create multiple VNs and VMs in them.
2. Restart the openstack-nova-compute and openstack-nova-scheduler services.
Pass criteria: The VMs should all be UP and running after the restarts.
Maintainer : ganeshahv@juniper.net
'''
vm1_name = get_random_name('vm_mine')
vn_name = get_random_name('vn222')
vn_subnets = ['11.1.1.0/24']
vn_count_for_test = 32
if (len(self.inputs.compute_ips) == 1):
vn_count_for_test = 5
vm_fixture = self.useFixture(
create_multiple_vn_and_multiple_vm_fixture(
connections=self.connections,
vn_name=vn_name, vm_name=vm1_name, inputs=self.inputs, project_name=self.inputs.project_name,
subnets=vn_subnets, vn_count=vn_count_for_test, vm_count=1, subnet_count=1, image_name='cirros-0.3.0-x86_64-uec',
flavor='m1.tiny'))
time.sleep(100)
try:
assert vm_fixture.verify_vms_on_setup()
assert vm_fixture.verify_vns_on_setup()
except Exception as e:
self.logger.exception("Got exception as %s" % (e))
compute_ip = []
for vmobj in vm_fixture.vm_obj_dict.values():
vm_host_ip = vmobj.vm_node_ip
if vm_host_ip not in compute_ip:
compute_ip.append(vm_host_ip)
self.inputs.restart_service('openstack-nova-compute', compute_ip)
self.inputs.restart_service('openstack-nova-scheduler', compute_ip)
sleep(30)
for vmobj in vm_fixture.vm_obj_dict.values():
assert vmobj.verify_on_setup()
return True
# end test_nova_com_sch_restart_with_multiple_vn_vm
@retry(delay=10, tries=30)
def verification_after_process_restart_in_policy_between_vns(self):
result=True
try:
self.analytics_obj.verify_process_and_connection_infos_agent()
self.analytics_obj.verify_process_and_connection_infos_control_node()
self.analytics_obj.verify_process_and_connection_infos_config()
except:
result=False
return result
@test.attr(type=['sanity'])
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_process_restart_in_policy_between_vns(self):
        ''' Test to validate that, with a policy having a rule allowing ICMP forwarding between VMs on different VNs, ping between VMs should pass
            even with process restarts
1. Pick 2 VN's from resource pool which has one VM each
2. Create policy with icmp allow rule between those VN's and bind it networks
3. Ping from one VM to another VM
4. Restart process 'vrouter' and 'control' on setup
5. Ping again between VM's after process restart
Pass criteria: Step 2,3,4 and 5 should pass
'''
vn1_name = get_random_name('vn1')
vn1_subnets = ["192.168.1.0/24"]
vn2_name = get_random_name('vn2')
vn2_subnets = ["192.168.2.0/24"]
policy1_name = 'policy1'
policy2_name = 'policy2'
rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'icmp',
'source_network': vn1_name,
'dest_network': vn2_name,
},
]
rev_rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'icmp',
'source_network': vn2_name,
'dest_network': vn1_name,
},
]
policy1_fixture = self.useFixture(
PolicyFixture(
policy_name=policy1_name, rules_list=rules, inputs=self.inputs,
connections=self.connections))
policy2_fixture = self.useFixture(
PolicyFixture(
policy_name=policy2_name,
rules_list=rev_rules, inputs=self.inputs,
connections=self.connections))
vn1_fixture = self.create_vn(vn1_name, vn1_subnets,option = 'api')
assert vn1_fixture.verify_on_setup()
vn1_fixture.bind_policies(
[policy1_fixture.policy_fq_name], vn1_fixture.vn_id)
self.addCleanup(vn1_fixture.unbind_policies,
vn1_fixture.vn_id, [policy1_fixture.policy_fq_name])
vn2_fixture = self.create_vn(vn2_name, vn2_subnets, option = 'api')
assert vn2_fixture.verify_on_setup()
vn2_fixture.bind_policies(
[policy2_fixture.policy_fq_name], vn2_fixture.vn_id)
self.addCleanup(vn2_fixture.unbind_policies,
vn2_fixture.vn_id, [policy2_fixture.policy_fq_name])
vn1_vm1_name = get_random_name('vn1_vm1')
vn2_vm1_name = get_random_name('vn2_vm1')
vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
vm2_fixture = self.create_vm(vn2_fixture, vn2_vm1_name)
assert vm1_fixture.wait_till_vm_is_up()
assert vm2_fixture.wait_till_vm_is_up()
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter-agent', [compute_ip])
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip])
        for cfgm_ip in self.inputs.cfgm_ips:
            self.inputs.restart_service('contrail-api', [cfgm_ip])
self.verification_after_process_restart_in_policy_between_vns()
self.logger.info('Sleeping for a min.')
sleep(60)
for cfgm_name in self.inputs.cfgm_names:
assert self.analytics_obj.verify_cfgm_uve_module_state\
(self.inputs.collector_names[0],
cfgm_name,'contrail-api')
vn1_vm2_name = get_random_name('vn1_vm2')
vn2_vm2_name = get_random_name('vn2_vm2')
vn3_name = get_random_name('vn3')
vn3_subnets = ["192.168.4.0/24"]
vn3_fixture = self.create_vn(vn3_name, vn3_subnets,option = 'api')
assert vn1_fixture.verify_on_setup()
vm3_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
assert vm3_fixture.verify_on_setup()
vm4_fixture = self.create_vm(vn2_fixture, vn2_vm2_name)
assert vm4_fixture.verify_on_setup()
vm3_fixture.wait_till_vm_is_up()
vm4_fixture.wait_till_vm_is_up()
assert vm3_fixture.ping_with_certainty(vm4_fixture.vm_ip)
# end test_process_restart_in_policy_between_vns
@test.attr(type=['sanity', 'ci_sanity_WIP'])
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_process_restart_with_multiple_vn_vm(self):
'''
Description: Test to validate that multiple VM creation and deletion after service restarts.
Test steps:
1. Create multiple VNs and VMs in them.
2. Restart the contrail-vrouter-agent service.
Pass criteria: The VMs should all be UP and running after the restarts.
Maintainer : ganeshahv@juniper.net
'''
vm1_name = 'vm_mine'
vn_name = 'vn222'
vn_subnets = ['11.1.1.0/24']
vn_count_for_test = 32
if (len(self.inputs.compute_ips) == 1):
vn_count_for_test = 10
if os.environ.has_key('ci_image'):
vn_count_for_test = 3
vm_fixture = self.useFixture(
create_multiple_vn_and_multiple_vm_fixture(
connections=self.connections,
vn_name=vn_name, vm_name=vm1_name, inputs=self.inputs, project_name=self.inputs.project_name,
subnets=vn_subnets, vn_count=vn_count_for_test, vm_count=1, subnet_count=1, image_name='cirros-0.3.0-x86_64-uec',
flavor='m1.tiny'))
time.sleep(100)
try:
assert vm_fixture.wait_till_vms_are_up()
assert vm_fixture.verify_vns_on_setup()
except Exception as e:
self.logger.exception("Got exception as %s" % (e))
compute_ip = []
for vmobj in vm_fixture.vm_obj_dict.values():
vm_host_ip = vmobj.vm_node_ip
if vm_host_ip not in compute_ip:
compute_ip.append(vm_host_ip)
self.inputs.restart_service('contrail-vrouter-agent', compute_ip)
sleep(50)
for vmobj in vm_fixture.vm_obj_dict.values():
assert vmobj.verify_on_setup()
return True
#end test_process_restart_with_multiple_vn_vm
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_kill_service_verify_core_generation(self):
'''
Description: Test to Validate core is generated for services on SIGQUIT
Test steps:
            1. Issue commands to generate cores for multiple processes.
Pass criteria: Verify core generation is successful.
Maintainer : sandipd@juniper.net
'''
compute_ip = self.inputs.compute_ips[0]
compute_user = self.inputs.host_data[compute_ip]['username']
compute_pwd = self.inputs.host_data[compute_ip]['password']
cfgm_ip = self.inputs.cfgm_ips[0]
cfgm_user = self.inputs.host_data[cfgm_ip]['username']
cfgm_pwd = self.inputs.host_data[cfgm_ip]['password']
collector_ip = self.inputs.collector_ips[0]
collector_user = self.inputs.host_data[collector_ip]['username']
collector_pwd = self.inputs.host_data[collector_ip]['password']
control_ip = self.inputs.bgp_ips[0]
control_user = self.inputs.host_data[control_ip]['username']
control_pwd = self.inputs.host_data[control_ip]['password']
result = True
err_msg = []
# Format <service_name> : [<process_name>,
# <role_on_which_process_running>]
service_list = {
'contrail-control': 'control',
'contrail-vrouter-agent': 'compute',
'contrail-query-engine': 'collector',
'contrail-collector': 'collector',
'contrail-analytics-api': 'collector',
'contrail-discovery': 'cfgm',
'contrail-api': 'cfgm',
'contrail-svc-monitor': 'cfgm'
}
for service, role in service_list.iteritems():
cmd = "service %s status | awk '{print $4}' | cut -f 1 -d','" % service
self.logger.info("service:%s, role:%s" % (service, role))
if role == 'cfgm':
login_ip = cfgm_ip
login_user = cfgm_user
login_pwd = cfgm_pwd
elif role == 'compute':
login_ip = compute_ip
login_user = compute_user
login_pwd = compute_pwd
elif role == 'control':
login_ip = control_ip
login_user = control_user
login_pwd = control_pwd
elif role == 'collector':
login_ip = collector_ip
login_user = collector_user
login_pwd = collector_pwd
else:
self.logger.error("invalid role:%s" % role)
result = result and False
assert result, "Invalid role:%s specified for service:%s" % (
role, service)
with settings(host_string='%s@%s' % (login_user, login_ip),
password=login_pwd, warn_only=True, abort_on_prompts=False):
pid = run(cmd)
self.logger.info("service:%s, pid:%s" % (service, pid))
run('kill -3 %s' % pid)
sleep(10)
if "No such file or directory" in run("ls -lrt /var/crashes/core.*%s*" % (pid)):
self.logger.error(
"core is not generated for service:%s" % service)
err_msg.append("core is not generated for service:%s" %
service)
result = result and False
else:
# remove core after generation
run("rm -f /var/crashes/core.*%s*" % (pid))
assert result, "core generation validation test failed: %s" % err_msg
return True
# end test_kill_service_verify_core_generation
@test.attr(type=['sanity'])
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_control_node_switchover(self):
        ''' Stop the active control node and check that agent peering falls back to the other control node.
            1. Pick one VN from the resource pool which has 2 VMs in it
2. Verify ping between VM's
3. Find active control node in cluster by agent inspect
4. Stop control service on active control node
5. Verify agents are connected to new active control-node using xmpp connections
6. Bring back control service on previous active node
            7. Verify ping between VMs again after bringing up the control service
Pass criteria: Step 2,5 and 7 should pass
'''
if len(set(self.inputs.bgp_ips)) < 2:
self.logger.info(
"Skipping Test. At least 2 control node required to run the test")
raise self.skipTest(
"Skipping Test. At least 2 control node required to run the test")
result = True
vn1_name = get_random_name('vn1')
vn1_subnets = ['192.168.1.0/24']
vn1_vm1_name = get_random_name('vn1_vm1')
vn1_vm2_name = get_random_name('vn1_vm2')
vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
assert vn1_fixture.verify_on_setup()
vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
assert vm1_fixture.wait_till_vm_is_up()
vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
assert vm2_fixture.wait_till_vm_is_up()
assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
# Figuring the active control node
active_controller = None
self.agent_inspect = self.connections.agent_inspect
inspect_h = self.agent_inspect[vm1_fixture.vm_node_ip]
agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
for entry in agent_xmpp_status:
if entry['cfg_controller'] == 'Yes':
active_controller = entry['controller_ip']
active_controller_host_ip = self.inputs.host_data[
active_controller]['host_ip']
self.logger.info('Active control node from the Agent %s is %s' %
(vm1_fixture.vm_node_ip, active_controller_host_ip))
# Stop on Active node
        self.logger.info('Stopping the Control service in %s' %
(active_controller_host_ip))
self.inputs.stop_service(
'contrail-control', [active_controller_host_ip])
sleep(5)
# Check the control node shifted to other control node
new_active_controller = None
new_active_controller_state = None
inspect_h = self.agent_inspect[vm1_fixture.vm_node_ip]
agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
for entry in agent_xmpp_status:
if entry['cfg_controller'] == 'Yes':
new_active_controller = entry['controller_ip']
new_active_controller_state = entry['state']
new_active_controller_host_ip = self.inputs.host_data[
new_active_controller]['host_ip']
self.logger.info('Active control node from the Agent %s is %s' %
(vm1_fixture.vm_node_ip, new_active_controller_host_ip))
if new_active_controller_host_ip == active_controller_host_ip:
self.logger.error(
'Control node switchover fail. Old Active controlnode was %s and new active control node is %s' %
(active_controller_host_ip, new_active_controller_host_ip))
result = False
if new_active_controller_state != 'Established':
self.logger.error(
'Agent does not have Established XMPP connection with Active control node')
result = result and False
# Start the control node service again
self.logger.info('Starting the Control service in %s' %
(active_controller_host_ip))
self.inputs.start_service(
'contrail-control', [active_controller_host_ip])
# Check the BGP peering status from the currently active control node
sleep(5)
cn_bgp_entry = self.cn_inspect[
new_active_controller_host_ip].get_cn_bgp_neigh_entry()
for entry in cn_bgp_entry:
if entry['state'] != 'Established':
result = result and False
self.logger.error(
'With Peer %s peering is not Established. Current State %s ' %
(entry['peer'], entry['state']))
assert vm1_fixture.verify_on_setup(), 'VM Verification failed'
assert vm2_fixture.verify_on_setup(), 'VM Verification failed'
# Check the ping
self.logger.info('Checking the ping between the VM again')
assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
if not result:
self.logger.error('Switchover of control node failed')
assert result
return True
# end test_control_node_switchover
@test.attr(type=['sanity'])
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_max_vm_flows(self):
''' Test to validate setting up of the max_vm_flows parameter in agent
config file has expected effect on the flows in the system.
1. Set VM flow cache time and max_vm_flows to 0.01% of max system
flows(512K).
2. Create 2 VN's and connect them using a policy.
3. Launch 2 VM's in the respective VN's.
4. Start traffic with around 20000 flows.
            6. Restart vrouter agent service and check the flows are limited to
0.01% of max system flows.
Pass criteria: Step 6 should pass
'''
result = True
        # Set VM flow cache time to 10 and max_vm_flows to 0.01% of max system
        # flows (512K).
self.comp_node_fixt = {}
self.flow_cache_timeout = 10
self.max_system_flows = 0
self.max_vm_flows = 0.01
for cmp_node in self.inputs.compute_ips:
self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture(
self.connections, cmp_node))
self.comp_node_fixt[cmp_node].set_flow_aging_time(
self.flow_cache_timeout)
self.comp_node_fixt[cmp_node].get_config_per_vm_flow_limit()
self.comp_node_fixt[cmp_node].set_per_vm_flow_limit(
self.max_vm_flows)
self.comp_node_fixt[cmp_node].sup_vrouter_process_restart()
if self.max_system_flows < self.comp_node_fixt[
cmp_node].max_system_flows:
self.max_system_flows = self.comp_node_fixt[
cmp_node].max_system_flows
self.addCleanup(self.cleanup_test_max_vm_flows_vrouter_config,
self.inputs.compute_ips,
self.comp_node_fixt)
# Define resources for this test.
vn1_name = get_random_name('VN1')
vn1_subnets = ['10.1.1.0/24']
vn2_name = get_random_name('VN2')
vn2_subnets = ['10.2.1.0/24']
vn1_vm1_name = get_random_name('VM1')
vn2_vm2_name = get_random_name('VM2')
policy1_name = 'policy1'
policy2_name = 'policy2'
rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'any',
'source_network': vn1_name,
'dest_network': vn2_name,
},
]
rev_rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'any',
'source_network': vn2_name,
'dest_network': vn1_name,
},
]
# Create 2 VN's and connect them using a policy.
vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
assert vn1_fixture.verify_on_setup()
vn2_fixture = self.create_vn(vn2_name, vn2_subnets)
assert vn2_fixture.verify_on_setup()
policy1_fixture = self.useFixture(
PolicyFixture(
policy_name=policy1_name,
rules_list=rules, inputs=self.inputs,
connections=self.connections))
policy2_fixture = self.useFixture(
PolicyFixture(
policy_name=policy2_name,
rules_list=rev_rules, inputs=self.inputs,
connections=self.connections))
vn1_fixture.bind_policies(
[policy1_fixture.policy_fq_name], vn1_fixture.vn_id)
self.addCleanup(vn1_fixture.unbind_policies,
vn1_fixture.vn_id, [policy1_fixture.policy_fq_name])
vn2_fixture.bind_policies(
[policy2_fixture.policy_fq_name], vn2_fixture.vn_id)
self.addCleanup(vn2_fixture.unbind_policies,
vn2_fixture.vn_id, [policy2_fixture.policy_fq_name])
# Launch 2 VM's in the respective VN's.
vm1_fixture = self.create_vm(vn1_fixture,vm_name=vn1_vm1_name,
flavor='contrail_flavor_small', image_name='ubuntu-traffic')
vm2_fixture = self.create_vm(vn2_fixture,vm_name=vn2_vm2_name,
flavor='contrail_flavor_small', image_name='ubuntu-traffic')
assert vm1_fixture.verify_on_setup(), 'VM1 verifications FAILED'
assert vm2_fixture.verify_on_setup(), 'VM2 verifications FAILED'
assert vm1_fixture.wait_till_vm_is_up(), 'VM1 does not seem to be up'
assert vm2_fixture.wait_till_vm_is_up(), 'VM2 does not seem to be up'
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip), \
'Ping from VM1 to VM2 FAILED'
# Set num_flows to fixed, smaller value but > 1% of
# system max flows
max_system_flows = self.max_system_flows
vm_flow_limit = int((self.max_vm_flows/100.0)*max_system_flows)
num_flows = vm_flow_limit + 30
generated_flows = 2*num_flows
flow_gen_rate = 5
proto = 'udp'
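        # Worked example of the sizing above (illustrative numbers only): with
        # max_system_flows = 512 * 1024 and max_vm_flows = 0.01 (i.e. 0.01%),
        # vm_flow_limit = int((0.01 / 100.0) * 524288) = 52, so num_flows = 82
        # and generated_flows = 164. Actual values depend on the vrouter config.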
# Start Traffic.
self.traffic_obj = self.useFixture(
traffic_tests.trafficTestFixture(self.connections))
startStatus = self.traffic_obj.startTraffic(
total_single_instance_streams=int(num_flows),
pps=flow_gen_rate,
start_sport=5000,
cfg_profile='ContinuousSportRange',
tx_vm_fixture=vm1_fixture,
rx_vm_fixture=vm2_fixture,
stream_proto=proto)
msg1 = "Status of start traffic : %s, %s, %s" % (
proto, vm1_fixture.vm_ip, startStatus['status'])
self.logger.info(msg1)
assert startStatus['status'], msg1
self.logger.info("Wait for 3 sec for flows to be setup.")
sleep(3)
# 4. Poll live traffic & verify VM flow count
flow_cmd = 'flow -l | grep %s -A2 |' % vm1_fixture.vm_ip
flow_cmd = flow_cmd + ' grep "Action" | grep -v "Action:D(FlowLim)" | wc -l'
sample_time = 2
vm_flow_list=[]
for i in range(5):
sleep(sample_time)
vm_flow_record = self.inputs.run_cmd_on_server(
vm1_fixture.vm_node_ip,
flow_cmd,
self.inputs.host_data[vm1_fixture.vm_node_ip]['username'],
self.inputs.host_data[vm1_fixture.vm_node_ip]['password'])
vm_flow_record = vm_flow_record.strip()
vm_flow_list.append(int(vm_flow_record))
self.logger.info("%s iteration DONE." % i)
self.logger.info("VM flow count = %s." % vm_flow_list[i])
self.logger.info("Sleeping for %s sec before next iteration."
% sample_time)
vm_flow_list.sort(reverse=True)
if vm_flow_list[0] > int(1.1*vm_flow_limit):
self.logger.error("TEST FAILED.")
self.logger.error("VM flow count seen is greater than configured.")
result = False
elif vm_flow_list[0] < int(0.9*vm_flow_limit):
self.logger.error("TEST FAILED.")
self.logger.error("VM flow count seen is much lower than config.")
self.logger.error("Something is stopping flow creation. Please debug")
result = False
else:
self.logger.info("TEST PASSED")
self.logger.info("Expected range of vm flows seen.")
self.logger.info("Max VM flows = %s" % vm_flow_list[0])
# Stop Traffic.
self.logger.info("Proceed to stop traffic..")
try:
self.traffic_obj.stopTraffic(wait_for_stop=False)
except:
self.logger.warn("Failed to get a VM handle and stop traffic.")
self.logger.info("Wait for the flows to get purged.")
sleep(self.flow_cache_timeout)
return result
# end test_max_vm_flows
@test.attr(type=['sanity'])
@preposttest_wrapper
def test_underlay_broadcast_traffic_handling(self):
        ''' Test the underlay broadcast traffic handling by vrouter. (Bug-1545229).
1. Send broadcast traffic from one compute node.
2. Other compute in same subnet should receive that traffic.
3. Receiving compute should treat this traffic as underlay.
4. Compute should not replicate the packet and send the copy back.
Pass criteria: Step 3-4 should pass
Maintainer : chhandak@juniper.net
'''
if (len(self.inputs.compute_ips) < 2):
raise self.skipTest(
"Skipping Test. At least 2 compute node required to run the test")
result = True
        # Save the existing icmp_echo_ignore_broadcasts value
ignore_broadcasts={}
cmd='cat /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts'
for item in self.inputs.compute_ips:
ignore_broadcasts[item]=self.inputs.run_cmd_on_server(
item, cmd,
self.inputs.host_data[item]['username'],
self.inputs.host_data[item]['password'])
        # Set ignore broadcasts to false
cmd='echo "0" > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts'
for item in self.inputs.compute_ips:
self.inputs.run_cmd_on_server(
item, cmd,
self.inputs.host_data[item]['username'],
self.inputs.host_data[item]['password'])
        # Find the broadcast address from the first compute
cmd='ifconfig | grep %s' %(self.inputs.host_data[item]['host_control_ip'])
output=self.inputs.run_cmd_on_server(
item, cmd,
self.inputs.host_data[item]['username'],
self.inputs.host_data[item]['password'])
broadcast_address=output.split(" ")[3].split(":")[1]
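        # Illustrative alternative (not part of the original test): parsing ifconfig
        # output is brittle; if the interface CIDR were known, the broadcast address
        # could also be derived with the standard library, e.g.
        #   import ipaddress
        #   assert str(ipaddress.ip_interface(
        #       '10.204.217.10/24').network.broadcast_address) == '10.204.217.255'
        # The address/prefix above are hypothetical placeholders.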
# Start tcpdump on receiving compute
inspect_h = self.agent_inspect[self.inputs.compute_ips[1]]
comp_intf = inspect_h.get_vna_interface_by_type('eth')
if len(comp_intf) == 1:
comp_intf = comp_intf[0]
self.logger.info('Agent interface name: %s' % comp_intf)
compute_ip = self.inputs.compute_ips[1]
compute_user = self.inputs.host_data[self.inputs.compute_ips[1]]['username']
compute_password = self.inputs.host_data[self.inputs.compute_ips[1]]['password']
filters = "host %s" %(broadcast_address)
(session, pcap) = start_tcpdump_for_intf(compute_ip, compute_user,
compute_password, comp_intf, filters, self.logger)
sleep(5)
# Ping broadcast address
self.logger.info(
            'Pinging broadcast address %s from compute %s' %(broadcast_address,\
self.inputs.host_data[self.inputs.compute_ips[0]]['host_control_ip']))
packet_count = 10
cmd='ping -c %s -b %s' %(packet_count, broadcast_address)
output=self.inputs.run_cmd_on_server(
self.inputs.compute_ips[0], cmd,
self.inputs.host_data[item]['username'],
self.inputs.host_data[item]['password'])
sleep(5)
# Stop tcpdump
stop_tcpdump_for_intf(session, pcap, self.logger)
# Set back the ignore_broadcasts to original value
for item in self.inputs.compute_ips:
cmd='echo "%s" > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts' %(ignore_broadcasts[item])
self.inputs.run_cmd_on_server(
item, cmd,
self.inputs.host_data[item]['username'],
self.inputs.host_data[item]['password'])
# Analyze pcap
        assert verify_tcpdump_count(self, session, pcap, exp_count=packet_count), \
            "There should be only %s packets from source %s on compute %s" % (
                packet_count, broadcast_address, compute_ip)
        self.logger.info(
            'Packet count matched: Compute %s received only %s packets from source IP %s. '
            'No duplicate packets seen' % (compute_ip, packet_count, broadcast_address))
return result
    # end test_underlay_broadcast_traffic_handling
# end TestBasicVMVN0
| 46.125918
| 140
| 0.626189
| 43,106
| 0.980618
| 0
| 0
| 42,453
| 0.965763
| 0
| 0
| 13,110
| 0.298239
|
05afa4697f046e6af89220c07fb5a8db5f7b4cae
| 2,466
|
py
|
Python
|
odata/tests/test_context.py
|
suhrawardi/python-odata
|
8a8f88329ca0f5b893e114bcf7ab02f3a8106ef0
|
[
"MIT"
] | 74
|
2015-04-13T15:12:44.000Z
|
2022-01-24T08:06:16.000Z
|
odata/tests/test_context.py
|
suhrawardi/python-odata
|
8a8f88329ca0f5b893e114bcf7ab02f3a8106ef0
|
[
"MIT"
] | 43
|
2015-04-11T15:08:08.000Z
|
2021-04-14T16:08:43.000Z
|
odata/tests/test_context.py
|
suhrawardi/python-odata
|
8a8f88329ca0f5b893e114bcf7ab02f3a8106ef0
|
[
"MIT"
] | 63
|
2016-06-22T03:52:39.000Z
|
2022-02-25T10:56:34.000Z
|
# -*- coding: utf-8 -*-
import json
import base64
import decimal
from unittest import TestCase
import requests
import responses
from odata.tests import Service, Product, DemoUnboundAction
class TestContext(TestCase):
def test_context_query_without_auth(self):
def request_callback(request):
self.assertIsNone(request.headers.get('Authorization'))
headers = {}
body = dict(value=[])
return requests.codes.ok, headers, json.dumps(body)
with responses.RequestsMock() as rsps:
rsps.add_callback(rsps.GET, Product.__odata_url__(),
callback=request_callback,
content_type='application/json')
context = Service.create_context()
context.query(Product).first()
def test_context_query_with_basic_auth(self):
test_username = 'username'
test_password = 'password'
test_auth = (test_username, test_password)
def request_callback(request):
auth_text = request.headers.get('Authorization')
_, auth_b64 = auth_text.split(' ', 1)
decoded = base64.urlsafe_b64decode(auth_b64.encode()).decode()
username, password = decoded.split(':', 1)
self.assertEqual(test_username, username)
self.assertEqual(test_password, password)
headers = {}
body = dict(value=[])
return requests.codes.ok, headers, json.dumps(body)
with responses.RequestsMock() as rsps:
rsps.add_callback(rsps.GET, Product.__odata_url__(), request_callback,
content_type='application/json')
context = Service.create_context(auth=test_auth)
context.query(Product).first()
def test_context_call_unbound_action(self):
with responses.RequestsMock() as rsps:
rsps.add(rsps.POST, Service.url + 'ODataTest.DemoUnboundAction')
context = Service.create_context()
context.call(DemoUnboundAction)
def test_context_call_bound_action(self):
with responses.RequestsMock() as rsps:
rsps.add(rsps.POST, Product.__odata_url__() + '/ODataTest.DemoActionParameters')
context = Service.create_context()
context.call(Product.DemoActionWithParameters,
Name='TestName',
Price=decimal.Decimal('25.0'))
| 34.732394
| 92
| 0.623277
| 2,272
| 0.92133
| 0
| 0
| 0
| 0
| 0
| 0
| 193
| 0.078264
|
05b079948e8c02888049d1f77a57cfcbe4bb8e4b
| 1,432
|
py
|
Python
|
readouts/basic_readout.py
|
qbxlvnf11/graph-neural-networks-for-graph-classification
|
5d69ead58c786aa8e472ab0433156fe09fe6ca4b
|
[
"MIT"
] | 20
|
2020-09-02T07:07:35.000Z
|
2022-03-16T15:22:14.000Z
|
readouts/basic_readout.py
|
yuexiarenjing/graph-neural-networks-for-graph-classification
|
5d69ead58c786aa8e472ab0433156fe09fe6ca4b
|
[
"MIT"
] | 2
|
2021-11-01T08:32:10.000Z
|
2022-03-25T04:29:35.000Z
|
readouts/basic_readout.py
|
yuexiarenjing/graph-neural-networks-for-graph-classification
|
5d69ead58c786aa8e472ab0433156fe09fe6ca4b
|
[
"MIT"
] | 11
|
2020-09-02T07:13:46.000Z
|
2022-03-23T10:38:07.000Z
|
import torch
def readout_function(x, readout, batch=None, device=None):
if len(x.size()) == 3:
if readout == 'max':
return torch.max(x, dim=1)[0].squeeze() # max readout
elif readout == 'avg':
return torch.mean(x, dim=1).squeeze() # avg readout
elif readout == 'sum':
return torch.sum(x, dim=1).squeeze() # sum readout
elif len(x.size()) == 2:
batch = batch.cpu().tolist()
readouts = []
max_batch = max(batch)
temp_b = 0
last = 0
for i, b in enumerate(batch):
if b != temp_b:
sub_x = x[last:i]
if readout == 'max':
readouts.append(torch.max(sub_x, dim=0)[0].squeeze()) # max readout
elif readout == 'avg':
readouts.append(torch.mean(sub_x, dim=0).squeeze()) # avg readout
elif readout == 'sum':
readouts.append(torch.sum(sub_x, dim=0).squeeze()) # sum readout
last = i
temp_b = b
elif b == max_batch:
sub_x = x[last:len(batch)]
if readout == 'max':
readouts.append(torch.max(sub_x, dim=0)[0].squeeze()) # max readout
elif readout == 'avg':
readouts.append(torch.mean(sub_x, dim=0).squeeze()) # avg readout
elif readout == 'sum':
readouts.append(torch.sum(sub_x, dim=0).squeeze()) # sum readout
break
readouts = torch.cat(readouts, dim=0)
return readouts
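# Minimal usage sketch (not part of the original module): exercises the batched
# 3-D input branch of readout_function with random data.
if __name__ == "__main__":
    dummy = torch.randn(4, 6, 8)  # (num_graphs, num_nodes_per_graph, feature_dim)
    for mode in ("max", "avg", "sum"):
        out = readout_function(dummy, mode)
        print(mode, tuple(out.size()))  # each readout yields shape (4, 8)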
| 34.095238
| 77
| 0.552374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 162
| 0.113128
|
05b273137ad8f8c40be4550bda786ffd468b9e75
| 362
|
py
|
Python
|
src/ef/external_field_uniform.py
|
tnakaicode/ChargedPaticle-LowEnergy
|
47b751bcada2af7fc50cef587a48b1a3c12bcbba
|
[
"MIT"
] | 6
|
2019-04-14T06:19:40.000Z
|
2021-09-14T13:46:26.000Z
|
src/ef/external_field_uniform.py
|
tnakaicode/ChargedPaticle-LowEnergy
|
47b751bcada2af7fc50cef587a48b1a3c12bcbba
|
[
"MIT"
] | 31
|
2018-03-02T12:05:20.000Z
|
2019-02-20T09:29:08.000Z
|
src/ef/external_field_uniform.py
|
tnakaicode/ChargedPaticle-LowEnergy
|
47b751bcada2af7fc50cef587a48b1a3c12bcbba
|
[
"MIT"
] | 10
|
2017-12-21T15:16:55.000Z
|
2020-10-31T23:59:50.000Z
|
from ef.external_field import ExternalField
class ExternalFieldUniform(ExternalField):
def __init__(self, name, electric_or_magnetic, uniform_field_vector):
super().__init__(name, electric_or_magnetic)
self.uniform_field_vector = uniform_field_vector
def get_at_points(self, positions, time):
return self.uniform_field_vector
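# Minimal usage sketch (not part of the original module); the field name, type string
# and vector below are made-up illustration values.
if __name__ == "__main__":
    import numpy as np
    demo_field = ExternalFieldUniform("demo_field", "electric", np.array([0.0, 0.0, 1.0]))
    print(demo_field.get_at_points(positions=np.zeros((3, 3)), time=0.0))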
| 30.166667
| 73
| 0.773481
| 315
| 0.870166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
05b2b6ec5edc971fee6f55c38fd27eec4af6014d
| 11,493
|
py
|
Python
|
plugins/helpers/EFO.py
|
opentargets/platform-input-support
|
555c3ed091a7a3a767dc0c37054dbcd369f02252
|
[
"Apache-2.0"
] | 4
|
2019-03-26T15:54:35.000Z
|
2021-05-27T13:18:43.000Z
|
plugins/helpers/EFO.py
|
opentargets/platform-input-support
|
555c3ed091a7a3a767dc0c37054dbcd369f02252
|
[
"Apache-2.0"
] | 12
|
2019-04-23T14:45:04.000Z
|
2022-03-17T09:40:04.000Z
|
plugins/helpers/EFO.py
|
opentargets/platform-input-support
|
555c3ed091a7a3a767dc0c37054dbcd369f02252
|
[
"Apache-2.0"
] | 2
|
2019-06-15T17:21:14.000Z
|
2021-05-14T18:35:18.000Z
|
import logging
import re
import json
import jsonlines
from urllib import parse
logger = logging.getLogger(__name__)
# EFO
# The current implementation is based on the conversion from owl format to json lines format using Apache RIOT
# The structure diseases_obsolete stores the obsolete terms and is used to retrieve the relationship between valid
# terms and obsolete terms.
# The locationIds are generated by retrieving the parent/child structure and recursively collecting the proper info.
class EFO(object):
def __init__(self, efo_input):
self.efo_input = efo_input
self.diseases = {}
self.diseases_obsolete = {}
self.has_location_ids = {}
self.all_path = {}
self.parent_child_tuples = []
def init_disease(self, id, code):
self.diseases[id] = {}
self.diseases[id]['id'] = id
self.diseases[id]['code'] = code
# return the cross reference for the phenotype.
# ETL uses it with hpo-phenotypes-_yyyy-mm-dd_.jsonl
def set_phenotypes(self, id, disease):
if 'hasDbXref' in disease:
self.diseases[id]['dbXRefs'] = disease['hasDbXref']
# Retrieve the definition info
def set_definition(self, id, disease):
if 'IAO_0000115' in disease:
if isinstance(disease['IAO_0000115'], str):
self.diseases[id]['definition'] = disease['IAO_0000115'].strip('\n')
else:
definitions = self.get_array_value(disease['IAO_0000115'])
self.diseases[id]['definition'] = definitions[0]
if len(definitions) > 1: self.diseases[id]['definition_alternatives'] = definitions[1:]
# Return an array of strings without new line.
def get_array_value(self, value):
if isinstance(value, str):
return [value.strip('\n')]
else:
return [x.strip() for x in value if isinstance(x, str)]
# Return the synonyms. Complex structure. Clean and flatten.
def set_efo_synonyms(self, id, disease):
synonyms_details = {}
if 'hasExactSynonym' in disease:
if len(disease['hasExactSynonym']) > 0:
synonyms = self.get_array_value(disease['hasExactSynonym'])
synonyms_details['hasExactSynonym'] = synonyms
if 'hasRelatedSynonym' in disease:
if len(disease['hasRelatedSynonym']) > 0:
synonyms = self.get_array_value(disease['hasRelatedSynonym'])
synonyms_details['hasRelatedSynonym'] = synonyms
if 'hasBroadSynonym' in disease:
if len(disease['hasBroadSynonym']) > 0:
synonyms = self.get_array_value(disease['hasBroadSynonym'])
synonyms_details['hasBroadSynonym'] = synonyms
if 'hasNarrowSynonym' in disease:
if len(disease['hasNarrowSynonym']) > 0:
synonyms = self.get_array_value(disease['hasNarrowSynonym'])
synonyms_details['hasNarrowSynonym'] = synonyms
if len(synonyms_details.keys()) > 0:
self.diseases[id]['synonyms'] = synonyms_details
    # Extract skos:related: used to check phenotype info.
def get_phenotypes(self, phenotypes):
if isinstance(phenotypes, str):
return [self.get_id(phenotypes)]
else:
return [self.get_id(phenotype) for phenotype in phenotypes]
# The field sko is used to check if the phenotype cross references are correct.
# ETL - GraphQL test.
def set_phenotypes_old(self, id, disease):
if "related" in disease:
self.diseases[id]['sko'] = self.get_phenotypes(disease["related"])
# Return if the term is a TherapeuticArea
def set_therapeutic_area(self, id, disease):
if 'oboInOwl:inSubset' in disease:
self.diseases[id]['isTherapeuticArea'] = True
else:
self.diseases[id]['isTherapeuticArea'] = False
# Return the label of the term
def set_label(self, id, disease):
if 'label' in disease:
if isinstance(disease['label'], str):
self.diseases[id]['label'] = disease['label'].strip('\n')
elif isinstance(disease['label'], dict):
self.diseases[id]['label'] = disease['label']['@value'].strip('\n')
else:
self.diseases[id]['label'] = self.get_array_value(disease['label'])[0]
# Return the parents for the term
def set_parents(self, id, disease):
if 'subClassOf' in disease:
subset = disease['subClassOf']
parents = []
if len(subset) > 0:
for father in subset:
if father.startswith('_:'):
self.has_location_ids[father] = id
else:
father_id = self.get_id(father)
parents.append(father_id)
self.diseases[id]['parents'] = parents
def extract_id(self, elem):
return elem.replace(":", "_")
# return the proper prefix.
def get_prefix(self, id):
simple_id = re.match(r'^(.+?)_', id)
if simple_id.group() in ["EFO_", "OTAR_"]:
return "http://www.ebi.ac.uk/efo/"
elif (simple_id.group() in 'Orphanet_'):
return "http://www.orpha.net/ORDO/"
else:
return "http://purl.obolibrary.org/obo/"
def extract_id_from_uri(self, uri):
new_terms = []
if isinstance(uri, str):
uris_to_extract = [uri]
elif isinstance(uri, list):
uris_to_extract = self.get_array_value(uri)
else:
            # todo: investigate this case.
uris_to_extract = []
for uri_i in uris_to_extract:
full_path = parse.urlsplit(uri_i).path
new_terms.append(full_path.rpartition('/')[2])
return new_terms
# Get the id and create a standard output. Eg. EFO:123 -> EFO_123, HP:9392 -> HP_9392
def get_id(self, id):
ordo = re.sub(r'^.*?ORDO/', '', id)
new_id = re.sub(r'^.*?:', '', ordo)
return new_id
    # Check if the EFO term is valid. Obsolete terms go to a dedicated structure.
def is_obsolete(self, disease, disease_id):
if 'owl:deprecated' in disease:
if 'IAO_0100001' in disease:
new_terms = self.extract_id_from_uri(disease['IAO_0100001'])
for term in new_terms:
if term in self.diseases_obsolete:
self.diseases_obsolete[term].append(disease_id)
else:
self.diseases_obsolete[term] = [disease_id]
return True
else:
return False
    # LocationIds: this procedure fills in the parent/child structure
def set_locationIds_structure(self, disease_id, disease):
collection = None
if "unionOf" in disease:
collection = disease["unionOf"]["@list"]
elif "intersectionOf" in disease:
collection = disease["intersectionOf"]["@list"]
if collection is not None:
for elem in collection:
if elem.startswith('_:'):
self.parent_child_tuples.append((disease["@id"], elem))
def load_type_class(self, disease, disease_id):
if not disease["@id"].startswith('_:'):
code = self.get_prefix(disease_id) + disease_id
self.init_disease(disease_id, code)
self.set_label(disease_id, disease)
self.set_definition(disease_id, disease)
self.set_therapeutic_area(disease_id, disease)
self.set_efo_synonyms(disease_id, disease)
self.set_phenotypes(disease_id, disease)
self.set_phenotypes_old(disease_id, disease)
self.set_parents(disease_id, disease)
else:
self.set_locationIds_structure(disease_id, disease)
#
def get_obsolete_info(self):
for k, v in self.diseases_obsolete.items():
if k in self.diseases:
self.diseases[k]['obsoleteTerms'] = list(self.diseases_obsolete[k])
# LocationIds: This is part of the structure to retrieve the info about locationIds
def get_children(self, node):
return [x[1] for x in self.parent_child_tuples if x[0] == node]
# LocationIds: This is part of the structure to retrieve the info about locationIds.
# Recursively retrieve the location.
def get_nodes(self, node, path):
data = set()
data.add(node)
path.add(node)
children = self.get_children(node)
if children:
lista = set()
for child in children:
if not child.startswith("obo:"):
lista.update(self.get_nodes(child, path))
else:
child_clean_code = re.sub(r'^.*?:', '', child)
lista.add(child_clean_code)
data.update(lista)
return data
# LocationIds are stored in the restriction tag.
    # The info is stored inside a parent-child JSON structure.
def get_locationIds(self):
parents, children = zip(*self.parent_child_tuples)
self.root_nodes = {x for x in parents if x not in children}
for node in self.root_nodes:
result = self.get_nodes(node, set())
self.all_path[node] = [x for x in list(result) if not x.startswith('_:')]
for k, v in self.has_location_ids.items():
if k in self.all_path:
if not "locationIds" in self.diseases[v]:
self.diseases[v]["locationIds"] = set()
self.diseases[v]["locationIds"].update(self.all_path[k])
    # For every term, generate the dict of id info.
def generate(self):
with open(self.efo_input) as input:
for line in input:
disease = json.loads(line)
disease_id = self.get_id(disease['@id'])
if not self.is_obsolete(disease, disease_id):
if disease["@type"] == "Class":
self.load_type_class(disease, disease_id)
else:
# @Type: Restriction
if 'someValuesFrom' in disease:
self.parent_child_tuples.append((disease["@id"], disease["someValuesFrom"]))
self.get_obsolete_info()
self.get_locationIds()
# Static file for alpha and production
def save_static_disease_file(self, output_filename):
valid_keys = ["parents", "id", "label"]
with jsonlines.open(output_filename, mode='w') as writer:
for id in self.diseases:
entry = {k: v for k, v in self.diseases[id].items() if k in valid_keys}
entry["parentIds"] = entry["parents"]
del (entry["parents"])
entry["name"] = entry["label"]
del (entry["label"])
writer.write(entry)
def save_diseases(self, output_filename):
with jsonlines.open(output_filename, mode='w') as writer:
for disease in self.diseases:
                # A set cannot be serialized to JSON; transform it into a list.
if 'locationIds' in self.diseases[disease]:
listValues = list(self.diseases[disease]['locationIds'])
self.diseases[disease]['locationIds'] = listValues
writer.write(self.diseases[disease])
return output_filename
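# Minimal usage sketch (not part of the original plugin); the input and output file
# names below are hypothetical placeholders for an EFO dump in JSON-lines format.
if __name__ == "__main__":
    efo = EFO("efo_terms.jsonl")
    efo.generate()
    efo.save_diseases("diseases.jsonl")
    efo.save_static_disease_file("diseases_static.jsonl")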
| 40.326316
| 116
| 0.59297
| 11,001
| 0.957191
| 0
| 0
| 0
| 0
| 0
| 0
| 2,778
| 0.241712
|
05b664d9f22c51662666d538e6f424b0f69a4ea2
| 948
|
py
|
Python
|
interaction3/mfield/tests/test_transmit_receive_beamplot.py
|
bdshieh/interaction3
|
b44c390045cf3b594125e90d2f2f4f617bc2433b
|
[
"MIT"
] | 2
|
2020-07-08T14:42:52.000Z
|
2022-03-13T05:25:55.000Z
|
interaction3/mfield/tests/test_transmit_receive_beamplot.py
|
bdshieh/interaction3
|
b44c390045cf3b594125e90d2f2f4f617bc2433b
|
[
"MIT"
] | null | null | null |
interaction3/mfield/tests/test_transmit_receive_beamplot.py
|
bdshieh/interaction3
|
b44c390045cf3b594125e90d2f2f4f617bc2433b
|
[
"MIT"
] | null | null | null |
import numpy as np
from interaction3 import abstract
from interaction3.arrays import matrix
from interaction3.mfield.solvers.transmit_receive_beamplot_2 import TransmitReceiveBeamplot2
array = matrix.create(nelem=[2, 2])
simulation = abstract.MfieldSimulation(sampling_frequency=100e6,
sound_speed=1540,
excitation_center_frequecy=5e6,
excitation_bandwidth=4e6,
field_positions=np.array([[0, 0, 0.05],
[0, 0, 0.06],
[0, 0, 0.07]])
)
kwargs, meta = TransmitReceiveBeamplot2.connector(simulation, array)
sim = TransmitReceiveBeamplot2(**kwargs)
sim.solve()
rf_data = sim.result['rf_data']
times = sim.result['times']
| 35.111111
| 92
| 0.517932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.016878
|
05b7efff7d41c4651007c0d46a051ea437cab70c
| 16,172
|
py
|
Python
|
scripts/make_plots.py
|
facebookresearch/mpcfp
|
cb29797aa4f2ce524dd584ecf47c863fd9f414a6
|
[
"MIT"
] | 5
|
2020-11-18T23:55:17.000Z
|
2022-01-14T07:15:35.000Z
|
scripts/make_plots.py
|
facebookresearch/mpcfp
|
cb29797aa4f2ce524dd584ecf47c863fd9f414a6
|
[
"MIT"
] | null | null | null |
scripts/make_plots.py
|
facebookresearch/mpcfp
|
cb29797aa4f2ce524dd584ecf47c863fd9f414a6
|
[
"MIT"
] | 2
|
2021-11-06T14:06:13.000Z
|
2022-01-14T07:16:29.000Z
|
#!/usr/bin/env python2
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import math
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import seaborn
# constants:
NAN = float('nan')
# From https://blog.graphiq.com/
# finding-the-right-color-palettes-for-data-visualizations-fcd4e707a283
BAR_COLORS_PURPLES = [
(0.9020, 0.6196, 0.6157),
(0.7765, 0.3412, 0.5294),
(0.4471, 0.1922, 0.5647),
(0.2549, 0.1098, 0.3804),
]
BAR_COLORS_GRAY_PURPLES = [
(.7, .7, .7),
(0.9020, 0.6196, 0.6157),
(0.7765, 0.3412, 0.5294),
(0.4471, 0.1922, 0.5647),
(0.2549, 0.1098, 0.3804),
]
BAR_COLORS_DETECTION = [
(.8, .8, .8),
(.4, .4, .4),
(0.9020, 0.6196, 0.6157),
(0.7765, 0.3412, 0.5294),
(0.4471, 0.1922, 0.5647),
(0.2549, 0.1098, 0.3804),
]
LINE_COLORS = seaborn.cubehelix_palette(
4, start=2, rot=0, dark=0.15, light=0.75, reverse=False, as_cmap=False)
BAR_COLORS = BAR_COLORS_GRAY_PURPLES
FS = 18
color_counter = [0]
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
def set_style():
params = {
"legend.fontsize": FS - 4,
"axes.labelsize": FS,
"axes.titlesize": FS,
"xtick.labelsize": FS - 4,
"ytick.labelsize": FS - 4,
}
matplotlib.rcParams.update(params)
fig = plt.gcf()
for ax in fig.axes:
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
# make generic line plot:
def make_line_plot(Y, x=None, title='',
xlabel='', ylabel='', xlog=False, ylog=False,
xmin=None, xmax=None, ymin=None, ymax=None,
legend=[], legend_title=None, show_legend=True,
text_labels=None, colors=[], linestyle=[], markerstyle=[],
append=False, filename=None, linewidth=2., legloc=None,
errors=None, xticks=None, yticks=None):
# assertions and defaults:
x = np.linspace(0, Y.shape[1]) if x is None else x
ymin = Y.min() if ymin is None else ymin
ymax = Y.max() if ymax is None else ymax
xmin = x.min() if xmin is None else xmin
xmax = x.max() if xmax is None else xmax
if len(legend) > 0:
assert len(legend) == Y.shape[0]
if len(colors) == 0:
colors = LINE_COLORS
if isinstance(linestyle, str):
linestyle = [linestyle] * Y.shape[0]
if len(linestyle) == 0:
linestyle = ['-'] * Y.shape[0]
if isinstance(markerstyle, str):
markerstyle = [markerstyle] * Y.shape[0]
if len(markerstyle) == 0:
markerstyle = [''] * Y.shape[0]
# make plot:
if not append:
plt.clf()
for n in range(Y.shape[0]):
linecolor = colors[color_counter[0] % len(colors)]
color_counter[0] += 1
plt.plot(x, Y[n, :],
label=legend[n] if len(legend) > 0 else None,
linewidth=linewidth, linestyle=linestyle[n],
marker=markerstyle[n], markersize=linewidth * 1.5,
color=linecolor)
if errors is not None:
plt.fill_between(
x, Y[n, :] - errors[n, :], Y[n, :] + errors[n, :],
alpha=0.2, color=linecolor)
plt.xlabel(xlabel, fontweight='bold', fontsize=FS)
plt.ylabel(ylabel, fontweight='bold', fontsize=FS)
if show_legend:
plt.legend(fontsize=FS - 4, loc=0 if legloc is None else legloc,
title=legend_title)
# add text labels:
if text_labels is not None:
assert isinstance(text_labels, list)
for text_label in text_labels:
assert isinstance(text_label, list) \
or isinstance(text_label, tuple)
assert len(text_label) == 3
plt.text(*text_label)
# makes axes look pretty:
axes = plt.gca()
axes.set_xlim([xmin, xmax])
axes.set_ylim([ymin, ymax])
if xlog:
axes.semilogx(10.)
if ylog:
axes.semilogy(10.)
if xticks is not None:
axes.set_xticks(xticks)
if yticks is not None:
axes.set_yticks(yticks)
for tick in axes.xaxis.get_major_ticks():
tick.label.set_fontsize(FS - 4)
for tick in axes.yaxis.get_major_ticks():
tick.label.set_fontsize(FS - 4)
if title != '':
axes.set_title(title, fontweight='bold', fontsize=FS)
if show_legend and legend_title is not None:
legend_title = axes.get_legend().get_title().properties()[
'fontproperties']
legend_title.set_weight('bold')
# remove legend border:
legend = axes.get_legend()
if legend is not None:
legend.get_frame().set_linewidth(0.0)
# export plot:
set_style()
if filename is not None:
plt.savefig(filename, format='pdf', transparent=True,
bbox_inches='tight')
def read_log(logfile, timings=False, test=False):
x = []
y = []
yy = []
z = []
with open(os.path.join("results/", logfile), 'r') as fid:
for line in fid:
if test and "Test Set" in line:
fields = line.strip().split()
if len(fields) > 4:
test_loss = float(fields[3][:-1])
test_accuracy = float(fields[5])
else:
test_loss = float(fields[3])
test_accuracy = 0
if "Iter" not in line:
continue
fields = line.strip().split()
it = int(fields[1][:-1])
loss = float(fields[3][:-1])
if len(fields) > 6:
accuracy = float(fields[5][:-1])
runtime = float(fields[7])
yy.append(accuracy)
else:
runtime = float(fields[5])
x.append(it)
y.append(loss)
z.append(runtime)
if test:
return test_loss, test_accuracy
return np.array(x), np.array(y), np.array(yy), np.array(z)
def read_log_synth(logfile):
x = []
with open(os.path.join("results/", logfile), 'r') as fid:
for line in fid:
if "normalizing both weights and iweights" not in line:
continue
fields = line.strip().split()
diff = float(fields[7])
x.append(diff)
return np.array(x)
def mnist_width_train(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Train Loss}'
widths = ['1e3', '1e4', '1e5', '1e6', '2e6', '5e6']
Ys = []
links = ["Identity", "Logit", "Probit"]
for link in links:
files = ['mnist_width%d_link_%s.txt' % (int(float(w)), link.lower())
for w in widths]
Y = []
for logfile in files:
it, loss, _, _ = read_log(logfile, test=False)
Y.append(loss[-1])
Ys.append(Y)
Y = np.stack(Ys)
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k', 'k'],
linestyle=['-', '--', ':'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.0, ymax=0.2,
xmin=9e2, xmax=6e6,
filename=filename, linewidth=2.,
legloc='upper left')
def mnist_width_test(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Test Loss}'
widths = ['1e3', '1e4', '1e5', '1e6', '2e6', '5e6']
Ys = []
links = ["Identity", "Logit", "Probit"]
for link in links:
files = ['mnist_width%d_link_%s.txt' % (int(float(w)), link.lower())
for w in widths]
Y = []
for logfile in files:
loss, _ = read_log(logfile, test=True)
Y.append(loss)
Ys.append(Y)
Y = np.stack(Ys)
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k', 'k'],
linestyle=['-', '--', ':'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.0, ymax=0.2,
xmin=9e2, xmax=6e6,
filename=filename, linewidth=2.,
legloc='upper left')
def covtype_width_train(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Train Loss}'
widths = ['1e3', '1e4', '1e5', '1e6', '2e6', '5e6', '1e7']
Ys = []
links = ["Identity", "Logit", "Probit"]
for link in links:
files = ['covtype_width%d_link_%s.txt' % (int(float(w)), link.lower())
for w in widths]
Y = []
for logfile in files:
it, loss, _, _ = read_log(logfile, test=False)
Y.append(loss[-1])
Ys.append(Y)
Y = np.stack(Ys)
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k', 'k'],
linestyle=['-', '--', ':'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.0, ymax=1.3,
xmin=9e2, xmax=1.2e7,
filename=filename, linewidth=2.,
legloc='upper left')
def covtype_width_test(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Test Loss}'
widths = ['1e3', '1e4', '1e5', '1e6', '2e6', '5e6', '1e7']
Ys = []
links = ["Identity", "Logit", "Probit"]
for link in links:
files = ['covtype_width%d_link_%s.txt' % (int(float(w)), link.lower())
for w in widths]
Y = []
for logfile in files:
loss, _ = read_log(logfile, test=True)
Y.append(loss)
Ys.append(Y)
Y = np.stack(Ys)
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k', 'k'],
linestyle=['-', '--', ':'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.0, ymax=1.3,
xmin=9e2, xmax=1.2e7,
filename=filename, linewidth=2.,
legloc='upper left')
def synth_width(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'$\mathbf{\|\frac{x}{\|x\|} - \frac{w}{\|w\|}\|}$'
widths = ['1e1', '1e2', '1e3', '1e4', '1e5', '1e6', '5e6']
Ys = []
links = ["Identity", "Logit", "Probit"]
for link in ['identity', 'logit', 'probit']:
files = ['synth_width%d_link_%s.txt' % (int(float(w)), link)
for w in widths]
Y = []
for logfile in files:
normdiff = read_log_synth(logfile)
Y.append(normdiff[-1])
Ys.append(Y)
Y = np.stack(Ys)
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k', 'k'],
linestyle=['-', '--', ':'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.0, ymax=0.02,
xmin=8, xmax=6e6,
filename=filename, linewidth=2.,
legloc='upper left')
def synth_terms(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Terms}'
ylabel = r'$\mathbf{\|\frac{x}{\|x\|} - \frac{w}{\|w\|}\|}$'
terms = list(range(6, 42, 2))
Ys = []
links = ["Logit", "Probit"]
for link in links:
files = ['synth_terms%d_link_%s.txt' % (t, link.lower())
for t in terms]
Y = []
for logfile in files:
normdiff = read_log_synth(logfile)
Y.append(normdiff[-1])
Ys.append(Y)
Y = np.stack(Ys)
x = np.array(terms)
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k'],
linestyle=['-', '--'],
xlog=False, ylog=False, markerstyle='s',
ymin=0.0, ymax=0.025, xticks=list(range(6, 42, 4)),
xmin=5, xmax=42,
filename=filename, linewidth=2.,
legloc='upper right')
def mnist_multi(filename):
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Accuracy}'
widths = ['1e3', '1e4', '1e5', '1e6']
files = ['mnist_width%d_multi.txt' % (int(float(w))) for w in widths]
Y = []
Y_train = []
for logfile in files:
_, acc = read_log(logfile, test=True)
_, _, train_acc, _ = read_log(logfile)
Y.append(acc)
Y_train.append(train_acc[-1])
Y = np.stack([Y_train, Y])
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel,
legend=['Train', 'Test'],
colors=['k', 'k'],
linestyle=['-', '--'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.7, ymax=1,
xmin=9e2, xmax=1.2e6,
filename=filename, linewidth=2.,
legloc='upper left')
def covtype_multi(filename):
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Accuracy}'
widths = ['1e3', '1e4', '1e5', '1e6']
files = ['covtype_width%d_multi.txt' % (int(float(w))) for w in widths]
Y = []
Y_train = []
for logfile in files:
_, acc = read_log(logfile, test=True)
_, _, train_acc, _ = read_log(logfile)
Y.append(acc)
Y_train.append(train_acc[-1])
Y = np.stack([Y_train, Y])
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel,
legend=['Train', 'Test'],
colors=['k', 'k'],
linestyle=['-', '--'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.5, ymax=0.8,
xmin=9e2, xmax=1.2e6,
filename=filename, linewidth=2.,
legloc='upper left')
# make all the plots:
def main():
# get destination folder:
parser = argparse.ArgumentParser(
description='Make plots for floating point MPC')
parser.add_argument('--destination', default='./results/', type=str,
help='folder in which to dump figures')
args = parser.parse_args()
# create plots:
mnist_width_train(os.path.join(args.destination,
'mnist_widths_train_loss.pdf'))
mnist_width_test(os.path.join(args.destination,
'mnist_widths_test_loss.pdf'))
covtype_width_train(os.path.join(args.destination,
'covtype_widths_train_loss.pdf'))
covtype_width_test(os.path.join(args.destination,
'covtype_widths_test_loss.pdf'))
synth_width(os.path.join(args.destination, 'synth_widths_weightdiffs.pdf'))
synth_terms(os.path.join(args.destination, 'synth_terms_weightdiffs.pdf'))
mnist_multi(os.path.join(args.destination,
'mnist_multiclass_accuracy.pdf'))
covtype_multi(os.path.join(args.destination,
'covtype_multiclass_accuracy.pdf'))
# run all the things:
if __name__ == '__main__':
main()
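# Minimal usage sketch for make_line_plot (not part of the original script); the data
# and output filename below are made up. Call _demo_line_plot() manually to try it.
def _demo_line_plot(filename='demo_line_plot.pdf'):
    x = np.linspace(1, 10, 20)
    Y = np.stack([np.log(x), np.sqrt(x)])  # two curves, one row per line
    make_line_plot(Y, x=x, xlabel='x', ylabel='f(x)',
                   legend=['log(x)', 'sqrt(x)'],
                   linestyle=['-', '--'],
                   filename=filename)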
| 32.539235
| 79
| 0.537225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,741
| 0.16949
|
05b87ef1f9d957ce2aacbc7ba9bf31d3f24627e5
| 2,782
|
py
|
Python
|
example_backtesting.py
|
brokenlab/finance4py
|
839fb4c262c369973c1afaebb23291355f8b4668
|
[
"MIT"
] | 6
|
2016-12-28T03:40:46.000Z
|
2017-03-31T12:04:43.000Z
|
example_backtesting.py
|
brokenlab/finance4py
|
839fb4c262c369973c1afaebb23291355f8b4668
|
[
"MIT"
] | null | null | null |
example_backtesting.py
|
brokenlab/finance4py
|
839fb4c262c369973c1afaebb23291355f8b4668
|
[
"MIT"
] | 3
|
2018-04-26T03:14:29.000Z
|
2021-06-13T16:18:04.000Z
|
# -*- coding: utf-8 -*-
'''
* finance4py
* Based on Python Data Analysis Library.
* 2016/03/22 by Sheg-Huai Wang <m10215059@csie.ntust.edu.tw>
* Copyright (c) 2016, finance4py team
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from finance4py import Stock
from finance4py.backtesting import BandTest
from pylab import *
if __name__ == '__main__':
    # Create the stock data handle and feed the data into the backtesting program
s = Stock('2330', '2015-10-31', '2016-03-05')
bt = BandTest(s)
    # Example strategy 1
    # Add K and D value columns to the historical price data
    s['K'], s['D'] = s.KD()
    # Write your own strategy => def any_name(today, today_data, stock)
    def golden_cross(today, today_data, stock):
        # Return True for a holding state, False for a non-holding state
return today_data['K'] > today_data['D']
    # Add the strategy to the backtesting program and give it a name
bt.addStrategy('KD黃金交叉', golden_cross)
    # Example strategy 2
s['MA5'] = s.MA()
s['MA20'] = s.MA(20)
def average_cross(today, today_data, stock):
return today_data['MA5'] > today_data['MA20']
bt.addStrategy('均線黃金交叉', average_cross)
    # Example strategy 3
s['DIF'], s['DEM'], s['OSC']= s.MACD()
def macd_cross(today, today_data, stock):
        # today can be shifted, and other days' data can be fetched via stock
yesterday = today - 1
yesterday_data = stock.getData(yesterday)
return (today_data['DIF'] > today_data['DEM']) & \
(yesterday_data['DIF'] > yesterday_data['DEM'])
bt.addStrategy('MACD連續兩日黃金交叉', macd_cross)
    # Plot the backtest results (the y-axis is the asset multiple)
bt.plot()
show()
| 35.21519
| 104
| 0.727175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,332
| 0.761097
|
05b8e002f7910268a9002f66a3d07f197f31db7a
| 1,778
|
py
|
Python
|
utils/cloud/cloud_client/__init__.py
|
alexfdo/asr_eval
|
d1573cc3113ce9df1ae64c3b91b5f495e2cff9a3
|
[
"MIT"
] | 3
|
2020-03-06T17:20:34.000Z
|
2021-09-09T09:18:48.000Z
|
utils/cloud/cloud_client/__init__.py
|
alexfdo/asr_eval
|
d1573cc3113ce9df1ae64c3b91b5f495e2cff9a3
|
[
"MIT"
] | 1
|
2020-02-03T18:25:08.000Z
|
2020-02-03T18:25:08.000Z
|
utils/cloud/cloud_client/__init__.py
|
alexfdo/asr_eval
|
d1573cc3113ce9df1ae64c3b91b5f495e2cff9a3
|
[
"MIT"
] | 1
|
2020-01-29T19:47:54.000Z
|
2020-01-29T19:47:54.000Z
|
# coding: utf-8
# flake8: noqa
"""
ASR documentation
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.dev
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from cloud_client.api.packages_api import PackagesApi
from cloud_client.api.recognize_api import RecognizeApi
from cloud_client.api.session_api import SessionApi
# import ApiClient
from cloud_client.cloud_api_client import CloudApiClient
from cloud_client.configuration import Configuration
# import models into sdk package
from cloud_client.models.advanced_recognition_request_dto import AdvancedRecognitionRequestDto
from cloud_client.models.asr_advanced_result_dto import ASRAdvancedResultDto
from cloud_client.models.asr_result_dto import ASRResultDto
from cloud_client.models.audio_file_dto import AudioFileDto
from cloud_client.models.auth_request_dto import AuthRequestDto
from cloud_client.models.auth_response_dto import AuthResponseDto
from cloud_client.models.auth_status_dto import AuthStatusDto
from cloud_client.models.message_dto import MessageDto
from cloud_client.models.package_dto import PackageDto
from cloud_client.models.recognition_request_dto import RecognitionRequestDto
from cloud_client.models.sessionless_recognition_request_dto import SessionlessRecognitionRequestDto
from cloud_client.models.start_session_request import StartSessionRequest
from cloud_client.models.status_dto import StatusDto
from cloud_client.models.stream_request_dto import StreamRequestDto
from cloud_client.models.stream_response_dto import StreamResponseDto
from cloud_client.models.word_dto import WordDto
| 41.348837
| 119
| 0.865017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 368
| 0.206974
|
05b95038357172273cd6bf5b94205ef5e3a1bff8
| 2,818
|
py
|
Python
|
main.py
|
af12066/cancel-sit
|
29977bb86927e69ae7f94a160ef4d1fb810f0117
|
[
"MIT"
] | null | null | null |
main.py
|
af12066/cancel-sit
|
29977bb86927e69ae7f94a160ef4d1fb810f0117
|
[
"MIT"
] | null | null | null |
main.py
|
af12066/cancel-sit
|
29977bb86927e69ae7f94a160ef4d1fb810f0117
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) T. H.
import urllib.request
import re
import urllib.parse
import codecs
import filecmp
import os.path
import os
from bs4 import BeautifulSoup
from slacker import Slacker
from datetime import datetime
class Slack(object):
__slacker = None
def __init__(self, token):
self.__slacker = Slacker(token)
def get_channnel_list(self):
"""
        Retrieve the list of channel IDs and channel names in the Slack team.
"""
        # Fetching via .body returns the data in the form [{channel1}, {channel2}, ...].
raw_data = self.__slacker.channels.list().body
result = []
for data in raw_data["channels"]:
result.append(dict(channel_id=data["id"], channel_name=data["name"]))
return result
def post_message_to_channel(self, channel, message):
"""
        Post a message to an arbitrary channel in the Slack team.
"""
channel_name = "#" + channel
self.__slacker.chat.post_message(channel_name, message)
def writeFile(fileName, content):
print(fileName)
f = codecs.open(fileName, 'w', 'utf-8')
f.write(content)
f.close()
if __name__ == '__main__':
slack = Slack('...')
print(slack.get_channnel_list())
    # Fetch the data for this month and next month
uri = 'http://attend.sic.shibaura-it.ac.jp/cancelCalendar/t04/calendar{0:d}{1:02d}-{2:02d}.html'.format(datetime.today().year, datetime.today().month, (lambda x: x if x != 12 else x - 11)(datetime.today().month + 1))
html = urllib.request.urlopen(uri)
soup = BeautifulSoup(html, 'lxml')
    link = soup.find_all('a', href=re.compile("/cancel/"))  # get <a> elements whose href contains '/cancel/'; relative paths are converted to absolute paths below
for a in link:
        path = urllib.parse.urljoin(uri, a['href'])  # take only the href attribute
print(path)
fileName = path.split('/')[-1]
fileName = fileName.replace("html", "txt")
        html2 = urllib.request.urlopen(path)  # open the URL of each list element
soup2 = BeautifulSoup(html2, 'lxml')
        dat = soup2.find_all(text=True)  # get all text nodes
        settext = "\n".join([x for x in dat if x != '\n'])  # drop newline-only items, join the list and format the string
        # Write out the scraped text.
        # If no file exists for that date, create a new one;
        # if the file already exists, create a temporary copy with a '.tmp' extension.
        # Compare the original txt file with the tmp file; if they differ, update the txt file and post to Slack.
if os.path.isfile(fileName):
tmpfileName = fileName + '.tmp'
writeFile(tmpfileName, settext)
if filecmp.cmp(fileName, tmpfileName):
print("no diff")
else:
writeFile(fileName, settext)
                slack.post_message_to_channel("class", settext)  # post to Slack (channel, text)
os.remove(tmpfileName)
else:
#print('write a new file')
            slack.post_message_to_channel("class", settext)  # post to Slack (channel, text)
writeFile(fileName, settext)
| 29.663158
| 220
| 0.625621
| 872
| 0.251878
| 0
| 0
| 0
| 0
| 0
| 0
| 1,414
| 0.408434
|
05ba89852c4740460e1cce9740e5ab37d0b77443
| 582
|
py
|
Python
|
minitf/kernel/_numpy_math.py
|
guocuimi/minitf
|
f272a6b1546b82aaec41ec7d2c2d34fa40a40385
|
[
"MIT"
] | 7
|
2020-02-10T08:16:30.000Z
|
2021-01-31T14:08:02.000Z
|
minitf/kernel/_numpy_math.py
|
guocuimi/minitf
|
f272a6b1546b82aaec41ec7d2c2d34fa40a40385
|
[
"MIT"
] | 1
|
2020-02-29T01:57:54.000Z
|
2020-02-29T01:57:54.000Z
|
minitf/kernel/_numpy_math.py
|
guocuimi/minitf
|
f272a6b1546b82aaec41ec7d2c2d34fa40a40385
|
[
"MIT"
] | null | null | null |
import numpy as _np
from minitf.kernel.core import notrace_primitive
from minitf.kernel.core import primitive
# ----- Differentiable functions -----
add = primitive(_np.add)
subtract = primitive(_np.subtract)
multiply = primitive(_np.multiply)
divide = primitive(_np.divide)
dot = primitive(_np.dot)
square = primitive(_np.square)
reduce_mean = primitive(_np.average)
exp = primitive(_np.exp)
negative = primitive(_np.negative)
maximum = primitive(_np.maximum)
minimum = primitive(_np.minimum)
# temporarily put it here as nograd function
reduce_sum = notrace_primitive(_np.sum)
| 27.714286
| 48
| 0.780069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 0.140893
|
05bdd1c7fb73fc917e7e9bacb41962e3873e9769
| 5,802
|
py
|
Python
|
map/migrations/0001_initial.py
|
matthewoconnor/mapplot-cdp
|
19513e6617f878d717ab4e917ffc9d22270edcfe
|
[
"MIT"
] | null | null | null |
map/migrations/0001_initial.py
|
matthewoconnor/mapplot-cdp
|
19513e6617f878d717ab4e917ffc9d22270edcfe
|
[
"MIT"
] | null | null | null |
map/migrations/0001_initial.py
|
matthewoconnor/mapplot-cdp
|
19513e6617f878d717ab4e917ffc9d22270edcfe
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-01-10 20:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('external_identifier', models.CharField(max_length=256)),
('area_type', models.CharField(choices=[('UNCATEGORIZED', 'Uncategorized'), ('NEIGHBORHOOD', 'Neighborhood'), ('WARD', 'Ward'), ('DISTRICT', 'District'), ('STATE', 'State'), ('COUNTRY', 'Country'), ('REGION', 'Region'), ('COUNTY', 'County')], max_length=64)),
('boundary_type', models.CharField(choices=[('OUTER', 'Outer Boundary'), ('INNER', 'Inner Boundary')], max_length=64)),
('polygon', models.TextField()),
('mbr', models.CharField(max_length=256)),
('is_primary', models.BooleanField(default=True)),
('created_time', models.DateTimeField()),
('outer_area', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inner_areas', related_query_name='inner_area', to='map.Area')),
('primary_area', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='child_areas', related_query_name='child_area', to='map.Area')),
],
),
migrations.CreateModel(
name='AreaBin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.FloatField(default=0.0)),
('count', models.IntegerField(default=0)),
('area', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.Area')),
],
),
migrations.CreateModel(
name='AreaMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('description', models.CharField(blank=True, max_length=256)),
('data_source', models.CharField(blank=True, max_length=256, null=True)),
('dataset_identifier', models.CharField(blank=True, max_length=256, null=True)),
('kml_file', models.FileField(blank=True, null=True, upload_to='uploads/areamap/')),
('area_name_path', models.CharField(blank=True, max_length=256, null=True)),
('area_external_identifier_path', models.CharField(blank=True, max_length=256, null=True)),
('area_default_type', models.CharField(blank=True, max_length=64, null=True)),
('created_time', models.DateTimeField()),
('areas', models.ManyToManyField(blank=True, null=True, to='map.Area')),
],
),
migrations.CreateModel(
name='DataMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('description', models.CharField(blank=True, max_length=256)),
('dataset_type', models.CharField(blank=True, choices=[('SOCRATA', 'Socrata Soda Data Portal'), ('OTHER', 'Url for Other Data Source')], max_length=256)),
('data_source', models.CharField(blank=True, max_length=256, null=True)),
('dataset_identifier', models.CharField(blank=True, max_length=256, null=True)),
('dataset_url', models.URLField(blank=True, max_length=256)),
('weight_type', models.CharField(choices=[('COUNT', 'Count Instances'), ('SUM', 'Sum Field value')], max_length=64)),
('categorize_type', models.CharField(choices=[('POINT', 'Location Point'), ('LATLNG', 'Latitude Longitude'), ('JOIN', 'Join on Common Field'), ('JOIN_MAP', 'Join on Field Mapping')], max_length=64)),
('point_key', models.CharField(blank=True, max_length=256)),
('latitude_key', models.CharField(blank=True, max_length=256)),
('longitude_key', models.CharField(blank=True, max_length=256)),
('join_key', models.CharField(blank=True, max_length=256)),
('join_map_file', models.FileField(blank=True, null=True, upload_to='uploads/joinmap/')),
('value_key', models.CharField(blank=True, max_length=256)),
('querystring', models.CharField(blank=True, max_length=256)),
('kml_file', models.FileField(blank=True, null=True, upload_to='uploads/datamap/')),
('task_id', models.CharField(blank=True, max_length=256)),
('created_time', models.DateTimeField()),
('updated_time', models.DateTimeField()),
('area_map', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='map.AreaMap')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='areabin',
name='data_map',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.DataMap'),
),
]
| 62.387097
| 275
| 0.608239
| 5,579
| 0.961565
| 0
| 0
| 0
| 0
| 0
| 0
| 1,300
| 0.224061
|
05be03857ac9bab749c288e65ba7f0f36541df9b
| 4,561
|
py
|
Python
|
Scripts/simulation/gsi_handlers/object_lost_and_found_service_handlers.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/simulation/gsi_handlers/object_lost_and_found_service_handlers.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/simulation/gsi_handlers/object_lost_and_found_service_handlers.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\gsi_handlers\object_lost_and_found_service_handlers.py
# Compiled at: 2018-10-26 00:20:22
# Size of source mod 2**32: 4629 bytes
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema
import services
olaf_service_objects_schema = GsiGridSchema(label='Object Lost & Found')
olaf_service_objects_schema.add_field('object', label='Object')
olaf_service_objects_schema.add_field('zone', label='Zone')
olaf_service_objects_schema.add_field('street', label='Street')
olaf_service_objects_schema.add_field('sim', label='Sim')
olaf_service_objects_schema.add_field('household', label='Household')
olaf_service_deleted_clone_schema = GsiGridSchema(label='Object Lost & Found/To Be Deleted')
olaf_service_deleted_clone_schema.add_field('object', label='Object')
olaf_service_deleted_clone_schema.add_field('zone', label='Zone')
olaf_service_deleted_clone_schema.add_field('street', label='Street')
def _olaf_zone_str(zone_id, zone):
if zone:
return '{}:{}'.format(str(zone), zone.lot.get_lot_name())
return str(zone_id)
def _olaf_obj_str(zone, object_id):
obj_str = str(object_id)
if zone is not None:
if zone.is_instantiated:
obj = zone.object_manager.get(object_id)
if obj:
obj_str = str(obj)
return obj_str
@GsiHandler('object_lost_and_found_service_objects', olaf_service_objects_schema)
def generate_object_lost_and_found_service_data(*args, zone_id: int=None, filter=None, **kwargs):
lost_and_found = services.get_object_lost_and_found_service()
zone_manager = services.get_zone_manager()
sim_info_manager = services.sim_info_manager()
household_manager = services.household_manager()
if not (lost_and_found and zone_manager and sim_info_manager and household_manager):
return []
registered_objects = []
for locator in lost_and_found.registered_object_locators:
if zone_id is not None:
if zone_id != locator.zone_id:
continue
zone = zone_manager.get(locator.zone_id)
sim_str = str(locator.sim_id)
sim_info = sim_info_manager.get(locator.sim_id)
if sim_info:
sim_str = '{}:{}'.format(str(sim_info), locator.sim_id)
household_str = str(locator.household_id)
household = household_manager.get(locator.household_id)
if household:
household_str = '{}:{}'.format(household.name, locator.household_id)
registered_objects.append({'object':_olaf_obj_str(zone, locator.object_id),
'zone':_olaf_zone_str(locator.zone_id, zone),
'street':locator.open_street_id,
'sim':sim_str,
'household':household_str})
return registered_objects
@GsiHandler('object_lost_and_found_service_clones', olaf_service_deleted_clone_schema)
def generate_olaf_service_deleted_clone_schema_data(*args, zone_id: int=None, filter=None, **kwargs):
lost_and_found = services.get_object_lost_and_found_service()
zone_manager = services.get_zone_manager()
    if not (lost_and_found and zone_manager):
        return []
clones_to_delete_by_zone = lost_and_found.clones_to_delete_by_zone
clones_to_delete_by_street = lost_and_found.clones_to_delete_by_street
clones_to_delete = []
object_ids = set()
    for zid, objects in clones_to_delete_by_zone.items():
        # Use a loop variable distinct from the zone_id filter argument so the
        # filter comparison below is meaningful.
        if zone_id is not None:
            if zone_id != zid:
                continue
        zone = zone_manager.get(zid)
for object_id in objects:
street_str = 'n/a'
for street_id, objects in clones_to_delete_by_street.items():
if object_id in objects:
street_str = str(street_id)
break
            clones_to_delete.append({'object':_olaf_obj_str(zone, object_id),
                                     'zone':_olaf_zone_str(zid, zone),
                                     'street':street_str})
object_ids.add(object_id)
if zone_id is None:
for street_id, objects in clones_to_delete_by_street.items():
for object_id in objects:
if object_id in object_ids:
continue
clones_to_delete.append({'object':_olaf_obj_str(services.current_zone(), object_id),
'zone':'n/a',
'street':street_id})
return clones_to_delete
| 44.281553
| 110
| 0.70182
| 0
| 0
| 0
| 0
| 3,025
| 0.663232
| 0
| 0
| 709
| 0.155448
|
05bf284e1bf49c109f8df75324eddb8540d17a61
| 685
|
py
|
Python
|
testing/test_pendulum.py
|
delock/pytorch-a3c-mujoco
|
82e0c854417ac05e0f414eab1710794d41515591
|
[
"MIT"
] | null | null | null |
testing/test_pendulum.py
|
delock/pytorch-a3c-mujoco
|
82e0c854417ac05e0f414eab1710794d41515591
|
[
"MIT"
] | null | null | null |
testing/test_pendulum.py
|
delock/pytorch-a3c-mujoco
|
82e0c854417ac05e0f414eab1710794d41515591
|
[
"MIT"
] | null | null | null |
#Importing OpenAI gym package and MuJoCo engine
import gym
import numpy as np
import mujoco_py
import matplotlib.pyplot as plt
import env
# Setting InvertedPendulum-down as the environment
env = gym.make('InvertedPendulum-down')
#Sets an initial state
env.reset()
print (env.action_space)
# Render the environment in an endless loop
i = 0
while True:
#renders the environment
env.render()
    # Sample a random action from the action space (only logged on the first
    # iteration; a constant action of -5 is actually applied below)
action = env.action_space.sample()
ob, reward, done, _ = env.step([-5])
if i == 0:
print (action)
print ("ob = {}, reward = {}, done = {}".format(ob, reward, done))
i += 1
env.close()
| 25.37037
| 70
| 0.706569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 325
| 0.474453
|
05bf7c9f0303c517554bb2670af4a9a4baf2a54a
| 5,317
|
py
|
Python
|
plots/plot_drift_types.py
|
ChristophRaab/RRSLVQ
|
e265f62e023bd3ca23273b51e06035fd3c0b7c94
|
[
"MIT"
] | 1
|
2021-06-22T20:54:03.000Z
|
2021-06-22T20:54:03.000Z
|
plots/plot_drift_types.py
|
ChristophRaab/RRSLVQ
|
e265f62e023bd3ca23273b51e06035fd3c0b7c94
|
[
"MIT"
] | 5
|
2020-04-20T09:31:02.000Z
|
2021-07-10T01:23:36.000Z
|
plots/plot_drift_types.py
|
ChristophRaab/RRSLVQ
|
e265f62e023bd3ca23273b51e06035fd3c0b7c94
|
[
"MIT"
] | 1
|
2020-07-03T04:00:47.000Z
|
2020-07-03T04:00:47.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import logit
import pandas as pd
from matplotlib.axes import Axes, Subplot
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
SMALL = 14
SIZE = 16
plt.rc('font', size=SIZE) # controls default text sizes
plt.rc('axes', titlesize=SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL) # legend fontsize
plt.rc('figure', titlesize=SIZE) # fontsize of the figure title
plt.rc('lines', lw=4)
def reoccuring_drift(length=50000,width=10,rate=0.1,plot=True,filename="reoccuring_drift.eps"):
length = length / 2
probability_drift = np.array([])
time = np.array([])
fig, ax = plt.subplots()
fig.set_size_inches(6.4, 4.8)
part_length = rate*length
for part in range(int(length/part_length)):
t = np.arange(time.size, time.size+part_length, 1)
x = np.array([1.0 / (1.0 + np.exp(-4.0 * float(i - int(time.size+part_length-part_length/2)) / float(width))) for i in t])
y = np.array([1 - p for p in x])
probability_drift = np.append(probability_drift,x)
probability_drift = np.append(probability_drift,y)
time = np.append(time,t)
probability_drift = (probability_drift-.5)*2
t = np.arange(1, probability_drift.size+1, 1)
signal = probability_drift
pos_signal = signal.copy()
neg_signal = signal.copy()
pos_signal[pos_signal <= 0] = np.nan
neg_signal[neg_signal > 0] = np.nan
ax.plot(pos_signal,label="Concept 2", linestyle='dotted')
ax.plot(neg_signal,label="Concept 1")
plt.xticks(np.arange(0, 45000, step=10000))
plot_attributes(plt,ax)
fig.savefig(filename,dpi=1000, format='eps',bbox_inches='tight')
plt.show() if plot else ""
def incremental_drift(length=50000,width=10000,plot=True,filename="incremental_drift.eps"):
probability_drift = np.array([])
time = np.array([])
fig, ax = plt.subplots()
fig.set_size_inches(6.4, 4.8)
t = np.arange(time.size, length, 1)
x = np.array([1.0 / (1.0 + np.exp(-4.0 * float(i - int(length/2)) / float(width))) for i in t])
probability_drift = np.append(probability_drift,x)
# probability_drift = np.append(probability_drift,y)
time = np.append(time,t)
probability_drift = (probability_drift-.5)*2
t = np.arange(1, probability_drift.size+1, 1)
signal = probability_drift
pos_signal = signal.copy()
neg_signal = signal.copy()
pos_signal[pos_signal <= 0] = np.nan
neg_signal[neg_signal > 0] = np.nan
ax.plot(pos_signal,label="Concept 2", linestyle='dotted')
ax.plot(neg_signal,label="Concept 1")
plot_attributes(plt,ax)
fig.savefig(filename,dpi=1000, format='eps',bbox_inches='tight')
plt.show() if plot else ""
def gradual_drift(length=50000,width=10,rate=0.4,plot=True,filename="gradual_drift.eps"):
length = length / 2
probability_drift = np.array([])
time = np.array([])
fig, ax = plt.subplots()
fig.set_size_inches(6.4, 4.8)
part_length = rate*length
for part in range(int(length/part_length)):
t = np.arange(time.size, time.size+part_length, 1)
x = np.array([1.0 / (1.0 + np.exp(-4.0 * float(i - int(time.size+part_length-part_length/2)) / float(width))) for i in t])
y = np.array([1 - p for p in x])
if 0 == part:
probability_drift = np.append(probability_drift,np.zeros(10000))
if int(length/part_length)-1 == part:
probability_drift = np.append(probability_drift,x)
probability_drift = np.append(probability_drift,np.ones(10000))
else:
probability_drift = np.append(probability_drift,x)
probability_drift = np.append(probability_drift,y)
time = np.append(time,t)
probability_drift = (probability_drift-.5)*2
t = np.arange(1, probability_drift.size+1, 1)
signal = probability_drift
pos_signal = signal.copy()
neg_signal = signal.copy()
pos_signal[pos_signal <= 0] = np.nan
neg_signal[neg_signal > 0] = np.nan
ax.plot(pos_signal,label="Concept 2", linestyle='dotted')
ax.plot(neg_signal,label="Concept 1")
plot_attributes(plt,ax)
plt.show() if plot else ""
fig.savefig(filename,dpi=1000, format='eps',bbox_inches='tight')
def plot_attributes(plt,ax):
#plotting
ax.set_xlabel('Timestep')
ax.set_ylabel('Data Mean')
plt.style.use('seaborn-paper')
ax.legend()
plt.yticks([-1,1.0],["Concept 1","Concept 2"],rotation='vertical')
ticks = ax.yaxis.get_majorticklabels()
ticks[0].set_verticalalignment("center")
ticks[1].set_verticalalignment("center")
# ax1 = ax.twinx()
# plt.yticks([-1,0,1],["","",""],rotation='vertical')
#reoccuring_drift(width=600,filename="frequent_reoccuing_drift.eps") # Frequent Reoccurring
#reoccuring_drift(width=1000,rate=0.4) # Reoccurring
#incremental_drift(width=15000) # Incremental
#incremental_drift(width=2500,filename="abrupt_drift.eps") # Abrupt
gradual_drift(length=45000,width=1000,rate=0.3) #Gradual
| 33.024845
| 130
| 0.671995
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 924
| 0.173782
|
05c1f456776569370085a917d41ee8b850f0a3b7
| 15,773
|
py
|
Python
|
simulation/src/utils.py
|
VIDA-NYU/pedestrian-sensing-model
|
e8f0a6d3e47fc2a2577ac502f607568b3b7f2abf
|
[
"MIT"
] | 2
|
2020-01-14T12:44:11.000Z
|
2021-09-29T16:09:37.000Z
|
simulation/src/utils.py
|
VIDA-NYU/pedestrian-sensing-model
|
e8f0a6d3e47fc2a2577ac502f607568b3b7f2abf
|
[
"MIT"
] | 1
|
2021-09-11T14:13:57.000Z
|
2021-09-11T14:13:57.000Z
|
simulation/src/utils.py
|
VIDA-NYU/pedestrian-sensing-model
|
e8f0a6d3e47fc2a2577ac502f607568b3b7f2abf
|
[
"MIT"
] | 2
|
2020-07-13T17:08:25.000Z
|
2021-03-31T15:10:58.000Z
|
#!/usr/bin/env python3
import numpy as np
import math
import random
import time
import scipy.misc
import scipy.signal
import multiprocessing
import json
import itertools
import os
import pprint
from collections import namedtuple
from fractions import gcd
from optimized import get_distance
OBSTACLE = -1
MAX = 2147483647 #MAXIMUM INT 32
Graph = namedtuple('Graph', 'adj nodes2d nodesflat indices cachedravel ' \
'mapshape nnodes maplen')
##########################################################
def compute_gcd_intervals(speed1, speed2):
_gcd = gcd(speed1, speed2)
interval2 = int(min(speed1, speed2) / _gcd)
interval1 = int(max(speed1, speed2) / _gcd)
return interval1, interval2
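# Illustrative example (not in the original module): compute_gcd_intervals(4, 6)
# returns (3, 2), i.e. the two speeds divided by their greatest common divisor.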
def get_distance_from_npy_idx(npypos1, npypos2, mapshape):
"""Compute manhattan difference tween @pos1 and @pos2.
Args:
pos1(tuple): position 1 in flattened numpy array
pos2(tuple): position 2 in flattened numpy array
Returns:
        float: Manhattan distance
"""
pos1 = np.array(np.unravel_index(npypos1, mapshape))
pos2 = np.array(np.unravel_index(npypos2, mapshape))
return get_distance(pos1, pos2)
def flatten_indices(indices, mapshape):
return np.ravel_multi_index(np.transpose(indices), mapshape)
def unflatten_indices(indices, mapshape):
out = np.unravel_index(indices, mapshape)
return list(zip(out[0], out[1]))
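# Illustrative example (not in the original module), on a hypothetical 3x4 map:
#   flatten_indices([(0, 1), (2, 3)], (3, 4))   -> array([ 1, 11])
#   unflatten_indices([1, 11], (3, 4))          -> [(0, 1), (2, 3)]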
def parse_image(imagefile, thresh=128):
"""Parse the streets from image and return a numpy ndarray,
with 0 as streets and OBSTACLE as non-streets. Assumes a
BW image as input, with pixels in white representing streets.
Args:
imagefile(str): image path
Returns:
numpy.ndarray: structure of the image
"""
img = scipy.misc.imread(imagefile)
if img.ndim > 2: img = img[:, :, 0]
return (img > thresh).astype(int) - 1
def find_crossings_crossshape(npmap):
"""Convolve with kernel considering input with
    0 as streets and OBSTACLE as non-streets (i.e. the map
    produced by parse_image, where white pixels are streets).
Args:
npmap(numpy.ndarray): ndarray with two dimensions composed of -1 (obstacles)
and 0 (travesable paths)
Returns:
list: set of indices that contains the nodes
"""
ker = np.array([[0,1,0], [1, 1, 1], [0, 1, 0]])
convolved = scipy.signal.convolve2d(npmap, ker, mode='same',
boundary='fill', fillvalue=OBSTACLE)
inds = np.where(convolved >= OBSTACLE)
return set([ (a,b) for a,b in zip(inds[0], inds[1]) ])
def find_crossings_squareshape(npmap, supressredundant=True):
"""Convolve with kernel considering input with
    0 as streets and -1 as non-streets (i.e. the map
    produced by parse_image, where white pixels are streets).
Args:
npmap(numpy.ndarray): ndarray with two dimensions composed of -1 (obstacles)
and 0 (travesable paths)
Returns:
list: set of indices that contains the nodes
"""
ker = np.array([[1,1], [1, 1]])
convolved = scipy.signal.convolve2d(npmap, ker, mode='same',
boundary='fill', fillvalue=OBSTACLE)
inds = np.where(convolved >= 0)
crossings = np.array([ np.array([a,b]) for a,b in zip(inds[0], inds[1]) ])
if supressredundant:
return filter_by_distance(crossings)
else: return crossings
def filter_by_distance(points, mindist=4):
"""Evaluate the distance between each pair os points in @points
and return just the ones with distance gt @mindist
Args:
points(set of tuples): set of positions
mindist(int): minimum distance
Returns:
set: set of points with a minimum distance between each other
"""
cr = list(points)
npoints = len(points)
valid = np.full(npoints, np.True_)
for i in range(npoints):
if not valid[i]: continue
for j in range(i + 1, npoints):
dist = get_distance(cr[i], cr[j])
if dist < mindist: valid[j] = np.False_
return points[valid]
def get_adjacency_dummy(nodes, npmap):
    # `ind` was undefined in the original; assume it indexes the traversable cells.
    ind = np.where(npmap != OBSTACLE)
    return set([ (a,b) for a,b in zip(ind[0], ind[1]) ])
##########################################################
def compute_heuristics(nodes, goal):
"""Compute heuristics based on the adjcency matrix provided and on the goal. If the guy is in the adjmatrix, then it is not an obstacle.
IMPORTANT: We assume that there is just one connected component.
Args:
        nodes(iterable of tuples): traversable positions
goal(tuple): goal position
Returns:
dict of heuristics: heuristic for each position
"""
subt = np.subtract
abso = np.absolute
return {v: np.sum(abso(subt(v, goal))) for v in nodes}
##########################################################
def compute_heuristics_from_map(searchmap, goal):
s = searchmap
gy, gx = goal
height, width = s.shape
h = {}
for j in range(height):
disty = math.fabs(j-gy)
for i in range(width):
v = s[j][i]
if v == OBSTACLE:
h[(j, i)] = MAX
else:
distx = math.fabs(j-gx)
h[(j, i)] = distx + disty + v
return h
##########################################################
def get_adjmatrix_from_npy(_map):
"""Easiest approach, considering 1 for each neighbour.
"""
connectivity = 8
h, w = _map.shape
nodes = np.empty((1, 0), dtype=int)
adj = np.empty((0, 10), dtype=int)
for j in range(0, h):
for i in range(0, w):
if _map[j, i] == OBSTACLE: continue
nodes = np.append(nodes, np.ravel_multi_index((j, i), _map.shape))
ns1, ns2 = get_neighbours_coords((j, i), _map.shape)
            neigh = np.full(10, -1)  # 10 slots to match the (1, 10) reshape below
            neigh[0] = -1
            acc = 1
for jj, ii in ns1:
if _map[jj, ii] != OBSTACLE:
neigh[acc] = np.ravel_multi_index((jj, ii), _map.shape)
acc += 1
neigh[acc] = -1.4142135623730951 #sqrt(2)
acc += 1
adj = np.append(adj, np.reshape(neigh, (1, 10)), axis=0)
return nodes, adj
##########################################################
def get_full_adjmatrix_from_npy(_mapmatrix):
"""Create a graph structure of a 2d matrix with two possible values: OBSTACLE
or 0. It returns a big structure in different formats to suit every need
Returns:
Structure with attributes
adj(maplen, 10) - stores the neighbours of each npy coordinate
nodes2d(nnodes, 2) - sparse list of nodes in 2d
nodesflat(nnodes) - sparse list of nodes in npy
indices(maplen) - dense list of points in sparse indexing
cachedravel(mapshape) - cached ravel of points to be used
mapshape(2) - height and width
nnodes(1) - number of traversable nodes
"""
h, w = _mapmatrix.shape
maplen = np.product(_mapmatrix.shape)
adj = np.full((np.product(_mapmatrix.shape), 10), -1, dtype=int)
nodes2d = np.full((maplen, 2), -1, dtype=int)
nodesflat = np.empty((0, 1), dtype=int)
indices = np.full(maplen, -1, dtype=int)
cachedravel = np.full(_mapmatrix.shape, -1)
nodesidx = 0
#TODO: convert everything to numpy indexing
for j in range(h):
for i in range(w):
if _mapmatrix[j, i] == OBSTACLE: continue
npyidx = np.ravel_multi_index((j, i), _mapmatrix.shape)
indices[npyidx] = nodesidx
nodes2d[nodesidx] = np.array([j, i])
ns1, ns2 = get_neighbours_coords((j, i), _mapmatrix.shape)
neigh = np.full(10, -MAX)
neigh[0] = -1
acc = 1
cachedravel[j, i] = npyidx
for jj, ii in ns1:
if _mapmatrix[jj, ii] != OBSTACLE:
neigh[acc] = np.ravel_multi_index((jj, ii), _mapmatrix.shape)
acc += 1
neigh[acc] = -2 #sqrt(2)
acc += 1
for jj, ii in ns2:
if _mapmatrix[jj, ii] != OBSTACLE:
neigh[acc] = np.ravel_multi_index((jj, ii), _mapmatrix.shape)
acc += 1
adj[npyidx] = np.reshape(neigh, (1, 10))
nodesidx += 1
nodes2d = nodes2d[:nodesidx]
nodesflat = np.array([ np.ravel_multi_index((xx, yy),_mapmatrix.shape) for xx, yy in nodes2d])
return Graph(adj=adj, nodes2d=nodes2d, nodesflat=nodesflat,
indices=indices, cachedravel=cachedravel,
mapshape=_mapmatrix.shape, nnodes=len(nodesflat),
maplen=np.product(_mapmatrix.shape))
##########################################################
def get_neighbours_coords(pos, mapshape):
""" Get neighbours. Do _not_ verify whether it is a valid coordinate
Args:
j(int): y coordinate
i(int): x coordinate
connectedness(int): how consider the neighbourhood, 4 or 8
yourself(bool): the point itself is included in the return
The order returned is:
5 1 6
4 9 2
8 3 7
"""
j, i = pos
neighbours1 = [ (j-1, i), (j, i+1), (j+1, i), (j, i-1) ]
neighbours2 = [(j-1, i-1), (j-1, i+1), (j+1, i+1), (j+1, i-1) ]
n1 = eliminate_nonvalid_coords(neighbours1, mapshape)
n2 = eliminate_nonvalid_coords(neighbours2, mapshape)
return (n1, n2)
#########################################################
def get_neighbours_coords_npy_indices(idx, mapshape, connectedness=8,
yourself=False):
""" Get neighbours. Do _not_ verify whether it is a valid coordinate
Args:
idx(int): npy indexing of a matrix
        connectedness(int): how to consider the neighbourhood, 8 or 4
yourself(bool): the point itself is included in the return
The order returned is:
c5 c1 c6
c4 c9 c2
c8 c3 c7
"""
nrows, ncols = mapshape
maplen = np.product(mapshape)
c1 = idx - ncols
c2 = idx + 1
c3 = idx + ncols
c4 = idx - 1
neighbours = []
if c1 >= 0 : neighbours.append(c1)
if c2 < maplen: neighbours.append(c2)
if c3 < maplen: neighbours.append(c3)
if c4 >= 0 : neighbours.append(c4)
if connectedness == 8:
c5 = c1 - 1
c6 = c1 + 1
c7 = c3 + 1
c8 = c3 - 1
if c5 >= 0:
neighbours.append(c5)
neighbours.append(c6)
if c7 < maplen:
neighbours.append(c7)
neighbours.append(c8)
if yourself: neighbours.append(idx)
return neighbours
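# Illustrative example (not in the original module): on a 3x4 map, index 5 lies at
# row 1, col 1, so get_neighbours_coords_npy_indices(5, (3, 4)) returns
# [1, 6, 9, 4, 0, 2, 10, 8] (order c1..c8 from the docstring above).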
##########################################################
def eliminate_nonvalid_coords(coords, mapshape):
""" Eliminate nonvalid indices
Args:
coords(set of tuples): input set of positions
h(int): height
w(int): width
Returns:
set of valid coordinates
"""
h, w = mapshape
valid = []
for j, i in coords:
if j < 0 or j >= h: continue
if i < 0 or i >= w: continue
valid.append((j, i))
return valid
##########################################################
def get_adjmatrix_from_image(image):
"""Get the adjacenty matrix from image
Args:
searchmap(np.ndarray): our structure of searchmap
Returns:
set of tuples: set of the crossing positions
"""
searchmap = parse_image(image)
return get_full_adjmatrix_from_npy(searchmap)
##########################################################
def get_crossings_from_image(imagefile):
"""Get crossings from image file
Args:
    imagefile(str): image path
Returns:
set of tuples: set of the crossing positions
"""
searchmap = parse_image(imagefile)
return find_crossings_squareshape(searchmap)
##########################################################
def get_obstacles_from_image(imagefile):
"""Get obstacles from image file
Args:
    imagefile(str): image path
    Returns:
    set of tuples: set of the obstacle positions
"""
searchmap = parse_image(imagefile)
indices = np.where(searchmap==OBSTACLE)
return set(map(tuple, np.transpose(indices)))
##########################################################
def get_mapshape_from_searchmap(hashtable):
"""Suppose keys have the form (x, y). We want max(x), max(y)
such that not necessarily the key (max(x), max(y)) exists
Args:
hashtable(dict): key-value pairs
Returns:
int, int: max values for the keys
"""
ks = hashtable.keys()
h = max([y[0] for y in ks])
w = max([x[1] for x in ks])
return h+1, w+1
##########################################################
def get_random_els_with_reposition(inputlist, rng, n=1, avoided=[]):
if not avoided: return [rng.choice(inputlist) for _ in range(n)]
_list = list(inputlist)
nfree = len(_list)
els = [] # we accept repetitions
while len(els) < n:
rndidx = rng.randrange(0, nfree)
chosen = _list[rndidx]
if chosen != avoided: els.append(chosen)
return els
##########################################################
def get_multiprocessing_logger(loglevel):
log = multiprocessing.log_to_stderr()
log.setLevel(loglevel)
return log
##########################################################
def split_all_combinations_from_config(configfile, tmpdir, prefix=''):
with open(configfile) as fh:
config = json.load(fh)
configcopy = []
_keys = []
_values = []
for k, v in config.items():
if type(v) == list:
_keys.append(k)
_values.append(v)
comb = itertools.product(*_values)
f = os.path.basename(configfile)
for c in comb:
filename = os.path.join(tmpdir, prefix + '_' + (str(c))[1:-1].replace(', ', '-') + '_' + f)
newconfig = config.copy()
for i in range(len(c)):
newconfig[_keys[i]] = [c[i]]
with open(filename, 'w') as fh:
json.dump(newconfig, fh)
##########################################################
def copy_list_to_boolsparseindexing(_list, sparseindex):
boolsparseidx = np.full(sparseindex.shape, np.False_, dtype=np.bool_)
for el in _list:
boolsparseidx[el] = True
return boolsparseidx
##########################################################
def copy_list_to_boolindexing(_list, maplen):
boolidx = np.full(maplen, 0, dtype=np.int64)
boolidx[_list] = 1
return boolidx
##########################################################
def rename_old_folder(filesdir):
# Unfortunately, it cannot be called from numpy due to the cython file dependency
# Just create a .py file calling utils.rename_old_folder()
if not os.path.exists(filesdir):
print('Dir {} does not exist'.format(filesdir))
return
os.chdir(filesdir)
newnames = {
'fleetsz':'sensorsnum',
'rad': 'sensorrange',
'splng': 'sensorinterval',
'detprob': 'sensortpr',
'speed': 'sensorspeed'
}
def get_new_set_of_names(params):
newparams = []
for param in params:
p = param
for k, v in newnames.items():
if k in p:
p = p.replace(k, v)
newparams.append(p)
return newparams
for f in os.listdir('./'):
if not f.endswith('.npy'): continue
print(f)
suff = f.split('.npy')[0]
params = suff.split('_')
newparams = get_new_set_of_names(params)
beg = '_'.join(newparams[:5])
beg = beg.replace('sensortpr1', 'sensortpr1.0')
en = '_'.join(newparams[5:])
newname = '{}_sensorexfp0.0_{}.npy'.format(beg, en)
print(newname)
os.rename(f, newname)
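##########################################################
# Minimal usage sketch (added for illustration, not part of the original module):
# build the graph structure for a tiny hypothetical 3x3 map with one obstacle.
if __name__ == '__main__':
    demo_map = np.zeros((3, 3), dtype=int)
    demo_map[1, 1] = OBSTACLE
    demo_graph = get_full_adjmatrix_from_npy(demo_map)
    print('traversable nodes:', demo_graph.nnodes)  # 8 of the 9 cells are traversable
    print('map shape:', demo_graph.mapshape)        # (3, 3)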
| 30.216475
| 140
| 0.573131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,763
| 0.365371
|
05c354eab5a376b1dcdf00dc912ca4e24bdc43ea
| 2,438
|
py
|
Python
|
luxor/controllers/types.py
|
sam007961/luxor
|
31838c937b61bfbc400103d58ec5b5070471767e
|
[
"MIT"
] | null | null | null |
luxor/controllers/types.py
|
sam007961/luxor
|
31838c937b61bfbc400103d58ec5b5070471767e
|
[
"MIT"
] | 5
|
2020-09-06T15:44:13.000Z
|
2020-11-02T11:30:22.000Z
|
luxor/controllers/types.py
|
sam007961/luxor
|
31838c937b61bfbc400103d58ec5b5070471767e
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Union
from luxor.core.events import Event
from luxor.controllers.expressions import Var
class Int(Var):
def __init__(self, value: Number = 0, **kwargs) -> None:
super(Int, self).__init__(**kwargs)
self.event_prefix = self.name + '.int.'
self.__obj = self.ctx.request_object()
self.__obj['class'] = frozenset({'int', self.callstack + '.int'})
self.__obj['label'] = self.name
self.sset(value)
self.trigger('new', value)
def sget(self) -> int:
return self.__obj.sget('value')
def sset(self, value: Number) -> (int, int):
if type(value) == Int:
new = value.get()
else:
new = int(value)
old = self.sget()
self.__obj['value'] = new
return old, new
def get(self) -> int:
value = self.__obj['value']
self.trigger('get', value)
return value
def set(self, value: Number) -> None:
old, new = self.sset(value)
if type(value) == float:
self.trigger('cast_literal', value, new)
self.trigger('set', old, new)
@property
def value(self) -> int:
pass
@value.getter
def value(self) -> int:
return self.get()
@value.setter
def value(self, value: Number) -> None:
self.set(value)
    def trigger_new(self, value) -> Event:
return Event(self.event_prefix + 'new',
source=self.__obj, meta={
'new.value': value
})
def trigger_get(self, value) -> Event:
return Event(self.event_prefix + 'get',
source=self.__obj, meta={
'get.value': value
})
    def trigger_set(self, old: int, new: int) -> Event:
return Event(self.event_prefix + 'set',
source=self.__obj, meta={
'set.value.old': old,
'set.value.new': new
})
    def trigger_cast_literal(self, old: float, new: int) -> Event:
return Event(self.event_prefix + 'literal.cast',
source=self.__obj, meta={
'cast.value.type': type(old),
'cast.value.old': old,
'cast.value.new': new
})
Number = Union[int, float, Int]
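# Note: Number is referenced in the annotations above before this definition;
# that works because `from __future__ import annotations` defers their evaluation.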
| 30.098765
| 73
| 0.511895
| 2,259
| 0.926579
| 0
| 0
| 198
| 0.081214
| 0
| 0
| 212
| 0.086957
|
05c47851eed298a1ca3b5574ee61fdfb8228a592
| 412
|
py
|
Python
|
Les 1/1_1.py
|
tloader11/TICT-V1PROG-15
|
dac7e991dcb11a397082bdceaf60a07b9bbc1a4a
|
[
"Unlicense"
] | null | null | null |
Les 1/1_1.py
|
tloader11/TICT-V1PROG-15
|
dac7e991dcb11a397082bdceaf60a07b9bbc1a4a
|
[
"Unlicense"
] | null | null | null |
Les 1/1_1.py
|
tloader11/TICT-V1PROG-15
|
dac7e991dcb11a397082bdceaf60a07b9bbc1a4a
|
[
"Unlicense"
] | null | null | null |
5 5 integer
5.0 5.0 float
5 % 2 1 int
5 > 1 True boolean
'5' '5' String
5 * 2 10 int
'5' * 2 '55' String
'5' + '2' '52' String
5 / 2 2.5 float
5 // 2 2 int
[5, 2, 1] [5,2,1] list?
5 in [1, 4, 6] False boolean
| 29.428571
| 35
| 0.279126
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.055825
|
05c54a12ada174aedbee75dcfaa2218242c10ec6
| 1,270
|
py
|
Python
|
edgecast/command_line.py
|
ganguera/edgecast
|
43ab240698a50c1382eb11bdb79acc5683bc10ea
|
[
"MIT"
] | null | null | null |
edgecast/command_line.py
|
ganguera/edgecast
|
43ab240698a50c1382eb11bdb79acc5683bc10ea
|
[
"MIT"
] | null | null | null |
edgecast/command_line.py
|
ganguera/edgecast
|
43ab240698a50c1382eb11bdb79acc5683bc10ea
|
[
"MIT"
] | null | null | null |
import argparse
import arrow
import json
import config
from . import EdgecastReportReader
from media_type import PLATFORM
def main():
parser = argparse.ArgumentParser(
description='EdgeCast Usage Report Reader'
)
parser.add_argument('-g', '--granularity',
dest='granularity', action='store', type=str,
default='day', choices=['hour', 'day'],
help='Size in which data fields are sub-divided'
)
parser.add_argument('-r', '--range',
dest='date_range', action='store', type=str,
default='{yesterday}:{yesterday}'.format(yesterday=arrow.utcnow().shift(days=-1).format('YYYYMMDD')),
help='Date range to show. YYYYMMDD:YYYYMMDD'
)
parser.add_argument('-t', '--type',
dest='type', action='store', type=str, default='region',
choices=['region', 'cname', 'custom'], help='Report Type'
)
parser.add_argument('-p', '--platform',
dest='platform', action='store', type=str, default='HTTP_SMALL',
choices=PLATFORM, help='Media Type Platform'
)
args = parser.parse_args()
start, end = args.date_range.split(':')
platform = PLATFORM[args.platform]
reader = EdgecastReportReader(config.input['edgecast'], start, end, args.type, platform, args.granularity)
for read in reader:
        print(json.dumps(read))
| 27.608696
| 108
| 0.684252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 382
| 0.300787
|
05c66e3dcdf2a391e7cb2ae90afaebe8a08c59e9
| 3,483
|
py
|
Python
|
skeletons/browser/browser.py
|
gbkim000/wxPython
|
b1604d71cf04801f9efa8b26b935561a88ef1daa
|
[
"BSD-2-Clause"
] | 80
|
2018-05-25T00:37:25.000Z
|
2022-03-13T12:31:02.000Z
|
skeletons/browser/browser.py
|
gbkim000/wxPython
|
b1604d71cf04801f9efa8b26b935561a88ef1daa
|
[
"BSD-2-Clause"
] | 1
|
2021-01-08T20:22:52.000Z
|
2021-01-08T20:22:52.000Z
|
skeletons/browser/browser.py
|
gbkim000/wxPython
|
b1604d71cf04801f9efa8b26b935561a88ef1daa
|
[
"BSD-2-Clause"
] | 32
|
2018-05-24T05:40:55.000Z
|
2022-03-24T00:32:11.000Z
|
#!/usr/bin/python
"""
ZetCode wxPython tutorial
This program creates a browser UI.
author: Jan Bodnar
website: zetcode.com
last edited: May 2018
"""
import wx
from wx.lib.buttons import GenBitmapTextButton
class Example(wx.Frame):
def __init__(self, *args, **kw):
super(Example, self).__init__(*args, **kw)
self.InitUI()
def InitUI(self):
self.CreateMenuBar()
panel = wx.Panel(self)
# panel.SetBackgroundColour('white')
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
line1 = wx.StaticLine(panel)
vbox.Add(line1, 0, wx.EXPAND)
toolbar1 = wx.Panel(panel, size=(-1, 30))
back = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/back.png'),
style=wx.NO_BORDER)
forward = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/forw.png'),
style=wx.NO_BORDER)
refresh = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/refresh.png'),
style=wx.NO_BORDER)
stop = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/stop.png'),
style=wx.NO_BORDER)
home = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/home.png'),
style=wx.NO_BORDER)
address = wx.ComboBox(toolbar1, size=(50, -1))
go = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/play.png'),
style=wx.NO_BORDER)
text = wx.TextCtrl(toolbar1, size=(150, -1))
hbox1.Add(back)
hbox1.Add(forward)
hbox1.Add(refresh)
hbox1.Add(stop)
hbox1.Add(home)
hbox1.Add(address, 1, wx.TOP, 3)
hbox1.Add(go, 0, wx.TOP | wx.LEFT, 3)
hbox1.Add(text, 0, wx.TOP | wx.RIGHT, 3)
toolbar1.SetSizer(hbox1)
vbox.Add(toolbar1, 0, wx.EXPAND)
line = wx.StaticLine(panel)
vbox.Add(line, 0, wx.EXPAND)
toolbar2 = wx.Panel(panel, size=(-1, 30))
bookmark1 = wx.BitmapButton(toolbar2, bitmap=wx.Bitmap('images/love.png'),
style=wx.NO_BORDER)
bookmark2 = wx.BitmapButton(toolbar2, bitmap=wx.Bitmap('images/book.png'),
style=wx.NO_BORDER)
bookmark3 = wx.BitmapButton(toolbar2, bitmap=wx.Bitmap('images/sound.png'),
style=wx.NO_BORDER)
hbox2.Add(bookmark1, flag=wx.RIGHT, border=5)
hbox2.Add(bookmark2, flag=wx.RIGHT, border=5)
hbox2.Add(bookmark3)
toolbar2.SetSizer(hbox2)
vbox.Add(toolbar2, 0, wx.EXPAND)
line2 = wx.StaticLine(panel)
vbox.Add(line2, 0, wx.EXPAND)
panel.SetSizer(vbox)
self.CreateStatusBar()
self.SetTitle("Browser")
self.Centre()
def CreateMenuBar(self):
menubar = wx.MenuBar()
file = wx.Menu()
file.Append(wx.ID_ANY, '&Quit', '')
edit = wx.Menu()
view = wx.Menu()
go = wx.Menu()
bookmarks = wx.Menu()
tools = wx.Menu()
help = wx.Menu()
menubar.Append(file, '&File')
menubar.Append(edit, '&Edit')
menubar.Append(view, '&View')
menubar.Append(go, '&Go')
menubar.Append(bookmarks, '&Bookmarks')
menubar.Append(tools, '&Tools')
menubar.Append(help, '&Help')
self.SetMenuBar(menubar)
def main():
app = wx.App()
ex = Example(None)
ex.Show()
app.MainLoop()
if __name__ == '__main__':
main()
| 27.642857
| 83
| 0.584266
| 3,141
| 0.901809
| 0
| 0
| 0
| 0
| 0
| 0
| 423
| 0.121447
|
05c7ce421e8fdf3698aad581723528f431eaafbe
| 1,673
|
py
|
Python
|
model/tds_block.py
|
SABER-labs/SABERv2
|
028d403beadec3adebd51582fd8ef896a2fe3696
|
[
"MIT"
] | 1
|
2022-03-02T02:52:24.000Z
|
2022-03-02T02:52:24.000Z
|
model/tds_block.py
|
SABER-labs/SABERv2
|
028d403beadec3adebd51582fd8ef896a2fe3696
|
[
"MIT"
] | null | null | null |
model/tds_block.py
|
SABER-labs/SABERv2
|
028d403beadec3adebd51582fd8ef896a2fe3696
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
class TDSBlock(nn.Module):
def __init__(self, channels, kernel_size, width, dropout, right_padding):
super().__init__()
self.channels = channels
self.width = width
assert(right_padding >= 0)
self.conv_block = nn.Sequential(
nn.ConstantPad2d(
(kernel_size - 1 - right_padding, right_padding, 0, 0), 0),
nn.Conv2d(
channels, channels, (1, kernel_size), 1, (0, 0)),
nn.ReLU(inplace=True),
nn.Dropout(dropout)
)
linear_dim = channels * width
self.linear_block = nn.Sequential(
nn.Linear(linear_dim, linear_dim),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(linear_dim, linear_dim),
nn.Dropout(dropout)
)
self.conv_layerN = nn.LayerNorm([channels, width])
self.linear_layerN = nn.LayerNorm([channels, width])
def forward(self, x):
# X is B, C, W, T
out = self.conv_block(x) + x
out = out.permute(0, 3, 1, 2) # B, T, C, W
out = self.conv_layerN(out)
B, T, C, W = out.shape
out = out.view((B, T, 1, C * W))
out = self.linear_block(out) + out
out = out.view(B, T, C, W)
out = self.linear_layerN(out)
out = out.permute(0, 2, 3, 1) # B, C, W, T
return out
if __name__ == "__main__":
model = TDSBlock(15, 10, 80, 0.1, 1)
x = torch.rand(8, 15, 80, 400)
import time
start = time.perf_counter()
model(x)
end = time.perf_counter()
print(f"Time taken: {(end-start)*1000:.3f}ms")
| 28.355932
| 77
| 0.545129
| 1,388
| 0.829647
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.053796
|
05c8724a622688c0f5c093058bd7213a2efddffc
| 1,968
|
py
|
Python
|
blackcompany/serve/vcs.py
|
clckwrkbdgr/blackcompany
|
9164a0db3e9f11878ce12da6ebdf82a300e1c6f4
|
[
"WTFPL"
] | null | null | null |
blackcompany/serve/vcs.py
|
clckwrkbdgr/blackcompany
|
9164a0db3e9f11878ce12da6ebdf82a300e1c6f4
|
[
"WTFPL"
] | null | null | null |
blackcompany/serve/vcs.py
|
clckwrkbdgr/blackcompany
|
9164a0db3e9f11878ce12da6ebdf82a300e1c6f4
|
[
"WTFPL"
] | null | null | null |
from ._base import Endpoint
from ..util._six import Path
import bottle
from ..util import gitHttpBackend
class GitHTTPBackend:
""" WSGI git-http-backend interface to actual endpoints.
"""
def __init__(self, route, repo_root):
self.route = route
self.repo_root = Path(repo_root)
def get(self, path):
return self._serve(path)
def post(self, path):
return self._serve(path)
def _serve(self, path):
git_project_root = self.repo_root
git_dir = git_project_root/'.git'
if not git_dir.exists() and (git_project_root/'HEAD').exists():
git_dir = git_project_root
git_info = git_dir/'info'
if path == 'sparse-checkout' or (git_info/path).exists():
return bottle.static_file(path, root=str(git_info))
webroot = self.route
environ = dict(bottle.request.environ)
environ['PATH_INFO'] = environ['PATH_INFO'][len(webroot):]
status_line, headers, response_body_generator = gitHttpBackend.wsgi_to_git_http_backend(environ, str(git_project_root))
response = bottle.Response(response_body_generator, status_line, headers)
bottle.response.content_type = response.get_header('Content-Type')
return response
class MethodHandler:
def __init__(self, handler_func, path_param):
self.handler_func = handler_func
self.path_param = path_param
def __call__(self, route, _data, path, **kwargs):
return self.handler_func(path, **kwargs)
def git_repo(route, repo_root, **serve_params):
""" Defines Git repo endpoint on given route with given root.
Endpoint() objects will be created for GET and POST.
Rest of parameters will be passed through to underlying Endpoint() objects.
"""
backend = GitHTTPBackend(route, repo_root)
get_endpoint = Endpoint(route, None, method='GET', custom_handler=MethodHandler(backend.get, 'path:path'), **serve_params)
get_endpoint.serve()
post_endpoint = Endpoint(route, None, method='POST', custom_handler=MethodHandler(backend.post, 'path:path'), read_data=False, **serve_params)
post_endpoint.serve()
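# Minimal usage sketch (added for illustration; the route and repository path are
# hypothetical): expose a bare repo over HTTP so it can be cloned with
# `git clone http://localhost:8080/code/myrepo.git`. A real deployment would use
# the surrounding blackcompany serve setup.
if __name__ == '__main__':
    git_repo('/code/myrepo.git', '/srv/git/myrepo.git')
    bottle.run(host='localhost', port=8080)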
| 37.846154
| 143
| 0.758638
| 1,251
| 0.635671
| 0
| 0
| 0
| 0
| 0
| 0
| 363
| 0.184451
|
05cc0547376efd7b3d0398149b11f68433ccaf60
| 2,999
|
py
|
Python
|
imaginaire/discriminators/cagan.py
|
zebincai/imaginaire
|
f5a707f449d93c33fbfe19bcd975a476f2c1dd7a
|
[
"RSA-MD"
] | null | null | null |
imaginaire/discriminators/cagan.py
|
zebincai/imaginaire
|
f5a707f449d93c33fbfe19bcd975a476f2c1dd7a
|
[
"RSA-MD"
] | null | null | null |
imaginaire/discriminators/cagan.py
|
zebincai/imaginaire
|
f5a707f449d93c33fbfe19bcd975a476f2c1dd7a
|
[
"RSA-MD"
] | null | null | null |
# Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
import torch
import torch.nn as nn
from imaginaire.layers import Conv2dBlock
from imaginaire.layers.misc import ApplyNoise
class Discriminator(nn.Module):
"""Dummy Discriminator constructor.
Args:
        gen_cfg (obj): Discriminator definition part of the yaml config file (passed as ``cfg.dis`` by the caller).
data_cfg (obj): Data definition part of the yaml config file
"""
def __init__(self, gen_cfg, data_cfg):
super(Discriminator, self).__init__()
nonlinearity = gen_cfg.nonlinearity
# input downsample
self.downsample1 = nn.Upsample(scale_factor=0.5, mode='bilinear')
self.downsample2 = nn.Upsample(scale_factor=0.25, mode='bilinear')
self.downsample3 = nn.Upsample(scale_factor=0.125, mode='bilinear')
self.downsample4 = nn.Upsample(scale_factor=0.0625, mode='bilinear')
conv_params = dict(kernel_size=3,
padding=1,
activation_norm_type="instance",
nonlinearity=nonlinearity,
inplace_nonlinearity=True)
# encoder
self.apply_noise = ApplyNoise()
self.layer1 = Conv2dBlock(in_channels=6, out_channels=64, kernel_size=3, padding=1, stride=2,
nonlinearity=nonlinearity, inplace_nonlinearity=True)
self.layer2 = Conv2dBlock(in_channels=64 + 6, out_channels=128, stride=2, **conv_params)
self.layer3 = Conv2dBlock(in_channels=128 + 6, out_channels=256, stride=2, **conv_params)
self.layer4 = Conv2dBlock(in_channels=256 + 6, out_channels=512, stride=2, **conv_params)
self.outlayer = Conv2dBlock(in_channels=512 + 6, out_channels=1, kernel_size=3,
nonlinearity="sigmoid")
# self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.apply_noise(x)
x_d02 = self.downsample1(x)
x_d04 = self.downsample2(x)
x_d08 = self.downsample3(x)
x_d16 = self.downsample4(x)
# encoder
x_en2 = self.layer1(x)
x_en2 = torch.cat([x_en2, x_d02], dim=1)
x_en4 = self.layer2(x_en2)
x_en4 = torch.cat([x_en4, x_d04], dim=1)
x_en8 = self.layer3(x_en4)
x_en8 = torch.cat([x_en8, x_d08], dim=1)
x_en16 = self.layer4(x_en8)
x_en16 = torch.cat([x_en16, x_d16], dim=1)
out = self.outlayer(x_en16)
# out = self.sigmoid(out)
return out
if __name__ == "__main__":
from imaginaire.config import Config
cfg = Config("D:/workspace/develop/imaginaire/configs/projects/cagan/LipMPV/base.yaml")
dis = Discriminator(cfg.dis, cfg.data)
batch = torch.randn((8, 6, 256, 192))
y = dis(batch)
print(y.shape)
| 40.527027
| 102
| 0.617206
| 2,371
| 0.790597
| 0
| 0
| 0
| 0
| 0
| 0
| 634
| 0.211404
|
05cc10143e791bcc38db23bf914cc748df6a3237
| 2,959
|
py
|
Python
|
Chapter10/Ch10/server/database.py
|
henrryyanez/Tkinter-GUI-Programming-by-Example
|
c8a326d6034b5e54f77605a8ec840cb8fac89412
|
[
"MIT"
] | 127
|
2018-08-27T16:34:43.000Z
|
2022-03-22T19:20:53.000Z
|
Chapter10/Ch10/server/database.py
|
PiotrAdaszewski/Tkinter-GUI-Programming-by-Example
|
c8a326d6034b5e54f77605a8ec840cb8fac89412
|
[
"MIT"
] | 8
|
2019-04-11T06:47:36.000Z
|
2022-03-11T23:23:42.000Z
|
Chapter10/Ch10/server/database.py
|
PiotrAdaszewski/Tkinter-GUI-Programming-by-Example
|
c8a326d6034b5e54f77605a8ec840cb8fac89412
|
[
"MIT"
] | 85
|
2018-04-30T19:42:21.000Z
|
2022-03-30T01:22:54.000Z
|
import sqlite3
class Database:
def __init__(self):
self.database = "chat.db"
def perform_insert(self, sql, params):
conn = sqlite3.connect(self.database)
cursor = conn.cursor()
cursor.execute(sql, params)
conn.commit()
conn.close()
def perform_select(self, sql, params):
conn = sqlite3.connect(self.database)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute(sql, params)
results = [dict(row) for row in cursor.fetchall()]
conn.close()
return results
def add_user(self, username, real_name):
sql = "INSERT INTO users (username, real_name) VALUES (?,?)"
query_params = (username, real_name)
self.perform_insert(sql, query_params)
def get_all_users(self):
sql = "SELECT username, real_name, avatar FROM users"
params = []
return self.perform_select(sql, params)
def user_exists(self, username):
sql = "SELECT username FROM users WHERE username = ?"
params = (username,)
results = self.perform_select(sql, params)
if len(results):
return True
return False
def update_avatar(self, username, img_b64):
sql = "UPDATE users SET avatar=? WHERE username=?"
params = (img_b64, username)
return self.perform_insert(sql, params)
def get_user_avatar(self, username):
sql = "SELECT avatar FROM users WHERE username=?"
params = (username,)
return self.perform_select(sql, params)
def add_friend(self, user_one, user_two):
sql = "INSERT INTO friends (user_one, user_two, blocked) VALUES (?,?,0)"
query_params = (user_one, user_two)
self.perform_insert(sql, query_params)
def get_friends(self, username):
all_friends = []
sql = "SELECT user_two FROM friends WHERE user_one=? AND blocked=0"
params = (username,)
friends = self.perform_select(sql, params)
sql = "SELECT user_one FROM friends WHERE user_two=? AND blocked=0"
friends2 = self.perform_select(sql, params)
for friend in friends:
all_friends.append(friend["user_two"])
for friend in friends2:
all_friends.append(friend["user_one"])
return all_friends
def get_users_by_usernames(self, usernames):
question_marks = ','.join(['?' for user in usernames])
sql = f"SELECT * FROM users WHERE username IN ({question_marks})"
params = [user for user in usernames]
friends = self.perform_select(sql, params)
return friends
def block_friend(self, username, contact_to_block):
sql = "UPDATE friends SET blocked=1 WHERE (user_one = ? AND user_two = ?) OR (user_two = ? AND user_one = ?)"
query_params = (username, contact_to_block, username, contact_to_block)
self.perform_insert(sql, query_params)
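if __name__ == "__main__":
    # Minimal sketch (added for illustration, not part of the original module).
    # The queries above expect a chat.db with at least these tables; the column
    # types are assumptions.
    conn = sqlite3.connect("chat.db")
    conn.execute("CREATE TABLE IF NOT EXISTS users (username TEXT, real_name TEXT, avatar TEXT)")
    conn.execute("CREATE TABLE IF NOT EXISTS friends (user_one TEXT, user_two TEXT, blocked INTEGER)")
    conn.commit()
    conn.close()
    db = Database()
    if not db.user_exists("alice"):
        db.add_user("alice", "Alice Example")
    print(db.get_all_users())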
| 30.822917
| 117
| 0.630618
| 2,941
| 0.993917
| 0
| 0
| 0
| 0
| 0
| 0
| 620
| 0.20953
|
05cea8e33b54e9775229454c04e0071781d3127e
| 938
|
py
|
Python
|
ad_hoc_scripts/update_by_condition.py
|
IgorZyktin/MediaStorageSystem
|
df8d260581cb806eb54f320d63aa674c6175c17e
|
[
"MIT"
] | 2
|
2021-03-06T16:07:30.000Z
|
2021-03-17T10:27:25.000Z
|
ad_hoc_scripts/update_by_condition.py
|
IgorZyktin/MediaStorageSystem
|
df8d260581cb806eb54f320d63aa674c6175c17e
|
[
"MIT"
] | null | null | null |
ad_hoc_scripts/update_by_condition.py
|
IgorZyktin/MediaStorageSystem
|
df8d260581cb806eb54f320d63aa674c6175c17e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Non user friendly script.
"""
from mss.core.class_filesystem import Filesystem
def update_by_condition(root_path: str, theme: str):
"""Change records by condition."""
fs = Filesystem()
path = fs.join(root_path, theme, 'metainfo')
for folder, filename, name, ext in fs.iter_ext(path):
modified = False
if ext != '.json':
continue
full_path = fs.join(folder, filename)
content = fs.read_json(full_path)
for uuid, record in content.items():
if record['group_name'] == 'grand mal 1 rus':
record['sub_series'] = 'grand mal 1 rus'
modified = True
if modified:
fs.write_json(full_path, content)
print(f'Modified: {full_path}')
if __name__ == '__main__':
update_by_condition(
root_path='D:\\BGC_ARCHIVE_TARGET\\',
theme='bubblegum_crisis',
)
| 26.055556
| 57
| 0.590618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 242
| 0.257996
|
05cf590b42b6da085a51776ee9e5aa949a057c25
| 2,555
|
py
|
Python
|
2.ReinforcementLearning/RL_Book/1-gridworld/environment_value_iteration.py
|
link-kut/deeplink_public
|
688c379bfeb63156e865d78d0428f97d7d203cc1
|
[
"MIT"
] | null | null | null |
2.ReinforcementLearning/RL_Book/1-gridworld/environment_value_iteration.py
|
link-kut/deeplink_public
|
688c379bfeb63156e865d78d0428f97d7d203cc1
|
[
"MIT"
] | 11
|
2020-01-28T22:33:49.000Z
|
2022-03-11T23:41:08.000Z
|
2.ReinforcementLearning/RL_Book/1-gridworld/environment_value_iteration.py
|
link-kut/deeplink_public
|
688c379bfeb63156e865d78d0428f97d7d203cc1
|
[
"MIT"
] | 2
|
2019-06-01T04:14:52.000Z
|
2020-05-31T08:13:23.000Z
|
from environment import *
import random
class ValueIterationGraphicDisplay(GraphicDisplay):
def __init__(self, agent, title):
self.btn_1_text = "Calculate"
self.btn_2_text = "Print Policy"
self.btn_1_func = self.calculate_value
self.btn_2_func = self.print_optimal_policy
self.btn_3_func = self.move_by_value_iteration
GraphicDisplay.__init__(self, agent, title)
def move_by_value_iteration(self):
if self.improvement_count != 0 and self.is_moving != 1:
self.is_moving = 1
x, y = self.canvas.coords(self.rectangle)
self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)
x, y = self.find_rectangle()
while len(self.agent.get_action([x, y])) != 0:
action = random.sample(self.agent.get_action([x, y]), 1)[0]
self.after(100, self.rectangle_move(action))
x, y = self.find_rectangle()
self.is_moving = 0
def draw_one_arrow(self, col, row, action):
if col == 2 and row == 2:
return
if action == 0: # up
origin_x, origin_y = 50 + (UNIT * row), 10 + (UNIT * col)
self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.up))
elif action == 1: # down
origin_x, origin_y = 50 + (UNIT * row), 90 + (UNIT * col)
self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.down))
elif action == 3: # right
origin_x, origin_y = 90 + (UNIT * row), 50 + (UNIT * col)
self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.right))
elif action == 2: # left
origin_x, origin_y = 10 + (UNIT * row), 50 + (UNIT * col)
self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.left))
def draw_from_values(self, state, action_list):
i = state[0]
j = state[1]
for action in action_list:
self.draw_one_arrow(i, j, action)
def calculate_value(self):
self.iter_count += 1
for i in self.texts:
self.canvas.delete(i)
self.agent.value_iteration()
self.print_value_table(self.agent.value_table)
def print_optimal_policy(self):
self.improvement_count += 1
for i in self.arrows:
self.canvas.delete(i)
for state in self.env.all_states:
action = self.agent.get_action(state)
self.draw_from_values(state, action)
| 39.307692
| 94
| 0.600391
| 2,514
| 0.983953
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.018787
|
05cff405e8dd7ef93166ffc63471b8011294be84
| 8,289
|
py
|
Python
|
csimpy/test.py
|
dewancse/csimpy
|
58c32e40e5d991b4ca98df05e6f61020def475a9
|
[
"Apache-2.0"
] | null | null | null |
csimpy/test.py
|
dewancse/csimpy
|
58c32e40e5d991b4ca98df05e6f61020def475a9
|
[
"Apache-2.0"
] | null | null | null |
csimpy/test.py
|
dewancse/csimpy
|
58c32e40e5d991b4ca98df05e6f61020def475a9
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
from math import *
from scipy import integrate
import matplotlib.pyplot as plt
from libcellml import *
import lxml.etree as ET
__version__ = "0.1.0"
LIBCELLML_VERSION = "0.2.0"
STATE_COUNT = 1
VARIABLE_COUNT = 29
class VariableType(Enum):
CONSTANT = 1
COMPUTED_CONSTANT = 2
ALGEBRAIC = 3
VOI_INFO = {"name": "time", "units": "second", "component": "environment"}
STATE_INFO = [
{"name": "pH_ext", "units": "dimensionless", "component": "Concentrations"}
]
VARIABLE_INFO = [
{"name": "C_ext_NH4", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_ext_Na", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_int_H", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_int_NH4", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_int_Na", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "K_NHE3_H", "units": "mM", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "K_NHE3_NH4", "units": "mM", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "K_NHE3_Na", "units": "mM", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "XTxP0_NHE3_H", "units": "nmol_per_s_per_cm2", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "XTxP0_NHE3_NH4", "units": "nmol_per_s_per_cm2", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "XTxP0_NHE3_Na", "units": "nmol_per_s_per_cm2", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "C_ext_H", "units": "mM", "component": "Concentrations", "type": VariableType.ALGEBRAIC},
{"name": "alpha_ext_Na", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "beta_ext_H", "units": "dimensionless", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "gamma_ext_NH4", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "alpha_int_Na", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "beta_int_H", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "gamma_int_NH4", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "XTxP_NHE_Na", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "XTxP_NHE_H", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "XTxP_NHE_NH4", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "sum_NHE3", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_Na", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_H", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_NH4", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_Na_Max", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "plot_a", "units": "dimensionless", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "plot_b", "units": "dimensionless", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "K_H", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT}
]
def create_states_array():
return [nan]*STATE_COUNT
def create_variables_array():
return [nan]*VARIABLE_COUNT
def initialize_states_and_constants(states, variables):
variables[0] = 0.0
variables[1] = 0.1
variables[2] = 1.0e-3
variables[3] = 0.0
variables[4] = 0.0
variables[5] = 72.0e-6
variables[6] = 0.027e3
variables[7] = 30.0
variables[8] = 0.48e-3
variables[9] = 1.6e-3
variables[10] = 1.6e-3
states[0] = 6.0
def compute_computed_constants(variables):
variables[12] = variables[1]/variables[7]
variables[14] = variables[0]/variables[6]
variables[15] = variables[4]/variables[7]
variables[16] = variables[2]/variables[5]
variables[17] = variables[3]/variables[6]
variables[18] = variables[10]*2.0*variables[2]/(variables[2]+1.0e-6)
variables[19] = variables[8]*2.0*variables[2]/(variables[2]+1.0e-6)
variables[20] = variables[9]*2.0*variables[2]/(variables[2]+1.0e-6)
variables[25] = variables[18]*variables[19]/(variables[18]+variables[19])
variables[28] = ((1.0+variables[12])*variables[16]+(1.0+variables[16])*variables[12]*variables[18]/variables[19])/(1.0+2.0*variables[16])
def compute_rates(voi, states, rates, variables):
rates[0] = 2.0
def compute_variables(voi, states, rates, variables):
variables[11] = 1.0e3*pow(10.0, -states[0])
variables[13] = variables[11]/variables[5]
    variables[21] = (1.0+variables[12]+variables[13]+variables[14])*(variables[18]*variables[15]+variables[19]*variables[16]+variables[20]*variables[17])+(1.0+variables[15]+variables[16]+variables[17])*(variables[18]*variables[12]+variables[19]*variables[13]+variables[20]*variables[14])
variables[22] = variables[18]*variables[19]/variables[21]*(variables[12]*variables[16]-variables[15]*variables[13])+variables[18]*variables[20]/variables[21]*(variables[12]*variables[17]-variables[15]*variables[14])
variables[23] = variables[18]*variables[19]/variables[21]*(variables[15]*variables[13]-variables[12]*variables[16])+variables[19]*variables[20]/variables[21]*(variables[13]*variables[17]-variables[16]*variables[14])
variables[24] = variables[18]*variables[20]/variables[21]*(variables[15]*variables[14]-variables[12]*variables[17])+variables[19]*variables[20]/variables[21]*(variables[14]*variables[16]-variables[13]*variables[17])
variables[26] = variables[22]/variables[25]
variables[27] = 1.0/variables[26]
# LSODA
start = 0.0
end = 1
numpoints = 1000
stepsize = (end - start) / numpoints
print(start, end, numpoints, stepsize)
states = create_states_array()
variables = create_variables_array()
initialize_states_and_constants(states, variables)
compute_computed_constants(variables) # added this line
temp = []
def func(t, y):
rates = create_states_array()
compute_rates(t, y, rates, variables)
compute_variables(t, y, rates, variables) # added this line
print("variables[22]: ", variables[22])
temp.append(variables[22])
return rates
print("start: ", start)
print("end: ", end)
print("states: ", states)
solution = integrate.solve_ivp(func,[start, end], states, method='LSODA', max_step=stepsize, atol=1e-4, rtol=1e-6)
print(solution.t)
print(solution.y)
# graph
fig, ax = plt.subplots()
ax.plot(solution.y[0], temp, label='Line 1')
ax.set_xlabel('t')
ax.set_ylabel('y')
ax.set_title('Some Title')
ax.legend()
fig.savefig('test.png')
# # test
# def exponential_decay(t, y):
# return -0.5 * y
#
# sol = integrate.solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
#
# print(sol.t)
# print(sol.y)
#
# fig2, ax2 = plt.subplots()
# ax2.plot(sol.t, sol.y[0], label='Line 1')
# ax2.plot(sol.t, sol.y[1], label='Line 2')
# ax2.plot(sol.t, sol.y[2], label='Line 3')
# ax2.set_xlabel('x label')
# ax2.set_ylabel('y label')
# ax2.set_title('Simple Plot')
# ax2.legend()
# fig2.savefig('test.png')
# convert cellml1.0 or 1.1 to 2.0
# with open('../tests/fixtures/chang_fujita_1999.xml') as f:
# read_data = f.read()
# f.close()
#
# p = Parser()
# importedModel = p.parseModel(read_data)
#
# # parsing cellml 1.0 or 1.1 to 2.0
# dom = ET.fromstring(read_data.encode("utf-8"))
# xslt = ET.parse("../tests/fixtures/cellml1to2.xsl")
# transform = ET.XSLT(xslt)
# newdom = transform(dom)
#
# mstr = ET.tostring(newdom, pretty_print=True)
# mstr = mstr.decode("utf-8")
#
# # parse the string representation of the model to access by libcellml
# importedModel = p.parseModel(mstr)
#
# f = open('../tests/fixtures/chang_fujita_1999.xml', 'w')
# f.write(mstr)
| 42.507692
| 268
| 0.68054
| 86
| 0.010375
| 0
| 0
| 0
| 0
| 0
| 0
| 3,233
| 0.390035
|
05d337eef8af353471796ace517f3b818298177f
| 2,342
|
py
|
Python
|
camera_calib/image.py
|
justinblaber/camera_calib_python
|
9427ff31d55af7619e7aee74136446a31d10def0
|
[
"Apache-2.0"
] | 3
|
2020-10-14T10:24:09.000Z
|
2021-09-19T20:48:40.000Z
|
camera_calib/image.py
|
justinblaber/camera_calib_python
|
9427ff31d55af7619e7aee74136446a31d10def0
|
[
"Apache-2.0"
] | 1
|
2021-09-28T02:06:42.000Z
|
2021-09-28T02:06:42.000Z
|
camera_calib/image.py
|
justinblaber/camera_calib_python
|
9427ff31d55af7619e7aee74136446a31d10def0
|
[
"Apache-2.0"
] | 2
|
2021-01-07T20:13:31.000Z
|
2021-01-08T18:16:53.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: image.ipynb (unless otherwise specified).
__all__ = ['Img', 'FileImg', 'File16bitImg', 'ArrayImg']
# Cell
import warnings
import numpy as np
import torch
from PIL import Image
from .utils import *
# Cell
class Img:
def exists(self): raise NotImplementedError('Please implement exists()')
@property
def name(self): raise NotImplementedError('Please implement name')
@property
def size(self): raise NotImplementedError('Please implement size')
def array(self, dtype, device): raise NotImplementedError('Please implement array')
def array_gs(self, dtype, device=None): # gs == "gray scale"
arr = self.array(dtype, device)
sz = arr.shape
if len(sz) == 3 and sz[2] == 3: arr = rgb2gray(arr)
elif len(sz) == 2: pass
else: raise RuntimeError(f'Invalid shape: {arr.shape}')
return arr
def __repr__(self): return f'{self.__class__.__name__}({self.name})'
# Cell
class FileImg(Img):
def __init__(self, file_img):
self.file_img = file_img
def exists(self): return self.file_img.exists()
@property
def name(self): return self.file_img.stem
@property
def size(self): return reverse(Image.open(self.file_img).size) # fast
# Cell
class File16bitImg(FileImg):
def __init__(self, file_img):
super().__init__(file_img)
def array(self, dtype, device=None):
arr = np2torch(np.array(Image.open(self.file_img))).to(dtype=dtype, device=device)
arr /= 2**16-1 # Scale between 0 and 1 for 16 bit image
return arr
# Cell
class ArrayImg(Img):
def __init__(self, arr, name=None):
if len(arr.shape) < 2: raise RuntimeError('Input array has less than 2 dimensions')
self.sz = arr.shape[:2]
self.n = name
if arr.min() < 0: warnings.warn('Value less than 0 found')
if arr.max() > 1: warnings.warn('Value greater than 1 found')
self.arr = arr
@property
def name(self): return self.n
@property
def size(self): return self.sz
def array(self, dtype, device=None): return self.arr.to(dtype=dtype, device=device)
def exists(self): return True
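if __name__ == '__main__':
    # Minimal sketch (added for illustration, not part of the original module):
    # wrap an in-memory grayscale tensor and read it back at a chosen dtype.
    demo = ArrayImg(torch.rand(4, 6), name='demo')
    print(demo.name, demo.size, demo.array_gs(torch.float32).shape)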
| 32.527778
| 91
| 0.61614
| 2,062
| 0.880444
| 0
| 0
| 463
| 0.197694
| 0
| 0
| 479
| 0.204526
|
05d462566b4d5254250d288dd86dc436b3f67818
| 2,144
|
py
|
Python
|
einshape/src/jax/jax_ops.py
|
LaudateCorpus1/einshape
|
b1a0e696c20c025074f09071790b97b42754260d
|
[
"Apache-2.0"
] | 38
|
2021-07-23T12:00:08.000Z
|
2022-03-18T08:40:33.000Z
|
einshape/src/jax/jax_ops.py
|
LaudateCorpus1/einshape
|
b1a0e696c20c025074f09071790b97b42754260d
|
[
"Apache-2.0"
] | 1
|
2021-10-05T16:20:23.000Z
|
2021-10-05T16:20:23.000Z
|
einshape/src/jax/jax_ops.py
|
LaudateCorpus1/einshape
|
b1a0e696c20c025074f09071790b97b42754260d
|
[
"Apache-2.0"
] | 3
|
2021-08-04T16:18:29.000Z
|
2021-11-13T14:33:20.000Z
|
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Einshape implementation for JAX."""
from typing import Any, Union
from einshape.src import abstract_ops
from einshape.src import backend
from jax import lax
import jax.numpy as jnp
class _JaxBackend(backend.Backend[jnp.ndarray]):
"""Jax implementation of reshaping ops."""
def reshape(self, x: jnp.ndarray, op: abstract_ops.Reshape) -> jnp.ndarray:
return jnp.reshape(x, op.shape)
def transpose(
self, x: jnp.ndarray, op: abstract_ops.Transpose)-> jnp.ndarray:
return jnp.transpose(x, axes=op.perm)
def broadcast(
self, x: jnp.ndarray, op: abstract_ops.Broadcast) -> jnp.ndarray:
shape = op.transform_shape(x.shape)
# For each input dimension, lax needs to know which output dimension it
# corresponds to.
broadcast_dims = [j for j in range(len(shape)) if j not in op.axis_sizes]
return lax.broadcast_in_dim(x, shape, broadcast_dims)
def einshape(
equation: str,
value: Union[jnp.ndarray, Any],
**index_sizes: int
) -> jnp.ndarray:
"""Reshapes `value` according to the given Shape Equation.
Args:
equation: The Shape Equation specifying the index regrouping and reordering.
value: Input tensor, or tensor-like object.
**index_sizes: Sizes of indices, where they cannot be inferred
from `input_shape`.
Returns:
Tensor derived from `value` by reshaping as specified by `equation`.
"""
if not isinstance(value, jnp.ndarray):
value = jnp.array(value)
return _JaxBackend().exec(equation, value, value.shape, **index_sizes)
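if __name__ == "__main__":
  # Minimal usage sketch (added for illustration, not part of the original module):
  # group the last two axes of a (2, 3, 4) array into a single axis of size 12.
  x = jnp.zeros((2, 3, 4))
  y = einshape("abc->a(bc)", x)
  print(y.shape)  # (2, 12)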
| 33.5
| 80
| 0.726213
| 703
| 0.327892
| 0
| 0
| 0
| 0
| 0
| 0
| 1,143
| 0.533116
|
05d4760733051270e73120a1ac9a61ea86e6cde5
| 1,800
|
py
|
Python
|
DOOM.py
|
ariel139/DOOM-port-scanner
|
328678b9f79855de472967f1a3e4b3e9181a3706
|
[
"MIT"
] | 6
|
2020-11-24T06:51:02.000Z
|
2022-02-26T23:19:46.000Z
|
DOOM.py
|
ariel139/DOOM-port-scanner
|
328678b9f79855de472967f1a3e4b3e9181a3706
|
[
"MIT"
] | null | null | null |
DOOM.py
|
ariel139/DOOM-port-scanner
|
328678b9f79855de472967f1a3e4b3e9181a3706
|
[
"MIT"
] | null | null | null |
import socket
from IPy import IP
print("""
You are using the DOOM Port scanner.
This tool is for educational purposes ONLY!!!!
1. You can change the range of the ports you want to scan.
2. You can change the speed of the scan
3. You can scan a list of targets by separating them with ', '
4. You can scan both URLs and IPs
""")
# IP addresses of the targets
targets = input("enter targets or URL's ")
# min range of ports
min_port = int(input("enter min number of ports "))
# max range of ports
max_port = int(input("enter max number of ports "))
try:
    speed = float(input("Enter the speed (socket timeout in seconds) you want to scan with (default is 0.1) "))
except:
speed = 0.1
def multi_targets(ip):
converted_ip = check_ip(ip)
    # loop over the port range and scan each port
    print(f'scanning ports for {ip}')
for port in range(min_port,max_port +1):
scan_port(converted_ip,port)
# check whether the target is an IP or a hostname/URL
def check_ip(ip):
try:
IP(ip)
return ip
except ValueError:
socket.gethostbyname(ip)
return ip
def get_data_from_port(soc):
return soc.recv(1024)
# scan port function
def scan_port(ip, port):
try:
sc = socket.socket()
sc.settimeout(speed)
sc.connect((ip, port))
try:
data = get_data_from_port(sc)
            print(f'[+] port {port} is on and received data is: {data}')
except:
print(f'[+] {port} port is open')
except:
        print('scanning ports...')
# scan multiple targets if a comma-separated list was given
if ', ' in targets:
for ip_add in targets.split(','):
multi_targets(ip_add.strip(' '))
else:
multi_targets(targets)
| 24.657534
| 111
| 0.597778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 798
| 0.443333
|
05d4a6a91e58732f8757086328fccaf5f8b61a70
| 9,380
|
py
|
Python
|
finding_models/testing_classifiers.py
|
NtMalDetect/NtMalDetect
|
5bf8f35491bf8081d0b721fa1bf90582b410ed74
|
[
"MIT"
] | 10
|
2018-01-04T07:59:59.000Z
|
2022-01-17T08:56:33.000Z
|
finding_models/testing_classifiers.py
|
NtMalDetect/NtMalDetect
|
5bf8f35491bf8081d0b721fa1bf90582b410ed74
|
[
"MIT"
] | 2
|
2020-01-12T19:32:05.000Z
|
2020-04-11T09:38:07.000Z
|
finding_models/testing_classifiers.py
|
NtMalDetect/NtMalDetect
|
5bf8f35491bf8081d0b721fa1bf90582b410ed74
|
[
"MIT"
] | 1
|
2018-08-31T04:13:43.000Z
|
2018-08-31T04:13:43.000Z
|
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
from sklearn.utils import shuffle
useTFIDF = True
showSampleVector = False
showMostInformativeFeatures = True
howManyInformativeFeatures = 10
nGRAM1 = 10
nGRAM2 = 10
weight = 10
ask = input("Do you want to specify parameters or use default values? Input 'T' or 'F'. ")
if ask == "T":
useTFIDFStr = input("Do you want to use tfidfVectorizer or CountVectorizer? Type T for tfidfVectorizer and F for CountVectorizer ")
if useTFIDFStr == "T":
useTFIDF = True
else:
useTFIDF = False
showSampleVectorStr = input("Do you want to print an example vectorized corpus? (T/F) ")
if showSampleVectorStr == "T":
showSampleVector = True
else:
showSampleVector = False
showMostInformativeFeaturesStr = input("Do you want to print the most informative feature in some of the classifiers? (T/F) ")
if showMostInformativeFeaturesStr == "T":
showMostInformativeFeatures = True
howManyInformativeFeatures = int(input("How many of these informative features do you want to print for each binary case? Input a number "))
else:
showMostInformativeFeatures = False
nGRAM1 = int(input("N-Gram lower bound (Read README.md for more information)? Input a number "))
nGRAM2 = int(input("N-Gram Upper bound? Input a number "))
weight = int(input("What weight do you want to use to separate train & testing? Input a number "))
main_corpus = []
main_corpus_target = []
my_categories = ['benign', 'malware']
# load the system call traces (malicious and benign) into the corpus
print("Loading system call database for categories:")
print(my_categories if my_categories else "all")
import glob
import os
malCOUNT = 0
benCOUNT = 0
for filename in glob.glob(os.path.join('./sysMAL', '*.txt')):
fMAL = open(filename, "r")
aggregate = ""
for line in fMAL:
linea = line[:(len(line)-1)]
aggregate += " " + linea
main_corpus.append(aggregate)
main_corpus_target.append(1)
malCOUNT += 1
for filename in glob.glob(os.path.join('./sysBEN', '*.txt')):
fBEN = open(filename, "r")
aggregate = ""
for line in fBEN:
linea = line[:(len(line) - 1)]
aggregate += " " + linea
main_corpus.append(aggregate)
main_corpus_target.append(0)
benCOUNT += 1
# shuffling the dataset
main_corpus_target, main_corpus = shuffle(main_corpus_target, main_corpus, random_state=0)
# split into training and test sets according to the weight chosen above
train_corpus = main_corpus[:(weight*len(main_corpus)//(weight+1))]
train_corpus_target = main_corpus_target[:(weight*len(main_corpus)//(weight+1))]
test_corpus = main_corpus[(len(main_corpus)-(len(main_corpus)//(weight+1))):]
test_corpus_target = main_corpus_target[(len(main_corpus)-len(main_corpus)//(weight+1)):]
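# With weight = w, the first w/(w+1) of the shuffled corpus becomes the
# training set and the last 1/(w+1) the test set; e.g. the default weight of
# 10 gives roughly a 10:1 train/test split.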
print("%d documents - %0.3fMB (training set)" % (
len(train_corpus_target), train_corpus_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(test_corpus_target), test_corpus_size_mb))
print("%d categories" % len(my_categories))
print()
print("Benign Traces: "+str(benCOUNT)+" traces")
print("Malicious Traces: "+str(malCOUNT)+" traces")
print()
print("Extracting features from the training data using a sparse vectorizer...")
t0 = time()
if useTFIDF:
vectorizer = TfidfVectorizer(ngram_range=(nGRAM1, nGRAM2), min_df=1, use_idf=True, smooth_idf=True) ##############
else:
vectorizer = CountVectorizer(ngram_range=(nGRAM1, nGRAM2))
analyze = vectorizer.build_analyzer()
if showSampleVector:
print(analyze(test_corpus[1]))
X_train = vectorizer.fit_transform(train_corpus)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, train_corpus_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer...")
t0 = time()
X_test = vectorizer.transform(test_corpus)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, test_corpus_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# show which are the definitive features
def show_most_informative_features(vectorizer, clf, n=20):
feature_names = vectorizer.get_feature_names()
coefs_with_fns = sorted(zip(clf.coef_[0], feature_names))
coefs_with_fns_mal = coefs_with_fns[:-(n + 1):-1]
coefs_with_fns = sorted(zip(clf.coef_[0], feature_names))[:n]
print()
print("Most Informative Benign Features:")
for (coef_1, fn_1) in coefs_with_fns:
print(coef_1, fn_1)
print()
print("Most Informative Malicious Features:")
for (coef_2, fn_2) in coefs_with_fns_mal:
print(coef_2, fn_2)
print()
def benchmark(clf, showTopFeatures=False):
print('_'*60)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, train_corpus_target)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(test_corpus_target, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
print()
print(metrics.classification_report(test_corpus_target, pred,target_names=my_categories))
print()
clf_descr = str(clf).split('(')[0]
print("Predicted values: ")
print(pred.tolist());
print()
print("Real values:")
print(test_corpus_target)
print()
mCount = 0
for i in test_corpus_target:
if i == 1:
mCount+=1
print("Proportion of malicious trace:")
print(mCount/len(test_corpus_target))
if showTopFeatures:
show_most_informative_features(vectorizer, clf, 10)
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(penalty=penalty, dual=False,
tol=1e-3), showMostInformativeFeatures))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty), showMostInformativeFeatures))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,
tol=1e-3))),
('classification', LinearSVC(penalty="l2"))])))
# plotting results
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| 31.059603
| 150
| 0.698294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,196
| 0.234115
|
05d5479edfdc67ed72d1fed7ba706e163051f970
| 5,953
|
py
|
Python
|
neutron/tests/fullstack/test_firewall.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 1
|
2018-10-19T01:48:37.000Z
|
2018-10-19T01:48:37.000Z
|
neutron/tests/fullstack/test_firewall.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
neutron/tests/fullstack/test_firewall.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from neutron_lib import constants
from oslo_log import log as logging
from oslo_utils import uuidutils
from neutron.agent.common import ovs_lib
from neutron.agent.linux import iptables_firewall
from neutron.agent.linux import iptables_manager
from neutron.agent.linux.openvswitch_firewall import iptables as ovs_iptables
from neutron.common import utils
from neutron.tests.common import machine_fixtures
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
LOG = logging.getLogger(__name__)
class IptablesNotConfiguredException(Exception):
pass
class VmsUnreachableException(Exception):
pass
class FirewallMigrationTestCase(base.BaseFullStackTestCase):
def setUp(self):
host_descriptions = [
environment.HostDescription(
l3_agent=False,
of_interface='native',
l2_agent_type=constants.AGENT_TYPE_OVS,
firewall_driver='iptables_hybrid',
dhcp_agent=False,
)]
env = environment.Environment(
environment.EnvironmentDescription(),
host_descriptions)
super(FirewallMigrationTestCase, self).setUp(env)
# fullstack doesn't separate nodes running ovs agent so iptables rules
# are implemented in root namespace
self.iptables_manager = iptables_manager.IptablesManager()
def _prepare_resources(self):
self.tenant_uuid = uuidutils.generate_uuid()
network = self.safe_client.create_network(self.tenant_uuid)
self.safe_client.create_subnet(
self.tenant_uuid, network['id'], '20.0.0.0/24', enable_dhcp=False)
vms = machine.FakeFullstackMachinesList(
self.useFixture(
machine.FakeFullstackMachine(
self.environment.hosts[0],
network['id'],
self.tenant_uuid,
self.safe_client,
use_dhcp=False))
for i in range(2))
vms.block_until_all_boot()
for vm in vms:
self._add_icmp_security_group_rule(vm)
return vms
def _add_icmp_security_group_rule(self, vm):
sg_id = self.safe_client.create_security_group(self.tenant_uuid)['id']
self.safe_client.create_security_group_rule(
self.tenant_uuid, sg_id,
direction=constants.INGRESS_DIRECTION,
ethertype=constants.IPv4,
protocol=constants.PROTO_NAME_ICMP)
self.safe_client.client.update_port(
vm.neutron_port['id'],
body={'port': {'security_groups': [sg_id]}})
self.addCleanup(
self.safe_client.client.update_port,
vm.neutron_port['id'],
body={'port': {'security_groups': []}})
def _validate_iptables_rules(self, vms):
"""Check if rules from iptables firewall are configured.
Raises IptablesNotConfiguredException exception if no rules are found.
"""
for vm in vms:
vm_tap_device = iptables_firewall.get_hybrid_port_name(
vm.neutron_port['id'])
filter_rules = self.iptables_manager.get_rules_for_table('filter')
if not any(vm_tap_device in line for line in filter_rules):
raise IptablesNotConfiguredException(
"There are no iptables rules configured for interface %s" %
vm_tap_device)
def _switch_firewall(self, firewall_driver):
"""Switch firewall_driver to given driver and restart the agent."""
l2_agent = self.environment.hosts[0].l2_agent
l2_agent_config = l2_agent.agent_cfg_fixture.config
l2_agent_config['securitygroup']['firewall_driver'] = firewall_driver
l2_agent.agent_cfg_fixture.write_config_to_configfile()
l2_agent.restart()
int_bridge = ovs_lib.OVSBridge(
l2_agent_config['ovs']['integration_bridge'])
predicate = functools.partial(
ovs_iptables.is_bridge_cleaned, int_bridge)
utils.wait_until_true(
predicate,
exception=RuntimeError(
"Bridge %s hasn't been marked as clean." % int_bridge.br_name))
def test_migration(self):
vms = self._prepare_resources()
# Make sure ICMP packets can get through with iptables firewall
vms.ping_all()
self._validate_iptables_rules(vms)
self._switch_firewall('openvswitch')
# Make sure security groups still work after migration
vms.ping_all()
self.assertRaises(
IptablesNotConfiguredException, self._validate_iptables_rules, vms)
# Remove security groups so traffic cannot get through
for vm in vms:
self.safe_client.client.update_port(
vm.neutron_port['id'],
body={'port': {'security_groups': []}})
# TODO(jlibosva): Test all permutations and don't fail on the first one
self.assertRaises(machine_fixtures.FakeMachineException, vms.ping_all)
# Add back some security groups allowing ICMP and test traffic can now
# get through
for vm in vms:
self._add_icmp_security_group_rule(vm)
vms.ping_all()
| 38.908497
| 79
| 0.666891
| 4,727
| 0.794053
| 0
| 0
| 0
| 0
| 0
| 0
| 1,547
| 0.259869
|
05d679b96fcc27f56541b2f87e6ba4b22f90adbe
| 709
|
py
|
Python
|
Analysis/pdf_to_txt.py
|
ashishnitinpatil/resanalysersite
|
0604d2fed4760be741c4d90b6d230d0f2cd8bf9e
|
[
"CC-BY-4.0"
] | null | null | null |
Analysis/pdf_to_txt.py
|
ashishnitinpatil/resanalysersite
|
0604d2fed4760be741c4d90b6d230d0f2cd8bf9e
|
[
"CC-BY-4.0"
] | null | null | null |
Analysis/pdf_to_txt.py
|
ashishnitinpatil/resanalysersite
|
0604d2fed4760be741c4d90b6d230d0f2cd8bf9e
|
[
"CC-BY-4.0"
] | null | null | null |
from pdfminer.pdfinterp import PDFResourceManager, process_pdf
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from cStringIO import StringIO
def convert_pdf(path):
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
    fp = open(path, 'rb')
process_pdf(rsrcmgr, device, fp)
fp.close()
device.close()
    text = retstr.getvalue()
    retstr.close()
    return text
with open('C:\\Users\\ashis\\Desktop\\CIVIL ENGINEERING.txt', 'w') as to_write:
to_write.write(convert_pdf('C:\\Users\\ashis\\Desktop\\CIVIL ENGINEERING.pdf'))
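# A rough sketch of the same conversion with the newer pdfminer.six high-level
# API (an assumption about the installed library, not part of the original
# script); kept commented out so it does not clash with the pdfminer calls above:
#
# from pdfminer.high_level import extract_text
# with open('CIVIL ENGINEERING.txt', 'w') as out:
#     out.write(extract_text('CIVIL ENGINEERING.pdf'))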
| 27.269231
| 83
| 0.712271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.16079
|
05d6c824429b4f5fccdfe1433815eb6c96e18c8f
| 480
|
py
|
Python
|
local/handler/TravisHandler.py
|
fasterit/supybot-github
|
37b80046c0f0d5a66b2107a63e380002adbb66f5
|
[
"MIT"
] | 7
|
2016-07-16T22:16:37.000Z
|
2021-06-14T20:45:37.000Z
|
local/handler/TravisHandler.py
|
fasterit/supybot-github
|
37b80046c0f0d5a66b2107a63e380002adbb66f5
|
[
"MIT"
] | 30
|
2015-06-03T22:40:28.000Z
|
2022-02-11T08:49:44.000Z
|
local/handler/TravisHandler.py
|
fasterit/supybot-github
|
37b80046c0f0d5a66b2107a63e380002adbb66f5
|
[
"MIT"
] | 5
|
2018-01-12T21:28:50.000Z
|
2020-10-01T13:44:09.000Z
|
from ..utility import *
def handle(data, theme):
if isStatusVisible(data['repository']['url'], data['status_message'].lower()):
theme.travis(
branch = data['branch'],
repo = data['repository']['name'],
status = data['status_message'],
commitId = data['commit'],
commitMessage = data['message'],
commitAuthor = data['author_name'],
buildUrl = getShortURL(data['build_url'])
)
| 34.285714
| 82
| 0.554167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.241667
|
05d8328fda38c6d6fda5c13e5f09ac74925e7f3b
| 10,417
|
py
|
Python
|
pyart/io/tests/test_mdv_radar.py
|
josephhardinee/pyart
|
909cd4a36bb4cae34349294d2013bc7ad71d0969
|
[
"OLDAP-2.6",
"Python-2.0"
] | null | null | null |
pyart/io/tests/test_mdv_radar.py
|
josephhardinee/pyart
|
909cd4a36bb4cae34349294d2013bc7ad71d0969
|
[
"OLDAP-2.6",
"Python-2.0"
] | null | null | null |
pyart/io/tests/test_mdv_radar.py
|
josephhardinee/pyart
|
909cd4a36bb4cae34349294d2013bc7ad71d0969
|
[
"OLDAP-2.6",
"Python-2.0"
] | null | null | null |
""" Unit Tests for Py-ART's io/mdv_radar.py module. """
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.ma.core import MaskedArray
import pyart
############################################
# read_mdv tests (verify radar attributes) #
############################################
# read in the sample file and create a Radar object
radar = pyart.io.read_mdv(pyart.testing.MDV_PPI_FILE)
# time attribute
def test_time():
assert 'comment' in radar.time.keys()
assert 'long_name' in radar.time.keys()
assert 'standard_name' in radar.time.keys()
assert 'units' in radar.time.keys()
assert 'calendar' in radar.time.keys()
assert 'data' in radar.time.keys()
assert radar.time['units'] == 'seconds since 2011-05-20T11:01:00Z'
assert radar.time['data'].shape == (360, )
assert_almost_equal(radar.time['data'][200], 187, 0)
# range attribute
def test_range():
assert 'long_name' in radar.range
assert 'standard_name' in radar.range
assert 'meters_to_center_of_first_gate' in radar.range
assert 'meters_between_gates' in radar.range
assert 'units' in radar.range
assert 'data' in radar.range
assert 'spacing_is_constant' in radar.range
assert radar.range['data'].shape == (110, )
assert_almost_equal(radar.range['data'][0], 118, 0)
# fields attribute is tested later
# metadata attribute
def test_metadata():
assert 'instrument_name' in radar.metadata
assert 'source' in radar.metadata
# scan_type attribute
def test_scan_type():
assert radar.scan_type == 'ppi'
# latitude attribute
def test_latitude():
assert 'data' in radar.latitude
assert 'standard_name' in radar.latitude
assert 'units' in radar.latitude
assert radar.latitude['data'].shape == (1, )
assert_almost_equal(radar.latitude['data'], 37, 0)
# longitude attribute
def test_longitude():
assert 'data' in radar.longitude
assert 'standard_name' in radar.longitude
assert 'units' in radar.longitude
assert radar.longitude['data'].shape == (1, )
assert_almost_equal(radar.longitude['data'], -97, 0)
# altitude attribute
def test_altitude():
assert 'data' in radar.altitude
assert 'standard_name' in radar.altitude
assert 'units' in radar.altitude
assert 'positive' in radar.altitude
assert radar.altitude['data'].shape == (1, )
assert_almost_equal(radar.altitude['data'], 328, 0)
# altitude_agl attribute
def test_altitude_agl():
assert radar.altitude_agl is None
# sweep_number attribute
def test_sweep_number():
assert 'standard_name' in radar.sweep_number
assert np.all(radar.sweep_number['data'] == range(1))
# sweep_mode attribute
def test_sweep_mode():
assert 'standard_name' in radar.sweep_mode
assert radar.sweep_mode['data'].shape == (1, )
assert radar.sweep_mode['data'].dtype.char == 'S'
assert np.all(radar.sweep_mode['data'] == [b'azimuth_surveillance'])
# fixed_angle attribute
def test_fixed_angle():
assert 'standard_name' in radar.fixed_angle
assert 'units' in radar.fixed_angle
assert radar.fixed_angle['data'].shape == (1, )
assert_almost_equal(radar.fixed_angle['data'][0], 0.75, 2)
# sweep_start_ray_index attribute
def test_sweep_start_ray_index():
assert 'long_name' in radar.sweep_start_ray_index
assert radar.sweep_start_ray_index['data'].shape == (1, )
assert_almost_equal(radar.sweep_start_ray_index['data'][0], 0, 0)
# sweep_end_ray_index attribute
def test_sweep_end_ray_index():
assert 'long_name' in radar.sweep_end_ray_index
assert radar.sweep_end_ray_index['data'].shape == (1, )
assert_almost_equal(radar.sweep_end_ray_index['data'][0], 359, 0)
# target_scan_rate attribute
def test_target_scan_rate():
assert radar.target_scan_rate is None
# azimuth attribute
def test_azimuth():
assert 'standard_name' in radar.azimuth
assert 'long_name' in radar.azimuth
assert 'units' in radar.azimuth
assert 'axis' in radar.azimuth
assert_almost_equal(radar.azimuth['data'][0], 0, 0)
assert_almost_equal(radar.azimuth['data'][10], 10.0, 0)
# elevation attribute
def test_elevation():
assert 'standard_name' in radar.elevation
assert 'long_name' in radar.azimuth
assert 'units' in radar.elevation
assert 'axis' in radar.elevation
assert radar.elevation['data'].shape == (360, )
assert_almost_equal(radar.elevation['data'][0], 0.75, 2)
# scan_rate attribute
def test_scan_rate():
assert radar.scan_rate is None
# antenna_transition attribute
def test_antenna_transition():
assert radar.antenna_transition is None
# instrument_parameters attribute
def test_instrument_parameters():
# instrument_parameter sub-convention
keys = ['prt', 'unambiguous_range', 'prt_mode', 'nyquist_velocity']
for k in keys:
description = 'instrument_parameters: %s' % k
check_instrument_parameter.description = description
yield check_instrument_parameter, k
def check_instrument_parameter(param):
assert param in radar.instrument_parameters
param_dic = radar.instrument_parameters[param]
assert param_dic['meta_group'] == 'instrument_parameters'
# radar_parameters attribute
def test_radar_parameters():
# radar_parameter sub-convention
keys = ['radar_beam_width_h', 'radar_beam_width_v']
for k in keys:
description = 'radar_parameters: %s' % k
check_radar_parameter.description = description
yield check_radar_parameter, k
def check_radar_parameter(param):
assert param in radar.instrument_parameters
param_dic = radar.instrument_parameters[param]
assert param_dic['meta_group'] == 'radar_parameters'
# radar_calibration attribute
def test_radar_calibration():
assert radar.radar_calibration is None
# ngates attribute
def test_ngates():
assert radar.ngates == 110
# nrays attribute
def test_nrays():
assert radar.nrays == 360
# nsweeps attribute
def test_nsweeps():
assert radar.nsweeps == 1
####################
# fields attribute #
####################
def test_field_dics():
fields = ['reflectivity', ]
for field in fields:
description = "field : %s, dictionary" % field
check_field_dic.description = description
yield check_field_dic, field
def check_field_dic(field):
""" Check that the required keys are present in a field dictionary. """
assert 'standard_name' in radar.fields[field]
assert 'units' in radar.fields[field]
assert '_FillValue' in radar.fields[field]
assert 'coordinates' in radar.fields[field]
def test_field_shapes():
fields = ['reflectivity', ]
for field in fields:
description = "field : %s, shape" % field
check_field_shape.description = description
yield check_field_shape, field
def check_field_shape(field):
assert radar.fields[field]['data'].shape == (360, 110)
def test_field_types():
fields = {'reflectivity': MaskedArray, }
for field, field_type in fields.items():
description = "field : %s, type" % field
check_field_type.description = description
yield check_field_type, field, field_type
def check_field_type(field, field_type):
assert type(radar.fields[field]['data']) is field_type
def test_field_first_points():
# these values can be found using:
# [round(radar.fields[f]['data'][0,0]) for f in radar.fields]
fields = {'reflectivity': 24.0}
for field, field_value in fields.items():
description = "field : %s, first point" % field
check_field_first_point.description = description
yield check_field_first_point, field, field_value
def check_field_first_point(field, value):
assert_almost_equal(radar.fields[field]['data'][0, 0], value, 0)
#############
# RHI tests #
#############
RADAR_RHI = pyart.io.read_mdv(pyart.testing.MDV_RHI_FILE,
delay_field_loading=True)
# nsweeps attribute
def test_rhi_nsweeps():
assert RADAR_RHI.nsweeps == 1
# sweep_number attribute
def test_rhi_sweep_number():
assert 'standard_name' in RADAR_RHI.sweep_number
assert np.all(RADAR_RHI.sweep_number['data'] == range(1))
# sweep_mode attribute
def test_rhi_sweep_mode():
assert 'standard_name' in RADAR_RHI.sweep_mode
assert RADAR_RHI.sweep_mode['data'].shape == (1, )
assert np.all(RADAR_RHI.sweep_mode['data'] == [b'rhi'])
# fixed_angle attribute
def test_rhi_fixed_angle():
assert 'standard_name' in RADAR_RHI.fixed_angle
assert 'units' in RADAR_RHI.fixed_angle
assert RADAR_RHI.fixed_angle['data'].shape == (1, )
assert_almost_equal(RADAR_RHI.fixed_angle['data'][0], 189.00, 2)
# sweep_start_ray_index attribute
def test_rhi_sweep_start_ray_index():
assert 'long_name' in RADAR_RHI.sweep_start_ray_index
assert RADAR_RHI.sweep_start_ray_index['data'].shape == (1, )
assert_almost_equal(RADAR_RHI.sweep_start_ray_index['data'][0], 0, 0)
# sweep_end_ray_index attribute
def test_rhi_sweep_end_ray_index():
assert 'long_name' in RADAR_RHI.sweep_end_ray_index
assert RADAR_RHI.sweep_end_ray_index['data'].shape == (1, )
assert_almost_equal(RADAR_RHI.sweep_end_ray_index['data'][0], 282, 0)
# azimuth attribute
def test_rhi_azimuth():
assert 'standard_name' in RADAR_RHI.azimuth
assert 'long_name' in RADAR_RHI.azimuth
assert 'units' in RADAR_RHI.azimuth
assert 'axis' in RADAR_RHI.azimuth
assert_almost_equal(RADAR_RHI.azimuth['data'][0], 189, 0)
assert_almost_equal(RADAR_RHI.azimuth['data'][10], 189, 0)
# elevation attribute
def test_rhi_elevation():
assert 'standard_name' in RADAR_RHI.elevation
assert 'long_name' in RADAR_RHI.azimuth
assert 'units' in RADAR_RHI.elevation
assert 'axis' in RADAR_RHI.elevation
assert RADAR_RHI.elevation['data'].shape == (283, )
assert_almost_equal(RADAR_RHI.elevation['data'][0], 19.6, 2)
# field data
def test_rhi_field_data():
assert_almost_equal(RADAR_RHI.fields['reflectivity']['data'][0, 0],
23.93, 2)
def test_open_from_file_obj():
fh = open(pyart.testing.MDV_PPI_FILE, 'rb')
    radar = pyart.io.read_mdv(fh)
fh.close()
def test_radar_exclude_fields():
# skip fields
radar = pyart.io.read_mdv(
pyart.testing.MDV_PPI_FILE, exclude_fields=['reflectivity'])
assert 'reflectivity' not in radar.fields
| 29.179272
| 75
| 0.707977
| 0
| 0
| 1,703
| 0.163483
| 0
| 0
| 0
| 0
| 2,742
| 0.263224
|
05d878ca2e433fc4c0d9802abde19f10dbc8863e
| 2,430
|
py
|
Python
|
model/UserAccess.py
|
EmbeddedSoftwareCaiShuPeng/vehicleDispatcher
|
aacebb1656fe095485041de0bcbb67627e384abc
|
[
"MIT"
] | 1
|
2016-04-27T14:23:53.000Z
|
2016-04-27T14:23:53.000Z
|
model/UserAccess.py
|
EmbeddedSoftwareCaiShuPeng/vehicleDispatcher
|
aacebb1656fe095485041de0bcbb67627e384abc
|
[
"MIT"
] | null | null | null |
model/UserAccess.py
|
EmbeddedSoftwareCaiShuPeng/vehicleDispatcher
|
aacebb1656fe095485041de0bcbb67627e384abc
|
[
"MIT"
] | null | null | null |
import uuid, json, os, pymongo
from models import User
def addUser(user):
res = {}
res['result'] = 1
res['message'] = ''
if User.insert_one(user).inserted_id != '':
res['message'] = 'success'
else:
res['result'] = 0
res['message'] = 'Fail to add user in database!'
return res
def deleteUserById(id):
res = {}
res['result'] = 1
res['message'] = ''
if User.delete_one({'id': id}).deleted_count == 1:
res['message'] = 'success'
else:
res['result'] = 0
res['message'] = 'Fail to delete user in database!'
return res
def editUser(user):
res = {}
res['result'] = 1
res['message'] = ''
if User.update({'id': user['id']}, {"$set": user}):
res['message'] = 'success'
else:
res['result'] = 0
res['message'] = 'Fail to modify user in database!'
return res
def getAllUser():
userList = []
for item in User.find():
user = {
'id' : item['id'],
'user_name' : item['user_name'],
'password' : item['password'],
'type' : item['type'],
'project_id' : item['project_id'],
'vehicle_id' : item['vehicle_id'],
'name' : item['name'],
'phone' : item['phone'],
'status' : item['status'],
}
userList.append(user)
return userList
def getUserByName(name):
item = User.find_one({'name': name})
if item:
user = {
'id' : item['id'],
'user_name' : item['user_name'],
'password' : item['password'],
'type' : item['type'],
'project_id' : item['project_id'],
'vehicle_id' : item['vehicle_id'],
'name' : item['name'],
'phone' : item['phone'],
'status' : item['status'],
}
return user
else:
return None
def getUserById(id):
item = User.find_one({'id': id})
if item:
user = {
'id' : item['id'],
'user_name' : item['user_name'],
'password' : item['password'],
'type' : item['type'],
'project_id' : item['project_id'],
'vehicle_id' : item['vehicle_id'],
'name' : item['name'],
'phone' : item['phone'],
'status' : item['status'],
}
return user
else:
return None
| 24.545455
| 59
| 0.475309
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 747
| 0.307407
|
05ddcfc4ce86d56934f5e0733a719cb7c2450e6f
| 969
|
py
|
Python
|
sdk/python/pulumi_google_native/genomics/v1alpha2/_enums.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/genomics/v1alpha2/_enums.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/genomics/v1alpha2/_enums.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'DiskType',
]
class DiskType(str, Enum):
"""
Required. The type of the disk to create.
"""
TYPE_UNSPECIFIED = "TYPE_UNSPECIFIED"
"""
Default disk type. Use one of the other options below.
"""
PERSISTENT_HDD = "PERSISTENT_HDD"
"""
Specifies a Google Compute Engine persistent hard disk. See https://cloud.google.com/compute/docs/disks/#pdspecs for details.
"""
PERSISTENT_SSD = "PERSISTENT_SSD"
"""
Specifies a Google Compute Engine persistent solid-state disk. See https://cloud.google.com/compute/docs/disks/#pdspecs for details.
"""
LOCAL_SSD = "LOCAL_SSD"
"""
Specifies a Google Compute Engine local SSD. See https://cloud.google.com/compute/docs/disks/local-ssd for details.
"""
| 30.28125
| 136
| 0.672859
| 744
| 0.767802
| 0
| 0
| 0
| 0
| 0
| 0
| 783
| 0.80805
|
05df1e31c5373f19f615a0dfa51f726a3fbefbbb
| 634
|
py
|
Python
|
plugins/startHelp.py
|
REX-BOTZ/MegaUploaderbot-1
|
025fd97344da388fe607f5db73ad9f4435f51baa
|
[
"Apache-2.0"
] | 2
|
2021-11-12T13:15:03.000Z
|
2021-11-13T12:17:33.000Z
|
plugins/startHelp.py
|
REX-BOTZ/MegaUploaderbot-1
|
025fd97344da388fe607f5db73ad9f4435f51baa
|
[
"Apache-2.0"
] | null | null | null |
plugins/startHelp.py
|
REX-BOTZ/MegaUploaderbot-1
|
025fd97344da388fe607f5db73ad9f4435f51baa
|
[
"Apache-2.0"
] | 1
|
2022-01-07T09:55:53.000Z
|
2022-01-07T09:55:53.000Z
|
#!/usr/bin/env python3
"""Importing"""
# Importing Common Files
from botModule.importCommon import *
"""Start Handler"""
@Client.on_message(filters.private & filters.command("start"))
async def start_handler(bot:Update, msg:Message):
if await search_user_in_community(bot, msg):
await msg.reply_text(BotMessage.start_msg, parse_mode = 'html')
return
"""Help Handler"""
@Client.on_message(filters.private & filters.command("help"))
async def help_handler(bot:Update, msg:Message):
if await search_user_in_community(bot, msg):
await msg.reply_text(BotMessage.help_msg, parse_mode = 'html')
return
| 26.416667
| 71
| 0.728707
| 0
| 0
| 0
| 0
| 485
| 0.764984
| 360
| 0.567823
| 123
| 0.194006
|
05e108ee92867afb8794b956bcf9b413dc00ac01
| 206
|
py
|
Python
|
webSys/dbweb/util/__init__.py
|
Qiumy/FIF
|
8c9c58504ecab510dc0a96944f0031a3fd513d74
|
[
"Apache-2.0"
] | 2
|
2018-12-21T02:01:03.000Z
|
2019-10-17T08:07:04.000Z
|
webSys/dbweb/util/__init__.py
|
Qiumy/FIF
|
8c9c58504ecab510dc0a96944f0031a3fd513d74
|
[
"Apache-2.0"
] | null | null | null |
webSys/dbweb/util/__init__.py
|
Qiumy/FIF
|
8c9c58504ecab510dc0a96944f0031a3fd513d74
|
[
"Apache-2.0"
] | 1
|
2018-06-01T07:56:09.000Z
|
2018-06-01T07:56:09.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint
filter_blueprint = Blueprint('filters', __name__)
# Register all the filter.
from . import time_process, text_process, user_manage
| 29.428571
| 53
| 0.747573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 80
| 0.38835
|
05e10cbd60c9a8c4e9d6e849c57e56e13a3dc1f5
| 3,596
|
py
|
Python
|
Code/network_model_HiCoDe.py
|
AbinavRavi/Network_Analysis_Eur_Parl
|
dea84d3375eea07676e0193d575e3deef76312bc
|
[
"MIT"
] | 1
|
2020-12-15T16:35:20.000Z
|
2020-12-15T16:35:20.000Z
|
Code/network_model_HiCoDe.py
|
AbinavRavi/Network_Analysis_Eur_Parl
|
dea84d3375eea07676e0193d575e3deef76312bc
|
[
"MIT"
] | null | null | null |
Code/network_model_HiCoDe.py
|
AbinavRavi/Network_Analysis_Eur_Parl
|
dea84d3375eea07676e0193d575e3deef76312bc
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy as sp
import pandas as pd
import ast
import itertools
from itertools import product
from collections import Counter
import networkx as nx
import network_utils as nu
import hicode as hc
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.style.use('classic')
# -----------------------------------------------------------------------------------------------------------------------
## Loading data
topicDF = pd.read_csv('../Topics/topicsData350.csv')
topicDF['date'] = pd.to_datetime(topicDF['date'])
# topicDF_part = topicDF[(topicDF.date < '2001-07-01') & (topicDF.date >= '2000-07-01')]
# topicDF_part = topicDF[topicDF.date == '2000-07-01']
sit = 0
count = Counter([])
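# each loop iteration selects a three-month window of sittings, starting at
# 1999-07-01 and sliding forward by one month per iteration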
for i in range(58):
year = 1999 + (i + 6) // 12
month = (i + 6) % 12 + 1
date = '{:4d}-{:02d}-01'.format(year, month)
year = 1999 + (i + 9) // 12
month = (i + 9) % 12 + 1
date2 = '{:4d}-{:02d}-01'.format(year, month)
topicDF_part = topicDF[(topicDF.date < date2) & (topicDF.date >= date)]
if topicDF_part.shape[0] == 0:
continue
else:
sit += 1
    with open('../data/outliers.txt', 'a') as f:
        f.write('{:s}\n'.format(date))
    print(date)
# -----------------------------------------------------------------------------------------------------------------------
## Building network
network = nu.build_network(topicDF_part, 350, exclude=[])
#print(len(network.nodes()))
bottom_nodes = [n for n in network.nodes() if n not in range(350)]
network = nu.fold_network(network, bottom_nodes, mode='single')
network = nu.normalize_edgeweight(network)
# -----------------------------------------------------------------------------------------------------------------------
## Analyzing network
networks, partitions = hc.hicode(network, True)
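    # candidate "hidden" edges: node pairs that the base partition places in
    # different communities but that every later HiCoDe layer groups together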
candidates = [(u, v) for u, v in product(network.nodes(), network.nodes()) if
u != v and partitions[0][u] != partitions[0][v]]
for i in range(1,len(partitions)):
candidates = [(u,v) for u, v in candidates if partitions[i][u] == partitions[i][v]]
candidates = [(u,v) for u,v in candidates]
# candidates.sort()
# candidates = list(k for k,_ in itertools.groupby(candidates))
# print(candidates)
# candidates = [tuple(c) for c in candidates ]
count+=Counter(candidates)
count = dict(count)
count = sorted(count.items(), key=lambda kv: kv[1], reverse=True)
with open('../Results_Hicode/first_session_redweight.txt', 'w') as f:
f.write('Total sittings: {:d}\n\n'.format(int(sit)))
for k, v in count:
f.write('{:s}: {:d}, {:f}\n'.format(str(k), int(v), v / sit))
# -----------------------------------------------------------------------------------------------------------------------
## Drawing network
# for i in range(len(networks)):
# plt.figure()
# values = [partitions[0].get(n) for n in networks[i].nodes()]
# removeE = [e for e in networks[i].edges() if partitions[i][e[0]] != partitions[i][e[1]]]
# networks[i].remove_edges_from(removeE)
# pos = nx.spring_layout(networks[i], iterations=15, weight='weight')
# sizes = [50 * nu.node_weight(networks[i], node) for node in networks[i].nodes()]
# weights = [networks[i][u][v]['weight'] for u, v, in networks[i].edges()]
# nc = nx.draw_networkx_nodes(networks[i], pos, with_labels=False, node_color=values, node_size=sizes, alpha=0.4,
# cmap=cm.gist_rainbow)
# nx.draw_networkx_edges(networks[i], pos, width=weights)
# plt.axis('off')
# plt.colorbar(nc)
# plt.show()
| 38.666667
| 121
| 0.547553
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,873
| 0.520857
|
05e2589d4291356b8e585fa87a27f0d7fe177954
| 209
|
py
|
Python
|
py_battlescribe/shared/rules.py
|
akabbeke/py_battlescribe
|
7f96d44295d46810268e666394e3e3238a6f2c61
|
[
"MIT"
] | 1
|
2021-11-17T22:00:21.000Z
|
2021-11-17T22:00:21.000Z
|
py_battlescribe/shared/rules.py
|
akabbeke/py_battlescribe
|
7f96d44295d46810268e666394e3e3238a6f2c61
|
[
"MIT"
] | null | null | null |
py_battlescribe/shared/rules.py
|
akabbeke/py_battlescribe
|
7f96d44295d46810268e666394e3e3238a6f2c61
|
[
"MIT"
] | null | null | null |
from ..bs_node.iterable import BSNodeIterable
from ..bs_reference.iter import BSReferenceIter
class SharedRules(BSNodeIterable):
_tag_name = 'sharedRules'
_iter_child_class = BSReferenceIter('Rule')
| 26.125
| 47
| 0.794258
| 113
| 0.54067
| 0
| 0
| 0
| 0
| 0
| 0
| 19
| 0.090909
|
05e43c552c5879146cf3f036c106616fa493ebaa
| 5,487
|
py
|
Python
|
priorgen/pca_utils.py
|
joshjchayes/PriorGen
|
228be0b06dca29ad2ad33ae216f494eaead6161f
|
[
"MIT"
] | 1
|
2021-12-09T10:29:20.000Z
|
2021-12-09T10:29:20.000Z
|
priorgen/pca_utils.py
|
joshjchayes/PriorGen
|
228be0b06dca29ad2ad33ae216f494eaead6161f
|
[
"MIT"
] | null | null | null |
priorgen/pca_utils.py
|
joshjchayes/PriorGen
|
228be0b06dca29ad2ad33ae216f494eaead6161f
|
[
"MIT"
] | null | null | null |
'''
pca_utils.py
Module containing functions to run PCAs, and generate diagnostic plots
'''
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
def run_PCA(parameters, observables, n_components):
'''
Runs a principal component analysis to reduce dimensionality of
observables.
Parameters
----------
parameters : array_like, shape (N, M)
The physical parameter values for each point we are training the
ML classifier on. N is the number of points, whilst M is the
physical value for each parameter. These are all assumed to be in
the same order. We assume that there are M variables in the model,
and that none of them are constants.
observables : array_like, shape (N, X)
The observables associated with each of the parameters. We assume
that the observables are 1D arrays where each entry is directly
comparable. For example, it could be F(t), but where each entry is
at the same value of t.
n_components : int
The number of principal components to keep
Returns
-------
pca : sklearn.decomposition.PCA
The scikit-learn PCA object
reduced_d_observables : array_like, shape(N, n_components)
The observables after PCA has been applied to them
'''
pca = PCA(n_components=n_components)
fitted_pca = pca.fit(observables)
reduced_d_observables = fitted_pca.transform(observables)
return pca, reduced_d_observables
def pca_plot(parameters, observables, n_components, save=True,
save_path='PCA_plot.pdf'):
'''
Produces a plot of the explained variance of the first n_components
principal components, along with a cumulative variance
Parameters
----------
parameters : array_like, shape (N, M)
The physical parameter values for each point we are training the
ML classifier on. N is the number of points, whilst M is the
physical value for each parameter. These are all assumed to be in
the same order. We assume that there are M variables in the model,
and that none of them are constants.
observables : array_like, shape (N, X)
The observables associated with each of the parameters. We assume
that the observables are 1D arrays where each entry is directly
comparable. For example, it could be F(t), but where each entry is
at the same value of t.
n_components : int
The number of principal components to keep
save : bool, optional:
If True, will save the output figure to save_path. Default is True.
save_path : str, optional
If save is True, this is the path that the figures will
be saved to. Default is 'PCA_plot.pdf'.
Returns
-------
fig : matplotlib.Figure
The pca plot
'''
pca, _ = run_PCA(parameters, observables, n_components)
variance = pca.explained_variance_ratio_
cumulative_variance = np.cumsum(variance).round(4)
fig, ax = plt.subplots(2,1, sharex=True)
    # Plot the explained variance of each principal component as a bar chart
ax[0].bar(np.arange(n_components), variance, label='Associated variance')
#ax[0].set_xlabel('Principal component')
ax[0].set_ylabel('Fractional variance')
ax[0].set_yscale('log')
ax[1].plot(np.arange(n_components), cumulative_variance, 'r', label='Cumulative variance')
ax[1].set_xlabel('Principal component')
ax[1].set_ylabel('Cumulative variance')
ax[1].margins(x=0.01)
fig.tight_layout()
fig.subplots_adjust(hspace=0)
if save:
fig.savefig(save_path)
return fig
def find_required_components(parameters, observables, variance):
'''
Calculates the number of principal components required for reduced
dimensionality obserables to contain a given fraction of explained variance
Parameters
----------
parameters : array_like, shape (N, M)
The physical parameter values for each point we are training the
ML classifier on. N is the number of points, whilst M is the
physical value for each parameter. These are all assumed to be in
the same order. We assume that there are M variables in the model,
and that none of them are constants.
observables : array_like, shape (N, X)
The observables associated with each of the parameters. We assume
that the observables are 1D arrays where each entry is directly
comparable. For example, it could be F(t), but where each entry is
at the same value of t.
variance : float
The fraction of explained variance you want the principal components
to contain
Returns
-------
n_components : int
The smallest number of principal comonents required to contain the
specified fraction of explained variance
'''
if not 0 <= variance < 1:
raise ValueError('variance must be between 0 and 1')
# run PCA and keep all components
pca, _ = run_PCA(parameters, observables, None)
cumulative_variance = np.cumsum(pca.explained_variance_ratio_)
# The +1 is required because the first part finds an index where the
# cumulative explained variance ratio is larger than the threshold
# and the indices start from 0
n_PCs = np.where(cumulative_variance >= variance)[0][0] + 1
if n_PCs > 30:
print('WARNING: {} principal components are required - this may lead to slow run times.'.format(n_PCs))
return n_PCs
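# A minimal usage sketch on synthetic data (the random arrays and the 95%
# variance target below are illustrative assumptions, not part of PriorGen);
# it only runs when the module is executed directly.
if __name__ == '__main__':
    params = np.random.rand(100, 3)
    observables = np.random.rand(100, 50)
    n_components = find_required_components(params, observables, 0.95)
    pca, reduced = run_PCA(params, observables, n_components)
    print('components needed for 95% variance:', n_components)
    print('reduced observables shape:', reduced.shape)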
| 35.862745
| 111
| 0.686896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,072
| 0.742118
|
05e5ab63cfbf61b1260c3430dac86bcf4cae1b06
| 17,452
|
py
|
Python
|
prompt_tuning/data/super_glue.py
|
techthiyanes/prompt-tuning
|
9f4d7082aa6dbd955e38488d6d3fa5a7c039f6c7
|
[
"Apache-2.0"
] | 108
|
2021-11-05T21:44:27.000Z
|
2022-03-31T14:19:30.000Z
|
prompt_tuning/data/super_glue.py
|
techthiyanes/prompt-tuning
|
9f4d7082aa6dbd955e38488d6d3fa5a7c039f6c7
|
[
"Apache-2.0"
] | 172
|
2022-02-01T00:08:39.000Z
|
2022-03-31T12:44:07.000Z
|
prompt_tuning/data/super_glue.py
|
dumpmemory/prompt-tuning
|
bac77e4f5107b4a89f89c49b14d8fe652b1c5734
|
[
"Apache-2.0"
] | 9
|
2022-01-16T11:55:18.000Z
|
2022-03-06T23:26:36.000Z
|
# Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Special version of the SuperGlue Tasks.
The main task formats here are:
* super_glue_{name}_v102_examples
* mt5_super_glue_{name}_v102_examples
* taskless_super_glue_{name}_v102
* taskless_super_glue_{name}_v102_examples
* mt5_taskless_super_glue_{name}_v102
* mt5_taskless_super_glue_{name}_v102_examples
Any task that starts with `mT5` uses the `mT5` vocab. Any task that ends with
`examples` is setup to log intermediate examples to tensorboard. Any task with
`taskless` does not have the task name as the initial text token (like t5 tasks
do). Any task with `task_index` in the name has a special task index as the
initial post-integerization token.
"""
import functools
from prompt_tuning.data import features
from prompt_tuning.data import metrics as pt_metrics
from prompt_tuning.data import postprocessors as pt_postprocessors
from prompt_tuning.data import preprocessors as pt_preprocessors
from prompt_tuning.data import utils
import seqio
from t5.data import postprocessors
from t5.data import preprocessors
from t5.data.glue_utils import get_glue_postprocess_fn
from t5.data.glue_utils import get_glue_text_preprocessor
from t5.data.glue_utils import get_super_glue_metric
from t5.evaluation import metrics
import tensorflow_datasets as tfds
super_glue_task_indexer = utils.task_mapping(
tuple(b.name
for b in tfds.text.super_glue.SuperGlue.builder_configs.values()), {
"wsc.fixed": "wsc",
"axb": "rte",
"axg": "rte"
})
for model_prefix, feats in features.MODEL_TO_FEATURES.items():
for log_examples in (True, False):
# ========== SuperGlue ==========
# This section adds the core SuperGlue tasks. We do not include WSC in this
# loop WSC has different setting for training and validation because t5
# casts it as a short text generation task instead of as classification (via
# generation of class labels). We will add that as a mixture later.
for b in tfds.text.super_glue.SuperGlue.builder_configs.values():
if "wsc" in b.name:
continue
if log_examples:
postprocess_fn = functools.partial(
pt_postprocessors.postprocess_with_examples,
get_glue_postprocess_fn(b))
metric_fns = [
functools.partial(pt_metrics.metric_with_examples, func)
for func in get_super_glue_metric(b.name)
] + [functools.partial(pt_metrics.text_examples, task_name=b.name)]
examples_suffix = "_examples"
else:
postprocess_fn = get_glue_postprocess_fn(b)
metric_fns = get_super_glue_metric(b.name)
examples_suffix = ""
# The axb task needs to be rekeyed before we apply the glue text
# preprocessor, instead of detecting this and registering axb different
# (which would need to be repeated for each variant of the dataset we
# have) we have a list of preprocessors, for most tasks this is empty and
# for axb it has the rekey function. Then when we register a task we add
# the text processor to this list and it all works out. We can't
# predefined the full list upfront (like they do in t5) because the actual
# text preprocessor can be different for tasks like the taskless version.
pre_preprocessors = []
if b.name == "axb":
pre_preprocessors = [
functools.partial(
preprocessors.rekey,
key_map={
"premise": "sentence1",
"hypothesis": "sentence2",
"label": "label",
"idx": "idx"
})
]
# The default tasks have already be register elsewhere so only add the
# example logging version
if log_examples:
seqio.TaskRegistry.add(
f"{model_prefix}super_glue_{b.name}_v102{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name=f"super_glue/{b.name}:1.0.2",
splits=["test"] if b.name in ["axb", "axg"] else None),
preprocessors=pre_preprocessors + [
get_glue_text_preprocessor(b), seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats,
)
# This version of the task removes the initial text token of the dataset
# name
seqio.TaskRegistry.add(
f"{model_prefix}taskless_super_glue_{b.name}_v102{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name=f"super_glue/{b.name}:1.0.2",
splits=["test"] if b.name in ["axb", "axg"] else None),
preprocessors=pre_preprocessors + [
get_glue_text_preprocessor(b),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats,
)
# This version of the task adds a task index to the first token.
seqio.TaskRegistry.add(
f"{model_prefix}task_index_super_glue_{b.name}_v102{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name=f"super_glue/{b.name}:1.0.2",
splits=["test"] if b.name in ["axb", "axg"] else None),
preprocessors=pre_preprocessors + [
get_glue_text_preprocessor(b),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer[b.name]),
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats,
)
# ========= Definite Pronoun Resolution =========
# Similar to the Winograd Schema Challenge but doesn't require semantic
# knowledge to disambiguate between two different options. Training on this
# has been shown to be effective for increasing performance on WSC.
# [Kocijan, et. al., 2019](https://arxiv.org/abs/1905.06290)
if log_examples:
dpr_postprocess_fn = functools.partial(
pt_postprocessors.postprocess_with_examples, utils.identity),
dpr_metric_fns = [
functools.partial(pt_metrics.metric_with_examples, metrics.accuracy)
] + [functools.partial(pt_metrics.text_examples, task_name="dpr")]
else:
dpr_postprocess_fn = utils.identity
dpr_metric_fns = [metrics.accuracy]
# DPR without the initial dataset text token.
seqio.TaskRegistry.add(
f"{model_prefix}taskless_dpr_v001_simple{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name="definite_pronoun_resolution:1.1.0"),
preprocessors=[
preprocessors.definite_pronoun_resolution_simple,
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=dpr_postprocess_fn,
metric_fns=dpr_metric_fns,
output_features=feats,
)
seqio.TaskRegistry.add(
f"{model_prefix}task_index_dpr_v001_simple{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name="definite_pronoun_resolution:1.1.0"),
preprocessors=[
preprocessors.definite_pronoun_resolution_simple,
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer["wsc"]),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=dpr_postprocess_fn,
metric_fns=metric_fns,
output_features=feats,
)
# ========== WSC ==========
# This adds a "simplified" version of WSC like they do in t5. Instead of
# predicting if the supplied referent matches the highlighted pronoun in the
# text, the model generate a referent. If the referent matches the supplied
# one then the model predictions True, otherwise it will predict false. This
# means that we can only train on examples where the referent is correct.
# T5 does WSC in two different tasks. The first is a training task that only
# uses examples where the referent is true. We never do any evaluation on
# this dataset so the training data doesn't need anything like post
# processors or metric_fns. The second task is the evaluation task. This
# considers all examples and does use the output functions. These tasks are
# then combined into a mixture.
# Looking at positive and negative examples of WSC can be hard. If the label
# is 1 then the target referent should match the models predicted referent.
# If they match this examples was correct, if they don't the model was
# wrong. If the label is 0, then the target referent is not correct and we
# hope the model output something different.
if log_examples:
postprocess_fn = functools.partial(
pt_postprocessors.postprocess_with_examples,
postprocessors.wsc_simple)
metric_fns = [
functools.partial(pt_metrics.metric_with_examples, metrics.accuracy),
functools.partial(pt_metrics.text_examples, task_name="wsc")
]
else:
postprocess_fn = postprocessors.wsc_simple
metric_fns = [metrics.accuracy]
if log_examples:
# This version outputs examples to tensorboard.
seqio.TaskRegistry.add(
f"{model_prefix}super_glue_wsc_v102_simple_eval{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2",
splits=("validation", "test")),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=False),
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats)
# This mixture is WSC where predictions are output to tensorboard.
seqio.MixtureRegistry.add(
f"{model_prefix}super_glue_wsc_and_dev_v102_simple{examples_suffix}",
[
# We don't need a special version of the training data because it
# is never processed for output anyway.
f"{model_prefix}super_glue_wsc_v102_simple_train",
f"{model_prefix}super_glue_wsc_v102_simple_eval{examples_suffix}"
],
default_rate=1.0)
# This version remove the initial dataset text token.
seqio.TaskRegistry.add(
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2", splits=("train",)),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=True),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
metric_fns=[],
output_features=feats)
seqio.TaskRegistry.add(
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2",
splits=["validation", "test"]),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=False),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats)
seqio.MixtureRegistry.add(
(f"{model_prefix}taskless_super_glue_wsc_and_dev_v102_simple"
f"{examples_suffix}"),
[
# We don't need a special version of the training data because it is
# never processed for output anyway.
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}")
],
default_rate=1.0)
# This version adds a task index as the first token.
seqio.TaskRegistry.add(
(f"{model_prefix}task_index_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2", splits=("train",)),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=True),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer["wsc"]),
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
metric_fns=[],
output_features=feats)
seqio.TaskRegistry.add(
(f"{model_prefix}task_index_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2",
splits=["validation", "test"]),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=False),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer["wsc"]),
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats)
seqio.MixtureRegistry.add(
(f"{model_prefix}task_index_super_glue_wsc_and_dev_v102_simple"
f"{examples_suffix}"),
[(f"{model_prefix}task_index_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
(f"{model_prefix}task_index_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}")],
default_rate=1.0)
# =========== Mixtures ==========
# These are Mixtures of the task index tasks to train on all super glue tasks
# at once.
# This is a copy of the super glue weights from t5 but adapted to use the task
# index version of the datasets.
WEIGHT_MAPPING = {
"task_index_super_glue_wsc_v102_simple_train": 259.,
"task_index_super_glue_wsc_v102_simple_eval_examples": 0.,
"task_index_super_glue_boolq_v102_examples": 9_427.,
"task_index_super_glue_cb_v102_examples": 250.,
"task_index_super_glue_copa_v102_examples": 400.,
"task_index_super_glue_multirc_v102_examples": 27_243.,
"task_index_super_glue_record_v102_examples": 138_854.,
"task_index_super_glue_rte_v102_examples": 2_490.,
"task_index_super_glue_wic_v102_examples": 5_428.,
}
WEIGHT_MAPPING_WITH_DPR = {
"task_index_dpr_v001_simple_examples": 1_322.,
"task_index_super_glue_wsc_v102_simple_train": 259.,
"task_index_super_glue_wsc_v102_simple_eval_examples": 0.,
"task_index_super_glue_boolq_v102_examples": 9_427.,
"task_index_super_glue_cb_v102_examples": 250.,
"task_index_super_glue_copa_v102_examples": 400.,
"task_index_super_glue_multirc_v102_examples": 27_243.,
"task_index_super_glue_record_v102_examples": 138_854.,
"task_index_super_glue_rte_v102_examples": 2_490.,
"task_index_super_glue_wic_v102_examples": 5_428.,
}
seqio.MixtureRegistry.add("task_index_super_glue_v102_examples_proportional",
list(WEIGHT_MAPPING.items()))
seqio.MixtureRegistry.add(
"task_index_super_glue_with_dpr_v102_examples_proportional",
list(WEIGHT_MAPPING_WITH_DPR.items()))
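# Illustrative note (not part of the original module): with static per-task
# rates like the ones above, seqio samples each task roughly in proportion to
# rate / sum(rates). A minimal sketch of that normalisation, assuming the
# WEIGHT_MAPPING dict defined above:
#
#     total = sum(WEIGHT_MAPPING.values())
#     proportions = {name: rate / total for name, rate in WEIGHT_MAPPING.items()}
#
# e.g. the ReCoRD task dominates the mixture simply because it contributes the
# most training examples.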
| 42.77451
| 80
| 0.67276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,377
| 0.422702
|
05e5bab9ff77cdee550c0152d15077d78e190eff
| 952
|
py
|
Python
|
src/runtime/tasks.py
|
HitLuca/predict-python
|
14f2f55cb29f817a5871d4c0b11a3758285301ca
|
[
"MIT"
] | null | null | null |
src/runtime/tasks.py
|
HitLuca/predict-python
|
14f2f55cb29f817a5871d4c0b11a3758285301ca
|
[
"MIT"
] | null | null | null |
src/runtime/tasks.py
|
HitLuca/predict-python
|
14f2f55cb29f817a5871d4c0b11a3758285301ca
|
[
"MIT"
] | null | null | null |
from django_rq.decorators import job
from src.core.core import runtime_calculate
from src.jobs.models import JobStatuses
from src.jobs.ws_publisher import publish
from src.logs.models import Log
from src.utils.file_service import get_log
@job("default", timeout='1h')
def runtime_task(job, model):
print("Start runtime task ID {}".format(job.pk))
try:
job.status = JobStatuses.RUNNING.value
job.save()
log = Log.objects.get(pk=job.config['log_id'])
run_log = get_log(log.path)
result_data = runtime_calculate(run_log, model.to_dict())
result = result_data['prediction']
job.result = result
job.status = JobStatuses.COMPLETED.value
job.error = ''
except Exception as e:
print("error " + str(e.__repr__()))
job.status = JobStatuses.ERROR.value
job.error = str(e.__repr__())
raise e
finally:
job.save()
publish(job)
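# Usage sketch (assumption, not part of the original module): django_rq's
# ``job`` decorator exposes a ``delay`` helper that enqueues the call on the
# named queue ("default" here) instead of running it inline, e.g.:
#
#     runtime_task.delay(job_instance, model_instance)
#
# where ``job_instance`` is a saved Job row whose config contains 'log_id' and
# ``model_instance`` is a model object providing ``to_dict()``.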
| 30.709677
| 65
| 0.657563
| 0
| 0
| 0
| 0
| 710
| 0.745798
| 0
| 0
| 69
| 0.072479
|
05e6f09ddfc0212cb3f08469b5c83b81051137ad
| 99
|
py
|
Python
|
django_models_from_csv/__init__.py
|
themarshallproject/django-collaborative
|
1474b9737eaea35eb11b39380b35c2a801831d9c
|
[
"MIT"
] | 88
|
2019-05-17T19:52:44.000Z
|
2022-03-28T19:43:07.000Z
|
django_models_from_csv/__init__.py
|
themarshallproject/django-collaborative
|
1474b9737eaea35eb11b39380b35c2a801831d9c
|
[
"MIT"
] | 65
|
2019-05-17T20:06:18.000Z
|
2021-01-13T03:51:07.000Z
|
django_models_from_csv/__init__.py
|
themarshallproject/django-collaborative
|
1474b9737eaea35eb11b39380b35c2a801831d9c
|
[
"MIT"
] | 15
|
2019-07-09T20:48:14.000Z
|
2021-07-24T20:45:55.000Z
|
default_app_config = 'django_models_from_csv.apps.DjangoDynamicModelsConfig'
__version__ = "1.1.0"
| 33
| 76
| 0.838384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.626263
|
05e70bf4fcafed340bac69f51837c437a43b38d8
| 93
|
py
|
Python
|
utensor_cgen/backend/utensor/code_generator/__init__.py
|
uTensor/utensor_cgen
|
eccd6859028d0b6a350dced25ea72ff02faaf9ad
|
[
"Apache-2.0"
] | 49
|
2018-01-06T12:57:56.000Z
|
2021-09-03T09:48:32.000Z
|
utensor_cgen/backend/utensor/code_generator/__init__.py
|
uTensor/utensor_cgen
|
eccd6859028d0b6a350dced25ea72ff02faaf9ad
|
[
"Apache-2.0"
] | 101
|
2018-01-16T19:24:21.000Z
|
2021-11-10T19:39:33.000Z
|
utensor_cgen/backend/utensor/code_generator/__init__.py
|
uTensor/utensor_cgen
|
eccd6859028d0b6a350dced25ea72ff02faaf9ad
|
[
"Apache-2.0"
] | 32
|
2018-02-15T19:39:50.000Z
|
2020-11-26T22:32:05.000Z
|
from .legacy import uTensorLegacyCodeGenerator
from .rearch import uTensorRearchCodeGenerator
| 46.5
| 46
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
05ec45e9e0486f8c0920e8e4a6acabaf4897caee
| 417
|
py
|
Python
|
ch3/ricolisp/token.py
|
unoti/rico-lisp
|
367f625dcd086e207515bdeb5561763754a3531c
|
[
"MIT"
] | null | null | null |
ch3/ricolisp/token.py
|
unoti/rico-lisp
|
367f625dcd086e207515bdeb5561763754a3531c
|
[
"MIT"
] | null | null | null |
ch3/ricolisp/token.py
|
unoti/rico-lisp
|
367f625dcd086e207515bdeb5561763754a3531c
|
[
"MIT"
] | null | null | null |
from collections import UserString
from typing import List
class Token(UserString):
"""A string that has additional information about the source code for the string."""
def __init__(self, s: str, line_number:int, character_number: int, filename: str = None):
super().__init__(s)
self.line_number = line_number
self.character_number = character_number
self.filename = filename
| 37.909091
| 93
| 0.717026
| 356
| 0.853717
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.201439
|
05ed3bd6a82da190685915c3b42fde3a3b5e118a
| 2,655
|
py
|
Python
|
utils.py
|
ali-ramadhan/wxConch
|
1106ce17d25f96a038ca784029261faafd7cfaf9
|
[
"MIT"
] | 1
|
2019-03-09T01:10:59.000Z
|
2019-03-09T01:10:59.000Z
|
utils.py
|
ali-ramadhan/weather-prediction-model-consensus
|
1106ce17d25f96a038ca784029261faafd7cfaf9
|
[
"MIT"
] | 1
|
2019-08-19T12:26:06.000Z
|
2019-08-19T12:26:06.000Z
|
utils.py
|
ali-ramadhan/weather-prediction-model-consensus
|
1106ce17d25f96a038ca784029261faafd7cfaf9
|
[
"MIT"
] | null | null | null |
import os
import time
import math
import logging.config
from datetime import datetime
from subprocess import run
from urllib.request import urlopen, urlretrieve
from urllib.parse import urlparse, urljoin
import smtplib, ssl
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from bs4 import BeautifulSoup
logging.config.fileConfig("logging.ini", disable_existing_loggers=False)
logger = logging.getLogger(__name__)
HEADERS = {
"User-Agent": "wxConch (Python3.7) https://github.com/ali-ramadhan/wxConch",
"From": "alir@mit.edu"
}
def K2F(K):
return (K - 273.15) * (9/5) + 32
def download_file(url, local_filepath):
run(["wget", "-nc", url, "-O", local_filepath])
def make_soup(url):
html = urlopen(url).read()
return BeautifulSoup(html, features="lxml")
def download_images(url, filename=None):
soup = make_soup(url)
# Make a list of bs4 element tags.
images = [img for img in soup.findAll("img")]
logger.debug("{:s}: {:d} images found.".format(url, len(images)))
# Compile our unicode list of image links.
image_links = [img.get("src") for img in images]
for img_url in image_links:
if filename is None:
filename = img_url.split('/')[-1]
url_parts = urlparse(url)
real_img_url = url_parts.scheme + "://" + url_parts.netloc + img_url
logger.debug("Downloading image: {:s} -> {:s}".format(real_img_url, filename))
# urlretrieve(real_img_url, filename)
download_file(real_img_url, filename)
return image_links
def send_email(send_from, send_to, subject, text, files=None, gmail="wxconch.forecast@gmail.com"):
assert isinstance(send_to, list)
msg = MIMEMultipart()
msg["From"] = send_from
msg["To"] = COMMASPACE.join(send_to)
msg["Date"] = formatdate(localtime=True)
msg["Subject"] = subject
msg.attach(MIMEText(text))
for f in files or []:
with open(f, "rb") as fil:
part = MIMEApplication(fil.read(), Name=basename(f))
# After the file is closed
part['Content-Disposition'] = 'attachment; filename="%s"' % basename(f)
msg.attach(part)
port = 465 # For SSL
password = input("Gmail password for {:s}: ".format(gmail))
# Create a secure SSL context
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
server.login(gmail, password)
server.sendmail(send_from, send_to, msg.as_string())
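# Example usage (illustrative placeholders only):
#
#     send_email(
#         send_from="wxconch.forecast@gmail.com",
#         send_to=["recipient@example.com"],
#         subject="wxConch forecast",
#         text="Latest forecast attached.",
#         files=["forecast.png"],
#     )
#
# Note: the function prompts interactively for the Gmail password before sending.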
| 28.858696
| 98
| 0.680979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 529
| 0.199247
|
05ed9c8e8fd31a9e77da54a3f25437648359aef1
| 1,987
|
py
|
Python
|
aiida_fleur/cmdline/__init__.py
|
sphuber/aiida-fleur
|
df33e9a7b993a52c15a747a4ff23be3e19832b8d
|
[
"MIT"
] | null | null | null |
aiida_fleur/cmdline/__init__.py
|
sphuber/aiida-fleur
|
df33e9a7b993a52c15a747a4ff23be3e19832b8d
|
[
"MIT"
] | null | null | null |
aiida_fleur/cmdline/__init__.py
|
sphuber/aiida-fleur
|
df33e9a7b993a52c15a747a4ff23be3e19832b8d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the AiiDA-FLEUR package. #
# #
# The code is hosted on GitHub at https://github.com/JuDFTteam/aiida-fleur #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.flapw.de or #
# http://aiida-fleur.readthedocs.io/en/develop/ #
###############################################################################
'''
Module for the command line interface of AiiDA-FLEUR
'''
import click
import click_completion
from aiida.cmdline.params import options, types
from .launch import cmd_launch
from .data import cmd_data
from .workflows import cmd_workflow
from .visualization import cmd_plot
# Activate the completion of parameter types provided by the click_completion package
# for bash: eval "$(_AIIDA_FLEUR_COMPLETE=source aiida-fleur)"
click_completion.init()
# Instead of using entrypoints and directly injecting verdi commands into aiida-core
# we created our own separate CLI because verdi will probably change and become
# less material-science specific
@click.group('aiida-fleur', context_settings={'help_option_names': ['-h', '--help']})
@options.PROFILE(type=types.ProfileParamType(load_profile=True))
def cmd_root(profile): # pylint: disable=unused-argument
"""CLI for the `aiida-fleur` plugin."""
# To avoid circular imports all commands are not yet connected to the root
# but they have to be here because of bash completion
cmd_root.add_command(cmd_launch)
cmd_root.add_command(cmd_data)
cmd_root.add_command(cmd_workflow)
cmd_root.add_command(cmd_plot)
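# Usage sketch (illustrative): once installed with the ``aiida-fleur`` console
# entry point referenced in the completion comment above, the group can be
# explored with, e.g.:
#
#     aiida-fleur --help
#
# which lists whatever sub-commands were attached via add_command (cmd_launch,
# cmd_data, cmd_workflow, cmd_plot).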
| 43.195652
| 85
| 0.622043
| 0
| 0
| 0
| 0
| 252
| 0.126761
| 0
| 0
| 1,455
| 0.731891
|
05efd08ce434309fea6a153caaf4f36da65f692b
| 243
|
py
|
Python
|
textract/parsers/doc_parser.py
|
Pandaaaa906/textract
|
cee75460d3d43f0aa6f4967c6ccf069ee79fc560
|
[
"MIT"
] | 1,950
|
2015-01-01T18:30:11.000Z
|
2022-03-30T21:06:41.000Z
|
textract/parsers/doc_parser.py
|
nike199000/textract
|
9d739f807351fd9e430a193eca853f5f2171a82a
|
[
"MIT"
] | 322
|
2015-01-05T09:54:45.000Z
|
2022-03-28T17:47:15.000Z
|
textract/parsers/doc_parser.py
|
nike199000/textract
|
9d739f807351fd9e430a193eca853f5f2171a82a
|
[
"MIT"
] | 470
|
2015-01-14T11:51:42.000Z
|
2022-03-23T07:05:46.000Z
|
from .utils import ShellParser
class Parser(ShellParser):
"""Extract text from doc files using antiword.
"""
def extract(self, filename, **kwargs):
stdout, stderr = self.run(['antiword', filename])
return stdout
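# Usage sketch (assumes the standard textract entry point): this parser is
# normally reached through the package-level dispatcher, e.g.:
#
#     import textract
#     text = textract.process('report.doc')  # requires the `antiword` binary on PATH
#
# 'report.doc' is a placeholder filename.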
| 22.090909
| 57
| 0.654321
| 209
| 0.860082
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 0.263374
|
05f2bf19df0a5655faf30da01ad995b33a5ff920
| 4,674
|
py
|
Python
|
create_multi_langs/command_line.py
|
mychiux413/ConstConv
|
6c2190d1bb37ae5cfef8464f88371db97719b032
|
[
"MIT"
] | null | null | null |
create_multi_langs/command_line.py
|
mychiux413/ConstConv
|
6c2190d1bb37ae5cfef8464f88371db97719b032
|
[
"MIT"
] | null | null | null |
create_multi_langs/command_line.py
|
mychiux413/ConstConv
|
6c2190d1bb37ae5cfef8464f88371db97719b032
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import
from create_multi_langs.creater.go import CreaterGo
from create_multi_langs.creater.python import CreaterPython
from create_multi_langs.creater.python_typing import CreaterPythonTyping
from create_multi_langs.creater.typescript_backend import CreaterTypeScriptBackEnd # noqa: E501
from create_multi_langs.creater.typescript_frontend import CreaterTypeScriptFrontEnd # noqa: E501
from create_multi_langs.creater.javascript_backend import CreaterJavaScriptBackEnd # noqa: E501
from create_multi_langs.creater.javascript_frontend import CreaterJavaScriptFrontEnd # noqa: E501
import argparse
import time
import os
import sys
from functools import partial
VALID_EXTS = ['.py', '.go', '.ts', '.js', '.mjs']
def main():
parser = argparse.ArgumentParser(
description='Generate constant definitions for multiple languages from a CSV file.')
parser.add_argument(dest='from_csv',
type=str, help='Source CSV file to generate from')
parser.add_argument(dest='to_file',
type=str,
help='Output file path; supported extensions: .go .py .js .ts .mjs') # noqa: E501
parser.add_argument('--backend', '-b', action='store_true',
help='Default is generate frontend script for js/ts')
parser.add_argument('--py_typing', '-t', action='store_true',
help='Default is generate python script without typing') # noqa: E501
parser.add_argument('--watch', '-w', action='store_true',
help='Watch csv file changed')
parser.add_argument('--sep', '-s', default=',', type=str,
help='CSV field separator character')
naming_help = """specify your property style,
[valid options]
`ucc`(UpperCamelCase),
`lcc`(lowerCamelCase),
`upper`(ALL_UPERCASE_UNDERSCORE),
`lower`(all_lowercase_underscore)
[default setting]
Go: `ucc`,
Python: `lower`,
Typescript: `lcc`,
javascript: `lcc`
"""
parser.add_argument('--naming_rule', '-n', type=str,
help=naming_help)
args = parser.parse_args()
args.from_csv = os.path.abspath(args.from_csv)
args.to_file = os.path.abspath(args.to_file)
assert os.path.exists(args.from_csv), \
"The csv file `{}` doesn't exists".format(args.from_csv)
assert os.path.splitext(args.to_file)[1] in VALID_EXTS, \
"The extension filename must be in " + str(VALID_EXTS)
if os.path.exists(args.to_file):
print('[WARNING] the to_file `{}` already exists'.format(
args.to_file) +
', and will be overwritten.')
if args.watch:
try:
print('[Enable Watching Mode]')
print('[From CSV File] {}'.format(args.from_csv))
print('[To File] {}'.format(args.to_file))
last_mtime = os.stat(args.from_csv).st_mtime
while True:
time.sleep(0.5)
current_mtime = os.stat(args.from_csv).st_mtime
if current_mtime != last_mtime:
print('Detect csv file changed...')
_generate(args)
last_mtime = current_mtime
except KeyboardInterrupt:
print('Stop watching')
sys.exit(0)
if os.path.exists(args.to_file):
yes_no = input('Overwrite (y/n)?').lower()
if yes_no != "y":
print('Abort program')
sys.exit(0)
_generate(args)
def _generate(args: argparse.Namespace):
to_file = args.to_file
if to_file.endswith('.go'):
from_csv_file = CreaterGo.from_csv_file
elif to_file.endswith('.py'):
if args.py_typing:
from_csv_file = CreaterPythonTyping.from_csv_file
else:
from_csv_file = CreaterPython.from_csv_file
elif to_file.endswith('.ts'):
if args.backend:
from_csv_file = CreaterTypeScriptBackEnd.from_csv_file
else:
from_csv_file = CreaterTypeScriptFrontEnd.from_csv_file
elif to_file.endswith(('.js', '.mjs')):
if args.backend:
from_csv_file = CreaterJavaScriptBackEnd.from_csv_file
else:
from_csv_file = CreaterJavaScriptFrontEnd.from_csv_file
else:
raise ValueError(
"to_file must end with .go .py .ts .js or .mjs, but got {}".format(
to_file
))
if args.naming_rule:
from_csv_file = partial(from_csv_file, naming_rule=args.naming_rule)
creater = from_csv_file(
args.from_csv,
to_file,
sep=args.sep)
creater()
if __name__ == "__main__":
main()
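# Illustrative invocation (placeholder file names): the parser above takes a
# source CSV and a destination file whose extension selects the generated
# language, e.g.:
#
#     python command_line.py strings.csv constants.py --watch
#
# 'strings.csv' and 'constants.py' are hypothetical names; --watch regenerates
# the output whenever the CSV changes.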
| 37.095238
| 99
| 0.627942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,176
| 0.251605
|
05f359b7dd7f8c17e74d1e4576ab789a5ca9047c
| 297
|
py
|
Python
|
test_resources/run_tests.py
|
tud-python-courses/lesson-builder
|
11b1cc958723e9f75de27cde68daa0fdc18b929f
|
[
"MIT"
] | null | null | null |
test_resources/run_tests.py
|
tud-python-courses/lesson-builder
|
11b1cc958723e9f75de27cde68daa0fdc18b929f
|
[
"MIT"
] | null | null | null |
test_resources/run_tests.py
|
tud-python-courses/lesson-builder
|
11b1cc958723e9f75de27cde68daa0fdc18b929f
|
[
"MIT"
] | null | null | null |
__author__ = 'Justus Adam'
__version__ = '0.1'
def main():
import unittest
import sys
import os
m = os.path.dirname(__file__)
sys.path = [m, os.path.split(m)[0]] + sys.path
import test
unittest.main(test)
if __name__ == '__main__':
main()
else:
del main
| 13.5
| 50
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.094276
|
05f89c6e9f8cabc37acf4ef72901aa6289131ace
| 15,798
|
py
|
Python
|
parse_to_latex.py
|
bkolosk1/bkolosk1-CrossLingualKeywords
|
27cdc5075d1e30b02bb38891933a8fbb51957899
|
[
"MIT"
] | 2
|
2021-04-19T23:57:58.000Z
|
2021-11-02T08:40:16.000Z
|
parse_to_latex.py
|
bkolosk1/bkolosk1-CrossLingualKeywords
|
27cdc5075d1e30b02bb38891933a8fbb51957899
|
[
"MIT"
] | 1
|
2021-11-22T09:05:10.000Z
|
2021-11-22T09:05:10.000Z
|
bert/parse_to_latex.py
|
bkolosk1/Extending-Neural-Keyword-Extraction-with-TF-IDF-tagset-matching
|
d52b9b9e1fb9130239479b1830b0930161672325
|
[
"MIT"
] | null | null | null |
import re
def parse_to_latex():
configs = ['nolm', 'lm', 'maskedlm', 'lm+bp', 'lm+pos', 'lm+rnn', 'lm+bpe+rnn', 'lm+bpe+crf']
datasets = ['kp20k', 'inspec', 'krapivin', 'nus', 'semeval', 'kptimes', 'jptimes', 'duc']
config_dict = {}
with open('class_results-FINAL.txt', 'r', encoding='utf8') as file:
for line in file.readlines():
if line.startswith('Classification'):
config = line.split('_')[-3]
print(config)
if line.startswith('Dataset:'):
dataset = line.split()[-1]
print(dataset)
if line.startswith('Precision') and not line.startswith('Precision@M:') and not line.startswith(('Precision@k')):
measure = line.split()[-2][:-1]
score = line.split()[-1]
print(measure, score)
if config not in config_dict:
config_dict[config] = {}
config_dict[config][dataset] = [(measure, score)]
else:
if dataset in config_dict[config]:
config_dict[config][dataset].append((measure, score))
else:
config_dict[config][dataset] = [(measure, score)]
lines = []
average5 = []
average10 = []
for config in configs:
sum5 = 0
sum10 = 0
column = []
for dataset in datasets:
column.append(dataset)
for e in config_dict[config][dataset]:
column.append((e[0], e[1]))
if e[0].endswith('10'):
sum10 += float(e[1])
if e[0].endswith('5'):
sum5 += float(e[1])
sum10 = sum10/len(datasets)
sum5 = sum5/len(datasets)
average5.append(sum5)
average10.append(sum10)
lines.append(column)
print(lines)
print("& " + " & ".join(configs) + '\\\\\\hline')
for i in range(len(lines[0])):
if i % 3 == 0:
dataset = lines[0][i]
#print(dataset)
print('& \\multicolumn{8}{c}{\\textbf{' + dataset + '}}\\\\\\hline')
else:
#print(lines[0])
line = lines[0][i][0] + " & " + " & ".join([x[i][1] for x in lines]) + '\\\\'
print(line)
print('& \\multicolumn{7}{c}{\\textbf{Average}}\\\\\\hline')
print("F@5 & " + " & ".join(["{:.4f}".format(x) for x in average5]) + '\\\\')
print("F@10 & " + " & ".join(["{:.4f}".format(x) for x in average10]) + '\\\\')
#parse_to_latex()
def get_averages():
results ='''
& \multicolumn{9}{c}{\textbf{KP20k}} \\\hline
F1@5 & 0.072 & 0.181 & 0.141* & 0.177* & 0.046 & 0.005 & 0.317 & \textbf{0.348} & 0.252* & 0.339* & 0.342*\\
F1@10 & 0.094 & 0.151 & 0.146* & 0.160* & 0.044 & 0.005 & 0.273 & 0.298 & 0.256* & 0.342* & \textbf{0.346*}\\
\hline
& \multicolumn{9}{c}{\textbf{Inspec}} \\\hline
F1@5 & 0.160 & 0.286 & 0.204* & 0.101* & 0.022 & 0.035 & 0.244 & 0.276 & 0.293* & \textbf{0.467*} & 0.447*\\
F1@10 & 0.244 & 0.339 & 0.223* & 0.108* & 0.022 & 0.046 & 0.289 & 0.333 & 0.335* & \textbf{0.525*} & \textbf{0.525*}\\
\hline
& \multicolumn{9}{c}{\textbf{Krapivin}} \\\hline
F1@5 & 0.067 & 0.185 & 0.215* & 0.127* & 0.018 & 0.005 & 0.305 & \textbf{0.325} & 0.210* & 0.280* & 0.301*\\
F1@10 & 0.093 & 0.160 & 0.196* & 0.106* & 0.017 & 0.007 & 0.266 & 0.285 & 0.214* & 0.283* & \textbf{0.307*}\\
\hline
& \multicolumn{9}{c}{\textbf{NUS}} \\\hline
F1@5 & 0.112 & 0.230 & 0.159* & 0.224* & 0.073 & 0.004 & 0.376 & \textbf{0.374} & 0.274* & 0.311* & 0.350*\\
F1@10 & 0.140 & 0.216 & 0.196* & 0.193* & 0.071 & 0.006 & 0.352 & 0.366 & 0.305* & 0.332* & \textbf{0.369*}\\
\hline
& \multicolumn{9}{c}{\textbf{SemEval}} \\\hline
F1@5 & 0.088 & 0.217 & 0.151* & 0.167* & 0.068 & 0.011 & 0.318 & \textbf{0.327} & 0.261* & 0.214 & 0.291*\\
F1@10 & 0.147 & 0.226 & 0.212* & 0.159* & 0.065 & 0.014 & 0.318 & 0.352 & 0.295* & 0.232 & \textbf{0.355*}\\
\hline\hline
& \multicolumn{9}{c}{\textbf{KPTimes}} \\\hline
F1@5 & 0.179* & 0.022* & 0.105* & 0.168* & * & * & 0.406* & 0.424* & 0.353* & 0.439* & \textbf{0.469*}\\
F1@10 & 0.151* & 0.030* & 0.118* & 0.139* & * & * & 0.393 & 0.424* & 0.354* & 0.440* & \textbf{0.469*}\\\hline
& \multicolumn{9}{c}{\textbf{JPTimes}} \\\hline
F1@5 & 0.266* & 0.012* & 0.109* & 0.225* & * & * & 0.256* & 0.238* & 0.258* & \textbf{0.344*} & 0.337*\\
F1@10 & 0.229* & 0.026* & 0.135* & 0.185* & * & * & 0.246 & 0.238* & 0.267* & 0.346* & \textbf{0.360*}\\\hline
& \multicolumn{9}{c}{\textbf{DUC}} \\\hline
F1@5 & 0.098* & 0.120* & 0.106* & 0.189* & * & * & 0.083 & 0.063* & 0.247* & 0.281* & \textbf{0.312*}\\
F1@10 & 0.120* & 0.181* & 0.132* & 0.172* & * & * & 0.105 & 0.063* & 0.277* & 0.321* & \textbf{0.355*}\\\hline
'''
f5s = [[], [], [], [], [], [], [], [], [], [], []]
f10s = [[], [], [], [], [], [], [], [], [], [], []]
for line in results.split('\n'):
line = line.strip()
print(line)
if line.startswith('F1@5'):
line = line.split('&')
line = line[1:]
for idx, score in enumerate(line):
score = score.strip()
score = re.findall(r'\d+', score)
if len(score) > 0:
f5s[idx].append((float(".".join(score))))
else:
f5s[idx].append(0)
elif line.startswith('F1@10'):
line = line.split('&')
line = line[1:]
for idx, score in enumerate(line):
score = score.strip()
#print(score)
score = re.findall(r'\d+', score)
#print(score)
if len(score) > 0:
f10s[idx].append((float(".".join(score))))
else:
f10s[idx].append(0)
print(f5s)
print(f10s)
f5s = " & ".join(['F1@5'] + ["{:.4f}".format(sum(x)/len(x)) for x in f5s])
f10s = " & ".join(['F1@10'] + ["{:.4f}".format(sum(x)/len(x)) for x in f10s])
print(f5s)
print(f10s)
#get_averages()
def revert():
results = '''& TfIdf & TextRank & YAKE & RaKUn & Key2Vec & EmbedRank & KEA & Maui & SemiSupervised & CopyRNN & CatSeqD & CorrRNN & GPT-2 & \begin{tabular}[x]{@{}c@{}}GPT-2 + \\ BiLSTM-CRF\end{tabular} & TNT-KID \\\hline
& \multicolumn{9}{c}{\textbf{KP20k}} \\\hline
F1@5 & 0.072 & 0.181 & 0.141* & 0.177* & 0.080* & 0.135* & 0.046 & 0.005 & 0.308 & 0.317 & \textbf{0.348} & / & 0.252* & 0.343* & 0.338*\\
F1@10 & 0.094 & 0.151 & 0.146* & 0.160* & 0.090* & 0.134* & 0.044 & 0.005 & 0.245 & 0.273 & 0.298 & / & 0.256* & \textbf{0.347*} & 0.342*\\
\hline
& \multicolumn{9}{c}{\textbf{Inspec}} \\\hline
F1@5 & 0.160 & 0.286 & 0.204* & 0.101* & 0.121* & 0.345* & 0.022 & 0.035 & 0.326 & 0.244 & 0.276 & / & 0.293* & \textbf{0.468*} & 0.456*\\
F1@10 & 0.244 & 0.339 & 0.223* & 0.108* & 0.181* & 0.394* & 0.022 & 0.046 & 0.334 & 0.289 & 0.333 & / & 0.335* & \textbf{0.535*} & 0.534*\\
\hline
& \multicolumn{9}{c}{\textbf{Krapivin}} \\\hline
F1@5 & 0.067 & 0.185 & 0.215* & 0.127* & 0.068* & 0.149* & 0.018 & 0.005 & 0.296 & 0.305 & \textbf{0.325} & 0.318 & 0.210* & 0.302* & 0.313*\\
F1@10 & 0.093 & 0.160 & 0.196* & 0.106* & 0.082* & 0.158* & 0.017 & 0.007 & 0.240 & 0.266 & 0.285 & 0.278 & 0.214* & 0.302* & \textbf{0.318*}\\
\hline
& \multicolumn{9}{c}{\textbf{NUS}} \\\hline
F1@5 & 0.112 & 0.230 & 0.159* & 0.224* & 0.109* & 0.173* & 0.073 & 0.004 & 0.356 & 0.376 & \textbf{0.374} & 0.361 & 0.274* & 0.315* & 0.345*\\
F1@10 & 0.140 & 0.216 & 0.196* & 0.193* & 0.121* & 0.190* & 0.071 & 0.006 & 0.320 & 0.352 & 0.366 & 0.335 & 0.305* & 0.333* & \textbf{0.357*}\\
\hline
& \multicolumn{9}{c}{\textbf{SemEval}} \\\hline
F1@5 & 0.088 & 0.217 & 0.151* & 0.167* & 0.081* & 0.189* & 0.068 & 0.011 & 0.322 & 0.318 & \textbf{0.327} & 0.320 & 0.261* & 0.262 & 0.294*\\
F1@10 & 0.147 & 0.226 & 0.212* & 0.159* & 0.126* & 0.217* & 0.065 & 0.014 & 0.294 & 0.318 & \textbf{0.352} & 0.320 & 0.295* & 0.273 & 0.334*\\
\hline\hline
& \multicolumn{9}{c}{\textbf{KPTimes}} \\\hline
F1@5 & 0.179* & 0.022* & 0.105* & 0.168* & 0.126* & 0.063* & * & * & / & 0.406* & 0.424* & / & 0.353* & \textbf{0.497*} & \textbf{0.488*}\\
F1@10 & 0.151* & 0.030* & 0.118* & 0.139* & 0.116* & 0.057* & * & * & / & 0.393 & 0.424* & / & 0.354* & \textbf{0.497*} & 0.486*\\\hline
& \multicolumn{9}{c}{\textbf{JPTimes}} \\\hline
F1@5 & 0.266* & 0.012* & 0.109* & 0.225* & 0.158* & 0.081* & * & * & / & 0.256* & 0.238* & / & 0.258* & 0.375* & \textbf{0.385*}\\
F1@10 & 0.229* & 0.026* & 0.135* & 0.185* & 0.145* & 0.074* & * & * & / & 0.246 & 0.238* & / & 0.267* & 0.380* & \textbf{0.385*}\\\hline
& \multicolumn{9}{c}{\textbf{DUC}} \\\hline
F1@5 & 0.098* & 0.120* & 0.106* & 0.189* & 0.062* & 0.219* & * & * & / & 0.083 & 0.063* & / & 0.247* & \textbf{0.334*} & 0.310*\\
F1@10 & 0.120* & 0.181* & 0.132* & 0.172* & 0.078* & 0.246* & * & * & / & 0.105 & 0.063* & / & 0.277* & 0.369* & \textbf{0.372*}\\\hline
'''
re.sub("[^0-9]", "", "sdkjh987978asd098as0980a98sd")
alg2idx = {}
datasets = {}
order = []
for line in results.split('\n'):
line = line.strip()
if line.startswith('& TfIdf'):
algs = line.replace('\\hline' ,'').replace('\\', '').split('&')[1:]
for idx, alg in enumerate(algs):
alg2idx[alg] = idx
elif line.startswith('& \multicolumn'):
dataset = line.replace('\\', '').replace('& multicolumn{9}{c}{', '').replace('}} hline', '').replace('extbf{', '').strip()
order.append(dataset)
if dataset not in datasets:
datasets[dataset] = [[],[]]
elif line.startswith('F1@5'):
line = line.replace('\\hline' ,'').replace('\\', '')
line = line.split('&')[1:]
#print(line)
for score in line:
datasets[dataset][0].append(score)
elif line.startswith('F1@10'):
line = line.replace('\\hline', '').replace('\\', '')
line = line.split('&')[1:]
#print(line)
for score in line:
datasets[dataset][1].append(score)
print(" & " + " & ".join(order) + ' \\\\\\hline')
for alg in algs:
line_f5 = ['F1@5']
line_f10 = ['F1@10']
for dataset in order:
f5_score = datasets[dataset][0][alg2idx[alg]]
line_f5.append(f5_score.replace('\t', '\\t'))
f10_score = datasets[dataset][1][alg2idx[alg]]
line_f10.append(f10_score.replace('\t', '\\t'))
print('& \\multicolumn{8}{c}{\\textbf{' + alg + '}} \\\\')
print(" & ".join(line_f5) + ' \\\\')
print(" & ".join(line_f10) + ' \\\\\\hline')
#revert()
def get_averages_reverted():
results ='''
& KP20k & Inspec & Krapivin & NUS & SemEval & KPTimes & JPTimes & DUC \\\hline
& \multicolumn{8}{c}{\textbf{ TfIdf }} \\
F1@5 & 0.072 & 0.160 & 0.067 & 0.112 & 0.088 & 0.179* & 0.266* & 0.098* \\
F1@10 & 0.094 & 0.244 & 0.093 & 0.140 & 0.147 & 0.151* & 0.229* & 0.120* \\\hline
& \multicolumn{8}{c}{\textbf{ TextRank }} \\
F1@5 & 0.181 & 0.286 & 0.185 & 0.230 & 0.217 & 0.022* & 0.012* & 0.120* \\
F1@10 & 0.151 & 0.339 & 0.160 & 0.216 & 0.226 & 0.030* & 0.026* & 0.181* \\\hline
& \multicolumn{8}{c}{\textbf{ YAKE }} \\
F1@5 & 0.141* & 0.204* & 0.215* & 0.159* & 0.151* & 0.105* & 0.109* & 0.106* \\
F1@10 & 0.146* & 0.223* & 0.196* & 0.196* & 0.212* & 0.118* & 0.135* & 0.132* \\\hline
& \multicolumn{8}{c}{\textbf{ RaKUn }} \\
F1@5 & 0.177* & 0.101* & 0.127* & 0.224* & 0.167* & 0.168* & 0.225* & 0.189* \\
F1@10 & 0.160* & 0.108* & 0.106* & 0.193* & 0.159* & 0.139* & 0.185* & 0.172* \\\hline
& \multicolumn{8}{c}{\textbf{ Key2Vec }} \\
F1@5 & 0.080* & 0.121* & 0.068* & 0.109* & 0.081* & 0.126* & 0.158* & 0.062* \\
F1@10 & 0.090* & 0.181* & 0.082* & 0.121* & 0.126* & 0.116* & 0.145* & 0.078* \\\hline
& \multicolumn{8}{c}{\textbf{ EmbedRank }} \\
F1@5 & 0.135* & 0.345* & 0.149* & 0.173* & 0.189* & 0.063* & 0.081* & 0.219* \\
F1@10 & 0.134* & 0.394* & 0.158* & 0.190* & 0.217* & 0.057* & 0.074* & 0.246* \\\hline
& \multicolumn{8}{c}{\textbf{ KEA }} \\
F1@5 & 0.046 & 0.022 & 0.018 & 0.073 & 0.068 & * & * & * \\
F1@10 & 0.044 & 0.022 & 0.017 & 0.071 & 0.065 & * & * & * \\\hline
& \multicolumn{8}{c}{\textbf{ Maui }} \\
F1@5 & 0.005 & 0.035 & 0.005 & 0.004 & 0.011 & * & * & * \\
F1@10 & 0.005 & 0.046 & 0.007 & 0.006 & 0.014 & * & * & * \\\hline
& \multicolumn{8}{c}{\textbf{ SemiSupervised }} \\
F1@5 & 0.308 & 0.326 & 0.296 & 0.356 & 0.322 & / & / & / \\
F1@10 & 0.245 & 0.334 & 0.240 & 0.320 & 0.294 & / & / & / \\\hline
& \multicolumn{8}{c}{\textbf{ CopyRNN }} \\
F1@5 & 0.317 & 0.244 & 0.305 & 0.376 & 0.318 & 0.406* & 0.256* & 0.083 \\
F1@10 & 0.273 & 0.289 & 0.266 & 0.352 & 0.318 & 0.393 & 0.246 & 0.105 \\\hline
& \multicolumn{8}{c}{\textbf{ CatSeqD }} \\
F1@5 & \textbf{0.348} & 0.276 & \textbf{0.325} & \textbf{0.374} & \textbf{0.327} & 0.424* & 0.238* & 0.063* \\
F1@10 & 0.298 & 0.333 & 0.285 & 0.366 & \textbf{0.352} & 0.424* & 0.238* & 0.063* \\\hline
& \multicolumn{8}{c}{\textbf{ CorrRNN }} \\
F1@5 & / & / & 0.318 & 0.361 & 0.320 & / & / & / \\
F1@10 & / & / & 0.278 & 0.335 & 0.320 & / & / & / \\\hline
& \multicolumn{8}{c}{\textbf{ GPT-2 }} \\
F1@5 & 0.252* & 0.293* & 0.210* & 0.274* & 0.261* & 0.353* & 0.258* & 0.247* \\
F1@10 & 0.256* & 0.335* & 0.214* & 0.305* & 0.295* & 0.354* & 0.267* & 0.277* \\\hline
& \multicolumn{8}{c}{\textbf{\begin{tabular}[x]{@{}c@{}}GPT-2 + BiLSTM-CRF\end{tabular} }} \\
F1@5 & 0.343* & \textbf{0.468*} & 0.302* & 0.315* & 0.262 & \textbf{0.497*} & 0.375* & \textbf{0.334*} \\
F1@10 & \textbf{0.347*} & \textbf{0.535*} & 0.302* & 0.333* & 0.273 & \textbf{0.497*} & 0.380* & 0.369* \\\hline
& \multicolumn{8}{c}{\textbf{ TNT-KID }} \\
F1@5 & 0.338* & 0.456* & 0.313* & 0.345* & 0.294* & \textbf{0.488*} & \textbf{0.385*} & 0.310* \\
F1@10 & 0.342* & 0.534* & \textbf{0.318*} & \textbf{0.357*} & 0.334* & 0.486* & \textbf{0.385*} & \textbf{0.372*} \\\hline
'''
for line in results.split('\n'):
line = line.strip()
if line.startswith('F1') :
scores = line.split('&')
scores = scores[1:]
clean = []
for idx, score in enumerate(scores):
score = score.strip()
score = re.findall(r'\d+', score)
if len(score) > 0:
clean.append((float(".".join(score))))
else:
clean.append(0)
avg = sum(clean)/len(clean)
line = line.replace('\\', '').replace('hline', '')
line = line + " & " + "{:.3f}".format(avg)
if line.startswith('F1@5'):
line += ' \\\\'
else:
line += ' \\\\\\hline'
print(line.replace('\t', '\\t'))
else:
print(line.replace('\t', '\\t'))
get_averages_reverted()
| 50.152381
| 223
| 0.44993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10,211
| 0.646348
|
05fd8b2f68e0ad751b568376c91ded4488f3dd84
| 55,975
|
py
|
Python
|
cc_bm_parallel_pyr_dev.py
|
xdenisx/ice_drift_pc_ncc
|
f2992329e8509dafcd37596271e80cbf652d14cb
|
[
"MIT"
] | 3
|
2021-11-10T04:03:10.000Z
|
2022-02-27T10:36:02.000Z
|
cc_bm_parallel_pyr_dev.py
|
xdenisx/ice_drift_pc_ncc
|
f2992329e8509dafcd37596271e80cbf652d14cb
|
[
"MIT"
] | 1
|
2021-10-12T17:29:53.000Z
|
2021-10-12T17:29:53.000Z
|
cc_bm_parallel_pyr_dev.py
|
xdenisx/ice_drift_pc_ncc
|
f2992329e8509dafcd37596271e80cbf652d14cb
|
[
"MIT"
] | null | null | null |
import matplotlib
matplotlib.use('Agg')
# coding: utf-8
#
# Ice drift retrieval algorithm based on [1] from a pair of SAR images
# [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and Magic.
#
##################################################
# Last modification: 22 July, 2019
# TODO:
# 1) Pyramidal strategy (do we need this?)
# 2) add ocean cm maps ('Balance' for divergence)
##################################################
import cv2
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
import time
import multiprocessing
from skimage.feature import match_template
from skimage.transform import rescale, resize, downscale_local_mean
from skimage import io, img_as_ubyte
from skimage.morphology import disk
from skimage.filters.rank import median
from skimage.filters import laplace
from skimage import exposure
from skimage.filters.rank import gradient
from skimage import filters
from sklearn.neighbors import KDTree
import sys
import sklearn.neighbors
import re
import geojson
import shapefile as sf
import pyproj
from osgeo import gdal, osr
from datetime import datetime
from netCDF4 import Dataset
from osgeo import gdal, osr, gdal_array, ogr
import warnings
warnings.filterwarnings('ignore')
import matplotlib as mpl
import time
def remove_files(ddir):
ffiles = glob.glob('%s/*.*' % ddir)
for ifile in ffiles:
try:
os.remove(ifile)
except:
pass
def length_between(v1, v2):
v1_length = np.hypot(v1[0], v1[1])
v2_length = np.hypot(v2[0], v2[1])
return abs(v1_length - v2_length)
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
angle_between((1, 0, 0), (1, 0, 0))
0.0
angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
angle = np.arccos(np.dot(v1_u, v2_u))
if np.isnan(angle):
if (v1_u == v2_u).all():
return np.degrees(0.0)
else:
return np.degrees(np.pi)
return np.degrees(angle)
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 6}
matplotlib.rc('font', **font)
def plot_peaks(immm1, immm2, uuu, vvv, iidx_line, iidx_row, resss, pref,
lline_1, rrow_1, lline_2, rrow_2, u_direct, Li0, v_direct, Li1):
plt.clf()
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1)
ax2 = plt.subplot(1, 3, 2)
ax3 = plt.subplot(1, 3, 3, sharex=ax2, sharey=ax2)
ax1.imshow(immm1, cmap=plt.cm.gray)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(immm2, cmap=plt.cm.gray)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
rect = plt.Rectangle((uuu - Conf.grid_step, vvv - Conf.grid_step), Conf.block_size, Conf.block_size,
edgecolor='r', facecolor='none')
ax2.add_patch(rect)
ax3.imshow(resss)
ax3.set_axis_off()
ax3.set_title('match_template`\nresult')
# highlight matched region
ax3.autoscale(False)
ax3.plot(uuu, vvv, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
# !Plot control information
plt.title('ll1: %s rr1:%s ll2:%s rr2:%s\nu: %s v: %s Li0: %s Li1: %s' %
(lline_1, rrow_1, lline_2, rrow_2,
u_direct, v_direct, Li0, Li1))
# plt.show()
plt.savefig('peaks_plot/%s_%s_%s.png' % (pref, iidx_line, iidx_row), bbox_inches='tight', dpi=300)
# TODO: check
def check_borders(im):
''' Detect a black border: a run of zero-valued pixels along a line marks the image as having a black stripe. '''
flag = 0
ch = 0
j = 0
for i in range(im.shape[0] - 1):
while j < im.shape[1] - 1 and im[i,j] > 0:
j += 1
else:
if j < im.shape[1] - 1 and (im[i,j] == 0 or im[i,j] == 255):
while im[i,j] == 0 and j < im.shape[1] - 1:
j += 1
ch += 1
if ch >= 15:
flag = 1
#print('Black stripe detected!')
return flag
j = 0
ch = 0
return flag
# Matching
def matching(templ, im):
''' Matching '''
# Direct matching
#pool = Pool(processes=3)
#result = pool.apply(match_template, args=(im, templ, True, 'edge',))
#pool.close()
result = match_template(im, templ, True, 'edge',)
# Dirichlet-type border condition: zero out the margins of the correlation surface
# No need if 'edge' in 'match_template'
#n = Conf.block_size #/ 2 # 100
n = int(im.shape[0]/10.)
# First and last n lines
result[0:n, :] = 0.
result[-n:, :] = 0.
# First and last n rows
result[:, 0:n] = 0.
result[:, -n:] = 0.
ij = np.unravel_index(np.argmax(result), result.shape)
u_peak, v_peak = ij[::-1]
#print('u_peak, v_peak: (%s, %s)' % (u_peak, v_peak))
return u_peak, v_peak, result
def filter_local_homogenity(arr_cc_max, y, x, u, v, filter_all=False):
'''
Local homogeneity filtering (refine CC peak)
y - axe (top -> bottom)
x - axe (left -> right)
u - along Y (top -> bottom)
v - along X (left -> right)
mask - indicate that a vector has been reprocessed
'''
# Mask array with refined tie points
mask = np.zeros_like(arr_cc_max)
# TODO: processing of border vectors
for i in range(1, x.shape[0] - 1):
for j in range(1, x.shape[1] - 1):
# Calculate median of u and v for 8 neighbors
# Matrix with neighbors
nn = np.zeros(shape=(2, 3, 3))
nn[:] = np.nan
# U and V
#if not np.isnan(u[i - 1, j - 1]):
nn[0, 0, 0] = u[i - 1, j - 1]
nn[0, 0, 1] = u[i - 1, j]
nn[0, 0, 2] = u[i - 1, j + 1]
nn[1, 0, 0] = v[i - 1, j - 1]
nn[1, 0, 1] = v[i - 1, j]
nn[1, 0, 2] = v[i - 1, j + 1]
nn[0, 1, 0] = u[i, j-1]
nn[0, 1, 2] = u[i, j+1]
nn[1, 1, 0] = v[i, j - 1]
nn[1, 1, 2] = v[i, j + 1]
nn[0, 2, 0] = u[i + 1, j - 1]
nn[0, 2, 1] = u[i + 1, j]
nn[0, 2, 2] = u[i + 1, j + 1]
nn[1, 2, 0] = v[i + 1, j - 1]
nn[1, 2, 1] = v[i + 1, j]
nn[1, 2, 2] = v[i + 1, j + 1]
# Check number of nans and find median for U and V
uu = nn[0, :, :]
# If number of neighbors <= 3
if len(uu[np.isnan(uu)]) > 5:
u[i, j] = np.nan
v[i, j] = np.nan
arr_cc_max[i, j] = 0
#print 'NANs > 3!'
else:
u_median = np.nanmedian(nn[0, :, :])
v_median = np.nanmedian(nn[1, :, :])
if not filter_all:
if np.isnan(u[i, j]) or abs(u[i, j] - u_median) > abs(u_median) or \
abs(v[i, j] - v_median) > abs(v_median):
u[i, j] = u_median
v[i, j] = v_median
mask[i, j] = 1
arr_cc_max[i, j] = 1
#print '%s %s %s %s' % (u[i, j], v[i, j], u_median, v_median)
else:
u[i, j] = u_median
v[i, j] = v_median
mask[i, j] = 1
arr_cc_max[i, j] = 1
return mask, y, x, u, v, arr_cc_max
def filter_Rmin(arr_cc_max):
''' Minimum correlation threshold filtering '''
# Remove and plot vectors with R < Rmin, where Rmin = Rmean - Rstd
R_mean = np.nanmean(arr_cc_max)
R_std = np.nanstd(arr_cc_max)
R_min = R_mean - R_std
mask = np.zeros_like(arr_cc_max)
mask[(arr_cc_max < R_min)] = 1
return mask
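# Worked example for filter_Rmin (illustrative numbers, added note): if the
# correlation peaks have mean R_mean = 0.60 and standard deviation R_std = 0.15,
# then R_min = 0.45 and every vector whose peak correlation falls below 0.45
# gets mask = 1 (flagged for removal).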
def plot_scatter(fname, img, x, y, msize=0.1):
''' Plot scatter of initial points '''
plt.clf()
plt.imshow(Conf.img1, cmap='gray')
plt.scatter(x, y, s=msize, color='red')
plt.savefig(fname, bbox_inches='tight', dpi=600)
def plot_arrows(fname, img, x, y, u, v, cc, arrwidth=0.005, headwidth=3.5, flag_color=True):
''' Plot arrows on top of image '''
plt.clf()
fig, ax = plt.subplots(figsize=(16, 9))
plt.imshow(img, cmap='gray')
if flag_color:
plt.quiver(x, y, u, v, cc, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, cmap='jet')
plt.quiver(x, y, u, v, cc, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, cmap='jet')
cbar = plt.colorbar()
cbar.set_label('Correlation coeff.')
else:
plt.quiver(x, y, u, v, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, color='yellow')
plt.savefig(fname, bbox_inches='tight', dpi=600)
# Plot start points
plt.clf()
fig, ax = plt.subplots(figsize=(16, 9))
plt.imshow(img, cmap='gray')
plt.scatter(x[~np.isnan(u)], y[~np.isnan(u)], s=Conf.grid_step/2., facecolors='yellow', edgecolors='black')
plt.savefig('%s/pts_%s' % (os.path.dirname(fname), os.path.basename(fname)), bbox_inches='tight', dpi=600)
# TODO!: remove
def plot_arrows_one_color(fname, img, x, y, u, v, cc, arrwidth=0.005, headwidth=3.5, flag_color=False):
''' Plot arrows on top of image '''
plt.clf()
plt.imshow(img, cmap='gray')
if flag_color:
plt.quiver(x, y, u, v, cc, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, cmap='jet')
cbar = plt.colorbar()
cbar.set_label('Correlation coeff.')
else:
plt.quiver(x, y, u, v, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, color='yellow')
plt.savefig(fname, bbox_inches='tight', dpi=1200)
def crop_images(img1, img2, y0, x0):
'''
:param img1: first image
:param img2: second image
:param x0: center of patch on image2
:param y0: center of patch on image2
:return: image patches
'''
# TODO: x2, y2 for Conf.img2
height, width = img1.shape
# Crop Conf.img1
iidx_line = int(x0)
iidx_row = int(y0)
LLt0 = np.max([0, iidx_line - Conf.grid_step])
LLt1 = np.max([0, iidx_row - Conf.grid_step])
RRt0 = np.min([iidx_line + Conf.grid_step, height])
RRt1 = np.min([iidx_row + Conf.grid_step, width])
# Crop patch from Conf.img1
im1 = Conf.img1[LLt0:RRt0, LLt1:RRt1]
LLi0 = np.max([0, iidx_line - Conf.block_size * Conf.search_area])
LLi1 = np.max([0, iidx_row - Conf.block_size * Conf.search_area])
RRi0 = np.min([iidx_line + Conf.block_size * Conf.search_area, height])
RRi1 = np.min([iidx_row + Conf.block_size * Conf.search_area, width])
# Crop search area from Conf.img2
im2 = Conf.img2[LLi0:RRi0, LLi1:RRi1]
# Offset for image1
y_offset_img1 = iidx_line # - Conf.block_size/2
x_offset_img1 = iidx_row # - Conf.block_size/2
#####################
# Filtering
#####################
# Median filtering
if Conf.img_median_filtering:
# print 'Median filtering'
# im2 = median(im2, disk(3))
# im1 = median(im1, disk(3))
im2 = median(im2, disk(Conf.median_kernel))
im1 = median(im1, disk(Conf.median_kernel))
if Conf.img_laplace_filtering:
im2 = laplace(im2)
im1 = laplace(im1)
if Conf.img_gradient_filtering:
im2 = gradient(im2, disk(3))
im1 = gradient(im1, disk(3))
if Conf.img_scharr_filtering:
# filters.scharr(camera)
im2 = filters.scharr(im2)
im1 = filters.scharr(im1)
########################
# End filtering
########################
# Check for black stripes
flag1 = check_borders(im1)
flag2 = check_borders(im2)
return im1, im2
# TODO: EXPERIMENTAL
def cc_bm(arguments):
# BM test flag
f=0
# Parse arguments
iidx_line, iidx_row, LLi0, LLi1, im1_name, im2_name, pref, lll_line_start, lll_row_start = arguments
if iidx_line is not None:
# Open two images
im1 = io.imread(im1_name, 0)
im2 = io.imread(im2_name, 0)
#####################
# Filtering
#####################
# Median filtering
if Conf.img_median_filtering:
# print 'Median filtering'
# im2 = median(im2, disk(3))
# im1 = median(im1, disk(3))
im1 = median(im1, disk(Conf.median_kernel))
im2 = median(im2, disk(Conf.median_kernel))
if Conf.img_laplace_filtering:
im1 = laplace(im1)
im2 = laplace(im2)
if Conf.img_gradient_filtering:
im1 = gradient(im1, disk(3))
im2 = gradient(im2, disk(3))
if Conf.img_scharr_filtering:
# filters.scharr(camera)
im1 = filters.scharr(im1)
im2 = filters.scharr(im2)
########################
# End filtering
########################
# Check for black stripes
flag1 = check_borders(im1)
flag2 = check_borders(im2)
# Proceed only if neither crop contains a black border
if flag1 == 0 and flag2 == 0:
u_direct, v_direct, result = matching(im1, im2)
# Peak maximum CC
cc_max = np.max(result)
# Get coordinates with offsets
lline_2, rrow_2 = u_direct + LLi0, v_direct + LLi1
lline_2_test, rrow_2_test = v_direct + LLi0, u_direct + LLi1
lline_1, rrow_1 = iidx_line, iidx_row
# Accept only if the back-matched end point lands close to the start point of the direct match
if abs(lline_2_test - lll_line_start) < Conf.bm_th and abs(rrow_2_test - lll_row_start) < Conf.bm_th:
#print('\nlline_2_test, lll_line_start: (%s, %s)' % (lline_2_test, lll_line_start))
#print('rrow_2_test, lll_row_start: (%s, %s)\n' % (rrow_2_test, lll_row_start))
#print('\nCOORDS: %s %s' % (arr_lines_1[i, j], arr_rows_1[i, j]))
#print('COORDS: %s %s\n' % (arr_lines_2[i, j], arr_rows_2[i, j]))
# Peaks plot
if Conf.plot_correlation_peaks:
plot_peaks(im1, im2, u_direct, v_direct, iidx_line, iidx_row, result, pref,
lline_1, rrow_1, lline_2, rrow_2, u_direct, LLi0, v_direct, LLi1)
#plot_peaks(im1_bm, im2_bm, uu_bm, vv_bm, iidx_line, iidx_row,
# result_bm, 'bm')
return lline_1, rrow_1, lline_2-lline_1, rrow_2-rrow_1, cc_max
#return lline_2, rrow_2, lline_1 - lline_2, rrow_1 - rrow_2, cc_max
else:
pass
else:
# if crop images have black stripes
if flag1 == 1:
print('IMG_1: %s_%s' % (iidx_line, iidx_row))
io.imsave('ci_%s_1/black_%s_%s.png' % (Conf.out_fname, iidx_line, iidx_row), im1)
if flag2 == 1:
print('IMG_2: %s_%s' % (iidx_line, iidx_row))
io.imsave('ci_%s_2/black_%s_%s.png' % (Conf.out_fname, iidx_line, iidx_row), im2)
def filter_BM(th = 10):
''' Back matching test '''
Conf.bm_th = th # pixels
u_back = arr_rows_2_bm - arr_rows_1_bm
u_direct = arr_rows_2 - arr_rows_1
v_back = arr_lines_2_bm - arr_lines_1_bm
v_direct = arr_lines_2 - arr_lines_1
u_dif = u_direct - u_back * (-1)
v_dif = v_direct - v_back * (-1)
#arr_rows_1, arr_lines_1, arr_rows_2, arr_lines_2, arr_cc_max
#arr_rows_1_bm, arr_lines_1_bm, arr_rows_2_bm, arr_lines_2_bm, arr_cc_max_bm
mask = np.zeros_like(arr_cc_max)
mask[:,:] = 1
mask[((abs(u_dif) < Conf.bm_th) & (abs(v_dif) < Conf.bm_th))] = 0
#mask[((abs(arr_lines_1 - arr_lines_2_bm) > Conf.bm_th) | (abs(arr_rows_1 - arr_rows_2_bm) > Conf.bm_th))] = 1
return mask
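# Worked example for filter_BM (illustrative numbers, added note): with th = 10 px,
# a cell whose direct vector is (u, v) = (24, -7) and whose back-matched vector is
# (-20, 9) gives differences (24 - 20, -7 + 9) = (4, 2); both are below the
# threshold, so mask stays 0 and the vector passes the back-matching test.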
def plot_arrows_from_list(pref, fname, img, ll_data, arrwidth=0.005, headwidth=3.5, flag_color=True):
''' Plot arrows on top of image from a list of data '''
plt.clf()
plt.imshow(img, cmap='gray')
# Get list without none and each elements
ll_data = [x for x in ll_data if x is not None]
yyy = [i[0] for i in ll_data]
xxx = [i[1] for i in ll_data]
uuu = [i[2] for i in ll_data]
vvv = [i[3] for i in ll_data]
ccc = [i[4] for i in ll_data]
if flag_color:
plt.quiver(xxx, yyy, uuu, vvv, ccc, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, cmap='jet')
cbar = plt.colorbar()
cbar.set_label('Correlation coeff.')
# Plot text with coordinates
for i in range(len(xxx)):
plt.text(xxx[i], yyy[i], r'(%s,%s)' % (yyy[i], xxx[i]), fontsize=0.07, color='yellow')
plt.text(xxx[i] + uuu[i], yyy[i] + vvv[i], r'(%s,%s)' % (yyy[i] + vvv[i], xxx[i] + uuu[i]),
fontsize=0.07, color='yellow') # bbox={'facecolor': 'yellow', 'alpha': 0.5}
else:
plt.quiver(xxx, yyy, uuu, vvv, ccc, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, color='yellow')
plt.savefig(fname, bbox_inches='tight', dpi=600)
# Filter outliers here and plot
plt.clf()
plt.imshow(img, cmap='gray')
def outliers_filtering(x1, y1, uu, vv, cc, radius=256, angle_difference=5, length_difference=30,
total_neighbours=7, angle_neighbours=7, length_neighbours=7):
# Get values of vector components
#uu = x2 - x1
#vv = y2 - y1
idx_mask = []
# Make 2D data of components
#data = np.vstack((uu, vv)).T
x1, y1, uu, vv, cc = np.array(x1), np.array(y1),\
np.array(uu, np.float), np.array(vv, np.float), np.array(cc, np.float)
# Radius based filtering
vector_start_data = np.vstack((x1, y1)).T
vector_start_tree = sklearn.neighbors.KDTree(vector_start_data)
for i in range(0, len(x1), 1):
# For list
# req_data = np.array([x1[i], y1[i]]).reshape(1, -1)
req_data = np.array((x1[i], y1[i])).reshape(1, -1)
# Getting number of neighbours
num_nn = vector_start_tree.query_radius(req_data, r=radius, count_only=True)
if num_nn[0] < total_neighbours:
idx_mask.append(i)
# Keep small vectors
if np.hypot(uu[i], vv[i]) < 10.:
pass
else:
nn = vector_start_tree.query_radius(req_data, r=radius)
data = np.vstack((uu[nn[0]], vv[nn[0]])).T
num_of_homo_NN = 0
num_of_length_homo_NN = 0
####################################################################
# Loop through the neighbouring drift vectors and count those homogeneous in angle and length
####################################################################
for ii in range(num_nn[0]):
# Angle between "this" vector and others
angle_v1_v2 = angle_between([uu[i], vv[i]], [data[:, 0][ii], data[:, 1][ii]])
# Length between "this" vector and others
diff_v1_v2 = length_between([uu[i], vv[i]], [data[:, 0][ii], data[:, 1][ii]])
if angle_v1_v2 <= angle_difference:
num_of_homo_NN = num_of_homo_NN + 1
if diff_v1_v2 < length_difference:
num_of_length_homo_NN = num_of_length_homo_NN + 1
if not (num_of_homo_NN >= angle_neighbours and num_of_length_homo_NN >= length_neighbours):
idx_mask.append(i)
tt = list(set(idx_mask))
iidx_mask = np.array(tt)
# Delete bad data
'''
x1_f = np.delete(x1, iidx_mask)
y1_f = np.delete(y1, iidx_mask)
uu_f = np.delete(uu, iidx_mask)
vv_f = np.delete(vv, iidx_mask)
cc_f = np.delete(cc, iidx_mask)
'''
# Mask (=NaN) bad values
uu = np.array(uu, np.float)
vv = np.array(vv, np.float)
uu[iidx_mask] = np.nan
vv[iidx_mask] = np.nan
cc[iidx_mask] = 0.
return x1, y1, uu, vv, cc
def export_to_vector(gtiff, x1, y1, u, v, output_path, gridded=False, data_format='geojson'):
print('\nStart exporting to vector file...')
if data_format not in ['geojson', 'shp']:
print('Invalid format')
return
x2 = x1 + u
y2 = y1 + v
ds = gdal.Open(gtiff)
geotransform = ds.GetGeoTransform()
old_cs = osr.SpatialReference()
old_cs.ImportFromWkt(ds.GetProjection())
new_cs = osr.SpatialReference()
new_cs.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
transform = osr.CoordinateTransformation(old_cs, new_cs)
if data_format == 'shp':
w = sf.Writer(sf.POLYLINE)
# w.field('id', 'C', '40')
w.field('lat1', 'C', '40')
w.field('lon1', 'C', '40')
w.field('lat2', 'C', '40')
w.field('lon2', 'C', '40')
w.field('drift_m', 'C', '40')
w.field('direction', 'C', '40')
if data_format == 'geojson':
features = []
pixelWidth = geotransform[1]
pixelHeight = geotransform[-1]
print('Pixel size (%s, %s) m' % (pixelWidth, pixelHeight))
for i in range(len(x1)):
# print '%s %s %s %s' % (y[ch], x[ch], u[ch], v[ch])
if np.isnan(x2[i]) == False and np.isnan(y2[i]) == False:
xx1 = geotransform[0] + float(x1[i]) * pixelWidth
yy1 = geotransform[3] + float(y1[i]) * pixelHeight
xx2 = geotransform[0] + float(x2[i]) * pixelWidth
yy2 = geotransform[3] + float(y2[i]) * pixelHeight
# print(xx1, yy1)
latlon = transform.TransformPoint(float(xx1), float(yy1))
lon1 = latlon[0]
lat1 = latlon[1]
latlon = transform.TransformPoint(float(xx2), float(yy2))
lon2 = latlon[0]
lat2 = latlon[1]
# Big circle length
try:
mag, az = calc_distance(float(lon1), float(lat1), float(lon2), float(lat2))
az = float(az)
if az <= 180.0:
az = az + 180.0
else:
az = az - 180.0
except:
mag, az = 999., 999.
if data_format == 'shp':
w.line(parts=[[[lon1, lat1], [lon2, lat2]]])
w.record(str(i), str(lat1), str(lon1), str(lat2), str(lon2), str(mag), str(az))
# coords_list.append((lon1, lat1))
if data_format == 'geojson':
new_line = geojson.Feature(geometry=geojson.LineString([(lon1, lat1), (lon2, lat2)]),
properties={'id': str(i),
'lat1': lat1,
'lon1': lon1,
'lat2': lat2,
'lon2': lon2,
'drift_m': mag,
'azimuth': az})
features.append(new_line)
if data_format == 'shp':
try:
w.save(output_path)
# create the PRJ file
prj = open('%s.prj' % output_path.split('.')[0], "w")
prj.write(
'''GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]''')
prj.close()
except:
print('Impossible to create shapefile, sorry.')
if data_format == 'geojson':
try:
collection = geojson.FeatureCollection(features=features)
output_geojson = open(output_path, 'w')
output_geojson.write(geojson.dumps(collection))
output_geojson.close()
except Exception:
print('Impossible to create geojson, sorry: %s' % str(Exception))
print('Geojson creation success!\n')
def calc_distance(lon1, lat1, lon2, lat2):
import pyproj
geod = pyproj.Geod(ellps="WGS84")
angle1, angle2, distance = geod.inv(lon1, lat1, lon2, lat2)
return '%0.2f' % distance, '%0.1f' % angle2
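# Worked example for calc_distance (illustrative coordinates, added note):
# pyproj.Geod.inv returns (forward azimuth, back azimuth, distance in metres).
# For two points at 74N separated by one degree of longitude, e.g.
# calc_distance(18.0, 74.0, 19.0, 74.0), the distance comes out at roughly
# 31 km, and the second value is the azimuth back towards the first point.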
def median_filtering(x1, y1, uu, vv, cc, radius=512, total_neighbours=7):
'''
Median filtering of resultant ice vectors as a step before deformation calculation
'''
fast_ice_th = 5.
# Get values of vector components
#uu = x2 - x1
#vv = y2 - y1
idx_mask = []
# Make 2D data of components
#data = np.vstack((uu, vv)).T
x1, y1, uu, vv, cc = np.array(x1), np.array(y1), np.array(uu), np.array(vv), np.array(cc)
# Radius based filtering
vector_start_data = np.vstack((x1, y1)).T
vector_start_tree = sklearn.neighbors.KDTree(vector_start_data)
for i in range(0, len(x1), 1):
# If the element's index is in the mask list from 'outliers_filtering', replace it with the median
#if i in mask_proc:
# print('Replace with median!')
req_data = np.array([x1[i], y1[i]]).reshape(1, -1)
# Getting number of neighbours
num_nn = vector_start_tree.query_radius(req_data, r=radius, count_only=True)
# Check the number of neighbours
'''
if num_nn[0] < total_neighbours:
idx_mask.append(i)
cc[i] = 0.
else:
'''
# Apply median filtering
nn = vector_start_tree.query_radius(req_data, r=radius)
data = np.vstack((uu[nn[0]], vv[nn[0]])).T
####################################################################
# Loop through the neighbouring drift vectors and apply the median
####################################################################
for ii in range(num_nn[0]):
# Calculate median
#data[:, 0][ii], data[:, 1][ii]
# Replace raw with median
# If not fast ice (> 5 pixels)
if (np.hypot(uu[i], vv[i]) > fast_ice_th or np.isnan(uu[i]) or np.isnan(vv[i])):
u_median = np.nanmedian(data[:, 0][ii])
v_median = np.nanmedian(data[:, 1][ii])
#u_median = np.nanmean(data[:, 0][ii])
#v_median = np.nanmean(data[:, 1][ii])
uu[i], vv[i] = u_median, v_median
cc[i] = 0
#tt = list(set(idx_mask))
#iidx_mask = np.array(tt)
x1_f = np.array(x1)
y1_f = np.array(y1)
uu_f = np.array(uu)
vv_f = np.array(vv)
cc_f = np.array(cc)
return x1_f, y1_f, uu_f, vv_f, cc_f
def calc_deformations(dx, dy, normalization=False, normalization_time=None, cell_size=1.,
invert_meridional=True, out_png_name='test.png'):
'''
Calculate deformation invariants from X and Y ice drift components
dx, dy - x and y component of motion (pixels)
normalization - normalize to time (boolean)
normalization_time - normalization time (in seconds)
cell_size - ground meters in a pixel
invert_meridional - invert y component (boolean)
'''
# Cell size factor (in cm)
cell_size_cm = cell_size * 100.
cell_size_factor = 1 / cell_size_cm
m_div = np.empty((dx.shape[0], dx.shape[1],))
m_div[:] = np.NAN
m_curl = np.empty((dx.shape[0], dx.shape[1],))
m_curl[:] = np.NAN
m_shear = np.empty((dx.shape[0], dx.shape[1],))
m_shear[:] = np.NAN
m_tdef = np.empty((dx.shape[0], dx.shape[1],))
m_tdef[:] = np.NAN
# Invert meridional component
if invert_meridional:
dy = dy * (-1)
# Normalize u and v by the time separation (to get cm/s)
if not normalization:
pass
else:
# Convert to ground distance (pixels*cell size(m) * 100.)
dx = dx * cell_size_cm # cm
dy = dy * cell_size_cm # cm
# Get U/V components of speed (cm/s)
dx = dx / normalization_time
dy = dy / normalization_time
# Calculate magnitude (speed module) (cm/s)
mag_speed = np.hypot(dx, dy)
# Print mean speed in cm/s
print('Mean speed: %s [cm/s]' % (np.nanmean(mag_speed)))
#cell_size_factor = 1 / cell_size
# Test
#plt.clf()
#plt.imshow(m_div)
for i in range(1, dx.shape[0] - 1):
for j in range(1, dx.shape[1] - 1):
# div
if (np.isnan(dx[i, j + 1]) == False and np.isnan(dx[i, j - 1]) == False
and np.isnan(dy[i - 1, j]) == False and np.isnan(dy[i + 1, j]) == False
and (np.isnan(dx[i, j]) == False or np.isnan(dy[i, j]) == False)):
# m_div[i,j] = 0.5*((u_int[i,j + 1] - u_int[i,j - 1]) + (v_int[i + 1,j] - v_int[i - 1,j]))/m_cell_size
# !Exclude cell size factor!
m_div[i, j] = cell_size_factor * 0.5 * ((dx[i, j + 1] - dx[i, j - 1])
+ (dy[i - 1, j] - dy[i + 1, j]))
# print m_div[i,j]
# Curl
if (np.isnan(dy[i, j + 1]) == False and np.isnan(dy[i, j - 1]) == False and
np.isnan(dx[i - 1, j]) == False and np.isnan(dx[i + 1, j]) == False
and (np.isnan(dx[i, j]) == False or np.isnan(dy[i, j]) == False)):
# !Exclude cell size factor!
m_curl[i, j] = cell_size_factor * 0.5 * (dy[i, j + 1] - dy[i, j - 1]
- dx[i - 1, j] + dx[i + 1, j]) / cell_size
# Shear
if (np.isnan(dy[i + 1, j]) == False and np.isnan(dy[i - 1, j]) == False and
np.isnan(dx[i, j - 1]) == False and np.isnan(dx[i, j + 1]) == False and
np.isnan(dy[i, j - 1]) == False and np.isnan(dy[i, j + 1]) == False and
np.isnan(dx[i + 1, j]) == False and np.isnan(dx[i - 1, j]) == False and
(np.isnan(dx[i, j]) == False or np.isnan(dy[i, j]) == False)):
dc_dc = cell_size_factor * 0.5 * (dy[i + 1, j] - dy[i - 1, j])
dr_dr = cell_size_factor * 0.5 * (dx[i, j - 1] - dx[i, j + 1])
dc_dr = cell_size_factor * 0.5 * (dy[i, j - 1] - dy[i, j + 1])
dr_dc = cell_size_factor * 0.5 * (dx[i + 1, j] - dx[i - 1, j])
# !Exclude cell size factor!
m_shear[i, j] = np.sqrt(
(dc_dc - dr_dr) * (dc_dc - dr_dr) + (dc_dr - dr_dc) * (dc_dr - dr_dc)) / cell_size
'''
# Den
dc_dc = 0.5*(v_int[i + 1,j] - v_int[i - 1,j])
dr_dr = 0.5*(u_int[i,j + 1] - u_int[i,j - 1])
dc_dr = 0.5*(v_int[i,j + 1] - v_int[i,j - 1])
dr_dc = 0.5*(u_int[i + 1,j] - u_int[i - 1,j])
m_shear[i,j] = np.sqrt((dc_dc -dr_dr) * (dc_dc -dr_dr) + (dc_dr - dr_dc) * (dc_dr - dr_dc))/m_cell_size
'''
# Total deformation
if (np.isnan(m_shear[i, j]) == False and np.isnan(m_div[i, j]) == False):
m_tdef[i, j] = np.hypot(m_shear[i, j], m_div[i, j])
# Invert dy back
if invert_meridional:
dy = dy * (-1)
# data = np.vstack((np.ravel(xx_int), np.ravel(yy_int), np.ravel(m_div), np.ravel(u_int), np.ravel(v_int))).T
divergence = m_div
# TODO: Plot Test Div
plt.clf()
plt.gca().invert_yaxis()
plt.imshow(divergence, cmap='RdBu', vmin=-0.00008, vmax=0.00008,
interpolation='nearest', zorder=2) # vmin=-0.06, vmax=0.06,
# Plot u and v values inside cells (for testing purposes)
'''
font_size = .0000003
for ii in range(dx.shape[1]):
for jj in range(dx.shape[0]):
try:
if not np.isnan(divergence[ii,jj]):
if divergence[ii,jj] > 0:
plt.text(jj, ii,
'u:%.2f\nv:%.2f\n%s ij:(%s,%s)\n%.6f' %
(dx[ii,jj], dy[ii,jj], '+', ii, jj, divergence[ii,jj]),
horizontalalignment='center',
verticalalignment='center', fontsize=font_size, color='k')
if divergence[ii,jj] < 0:
plt.text(jj, ii,
'u:%.2f\nv:%.2f\n%s ij:(%s,%s)\n%.6f' %
(dx[ii,jj], dy[ii,jj], '-', ii, jj, divergence[ii,jj]),
horizontalalignment='center',
verticalalignment='center', fontsize=font_size, color='k')
if divergence[ii,jj] == 0:
plt.text(jj, ii,
'u:%.2f\nv:%.2f\n%s ij:(%s,%s)\n%.6f' %
(dx[ii,jj], dy[ii,jj], '0', ii, jj, divergence[ii,jj]),
horizontalalignment='center',
verticalalignment='center', fontsize=font_size, color='k')
if np.isnan(divergence[ii,jj]):
plt.text(jj, ii,
'u:%.2f\nv:%.2f\n%s ij:(%s,%s)' %
(dx[ii,jj], dy[ii,jj], '-', ii, jj),
horizontalalignment='center',
verticalalignment='center', fontsize=font_size, color='k')
# Plot arrows on top of the deformation
xxx = range(dx.shape[1])
yyy = range(dx.shape[0])
except:
pass
'''
# Plot drift arrows on the top
#import matplotlib.cm as cm
#from matplotlib.colors import Normalize
# Invert meridional component for plotting
ddy = dy * (-1)
#norm = Normalize()
colors = np.hypot(dx, ddy)
#print(colors)
#norm.autoscale(colors)
# we need to normalize our colors array to match its colormap domain,
# which is [0, 1]
#colormap = cm.inferno
# Plot arrows on top of the deformation
xxx = range(dx.shape[1])
yyy = range(dx.shape[0])
plt.quiver(xxx, yyy, dx, ddy, colors, cmap='Greys', zorder=3) #'YlOrBr')
# Invert Y axis
plt.savefig(out_png_name, bbox_inches='tight', dpi=800)
curl = m_curl
shear = m_shear
total_deform = m_tdef
# return mag in cm/s
return mag_speed, divergence, curl, shear, total_deform
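# Note (added summary, not original code): the loops above approximate the
# standard 2-D strain-rate invariants with centred differences on the drift grid:
# divergence ~ du/dx + dv/dy, curl ~ dv/dx - du/dy,
# shear ~ sqrt((du/dx - dv/dy)**2 + (du/dy + dv/dx)**2),
# and total deformation = hypot(divergence, shear), all scaled by the grid cell
# size; the meridional component is sign-flipped first because image rows grow
# downwards.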
# !TODO:
def make_nc(nc_fname, lons, lats, data):
"""
Make netcdf4 file for deformation (divergence, shear, total deformation), scaled 10^(-4)
"""
print('\nStart making nc for defo...')
ds = Dataset(nc_fname, 'w', format='NETCDF4_CLASSIC')
print(ds.file_format)
# Dimensions
y_dim = ds.createDimension('y', lons.shape[0])
x_dim = ds.createDimension('x', lons.shape[1])
time_dim = ds.createDimension('time', None)
#data_dim = ds.createDimension('data', len([k for k in data.keys()]))
# Variables
times = ds.createVariable('time', np.float64, ('time',))
latitudes = ds.createVariable('lat', np.float32, ('y', 'x',))
longitudes = ds.createVariable('lon', np.float32, ('y', 'x',))
for var_name in data.keys():
globals()[var_name] = ds.createVariable(var_name, np.float32, ('y', 'x',))
globals()[var_name][:, :] = data[var_name]['data']
globals()[var_name].units = data[var_name]['units']
globals()[var_name].scale_factor = data[var_name]['scale_factor']
# Global Attributes
ds.description = 'Sea ice deformation product'
ds.history = 'Created ' + time.ctime(time.time())
ds.source = 'NIERSC/NERSC'
# Variable Attributes
latitudes.units = 'degree_north'
longitudes.units = 'degree_east'
times.units = 'hours since 0001-01-01 00:00:00'
times.calendar = 'gregorian'
# Put variables
latitudes[:, :] = lats
longitudes[:, :] = lons
ds.close()
def _create_geotiff(suffix, Array, NDV, xsize, ysize, GeoT, Projection, deformation):
from osgeo import gdal_array
DataType = gdal_array.NumericTypeCodeToGDALTypeCode(Array.dtype)
    # NumericTypeCodeToGDALTypeCode may return an int code or a type name string
    if not isinstance(DataType, int):
        if not DataType.startswith('gdal.GDT_'):
DataType = eval('gdal.GDT_' + DataType)
NewFileName = suffix + '.tif'
zsize = 1 #Array.shape[0]
driver = gdal.GetDriverByName('GTiff')
Array[np.isnan(Array)] = NDV
DataSet = driver.Create(NewFileName, xsize, ysize, zsize, DataType)
DataSet.SetGeoTransform(GeoT)
DataSet.SetProjection(Projection)#.ExportToWkt())
# for testing
# DataSet.SetProjection('PROJCS["NSIDC Sea Ice Polar Stereographic North",GEOGCS["Unspecified datum based upon the Hughes 1980 ellipsoid",DATUM["Not_specified_based_on_Hughes_1980_ellipsoid",SPHEROID["Hughes 1980",6378273,298.279411123061,AUTHORITY["EPSG","7058"]],AUTHORITY["EPSG","6054"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4054"]],PROJECTION["Polar_Stereographic"],PARAMETER["latitude_of_origin",70],PARAMETER["central_meridian",-45],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3411"]]')
#for i in xrange(0, zsize):
DataSet.GetRasterBand(1).WriteArray(deformation) # Array[i])
DataSet.GetRasterBand(1).SetNoDataValue(NDV)
DataSet.FlushCache()
return NewFileName
def create_geotiff(suffix, data, NDV, GeoT, Projection):
''' Create geotiff file (1 band)'''
# Get GDAL data type
dataType = gdal_array.NumericTypeCodeToGDALTypeCode(data.dtype)
# NaNs to the no data value
data[np.isnan(data)] = NDV
if type(dataType) != np.int:
if dataType.startswith('gdal.GDT_') == False:
dataType = eval('gdal.GDT_' + dataType)
newFileName = suffix + '_test.tif'
cols = data.shape[1]
rows = data.shape[0]
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(newFileName, cols, rows, 1, dataType)
#outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
outRaster.SetGeoTransform(GeoT)
outband = outRaster.GetRasterBand(1)
outband.WriteArray(data)
outRaster.SetProjection(Projection)
outband.SetNoDataValue(NDV)
outband.FlushCache()
return newFileName
def cc(arguments):
# BM test flag
f=0
# Parse arguments
#iidx_line, iidx_row, LLi0, LLi1, im1_name, im2_name, pref = arguments
iidx_line, iidx_row, Lt0, Rt0, Lt1, Rt1, Li0, Ri0, Li1, Ri1, pref, Conf.img1, Conf.img2, itr, itrCnt = arguments
#print("Processing block: {} from {} ({:.2f}%) at pid={}".format(itr, itrCnt, itr/itrCnt*100, multiprocessing.current_process()))
if iidx_line is not None:
# Open two images
im1 = Conf.img1[Lt0:Rt0, Lt1:Rt1]
im2 = Conf.img2[Li0:Ri0, Li1:Ri1]
#####################
# Filtering
#####################
# Median filtering
if Conf.img_median_filtering:
# print 'Median filtering'
# im2 = median(im2, disk(3))
# im1 = median(im1, disk(3))
im1 = median(im1, disk(Conf.median_kernel))
im2 = median(im2, disk(Conf.median_kernel))
if Conf.img_laplace_filtering:
im1 = laplace(im1)
im2 = laplace(im2)
if Conf.img_gradient_filtering:
im1 = gradient(im1, disk(3))
im2 = gradient(im2, disk(3))
if Conf.img_scharr_filtering:
# filters.scharr(camera)
im1 = filters.scharr(im1)
im2 = filters.scharr(im2)
########################
# End filtering
########################
# Check for black stripes
flag1 = check_borders(im1)
flag2 = check_borders(im2)
# No black borders in the first image
if flag1 == 0: # and flag2 == 0:
u_direct, v_direct, result = matching(im1, im2)
# Peak maximum CC
cc_max = np.max(result)
# Get coordinates with offsets
lline_2, rrow_2 = v_direct + Li0, u_direct + Li1
lline_1, rrow_1 = iidx_line, iidx_row
#ff_out_txt.write('%s, %s, %s, %s, %s, %s, %s, %s' %
# (lline_1, rrow_1, lline_2, rrow_2, u_direct, Li0, v_direct, Li1))
print(lline_1, rrow_1, lline_2, rrow_2, u_direct, Li0, v_direct, Li1)
#print('\nCOORDS: %s %s' % (arr_lines_1[i, j], arr_rows_1[i, j]))
#print('COORDS: %s %s\n' % (arr_lines_2[i, j], arr_rows_2[i, j]))
# Peaks plot
if Conf.plot_correlation_peaks:
plot_peaks(im1, im2, u_direct, v_direct, iidx_line, iidx_row, result, pref,
lline_1, rrow_1, lline_2, rrow_2, u_direct, Li0, v_direct, Li1)
#plot_peaks(im1_bm, im2_bm, uu_bm, vv_bm, iidx_line, iidx_row,
# result_bm, 'bm')
# If all elements are equal
if np.unique(result).size == 1:
return np.nan, np.nan, np.nan, np.nan, np.nan
# If second peak close to first
flat = result.flatten()
flat.sort()
#print('#Flat: %s' % flat)
#if abs(flat[-1]-flat[-2]) < 0.05:
# return np.nan, np.nan, np.nan, np.nan, np.nan
ret = (lline_1, rrow_1, rrow_2-rrow_1, lline_2-lline_1, cc_max)
#return lline_1, rrow_1, u_direct, v_direct, cc_max
else:
#pass
# ! Testing (return result in any case)
ret = (np.nan, np.nan, np.nan, np.nan, np.nan)
'''
# if crop images have black stripes
if flag1 == 1:
print('IMG_1: %s_%s' % (iidx_line, iidx_row))
io.imsave('ci_%s_1/black_%s_%s.png' % (Conf.out_fname, iidx_line, iidx_row), im1)
if flag2 == 1:
        print('IMG_2: %s_%s' % (iidx_line, iidx_row))
io.imsave('ci_%s_2/black_%s_%s.png' % (Conf.out_fname, iidx_line, iidx_row), im2)
'''
#print("Processed block: {} from {}".format(itr, itrCnt))
return ret
def apply_anisd(img, gamma=0.25, step=(1., 1.), ploton=False):
"""
Anisotropic diffusion.
Usage:
imgout = anisodiff(im, niter, kappa, gamma, option)
Arguments:
img - input image
niter - number of iterations
kappa - conduction coefficient 20-100 ?
gamma - max value of .25 for stability
step - tuple, the distance between adjacent pixels in (y,x)
option - 1 Perona Malik diffusion equation No 1
2 Perona Malik diffusion equation No 2
ploton - if True, the image will be plotted on every iteration
Returns:
imgout - diffused image.
kappa controls conduction as a function of gradient. If kappa is low
small intensity gradients are able to block conduction and hence diffusion
across step edges. A large value reduces the influence of intensity
gradients on conduction.
gamma controls speed of diffusion (you usually want it at a maximum of
0.25)
step is used to scale the gradients in case the spacing between adjacent
pixels differs in the x and y axes
Diffusion equation 1 favours high contrast edges over low contrast ones.
Diffusion equation 2 favours wide regions over smaller ones.
Reference:
P. Perona and J. Malik.
    Scale-space and edge detection using anisotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
Original MATLAB code by Peter Kovesi
School of Computer Science & Software Engineering
The University of Western Australia
pk @ csse uwa edu au
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by Alistair Muldal
Sep 2017 modified by Denis Demchev
"""
# init args
kappa = Conf.speckle_filter_parameters[Conf.speckle_filter_name]['kappa']
niter = Conf.speckle_filter_parameters[Conf.speckle_filter_name]['N']
option = Conf.speckle_filter_parameters[Conf.speckle_filter_name]['equation']
# ...you could always diffuse each color channel independently if you
# really want
if img.ndim == 3:
warnings.warn("Only grayscale images allowed, converting to 2D matrix")
img = img.mean(2)
# initialize output array
img = img.astype('float32')
imgout = img.copy()
# niter
# initialize some internal variables
deltaS = np.zeros_like(imgout)
deltaE = deltaS.copy()
NS = deltaS.copy()
EW = deltaS.copy()
gS = np.ones_like(imgout)
gE = gS.copy()
# create the plot figure, if requested
if ploton:
import pylab as pl
fig = pl.figure(figsize=(20, 5.5), num="Anisotropic diffusion")
ax1, ax2 = fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)
ax1.imshow(img, interpolation='nearest')
ih = ax2.imshow(imgout, interpolation='nearest', animated=True)
ax1.set_title("Original image")
ax2.set_title("Iteration 0")
fig.canvas.draw()
for ii in range(niter):
# calculate the diffs
deltaS[:-1, :] = np.diff(imgout, axis=0)
deltaE[:, :-1] = np.diff(imgout, axis=1)
# conduction gradients (only need to compute one per dim!)
if option == 1:
gS = np.exp(-(deltaS / kappa) ** 2.) / step[0]
gE = np.exp(-(deltaE / kappa) ** 2.) / step[1]
elif option == 2:
gS = 1. / (1. + (deltaS / kappa) ** 2.) / step[0]
gE = 1. / (1. + (deltaE / kappa) ** 2.) / step[1]
# update matrices
E = gE * deltaE
S = gS * deltaS
# subtract a copy that has been shifted 'North/West' by one
        # pixel. don't ask questions. just do it. trust me.
NS[:] = S
EW[:] = E
NS[1:, :] -= S[:-1, :]
EW[:, 1:] -= E[:, :-1]
# update the image
imgout += gamma * (NS + EW)
if ploton:
iterstring = "Iteration %i" % (ii + 1)
ih.set_data(imgout)
ax2.set_title(iterstring)
fig.canvas.draw()
# sleep(0.01)
return cv2.convertScaleAbs(imgout)
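# Illustrative usage sketch (not called anywhere in the pipeline): the filter
# reads kappa, the iteration count N and the Perona-Malik equation number from
# Conf.speckle_filter_parameters, so a caller only chooses gamma (<= 0.25 keeps
# the explicit update stable) and the pixel spacing in (y, x).
def example_anisd_filtering(raw_band, gamma=0.25):
    """Run the anisotropic diffusion speckle filter on a single image band."""
    return apply_anisd(raw_band, gamma=gamma, step=(1., 1.), ploton=False)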
#################################################################################
#################################################################################
#################################################################################
# MAIN PROGRAM
#################################################################################
#################################################################################
#################################################################################
# run cc_bm_parallel_dev.py ./data/test_kara_01.tif ./data/test_kara_02.tif 64 4 100
import cc_config
import cc_calc_drift
import cc_calc_drift_filter
import cc_calc_defo
#VAS
if __name__ == '__main__':
multiprocessing.freeze_support()
# check command line args
assert (len(sys.argv) == 6), "Expecting 5 arguments: filename1 filename2 block_size search_area grid_step"
# init config class
Conf = cc_config.Config()
Conf.init(f1_name=sys.argv[1], f2_name=sys.argv[2],
block_size=int(sys.argv[3]), search_area=int(sys.argv[4]), grid_step=int(sys.argv[5]))
Conf.self_prepare()
global_start_time = time.time()
# Downscale
if Conf.rescale_apply:
print('Rescaling...')
Conf.img1 = rescale(Conf.img1, 1.0 / Conf.rescale_factor)
Conf.img2 = rescale(Conf.img2, 1.0 / Conf.rescale_factor)
print('Done!')
# Image intensity normalization
if Conf.image_intensity_byte_normalization:
print('\nImage intensity rescaling (0, 255)...')
#Conf.img1 = exposure.adjust_log(Conf.img1)
#Conf.img2 = exposure.adjust_log(Conf.img2)
# Rescale intensity only
Conf.img1 = exposure.rescale_intensity(Conf.img1, out_range=(0, 255))
Conf.img2 = exposure.rescale_intensity(Conf.img2, out_range=(0, 255))
p2, p98 = np.percentile(Conf.img1, (2, 98))
Conf.img1 = img_as_ubyte(exposure.rescale_intensity(Conf.img1, in_range=(p2, p98)))
p2, p98 = np.percentile(Conf.img2, (2, 98))
Conf.img2 = img_as_ubyte(exposure.rescale_intensity(Conf.img2, in_range=(p2, p98)))
print('Done!')
# Normalization
#print('\n### Laplacian! ###\n')
#Conf.img1 = cv2.Laplacian(Conf.img1, cv2.CV_64F, ksize=19)
#Conf.img2 = cv2.Laplacian(Conf.img2, cv2.CV_64F, ksize=19)
# Speckle filtering
if Conf.speckle_filtering:
        assert Conf.speckle_filter_name in Conf.speckle_filter_parameters, \
            '%s error: appropriate processor is not found' % Conf.speckle_filter_name
print('\nSpeckle filtering with %s\n' % Conf.speckle_filter_name)
if Conf.speckle_filter_name == 'Anisd':
Conf.img1 = apply_anisd(Conf.img1, gamma=0.25, step=(1., 1.), ploton=False)
Conf.img2 = apply_anisd(Conf.img2, gamma=0.25, step=(1., 1.), ploton=False)
#####################
### Calculate Drift ###
#####################
print('\nStart multiprocessing...')
nb_cpus = 10
height, width = Conf.img1.shape
print('Image size Height: %s px Width: %s px' % (height, width))
# init drift calculator class
Calc = cc_calc_drift.CalcDrift(Conf, Conf.img1, Conf.img2)
Calc.create_arguments(height, width)
# arg generator
argGen = ((i) for i in range(Calc.Count))
pool = multiprocessing.Pool(processes=nb_cpus)
# calculate
results = pool.map(Calc.calculate_drift, argGen)
pool.close()
pool.join()
print('Done!')
exec_t = (time.time() - global_start_time) / 60.
print('Calculated in--- %.1f minutes ---' % exec_t)
pref = 'dm'
'''
print('\nPlotting...')
try:
plot_arrows_from_list(pref, '%s/%s_%s_01.png' % (Conf.res_dir, pref, Conf.out_fname),
Conf.img1, results, arrwidth=0.0021, headwidth=2.5, flag_color=True)
plot_arrows_from_list(pref, '%s/%s_%s_02.png' % (Conf.res_dir, pref, Conf.out_fname),
Conf.img2, results, arrwidth=0.0021, headwidth=2.5, flag_color=True)
print('Plot end!')
except:
print('Plot FAULT!')
'''
#####################
#### Filter vectors ####
#####################
print('\nStart outliers filtering...')
# init result filtering class
Filter = cc_calc_drift_filter.CalcDriftFilter(Conf)
# filter
Cnt = Filter.filter_outliers(results)
# Filter land vectors
print('\nLand mask filtering...')
land_filtered_vectors = Filter.filter_land()
print('Done\n')
print('Done!')
print('\nNumber of vectors: \n Unfiltered: %d Filtered: %d\n' %
(Cnt[0], Cnt[1]))
print('\nPlotting...')
plot_arrows('%s/01_spikes_%s_%s.png' % (Conf.res_dir, pref, Conf.out_fname), Conf.img1, Filter.xxx_f, Filter.yyy_f, Filter.uuu_f, Filter.vvv_f, Filter.ccc_f,
arrwidth=0.002, headwidth=5.5, flag_color=True)
plot_arrows('%s/02_spikes_%s_%s.png' % (Conf.res_dir, pref, Conf.out_fname), Conf.img2, Filter.xxx_f, Filter.yyy_f, Filter.uuu_f, Filter.vvv_f, Filter.ccc_f,
arrwidth=0.002, headwidth=5.5, flag_color=True)
#####################
#### Defo calculate ####
#####################
print('\n### Start deformation calculation...')
# init defo calculator class
Defo = cc_calc_defo.CalcDefo(Conf, Calc, Filter)
# calculate deformation from the 2D arrays
mag_speed, divergence, curl, shear, total_deform = Defo.calculate_defo()
print('\n### Success!\n')
#########################
# EXPORT TO GEO-FORMATS
#########################
files_pref = '%spx' % Conf.grid_step
try:
os.makedirs('%s/vec' % Conf.res_dir)
except:
pass
try:
os.makedirs('%s/defo/nc' % Conf.res_dir)
except:
pass
# Vector
export_to_vector(Conf.f1_name, Filter.xxx_f, Filter.yyy_f, Filter.uuu_f, Filter.vvv_f,
'%s/vec/%s_ICEDRIFT_%s.json' % (Conf.res_dir, files_pref, Conf.out_fname),
gridded=False, data_format='geojson')
################
# Geotiff
################
print('\nStart making geotiff..')
try:
os.makedirs('%s/defo/gtiff' % Conf.res_dir)
except:
pass
scale_factor = 1
divergence_gtiff = divergence * scale_factor
GeoT = (Calc.geotransform[0] - Conf.grid_step/2.*Calc.pixelHeight, Conf.grid_step*Calc.pixelWidth, 0.,
Calc.geotransform[3] + Conf.grid_step/2.*Calc.pixelHeight, 0., Conf.grid_step*Calc.pixelHeight)
NDV = np.nan
# Get projection WKT
gd_raster = gdal.Open(Conf.f1_name)
Projection = gd_raster.GetProjection()
#create_geotiff('%s/defo/gtiff/%s_ICEDIV_%s' % (Conf.res_dir, files_pref, Conf.out_fname),
# divergence_gtiff, NDV, u_2d.shape[0], u_2d.shape[1], GeoT, Projection, divergence_gtiff)
create_geotiff('%s/defo/gtiff/%s_ICEDIV_%s' % (Conf.res_dir, files_pref, Conf.out_fname), divergence_gtiff, NDV, GeoT, Projection)
#####################
# Shear
#####################
shear_gtiff = shear * scale_factor
GeoT = (Calc.geotransform[0] - Conf.grid_step / 2. * Calc.pixelHeight, Conf.grid_step * Calc.pixelWidth, 0.,
Calc.geotransform[3] + Conf.grid_step / 2. * Calc.pixelHeight, 0., Conf.grid_step * Calc.pixelHeight)
NDV = np.nan
# Get projection WKT
gd_raster = gdal.Open(Conf.f1_name)
Projection = gd_raster.GetProjection()
create_geotiff('%s/defo/gtiff/%s_ICESHEAR_%s' % (Conf.res_dir, files_pref, Conf.out_fname), shear_gtiff, NDV,
GeoT, Projection)
################
# END Geotiff
################
############
# Netcdf
############
dict_deformation = {'ice_speed': {'data': mag_speed, 'scale_factor': 1., 'units': 'cm/s'},
'ice_divergence': {'data': divergence, 'scale_factor': scale_factor, 'units': '1/h'},
'ice_curl': {'data': curl, 'scale_factor': scale_factor, 'units': '1/h'},
'ice_shear': {'data': shear, 'scale_factor': scale_factor, 'units': '1/h'},
'total_deformation': {'data': total_deform, 'scale_factor': scale_factor, 'units': '1/h'}}
print('\nStart making netCDF for ice deformation...\n')
make_nc('%s/defo/nc/%s_ICEDEF_%s.nc' % (Conf.res_dir, files_pref, Conf.out_fname),
Calc.lon_2d, Calc.lat_2d, dict_deformation)
print('Success!\n')
############
# END Netcdf
############
############################
# END EXPORT TO GEO-FORMATS
############################
# Calc_img_entropy
calc_img_entropy = False
#ent_spikes_dm_S1A_EW_GRDM_1SDH_20150114T133134_20150114T133234_004168_0050E3_8C66_HV_S1A_EW_GRDM_1SDH_20150115T025040_20150115T025140_004176_005114_5C27_HV
d1 = re.findall(r'\d\d\d\d\d\d\d\d\w\d\d\d\d\d\d', Conf.f1_name)[0]
d2 = re.findall(r'\d\d\d\d\d\d\d\d\w\d\d\d\d\d\d', Conf.f2_name)[0]
# Calculate entropy
if calc_img_entropy:
print('Calculate entropy')
plt.clf()
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
        entr_img1 = entropy(Conf.img1, disk(16))
        ff = open('%s/entropy/ent_NCC_%s_%s.txt' % (Conf.res_dir, d1, d2), 'w')
        for i in range(len(Filter.xxx_f)):
            ff.write('%7d %7.2f\n' % (i + 1, np.mean(entr_img1[Filter.yyy_f[i] - Conf.grid_step:Filter.yyy_f[i] + Conf.grid_step,
                                                               Filter.xxx_f[i] - Conf.grid_step:Filter.xxx_f[i] + Conf.grid_step])))
        ff.close()
        # TODO:
        plt.imshow(entr_img1, cmap=plt.cm.get_cmap('hot', 10))
plt.colorbar()
plt.clim(0, 10);
plt.savefig('%s/entropy/img/ent_NCC_%s_%s.png' % (Conf.res_dir, d1, d2), bbox_inches='tight', dpi=300)
# END
| 35.517132
| 716
| 0.553318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20,725
| 0.370255
|
05fe79efe59900fb39e193105ec376940b5bbe44
| 426
|
py
|
Python
|
tests/test_version.py
|
hsh-nids/python-betterproto
|
f5d3b48b1aa49fd64513907ed70124b32758ad3e
|
[
"MIT"
] | 708
|
2019-10-11T06:23:40.000Z
|
2022-03-31T09:39:08.000Z
|
tests/test_version.py
|
hsh-nids/python-betterproto
|
f5d3b48b1aa49fd64513907ed70124b32758ad3e
|
[
"MIT"
] | 302
|
2019-11-11T22:09:21.000Z
|
2022-03-29T11:21:04.000Z
|
tests/test_version.py
|
hsh-nids/python-betterproto
|
f5d3b48b1aa49fd64513907ed70124b32758ad3e
|
[
"MIT"
] | 122
|
2019-12-04T16:22:53.000Z
|
2022-03-20T09:31:10.000Z
|
from betterproto import __version__
from pathlib import Path
import tomlkit
PROJECT_TOML = Path(__file__).joinpath("..", "..", "pyproject.toml").resolve()
def test_version():
with PROJECT_TOML.open() as toml_file:
project_config = tomlkit.loads(toml_file.read())
assert (
__version__ == project_config["tool"]["poetry"]["version"]
), "Project version should match in package and package config"
| 30.428571
| 78
| 0.706573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 107
| 0.251174
|
af01a3ec2accdacee77c90151e5eed151050b732
| 383
|
py
|
Python
|
PythonMundoDois/ex048.py
|
HendrylNogueira/CursoPython3
|
c3d9d4e2a27312b83d744aaf0f8d01b26e6faf4f
|
[
"MIT"
] | null | null | null |
PythonMundoDois/ex048.py
|
HendrylNogueira/CursoPython3
|
c3d9d4e2a27312b83d744aaf0f8d01b26e6faf4f
|
[
"MIT"
] | null | null | null |
PythonMundoDois/ex048.py
|
HendrylNogueira/CursoPython3
|
c3d9d4e2a27312b83d744aaf0f8d01b26e6faf4f
|
[
"MIT"
] | null | null | null |
'''Write a program that calculates the sum of all the odd numbers that are multiples of three
and that lie in the interval from 1 to 500. '''
cont = 0
total = 0
for soma in range(1, 501, 2):
if soma % 3 == 0:
cont += 1
total += soma
print(f'{cont} values with the specified characteristics were found.')
print(f'And their sum is equal to {total}')
| 31.916667
| 114
| 0.67624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 262
| 0.670077
|
af029a134b4e84a7dca43a17a1ce48c9d78abdd2
| 9,722
|
py
|
Python
|
Models.py
|
BradHend/machine_learning_from_scratch
|
6c83f17d1c48da9ad3df902b3090a8cb2c544f15
|
[
"MIT"
] | null | null | null |
Models.py
|
BradHend/machine_learning_from_scratch
|
6c83f17d1c48da9ad3df902b3090a8cb2c544f15
|
[
"MIT"
] | null | null | null |
Models.py
|
BradHend/machine_learning_from_scratch
|
6c83f17d1c48da9ad3df902b3090a8cb2c544f15
|
[
"MIT"
] | null | null | null |
"""classes and methods for different model architectures
"""
#python packages
import numpy as np
# Machine Learning from Scratch packages
from Layers import FullyConnected
from utils.optimizers import *
class NeuralNet():
"""
Linear stack of layers.
"""
    def __init__(self, layers=None):
        self.layers = []
        # Add any layers passed into constructor to the model
        if layers:
            for layer in layers:
                self.layers.append(layer)
        self.output = None
def add_layer(self, layer_type=None,
input_shape=None,
output_shape=None,
activation=None,
dropout=1.,
lambd=0,):
"""Adds a Layer class to model
"""
#only FullyConnected layer type supported right now
if layer_type=="FullyConnected":
layer = FullyConnected(input_shape=input_shape,
output_shape=output_shape,
activation=activation,
dropout=dropout,
lambd=lambd
)
#append layer to model Class
self.layers.append(layer)
def model_forward(self,X,training=False):
""" Perform forward evaluation of model on given data
Inputs:
X -- input data to be evaluated by model vector shape=(len(Wl_1), number of examples)
training -- training flag, no layer dropout if True
Outputs:
predictions -- model prediction(s) for given data
"""
layer_inputs = X
for layer in self.layers:
if training==False: #only use dropout when training
layer.dropout=1.
#loop over all layers, using the output of previous layer as input
layer.layer_forward(layer_inputs=layer_inputs)
#update "layer_inputs" for next iteration
layer_inputs = layer.outputs
#predictions will be layer.output of the last layer
predictions = layer_inputs
return predictions
def model_backprop(self,Y):
""" Perform back-prop. of prediction error through model
Inputs:
Y -- truth "label" vector shape=(n_y, number of examples)
Outputs:
None -- updates Layer properties
"""
# output_layer = self.layers[-1]
dZ = self.compute_loss_grad(Y)
#backprop output layer results through the network
for layer in reversed(self.layers):
            #loop over all layers, using the following layer's dZ
layer.layer_backprop(dZ)
#update "dZ" for next iteration, set to current layer's Activation gradient
dZ = layer.dA
def compute_cost(self,predictions,Y):
""" compute "cost" for given predictions/truth
Inputs:
predictions -- model predictions vector shape=(n_y, number of examples)
Y -- truth "label" vector shape=(n_y, number of examples)
Outputs:
cost - gradient of output layer's activation
"""
m = Y.shape[1]
# Compute loss from predictions and y.
predictions = np.clip(predictions, 1e-13, 1 - 1e-13)
if self.loss == 'binary-crossentropy':
cost = np.multiply(-np.log(predictions),Y) + np.multiply(-np.log(1 - predictions), 1 - Y)
elif self.loss == 'categorical-crossentropy':
#Categorical Crossentropy
cost = np.sum(np.multiply(Y, -np.log(predictions)),axis=0,keepdims=False)
else:
return None
return cost
def compute_loss_grad(self,Y):
"""
Inputs:
Y -- truth "label" vector shape=(n_y, number of examples)
Outputs:
dZ - gradient of output layer's loss
"""
output_layer = self.layers[-1]
# outputs = output_layer.outputs
predictions = np.clip(output_layer.outputs, 1e-13, 1 - 1e-13)
if self.loss == 'binary-crossentropy':
#gradient of sigmoid (for now)
# print("outputs: ", output_layer.outputs)
# print(1 - output_layer.outputs)
dZ = - (np.divide(Y, predictions) - np.divide(1 - Y, 1 - predictions))
elif self.loss == 'categorical-crossentropy':
#gradient of softmax
dZ = predictions - Y
return dZ
def predict(self, X):
predictions = self.model_forward(X,training=False)
return predictions
def train(self, X, Y,
optimizer="gd",
loss=None,
learning_rate = 0.007,
mini_batch_size = [],
num_epochs = 100,
print_cost=True):
"""
Inputs:
X -- input data, of shape=(n_x, number of examples)
Y -- truth "label" vector shape=(n_y, number of examples)
loss -- loss function to use
optimizer -- optimizer to use to update trainable params.
learning_rate -- the learning rate, scalar.
mini_batch_size -- the size of each dataset mini batch
num_epochs -- number of epochs
print_cost -- True to print the cost every 1000 epochs
"""
self.loss = loss
if print_cost:
#print at every 1% of training completion, or at every epoch if num_epoch <= 100
print_interval = np.max([1,int(0.01*num_epochs)])
m = X.shape[1] # number of training examples
if not mini_batch_size:
mini_batch_size = m #make the mini-batch the entire dataset
costs = [] # to keep track of the cost
accuracy_lst = [] # keep track of acc. for multi-class problems
seed = 10
# Initialize layers (weights & bias vectors)
for layer in self.layers:
layer.initialize_layer()
if layer.dropout > 1.: #check that inputs make sense
layer.dropout = 1.
#if true, dropout was requested, override/ignore user's L2 reg. request (as of this commit)
if layer.dropout < 1.:
layer.lambd = 0
# Initialize the optimizer
if optimizer == "gd":
pass # no initialization needed
elif optimizer == "momentum":
initialize_velocity(self.layers)
beta = 0.90
elif optimizer == "adam":
t = 0 #counter required for Adam update
#use values from the ADAM paper
beta1 = 0.9
beta2 = 0.999
epsilon = 1e-7
learning_rate = 0.01
initialize_adam(self.layers)
# Optimization loop
for i in range(num_epochs):
# Define the random minibatches, change seed each time
seed = seed + 1
minibatches = make_sub_batches(X, Y, mini_batch_size, seed)
#init cost summation variable
cost_total = 0.
#init accuracy summation variable
training_correct = 0.
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# Forward prop
predictions = self.model_forward(minibatch_X, training=True)
# Compute cost (for printing) and add to the running total
cost_total += np.nansum(self.compute_cost(predictions, minibatch_Y))
#compute train set acc. for multi-class class. problems
if (predictions.shape[0] > 1) | (self.loss == ('categorical-crossentropy')):
#compute number of examples correctly classified, assuming only one class can present right now
training_correct += np.sum(np.argmax(predictions,axis=0)==np.argmax(minibatch_Y,axis=0),keepdims=False)
# Backprop
self.model_backprop(Y=minibatch_Y)
# Update weights/bias
if optimizer == "gd":
update_layers_with_gradient_descent(self.layers, learning_rate)
elif optimizer == "momentum":
update_parameters_with_momentum(self.layers, beta, learning_rate)
elif optimizer == "adam":
t = t + 1 # Adam counter
update_parameters_with_adam(self.layers, t, learning_rate, beta1, beta2, epsilon)
#compute training stats. for this epoch
cost_avg = cost_total / m
if predictions.shape[0] > 1: #for multi-class class. problems show accuracy
accuracy_percent = 100.*(training_correct/m)
# Print the cost every epoch
# if print_cost and i % print_interval == 0:
if print_cost and i % 1 == 0:
if predictions.shape[0] > 1: #for multi-class class. problems show accuracy
print("Cost after epoch %i: %f, Acc.: %f" %(i, cost_avg, accuracy_percent))
accuracy_lst.append(accuracy_percent)
else:
print(("Cost after epoch %i: %f" %(i, cost_avg)))
costs.append(cost_avg)
#will need to implement better convergence detection..
if self.loss == ('categorical-crossentropy'):
pass
elif cost_avg < 0.17:
break
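# Minimal usage sketch (assumes the FullyConnected layer in Layers.py accepts a
# 'sigmoid' activation): build a tiny binary classifier on random data and train
# it with plain gradient descent. Runs only when this module is executed
# directly, so importing Models is unaffected.
if __name__ == "__main__":
    np.random.seed(0)
    X_demo = np.random.rand(4, 200)                                  # 4 features, 200 examples
    Y_demo = (X_demo.sum(axis=0, keepdims=True) > 2.).astype(float)  # one binary label per example
    model = NeuralNet()
    model.add_layer(layer_type="FullyConnected", input_shape=4,
                    output_shape=8, activation="sigmoid")
    model.add_layer(layer_type="FullyConnected", input_shape=8,
                    output_shape=1, activation="sigmoid")
    model.train(X_demo, Y_demo, optimizer="gd", loss="binary-crossentropy",
                learning_rate=0.1, num_epochs=10, print_cost=True)
    print("Sample predictions:", model.predict(X_demo[:, :5]))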
| 40.508333
| 123
| 0.54783
| 9,516
| 0.978811
| 0
| 0
| 0
| 0
| 0
| 0
| 3,923
| 0.403518
|
af03e1bca2e6bcaf4e2f161d2b4078d32b20e402
| 421
|
py
|
Python
|
tests/parser/aggregates.count.assignment.17.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.count.assignment.17.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.count.assignment.17.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
input = """
a(S,T,Z) :- #count{X: r(T,X)} = Z, #count{W: q(W,S)} = T, #count{K: p(K,Y)} = S.
q(1,1).
q(2,2).
r(1,1).
r(1,2).
r(1,3).
r(2,2).
r(3,3).
p(1,1).
p(2,2).
%out{ a(2,1,3) }
%repository error
"""
output = """
a(S,T,Z) :- #count{X: r(T,X)} = Z, #count{W: q(W,S)} = T, #count{K: p(K,Y)} = S.
q(1,1).
q(2,2).
r(1,1).
r(1,2).
r(1,3).
r(2,2).
r(3,3).
p(1,1).
p(2,2).
%out{ a(2,1,3) }
%repository error
"""
| 10.268293
| 80
| 0.420428
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 402
| 0.954869
|
af055ba7a6d6cbe2445070c4e478e7e26c56dad3
| 1,724
|
py
|
Python
|
ipmi_power_manager.py
|
spirkaa/ansible-homelab
|
94138c85ddb132a08dab55b4e9a9b43160d02c76
|
[
"MIT"
] | null | null | null |
ipmi_power_manager.py
|
spirkaa/ansible-homelab
|
94138c85ddb132a08dab55b4e9a9b43160d02c76
|
[
"MIT"
] | null | null | null |
ipmi_power_manager.py
|
spirkaa/ansible-homelab
|
94138c85ddb132a08dab55b4e9a9b43160d02c76
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import os
import requests
import urllib3
from dotenv import load_dotenv
logger = logging.getLogger("__name__")
logging.basicConfig(
format="%(asctime)s [%(levelname)8s] [%(name)s:%(lineno)s:%(funcName)20s()] --- %(message)s",
level=logging.INFO,
)
logging.getLogger("urllib3").setLevel(logging.WARNING)
urllib3.disable_warnings()
load_dotenv()
IPMI_USERNAME = os.getenv("IPMI_USERNAME")
IPMI_PASSWORD = os.getenv("IPMI_PASSWORD")
API_ROOT = "https://spmaxi-ipmi.home.devmem.ru/redfish/v1/"
API_AUTH = "SessionService/Sessions"
API_ACTIONS_RESET = "Systems/1/Actions/ComputerSystem.Reset"
POWER_STATE_ON = "On"
POWER_STATE_OFF = "GracefulShutdown"
parser = argparse.ArgumentParser(description="Supermicro IPMI Power Manager")
parser.add_argument("--on", dest="power_state", action="store_true")
parser.add_argument("--off", dest="power_state", action="store_false")
args = parser.parse_args()
if args.power_state:
power_state = POWER_STATE_ON
else:
power_state = POWER_STATE_OFF
def get_auth_headers():
logger.debug("Get session headers")
endpoint_url = API_ROOT + API_AUTH
payload = f'{{"UserName": "{IPMI_USERNAME}","Password": "{IPMI_PASSWORD}"}}'
headers = {"Content-Type": "application/json"}
r = requests.post(endpoint_url, headers=headers, data=payload, verify=False)
return r.headers
def set_power_state(value):
logger.debug("Set power state to '%s'", value)
endpoint_url = API_ROOT + API_ACTIONS_RESET
payload = f'{{"ResetType": "{value}"}}'
headers = get_auth_headers()
r = requests.post(endpoint_url, headers=headers, data=payload, verify=False)
print(r.json())
set_power_state(power_state)
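# Example invocations (illustrative; IPMI_USERNAME and IPMI_PASSWORD are
# expected in the environment or in a .env file next to this script):
#   python ipmi_power_manager.py --on    # power the host on
#   python ipmi_power_manager.py --off   # request a graceful shutdown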
| 28.262295
| 101
| 0.728538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 537
| 0.311485
|
af05ab26695bad32472af5a5dde8334bddbea53d
| 1,572
|
py
|
Python
|
pyhsi/gui/graphics.py
|
rddunphy/pyHSI
|
b55c2a49568e04e0a2fb39da01cfe1f129bc86a4
|
[
"MIT"
] | null | null | null |
pyhsi/gui/graphics.py
|
rddunphy/pyHSI
|
b55c2a49568e04e0a2fb39da01cfe1f129bc86a4
|
[
"MIT"
] | null | null | null |
pyhsi/gui/graphics.py
|
rddunphy/pyHSI
|
b55c2a49568e04e0a2fb39da01cfe1f129bc86a4
|
[
"MIT"
] | null | null | null |
"""Stuff to do with processing images and loading icons"""
import importlib.resources as res
import cv2
import PySimpleGUI as sg
def get_application_icon():
"""Get the PyHSI icon for this OS (.ico for Windows, .png otherwise)"""
return res.read_binary("pyhsi.gui.icons", "pyhsi.png")
def get_icon(icon_name, hidpi=False):
"""Return full path for icon with given name"""
size = 40 if hidpi else 25
return res.read_binary("pyhsi.gui.icons", f"{icon_name}{size}.png")
def get_icon_button(icon_name, hidpi=False, **kwargs):
"""Create a button with an icon as an image"""
mc = ("white", "#405e92")
icon = get_icon(icon_name, hidpi=hidpi)
return sg.Button("", image_data=icon, mouseover_colors=mc, **kwargs)
def set_button_icon(button, icon_name, hidpi=False, **kwargs):
"""Change image on button"""
icon = get_icon(icon_name, hidpi=hidpi)
button.update(image_data=icon, **kwargs)
def resize_img_to_area(img, size, preserve_aspect_ratio=True, interpolation=False):
"""Resize frame to fill available area in preview panel"""
max_w = max(size[0] - 20, 20)
max_h = max(size[1] - 20, 20)
if preserve_aspect_ratio:
old_h = img.shape[0]
old_w = img.shape[1]
new_w = round(min(max_w, old_w * max_h / old_h))
new_h = round(min(max_h, old_h * max_w / old_w))
else:
new_w = max_w
new_h = max_h
if interpolation:
interp = cv2.INTER_LINEAR
else:
interp = cv2.INTER_NEAREST
return cv2.resize(img, (new_w, new_h), interpolation=interp)
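# Small self-check sketch (illustrative only, needs no GUI): resize a random
# 200x100 RGB image into a 300x300 preview area; the aspect ratio is kept, so
# after the 20 px margin is subtracted the result should come out as 280x140.
if __name__ == "__main__":
    import numpy as np
    dummy = (np.random.rand(100, 200, 3) * 255).astype("uint8")
    resized = resize_img_to_area(dummy, (300, 300))
    print("Resized image shape:", resized.shape)  # expected (140, 280, 3)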
| 31.44
| 83
| 0.667939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 395
| 0.251272
|
af0729cb1679e26625740cd816c3bcd5296cbb19
| 315
|
py
|
Python
|
configs/densenet169_lr_0.001.py
|
FeiYuejiao/NLP_Pretrain
|
7aa4693c31a7bba9b90f401d2586ef154dd7fb81
|
[
"MIT"
] | null | null | null |
configs/densenet169_lr_0.001.py
|
FeiYuejiao/NLP_Pretrain
|
7aa4693c31a7bba9b90f401d2586ef154dd7fb81
|
[
"MIT"
] | 1
|
2020-12-30T13:49:29.000Z
|
2020-12-30T13:49:29.000Z
|
configs/densenet169_lr_0.001.py
|
FeiYuejiao/NLP_Pretrain
|
7aa4693c31a7bba9b90f401d2586ef154dd7fb81
|
[
"MIT"
] | null | null | null |
lr = 0.001
model_path = 'model/IC_models/densenet169_lr_0.001/'
crop_size = 32
log_step = 10
save_step = 500
num_epochs = 400
batch_size = 256
num_workers = 8
loading = False
# lr
# Model parameters
model = dict(
net='densenet169',
embed_size=256,
hidden_size=512,
num_layers=1,
resnet=101
)
| 14.318182
| 52
| 0.695238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 74
| 0.234921
|
af08ea1d739ab24c301e649fcfca7bffa176fb4c
| 3,750
|
py
|
Python
|
src/models/metapop.py
|
TLouf/multiling-twitter
|
9a39b5b70da53ca717cb74480697f3756a95b8e4
|
[
"RSA-MD"
] | 1
|
2021-05-09T15:42:04.000Z
|
2021-05-09T15:42:04.000Z
|
src/models/metapop.py
|
TLouf/multiling-twitter
|
9a39b5b70da53ca717cb74480697f3756a95b8e4
|
[
"RSA-MD"
] | 3
|
2020-10-21T09:04:03.000Z
|
2021-06-02T02:05:13.000Z
|
src/models/metapop.py
|
TLouf/multiling-twitter
|
9a39b5b70da53ca717cb74480697f3756a95b8e4
|
[
"RSA-MD"
] | null | null | null |
'''
Implements the computation of the time derivatives and associated Jacobian
corresponding to the approximated equations in a metapopulation. Added kwargs in
every function so that we may reuse the parameter dictionary used in the models,
even if some of the parameters it contains are not used in these functions.
'''
import numpy as np
def bi_model_system(N_L, N, nu, nu_T_N, a=1, s=0.5, rate=1, **kwargs):
'''
Computes the values of the time derivatives in every cell for the two
monolingual kinds, for Castello's model.
'''
N_A = N_L[:N.shape[0]]
N_B = N_L[N.shape[0]:]
    # Every element of row i of nu must be divided by the same value
    # sigma[i], hence this trick with the two transposes.
nu_T_N_A = np.dot(nu.T, N_A)
nu_T_N_B = np.dot(nu.T, N_B)
N_A_eq = rate * (
s * (N - N_A - N_B) * np.dot(nu, (1 - nu_T_N_B / nu_T_N)**a)
- (1-s) * N_A * np.dot(nu, (nu_T_N_B / nu_T_N)**a))
N_B_eq = rate * (
(1-s) * (N - N_A - N_B) * np.dot(nu, (1 - nu_T_N_A / nu_T_N)**a)
- s * N_B * np.dot(nu, (nu_T_N_A / nu_T_N)**a))
return np.concatenate((N_A_eq, N_B_eq))
def bi_pref_system(N_L, N, nu, nu_T_N, mu=0.02, c=0.1, s=0.5, q=0.5, rate=1,
**kwargs):
'''
Computes the values of the time derivatives in every cell for the two
monolingual kinds, for our model.
'''
N_A = N_L[:N.shape[0]]
N_B = N_L[N.shape[0]:]
    # Every element of row i of nu must be divided by the same value
    # sigma[i], hence this trick with the two transposes.
nu_T_N_A = np.dot(nu.T, N_A)
nu_T_N_B = np.dot(nu.T, N_B)
sum_nu_rows = np.sum(nu, axis=1)
nu_nu_T_N_L_term = np.dot(nu, ((1-q)*nu_T_N_A - q*nu_T_N_B) / nu_T_N)
N_A_eq = rate * (
mu*s * (N - N_A - N_B) * (q*sum_nu_rows + nu_nu_T_N_L_term)
- c*(1-mu)*(1-s) * N_A * ((1-q)*sum_nu_rows - nu_nu_T_N_L_term))
N_B_eq = rate * (
mu*(1-s) * (N - N_A - N_B) * ((1-q)*sum_nu_rows - nu_nu_T_N_L_term)
- c*(1-mu)*s * N_B * (q*sum_nu_rows + nu_nu_T_N_L_term))
return np.concatenate((N_A_eq, N_B_eq))
def bi_pref_jacobian(N_L, N, nu, nu_T_N, mu=0.02, c=0.1, s=0.5, q=0.5,
**kwargs):
'''
Computes the Jacobian of the system at a given point for our model.
'''
n_cells = N.shape[0]
N_A = N_L[:n_cells]
N_B = N_L[n_cells:]
nu_T_N_A = np.dot(nu.T, N_A)
nu_T_N_B = np.dot(nu.T, N_B)
nu_cols_prod = np.dot(nu / nu_T_N, nu.T)
nu_T_N_L_term = ((1-q)*nu_T_N_A - q*nu_T_N_B) / nu_T_N
sum_nu_rows = np.sum(nu, axis=1)
AA_block = ((mu*s*(1-q)*(N-N_A-N_B) + c*(1-mu)*(1-s)*(1-q)*N_A)
* nu_cols_prod.T).T
AA_block += np.eye(n_cells) * (
(-mu*s*q - c*(1-mu)*(1-s)*(1-q)) * sum_nu_rows
+ np.dot(
nu,
(c*(1-mu)*(1-s) - mu*s) * nu_T_N_L_term))
AB_block = ((-mu*s*q*(N-N_A-N_B) - c*(1-mu)*(1-s)*q*N_A)
* nu_cols_prod.T).T
AB_block += np.eye(n_cells) * (
-mu*s*q * sum_nu_rows
+ np.dot(
nu,
-mu*s * nu_T_N_L_term))
BA_block = (-(mu*(1-s)*(1-q)*(N-N_A-N_B) - c*(1-mu)*s*(1-q)*N_B)
* nu_cols_prod.T).T
BA_block += np.eye(n_cells) * (
-mu*(1-s)*(1-q) * sum_nu_rows
+ np.dot(
nu,
mu*(1-s) * nu_T_N_L_term))
BB_block = ((mu*(1-s)*q*(N-N_A-N_B) + c*(1-mu)*s*q*N_B)
* nu_cols_prod.T).T
BB_block += np.eye(n_cells) * (
(-mu*(1-s)*(1-q) - c*(1-mu)*s*q) * sum_nu_rows
+ np.dot(
nu,
(-c*(1-mu)*s + mu*(1-s)) * nu_T_N_L_term))
jacobian = np.block([[AA_block, AB_block],
[BA_block, BB_block]])
return jacobian
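# Minimal integration sketch (illustrative; assumes SciPy is available in
# addition to NumPy). The 3-cell populations N and the mobility matrix nu are
# made up for the example, and nu_T_N is taken to be nu.T @ N, matching the
# per-language terms used inside the functions above.
if __name__ == '__main__':
    from scipy.integrate import solve_ivp

    N = np.array([1000., 800., 600.])
    nu = np.array([[0.8, 0.1, 0.1],
                   [0.2, 0.7, 0.1],
                   [0.1, 0.2, 0.7]])
    nu_T_N = np.dot(nu.T, N)
    # start every cell with 40% monolinguals of each language
    N_L0 = np.concatenate((0.4 * N, 0.4 * N))
    sol = solve_ivp(lambda t, y: bi_pref_system(y, N, nu, nu_T_N, s=0.6),
                    (0., 100.), N_L0)
    print('Final (N_A, N_B) per cell:', sol.y[:, -1])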
| 37.128713
| 80
| 0.553333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 898
| 0.239467
|
af0a0e2a3cb4cd7ca612fe33ee2283d0d807bbec
| 2,759
|
py
|
Python
|
abstract_tiles.py
|
CompassMentis/towers_of_strength
|
405af4dc114bd15fed24135b050267a2126c9d52
|
[
"MIT"
] | null | null | null |
abstract_tiles.py
|
CompassMentis/towers_of_strength
|
405af4dc114bd15fed24135b050267a2126c9d52
|
[
"MIT"
] | 1
|
2019-10-12T10:31:24.000Z
|
2019-10-12T10:31:24.000Z
|
abstract_tiles.py
|
CompassMentis/towers_of_strength
|
405af4dc114bd15fed24135b050267a2126c9d52
|
[
"MIT"
] | null | null | null |
import pygame
from settings import Settings
from vector import Vector
import utils
class AbstractTile:
pass
class AbstractStaticTile(AbstractTile):
IMAGE_FOLDER = 'static_tiles'
def __init__(self, code, filename, with_sparkle=False):
self.code = code
self.filename = filename
self.image = pygame.image.load(f'{Settings.image_folder}/{self.IMAGE_FOLDER}/{filename}.png')
if with_sparkle:
self.spark_image = pygame.image.load(f'{Settings.image_folder}/{self.IMAGE_FOLDER}/{filename}_sparkle.png')
self.offset = Vector(0, Settings.tile_height - self.image.get_rect().height)
@property
def code_type(self):
return self.code.split('_')[0]
@property
def direction(self):
return self.code.split('_')[-1]
@property
def is_path(self):
return self.code_type in ['CG', 'CS', 'C', 'F', 'S', 'L']
@property
def is_coins(self):
return self.code_type in ['CG', 'CS']
@property
def value(self):
if self.code_type == 'CG':
return Settings.gold_value
if self.code_type == 'CS':
return Settings.silver_value
return 0
def __repr__(self):
return f'<Tile>(code={self.code}, filename={self.IMAGE_FOLDER}/{self.filename})'
@property
def is_start(self):
return self.code_type == 'SP'
@property
def is_end(self):
return self.code_type == 'F'
@property
def is_empty(self):
return self.code_type == 'O'
class AbstractTowerTile:
def __init__(self, type):
self.type = type
self.cost = Settings.tower_costs[type]
self.images = {
f'{direction}': pygame.image.load(f'images/tower_tiles/{type}_{direction}.png')
for direction in 'NESW'
}
self.font = pygame.font.SysFont('Arial', 24)
self.cost_text_active = self.font.render(f'({self.cost})', True, pygame.Color('yellow'))
self.cost_text_inactive = self.font.render(f'({self.cost})', True, pygame.Color('grey'))
self.image_menu_active = pygame.image.load(f'images/tower_tiles/{type}_menu.png')
self.image_menu_inactive = pygame.image.load(f'images/tower_tiles/{type}_menu_grey.png')
self.image_heart = pygame.image.load(f'images/misc/heart_{type}.png')
self.menu_location = utils.cell_to_isometric(Settings.menu_tower_locations[type]) + \
Vector(0, Settings.tile_height - self.image_menu_active.get_rect().height)
self.cost_location = utils.cell_to_isometric(
Settings.menu_tower_locations[type] + Settings.tower_cost_offset
)
self.offset = Vector(0, Settings.tile_height - self.images['E'].get_rect().height)
| 29.98913
| 119
| 0.642987
| 2,666
| 0.966292
| 0
| 0
| 732
| 0.265314
| 0
| 0
| 499
| 0.180863
|
af0a3c55728ddc9080f992028fc9b392f3c49b8c
| 657
|
py
|
Python
|
code/examples/classifier_compression/save_weights_cifar10.py
|
he-actlab/waveq.code
|
024d55af6d989d4074d3e555d03b76a2f7eac209
|
[
"CNRI-Python"
] | 1
|
2020-04-09T03:21:32.000Z
|
2020-04-09T03:21:32.000Z
|
code/examples/classifier_compression/save_weights_cifar10.py
|
he-actlab/waveq.code
|
024d55af6d989d4074d3e555d03b76a2f7eac209
|
[
"CNRI-Python"
] | 4
|
2020-09-26T00:53:47.000Z
|
2022-02-10T01:23:34.000Z
|
code/examples/classifier_compression/save_weights_cifar10.py
|
sinreq-learn/sinreq-learn.code
|
a205d3fa22a41d5f4fc1ef1e5698c4f1dbb11e6a
|
[
"BSD-4-Clause-UC"
] | null | null | null |
import torch
import numpy as np
filename = '2020.01.12-044406'
model = torch.load('logs/'+filename+'/checkpoint.pth.tar')
k1 = model['state_dict']['module.conv1.weight'].data.cpu().numpy()
k2 = model['state_dict']['module.conv2.weight'].data.cpu().numpy()
k3 = model['state_dict']['module.fc1.weight'].data.cpu().numpy()
k4 = model['state_dict']['module.fc2.weight'].data.cpu().numpy()
k5 = model['state_dict']['module.fc3.weight'].data.cpu().numpy()
np.save('weights_sin2Reg/cifar10/l1',k1)
np.save('weights_sin2Reg/cifar10/l2',k2)
np.save('weights_sin2Reg/cifar10/l3',k3)
np.save('weights_sin2Reg/cifar10/l4',k4)
np.save('weights_sin2Reg/cifar10/l5',k5)
| 41.0625
| 66
| 0.721461
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 346
| 0.526636
|
af0ab77a97059c19f88a0b36ce01422819f17356
| 2,174
|
py
|
Python
|
tests/app/dao/test_marketings_dao.py
|
kentsanggds/api
|
651cdf7d496690722d6a4f5b51f04f4be97899d4
|
[
"MIT"
] | 1
|
2018-10-12T15:04:31.000Z
|
2018-10-12T15:04:31.000Z
|
tests/app/dao/test_marketings_dao.py
|
kentsanggds/api
|
651cdf7d496690722d6a4f5b51f04f4be97899d4
|
[
"MIT"
] | 169
|
2017-11-07T00:45:25.000Z
|
2022-03-12T00:08:59.000Z
|
tests/app/dao/test_marketings_dao.py
|
kentsanggds/api
|
651cdf7d496690722d6a4f5b51f04f4be97899d4
|
[
"MIT"
] | 1
|
2019-08-15T14:51:31.000Z
|
2019-08-15T14:51:31.000Z
|
from sqlalchemy.exc import IntegrityError
import pytest
from app.dao.marketings_dao import (
dao_update_marketing, dao_get_marketing_by_id, dao_get_marketings
)
from app.models import Marketing
from tests.db import create_marketing
class WhenUsingMarketingsDAO(object):
def it_creates_an_marketing(self, db_session):
marketing = create_marketing()
assert Marketing.query.count() == 1
marketing_from_db = Marketing.query.filter(Marketing.id == marketing.id).first()
assert marketing == marketing_from_db
def it_updates_a_marketing_dao(self, db, db_session, sample_marketing):
dao_update_marketing(sample_marketing.id, description='New posters')
marketing_from_db = Marketing.query.filter(Marketing.id == sample_marketing.id).first()
assert marketing_from_db.description == 'New posters'
def it_gets_all_active_marketings(self, db, db_session, sample_marketing):
create_marketing(description='Email')
create_marketing(description='Old magazine', active=False)
fetched_marketings = dao_get_marketings()
assert len(fetched_marketings) == 2
def it_gets_an_marketing_by_id(self, db, db_session, sample_marketing):
marketing = create_marketing(description='Email')
fetched_marketing = dao_get_marketing_by_id(marketing.id)
assert fetched_marketing == marketing
def it_doesnt_create_marketings_with_same_description(self, db_session, sample_marketing):
with pytest.raises(expected_exception=IntegrityError):
create_marketing(description=sample_marketing.description)
marketings = Marketing.query.all()
assert len(marketings) == 1
def it_doesnt_update_marketingss_with_same_description(self, db_session, sample_marketing):
marketing = create_marketing(description='New posters')
with pytest.raises(expected_exception=IntegrityError):
dao_update_marketing(str(marketing.id), description=sample_marketing.description)
found_marketing = Marketing.query.filter(Marketing.id == marketing.id).one()
assert found_marketing.description == 'New posters'
| 38.821429
| 95
| 0.75069
| 1,933
| 0.889144
| 0
| 0
| 0
| 0
| 0
| 0
| 80
| 0.036799
|
af0ac97f6ae7709623b9997f5f301e7547049b9a
| 14,898
|
py
|
Python
|
tracetools_analysis/tracetools_analysis/data_model/ros2.py
|
christophebedard/tracetools_analysis
|
1dfb747b62311ee370ed392a0ad4a5cd2d11d3be
|
[
"Apache-2.0"
] | 6
|
2020-04-02T21:10:09.000Z
|
2021-06-07T06:56:16.000Z
|
tracetools_analysis/tracetools_analysis/data_model/ros2.py
|
christophebedard/tracetools_analysis
|
1dfb747b62311ee370ed392a0ad4a5cd2d11d3be
|
[
"Apache-2.0"
] | null | null | null |
tracetools_analysis/tracetools_analysis/data_model/ros2.py
|
christophebedard/tracetools_analysis
|
1dfb747b62311ee370ed392a0ad4a5cd2d11d3be
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Robert Bosch GmbH
# Copyright 2020-2021 Christophe Bedard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for ROS 2 data model."""
import numpy as np
import pandas as pd
from . import DataModel
from . import DataModelIntermediateStorage
class Ros2DataModel(DataModel):
"""
Container to model pre-processed ROS 2 data for analysis.
This aims to represent the data in a ROS 2-aware way.
"""
def __init__(self) -> None:
"""Create a Ros2DataModel."""
super().__init__()
# Objects (one-time events, usually when something is created)
self._contexts: DataModelIntermediateStorage = []
self._nodes: DataModelIntermediateStorage = []
self._rmw_publishers: DataModelIntermediateStorage = []
self._rcl_publishers: DataModelIntermediateStorage = []
self._rmw_subscriptions: DataModelIntermediateStorage = []
self._rcl_subscriptions: DataModelIntermediateStorage = []
self._subscription_objects: DataModelIntermediateStorage = []
self._services: DataModelIntermediateStorage = []
self._clients: DataModelIntermediateStorage = []
self._timers: DataModelIntermediateStorage = []
self._timer_node_links: DataModelIntermediateStorage = []
self._callback_objects: DataModelIntermediateStorage = []
self._callback_symbols: DataModelIntermediateStorage = []
self._lifecycle_state_machines: DataModelIntermediateStorage = []
# Events (multiple instances, may not have a meaningful index)
self._rclcpp_publish_instances: DataModelIntermediateStorage = []
self._rcl_publish_instances: DataModelIntermediateStorage = []
self._rmw_publish_instances: DataModelIntermediateStorage = []
self._rmw_take_instances: DataModelIntermediateStorage = []
self._rcl_take_instances: DataModelIntermediateStorage = []
self._rclcpp_take_instances: DataModelIntermediateStorage = []
self._callback_instances: DataModelIntermediateStorage = []
self._lifecycle_transitions: DataModelIntermediateStorage = []
def add_context(
self, context_handle, timestamp, pid, version
) -> None:
self._contexts.append({
'context_handle': context_handle,
'timestamp': timestamp,
'pid': pid,
'version': version,
})
def add_node(
self, node_handle, timestamp, tid, rmw_handle, name, namespace
) -> None:
self._nodes.append({
'node_handle': node_handle,
'timestamp': timestamp,
'tid': tid,
'rmw_handle': rmw_handle,
'name': name,
'namespace': namespace,
})
def add_rmw_publisher(
self, handle, timestamp, gid,
) -> None:
self._rmw_publishers.append({
'publisher_handle': handle,
'timestamp': timestamp,
'gid': gid,
})
def add_rcl_publisher(
self, handle, timestamp, node_handle, rmw_handle, topic_name, depth
) -> None:
self._rcl_publishers.append({
'publisher_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
})
def add_rclcpp_publish_instance(
self, timestamp, message,
) -> None:
self._rclcpp_publish_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rcl_publish_instance(
self, publisher_handle, timestamp, message,
) -> None:
self._rcl_publish_instances.append({
'publisher_handle': publisher_handle,
'timestamp': timestamp,
'message': message,
})
def add_rmw_publish_instance(
self, timestamp, message,
) -> None:
self._rmw_publish_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rmw_subscription(
self, handle, timestamp, gid
) -> None:
self._rmw_subscriptions.append({
'subscription_handle': handle,
'timestamp': timestamp,
'gid': gid,
})
def add_rcl_subscription(
self, handle, timestamp, node_handle, rmw_handle, topic_name, depth
) -> None:
self._rcl_subscriptions.append({
'subscription_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
})
def add_rclcpp_subscription(
self, subscription_pointer, timestamp, subscription_handle
) -> None:
self._subscription_objects.append({
'subscription': subscription_pointer,
'timestamp': timestamp,
'subscription_handle': subscription_handle,
})
def add_service(
self, handle, timestamp, node_handle, rmw_handle, service_name
) -> None:
self._services.append({
            'service_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
})
def add_client(
self, handle, timestamp, node_handle, rmw_handle, service_name
) -> None:
self._clients.append({
'client_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
})
def add_timer(
self, handle, timestamp, period, tid
) -> None:
self._timers.append({
'timer_handle': handle,
'timestamp': timestamp,
'period': period,
'tid': tid,
})
def add_timer_node_link(
self, handle, timestamp, node_handle
) -> None:
self._timer_node_links.append({
'timer_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
})
def add_callback_object(
self, reference, timestamp, callback_object
) -> None:
self._callback_objects.append({
'reference': reference,
'timestamp': timestamp,
'callback_object': callback_object,
})
def add_callback_symbol(
self, callback_object, timestamp, symbol
) -> None:
self._callback_symbols.append({
'callback_object': callback_object,
'timestamp': timestamp,
'symbol': symbol,
})
def add_callback_instance(
self, callback_object, timestamp, duration, intra_process
) -> None:
self._callback_instances.append({
'callback_object': callback_object,
'timestamp': np.datetime64(timestamp, 'ns'),
'duration': np.timedelta64(duration, 'ns'),
'intra_process': intra_process,
})
def add_rmw_take_instance(
self, subscription_handle, timestamp, message, source_timestamp, taken
) -> None:
self._rmw_take_instances.append({
'subscription_handle': subscription_handle,
'timestamp': timestamp,
'message': message,
'source_timestamp': source_timestamp,
'taken': taken,
})
def add_rcl_take_instance(
self, timestamp, message
) -> None:
self._rcl_take_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rclcpp_take_instance(
self, timestamp, message
) -> None:
self._rclcpp_take_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_lifecycle_state_machine(
self, handle, node_handle
) -> None:
self._lifecycle_state_machines.append({
'state_machine_handle': handle,
'node_handle': node_handle,
})
def add_lifecycle_state_transition(
self, state_machine_handle, start_label, goal_label, timestamp
) -> None:
self._lifecycle_transitions.append({
'state_machine_handle': state_machine_handle,
'start_label': start_label,
'goal_label': goal_label,
'timestamp': timestamp,
})
def _finalize(self) -> None:
# Some of the lists of dicts might be empty, and setting
# the index for an empty dataframe leads to an error
self.contexts = pd.DataFrame.from_dict(self._contexts)
if self._contexts:
self.contexts.set_index('context_handle', inplace=True, drop=True)
self.nodes = pd.DataFrame.from_dict(self._nodes)
if self._nodes:
self.nodes.set_index('node_handle', inplace=True, drop=True)
self.rmw_publishers = pd.DataFrame.from_dict(self._rmw_publishers)
if self._rmw_publishers:
self.rmw_publishers.set_index('publisher_handle', inplace=True, drop=True)
self.rcl_publishers = pd.DataFrame.from_dict(self._rcl_publishers)
if self._rcl_publishers:
self.rcl_publishers.set_index('publisher_handle', inplace=True, drop=True)
self.rmw_subscriptions = pd.DataFrame.from_dict(self._rmw_subscriptions)
if self._rmw_subscriptions:
self.rmw_subscriptions.set_index('subscription_handle', inplace=True, drop=True)
self.rcl_subscriptions = pd.DataFrame.from_dict(self._rcl_subscriptions)
if self._rcl_subscriptions:
self.rcl_subscriptions.set_index('subscription_handle', inplace=True, drop=True)
self.subscription_objects = pd.DataFrame.from_dict(self._subscription_objects)
if self._subscription_objects:
self.subscription_objects.set_index('subscription', inplace=True, drop=True)
self.services = pd.DataFrame.from_dict(self._services)
if self._services:
self.services.set_index('service_handle', inplace=True, drop=True)
self.clients = pd.DataFrame.from_dict(self._clients)
if self._clients:
self.clients.set_index('client_handle', inplace=True, drop=True)
self.timers = pd.DataFrame.from_dict(self._timers)
if self._timers:
self.timers.set_index('timer_handle', inplace=True, drop=True)
self.timer_node_links = pd.DataFrame.from_dict(self._timer_node_links)
if self._timer_node_links:
self.timer_node_links.set_index('timer_handle', inplace=True, drop=True)
self.callback_objects = pd.DataFrame.from_dict(self._callback_objects)
if self._callback_objects:
self.callback_objects.set_index('reference', inplace=True, drop=True)
self.callback_symbols = pd.DataFrame.from_dict(self._callback_symbols)
if self._callback_symbols:
self.callback_symbols.set_index('callback_object', inplace=True, drop=True)
self.lifecycle_state_machines = pd.DataFrame.from_dict(self._lifecycle_state_machines)
if self._lifecycle_state_machines:
self.lifecycle_state_machines.set_index(
'state_machine_handle', inplace=True, drop=True)
self.rclcpp_publish_instances = pd.DataFrame.from_dict(self._rclcpp_publish_instances)
self.rcl_publish_instances = pd.DataFrame.from_dict(self._rcl_publish_instances)
self.rmw_publish_instances = pd.DataFrame.from_dict(self._rmw_publish_instances)
self.rmw_take_instances = pd.DataFrame.from_dict(self._rmw_take_instances)
self.rcl_take_instances = pd.DataFrame.from_dict(self._rcl_take_instances)
self.rclcpp_take_instances = pd.DataFrame.from_dict(self._rclcpp_take_instances)
self.callback_instances = pd.DataFrame.from_dict(self._callback_instances)
self.lifecycle_transitions = pd.DataFrame.from_dict(self._lifecycle_transitions)
def print_data(self) -> None:
print('====================ROS 2 DATA MODEL===================')
print('Contexts:')
print(self.contexts.to_string())
print()
print('Nodes:')
print(self.nodes.to_string())
print()
print('Publishers (rmw):')
print(self.rmw_publishers.to_string())
print()
print('Publishers (rcl):')
print(self.rcl_publishers.to_string())
print()
print('Subscriptions (rmw):')
print(self.rmw_subscriptions.to_string())
print()
print('Subscriptions (rcl):')
print(self.rcl_subscriptions.to_string())
print()
print('Subscription objects:')
print(self.subscription_objects.to_string())
print()
print('Services:')
print(self.services.to_string())
print()
print('Clients:')
print(self.clients.to_string())
print()
print('Timers:')
print(self.timers.to_string())
print()
print('Timer-node links:')
print(self.timer_node_links.to_string())
print()
print('Callback objects:')
print(self.callback_objects.to_string())
print()
print('Callback symbols:')
print(self.callback_symbols.to_string())
print()
print('Callback instances:')
print(self.callback_instances.to_string())
print()
print('Publish instances (rclcpp):')
print(self.rclcpp_publish_instances.to_string())
print()
print('Publish instances (rcl):')
print(self.rcl_publish_instances.to_string())
print()
print('Publish instances (rmw):')
print(self.rmw_publish_instances.to_string())
print()
print('Take instances (rmw):')
print(self.rmw_take_instances.to_string())
print()
print('Take instances (rcl):')
print(self.rcl_take_instances.to_string())
print()
print('Take instances (rclcpp):')
print(self.rclcpp_take_instances.to_string())
print()
print('Lifecycle state machines:')
print(self.lifecycle_state_machines.to_string())
print()
print('Lifecycle transitions:')
print(self.lifecycle_transitions.to_string())
print('==================================================')
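
For illustration only, a hedged sketch (not part of the original data model): once an instance of this class has been populated, the indexed DataFrames built above can be cross-referenced with ordinary pandas joins. The `data` variable and the 'callback_object' column on callback_objects are assumptions inferred from the set_index calls, nothing more.

# Hypothetical helper, not from the original file: join callback objects onto
# their symbols via the 'callback_object' index set above.
import pandas as pd

def callbacks_with_symbols(data) -> pd.DataFrame:
    # callback_objects is indexed by 'reference' and assumed to carry a
    # 'callback_object' column; callback_symbols is indexed by 'callback_object'.
    return data.callback_objects.join(
        data.callback_symbols, on='callback_object', how='left')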

avg_line_length: 37.716456 | max_line_length: 94 | alphanum_fraction: 0.631293 | count_classes: 14130 | score_classes: 0.948449 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2794 | score_documentation: 0.187542

hexsha: af0d81f9655852ff10a8be8a0499f540fd5bf5d2 | size: 1543 | ext: py | lang: Python
max_stars: path: setup.py | repo: KunihikoKido/elasticsearch-fabric | head: 5dea163b455f954d31dc685cf2b4fec077aee50a | licenses: ["MIT"] | count: 10 | events: 2016-12-17T03:37:43.000Z .. 2019-09-09T23:00:40.000Z
max_issues: path: setup.py | repo: KunihikoKido/elasticsearch-fabric | head: 5dea163b455f954d31dc685cf2b4fec077aee50a | licenses: ["MIT"] | count: null | events: null
max_forks: path: setup.py | repo: KunihikoKido/elasticsearch-fabric | head: 5dea163b455f954d31dc685cf2b4fec077aee50a | licenses: ["MIT"] | count: null | events: null
content:
# coding=utf-8
import os
from distutils.spawn import find_executable
from setuptools import setup, find_packages
import sys
sys.path.append('./test')  # make ./test importable, presumably for the test_suite declared below
from esfabric import __version__
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
if os.path.exists(os.path.join(os.path.dirname(__file__), 'README.txt')):
with open(os.path.join(os.path.dirname(__file__), 'README.txt')) as readme:
README = readme.read()
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as requirements:
REQUIREMENTS = requirements.read().splitlines()
setup(
name='elasticsearch-fabric',
version=__version__,
packages=find_packages(),
install_requires=REQUIREMENTS,
license='MIT',
author='Kunihiko Kido',
author_email='kunihiko.kido@me.com',
url='https://github.com/KunihikoKido/elasticsearch-fabric',
description='This package provides a unified command line interface to Elasticsearch in Fabric.',
long_description=README,
platforms=['OS Independent'],
keywords=['elasticsearch', 'fabric'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2.7',
],
include_package_data=True,
    test_suite='tasks_test.suite',
scripts=['bin/es_bash_completion'],
)
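
A hedged usage note for the setup.py above, not part of the original file: these are the standard pip/setuptools invocations one would typically run from the project root; wrapping them in subprocess here is purely illustrative.

# Illustrative only: echo the usual packaging commands for a setup.py like the one above.
import subprocess
import sys

for cmd in (
    [sys.executable, "-m", "pip", "install", "."],  # install the package plus requirements.txt pins
    [sys.executable, "setup.py", "test"],           # run the declared test_suite (legacy setuptools command)
):
    subprocess.run(cmd, check=True)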

avg_line_length: 32.829787 | max_line_length: 101 | alphanum_fraction: 0.695399 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 600 | score_documentation: 0.388853

hexsha: af0fe57c93b0182742617678b6b627538eed3937 | size: 673 | ext: py | lang: Python
max_stars: path: exhibit/catalogue/migrations/0008_auto_20190809_1822.py | repo: yrhooke/exhibit-proj | head: 899f340390761423f8d2fe7f1edbad4e9f79435e | licenses: ["MIT"] | count: null | events: null
max_issues: path: exhibit/catalogue/migrations/0008_auto_20190809_1822.py | repo: yrhooke/exhibit-proj | head: 899f340390761423f8d2fe7f1edbad4e9f79435e | licenses: ["MIT"] | count: null | events: null
max_forks: path: exhibit/catalogue/migrations/0008_auto_20190809_1822.py | repo: yrhooke/exhibit-proj | head: 899f340390761423f8d2fe7f1edbad4e9f79435e | licenses: ["MIT"] | count: null | events: null
content:
# Generated by Django 2.2 on 2019-08-09 18:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0007_auto_20190809_1735'),
]
operations = [
migrations.AlterField(
model_name='artwork',
name='image',
field=models.ImageField(null=True, upload_to='images/'),
),
migrations.AlterField(
model_name='artwork',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalogue.Location'),
),
]
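
A hedged sketch of applying this migration programmatically, not part of the original file; in practice one would normally just run "python manage.py migrate catalogue". The settings module name below is a placeholder assumption about the exhibit project layout.

# Illustrative only: apply the catalogue app's migrations up to this one.
import os
import django
from django.core.management import call_command

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "exhibit.settings")  # assumed settings path
django.setup()
call_command("migrate", "catalogue", "0008_auto_20190809_1822")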

avg_line_length: 26.92 | max_line_length: 130 | alphanum_fraction: 0.619614 | count_classes: 549 | score_classes: 0.81575 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 145 | score_documentation: 0.215453

hexsha: af13b442639e939a58e7ab827a12150bdfb1b715 | size: 3455 | ext: py | lang: Python
max_stars: path: utils.py | repo: cedias/Hierarchical-Sentiment | head: 19dbbae45707839d380f07e09e24f47c8267e72e | licenses: ["MIT"] | count: 107 | events: 2018-01-16T13:57:54.000Z .. 2022-03-14T14:44:31.000Z
max_issues: path: utils.py | repo: cedias/Hierarchical-Sentiment | head: 19dbbae45707839d380f07e09e24f47c8267e72e | licenses: ["MIT"] | count: 5 | events: 2018-07-05T06:01:11.000Z .. 2019-11-16T13:14:09.000Z
max_forks: path: utils.py | repo: cedias/Hierarchical-Sentiment | head: 19dbbae45707839d380f07e09e24f47c8267e72e | licenses: ["MIT"] | count: 25 | events: 2018-02-02T05:46:42.000Z .. 2021-03-23T17:08:15.000Z
content:
#utils.py
import torch
from tqdm import tqdm
from torch.autograd import Variable
from fmtl import FMTL
def tuple2var(tensors,data):
def copy2tensor(t,data):
        # 'async' became a reserved keyword in Python 3.7; newer PyTorch uses non_blocking instead
        t.resize_(data.size()).copy_(data, non_blocking=True)
return Variable(t)
return tuple(map(copy2tensor,tensors,data))
def new_tensors(n,cuda,types={}):
def new_tensor(t_type,cuda):
x = torch.Tensor()
if t_type:
x = x.type(t_type)
if cuda:
x = x.cuda()
return x
return tuple([new_tensor(types.setdefault(i,None),cuda) for i in range(0,n)])
def accuracy(out,truth):
def sm(mat):
exp = torch.exp(mat)
sum_exp = exp.sum(1,True)+0.0001
return exp/sum_exp.expand_as(exp)
_,max_i = torch.max(sm(out),1)
eq = torch.eq(max_i,truth).float()
all_eq = torch.sum(eq)
return all_eq, all_eq/truth.size(0)*100, max_i.float()
def checkpoint(epoch,net,output):
model_out_path = output+"_epoch_{}.pth".format(epoch)
torch.save(net, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
def load_embeddings(file,offset=0):
emb_file = open(file).readlines()
first = emb_file[0]
word, vec = int(first.split()[0]),int(first.split()[1])
size = (word,vec)
print("--> Got {} words of {} dimensions".format(size[0],size[1]))
tensor = torch.zeros(size[0]+offset,size[1]) ## adding offset
word_d = {}
print("--> Shape with padding and unk_token:")
print(tensor.size())
for i,line in tqdm(enumerate(emb_file),desc="Creating embedding tensor",total=len(emb_file)):
if i==0: #skipping header (-1 to the enumeration to take it into account)
print("skipping embedding size line:\'{}\'".format(line.strip()))
continue
spl = line.strip().split(" ")
if len(spl[1:]) == size[1]: #word is most probably whitespace or junk if badly parsed
word_d[spl[0]] = i + offset-1
tensor[i+offset-1] = torch.FloatTensor(list(float(x) for x in spl[1:]))
else:
print("WARNING: MALFORMED EMBEDDING DICTIONNARY:\n {} \n line isn't parsed correctly".format(line))
    if len(word_d) != size[0]:
        print("Final dictionary length differs from number of embeddings - some lines were malformed.")
return tensor, word_d
def FMTL_train_val_test(datatuples,splits,split_num=0,validation=0.5,rows=None):
"""
Builds train/val/test indexes sets from tuple list and split list
Validation set at 0.5 if n split is 5 gives an 80:10:10 split as usually used.
"""
train,test = [],[]
for idx,split in tqdm(enumerate(splits),total=len(splits),desc="Building train/test of split #{}".format(split_num)):
if split == split_num:
test.append(idx)
else:
train.append(idx)
if len(test) <= 0:
raise IndexError("Test set is empty - split {} probably doesn't exist".format(split_num))
if rows and type(rows) is tuple:
rows = {v:k for k,v in enumerate(rows)}
print("Tuples rows are the following:")
print(rows)
if validation > 0:
if 0 < validation < 1:
val_len = int(validation * len(test))
validation = test[-val_len:]
test = test[:-val_len]
else:
validation = []
idxs = (train,test,validation)
fmtl = FMTL(datatuples,rows)
iters = idxs
return (fmtl,iters)
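
A minimal, hedged sketch exercising the helpers above; it is not part of the original utils.py. Only accuracy() is actually called, on toy tensors; the load_embeddings() call in the comment uses a placeholder path, and the FMTL wiring is omitted.

# Illustrative only (relies on the torch import at the top of this file).
# e.g. tensor, word_d = load_embeddings("path/to/word_vectors.txt", offset=2)
if __name__ == "__main__":
    logits = torch.randn(4, 3)           # fake model outputs: 4 samples, 3 classes
    labels = torch.tensor([0, 2, 1, 0])  # fake ground-truth labels
    n_correct, pct, preds = accuracy(logits, labels)
    print("toy accuracy: {:.1f}%".format(pct.item()))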

avg_line_length: 30.848214 | max_line_length: 121 | alphanum_fraction: 0.621708 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 777 | score_documentation: 0.224891

hexsha: af15f4dafca99a33d4cb28c9e33b6eb81ea8619b | size: 2867 | ext: py | lang: Python
max_stars: path: Back/ecoreleve_be_server/Models/Import.py | repo: NaturalSolutions/ecoReleve-BE | head: e120be8236d3f16d4a698058dcf43a4ed8b18e7b | licenses: ["MIT"] | count: 2 | events: 2019-01-22T15:19:48.000Z .. 2019-07-18T06:55:29.000Z
max_issues: path: Back/ecoreleve_be_server/Models/Import.py | repo: NaturalSolutions/ecoReleve-BE | head: e120be8236d3f16d4a698058dcf43a4ed8b18e7b | licenses: ["MIT"] | count: 2 | events: 2018-04-04T15:48:24.000Z .. 2018-08-29T11:01:26.000Z
max_forks: path: Back/ecoreleve_be_server/Models/Import.py | repo: NaturalSolutions/ecoReleve-BE | head: e120be8236d3f16d4a698058dcf43a4ed8b18e7b | licenses: ["MIT"] | count: 2 | events: 2018-02-26T11:50:22.000Z .. 2018-03-13T08:16:42.000Z
content:
# from ..Models import Base, dbConfig
# from sqlalchemy import (
# Column,
# DateTime,
# ForeignKey,
# Integer,
# Numeric,
# String,
# Unicode,
# text,
# Sequence,
# orm,
# func,
# select,
# bindparam,
# UniqueConstraint,
# event)
# from sqlalchemy.orm import relationship
# from sqlalchemy.ext.hybrid import hybrid_property
# # from ..GenericObjets.DataBaseObjects import ConfiguredDbObjectMapped, DbObject
# from ..GenericObjets.OrmModelsMixin import HasStaticProperties
# from ..Models import Base, dbConfig
# from urllib.parse import quote_plus
# sensor_schema = dbConfig['sensor_schema']
# dialect = dbConfig['cn.dialect']
# class Import(HasStaticProperties, Base):
# __tablename__ = 'Import'
# moduleGridName = 'ImportHistoryFilter'
# # TempTable_GUID = Column(String(250), default=None)
# # Status = Column(Integer)
# # ObjectName = Column(String(250))
# # ObjectType = Column(String(250))
# # FK_ImportType = Column(Integer, ForeignKey(
# # 'ImportType.ID'), nullable=False)
# # __table_args__ = ({'schema': sensor_schema,
# # 'implicit_returning': False
# # })
# ID = Column(Integer, Sequence('Import__id_seq'), primary_key=True)
# GPXrawDatas = relationship('GPX', back_populates='ImportedFile')
# ArgosGPSRawDatas = relationship('ArgosGps', back_populates='ImportedFile')
# ArgosEngRawDatas = relationship(
# 'ArgosEngineering', back_populates='ImportedFile')
# RFIDrawDatas = relationship('Rfid', back_populates='ImportedFile')
# GSMrawDatas = relationship('Gsm', back_populates='ImportedFile')
# GSMengRawDatas = relationship(
# 'GsmEngineering', back_populates='ImportedFile')
# @hybrid_property
# def relatedDatas(self):
# dictType = {
# 'GPX': self.GPXrawDatas,
# 'Argos': self.ArgosGPSRawDatas,
# 'GSM': self.GSMrawDatas,
# 'RFID': self.RFIDrawDatas
# }
# return dictType.get(self.ImportType)
# __table_args__ = ({'schema': quote_plus('"ecoreleve_sensor".')+quote_plus('"public"'),
# 'implicit_returning': False
# })
# # @hybrid_property
# # def maxDate(self):
# # return max(row.date for row in self.relatedDatas)
# # @hybrid_property
# # def minDate(self):
# # return min(row.date for row in self.relatedDatas)
# # @hybrid_property
# # def nbRow(self):
# # return len(self.relatedDatas)
# # class ImportType(Base):
# # __tablename__ = 'ImportType'
# # ID = Column(Integer, primary_key=True)
# # Name = Column(String(250))
# # __table_args__ = ({'schema': sensor_schema,
# # 'implicit_returning': False
# # })
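
Since the model above is entirely commented out, here is a small self-contained sketch (not from the ecoReleve codebase) of the pattern it encodes: a SQLAlchemy hybrid_property that dispatches to different raw-data relationships depending on the import type. All class, table and column names below are illustrative assumptions.

# Illustrative only: hybrid_property dispatching over typed relationships.
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()

class ImportSketch(Base):
    __tablename__ = 'import_sketch'
    id = Column(Integer, primary_key=True)
    import_type = Column(String(50))
    gpx_rows = relationship('GpxRow', back_populates='imported_file')
    rfid_rows = relationship('RfidRow', back_populates='imported_file')

    @hybrid_property
    def related_datas(self):
        # Pick the raw-data collection that matches this import's type.
        return {'GPX': self.gpx_rows, 'RFID': self.rfid_rows}.get(self.import_type)

class GpxRow(Base):
    __tablename__ = 'gpx_row'
    id = Column(Integer, primary_key=True)
    import_id = Column(Integer, ForeignKey('import_sketch.id'))
    imported_file = relationship('ImportSketch', back_populates='gpx_rows')

class RfidRow(Base):
    __tablename__ = 'rfid_row'
    id = Column(Integer, primary_key=True)
    import_id = Column(Integer, ForeignKey('import_sketch.id'))
    imported_file = relationship('ImportSketch', back_populates='rfid_rows')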

avg_line_length: 32.213483 | max_line_length: 92 | alphanum_fraction: 0.618068 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2779 | score_documentation: 0.969306

hexsha: af16c33bdba13b28d77f33ac28f80dcfc81a9c64 | size: 11704 | ext: py | lang: Python
max_stars: path: bin/server.py | repo: tolstoyevsky/blackmagic | head: 0be5f041cbd42d9fb140957f0946d0ac7cb68848 | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: path: bin/server.py | repo: tolstoyevsky/blackmagic | head: 0be5f041cbd42d9fb140957f0946d0ac7cb68848 | licenses: ["Apache-2.0"] | count: 3 | events: 2018-12-08T16:51:11.000Z .. 2020-10-16T09:39:00.000Z
max_forks: path: bin/server.py | repo: tolstoyevsky/blackmagic | head: 0be5f041cbd42d9fb140957f0946d0ac7cb68848 | licenses: ["Apache-2.0"] | count: null | events: null
content:
#!/usr/bin/env python3
import logging
import os
import os.path
import sys
import tornado.web
import tornado.options
from appleseed import AlpineIndexFile, DebianIndexFile
from cdtz import set_time_zone
from motor import MotorClient
from shirow.ioloop import IOLoop
from shirow.server import RPCServer, TOKEN_PATTERN, remote
from tornado.options import define, options
from blackmagic import defaults, docker
from blackmagic.db import Image
from blackmagic.codes import (
IMAGE_BUILDING_UNAVAILABLE,
IMAGE_IS_NOT_AVAILABLE_FOR_RECOVERY,
LOCKED,
READY,
RECOVERY_IMAGE_MISSING,
)
from blackmagic.decorators import only_if_initialized
from blackmagic.exceptions import RecoveryImageIsMissing
from images.models import Image as ImageModel
from images.serializers import ImageSerializer
define('base_systems_path',
default='/var/chroot',
help='The path to the directory which contains chroot environments '
'which, in turn, contain the Debian base system')
define('db_name',
default='cusdeb',
help='')
define('dominion_workspace',
default='/var/dominion/workspace/',
help='')
define('max_builds_number',
default=8,
type=int,
help='Maximum allowed number of builds at the same time.')
define('mongodb_host',
default='',
help='')
define('mongodb_port',
default='33018',
help='')
LOGGER = logging.getLogger('tornado.application')
class DistroDoesNotExist(Exception):
"""Exception raised by the get_os_name function if the specified suite is not valid. """
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/bm/token/' + TOKEN_PATTERN, RPCHandler),
]
super().__init__(handlers)
class RPCHandler(RPCServer):
base_packages_list = {}
users_list = {}
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
self._global_lock = True
self._init_lock = False
self._collection = None
self._collection_name = ''
self._db = None
self._distro = None
self._target_device = None
self._base_packages_number = 0
self._base_packages_query = {}
self._selected_packages = []
self._configuration = dict(defaults.CONFIGURATION)
self._image = None
self._need_update = True
def destroy(self):
if self._need_update and self._image:
self._image.dump_sync()
def _init_mongodb(self):
client = MotorClient(options.mongodb_host, int(options.mongodb_port))
self._db = client[options.db_name]
async def _init(self, request, image_id=None, device_name=None, distro_name=None, flavour=None):
if self._init_lock:
request.ret(LOCKED)
self._init_lock = True
try:
self._image = Image(image_id=image_id, user_id=self.user_id, device_name=device_name,
distro_name=distro_name, flavour=flavour)
except RecoveryImageIsMissing:
request.ret(RECOVERY_IMAGE_MISSING)
if image_id:
self._selected_packages = self._image.selected_packages
self._configuration = self._image.configuration
self._init_mongodb()
self._collection_name = self._image.distro_name
self._collection = self._db[self._collection_name]
self._base_packages_query = {
'package': {
'$in': self.base_packages_list[self._collection_name],
},
}
self._base_packages_number = await self._collection.count_documents(self._base_packages_query)
LOGGER.debug('Finishing initialization')
self._init_lock = False
self._global_lock = False
@remote
async def init_new_image(self, request, device_name, distro_name, flavour):
await self._init(request, device_name=device_name, distro_name=distro_name, flavour=flavour)
request.ret_and_continue(self._image.image_id)
request.ret(READY)
@remote
async def init_existing_image(self, request, image_id):
await self._init(request, image_id=image_id)
request.ret(READY)
@remote
async def is_image_available_for_recovery(self, request, image_id):
try:
image = ImageModel.objects.get(image_id=image_id, status=ImageModel.UNDEFINED)
serializer = ImageSerializer(image)
request.ret(serializer.data)
except ImageModel.DoesNotExist:
request.ret_error(IMAGE_IS_NOT_AVAILABLE_FOR_RECOVERY)
@only_if_initialized
@remote
async def build(self, request):
from users.models import Person
if not Person.objects.filter(user__pk=self.user_id).exists():
request.ret_error(IMAGE_BUILDING_UNAVAILABLE)
self._image.enqueue()
await self._image.dump()
self._need_update = False
request.ret(READY)
@only_if_initialized
@remote
async def add_user(self, request, username, password):
self._image.pieman_user = {
'username': username,
'password': password,
}
request.ret(READY)
@only_if_initialized
@remote
async def change_root_password(self, request, password):
self._image.root_password = password
request.ret(READY)
@only_if_initialized
@remote
async def get_configuration(self, request):
request.ret(self._configuration)
@only_if_initialized
@remote
async def set_configuration(self, request, configuration):
for key in configuration:
if key in self._configuration:
self._configuration[key] = configuration[key]
self._image.configuration = self._configuration
request.ret(READY)
@only_if_initialized
@remote
async def get_packages_list(self, request, page_number, per_page, search_token=None):
if page_number > 0:
start_position = (page_number - 1) * per_page
else:
start_position = 0
find_query = {}
if search_token:
find_query.update({
'package': {'$regex': search_token, '$options': '-i'},
})
packages_list = []
async for document in self._collection.find(find_query).skip(start_position).limit(per_page):
# Originally _id is an ObjectId instance and it's not JSON serializable
document['_id'] = str(document['_id'])
if document['package'] in self.base_packages_list[self._collection_name]:
document['type'] = 'base'
if document['package'] in self._selected_packages:
document['type'] = 'selected'
packages_list.append(document)
request.ret(packages_list)
@only_if_initialized
@remote
async def get_base_packages_list(self, request, page_number, per_page):
start_position = (page_number - 1) * per_page if page_number > 0 else 0
collection = self._collection
base_packages_list = []
async for document in collection.find(
self._base_packages_query
).skip(start_position).limit(per_page):
# Originally _id is an ObjectId instance and it's not JSON serializable
document['_id'] = str(document['_id'])
base_packages_list.append(document)
request.ret(base_packages_list)
@only_if_initialized
@remote
async def get_selected_packages_list(self, request, page_number, per_page):
start_position = (page_number - 1) * per_page if page_number > 0 else 0
collection = self._collection
selected_packages_list = []
async for document in collection.find({
'package': {
'$in': self._selected_packages,
}
}).skip(start_position).limit(per_page):
# Originally _id is an ObjectId instance and it's not JSON serializable
document['_id'] = str(document['_id'])
selected_packages_list.append(document)
request.ret(selected_packages_list)
@only_if_initialized
@remote
async def get_initial_selected_packages_list(self, request):
request.ret(self._selected_packages)
@only_if_initialized
@remote
async def get_root_password(self, request):
request.ret(self._image.root_password)
@only_if_initialized
@remote
async def get_shells_list(self, request):
request.ret(['/bin/sh', '/bin/dash', '/bin/bash', '/bin/rbash'])
@only_if_initialized
@remote
async def get_packages_number(self, request, search_token=None):
find_query = {}
if search_token:
find_query.update({
'package': {'$regex': search_token, '$options': '-i'}
})
packages_number = await self._collection.count_documents(find_query)
request.ret(packages_number)
@only_if_initialized
@remote
async def get_base_packages_number(self, request):
request.ret(self._base_packages_number)
@only_if_initialized
@remote
async def get_selected_packages_number(self, request):
selected_packages_count = await self._collection.count_documents({
'package': {
'$in': self._selected_packages,
}
})
request.ret(selected_packages_count)
@only_if_initialized
@remote
async def get_user(self, request):
request.ret(self._image.pieman_user)
@only_if_initialized
@remote
async def get_users_list(self, request):
request.ret(self.users_list[self._collection_name])
@only_if_initialized
@remote
async def resolve(self, request, packages_list):
LOGGER.debug(f'Resolve dependencies for {packages_list}')
self._selected_packages = self._image.selected_packages = packages_list
request.ret([])
def main():
set_time_zone(docker.TIME_ZONE)
tornado.options.parse_command_line()
if not os.path.isdir(options.base_systems_path):
LOGGER.error('The directory specified via the base_systems_path parameter does not exist')
        sys.exit(1)
for item_name in os.listdir(options.base_systems_path):
item_path = os.path.join(options.base_systems_path, item_name)
if os.path.isdir(item_path):
debian_status_file = os.path.join(item_path, 'var/lib/dpkg/status')
alpine_installed_file = os.path.join(item_path, 'lib/apk/db/installed')
if os.path.exists(debian_status_file):
file_path = debian_status_file
index_file_cls = DebianIndexFile
elif os.path.exists(alpine_installed_file):
file_path = alpine_installed_file
index_file_cls = AlpineIndexFile
else:
continue
distro, suite, arch = item_name.split('-')
with index_file_cls(distro, suite, arch, file_path) as index_file:
RPCHandler.base_packages_list[item_name] = []
for package in index_file.iter_paragraphs():
RPCHandler.base_packages_list[item_name].append(package['package'])
passwd_file = os.path.join(item_path, 'etc/passwd')
with open(passwd_file, encoding='utf-8') as infile:
RPCHandler.users_list[item_name] = []
for line in infile:
RPCHandler.users_list[item_name].append(line.split(':'))
LOGGER.info('RPC server is ready!')
IOLoop().start(Application(), options.port)
if __name__ == "__main__":
main()
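
The server above is driven by tornado options plus shirow's IOLoop; a typical launch would look something like "python bin/server.py --base_systems_path=/var/chroot --mongodb_host=127.0.0.1" (the --port option appears to come from shirow and is an assumption here). The standalone sketch below, which is not part of the original file, only demonstrates the tornado define/options mechanism used throughout it.

# Illustrative only: the define/parse/options pattern used by server.py,
# reduced to a single made-up option unrelated to blackmagic.
from tornado.options import define, options, parse_command_line

define('greeting', default='hello', help='value to print')

if __name__ == '__main__':
    parse_command_line()  # e.g. python demo.py --greeting=hi
    print(options.greeting)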

avg_line_length: 32.242424 | max_line_length: 102 | alphanum_fraction: 0.655161 | count_classes: 8547 | score_classes: 0.730263 | count_generators: 0 | score_generators: 0 | count_decorators: 6060 | score_decorators: 0.517772 | count_async_functions: 6531 | score_async_functions: 0.558014 | count_documentation: 1178 | score_documentation: 0.100649