max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
sqlbuilder/smartsql/dialects/python.py | emacsway/sqlbuilder | 33 | 12757551 | <gh_stars>10-100
import copy
import operator
import weakref
from sqlbuilder.smartsql.constants import CONTEXT, OPERATOR
from sqlbuilder.smartsql.expressions import Param
from sqlbuilder.smartsql.exceptions import Error
from sqlbuilder.smartsql.compiler import compile
from sqlbuilder.smartsql.fields import Field
from sqlbuilder.smartsql.operators import Binary
__all__ = ('Executor', 'State', 'execute')
# Translation table from smartsql's OPERATOR constants to the functions in
# the stdlib ``operator`` module, used to evaluate Binary expressions in
# pure Python (see the Binary handler at the bottom of this module).
OPERATOR_MAPPING = {
    OPERATOR.ADD: operator.add,
    OPERATOR.SUB: operator.sub,
    OPERATOR.MUL: operator.mul,
    OPERATOR.DIV: operator.truediv,
    OPERATOR.GT: operator.gt,
    OPERATOR.LT: operator.lt,
    OPERATOR.GE: operator.ge,
    OPERATOR.LE: operator.le,
    OPERATOR.AND: operator.and_,
    OPERATOR.OR: operator.or_,
    OPERATOR.EQ: operator.eq,
    OPERATOR.NE: operator.ne,
    OPERATOR.IS: operator.is_,
    OPERATOR.IS_NOT: operator.is_not,
    OPERATOR.RSHIFT: operator.rshift,
    OPERATOR.LSHIFT: operator.lshift,
}
class Executor(object):
    """Dispatch expressions to handlers registered per expression class.

    Registries are hierarchical: a child executor inherits its parents'
    handlers and may override them locally.  Children are tracked through
    weak references so discarded children are garbage collected freely.
    """

    compile = compile

    def __init__(self, parent=None):
        self._children = weakref.WeakKeyDictionary()
        self._parents = []
        self._local_registry = {}
        self._registry = {}
        if parent:
            self._parents.extend(parent._parents)
            self._parents.append(parent)
            parent._children[self] = True
            self._update_cache()

    def create_child(self):
        """Return a new executor inheriting this one's handlers."""
        return self.__class__(self)

    def when(self, cls):
        """Decorator factory registering a handler for expressions of type *cls*."""
        def register(func):
            self._local_registry[cls] = func
            self._update_cache()
            return func
        return register

    def _update_cache(self):
        # Rebuild the flattened registry: ancestors first, local overrides
        # last, then propagate the change recursively to live children.
        for ancestor in self._parents:
            self._registry.update(ancestor._local_registry)
        self._registry.update(self._local_registry)
        for child in self._children:
            child._update_cache()

    def get_row_key(self, field):
        """Compile *field* and return its SQL text, used as a row-dict key."""
        return self.compile(field)[0]

    def __call__(self, expr, state=None):
        # Walk the MRO so a handler registered for a base class (for
        # instance ``object``) serves subclasses with no exact handler.
        cls = expr.__class__
        for candidate in cls.__mro__:
            handler = self._registry.get(candidate)
            if handler is not None:
                return handler(self, expr, state)
        raise Error("Unknown executor for {0}".format(cls))
execute = Executor()
class State(object):
    """Mutable evaluation context with an attribute save/restore stack."""

    def __init__(self):
        # For joins we simply add joined objects to the row
        self.row = {}
        self.rows_factory = lambda table: ()  # for joins
        self._stack = []
        self.auto_tables = []
        self.auto_join_tables = []
        self.joined_table_statements = set()
        self.context = CONTEXT.QUERY

    def push(self, attr, new_value=None):
        """Save the current value of *attr*, then set it to *new_value*.

        When *new_value* is omitted, a shallow copy of the current value is
        installed instead, so in-place edits can be undone by pop().
        Returns the previous value.
        """
        previous = getattr(self, attr, None)
        self._stack.append((attr, previous))
        setattr(self, attr, copy.copy(previous) if new_value is None else new_value)
        return previous

    def pop(self):
        """Restore the most recently pushed attribute value."""
        attr, saved = self._stack.pop()
        setattr(self, attr, saved)
@execute.when(object)
def execute_python_builtin(execute, expr, state):
    """Fallback handler: plain Python values evaluate to themselves."""
    return expr


@execute.when(Field)
def execute_field(execute, expr, state):
    """Look the field's compiled name up in the current row."""
    return state.row[execute.get_row_key(expr)]


# BUG FIX: the Param and Binary handlers were both named ``execute_field``,
# shadowing each other (and the Field handler) in the module namespace.
# Registration still worked, but tracebacks and introspection were misleading.
@execute.when(Param)
def execute_param(execute, expr, state):
    """A bound parameter evaluates to its payload."""
    return expr.params


@execute.when(Binary)
def execute_binary(execute, expr, state):
    """Evaluate both operands, then combine them with the mapped operator."""
    return OPERATOR_MAPPING[expr.sql](execute(expr.left, state), execute(expr.right, state))
| 1.875 | 2 |
bin/testgen.py | muh-bazm/eventgen | 3 | 12757552 | <filename>bin/testgen.py
import sys
# while True:
# print '111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'
#while True:
# print '2014-01-05 23:07:08 WINDBAG Event 1 of 100000'
q = []


def send(msg):
    """Queue *msg* for output; flush once more than 1000 messages pile up."""
    q.append(msg)
    if len(q) > 1000:
        flush()


def flush():
    """Write all queued messages (newline-terminated) to stdout in one call.

    BUG FIX: the queue is now emptied after writing.  Previously flush()
    left the messages in ``q``, so every subsequent flush re-emitted all
    earlier messages and the queue grew without bound.
    """
    if not q:
        return
    sys.stdout.write(''.join(msg + '\n' for msg in q))
    del q[:]  # clear in place; q is shared module state (Python 2 compatible)
# Emit the same synthetic log event forever, batching writes through
# send()/flush().  NOTE(review): Python 2 only -- ``xrange`` does not
# exist on Python 3.
while True:
    for i in xrange(20000):
        send('2014-01-05 23:07:08 WINDBAG Event 1 of 100000')
test/test_samples/unittest/test_invalid_import_failed.py | bergkvist/vscode-python-test-adapter | 96 | 12757553 | <filename>test/test_samples/unittest/test_invalid_import_failed.py
import unittest
import some_non_existing_module
class InvalidImportTests(unittest.TestCase):
    """Intentionally broken fixture: the module-level import of a
    non-existent module above must make collection fail; this file tests
    how the test adapter reports import errors.  Do not "fix" the import."""
    def test_with_invalid_import(self):
        # Unreachable in practice -- the import failure happens first.
        self.assertEqual(3, 2 + 1)
| 2.6875 | 3 |
src/bxcommon/utils/stats/stat_event_logic_flags.py | dolphinridercrypto/bxcommon | 12 | 12757554 | from enum import Flag
class StatEventLogicFlags(Flag):
    """Bitmask flags marking which logic paths a stat event participates in.

    Values are powers of two so members can be OR-combined.
    """
    NONE = 0
    BLOCK_INFO = 1
    MATCH = 2
    SUMMARY = 4
    PROPAGATION_START = 8
    PROPAGATION_END = 16

    def __str__(self) -> str:
        # Render as the raw integer so combined flags serialize compactly.
        return "{0}".format(self.value)
| 2.65625 | 3 |
seed_services_client/tests/test_message_sender.py | praekeltfoundation/seed-services-client | 0 | 12757555 | from unittest import TestCase
import json
import responses
import re
from seed_services_client.message_sender \
import MessageSenderApiClient
class TestMessageSenderClient(TestCase):
    """Tests for MessageSenderApiClient against a mocked HTTP layer.

    FIX: two test method names misspelled "multiple" as "mulitple"; renamed.
    The ``test_`` prefix is preserved, so unittest discovery is unaffected.
    """

    def setUp(self):
        self.api = MessageSenderApiClient(
            "NO", "http://ms.example.org/api/v1")

    @responses.activate
    def test_create_inbound(self):
        """create_inbound POSTs the payload to /inbound/."""
        # Catch all requests
        responses.add(
            responses.POST, re.compile(r'.*'), json={'test': 'response'},
            status=200)
        inbound_payload = {
            'from_addr': '+1234'
        }
        response = self.api.create_inbound(inbound_payload)
        # Check
        self.assertEqual(response, {'test': 'response'})
        self.assertEqual(len(responses.calls), 1)
        request = responses.calls[0].request
        self.assertEqual(request.method, 'POST')
        self.assertEqual(request.url, "http://ms.example.org/api/v1/inbound/")
        self.assertEqual(json.loads(request.body), inbound_payload)

    @responses.activate
    def test_create_outbound(self):
        """create_outbound POSTs to /outbound/ and returns the created message."""
        # Setup
        outbound_payload = {
            "to_addr": "+27123",
            "content": "my outbound message",
            "metadata": {}
        }
        response = {
            'attempts': 0,
            'updated_at': '2016-08-18T11:32:17.750207Z',
            'content': outbound_payload["content"],
            'created_at': '2016-08-18T11:32:17.750236Z',
            'vumi_message_id': '075a32da-e1e4-4424-be46-1d09b71056fd',
            'to_addr': outbound_payload["to_addr"],
            'metadata': outbound_payload["metadata"],
            'id': 'c99bd21e-6b9d-48ba-9f07-1e8e406737fe',
            'delivered': False,
            'version': 1,
            'url': 'http://ms.example.org/api/v1/outbound/c99bd21e-6b9d-48ba-9f07-1e8e406737fe/'  # noqa
        }
        responses.add(
            responses.POST,
            "http://ms.example.org/api/v1/outbound/",
            json=response,
            status=200, content_type='application/json',
        )
        # Execute
        result = self.api.create_outbound(outbound_payload)
        # Check
        self.assertEqual(result["id"], "c99bd21e-6b9d-48ba-9f07-1e8e406737fe")
        self.assertEqual(result["content"], outbound_payload["content"])
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(responses.calls[0].request.url,
                         "http://ms.example.org/api/v1/outbound/")

    @responses.activate
    def test_get_outbounds_single_page(self):
        """get_outbounds returns all results when there is no next page."""
        outbounds = {
            "next": None,
            "previous": None,
            "results": [
                {'to_addr': 'addr1', 'content': 'content1'},
                {'to_addr': 'addr2', 'content': 'content2'},
            ]
        }
        responses.add(
            responses.GET,
            "http://ms.example.org/api/v1/outbound/",
            json=outbounds,
            status=200, content_type='application/json',
        )
        # Execute
        result = self.api.get_outbounds()
        # Check
        self.assertEqual(list(result["results"]), [
            {'to_addr': 'addr1', 'content': 'content1'},
            {'to_addr': 'addr2', 'content': 'content2'}])
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(
            responses.calls[0].request.url,
            "http://ms.example.org/api/v1/outbound/"
        )

    @responses.activate
    def test_get_outbounds_multiple_pages(self):
        """get_outbounds follows cursor pagination across pages."""
        outbounds = {
            "next": "http://ms.example.org/api/v1/outbound/?cursor=1",
            "previous": None,
            "results": [
                {'to_addr': 'addr1', 'content': 'content1'},
                {'to_addr': 'addr2', 'content': 'content2'},
            ]
        }
        responses.add(
            responses.GET,
            "http://ms.example.org/api/v1/outbound/",
            json=outbounds,
            status=200, content_type='application/json', match_querystring=True
        )
        outbounds = {
            "next": None,
            "previous": "http://ms.example.org/api/v1/outbound/?cursor=0",
            "results": [
                {'to_addr': 'addr3', 'content': 'content3'},
            ]
        }
        responses.add(
            responses.GET,
            "http://ms.example.org/api/v1/outbound/?cursor=1",
            json=outbounds,
            status=200, content_type='application/json', match_querystring=True
        )
        # Execute
        result = self.api.get_outbounds()
        # Check
        self.assertEqual(list(result["results"]), [
            {'to_addr': 'addr1', 'content': 'content1'},
            {'to_addr': 'addr2', 'content': 'content2'},
            {'to_addr': 'addr3', 'content': 'content3'}])
        self.assertEqual(len(responses.calls), 2)
        self.assertEqual(
            responses.calls[0].request.url,
            "http://ms.example.org/api/v1/outbound/"
        )
        self.assertEqual(
            responses.calls[1].request.url,
            "http://ms.example.org/api/v1/outbound/?cursor=1"
        )

    @responses.activate
    def test_get_inbounds_single_page(self):
        """get_inbounds passes filters as query params and returns one page."""
        inbounds = {
            "next": None,
            "previous": None,
            "results": [
                {'from_addr': '+1234', 'content': 'content1'},
                {'from_addr': '+1234', 'content': 'content2'},
            ]
        }
        # Catch all requests
        responses.add(
            responses.GET, "http://ms.example.org/api/v1/inbound/",
            json=inbounds, status=200)
        # Execute
        response = self.api.get_inbounds({'from_addr': '+1234'})
        # Check
        self.assertEqual(list(response["results"]), [
            {'from_addr': '+1234', 'content': 'content1'},
            {'from_addr': '+1234', 'content': 'content2'}])
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(responses.calls[0].request.method, 'GET')
        self.assertEqual(
            responses.calls[0].request.url,
            "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234"
        )

    @responses.activate
    def test_get_inbounds_multiple_pages(self):
        """get_inbounds follows cursor pagination while keeping filters."""
        inbounds = {
            "next": "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234"
            "&cursor=1",
            "previous": None,
            "results": [
                {'from_addr': '+1234', 'content': 'content1'},
                {'from_addr': '+1234', 'content': 'content2'},
            ]
        }
        # Catch all requests
        responses.add(
            responses.GET,
            "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234",
            json=inbounds, status=200, match_querystring=True)
        inbounds = {
            "next": None,
            "previous": "http://ms.example.org/api/v1/inbound/?"
            "from_addr=%2B1234&cursor=1",
            "results": [
                {'from_addr': '+1234', 'content': 'content3'},
            ]
        }
        responses.add(
            responses.GET,
            "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234&cursor=1",
            json=inbounds, status=200, match_querystring=True)
        # Execute
        response = self.api.get_inbounds({'from_addr': '+1234'})
        # Check
        self.assertEqual(list(response["results"]), [
            {'from_addr': '+1234', 'content': 'content1'},
            {'from_addr': '+1234', 'content': 'content2'},
            {'from_addr': '+1234', 'content': 'content3'}])
        self.assertEqual(len(responses.calls), 2)
        self.assertEqual(responses.calls[0].request.method, 'GET')
        self.assertEqual(
            responses.calls[0].request.url,
            "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234"
        )
        self.assertEqual(responses.calls[1].request.method, 'GET')
        self.assertEqual(
            responses.calls[1].request.url,
            "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234&cursor=1"
        )
| 2.46875 | 2 |
move_test.py | hyjalxl/spidier2 | 0 | 12757556 | # coding=utf-8
# name=hu_yang_jie
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
from PIL import Image
import time
import handl
if __name__ == '__main__':
    # Log in to bilibili with PhantomJS and solve the geetest slider captcha:
    # drag the knob, screenshot before/after, find the gap with handl.scan_img,
    # then drag the knob by the measured offset.  Python 2 only (print stmts).
    driver = webdriver.PhantomJS()
    driver.get('https://passport.bilibili.com/login')
    print driver.title
    driver.find_element_by_id('login-username').send_keys(u'<PASSWORD>')
    driver.find_element_by_id('login-passwd').send_keys(u'<PASSWORD>')
    while True:
        try:
            print 'try it'
            # ac_slider is the draggable slider-knob element
            ac_slider = driver.find_element_by_xpath('//div[@class="gt_slider_knob gt_show"]')
            # the slide-guide tip element
            ac_guide = driver.find_element_by_xpath('//div[@class="gt_guide_tip gt_show"]')
            # the curtain element underneath the slider track
            ac_curtain = driver.find_element_by_xpath('//div[@class="gt_curtain_tip gt_hide"]')
            # the small lock ("ajax ready") element
            ac_ajax = driver.find_element_by_xpath('//div[@class="gt_ajax_tip gt_ready"]')
            print 'Find all.********************'
            print ac_slider.location
            print ac_guide.location
            print ac_curtain.location
            print ac_ajax.location
            break
        except:
            # Captcha widget not rendered yet; retry once a second.
            time.sleep(1)
            print 'Not find slider.'
    dr1 = ActionChains(driver)
    dr1.move_to_element(ac_slider).pause(0.5).perform()
    driver.save_screenshot('1.png')
    # move_by_offset moves relative to the current element's coordinates
    dr1.click_and_hold(ac_slider).move_by_offset(198, 0).pause(0.5).perform()
    # dr1.reset_actions() matters below -- it clears the queued actions
    time.sleep(0.5)
    file_name = 'bi.png'
    driver.save_screenshot(file_name)
    im = Image.open(file_name)
    im2 = Image.open('1.png')
    # Crop both screenshots to the captcha region before comparing them.
    box = (562, 224, 822, 340)
    region = im.crop(box)
    region2 = im2.crop(box)
    im_cut = 'cut.png'
    region.save(im_cut)
    region2.save('cut2.png')
    move_num = handl.scan_img(im_cut)
    print move_num
    dr1.reset_actions()
    # -7 looks like a hand-tuned offset correction -- TODO confirm in handl
    dr1.click_and_hold(ac_slider).move_by_offset(move_num-7, 0).pause(0.5).release().perform()
    time.sleep(1)
    driver.save_screenshot('3.png')
    time.sleep(8)
    # Serialize the session cookies into a single header-style string.
    cookie = [item["name"] + "=" + item["value"] for item in driver.get_cookies()]
    cookiestr = ';'.join(item for item in cookie)
    print cookiestr
    driver.quit()
| 2.421875 | 2 |
leetcode/560.py | windniw/just-for-fun | 1 | 12757557 | """
link: https://leetcode.com/problems/subarray-sum-equals-k
problem: 求数组中是否存在子串满足其和为给定数字,求满足条件子串数。
solution: 记录前缀和。
"""
class Solution:
    def subarraySum(self, nums: List[int], k: int) -> int:
        """Count contiguous subarrays of *nums* whose elements sum to *k*.

        A subarray ending at the current index sums to k exactly when
        (prefix - k) was seen as an earlier prefix sum; count occurrences.
        """
        seen = {0: 1}  # prefix sum -> number of times observed so far
        prefix = 0
        count = 0
        for value in nums:
            prefix += value
            count += seen.get(prefix - k, 0)
            seen[prefix] = seen.get(prefix, 0) + 1
        return count
| 3.1875 | 3 |
OctaHomeAlarm/OctaFiles/urls.py | Tomcuzz/OctaHomeAutomation | 4 | 12757558 | <gh_stars>1-10
from OctaHomeCore.OctaFiles.urls.base import *
from OctaHomeAlarm.views import *
class AlarmOctaUrls(OctaUrls):
    """URL provider exposing the alarm view to the OctaHome URL registry."""
    @classmethod
    def getUrls(cls):
        # Route /Alarm/ to the alarm class-based view, reversible as 'Alarm'.
        return [
            url(r'^Alarm/$', handleAlarmView.as_view(), name='Alarm'),
        ]
| 1.585938 | 2 |
examples/emb_models/hin2vec.py | Sengxian/cogdl | 0 | 12757559 | from cogdl import experiment
from cogdl.utils import build_args_from_dict
DATASET_REGISTRY = {}
def default_parameter():
    """Build the default hyper-parameter namespace for hin2vec runs."""
    return build_args_from_dict(
        {
            "hidden_size": 128,
            "seed": [0, 1, 2],
            "lr": 0.025,
            "walk_length": 80,
            "walk_num": 40,
            "batch_size": 1000,
            "hop": 2,
            "negative": 5,
            "epochs": 1,
        }
    )
def register_func(name):
    """Decorator factory: register the wrapped config hook under *name*."""
    def decorator(func):
        DATASET_REGISTRY[name] = func
        return func
    return decorator
@register_func("gtn-dblp")
def dblp_config(args):
return args
@register_func("gtn-acm")
def acm_config(args):
return args
@register_func("gtn-imdb")
def imdb_config(args):
return args
def run(dataset_name):
    """Run the hin2vec multiplex node-classification experiment on *dataset_name*."""
    args = default_parameter()
    # Let the dataset-specific hook adjust the defaults, then unpack to kwargs.
    args = DATASET_REGISTRY[dataset_name](args).__dict__
    results = experiment(task="multiplex_node_classification", dataset=dataset_name, model="hin2vec", **args)
    return results
if __name__ == "__main__":
datasets = ["gtn-dblp", "gtn-acm", "gtn-imdb"]
for x in datasets:
run(x)
| 2.34375 | 2 |
alipay/aop/api/domain/OuterTargetingItem.py | articuly/alipay-sdk-python-all | 0 | 12757560 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class OuterTargetingItem(object):
    """Targeting rule entry: a targeting *type* plus its list of values.

    Mirrors the Alipay open-API wire format: ``to_alipay_dict`` serializes
    to a plain dict, ``from_alipay_dict`` rebuilds an instance from one.
    """

    def __init__(self):
        self._type = None
        self._value_list = None

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    @property
    def value_list(self):
        return self._value_list

    @value_list.setter
    def value_list(self, value):
        # Only list assignments are accepted (copied shallowly);
        # anything else is silently ignored, per the SDK convention.
        if isinstance(value, list):
            self._value_list = [item for item in value]

    def to_alipay_dict(self):
        """Serialize to a plain dict, recursing into nested API models."""
        params = dict()
        if self.type:
            params['type'] = (
                self.type.to_alipay_dict()
                if hasattr(self.type, 'to_alipay_dict')
                else self.type
            )
        if self.value_list:
            if isinstance(self.value_list, list):
                # Convert API-model elements in place, as the base SDK does.
                for index, element in enumerate(self.value_list):
                    if hasattr(element, 'to_alipay_dict'):
                        self.value_list[index] = element.to_alipay_dict()
            if hasattr(self.value_list, 'to_alipay_dict'):
                params['value_list'] = self.value_list.to_alipay_dict()
            else:
                params['value_list'] = self.value_list
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Rebuild an instance from a response dict; None for empty input."""
        if not d:
            return None
        instance = OuterTargetingItem()
        if 'type' in d:
            instance.type = d['type']
        if 'value_list' in d:
            instance.value_list = d['value_list']
        return instance
| 2.25 | 2 |
twitchbot/util/dict_util.py | max-susi/PythonTwitchBotFramework | 87 | 12757561 | import json
import re
from typing import Any
__all__ = [
'dict_get_value',
'dict_has_keys',
'try_parse_json',
]
def dict_has_keys(data: dict, *keys) -> bool:
    """Return True when *data* contains the nested key path given by *keys*."""
    node = data
    for key in keys:
        if not isinstance(node, dict) or key not in node:
            return False
        node = node[key]
    return True
def dict_get_value(data: dict, *keys, default: Any = None) -> Any:
    """Walk nested containers by *keys*, returning *default* if the path breaks.

    A single key argument may be a dotted or slash-separated path,
    e.g. ``dict_get_value(d, "a.b/c")``.
    """
    if len(keys) == 1:
        keys = re.split(r'[./]', keys[0])
    node = data
    for key in keys:
        try:
            node = node[key]
        except (TypeError, IndexError, KeyError):
            return default
    return node
def try_parse_json(data, **default_keys) -> dict:
    """Parse *data* as JSON; on failure return **default_keys as a dict.

    A value that is already a dict is returned unchanged.
    """
    if isinstance(data, dict):
        return data
    try:
        return json.loads(data)
    except (TypeError, json.JSONDecodeError):
        return default_keys
| 3.03125 | 3 |
main.py | TobyBoyne/fourier-animation | 1 | 12757562 | <filename>main.py<gh_stars>1-10
import matplotlib.pyplot as plt
import numpy as np
from draw import Drawer
from fourier import Fourier
from anim import Animator, GroupAnimator
def run(Ns, save_anim=False):
    """Let the user draw a shape, fit Fourier series of orders *Ns* to it,
    plot the approximations, and animate the epicycle drawing.

    Ns : sequence of int -- Fourier orders; needs at least two entries,
        since ``plt.subplots(1, len(Ns))`` is unpacked as an axes array.
    save_anim : bool -- when True, also save the comparison figure and the
        animation GIF under ``images/``.
    """
    # --- user input ---
    fig, ax = plt.subplots()
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
    draw = Drawer(fig, ax)
    plt.show()
    # --- find Fourier series for drawn shape ---
    fouriers = [Fourier(draw.points, N) for N in Ns]
    # --- plot drawn shape against Fourier approximation ---
    fig, axs = plt.subplots(1, len(Ns))
    ps = draw.points
    # draw.points appears to be complex-valued: column 0 carries time in its
    # real part, column 1 the drawn point as x + iy -- TODO confirm in draw.py
    t_start, t_end = ps[0, 0].real, ps[-1, 0].real
    ts = np.linspace(t_start, t_end, 100)
    fs = [f(ts) for f in fouriers]
    for ax, f in zip(axs, fs):
        ax.plot(ps[:, 1].real, ps[:, 1].imag)
        ax.plot(f.real, f.imag)
    plt.legend(("User Input", "Fourier Approximation"))
    plt.show()
    # --- animate Fourier drawing ---
    anim_fig, anim_axs = plt.subplots(1, len(Ns))
    anim_fig.suptitle(f"Fourier approximations of orders {Ns}")
    anims = []
    for anim_ax, fourier in zip(anim_axs, fouriers):
        anim_ax.set_xlim([0, 1])
        anim_ax.set_ylim([0, 1])
        anim_ax.set_title(f"N = {len(fourier.n) // 2}")
        anims.append(Animator(anim_ax, fourier))
    group_anim = GroupAnimator(anim_fig, anims, ts[-1])
    if save_anim:
        # NOTE(review): literal backslashes make these Windows-only paths.
        fig.savefig('images\comparison.png')
        group_anim.save('images\drawing.gif', writer='imagemagick', fps=30)
    plt.show()
if __name__ == "__main__":
# number of coefficients in Fourier series
Ns = (2, 12)
run(Ns, save_anim=True) | 3.265625 | 3 |
src/data_analyze.py | leclair-7/CarND-Unscented-Kalman-Filter-Project | 0 | 12757563 |
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib
import numpy as np
import matplotlib.pyplot as plt

# Jupyter-notebook export: inline plotting only works under IPython.
get_ipython().magic('matplotlib inline')

# In[2]:

# Load the NIS (Normalized Innovation Squared) values logged by the UKF run.
# NOTE(review): delimiter="\n" treats each line as one field -- presumably
# one NIS value per line; confirm against the log format.
data = pd.read_csv("../build/nis_array_.log", delimiter="\n")
# Constant reference line at 7.8 for all 498 timesteps (chi-squared bound).
t = [7.8 for i in range(498)]
ts = np.arange(0, 498, 1)

# In[3]:

plt.plot(ts, t, label='first plot')
plt.plot(ts, data, label='second plot')
# BUG FIX: ``plt.legend`` was referenced without being called, so no legend
# was ever drawn.
plt.legend()

# If the curve is way under, we're overestimating the uncertainty in the system; if half of the curve is over, we're underestimating the uncertainty
| 3.125 | 3 |
jaikucommon/protocol/base.py | noutrela/jaikuenginepatch | 1 | 12757564 | <gh_stars>1-10
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jaikucommon import exception
class Connection(object):
    # Placeholder base type for transport connections; subclasses add behavior.
    pass
class Service(object):
    """Base messaging service: routes an incoming message to the first
    handler whose match() accepts it.

    Subclasses set ``handlers`` to a list of handler classes and must
    provide ``unknown``, ``response_ok``, ``response_error`` and
    ``send_message`` (referenced below but not defined here).
    NOTE: uses Python 2 ``except ..., e`` syntax; not Python 3 compatible.
    """
    connection = None
    handlers = None
    _handlers = None
    def __init__(self, connection):
        self.connection = connection
        self._handlers = []
    def init_handlers(self):
        # Instantiate each configured handler class, bound to this service.
        if not self.handlers:
            return
        for handler_class in self.handlers:
            self._handlers.append(handler_class(self))
    def handle_message(self, sender, target, message):
        """Dispatch *message*; fall back to unknown() when nothing matches."""
        matched = None
        handler = None
        for h in self._handlers:
            matched = h.match(sender, message)
            if matched:
                handler = h
                break
        if not matched:
            rv = self.unknown(sender, message)
            return self.response_ok(rv)
        try:
            rv = handler.handle(sender, matched, message)
            return self.response_ok(rv)
        except exception.UserVisibleError, e:
            # User-visible failures are echoed back to the sender.
            exception.log_exception()
            self.send_message([sender], str(e))
            return self.response_error(e)
        except exception.Error:
            # Falls through returning None -- possibly truncated source; the
            # original may have returned an error response here. TODO confirm.
            exception.log_exception()
| 2.078125 | 2 |
Homework_5/G_Hyper_checkers/G_Hyper_checkers_refactoring.py | dimk00z/summer_yandex_algorithmic_course | 8 | 12757565 | <filename>Homework_5/G_Hyper_checkers/G_Hyper_checkers_refactoring.py
from itertools import permutations
def calculate_variants(n, k, x):
    """Count placement variants for the hyper-checkers task.

    n, k -- problem parameters from input (k bounds the reachable range
        element*k); x -- list of checker coordinates.  The exact counting
    rule is reconstructed from the contest statement -- TODO confirm.
    """
    result = 0
    zipped_x = set()
    cnt = {}
    # Count occurrences of each coordinate and collect the distinct ones.
    for element in x:
        if element not in cnt:
            cnt[element] = 0
        cnt[element] += 1
        zipped_x.add(element)
    zipped_x = list(zipped_x)
    zipped_x.sort()
    end_set = set()
    for position, element in enumerate(zipped_x):
        # Only coordinates up to element*k are reachable from this one.
        high_border = element*k
        inner_cnt = {}
        inner_position = position
        while True:
            if inner_position == len(zipped_x):
                break
            if zipped_x[inner_position] <= high_border:
                # Coordinates with more than two checkers are capped at two;
                # the surplus apparently contributes one variant directly.
                if cnt[zipped_x[inner_position]] > 2:
                    result += 1
                    cnt[zipped_x[inner_position]] = 2
                inner_cnt[zipped_x[inner_position]
                          ] = cnt[zipped_x[inner_position]]
                inner_position += 1
            else:
                break
        if len(inner_cnt) < 2:
            continue
        # Expand the window back into a multiset and count ordered triples.
        inner_list = []
        for inner_element in inner_cnt:
            inner_list.extend([inner_element]*inner_cnt[inner_element])
        if len(inner_list) < 3:
            continue
        inner_set = set(permutations(inner_list, 3))
        end_set |= inner_set
        # print(inner_list)
        # Once the window covers the last coordinate, wider windows add nothing.
        if high_border >= zipped_x[-1]:
            break
    result += len(end_set)
    return result
# Read n and k from the first line, the checker coordinates from the second.
# NOTE(review): the name ``file`` shadows the (py2) builtin.
with open('input.txt') as file:
    lines = file.readlines()
    n, k = tuple(map(int, lines[0].split()))
    x = list(map(int, lines[1].split()))
with open('output.txt', 'w') as file:
    file.write(str(calculate_variants(n, k, x)))
| 3.359375 | 3 |
allsky/launch_allsky.py | philbull/SinglePixel | 0 | 12757566 | <reponame>philbull/SinglePixel
# Simple program to launch a serial job for the all sky fitting
# mpiexec -n 4 python launch_allsky.py
import os
import numpy as np
import healpy as hp
from mpi4py import MPI
import model_values_allsky
import model_list_allsky
import run_joint_mcmc_allsky_split
import time
def main( in_list = 'sync,freefree,mbb',
          fit_list = 'sync,freefree,mbb',
          Nside = 32,
          nu_min = 15,
          nu_max = 800,
          n_bands = 7,
          seed = 100,
          redo = 0 ):
    """Distribute the all-sky per-pixel MCMC fits across MPI ranks.

    in_list/fit_list -- comma-separated component models for the simulated
        input and for the fit; Nside -- HEALPix resolution; nu_min/nu_max/
        n_bands -- band setup; redo=1 discards any previous result file.
    Rank 0 prepares/splits the work, every rank runs its pixel chunk, and
    rank 0 gathers and writes the summary FITS map.
    NOTE: mixes Python 2 print statements with print() calls -- py2 only.
    """
    # 1) Set-up MPI
    comm = MPI.COMM_WORLD
    my_id = comm.Get_rank()
    n_proc = comm.Get_size()
    t0_run = time.time()
    # 2) Preparing the list of with the input and fit model
    in_list_all = [ 'cmb', ] ; fit_list_all = [ 'cmb', ]
    # Removing any white spaces
    in_list = "".join( in_list.split( ) )
    # Creating a list
    in_list_all += in_list.split( "," )
    fit_list = "".join( fit_list.split( ) )
    fit_list_all += fit_list.split( "," )
    # Getting the file name and other useful labels
    amp_names_in, param_names_in, amp_names_fit, param_names_fit, name_in, name_fit = \
        model_list_allsky.get_model_param_names( in_list = in_list, fit_list = fit_list )
    # Getting the number of variables that will be stored
    N_fit = len( amp_names_fit ) + len( param_names_fit )
    # output directory
    out_dir = "output"
    if ( os.path.isdir( out_dir ) != 1 ):
        os.system( 'mkdir %s' % out_dir )
    # Name for the results
    filename = '%s/allsky_summary_%s.%s_nb7_seed%04i_nside%04i.fits' % ( out_dir, name_in, name_fit, seed, Nside )
    if my_id == 0:
        print( "(LAUNCH_ALLSKY) Running in a system with %i processors" % ( n_proc ) )
        # Defaut list of allowed models
        allowed_comps = model_list_allsky.model_dict()
        # Make sure models are of known types
        for item in in_list_all:
            if item not in allowed_comps.keys():
                raise ValueError("Unknown component type '%s'" % item)
        for item in fit_list_all:
            if item not in allowed_comps.keys():
                raise ValueError("Unknown component type '%s'" % item)
        # 2.2) Checking the work done
        # If it is not to be redone and the file exists, open the file
        if ( redo == 0 ) and ( os.path.isfile( filename ) == 1 ):
            maps = hp.read_map( filename, 0 )
            # Get pixels that have not been run
            px_unseen = np.where( maps == hp.UNSEEN )
            # Tuple to array
            px_unseen = px_unseen[ 0 ]
        # If we want to re-do the analysis, remove the file
        if ( redo == 1 ) and ( os.path.isfile( filename ) == 1 ):
            os.system( 'rm %s' % ( filename ) )
        # If it does no exist, create the file filled with UNSEEN values
        if ( os.path.isfile( filename ) != 1 ):
            px_unseen = np.arange( 12 * Nside * Nside )
            maps = np.full( ( 3 * N_fit, 12 * Nside * Nside ), hp.UNSEEN, dtype = 'float32' )
            hp.write_map( filename, maps, dtype = 'float32' )
        # 2.3) If there is no work to do
        n_unseen = len( px_unseen )
        # Else, run it
        if n_unseen != 0:
            print "(LAUNCH_ALLSKY) There are still %i pixels to be analyzed. Running ...'" % ( n_unseen )
            # Create array of dictionaries from the global dictionary, which is created for the missing pixels to be run
            allsky_dict_splt = model_values_allsky.get_allsky_dict_split( in_list, Nside, px_unseen, seed, n_proc )
            # Create array to store the results
            rslts_all = np.array_split( np.full( ( n_unseen, 3 * N_fit ), hp.UNSEEN, dtype = 'float32' ), n_proc )
            px_all = np.array_split( px_unseen, n_proc )
    else:
        # Non-root ranks: placeholders so bcast/scatter arguments exist.
        allsky_dict_splt = None
        n_unseen = None
        rslts_all = None
        px_all = None
    # 3) MPI I/O
    # 3.1) Broadcasting whether there is any work to do
    n_unseen = comm.bcast( n_unseen, root = 0 )
    if n_unseen == 0:
        print "(LAUNCH_ALLSKY) No more pixels to analyze in proc %i. Results are found in: %s" % ( my_id, filename )
        return
    # 3.2) The portion of the allsky dictionary that gets assigned to each processor
    dict_splt = comm.scatter( allsky_dict_splt, root = 0 )
    # 3.3) The portion of the results that will be considered
    rslts_splt = comm.scatter( rslts_all, root = 0 )
    # 3.4) The pixel numbers that will be considered (for intermediate storage only)
    px_splt = comm.scatter( px_all, root = 0 )
    # 4) MPI runs
    # Number of pixels to consider in a processor
    n_px = len( dict_splt['cmb'][ 0 ] )
    # Timing
    t0 = time.time()
    t1 = 0
    for i_px in range( n_px ):
        # 4.1) Run single pixel with dict_splt
        rslts_splt[ i_px ] = run_joint_mcmc_allsky_split.main(
            in_list = in_list_all,
            fit_list = fit_list_all,
            nu_min = nu_min,
            nu_max = nu_max,
            n_bands = n_bands,
            input_dict = dict_splt,
            idx_px = i_px,
            seed = i_px + 2 * n_px * my_id, # Distinct seed. Notice that n_px may vary slightly from process to process -> '2'
            Nside = Nside )
        t1 = t1 + time.time() - t0
        t0 = time.time()
        # Progress report every 10 pixels, with an ETA extrapolated from t1.
        if ( np.ceil( ( i_px + 1 ) / 10. ) == ( ( i_px + 1 ) / 10. ) ):
            print( "(LAUNCH_ALLSKY) %2.1f %s of the run in processor %i done. ~%2.1f minutes for completion. ETA: %s" % ( ( 100. * i_px / n_px ), '%', my_id, 1. * ( n_px - i_px ) * ( t1 ) / i_px / 60., time.ctime(time.time() + 1. * ( n_px - i_px ) * ( t1 ) / i_px ) ) )
        # Option of storing partial results (no need to gather since this is a temporary result)
        if ( np.ceil( ( i_px + 1 ) / 20. ) == ( ( i_px + 1 ) / 20. ) ) and ( i_px != n_px - 1 ):
            try:
                maps_tmp = hp.read_map( filename, np.arange( 3 * N_fit ) )
                rslts_tmp = np.transpose( rslts_splt )
                for ii in range( 3 * N_fit ):
                    maps_tmp[ ii ][ px_splt[ 0 : i_px ] ] = rslts_tmp[ ii ][ 0 : i_px ]
                hp.write_map( filename, maps_tmp, dtype = 'float32' )
                print( "(LAUNCH_ALLSKY) Intermediate data written (My_ID=%i)" % my_id )
            except ( IOError, ValueError, UnboundLocalError ): # These are probably all, but add other exception types if necessary.
                # Intermediate storage is best-effort; keep computing.
                print( "(LAUNCH_ALLSKY) Could not write intermediate data (My_ID=%i). Continuing ..." % my_id )
                pass
    # 4.2) Gathering the results
    comm.Barrier()
    rslts_all = comm.gather( rslts_splt, root = 0 )
    # 4.3) Writing the results
    if my_id == 0:
        # Joining the different pieces into the two-dimensional structure to be stored
        data_fits = np.concatenate( rslts_all )
        # Re-shaping the array to be stored (alternatively, one could store it as n_px * (3*N_fit) but it looks less intuitive for reading and plotting)
        maps = hp.read_map( filename, range( 3 * N_fit ) )
        data_fits_tmp = np.transpose( data_fits )
        for ii in range( 3 * N_fit ):
            maps[ ii ][ px_unseen ] = data_fits_tmp[ ii ][ : ]
        # NB: Letting try/exception inherited from pyfits (embedded in healpy) take care of any eventual issue (it should never happen indeed unless major IO issue)
        hp.write_map( filename, maps, dtype = 'float32' )
        print( 'Results written in %s' % filename )
        t_run = ( time.time() - t0_run ) / 3600
        str_run = 'hours'
        if t_run < 1:
            t_run *= 60
            str_run = 'minutes'
        print( 'Total running time %2.2f %s' % ( t_run, str_run ) )
# Entry point: mpiexec -n <N> python launch_allsky.py (see header comment).
if __name__ == '__main__':
    main()
| 2.296875 | 2 |
test/dynamic_url_test.py | JhoneM/DynamicUrl | 0 | 12757567 | # -*- coding: utf-8 -*-
import unittest
from dynamic_url import Url
class TestParams(unittest.TestCase):
    """Exercise Url.get_url with malformed inputs; every case expects ''."""
    # Class-level fixtures shared by the tests below.
    array_test = ["Origin"]
    dict_test = {"Origin": "test"}
    dict_test2 = {"Not_Origin": "test"}
    dict_test3 = {"Origin": "https://www.google.com/"}
    dict_test4 = {"Origin": ""}
    def test_get_url_two_params(self):
        # Two plain strings are not a valid (headers, db) pair.
        self.assertEqual(
            Url().get_url("hola", "db"), "", "Error in params -> two strings"
        )
        self.assertEqual(
            Url().get_url("", ""), "", "Error in params -> two empty strings"
        )
    def test_get_url_array_params(self):
        self.assertEqual(
            Url().get_url(self.array_test, "db"),
            "",
            "Error in params with array",
        )
    def test_get_url_dict_params(self):
        self.assertEqual(
            Url().get_url(self.dict_test, "db"),
            "",
            "Error in params with array",
        )
        # NOTE(review): this call passes ``db`` as a keyword with a dict
        # value, unlike the positional string elsewhere -- confirm intended.
        self.assertEqual(
            Url().get_url(self.dict_test2, db={"db": ""}),
            "",
            "Error in params with array",
        )
        self.assertEqual(
            Url().get_url(self.dict_test3, "db"),
            "",
            "Error in params with array",
        )
        self.assertEqual(
            Url().get_url(self.dict_test4, "db"),
            "",
            "Error in params with array",
        )
if __name__ == "__main__":
unittest.main()
| 3.375 | 3 |
src/cupcake/sequence/GFF.py | milescsmith/cDNA_Cupcake | 0 | 12757568 | <gh_stars>0
import re
import sys
from collections import defaultdict
from csv import DictReader
from pathlib import Path
from bx.intervals.intersection import Interval, IntervalTree
from cupcake import cupcake_logger as logger
class GTF:
    """Parser/index for GTF annotation files.

    Builds three indexes while reading:
      genome:     chrom -> IntervalTree of (0-based start, 1-based end, tID)
      transcript: tID   -> IntervalTree of exons ({'ith': index, 'chr': chrom})
      exon:       (start0, end1) -> list of (tID, exon index, chrom)
    plus ``transcript_info``: tID -> metadata dict.
    """

    def __init__(self, gtf_filename):
        self.gtf_filename = (
            str(gtf_filename) if isinstance(gtf_filename, Path) else gtf_filename
        )
        # BUG FIX: defaultdict requires a *callable* default_factory.
        # Passing ``IntervalTree()`` (an instance) made every missing-key
        # access try to call the tree object and raise TypeError.
        self.genome = defaultdict(
            IntervalTree
        )  # chr --> IntervalTree --> (0-start, 1-end, transcript ID)
        self.transcript = defaultdict(
            IntervalTree
        )  # tID --> IntervalTree --> (0-start, 1-end, {'ith': i-th exon, 'eID': exon ID})
        self.exon = defaultdict(
            list
        )  # (0start,1end) --> list of (tID, ith-exon, chr)
        self.transcript_info = {}  # tID --> metadata dict
        self.readGTF(self.gtf_filename)

    def readGTF(self, filename):
        """
        GTF files
        (0) seqname
        (1) annotation source
        (2) feature: gene|transcript|CDS|exon|UTR
        (3) 1-based start
        (4) 1-based end
        (5) ignore
        (6) strand: +|-
        (7) phase
        (8) extra stuff (gene ID, transcript ID...)
        """
        for line in open(filename):
            if line.startswith("#"):
                continue  # header section, ignore
            if len(line.strip()) == 0:
                continue  # some gtf files have blank lines
            raw = line.strip().split("\t")
            seqname = raw[0]
            feature = raw[2]
            strand = raw[6]
            start0, end1 = int(raw[3]) - 1, int(raw[4])
            gtype, gstat = "NA", "NA"
            gName = "NA"
            tName = "NA"
            tSupportLevel = "NA"
            gtags = []
            # Parse the attribute column: '; '-separated key "value" pairs.
            for stuff in raw[8].split("; "):
                _a, _b = stuff.split(None, 1)
                if _a == "transcript_id":
                    tID = _b[1:-1]  # removing quotes ""
                elif _a == "transcript_name":
                    tName = _b[1:-1]  # removing quotes
                elif _a == "transcript_support_level":
                    tSupportLevel = _b[1:-1]
                elif _a == "gene_name":
                    gName = _b[1:-1]
                elif _a == "gene_id":
                    gID = _b[1:-1]  # removing quotes ""
                elif _a == "gene_type":
                    gtype = _b[1:-1]
                elif _a == "gene_status":
                    gstat = _b[1:-1]
                elif _a == "tag":
                    gtags.append(_b[1:-1])
            if feature == "transcript":
                self.genome[seqname].insert(start0, end1, tID)
                self.transcript_info[tID] = {
                    "chr": seqname,
                    "gname": gName,
                    "gid": gID,
                    "type": gtype,
                    "status": gstat,
                    "strand": strand,
                    "tags": gtags,
                    "tname": tName,
                    "tlevel": tSupportLevel,
                }
                # Exon counter restarts at each transcript record.
                ith = 0
            elif feature == "exon":
                self.transcript[tID].insert(start0, end1, {"ith": ith, "chr": seqname})
                self.exon[(start0, end1)].append((tID, ith, seqname))
                ith += 1

    def get_exons(self, tID):
        """
        Return the list of intervals for a given tID
        """
        pp = []
        self.transcript[tID].traverse(pp.append)
        return pp

    def find(self, chrom, start0, end1):
        """Return de-duplicated transcript IDs overlapping the given region.

        BUG FIX: the original ``list({tree.find(...)})`` built a set literal
        *containing* the result list (TypeError: lists are unhashable); the
        intent was to de-duplicate the hits with ``set(...)``.
        """
        return list(set(self.genome[chrom].find(start0, end1)))
class polyAGFF(GTF):
    """
    GTF subclass for polyA annotation GFFs; only `polyA_site` lines are
    indexed (polyA_signal / pseudo_polyA lines are validated but skipped).
    """

    def readGTF(self, filename):
        """Index every polyA_site line by chromosome and transcript ID."""
        with open(filename) as f:
            for line in f:
                if line.startswith("##"):
                    continue  # just comments
                raw = line.strip().split("\t")
                # NOTE(review): assert is stripped under `python -O`; this is
                # input validation — consider raising instead.
                assert raw[2] in ("polyA_signal", "polyA_site", "pseudo_polyA")
                if raw[2] == "polyA_site":
                    chrom = raw[0]
                    strand = raw[6]
                    start = int(raw[3]) - 1  # convert to 0-based start
                    end = int(raw[4])
                    for stuff in raw[8].split("; "):
                        _a, _b = stuff.split(None, 1)
                        if _a == "transcript_id":
                            tID = _b[1:-1]  # removing quotes ""
                    self.genome[chrom].insert(start, end, tID)
                    self.transcript_info[tID] = {"chr": chrom, "strand": strand}
                    self.transcript[tID].insert(start, end, 0)
class TSSGFF(GTF):
    """
    GTF subclass for transcription start site (TSS) annotations: each line
    is a single-base feature; transcript_info maps gene ID -> list of
    {"chr", "strand", "position"} dicts (a gene may have several TSSs).
    """

    def readGTF(self, filename):
        """Index single-base TSS lines by chromosome and gene ID."""
        with open(filename) as f:
            for line in f:
                if line.startswith("##"):
                    continue
                raw = line.strip().split("\t")
                # NOTE(review): this is *substring* membership — it passes for
                # "Gencode", "TSS", or even "e". Presumably a tuple test or an
                # equality check was intended; confirm against real inputs.
                assert raw[2] in "Gencode TSS"
                chrom = raw[0]
                strand = raw[6]
                start = int(raw[3]) - 1  # convert to 0-based start
                end = int(raw[4])
                for stuff in raw[8].split("; "):
                    _a, _b = stuff.split(None, 1)
                    if _a == "gene_id":
                        gID = _b[1:-1]  # removing quotes ""
                self.genome[chrom].insert(start, end, (gID, start))
                # a TSS must be a single base
                assert start + 1 == end
                if gID in self.transcript_info:
                    self.transcript_info[gID].append(
                        {"chr": chrom, "strand": strand, "position": start}
                    )
                else:
                    self.transcript_info[gID] = [
                        {"chr": chrom, "strand": strand, "position": start}
                    ]
class ucscGTF(GTF):
    """
    UCSC-style GFF, which is
    0) seqname (chromosome)
    1) source
    2) feature (gene|exon|mRNA...)
    3) start (1-based)
    4) end (1-based)
    5) score
    6) strand
    7) frame
    8) group
    """

    def readGTF(self, filename):
        """Index exon lines; column 8 (group) is used as the transcript ID."""
        # NOTE(review): `ith` numbering assumes all exon lines of a transcript
        # are contiguous in the file — interleaved transcripts would continue
        # the previous transcript's counter. Confirm for the inputs used.
        for line in open(filename):
            raw = line.strip().split("\t")
            if raw[2] == "exon":
                _chr = raw[0]
                strand = raw[6]
                start = int(raw[3]) - 1
                end = int(raw[4])
                tID = raw[8]
                if tID not in self.transcript:
                    # new transcript
                    self.genome[_chr].insert(start, end, tID)
                    self.transcript_info[tID] = {
                        "chr": _chr,
                        "gid": None,
                        "type": None,
                        "status": None,
                        "strand": strand,
                    }
                    ith = 0
                    self.exon[(start, end)].append((tID, ith, _chr))
                    self.transcript[tID].insert(start, end, {"ith": ith, "chr": _chr})
                else:
                    ith += 1
                    self.transcript[tID].insert(start, end, {"ith": ith, "chr": _chr})
                    self.exon[(start, end)].append((tID, ith, _chr))
class variantRecord:
    """Container for one variant call parsed from a variant GFF line."""

    def __init__(
        self,
        seqname,
        feature,
        start,
        end,
        reference,
        variant,
        freq,
        coverage,
        confidence,
    ):
        # Store every field verbatim; attribute names mirror GFF semantics.
        for attr, value in (
            ("chr", seqname),
            ("type", feature),
            ("start", start),
            ("end", end),
            ("reference", reference),
            ("variant", variant),
            ("freq", freq),
            ("coverage", coverage),
            ("confidence", confidence),
        ):
            setattr(self, attr, value)

    def __str__(self):
        return f"""
        {self.type} {self.chr}:{self.start}-{self.end}
        reference: {self.reference}
        variant: {self.variant} ({self.freq})
        coverage: {self.coverage}
        confidence: {self.confidence}
        """
class variantGFFReader:
    """
    Iterator over variant GFF lines, e.g.:

    Chr1 . substitution 86591 86591 . . . reference=T;variantSeq=T/G;frequency=35/15;
    coverage=66;confidence=40

    Fix over the original: the Python 3 iterator protocol method is
    `__next__`; the class previously only defined `next` (Python 2 style),
    so `for rec in reader` raised TypeError. `next` is kept as an alias for
    any existing explicit callers.
    """

    def __init__(self, filename):
        self.filename = filename
        self.f = open(filename)
        # Skip any leading '#' header lines, leaving the file positioned at
        # the first data line.
        while True:
            cur = self.f.tell()
            if not self.f.readline().startswith("#"):
                break
        self.f.seek(cur)

    def __iter__(self):
        return self

    def __next__(self):
        return self.read()

    next = __next__  # backward compatibility with Python-2-style callers

    def read(self):
        """Parse the next line into a variantRecord; raise StopIteration at EOF."""
        cur = self.f.tell()
        line = self.f.readline().strip()
        if self.f.tell() == cur:
            raise StopIteration("EOF reached!!")
        raw = line.strip().split("\t")
        seqname = raw[0]
        feature = raw[2]
        start = int(raw[3])
        end = int(raw[4])
        # NOTE(review): reference/variant/freq/coverage/confidence stay
        # unbound if the attribute column omits a key — confirm inputs
        # always carry all five.
        for x in raw[8].split(";"):
            a, b = x.split("=")
            if a == "reference":
                reference = b
            elif a == "variantSeq":
                variant = b
            elif a == "frequency":
                freq = b
            elif a == "coverage":
                coverage = int(b)
            elif a == "confidence":
                confidence = int(b)
        return variantRecord(
            seqname, feature, start, end, reference, variant, freq, coverage, confidence
        )
class Coords(GTF):
    def readGTF(self, filename):
        """
        .coords files
        (0) gene name
        (1) chr
        (2) number of exons
        (3) strand
        (4) list of space-separated 1-based start, 1-based end

        Fixes over the original:
        - `ith` is now incremented per exon; it was stuck at 0, so every
          exon of a transcript was recorded as the 0th exon (unlike the
          base GTF parser's numbering).
        - removed the dead `i += 1` at the end of the loop (a leftover;
          `i` is rebound by the range() loop anyway).
        """
        for line in open(filename):
            raw = line.strip().split()
            tID = raw[0]
            seqname = raw[1]
            ith = 0
            if tID in self.transcript:
                logger.info(f"duplicate tID {tID} seen, ignore!")
                continue
            self.transcript_info[tID] = {"chr": seqname}
            # coordinate pairs start at field 4: (start1, end1), (start1, end1), ...
            for i in range(4, len(raw), 2):
                start0 = int(raw[i]) - 1  # convert to 0-based start
                end1 = int(raw[i + 1])
                self.genome[seqname].insert(start0, end1, tID)
                self.transcript[tID].insert(start0, end1, {"ith": ith, "chr": seqname})
                self.exon[(start0, end1)].append((tID, ith, seqname))
                ith += 1
def write_gtf_records(gtf, tIDs, output_filename):
    """
    Write one GTF `gene` line per transcript ID in *tIDs*, using the
    coordinates/strand/gene-id stored in gtf.transcript_info.

    Fixes over the original: each record is newline-terminated (they were
    all written on one unbroken line), and the output handle is managed
    with a context manager instead of an explicit open/close.
    """
    with open(output_filename, "w") as f:
        for tID in tIDs:
            info = gtf.transcript_info[tID]
            seqname = info["chr"]
            strand = info["strand"]
            tab = "\t"  # f-string expressions cannot contain backslashes pre-3.12
            start = info["start"]
            end = info["end"]
            gid = info["gid"]
            f.write(
                f'{seqname}{tab}JUNK{tab}gene{tab}{start}{tab}{end}{tab}.{tab}{strand}{tab}.{tab}gene_id "{gid}"; transcript_id "{tID}"\n'
            )
class btabReader:
    """
    Iterator over GMAP .btab alignment lines; each read() returns one
    parsed record dict.

    Fix over the original: the Python 3 iterator protocol method is
    `__next__`; only `next` (Python 2 style) was defined, so
    `for rec in reader` raised TypeError. `next` is kept as an alias.
    """

    def __init__(self, filename):
        self.filename = filename
        self.f = open(filename)

    def __iter__(self):
        return self

    def __next__(self):
        return self.read()

    next = __next__  # backward compatibility with Python-2-style callers

    def read(self):
        """
        (0) seqname
        (1)-(2) blank
        (3) gmap
        (4) blank
        (5) seqid
        (6) ref start (1-based)
        (7) ref end
        (8) seq start (1-based)
        (9) seq end
        (10) score
        (11)-(12) blank
        (13) ith-seq
        (14) ith-exon

        start-end will be flipped if start > end !!!
        """
        cur = self.f.tell()
        line = self.f.readline().strip()
        if cur == self.f.tell():
            raise StopIteration("EOF reached!")
        raw = line.split("\t")
        seqname = raw[0]
        seqid = raw[5]
        rStart1 = int(raw[6])
        rEnd1 = int(raw[7])
        i = raw[-2]
        if rStart1 > rEnd1:
            rStart1, rEnd1 = rEnd1, rStart1
        return {
            "chr": seqname,
            "seqid": seqid,
            "rStart1": rStart1,
            "rEnd1": rEnd1,
            "i": i,
        }
class btabBlockReader(btabReader):
    def __next__(self):
        """
        Return the run of consecutive records that share the same `i`
        (ith-seq) value, leaving the file positioned at the first record
        of the next block.

        Bug fixes over the original:
        - the file was rewound after *every* read inside the loop, so a
          block with more than one record re-read the same line forever
          (infinite loop); we now rewind only when the first record of the
          next block has been consumed;
        - a block ending at EOF was lost entirely (read() raised
          StopIteration out of the method); it is now returned.
        """
        block = [self.read()]
        while True:
            cur = self.f.tell()
            try:
                rec = self.read()
            except StopIteration:
                break  # EOF: return what we have
            if rec["i"] != block[0]["i"]:
                self.f.seek(cur)  # unread the next block's first record
                break
            block.append(rec)
        return block

    next = __next__  # backward compatibility with Python-2-style callers
pbid_rex = re.compile(r"(PB\.\d+|PBfusion\.\d+)(\.\d+){0,1}")
class gmapRecord:
    """
    One GMAP alignment record: chromosome, coverage/identity stats, strand,
    seqid/geneid, and exon coordinate lists (Interval: 0-based start,
    1-based end).
    """

    def __init__(self, seqname, coverage, identity, strand, seqid, geneid=None):
        """
        Record keeping for GMAP output:
        chr, coverage, identity, seqid, exons
        exons --- list of Interval, 0-based start, 1-based end
        """
        if strand not in ("+", "-"):
            raise ValueError(
                f"`strand` must be either `+` or `-`, but instead {strand} was passed."
            )
        self.chr = seqname
        self.coverage = coverage
        self.identity = identity
        self.strand = strand
        self.seqid = seqid
        self.ref_exons = []
        self.seq_exons = []
        self.cds_exons = []
        self.scores = []
        # pdb.set_trace()
        self.geneid = None
        # handle gene ids specially for PB.X.Y and PBfusion.X.Y
        if geneid is not None:
            self.geneid = geneid
        else:
            # derive the gene-level id (PB.X / PBfusion.X) from the seqid
            m = pbid_rex.match(seqid)
            if m is not None:
                self.geneid = m.group(1)

    def __str__(self):
        return f"""
        chr: {self.chr}
        strand: {self.strand}
        coverage: {self.coverage}
        identity: {self.identity}
        seqid: {self.seqid}
        geneid: {self.geneid}
        ref exons: {self.ref_exons}
        seq exons: {self.seq_exons}
        scores: {self.scores}
        """

    def __getattr__(self, key):
        # Convenience aliases: .rstart/.start -> first ref exon's start,
        # .rend/.end -> last ref exon's end.
        if key in ("rstart", "start"):
            return self.get_start()
        elif key in ("rend", "end"):
            return self.get_end()
        else:
            raise AttributeError(key)

    def get_start(self):
        # genomic start of the alignment (first reference exon)
        return self.ref_exons[0].start

    def get_end(self):
        # genomic end of the alignment (last reference exon)
        return self.ref_exons[-1].end

    def add_cds_exon(self, start, end):
        self.cds_exons.append(Interval(start, end))

    def add_exon(self, rStart0, rEnd1, sStart0, sEnd1, rstrand, score=0):
        # Keeps ref_exons/seq_exons in ascending genomic order: exons
        # arriving with rstrand == "-" (descending order) are prepended,
        # anything else is appended; a colinearity check raises ValueError
        # when the new exon would break the ordering.
        assert rStart0 < rEnd1 and sStart0 < sEnd1
        if rstrand == "-":
            if not len(self.ref_exons) == 0:
                if self.ref_exons[0].start < rEnd1:
                    raise ValueError(
                        "The exon being added has a starting position that is past the end position!"
                    )
            self.scores.insert(0, score)
            self.ref_exons.insert(0, Interval(rStart0, rEnd1))
        else:
            if len(self.ref_exons) != 0:
                if self.ref_exons[-1].end > rStart0:
                    raise ValueError(
                        "The exon being added has a starting position that is past the end position!"
                    )
            self.scores.append(score)
            self.ref_exons.append(Interval(rStart0, rEnd1))
        if rstrand == "-":
            self.seq_exons.insert(0, Interval(sStart0, sEnd1))
        else:
            self.seq_exons.append(Interval(sStart0, sEnd1))
# Why are we rolling our own GFF parser here? Why not use gtfparse or bcbio-gff
class gmapGFFReader(object):
    """
    Iterator over GMAP-produced GFF3 alignments; each read() returns one
    gmapRecord built from a gene/mRNA header plus its exon and CDS lines
    (a record series is delimited by a '###' line).
    """

    def __init__(self, filename):
        self.filename = filename
        self.f = open(filename)
        # read through possible header
        while True:
            cur = self.f.tell()
            if (
                not self.f.readline().startswith("#") or self.f.tell() == cur
            ):  # first non-# seen or EOF
                self.f.seek(cur)
                break
        self.sanity_format_check()

    def __iter__(self):
        return self

    def __next__(self):
        return self.read()

    def sanity_format_check(self):
        """
        GFF3 formats are supposed to be tab-delimited and 9 required fields
        https://learn.gencore.bio.nyu.edu/ngs-file-formats/gff3-format/
        """
        cur = self.f.tell()
        raw = self.f.readline().strip().split("\t")
        if len(raw) != 9:
            print(
                f"ERROR:Sanity checking {self.filename} is GFF3 format: expected 9 tab-delimited fields but saw {len(raw)}! Abort!"
            )
            sys.exit(-1)
        # rewind so the checked line is re-read by the first read()
        self.f.seek(cur)

    def read(self):
        """
        GFF files
        (0) seqname
        (1) annotation source
        (2) feature: gene|transcript|CDS|exon|UTR
        (3) 1-based start  # MUST CONVERT TO 0-based!!!
        (4) 1-based end
        (5) score (I think it's similarity for GMAP)
        (6) strand: +|-
        (7) phase
        (8) extra stuff (gene ID, transcript ID...)

        For gmap output, a series is delimited by '###' line
        """
        cur = self.f.tell()
        line = self.f.readline().strip()
        if self.f.tell() == cur:
            raise StopIteration("EOF reached!!")
        raw = line.strip().split("\t")
        # skip any interleaved comment lines (e.g. '###' series separators)
        while raw[0].startswith("#"):
            line = self.f.readline().strip()
            raw = line.strip().split("\t")
            if len(raw) == 0 or raw[0] == "":
                raise StopIteration("EOF reached!!")
        assert raw[2] == "gene"
        raw = self.f.readline().strip().split("\t")
        assert raw[2] == "mRNA"
        seqname = raw[0]
        strand = raw[6]
        # NOTE(review): coverage/identity/seqid stay unbound if the mRNA
        # attribute column lacks them — confirm GMAP always emits all three.
        for blob in raw[8].split(";"):
            if blob.startswith("coverage="):
                coverage = float(blob[9:])
            elif blob.startswith("identity="):
                identity = float(blob[9:])
            elif blob.startswith("Name="):
                seqid = blob[5:]
        rec = gmapRecord(seqname, coverage, identity, strand, seqid)
        cds_exons = []
        cds_seq_start = None
        cds_seq_end = None
        while True:
            line = self.f.readline().strip()
            if line.startswith("##"):
                # end of this record's series
                rec.cds_exons = cds_exons
                rec.cds_seq_start = cds_seq_start
                rec.cds_seq_end = cds_seq_end
                return rec
            raw = line.split("\t")
            feature = raw[2]
            if feature == "exon":
                rstart1, rend1 = int(raw[3]), int(raw[4])
                score = float(raw[5])
                rstrand = raw[6]  # this is the strand on the reference genome
                for blob in raw[8].split(";"):
                    if blob.startswith("Target="):
                        # sstrand is the strand on the query sequence
                        _, sstart1, send1, sstrand = blob.split()
                        sstart1 = int(sstart1)
                        send1 = int(send1)
                        rec.sstrand = sstrand
                try:
                    rec.add_exon(rstart1 - 1, rend1, sstart1 - 1, send1, rstrand, score)
                except AssertionError:
                    # non-colinear exon: log it, then consume the rest of
                    # this record (up to the '##' separator) and bail out
                    logger.error(f"{rec.seqid} has non-colinear exons!")
                    while True:
                        line = self.f.readline().strip()
                        if line.startswith("##"):
                            return rec
                rec.strand = rstrand
            elif feature == "CDS":
                rstart1, rend1 = int(raw[3]), int(raw[4])
                cds_exons.append(Interval(rstart1 - 1, rend1))
                for blob in raw[8].split(";"):
                    if blob.startswith("Target="):
                        junk, sstart1, send1, sstrand = blob.split()
                        sstart1 = int(sstart1)
                        send1 = int(send1)
                        cds_seq_start = (
                            sstart1 - 1 if cds_seq_start is None else cds_seq_start
                        )
                        cds_seq_end = send1
            else:
                raise Exception(f"Not supposed to see type {feature} here!!")
        # return rec
class pasaGFFReader(gmapGFFReader):
    """
    Slight differences in PASA's GTF output (.gtf)
    Each transcript is separated by 1 or more blank lines
    """

    def read(self):
        """Return the next PASA transcript as a gmapRecord (exon coords only)."""
        cur = self.f.tell()
        line = self.f.readline().strip()
        if self.f.tell() == cur:
            raise StopIteration("EOF reached!!")
        while line.startswith("#"):  # header section, ignore
            line = self.f.readline().strip()
        raw = line.split("\t")
        assert raw[2] == "transcript"
        seqname = raw[0]
        strand = raw[6]
        for blob in raw[8].split("; "):
            if blob.startswith("transcript_id"):  # ex: transcript_id "asmbl_7"
                tid = blob[15:-1]  # strip `transcript_id "` prefix and closing quote
            # elif blob.startswith('gene_id'):  # ex: gene_id "S2"
            #     gid = blob[9:-1]
        rec = gmapRecord(
            seqname=seqname, coverage=None, identity=None, strand=strand, seqid=tid
        )
        while True:
            # pdb.set_trace()
            line = self.f.readline().strip()
            if line.startswith("###"):  # end of this record
                return rec
            raw = line.split("\t")
            feature = raw[2]
            start1, end1 = int(raw[3]), int(raw[4])
            if feature == "exon":
                # query-side coordinates are unknown for PASA output;
                # (-2, -1) are dummy placeholders that satisfy add_exon's check
                rec.add_exon(start1 - 1, end1, -2, -1, None)
def write_collapseGFF_format(f, r):
    """
    Write one record in PacBio collapsed-GFF format: a `transcript` line,
    one `exon` line per reference exon, and (when present) one `CDS` line
    per CDS exon. Starts are converted back to 1-based.
    """
    tag = f'gene_id "{r.geneid}"; transcript_id "{r.seqid}";'
    f.write(
        f"{r.chr}\tPacBio\ttranscript\t{r.start + 1}\t{r.end}\t.\t{r.strand}\t.\t{tag}\n"
    )
    for iv in r.ref_exons:
        f.write(
            f"{r.chr}\tPacBio\texon\t{iv.start + 1}\t{iv.end}\t.\t{r.strand}\t.\t{tag}\n"
        )
    if r.cds_exons is not None:
        for iv in r.cds_exons:
            f.write(
                f"{r.chr}\tPacBio\tCDS\t{iv.start + 1}\t{iv.end}\t.\t{r.strand}\t.\t{tag}\n"
            )
class collapseGFFReader(gmapGFFReader):
    def read(self):
        """
        PacBio-style GFF from the collapsed output, which is
        0) seqname
        1) source (PacBio)
        2) feature (transcript|exon)
        3) start (1-based)
        4) end (1-based)
        5) score (always .)
        6) strand
        7) frame (always .)
        8) blurb
        ex:
        chr1    PacBio  transcript      897326  901092  .       +       .       gene_id "PB.1"; transcript_id "PB.1.1";
        chr1    PacBio  exon    897326  897427  .       +       .       gene_id "PB.1"; transcript_id "PB.1.1";
        """
        cur = self.f.tell()
        line = self.f.readline().strip()
        if self.f.tell() == cur:
            raise StopIteration("EOF reached!!")
        raw = line.strip().split("\t")
        assert raw[2] == "transcript"
        seqname = raw[0]
        strand = raw[6]
        seqid = None
        geneid = None
        for stuff in raw[8].split(";"):
            if len(stuff.strip()) > 0:
                a, b = stuff.strip().split()
                if a == "transcript_id":
                    seqid = b[1:-1]  # strip surrounding quotes
                if a == "gene_id":
                    geneid = b[1:-1]  # strip surrounding quotes
        rec = gmapRecord(
            seqname,
            coverage=None,
            identity=None,
            strand=strand,
            seqid=seqid,
            geneid=geneid,
        )
        while True:
            cur = self.f.tell()
            line = self.f.readline().strip()
            if self.f.tell() == cur:
                return rec  # EOF ends the last record
            raw = line.split("\t")
            if raw[2] == "exon":
                s, e = int(raw[3]) - 1, int(raw[4])
                # rstrand="+" forces append-in-file-order regardless of the
                # record's strand
                rec.add_exon(s, e, s, e, rstrand="+", score=None)
            elif raw[2] == "CDS":
                s, e = int(raw[3]) - 1, int(raw[4])
                rec.add_cds_exon(s, e)
            else:  # another new record, wind back and return
                self.f.seek(cur)
                return rec
        raise Exception("Should not reach here!")
fusion_seqid_rex = re.compile(r"(\\S+\\.\\d+)\\.(\\d+)")
class collapseGFFFusionReader(collapseGFFReader):
    """
    Groups consecutive collapsed-GFF records belonging to the same fusion
    gene; each read() returns (fusion_id, [records]).
    """

    def read(self):
        r0 = super(collapseGFFFusionReader, self).read()
        # NOTE(review): m is None (-> AttributeError below) if the seqid
        # does not match fusion_seqid_rex — confirm inputs are fusion IDs.
        m = fusion_seqid_rex.match(r0.seqid)
        fusion_id = m.group(1)
        records = [r0]
        while True:
            cur = self.f.tell()
            try:
                r = super(collapseGFFFusionReader, self).read()
            except StopIteration:
                return fusion_id, records
            m = fusion_seqid_rex.match(r.seqid)
            if m.group(1) != fusion_id:
                # first record of the next fusion: unread it and finish
                self.f.seek(cur)
                return fusion_id, records
            else:
                records.append(r)
class ucscGFFReader(gmapGFFReader):
    def read(self):
        """
        UCSC-style GFF, which is
        0) seqname (chromosome)
        1) source
        2) feature (gene|exon|mRNA...)
        3) start (1-based)
        4) end (1-based)
        5) score
        6) strand
        7) frame
        8) group

        A series is delimited by '###' line
        """
        cur = self.f.tell()
        line = self.f.readline().strip()
        if self.f.tell() == cur:
            raise StopIteration("EOF reached!!")
        raw = line.strip().split("\t")
        assert raw[2] == "exon"
        seqname = raw[0]
        s, e = int(raw[3]) - 1, int(raw[4])  # convert to 0-based start
        strand = raw[6]
        seqid = raw[8]  # the `group` column doubles as the sequence ID
        rec = gmapRecord(
            seqname, coverage=None, identity=None, strand=strand, seqid=seqid
        )
        rec.add_exon(s, e, s, e, strand, score=None)
        while True:
            line = self.f.readline().strip()
            if line.startswith("###"):
                return rec  # end of this series
            raw = line.split("\t")
            assert raw[2] == "exon"
            s, e = int(raw[3]) - 1, int(raw[4])
            rec.add_exon(s, e, s, e, strand, score=None)
        # return rec
def GFFReader(filename):
    """
    Inspect the 2nd column of the first line to decide which program
    (GMAP or PASA) produced this GFF, and return the matching reader.
    """
    with open(filename) as handle:
        source = handle.readline().strip().split("\t")[1]
    if source == "GMAP":
        return gmapGFFReader(filename)
    if source == "PASA":
        return pasaGFFReader(filename)
    raise Exception(f"{source} is not a recognizable GFF program")
def write_fancyGeneformat(f, r):
    """
    Write record *r*'s reference exons to *f* as space-separated
    `seqid exon start end` lines. Both coordinates are shifted by +1
    (note: the end is +1 here too, unlike the GTF writers — preserved
    as-is for the fancyGene format).
    """
    rows = [
        f"{r.seqid} exon {iv.start + 1} {iv.end + 1}\n" for iv in r.ref_exons
    ]
    f.writelines(rows)
def write_GFF_UCSCformat(f, r):
    """
    UCSC GTF format:
    0) seqname
    1) source
    2) feature (gene|exon|mRNA...)
    3) start (1-based)
    4) end (1-based)
    5) score
    6) strand
    7) frame
    8) group
    r should be gmapRecord object

    Fixes over the original:
    - minus-strand records are written in reversed exon order *without*
      mutating r.ref_exons in place (the original .reverse()d the record's
      own list, permanently flipping it for later users);
    - the bare `except:` around the optional score attribute is narrowed
      to AttributeError.
    """
    exons = list(r.ref_exons)  # copy so the caller's record is untouched
    if r.strand == "-":
        exons.reverse()
    for exon in exons:
        f.write(f"{r.chr}\t")
        f.write("NA\t")
        f.write("exon\t")
        f.write(f"{exon.start + 1}\t")
        f.write(f"{exon.end}\t")
        f.write(".\t")
        f.write(f"{r.strand}\t")
        try:
            f.write(f"{r.score}\t")
        except AttributeError:  # records without a .score get "."
            f.write(".\t")
        f.write(f"{r.seqid}\n")
    f.write("###\n")
def convert_BLAST9rec_to_gmapRecord(rec_list):
    """
    Adds .chr, .seqid, and .ref_exons so we can use it to write in UCSC format

    All records must share the same subject ID, query ID and strand.

    Bug fix: the subject-ID consistency check compared against the builtin
    `chr` function instead of the first record's sID, so it failed for
    every non-trivial input.
    """
    if not len(rec_list) > 0:
        raise RuntimeError("Cannot convert an empty record list!")
    seqname = rec_list[0].sID
    seqid = rec_list[0].qID
    strand = rec_list[0].strand
    if not all(x.sID == seqname for x in rec_list):
        raise RuntimeError(
            "The record list has differing `sID` valuess - they must all be the same!!"
        )
    if not all(x.qID == seqid for x in rec_list):
        raise RuntimeError(
            "The record list has differing `qID` valuess - they must all be the same!"
        )
    if not all(x.strand == strand for x in rec_list):
        raise RuntimeError(
            "The record list has differing `strand` values - they must all be the same!!"
        )
    r = gmapRecord(seqname, coverage=0, identity=0, strand=strand, seqid=seqid)
    r.ref_exons = [Interval(x.sStart, x.sEnd) for x in rec_list]
    return r
def btab_reclist_to_interval_list_0basedStart(recs):
    """
    Convert a block of btab record dicts into (chr, sorted interval nodes).
    The 1-based rStart1 values are converted to 0-based before insertion.
    """
    interval_tree = IntervalTree()
    for record in recs:
        interval_tree.insert(record["rStart1"] - 1, record["rEnd1"])
    nodes = []
    interval_tree.traverse(nodes.append)
    return recs[0]["chr"], nodes
def getOverlap(a, b):
    """Return the length of the overlap between intervals *a* and *b* (0 if disjoint)."""
    overlap = min(a.end, b.end) - max(a.start, b.start)
    return overlap if overlap > 0 else 0
def CompareSimCoordinatesToAlnPath(alnPath, simCoordinates):
    #
    # do silly little dynamic programming to align sets of exons.
    # This could be done in a while loop if there is a 1-1
    # correspondende of exons that overlap, but if multiple overlap,
    # that could cause problems.
    #
    # Returns (score, matchedExons), where matchedExons is the list of
    # (sim/ref index, aln index) pairs recovered by the traceback.
    # NOTE(review): simCoordinates elements must support .find(start, end)
    # in addition to .start/.end — confirm callers pass IntervalNode-like
    # objects, not plain Intervals.
    nAlnExons = len(alnPath)
    nSimExons = len(simCoordinates)
    scoreMat = [[0 for j in range(nSimExons + 1)] for i in range(nAlnExons + 1)]
    pathMat = [[0 for j in range(nSimExons + 1)] for i in range(nAlnExons + 1)]
    # traceback direction codes
    diagonal = 0
    up = 1
    left = 2
    for i in range(nAlnExons):
        pathMat[i + 1][0] = up
    for j in range(nSimExons):
        pathMat[0][j + 1] = left
    pathMat[0][0] = diagonal
    # return 0
    for i in range(nAlnExons):
        for j in range(nSimExons):
            overlapScore = 0
            if (
                len(simCoordinates[j].find(alnPath[i].start, alnPath[i].end)) > 0
            ):  # overlaps!
                # reward: fraction of the sim exon covered by the aligned exon
                overlapScore = (
                    getOverlap(alnPath[i], simCoordinates[j])
                    * 1.0
                    / (simCoordinates[j].end - simCoordinates[j].start)
                )  # GetOverlapPercent(alnPair, simCoordinates.exonList[j])
                assert 0 <= overlapScore <= 1.0
                scoreMat[i + 1][j + 1] = scoreMat[i][j] + overlapScore
                pathMat[i + 1][j + 1] = diagonal
            else:
                order = (
                    simCoordinates[j].end <= alnPath[i].start
                )  # WhichIntervalIsFirst(alnPair, simCoordinates.exonList[j])
                if order:
                    scoreMat[i + 1][j + 1] = (
                        scoreMat[i][j + 1] - 2
                    )  # penalize assembled exons that were skipped
                    pathMat[i + 1][j + 1] = up
                else:
                    scoreMat[i + 1][j + 1] = (
                        scoreMat[i + 1][j] - 1
                    )  # penalize gencode exons being skipped
                    pathMat[i + 1][j + 1] = left
    # pdb.set_trace()
    i = nAlnExons
    j = nSimExons
    matchedExons = []
    # pick the best end column so trailing sim exons may stay unmatched
    _cur_best_j = nSimExons
    for j in range(nSimExons - 1, -1, -1):
        if scoreMat[i][j] > scoreMat[i][_cur_best_j]:
            _cur_best_j = j
    j = _cur_best_j
    while i > 0 and j > 0:
        if pathMat[i][j] == diagonal:
            matchedExons.append((j - 1, i - 1))  # format should be (ref_ind, seq_ind)
            i = i - 1
            j = j - 1
        elif pathMat[i][j] == left:
            j = j - 1
        else:
            i = i - 1
    matchedExons.reverse()
    return (scoreMat[nAlnExons][_cur_best_j] - (nSimExons - _cur_best_j), matchedExons)
def match_transcript(gtf, seqname, exon_path):
    """
    Find the annotated transcript in *gtf* on chromosome *seqname* whose
    exons best match the (sorted) intervals in *exon_path*.

    Returns a dict with score / matchedExons / tID / tID_num_exons
    (tID is None when nothing scored above 0).

    Bug fix: the candidate lookup passed the builtin `chr` *function*
    instead of the *seqname* parameter, so the chromosome argument was
    silently ignored.
    """
    best_score, best_matchedExons, best_tID, best_tNum = 0, None, None, None
    for tID in gtf.find(seqname, exon_path[0].start, exon_path[-1].end):
        t_paths = gtf.get_exons(tID)
        score, matchedExons = CompareSimCoordinatesToAlnPath(exon_path, t_paths)
        if score > best_score:
            best_tID = tID
            best_tNum = len(t_paths)
            best_score = score
            best_matchedExons = matchedExons
    return {
        "score": best_score,
        "matchedExons": best_matchedExons,
        "tID": best_tID,
        "tID_num_exons": best_tNum,
    }
def categorize_transcript_recovery(info):
    """
    full --- means that every exon in the tID was covered!
    fused --- full, but assembled exon match start > 0, meaning
              likely fusion of overlapped transcripts
    5missX --- means that the assembled one is missing beginning X exons
    3missY --- means that the assembled one is missing ending Y exons
    skipped --- means that the assembled one is missing some intermediate exons!
    (the 5'/3' labels are swapped on the minus strand)
    """
    matched = info["matchedExons"]
    total = info["tID_num_exons"]
    plus = info["strand"] == "+"
    if len(matched) == total:
        return "full" if matched[0][1] == 0 else "fused"
    parts = []
    head_missing = matched[0][0]
    if head_missing > 0:
        parts.append(("5miss" if plus else "3miss") + str(head_missing))
    tail_missing = total - 1 - matched[-1][0]
    if tail_missing > 0:
        parts.append(("3miss" if plus else "5miss") + str(tail_missing))
    if not parts:  # must be missing some ground truth exons!
        return "skipped"
    return ";".join(parts)
def evaluate_alignment_boundary_goodness(ref_exons, aln_exons, matches):
    """
    For each (ref index, aln index) pair in *matches*, return a
    (start offset, end offset) tuple: 0 means the boundary is exact,
    +k / -k means the aligned boundary is k bases downstream / upstream
    of the corresponding reference boundary.
    """
    return [
        (
            aln_exons[aln_idx].start - ref_exons[ref_idx].start,
            aln_exons[aln_idx].end - ref_exons[ref_idx].end,
        )
        for ref_idx, aln_idx in matches
    ]
def main(gtf):
    """
    Tally, per annotated transcript exon, how many GMAP btab blocks (from
    the hard-coded simulation output file) hit it.
    Returns dict: tID -> list of per-exon hit counts.

    Bug fix: btab_reclist_to_interval_list_0basedStart returns a
    (seqname, path) *tuple*; the original passed the whole tuple to
    match_transcript as the exon path, crashing on `exon_path[0].start`.
    """
    transcript_tally = {}
    for tID in gtf.transcript:
        transcript_tally[tID] = [0] * len(gtf.get_exons(tID))
    # NOTE(review): input filename is hard-coded to one simulation run
    for r in btabBlockReader("sim_gencode_20x_first1000_test2.gmap.tophits.btab"):
        seqname, path = btab_reclist_to_interval_list_0basedStart(r)
        info = match_transcript(gtf, seqname, path)
        if info["matchedExons"] is None:
            logger.info(f"Did not find a match for {r[0]['seqid']}!")
            continue
        for i, _ in info["matchedExons"]:
            transcript_tally[info["tID"]][i] += 1
    return transcript_tally
def main_pasa(gtf):
    """
    Tally, per ground-truth transcript exon, how many PASA-assembled
    isoforms (from the hard-coded assemblies GTF) cover it.
    Returns dict: tID -> list of per-exon hit counts.
    """
    pasa_tally = {}
    for tID in gtf.transcript:
        pasa_tally[tID] = [0] * len(gtf.get_exons(tID))
    # NOTE(review): input filename is hard-coded to one simulation run
    pasa = GTF(
        "sim_gencode_20x_first1000_test2.pasa_assemblies.denovo_transcript_isoforms.gtf"
    )
    for tID in pasa.transcript:
        path = pasa.get_exons(tID)
        # recover the chromosome from the first exon's (start, end) index entry
        seqname = pasa.exon[(path[0].start, path[0].end)][0][2]
        info = match_transcript(gtf, seqname, path)
        if info["matchedExons"] is None:
            logger.info(f"Did not find a match for {format(tID)}!")
            continue
        for i, j in info["matchedExons"]:
            pasa_tally[info["tID"]][i] += 1
    return pasa_tally
def eval_gmap(gtf, gmap_filename, input_filename):
    """
    Expected seqID format: m000000_000000_00000_cSIMULATED_s0_p0/0/0_1250 or p0/1395/ccs
    Input:
    gtf --- GTF/Coords object as ground truth transcripts
    gmap_filename --- gmap output in .gff format
    input_filename --- input fasta to gmap (to identify unmapped seqs)
    Output: <output_prefix> is just <gmap_filename>
    <output_prefix>.bad --- list of seqids that had no GMAP output or did not match a transcript
    <output_prefix>.report ---
    <seqid>, <seqlen>, <seqMatchStart>, <seqMatchEnd>, <transcript/gene ID>, <category:full|5missX|3missY|skipped>, <matchedExons>

    Bug fix: `seqid_missed` was a dict_keys *view*; calling .remove() on it
    raised AttributeError (not the ValueError being caught) on the very
    first mapped sequence. It is now materialized as a list.
    """
    from Bio import SeqIO

    output_prefix = gmap_filename
    fbad = open(f"{output_prefix}.bad", "w")
    fbad.write("seqID\tinfo\n")
    fgood = open(f"{output_prefix}.report", "w")
    fgood.write(
        "seqID\tseqLen\tchr\tstrand\tseqMatchStart0\tseqMatchEnd1\trefID\tcategory\tmatches\tboundary\n"
    )
    seqlen_dict = {
        r.id: len(r.seq)
        for r in SeqIO.parse(
            open(input_filename),
            "fastq" if input_filename.endswith(".fastq") else "fasta",
        )
    }
    # ids with no GMAP record yet (materialized so .remove() works)
    seqid_missed = list(seqlen_dict)
    for rec in gmapGFFReader(gmap_filename):
        seqname = rec.chr
        seqid = rec.seqid
        print(f"seqid: {seqid}")
        seqlen = seqlen_dict[seqid]
        try:
            seqid_missed.remove(seqid)
        except ValueError:  # already removed, ignore?
            pass
        info = match_transcript(gtf, seqname, rec.ref_exons)
        info["strand"] = rec.strand
        if info["matchedExons"] is None:
            fbad.write(f"{seqid}\tBAD\n")
        else:
            fgood.write(
                "{seqid}\t{seqlen}\t{chr}\t{strand}\t{smstart0}\t{smend1}\t{refID}\t{cat}\t{mat}\t{bound}\n".format(
                    seqid=seqid,
                    seqlen=seqlen,
                    chr=seqname,
                    smstart0=rec.start,
                    smend1=rec.end,
                    strand=rec.strand,
                    refID=info["tID"],
                    cat=categorize_transcript_recovery(info),
                    mat=info["matchedExons"],
                    bound=evaluate_alignment_boundary_goodness(
                        gtf.get_exons(info["tID"]), rec.ref_exons, info["matchedExons"]
                    ),
                )
            )
    for seqid in seqid_missed:
        fbad.write(f"{seqid}\tMISSED\n")
    fbad.close()
    fgood.close()
def eval_pasa(gtf, pasa_filename, gmap_report_filename):
    """
    Evaluate PASA assemblies against the ground-truth *gtf*, writing
    <pasa_filename>.bad (unmatched tIDs) and <pasa_filename>.report.
    Report columns:
    <gID> <tID> <number of exons> <refID> <category> <matches>
    """
    output_prefix = pasa_filename
    fbad = open(f"{output_prefix}.bad", "w")
    fbad.write("tID\tinfo\n")
    fgood = open(f"{output_prefix}.report", "w")
    fgood.write(
        "gID\ttID\tchr\tstrand\tnum_exon\ttlen\trefID\treflen\trefStrand\tcategory\tmatches\tboundary\n"
    )
    # refIDs seen in the GMAP report that PASA has not (yet) recovered
    refid_missed = list(
        {x["refID"] for x in DictReader(open(gmap_report_filename), delimiter="\t")}
    )
    for rec in pasaGFFReader(pasa_filename):
        gid = rec.seqid
        tid = rec.seqid
        num_exon = len(rec.ref_exons)
        tLen = sum(
            x.end - x.start for x in rec.ref_exons
        )  # i know it's confusing but seq_exon is not used in parsing pASA output!
        info = match_transcript(gtf, rec.chr, rec.ref_exons)
        info["strand"] = rec.strand
        refid = info["tID"]
        try:
            refid_missed.remove(refid)
        except ValueError:
            pass  # not in the GMAP report (or already removed)
        refLen = sum(x.end - x.start for x in gtf.get_exons(info["tID"]))
        if info["matchedExons"] is None:
            fbad.write(f"{tid}\tBAD\n")
        else:
            fgood.write(f"{gid}\t{tid}\t")
            fgood.write(f"{rec.chr}\t{rec.strand}\t")
            fgood.write(f"{num_exon}\t{tLen}\t")
            fgood.write(f"{info['tID']}\t{refLen}\t")
            fgood.write(f"{gtf.transcript_info[refid]['strand']}\t")
            fgood.write(
                f"{categorize_transcript_recovery(info)}\t{info['matchedExons']}\t"
            )
            fgood.write(
                "{bound}\n".format(
                    bound=evaluate_alignment_boundary_goodness(
                        gtf.get_exons(info["tID"]), rec.ref_exons, info["matchedExons"]
                    )
                )
            )
    for refid in refid_missed:
        fbad.write(f"{refid}\tMISSED\n")
    fbad.close()
    fgood.close()
def make_exon_report(gtf, gmap_report_filename):
    """
    Output for each exon:
    <tID> <exon number 0-based> <length> <coverage>
    Output will be written to .exon_report

    Security fix: the `matches` column is parsed with ast.literal_eval
    instead of eval() — the report is a text file, and eval would execute
    arbitrary code embedded in it.
    """
    from ast import literal_eval
    from collections import defaultdict
    from csv import DictReader

    coverage = defaultdict(lambda: defaultdict(lambda: 0))  # tID --> ith-exon --> count
    with open(gmap_report_filename) as handle:
        for r in DictReader(handle, delimiter="\t"):
            tID = r["refID"]
            # matches is a repr'd list of (ref_idx, aln_idx) tuples
            for i, _ in literal_eval(r["matches"]):
                coverage[tID][i] += 1
    with open(f"{gmap_report_filename}.exon_report", "w") as f:
        for tID in coverage:
            path = gtf.get_exons(tID)
            for ith, exon in enumerate(path):
                f.write(
                    f"{tID}\t{ith}\t{exon.end - exon.start}\t{coverage[tID][ith]}\n"
                )
"""
##gff-version 3
# generated on Tue Dec 10 12:13:05 2013 by ./dump_all_gramene_dbs_continuing.pl
# for species maize
# genebuild 2010-01-MaizeSequence
5 ensembl gene 1579 3920 . - . ID=GRMZM2G356204;Name=GRMZM2G356204;biotype=protein_coding
5 ensembl mRNA 1579 3920 . - . ID=GRMZM2G356204_T01;Parent=GRMZM2G356204;Name=GRMZM2G356204_T01;biotype=protein_coding
5 ensembl exon 1579 3920 . - . Parent=GRMZM2G356204_T01;Name=exon.12
5 ensembl CDS 1681 3903 . - . Parent=GRMZM2G356204_T01;Name=CDS.13
5 ensembl gene 10731 23527 . - . ID=GRMZM2G054378;Name=GRMZM2G054378;biotype=protein_coding
5 ensembl mRNA 22087 23527 . - . ID=GRMZM2G054378_T09;Parent=GRMZM2G054378;Name=GRMZM2G054378_T09;biotype=protein_coding
5 ensembl intron 22956 23034 . - . Parent=GRMZM2G054378_T09;Name=intron.19
5 ensembl intron 22799 22898 . - . Parent=GRMZM2G054378_T09;Name=intron.20
5 ensembl intron 22634 22722 . - . Parent=GRMZM2G054378_T09;Name=intron.21
5 ensembl intron 22456 22553 . - . Parent=GRMZM2G054378_T09;Name=intron.22
5 ensembl exon 23035 23527 . - . Parent=GRMZM2G054378_T09;Name=exon.23
5 ensembl exon 22899 22955 . - . Parent=GRMZM2G054378_T09;Name=exon.24
5 ensembl exon 22723 22798 . - . Parent=GRMZM2G054378_T09;Name=exon.25
5 ensembl exon 22554 22633 . - . Parent=GRMZM2G054378_T09;Name=exon.26
5 ensembl exon 22087 22455 . - . Parent=GRMZM2G054378_T09;Name=exon.27
5 ensembl CDS 23035 23193 . - . Parent=GRMZM2G054378_T09;Name=CDS.28
5 ensembl CDS 22929 22955 . - 0 Parent=GRMZM2G054378_T09;Name=CDS.29
"""
class MaizeGFFReader(collapseGFFReader):
    def read(self):
        """
        Parse one Ensembl/Gramene-style GFF3 record: a `gene` line, its
        `mRNA` line, then exon/CDS/intron lines (see the sample data in the
        module-level string above). Returns a gmapRecord.

        Fixes over the original:
        - after skipping the `gene` line, `feature` is re-read from the new
          (mRNA) line before the assert — it previously still held "gene",
          so the assert always failed;
        - inside the body loop, feature and coordinates are parsed from the
          *current* line instead of reusing the stale header values, so
          exon and CDS coordinates are actually recorded.
        """
        cur = self.f.tell()
        line = self.f.readline().strip()
        if self.f.tell() == cur:
            raise StopIteration("EOF reached!!")
        raw = line.strip().split("\t")
        feature = raw[2]
        if feature == "gene":  # ignore this and read the next line 'mRNA'
            line = self.f.readline().strip()
            raw = line.strip().split("\t")
            feature = raw[2]
        assert feature == "mRNA"
        seqname = raw[0]
        strand = raw[6]
        seqid = None
        for stuff in raw[8].split(
            ";"
        ):  # ex: ID=GRMZM2G054378;Name=GRMZM2G054378;biotype=protein_coding
            a, b = stuff.strip().split("=")
            if a == "ID":
                seqid = b
                break
        rec = gmapRecord(
            seqname, coverage=None, identity=None, strand=strand, seqid=seqid
        )
        while True:
            cur = self.f.tell()
            line = self.f.readline().strip()
            if self.f.tell() == cur:
                return rec  # EOF ends the last record
            raw = line.split("\t")
            feature = raw[2]
            if feature == "exon":
                s, e = int(raw[3]) - 1, int(raw[4])
                rec.add_exon(s, e, s, e, rstrand=strand, score=None)
            elif feature == "CDS":
                s, e = int(raw[3]) - 1, int(raw[4])
                rec.add_cds_exon(s, e)
            elif feature == "intron":
                pass  # ignore intron annotations
            else:  # another new record, wind back and return
                self.f.seek(cur)
                return rec
"""
Because f***ing exonerate cannot generate GFF3 formats.
X exonerate:est2genome gene 78421768 78454596 5775 - . gene_id 0 ; sequence lcl|NM_001001171.1_cds_NP_001001171.1_1 ; gene_orient
ation + ; identity 99.92 ; similarity 99.92
X exonerate:est2genome utr5 78454482 78454596 . - .
X exonerate:est2genome exon 78454482 78454596 . - . insertions 0 ; deletions 0 ; identity 100.00 ; similarity 100.00
X exonerate:est2genome splice5 78454480 78454481 . - . intron_id 1 ; splice_site "GT"
X exonerate:est2genome intron 78451617 78454481 . - . intron_id 1
X exonerate:est2genome splice3 78451617 78451618 . - . intron_id 0 ; splice_site "AG"
"""
class ExonerateGFF2Reader(collapseGFFReader):
    def read(self):
        """
        Parse one exonerate est2genome block (see the sample data in the
        module-level string above): a `gene` line carrying the query id in
        a `sequence <id>` attribute, followed by exon / intron / splice /
        utr lines; a `similarity` line ends the record. Returns a
        gmapRecord holding only the exon coordinates.
        """
        cur = self.f.tell()
        line = self.f.readline().strip()
        if self.f.tell() == cur:
            raise StopIteration("EOF reached!!")
        raw = line.strip().split("\t")
        if raw[2] == "gene":  # read the gene line to get the ID and strand
            for stuff in raw[8].split(" ; "):
                a, b = stuff.strip().split(" ")
                if a == "sequence":
                    seqid = b
        # NOTE(review): if the first line is not a `gene` line, `seqid` is
        # referenced before assignment below — confirm records always open
        # with a `gene` line.
        seqname = raw[0]
        strand = raw[6]
        assert strand in ("+", "-")
        rec = gmapRecord(
            seqname, coverage=None, identity=None, strand=strand, seqid=seqid
        )
        while True:
            cur = self.f.tell()
            line = self.f.readline().strip()
            if self.f.tell() == cur:
                return rec  # EOF ends the last record
            raw = line.split("\t")
            if raw[2] == "exon":
                s, e = int(raw[3]) - 1, int(raw[4])
                rec.add_exon(s, e, s, e, rstrand=strand, score=None)
            elif raw[2] == "CDS":
                pass  # ignore CDS annotations
            elif raw[2] == "intron":
                pass  # ignore intron annotations
            elif raw[2] == "similarity":
                # end of the record! return
                return rec
            elif raw[2] in ("splice3", "splice5", "utr3", "utr5"):
                pass  # ignore
        raise Exception("Should not reach here!")
| 2.546875 | 3 |
SourceCode/PyWiseFuncs.py | samuellando/PyWise.todo | 1 | 12757569 | <gh_stars>1-10
from time import sleep, strftime
from os import system, remove
from subprocess import Popen
from traceback import print_exc
from msvcrt import getch
class AllFunctions():
    """Helpers for the PyWise console todo application.

    All methods are plain functions (no ``self``) invoked as
    ``AllFunctions.<name>(...)``.  Tasks and priorities live in two parallel
    lists (``ToList`` / ``PList``); the on-disk format is one line per task,
    ``"<task>PRI<priority>"``.

    Fixes vs. the original: ``load``/``save``/``Export``/``UpCACHE``/
    ``LoadCACHE`` no longer leak file handles, and ``SortDate`` uses
    31536000 s/year (365 days) consistently with ``PriByDate`` instead of
    the original 31563000 typo.
    """

    def load(FileName):
        """Read the todo file and return the (tasks, priorities) lists.

        Each stored line ``"<task>PRI<pri>"`` is split on the literal
        marker ``"PRI"`` (first occurrence wins for the task text).
        """
        ToList = []
        # Context manager closes the handle (the original leaked it).
        with open(FileName, "r") as State:
            for Line in State:
                ToList.append(Line.replace("\n", ""))
        PList = []
        TempList = []
        for Entry in ToList:
            CUT = Entry.split("PRI")
            TempList.append(CUT[0])
            PList.append(CUT[1])
        ToList = TempList
        return (ToList, PList)

    def save(FileName, ToList, PList):
        """Overwrite FileName with the current list state."""
        # Mode "w" truncates (replacing the original remove()+append, which
        # failed when the file did not exist) and the context manager
        # guarantees the data is flushed and the handle closed.
        with open(FileName, "w") as State:
            for Task, Pri in zip(ToList, PList):
                State.write(Task + "PRI" + Pri + "\n")

    def post(ToList, PList, PriOneCode, PriTwoCode, PriThreeCode, PriZeroCode):
        """Clear the screen and print the list, colour-coded by priority.

        The Pri*Code arguments are ANSI colour numbers as strings (e.g. "31").
        """
        system("cls")
        print(" ")
        print("\t"+"\t"+"\033[1;37;40m "+"\t--Todo List--")
        print(" ")
        C = 0
        Max = len(ToList)
        while C < Max:
            Coef = str(C+1)
            # Pick the ANSI colour matching the item's priority level.
            if PList[C] == "1":
                print("\t"+"\033[1;"+PriOneCode+";40m "+Coef+"\t"+ToList[C])
            elif PList[C] == "2":
                print("\t"+"\033[1;"+PriTwoCode+";40m "+Coef+"\t"+ToList[C])
            elif PList[C] == "3":
                print("\t"+"\033[1;"+PriThreeCode+";40m "+Coef+"\t"+ToList[C])
            else:
                print("\t"+"\033[1;"+PriZeroCode+";40m "+Coef+"\t"+ToList[C])
            C = C+1
        print("\033[1;37;40m "+" ")
        if len(ToList) == 0:
            print("\t"+"\033[1;35;40m "+"\tNothing To Do !!")
            print("\033[1;37;40m "+" ")

    def add(Item, ToList, PList):
        """Append Item to the list with the default priority \"0\"."""
        ToList.append(Item)
        PList.append("0")
        return (ToList, PList)

    def pri(Num, NP, PList):
        """Set item Num (1-based index, as string) to priority NP ("0".."3").

        Silently returns None (leaving PList untouched) for an out-of-range
        index or priority -- preserving the original forgiving behaviour.
        """
        Max = len(PList)
        C = int(Num)-1
        if C >= Max or C < 0:
            return
        if int(NP) > 3 or int(NP) < 0:
            return
        PList[C] = NP
        return (PList)

    def done(Num, ToList, PList):
        """Remove item Num (1-based) from both lists; ignores bad indices."""
        Max = len(ToList)
        C = int(Num)-1
        if C >= Max or C < 0:
            return
        del ToList[C]
        del PList[C]
        return (ToList, PList)

    def switch(NumO, NumT, ToList, PList):
        """Swap the items (and priorities) at 1-based positions NumO/NumT."""
        CO = int(NumO)-1
        CT = int(NumT)-1
        Max = len(ToList)
        if CO >= Max or CT >= Max or CO < 0 or CT < 0:
            return
        # Tuple assignment swaps in place without temporaries.
        ToList[CO], ToList[CT] = ToList[CT], ToList[CO]
        PList[CO], PList[CT] = PList[CT], PList[CO]
        return (ToList, PList)

    def fliter(TEXT, ToList, PList):
        """Return the (tasks, priorities) whose task text contains TEXT."""
        FlitList = []
        FLP = []
        for C, Item in enumerate(ToList):
            if TEXT in Item:
                FlitList.append(Item)
                FLP.append(PList[C])
        return (FlitList, FLP)

    def fliterpri(Num, ToList, PList):
        """Return the (tasks, priorities) whose priority string contains Num."""
        FlitList = []
        FLP = []
        for C, Item in enumerate(PList):
            if Num in Item:
                FlitList.append(ToList[C])
                FLP.append(Item)
        return (FlitList, FLP)

    def BulkAdd(ToList, PList):
        """Open Notepad on a scratch file so many tasks can be added at once.

        The first line written is a header/template and is skipped when the
        edited file is read back.  Windows-only (notepad.exe).
        """
        Info = open("BulkAdd.txt", "a")
        Info.write("--- Write Tasks Line by Line below ---\n+XX @XX/XX/2017 TEXT")
        Info.close()
        p = Popen(('notepad.exe', 'BulkAdd.txt'))
        p.wait()  # block until the user closes Notepad
        Info = open("BulkAdd.txt", "r+")
        C = 0
        for Line in Info:
            if "\n" in Line:
                Line = Line.replace("\n", "")
            if C > 0:  # skip the template header line
                ToList, PList = AllFunctions.add(Line, ToList, PList)
            C = C+1
        Info.close()
        remove("BulkAdd.txt")
        return (ToList, PList)

    def BulkWork(FileName):
        """Open the saved list in Notepad for manual editing, then reload it."""
        p = Popen(('notepad.exe', FileName))
        p.wait()
        ToList, PList = AllFunctions.load(FileName)
        return (ToList, PList)

    def SortPri(ToList, PList, PriOneCode, PriTwoCode, PriThreeCode, PriZeroCode, S):
        """Bubble-sort the list by priority (highest first).

        With S == 1 every swap is re-rendered as a small animation; with
        S == 0 only a "Sorting..." banner is shown.
        """
        if S == 0:
            system("cls")
            print("Sorting...")
        Length = len(PList)
        Cur = 0
        C = 1
        # OrderCheck() returns True while any adjacent pair is still
        # ascending, i.e. while another bubble pass is needed.
        while AllFunctions.OrderCheck(PList):
            while C < Length:
                if int(PList[Cur]) <= int(PList[(Cur+1)]):
                    # switch() takes 1-based positions, hence Cur+1 / Cur+2.
                    ToList, PList = AllFunctions.switch((Cur+1), (Cur+2), ToList, PList)
                Cur = Cur+1
                C = C+1
                if S == 1:
                    AllFunctions.post(ToList, PList, PriOneCode, PriTwoCode, PriThreeCode, PriZeroCode)
                    sleep(0.02)
            C = 1
            Cur = 0
        return (ToList, PList)

    def OrderCheck(L):
        """Return True while L is not yet in non-increasing numeric order."""
        Length = len(L)
        Cur = 0
        C = 1
        while C < Length:
            if int(L[Cur]) >= int(L[(Cur+1)]):
                Cur = Cur+1
            else:
                return True
            C = C+1
        return False

    def SortDate(ToList, PList, PriOneCode, PriTwoCode, PriThreeCode, PriZeroCode, S):
        """Bubble-sort the list by the "@DD/MM/YYYY" date embedded in a task.

        Tasks without a date sort as 0 (oldest).  The final reverse() puts
        the earliest dates first.
        """
        DateList = []
        for Item in ToList:
            if "@" in Item:
                A = Item.index("@")
                day = Item[A+1] + Item[A+2]
                month = Item[A+4] + Item[A+5]
                year = Item[A+7] + Item[A+8] + Item[A+9] + Item[A+10]
                # BUGFIX: the year factor was 31563000 here while PriByDate
                # used 31536000 (= 365 * 86400); use the consistent value.
                # NOTE(review): 86164 s/day (sidereal) looks accidental but is
                # used consistently, so it is kept.
                Date = (int(day)*86164)+(int(month)*2592000)+(int(year)*31536000)
                DateList.append(Date)
            else:
                DateList.append(0)
        if S == 0:
            system("cls")
            print("Sorting...")
        Length = len(ToList)
        Cur = 0
        C = 1
        while AllFunctions.OrderCheck(DateList):
            while C < Length:
                if int(DateList[Cur]) <= int(DateList[(Cur+1)]):
                    ToList, PList = AllFunctions.switch((Cur+1), (Cur+2), ToList, PList)
                    # Keep the shadow date list in step with the swap.
                    DateList[Cur], DateList[(Cur+1)] = DateList[(Cur+1)], DateList[Cur]
                Cur = Cur+1
                C = C+1
                if S == 1:
                    AllFunctions.post(ToList, PList, PriOneCode, PriTwoCode, PriThreeCode, PriZeroCode)
                    sleep(0.02)
            C = 1
            Cur = 0
        ToList.reverse()
        PList.reverse()
        return (ToList, PList)

    def PriByDate(ToList, PList):
        """Assign priorities from the "@DD/MM/YYYY" due date in each task.

        3 = due (or overdue) within a day, 2 = within two days,
        1 = within a week, 0 = later / no date.
        """
        DateList = []
        for Item in ToList:
            if "@" in Item:
                A = Item.index("@")
                day = Item[A+1] + Item[A+2]
                month = Item[A+4] + Item[A+5]
                year = Item[A+7] + Item[A+8] + Item[A+9] + Item[A+10]
                DateList.append((int(day)*86164)+(int(month)*2592000)+(int(year)*31536000))
            else:
                DateList.append(0)
        TDay = int(strftime("%d"))
        TMonth = int(strftime("%m"))
        TYear = int(strftime("%y"))+2000
        TDate = (TDay*86164)+(TMonth*2592000)+(TYear*31536000)
        C = 0
        for Item in ToList:
            # Note: the first two branches intentionally both map to "3"
            # (overdue/today, or due within a day).
            if DateList[C] <= TDate:
                PList[C] = "3"
            elif DateList[C] < (TDate+86400):
                PList[C] = "3"
            elif DateList[C] <= (TDate+172800):
                PList[C] = "2"
            elif DateList[C] <= (TDate+604800):
                PList[C] = "1"
            else:
                PList[C] = "0"
            C = C+1
        return (ToList, PList)

    def Export(ToList, PList, ToFile):
        """Write a human-readable copy of the list, then run MoveList.bat."""
        with open(ToFile, "w") as Ex:
            Ex.write("\t\t -- Todo -- \n")
            Ex.write("\n")
            C = 0
            for Item in ToList:
                Ex.write("PRI: "+PList[C]+"\t"+ToList[C]+"\n")
                C = C+1
        system("start MoveList.bat")

    def UpCACHE(LineNum, Cvar, ToFile):
        """Overwrite line LineNum (1-based) of the cache file with Cvar,
        zero-padded to a fixed width of 10 characters.

        NOTE(review): mixes text-mode iteration with seek()/write() on the
        same handle; the len(Line)+1 offset presumably compensates for
        Windows "\\r\\n" line endings -- verify before reusing elsewhere.
        """
        LineNum = LineNum-1
        with open(ToFile, 'r+') as UP:
            LineLengths = []
            for Line in UP:
                LineLengths.append((len(Line)+1))
            # Byte offset of the start of the target line.
            C = 0
            Cur = 0
            while C < LineNum:
                Cur = Cur+LineLengths[C]
                C = C+1
            UP.seek(Cur)
            PrintLen = len(str(Cvar))
            Offset = 10 - PrintLen
            while Offset > 0:
                UP.write("0")
                Offset = Offset-1
            UP.write(str(Cvar))

    def ReCACHE(StartUpCount, ErrorCount, TaskCount, ToFile):
        """Reset the three counters (lines 1-3) in the cache file to zero.

        The counter arguments are ignored; they are kept in the signature
        for caller compatibility.
        """
        AllFunctions.UpCACHE(1, 0, ToFile)
        AllFunctions.UpCACHE(2, 0, ToFile)
        AllFunctions.UpCACHE(3, 0, ToFile)

    def LoadCACHE(ToFile):
        """Read counters and colour codes from the cache file.

        Returns (StartUpCount, ErrorCount, TaskCount, PO, PTW, PTH, PZ),
        each decoded from the first 10 characters of its line.
        """
        with open(ToFile, "r+") as CACHE:
            CACHEList = []
            for Line in CACHE:
                CACHEList.append(Line[0:10])
        StartUpCount = int(CACHEList[0])
        ErrorCount = int(CACHEList[1])
        TaskCount = int(CACHEList[2])
        PO = CACHEList[3]
        PTW = CACHEList[4]
        PTH = CACHEList[5]
        PZ = CACHEList[6]
        return (StartUpCount, ErrorCount, TaskCount, PO, PTW, PTH, PZ)

    def edit(text):
        """Minimal inline line editor driven by msvcrt.getch() (Windows only).

        Arrow keys move the cursor ("|"), backspace/delete remove characters
        and Enter returns the edited text.  Extended keys arrive as a two
        byte sequence starting with b'\\xe0' (tracked via the A flag).
        """
        C = len(text)  # cursor position
        A = 0          # 1 while the 2nd byte of an extended key is pending
        while True:
            system("cls")
            print(text[:C]+"|"+text[C:])
            Ent = getch()
            if Ent == b'\x08':  # backspace
                if C > 0:
                    C = C-1
                    text = text[:C]+text[(C+1):]
            elif Ent == b'\r':  # enter: accept the edit
                system("cls")
                return (text)
            elif Ent == b'\xe0':  # extended-key prefix
                A = 1
                print(A)
            elif Ent == b'K' and A == 1:  # left arrow
                if C > 0:
                    A = 0
                    C = C-1
            elif Ent == b'M' and A == 1:  # right arrow
                if C < len(text):
                    A = 0
                    C = C+1
            elif Ent == b'S' and A == 1:  # delete
                if C < len(text):
                    A = 0
                    text = text[:C]+text[(C+1):]
            elif A == 1:  # unknown extended key: swallow it
                A = 0
            else:
                # Printable byte: str(b'a') == "b'a'", so chars [2:3] are
                # the character itself; insert at the cursor.
                Ent = str(Ent)[2:3]
                text = text[:C]+Ent+text[C:]
                C = C+1

    def template():
        """Menu to switch between the main list and a named template file.

        Returns the filename to work on, or None for an unknown choice.
        """
        system("cls")
        print(" ")
        print("Templates Menu")
        print(" ")
        print("T - Open template file")
        print("S - switch back to TodoTXT")
        print(" ")
        Input = input("Choose an option: ")
        if Input == "T" or Input == "t":
            FileName = input("Enter template file name: ")
            if ".txt" not in FileName:
                FileName = FileName+".txt"
            # Touch the file so a later load() cannot fail on a new template.
            FileCreate = open(FileName, "a")
            FileCreate.close()
            return (FileName)
        elif Input == "S" or Input == "s":
            return ("TodoTXT.txt")

    def color(ToFile, Pone, Ptwo, Pthree, Pzero):
        """Interactively pick a new ANSI colour for one priority level.

        Persists the chosen code into the cache file and returns the updated
        (Pone, Ptwo, Pthree, Pzero) tuple; returns None when the priority
        prompt is answered with anything but 0-3.
        """
        system("cls")
        print(" ")
        print("chage color for priority 0, 1, 2, or 3 ?")
        print(" ")
        Ent = input(":")
        if Ent not in ("0", "1", "2", "3"):
            return
        P = int(Ent)
        # (display name, ANSI code) pairs; index 0..6 selected with arrows.
        Colors = (("White", 37), ("Cyan", 36), ("Purple", 35), ("Blue", 34),
                  ("Yellow", 33), ("Green", 32), ("Red", 31))
        C = 0
        A = 0  # extended-key flag, as in edit()
        while True:
            system("cls")
            print(" ")
            print("\033[1;37;40m Pick a color, use the up down arrow keys")
            print(" ")
            Name, Code = Colors[C]
            print("\033[1;"+str(Code)+";40m "+Name)
            Ent = getch()
            if Ent == b'\r':
                system("cls")
                Col = Code
                # Cache lines: 4..6 hold priorities 1..3, line 7 priority 0.
                if P == 0:
                    AllFunctions.UpCACHE(7, Col, ToFile)
                    return (Pone, Ptwo, Pthree, str(Col))
                elif P == 1:
                    AllFunctions.UpCACHE(4, Col, ToFile)
                    return (str(Col), Ptwo, Pthree, Pzero)
                elif P == 2:
                    AllFunctions.UpCACHE(5, Col, ToFile)
                    return (Pone, str(Col), Pthree, Pzero)
                elif P == 3:
                    AllFunctions.UpCACHE(6, Col, ToFile)
                    return (Pone, Ptwo, str(Col), Pzero)
            elif Ent == b'\xe0':
                A = 1
                print(A)
            elif Ent == b'P' and A == 1:  # down arrow: previous colour (wraps)
                A = 0
                if C > 0:
                    C = C-1
                else:
                    C = 6
            elif Ent == b'H' and A == 1:  # up arrow: next colour (wraps)
                A = 0
                if C < 6:
                    C = C+1
                else:
                    C = 0
            else:
                A = 0
| 2.765625 | 3 |
examples/algorithms/pendulum_utilities.py | 4kubo/rllib | 0 | 12757570 | """Utilities for Inverted Pendulum."""
import torch
from torch.distributions import MultivariateNormal
from rllib.model import AbstractModel
from rllib.reward.utilities import tolerance
from rllib.util.neural_networks.utilities import to_torch
class PendulumSparseReward(AbstractModel):
    """Sparse reward for the inverted pendulum.

    Reward = (angle tolerance x velocity tolerance) plus a non-positive
    action penalty scaled by ``action_cost``.
    """

    def __init__(self, action_cost=0):
        super().__init__(dim_state=(2,), dim_action=(1,), model_kind="rewards")
        self.action_cost = action_cost
        self.reward_offset = 0

    def forward(self, state, action, next_state):
        """See `abstract_reward.forward'."""
        state = to_torch(state)
        action = to_torch(action)

        # Reward for being upright (cos(angle) near 1) while moving slowly.
        upright = tolerance(torch.cos(state[..., 0]), lower=0.95, upper=1.0, margin=0.1)
        slow = tolerance(state[..., 1], lower=-0.5, upper=0.5, margin=0.5)
        in_goal = upright * slow

        # Penalty (<= 0) for torques outside the small-action band.
        small_action = tolerance(action[..., 0], lower=-0.1, upper=0.1, margin=0.1)
        penalty = self.action_cost * (small_action - 1)

        reward = in_goal + penalty
        return reward.unsqueeze(-1), torch.zeros(1)
class PendulumDenseReward(AbstractModel):
    """Dense reward for the inverted pendulum.

    Penalizes the squared angular deviation from upright plus a quadratic
    action cost; the returned reward is the negated total penalty.
    """

    def __init__(self, action_cost=0.0):
        super().__init__(dim_state=(2,), dim_action=(1,), model_kind="rewards")
        self.action_cost = action_cost
        self.reward_offset = 0

    def forward(self, state, action, next_state):
        """See `abstract_reward.forward'."""
        state = to_torch(state)
        action = to_torch(action)
        # (1 - cos(theta))^2 is zero only at the upright position.
        angle_deviation = 1 - torch.cos(state[..., 0])
        position_penalty = angle_deviation ** 2
        control_penalty = self.action_cost * (action ** 2).sum(-1)
        return -(control_penalty + position_penalty), torch.tensor(0.0)
class PendulumModel(AbstractModel):
    """Pendulum Model.

    Torch implementation of a pendulum using forward-Euler integration.
    """

    def __init__(
        self, mass, length, friction, step_size=1 / 80, noise: MultivariateNormal = None
    ):
        super().__init__(dim_state=(2,), dim_action=(1,))
        self.mass = mass
        self.length = length
        self.friction = friction
        self.step_size = step_size
        self.noise = noise

    def forward(self, state, action):
        """Get next-state distribution."""
        # Physical dynamics: torque is clipped to [-1, 1].
        action = action.clamp(-1.0, 1.0)
        gravity = 9.81
        inertia = self.mass * self.length ** 2
        dt = self.step_size

        angle, velocity = torch.split(state, 1, dim=-1)
        # Angular acceleration: gravity + torque - viscous friction terms.
        acceleration = (
            (gravity / self.length) * torch.sin(angle)
            + action * (1 / inertia)
            - (self.friction / inertia) * velocity
        )
        # One explicit Euler step (position uses the *old* velocity).
        next_angle = angle + dt * velocity
        next_velocity = velocity + dt * acceleration
        next_state = torch.cat((next_angle, next_velocity), dim=-1)

        if self.noise is None:
            return next_state, torch.zeros(1)
        return next_state + self.noise.mean, self.noise.covariance_matrix
| 3.171875 | 3 |
docker-app/qfieldcloud/core/tests/test_queryset.py | stcz/qfieldcloud | 0 | 12757571 | import logging
from qfieldcloud.authentication.models import AuthToken
from qfieldcloud.core import querysets_utils
from qfieldcloud.core.models import (
Organization,
OrganizationMember,
Project,
ProjectCollaborator,
ProjectQueryset,
Team,
TeamMember,
User,
)
from rest_framework.test import APITestCase
logging.disable(logging.CRITICAL)  # silence all log output while these tests run
class QfcTestCase(APITestCase):
    """Tests for queryset helpers and per-user role annotations.

    Fixture graph built in setUp:
      - user1 owns project1/project2 and organization1; reporter on project7
      - user2 owns project3/project4; admin of organization1
      - user3 owns project7/project8; plain member of organization1 and
        member of team1 (which is an editor-collaborator on project9)
      - organization1 owns project5/project6/project9
      - even-numbered projects (2, 4, 6, 8) are public
    """

    def setUp(self):
        # user1 owns p1 and p2
        # user1 owns o1
        # user1 collaborates on p7
        self.user1 = User.objects.create_user(username="user1", password="<PASSWORD>")
        self.token1 = AuthToken.objects.get_or_create(user=self.user1)[0]
        # user2 owns p3 and p4
        # user2 admins o1
        self.user2 = User.objects.create_user(username="user2", password="<PASSWORD>")
        self.token2 = AuthToken.objects.get_or_create(user=self.user2)[0]
        # user3 owns p7 and p8
        # user3 is member of o1
        self.user3 = User.objects.create_user(username="user3", password="<PASSWORD>")
        self.token3 = AuthToken.objects.get_or_create(user=self.user3)[0]
        # organization1 owns p5, p6 and p9
        self.organization1 = Organization.objects.create(
            username="organization1",
            password="<PASSWORD>",
            user_type=2,  # presumably the "organization" account type -- TODO confirm against User model
            organization_owner=self.user1,
        )
        self.membership1 = OrganizationMember.objects.create(
            organization=self.organization1,
            member=self.user2,
            role=OrganizationMember.Roles.ADMIN,
        )
        self.membership2 = OrganizationMember.objects.create(
            organization=self.organization1,
            member=self.user3,
            role=OrganizationMember.Roles.MEMBER,
        )
        self.team1 = Team.objects.create(
            username="team1",
            password="<PASSWORD>",
            user_type=User.TYPE_TEAM,
            team_organization=self.organization1,
        )
        self.teammembership1 = TeamMember.objects.create(
            team=self.team1,
            member=self.user3,
        )
        self.project1 = Project.objects.create(
            name="project1", is_public=False, owner=self.user1
        )
        self.project2 = Project.objects.create(
            name="project2", is_public=True, owner=self.user1
        )
        self.project3 = Project.objects.create(
            name="project3", is_public=False, owner=self.user2
        )
        self.project4 = Project.objects.create(
            name="project4", is_public=True, owner=self.user2
        )
        self.project5 = Project.objects.create(
            name="project5", is_public=False, owner=self.organization1
        )
        self.project6 = Project.objects.create(
            name="project6", is_public=True, owner=self.organization1
        )
        self.project7 = Project.objects.create(
            name="project7", is_public=False, owner=self.user3
        )
        self.project8 = Project.objects.create(
            name="project8", is_public=True, owner=self.user3
        )
        self.project9 = Project.objects.create(
            name="project9", is_public=False, owner=self.organization1
        )
        self.collaborator1 = ProjectCollaborator.objects.create(
            project=self.project7,
            collaborator=self.user1,
            role=ProjectCollaborator.Roles.REPORTER,
        )
        self.collaborator2 = ProjectCollaborator.objects.create(
            project=self.project9,
            collaborator=self.team1,
            role=ProjectCollaborator.Roles.EDITOR,
        )

    def test_get_users(self):
        """Exercise get_users filters: search string, org/team exclusion,
        and exclusion of existing organization/project members."""
        # should get all the available users
        queryset = querysets_utils.get_users("")
        self.assertEqual(len(queryset), 5)
        self.assertTrue(self.user1 in queryset)
        self.assertTrue(self.user2 in queryset)
        self.assertTrue(self.user3 in queryset)
        self.assertTrue(self.organization1.user_ptr in queryset)
        self.assertTrue(self.team1.user_ptr in queryset)
        # should get only the users matching the search string
        queryset = querysets_utils.get_users("user3")
        self.assertEqual(len(queryset), 1)
        self.assertTrue(self.user3 in queryset)
        # should get only the users that are not an organization
        queryset = querysets_utils.get_users("", exclude_organizations=True)
        self.assertEqual(len(queryset), 4)
        self.assertTrue(self.user1 in queryset)
        self.assertTrue(self.user2 in queryset)
        self.assertTrue(self.user3 in queryset)
        self.assertTrue(self.team1.user_ptr in queryset)
        # should get only the users that are not a team
        queryset = querysets_utils.get_users("", exclude_teams=True)
        self.assertEqual(len(queryset), 4)
        self.assertTrue(self.user1 in queryset)
        self.assertTrue(self.user2 in queryset)
        self.assertTrue(self.user3 in queryset)
        self.assertTrue(self.organization1.user_ptr in queryset)
        # should get all the users, that are not members or owners of an organization
        queryset = querysets_utils.get_users("", organization=self.organization1)
        self.assertEqual(len(queryset), 1)
        # should get all the users, that are not members or owner of a project
        queryset = querysets_utils.get_users("", project=self.project1)
        self.assertEqual(len(queryset), 3)
        self.assertTrue(self.user2 in queryset)
        self.assertTrue(self.user3 in queryset)
        self.assertTrue(self.organization1.user_ptr in queryset)
        # should get all the users, that are not members or owner of a project
        queryset = querysets_utils.get_users("", project=self.project5)
        self.assertEqual(len(queryset), 4)
        self.assertTrue(self.user1 in queryset)
        self.assertTrue(self.user2 in queryset)
        self.assertTrue(self.user3 in queryset)
        self.assertTrue(self.team1.user_ptr in queryset)
        # should get all the users, that are not members or owner of a project and are not an organization
        queryset = querysets_utils.get_users(
            "", project=self.project1, exclude_organizations=True
        )
        self.assertEqual(len(queryset), 2)
        self.assertTrue(self.user2 in queryset)
        self.assertTrue(self.user3 in queryset)

    def test_projects_roles_and_role_origins(self):
        """
        Checks user_role and user_role_origin are correctly defined
        """

        def p(proj, user):
            # Fetch the project through the for_user queryset so the
            # role annotations are attached.
            return Project.objects.for_user(user).get(pk=proj.pk)

        # fmt: off
        self.assertEqual(p(self.project1, self.user1).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project1, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        self.assertEqual(p(self.project2, self.user1).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project2, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        with self.assertRaises(Project.DoesNotExist):
            p(self.project3, self.user1)
        self.assertEqual(p(self.project4, self.user1).user_role, ProjectCollaborator.Roles.READER)
        self.assertEqual(p(self.project4, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value)
        self.assertEqual(p(self.project5, self.user1).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project5, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value)
        self.assertEqual(p(self.project6, self.user1).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project6, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value)
        self.assertEqual(p(self.project7, self.user1).user_role, ProjectCollaborator.Roles.REPORTER)
        self.assertEqual(p(self.project7, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.COLLABORATOR.value)
        self.assertEqual(p(self.project8, self.user1).user_role, ProjectCollaborator.Roles.READER)
        self.assertEqual(p(self.project8, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value)
        self.assertEqual(p(self.project9, self.user1).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project9, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value)

        with self.assertRaises(Project.DoesNotExist):
            p(self.project1, self.user2)
        self.assertEqual(p(self.project2, self.user2).user_role, ProjectCollaborator.Roles.READER)
        self.assertEqual(p(self.project2, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value)
        self.assertEqual(p(self.project3, self.user2).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project3, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        self.assertEqual(p(self.project4, self.user2).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project4, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        self.assertEqual(p(self.project5, self.user2).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project5, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value)
        self.assertEqual(p(self.project6, self.user2).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project6, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value)
        with self.assertRaises(Project.DoesNotExist):
            p(self.project7, self.user2)
        self.assertEqual(p(self.project8, self.user2).user_role, ProjectCollaborator.Roles.READER)
        self.assertEqual(p(self.project8, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value)
        self.assertEqual(p(self.project9, self.user2).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project9, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value)

        with self.assertRaises(Project.DoesNotExist):
            p(self.project1, self.user3)
        self.assertEqual(p(self.project2, self.user3).user_role, ProjectCollaborator.Roles.READER)
        self.assertEqual(p(self.project2, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value)
        with self.assertRaises(Project.DoesNotExist):
            p(self.project3, self.user3)
        self.assertEqual(p(self.project4, self.user3).user_role, ProjectCollaborator.Roles.READER)
        self.assertEqual(p(self.project4, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value)
        with self.assertRaises(Project.DoesNotExist):
            p(self.project5, self.user3)
        self.assertEqual(p(self.project6, self.user3).user_role, ProjectCollaborator.Roles.READER)
        self.assertEqual(p(self.project6, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value)
        self.assertEqual(p(self.project7, self.user3).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project7, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        self.assertEqual(p(self.project8, self.user3).user_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project8, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        self.assertEqual(p(self.project9, self.user3).user_role, ProjectCollaborator.Roles.EDITOR)
        self.assertEqual(p(self.project9, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.TEAMMEMBER.value)
        # fmt: on

    def test_user_roles_and_role_origins(self):
        """
        Checks project_role and project_role_origin are correctly defined
        """

        def p(proj, user):
            # Mirror of the helper above, but annotating users for a
            # given project instead of projects for a given user.
            return User.objects.for_project(proj).get(pk=user.pk)

        # fmt: off
        self.assertEqual(p(self.project1, self.user1).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project1, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        self.assertEqual(p(self.project2, self.user1).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project2, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        with self.assertRaises(User.DoesNotExist):
            p(self.project3, self.user1)
        with self.assertRaises(User.DoesNotExist):
            p(self.project4, self.user1)
        self.assertEqual(p(self.project5, self.user1).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project5, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value)
        self.assertEqual(p(self.project6, self.user1).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project6, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value)
        self.assertEqual(p(self.project7, self.user1).project_role, ProjectCollaborator.Roles.REPORTER)
        self.assertEqual(p(self.project7, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.COLLABORATOR.value)
        with self.assertRaises(User.DoesNotExist):
            p(self.project8, self.user1)
        self.assertEqual(p(self.project9, self.user1).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project9, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value)

        with self.assertRaises(User.DoesNotExist):
            p(self.project1, self.user2)
        with self.assertRaises(User.DoesNotExist):
            p(self.project2, self.user2)
        self.assertEqual(p(self.project3, self.user2).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project3, self.user2).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        self.assertEqual(p(self.project4, self.user2).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project4, self.user2).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        self.assertEqual(p(self.project5, self.user2).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project5, self.user2).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value)
        self.assertEqual(p(self.project6, self.user2).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project6, self.user2).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value)
        with self.assertRaises(User.DoesNotExist):
            p(self.project7, self.user2)
        with self.assertRaises(User.DoesNotExist):
            p(self.project8, self.user2)
        self.assertEqual(p(self.project9, self.user2).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project9, self.user2).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value)

        with self.assertRaises(User.DoesNotExist):
            p(self.project1, self.user3)
        with self.assertRaises(User.DoesNotExist):
            p(self.project2, self.user3)
        with self.assertRaises(User.DoesNotExist):
            p(self.project3, self.user3)
        with self.assertRaises(User.DoesNotExist):
            p(self.project4, self.user3)
        with self.assertRaises(User.DoesNotExist):
            p(self.project5, self.user3)
        with self.assertRaises(User.DoesNotExist):
            p(self.project6, self.user3)
        self.assertEqual(p(self.project7, self.user3).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project7, self.user3).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        self.assertEqual(p(self.project8, self.user3).project_role, ProjectCollaborator.Roles.ADMIN)
        self.assertEqual(p(self.project8, self.user3).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value)
        self.assertEqual(p(self.project9, self.user3).project_role, ProjectCollaborator.Roles.EDITOR)
        self.assertEqual(p(self.project9, self.user3).project_role_origin, ProjectQueryset.RoleOrigins.TEAMMEMBER.value)
        # fmt: on
| 2.1875 | 2 |
test-gpubsub.py | psriramula/google-pub-sub-tester | 0 | 12757572 | <filename>test-gpubsub.py
from subscriber import create_subscription
from publisher import create_topic
# Hard-coded GCP identifiers used by this pub/sub smoke test.
project_id = "my-fast-trace-project1"
topic_name = "TEST_TOPIC_ID"
subscription_name = "number-one-sub-1"
# Create the topic first, then attach a subscription to it.
# (Variable name "subsction_res" is a typo kept as-is.)
topic_res = create_topic(project_id, topic_name)
subsction_res = create_subscription(project_id, topic_name, subscription_name)
| 1.59375 | 2 |
setup.py | RiccardoCereghino/rpi_ws281x_cool_animations | 0 | 12757573 | <filename>setup.py
from setuptools import setup
def readme():
    """Load README.rst and return its text (used as the long description)."""
    with open('README.rst') as readme_file:
        contents = readme_file.read()
    return contents
# Package metadata.
# NOTE: `dependency_links` is deprecated and ignored by modern pip; the
# rpi_ws281x dependency should eventually be declared in install_requires.
setup(name='rpi_ws281x_cool_animations',
      version='0.0',
      description='Cool animations for led matrixes of the family rpi_ws281x on Raspbian',
      long_description=readme(),
      classifiers=[
          # BUGFIX: 'Development Status :: 0 - Alpha' is not a registered
          # trove classifier; the valid alpha value is '3 - Alpha'.
          'Development Status :: 3 - Alpha',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 2.7',
          'Topic :: Text Processing :: Linguistic',
      ],
      keywords='raspberry raspbian matrix rpi_ws281x animations',
      url='http://github.com/RiccardoCereghino/rpi_ws281x_cool_animations',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['rpi_ws281x_cool_animations'],
      install_requires=[
      ],
      dependency_links=['https://github.com/jgarff/rpi_ws281x'],
      zip_safe=False)
superai/data_program/router/basic_router.py | mysuperai/superai-sdk | 1 | 12757574 | <gh_stars>1-10
import logging
import os
from typing import Dict, List
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from superai.data_program import DataProgram
import superai_schema.universal_schema.data_types as dt
from colorama import Fore, Style
from superai import Client
from superai.data_program import Workflow
from superai.data_program.Exceptions import *
from superai.data_program.protocol.task import (
execute,
get_job_app,
get_job_type,
input_schema,
metric_schema,
output_schema,
param_schema,
workflow,
)
from superai.data_program.router import Router
from superai.log import logger
from superai.utils import load_api_key
from superai.utils import load_auth_token, load_id_token
# Module-level logger for the router implementation.
log = logger.get_logger(__name__)
class BasicRouter(Router):
def __init__(
self,
name: str = "router", # Can't get overriden for now
client: Client = None,
dataprorgam: "DataProgram" = None,
**kwargs,
):
# TODO: Enable measurer and notify
super().__init__(
name=name,
client=client,
dataprorgam=dataprorgam,
**kwargs,
)
self.client = (
client if client else Client(api_key=load_api_key(), auth_token=load_auth_token(), id_token=load_id_token())
)
self.default_wf = dataprorgam.default_workflow
self.gold_wf = dataprorgam.gold_workflow
self.workflows = dataprorgam.workflows
assert len(self.workflows) > 0, "Router must have at least one workflow"
assert self.default_wf is not None, "No default method registered."
assert self.gold_wf is not None, "No gold method registered."
self.prefix = dataprorgam.name
self.input_schema = self.workflows[0].input_schema
self.parameter_schema = self.workflows[0].parameter_schema
self.output_schema = self.workflows[0].output_schema
self.name = name
self.qualified_name = "{}.{}".format(self.prefix, self.name)
self.validate()
self.subscribe_wf()
## TODO: Maybe this validation shouldn't be performed here, the workflow should validate on creation
def validate(self):
self.validate_workflow_attribute("prefix")
self.validate_workflow_attribute("input_schema")
self.validate_workflow_attribute("parameter_schema")
self.validate_workflow_attribute("output_schema")
def validate_workflow_attribute(self, attr: str):
if not hasattr(self, attr):
log.warn(Fore.RED + f"{self.name} missing attribute {attr}" + Style.RESET_ALL)
for workflow in self.workflows:
if not hasattr(workflow, attr):
log.warn(Fore.RED + f"workflow {workflow.name} missing attribute {attr}" + Style.RESET_ALL)
if getattr(self, attr) != getattr(workflow, attr):
log.warn(
Fore.RED + f"{self.name} with {attr}: {getattr(self, attr)} has workflow {workflow.name} with"
f" {attr}: {getattr(workflow, attr)}" + Style.RESET_ALL
)
def subscribe_wf(self):
@workflow(self.name, self.prefix)
@input_schema(name="inp", schema=self.input_schema)
@param_schema(name="params", schema=self.parameter_schema)
@metric_schema(name="metric", schema=dt.bundle())
@output_schema(schema=self.output_schema)
def router(inp, metric, params):
app_id = get_job_app()
job_type = get_job_type()
log.info(f"Routing {job_type} job")
if job_type == "BOT_INIT":
return send_workflow_job(
workflow=self.default_wf.name,
input=inp,
params=params,
job_type=job_type,
app_uuid=app_id,
)
elif job_type in ("DEFAULT", "ONBOARDING", "COLLABORATOR"):
# Get selected method workflow
selected_workflow = self.client.get_project(uuid=app_id).get("selectedWorkflow")
if selected_workflow:
# Send job
job_response = send_workflow_job(
workflow=selected_workflow,
input=inp,
params=params,
job_type=job_type,
app_uuid=app_id,
)
return job_response
else:
logging.warning(
Fore.LIGHTRED_EX + f"No selected workflow for app {app_id}. "
"Falling back to dataprogram default." + Style.RESET_ALL
)
return send_workflow_job(
workflow=self.default_wf_name,
input=inp,
params=params,
job_type=job_type,
app_uuid=app_id,
)
elif job_type == "CALIBRATION":
# Send job to gold method
job_response = send_workflow_job(
workflow=self.gold_wf_name,
input=inp,
params=params,
job_type=job_type,
app_uuid=app_id,
)
return job_response
else:
raise JobTypeNotImplemented("Router does not support the given job type: {}".format(job_type))
def send_workflow_job(workflow, input, params, job_type, app_uuid):
    """Execute *workflow* and return its response; raise ChildJobFailed unless
    the job status is "COMPLETED".

    :param workflow: name of the child workflow to execute.
    :param input: job input payload (shadows the builtin; kept for caller compat).
    :param params: application parameters forwarded under the "params" key.
    :param job_type: job type string, used only in the failure message.
    :param app_uuid: app identifier used as the execution tag.
    """
    job = execute(workflow, params=input, app_params={"params": params}, tag=app_uuid)
    result = job.result()
    status = result.status()
    # `status != "COMPLETED"` already covers None/"" — the extra `not status or`
    # in the original was redundant.
    if status != "COMPLETED":
        raise ChildJobFailed(
            "{} method did not complete for {} job. Result {}. Status {}".format(
                workflow, job_type, result, status
            )
        )
    # Reuse the result already fetched instead of calling job.result() again.
    return result.response()
| 2.046875 | 2 |
gammapy/catalog/tests/test_xmatch.py | grburgess/gammapy | 3 | 12757575 | <reponame>grburgess/gammapy
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.coordinates import Angle
from ...astro.population import make_catalog_random_positions_sphere
from ...catalog import (
catalog_xmatch_circle,
catalog_xmatch_combine,
table_xmatch_circle_criterion,
table_xmatch,
)
def test_catalog_xmatch_circle():
    """Cross-match two synthetic catalogs within per-source association radii."""
    # Fixed seed makes both random catalogs (and hence the match count) deterministic.
    random_state = np.random.RandomState(seed=0)
    catalog = make_catalog_random_positions_sphere(size=100, center='Milky Way',
                                                   random_state=random_state)
    catalog['Source_Name'] = ['source_{:04d}'.format(_) for _ in range(len(catalog))]
    # Per-source association radius, uniform in [0, 10] degrees.
    catalog['Association_Radius'] = Angle(random_state.uniform(0, 10, len(catalog)), unit='deg')
    other_catalog = make_catalog_random_positions_sphere(size=100, center='Milky Way',
                                                         random_state=random_state)
    other_catalog['Source_Name'] = ['source_{:04d}'.format(_) for _ in range(len(other_catalog))]
    result = catalog_xmatch_circle(catalog, other_catalog)
    # Regression value for seed=0. NOTE(review): will break if the sampling in
    # make_catalog_random_positions_sphere ever changes — confirm on failure.
    assert len(result) == 23
def test_catalog_xmatch_combine():
    """Placeholder: catalog_xmatch_combine has no real coverage yet."""
    # TODO: implement tests
    assert True
def test_table_xmatch():
    """Placeholder: table_xmatch has no real coverage yet."""
    # TODO: implement tests
    assert True
| 2.09375 | 2 |
beecrowd exercises/beecrowd-1002.py | pachecosamuel/Python-Exercises | 0 | 12757576 | <filename>beecrowd exercises/beecrowd-1002.py
# beecrowd 1002 — area of a circle. The problem statement mandates pi = 3.14159
# (do not substitute math.pi: the judge compares formatted output exactly).
pi = 3.14159
# Security/correctness fix: eval() executes arbitrary code from stdin; float()
# parses the single numeric token the problem guarantees.
raio = float(input())
area = pi * (raio ** 2)
# Output format required by the judge: "A=" followed by 4 decimal places.
print(f'A={area:.4f}')
| 3.109375 | 3 |
fanuc_planning/src/scripts/plugins/mapping.py | oaghub/FMS | 0 | 12757577 | <reponame>oaghub/FMS<gh_stars>0
#! /usr/bin/env python
import rospy
import sys
import time
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import numpy
from math import pi
from std_msgs.msg import Float32MultiArray, MultiArrayDimension, Int8MultiArray, Bool, Int8
from geometry_msgs.msg import Pose, PoseStamped
from sensor_msgs.msg import JointState
from trajectory_msgs.msg import JointTrajectoryPoint
## Handling the request from the user or the system itself
def handle_scan_request(msg): # User
    """ROS callback: latch the user's scan request flag (std_msgs/Bool)."""
    global request
    request = msg.data
def handle_manufacturing_request(msg): # System
    """ROS callback: store ERP manufacturing conditions (first element dropped)
    and flag that an ERP request arrived."""
    global manufacturing_conditions
    global flag_erp_request
    # Skip msg.data[0]; the remainder lists the required container ids.
    manufacturing_conditions = numpy.array(msg.data[1::])
    #manufacturing_conditions[manufacturing_conditions > 0] = 1
    flag_erp_request = True
## Handling the (previously defined) assembly part containers
# ArUco with ID 0
def handle_aruco_ID_0_location(msg):
    """ROS callback: cache the world-frame position of ArUco marker 0."""
    global aruco_ID_0_position
    global flag_ID_0_position
    aruco_ID_0_position = msg.position
    flag_ID_0_position = True
    #print(aruco_ID_0_position)
def handle_aruco_ID_0_orientation(msg):
    """ROS callback: cache marker-0 orientation and record the receive time
    (used by mapping() to require a reading newer than the scan request)."""
    global timestamp_0
    global aruco_ID_0_orientation
    global flag_ID_0_orientation
    aruco_ID_0_orientation = msg.orientation
    flag_ID_0_orientation = True
    timestamp_0 = time.time()
# ArUco with ID 1
def handle_aruco_ID_1_location(msg):
    """ROS callback: cache the world-frame position of ArUco marker 1."""
    global aruco_ID_1_position
    global flag_ID_1_position
    aruco_ID_1_position = msg.position
    flag_ID_1_position = True
    #print(aruco_ID_1_position)
def handle_aruco_ID_1_orientation(msg):
    """ROS callback: cache marker-1 orientation and record the receive time."""
    global timestamp_1
    global aruco_ID_1_orientation
    global flag_ID_1_orientation
    aruco_ID_1_orientation = msg.orientation
    flag_ID_1_orientation = True
    timestamp_1 = time.time()
def handle_production_request(msg):
    """ROS callback: remember the requested product id (republished on failure)."""
    global product
    product = msg.data
def mapping():
    """Main node loop: on a scan request, collect ArUco bin poses, publish the
    resulting map/state arrays and spawn matching collision objects in MoveIt.

    NOTE(review): indentation of this routine was reconstructed from flattened
    source; the nesting of the scene-cleanup block relative to the one-time
    per-request setup should be confirmed against the original file.
    """
    # All state is exchanged with the ROS callbacks through module globals.
    global timestamp_0
    global timestamp_1
    global timestamp_request
    global request
    global flag_ID_0_position
    global flag_ID_0_orientation
    global flag_ID_1_position
    global flag_ID_1_orientation
    flag_ID_0_position = False
    flag_ID_0_orientation = False
    flag_ID_1_position = False
    flag_ID_1_orientation = False
    global manufacturing_conditions
    global flag_erp_request
    flag_erp_request = False
    global product
    ''' SUBSCRIBERS '''
    ## Topics to extract the location of the bins
    rospy.Subscriber("world_assembly_part_ID_0/target_pose", Pose, handle_aruco_ID_0_location)
    rospy.Subscriber("world_assembly_part_ID_1/target_pose", Pose, handle_aruco_ID_1_location)
    ## Topics to extract the orientations of the bins
    rospy.Subscriber("/aruco_simple/pose_ID_0", Pose, handle_aruco_ID_0_orientation)
    rospy.Subscriber("/aruco_simple/pose_ID_1", Pose, handle_aruco_ID_1_orientation)
    rospy.Subscriber("scan/request/map/trigger", Bool, handle_scan_request)
    ## Topic to extract the conditions to fulfill the request from the frp
    rospy.Subscriber("frp/manufacturing/product/info", Int8MultiArray, handle_manufacturing_request)
    rospy.Subscriber("user/manufacturing/request", Int8, handle_production_request)
    ''' PUBLISHERS '''
    pub_locations = rospy.Publisher("map/locations", Float32MultiArray, queue_size=1)
    pub_states = rospy.Publisher("map/detected_parts", Int8MultiArray, queue_size=1)
    pub_map_result = rospy.Publisher("map/result", Bool, queue_size=1)
    pub_failed_request = rospy.Publisher("user/manufacturing/request", Int8, queue_size=1)
    # 2D Array that will contain the map in terms of x - y - z ArUco coordinates
    # Layout: 5 markers x 7 values (position xyz + quaternion xyzw) flattened.
    aruco_map = Float32MultiArray()
    aruco_map.layout.dim.append(MultiArrayDimension())
    aruco_map.layout.dim.append(MultiArrayDimension())
    aruco_map.layout.dim[0].label = "xyz_xyzw"
    aruco_map.layout.dim[1].label = "aruco id location"
    aruco_map.layout.dim[0].size = 7
    aruco_map.layout.dim[1].size = 5
    aruco_map.layout.dim[0].stride = 7*5 # Number of elements in the matrix
    aruco_map.layout.dim[1].stride = 5
    aruco_map.layout.data_offset = 0
    aruco_map.data = [0]*35
    # save a few dimensions:
    dstride0 = aruco_map.layout.dim[0].stride
    dstride1 = aruco_map.layout.dim[1].stride
    offset = aruco_map.layout.data_offset
    # Per-marker detection state (100 + id once a container is seen).
    bin_state = Int8MultiArray()
    bin_state.layout.dim.append(MultiArrayDimension())
    bin_state.layout.dim.append(MultiArrayDimension())
    bin_state.layout.dim[0].label = "id"
    bin_state.layout.dim[1].label = "detected bin"
    bin_state.layout.dim[0].size = 5
    bin_state.layout.dim[1].size = 1
    bin_state.layout.dim[0].stride = 1*5 # Number of elements in the matrix
    bin_state.layout.dim[1].stride = 1
    bin_state.layout.data_offset = 0
    bin_state.data = [0]*5
    # save a few dimensions:
    # NOTE(review): this overwrites the aruco_map strides saved above with
    # bin_state's (dstride1 becomes 1). The aruco_map indexing below only
    # addresses contiguous elements 0-13 *because* of this reassignment.
    dstride0 = bin_state.layout.dim[0].stride
    dstride1 = bin_state.layout.dim[1].stride
    offset = bin_state.layout.data_offset
    rospy.sleep(1)
    tmp_aruco_map = numpy.zeros((5,7)) # 2D array layout
    tmp_bin_state = numpy.zeros((1,5)) # 1D array layot
    # Start from an empty planning scene.
    scene.remove_world_object()
    p = PoseStamped()
    request = False
    trigger = False
    print('\x1b[7;49;96m' + '=================================' + '\x1b[0m')
    print('\x1b[7;49;96m' + ' <<< (MAP) ' + '\x1b[0m' + '\x1b[7;49;96m'+ " " + 'Initialized >>> ' + " " + '\x1b[0m')
    print('\x1b[7;49;96m' + '=================================' + '\x1b[0m')
    while not rospy.is_shutdown():
        if request == True:
            if trigger == False:
                # One-time setup for a new scan request: clear published data.
                aruco_map.data = [0]*35
                bin_state.data = [0]*5
                pub_locations.publish(aruco_map)
                pub_states.publish(bin_state)
                rospy.sleep(2)
                #print("I have just recieved a scan request!")
                timestamp_request = time.time()
                trigger = True
            ## Only removing the displayed bins
            ''' en lugar de poner todo manualmente que sea todo automatico'''
            # TODO(review, translated): make this removal automatic instead of
            # listing every object manually.
            scene.remove_world_object("bin_0")
            scene.remove_world_object("part_0")
            scene.remove_world_object("bin_1")
            scene.remove_world_object("part_1")
            # Time condition: only accept pose readings newer than the request,
            # so position and orientation are guaranteed to be fresh.
            if flag_ID_0_position == True and flag_ID_0_orientation == True and (timestamp_request < timestamp_0): # Condicion de tiempo para que posicion y orientacion sean differentes
                ''' ArUco ID: 0 (Bin containing assembly parts with ID 0)
                these coordinates specify the location of the object to be grasp'''
                aruco_map.data[offset + 0 + dstride1*0] = aruco_ID_0_position.x # Filling element [0]
                aruco_map.data[offset + 0 + dstride1*1] = aruco_ID_0_position.y # Filling element [1]
                aruco_map.data[offset + 0 + dstride1*2] = aruco_ID_0_position.z # Filling element [2]
                aruco_map.data[offset + 0 + dstride1*3] = aruco_ID_0_orientation.x # Filling element [3]
                aruco_map.data[offset + 0 + dstride1*4] = aruco_ID_0_orientation.y # Filling element [4]
                aruco_map.data[offset + 0 + dstride1*5] = aruco_ID_0_orientation.z # Filling element [5]
                aruco_map.data[offset + 0 + dstride1*6] = aruco_ID_0_orientation.w # Filling element [6]
                bin_state.data[offset + 0 + dstride1*0] = 100 # id container detected
                print('\x1b[7;49;34m' + ' (MAP) >>' + '\x1b[0m' + '\x1b[7;49;34m'+ " " + 'Bin with ID 0 detected ' + " " + '\x1b[0m')
                flag_ID_0_position = False
                flag_ID_0_orientation = False
                # Spawning the obstacle from detected aruco frame
                p.header.frame_id = "world_aruco_ID_0" #robot.get_planning_frame()
                p.pose.position.x = 0.15 #locations[0]
                p.pose.position.y = 0 #locations[1]
                p.pose.position.z = 0.025 #locations[2]
                scene.add_box("bin_0", p, (0.3, 0.15, 0.05))
                # Spawning the location of the assebly part as na obstacle
                p.header.frame_id = "assembly_part_ID_0" #robot.get_planning_frame()
                p.pose.position.x = 0 #locations[0]
                p.pose.position.y = 0 #locations[1]
                p.pose.position.z = 0.005 #locations[2]
                scene.add_cylinder("part_0", p, 0.01, 0.02)
                pub_locations.publish(aruco_map)
                pub_states.publish(bin_state)
            if flag_ID_1_position == True and flag_ID_1_orientation == True and (timestamp_request < timestamp_1): # Condition to have always new readings (when requested)
                ''' ArUco ID: 1 (Bin containing assembly parts with ID 1)
                these coordinates specify the location of the object to be grasp'''
                # With dstride1 == 1 these are simply elements 7..13 of the flat array.
                aruco_map.data[offset + 0 + dstride1*7] = aruco_ID_1_position.x # Filling element [0]
                aruco_map.data[offset + 0 + dstride1*8] = aruco_ID_1_position.y # Filling element [1]
                aruco_map.data[offset + 0 + dstride1*9] = aruco_ID_1_position.z # Filling element [2]
                aruco_map.data[offset + 0 + dstride1*10] = aruco_ID_1_orientation.x # Filling element [3]
                aruco_map.data[offset + 0 + dstride1*11] = aruco_ID_1_orientation.y # Filling element [4]
                aruco_map.data[offset + 0 + dstride1*12] = aruco_ID_1_orientation.z # Filling element [5]
                aruco_map.data[offset + 0 + dstride1*13] = aruco_ID_1_orientation.w # Filling element [6]
                bin_state.data[offset + 0 + dstride1*1] = 101 #id container detected
                print('\x1b[7;49;34m' + ' (MAP) >>' + '\x1b[0m' + '\x1b[7;49;34m'+ " " + 'Bin with ID 1 detected ' + " " + '\x1b[0m')
                flag_ID_1_position = False
                flag_ID_1_orientation = False
                # Spawning the obstacle from detected aruco frame
                p.header.frame_id = "world_aruco_ID_1" #robot.get_planning_frame()
                p.pose.position.x = 0.15 #locations[0]
                p.pose.position.y = 0 #locations[1]
                p.pose.position.z = 0.025 #locations[2]
                scene.add_box("bin_1", p, (0.3, 0.15, 0.05))
                # Spawning the location of the assebly part as an obstacle
                p.header.frame_id = "assembly_part_ID_1" #robot.get_planning_frame()
                p.pose.position.x = 0 #locations[0]
                p.pose.position.y = 0 #locations[1]
                p.pose.position.z = 0.005 #locations[2]
                scene.add_cylinder("part_1", p, 0.01, 0.02)
                pub_locations.publish(aruco_map)
                pub_states.publish(bin_state)
            #rospy.sleep(1)
            # A scan is complete once every container id the ERP asked for
            # appears in the detected set.
            detected = numpy.array(bin_state.data)
            comparison = numpy.isin(manufacturing_conditions,detected)
            complete = comparison.all()
            if complete:
                request = False
                trigger = False
                print('\x1b[7;49;92m' + ' (MAP) >>' + '\x1b[0m' + '\x1b[7;49;92m'+ " " + 'All assembly parts containers have been detected! ' + " " + '\x1b[0m')
                # NOTE(review): publishes False on success as well — the comment
                # says "everything detected"; confirm the intended semantics of
                # map/result with its consumer.
                pub_map_result.publish(False) # Everything has been detected
                pub_failed_request.publish(product)
            elif (time.time()) > (timestamp_request + 20.00): # 30 seconds of scanning before timeout
                # Timeout: give up on this scan and notify downstream.
                pub_map_result.publish(False)
                request = False
                trigger = False
                print('\x1b[7;49;31m' + ' (MAP) >>' + '\x1b[0m' + '\x1b[7;49;31m'+ " " + 'After 20.0 seconds the conditions have not been fulfilled...' + " " + '\x1b[0m')
                print('\x1b[7;49;31m' + ' (MAP) >>' + '\x1b[0m' + '\x1b[7;49;31m'+ " " + 'Shutting down the MAP service... ' + " " + '\x1b[0m')
                print('\x1b[7;49;31m' + ' (MAP) >>' + '\x1b[0m' + '\x1b[7;49;31m'+ " " + 'Please, check the workspace due to missing containers... ' + " " + '\x1b[0m')
                rospy.sleep(1)
                ''' automatizad de manera que cuando no sea posible, que se relance el pedido'''
                # TODO(review, translated): automate re-submitting the order
                # when the scan is not possible.
            else:
                pass
        else:
            # Idle: keep republishing the last map/state at a slow rate.
            pub_locations.publish(aruco_map)
            pub_states.publish(bin_state)
            rospy.sleep(2)
            # Do nothing until next request
if __name__ == '__main__':
    # Node entry point: initialize MoveIt commander + ROS node, then run the
    # mapping loop until shutdown. (Stray dataset-extraction residue that had
    # been fused onto the final line was removed.)
    try:
        moveit_commander.roscpp_initialize(sys.argv)
        rospy.init_node('map')
        robot = moveit_commander.RobotCommander()
        scene = moveit_commander.PlanningSceneInterface()
        mapping()
    except rospy.ROSInterruptException:
        # Normal Ctrl-C / shutdown path — nothing to clean up here.
        pass
single_inference/test_kitti.py | yohannes-taye/mobilePydnet | 182 | 12757578 | """
Evaluate the model using Eigen split of KITTI dataset
- prepare gt depth running the script https://github.com/nianticlabs/monodepth2/blob/master/export_gt_depth.py
"""
import argparse
import os
import cv2
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from eval_utils import compute_errors, compute_scale_and_shift
from network import Pydnet
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class KITTILoader(object):
    """tf.data input pipeline for KITTI test images (TF1 graph-mode API)."""

    def __init__(self, params):
        """Store sizing/paths and load the list of test image ids.

        :param params: dict with keys "height", "width", "data_list_file"
            (text file, one image id per line) and "data_path" (image root).
        """
        self.params = params
        self.height = params["height"]
        self.width = params["width"]
        self.data_list_file = params["data_list_file"]
        self.data_path = params["data_path"]
        self.num_workers = 4
        # Fix: np.str was a deprecated alias removed in NumPy 1.24; the builtin
        # str is the documented replacement and behaves identically here.
        self.data_list = np.loadtxt(self.data_list_file, dtype=bytes).astype(str)
        self.default_img_shape = None

    def read_and_decode(self, filename_queue):
        """Read jpeg file from file system and return it as float32 HWC tensor."""
        img0_name = tf.strings.join([self.data_path, "/", filename_queue, ".jpg"])
        img0 = tf.image.decode_jpeg(tf.io.read_file(img0_name), channels=3)
        img0 = tf.cast(img0, tf.float32)
        return img0

    def preprocess(self, filename_queue):
        """Prepare a single image at testing time: resize and scale to [0, 1]."""
        img0 = self.read_and_decode(filename_queue)
        # AREA resampling for downscaling quality.
        img0 = tf.image.resize_images(img0, [self.height, self.width], tf.image.ResizeMethod.AREA)
        img0.set_shape([self.height, self.width, 3])
        img0 = img0 / 255.0
        return img0

    def create_iterator(self, num_parallel_calls=4):
        """Create an initializable iterator over batches of size 1 (repeated)."""
        data_list = tf.convert_to_tensor(self.data_list, dtype=tf.string)
        dataset = tf.data.Dataset.from_tensor_slices(data_list)
        dataset = dataset.map(self.preprocess, num_parallel_calls=num_parallel_calls)
        dataset = dataset.batch(1)
        dataset = dataset.repeat()
        iterator = dataset.make_initializable_iterator()
        return iterator
def read_test_files(test_file) -> list:
    """Read test image ids from *test_file*, one per line, whitespace-stripped."""
    assert os.path.exists(test_file)
    with open(test_file, "r") as handle:
        return [entry.strip() for entry in handle.readlines()]
def run_inference(opts):
    """Run the model on KITTI: predict inverse depth per test image, normalize
    it to [0, 255], resize to the original image size and save as 16-bit PNG."""
    network_params = {"height": 320, "width": 640, "is_training": False}
    dataset_params = {
        "height": 320,
        "width": 640,
        "data_path": opts.data_path,
        "data_list_file": opts.data_list_file,
    }
    dataset = KITTILoader(dataset_params)
    iterator = dataset.create_iterator()
    batch_img = iterator.get_next()
    network = Pydnet(network_params)
    predicted_idepth = network.forward(batch_img)
    # Clamp negative activations; inverse depth must be non-negative.
    predicted_idepth = tf.nn.relu(predicted_idepth)
    # restore graph
    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.compat.v1.global_variables_initializer())
    sess.run(iterator.initializer)
    saver.restore(sess, opts.ckpt)
    os.makedirs(opts.dest, exist_ok=True)
    test_images = read_test_files(opts.data_list_file)
    num_images = len(test_images)
    with tqdm(total=num_images) as pbar:
        for i in range(num_images):
            idepth = sess.run(predicted_idepth)
            idepth = np.squeeze(idepth)
            # Per-image min-max normalization to [0, 1], then to [0, 255].
            min_idepth = idepth.min()
            max_idepth = idepth.max()
            norm_idepth = (idepth - min_idepth) / (max_idepth - min_idepth)
            norm_idepth *= 255.0
            # Resize back to the original image resolution before saving.
            target_path = os.path.join(opts.data_path, f"{test_images[i]}.jpg")
            target = cv2.imread(target_path)
            h, w = target.shape[:2]
            norm_idepth = cv2.resize(norm_idepth, (w, h))
            # Stored as uint16 with a x256 factor; eval() divides by 256 on load.
            img_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png")
            cv2.imwrite(img_path, (norm_idepth * 256.0).astype(np.uint16))
            pbar.update(1)
    print("Inference done!")
def eval(opts):
    """Compute error metrics against the precomputed gt_depths.npz."""
    # NOTE(review): this function shadows the builtin `eval`; name kept because
    # the __main__ block calls it as-is.
    errors = []
    test_images = read_test_files(opts.data_list_file)
    print("=> loading gt data")
    gt_depths = np.load(opts.gt_path, fix_imports=True, encoding="latin1", allow_pickle=True)[
        "data"
    ]
    print("=> starting evaluation")
    with tqdm(total=len(test_images)) as pbar:
        for i in range(len(test_images)):
            target = gt_depths[i]
            pred_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png")
            # -1 loads the PNG unchanged (16-bit); stored with a x256 factor.
            prediction_idepth = cv2.imread(pred_path, -1) / 256.0
            # Valid gt pixels: strictly positive and below the depth cap.
            mask = (target > 1e-3) & (target < opts.max_depth)
            target_idepth = np.zeros_like(target)
            target_idepth[mask == 1] = 1.0 / target[mask == 1]
            # Predictions are scale/shift-invariant: align them to the gt in
            # inverse-depth space before computing metrics.
            scale, shift = compute_scale_and_shift(prediction_idepth, target_idepth, mask)
            prediction_idepth_aligned = scale * prediction_idepth + shift
            # Cap disparity so depth never exceeds max_depth after inversion.
            disparity_cap = 1.0 / opts.max_depth
            prediction_idepth_aligned[prediction_idepth_aligned < disparity_cap] = disparity_cap
            prediciton_depth_aligned = 1.0 / prediction_idepth_aligned
            prediciton_depth_aligned = prediciton_depth_aligned[mask == 1]
            target = target[mask == 1]
            errors.append(compute_errors(target, prediciton_depth_aligned))
            pbar.update(1)
    mean_errors = np.array(errors).mean(0)
    labels = ["abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"]
    for i in range(len(labels)):
        print(f"{labels[i]}:{mean_errors[i]}")
    print("Evaluation done!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Evaluate depth network on KITTI")
parser.add_argument("--ckpt", type=str, help="path to checkpoint", required=True)
parser.add_argument("--data_path", type=str, help="path to kitti", required=True)
parser.add_argument("--gt_path", type=str, help="path to gt_depths.npz", required=True)
parser.add_argument(
"--data_list_file", type=str, help="path to data list", default="test_kitti.txt"
)
parser.add_argument("--dest", type=str, help="prediction folder", default="kitti")
parser.add_argument("--max_depth", type=float, help="maximum depth value", default=80.0)
opts = parser.parse_args()
run_inference(opts)
eval(opts)
| 2.453125 | 2 |
samples/quickbooksGLtoDatabase.py | blu2ego/ADS---Python-Example- | 37 | 12757579 | <reponame>blu2ego/ADS---Python-Example-
""" open General Ledger from Excel, reformat into a database. """
import pandas as pd
import numpy as np
import argparse
from itertools import cycle
from time import sleep
columnNames = ["Account Description", "Type", "Date", "Num", "Name",
"Memo", "Split", "Amount", "Balance"]
def format_file(df, columnNames=columnNames, columnsToBeMerged=(0, 1, 2, 3, 4)):
    """Reformat a raw QuickBooks GL DataFrame into a flat table.

    Merges the leading account-description columns into one, drops empty
    rows/columns, moves the merged column first and applies *columnNames*.

    :param df: raw DataFrame as read from the Excel export (no header).
    :param columnNames: final column labels to apply.
    :param columnsToBeMerged: column indices folded into "Account Description".
    """
    # Fix: the default was a mutable list (shared across calls); a tuple is
    # safe, and we convert to a list here because pandas treats a tuple
    # passed to df[...] as a single key.
    merge_cols = list(columnsToBeMerged)
    df['Account Description'] = merge_columns(df[merge_cols])
    # clean up empty columns/rows
    df = df.drop(0)
    df = df.drop(merge_cols, axis=1)
    df = df.dropna(axis=0, thresh=6)
    df = df.dropna(axis=1, how='all')
    # Reindex Columns, move last column to first position
    cols = df.columns.tolist()
    df = df[cols[-1:] + cols[:-1]]
    df.columns = columnNames
    return df
def merge_columns(df):
    """Collapse *df* into a single Series: the first non-NaN value in each row,
    with fully-empty rows forward-filled from the previous merged value.

    Cells containing a single space are treated as empty. The input frame is
    not modified.
    """
    # Fix: the original appended the accumulator as a column and then iterated
    # temp_df.columns, which included the accumulator itself (a wasted
    # self-fillna pass); it also copied the whole frame twice.
    cleaned = df.replace(' ', np.nan)
    merged = pd.Series(np.nan, index=cleaned.index)
    for column in cleaned.columns:
        merged = merged.fillna(cleaned[column])
    return merged.fillna(method='ffill')
def open_file(filename):
    """Open *filename* as a headerless DataFrame; exit the program on failure.

    :param filename: path to the Excel export to read.
    :raises SystemExit: when the file cannot be opened/parsed.
    """
    try:
        return pd.read_excel(filename, index_col=None, header=None)
    except Exception as exc:
        # Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # catching Exception keeps Ctrl-C working. Report the cause, too.
        print("Error with filename")
        print(exc)
        # quit() depends on the site module; SystemExit is the portable form.
        raise SystemExit(1)
# progress bar. Unnecessary for functionality and can be omitted.
def progress(percent=0, width=30):
    """Redraw a single-line text progress bar at *percent* (0-100)."""
    filled = width * percent // 100
    bar = '#' * filled + ' ' * (width - filled)
    # \r rewinds to the line start so successive calls overwrite in place.
    print(f'\r[{bar}] {percent:.0f}%', end='', flush=True)
def main(file_location):
    """Reformat the Excel GL at *file_location* and write modified_GL.csv.

    Returns the reformatted DataFrame for further use.
    """
    file = format_file(open_file(file_location))
    print(f"Reformatting {file_location} in progress.")
    # Cosmetic progress animation (~1 s total); the actual work is already done.
    for i in range(101):
        progress(i)
        sleep(0.01)
    # consider using Path object for file location, to allow for
    # more accurate location saving.
    file.to_csv("modified_GL.csv", index=False)
    print("")
    print("File Successfully converted.")
    return file
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Location of Excel File to be formatted.')
parser.add_argument('file_location', type=str, help='file location')
args = parser.parse_args()
main(args.file_location)
| 3.578125 | 4 |
docker/tensorflow-lite-micro-rtos-fvp/sw/data_injection_utils/config.py | junpuf/Tool-Solutions | 1 | 12757580 | <filename>docker/tensorflow-lite-micro-rtos-fvp/sw/data_injection_utils/config.py
import logging
import subprocess
from os import path, mkdir, getenv, getcwd
from argparse import ArgumentParser
from packaging import version
class AppConfiguration:
    """Configuration for the data-injection demo: paths, usecase, FVP options.

    Defaults are set in __init__ and may be overridden from the command line
    via ParseInputArguments().
    """
    def __init__(self):
        self.logging=logging
        format = "[%(levelname)s] %(message)s"
        # NOTE(review): basicConfig in a library class configures the root
        # logger globally — acceptable for this demo script.
        self.logging.basicConfig(format=format, level=logging.DEBUG)
        # the path of this file
        self.repo_root = path.join(path.dirname(path.abspath(__file__)), "../..")
        # path to the ml-embedded-evaluation-kit
        self.eval_kit_base = path.join(self.repo_root, "dependencies/ml-embedded-evaluation-kit")
        # default usecase
        self.usecase = "person_detection"
        #default image path to use
        self.image_path = f"{self.eval_kit_base}/resources/{self.usecase}/samples/"
        # whether to use USB camera or not
        self.use_camera = False
        # whether to use fast mode or not
        self.speed_mode = False
        # Ethos-U55 MAC configuration (32/64/128/256)
        self.num_macs = 128
        # Compiler, armclang or gcc
        self.compiler = getenv('COMPILER', default='armclang')
        # directory of the compiled binaries
        self.bin_dir = path.join(self.eval_kit_base, "build-data_injection/bin")
    def ParseInputArguments(self):
        """Parse command-line options, validate them (FVP version, paths) and
        update this configuration in place. Exits on invalid input."""
        parser = ArgumentParser(description="Build and run Ethos Evaluation Samples")
        parser.add_argument('--usecase', type=str.lower, default=self.usecase, choices=["img_class", "person_detection"],
                            help='Application to run')
        parser.add_argument('--enable_camera', default=False, action="store_true",
                            help='Use a camera feed as input (Default: False)')
        parser.add_argument('--image_path', type=str, default=self.image_path,
                            help='Path to image or folder to use in inference (injected into application)')
        parser.add_argument('--compiler', type=str.lower, default=self.compiler, choices=["armclang", "gcc"],
                            help='Which compiler to use, armclang or gcc')
        parser.add_argument('--enable_speed_mode', default=False, action="store_true",
                            help='Use FVP speed mode, making inferences go faster (Default: False)')
        parser.add_argument('--num_macs', type=int, default=128, choices=[32, 64, 128, 256],
                            help='Ethos-U55 mac configuration. 32, 64, 128, 256 (Default: 128)')
        #parser.add_argument('--log_file', type=str, default="/tmp/run_log.txt",
        #                    help="Log file path")
        args = parser.parse_args()
        self.usecase = args.usecase
        logging.debug(f"Running usecase {self.usecase}")
        self.image_path = args.image_path
        # Make a relative image path absolute against the current directory.
        if not self.image_path[0] == '/':
            self.image_path = path.join(getcwd(), self.image_path)
        if not path.exists(self.image_path):
            logging.error(f"{self.image_path} was not found. Has to be a valid path")
            exit()
        logging.debug(f"Setting image path to {self.image_path}")
        self.compiler = args.compiler
        logging.debug(f"Using compiler {self.compiler}")
        self.use_camera = args.enable_camera
        # speed mode is only available on FVP 11.14 and higher
        # demo is only supported on 11.13 and higher
        # Extract the "Fast Models [xx.yy ...]" version from the FVP banner.
        fvp_version=subprocess.check_output("FVP_Corstone_SSE-300_Ethos-U55 --version | grep 'Fast Models \[[0-9][0-9].[0-9][0-9]'| awk '{ print $3 }'", shell=True).decode('utf-8').replace('[', '')
        if version.parse(fvp_version) < version.parse("11.13"):
            logging.error("{}{}".format(f"This demo only supports only supports FVP version 11.13 or higher. Installed version is {fvp_version}",
                "\tPlease upgrade the FVP: https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps\n"))
            exit()
        self.speed_mode = args.enable_speed_mode
        if self.speed_mode:
            if version.parse(fvp_version) >= version.parse("11.14"):
                logging.debug("Enabling speed mode.")
            else:
                logging.error("{}{}".format(f"Speed mode is only supported on FVP version 11.14 or higher. Installed version is {fvp_version}",
                    "\tPlease upgrade the FVP: https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps\n"))
                exit()
        self.num_macs = args.num_macs
        logging.debug(f"Setting mac count to {self.num_macs}")
| 2.15625 | 2 |
MUNDO1/Ex033_Maior3.py | KayDeVC/Python-CeV | 0 | 12757581 | <gh_stars>0
# Read three numbers and report the largest and smallest.
print('\n==== MAIOR/MENOR ====')
num1 = float(input('Digite o primeiro número:'))
num2 = float(input('Digite o segundo número:'))
# Fix: prompt typo "Dugite" -> "Digite".
num3 = float(input('Digite o terceiro número:'))
# The builtins replace the manual comparison chains (identical results,
# including ties). Also drops dataset-extraction residue fused onto the
# last line of the original.
maior = max(num1, num2, num3)
menor = min(num1, num2, num3)
print('\nO maior número é: {}!'.format(maior))
print('O menor número é: {}!'.format(menor))
bempp/core/singular_assembler.py | pescap/bempp-cl | 0 | 12757582 | <filename>bempp/core/singular_assembler.py
import numpy as _np
from bempp.api.assembly import assembler as _assembler
from bempp.api.integration import duffy_galerkin as _duffy_galerkin
import collections as _collections
class SingularAssembler(_assembler.AssemblerBase):
    """Assembler for the singular part of boundary integral operators."""
    def __init__(self, domain, dual_to_range, parameters=None):
        """Instantiate the assembler."""
        super().__init__(domain, dual_to_range, parameters)
    def assemble(
        self, operator_descriptor, device_interface, precision, *args, **kwargs
    ):
        """Assemble the singular part.

        Returns a SparseDiscreteBoundaryOperator; a zero operator when domain
        and dual space live on different grids (no singular pairs then).
        """
        from bempp.api.assembly.discrete_boundary_operator import (
            SparseDiscreteBoundaryOperator,
        )
        from bempp.api.utils.helpers import promote_to_double_precision
        from scipy.sparse import coo_matrix, csr_matrix
        from bempp.api.space.space import return_compatible_representation
        # Re-express both spaces on a common compatible representation.
        domain, dual_to_range = return_compatible_representation(
            self.domain, self.dual_to_range
        )
        row_dof_count = dual_to_range.global_dof_count
        col_dof_count = domain.global_dof_count
        row_grid_dofs = dual_to_range.grid_dof_count
        col_grid_dofs = domain.grid_dof_count
        # Different grids -> no coincident/adjacent element pairs -> zero matrix.
        if domain.grid != dual_to_range.grid:
            return SparseDiscreteBoundaryOperator(
                csr_matrix((row_dof_count, col_dof_count), dtype="float64")
            )
        trial_local2global = domain.local2global.ravel()
        test_local2global = dual_to_range.local2global.ravel()
        trial_multipliers = domain.local_multipliers.ravel()
        test_multipliers = dual_to_range.local_multipliers.ravel()
        # Compute the raw local (element-pair) contributions.
        rows, cols, values = assemble_singular_part(
            domain.localised_space,
            dual_to_range.localised_space,
            self.parameters,
            operator_descriptor,
            device_interface,
        )
        # Map local dofs to global dofs and apply the dof multipliers.
        global_rows = test_local2global[rows]
        global_cols = trial_local2global[cols]
        global_values = values * trial_multipliers[cols] * test_multipliers[rows]
        if self.parameters.assembly.always_promote_to_double:
            # NOTE(review): this promotes `values`, but the matrix below is
            # built from `global_values` (already computed), so the promotion
            # appears to have no effect — confirm intended behavior.
            values = promote_to_double_precision(values)
        mat = coo_matrix(
            (global_values, (global_rows, global_cols)),
            shape=(row_grid_dofs, col_grid_dofs),
        ).tocsr()
        # Apply dof transformations where the spaces require them.
        if domain.requires_dof_transformation:
            mat = mat @ domain.dof_transformation
        if dual_to_range.requires_dof_transformation:
            mat = dual_to_range.dof_transformation.T @ mat
        return SparseDiscreteBoundaryOperator(mat)
def assemble_singular_part(
    domain, dual_to_range, parameters, operator_descriptor, device_interface
):
    """Actually assemble the singular kernel contributions.

    Returns (row_indices, col_indices, values) in local dof numbering for all
    coincident / edge-adjacent / vertex-adjacent element pairs.
    """
    from bempp.api.utils.helpers import get_type
    from bempp.core.dispatcher import singular_assembler_dispatcher
    import bempp.api
    precision = operator_descriptor.precision
    kernel_options = operator_descriptor.options
    is_complex = operator_descriptor.is_complex
    grid = domain.grid
    order = parameters.quadrature.singular
    # Duffy-transformed Galerkin rules restricted to the spaces' supports.
    rule = _SingularQuadratureRuleInterfaceGalerkin(
        grid, order, dual_to_range.support, domain.support
    )
    number_of_test_shape_functions = dual_to_range.number_of_shape_functions
    number_of_trial_shape_functions = domain.number_of_shape_functions
    # Flattened quadrature data for all singular element pairs.
    [
        test_points,
        trial_points,
        quad_weights,
        test_elements,
        trial_elements,
        test_offsets,
        trial_offsets,
        weights_offsets,
        number_of_quad_points,
    ] = rule.get_arrays()
    if is_complex:
        result_type = get_type(precision).complex
    else:
        result_type = get_type(precision).real
    # One entry per (test shape fct, trial shape fct, element pair).
    result = _np.zeros(
        number_of_test_shape_functions
        * number_of_trial_shape_functions
        * len(test_elements),
        dtype=result_type,
    )
    with bempp.api.Timer(
        message=(
            f"Singular assembler:{operator_descriptor.identifier}:{device_interface}"
        )
    ):
        # Dispatch to the Numba/OpenCL kernel; fills `result` in place.
        singular_assembler_dispatcher(
            device_interface,
            operator_descriptor,
            grid,
            domain,
            dual_to_range,
            test_points,
            trial_points,
            quad_weights,
            test_elements,
            trial_elements,
            test_offsets,
            trial_offsets,
            weights_offsets,
            number_of_quad_points,
            kernel_options,
            result,
        )
    # Expand element-pair indices into per-shape-function local dof indices
    # matching the layout of `result`.
    irange = _np.arange(number_of_test_shape_functions)
    jrange = _np.arange(number_of_trial_shape_functions)
    i_ind = _np.tile(
        _np.repeat(irange, number_of_trial_shape_functions), len(rule.trial_indices)
    ) + _np.repeat(
        rule.test_indices * number_of_test_shape_functions,
        number_of_test_shape_functions * number_of_trial_shape_functions,
    )
    j_ind = _np.tile(
        _np.tile(jrange, number_of_test_shape_functions), len(rule.trial_indices)
    ) + _np.repeat(
        rule.trial_indices * number_of_trial_shape_functions,
        number_of_test_shape_functions * number_of_trial_shape_functions,
    )
    return (i_ind, j_ind, result)
# Lightweight container for one singular quadrature rule:
# reference test points, reference trial points and the shared weights.
_SingularQuadratureRule = _collections.namedtuple(
    "_QuadratureRule", "test_points trial_points weights"
)
class _SingularQuadratureRuleInterfaceGalerkin(object):
"""Interface for a singular quadrature rule."""
def __init__(self, grid, order, test_support, trial_support):
    """Initialize singular quadrature rule.

    Precomputes Duffy rules for the three singular configurations and
    collects the coincident / edge-adjacent / vertex-adjacent element pairs
    that lie in the support of both spaces.
    """
    self._grid = grid
    self._order = order
    # Filled later by get_arrays().
    self._test_indices = None
    self._trial_indices = None
    self._coincident_rule = _SingularQuadratureRule(
        *_duffy_galerkin.rule(order, "coincident")
    )
    self._edge_adjacent_rule = _SingularQuadratureRule(
        *_duffy_galerkin.rule(order, "edge_adjacent")
    )
    self._vertex_adjacent_rule = _SingularQuadratureRule(
        *_duffy_galerkin.rule(order, "vertex_adjacent")
    )
    # Iterate through the singular pairs and only add those that are
    # in the support of the space.
    self._index_count = {}
    self._coincident_indices = _np.flatnonzero(test_support * trial_support)
    self._index_count["coincident"] = len(self._coincident_indices)
    # test_support and trial_support are boolean arrays.
    # * operation corresponds to and op between the arrays.
    edge_adjacent_pairs = _np.flatnonzero(
        test_support[grid.edge_adjacency[0, :]]
        * trial_support[grid.edge_adjacency[1, :]]
    )
    self._edge_adjacency = grid.edge_adjacency[:, edge_adjacent_pairs]
    vertex_adjacent_pairs = _np.flatnonzero(
        test_support[grid.vertex_adjacency[0, :]]
        * trial_support[grid.vertex_adjacency[1, :]]
    )
    self._vertex_adjacency = grid.vertex_adjacency[:, vertex_adjacent_pairs]
    self._index_count["edge_adjacent"] = self._edge_adjacency.shape[1]
    self._index_count["vertex_adjacent"] = self._vertex_adjacency.shape[1]
    self._index_count["all"] = (
        self._index_count["coincident"]
        + self._index_count["edge_adjacent"]
        + self._index_count["vertex_adjacent"]
    )
@property
def order(self):
"""Return the order."""
return self._order
@property
def coincident_rule(self):
"""Return coincident rule."""
return self._coincident_rule
@property
def edge_adjacent_rule(self):
"""Return edge adjacent rule."""
return self._edge_adjacent_rule
@property
def vertex_adjacent_rule(self):
"""Return vertex adjacent rule."""
return self._vertex_adjacent_rule
@property
def grid(self):
"""Return the grid."""
return self._grid
@property
def edge_adjacency(self):
"""Return the grid edge adjacency information."""
return self._edge_adjacency
@property
def vertex_adjacency(self):
"""Return vertex adjacency."""
return self._vertex_adjacency
@property
def number_of_elements(self):
"""Return the number of elements of the underlying grid."""
return self.grid.number_of_elements
@property
def index_count(self):
"""Return the index count."""
return self._index_count
    @property
    def test_indices(self):
        """Return the test indices of all singular contributions.

        Only available after ``get_arrays`` has been called.
        """
        return self._test_indices
    @property
    def trial_indices(self):
        """Return the trial indices of all singular contributions.

        Only available after ``get_arrays`` has been called.
        """
        return self._trial_indices
def number_of_points(self, adjacency):
"""Return the number of quadrature points for given adjacency."""
return _duffy_galerkin.number_of_quadrature_points(self.order, adjacency)
def get_arrays(self):
"""Return the arrays."""
test_indices, trial_indices = self._vectorize_indices()
test_points, trial_points = self._vectorize_points()
weights = self._vectorize_weights()
test_offsets, trial_offsets, weights_offsets = self._vectorize_offsets()
number_of_quad_points = self._get_number_of_quad_points()
self._test_indices = test_indices
self._trial_indices = trial_indices
arrays = [
test_points,
trial_points,
weights,
test_indices,
trial_indices,
test_offsets,
trial_offsets,
weights_offsets,
number_of_quad_points,
]
return arrays
def _collect_remapped_quad_points_for_edge_adjacent_rule(self, quad_points):
"""
Remap quad points for edge adjacent quadrature rules.
Given a 2xN array of quadrature points, return all possible
combinations of remapped rules, according to the following
order
0: edge (index 0, 1)
1: edge (index 1, 0)
2: edge (index 1, 2)
3: edge (index 2, 1)
4: edge (index 0, 2)
5: edge (index 2, 0)
"""
return _np.hstack(
[
_duffy_galerkin.remap_points_shared_edge(quad_points, 0, 1),
_duffy_galerkin.remap_points_shared_edge(quad_points, 1, 0),
_duffy_galerkin.remap_points_shared_edge(quad_points, 1, 2),
_duffy_galerkin.remap_points_shared_edge(quad_points, 2, 1),
_duffy_galerkin.remap_points_shared_edge(quad_points, 0, 2),
_duffy_galerkin.remap_points_shared_edge(quad_points, 2, 0),
]
)
def _collect_remapped_quad_points_for_vertex_adjacent_rule(self, quad_points):
"""
Remap quad points for vertex adjacent quadrature rules.
Given a 2xN array of quadrature points, return all possible
combinations of remapped rules, according to the following
order
0: vertex (index 0)
1: vertex (index 1)
2: vertex (index 2)
"""
return _np.hstack(
[
_duffy_galerkin.remap_points_shared_vertex(quad_points, 0),
_duffy_galerkin.remap_points_shared_vertex(quad_points, 1),
_duffy_galerkin.remap_points_shared_vertex(quad_points, 2),
]
)
def _compute_edge_offsets(self):
"""Compute offsets for the edge based rule."""
ncoincident = self.number_of_points("coincident")
nedge_adjacent = self.number_of_points("edge_adjacent")
# Offset values is a 3 x 3 matrix such that
# the value (i, j) is the offset of the rule
# associted with the (i, j) remap case,
# where i and j are 0, 1, or 2. The diagonal
# elements are not needed, so just set to -1.
offset_values = _np.array([[-1, 0, 4], [1, -1, 2], [5, 3, -1]])
edge_offsets = ncoincident + nedge_adjacent * offset_values
return edge_offsets
def _compute_vertex_offsets(self):
"""Compute offsets for the vertex based rules."""
ncoincident = self.number_of_points("coincident")
nedge_adjacent = self.number_of_points("edge_adjacent")
nvertex_adjacent = self.number_of_points("vertex_adjacent")
vertex_offsets = (
ncoincident
+ 6 * nedge_adjacent
+ nvertex_adjacent * _np.arange(3, dtype="uint32")
)
return vertex_offsets
def _vectorize_indices(self):
"""Return vector of test and trial indices for sing. integration."""
test_indices = _np.empty(self.index_count["all"], dtype="uint32")
trial_indices = _np.empty(self.index_count["all"], dtype="uint32")
for array, index in zip([test_indices, trial_indices], [0, 1]):
count = self._index_count["coincident"]
array[:count] = self._coincident_indices
array[
count : (count + self.index_count["edge_adjacent"])
] = self.edge_adjacency[index, :]
count += self.index_count["edge_adjacent"]
array[count:] = self.vertex_adjacency[index, :]
return test_indices, trial_indices
def _get_number_of_quad_points(self):
"""Compute an array of local numbers of integration points."""
number_of_quad_points = _np.empty(self.index_count["all"], dtype="uint32")
number_of_quad_points[: self.index_count["coincident"]] = self.number_of_points(
"coincident"
)
number_of_quad_points[
self.index_count["coincident"] : (
self.index_count["coincident"] + self.index_count["edge_adjacent"]
)
] = self.number_of_points("edge_adjacent")
number_of_quad_points[
-self.index_count["vertex_adjacent"] :
] = self.number_of_points("vertex_adjacent")
return number_of_quad_points
def _vectorize_points(self):
"""Return an array of all quadrature points for all different rules."""
test_points = _np.hstack(
[
self.coincident_rule.test_points,
self._collect_remapped_quad_points_for_edge_adjacent_rule(
self.edge_adjacent_rule.test_points
),
self._collect_remapped_quad_points_for_vertex_adjacent_rule(
self.vertex_adjacent_rule.test_points
),
]
)
trial_points = _np.hstack(
[
self.coincident_rule.trial_points,
self._collect_remapped_quad_points_for_edge_adjacent_rule(
self.edge_adjacent_rule.trial_points
),
self._collect_remapped_quad_points_for_vertex_adjacent_rule(
self.vertex_adjacent_rule.trial_points
),
]
)
return test_points, trial_points
def _vectorize_weights(self):
"""Vectorize the quadrature weights."""
weights = _np.hstack(
[
self.coincident_rule.weights,
self.edge_adjacent_rule.weights,
self.vertex_adjacent_rule.weights,
]
)
return weights
def _vectorize_offsets(self):
"""Vectorize the offsets."""
edge_offsets = self._compute_edge_offsets()
vertex_offsets = self._compute_vertex_offsets()
test_offsets = _np.empty(self.index_count["all"], dtype="uint32")
trial_offsets = _np.empty(self.index_count["all"], dtype="uint32")
weights_offsets = _np.empty(self.index_count["all"], dtype="uint32")
test_offsets[: self.index_count["coincident"]] = 0
test_offsets[
self.index_count["coincident"] : (
self.index_count["coincident"] + self.index_count["edge_adjacent"]
)
] = edge_offsets[self.edge_adjacency[2, :], self.edge_adjacency[3, :]]
test_offsets[
(self.index_count["coincident"] + self.index_count["edge_adjacent"]) :
] = vertex_offsets[self.vertex_adjacency[2, :]]
trial_offsets[: self.index_count["coincident"]] = _np.zeros(
self.index_count["coincident"]
)
trial_offsets[
self.index_count["coincident"] : (
self.index_count["coincident"] + self.index_count["edge_adjacent"]
)
] = edge_offsets[self.edge_adjacency[4, :], self.edge_adjacency[5, :]]
trial_offsets[
(self.index_count["coincident"] + self._index_count["edge_adjacent"]) :
] = vertex_offsets[self.vertex_adjacency[3, :]]
weights_offsets[: self.index_count["coincident"]] = 0
weights_offsets[
self.index_count["coincident"] : (
self.index_count["coincident"] + self.index_count["edge_adjacent"]
)
] = self.number_of_points("coincident")
weights_offsets[-self.index_count["vertex_adjacent"] :] = self.number_of_points(
"coincident"
) + self.number_of_points("edge_adjacent")
return test_offsets, trial_offsets, weights_offsets
| 2.25 | 2 |
benchmarks/reinforcement/tensorflow/minigo/training/fp32/model_init.py | hekaplex/resnet_dl | 0 | 12757583 | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from common.base_model_init import BaseModelInitializer
import os
import argparse
class ModelInitializer(BaseModelInitializer):
    """Model initializer for minigo.

    Parses the benchmark-specific arguments and builds the command line
    for single-node, multi-node, or large-scale multi-node training.
    """

    def __init__(self, args, custom_args=None, platform_util=None):
        # Fixed a mutable default argument (custom_args=[]): a shared
        # list default is reused across all instances and can leak state.
        if custom_args is None:
            custom_args = []
        super(ModelInitializer, self).__init__(args, custom_args, platform_util)

        arg_parser = argparse.ArgumentParser(description='Parse additional args')
        arg_parser.add_argument(
            "--quantization", help="quantization flag",
            dest="quantization", default="False")
        arg_parser.add_argument(
            "--large-scale", help="train on large scale",
            dest="large_scale", default="False")
        arg_parser.add_argument(
            "--num-train-nodes", help="number of train nodes",
            dest="num_train_nodes", default=0, type=int)
        arg_parser.add_argument(
            "--num-eval-nodes", help="number of evaluation nodes",
            dest="num_eval_nodes", default=0, type=int)
        arg_parser.add_argument(
            "--multi-node", help="train on large scale",
            dest="multi_node", default="False")
        # Unknown args are deliberately ignored; other initializers may
        # consume them.
        self.additional_args, _unknown_args = arg_parser.parse_known_args(custom_args)

        if self.additional_args.large_scale == "True" and self.additional_args.multi_node == "True":
            # multi-node training mode with large scale
            self.cmd = "./run_mn.sh "
            self.cmd += " {0}".format(self.additional_args.num_train_nodes)
            self.cmd += " {0}".format(self.additional_args.num_eval_nodes)
            self.cmd += " {0}".format(self.additional_args.quantization)
        elif self.additional_args.large_scale == "False" and self.additional_args.multi_node == "True":
            # multi-node training mode
            self.cmd = "./run_mn.sh "
            self.cmd += " {0}".format(self.additional_args.num_train_nodes)
            self.cmd += " {0}".format(self.additional_args.quantization)
        else:
            # single-node training mode
            self.cmd = "./run.sh "
            self.cmd += " {0}".format(self.additional_args.quantization)

    def run(self):
        """Run the training script from the model source directory."""
        org_path = os.getcwd()
        os.chdir(self.args.model_source_dir)
        self.run_command(self.cmd)
        # Restore the original working directory afterwards.
        os.chdir(org_path)
| 2.09375 | 2 |
apps/core/utils.py | mehrbodjavadi79/AIC21-Backend | 3 | 12757584 | <filename>apps/core/utils.py
import json
import string
import random
from django.conf import settings
import telegram
LETTERS = string.ascii_letters
NUMBERS = string.digits
PUNCTUATION = string.punctuation
def send_email(subject, template_name, context,
               from_email=settings.EMAIL_HOST_USER,
               receipts=None, file_path=None, file_name=None,
               file_content=None, mime_type=None):
    """Render *template_name* with *context* and send it as an email.

    The rendered HTML is attached as an alternative to its stripped
    plain-text version. An attachment can be supplied either by path
    (*file_path*) or as in-memory content (*file_content*).
    """
    from django.core.mail import DEFAULT_ATTACHMENT_MIME_TYPE
    from django.core.mail.message import EmailMultiAlternatives
    from django.template.loader import render_to_string
    from django.utils.html import strip_tags

    html_body = render_to_string(template_name, context=context)
    text_body = strip_tags(html_body)

    email = EmailMultiAlternatives(
        subject=subject,
        body=text_body,
        from_email=from_email,
        to=receipts if receipts is not None else [],
    )
    email.attach_alternative(html_body, 'text/html')

    if file_path:
        email.attach_file(file_path, mimetype=DEFAULT_ATTACHMENT_MIME_TYPE)
    if file_content:
        email.attach(filename=file_name, content=file_content,
                     mimetype=mime_type)
    email.send()
def send_to_telegram(dict):
    """Post *dict* (JSON-serialized) to the alert Telegram channel."""
    # NOTE(review): the parameter name shadows the builtin ``dict``;
    # renaming it would be cleaner but could break keyword callers.
    # NOTE(review): the bot token is hard coded below; it should live in
    # settings/environment configuration.
    # todo set proxy after deploy
    # REQUEST_KWARGS = {
    #     # "USERNAME:PASSWORD@" is optional, if you need authentication:
    #     'proxy_url': 'http://127.0.0.1:12733/',
    # }
    bot = telegram.Bot(token='<KEY>')
    bot.send_message("@ai_challange_alert", json.dumps(dict, indent=4))
def get_password_length():
    """Prompt the user for a password length and return it as an int."""
    answer = input("How long do you want your password: ")
    return int(answer)
def password_generator(length=32):
    """Return a random password of *length* printable characters.

    Characters are drawn from ASCII letters, digits and punctuation.
    Uses the ``secrets`` module, which is appropriate for passwords;
    the previous implementation used ``random`` (not cryptographically
    secure) and performed a pointless ``shuffle`` before sampling with
    replacement.
    """
    import secrets

    alphabet = string.ascii_letters + string.digits + string.punctuation
    return ''.join(secrets.choice(alphabet) for _ in range(length))
| 2.28125 | 2 |
Pyto/Samples/SciKit-Image/plot_edge_filter.py | snazari/Pyto | 701 | 12757585 | """
==============
Edge operators
==============
Edge operators are used in image processing within edge detection algorithms.
They are discrete differentiation operators, computing an approximation of the
gradient of the image intensity function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.data import camera
from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, \
scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h
# Compare Roberts and Sobel edge detection on the camera test image.
image = camera()
edge_roberts = roberts(image)
edge_sobel = sobel(image)

fig, ax = plt.subplots(ncols=2, sharex=True, sharey=True,
                       figsize=(8, 4))

ax[0].imshow(edge_roberts, cmap=plt.cm.gray)
ax[0].set_title('Roberts Edge Detection')

ax[1].imshow(edge_sobel, cmap=plt.cm.gray)
ax[1].set_title('Sobel Edge Detection')

for a in ax:
    a.axis('off')

plt.tight_layout()
plt.show()
######################################################################
# Different operators compute different finite-difference approximations of
# the gradient. For example, the Scharr filter results in a less rotational
# variance than the Sobel filter that is in turn better than the Prewitt
# filter [1]_ [2]_ [3]_. The difference between the Prewitt and Sobel filters
# and the Scharr filter is illustrated below with an image that is the
# discretization of a rotation- invariant continuous function. The
# discrepancy between the Prewitt and Sobel filters, and the Scharr filter is
# stronger for regions of the image where the direction of the gradient is
# close to diagonal, and for regions with high spatial frequencies. For the
# example image the differences between the filter results are very small and
# the filter results are visually almost indistinguishable.
#
# .. [1] https://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators
#
# .. [2] <NAME>, <NAME>, and <NAME>. Principles of filter design.
# In Handbook of Computer Vision and Applications. Academic Press,
# 1999.
#
# .. [3] https://en.wikipedia.org/wiki/Prewitt_operator
x, y = np.ogrid[:100, :100]
# Rotation-invariant image with different spatial frequencies
img = np.exp(1j * np.hypot(x, y) ** 1.3 / 20.).real

edge_sobel = sobel(img)
edge_scharr = scharr(img)
edge_prewitt = prewitt(img)

# Differences between the filter responses; shared vmax keeps the two
# difference images on the same colour scale.
diff_scharr_prewitt = edge_scharr - edge_prewitt
diff_scharr_sobel = edge_scharr - edge_sobel
max_diff = np.max(np.maximum(diff_scharr_prewitt, diff_scharr_sobel))

fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True,
                         figsize=(8, 8))
ax = axes.ravel()

ax[0].imshow(img, cmap=plt.cm.gray)
ax[0].set_title('Original image')

ax[1].imshow(edge_scharr, cmap=plt.cm.gray)
ax[1].set_title('Scharr Edge Detection')

ax[2].imshow(diff_scharr_prewitt, cmap=plt.cm.gray, vmax=max_diff)
ax[2].set_title('Scharr - Prewitt')

ax[3].imshow(diff_scharr_sobel, cmap=plt.cm.gray, vmax=max_diff)
ax[3].set_title('Scharr - Sobel')

for a in ax:
    a.axis('off')

plt.tight_layout()
plt.show()
######################################################################
# As in the previous example, here we illustrate the rotational invariance of
# the filters. The top row shows a rotationally invariant image along with the
# angle of its analytical gradient. The other two rows contain the difference
# between the different gradient approximations (Sobel, Prewitt, Scharr &
# Farid) and analytical gradient.
#
# The Farid & Simoncelli derivative filters [4]_, [5]_ are the most
# rotationally invariant, but require a 5x5 kernel, which is computationally
# more intensive than a 3x3 kernel.
#
# .. [4] <NAME>. and <NAME>., "Differentiation of discrete
# multidimensional signals", IEEE Transactions on Image Processing 13(4):
# 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
#
# .. [5] Wikipedia, "Farid and Simoncelli Derivatives." Available at:
# <https://en.wikipedia.org/wiki/Image_derivatives#Farid_and_Simoncelli_Derivatives>
# Rotation-invariant test image together with its analytical partial
# derivatives (chain rule applied to sin(x**2 + y**2)).
x, y = np.mgrid[-10:10:255j, -10:10:255j]
img = np.sin(x ** 2 + y ** 2)

imgx = 2 * x * np.cos(x ** 2 + y ** 2)
imgy = 2 * y * np.cos(x ** 2 + y ** 2)
def angle(dx, dy):
    """Return the gradient orientation, wrapped into [0, pi)."""
    theta = np.arctan2(dy, dx)
    return np.mod(theta, np.pi)
# Orientation of the analytical gradient and of each filter's estimate.
true_angle = angle(imgx, imgy)

angle_farid = angle(farid_h(img), farid_v(img))
angle_sobel = angle(sobel_h(img), sobel_v(img))
angle_scharr = angle(scharr_h(img), scharr_v(img))
angle_prewitt = angle(prewitt_h(img), prewitt_v(img))
def diff_angle(angle_1, angle_2):
    """Return the smallest difference between two orientations modulo pi."""
    delta = np.abs(angle_1 - angle_2)
    return np.minimum(np.pi - delta, delta)
# Orientation error of each filter against the analytical gradient.
diff_farid = diff_angle(true_angle, angle_farid)
diff_sobel = diff_angle(true_angle, angle_sobel)
diff_scharr = diff_angle(true_angle, angle_scharr)
diff_prewitt = diff_angle(true_angle, angle_prewitt)

fig, axes = plt.subplots(nrows=3, ncols=2, sharex=True, sharey=True,
                         figsize=(8, 8))
ax = axes.ravel()

ax[0].imshow(img, cmap=plt.cm.gray)
ax[0].set_title('Original image')

ax[1].imshow(true_angle, cmap=plt.cm.hsv)
ax[1].set_title('Analytical gradient angle')

# Shared vmin/vmax so the four error maps are directly comparable.
ax[2].imshow(diff_sobel, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[2].set_title('Sobel error')

ax[3].imshow(diff_prewitt, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[3].set_title('Prewitt error')

ax[4].imshow(diff_scharr, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[4].set_title('Scharr error')

cax = ax[5].imshow(diff_farid, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[5].set_title('Farid error')

fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.90, 0.10, 0.02, 0.50])
fig.colorbar(cax, cax=cbar_ax, ticks=[0, 0.01, 0.02])

for a in ax:
    a.axis('off')

plt.show()
| 3.96875 | 4 |
app/home/routes.py | asad70/reddit-analysis | 0 | 12757586 | # -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
import time
from flask.globals import request
from app.home import blueprint
from flask import render_template, redirect, url_for
from flask_login import login_required, current_user
from app import login_manager
from jinja2 import TemplateNotFound
from flask import jsonify
import matplotlib.pyplot as plt
import nltk
import pandas as pd
import praw
import squarify
from flask import Flask, render_template
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import os
from app.settings import APP_STATIC
from data import *
from app.base.models import User, Picks
from app import db
nltk.download('stopwords')
set(stopwords.words('english'))
@blueprint.route('/index')
#@login_required
def index1():
    """Render the reddit analysis landing page."""
    return render_template('core/reddit-index.html')
@blueprint.route('/index1')
#@login_required
def index():
    """Render the dashboard with every stored pick row."""
    # db.drop_all()
    # db.create_all()
    #found=Picks.query.all()
    arr=[]
    for i in Picks.query.all():
        print(i.__dict__)
        temp = i
        #temp.time = int(time.mktime(temp.time.timetuple())) * 1000
        # Strip SQLAlchemy's internal bookkeeping attribute so the row
        # can be passed to the template as a plain dict.
        del temp._sa_instance_state
        arr.append(temp.__dict__)
    return render_template('index.html', time=12345, df=arr)
@blueprint.route('/reddit-index')
def my_form():
    """Serve the analysis parameter form (GET)."""
    return render_template('core/reddit-index.html')
@blueprint.route('/reddit-index', methods=['POST'])
def my_form_input():
    """Read the analysis parameters from the submitted form.

    Every empty field falls back to a default value.  The collected
    parameters are currently only printed, not stored.
    """
    # Renamed the local from ``input`` to ``params`` — it shadowed the
    # builtin ``input`` function.
    params = {
        'subs': request.form['subs'] if request.form['subs'] else ['wallstreetbets'],
        'post_flairs': request.form['post_flairs'] if request.form['post_flairs'] else {'Daily Discussion', 'Weekend Discussion', 'Discussion'},
        'goodAuth': request.form['goodAuth'] if request.form['goodAuth'] else {'AutoModerator'},
        'uniqueCmt': request.form['uniqueCmt'] if request.form['uniqueCmt'] else True,
        'ignoreAuthP': request.form['ignoreAuthP'] if request.form['ignoreAuthP'] else {'example'},
        # NOTE(review): the default below contains a trailing comma inside
        # the string ('example,') — possibly a typo; kept for parity.
        'ignoreAuthC': request.form['ignoreAuthC'] if request.form['ignoreAuthC'] else {'example,'},
        'upvoteRatio': request.form['upvoteRatio'] if request.form['upvoteRatio'] else 0.70,
        'ups': request.form['ups'] if request.form['ups'] else 20,
        'limit': request.form['limit'] if request.form['limit'] else 500,
        'upvotes': request.form['upvotes'] if request.form['upvotes'] else 2,
        'picks': request.form['picks'] if request.form['picks'] else 10,
        'picks_ayz': request.form['picks_ayz'] if request.form['picks_ayz'] else 5,
    }
    print("input is", params)
    return render_template('core/reddit-index.html')
@ blueprint.route('/data', methods=['POST', 'GET'])
def my_form_post():
    """Scrape r/wallstreetbets, count ticker mentions, run VADER
    sentiment analysis on the top picks, persist the results and render
    the data page.
    """
    import time
    start_time = time.time()
    ctime = time.ctime()
    print('time is', time.ctime())
    # NOTE(review): API credentials are hard coded; they belong in
    # configuration.
    reddit = praw.Reddit(user_agent="Comment Extraction",
                         client_id="ZM9jcd0nyXvtlA",
                         client_secret="<KEY>",
                         username="",
                         password="")

    '''############################################################################'''
    # set the program parameters
    subs = ['wallstreetbets']     # sub-reddit to search
    # posts flairs to search || None flair is automatically considered
    post_flairs = {'Daily Discussion', 'Weekend Discussion', 'Discussion'}
    # authors whose comments are allowed more than once
    goodAuth = {'AutoModerator'}
    uniqueCmt = True              # allow one comment per author per symbol
    ignoreAuthP = {'example'}     # authors to ignore for posts
    ignoreAuthC = {'example'}     # authors to ignore for comment
    upvoteRatio = 0.70         # upvote ratio for post to be considered, 0.70 = 70%
    ups = 20       # define # of upvotes, post is considered if upvotes exceed this #
    limit = 5      # define the limit, comments 'replace more' limit
    upvotes = 2    # define # of upvotes, comment is considered if upvotes exceed this #
    picks = 10     # define # of picks here, prints as "Top ## picks are:"
    picks_ayz = 5   # define # of picks for sentiment analysis
    '''############################################################################'''

    posts, count, c_analyzed, tickers, titles, a_comments = 0, 0, 0, {}, [], {}
    cmt_auth = {}
    num = 0
    comm = 0
    for sub in subs:
        subreddit = reddit.subreddit(sub)
        hot_python = subreddit.hot()    # sorting posts by hot
        # Extracting comments, symbols from subreddit
        print("running", str(hot_python))
        for submission in hot_python:
            flair = submission.link_flair_text
            author = submission.author.name
            # Progress file, overwritten on each post so /status_bar can
            # show how far the scrape has got.
            file = open(os.path.join(APP_STATIC, "output/sample.py"),
                        "w", encoding='utf-8')
            hotlist = [i for i in hot_python]
            file.write("start time was %s num is %d and hotlist is %s " %
                       (str(time.ctime()), num, str(hotlist)))
            print('num is', num)
            file.close()
            num += 1

            # checking: post upvote ratio # of upvotes, post flair, and author
            if submission.upvote_ratio >= upvoteRatio and submission.ups > ups and (flair in post_flairs or flair is None) and author not in ignoreAuthP:
                submission.comment_sort = 'new'
                comments = submission.comments
                titles.append(submission.title)
                posts += 1
                try:
                    submission.comments.replace_more(limit=limit)
                    for comment in comments:
                        # Append per-comment progress for /status_bar.
                        file = open(os.path.join(
                            APP_STATIC, "output/sample.py"), "a", encoding='utf-8')
                        file.write("comnum is %d and comm is %s " %
                                   (comm, str(comment)))
                        file.close()
                        comm += 1
                        #print("comnum is", comm)
                        # try except for deleted account?
                        try:
                            auth = comment.author.name
                        except:
                            pass
                        c_analyzed += 1

                        # checking: comment upvotes and author
                        if comment.score > upvotes and auth not in ignoreAuthC:
                            split = comment.body.split(" ")
                            for word in split:
                                word = word.replace("$", "")
                                # upper = ticker, length of ticker <= 5, excluded words,
                                if word.isupper() and len(word) <= 5 and word not in blacklist and word in us:

                                    # unique comments, try/except for key errors
                                    if uniqueCmt and auth not in goodAuth:
                                        try:
                                            if auth in cmt_auth[word]:
                                                break
                                        except:
                                            pass

                                    # counting tickers
                                    if word in tickers:
                                        tickers[word] += 1
                                        a_comments[word].append(comment.body)
                                        cmt_auth[word].append(auth)
                                        count += 1
                                    else:
                                        tickers[word] = 1
                                        cmt_auth[word] = [auth]
                                        a_comments[word] = [comment.body]
                                        count += 1
                except Exception as e:
                    print(e)

    # sorts the dictionary by mention count, descending
    symbols = dict(
        sorted(tickers.items(), key=lambda item: item[1], reverse=True))
    top_picks = list(symbols.keys())[0:picks]
    # NOTE(review): this rebinds the local name ``time`` from the module
    # to a float — no further time.* call is possible below this point.
    time = (time.time() - start_time)

    # print top picks
    print("It took {t:.2f} seconds to analyze {c} comments in {p} posts in {s} subreddits.\n".format(
        t=time, c=c_analyzed, p=posts, s=len(subs)))
    print("Posts analyzed saved in titles")
    # for i in titles: print(i)  # prints the title of the posts analyzed

    print(f"\n{picks} most mentioned picks: ")
    times = []
    top = []
    for i in top_picks:
        print(f"{i}: {symbols[i]}")
        times.append(symbols[i])
        top.append(f"{i}: {symbols[i]}")

    # Applying Sentiment Analysis
    scores, s = {}, {}
    vader = SentimentIntensityAnalyzer()
    # adding custom words from data.py
    vader.lexicon.update(new_words)

    picks_sentiment = list(symbols.keys())[0:picks_ayz]
    for symbol in picks_sentiment:
        stock_comments = a_comments[symbol]
        for cmnt in stock_comments:
            score = vader.polarity_scores(cmnt)
            if symbol in s:
                s[symbol][cmnt] = score
            else:
                s[symbol] = {cmnt: score}
            if symbol in scores:
                for key, _ in score.items():
                    scores[symbol][key] += score[key]
            else:
                scores[symbol] = score

        # calculating avg.
        for key in score:
            scores[symbol][key] = scores[symbol][key] / symbols[symbol]
            scores[symbol][key] = "{pol:.3f}".format(pol=scores[symbol][key])

    # Persist both the sentiment scores and the mention counts.
    picksdb = Picks(pick=scores)
    timesdb = Picks(pick=[times, top, top_picks])
    # print(picks)
    db.session.add(picksdb)
    db.session.add(timesdb)
    db.session.commit()

    # printing sentiment analysis
    print(f"\nSentiment analysis of top {picks_ayz} picks:")
    df = pd.DataFrame(scores)
    df.index = ['Bearish', 'Neutral', 'Bullish', 'Total/Compound']
    df = df.T
    print(df)

    # Date Visualization
    # most mentioned picks
    squarify.plot(sizes=times, label=top, alpha=.7)
    plt.axis('off')
    plt.title(f"{picks} most mentioned picks")
    # plt.show()

    # Sentiment analysis
    df = df.astype(float)
    colors = ['red', 'springgreen', 'forestgreen', 'coral']
    df.plot(kind='bar', color=colors,
            title=f"Sentiment analysis of top {picks_ayz} picks:")
    # plt.show()
    print('done')

    # Dump the final results so /output can serve them later.
    file = open(os.path.join(APP_STATIC, "output/final_output.py"),
                "w", encoding='utf-8')
    file.write("start time was %s /n/n top picks are %s and df is %s" %
               (str(ctime), str(top_picks), str(df)))
    print('num is', num)
    file.close()
    return render_template('core/reddit-data.html', result='done', final=df, t=ctime, c=c_analyzed, p=posts, s=len(subs))
@ blueprint.route('/visualize', methods=['POST', 'GET'])
def visualize():
    """Render the data page with a static 'done' marker (debug helper)."""
    return render_template('core/reddit-data.html', result='done', final='ok')
@blueprint.route('/status_bar', methods=['POST', 'GET'])
def status_bar():
    """Render the current contents of the scrape progress file."""
    # Context manager guarantees the handle is closed even if read() fails
    # (the original leaked the handle on error).
    with open(os.path.join(APP_STATIC, "output/sample.py"), "r") as file:
        stat = file.read()
    # NOTE(review): adding an admin user with hard coded credentials on
    # every status poll looks unintentional and is a security concern;
    # kept for behavioural compatibility, but it should be removed.
    admin = User(username='admin', email='<EMAIL>', password='<PASSWORD>')
    db.session.add(admin)
    print(User.query.all())
    return render_template('core/reddit-data.html', final=stat, result='read complete')
@blueprint.route('/output', methods=['POST', 'GET'])
def output():
    """Render the final analysis output stored on disk."""
    # Context manager guarantees the handle is closed even if read()
    # fails (the original leaked the handle on error).
    with open(os.path.join(APP_STATIC, 'output/final_output.py'), "r") as file:
        stat = file.read()
    print("stat is %s" % stat)
    return render_template('core/reddit-output.html', arg=stat)
@ blueprint.route('/test', methods=['POST', 'GET'])
def test():
    """Debug endpoint: insert a dummy Picks row and acknowledge."""
    picks = Picks(pick='hoho', bearish='whooter', bullish='what')
    db.session.add(picks)
    db.session.commit()
    return jsonify({'result': 'ohk'})
@ blueprint.route('/test2', methods=['POST', 'GET'])
def test2():
    """Debug endpoint: query a hard coded pick and return a marker."""
    # NOTE(review): ``hoho`` and ``found`` are unused and the query is
    # executed twice; kept byte-identical for parity.
    hoho = 'hoho'
    found=Picks.query.filter_by(pick='hoho').first()
    print((Picks.query.filter_by(pick='hoho').first()))
    return 'ohkk'
@ blueprint.route('/core/settings', methods=['GET'])
def settingsGet():
    """Render the settings page (GET)."""
    return render_template('core/settings.html',delete_db=delete_db, create_db=create_db)
@ blueprint.route('/core/settings', methods=['POST'])
def settings():
    """Look up a Picks row by the id submitted from the settings form."""
    query = request.form['query']
    found = Picks.query.filter_by(id=query).first()
    print(found)
    return render_template('core/settings.html', found=found, delete_db=delete_db, create_db=create_db)
def delete_db():
    """Report database deletion.

    NOTE(review): the actual ``db.drop_all()`` call is commented out,
    so nothing is deleted — the returned message is misleading.
    """
    #db.drop_all()
    return 'DB deleted'
def create_db():
    """Create all database tables and report success."""
    db.create_all()
    return 'All DB created'
@blueprint.route('/core/<template>')
def route_core_template(template):
    """Serve a template from the ``core/`` template directory.

    Names without an ``.html`` suffix get the suffix and the ``core/``
    prefix added; names already ending in ``.html`` are rendered as
    given (original behaviour, preserved). Returns the 404 page if the
    template does not exist and the 500 page on any other error.
    """
    try:
        if not template.endswith('.html'):
            template += '.html'
            template = 'core/' + template
        return render_template(template)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        return render_template('page-500.html'), 500
@blueprint.route('/<template>')
def route_template(template):
    """Serve a top-level template by name, appending ``.html`` if absent.

    Returns the 404 page if the template does not exist and the 500
    page on any other error.
    """
    try:
        if not template.endswith('.html'):
            template += '.html'
        return render_template(template)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        return render_template('page-500.html'), 500
| 2.296875 | 2 |
examples/example3.py | olitheolix/kubernetes_asyncio | 1 | 12757587 | import asyncio
from kubernetes_asyncio import client, config
from kubernetes_asyncio.stream import WsApiClient
async def main():
    """Find a busybox pod and run a command inside it over a websocket."""
    # Configs can be set in Configuration class directly or using helper
    # utility. If no argument provided, the config will be loaded from
    # default location.
    # In kubernetes_asyncio ``load_kube_config`` is a coroutine and must
    # be awaited — the original call produced an un-awaited coroutine,
    # so the kube config was never actually loaded.
    await config.load_kube_config()

    v1 = client.CoreV1Api()

    print("Try to find a pod with busybox (name busybox*) ...")
    ret = await v1.list_pod_for_all_namespaces()
    for i in ret.items:
        if i.metadata.name.startswith('busybox'):
            pod = i.metadata.name
            namespace = i.metadata.namespace
            print('Buxy box', pod, 'namespace', namespace)
            break
    else:
        # for/else: only reached when no pod name matched.
        print('Busybox not found !')
        return

    # Exec requires the websocket-based API client.
    v1_ws = client.CoreV1Api(api_client=WsApiClient())

    exec_command = [
        '/bin/sh',
        '-c',
        'echo This message goes to stderr >&2; echo This message goes to stdout']

    resp = v1_ws.connect_get_namespaced_pod_exec(pod, namespace,
                                                 command=exec_command,
                                                 stderr=True, stdin=False,
                                                 stdout=True, tty=False)

    ret = await resp
    print("Response: ", ret)
if __name__ == '__main__':
    # ``asyncio.run`` creates, runs and closes the event loop; the manual
    # get_event_loop()/run_until_complete()/close() pattern is deprecated
    # in modern Python.
    asyncio.run(main())
| 2.265625 | 2 |
sfpl2.py | htomtom/AZGomo | 5 | 12757588 | # self play
# Self-play game generation driver: loads an engine and produces
# training and validation data files named after the given run id.
print("loading...")
import numpy as np
import tensorflow as tf
# Keep TensorFlow single-threaded so parallel self-play workers do not
# oversubscribe the CPU.
tf.config.threading.set_inter_op_parallelism_threads(1)
tf.config.threading.set_intra_op_parallelism_threads(1)
import MCTS
import sys
# argv[1] is the run id used to name the output files.
if(len(sys.argv)<=1):
    print("Error! No argument given! Quiting.")
    quit()
alwaysNew=False
fpu,fpu1=1.3,1.0
t0,t1=5,5  # time budgets for training / validation generation
if(alwaysNew):#use newest engine to generate
    MCTS.loadEngine(2,"./RNG64.tf")
    MCTS.setFPU(fpu,fpu1)
    MCTS.timeReset()
    MCTS.selfPlay(t0,MCTS.evaluatePositionA2,"./games/dat_train"+sys.argv[1]+".npz",40)
    #vldn time is 2 as the argument is the total time from calling time reset
    MCTS.selfPlay(t1,MCTS.evaluatePositionA2,"./games/dat_vlidn"+sys.argv[1]+".npz",10)
else:#use best engine by test to generate
    MCTS.loadEngine(1,"./weights/RNG64.tf")
    MCTS.setFPU(fpu,fpu1)
    MCTS.timeReset()
    MCTS.selfPlay(t0,MCTS.evaluatePositionA,"./games/dat_train"+sys.argv[1]+".npz",40)
    #vldn time is 2 as the argument is the total time from calling time reset
    MCTS.selfPlay(t1,MCTS.evaluatePositionA,"./games/dat_vlidn"+sys.argv[1]+".npz",10)
# npz_t=np.load("dat_train.npz")
# x_tr=npz_t['arr_0']
# y_tr=npz_t['arr_1']
# for i in range(len(y_tr)):
# MCTS.showHeatMap(x_tr[i],y_tr[i])
| 2.140625 | 2 |
VA/main/models/blocks.py | YuJaceKim/Activity-Recognition-with-Combination-of-Deeply-Learned-Visual-Attention-and-Pose-Estimation | 343 | 12757589 | import tensorflow as tf
from keras.models import Model
from deephar.layers import *
from deephar.utils import *
def conv_block(inp, kernel_size, filters, last_act=True):
    """Bottleneck residual block with a 1x1 projection shortcut.

    *filters* is a triple (reduce, conv, expand) of channel counts.
    """
    nf1, nf2, nf3 = filters

    out = conv_bn_act(inp, nf1, (1, 1))
    out = conv_bn_act(out, nf2, kernel_size)
    out = conv_bn(out, nf3, (1, 1))

    # Project the input so channel counts match before the addition.
    shortcut = conv_bn(inp, nf3, (1, 1))
    out = add([out, shortcut])

    return Activation('relu')(out) if last_act else out
def identity_block(inp, kernel_size, filters, last_act=True):
    """Bottleneck residual block with an identity shortcut.

    *filters* is a triple (reduce, conv, expand); the expand count must
    match the input's channel count for the addition to be valid.
    """
    nf1, nf2, nf3 = filters

    out = conv_bn_act(inp, nf1, (1, 1))
    out = conv_bn_act(out, nf2, kernel_size)
    out = conv_bn(out, nf3, (1, 1))

    out = add([out, inp])

    return Activation('relu')(out) if last_act else out
def stem_inception_v4(x, image_div=8):
    """Entry-flow network (stem) *based* on Inception_v4.

    *image_div* selects how much the spatial resolution is reduced
    (4, 8, 16 or 32).

    Note: all ``is``/``is not`` comparisons against int literals were
    replaced by ``==``/``!=`` — identity comparison of ints only worked
    by virtue of CPython's small-int caching and raises a SyntaxWarning
    on modern Python.
    """
    assert image_div in [4, 8, 16, 32], \
        'Invalid image_div ({}).'.format(image_div)

    x = conv_bn_act(x, 32, (3, 3), strides=(2, 2))
    x = conv_bn_act(x, 32, (3, 3))
    if image_div == 32:
        x = MaxPooling2D((2, 2))(x)
    x = conv_bn_act(x, 64, (3, 3))
    a = conv_bn_act(x, 96, (3, 3), strides=(2, 2))
    b = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = concatenate([a, b])

    a = conv_bn_act(x, 64, (1, 1))
    a = conv(a, 96, (3, 3))
    b = conv_bn_act(x, 64, (1, 1))
    b = conv_bn_act(b, 64, (5, 1))
    b = conv_bn_act(b, 64, (1, 5))
    b = conv(b, 96, (3, 3))
    x = concatenate([a, b])
    x = BatchNormalization(axis=-1, scale=False)(x)

    if image_div != 4:
        a = act_conv_bn(x, 192, (3, 3), strides=(2, 2))
        b = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
        x = concatenate([a, b])

    if image_div in [16, 32]:
        a = act_conv_bn(x, 192, (3, 3), strides=(2, 2))
        b = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
        x = concatenate([a, b])

    if image_div == 4:
        x = residual(x, int_size=112, out_size=2*192+64, convtype='normal',
                name='residual0')
    else:
        x = residual(x, int_size=144, out_size=3*192, convtype='normal',
                name='residual0')

    return x
def stem_residual_eccv(x, image_div=8):
    """Entry-flow network (stem) *based* on ResNet ('residual' option).

    Args:
        x: input 4D image tensor.
        image_div: total spatial downsampling factor; one of 4, 8, 16, 32.

    Returns:
        The stem output tensor.
    """
    assert image_div in [4, 8, 16, 32], \
        'Invalid image_div ({}).'.format(image_div)

    x = conv_bn_act(x, 64, (7, 7), strides=(2, 2), padding='same')
    a = conv_bn_act(x, 128, (3, 3), padding='same')
    b = conv_bn_act(x, 128, (1, 1), padding='same')
    x = add([a, b])
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = residual(x, int_size=128, out_size=256, convtype='normal', name='rn0')
    x = residual(x, int_size=128, out_size=256, convtype='normal', name='rn1')
    # Bug fix: compare ints with '==' instead of 'is' (identity comparison
    # on int literals is implementation-dependent; SyntaxWarning on 3.8+).
    if image_div == 4:
        x = residual(x, out_size=256, convtype='normal', name='rn3')
    else:
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
        x = residual(x, int_size=192, out_size=384, convtype='normal',
                     name='rn3')
        x = residual(x, int_size=192, out_size=384, convtype='normal',
                     name='rn4')
    if image_div in [16, 32]:
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
        x = residual(x, int_size=256, out_size=512, convtype='normal',
                     name='rn5')
        x = residual(x, int_size=256, out_size=512, convtype='normal',
                     name='rn6')
    if image_div == 32:
        x = MaxPooling2D((2, 2), strides=(2, 2), padding='same')(x)
    return x
def reception_block(x, num_levels, kernel_size, int_size=None,
        convtype='depthwise', name=None):
    """Recursive hourglass block: each level applies a residual skip branch
    and a downsample/recurse/upsample branch, then adds the two.

    Args:
        x: input 4D tensor.
        num_levels: recursion depth of the hourglass.
        kernel_size: kernel size forwarded to the residual units.
        int_size: intermediate channel count for the residual units.
        convtype: residual convolution type ('depthwise' by default).
        name: unused here; kept for interface symmetry with sibling blocks.
    """
    def hourglass(x, n):
        # Skip branch at the current resolution.
        up1 = residual(x, kernel_size=kernel_size, int_size=int_size,
                convtype=convtype)
        # Downsampled branch.
        low = MaxPooling2D((2, 2))(x)
        if n == num_levels:
            # At the outermost level, halve the channel count first.
            low = act_conv_bn(low, int(K.int_shape(x)[-1] / 2), (1, 1))
        low = residual(low, kernel_size=kernel_size, int_size=int_size,
                convtype=convtype)
        if n > 2:
            # Recurse one level deeper.
            low = hourglass(low, n-1)
        else:
            low = residual(low, kernel_size=kernel_size,
                    int_size=int_size,
                    convtype=convtype)
        if n == num_levels:
            # Restore the original channel count so the add is valid.
            low = residual(low, kernel_size=kernel_size,
                    out_size=K.int_shape(x)[-1], int_size=int_size,
                    convtype=convtype)
        else:
            low = residual(low, kernel_size=kernel_size,
                    int_size=int_size, convtype=convtype)
        up2 = UpSampling2D((2, 2))(low)
        x = add([up1, up2])
        return x

    x = hourglass(x, num_levels)
    return x
def build_keypoints_regressor(input_shape, dim, num_maps, sam_model, prob_model,
        name=None, verbose=0):
    """Build a model that regresses keypoint coordinates and visibility
    probabilities from one or more heatmap inputs.

    Args:
        input_shape: shape of each heatmap input (without batch dim).
        dim: coordinate dimensionality; 3 adds a depth-map input per map.
        num_maps: number of heatmap inputs; predictions from multiple maps
            are probability-weighted and averaged.
        sam_model: soft-argmax sub-model (heatmap -> coordinates).
        prob_model: visibility sub-model (heatmap -> probability).
        name: optional model name.
        verbose: if truthy, print a model summary.

    Returns:
        A Keras Model mapping the heatmap (and depth) inputs to [p, v].
    """
    assert num_maps >= 1, \
        'The number of maps should be at least 1 (%d given)' % num_maps

    inputs = []
    inputs3d = []
    p_concat = []
    v_concat = []

    # Auxiliary functions
    v_tile = Lambda(lambda x: K.tile(x, (1, 1, dim)))
    # This depends on TensorFlow because keras does not implement divide.
    tf_div = Lambda(lambda x: tf.divide(x[0], x[1]))

    for i in range(num_maps):
        h = Input(shape=input_shape)
        inputs.append(h)
        h_s = act_channel_softmax(h)
        p = sam_model(h_s)
        v = prob_model(h_s)
        if dim == 3:
            # Depth channel: expected depth under the heatmap distribution.
            d = Input(shape=input_shape)
            inputs3d.append(d)
            d_s = Activation('sigmoid')(d)
            dm = multiply([d_s, h_s])
            z = Lambda(lambda x: K.sum(x, axis=(1, 2)))(dm)
            z = Lambda(lambda x: K.expand_dims(x, axis=-1))(z)
            p = concatenate([p, z])
        if num_maps > 1:
            # Weight each map's prediction by its visibility probability.
            # (Removed a dead `t = v_tile(v)` assignment that was never used.)
            p = multiply([p, v_tile(v)])
        p_concat.append(p)
        v_concat.append(v)

    if num_maps > 1:
        # Probability-weighted average of the per-map predictions.
        p = add(p_concat)
        v_sum = add(v_concat)
        p = tf_div([p, v_tile(v_sum)])
        v = maximum(v_concat)
    else:
        p = p_concat[0]
        v = v_concat[0]

    model = Model(inputs+inputs3d, [p, v], name=name)
    if verbose:
        model.summary()

    return model
def build_context_aggregation(num_joints, num_context, alpha,
        num_frames=1, name=None):
    """Build a frozen model that mixes per-joint predictions with the
    probability-weighted average of their context predictions.

    Output is ``alpha * ys + (1 - alpha) * yc_avg`` where yc_avg is the
    per-joint, probability-weighted mean of the context predictions.

    Args:
        num_joints: number of joints.
        num_context: number of context predictions per joint.
        alpha: mixing weight for the direct (non-context) predictions.
        num_frames: if > 1, the aggregation is applied per frame via
            TimeDistributed.
        name: optional model name.

    Returns:
        A non-trainable Keras Model with inputs [ys, yc, pc].
    """
    inp = Input(shape=(num_joints * num_context, 1))
    d = Dense(num_joints, use_bias=False)
    x = Lambda(lambda x: K.squeeze(x, axis=-1))(inp)
    x = d(x)
    x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)

    # Hand-craft the Dense weights into a fixed block-sum matrix: each
    # output joint sums its own num_context inputs.  The layer is frozen.
    w = d.get_weights()
    w[0].fill(0)
    for j in range(num_joints):
        # (Removed a dead `start = j*num_context` local that was unused.)
        w[0][j * num_context : (j + 1) * num_context, j] = 1.
    d.set_weights(w)
    d.trainable = False

    ctx_sum = Model(inputs=inp, outputs=x)
    ctx_sum.trainable = False
    if num_frames > 1:
        ctx_sum = TimeDistributed(ctx_sum,
                input_shape=(num_frames,) + K.int_shape(inp)[1:])

    # Define auxiliary layers.
    mul_alpha = Lambda(lambda x: alpha * x)
    mul_1alpha = Lambda(lambda x: (1 - alpha) * x)

    # This depends on TensorFlow because keras does not implement divide.
    tf_div = Lambda(lambda x: tf.divide(x[0], x[1]))

    if num_frames == 1:
        # Define inputs
        ys = Input(shape=(num_joints, 2))
        yc = Input(shape=(num_joints * num_context, 2))
        pc = Input(shape=(num_joints * num_context, 1))

        # Split contextual predictions in x and y and do computations separately
        xi = Lambda(lambda x: x[:,:, 0:1])(yc)
        yi = Lambda(lambda x: x[:,:, 1:2])(yc)
    else:
        ys = Input(shape=(num_frames, num_joints, 2))
        yc = Input(shape=(num_frames, num_joints * num_context, 2))
        pc = Input(shape=(num_frames, num_joints * num_context, 1))

        # Split contextual predictions in x and y and do computations separately
        xi = Lambda(lambda x: x[:,:,:, 0:1])(yc)
        yi = Lambda(lambda x: x[:,:,:, 1:2])(yc)

    # Probability-weighted context coordinates, summed per joint.
    pxi = multiply([xi, pc])
    pyi = multiply([yi, pc])

    pc_sum = ctx_sum(pc)
    pxi_sum = ctx_sum(pxi)
    pyi_sum = ctx_sum(pyi)
    # (Removed a dead `pc_div` Lambda whose output was never connected to
    # the model outputs.)
    pxi_div = tf_div([pxi_sum, pc_sum])
    pyi_div = tf_div([pyi_sum, pc_sum])
    yc_div = concatenate([pxi_div, pyi_div])

    ys_alpha = mul_alpha(ys)
    yc_div_1alpha = mul_1alpha(yc_div)
    y = add([ys_alpha, yc_div_1alpha])

    model = Model(inputs=[ys, yc, pc], outputs=y, name=name)
    model.trainable = False

    return model
def build_softargmax_1d(input_shape, name=None):
    """Frozen model converting a 1D heatmap into a coordinate via
    soft-argmax (depth softmax followed by linear interpolation)."""
    softmax_name = None if name is None else name + '_softmax'
    heatmap_in = Input(shape=input_shape)
    smax = act_depth_softmax(heatmap_in, name=softmax_name)
    coord = lin_interpolation_1d(smax)
    sam = Model(inputs=heatmap_in, outputs=coord, name=name)
    sam.trainable = False
    return sam
def build_softargmax_2d(input_shape, rho=0., name=None):
    """Frozen model converting a 2D heatmap into (x, y) coordinates via
    soft-argmax; optionally applies a KL-divergence regularizer."""
    softmax_name = None if name is None else name + '_softmax'
    heatmap_in = Input(shape=input_shape)
    smax = act_channel_softmax(heatmap_in, name=softmax_name)
    if rho > 0:
        smax = kl_divergence_regularizer(smax, rho=rho)
    # Expected coordinate along each spatial axis.
    coord_x = lin_interpolation_2d(smax, axis=0)
    coord_y = lin_interpolation_2d(smax, axis=1)
    coords = concatenate([coord_x, coord_y])
    sam = Model(inputs=heatmap_in, outputs=coords, name=name)
    sam.trainable = False
    return sam
def build_joints_probability(input_shape, name=None, verbose=0):
    """Model mapping a heatmap to a per-joint visibility score: 2x2 average
    pool scaled by 4 (i.e. a local sum), global max, then expand dims."""
    hmap = Input(shape=input_shape)
    pooled = AveragePooling2D((2, 2), strides=(1, 1))(hmap)
    local_sum = Lambda(lambda t: 4 * t)(pooled)
    peak = GlobalMaxPooling2D()(local_sum)
    score = Lambda(lambda t: K.expand_dims(t, axis=-1))(peak)
    model = Model(inputs=hmap, outputs=score, name=name)
    if verbose:
        model.summary()
    return model
| 2.84375 | 3 |
src/spaceone/inventory/model/disk.py | choonho/plugin-google-cloud-compute-inven-collector | 3 | 12757590 | <filename>src/spaceone/inventory/model/disk.py<gh_stars>1-10
from schematics import Model
from schematics.types import StringType, IntType, FloatType, BooleanType, ModelType, ListType
'''
{
"id": "3358936943321049290",
"creationTimestamp": "2020-08-18T06:22:45.863-07:00",
"name": "dk-test-attached-081820",
"description": "dk-test-attached-081820",
"sizeGb": "10",
"zone": "https://www.googleapis.com/compute/beta/projects/bluese-cloudone-20200113/zones/asia-northeast3-a",
"status": "READY",
"selfLink": "https://www.googleapis.com/compute/beta/projects/bluese-cloudone-20200113/zones/asia-northeast3-a/disks/dk-test-attached-081820",
"type": "https://www.googleapis.com/compute/beta/projects/bluese-cloudone-20200113/zones/asia-northeast3-a/diskTypes/pd-balanced",
"lastAttachTimestamp": "2020-08-18T06:22:57.405-07:00",
"users": [
"https://www.googleapis.com/compute/beta/projects/bluese-cloudone-20200113/zones/asia-northeast3-a/instances/js-test"
],
"labels": {
"name": "dk_created_as"
},
"labelFingerprint": "g8kd3RSB8Ug=",
"physicalBlockSizeBytes": "4096",
"interface": "SCSI",
"kind": "compute#disk"
}
'''
class Labels(Model):
    """A single key/value label attached to a GCP disk resource."""
    key = StringType()
    value = StringType()
class DiskTags(Model):
    """Normalized metadata collected for a GCE disk (see the sample API
    payload at the top of this module)."""
    disk_id = StringType(serialize_when_none=False)
    disk_name = StringType(serialize_when_none=False)
    description = StringType(serialize_when_none=False)
    zone = StringType(serialize_when_none=False)
    # Matches GCE persistent/local disk type identifiers.
    disk_type = StringType(choices=('local-ssd', 'pd-balanced', 'pd-ssd', 'pd-standard'), serialize_when_none=False)
    encrypted = BooleanType(default=True)
    # Performance figures; presumably derived from disk type/size limits --
    # confirm against the collector that fills these in.
    read_iops = FloatType(serialize_when_none=False)
    write_iops = FloatType(serialize_when_none=False)
    read_throughput = FloatType(serialize_when_none=False)
    write_throughput = FloatType(serialize_when_none=False)
    labels = ListType(ModelType(Labels), default=[], serialize_when_none=False)
class Disk(Model):
    """Top-level disk entry as reported by this inventory collector."""
    device_index = IntType()
    device = StringType(default="")
    disk_type = StringType(default="disk")
    # Size in GB per the API's sizeGb field -- confirm unit with callers.
    size = FloatType()
    tags = ModelType(DiskTags, default={})
| 2.1875 | 2 |
usfm_utils/usfm/flags.py | unfoldingWord-dev/USFM-Utils | 4 | 12757591 | <filename>usfm_utils/usfm/flags.py
"""
This module stores info about USFM flags that is used by the lexer and parser.
Most of this info is store in dictionaries whose keys are flag names (used as token
names in lexing), and whose values are (flag, constructor) pairs, where flag is the
string literal that is used inside USFM files, and constructor is a function for
constructing the corresponding Element.
In general, a constructor of None means that a particular flag has special functionality,
and needs to be handled separately during parsing
"""
from usfm_utils.elements.element_impls import Paragraph, FormattedText, \
ChapterNumber, OtherText, Text, Heading, Whitespace, Footnote
from usfm_utils.elements.paragraph_utils import Centered, LeftAligned, \
RightAligned
# TOKEN_NAME: (flag, callable[children -> Paragraph])
# Plain paragraph flags.  A constructor of None marks a flag with special
# behavior that the parser handles separately (see module docstring).
paragraphs = {
    "CENTERED_PARAGRAPH": ("pc", Paragraph.Builder(layout=Centered())),
    "PARAGRAPH": ("p", Paragraph.Builder()),
    "FLUSH_PARAGRAPH": ("m", Paragraph.Builder(layout=LeftAligned(LeftAligned.FirstLineIndent.none))),
    "EMBEDDED_OPENING": ("pmo", Paragraph.Builder(layout=LeftAligned(LeftAligned.FirstLineIndent.none),
                                                  embedded=True)),
    "EMBEDDED_PARAGRAPH": ("pm", Paragraph.Builder(embedded=True)),
    "EMBEDDED_CLOSING": ("pmc", Paragraph.Builder(layout=LeftAligned(LeftAligned.FirstLineIndent.none),
                                                  embedded=True)),
    "EMBEDDED_REFRAIN": ("pmr", Paragraph.Builder(embedded=True,
                                                  layout=RightAligned())),
    "NO_BREAK": ("nb", None),
    "POETIC_RIGHT_ALIGNED": ("qr", Paragraph.Builder(layout=RightAligned(),
                                                     poetic=True)),
    "POETIC_CENTERED": ("qc", Paragraph.Builder(layout=Centered(), poetic=True)),

    # introductory paragraphs
    "INTRO_PARAGRAPH": ("ip", Paragraph.Builder(introductory=True)),
    "INTRO_INDENTED": ("ipi", Paragraph.Builder(layout=LeftAligned(left_margin_indent=1),
                                                introductory=True)),
    "INTRO_FLUSH": ("im", Paragraph.Builder(layout=LeftAligned(LeftAligned.FirstLineIndent.none),
                                            introductory=True)),
    "INTRO_INDENTED_FLUSH": ("imi", Paragraph.Builder(layout=LeftAligned(
        LeftAligned.FirstLineIndent.none, left_margin_indent=1), introductory=True)),
    "INTRO_QUOTE": ("ipq", lambda children: Paragraph.Builder(introductory=True)
                    .build([FormattedText.Kind.quotation.construct(children)])),
    "INTRO_FLUSH_QUOTE": ("imq", lambda children: Paragraph.Builder(introductory=True)
                          .build([FormattedText.Kind.quotation.construct(children)])),
    "INTRO_RIGHT_ALIGNED": ("ipr", Paragraph.Builder(
        layout=RightAligned(), introductory=True))
}

# TOKEN_NAME: (flag, callable[children, indent -> Paragraph])
# Paragraph flags whose USFM marker carries an indent level suffix.
indented_paragraphs = {
    "INDENTED_PARAGRAPH":
        ("pi",
         lambda children, indent: Paragraph.Builder(
             layout=LeftAligned(left_margin_indent=indent)
         ).build(children)),
    "LIST_ITEM":
        ("li",
         lambda children, indent: Paragraph.Builder(
             layout=LeftAligned(LeftAligned.FirstLineIndent.outdent, left_margin_indent=indent),
             embedded=True
         ).build(children)),
    "POETIC_LINE":
        ("q",
         lambda children, indent: Paragraph.Builder(
             layout=LeftAligned(left_margin_indent=indent),
             poetic=True
         ).build(children)),
    "EMBEDDED_POETIC":
        ("qm",
         lambda children, indent: Paragraph.Builder(
             layout=LeftAligned(left_margin_indent=indent),
             embedded=True,
             poetic=True
         ).build(children)),

    # introductory
    "INTRO_LIST_ITEM":
        ("ili",
         lambda children, indent: Paragraph.Builder(
             layout=LeftAligned(LeftAligned.FirstLineIndent.outdent, left_margin_indent=indent),
             introductory=True
         ).build(children)),
    "INTRO_POETIC":
        ("iq",
         lambda children, indent: Paragraph.Builder(
             layout=LeftAligned(left_margin_indent=indent),
             introductory=True,
             poetic=True
         ).build(children)),
}
# TOKEN_NAME: (flag, callable[children -> Element])
# Character-level open/close flags (e.g. \bd ... \bd*).  A constructor of
# None marks a flag handled specially by the parser.
lower_open_closes = {
    "ALT_VERSE": ("va", FormattedText.Kind.alternate_verse_no.construct),
    "BOLD": ("bd", FormattedText.Kind.bold.construct),
    "BOLD_AND_ITALICS": ("bdit", lambda children: FormattedText(
        FormattedText.Kind.bold, [FormattedText(FormattedText.Kind.italics, children)])),
    "BOOK_TITLE": ("bk", FormattedText.Kind.book_title.construct),
    "CROSS_REF_DEUTEROCANONICAL": ("xdc", FormattedText.Kind.deuterocanonical.construct),
    "CROSS_REF_NEW_TESTAMENT": ("xnt", FormattedText.Kind.footnote_new_testament.construct),
    "CROSS_REF_OLD_TESTAMENT": ("xot", FormattedText.Kind.footnote_old_testament.construct),
    "DEUTEROCANONICAL": ("dc", FormattedText.Kind.deuterocanonical.construct),
    "EMPHASIS": ("em", FormattedText.Kind.emphasis.construct),
    "FOOTNOTE_DEUTEROCANONICAL": ("fdc", FormattedText.Kind.deuterocanonical.construct),
    "FOOTNOTE_REFERENCE_MARK": ("fm", FormattedText.Kind.footnote_reference_mark.construct),
    "ITALICS": ("it", FormattedText.Kind.italics.construct),
    "KEYWORD": ("k", FormattedText.Kind.keyword.construct),
    "NAME_OF_GOD": ("nd", FormattedText.Kind.name_of_god.construct),
    "NORMAL": ("no", FormattedText.Kind.normal.construct),
    "ORDINAL": ("ord", FormattedText.Kind.ordinal.construct),
    "PROPER_NAME": ("pn", FormattedText.Kind.proper_name.construct),
    "PUBLISHED_VERSE": ("vp", None),
    "QUOTED_TEXT": ("qt", FormattedText.Kind.quotation.construct),
    "SECONDARY_LANG": ("sls", FormattedText.Kind.secondary_language.construct),
    "SIGNATURE": ("sig", FormattedText.Kind.signature.construct),
    "SMALL_CAPS": ("sc", FormattedText.Kind.lower_case.construct),
    "TRANSLATOR_ADDITION": ("add", FormattedText.Kind.translator_addition.construct),
    "WORDS_OF_JESUS": ("wj", FormattedText.Kind.words_of_jesus.construct)
}

# TOKEN_NAME: (flag, callable[children -> Element])
# Open/close flags that produce non-character-level elements.
higher_open_closes = {
    "ALT_CHAPTER": ("ca", ChapterNumber.Kind.alternate.construct),
    "SELAH": ("qs", OtherText.Kind.selah.construct),
}
# TOKEN_NAME: (flag, callable[children -> Element])
# Heading/title flags.  A constructor of None marks special handling.
headings = {
    "HEADING": ("h", None),
    "TABLE_OF_CONTENTS": ("toc", None),
    "MAJOR_TITLE": ("mt", Heading.Builder(Heading.Kind.major_title)),
    "MAJOR_TITLE_END": ("mte", Heading.Builder(Heading.Kind.major_title_end)),
    "MAJOR_SECTION": ("ms", Heading.Builder(Heading.Kind.major_section)),
    "SECTION": ("s", Heading.Builder(Heading.Kind.section)),
    "INTRO_MAJOR_TITLE": ("imt", Heading.Builder(Heading.Kind.major_title,
                                                 introductory=True)),
    "INTRO_MAJOR_TITLE_END": ("imte", Heading.Builder(Heading.Kind.major_title_end,
                                                      introductory=True)),
    "INTRO_SECTION": ("is", Heading.Builder(Heading.Kind.section,
                                            introductory=True)),
    "INTRO_OUTLINE_TITLE": ("iot", Heading.Builder(Heading.Kind.outline_title,
                                                   introductory=True))
}

# Flags that take a one-word argument
# TOKEN_NAME: (flag, callable[children -> Element])
one_word_arguments = {
    "CHAPTER": ("c", None),
    "VERSE": ("v", FormattedText.Kind.verse_no.construct)
}

# TOKEN_NAME: (flag, callable[children -> Element])
# Flags whose content is the rest of the current line.
higher_rest_of_lines = {
    "ACROSTIC_HEADING": ("qa", OtherText.Kind.acrostic_heading.construct),
    "DESCRIPTIVE_TITLE": ("d", OtherText.Kind.explanatory.construct),
    "EXPLANATORY": ("iex", OtherText.Kind.explanatory.construct),
    "SPEAKER_ID": ("sp", OtherText.Kind.speaker_id.construct),
}

# TOKEN_NAME: flag
# Flags whose rest-of-line content is discarded entirely.
ignore_rest_of_lines = {
    "FILE_ID": "id",
    "ENCODING": "ide",
    "STATUS": "sts",
    "REM_TEXT": "rem_text",
}
# TOKEN_NAME: (flag, callable[children -> Element])
# Flags whose scope extends until the next flag (no explicit close marker);
# used inside footnotes and cross-references.
lower_until_next_flags = {
    # footnotes
    "FOOTNOTE_ALT_QUOTATION": ("fqa", FormattedText.Kind.footnote_alternate_quotation.construct),
    "FOOTNOTE_KEYWORD": ("fk", FormattedText.Kind.footnote_keyword.construct),
    "FOOTNOTE_TEXT": ("ft", FormattedText.Kind.no_effect.construct),
    "FOOTNOTE_QUOTATION": ("fq", FormattedText.Kind.footnote_quotation.construct),
    "FOOTNOTE_REFERENCE": ("fr", FormattedText.Kind.footnote_reference.construct),
    "FOOTNOTE_VERSE": ("fv", FormattedText.Kind.footnote_verse.construct),

    # cross-references
    "CROSS_REF_ORIGIN": ("xo", FormattedText.Kind.footnote_origin.construct),
    "CROSS_REF_KEYWORD": ("xk", FormattedText.Kind.footnote_keyword.construct),
    "CROSS_REF_QUOTATION": ("xq", FormattedText.Kind.footnote_quotation.construct)
}

# TOKEN_NAME: (flag, Footnote.Kind)
footnotes = {
    "FOOTNOTE": ("f", Footnote.Kind.footnote),
    "ENDNOTE": ("fe", Footnote.Kind.endnote),
    "CROSS_REFERENCE": ("x", Footnote.Kind.cross_reference)
}

# TOKEN_NAME: (flag, Whitespace.Kind)
whitespace = {
    "BLANK_LINE": ("b", Whitespace.Kind.new_line),
    "INTRO_BLANK_LINE": ("ib", Whitespace.Kind.new_line),
    "PAGE_BREAK": ("pb", Whitespace.Kind.page_break)
}
| 2.515625 | 3 |
espresso/tools/estimate_initial_state_prior_from_alignments.py | kingfener/espresso | 1 | 12757592 | <reponame>kingfener/espresso<gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) <NAME>
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import sys
import numpy as np
try:
import kaldi_io
except ImportError:
raise ImportError('Please install kaldi_io with: pip install kaldi_io')
# Configure root logging once at import time so the CLI emits timestamped
# records on stdout in a uniform format.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.INFO,
    stream=sys.stdout,
)

logger = logging.getLogger("espresso.tools.estimate_initial_state_prior_from_alignments")
def get_parser():
    """Build and return the argument parser for this tool."""
    cli = argparse.ArgumentParser(
        description="Obtain initial state prior from alignments")
    cli.add_argument(
        "--alignment-files", nargs="+", required=True,
        help="path(s) to alignment file(s)",
    )
    cli.add_argument(
        "--prior-dim", required=True, type=int,
        help="state prior dimension, i.e., the number of states",
    )
    cli.add_argument(
        "--prior-floor", type=float, default=5.0e-6,
        help="floor for the state prior",
    )
    cli.add_argument(
        "--output", required=True, type=str,
        help="output path",
    )
    return cli
def main(args):
    """Count state occupancies over all alignment files, then write the
    normalized (and floored) prior vector with kaldi_io.

    Args:
        args: parsed namespace from :func:`get_parser`.
    """
    assert args.prior_floor > 0.0 and args.prior_floor < 1.0
    prior = np.zeros((args.prior_dim,), dtype=np.int32)
    for path in args.alignment_files:
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                # Kaldi scp-style line: "<utt-id> <rxspecifier>".
                _, rxfile = line.strip().split(None, 1)
                try:
                    ali = kaldi_io.read_vec_int(rxfile)
                except Exception:
                    raise Exception("failed to read int vector {}.".format(rxfile))
                assert ali is not None and isinstance(ali, np.ndarray)
                # Each alignment entry is a state id; tally occupancies.
                for id in ali:
                    prior[id] += 1
    prior = np.maximum(prior / float(np.sum(prior)), args.prior_floor)  # normalize and floor
    prior = prior / float(np.sum(prior))  # normalize again
    kaldi_io.write_vec_flt(args.output, prior)
    logger.info("Saved the initial state prior estimate in {}".format(args.output))
# Standard CLI entry point: parse arguments and run.
if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    main(args)
| 2.03125 | 2 |
CISP 300/A4 Michael Dinh 169 #6.py | gb-camera/college-hw | 0 | 12757593 | <reponame>gb-camera/college-hw<filename>CISP 300/A4 Michael Dinh 169 #6.py
#-------------------------------------------------------------
# Name: <NAME>
# Date: 10/10/2018
# Reference: Pg. 169, Problem #6
# Title: Book Club Points
# Inputs: User inputs number of books bought from a store
# Processes: Determines point value based on number of books bought
# Outputs: Number of points user earns in correlation to number of books input
#-------------------------------------------------------------
#Start defining modules
#getBooks() prompts the user for the number of books bought
def getBooks():
    """Prompt for and return the number of books purchased today."""
    prompt = ("Welcome to the Serendipity Booksellers Book Club Awards Points "
              "Calculator! Please enter the number of books you've purchased today: ")
    return int(input(prompt))
#defPoints() determines what points tier a user qualifies for based on book value
def detPoints(books):
    """Print the award points earned for the given number of books."""
    # Check from the highest tier down; non-integer counts that match no
    # tier (e.g. 2.5) fall through to the error message, as before.
    if books > 3:
        print("Super reader! You've earned 60 points!")
    elif books == 3:
        print("You've earned 30 points!")
    elif books == 2:
        print("You've earned 15 points!")
    elif books == 1:
        print("You've earned 5 points!")
    elif books < 1:
        print("You've earned 0 points; buy at least one book to qualify!")
    else:
        print("Error!")
#Define main()
# Define main()
def main():
    """Prompt for the number of books bought and print the points earned."""
    # Removed a dead `books = 0.0` initialization -- the value was
    # immediately overwritten by getBooks().
    books = getBooks()
    detPoints(books)
# Call main
main() | 3.40625 | 3 |
setup.py | aweir12/python-ServiceNow | 0 | 12757594 | <reponame>aweir12/python-ServiceNow
import setuptools
import versioneer
# NOTE(review): only the SECOND line of README.md is used as the package
# description -- presumably line one is a title; confirm the README layout
# before changing this.
with open("README.md", "r") as rm:
    long_description = rm.read().split("\n")[1]

setuptools.setup(
    name="servicenow",
    # Version and setup.py command hooks are managed by versioneer.
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    author="<NAME>",
    author_email="<EMAIL>",
    description=long_description,
    url="https://github.com/aweir12/python-ServiceNow",
    packages=setuptools.find_packages(),
    # Ship data files bundled alongside the package modules.
    package_data={'': ['*.csv', '*.yml', '*.html']}
) | 1.585938 | 2 |
interface/Python/b1.py | zephms/multilanguage | 0 | 12757595 | <reponame>zephms/multilanguage
# v0.1.1
# Worker script "b1": connects to the local MLP manager on port 8083,
# waits for the start signal, then polls channel "a-b1" forever.
print("这里是b1")
import mlp as mlp

cm = mlp.MLManager("8083")
cm.waitForServer()
# Block until the controller raises signal "start" with value "1".
cm.waitForSignal("start", "1")
while True:
    get = cm.get("a-b1")
    print("b1获取到数据:", get)
| 2.625 | 3 |
candas/__init__.py | JulianWgs/candas | 2 | 12757596 | <reponame>JulianWgs/candas<gh_stars>1-10
""" __init__ module for candas package """
from . dataframe import from_file, from_database, load_dbc, from_fake
from . database import initialize_database
| 1.46875 | 1 |
classes/database.py | arthur-bryan/puppeteer | 1 | 12757597 | """
MIT License
Copyright (c) 2021 <NAME> <<EMAIL>>
This module belongs to https://github.com/arthur-bryan/puppeteer:
A implementation of a botnet using Python on server (C&C) side
and C on the puppets side.
This module contains the class that represents the Database, with the
responsible methods for create/update the database based on the server
requests
"""
import sqlite3
from config import to_red
class Database:
    """SQLite persistence layer for puppet (bot) records.

    Creates the database file if needed, connects to it, sets up a cursor
    and creates the ``puppets`` table if it does not exist.

    NOTE(review): ``row_factory`` below collapses every fetched row to its
    FIRST column.  That is convenient for single-column queries such as
    :meth:`get_puppets_hashes`, but it means the ``SELECT *`` methods
    actually return lists of ID values, not full row tuples.  Changing the
    factory would alter what existing callers receive, so it is kept and
    documented instead.
    """

    def __init__(self, filename):
        """Connect to the database and ensure the schema exists.

        Args:
            filename (str): the path for the database (``":memory:"`` works
                for an in-memory database).
        """
        try:
            self.conn = sqlite3.connect(filename, check_same_thread=False)
            # Collapse each row to its first column -- see class docstring.
            self.conn.row_factory = lambda cursor, row: row[0]
        except sqlite3.Error as error:
            # NOTE(review): on connection failure self.cursor is never set,
            # so later method calls raise AttributeError -- confirm whether
            # callers expect the process to continue here.
            print(to_red(f"\n[ DATABASE ERROR ] {error} (unknown)\n"))
        else:
            self.cursor = self.conn.cursor()
            self.create_table()

    def create_table(self):
        """Create (if not exists) the table that stores the puppet infos."""
        try:
            self.cursor.execute(
                """
                CREATE TABLE IF NOT EXISTS puppets (
                    ID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
                    IP_ADDRESS VARCHAR(15),
                    IS_CONNECTED INTEGER,
                    AUTORUN_ENABLED INTEGER,
                    OP_SYSTEM VARCHAR(15),
                    ARCHITECTURE VARCHAR(15),
                    KERNEL_RELEASE VARCHAR(30),
                    HOSTNAME VARCHAR(20),
                    USERNAME VARCHAR(20),
                    LAST_CONNECTION DATE NOT NULL,
                    HASH TEXT NOT NULL UNIQUE
                );
                """)
        except sqlite3.Error as error:
            print(to_red(f"\n[ DATABASE ERROR ] {error}\n"))
        else:
            self.conn.commit()

    def add_puppet(self, puppet):
        """Insert the puppet information into the table.

        As some puppet attributes are received as C strings (terminated by
        ``'\\x00'``), those strings are sliced to drop the trailing NUL so
        sqlite does not store them as BLOB binary data.

        Args:
            puppet (:obj:`Puppet`): puppet to be added to the database.
        """
        try:
            self.cursor.execute(
                """
                INSERT INTO puppets (
                    IP_ADDRESS,
                    IS_CONNECTED,
                    AUTORUN_ENABLED,
                    OP_SYSTEM,
                    ARCHITECTURE,
                    KERNEL_RELEASE,
                    HOSTNAME,
                    USERNAME,
                    LAST_CONNECTION,
                    HASH
                )
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """, (puppet.ip_address,
                      puppet.is_connected,
                      puppet.autorun_is_enabled,
                      puppet.op_system[:-1],
                      puppet.architecture[:-1],
                      puppet.kernel_release[:-1],
                      puppet.hostname[:-1],
                      puppet.username[:-1],
                      puppet.last_connection,
                      puppet.id_hash)
            )
        # Bug fix: previously only ValueError was caught here, so sqlite
        # errors (e.g. IntegrityError on a duplicate HASH) escaped uncaught
        # and crashed the caller.  Catch both, matching the other methods.
        except (sqlite3.Error, ValueError) as error:
            print(to_red(f"\n[ DATABASE ERROR ] {error} inserting\n"))
        else:
            self.conn.commit()

    def update_all_puppet_info(self, puppet):
        """Update all stored information of the puppet.

        Args:
            puppet (:obj:`Puppet`): puppet with new values to replace the
                old ones (matched on its id_hash).
        """
        try:
            self.cursor.execute(
                """
                UPDATE puppets
                SET IP_ADDRESS = ?,
                    IS_CONNECTED = ?,
                    AUTORUN_ENABLED = ?,
                    OP_SYSTEM = ?,
                    ARCHITECTURE = ?,
                    KERNEL_RELEASE = ?,
                    HOSTNAME = ?,
                    USERNAME = ?,
                    LAST_CONNECTION = ?,
                    HASH = ?
                WHERE HASH = ?
                """, (puppet.ip_address,
                      puppet.is_connected,
                      puppet.autorun_is_enabled,
                      puppet.op_system[:-1],
                      puppet.architecture[:-1],
                      puppet.kernel_release[:-1],
                      puppet.hostname[:-1],
                      puppet.username[:-1],
                      puppet.last_connection,
                      puppet.id_hash,
                      puppet.id_hash)
            )
        except sqlite3.Error as error:
            print(to_red(f"\n[ DATABASE ERROR ] {error} updating\n"))
        else:
            self.conn.commit()

    def update_puppet_status(self, puppet, new_status):
        """Update the connection status of the puppet.

        Args:
            puppet (:obj:`Puppet`): puppet to be updated.
            new_status (int): the new value for the status (1/0).
        """
        try:
            self.cursor.execute(
                """
                UPDATE puppets
                SET IS_CONNECTED = ?
                WHERE HASH = ?
                """, (new_status,
                      puppet.id_hash)
            )
        except sqlite3.Error as error:
            print(to_red(f"\n[ DATABASE ERROR ] {error} update status\n"))
        else:
            self.conn.commit()

    def get_all_puppets(self):
        """Fetch all puppets on the database.

        Returns:
            list: because of ``row_factory`` (see class docstring), this is
            a list with the FIRST column (the ID) of every puppet row.
        """
        try:
            puppets = self.cursor.execute("SELECT * FROM puppets;").fetchall()
            return puppets
        except sqlite3.Error as error:
            # Consistency fix: wrap the message in to_red() like every
            # other error path in this class.
            print(to_red(f"\n [ DATABASE ERROR ] {error}\n"))

    def get_connected_puppets(self):
        """Fetch all puppets currently marked as connected.

        Returns:
            list: first column (the ID) of each connected puppet row --
            see the ``row_factory`` note on the class docstring.
        """
        try:
            connected_puppets = self.cursor.execute(
                "SELECT * FROM puppets WHERE IS_CONNECTED = 1;"
            ).fetchall()
            return connected_puppets
        except sqlite3.Error as error:
            print(to_red(f"\n [ DATABASE ERROR ] {error}\n"))

    def get_puppets_hashes(self):
        """Fetch the hashes of all puppets on the database.

        Returns:
            list: the hash strings of the puppets in the database.
        """
        try:
            puppets_hashes = self.cursor.execute(
                "SELECT HASH FROM puppets;"
            ).fetchall()
            return puppets_hashes
        except sqlite3.Error as error:
            print(to_red(f"\n [ DATABASE ERROR ] {error}\n"))

    def disconnect_puppets_on_exit(self):
        """Mark every puppet as disconnected when the server stops."""
        try:
            self.cursor.execute(
                """
                UPDATE puppets
                SET IS_CONNECTED = 0
                """
            )
        except sqlite3.Error as error:
            print(to_red(f"\n[ DATABASE ERROR ] {error}\n"))
        else:
            self.conn.commit()
| 3.046875 | 3 |
minwindowsubstr.py | matthewmuccio/LeetCodeProblemSet | 0 | 12757598 | <gh_stars>0
#!/usr/bin/env python3
from collections import Counter
class Solution(object):
    """LeetCode 76: minimum window substring (sliding window)."""

    def minWindow(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: str
        """
        if not s or not t:
            return ""
        need = Counter(t)
        kinds_required = len(need)
        have = {}
        kinds_satisfied = 0
        best_len = float("inf")
        best_lo = best_hi = None
        lo = 0
        for hi, ch in enumerate(s):
            # Grow the window to the right.
            have[ch] = have.get(ch, 0) + 1
            if ch in need and have[ch] == need[ch]:
                kinds_satisfied += 1
            # Shrink from the left while the window is still valid.
            while lo <= hi and kinds_satisfied == kinds_required:
                if hi - lo + 1 < best_len:
                    best_len, best_lo, best_hi = hi - lo + 1, lo, hi
                left_ch = s[lo]
                have[left_ch] -= 1
                if left_ch in need and have[left_ch] < need[left_ch]:
                    kinds_satisfied -= 1
                lo += 1
        return "" if best_lo is None else s[best_lo:best_hi + 1]
# Quick manual smoke test when run as a script; expected output: "BANC".
if __name__ == "__main__":
    s = Solution()
    s1 = "ADOBECODEBANC"
    t = "ABC"
    result = s.minWindow(s1, t)
    print(result)
| 3.265625 | 3 |
python/pluralsight-intro2flask/thermos/test/thermos_test.py | renweizhukov/learning-ml | 0 | 12757599 | # -*- coding: utf-8 -*-
from flask import url_for
from flask_testing import TestCase
import thermos
from thermos.models import User, Bookmark
class ThermosTestCase(TestCase):
    """Integration tests for the thermos Flask app (flask_testing)."""

    def create_app(self):
        # Required hook for flask_testing.TestCase: build the app using
        # the 'test' configuration.
        return thermos.create_app('test')

    def setUp(self):
        # Fresh schema plus one user and one bookmark before each test,
        # then log the user in so auth-protected views are reachable.
        self.db = thermos.db
        self.db.create_all()
        self.client = self.app.test_client()
        u = User(username='test', email='<EMAIL>', password='<PASSWORD>')
        bm = Bookmark(user=u, url='http://www.example.com',
                      tags='one,two,three')
        self.db.session.add(u)
        self.db.session.add(bm)
        self.db.session.commit()
        self.client.post(url_for('auth.login'),
                         data = dict(username='test', password='<PASSWORD>'))

    def tearDown(self):
        # Drop everything so each test starts from a clean database.
        thermos.db.session.remove()
        thermos.db.drop_all()

    def test_delete_all_tags(self):
        # Editing a bookmark with an empty tags field should clear its tags.
        response = self.client.post(
            url_for('bookmarks.edit_bookmark', bookmark_id=1),
            data = dict(
                url = 'http://test.example.com',
                tags = ''
            ),
            follow_redirects = True
        )
        assert response.status_code == 200
        bm = Bookmark.query.first()
        assert not bm._tags
| 2.515625 | 3 |
openstackclient/compute/v2/security_group.py | citrix-openstack-build/python-openstackclient | 0 | 12757600 | # Copyright 2012 OpenStack Foundation
# Copyright 2013 Nebula Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Compute v2 Security Group action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
from novaclient.v1_1 import security_group_rules
from openstackclient.common import parseractions
from openstackclient.common import utils
def _xform_security_group_rule(sgroup):
info = {}
info.update(sgroup)
info.update(
{'port_range': "%u:%u" % (
info.pop('from_port'),
info.pop('to_port'),
)}
)
info['ip_range'] = info['ip_range']['cidr']
if info['ip_protocol'] == 'icmp':
info['port_range'] = ''
return info
class CreateSecurityGroup(show.ShowOne):
    """Create a new security group"""

    log = logging.getLogger(__name__ + ".CreateSecurityGroup")

    def get_parser(self, prog_name):
        # Positional name plus optional description.
        parser = super(CreateSecurityGroup, self).get_parser(prog_name)
        parser.add_argument(
            "name",
            metavar="<name>",
            help="New security group name",
        )
        parser.add_argument(
            "--description",
            metavar="<description>",
            help="Security group description",
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        compute_client = self.app.client_manager.compute
        data = compute_client.security_groups.create(
            parsed_args.name,
            parsed_args.description,
        )

        # Flatten the created group's attributes into the sorted
        # (columns, values) pair that ShowOne expects.
        info = {}
        info.update(data._info)
        return zip(*sorted(six.iteritems(info)))
class DeleteSecurityGroup(command.Command):
    """Delete a security group"""

    log = logging.getLogger(__name__ + '.DeleteSecurityGroup')

    def get_parser(self, prog_name):
        parser = super(DeleteSecurityGroup, self).get_parser(prog_name)
        parser.add_argument(
            'group',
            metavar='<group>',
            help='Name or ID of security group to delete',
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug('take_action(%s)' % parsed_args)
        compute_client = self.app.client_manager.compute
        # Resolve name-or-ID to the actual group before deleting.
        data = utils.find_resource(
            compute_client.security_groups,
            parsed_args.group,
        )
        compute_client.security_groups.delete(data.id)
        return
class ListSecurityGroup(lister.Lister):
    """List all security groups"""

    log = logging.getLogger(__name__ + ".ListSecurityGroup")

    def get_parser(self, prog_name):
        parser = super(ListSecurityGroup, self).get_parser(prog_name)
        parser.add_argument(
            '--all-projects',
            action='store_true',
            default=False,
            help='Display information from all projects (admin only)',
        )
        return parser

    def take_action(self, parsed_args):

        def _get_project(project_id):
            # Map a project id to its name when known; fall back to the id.
            try:
                return getattr(project_hash[project_id], 'name', project_id)
            except KeyError:
                return project_id

        self.log.debug("take_action(%s)" % parsed_args)
        compute_client = self.app.client_manager.compute
        columns = (
            "ID",
            "Name",
            "Description",
        )
        column_headers = columns
        if parsed_args.all_projects:
            # TODO(dtroyer): Translate Project_ID to Project (name)
            columns = columns + ('Tenant ID',)
            column_headers = column_headers + ('Project',)
        search = {'all_tenants': parsed_args.all_projects}
        data = compute_client.security_groups.list(search_opts=search)

        # Build an id -> project lookup used by the Tenant ID formatter.
        projects = self.app.client_manager.identity.projects.list()
        project_hash = {}
        for project in projects:
            project_hash[project.id] = project

        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                    formatters={'Tenant ID': _get_project},
                ) for s in data))
class SetSecurityGroup(show.ShowOne):
    """Set security group properties"""

    log = logging.getLogger(__name__ + '.SetSecurityGroup')

    def get_parser(self, prog_name):
        parser = super(SetSecurityGroup, self).get_parser(prog_name)
        parser.add_argument(
            'group',
            metavar='<group>',
            help='Name or ID of security group to change',
        )
        parser.add_argument(
            '--name',
            metavar='<new-name>',
            help='New security group name',
        )
        parser.add_argument(
            "--description",
            metavar="<description>",
            # Fixed copy-paste: this previously read 'New security group name'.
            help="New security group description",
        )
        return parser

    def take_action(self, parsed_args):
        # Resolve the group, apply only the fields the user supplied, push
        # the update, and return the updated properties as (names, values).
        self.log.debug('take_action(%s)' % parsed_args)
        compute_client = self.app.client_manager.compute
        data = utils.find_resource(
            compute_client.security_groups,
            parsed_args.group,
        )
        if parsed_args.name:
            data.name = parsed_args.name
        if parsed_args.description:
            data.description = parsed_args.description
        info = {}
        info.update(compute_client.security_groups.update(
            data,
            data.name,
            data.description,
        )._info)
        if info:
            return zip(*sorted(six.iteritems(info)))
        else:
            # Nothing came back from the API; return an empty two-column row.
            return ({}, {})
class ShowSecurityGroup(show.ShowOne):
    """Show a specific security group"""

    log = logging.getLogger(__name__ + '.ShowSecurityGroup')

    def get_parser(self, prog_name):
        parser = super(ShowSecurityGroup, self).get_parser(prog_name)
        parser.add_argument(
            'group',
            metavar='<group>',
            # Fixed copy-paste from SetSecurityGroup: said '... to change'.
            help='Name or ID of security group to display',
        )
        return parser

    def take_action(self, parsed_args):
        # Fetch the group and return its properties as (names, values),
        # with rules rendered as strings and tenant_id renamed.
        self.log.debug('take_action(%s)' % parsed_args)
        compute_client = self.app.client_manager.compute
        info = {}
        info.update(utils.find_resource(
            compute_client.security_groups,
            parsed_args.group,
        )._info)
        rules = []
        for r in info['rules']:
            rules.append(utils.format_dict(_xform_security_group_rule(r)))
        # Format rules into a list of strings
        info.update(
            {'rules': rules}
        )
        # Map 'tenant_id' column to 'project_id'
        info.update(
            {'project_id': info.pop('tenant_id')}
        )
        return zip(*sorted(six.iteritems(info)))
class CreateSecurityGroupRule(show.ShowOne):
    """Create a new security group rule"""

    log = logging.getLogger(__name__ + ".CreateSecurityGroupRule")

    def get_parser(self, prog_name):
        parser = super(CreateSecurityGroupRule, self).get_parser(prog_name)
        parser.add_argument(
            'group',
            metavar='<group>',
            help='Create rule in this security group',
        )
        parser.add_argument(
            "--proto",
            metavar="<proto>",
            default="tcp",
            help="IP protocol (icmp, tcp, udp; default: tcp)",
        )
        parser.add_argument(
            "--src-ip",
            metavar="<ip-address>",
            default="0.0.0.0/0",
            help="Source IP (may use CIDR notation; default: 0.0.0.0/0)",
        )
        parser.add_argument(
            "--dst-port",
            metavar="<port-range>",
            # Without a default, omitting --dst-port left parsed_args.dst_port
            # as None and the tuple unpack in take_action raised TypeError.
            # (0, 0) matches the documented "default: 0" behaviour.
            default=(0, 0),
            action=parseractions.RangeAction,
            help="Destination port, may be a range: 137:139 (default: 0; "
                 "only required for proto tcp and udp)",
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        compute_client = self.app.client_manager.compute
        group = utils.find_resource(
            compute_client.security_groups,
            parsed_args.group,
        )
        # RangeAction yields a (low, high) tuple; a single port appears twice.
        from_port, to_port = parsed_args.dst_port
        data = compute_client.security_group_rules.create(
            group.id,
            parsed_args.proto,
            from_port,
            to_port,
            parsed_args.src_ip,
        )
        info = _xform_security_group_rule(data._info)
        return zip(*sorted(six.iteritems(info)))
class DeleteSecurityGroupRule(command.Command):
    """Delete a security group rule"""

    log = logging.getLogger(__name__ + '.DeleteSecurityGroupRule')

    def get_parser(self, prog_name):
        parser = super(DeleteSecurityGroupRule, self).get_parser(prog_name)
        parser.add_argument(
            'group',
            metavar='<group>',
            # Fixed copy-paste from CreateSecurityGroupRule.
            help='Delete rule from this security group',
        )
        parser.add_argument(
            "--proto",
            metavar="<proto>",
            default="tcp",
            help="IP protocol (icmp, tcp, udp; default: tcp)",
        )
        parser.add_argument(
            "--src-ip",
            metavar="<ip-address>",
            default="0.0.0.0/0",
            help="Source IP (may use CIDR notation; default: 0.0.0.0/0)",
        )
        parser.add_argument(
            "--dst-port",
            metavar="<port-range>",
            # Same fix as CreateSecurityGroupRule: omitting --dst-port used to
            # crash the None unpack in take_action; default matches the help.
            default=(0, 0),
            action=parseractions.RangeAction,
            help="Destination port, may be a range: 137:139 (default: 0; "
                 "only required for proto tcp and udp)",
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug('take_action(%s)' % parsed_args)
        compute_client = self.app.client_manager.compute
        group = utils.find_resource(
            compute_client.security_groups,
            parsed_args.group,
        )
        from_port, to_port = parsed_args.dst_port
        # sigh...delete by ID?  The compute API identifies the rule by its
        # full (group, proto, ports, source) tuple instead.
        compute_client.security_group_rules.delete(
            group.id,
            parsed_args.proto,
            from_port,
            to_port,
            parsed_args.src_ip,
        )
        return
class ListSecurityGroupRule(lister.Lister):
    """List all security group rules"""

    log = logging.getLogger(__name__ + ".ListSecurityGroupRule")

    def get_parser(self, prog_name):
        parser = super(ListSecurityGroupRule, self).get_parser(prog_name)
        parser.add_argument(
            'group',
            metavar='<group>',
            # Fixed copy-paste from CreateSecurityGroupRule.
            help='List rules in this security group',
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        compute_client = self.app.client_manager.compute
        group = utils.find_resource(
            compute_client.security_groups,
            parsed_args.group,
        )
        # Argh, the rules are not Resources... wrap each raw rule dict in a
        # SecurityGroupRule so get_item_properties can read its attributes.
        rules = []
        for rule in group.rules:
            rules.append(security_group_rules.SecurityGroupRule(
                compute_client.security_group_rules,
                _xform_security_group_rule(rule),
            ))
        columns = column_headers = (
            "ID",
            "IP Protocol",
            "IP Range",
            "Port Range",
        )
        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                ) for s in rules))
| 2.03125 | 2 |
keras_contrib/metrics/crf_accuracies.py | HaemanthSP/keras-contrib | 1 | 12757601 | from tensorflow.keras import backend as K
def _get_accuracy(y_true, y_pred, mask, sparse_target=False):
    """Token-level accuracy between predictions and targets.

    Predictions are argmax-decoded over the last axis.  With
    ``sparse_target`` the targets are integer label ids in
    ``y_true[:, :, 0]``; otherwise they are one-hot and argmax-decoded.
    When ``mask`` is given, only unmasked positions contribute.
    """
    pred_labels = K.argmax(y_pred, -1)
    if sparse_target:
        true_labels = K.cast(y_true[:, :, 0], K.dtype(pred_labels))
    else:
        true_labels = K.argmax(y_true, -1)
    correct = K.cast(K.equal(pred_labels, true_labels), K.floatx())
    if mask is None:
        return K.mean(correct)
    weights = K.cast(mask, K.floatx())
    return K.sum(correct * weights) / K.sum(weights)
def crf_viterbi_accuracy(y_true, y_pred):
    '''Use Viterbi algorithm to get best path, and compute its accuracy.
    `y_pred` must be an output from CRF.'''
    # Recover the CRF layer and the inbound-node index from the output
    # tensor's Keras history, then re-run Viterbi decoding on that node's
    # original input and mask.
    crf, idx = y_pred._keras_history[:2]
    X = crf._inbound_nodes[idx].input_tensors[0]
    mask = crf._inbound_nodes[idx].input_masks[0]
    y_pred = crf.viterbi_decoding(X, mask)
    return _get_accuracy(y_true, y_pred, mask, crf.sparse_target)
def crf_marginal_accuracy(y_true, y_pred):
    '''Use time-wise marginal argmax as prediction.
    `y_pred` must be an output from CRF with `learn_mode="marginal"`.'''
    # Same layer-recovery trick as crf_viterbi_accuracy, but score the
    # per-timestep marginal probabilities instead of the Viterbi path.
    crf, idx = y_pred._keras_history[:2]
    X = crf._inbound_nodes[idx].input_tensors[0]
    mask = crf._inbound_nodes[idx].input_masks[0]
    y_pred = crf.get_marginal_prob(X, mask)
    return _get_accuracy(y_true, y_pred, mask, crf.sparse_target)
def crf_accuracy(y_true, y_pred):
    '''Get default accuracy based on CRF `test_mode`.'''
    # Dispatch to the Viterbi or marginal variant depending on how the
    # producing CRF layer was configured.
    crf, idx = y_pred._keras_history[:2]
    if crf.test_mode == 'viterbi':
        return crf_viterbi_accuracy(y_true, y_pred)
    else:
        return crf_marginal_accuracy(y_true, y_pred)
| 2.5 | 2 |
prepare_fusion_data.py | oriern/ClusterProp | 2 | 12757602 | import pandas as pd
import os
import time
import numpy as np
from deriveSummaryDUC import read_simMats, cluster_mat, oracle_per_cluster
import pickle
from collections import defaultdict
from utils import offset_str2list, offset_decreaseSentOffset, insert_string
def find_abstractive_target(predictions_topic_cluster, alignments, topic):
    """Choose the summary span best aligned with a cluster of doc spans.

    Restricts ``alignments`` to rows of this ``topic`` whose document span
    belongs to the cluster, scores each candidate summary span by the sum
    of its alignment probabilities, and returns the highest-scoring one.
    """
    spans_in_cluster = predictions_topic_cluster['docSpanText'].tolist()
    relevant = alignments[
        (alignments['topic'] == topic)
        & (alignments['docSpanText'].isin(spans_in_cluster))
    ]
    candidates = relevant['summarySpanText'].drop_duplicates().tolist()
    scores = [
        relevant.loc[relevant['summarySpanText'] == cand, 'pred_prob'].sum()
        for cand in candidates
    ]
    return candidates[np.argmax(scores)]
def add_OIE_special_tok(docSpanOffsets, docSentCharIdx, sent):
    """Wrap each OIE span inside ``sent`` with ' < ' / ' > ' markers.

    ``docSpanOffsets`` is an offset string (parsed by ``offset_str2list``)
    relative to the document; ``docSentCharIdx`` rebases it onto the
    sentence.  Offsets are processed from last to first so earlier
    positions stay valid after each insertion.
    """
    parsed_offsets = offset_str2list(docSpanOffsets)
    local_offsets = offset_decreaseSentOffset(docSentCharIdx, parsed_offsets)
    for pair in local_offsets[::-1]:
        sent = insert_string(sent, pair[1], ' > ')
        sent = insert_string(sent, pair[0], ' < ')
    return sent
##################################
###### main ##############
##################################
if __name__ == "__main__":
    # ----- Experiment configuration -----
    MAX_SENT = 100
    DATASETS = ['DUC2004']  # alternatives: ['TAC2008','TAC2009','TAC2010']
    SET_TYPE = 'test'
    CLUSTERING = True
    SUMM_LEN = 100
    MAX_CLUSTERS = 10
    DUC2004_Benchmark = True
    FULL_SENT = False
    if FULL_SENT:
        full_sent_flag = '_full_sent'
    else:
        full_sent_flag = ''
    sys_model = 'roberta'
    model_name = 'greedyMaxRouge'
    # Highlighter checkpoint/folder for the TAC setup.
    sys_checkpoint = 'checkpoint-1200'  # earlier runs: 180/540/1020/600/1140/240/1080
    sys_folder = 'OIE_TAC2008_TAC2009_2010_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_span_classifier_head_fixed'
    ## DUC2004 benchmark overrides the checkpoint/folder above.
    if DUC2004_Benchmark:
        sys_checkpoint = 'checkpoint-1500'  # earlier runs: 180/540/1020/600/1140/240/1080
        sys_folder = 'OIE_DUC2003_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_span_classifier_head_fixed_finetuned_TAC8910'
    empty = 0  # topics skipped because no span passed the score threshold
    analysis_list = []
    fusion_text = []    # fusion model inputs, one line per cluster
    fusion_target = []  # fusion model targets, parallel to fusion_text
    cluster_metadata = []
    ## metadata file variant: 'full' only for TAC2011
    full_fixed = 'fixed'
    if DATASETS[0] == 'TAC2011':
        full_fixed = 'full'
    # ----- Load span metadata and highlighter predictions -----
    if DUC2004_Benchmark:
        if DATASETS[0] == 'DUC2004':
            metadata = pd.read_csv(
                './OIE_highlights/{}_{}_CDLM_allAlignments_{}_truncated_metadata.csv'.format(
                    '_'.join(DATASETS),
                    SET_TYPE, full_fixed))
        else:
            metadata = pd.read_csv(
                './OIE_highlights/{}_{}_CDLM_greedyMaxRouge_no_alignment_{}_truncated_metadata.csv'.format(
                    '_'.join(DATASETS),
                    SET_TYPE, full_fixed))
    else:
        metadata = pd.read_csv(
            './OIE_highlights/{}_{}_CDLM_allAlignments_{}_truncated_metadata.csv'.format(
                '_'.join(DATASETS),
                SET_TYPE, full_fixed))
    predictions = pd.read_csv(
        './models/{}/{}/{}_{}_results_None.csv'.format(sys_folder, sys_checkpoint,
                                                       SET_TYPE, '_'.join(DATASETS)))
    assert (len(predictions) == len(metadata))
    # Attach the prediction scores to the metadata rows (1:1 by position).
    metadata.insert(2, "prediction", predictions['prediction'])
    predictions = metadata
    for SET in DATASETS:
        alignments = pd.read_csv(
            './dev{}_checkpoint-2000_negative.csv'.format(SET))
        sys_summary_path = './{}_system_summaries/{}/{}_'.format(SET, sys_folder,
                                                                 sys_checkpoint) + time.strftime(
            "%Y%m%d-%H%M%S") + '/'
        data_path = './data/{}/'.format(SET)
        gold_summary_path = data_path + 'summaries/'
        for topic in os.listdir(data_path):
            print(topic)
            if topic == 'summaries':
                continue
            if SET.startswith('TAC'):
                # Drop the third-from-last character of TAC topic directory
                # names — presumably normalizing the topic id; TODO confirm.
                topic = topic[:-3] + topic[-2:]
            summary = ''
            predictions_topic = predictions[predictions['topic'] == topic]
            # Keep only spans whose highlighter score passes the threshold.
            if DUC2004_Benchmark:
                predictions_topic = predictions_topic[predictions_topic['prediction'] >= 0.4]
            else:
                predictions_topic = predictions_topic[predictions_topic['prediction'] >= 0.04]
            predictions_topic = predictions_topic.sort_values(by=['prediction'], ascending=False)
            if len(predictions_topic) == 0:
                empty += 1
                continue
            if CLUSTERING:
                simMat = read_simMats(topic, predictions_topic, SET)
                cluster_mat(simMat, predictions_topic['simMat_idx'].values, predictions_topic)
                oracle_per_cluster(SET, gold_summary_path, topic, predictions_topic, MAX_CLUSTERS)
                # Largest clusters first (ties broken by sentence position),
                # capped at MAX_CLUSTERS.
                allowed_clusters = list(
                    predictions_topic.sort_values(by=['cluster_size', 'inFile_sentIdx'], ascending=[False, True])[
                        'cluster_idx'].drop_duplicates(keep="first").values)[:MAX_CLUSTERS]
                selected_spans = []
                summary = ' '
                for allowed_cluster_idx in allowed_clusters:
                    predictions_topic_cluster = predictions_topic[
                        predictions_topic['cluster_idx'] == allowed_cluster_idx]
                    predictions_topic_cluster = predictions_topic_cluster.sort_values(by=['prediction'],
                                                                                      ascending=False)
                    if len(predictions_topic_cluster) > 0:
                        # Emit one fusion example per cluster: '<s> span </s>'-
                        # joined inputs plus the best aligned summary span.
                        if FULL_SENT:
                            predictions_topic_cluster['docSentText_special_tokens'] = predictions_topic_cluster.apply(lambda x: add_OIE_special_tok(x['docSpanOffsets'], x['docSentCharIdx'], x['docSentText']), axis=1)
                            fusion_text.append(
                                '<s> ' + ' </s> <s> '.join(
                                    list(predictions_topic_cluster['docSentText_special_tokens'].values)) + ' </s>')
                        else:
                            fusion_text.append(
                                '<s> ' + ' </s> <s> '.join(list(predictions_topic_cluster['docSpanText'].values)) + ' </s>')
                        fusion_target.append(find_abstractive_target(predictions_topic_cluster, alignments, topic))
                        cluster_metadata.append([topic, list(predictions_topic_cluster.index)])
                        # Sanity check: recorded indices point back to the
                        # same span texts in the predictions table.
                        assert (predictions['docSpanText'].values[predictions_topic_cluster.index[0]]
                                == predictions_topic_cluster['docSpanText'].values[0])
    # ----- Write fusion inputs/targets and cluster bookkeeping -----
    if DUC2004_Benchmark:
        out_dir = 'fusion_data/DUC2004{}/{}/'.format(full_sent_flag, model_name)
    else:
        out_dir = 'fusion_data/TAC2011{}/'.format(model_name)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    cluster_metadata_df = pd.DataFrame(cluster_metadata, columns=['topic', 'cluster_indexes'])
    cluster_metadata_df.to_csv('{}/cluster_metadata_{}.csv'.format(out_dir, '_'.join(DATASETS)))
    if SET_TYPE == 'dev':
        SET_TYPE = 'val'
    with open('{}/{}.source'.format(out_dir, SET_TYPE), 'w') as f:
        f.write('\n'.join(fusion_text).replace('...', ' '))
    with open('{}/{}.target'.format(out_dir, SET_TYPE), 'w') as f:
        f.write('\n'.join(fusion_target).replace('...', ' '))
| 2.3125 | 2 |
talks/old_talks/admin.py | alan-turing-institute/talks.ox | 5 | 12757603 | <reponame>alan-turing-institute/talks.ox
from __future__ import absolute_import
from django.contrib import admin
from .models import OldTalk, OldSeries
| 1.007813 | 1 |
Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/TypeInfo/FundamentalTypes/UnitTests/TimeTypeInfo_UnitTest.py | davidbrownell/v3-Common_Environment | 0 | 12757604 | <filename>Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/TypeInfo/FundamentalTypes/UnitTests/TimeTypeInfo_UnitTest.py
# ----------------------------------------------------------------------
# |
# | TimeTypeInfo_UnitTest.py
# |
# | <NAME> <<EMAIL>>
# | 2018-04-23 12:14:14
# |
# ----------------------------------------------------------------------
# |
# | Copyright <NAME> 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Unit test for TimeTypeInfo.py."""
import datetime
import os
import sys
import unittest
import CommonEnvironment
from CommonEnvironment.TypeInfo.FundamentalTypes.TimeTypeInfo import TimeTypeInfo
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
class StandardSuite(unittest.TestCase):
    """Sanity checks for the TimeTypeInfo fundamental type."""

    # ----------------------------------------------------------------------
    def test_Standard(self):
        # TimeTypeInfo is stateless; verify its class-level metadata.
        checks = [
            (TimeTypeInfo.Desc, "Time"),
            (TimeTypeInfo.ConstraintsDesc, ""),
            (TimeTypeInfo.ExpectedType, datetime.time),
        ]
        for actual, expected in checks:
            self.assertEqual(actual, expected)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
    # Run the suite directly, exiting with unittest's status code; a Ctrl-C
    # terminates quietly instead of dumping a traceback.
    try: sys.exit(unittest.main(verbosity=2))
    except KeyboardInterrupt: pass
| 1.875 | 2 |
cloud_deployer/openstack_handlers/subnet.py | abualy/cloud-deployer | 1 | 12757605 | #!/usr/bin/env python
#import modules
import csv
from netaddr import IPNetwork
from neutronclient.v2_0 import client as neutronclient
#Create a new Subnet
def subnet_create(credentials, my_csv, dns_servers, external_pool, archi, tags=None):
    """Create routers, networks and subnets described in a CSV file.

    Reads one subnet per CSV row, creates the Neutron network/subnet, a
    per-VPC internal router, and wires up the routes listed in the row's
    route table.  ``archi`` is mutated in place (ids recorded under
    'network', 'subnet', 'router') and returned.
    NOTE(review): ``external_pool`` and ``tags`` are accepted but unused
    here — presumably consumed by a sibling step; confirm before removing.
    """
    # open csv file and read each row as a dictionary
    subnet_file = open(my_csv, 'rb')
    subnet_reader = csv.DictReader(subnet_file)
    print "########################## Starting routers, networks and subnets creation ###############################"
    if 'router' not in archi:
        archi['router'] = {}
    if 'subnet' not in archi:
        archi['subnet'] = {}
    if 'network' not in archi:
        archi['network'] = {}
    # iterate through rows, creating one network/subnet per row
    for subnet_dict in subnet_reader:
        ip_data = IPNetwork(subnet_dict['cidr'])
        # one Neutron client per row, scoped to the row's VPC/tenant
        neutron = neutronclient.Client(username=credentials['username'],password=credentials['password'],tenant_name=archi['vpc'][subnet_dict['vpc']],auth_url=credentials['auth_url'])
        # create the Openstack network
        network = neutron.create_network({'network':{'name': subnet_dict['subnet'], 'admin_state_up': True}})
        my_dns = []
        my_update_subnet = {'subnet':{'host_routes':[]}}
        # own-VPC DNS server first, then all other VPCs' servers
        my_dns.append(dns_servers[subnet_dict['vpc']])
        for server in dns_servers:
            if server != subnet_dict['vpc']:
                my_dns.append(dns_servers[server])
        # create the Openstack subnet
        subnet = neutron.create_subnet({'subnets': [{'name': subnet_dict['subnet'],'cidr': subnet_dict['cidr'], 'ip_version': 4, 'dns_nameservers':my_dns, 'network_id': network['network']['id']}]})
        print subnet_dict['subnet'] + " created"
        archi['network'][subnet_dict['subnet']] = network['network']['id']
        archi['subnet'][subnet_dict['subnet']] = subnet['subnets'][0]['id']
        if subnet_dict['vpc']+'-router' not in archi['router']:
            # create the per-VPC internal router (once per VPC)
            router = neutron.create_router({'router': {'name': subnet_dict['vpc']+'-router','admin_state_up': True}})
            archi['router'][subnet_dict['vpc']+'-router'] = router['router']['id']
            print subnet_dict['vpc']+'-router' + " created"
        ip_data = IPNetwork(subnet_dict['cidr'])
        # gateway host index inside the CIDR; bumped to 3 when the route
        # table carries an 'add' marker (presumably reserving .1/.2 — confirm)
        internal_port = 1
        # create the Openstack routes listed in the row's route table
        if subnet_dict['route_table'] :
            if 'add' in archi['rt'][subnet_dict['vpc']+'_'+subnet_dict['route_table']]:
                internal_port = 3
                my_update_subnet['subnet']['host_routes'].append({'destination':archi['vpc'][subnet_dict['vpc']+'_cidr'],'nexthop':str(ip_data[internal_port])})
            for route_dict in archi['rt'][subnet_dict['vpc']+'_'+subnet_dict['route_table']]:
                if route_dict != 'add':
                    if route_dict['route_type']=='igw':
                        # internet gateway route: attach a dedicated port to the igw router
                        port = neutron.create_port({'port': {'name': subnet_dict['subnet'], 'admin_state_up': True, 'network_id': network['network']['id'],'fixed_ips': [{'subnet_id': subnet['subnets'][0]['id'], 'ip_address': str(ip_data[1])}]}})
                        route = neutron.add_interface_router(archi['igw'][route_dict['route']], {"port_id": port['port']['id'] })
                        print subnet_dict['subnet'] + " attached to "+route_dict['route']
                    if route_dict['route_type']=='instances':
                        # instance route: record a host route via the internal gateway
                        my_update_subnet['subnet']['host_routes'].append({'destination':route_dict['cidr'],'nexthop':str(ip_data[internal_port])})
                        print subnet_dict['subnet'] + " attached to "+route_dict['route']
        # attach the subnet to the VPC's internal router and push host routes
        port = neutron.create_port({'port': {'name': subnet_dict['subnet'], 'admin_state_up': True, 'network_id': network['network']['id'],'fixed_ips': [{'subnet_id': subnet['subnets'][0]['id'], 'ip_address': str(ip_data[internal_port])}]}})
        interface = neutron.add_interface_router(archi['router'][subnet_dict['vpc']+'-router'], {"port_id": port['port']['id'] })
        print subnet_dict['subnet'] + " attached to internal network"
        subnet = neutron.update_subnet(subnet['subnets'][0]['id'],my_update_subnet)
    print "done creating routers, networks and subnets :) "
    return archi
tests/utils.py | svenski/hmrb | 69 | 12757606 | import json
from collections import Counter
class Encoder(json.JSONEncoder):
    """JSON encoder that also handles sets and dict-items views."""

    def default(self, obj):
        # Sets are not JSON-serializable; emit them as lists (order is
        # whatever set iteration yields).
        if isinstance(obj, set):
            return list(obj)
        # dict_items views become plain dicts with stringified keys.
        dict_items_type = type({}.items())
        if isinstance(obj, dict_items_type):
            return {str(key): value for key, value in obj}
        return super(Encoder, self).default(obj)
def is_probably_equal(arg1: object, arg2: object) -> bool:
    """Loosely compare two values by character frequency.

    Strings are compared directly by character counts; anything else is
    JSON-serialized (via ``Encoder``, with dict keys stringified) first.
    Equal multisets of characters count as "probably equal", so ordering
    differences are ignored.
    """
    def as_char_counts(value):
        if isinstance(value, dict):
            value = {str(k): v for k, v in value.items()}
        if isinstance(value, str):
            return Counter(value)
        return Counter(str(json.dumps(value, cls=Encoder)))
    return as_char_counts(arg1) == as_char_counts(arg2)
def parse_babylonian_data(fp):
    """Yield (attributes, grammar_text) pairs from a babylonian data file.

    The file is a sequence of items separated by a line of '#' characters;
    inside each item a line of '=' characters separates the key/value
    attribute block from the grammar text.
    """
    item_sep = '################################'
    atts_sep = '================================'
    with open(fp, encoding='utf-8') as fh:
        s = fh.read()
    # Drop whitespace-only fragments (e.g. before the first separator).
    items = [itm for itm in s.split(item_sep) if itm.strip()]
    for itm in items:
        atts_str, grammar_str = itm.split(atts_sep)
        yield parse_bab_data_atts(atts_str), grammar_str
def parse_bab_data_atts(atts_str):
    """Parse a 'key: value' attribute block into a dict.

    'outcomes' and 'inputs' values are split on ';' into lists, 'loads'
    is converted to a bool via ``parse_bool``; everything else stays a
    stripped string.  On duplicate keys the last line wins.
    """
    parsed = {}
    for line in atts_str.split('\n'):
        if not line:
            continue
        pieces = line.split(':', 1)
        parsed[pieces[0]] = pieces[1].strip()
    for key, value in list(parsed.items()):
        if key in ('outcomes', 'inputs'):
            parsed[key] = value.strip().split(';')
        elif key == 'loads':
            parsed[key] = parse_bool(value.strip())
        else:
            parsed[key] = value.strip()
    return parsed
def parse_bool(s):
    """Convert 'true'/'false' (any capitalization) to a bool.

    Raises ValueError for anything else.
    """
    normalized = s.lower()
    if normalized == 'true':
        return True
    if normalized == 'false':
        return False
    raise ValueError(f'Incorrect boolean value: {s}')
class Underscore:
    """Empty attribute container; appears to stand in for spaCy's ``Token._``
    extension namespace — attributes get attached dynamically by callers."""
    pass
class FakeToken:
    """Minimal token stub exposing orth/lemma/pos plus a ``_`` extension
    holder; presumably mirrors spaCy's Token interface — TODO confirm."""
    def __init__(self, orth, lemma, pos):
        self.orth = orth    # surface form
        self.lemma = lemma  # lemmatized form
        self.pos = pos      # part-of-speech tag
        self._ = Underscore()  # dynamic extension attributes live here
class FakeDocument:
    """Minimal document stub wrapping a token sequence.

    Integer indexing returns a single token; slicing returns a new
    FakeDocument over a copy of the selected tokens.
    """

    def __init__(self, tokens):
        self.tokens = tokens
        self.pos_ = 0

    def __iter__(self):
        return iter(self.tokens)

    def __len__(self):
        return len(self.tokens)

    def __getitem__(self, item):
        if not isinstance(item, int):
            # Slice (or other key): wrap the selection in a fresh document.
            return FakeDocument(list(self.tokens[item]))
        return self.tokens[item]
| 2.953125 | 3 |
myblog/settings.py | Samurai-XHe/myblog | 1 | 12757607 | """
Django settings for myblog project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os,re
from .base_settings import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = <KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['xhe.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'bootstrap_admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'read_statistics',
'comment',
'ckeditor',
'ckeditor_uploader',
'user',
'likes',
'imagekit',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR,'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'user.context_processors.login_modal_form',
'read_statistics.context_processors.week_hot_blogs',
'read_statistics.context_processors.month_hot_blogs',
'blog.context_processors.blog_types',
'blog.context_processors.blog_dates',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'myblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
'''
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# 配置静态文件地址
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
#STATICFILES_DIRS = [
# os.path.join(BASE_DIR,'static'),
#]
#media 配置
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
#ckeditor
CKEDITOR_UPLOAD_PATH = 'upload/'
# ckeditor 配置
CKEDITOR_CONFIGS = {
'default': {
'toolbar': (
['div','Source','-','Save','NewPage','Preview','-','Templates'],
['Cut','Copy','Paste','PasteText','PasteFromWord','-','Print','SpellChecker','Scayt'],
['Undo','Redo','-','Find','Replace','-','SelectAll','RemoveFormat'],
['Form','Checkbox','Radio','TextField','Textarea','Select','Button', 'ImageButton','HiddenField'],
['Bold','Italic','Underline','Strike','-','Subscript','Superscript'],
['NumberedList','BulletedList','-','Outdent','Indent','Blockquote'],
['JustifyLeft','JustifyCenter','JustifyRight','JustifyBlock'],
['Link','Unlink','Anchor'],
['Image','Flash','Table','HorizontalRule','Smiley','SpecialChar','PageBreak'],
['Styles','Format','Font','FontSize'],
['TextColor','BGColor'],
['Maximize','ShowBlocks','-','About', 'pbckcode'],
),
'width':'auto',
'height':'180',
'tabSpaces':'4',
'removePlugins': 'elementspath',
'resize_enabled': False,
},
'comment_ckeditor': {
'toolbar': 'custom',
'toolbar_custom': [
['Bold', 'Italic', 'Underline', 'Strike', 'Subscript', 'Superscript','Image'],
["TextColor", "BGColor", 'RemoveFormat'],
['NumberedList', 'BulletedList'],
['Link', 'Unlink'],
["Smiley", "SpecialChar", 'Blockquote'],
],
'width': '100%',
'height': '180',
'tabSpaces': 4,
'removePlugins': 'elementspath',
'resize_enabled': False,
}
}
# 每页多少条数据
CONTENT_OF_EACH_PAGE = 7
# session 配置
#session不仅可以保存在数据库里,
#数据库(默认)
#缓存(memchache、redis)
#文件
#缓存+数据库
#加密cookie
#SESSION_ENGINE = 'django.contrib.sessions.backends.file' # 引擎
#SESSION_FILE_PATH = 文件路径 # 缓存文件路径,如果为None,则使用tempfile模块获取一个临时地址tempfile.gettempdir()
#如:/<KEY>
#SESSION_COOKIE_NAME="SSSSSSSID" # Session的cookie保存在浏览器上时的key,即:sessionid=随机字符串
#SESSION_COOKIE_PATH="/" # Session的cookie保存的路径
#SESSION_COOKIE_DOMAIN = None # Session的cookie保存的域名
#SESSION_COOKIE_SECURE = False # 是否Https传输cookie
#SESSION_COOKIE_HTTPONLY = True # 是否Session的cookie只支持http传输
SESSION_COOKIE_AGE = 86400 # Session的cookie失效日期(24小时) 默认1209600秒(两周)
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # 是否关闭浏览器使Session过期,和上面一条不能共存
SESSION_SAVE_EVERY_REQUEST = True
#如果你设置了session的过期时间为30分钟,30分钟后session准时失效
#如果该参数设置为True,在30分钟期间有请求服务端,就不会过期!(为什么逛一晚上淘宝,也不会登出,但是浏览器不刷新了就会自动登出)
#下面这个方法不是在settings中用的
#request.session.set_expiry(value)
#你可以传递四种不同的值给它:
# * 如果value是个整数,session会在秒数后失效(适用于整个Django框架,即这个数值时效时整个页面都会session失效)。
#* 如果value是个datatime或timedelta,session就会在这个时间后失效。
#* 如果value是0,用户关闭浏览器session就会失效。
# * 如果value是None,session会依赖全局session失效策略。
| 1.8125 | 2 |
setup.py | eflglobal/filters | 11 | 12757608 | <gh_stars>10-100
# coding=utf-8
# :bc: Not importing unicode_literals because in Python 2 distutils,
# some values are expected to be byte strings.
from __future__ import absolute_import, division, print_function
from codecs import StreamReader, open
from os.path import dirname, join, realpath
from setuptools import setup
cwd = dirname(realpath(__file__))

##
# Load long description for PyPi from the README.
with open(join(cwd, 'README.rst'), 'r', 'utf-8') as f: # type: StreamReader
    long_description = f.read()

##
# Off we go!
# noinspection SpellCheckingInspection
setup(
    name = 'filters',
    description = 'Validation and data pipelines made easy!',
    url = 'https://filters.readthedocs.io/',
    version = '1.3.2',
    packages = ['filters'],
    long_description = long_description,
    # Environment markers keep the Python-2-only shims off Python 3 installs.
    install_requires = [
        'class-registry',
        'python-dateutil',
        'pytz',
        'py2casefold; python_version < "3.0"',
        'regex',
        'six',
        'typing; python_version < "3.0"',
    ],
    extras_require = {
        'django':['filters-django'],
        'iso': ['filters-iso'],
    },
    test_suite = 'test',
    test_loader = 'nose.loader:TestLoader',
    tests_require = [
        'nose',
    ],
    license = 'MIT',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing :: Filters',
    ],
    keywords = 'data validation',
    author = '<NAME>',
    author_email = '<EMAIL>',
)
| 1.460938 | 1 |
automation_orchestrator/orchestrator/migrations/0004_v022_2.py | basico-ps/AutomationOrchestrator | 26 | 12757609 | # Generated by Django 3.0.3 on 2020-02-27 08:03
from django.db import migrations, models
import sortedm2m.fields
class Migration(migrations.Migration):
    # v0.2.2 part 2: for each trigger type, make the legacy single ``bot``
    # foreign key nullable and add an ordered many-to-many ``bots`` field.

    dependencies = [
        ('orchestrator', '0003_v022_1'),
    ]

    operations = [
        migrations.AlterField(
            model_name='filetrigger',
            name='bot',
            field=models.ForeignKey(help_text='Select the bot for this trigger.', null=True, on_delete=models.deletion.PROTECT, to='orchestrator.Bot'),
        ),
        migrations.AlterField(
            model_name='scheduletrigger',
            name='bot',
            field=models.ForeignKey(help_text='Select the bot for this trigger.', null=True, on_delete=models.deletion.PROTECT, to='orchestrator.Bot'),
        ),
        migrations.AlterField(
            model_name='emailimaptrigger',
            name='bot',
            field=models.ForeignKey(help_text='Select the bot for this trigger.', null=True, on_delete=models.deletion.PROTECT, to='orchestrator.Bot'),
        ),
        migrations.AlterField(
            model_name='emailoutlooktrigger',
            name='bot',
            field=models.ForeignKey(help_text='Select the bot for this trigger.', null=True, on_delete=models.deletion.PROTECT, to='orchestrator.Bot'),
        ),
        migrations.AlterField(
            model_name='apitrigger',
            name='bot',
            field=models.ForeignKey(help_text='Select the bot for this trigger.', null=True, on_delete=models.deletion.PROTECT, to='orchestrator.Bot'),
        ),
        migrations.AddField(
            model_name='apitrigger',
            name='bots',
            field=sortedm2m.fields.SortedManyToManyField(help_text='Select the bots for this trigger.', related_name='api_trigger_bot', to='orchestrator.Bot'),
        ),
        migrations.AddField(
            model_name='emailimaptrigger',
            name='bots',
            field=sortedm2m.fields.SortedManyToManyField(help_text='Select the bots for this trigger.', related_name='email_imap_trigger_bot', to='orchestrator.Bot'),
        ),
        migrations.AddField(
            model_name='emailoutlooktrigger',
            name='bots',
            field=sortedm2m.fields.SortedManyToManyField(help_text='Select the bots for this trigger.', related_name='email_outlook_trigger_bot', to='orchestrator.Bot'),
        ),
        migrations.AddField(
            model_name='filetrigger',
            name='bots',
            field=sortedm2m.fields.SortedManyToManyField(help_text='Select the bots for this trigger.', related_name='file_trigger_bot', to='orchestrator.Bot'),
        ),
        migrations.AddField(
            model_name='scheduletrigger',
            name='bots',
            field=sortedm2m.fields.SortedManyToManyField(help_text='Select the bots for this trigger.', related_name='schedule_trigger_bot', to='orchestrator.Bot'),
        ),
    ]
| 1.75 | 2 |
model_pipeline/functions/process_edu_titles.py | kbelsvik/career-skills-capstone | 0 | 12757610 | import pandas as pd
import re
from nltk import word_tokenize
def get_list_small_words(list_of_titles, word_size):
    """Collect the unique words strictly shorter than ``word_size``.

    Words of fewer than ``word_size`` letters are treated as acronym
    candidates.  ``list_of_titles`` is an iterable of token lists; the
    result is a de-duplicated list (arbitrary order, as before).
    """
    # Original built this with a side-effecting list comprehension
    # ([word_list.append(x) ...]); a set comprehension is the idiomatic form.
    small_words = {
        word
        for title_tokens in list_of_titles
        for word in title_tokens
        if len(word) < word_size
    }
    return list(small_words)
def load_dict(filename):
    """Load a ``keyword -> type`` mapping from a CSV file.

    The file must contain 'keyword' and 'type' columns; any other columns
    are ignored.  Read with latin-1 to tolerate non-UTF-8 bytes.
    """
    table = pd.read_csv(filename, encoding='latin-1')
    keyed = table[['keyword', 'type']].set_index('keyword')
    return keyed['type'].to_dict()
def split_to_subject_degree(list_of_titles, word_list):
    """Split each tokenized title into a degree-name part and a subject part.

    Leading tokens are moved into the degree name while they look like
    degree words:
      1. a literal 'in' ends the degree part (and is kept in it);
      2. a token found in the degree-word dictionary or in ``word_list``
         (detected acronyms) belongs to the degree part;
      3. any other token stops the scan — it and everything after it is
         the subject.

    Args:
        list_of_titles: List of tokenized titles (each a list of words).
        word_list: Extra words (e.g. acronyms) treated as degree words.

    Returns:
        Tuple ``(degree_name_list, subject_name_list)`` of joined strings,
        parallel to ``list_of_titles``.
    """
    # Only the word-level dictionary is needed here. BUG FIX: the original
    # also loaded degree_type_phrase_dict but never used it (that dict is
    # only consumed by tag_with_degree_category), so the wasted file read
    # has been removed.
    degree_type_word_dict = load_dict('functions/configuration_files/degree_type_word_dict.csv')

    # Hoist the membership collection out of the loops and use a set:
    # the original rebuilt list(keys) + word_list for every single token.
    degree_words = set(degree_type_word_dict) | set(word_list)

    degree_name_list = []
    subject_name_list = []
    for row in list_of_titles:
        degree_row = []
        subject_row = row
        for token in row:
            if token == 'in':
                degree_row.append(token)
                subject_row = subject_row[1:]
                break
            elif token in degree_words:
                degree_row.append(token)
                subject_row = subject_row[1:]
            else:
                break
        degree_name_list.append(' '.join(degree_row))
        subject_name_list.append(' '.join(subject_row))
    return degree_name_list, subject_name_list
def tag_with_degree_category(list_of_degrees, list_of_subjects):
    """Tag every degree with zero or more degree categories.

    Matching is attempted in three stages, stopping at the first stage
    that produces a hit:
      1. word match against ``degree_type_word_dict``;
      2. regex prefix match against ``degree_type_phrase_dict``;
      3. keyword match on the *subject* text via ``last_dict``.

    Args:
        list_of_degrees: Degree-name strings.
        list_of_subjects: Subject-name strings (parallel fallback source).

    Returns:
        List (parallel to the input) of lists of unique category strings.
    """
    # NOTE(review): the original literal also contained
    # 'certificate': 'bootcamp', which was silently overwritten by the
    # later 'certificate': 'certificate' entry (dict literals keep the
    # last duplicate key). The dead entry is removed here; runtime
    # behavior is unchanged, but confirm 'certificate' was never meant
    # to map to 'bootcamp'.
    last_dict = {
        'immersive': 'bootcamp',
        'bootcamp': 'bootcamp',
        'boot camp': 'bootcamp',
        'license': 'license',
        'licensure': 'license',
        'certification': 'certificate',
        'certificate': 'certificate',
    }
    degree_category_list = []
    # Load these dictionaries from the `configuration_files` folder.
    degree_type_word_dict = load_dict('functions/configuration_files/degree_type_word_dict.csv')
    degree_type_phrase_dict = load_dict('functions/configuration_files/degree_type_phrase_dict.csv')
    # Iterate through each degree.
    for index, row in enumerate(list_of_degrees):
        degree_category = []
        found_key = 0
        # Stage 1: word-level dictionary entries with a non-NaN category.
        for key in filter(lambda x: str(degree_type_word_dict[x]) != 'nan', degree_type_word_dict):
            if key in row.split():
                degree_category.append(degree_type_word_dict[key])
                found_key = 1
        if found_key == 0:
            # Stage 2: regex prefix matches against the whole degree text.
            for phrase in degree_type_phrase_dict:
                if re.match(phrase, row):
                    degree_category.append(degree_type_phrase_dict[phrase])
                    found_key = 1
        if found_key == 0:
            # Stage 3: fall back to keywords found in the subject text.
            for key in last_dict:
                if key in list_of_subjects[index]:
                    degree_category.append(last_dict[key])
        # De-duplicate and drop NaN/blank placeholder categories.
        degree_category_list.append(
            list(set([x.strip() for x in degree_category if str(x) != 'nan' and str(x) != ' '])))
    return degree_category_list
def find_best_degree_category(list_of_degree_categories):
    """Collapse each list of candidate categories into a single best one.

    The ranking below runs from most to least preferred; for rows with
    several candidates, the first ranked category present wins. Rows with
    exactly one candidate keep it, and empty rows become 'unknown'.
    """
    degree_category_ranking = ['minor',
                               'all but dissertation',
                               'juris doctor',
                               'doctorate',
                               'associates',
                               'some education',
                               'masters',
                               'bachelors',
                               'license',
                               'hs diploma',
                               'vocational',
                               'certificate']
    final_degree_category_list = []
    for candidates in list_of_degree_categories:
        if not candidates:
            final_degree_category_list.append('unknown')
        elif len(candidates) == 1:
            final_degree_category_list.append(candidates[0])
        else:
            # Pick the highest-ranked candidate; if none of the candidates
            # appear in the ranking, no entry is appended (matching the
            # original behavior).
            for ranked in degree_category_ranking:
                if ranked in candidates:
                    final_degree_category_list.append(ranked)
                    break
    return final_degree_category_list
def process_edu_titles(list_of_titles):
    """Run the full education-title pipeline over raw title strings.

    Returns three parallel lists:
    (subject names, degree names, final degree categories).
    """
    # Lower-case and strip everything except letters, digits and whitespace.
    cleaned = [re.sub('[^A-Za-z0-9\s]+', '', title.lower()) for title in list_of_titles]
    # Tokenize every cleaned title into a word list.
    tokenized = [word_tokenize(title) for title in cleaned]
    # Words shorter than 5 characters are treated as degree acronyms.
    acronym_list = get_list_small_words(tokenized, 5)
    # Split each title into its degree part and its subject part.
    degree_name_list, subject_name_list = split_to_subject_degree(tokenized, acronym_list)
    # Attach candidate degree categories, then collapse to a single one.
    degree_category_list = tag_with_degree_category(degree_name_list, subject_name_list)
    final_degree_category_list = find_best_degree_category(degree_category_list)
    return subject_name_list, degree_name_list, final_degree_category_list
| 3.65625 | 4 |
MiRTrans.py | zhanglu295/MicroTrans | 0 | 12757611 | <reponame>zhanglu295/MicroTrans<filename>MiRTrans.py
import os
import sys
from collections import defaultdict
from sklearn import linear_model
import numpy as np
import scipy.stats
#import pdb
#pdb.set_trace()
import warnings
warnings.filterwarnings('ignore')
class parameter_struc(object):
    """Holds the four input-file paths; 'N' marks "not provided"."""

    def __init__(self):
        # BUG FIX: this method was spelled __int__, so it was never called
        # as a constructor and the default attributes were never set,
        # causing AttributeError later for missing config entries.
        self.Seq_Pair = 'N'       # sequence-based target prediction file
        self.Degrad_Pair = 'N'    # degradome sequencing file
        self.MirExp = 'N'         # microRNA expression file
        self.MrnaExp = 'N'        # mRNA expression file
def input_parameter(argv, parameter_struc):
    """Parse a key=value configuration file into ``parameter_struc``.

    Recognized keys: Pred_seq, Pred_deg, Exp_miRNA, Exp_mRNA. Lines that
    do not contain exactly one '=' are ignored.

    Args:
        argv: Path of the configuration file.
        parameter_struc: Object whose path attributes get filled in.

    Returns:
        1 if all four referenced files exist, otherwise 0 (with a message
        printed for each missing file).
    """
    deter = 1
    with open(argv, "r") as f:
        for line in f:
            parts = line.split('=')
            if len(parts) == 2:
                key, value = parts[0], parts[1].strip('\n')
                if key == 'Pred_seq':
                    parameter_struc.Seq_Pair = value
                elif key == 'Pred_deg':
                    parameter_struc.Degrad_Pair = value
                elif key == 'Exp_miRNA':
                    parameter_struc.MirExp = value
                elif key == 'Exp_mRNA':
                    parameter_struc.MrnaExp = value
    # BUG FIX: the original compared os.path.isfile(...) == 'N', which is
    # never true (bool vs str), so missing input files were never detected
    # and the function always returned 1.
    if not os.path.isfile(parameter_struc.Seq_Pair):
        deter = 0
        print('The file with sequence based microRNA target prediction does not exist')
    if not os.path.isfile(parameter_struc.Degrad_Pair):
        deter = 0
        print('The file with degradome sequencing does not exist')
    if not os.path.isfile(parameter_struc.MirExp):
        deter = 0
        print('The file with microRNA expression does not exist')
    if not os.path.isfile(parameter_struc.MrnaExp):
        deter = 0
        print('The file with mRNA expression does not exist')
    return deter
class Candidate_pair(object):
    """A candidate (microRNA, mRNA) interaction pair."""

    def __init__(self, mirna, mrna):
        # BUG FIX: this method was spelled __ini__, so instantiating the
        # class with arguments raised TypeError and the attributes were
        # never set.
        self.mirna = mirna
        self.mrna = mrna
def input_target_pair(in_path):
    """Read a tab-separated (miRNA, mRNA) file into miRNA -> [mRNA, ...].

    The first line is treated as a header and skipped.

    Args:
        in_path: Path of the prediction file.

    Returns:
        defaultdict(list) mapping each miRNA to its predicted targets.
    """
    # Removed the unused pair_info accumulator from the original and
    # switched to a with-statement so the file is closed on any exit path.
    pair_dict = defaultdict(list)
    with open(in_path, "r") as f:
        for line_index, line in enumerate(f):
            if line_index == 0:
                continue  # header row
            fields = line.strip('\n').split('\t')
            pair_dict[fields[0]].append(fields[1])
    return pair_dict
def input_deg(in_path):
    """Read degradome results into (miRNA, mRNA) -> [p-value string, ...].

    The first line is treated as a header and skipped.

    Args:
        in_path: Path of the degradome sequencing result file
            (miRNA, mRNA, p-value columns, tab separated).

    Returns:
        defaultdict(list) keyed by the (miRNA, mRNA) pair.
    """
    # Removed the unused pair_info accumulator from the original and
    # switched to a with-statement so the file is closed on any exit path.
    pair_dict = defaultdict(list)
    with open(in_path, "r") as f:
        for line_index, line in enumerate(f):
            if line_index == 0:
                continue  # header row
            fields = line.strip('\n').split('\t')
            pair_dict[(fields[0], fields[1])].append(fields[2])
    return pair_dict
def input_expression(in_path):
    """Read a tab-separated expression matrix into id -> [float, ...].

    The first line is treated as a header and skipped; the first column of
    every other line is the RNA identifier, the rest are expression values.
    """
    dict_rna = {}
    with open(in_path, "r") as f:
        for line_index, line in enumerate(f):
            if line_index == 0:
                continue  # header row
            fields = line.strip('\n').split('\t')
            dict_rna[fields[0]] = [float(value) for value in fields[1:]]
    return dict_rna
def cal_dep(Seq_dict, Deg_dict, miR_exp, mRNA_exp):
    """Combine lasso-regression and degradome evidence; write the results.

    For each miRNA a LassoLarsIC model is fit against the expression of
    its candidate targets; non-zero coefficients receive a p-value from a
    permutation bootstrap. Pairs supported by both lasso and degradome
    evidence get a Fisher combined p-value. Results are written to
    'MicroTrans_results.txt'.

    Args:
        Seq_dict: miRNA -> list of candidate target mRNAs.
        Deg_dict: (miRNA, mRNA) -> [degradome p-value string].
        miR_exp: miRNA -> expression vector.
        mRNA_exp: mRNA -> expression vector.
    """
    with open('MicroTrans_results.txt', "w") as f:
        f.write('microRNA')
        f.write('\t')
        f.write('mRNA')
        f.write('\t')
        f.write('p-value')
        f.write('\n')
        # Perform lasso regression for every miRNA.
        lasso_reg = {}
        for (Key_mirna, Value_mrna) in Seq_dict.items():
            print('processing microRNA ' + Key_mirna, end="\r")
            TotalmRNA = [mRNA_exp[mrna] for mrna in Value_mrna]
            clf = linear_model.LassoLarsIC(criterion='bic')
            clf.fit(np.transpose(np.asarray(TotalmRNA)), np.asarray(miR_exp[Key_mirna]))
            # BUG FIX: np.nonzero returns a tuple of index arrays, so
            # len(np.nonzero(...)) is always 1 (the number of dimensions);
            # inspect the index array itself to skip all-zero fits.
            if len(np.nonzero(clf.coef_)[0]) == 0:
                continue
            stdev = bootstrap(np.asarray(miR_exp[Key_mirna]), np.asarray(TotalmRNA), len(Value_mrna))
            for j in range(len(clf.coef_)):
                if clf.coef_[j] != 0:
                    # One-sided p-value from the bootstrap z-score.
                    lasso_reg[(Key_mirna, Value_mrna[j])] = 1 - round(
                        scipy.stats.norm(0, 1).cdf(clf.coef_[j] / stdev[j]), 3)
        sharedKey = {}
        # Pairs supported by both sources: combine the two p-values.
        for inter_key in set(lasso_reg).intersection(Deg_dict):
            sharedKey[(inter_key[0], inter_key[1])] = 1
            # BUG FIX: the original indexed a set (deg_set[inter_key]),
            # which raises TypeError; the degradome p-value lives in
            # Deg_dict. Also, combine_pvalues returns a (statistic,
            # p-value) pair, and the original passed the whole tuple to
            # output(), which then crashed in float(); write only the
            # combined p-value.
            combined = scipy.stats.combine_pvalues(
                [float(lasso_reg[inter_key]), float(Deg_dict[inter_key][0])], method='fisher')
            output(inter_key[0], inter_key[1], combined[1], f)
        # Pairs supported by only one source keep their single p-value.
        for uniq_key in lasso_reg.keys():
            if uniq_key not in sharedKey:
                output(uniq_key[0], uniq_key[1], lasso_reg[uniq_key], f)
        for uniq_key in Deg_dict.keys():
            if uniq_key not in sharedKey:
                output(uniq_key[0], uniq_key[1], Deg_dict[uniq_key][0], f)
    print('Succesfully finished, the results are in MicroTrans_results.txt')
    return None
def output(mirna_name, mrna_name, p_value, f):
    """Write one tab-separated result row: miRNA, mRNA, p-value rounded to 4 digits."""
    rounded = str(round(float(p_value), 4))
    f.write(mirna_name + '\t' + mrna_name + '\t' + rounded + '\n')
    return None
def bootstrap(miR_exp, mRNA_exp, num_mrna):
    """Estimate per-coefficient standard deviations by permutation.

    Refits the lasso 1000 times against column-permuted mRNA expression
    and returns the element-wise standard deviation of the coefficients,
    used as the null spread for the z-scores in cal_dep().

    Args:
        miR_exp: miRNA expression vector.
        mRNA_exp: candidate-target expression matrix (targets x samples).
        num_mrna: number of candidate targets. NOTE(review): this
            parameter is unused; kept only for caller compatibility.

    Returns:
        numpy array of coefficient standard deviations.
    """
    clf = linear_model.LassoLarsIC(criterion='bic')
    boot_coll = []
    # Removed the unused `std = []` accumulator from the original.
    for _ in range(1000):
        # Permute the sample axis to break any real miRNA/mRNA coupling.
        rand_mrna = np.transpose(np.random.permutation(np.transpose(mRNA_exp)))
        clf.fit(np.transpose(rand_mrna), np.transpose(miR_exp))
        boot_coll.append(clf.coef_)
    return np.std(boot_coll, axis=0)
def main():
    """Entry point: parse the config file, load all inputs, run the analysis."""
    params = parameter_struc()
    # input_parameter returns 1 only when all four input files exist.
    if input_parameter(sys.argv[1], params) == 1:
        print('Analysis Begin...')
        # Sequence-based target predictions.
        seq_pairs = input_target_pair(params.Seq_Pair)
        print('Finish reading sequence based prediction results')
        # Degradome sequencing evidence.
        deg_pairs = input_deg(params.Degrad_Pair)
        print('Finish reading degradome sequencing results')
        # Expression profiles.
        mirna_expression = input_expression(params.MirExp)
        print('Finish reading microRNA expression')
        mrna_expression = input_expression(params.MrnaExp)
        print('Finish reading mRNA expression')
        cal_dep(seq_pairs, deg_pairs, mirna_expression, mrna_expression)
    return None
if __name__=='__main__':
main()
| 2.8125 | 3 |
filter_plugins/collections.py | GParedesDevo/ansible-redis | 3 | 12757612 | <filename>filter_plugins/collections.py
def arraypermute(collection, key):
    """Return every pairwise concatenation str(a) + str(b), ordered by `collection` first."""
    result = []
    for left in collection:
        for right in key:
            result.append(str(left) + str(right))
    return result
class FilterModule(object):
    """Ansible filter-plugin entry point exposing the arraypermute filter."""

    def filters(self):
        # Ansible calls this method to discover the filters this plugin provides.
        return {'arraypermute': arraypermute}
tests/masonry/test_api.py | crunchr/poetry | 2 | 12757613 | import os
import tarfile
import zipfile
from contextlib import contextmanager
from poetry.masonry import api
from poetry.utils.helpers import temporary_directory
@contextmanager
def cwd(directory):
    """Temporarily chdir into *directory*; restore the previous cwd on exit."""
    original = os.getcwd()
    os.chdir(str(directory))
    try:
        yield
    finally:
        # Restore even if the body raised.
        os.chdir(original)
fixtures = os.path.join(os.path.dirname(__file__), "builders", "fixtures")
def test_get_requires_for_build_wheel():
    """Check the PEP 517 wheel build requirements of the 'complete' fixture."""
    expected = ["cleo>=0.6.0,<0.7.0", "cachy[msgpack]>=0.2.0,<0.3.0"]
    with cwd(os.path.join(fixtures, "complete")):
        # BUG FIX: the comparison result was discarded (no assert), so
        # this test could never fail.
        assert api.get_requires_for_build_wheel() == expected
def test_get_requires_for_build_sdist():
    """Check the PEP 517 sdist build requirements of the 'complete' fixture."""
    expected = ["cleo>=0.6.0,<0.7.0", "cachy[msgpack]>=0.2.0,<0.3.0"]
    with cwd(os.path.join(fixtures, "complete")):
        # BUG FIX: the comparison result was discarded (no assert), so
        # this test could never fail.
        assert api.get_requires_for_build_sdist() == expected
def test_build_wheel():
    """Build a wheel for the 'complete' fixture and check its dist-info contents."""
    with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, "complete")):
        filename = api.build_wheel(tmp_dir)

        # Renamed the local from `zip`, which shadowed the builtin.
        with zipfile.ZipFile(str(os.path.join(tmp_dir, filename))) as wheel:
            namelist = wheel.namelist()

        assert "my_package-1.2.3.dist-info/entry_points.txt" in namelist
        assert "my_package-1.2.3.dist-info/WHEEL" in namelist
        assert "my_package-1.2.3.dist-info/METADATA" in namelist
def test_build_sdist():
    """Build an sdist for the 'complete' fixture and verify the LICENSE is packaged."""
    with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, "complete")):
        filename = api.build_sdist(tmp_dir)

        with tarfile.open(str(os.path.join(tmp_dir, filename))) as archive:
            members = archive.getnames()

        assert "my-package-1.2.3/LICENSE" in members
| 2.328125 | 2 |
src/wordbase/debug.py | vsemionov/wordbase | 0 | 12757614 | # Copyright (C) 2011 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of the contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import traceback
enabled = False
def not_impl(self, name=None):
    """Raise NotImplementedError naming the instance's class and method.

    If *name* is omitted, the calling method's name is read from the
    stack frame one level up.
    """
    method_name = name if name else traceback.extract_stack()[-2][2]
    message = "{}.{} not implemented".format(self.__class__.__name__, method_name)
    raise NotImplementedError(message)
class NullConnection:
    """Connection stand-in that silently discards writes.

    All write_* methods are no-ops; any other attribute access falls
    through __getattr__ and raises NotImplementedError via not_impl().
    """

    def write_line(self, line, split=True):
        """Discard a single output line."""

    def write_status(self, code, message):
        """Discard a status reply."""

    def write_text_end(self):
        """Discard the end-of-text marker."""

    def write_text(self, lines):
        """Discard a block of text lines."""

    def __getattr__(self, name):
        not_impl(self, name)
| 1.671875 | 2 |
tests/mlir/polybench/python/syr2k.py | chhzh123/heterocl | 0 | 12757615 | import heterocl as hcl
import numpy as np
def top_syr2k(M=20, N=30, alpha=1.5, beta=1.2, dtype=hcl.Int(), target=None):
    """Build the HeteroCL SYR2K kernel: C := beta*C + alpha*(A*B^T + B*A^T).

    Only the lower triangle (j <= i) of C is updated, mirroring the golden
    reference below.

    NOTE(review): the ``dtype=hcl.Int()`` default is evaluated once at
    import time — confirm hcl.Int() has no per-call state before relying
    on it.

    Args:
        M: inner (reduction) dimension of A and B.
        N: size of the symmetric N x N output C.
        alpha, beta: scalar multipliers.
        dtype: HeteroCL data type used for hcl.init.
        target: HeteroCL build target (None -> default simulation).

    Returns:
        The compiled HeteroCL module for the given target.
    """
    hcl.init(dtype)
    A = hcl.placeholder((N, M), "A")
    B = hcl.placeholder((N, M), "B")
    C = hcl.placeholder((N, N), "C")

    def kernel_syr2k(A, B, C):
        # Irregular axis access: the inner j loop bound depends on i.
        with hcl.Stage("loop_1"):
            with hcl.for_(0, N, name="i") as i:
                # Scale the existing lower triangle of C by beta.
                with hcl.for_(0, i + 1, name="j") as j:
                    C[i][j] *= beta
                # Accumulate the symmetric rank-2k update over k.
                with hcl.for_(0, M, name="k") as k:
                    with hcl.for_(0, i + 1, name="j") as j:
                        C[i][j] += A[j][k] * alpha * B[i][k] + B[j][k] * alpha * A[i][k]

    s = hcl.create_schedule([A, B, C], kernel_syr2k)

    #### Apply customizations ####

    #### Apply customizations ####

    return hcl.build(s, target=target)
def syr2k_golden(alpha, beta, M, N, A, B, C, DATA_TYPE):
    """Golden SYR2K reference: C := beta*C + alpha*(A*B^T + B*A^T), lower triangle.

    Only elements C[i][j] with j <= i are updated, matching the HeteroCL
    kernel above. C is modified in place; nothing is returned.

    Args:
        alpha, beta: scalar multipliers.
        M: inner (reduction) dimension; N: size of the N x N output.
        A, B: N x M input matrices (indexable as A[i][k]).
        C: N x N output matrix, mutated in place.
        DATA_TYPE: unused; kept for interface compatibility.
    """
    # BUG FIX: the original looked up NDATA_TYPE_DICT, a name not defined
    # anywhere in this module, raising NameError on every call; the
    # resulting `dtype` was never used, so the lookup is removed.
    for i in range(N):
        for j in range(i + 1):
            C[i][j] *= beta
        for k in range(M):
            for j in range(i + 1):
                C[i][j] += A[j][k] * alpha * B[i][k] + B[j][k] * alpha * A[i][k]
file.py | wario2k/UnityFPSGame | 0 | 12757616 | import os
'''
def list_files(startpath):
my_file = open("file.txt","w+")
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = '-' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
my_file.write('{}{}'.format(subindent, f))
my_file.close()
list_files("Unity Source")
'''
from pathlib import Path
class DisplayablePath(object):
    """Render a directory tree with box-drawing connectors (like `tree`).

    Each instance wraps one pathlib.Path together with its position in the
    tree; make_tree() yields instances in display order and displayable()
    turns one instance into a fully prefixed output line.
    """

    display_filename_prefix_middle = "├──"
    display_filename_prefix_last = "└──"
    display_parent_prefix_middle = "    "
    display_parent_prefix_last = "│   "

    def __init__(self, path, parent_path, is_last):
        self.path = Path(str(path))
        self.parent = parent_path
        # Last sibling in its directory -> "└──" connector, else "├──".
        self.is_last = is_last
        if self.parent:
            self.depth = self.parent.depth + 1
        else:
            self.depth = 0

    @classmethod
    def make_tree(cls, root, parent=None, is_last=False, criteria=None):
        """Yield DisplayablePath nodes depth-first in display order.

        Args:
            root: Root directory (path-like).
            parent: Parent DisplayablePath (internal recursion argument).
            is_last: Whether *root* is the last child of its parent.
            criteria: Optional predicate(path) -> bool filter; defaults to
                accepting every entry.
        """
        root = Path(str(root))
        criteria = criteria or cls._default_criteria

        displayable_root = cls(root, parent, is_last)
        yield displayable_root

        # Children sorted case-insensitively; directories recurse in place.
        children = sorted(list(path
                               for path in root.iterdir()
                               if criteria(path)),
                          key=lambda s: str(s).lower())
        count = 1
        for path in children:
            is_last = count == len(children)
            if path.is_dir():
                yield from cls.make_tree(path,
                                         parent=displayable_root,
                                         is_last=is_last,
                                         criteria=criteria)
            else:
                yield cls(path, displayable_root, is_last)
            count += 1

    @classmethod
    def _default_criteria(cls, path):
        # Default filter: include every entry.
        return True

    @property
    def displayname(self):
        """Entry name; directories get a trailing '/'.

        Note: this property was defined twice (identically) in the
        original; the dead duplicate has been removed.
        """
        if self.path.is_dir():
            return self.path.name + '/'
        return self.path.name

    def displayable(self):
        """Return this node's fully prefixed display line."""
        if self.parent is None:
            return self.displayname

        _filename_prefix = (self.display_filename_prefix_last
                            if self.is_last
                            else self.display_filename_prefix_middle)

        parts = ['{!s} {!s}'.format(_filename_prefix,
                                    self.displayname)]

        # Walk up the ancestry to build the vertical-bar indentation.
        parent = self.parent
        while parent and parent.parent is not None:
            parts.append(self.display_parent_prefix_middle
                         if parent.is_last
                         else self.display_parent_prefix_last)
            parent = parent.parent

        return ''.join(reversed(parts))
# Render the "Unity Source" directory tree, echoing each line to the
# console and appending it to file.txt.
paths = DisplayablePath.make_tree(Path("Unity Source"))
myfile = open("file.txt","w+")
for path in paths:
    print(path.displayable())
    # NOTE(review): write() appends no newline, so file.txt's lines run
    # together while the console output is line-separated — confirm
    # this is intended.
    myfile.write(path.displayable())
myfile.close()
libraries/unified-model/unified_model/evaluation_utils.py | felixridinger/machine-learning-lab | 55 | 12757617 | import logging
import timeit
import numpy as np
import pandas as pd
from tqdm import tqdm
from unified_model import UnifiedModel
from unified_model.utils import truncate_middle, ITEM_COLUMN, SCORE_COLUMN
log = logging.getLogger(__name__)
UNKNOWN_ITEM = '<UNK>'
# https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)
# http://scikit-learn.org/stable/modules/model_evaluation.html
def f1_score(precision, recall):
    """Harmonic mean of precision and recall.

    Works on scalars and on pandas Series alike; for scalars a 0/0 input
    raises ZeroDivisionError (callers here catch it explicitly).
    """
    numerator = 2 * precision * recall
    return numerator / (precision + recall)
def evaluate_classifier(unified_model, test_data: list, target_predictions: list, k: list = None, per_label=False):
    """Evaluate a classifier model at the given top-k cutoffs.

    Args:
        unified_model: model exposing predict(data, limit=...).
        test_data: list of model inputs.
        target_predictions: true label per input (parallel to test_data).
        k: cutoff(s); an int is wrapped in a list, None defaults to [1, 5].
        per_label: also return the per-label score table when True.

    Returns:
        A metrics dict, or (metrics, per-label DataFrame) if per_label.
    """
    # TODO multithreaded evaluation
    if k is None:
        k = [1, 5]  # default cutoffs
    elif isinstance(k, int):
        k = [k]
    k.sort()

    pred_labels, pred_scores, in_top_k, avg_pred_time = _process_predictions(
        unified_model, test_data, target_predictions, k)
    scored_labels = _score_labels(target_predictions, k, pred_labels, in_top_k)
    metrics = _calculate_metrics(scored_labels, k)
    metrics['avg_prediction_time'] = avg_pred_time

    if per_label:
        return metrics, scored_labels
    return metrics
def _calculate_metrics(scored_labels, k):
    """Aggregate per-label scores into micro/macro precision, recall and F1.

    Micro metrics pool true positives across labels; macro metrics average
    the per-label values. Zero denominators yield 0 for the micro values.
    """
    metrics = {}
    for cutoff in k:
        suffix = '@k' + str(cutoff)
        tp_total = scored_labels['true_positives' + suffix].sum()
        try:
            metrics['micro_precision' + suffix] = tp_total / scored_labels['predicted_count' + suffix].sum()
        except ZeroDivisionError:
            metrics['micro_precision' + suffix] = 0
        metrics['micro_recall' + suffix] = tp_total / scored_labels['count'].sum()
        try:
            metrics['micro_f1' + suffix] = f1_score(metrics['micro_precision' + suffix],
                                                    metrics['micro_recall' + suffix])
        except ZeroDivisionError:
            metrics['micro_f1' + suffix] = 0
        metrics['macro_precision' + suffix] = scored_labels['precision' + suffix].mean()
        metrics['macro_recall' + suffix] = scored_labels['recall' + suffix].mean()
        metrics['macro_f1' + suffix] = scored_labels['f1' + suffix].mean()
    return metrics
def _score_labels(target_predictions, k, pred_labels, in_top_k):
    """Build a per-label DataFrame of counts and precision/recall/F1 at each k.

    Args:
        target_predictions: true label per test item.
        k: sorted list of cutoffs.
        pred_labels: (n_items, max_k) array of predicted labels.
        in_top_k: cutoff -> boolean "true label within top-k" array.

    Returns:
        DataFrame indexed by label, sorted by support ('count') descending,
        with NaN ratios filled as 0.
    """
    unique_labels = list(set(target_predictions))
    target_predictions = np.array(target_predictions)

    columns = ['count']  # tp + fn
    for i in k:
        i = str(i)
        columns.append('predicted_count@k' + i)  # tp + fp
        columns.append('true_positives@k' + i)
        columns.append('precision@k' + i)
        columns.append('recall@k' + i)
        columns.append('f1@k' + i)

    df = pd.DataFrame(0, columns=columns, index=unique_labels)
    for label in unique_labels:
        # BUG FIX: use .at instead of chained df[col][label] assignment —
        # chained indexing can silently write to a temporary copy and is
        # removed/warned against in modern pandas.
        df.at[label, 'count'] = np.sum(target_predictions == label)
        for i in k:
            df.at[label, 'predicted_count@k' + str(i)] = np.sum(pred_labels[:, :i].flatten() == label)
            df.at[label, 'true_positives@k' + str(i)] = np.sum(in_top_k[i][target_predictions == label])

    for i in k:
        i = str(i)
        df['precision@k' + i] = df['true_positives@k' + i] / df['predicted_count@k' + i]
        df['recall@k' + i] = df['true_positives@k' + i] / df['count']
        df['f1@k' + i] = f1_score(df['precision@k' + i], df['recall@k' + i])
    df = df.fillna(0)
    return df.sort_values(by='count', ascending=False)
def _fill_missing_predictions(df: pd.DataFrame, max_k: int) -> pd.DataFrame:
    """Pad a prediction frame with <UNK>/score-0 rows until it has max_k rows.

    Returns the input unchanged when it already has at least max_k rows.
    """
    missing = max_k - df.shape[0]
    if missing <= 0:
        return df
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed
    # in pandas 2.0; build the filler rows once and concatenate instead
    # of appending one row per iteration.
    filler = pd.DataFrame([{ITEM_COLUMN: UNKNOWN_ITEM,
                            SCORE_COLUMN: 0}] * missing)
    return pd.concat([df, filler], ignore_index=True)
def _process_predictions(unified_model, test_data, target_predictions, k):
    """Run the model over test_data and collect its top-k predictions.

    Args:
        unified_model: model exposing predict(data, limit=...).
        test_data: list of model inputs.
        target_predictions: true label per input (parallel to test_data).
        k: sorted list of cutoffs; max(k) predictions are requested per item.

    Returns:
        Tuple (pred_labels, pred_scores, in_top_k, avg_pred_time):
        pred_labels/pred_scores are (n_items, max_k) arrays, in_top_k maps
        each cutoff to a boolean hit array, avg_pred_time is the mean
        prediction latency in milliseconds.
    """
    # allow target_predictions to also contain a list of true labels per prediction
    target_predictions = np.array(target_predictions)  # convert true predictions to a numpy array
    start_time = timeit.default_timer()
    predictions = []
    for data in tqdm(test_data, desc="Calculating metrics..."):
        try:
            prediction_result = unified_model.predict(data, limit=np.amax(k))
            # Model returned fewer rows than requested: pad with <UNK> rows.
            if prediction_result.shape[0] < np.amax(k):
                log.warning("Model returned " + str(prediction_result.shape[0]) + " predictions, "
                            + str(np.amax(k)) + " were expected.")
                log.debug("Model data: " + str(data))
                prediction_result = _fill_missing_predictions(prediction_result, np.amax(k))
            # NOTE(review): if predict() returns None, the .shape access
            # above already raises and lands in the except branch, so this
            # None check appears unreachable — confirm the intended order.
            if prediction_result is None:
                log.warning("Model returned no prediction (None).")
                log.debug("Model data: " + str(data))
                # add empty predictions
                prediction_result = _fill_missing_predictions(pd.DataFrame(columns=[ITEM_COLUMN, SCORE_COLUMN]),
                                                              np.amax(k))
        except Exception as ex:
            # Prediction crashed entirely: log and substitute all-<UNK> rows
            # so the arrays below keep a uniform shape.
            log.warning("Exception during prediction: " + str(ex))
            log.debug("Model data: " + str(data))
            prediction_result = _fill_missing_predictions(pd.DataFrame(columns=[ITEM_COLUMN, SCORE_COLUMN]), np.amax(k))
        predictions.append(prediction_result)
    avg_pred_time = ((timeit.default_timer() - start_time) / len(test_data) * 1000)

    pred_labels = np.array([prediction[ITEM_COLUMN].tolist() for prediction in predictions])
    pred_scores = np.array([prediction[SCORE_COLUMN].tolist() for prediction in predictions])

    # For every cutoff, flag whether the true label appears in the top i.
    in_top_k = {}
    for i in k:
        in_top_k[i] = np.array(
            [true_label in k_predictions[:i] for true_label, k_predictions in zip(target_predictions, pred_labels)])

    return pred_labels, pred_scores, in_top_k, avg_pred_time
def compare_models(unified_models: list, data_list: list, target_predictions: list, styled=True,
                   **kwargs) -> pd.DataFrame:
    """
    Compare evaluation metrics for the given list of models.

    # Arguments
        data_list (list): List of data items used for the evaluations.
        target_predictions (list): List of true predictions for test data.
        styled (boolean): If 'True', a styled DataFrame will be returned (with coloring, etc.)
        **kwargs: Provide additional keyword-based parameters.

    # Returns
        DataFrame that summarizes the metrics of all of the given models.
    """
    names = []
    rows = []
    for model in unified_models:
        print("Calculating metrics for " + str(model))
        names.append(truncate_middle(str(model), 40))
        rows.append(model.evaluate(data_list, target_predictions, **kwargs))

    ## compare evaluation df, also use color to show best and worst values
    # add random baseline and combined score
    summary = pd.DataFrame(rows, index=names)
    # https://pandas.pydata.org/pandas-docs/stable/style.html
    if not styled:
        return summary
    # return summary.style.bar(color='#f0fbff')
    # Shade each metric column from low (light) to high (dark green).
    return summary.style.background_gradient(cmap='BuGn', low=0.1, high=0.8, axis=0)
def test_unified_model(model_instance: UnifiedModel, data=None, conda_environment=False):
    """
    Helps to test whether your model instance can be successfully loaded in another python environment.
    This method saves the model instance, loads the model file in another python process,
    and (optionally) calls `predict()` with the provided test data.

    # Arguments
        model_instance (UnifiedModel): Unified model instance.
        data (string or bytes): Input data to test the model (optional).
        conda_environment (bool): If `True`, a clean conda environment will be created for the test (optional).
    """
    import sys
    import os
    import tempfile
    import subprocess
    import shutil

    log.info("Starting model test.")
    # Persist the model so a fresh interpreter can reload it from disk.
    temp_test_folder = tempfile.mkdtemp()
    saved_model_path = model_instance.save(os.path.join(temp_test_folder, "test_model"))

    python_runtime = sys.executable

    CONDA_ENV = "model-test-env"
    if conda_environment:
        # Build an isolated environment and install the published package
        # there; fall back to the current interpreter if creation fails.
        # NOTE(review): the conda/pip paths are hard-coded to /opt/conda —
        # confirm this matches the deployment image.
        log.info("Creating clean conda environment.")
        try:
            log.info(subprocess.check_output("conda create -n " + CONDA_ENV + " python=3.6 cython -y",
                                             stderr=subprocess.STDOUT, shell=True).decode("utf-8"))
            log.info("Installing unified model.")
            log.info(
                subprocess.check_output("/opt/conda/envs/"
                                        + CONDA_ENV
                                        + "/bin/pip install --upgrade unified-model",
                                        stderr=subprocess.STDOUT,
                                        shell=True).decode("utf-8"))
            python_runtime = "/opt/conda/envs/" + CONDA_ENV + "/bin/python"
        except subprocess.CalledProcessError as e:
            log.info("Failed to create conda environment: \n" + e.output.decode("utf-8"))

    # Invoke the saved model file as a script in the chosen interpreter.
    # NOTE(review): shell=True with `data` interpolated into the command
    # string allows shell injection if `data` is untrusted — consider
    # subprocess.run([...], shell=False) with an argument list.
    test_command = python_runtime + " " + saved_model_path + ' predict'
    if data:
        test_command += ' --input-data "' + str(data) + '"'
    log.info("Executing " + test_command)
    try:
        log.info(subprocess.check_output(test_command, stderr=subprocess.STDOUT, shell=True).decode("utf-8"))
        log.info("Finished model test successfully!")
    except subprocess.CalledProcessError as e:
        # A failing test is logged, not raised.
        log.info("Test failed: \n" + e.output.decode("utf-8"))

    shutil.rmtree(temp_test_folder)

    if conda_environment:
        log.info("Removing conda environment.")
        subprocess.call("conda remove --name " + CONDA_ENV + " --all -y", shell=True)
| 2.390625 | 2 |
schedule/migrations/0002_event_color_event.py | kimarakov/schedule | 1,065 | 12757618 | <gh_stars>1000+
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional 'color_event' display color field to the Event model."""

    dependencies = [("schedule", "0001_initial")]

    operations = [
        migrations.AddField(
            model_name="event",
            name="color_event",
            # Optional short color value (max 10 chars, e.g. a hex code).
            field=models.CharField(
                verbose_name="Color event", blank=True, max_length=10, null=True
            ),
        )
    ]
| 1.742188 | 2 |
website/migrations/0014_auto_20170105_1151.py | CrowdcoinSA/crowdcoin-platform | 0 | 12757619 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2017-01-05 09:51
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Transaction.datetime's default and make UniqueIdentifier active by default.

    NOTE(review): the datetime default below is the fixed moment this
    migration was auto-generated (not "now"); confirm that freezing this
    timestamp is intentional before reusing the pattern.
    """

    dependencies = [
        ('website', '0013_auto_20170105_1113'),
    ]

    operations = [
        migrations.AlterField(
            model_name='transaction',
            name='datetime',
            field=models.DateTimeField(auto_created=True, default=datetime.datetime(2017, 1, 5, 11, 51, 51, 548614)),
        ),
        migrations.AlterField(
            model_name='uniqueidentifier',
            name='is_active',
            field=models.BooleanField(default=True),
        ),
    ]
| 1.734375 | 2 |
intersight/models/storage_virtual_drive.py | sdnit-se/intersight-python | 21 | 12757620 | # coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StorageVirtualDrive(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'create_time': 'datetime',
'domain_group_moid': 'str',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'shared_scope': 'str',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'ancestors': 'list[MoBaseMoRef]',
'parent': 'MoBaseMoRef',
'permission_resources': 'list[MoBaseMoRef]',
'device_mo_id': 'str',
'dn': 'str',
'rn': 'str',
'model': 'str',
'revision': 'str',
'serial': 'str',
'vendor': 'str',
'access_policy': 'str',
'actual_write_cache_policy': 'str',
'available_size': 'str',
'block_size': 'str',
'bootable': 'str',
'config_state': 'str',
'configured_write_cache_policy': 'str',
'connection_protocol': 'str',
'drive_cache': 'str',
'drive_security': 'str',
'drive_state': 'str',
'io_policy': 'str',
'name': 'str',
'num_blocks': 'str',
'oper_state': 'str',
'operability': 'str',
'physical_block_size': 'str',
'presence': 'str',
'read_policy': 'str',
'security_flags': 'str',
'size': 'str',
'strip_size': 'str',
'type': 'str',
'uuid': 'str',
'vendor_uuid': 'str',
'virtual_drive_id': 'str',
'physical_disk_usages': 'list[StoragePhysicalDiskUsageRef]',
'registered_device': 'AssetDeviceRegistrationRef',
'storage_controller': 'StorageControllerRef',
'vd_member_eps': 'list[StorageVdMemberEpRef]',
'virtual_drive_extension': 'StorageVirtualDriveExtensionRef'
}
attribute_map = {
'account_moid': 'AccountMoid',
'create_time': 'CreateTime',
'domain_group_moid': 'DomainGroupMoid',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'shared_scope': 'SharedScope',
'tags': 'Tags',
'version_context': 'VersionContext',
'ancestors': 'Ancestors',
'parent': 'Parent',
'permission_resources': 'PermissionResources',
'device_mo_id': 'DeviceMoId',
'dn': 'Dn',
'rn': 'Rn',
'model': 'Model',
'revision': 'Revision',
'serial': 'Serial',
'vendor': 'Vendor',
'access_policy': 'AccessPolicy',
'actual_write_cache_policy': 'ActualWriteCachePolicy',
'available_size': 'AvailableSize',
'block_size': 'BlockSize',
'bootable': 'Bootable',
'config_state': 'ConfigState',
'configured_write_cache_policy': 'ConfiguredWriteCachePolicy',
'connection_protocol': 'ConnectionProtocol',
'drive_cache': 'DriveCache',
'drive_security': 'DriveSecurity',
'drive_state': 'DriveState',
'io_policy': 'IoPolicy',
'name': 'Name',
'num_blocks': 'NumBlocks',
'oper_state': 'OperState',
'operability': 'Operability',
'physical_block_size': 'PhysicalBlockSize',
'presence': 'Presence',
'read_policy': 'ReadPolicy',
'security_flags': 'SecurityFlags',
'size': 'Size',
'strip_size': 'StripSize',
'type': 'Type',
'uuid': 'Uuid',
'vendor_uuid': 'VendorUuid',
'virtual_drive_id': 'VirtualDriveId',
'physical_disk_usages': 'PhysicalDiskUsages',
'registered_device': 'RegisteredDevice',
'storage_controller': 'StorageController',
'vd_member_eps': 'VdMemberEps',
'virtual_drive_extension': 'VirtualDriveExtension'
}
def __init__(self, account_moid=None, create_time=None, domain_group_moid=None, mod_time=None, moid=None, object_type=None, owners=None, shared_scope=None, tags=None, version_context=None, ancestors=None, parent=None, permission_resources=None, device_mo_id=None, dn=None, rn=None, model=None, revision=None, serial=None, vendor=None, access_policy=None, actual_write_cache_policy=None, available_size=None, block_size=None, bootable=None, config_state=None, configured_write_cache_policy=None, connection_protocol=None, drive_cache=None, drive_security=None, drive_state=None, io_policy=None, name=None, num_blocks=None, oper_state=None, operability=None, physical_block_size=None, presence=None, read_policy=None, security_flags=None, size=None, strip_size=None, type=None, uuid=None, vendor_uuid=None, virtual_drive_id=None, physical_disk_usages=None, registered_device=None, storage_controller=None, vd_member_eps=None, virtual_drive_extension=None):
"""
StorageVirtualDrive - a model defined in Swagger
"""
self._account_moid = None
self._create_time = None
self._domain_group_moid = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._shared_scope = None
self._tags = None
self._version_context = None
self._ancestors = None
self._parent = None
self._permission_resources = None
self._device_mo_id = None
self._dn = None
self._rn = None
self._model = None
self._revision = None
self._serial = None
self._vendor = None
self._access_policy = None
self._actual_write_cache_policy = None
self._available_size = None
self._block_size = None
self._bootable = None
self._config_state = None
self._configured_write_cache_policy = None
self._connection_protocol = None
self._drive_cache = None
self._drive_security = None
self._drive_state = None
self._io_policy = None
self._name = None
self._num_blocks = None
self._oper_state = None
self._operability = None
self._physical_block_size = None
self._presence = None
self._read_policy = None
self._security_flags = None
self._size = None
self._strip_size = None
self._type = None
self._uuid = None
self._vendor_uuid = None
self._virtual_drive_id = None
self._physical_disk_usages = None
self._registered_device = None
self._storage_controller = None
self._vd_member_eps = None
self._virtual_drive_extension = None
if account_moid is not None:
self.account_moid = account_moid
if create_time is not None:
self.create_time = create_time
if domain_group_moid is not None:
self.domain_group_moid = domain_group_moid
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if shared_scope is not None:
self.shared_scope = shared_scope
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if ancestors is not None:
self.ancestors = ancestors
if parent is not None:
self.parent = parent
if permission_resources is not None:
self.permission_resources = permission_resources
if device_mo_id is not None:
self.device_mo_id = device_mo_id
if dn is not None:
self.dn = dn
if rn is not None:
self.rn = rn
if model is not None:
self.model = model
if revision is not None:
self.revision = revision
if serial is not None:
self.serial = serial
if vendor is not None:
self.vendor = vendor
if access_policy is not None:
self.access_policy = access_policy
if actual_write_cache_policy is not None:
self.actual_write_cache_policy = actual_write_cache_policy
if available_size is not None:
self.available_size = available_size
if block_size is not None:
self.block_size = block_size
if bootable is not None:
self.bootable = bootable
if config_state is not None:
self.config_state = config_state
if configured_write_cache_policy is not None:
self.configured_write_cache_policy = configured_write_cache_policy
if connection_protocol is not None:
self.connection_protocol = connection_protocol
if drive_cache is not None:
self.drive_cache = drive_cache
if drive_security is not None:
self.drive_security = drive_security
if drive_state is not None:
self.drive_state = drive_state
if io_policy is not None:
self.io_policy = io_policy
if name is not None:
self.name = name
if num_blocks is not None:
self.num_blocks = num_blocks
if oper_state is not None:
self.oper_state = oper_state
if operability is not None:
self.operability = operability
if physical_block_size is not None:
self.physical_block_size = physical_block_size
if presence is not None:
self.presence = presence
if read_policy is not None:
self.read_policy = read_policy
if security_flags is not None:
self.security_flags = security_flags
if size is not None:
self.size = size
if strip_size is not None:
self.strip_size = strip_size
if type is not None:
self.type = type
if uuid is not None:
self.uuid = uuid
if vendor_uuid is not None:
self.vendor_uuid = vendor_uuid
if virtual_drive_id is not None:
self.virtual_drive_id = virtual_drive_id
if physical_disk_usages is not None:
self.physical_disk_usages = physical_disk_usages
if registered_device is not None:
self.registered_device = registered_device
if storage_controller is not None:
self.storage_controller = storage_controller
if vd_member_eps is not None:
self.vd_member_eps = vd_member_eps
if virtual_drive_extension is not None:
self.virtual_drive_extension = virtual_drive_extension
    @property
    def account_moid(self):
        """str: The Account ID for this managed object."""
        return self._account_moid
    @account_moid.setter
    def account_moid(self, account_moid):
        """Set the Account ID for this managed object (str)."""
        self._account_moid = account_moid
    @property
    def create_time(self):
        """datetime: The time when this managed object was created."""
        return self._create_time
    @create_time.setter
    def create_time(self, create_time):
        """Set the creation time of this managed object (datetime)."""
        self._create_time = create_time
    @property
    def domain_group_moid(self):
        """str: The DomainGroup ID for this managed object."""
        return self._domain_group_moid
    @domain_group_moid.setter
    def domain_group_moid(self, domain_group_moid):
        """Set the DomainGroup ID for this managed object (str)."""
        self._domain_group_moid = domain_group_moid
    @property
    def mod_time(self):
        """datetime: The time when this managed object was last modified."""
        return self._mod_time
    @mod_time.setter
    def mod_time(self, mod_time):
        """Set the last-modification time of this managed object (datetime)."""
        self._mod_time = mod_time
    @property
    def moid(self):
        """str: The unique identifier of this Managed Object instance."""
        return self._moid
    @moid.setter
    def moid(self, moid):
        """Set the unique identifier of this Managed Object instance (str)."""
        self._moid = moid
    @property
    def object_type(self):
        """str: The fully-qualified type (class name) of this managed object.

        Optional; the ObjectType is implied from the URL path.  If specified,
        it must match the class name in the URL path.
        """
        return self._object_type
    @object_type.setter
    def object_type(self, object_type):
        """Set the fully-qualified type of this managed object (str)."""
        self._object_type = object_type
    @property
    def owners(self):
        """list[str]: The array of owners which represent effective ownership
        of this object."""
        return self._owners
    @owners.setter
    def owners(self, owners):
        """Set the array of owners of this object (list[str])."""
        self._owners = owners
    @property
    def shared_scope(self):
        """str: Scope of a shared MO — global, or a specific license tier.

        Objects published through Intersight's global catalogs have 'shared'
        ownership; this property differentiates the scope (global or a
        specific license tier) to which such a shared MO belongs.
        """
        return self._shared_scope
    @shared_scope.setter
    def shared_scope(self, shared_scope):
        """Set the shared scope of this managed object (str)."""
        self._shared_scope = shared_scope
    @property
    def tags(self):
        """list[MoTag]: Key/value meta-data tags attached to this object."""
        return self._tags
    @tags.setter
    def tags(self, tags):
        """Set the tags of this managed object (list[MoTag])."""
        self._tags = tags
    @property
    def version_context(self):
        """MoVersionContext: The versioning info for this managed object."""
        return self._version_context
    @version_context.setter
    def version_context(self, version_context):
        """Set the versioning info of this managed object (MoVersionContext)."""
        self._version_context = version_context
    @property
    def ancestors(self):
        """list[MoBaseMoRef]: MO references of the ancestors in the object
        containment hierarchy."""
        return self._ancestors
    @ancestors.setter
    def ancestors(self, ancestors):
        """Set the ancestor MO references (list[MoBaseMoRef])."""
        self._ancestors = ancestors
    @property
    def parent(self):
        """MoBaseMoRef: The direct ancestor of this managed object in the
        containment hierarchy."""
        return self._parent
    @parent.setter
    def parent(self, parent):
        """Set the direct ancestor of this managed object (MoBaseMoRef)."""
        self._parent = parent
    @property
    def permission_resources(self):
        """list[MoBaseMoRef]: Permission resources (organizations) associated
        with this object; permissions tie resources to roles/privileges."""
        return self._permission_resources
    @permission_resources.setter
    def permission_resources(self, permission_resources):
        """Set the associated permission resources (list[MoBaseMoRef])."""
        self._permission_resources = permission_resources
    @property
    def device_mo_id(self):
        """str: The device_mo_id of this StorageVirtualDrive."""
        return self._device_mo_id
    @device_mo_id.setter
    def device_mo_id(self, device_mo_id):
        """Set the device_mo_id of this StorageVirtualDrive (str)."""
        self._device_mo_id = device_mo_id
    @property
    def dn(self):
        """str: The Distinguished Name — unambiguously identifies an object
        in the system."""
        return self._dn
    @dn.setter
    def dn(self, dn):
        """Set the Distinguished Name of this object (str)."""
        self._dn = dn
    @property
    def rn(self):
        """str: The Relative Name — uniquely identifies an object within a
        given context."""
        return self._rn
    @rn.setter
    def rn(self, rn):
        """Set the Relative Name of this object (str)."""
        self._rn = rn
    @property
    def model(self):
        """str: The model of the given component."""
        return self._model
    @model.setter
    def model(self, model):
        """Set the model of the given component (str)."""
        self._model = model
    @property
    def revision(self):
        """str: The revision of this StorageVirtualDrive."""
        return self._revision
    @revision.setter
    def revision(self, revision):
        """Set the revision of this StorageVirtualDrive (str)."""
        self._revision = revision
    @property
    def serial(self):
        """str: The serial of the given component."""
        return self._serial
    @serial.setter
    def serial(self, serial):
        """Set the serial of the given component (str)."""
        self._serial = serial
    @property
    def vendor(self):
        """str: The vendor of the given component."""
        return self._vendor
    @vendor.setter
    def vendor(self, vendor):
        """Set the vendor of the given component (str)."""
        self._vendor = vendor
    @property
    def access_policy(self):
        """str: The access_policy of this StorageVirtualDrive."""
        return self._access_policy
    @access_policy.setter
    def access_policy(self, access_policy):
        """Set the access_policy of this StorageVirtualDrive (str)."""
        self._access_policy = access_policy
    @property
    def actual_write_cache_policy(self):
        """str: The actual_write_cache_policy of this StorageVirtualDrive."""
        return self._actual_write_cache_policy
    @actual_write_cache_policy.setter
    def actual_write_cache_policy(self, actual_write_cache_policy):
        """Set the actual_write_cache_policy of this StorageVirtualDrive (str)."""
        self._actual_write_cache_policy = actual_write_cache_policy
    @property
    def available_size(self):
        """str: The available_size of this StorageVirtualDrive."""
        return self._available_size
    @available_size.setter
    def available_size(self, available_size):
        """Set the available_size of this StorageVirtualDrive (str)."""
        self._available_size = available_size
    @property
    def block_size(self):
        """str: The block_size of this StorageVirtualDrive."""
        return self._block_size
    @block_size.setter
    def block_size(self, block_size):
        """Set the block_size of this StorageVirtualDrive (str)."""
        self._block_size = block_size
    @property
    def bootable(self):
        """str: The bootable flag of this StorageVirtualDrive."""
        return self._bootable
    @bootable.setter
    def bootable(self, bootable):
        """Set the bootable flag of this StorageVirtualDrive (str)."""
        self._bootable = bootable
    @property
    def config_state(self):
        """str: The config_state of this StorageVirtualDrive."""
        return self._config_state
    @config_state.setter
    def config_state(self, config_state):
        """Set the config_state of this StorageVirtualDrive (str)."""
        self._config_state = config_state
    @property
    def configured_write_cache_policy(self):
        """str: The configured_write_cache_policy of this StorageVirtualDrive."""
        return self._configured_write_cache_policy
    @configured_write_cache_policy.setter
    def configured_write_cache_policy(self, configured_write_cache_policy):
        """Set the configured_write_cache_policy of this StorageVirtualDrive (str)."""
        self._configured_write_cache_policy = configured_write_cache_policy
    @property
    def connection_protocol(self):
        """str: The connection_protocol of this StorageVirtualDrive."""
        return self._connection_protocol
    @connection_protocol.setter
    def connection_protocol(self, connection_protocol):
        """Set the connection_protocol of this StorageVirtualDrive (str)."""
        self._connection_protocol = connection_protocol
    @property
    def drive_cache(self):
        """str: The drive_cache of this StorageVirtualDrive."""
        return self._drive_cache
    @drive_cache.setter
    def drive_cache(self, drive_cache):
        """Set the drive_cache of this StorageVirtualDrive (str)."""
        self._drive_cache = drive_cache
    @property
    def drive_security(self):
        """str: The drive_security of this StorageVirtualDrive."""
        return self._drive_security
    @drive_security.setter
    def drive_security(self, drive_security):
        """Set the drive_security of this StorageVirtualDrive (str)."""
        self._drive_security = drive_security
    @property
    def drive_state(self):
        """str: The Virtual drive state."""
        return self._drive_state
    @drive_state.setter
    def drive_state(self, drive_state):
        """Set the Virtual drive state (str)."""
        self._drive_state = drive_state
    @property
    def io_policy(self):
        """str: The io_policy of this StorageVirtualDrive."""
        return self._io_policy
    @io_policy.setter
    def io_policy(self, io_policy):
        """Set the io_policy of this StorageVirtualDrive (str)."""
        self._io_policy = io_policy
    @property
    def name(self):
        """str: The name of this StorageVirtualDrive."""
        return self._name
    @name.setter
    def name(self, name):
        """Set the name of this StorageVirtualDrive (str)."""
        self._name = name
    @property
    def num_blocks(self):
        """str: The num_blocks of this StorageVirtualDrive."""
        return self._num_blocks
    @num_blocks.setter
    def num_blocks(self, num_blocks):
        """Set the num_blocks of this StorageVirtualDrive (str)."""
        self._num_blocks = num_blocks
    @property
    def oper_state(self):
        """str: The current operational state of the Virtual drive."""
        return self._oper_state
    @oper_state.setter
    def oper_state(self, oper_state):
        """Set the current operational state of the Virtual drive (str)."""
        self._oper_state = oper_state
    @property
    def operability(self):
        """str: The operability of this StorageVirtualDrive."""
        return self._operability
    @operability.setter
    def operability(self, operability):
        """Set the operability of this StorageVirtualDrive (str)."""
        self._operability = operability
    @property
    def physical_block_size(self):
        """str: The physical_block_size of this StorageVirtualDrive."""
        return self._physical_block_size
    @physical_block_size.setter
    def physical_block_size(self, physical_block_size):
        """Set the physical_block_size of this StorageVirtualDrive (str)."""
        self._physical_block_size = physical_block_size
    @property
    def presence(self):
        """str: The presence of this StorageVirtualDrive."""
        return self._presence
    @presence.setter
    def presence(self, presence):
        """Set the presence of this StorageVirtualDrive (str)."""
        self._presence = presence
    @property
    def read_policy(self):
        """str: The read_policy of this StorageVirtualDrive."""
        return self._read_policy
    @read_policy.setter
    def read_policy(self, read_policy):
        """Set the read_policy of this StorageVirtualDrive (str)."""
        self._read_policy = read_policy
    @property
    def security_flags(self):
        """str: The security_flags of this StorageVirtualDrive."""
        return self._security_flags
    @security_flags.setter
    def security_flags(self, security_flags):
        """Set the security_flags of this StorageVirtualDrive (str)."""
        self._security_flags = security_flags
    @property
    def size(self):
        """str: The size of this StorageVirtualDrive."""
        return self._size
    @size.setter
    def size(self, size):
        """Set the size of this StorageVirtualDrive (str)."""
        self._size = size
    @property
    def strip_size(self):
        """str: The strip size — the portion of a stripe that resides on a
        single drive in the drive group, measured in KB."""
        return self._strip_size
    @strip_size.setter
    def strip_size(self, strip_size):
        """Set the strip size in KB (str)."""
        self._strip_size = strip_size
    @property
    def type(self):
        """str: The type of this StorageVirtualDrive."""
        return self._type
    @type.setter
    def type(self, type):
        """Set the type of this StorageVirtualDrive (str)."""
        self._type = type
    @property
    def uuid(self):
        """str: The uuid of this StorageVirtualDrive."""
        return self._uuid
    @uuid.setter
    def uuid(self, uuid):
        """Set the uuid of this StorageVirtualDrive (str)."""
        self._uuid = uuid
    @property
    def vendor_uuid(self):
        """str: The vendor_uuid of this StorageVirtualDrive."""
        return self._vendor_uuid
    @vendor_uuid.setter
    def vendor_uuid(self, vendor_uuid):
        """Set the vendor_uuid of this StorageVirtualDrive (str)."""
        self._vendor_uuid = vendor_uuid
    @property
    def virtual_drive_id(self):
        """str: The virtual_drive_id of this StorageVirtualDrive."""
        return self._virtual_drive_id
    @virtual_drive_id.setter
    def virtual_drive_id(self, virtual_drive_id):
        """Set the virtual_drive_id of this StorageVirtualDrive (str)."""
        self._virtual_drive_id = virtual_drive_id
    @property
    def physical_disk_usages(self):
        """list[StoragePhysicalDiskUsageRef]: The physical_disk_usages of this
        StorageVirtualDrive."""
        return self._physical_disk_usages
    @physical_disk_usages.setter
    def physical_disk_usages(self, physical_disk_usages):
        """Set the physical_disk_usages (list[StoragePhysicalDiskUsageRef])."""
        self._physical_disk_usages = physical_disk_usages
    @property
    def registered_device(self):
        """AssetDeviceRegistrationRef: The Device to which this Managed Object
        is associated."""
        return self._registered_device
    @registered_device.setter
    def registered_device(self, registered_device):
        """Set the associated Device (AssetDeviceRegistrationRef)."""
        self._registered_device = registered_device
    @property
    def storage_controller(self):
        """StorageControllerRef: Reference to the [storage.Controller]
        (mo://storage.Controller) Managed Object.

        When this managed object is deleted, the referenced storage.Controller
        MO unsets its reference to this deleted MO.
        """
        return self._storage_controller
    @storage_controller.setter
    def storage_controller(self, storage_controller):
        """Set the storage controller reference (StorageControllerRef)."""
        self._storage_controller = storage_controller
    @property
    def vd_member_eps(self):
        """list[StorageVdMemberEpRef]: References to the LocalDisks that build
        up this VirtualDrive."""
        return self._vd_member_eps
    @vd_member_eps.setter
    def vd_member_eps(self, vd_member_eps):
        """Set the member endpoint references (list[StorageVdMemberEpRef])."""
        self._vd_member_eps = vd_member_eps
    @property
    def virtual_drive_extension(self):
        """StorageVirtualDriveExtensionRef: Reference to the
        [storage.VirtualDriveExtension](mo://storage.VirtualDriveExtension)
        Managed Object.

        When this managed object is deleted, the referenced
        storage.VirtualDriveExtension MO unsets its reference to this
        deleted MO.
        """
        return self._virtual_drive_extension
    @virtual_drive_extension.setter
    def virtual_drive_extension(self, virtual_drive_extension):
        """Set the virtual drive extension reference
        (StorageVirtualDriveExtensionRef)."""
        self._virtual_drive_extension = virtual_drive_extension
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        # pformat gives a readable, stable multi-line rendering of to_dict().
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StorageVirtualDrive):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 1.476563 | 1 |
plugin.video.rebirth/resources/lib/modules/jsunfuck.py | TheWardoctor/wardoctors-repo | 1 | 12757621 | <reponame>TheWardoctor/wardoctors-repo<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
# | #
# | ______________________________________________________________ #
# | :~8a.`~888a:::::::::::::::88......88:::::::::::::::;a8~".a88::| #
# | ::::~8a.`~888a::::::::::::88......88::::::::::::;a8~".a888~:::| #
# | :::::::~8a.`~888a:::::::::88......88:::::::::;a8~".a888~::::::| #
# | ::::::::::~8a.`~888a::::::88......88::::::;a8~".a888~:::::::::| #
# | :::::::::::::~8a.`~888a:::88......88:::;a8~".a888~::::::::::::| #
# | :::::::::::: :~8a.`~888a:88 .....88;a8~".a888~:::::::::::::::| #
# | :::::::::::::::::::~8a.`~888......88~".a888~::::::::::::::::::| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ..............................................................| #
# | ..............................................................| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ::::::::::::::::::a888~".a88......888a."~8;:::::::::::::::::::| #
# | :::::::::::::::a888~".a8~:88......88~888a."~8;::::::::::::::::| #
# | ::::::::::::a888~".a8~::::88......88:::~888a."~8;:::::::::::::| #
# | :::::::::a888~".a8~:::::::88......88::::::~888a."~8;::::::::::| #
# | ::::::a888~".a8~::::::::::88......88:::::::::~888a."~8;:::::::| #
# | :::a888~".a8~:::::::::::::88......88::::::::::::~888a."~8;::::| #
# | a888~".a8~::::::::::::::::88......88:::::::::::::::~888a."~8;:| #
# | #
# | Rebirth Addon #
# | Copyright (C) 2017 Cypher #
# | #
# | This program is free software: you can redistribute it and/or modify #
# | it under the terms of the GNU General Public License as published by #
# | the Free Software Foundation, either version 3 of the License, or #
# | (at your option) any later version. #
# | #
# | This program is distributed in the hope that it will be useful, #
# | but WITHOUT ANY WARRANTY; without even the implied warranty of #
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# | GNU General Public License for more details. #
# | #
################################################################################
import re
import sys
import urllib
import string
class JSUnfuck(object):
    """Decoder for JSFuck-style obfuscated JavaScript (Python 2).

    Works by repeatedly substituting known JSFuck expression fragments
    (boolean/string coercions, generated number expressions, array
    dereferences, escape/unescape constructs) into the script until it
    stops changing (a fixed point is reached).
    """

    # Lazily-built cache of JSFuck number expressions -> decimal strings.
    numbers = None

    # Constant JSFuck expressions and the literal strings they evaluate to.
    words = {
        "(![]+[])": "false",
        "([]+{})": "[object Object]",
        "(!![]+[])": "true",
        "([][[]]+[])": "undefined",
        "(+{}+[])": "NaN",
        "([![]]+[][[]])": "falseundefined",
        "([][f+i+l+t+e+r]+[])": "function filter() { [native code] }",
        "(!![]+[][f+i+l+t+e+r])": "truefunction filter() { [native code] }",
        "(+![]+([]+[])[c+o+n+s+t+r+u+c+t+o+r])": "0function String() { [native code] }",
        "(+![]+[![]]+([]+[])[c+o+n+s+t+r+u+c+t+o+r])": "0falsefunction String() { [native code] }",
        "([]+[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +l+o+c+a+t+i+o+n)())": "https://123movies.to",
        "([]+[])[f+o+n+t+c+o+l+o+r]()": '<font color="undefined"></font>',
        "(+(+!![]+e+1+0+0+0)+[])": "Infinity",
        "(+[![]]+[][f+i+l+t+e+r])": 'NaNfunction filter() { [native code] }',
        '(+[![]]+[+(+!+[]+(!+[]+[])[3]+[1]+[0]+[0]+[0])])': 'NaNInfinity',
        '([]+[])[i+t+a+l+i+c+s]()': '<i></i>',
        '[[]][c+o+n+c+a+t]([[]])+[]': ',',
        '([][f+i+l+l]+[])': 'function fill() { [native code]}',
        '(!![]+[][f+i+l+l])': 'truefunction fill() { [native code]}',
        '((+[])[c+o+n+s+t+r+u+c+t+o+r]+[])': 'function Number() {[native code]} _display:45:1',
        '(+(+!+[]+[1]+e+[2]+[0])+[])': '1.1e+21',
        '([]+[])[c+o+n+s+t+r+u+c+t+o+r][n+a+m+e]': 'S+t+r+i+n+g',
        '([][e+n+t+r+i+e+s]()+[])': '[object Array Iterator]',
        '([]+[])[l+i+n+k](")': '<a href="""></a>',
        '(![]+[0])[i+t+a+l+i+c+s]()': '<i>false0</i>',

        # dummy to force array dereference
        'DUMMY1': '6p',
        'DUMMY2': '2x',
        'DUMMY3': '%3C',
        'DUMMY4': '%5B',
        'DUMMY5': '6q',
        'DUMMY6': '4h',
    }

    # Expressions that need special handling instead of plain substitution;
    # the value selects the handler used by repl_uniqs().
    uniqs = {
        '[t+o+S+t+r+i+n+g]': 1,
        '[][f+i+l+t+e+r][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +e+s+c+a+p+e)()': 2,
        '[][f+i+l+t+e+r][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +u+n+e+s+c+a+p+e)()': 3,
        '[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +e+s+c+a+p+e)()': 2,
        '[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +u+n+e+s+c+a+p+e)()': 3,
    }

    def __init__(self, js):
        # The obfuscated script; all passes mutate self.js in place.
        self.js = js

    def decode(self, replace_plus=True):
        """Run all substitution passes until a fixed point, then strip the
        leftover '+' concatenations and bracket notation."""
        while True:
            start_js = self.js
            self.repl_words(self.words)
            self.repl_numbers()
            self.repl_arrays(self.words)
            self.repl_uniqs(self.uniqs)
            if start_js == self.js:
                break

        if replace_plus:
            self.js = self.js.replace('+', '')
        # Drop residual property accesses like [toString] and unwrap [123].
        self.js = re.sub('\[[A-Za-z]*\]', '', self.js)
        self.js = re.sub('\[(\d+)\]', '\\1', self.js)
        return self.js

    def repl_words(self, words):
        """Substitute the constant expressions, longest keys first, until stable."""
        while True:
            start_js = self.js
            for key, value in sorted(words.items(), key=lambda x: len(x[0]), reverse=True):
                self.js = self.js.replace(key, value)
            if self.js == start_js:
                break

    def repl_arrays(self, words):
        """Resolve character indexing: "word"[i] -> the i-th character of word."""
        for word in sorted(words.values(), key=lambda x: len(x), reverse=True):
            for index in xrange(0, 100):
                try:
                    repl = word[index]
                    self.js = self.js.replace('%s[%d]' % (word, index), repl)
                except:
                    pass

    def repl_numbers(self):
        """Substitute generated number expressions, longest keys first, until stable."""
        if self.numbers is None:
            self.numbers = self.__gen_numbers()

        while True:
            start_js = self.js
            for key, value in sorted(self.numbers.items(), key=lambda x: len(x[0]), reverse=True):
                self.js = self.js.replace(key, value)
            if self.js == start_js:
                break

    def repl_uniqs(self, uniqs):
        """Dispatch the special-case constructs to their handlers."""
        for key, value in uniqs.iteritems():
            if key in self.js:
                if value == 1:
                    self.__handle_tostring()
                elif value == 2:
                    self.__handle_escape(key)
                elif value == 3:
                    self.__handle_unescape(key)

    def __handle_tostring(self):
        # n[toString]b == n rendered in base b.
        for match in re.finditer('(\d+)\[t\+o\+S\+t\+r\+i\+n\+g\](\d+)', self.js):
            repl = to_base(match.group(1), match.group(2))
            self.js = self.js.replace(match.group(0), repl)

    def __handle_escape(self, key):
        # escape(c) on a single character -> its %XX percent-encoding.
        while True:
            start_js = self.js
            offset = self.js.find(key) + len(key)
            if self.js[offset] == '(' and self.js[offset + 2] == ')':
                c = self.js[offset + 1]
                self.js = self.js.replace('%s(%s)' % (key, c), urllib.quote(c))

            if start_js == self.js:
                break

    def __handle_unescape(self, key):
        # unescape(expr) -> percent-decode expr; abort on nested calls/indexing.
        start = 0
        while True:
            start_js = self.js
            offset = self.js.find(key, start)
            if offset == -1: break

            offset += len(key)
            expr = ''
            extra = ''
            last_c = self.js[offset - 1]
            abort = False
            for i, c in enumerate(self.js[offset:]):
                extra += c
                if c == ')':
                    break
                elif (i > 0 and c == '(') or (c == '[' and last_c != '+'):
                    abort = True
                    break
                elif c == '%' or c in string.hexdigits:
                    expr += c
                last_c = c

            if not abort:
                self.js = self.js.replace(key + extra, urllib.unquote(expr))

            if start_js == self.js:
                break
            else:
                start = offset

    def __gen_numbers(self):
        """Build the table mapping JSFuck arithmetic expressions to decimal strings."""
        n = {'(+[]+[])': '0', '(+![]+([]+[]))': '0', '[+[]]': '[0]',
             '(+!![]+[])': '1', '[+!+[]]': '[1]', '[+!![]]': '[1]',
             '[+!+[]+[+[]]]': '[10]', '+(1+1)': '11', '(+20)': '20'}

        # Small integers via chained boolean coercions.
        for i in xrange(2, 20):
            key = '+!![]' * (i - 1)
            key = '!+[]' + key
            n['(' + key + ')'] = str(i)
            key += '+[]'
            n['(' + key + ')'] = str(i)
            n['[' + key + ']'] = '[' + str(i) + ']'

        for i in xrange(2, 10):
            key = '!+[]+' * (i - 1) + '!+[]'
            n['(' + key + ')'] = str(i)
            n['[' + key + ']'] = '[' + str(i) + ']'

            key = '!+[]' + '+!![]' * (i - 1)
            n['[' + key + ']'] = '[' + str(i) + ']'

        # 10-19 via digit concatenation onto a leading 1.
        for i in xrange(0, 10):
            key = '(+(+!+[]+[%d]))' % (i)
            n[key] = str(i + 10)
            key = '[+!+[]+[%s]]' % (i)
            n[key] = '[' + str(i + 10) + ']'

        # Two-digit numbers.
        for tens in xrange(2, 10):
            for ones in xrange(0, 10):
                key = '!+[]+' * (tens) + '[%d]' % (ones)
                n['(' + key + ')'] = str(tens * 10 + ones)
                n['[' + key + ']'] = '[' + str(tens * 10 + ones) + ']'

        # Three-digit numbers.
        for hundreds in xrange(1, 10):
            for tens in xrange(0, 10):
                for ones in xrange(0, 10):
                    key = '+!+[]' * hundreds + '+[%d]+[%d]))' % (tens, ones)
                    if hundreds > 1: key = key[1:]
                    key = '(+(' + key
                    n[key] = str(hundreds * 100 + tens * 10 + ones)

        return n
def to_base(n, base, digits="0123456789abcdefghijklmnopqrstuvwxyz"):
    """Render non-negative integer *n* as a string in the given *base*.

    Both *n* and *base* may arrive as strings (they come from regex match
    groups) and are converted with int().

    The previous recursive version applied a no-op ``lstrip(digits[0])``:
    a recursive result can never start with digits[0], since every
    recursive quotient is >= 1. This iterative form drops that dead code.
    """
    n, base = int(n), int(base)
    if n < base:
        # Single digit (covers n == 0 as well).
        return digits[n]
    out = []
    while n:
        n, rem = divmod(n, base)
        out.append(digits[rem])
    return "".join(reversed(out))
def main():
    """Read the obfuscated file named on the command line and print the decoded JS."""
    with open(sys.argv[1]) as source:
        obfuscated = source.read()
    sys.stdout.write(JSUnfuck(obfuscated).decode() + "\n")
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
| 1.539063 | 2 |
sld-schedule/helpers/api_token.py | guorenxi/Stack-Lifecycle-Deployment | 115 | 12757622 | <reponame>guorenxi/Stack-Lifecycle-Deployment
from helpers.api_request import request_url
from config.api import settings
def get_token(data):
    """POST credentials to the auth endpoint and return the access token.

    Returns None when the endpoint does not answer with HTTP 200.
    """
    response = request_url(
        verb='POST',
        headers={'Content-Type': 'application/json'},
        uri='authenticate/access-token-json',
        json=data
    )
    if response.get('status_code') != 200:
        return None
    return response.get('json').get('access_token')
if __name__ == "__main__":
    # Manual smoke test: authenticate with the admin credentials and print the token.
    print(get_token(settings.CREDENTIALS_ADM))
| 2.125 | 2 |
tensorflow_1/matrix.py | kapuni/DeeplearingPractice-CV- | 0 | 12757623 | <gh_stars>0
import os
import tensorflow as tf
# Bug fix: the env var name previously contained a trailing space
# ("CUDA_VISIBLE_DEVICES ") so CUDA never saw the setting.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = tf.ConfigProto()
# Allocate GPU memory on demand instead of grabbing it all up front.
config.gpu_options.allow_growth = True

# 1x2 @ 2x1 matrix product: 10*30 + 20*40 = 1100.
x = tf.constant([[10., 20.]])
y = tf.constant([[30.], [40.]])
z = tf.matmul(x, y)
with tf.Session(config=config) as sess:
    print(sess.run(z))
src/services/package_service.py | c-w-m/mongo-quickstart | 26 | 12757624 | from typing import Optional, List
from data.downloads import Download
from data.packages import Package
from data.release_history import ReleaseHistory
from data.users import User
class PackageService:
    """Read-only query helpers over the package, release, user and
    download collections."""

    @classmethod
    def package_count(cls):
        """Total number of packages."""
        return Package.objects().count()

    @classmethod
    def release_count(cls):
        """Total number of release-history records."""
        return ReleaseHistory.objects().count()

    @classmethod
    def user_count(cls):
        """Total number of users."""
        return User.objects().count()

    @classmethod
    def download_count(cls):
        """Total number of download records."""
        return Download.objects().count()

    @classmethod
    def find_package_by_name(cls, name):
        """Return the package with the given name, or None."""
        return Package.objects(name=name).first()

    @classmethod
    def latest_release(cls, package: Package) -> Optional[ReleaseHistory]:
        """Return the most recently created release of *package*, or None."""
        return ReleaseHistory.objects(package_id=package.id).order_by('-created').first()

    @classmethod
    def find_maintainers(cls, package: Package) -> List[User]:
        """Return the users listed as maintainers of *package*."""
        return list(User.objects(id__in=package.maintainers))

    @classmethod
    def popular_packages(cls, limit: int) -> List[Package]:
        """Return the *limit* packages with the most total downloads."""
        return list(Package.objects().order_by('-total_downloads').limit(limit))
app/views/vacancies.py | WishesFire/Epam-Python-Project | 1 | 12757625 | <gh_stars>1-10
"""
Views:
- `categories_show (/categories)`: Show all jobs categories
- `vacancies_show (/<category_slug>)`: Show all vacancies for specific category
- `vacancy_create (/vacancy_create)`: Show vacancy create form
- `vacancy_detail (/vacancy/<vacancy_slug>)`: Show detail about vacancy
"""
# pylint: disable=logging-fstring-interpolation
# pylint: disable=wrong-import-order
# pylint: disable=ungrouped-imports
# pylint: disable=simplifiable-if-statement
import logging
import json
from flask import Blueprint, render_template, request, redirect, url_for
from flask_mail import Message
from flask_login import current_user, login_required
from app.configs.config import TestBaseConfig
from app.service.validartors import VacancyFormValidator
from app.service.category_service import CategoryService
from app.service.vacancy_service import VacancyService
from app.service.user_service import UserService
from app import mail
vacancies_view = Blueprint('vacancies', __name__)
@vacancies_view.route("/categories", methods=["GET"])
def categories_show():
    """
    Show all job categories with their vacancy counts and average salaries.

    :return: rendered template
    """
    logging.info("Show all categories")
    categories = CategoryService.get_all_categories()
    average_salary = {}
    all_count_vacancies = 0
    for category in categories:
        salaries = [vacancy.salary for vacancy in category.vacancies]
        all_count_vacancies += len(salaries)
        total = sum(salaries)
        # Matches the original behaviour: only a strictly positive total
        # produces a (truncated) average; otherwise the average is 0.
        average_salary[category.name] = int(total / len(salaries)) if total > 0 else 0
    content = {"categories": categories, "count_vacancies": all_count_vacancies,
               "average_salary": average_salary, "user": current_user}
    logging.info(f"All categories - {categories}")
    return render_template("categories.html", **content)
@vacancies_view.route("/vacancy_create", methods=["GET", "POST"])
@login_required
def vacancy_create():
    """
    Vacancy information form (Name, salary, about, notification, contacts).
    On POST the data is validated and the vacancy is created; on GET the
    empty form is shown.

    :return: rendered template or redirect to the profile page
    """
    if request.method == "POST":
        logging.info("User POST vacancy_create")
        form = request.form
        vacancy_name = form.get("name")
        vacancy_salary = form.get("salary")
        vacancy_about = form.get("about")
        vacancy_contacts = form.get("contacts")
        vacancy_category = form.get("category")
        # Checkbox present -> notifications on.
        vacancy_notification = bool(form.get("notify"))
        logging.info("Get vacancy data from vacancy creating form")

        validator = VacancyFormValidator(vacancy_name, vacancy_salary,
                                         vacancy_about, vacancy_contacts)
        vacancy_name = validator.check_name()
        vacancy_salary = validator.check_salary()
        logging.info("Validation is DONE")

        category = CategoryService.find_category_by_name(vacancy_category)
        VacancyService.create_new_vacancy(vacancy_name, vacancy_salary,
                                          vacancy_about, vacancy_contacts,
                                          vacancy_notification, current_user.id, category.id)
        logging.info("New vacancy created")
        return redirect(url_for("auth.profile"))

    content = {"categories": CategoryService.get_all_categories(), "user": current_user}
    return render_template("vacancy_create.html", **content)
@vacancies_view.route("/<category_slug>/vacancies", methods=["GET", "POST"])
def vacancies_show(category_slug):
    """
    Show vacancies specific category
    Filters salary vacancies

    :param category_slug: used for url
    :return: rendered template
    """
    page_count = request.args.get("page", 1, type=int)
    if request.method == "POST":
        salary_average = request.form.get("salary-avg")
        if salary_average:
            # POST with a salary filter: render the filtered page and return early.
            logging.info(f"Salary filter get - {salary_average}")
            salary_average = float(salary_average)
            category = CategoryService.find_category_by_slug(category_slug)
            vacancies = VacancyService.find_vacancies_by_filter(
                category.id, salary_average,
                (page_count, TestBaseConfig.PAGINATION_PAGE))
            logging.info(f"Current category - {category.name}")
            logging.info(f"All filtered vacancies - {vacancies}")
            content = {"category_vacancies": vacancies, "category_slug": category_slug,
                       "user": current_user, "filter_flag": True}
            return render_template("vacancies.html", **content)
    # GET request, or POST without a salary value: show the unfiltered listing.
    category = CategoryService.find_category_by_slug(category_slug)
    if category:
        vacancies = VacancyService.find_vacancies_by_category(category.id,
                                                              (page_count,
                                                               TestBaseConfig.PAGINATION_PAGE))
        content = {"category_vacancies": vacancies, "category_slug": category_slug,
                   "user": current_user, "filter_flag": False}
        return render_template("vacancies.html", **content)
    # Unknown category slug: bounce back to the home page.
    return redirect(url_for("base.home"))
@vacancies_view.route("/vacancy/<vacancy_slug>", methods=["GET", "POST"])
@login_required
def vacancy_detail(vacancy_slug):
    """
    Show detail about specific vacancy (title, salary, information about vacancy, contacts)

    :param vacancy_slug: slug identifying the vacancy
    :return: rendered template (GET) or a plain status string (POST)
    """
    if request.method == "POST":
        # POST is an AJAX notification ping: when the viewer opens the
        # contacts and the viewer is not the owner, email the owner.
        data = json.loads(request.data)
        if data["notification"]:
            current_vacancy = VacancyService.find_vacancy_by_slug(vacancy_slug)
            owner = UserService.find_user_by_id(current_vacancy.user)
            if current_user.id != owner.id:
                msg = Message("Someone watch your contacts", sender=TestBaseConfig.MAIL_USERNAME,
                              recipients=[owner.email])
                msg.body = f"Someone found out about your job " \
                           f"in this vacancy - {current_vacancy.name}"
                mail.send(msg)
                return "Message sent"
        return "Message don't send"
    current_vacancy = VacancyService.find_vacancy_by_slug(vacancy_slug)
    logging.info(f"Show vacancy detail - {current_vacancy.name}")
    content = {"vacancy": current_vacancy, "user": current_user}
    return render_template("vacancy.html", **content)
| 2.28125 | 2 |
clustering/kmeans_clustering/kmean clustering.py | ViKrAm-Bais/Machine-Learning-with-Python | 0 | 12757626 | <filename>clustering/kmeans_clustering/kmean clustering.py
#!/usr/bin/env python
# coding: utf-8
# simple kmeans clustering
# In[92]:
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans

# Ten 2-D sample points to cluster.
x = np.array([[1, 2], [3, 1], [1, 1], [5, 7], [5, 5],
              [4, 8], [6, 6], [0, 0], [0, 5], [1, 6]])

# Show the raw points before clustering.
plt.scatter(x[:, 0], x[:, 1], marker=".", c="m", s=150, linewidth=2)
plt.show()


# Fit k-means with three clusters.
kmean = KMeans(n_clusters=3)
kmean.fit(x)
cen = kmean.cluster_centers_
label = kmean.labels_
print(label)
colors = ['c', 'm', 'b']


# Re-plot each point coloured by its cluster, plus the centroids.
for point, cluster in zip(x, label):
    plt.scatter(point[0], point[1], marker=".", c=colors[cluster], s=150, linewidth=2)
plt.scatter(cen[:, 0], cen[:, 1], marker="+", c="k", s=160, linewidth=2)
plt.show()
tests.py | shulinye/yoga | 2 | 12757627 | #!/usr/bin/python3
"""Various utils to check the integrity of the movesGraph"""
import argparse
import datetime
import os.path
import logging
LOG_FILENAME = '/tmp/yoga.log'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)
import moves
import strengthaerobics
import stretches
def sloppyRun(func, *args, **kwargs):
    """Runs a function, catching all exceptions
    and writing them to a log file.

    Returns the function's result, or None if it raised.
    Fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to Exception so those still propagate.
    """
    try:
        return func(*args, **kwargs)
    except Exception:
        logging.exception(func.__name__ + str(args) + str(kwargs))
def log_isinstance(ob, t, context=None, level = logging.ERROR):
    """Return True when *ob* is an instance of *t*; otherwise log a
    message at *level* (optionally tagged with *context*) and return False."""
    if isinstance(ob, t):
        return True
    suffix = " :" + str(context) if context is not None else ""
    logging.log(level, repr(ob) + " is not " + repr(t) + suffix)
    return False
def generateAllMoves(d = 1, a = 0, s = 0):
    """Build the complete moves graph.

    :param d: difficulty (larger is harder)
    :param a: aerobics level (0 disables aerobics moves)
    :param s: strength level (0 disables strength moves)

    Every linking step runs via sloppyRun so one failing module cannot
    abort graph construction; failures are only logged.
    """
    movesGraph = moves.generateMoves(d)
    sloppyRun(stretches.defineStretches, movesGraph, difficulty=d)
    sloppyRun(moves.linkMain, movesGraph, difficulty=d)
    if a: sloppyRun(strengthaerobics.linkAerobics, movesGraph, d, a)
    if s: sloppyRun(strengthaerobics.linkStrength, movesGraph, d, s)
    # a*s is non-zero only when both aerobics and strength are enabled.
    if a*s: sloppyRun(strengthaerobics.linkStrengthAerobics, movesGraph, d, s, a)
    sloppyRun(moves.unlinkWarmup, movesGraph, [], d)
    sloppyRun(moves.linkHarder, movesGraph, d)
    if s: sloppyRun(strengthaerobics.linkStrengthHarder, movesGraph, d, s)
    sloppyRun(moves.linkEnding, movesGraph)
    sloppyRun(stretches.linkCooldown, movesGraph)
    if s: sloppyRun(strengthaerobics.linkStrengthCooldown, movesGraph, d, s)
    if a: sloppyRun(strengthaerobics.linkAerobicsCooldown, movesGraph, d, a)
    sloppyRun(moves.linkSavasana, movesGraph, difficulty = d)
    print("%d moves discovered" % len(movesGraph))
    return movesGraph
def checkChildType(move):
    """Log an error when *move* has no children; verify every child is a moves.Move."""
    if not move.nextMove:
        logging.error(str(move) + " has no children")
    for child in move:
        log_isinstance(child, moves.Move, context=move)
def checkGraph(movesGraph):
    """Type-check every graph entry (unpacking tuples) and then its children."""
    for entry in movesGraph.values():
        members = entry if isinstance(entry, tuple) else (entry,)
        for member in members:
            if log_isinstance(member, moves.Move):
                checkChildType(member)
def checkConnected(movesGraph):
    """Return the set of moves that no other move links to (unreachable moves)."""
    allmoves = set()
    linkedmoves = set()
    for entry in movesGraph.values():
        members = entry if isinstance(entry, tuple) else (entry,)
        for member in members:
            allmoves.add(member)
            # Iterating a move yields the moves it links to.
            linkedmoves.update(member)
    return allmoves - linkedmoves
def checkLog(filename):
    """Print a warning when the error-log file exists, i.e. something was logged."""
    if os.path.isfile(filename):
        print("Error file exists:", filename)
if __name__== "__main__":
    # CLI entry point: parse options, build the full graph once, then run
    # the type and connectivity checks, logging any problems.
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--aerobics", dest="a", help="Insert aerobics moves", action='count', default=0)
    parser.add_argument("-s", "--strength", dest="s", help="Insert strength moves", action='count', default=0)
    parser.add_argument("-d", "--difficulty", dest="d", help="Difficulty: larger number=harder", default=1, type=int, choices=[-1,0,1,2])
    args = parser.parse_args()
    logging.info("Running with settings: " + str(vars(args)))
    logging.info("Run time: " + datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S'))
    print("Generating moves graph")
    movesGraph = generateAllMoves(**vars(args))
    print("Checking graph")
    checkGraph(movesGraph)
    m = checkConnected(movesGraph)
    if m: logging.debug("There is no way to get to the following moves:\n " + "\n ".join(repr(i) for i in sorted(m)))
    checkLog(LOG_FILENAME)
| 2.6875 | 3 |
findpi/findpi.py | felipenogson/findpi | 9 | 12757628 | <gh_stars>1-10
"""Uses brand new features of Python 3"""
import argparse
import threading
import psutil
from concurrent.futures import ThreadPoolExecutor
import os
import socket
import sys
import time
from getmac import get_mac_address
# Support both running from the repo (flat module) and as an installed package.
try:
    from __version__ import __version__
except ModuleNotFoundError:
    from findpi.__version__ import __version__
def getInput(currentip, thread_count):
    """
    Prompt for a target (or accept the default) and scan it for Raspberry Pis.

    Accepts either an "x.x.x.0/24" network (scans hosts .0-.254 across
    *thread_count* worker threads) or a single IP address.

    Fix: `limit` was previously assigned only inside the /24 branch, so any
    input not ending in "/24" raised UnboundLocalError (and the
    `limit == 1` branch was unreachable). Single addresses are now checked
    directly.
    """
    userinput = input(
        f'What net to check? (default {currentip}): ') or currentip
    start_time = time.time()
    print(f'\nChecking for delicious pi around {userinput}...')
    if userinput.endswith('/24'):
        base = userinput.rsplit('.', 1)[0]
        # nice way to fill up the list with the full range
        ip_list = [f'{base}.{i}' for i in range(255)]
        # multi-threading the modern way ;)
        with ThreadPoolExecutor(max_workers=thread_count) as executor:
            {executor.submit(checkMacs, ip) for ip in ip_list}
            executor.shutdown(wait=False)
    else:
        # Single address: just check it.
        checkMacs(userinput)
    # always print the time it took to complete
    print("--- %s seconds ---" % (time.time() - start_time))
def prep():
    """Parse the command-line arguments (-c/--cores and -v/--version)."""
    parser = argparse.ArgumentParser(description='Ways to run findpi.')
    parser.add_argument('-c', '--cores', dest="cores", type=int,
                        help='cores to use for threads')
    parser.add_argument('-v', '--version', action='version',
                        version=__version__)
    return parser.parse_args()
def checksudo():
    """Abort with an error message unless running as root (MAC lookups need it)."""
    if os.geteuid() != 0:
        sys.exit(
            'This script must be run as root (or with \'sudo\' or \'doas\' etc.)!')
def getip():
    """
    get current ip hopefully and convert to /24
    """
    # First tries non-loopback addresses from the hostname lookup; if none,
    # falls back to the local address of a UDP "connection" to 8.8.8.8:53
    # (connect() on a datagram socket sends no packets). The trailing
    # "no IP found" is only reached if both strategies yield nothing.
    currentip = (([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] or [[(s.connect(
        ("8.8.8.8", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0]
    # Replace the host octet with 0 and append the /24 mask.
    currentip = currentip.rsplit('.', 1)[0] + '.0/24'
    return currentip
def checkCores():
    """
    Return a sensible default thread count: CPU core count times a multiplier.

    Falls back to 4 when the core count is unavailable (psutil.cpu_count()
    may return None, making the multiplication raise TypeError).
    Fix: narrowed the bare ``except:`` to Exception so SystemExit and
    KeyboardInterrupt still propagate.
    """
    multiplier = 4  # set this to whatever you want
    try:
        cores = psutil.cpu_count() * multiplier
    except Exception:
        print('Cannot get cores info, defaulting to 4')
        cores = 4
    return cores
def ThreadId(ipaddress, macaddress):
    """
    Worker body called by checkMacs: resolve the MAC for *ipaddress* and
    print a hit when the OUI prefix belongs to the Raspberry Pi Foundation.

    *macaddress* is immediately overwritten; it is kept only for call
    compatibility with checkMacs.
    """
    macaddress = get_mac_address(ip=ipaddress)
    if macaddress:
        lowered = str(macaddress.lower())
        if ("b8:27:eb" in lowered) or ("dc:a6:32" in lowered):
            print(f'Found pi: {ipaddress}')
def checkMacs(ip_address):
    """
    Checks if mac address found using get_mac_address threaded function.
    Accepts: ip_address var as string
    Returns: nothing
    Prints: found ip of pi if found
    """
    macaddress = str()
    # Run the lookup in a worker thread and wait at most 0.5s; slow or
    # unreachable hosts are simply abandoned (their thread keeps running
    # in the background until it finishes on its own).
    th = threading.Thread(target=ThreadId, args=(ip_address, macaddress))
    th.start()
    th.join(timeout=0.5)
    return
logo = """
______ _____ _ _ _____ _____ _____
| ____|_ _| \ | | __ \| __ \_ _|
| |__ | | | \| | | | | |__) || |
| __| | | | . ` | | | | ___/ | |
| | _| |_| |\ | |__| | | _| |_
|_| |_____|_| \_|_____/|_| |_____|
"""
def main():
    """
    Entry point: parse args, require root, pick a thread count, then scan.
    """
    args = prep()
    checksudo()
    current_ip = getip()
    # Explicit -c/--cores wins; otherwise derive from the CPU count.
    thread_count = args.cores if args.cores else checkCores()
    getInput(current_ip, thread_count)
if __name__ == "__main__":
    # Print the ASCII banner, then run the scanner.
    print(logo)
    main()
| 2.546875 | 3 |
tests/test_init.py | Nagidal/starter | 0 | 12757629 | #!/usr/bin/env
import datetime
import starter
import pytest
import pathlib
import sys
@pytest.mark.skipif(pathlib.Path(sys.prefix) != pathlib.Path(r"C:\ProgramData\Anaconda3\envs\starter"), reason="Test only in native enironment")
def test_date():
    """The __date__ recorded in starter/__init__.py must be today's date."""
    recorded = datetime.datetime.strptime(starter.__date__, "%Y-%m-%d")
    now = datetime.datetime.today()
    assert recorded.year == now.year, "date.year in `__init__.py` is not current"
    assert recorded.month == now.month, "date.month in `__init__.py` is not current"
    assert recorded.day == now.day, "date.day in `__init__.py` is not current"
def test_author():
    """__author__ must be a non-empty string."""
    author = starter.__author__
    assert author
    assert isinstance(author, str)
def test_author_email():
    """__author_email__ must be a non-empty string containing an '@'."""
    email = starter.__author_email__
    assert email
    assert isinstance(email, str)
    assert "@" in email
| 2.4375 | 2 |
app/overview/choices/neudar.py | gellerjulia/harmonization-website | 1 | 12757630 | <gh_stars>1-10
from .neu import CAT_NEU_ANALYTES
from .dar import CAT_DAR_ANALYTES
# Analytes available for both cohorts: the intersection of the NEU and DAR lists.
CAT_NEUDAR_ANALYTES = [
    ('Analytes', tuple(set(CAT_NEU_ANALYTES[0][1]) & set(CAT_DAR_ANALYTES[0][1])))]

# Family-member codes: (stored value, display label).
CAT_NEUDAR_MEMBER_C = (
    ('1', 'mother'),  # maps to maternal
    ('2', 'father'),
    ('3', 'child'),
)

CAT_NEUDAR_TIME_PERIOD = (
    ('9', 'any'),  # all time periods plotted together
    # ('0', 'early enrollment'),  # maps to 12G
    # ('1', 'enrollment'),  # maps to 24G
    # ('3', 'week 36/delivery'),  # maps to 6WP
)

# Extra categorical features appended after the analyte choices.
ADDITIONAL_FEATURES = [('Categorical', (
    ('Outcome', 'Outcome'),
    ('Member_c', 'Family Member'),
    # ('TimePeriod', 'Collection Time'),
    ('CohortType', 'Cohort Type'),
))]

NEUDAR_FEATURE_CHOICES = CAT_NEUDAR_ANALYTES + ADDITIONAL_FEATURES
NEUDAR_CATEGORICAL_CHOICES = ADDITIONAL_FEATURES
| 2.03125 | 2 |
tests/__init__.py | cia05rf/async-scrape | 0 | 12757631 | import os
import json
#Create env variables
with open("env.json", "r") as f:
envs = json.loads(f.read())
for env, var in envs.items():
os.environ[env] = var | 2.78125 | 3 |
SCTF/2018 Quals/tracer/solver.py | PurpEth/solved-hacking-problem | 1 | 12757632 | <gh_stars>1-10
'''
g = r0 * b
pub = r1 * g = r0 * r1 * b
ct0 = r2 * g = r0 * r2 * b
ct1 = pub * r2 + c = r0 * r1 * r2 * b + c
'''
import binascii
from fastecdsa.curve import P521
from fastecdsa.point import Point
# Load the recorded program-counter trace (one hex PC per line).
with open('pc.log', 'r') as f:
    content = f.read()

lines = content.split('\n')
# Window boundaries: the PCs marking entry/exit of the scalar-multiply routine.
start_index = [i for i, x in enumerate(lines) if x == "0000000000401ee8"]
end_index = [i for i, x in enumerate(lines) if x == "0000000000402018"]
g_x = 5405424750907042817849523452244787490362599682385950687385382709003948286406876796594535643748818283262121138737076141597966012285810985633370824005103944416
g_y = 984970155278863317776905647274559677791525657478616051760985477946504010716818161185200198096532903279219172158326801022992897407628359999389646296263358663
pub_x = 4892656645518573331106701586397878976390433610692116750215231364193992297525681417236426633145141081722252828121588677507009668068565040851265421535903327698
pub_y = 445589854414539227925716617203051677345304928733141270115246729820043468361633813613863577404936314503047208205373086044049612015283264631681675748037596649
ct0_x = 552855983191477065625173490798701617711704046550323641029043197505267412733020855489986706517083352349729506878848234582442903346393633912672334490115627032
ct0_y = 4448288254968185929975292935301106070977300148734716422986283428819999541940872803146014484885544656926366780738305965546200127900163602523408778848349228434
ct1_x = 1172894324150563774663811643608960517627766591027738626927811162713249354115380380370221946441154957962746746084983424309132270981703866563921333244571945068
ct1_y = 1326862342442789403618364073625262255428404701645852537809124740716613376400513445402466174855564161338377255072099047633392029230168238302039238121549772049
traced = []
# Each start/end window is one scalar multiplication; recover the scalar
# bit-by-bit from which branch the double-and-add loop took at each step
# (PC 0x401f95 marks a bit test, the following 0x401f97 means the bit is 1).
for (line_start, line_end) in zip(start_index, end_index):
    now = 0
    add_cand = 1
    count = 0
    for i in range(line_start, line_end):
        if lines[i] == "0000000000401f95":
            count += 1
            if lines[i+1] == "0000000000401f97":
                # bit is set
                now += add_cand
            add_cand *= 2
    traced.append(now)

# Windows 2 and 3 are the same scalar (r2) used twice — sanity check.
assert traced[2] == traced[3]

# traced[0] = r0 (so g = r0 * base), traced[1] = r1; then
# flag_point = ct1 - r1 * ct0 recovers c (ElGamal-style decryption).
base_point = Point(P521.gx, P521.gy, curve=P521)
g_point = base_point * traced[0]
assert g_x == g_point.x and g_y == g_point.y

ct0 = Point(ct0_x, ct0_y, curve=P521)
ct1 = Point(ct1_x, ct1_y, curve=P521)

flag = ct1 - (ct0 * traced[1])
print binascii.unhexlify('%x' % flag.x)
| 2.171875 | 2 |
pynab/interfaces/tv/tvmaze.py | SpookyDex/pynab | 0 | 12757633 | import pytvmaze
from pynab import log
import pynab.ids
TVMAZE_SEARCH_URL = ' http://api.tvmaze.com/search/shows'
NAME = 'TVMAZE'
def search(data):
"""
Search TVMaze for Show Info.
:param data: show data
:return: show details
"""
year = data.get('year')
country = data.get('country')
clean_name = pynab.ids.clean_name(data.get('name'))
log.debug('tvmaze: attempting to find "{}" online'.format(clean_name))
# code contributed by srob650 (https://github.com/srob650)
showname = ''
if year:
showname = clean_name[:-5]
if country:
showname = clean_name.split(country)[0].strip()
if not year or country:
showname = clean_name
maze_show = None
tvm = pytvmaze.TVMaze()
try:
maze_show = tvm.get_show(show_name=showname, show_year=year, show_country=country)
except Exception as e:
log.debug('tvmaze: exception: {}'.format(e))
if maze_show:
log.debug('tvmaze: returning show - {} with id - {}'.format(maze_show.name, maze_show.id))
return maze_show.id
else:
log.debug('tvmaze: No show found')
return None
| 2.890625 | 3 |
coltune_script.py | shijin-aws/ompi-collectives-tuning | 0 | 12757634 | #!/usr/bin/env python
# Copyright (c) 2020 Amazon.com, Inc. or its affiliates. All Rights
# reserved.
#
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
import os
import sys
imb_collectives = ["reduce_scatter_block"]
def main():
from os import system
from sys import argv
from common import Params
dir_path = os.path.dirname(os.path.realpath(__file__))
config = Params( argv[1] )
collective_list = config.getStrlst("collectives")
omb_path = config.getStr("omb_collective_directory")
imb_bin = config.getStr("imb_binary")
num_rank_list = config.getIntlst("number_of_ranks")
max_num_node = config.getInt("max_num_node")
num_core_per_node = config.getInt("number_of_cores_per_node")
num_run = config.getInt("number_of_runs_per_test")
job_directory = dir_path+"/collective_jobs"
for collective in collective_list:
params = Params( job_directory+"/"+collective+".job" )
if not os.path.exists(dir_path+"/output"):
os.makedirs(dir_path+"/output")
if not os.path.exists(dir_path+"/output/"+collective):
os.makedirs(dir_path+"/output/"+collective)
num_alg = params.getInt("number_of_algorithms")
exclude_alg = params.getIntlst("exclude_algorithms")
two_proc_alg = -1
try:
two_proc_alg = params.getInt("two_proc_alg")
except Exception as e:
print "No two proc algorithm for "+collective
f = open(dir_path+"/output/"+collective+"/"+collective+"_coltune.sh", "w")
print >> f, "#!/bin/sh"
print >> f, "#"
print >> f, "#$ -j y"
print >> f, "#$ -pe mpi %d" % (max_num_node * num_core_per_node)
print >> f, "#"
print >> f, "#$ -cwd"
print >> f, "#"
print >> f, "echo Got $NSOLTS processors."
print >> f, ""
for num_rank in num_rank_list:
for alg in range(num_alg+1):
if alg in exclude_alg or (alg == two_proc_alg and num_rank > 2):
continue
print >> f, "# ", alg, num_rank, "ranks"
for run_id in xrange(num_run):
if collective in imb_collectives:
prg_name = imb_bin+" -npmin %d %s " % (num_rank, collective)
else:
prg_name = omb_path+"/osu_"+collective
cmd = "mpirun --np %d " % (num_rank)
cmd += "--mca coll_tuned_use_dynamic_rules 1 --mca coll_tuned_"+collective+"_algorithm "+str(alg)
cmd += " " + prg_name
cmd += " >& " + dir_path+"/output/"+collective + "/" + str(alg) + "_" + str(num_rank) + "ranks" + "_run" + str(run_id) + ".out"
print >> f, cmd
print >> f, ""
f.close()
print "SGE script wrote to "+collective+"_coltune.sh successfully!"
if __name__ == "__main__":
main()
| 2.328125 | 2 |
tests/models/hyper_dt_regression_test.py | lixfz/DeepTables | 828 | 12757635 | # -*- coding:utf-8 -*-
__author__ = 'yangjian'
"""
"""
import pandas as pd
from deeptables.models import DeepTable
from deeptables.models.hyper_dt import HyperDT, tiny_dt_space
from hypernets.core.callbacks import SummaryCallback, FileStorageLoggingCallback
from hypernets.core.searcher import OptimizeDirection
from hypernets.searchers import RandomSearcher
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from .. import homedir
class Test_HyperDT_Regression():
    """Smoke test: run HyperDT's hyper-parameter search end-to-end on the
    Boston housing regression dataset (3 trials only)."""
    def test_boston(self):
        print("Loading datasets...")
        # Boston housing: 13 numeric features, continuous target
        boston_dataset = load_boston()
        df_train = pd.DataFrame(boston_dataset.data)
        df_train.columns = boston_dataset.feature_names
        self.y = pd.Series(boston_dataset.target)
        self.X = df_train
        # 80/20 split with a fixed seed for reproducibility
        self.X_train, \
        self.X_test, \
        self.y_train, \
        self.y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=42)
        # random search over the tiny search space; logs go under homedir
        rs = RandomSearcher(tiny_dt_space, optimize_direction=OptimizeDirection.Maximize, )
        hdt = HyperDT(rs,
                      callbacks=[SummaryCallback(), FileStorageLoggingCallback(rs, output_dir=f'{homedir}/hyn_logs')],
                      reward_metric='RootMeanSquaredError',
                      dnn_params={
                          'hidden_units': ((256, 0, False), (256, 0, False)),
                          'dnn_activation': 'relu',
                      },
                      )
        hdt.search(self.X_train, self.y_train, self.X_test, self.y_test, max_trials=3)
        best_trial = hdt.get_best_trial()
        # retrain the best sample on the full dataset, then sanity-check it
        estimator = hdt.final_train(best_trial.space_sample, self.X, self.y)
        score = estimator.predict(self.X_test)
        result = estimator.evaluate(self.X_test, self.y_test)
        assert result
        assert isinstance(estimator.model, DeepTable)
| 2.078125 | 2 |
discrete_A3C.py | lws803/pytorch-A3C | 0 | 12757636 | import torch
import torch.nn as nn
from utils import v_wrap, set_init, push_and_pull, record
import torch.nn.functional as F
import torch.multiprocessing as mp
from shared_adam import SharedAdam
import gym
import os
import argparse
import matplotlib.pyplot as plt
from simulations.cartpole_sim import Simulation
os.environ["OMP_NUM_THREADS"] = "1"  # keep each worker process single-threaded
# Hyper-parameters for the A3C training loop
UPDATE_GLOBAL_ITER = 40  # sync local nets with the global net every N env steps
GAMMA = 0.9              # reward discount factor
MAX_EP = 3000            # total episodes across all workers
parser = argparse.ArgumentParser()
parser.add_argument('--test', action='store_true', help='run testing')
parser.add_argument('--model_path', type=str, default='models/model_discrete.pth', help='path to the model')
args = parser.parse_args()
class DiscreteNet(nn.Module):
    """Actor-critic network with separate policy (pi) and value (v) heads."""

    def __init__(self, s_dim, a_dim):
        super(DiscreteNet, self).__init__()
        self.s_dim = s_dim
        self.a_dim = a_dim
        # policy head: state -> 200 hidden units -> action logits
        self.pi1 = nn.Linear(s_dim, 200)
        self.pi2 = nn.Linear(200, a_dim)
        # value head: state -> 100 hidden units -> scalar state value
        self.v1 = nn.Linear(s_dim, 100)
        self.v2 = nn.Linear(100, 1)
        set_init([self.pi1, self.pi2, self.v1, self.v2])
        self.distribution = torch.distributions.Categorical

    def forward(self, x):
        """Return (action logits, state values) for a batch of states."""
        hidden_pi = F.relu6(self.pi1(x))
        hidden_v = F.relu6(self.v1(x))
        return self.pi2(hidden_pi), self.v2(hidden_v)

    def choose_action(self, s):
        """Sample a single action from the current policy (inference mode)."""
        self.eval()
        logits, _ = self.forward(s)
        dist = self.distribution(F.softmax(logits, dim=1).data)
        return dist.sample().numpy()[0]

    def loss_func(self, s, a, v_t):
        """A3C loss: squared TD error (critic) plus policy-gradient term (actor)."""
        self.train()
        logits, values = self.forward(s)
        advantage = v_t - values
        critic_loss = advantage.pow(2)
        dist = self.distribution(F.softmax(logits, dim=1))
        # advantage is detached so the actor term does not backprop into the critic
        actor_loss = -(dist.log_prob(a) * advantage.detach().squeeze())
        return (critic_loss + actor_loss).mean()
class Worker(mp.Process):
    """One A3C worker process: rolls out episodes in its own Simulation,
    periodically pushing gradients to the shared global network and pulling
    back the latest global weights."""
    def __init__(self, gnet, opt, global_ep, global_ep_r, res_queue, name):
        super(Worker, self).__init__()
        self.name = 'w%i' % name
        # shared, process-safe counters/queue for episode bookkeeping
        self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
        self.gnet, self.opt = gnet, opt
        self.env = Simulation()
        self.lnet = DiscreteNet(self.env.state_space, self.env.action_space)           # local network
    def run(self):
        """Main rollout loop; exits once the global episode cap is reached."""
        total_step = 1
        while self.g_ep.value < MAX_EP:
            s = self.env.reset_env()
            buffer_s, buffer_a, buffer_r = [], [], []
            ep_r = 0.
            while True:
                if self.name == 'w0':
                    # rendering is disabled during training
                    # self.env.show()
                    pass
                a = self.lnet.choose_action(v_wrap(s[None, :]))
                s_, r, done, _ = self.env.move(a)
                if done: r = -1  # penalize terminal states
                ep_r += r
                buffer_a.append(a)
                buffer_s.append(s)
                buffer_r.append(r)
                if total_step % UPDATE_GLOBAL_ITER == 0 or done:  # update global and assign to local net
                    # sync: push local gradients to gnet, pull fresh weights into lnet
                    push_and_pull(self.opt, self.lnet, self.gnet, done, s_, buffer_s, buffer_a, buffer_r, GAMMA)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    if done:  # done and print information
                        record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.name)
                        break
                s = s_
                total_step += 1
        # sentinel so the main process stops draining the result queue
        self.res_queue.put(None)
def run_test (gnet, opt):
    """Render-and-play evaluation loop using the trained global network.

    Runs indefinitely, resetting the environment whenever an episode ends.
    """
    env = Simulation()
    lnet = gnet  # evaluate the global network directly (no separate local copy)
    s = env.reset_env() # Reset the env
    buffer_s, buffer_a, buffer_r = [], [], []
    ep_r = 0
    total_step = 1
    while True:
        env.show()
        a = lnet.choose_action(v_wrap(s[None, :])) # Choose next action to perform, left or right by what magnitude
        s_, r, done, _ = env.move(a) # Perform the action and record the state and rewards
        # Also take the boolean of whether the sim is done
        ep_r += r
        buffer_a.append(a) # Buffer for action
        buffer_s.append(s) # Buffer for state
        buffer_r.append(r) # Buffer for rewards
        if total_step % UPDATE_GLOBAL_ITER == 0 or done:
            # TODO: Test if we really need the feedback training, maybe can remove this
            push_and_pull(opt, lnet, gnet, done, s_, buffer_s, buffer_a, buffer_r, GAMMA)
            buffer_s, buffer_a, buffer_r = [], [], []
            if done:
                print (total_step)
                s = env.reset_env() # Reset the env
                total_step = 0
        s = s_ # Set current state to the new state caused by action
        total_step += 1
if __name__ == "__main__":
sim = Simulation()
gnet = DiscreteNet(sim.state_space, sim.action_space) # global network
if args.test:
gnet.load_state_dict(torch.load(args.model_path)) # Load the previously trained network
gnet.share_memory() # share the global parameters in multiprocessing
opt = SharedAdam(gnet.parameters(), lr=0.0001) # global optimizer
global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()
if args.test:
run_test(gnet, opt)
else:
# parallel training
workers = [Worker(gnet, opt, global_ep, global_ep_r, res_queue, i) for i in range(mp.cpu_count())]
[w.start() for w in workers]
res = [] # record episode reward to plot
while True:
r = res_queue.get()
if r is not None:
res.append(r)
else:
break
[w.join() for w in workers]
print ("Saving model...")
torch.save(gnet.state_dict(), args.model_path)
plt.plot(res)
plt.ylabel('Moving average ep reward')
plt.xlabel('Step')
plt.show()
| 2.34375 | 2 |
duplicates_parser/duplicates.py | LilianBoulard/utils | 2 | 12757637 | import os
from pathlib import Path
from argparse import ArgumentParser
import duplicates as du
# Command-line interface for the duplicate finder.
_parser = ArgumentParser(
    'Utility for parsing a directory, and finding duplicates.'
)

_parser.add_argument(
    '-d', '--directory',
    help='Directory to scan recursively. '
         'Default is current directory',
    type=str, required=True, default=[os.getcwd()], nargs=1,
)

_parser.add_argument(
    '-l', '--memory_limit',
    help='Memory limit for the parser. In bytes. Default is 2147483648 (2GB).',
    # bugfix: nargs=1 was missing; without it a user-supplied value is parsed
    # as a bare int, and `_args.memory_limit[0]` below raises TypeError
    type=int, required=False, default=[2147483648], nargs=1,
)

_parser.add_argument(
    '--dashboard',
    help='Creates a dashboard of the data. '
         'Specify a file name to which HTML will be exported.',
    type=str, required=False, nargs=1,
)

_args = _parser.parse_args()

# unwrap the single-element lists produced by nargs=1
_directory = _args.directory[0]
_limit = _args.memory_limit[0]

if _args.dashboard:
    _generate_dashboard = True
    _dashboard_output_file = Path(_args.dashboard[0]).resolve()
else:
    _generate_dashboard = False
    _dashboard_output_file = None
if __name__ == "__main__":
parser = du.DuplicateParser(_directory, _limit)
ext = du.Extractor(parser)
duplicates = ext.get_duplicates()
ext.clean_and_overwrite_dataframe()
df = ext.get_df()
if _generate_dashboard:
dashboard = du.Dashboard(df)
dashboard.generate(output_file=_dashboard_output_file, data=duplicates)
print('duplicates: ', duplicates)
| 3.046875 | 3 |
tests/flat_param_test.py | roypeters/spotlight | 9 | 12757638 | <gh_stars>1-10
from .validator_test import ValidatorTest
class FlatParamTest(ValidatorTest):
    """Verify that the `flat` flag switches validator error output between a
    flat list (flat=True) and a field-keyed dict (flat=False)."""
    def setUp(self):
        # validation rules, including nested, wildcard and deep-nested fields
        self.rules = {
            "email": "email|max:4",
            "password": "<PASSWORD>",
            "nested1.field": "min:5",
            "nested2.*": "min:5",
            "nested3.*.field": "min:5",
            "nested4.nested.field": "min:5",
        }
        # sample payload crafted to violate most of the rules above
        self.data = {
            "email": "this.is.not.a.valid.email",
            "password": "<PASSWORD>",
            "nested1": {"field": "test"},
            "nested2": ["test", "valid", "test"],
            "nested3": [{"field": "test"}, {"field": "valid"}, {"field": "test"}],
            "nested4": {"nested": {"field": "test"}},
        }
    def test_flat_param_true_expect_list_of_errors(self):
        # flat=True: all error messages collapsed into a single list
        errors = self.validator.validate(self.data, self.rules, flat=True)
        self.assertEqual(isinstance(errors, list), True)
        self.assertEqual(len(errors), 9)
    def test_flat_param_false_expect_dict_of_errors(self):
        # flat=False: errors grouped per (flattened) field path
        errors = self.validator.validate(self.data, self.rules, flat=False)
        self.assertEqual(isinstance(errors, dict), True)
        self.assertEqual(len(errors), 8)
        self.assertEqual(len(errors.get("email")), 2)
        self.assertEqual(len(errors.get("password")), 1)
        self.assertEqual(len(errors.get("nested1.field")), 1)
        self.assertEqual(len(errors.get("nested2.0")), 1)
        self.assertEqual(len(errors.get("nested2.2")), 1)
        self.assertEqual(len(errors.get("nested3.0.field")), 1)
        self.assertEqual(len(errors.get("nested3.2.field")), 1)
        self.assertEqual(len(errors.get("nested4.nested.field")), 1)
| 2.75 | 3 |
tests/conftest.py | Australian-Text-Analytics-Platform/atap_widgets | 0 | 12757639 | from sys import stderr
import pandas as pd
import pytest
import spacy
from atap_widgets.conversation import Conversation
# Workaround for spacy models being difficult to install
# via pip: download the small English model on first use so the test
# suite is self-bootstrapping.
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    print(
        "Downloading language model for spaCy\n"
        "(don't worry, this will only happen once)",
        file=stderr,
    )
    from spacy.cli import download
    download("en_core_web_sm")
@pytest.fixture(scope="session")
def sherlock_holmes_five_sentences():
return """To <NAME> she is always the woman. I have seldom heard him
mention her under any other name. In his eyes she eclipses and predominates the
whole of her sex. It was not that he felt any emotion akin to love for <NAME>. All emotions, and that one particularly, were abhorrent to his cold,
precise but admirably balanced mind. """
@pytest.fixture(scope="session")
def basic_spacy_nlp():
return spacy.load("en_core_web_sm")
@pytest.fixture
def sherlock_holmes_doc(sherlock_holmes_five_sentences, basic_spacy_nlp):
    """spaCy Doc built from the five-sentence Sherlock Holmes sample."""
    return basic_spacy_nlp(sherlock_holmes_five_sentences)
@pytest.fixture
def sherlock_holmes_dummy_df(sherlock_holmes_doc):
    """
    DataFrame, one row per sentence from the Sherlock Holmes example,
    with speakers alternating A/B to simulate a two-party exchange.
    """
    df = pd.DataFrame(
        {
            "text": [str(sentence) for sentence in sherlock_holmes_doc.sents],
            "speaker": list("ABABA"),
        }
    )
    return df
@pytest.fixture
def sherlock_holmes_dummy_conversation(sherlock_holmes_dummy_df):
    """
    Treat each sentence from the Sherlock Holmes example as a turn
    in a conversation, for checking contingency counts etc.
    """
    # Conversation expects a DataFrame with 'text' and 'speaker' columns
    return Conversation(sherlock_holmes_dummy_df)
@pytest.fixture
def sortable_text_df():
    """Tiny text DataFrame with explicit ids, for ordering/sorting tests."""
    df = pd.DataFrame(
        {
            "text": ["The pen is red", "My pen is green", "Your pen is blue"],
            "text_id": [1, 2, 3],
        }
    )
    return df
| 2.8125 | 3 |
parsec/commands/cmd_tools.py | innovate-invent/parsec | 0 | 12757640 | <filename>parsec/commands/cmd_tools.py
import click
from parsec.commands.tools.get_tool_panel import cli as get_tool_panel
from parsec.commands.tools.get_tools import cli as get_tools
from parsec.commands.tools.install_dependencies import cli as install_dependencies
from parsec.commands.tools.paste_content import cli as paste_content
from parsec.commands.tools.put_url import cli as put_url
from parsec.commands.tools.run_tool import cli as run_tool
from parsec.commands.tools.show_tool import cli as show_tool
from parsec.commands.tools.upload_file import cli as upload_file
from parsec.commands.tools.upload_from_ftp import cli as upload_from_ftp
@click.group()
def cli():
    """Parent click group that hosts all tool-related subcommands."""
    pass
# Register each subcommand on the group so they appear under `tools ...`.
cli.add_command(get_tool_panel)
cli.add_command(get_tools)
cli.add_command(install_dependencies)
cli.add_command(paste_content)
cli.add_command(put_url)
cli.add_command(run_tool)
cli.add_command(show_tool)
cli.add_command(upload_file)
cli.add_command(upload_from_ftp)
| 2.046875 | 2 |
chat-bot/plugins/welcome.py | Bopperman/The-2-John-Bros | 43 | 12757641 | <filename>chat-bot/plugins/welcome.py
from plugin import Plugin
import logging
import discord
log = logging.getLogger('discord')
class Welcome(Plugin):
    """Plugin that greets joining members and announces departures."""

    fancy_name = "Welcome"

    async def on_member_join(self, member):
        """Send the configured welcome message when someone joins."""
        server = member.server
        storage = await self.get_storage(server)

        # Fill the template placeholders with live values.
        message = await storage.get('welcome_message')
        message = message.replace("{server}", server.name)
        message = message.replace("{user}", member.mention)

        channel_name = await storage.get('channel_name')
        # Default target: DM the member if 'private' is set, else the server.
        destination = member if await storage.get('private') else server
        # A configured channel (matched by name or id) overrides the default.
        channel = discord.utils.find(
            lambda c: c.name == channel_name or c.id == channel_name,
            server.channels)
        if channel is not None:
            destination = channel

        await self.mee6.send_message(destination, message)

    async def on_member_remove(self, member):
        """Announce a member's departure unless goodbyes are disabled."""
        server = member.server
        storage = await self.get_storage(server)

        message = await storage.get('gb_message')
        # Goodbyes are enabled only while the 'gb_disabled' key is absent.
        if (await storage.get('gb_disabled')) is not None:
            return
        if not message:
            return

        message = message.replace("{server}", server.name)
        message = message.replace("{user}", member.name)

        channel_name = await storage.get('channel_name')
        destination = server
        channel = discord.utils.find(
            lambda c: c.name == channel_name or c.id == channel_name,
            server.channels)
        if channel is not None:
            destination = channel

        await self.mee6.send_message(destination, message)
| 2.484375 | 2 |
django/bitcoin_monitor/celery.py | chanhosuh/bitcoin-monitor | 1 | 12757642 | <filename>django/bitcoin_monitor/celery.py<gh_stars>1-10
import os
import celery.signals
from celery import Celery
# Point workers at the Django settings module before creating the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bitcoin_monitor.settings')
app = Celery('bitcoin_monitor')
# Using a string here means the workers don't have to serialize
# the configuration object to child processes.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
    """
    Diagnostic task that dumps its own request information.

    To exercise the worker, do:
        from bitcoin_monitor.celery import debug_task
        debug_task.delay()
    """
    print(f'Request: {self.request!r}')
    return True
@celery.signals.setup_logging.connect
def setup_logging(**kwargs):
    """
    Intentionally empty handler: connecting something to Celery's
    setup_logging signal stops Celery from replacing (hijacking) the
    application's own logging configuration.
    """
    pass
| 2.265625 | 2 |
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/french/merged_extractor_config.py | hongman/Recognizers-Text | 1 | 12757643 | <reponame>hongman/Recognizers-Text
from typing import List, Pattern
from recognizers_text.extractor import Extractor
from recognizers_text.utilities import RegExpUtility
from recognizers_number import FrenchIntegerExtractor
from ...resources.french_date_time import FrenchDateTime
from ..extractors import DateTimeExtractor
from ..base_merged import MergedExtractorConfiguration
from ..base_date import BaseDateExtractor
from ..base_time import BaseTimeExtractor
from ..base_duration import BaseDurationExtractor
from ..base_dateperiod import BaseDatePeriodExtractor
from ..base_timeperiod import BaseTimePeriodExtractor
from ..base_datetime import BaseDateTimeExtractor
from ..base_datetimeperiod import BaseDateTimePeriodExtractor
from ..base_set import BaseSetExtractor
from ..base_holiday import BaseHolidayExtractor
from .date_extractor_config import FrenchDateExtractorConfiguration
from .time_extractor_config import FrenchTimeExtractorConfiguration
from .duration_extractor_config import FrenchDurationExtractorConfiguration
from .dateperiod_extractor_config import FrenchDatePeriodExtractorConfiguration
from .timeperiod_extractor_config import FrenchTimePeriodExtractorConfiguration
from .datetime_extractor_config import FrenchDateTimeExtractorConfiguration
from .datetimeperiod_extractor_config import FrenchDateTimePeriodExtractorConfiguration
from .set_extractor_config import FrenchSetExtractorConfiguration
from .holiday_extractor_config import FrenchHolidayExtractorConfiguration
class FrenchMergedExtractorConfiguration(MergedExtractorConfiguration):
    """French configuration for the merged date/time extractor.

    Wires together every French sub-extractor (date, time, duration,
    date/time periods, holidays, sets) plus the French regexes used when
    merging and modifying their results.
    """
    @property
    def date_extractor(self) -> DateTimeExtractor:
        return self._date_extractor
    @property
    def time_extractor(self) -> DateTimeExtractor:
        return self._time_extractor
    @property
    def date_time_extractor(self) -> DateTimeExtractor:
        return self._date_time_extractor
    @property
    def date_period_extractor(self) -> DateTimeExtractor:
        return self._date_period_extractor
    @property
    def time_period_extractor(self) -> DateTimeExtractor:
        return self._time_period_extractor
    @property
    def date_time_period_extractor(self) -> DateTimeExtractor:
        return self._date_time_period_extractor
    @property
    def holiday_extractor(self) -> DateTimeExtractor:
        return self._holiday_extractor
    @property
    def duration_extractor(self) -> DateTimeExtractor:
        return self._duration_extractor
    @property
    def set_extractor(self) -> DateTimeExtractor:
        return self._set_extractor
    @property
    def integer_extractor(self) -> Extractor:
        return self._integer_extractor
    @property
    def after_regex(self) -> Pattern:
        return self._after_regex
    @property
    def before_regex(self) -> Pattern:
        return self._before_regex
    @property
    def since_regex(self) -> Pattern:
        return self._since_regex
    @property
    def from_to_regex(self) -> Pattern:
        return self._from_to_regex
    @property
    def single_ambiguous_month_regex(self) -> Pattern:
        return self._single_ambiguous_month_regex
    @property
    def preposition_suffix_regex(self) -> Pattern:
        return self._preposition_suffix_regex
    @property
    def number_ending_pattern(self) -> Pattern:
        return self._number_ending_pattern
    @property
    def filter_word_regex_list(self) -> List[Pattern]:
        return self._filter_word_regex_list
    def __init__(self):
        # modifier/merge regexes sourced from the French resource file
        self._before_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.BeforeRegex)
        self._after_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.AfterRegex)
        self._since_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.SinceRegex)
        self._from_to_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.FromToRegex)
        self._single_ambiguous_month_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.SingleAmbiguousMonthRegex)
        self._preposition_suffix_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.PrepositionSuffixRegex)
        self._number_ending_pattern = RegExpUtility.get_safe_reg_exp(FrenchDateTime.NumberEndingPattern)
        # sub-extractors, each with its French-specific configuration
        self._date_extractor = BaseDateExtractor(FrenchDateExtractorConfiguration())
        self._time_extractor = BaseTimeExtractor(FrenchTimeExtractorConfiguration())
        self._date_time_extractor = BaseDateTimeExtractor(FrenchDateTimeExtractorConfiguration())
        self._date_period_extractor = BaseDatePeriodExtractor(FrenchDatePeriodExtractorConfiguration())
        self._time_period_extractor = BaseTimePeriodExtractor(FrenchTimePeriodExtractorConfiguration())
        self._date_time_period_extractor = BaseDateTimePeriodExtractor(FrenchDateTimePeriodExtractorConfiguration())
        self._duration_extractor = BaseDurationExtractor(FrenchDurationExtractorConfiguration())
        self._set_extractor = BaseSetExtractor(FrenchSetExtractorConfiguration())
        self._holiday_extractor = BaseHolidayExtractor(FrenchHolidayExtractorConfiguration())
        self._integer_extractor = FrenchIntegerExtractor()
        # no filter words for French (kept for interface parity)
        self._filter_word_regex_list = []
| 2.0625 | 2 |
tglib/tglib/clients/mysql_client.py | kkkkv/tgnms | 12 | 12757644 | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import os
from typing import Any, Dict, Optional
import aiomysql
from aiomysql.sa import Engine, create_engine
from sqlalchemy import exc, select
from ..exceptions import (
ClientRestartError,
ClientRuntimeError,
ClientStoppedError,
ConfigError,
)
from .base_client import BaseClient
class MySQLClient(BaseClient):
    """A client for interacting with MySQL using :mod:`sqlalchemy`."""

    _engine: Optional[Engine] = None

    @classmethod
    async def start(cls, config: Dict[str, Any]) -> None:
        """Initialize the MySQL connection pool.

        Args:
            config: Params and values for configuring the client.

        Raises:
            ClientRestartError: The MySQL connection pool has already been initialized.
            ClientRuntimeError: The client failed to connect to the database.
            ConfigError: The ``config`` argument is incorrect/incomplete.
        """
        if cls._engine is not None:
            raise ClientRestartError()

        mysql_params = config.get("mysql")
        required_params = ["host", "port"]

        if mysql_params is None:
            raise ConfigError("Missing required 'mysql' key")
        if not isinstance(mysql_params, dict):
            raise ConfigError("Value for 'mysql' is not an object")
        if not all(param in mysql_params for param in required_params):
            raise ConfigError(f"Missing one or more required params: {required_params}")

        # Credentials come from the environment, not the config file.
        # bugfix: the password entry had been mangled into invalid syntax;
        # restored to follow the DB_NAME/DB_USER pattern above.
        # NOTE(review): env var name DB_PASSWORD inferred — confirm against deployment.
        mysql_params.update(
            {
                "db": os.getenv("DB_NAME"),
                "user": os.getenv("DB_USER"),
                "password": os.getenv("DB_PASSWORD"),
            }
        )

        try:
            # pool_recycle guards against stale pooled connections
            cls._engine = await create_engine(**mysql_params, pool_recycle=10)
        except aiomysql.OperationalError as e:
            raise ClientRuntimeError() from e

    @classmethod
    async def stop(cls) -> None:
        """Cleanly shut down the MySQL connection pool."""
        if cls._engine is None:
            raise ClientStoppedError()

        cls._engine.close()
        await cls._engine.wait_closed()
        cls._engine = None

    @classmethod
    async def healthcheck(cls) -> bool:
        """Ping the database with a 'SELECT 1' query.

        Returns:
            True if the database connection is alive, False otherwise.
        """
        if cls._engine is None:
            return False

        try:
            async with cls._engine.acquire() as sa_conn:
                await sa_conn.scalar(select([1]))
            return True
        except exc.DBAPIError:
            return False

    def lease(self):
        """Get a connection from the connection pool.

        Attention:
            This function **MUST** be used with an asynchronous context manager.

        Example:
            >>> from sqlalchemy import insert
            >>> async with MySQLClient().lease() as sa_conn:
            ...     query = insert(Table).values(name="test")
            ...     await sa_conn.execute(query)
            ...     await sa_conn.connection.commit()
        """
        if self._engine is None:
            raise ClientStoppedError()

        return self._engine.acquire()
| 2.5625 | 3 |
bilanci_project/bilanci/settings/staging.py | DeppSRL/open_bilanci | 6 | 12757645 | <reponame>DeppSRL/open_bilanci
"""Production settings and globals."""
from base import *
########## HOST CONFIGURATION
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
MAIN_HOST = ['openbilanci.staging.deppsviluppo.org',]
# Allowed hosts expansion: needed for servizi ai Comuni
HOSTS_COMUNI = [
    'novara.comuni.deppsviluppo.org',
    'rapallo.comuni.deppsviluppo.org',
    'castiglionedellestiviere.comuni.deppsviluppo.org',
    'firenze.comuni.deppsviluppo.org',
    'terni.comuni.deppsviluppo.org'
]
ALLOWED_HOSTS += MAIN_HOST + HOSTS_COMUNI
########## END HOST CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
########## END EMAIL CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INSTALLED_APPS += (
    'debug_toolbar',
)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INTERNAL_IPS = ('192.168.3.11',)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
MIDDLEWARE_CLASSES = (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
) + MIDDLEWARE_CLASSES
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
    'SHOW_TEMPLATE_CONTEXT': True,
}
def show_toolbar(request):
    # NOTE(review): shows the toolbar for every request regardless of IP —
    # acceptable on staging; do not copy this callback to production settings.
    print("IP Address for debug-toolbar: " + request.META['REMOTE_ADDR'])
    return True
SHOW_TOOLBAR_CALLBACK = show_toolbar
DEBUG_TOOLBAR_PATCH_SETTINGS=False
########## END TOOLBAR CONFIGURATION
# Filesystem locations for scraped budget ("bilanci") data
BILANCI_PATH = "/home/open_bilanci/dati/bilanci_subset"
OUTPUT_FOLDER = '../scraper_project/scraper/output/'
LISTA_COMUNI = 'listacomuni.csv'
LISTA_COMUNI_PATH = OUTPUT_FOLDER + LISTA_COMUNI
# %s placeholders: (year, municipality code, quadro/page id) — confirm upstream
PATH_PREVENTIVI = BILANCI_PATH+"/%s/%s/Preventivo/%s.html"
PATH_CONSUNTIVI = BILANCI_PATH+"/%s/%s/Consuntivo/%s.html"
BILANCI_RAW_DB = 'bilanci_raw'
| 1.710938 | 2 |
qtile/settings/mouse.py | pblcc/dotfiles | 0 | 12757646 | """
This module contains the python script that defines the configuration
of the mouse for my Qtile configuration.
For more information check: github.com/pablocorbalann/dotfiles/tree/main/qtile
"""
# Imports (just qtile)
from libqtile.config import Drag, Click
from libqtile.command import lazy
from settings.keys import mod
# We can define the mouse as a list of actions, formed by
# mods, buttons and results.
# TODO: implement a more consistent mouse functionality level
mouse = [
    # mod + left-button drag: move a floating window
    Drag(
        [mod],
        "Button1",
        lazy.window.set_position_floating(),
        start=lazy.window.get_position()
    ),
    # mod + right-button drag: resize a floating window
    Drag(
        [mod],
        "Button3",
        lazy.window.set_size_floating(),
        start=lazy.window.get_size()
    ),
    # mod + middle-click: raise the clicked window above the others
    Click([mod], "Button2", lazy.window.bring_to_front())
]
| 2.515625 | 3 |
venv/Lib/site-packages/PyInstaller/hooks/rthooks/pyi_rth_inspect.py | Gabs-Leo/Kay-O.Time.Limiter | 10 | 12757647 | #-----------------------------------------------------------------------------
# Copyright (c) 2021, PyInstaller Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: Apache-2.0
#-----------------------------------------------------------------------------
import sys
import os
import inspect
# Keep a reference to the stock implementation so unhandled cases fall through.
_orig_inspect_getsourcefile = inspect.getsourcefile


# Provide custom implementation of inspect.getsourcefile() for frozen
# applications that properly resolves relative filenames obtained from
# object (e.g., inspect stack-frames). See #5963.
def _pyi_getsourcefile(object):
    path = inspect.getfile(object)
    if os.path.isabs(path):
        # Already absolute: only claim PyInstaller-style .pyc files inside
        # the bundle; everything else goes to the original implementation.
        if path.startswith(sys._MEIPASS) and path.endswith('.pyc'):
            return path
    else:
        # Relative name matching the entry-point script's basename -> resolve
        # to the frozen entry-point itself.
        entry_point = sys.modules['__main__'].__file__
        if path == os.path.basename(entry_point):
            return entry_point
        # Otherwise map a relative .py name onto the bundled .pyc under
        # sys._MEIPASS, guarding against paths escaping the bundle.
        if path.endswith('.py'):
            candidate = os.path.normpath(os.path.join(sys._MEIPASS, path + 'c'))
            if candidate.startswith(sys._MEIPASS):
                return candidate
    # Use original implementation as a fallback
    return _orig_inspect_getsourcefile(object)


inspect.getsourcefile = _pyi_getsourcefile
| 2.015625 | 2 |
evaluation/sliding_window.py | Awannaphasch2016/tgn | 0 | 12757648 | <reponame>Awannaphasch2016/tgn
#!/usr/bin/env python3
#
import math
import pandas as pd
import numpy as np
import time
import pickle
import torch
from sklearn.metrics import average_precision_score, roc_auc_score
from utils.utils import EarlyStopMonitor, get_neighbor_finder, compute_xf_iwf, compute_nf, get_nf_iwf, add_only_new_values_of_new_window_to_dict, compute_share_selected_random_weight_per_window, get_share_selected_random_weight_per_window, EF_IWF, NF_IWF, SHARE_SELECTED_RANDOM_WEIGHT, get_conditions, apply_off_set_ind, convert_n_instances_to_ind, convert_ind_to_n_instances, right_split_ind_by_window, convert_window_idx_to_batch_idx, convert_window_idx_to_batch_idx, split_a_window_into_batches, convert_prob_list_to_binary_list, get_conditions_node_classification, label_new_unique_nodes_with_budget, pred_prob_to_pred_labels,get_batch_idx_relative_to_window_idx, get_encoder, convert_to_onehot,get_label_distribution, get_unique_nodes_labels, right_split_ind_by_batch, NF, EF
from utils.sampler import RandEdgeSampler, EdgeSampler_NF_IWF
from utils.data_processing import Data
from tqdm import tqdm
import random
# from modules.sliding_window_framework import WindowSlidingForward, WindowSlidingEnsemble
from pathlib import Path
from evaluation.evaluation import get_sampler, get_negative_nodes_batch, init_pos_neg_labels, get_criterion, compute_edges_probabilities_with_custom_sampled_nodes, get_edges_weight, compute_loss, compute_evaluation_score, compute_precision, compute_auc_for_ensemble
from evaluation.eval_node_classification import select_decoder_and_loss_node_classification, get_nodes_weight, my_eval_node_classification
from sklearn.metrics import average_precision_score, roc_auc_score, confusion_matrix
from modules.ensemble import get_all_ensemble_training_data_inds
class SlidingWindow:
    def __init__(self, args):
        """Store the parsed command-line arguments for later set_* calls."""
        self.args = args
    def set_sliding_window_params(self, keep_last_n_window=float('inf')):
        """Set how many of the most recent windows to retain (default: all)."""
        self.keep_last_n_window = keep_last_n_window
    def set_run_idx(self, run_idx):
        """Record the index of the current run (repetition)."""
        self.run_idx = run_idx
    def set_ws_idx(self, ws_idx):
        """Record the index of the current window slide (ws)."""
        self.ws_idx = ws_idx
    def set_epoch_idx(self, epoch_idx):
        """Record the index of the current training epoch."""
        self.epoch_idx = epoch_idx
    def set_batch_idx(self, batch_idx):
        """Record the index of the current batch within the window."""
        self.batch_idx = batch_idx
    def set_decoder(self):
        """Abstract hook: subclasses must attach their task decoder here."""
        raise NotImplementedError()
# def set_model(self, ModelClass,
def set_encoder(self, ModelClass,
neighbor_finder, node_features, edge_features, device, n_layers=2,
n_heads=2, dropout=0.1, use_memory=False,
memory_update_at_start=True, message_dimension=100,
memory_dimension=500, embedding_module_type="graph_attention",
message_function="mlp",
mean_time_shift_src=0, std_time_shift_src=1, mean_time_shift_dst=0,
std_time_shift_dst=1, n_neighbors=None, aggregator_type="last",
memory_updater_type="gru",
use_destination_embedding_in_message=False,
use_source_embedding_in_message=False,
dyrep=False):
"""
NOTE: later on, I may need to implement this function using discousure property of python.
"""
self.model_kwargs = {
"neighbor_finder":neighbor_finder,
"node_features":node_features,
"edge_features":edge_features,
"device":device,
"n_layers":n_layers,
"n_heads":n_heads,
"dropout":dropout,
"use_memory":use_memory,
"message_dimension":message_dimension,
"memory_dimension":memory_dimension,
"memory_update_at_start":memory_update_at_start,
"embedding_module_type":embedding_module_type,
"message_function":message_function,
"aggregator_type":aggregator_type,
"memory_updater_type":memory_updater_type,
"n_neighbors":n_neighbors,
"mean_time_shift_src":mean_time_shift_src,
"std_time_shift_src":std_time_shift_src,
"mean_time_shift_dst":mean_time_shift_dst,
"std_time_shift_dst":std_time_shift_dst,
"use_destination_embedding_in_message":use_destination_embedding_in_message,
"use_source_embedding_in_message":use_source_embedding_in_message,
"dyrep":dyrep
}
self.ModelClass = ModelClass
def save_checkpoint_per_ws(self):
if self.args.save_checkpoint:
torch.save(self.model.state_dict(), self.check_point.get_checkpoint_path())
def init_params_that_tracks_history_of_ws_node_classification(self):
self.selected_sources_to_label = []
self.selected_sources_ind = None
def init_params_that_tracks_history_of_ws(self):
self.ef_iwf_window_dict = EF_IWF()
self.nf_iwf_window_dict = NF_IWF()
self.ef_window_dict = EF()
self.nf_window_dict = NF()
self.share_selected_random_weight_per_window_dict = SHARE_SELECTED_RANDOM_WEIGHT()
def get_conditions(self):
raise NotImplementedError()
# def add_dataset(self, full_data):
# self.full_data = full_data
def add_dataset(self, full_data, node_features, edge_features, train_data, val_data, test_data):
self.full_data = full_data
self.node_features = node_features
self.train_data = train_data
self.val_data = val_data
self.test_data = test_data
def add_data(self, data_transformed_collection):
self.data_transformed_collection = data_transformed_collection
self.add_dataset(
self.data_transformed_collection.full_data,
self.data_transformed_collection.node_features,
self.data_transformed_collection.edge_features,
self.data_transformed_collection.train_data,
self.data_transformed_collection.val_data,
self.data_transformed_collection.test_data)
def add_hardware_params(self, device):
self.device = device
def add_checkpoints(self, check_point):
self.check_point = check_point
def add_observers(self):
self.add_weight_observer()
self.add_ws_observer()
self.add_batch_observer()
self.add_epoch_observer()
self.add_performance_observer()
self.add_run_observer()
self.add_ensemble_observer()
self.add_ensemble_prediction_observer()
def add_ensemble_prediction_observer(self):
self.ensemble_prediction_observer = {}
def add_ensemble_observer(self):
self.ensemble_observer = {}
self.ensemble_observer_1 = {}
def add_performance_observer(self):
self.performance_observer = {}
def add_weight_observer(self):
self.weight_observer = {}
def add_run_observer(self):
self.run_observer = {}
self.run_observer_1 = {}
def add_ws_observer(self):
self.ws_observer = {}
self.ws_observer_1 = {}
def add_batch_observer(self):
self.batch_observer = {}
self.batch_observer_1 = {}
def add_epoch_observer(self):
self.epoch_observer = {}
self.epoch_observer_1 = {}
def add_loggers_class(self, l_1, l_2, l_3):
self.l_1 = l_1
self.l_2 = l_2
self.l_3 = l_3
self.add_loggers(l_1.logger, l_2.logger, l_3.logger)
def add_loggers(self, logger, logger_2, logger_3):
self.logger = logger
self.logger_2 = logger_2
self.logger_3 = logger_3
def add_model_training_params(self, n_epoch):
self.n_epoch = n_epoch
def add_model_params(self, num_neighbors, use_memory):
self.num_neighbors = num_neighbors
self.use_memory = use_memory
def add_model(self):
raise NotImplementedError
def pre_evaluation(self):
raise NotImplementedError
# def get_init_windows_size(self, window_size, keep_last_n_window_as_window_slides, window_idx_to_start_with=None):
def get_init_windows_size(self, window_size, window_idx_to_start_with=None):
# def get_init_windows_size(self, window_size):
init_windows_size = window_size
if window_idx_to_start_with is not None:
assert window_idx_to_start_with * window_size >= init_windows_size
init_windows_size = window_idx_to_start_with * window_size
# else:
# if keep_last_n_window_as_window_slides is not None:
# init_windows_size = window_size * keep_last_n_window_as_window_slides
# # else:
# # init_windows_size = window_size
return init_windows_size
# def get_window_slide_stride(self, window_size, keep_last_n_window_as_window_slides ):
def get_window_slide_stride(self, window_size, window_stride_multiplier=1):
# return self.get_init_windows_size(window_size, keep_last_n_window_as_window_slides)
return window_size * window_stride_multiplier
def get_sliding_window_params(self, num_instance, batch_size, ws_multiplier,keep_last_n_window_as_window_slides, window_stride_multiplier, window_idx_to_start_with=None, init_n_instances_as_multiple_of_ws=None):
### Batch params
# raise NotImplementedError
# if first_batch_idx is not None:
# first_batch = (first_batch_idx + 1) * batch_size
# assert first_batch < num_instance
# num_instance = num_instance - first_batch
# if last_batch_idx is not None:
# last_batch = (last_batch_idx + 1) * batch_size
# assert last_batch < num_instance
# num_instance = num_instance - last_batch
# begin_ind, idx_to_split, _ = right_split_ind_by_window(1, self.full_data.data_size, self.window_size)
begin_ind, idx_to_split, _ = right_split_ind_by_batch(1, self.full_data.data_size, self.args.bs)
# self.window_begin_inds, self.window_end_inds = get_all_ensemble_training_data_inds(begin_ind, idx_to_split-1, self.window_size, fix_begin_ind=True)
batch_inds = split_a_window_into_batches(begin_ind, idx_to_split- 1, self.args.bs)
batch_begin_inds = batch_inds[:-1]
batch_end_inds = batch_inds[1:]
### Window params
window_size = batch_size * ws_multiplier
num_init_data = window_size
init_train_data = self.get_init_windows_size(window_size, window_idx_to_start_with=self.args.window_idx_to_start_with) # default init_train_data.
# init_train_data = window_size # :NOTE: Bad naming, but I keep this for compatibility reason.
num_instances_shift = self.get_window_slide_stride(window_size)
total_num_ws = math.ceil(num_instance/num_instances_shift) # 6
init_num_ws = int(init_train_data/num_instances_shift)
left_num_ws = total_num_ws - init_num_ws # DEPRECATED:
# left_num_ws = total_num_ws - init_num_ws + 1 # NOTE: this may cause error.
# ws_idx_to_run = list(range(init_num_ws, total_num_ws))
ws_idx_pair = self.get_ws_idx_pair(init_train_data,window_size, keep_last_n_window_as_window_slides, window_idx_to_start_with=window_idx_to_start_with)
# ws_idx_pair_list = [ws_idx_pair]
ws_idx_pair_to_run = []
ws_idx_begin = ws_idx_pair[0]
ws_idx_end = ws_idx_pair[1]
while ws_idx_end <= num_instance - window_size: # only include training data.
ws_idx_pair_to_run.append((ws_idx_begin, ws_idx_end))
ws_idx_begin += num_instances_shift
ws_idx_end += num_instances_shift
# get init number of instances
init_n_instances = None
if init_n_instances_as_multiple_of_ws is not None:
init_n_instances = init_n_instances_as_multiple_of_ws * window_size
return window_size, num_init_data, num_instances_shift, init_train_data, total_num_ws, init_num_ws, left_num_ws, batch_inds, batch_begin_inds, batch_end_inds, ws_idx_pair_to_run, init_n_instances
def set_sliding_window_framework(self, ws_framework):
if ws_framework == "forward":
self.WSFramework = WindowSlidingForward
elif ws_framework == "ensemble":
self.WSFramework = WindowSlidingEnsemble
else:
raise NotImplementedError()
def set_begin_end_batch_idx(self, size_of_current_concat_windows, batch_idx, batch_size, batch_ref_window_size):
"""
:NOTE: at the time of writing, I only care about begin and end batch.
"""
assert batch_ref_window_size == 0, "I don't use this param as of now."
assert batch_ref_window_size < batch_size
start_train_idx = batch_idx * batch_size
batch_ref_window_size = 0
assert batch_ref_window_size < batch_size
end_train_idx = min(size_of_current_concat_windows-batch_ref_window_size, start_train_idx + batch_size)
end_train_idx = min(end_train_idx, self.full_data.data_size - batch_ref_window_size) # edge case for hard sampling window.
assert start_train_idx < end_train_idx, "number of batch to run for each epoch was not set correctly."
end_train_hard_negative_idx = end_train_idx + batch_ref_window_size
assert end_train_hard_negative_idx <= size_of_current_concat_windows
if end_train_idx <= (self.full_data.data_size - batch_ref_window_size):
assert (end_train_hard_negative_idx - batch_ref_window_size) == end_train_idx
return start_train_idx, end_train_idx
# def set_params_window(self, start_idx, end_idx):
# self.set_params_batch(start_idx, end_idx)
def set_params_mask(self, left, right ):
raise NotImplementedError()
def set_params_batch(self, start_train_idx, end_train_idx):
sources_batch, destinations_batch = self.full_data.sources[start_train_idx:end_train_idx], \
self.full_data.destinations[start_train_idx:end_train_idx]
edge_idxs_batch = self.full_data.edge_idxs[start_train_idx: end_train_idx]
timestamps_batch = self.full_data.timestamps[start_train_idx:end_train_idx]
labels_batch = self.full_data.labels[start_train_idx:end_train_idx]
# edge_hard_samples_batch = full_data.edge_idxs[start_train_idx:end_train_hard_negative_idx]
# timestamps_hard_samples_batch = full_data.timestamps[start_train_idx:end_train_hard_negative_idx]
return sources_batch, destinations_batch, edge_idxs_batch, timestamps_batch, labels_batch
def evaluate_batch(self):
raise NotImplementedError
# def evaluate_epoch(self, epoch, num_batch):
def evaluate_epoch(self):
raise NotImplementedError()
def evaluate_ws(self):
raise NotImplementedError()
def evaluate(self):
raise NotImplementedError
class WindowSlidingForward(SlidingWindow):
    """Sliding-window framework that trains a single model forward in time.

    For each window-slide (ws): train for ``n_epoch`` epochs over the batches
    of the current window(s), then validate on the ``window_size`` instances
    immediately after the training range. Observer dicts are flushed to pickle
    files under the logger's path for offline analysis.
    """
    def observer_collect_idx_per_batch(self):
        # Record the current run/epoch/batch/ws indices (per-batch granularity).
        self.run_observer.setdefault('run_idx', []).append(self.run_idx)
        self.epoch_observer.setdefault('epoch_idx', []).append(self.epoch_idx)
        self.batch_observer.setdefault('batch_idx', []).append(self.batch_idx)
        self.ws_observer.setdefault('ws_idx', []).append(self.ws_idx)
    def observer_collect_idx_per_epoch(self):
        # Record the current run/epoch/batch/ws indices (per-epoch granularity).
        self.run_observer_1.setdefault('run_idx', []).append(self.run_idx)
        self.epoch_observer_1.setdefault('epoch_idx', []).append(self.epoch_idx)
        self.batch_observer_1.setdefault('batch_idx', []).append(self.batch_idx)
        self.ws_observer_1.setdefault('ws_idx', []).append(self.ws_idx)
    def save_performance_observer(self):
        # Merge the per-epoch observers with the performance metrics and dump
        # them as a single pickled DataFrame next to the log files.
        performance_observer_path = Path(self.l_2.log_relative_path)/ f'{self.l_2.log_time}'
        performance_observer_file_name = 'performance_observer.pickle'
        Path(performance_observer_path).mkdir(parents=True, exist_ok=True)
        observer = {}
        observer.update(self.run_observer_1)
        observer.update(self.epoch_observer_1)
        observer.update(self.batch_observer_1)
        observer.update(self.performance_observer)
        observer.update(self.ws_observer_1)
        pd.DataFrame.from_dict(observer).to_pickle( str(performance_observer_path /performance_observer_file_name))
    def get_conditions(self):
        # :TODO: write test on these. raise exception for all cases that wasn't intended or designed for.
        self.prefix, self.neg_sample_method, self.neg_edges_formation, self.weighted_loss_method, self.compute_xf_iwf_with_sigmoid = get_conditions(self.args)
    def set_decoder(self):
        """Return (decoder, optimizer, criterion) for link prediction.

        :NOTE: I am aware that optimizer and criterion don't belong to the
        decoder only, but they need somewhere to belong, and here it is.
        """
        # NOTE: For link prediction, decoder is defined in TGN as self.affinity_score which is used in compute_edge_probabilities
        decoder = None
        criterion = get_criterion()
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
        return decoder, optimizer, criterion
    def add_model(self):
        # Build a fresh encoder from the kwargs recorded by set_encoder, then
        # attach decoder/optimizer/criterion and move the model to the device.
        self.model = self.ModelClass(**self.model_kwargs)
        self.decoder, self.optimizer, self.criterion = self.set_decoder()
        self.model.to(self.device)
    def pre_evaluation(self):
        # BUGFIX: init_n_instances_as_multiple_of_ws and window_idx_to_start_with
        # are now passed by keyword. Previously init_n_instances_as_multiple_of_ws
        # was the 6th positional argument and therefore bound to the base
        # signature's `window_idx_to_start_with` parameter instead.
        self.window_size, self.num_init_data,self.num_instances_shift, self.init_train_data, self.total_num_ws, self.init_num_ws, self.left_num_ws, self.batch_inds, self.batch_begin_inds, self.batch_end_inds, self.ws_idx_pair_to_run, self.init_n_instances = self.get_sliding_window_params(self.full_data.data_size, self.args.bs, self.args.ws_multiplier, self.args.keep_last_n_window_as_window_slides, self.args.window_stride_multiplier, window_idx_to_start_with=self.args.window_idx_to_start_with, init_n_instances_as_multiple_of_ws=self.args.init_n_instances_as_multiple_of_ws)
        self.add_model()
    def set_params_mask(self, start_idx, end_idx ):
        """Boolean mask selecting all instances with timestamps strictly
        before the timestamp at ``end_idx``."""
        # :TODO: :BUG: raise NotImplementedError() # cant use masking because np.unique(full_data.timestamps).shape[0] != full_data.timestamps.shape[0]
        full_data = self.full_data
        left = full_data.timestamps < full_data.timestamps[end_idx]
        right = full_data.timestamps[0] <= full_data.timestamps
        mask = np.logical_and(right, left)
        return mask
    def observer_collect_idx(self):
        # Per-batch index recording (same dicts as observer_collect_idx_per_batch).
        self.run_observer.setdefault('run_idx', []).append(self.run_idx)
        self.ws_observer.setdefault('ws_idx', []).append(self.ws_idx)
        self.epoch_observer.setdefault('epoch_idx', []).append(self.epoch_idx)
        self.batch_observer.setdefault('batch_idx', []).append(self.batch_idx)
    def observer_collect_edges_weight(self, pos_edges_weight, neg_edges_weight):
        """Record pos/neg edge-weight tensors (if any) and flush the combined
        observer dict to ``weight_observer.pickle``.

        Tensors are detached and converted to plain lists before storage.
        """
        if pos_edges_weight is not None:
            self.weight_observer.setdefault('pos_edges_weight', []).append(pos_edges_weight.cpu().detach().numpy().tolist())
        else:
            self.weight_observer['pos_edges_weight'] = None
        if neg_edges_weight is not None:
            self.weight_observer.setdefault('neg_edges_weight', []).append(neg_edges_weight.cpu().detach().numpy().tolist())
        else:
            self.weight_observer['neg_edges_weight'] = None
        weight_observer_path = Path(self.l_2.log_relative_path)/ f'{self.l_2.log_time}'
        weight_observer_file_name = 'weight_observer.pickle'
        Path(weight_observer_path).mkdir(parents=True, exist_ok=True)
        observer = {}
        observer.update(self.run_observer)
        observer.update(self.ws_observer)
        observer.update(self.epoch_observer)
        observer.update(self.batch_observer)
        if self.weight_observer['pos_edges_weight'] is not None:
            weight_name = 'pos_edges_weight'
            observer.update({weight_name: self.weight_observer[weight_name]})
            pd.DataFrame.from_dict(observer).to_pickle( str(weight_observer_path /weight_observer_file_name))
        if self.weight_observer['neg_edges_weight'] is not None:
            weight_name = 'neg_edges_weight'
            observer.update({weight_name: self.weight_observer[weight_name]})
            pd.DataFrame.from_dict(observer).to_pickle( str(weight_observer_path /weight_observer_file_name))
    def evaluate_batch(self, model, k, backprop_every):
        """Train on ``backprop_every`` consecutive batches starting at batch
        index ``k``, then perform one optimizer step.

        Side effects: appends to ``self.m_loss`` and stores the last batch's
        ``start_train_idx``/``end_train_idx``/``train_rand_sampler`` on self
        for use by ``evaluate_epoch``'s validation step.
        """
        args = self.args
        criterion = self.criterion
        optimizer = self.optimizer
        full_data = self.full_data
        NUM_NEIGHBORS = self.num_neighbors
        neg_sample_method = self.neg_sample_method
        neg_edges_formation = self.neg_edges_formation
        device = self.device
        weighted_loss_method = self.weighted_loss_method
        compute_xf_iwf_with_sigmoid = self.compute_xf_iwf_with_sigmoid
        num_instance = full_data.data_size
        max_weight = args.max_random_weight_range
        BATCH_SIZE = args.bs
        USE_MEMORY = args.use_memory
        ef_iwf_window_dict = self.ef_iwf_window_dict.dict_
        nf_iwf_window_dict = self.nf_iwf_window_dict.dict_
        ef_window_dict = self.ef_window_dict.dict_
        nf_window_dict = self.nf_window_dict.dict_
        share_selected_random_weight_per_window_dict = self.share_selected_random_weight_per_window_dict.dict_
        loss = 0
        optimizer.zero_grad()
        # Custom loop to allow to perform backpropagation only every a certain number of batches
        for j in range(backprop_every):
            batch_idx = k + j
            batch_ref_window_size = 0 # :NOTE: added for compatibility reason
            ## Indexing
            start_train_idx = self.batch_begin_inds[batch_idx]
            end_train_idx = self.batch_end_inds[batch_idx]
            end_train_hard_negative_idx = end_train_idx # :NOTE: added for compatibility reason.
            ## Masking
            train_mask = self.set_params_mask(start_train_idx, end_train_idx)
            # Initialize training neighbor finder to retrieve temporal graph
            train_data = Data(full_data.sources[train_mask],
                              full_data.destinations[train_mask],
                              full_data.timestamps[train_mask],
                              full_data.edge_idxs[train_mask],
                              full_data.labels[train_mask])
            train_ngh_finder = get_neighbor_finder(train_data, args.uniform)
            model.set_neighbor_finder(train_ngh_finder)
            sources_batch, destinations_batch, edge_idxs_batch, timestamps_batch, _ = self.set_params_batch(start_train_idx, end_train_idx)
            ## Sampler
            size = len(sources_batch)
            train_rand_sampler = get_sampler(train_data, batch_ref_window_size, start_train_idx, end_train_hard_negative_idx, neg_sample_method)
            negatives_src_batch, negatives_dst_batch = get_negative_nodes_batch(train_rand_sampler, BATCH_SIZE, size, neg_sample_method)
            pos_label, neg_label = init_pos_neg_labels(size, device)
            # TODO: this function should be in tgn
            model, pos_prob, neg_prob = compute_edges_probabilities_with_custom_sampled_nodes(model, neg_edges_formation, negatives_dst_batch, negatives_src_batch, sources_batch, destinations_batch, timestamps_batch, edge_idxs_batch, NUM_NEIGHBORS)
            pos_edges_weight, neg_edges_weight = get_edges_weight(train_data,k, BATCH_SIZE,max_weight,start_train_idx, end_train_hard_negative_idx, ef_iwf_window_dict, nf_iwf_window_dict, ef_window_dict, nf_window_dict, share_selected_random_weight_per_window_dict, weighted_loss_method, sampled_nodes=negatives_src_batch, compute_xf_iwf_with_sigmoid=compute_xf_iwf_with_sigmoid, edge_weight_multiplier=args.edge_weight_multiplier, use_time_decay=args.use_time_decay, time_diffs = model.time_diffs_raw.cpu().data.numpy())
            ## assign the correct device to data
            if pos_edges_weight is not None:
                pos_edges_weight = pos_edges_weight.to(self.device)
            self.observer_collect_idx_per_batch()
            self.logger_2.info(f'pos_edges_weight = {pos_edges_weight}')
            self.logger_2.info(f'neg_edges_weight = {neg_edges_weight}')
            self.observer_collect_edges_weight(pos_edges_weight, neg_edges_weight)
            loss = compute_loss(pos_label, neg_label, pos_prob, neg_prob, pos_edges_weight, neg_edges_weight, batch_idx, criterion, loss, weighted_loss_method)
        loss /= backprop_every
        loss.backward()
        optimizer.step()
        self.m_loss.append(loss.item())
        # Detach memory after 'args.backprop_every' number of batches so we don't backpropagate to
        # the start of time
        if USE_MEMORY:
            model.memory.detach_memory()
        # Expose the final batch's range and sampler for validation.
        self.start_train_idx = start_train_idx
        self.end_train_idx = end_train_idx
        self.train_rand_sampler = train_rand_sampler
    def evaluate_epoch(self, model, epoch, num_batch_pair):
        """Run one training epoch over batches [num_batch_pair[0],
        num_batch_pair[1]) and validate on the window following the last
        training batch.

        params
        -------
        num_batch_pair : (first_batch_idx, one_past_last_batch_idx)
        """
        args = self.args
        full_data = self.full_data
        NUM_NEIGHBORS = self.num_neighbors
        BATCH_SIZE = args.bs
        USE_MEMORY = args.use_memory
        start_epoch = time.time()
        ### Training :DOC:
        # Reinitialize memory of the model at the start of each epoch
        if USE_MEMORY:
            model.memory.__init_memory__()
        for k in range(num_batch_pair[0], num_batch_pair[1], args.backprop_every):
            self.logger.info('training batch contain idx from {} to {}'.format(k * BATCH_SIZE, (k+1) * BATCH_SIZE))
            self.logger.info('batch = {}'.format(k))
            self.logger.debug('batch = {}'.format(k))
            self.set_batch_idx(k)
            self.evaluate_batch(model, k, args.backprop_every)
        start_train_idx = self.start_train_idx
        end_train_idx = self.end_train_idx
        train_rand_sampler = self.train_rand_sampler
        epoch_time = time.time() - start_epoch
        self.epoch_times.append(epoch_time)
        self.logger.info('start validation...')
        # Initialize validation and test neighbor finder to retrieve temporal graph
        full_ngh_finder = get_neighbor_finder(full_data, args.uniform)
        ### Validation
        # Validation uses the full graph
        model.set_neighbor_finder(full_ngh_finder)
        if USE_MEMORY:
            # Backup memory at the end of training, so later we can restore it and use it for the
            # validation on unseen nodes
            train_memory_backup = model.memory.backup_memory()
        VAL_BATCH_SIZE = self.window_size # NOTE: only model with same window_size can be compared.
        end_train_idx = min(end_train_idx, self.full_data.n_interactions-1)
        self.logger.info('prediction contain idx from {} to {}'.format(end_train_idx, end_train_idx + VAL_BATCH_SIZE))
        sources_batch, destinations_batch, edge_idxs_batch, timestamps_batch, _ = self.set_params_batch(end_train_idx, end_train_idx + VAL_BATCH_SIZE)
        size = len(sources_batch)
        _, negative_samples = train_rand_sampler.sample(size)
        pos_prob, neg_prob = model.compute_edge_probabilities(sources_batch, destinations_batch,
                                                              negative_samples, timestamps_batch,
                                                              edge_idxs_batch, NUM_NEIGHBORS)
        pred_score = np.concatenate([pos_prob.cpu().detach().numpy(), neg_prob.cpu().detach().numpy()])
        true_label = np.concatenate([np.ones(size), np.zeros(size)])
        val_auc, val_ap = compute_evaluation_score(true_label,pred_score)
        if USE_MEMORY:
            val_memory_backup = model.memory.backup_memory()
            # Restore memory we had at the end of training to be used when validating on new nodes.
            # Also backup memory after validation so it can be used for testing (since test edges are
            # strictly later in time than validation edges)
            model.memory.restore_memory(train_memory_backup)
        total_epoch_time = time.time() - start_epoch
        self.total_epoch_times.append(total_epoch_time)
        mean_loss = np.mean(self.m_loss)
        self.performance_observer.setdefault('Mean Loss', []).append(mean_loss)
        self.logger.info('epoch: {} took {:.2f}s'.format(epoch, total_epoch_time))
        self.logger.info('Epoch mean loss: {}'.format(mean_loss))
        # NOTE(review): `val_ap` is logged/stored under 'AUC' and `val_auc`
        # under 'Absolute Precision' — the labels look swapped relative to the
        # unpacking `val_auc, val_ap = compute_evaluation_score(...)`; kept
        # as-is for continuity with existing pickles, but confirm against
        # compute_evaluation_score's return order.
        self.logger.info(
            'val auc: {}'.format(val_ap))
        self.performance_observer.setdefault('AUC', []).append(val_ap)
        self.logger.info(
            'val ap: {}'.format(val_auc))
        self.performance_observer.setdefault('Absolute Precision', []).append(val_auc)
        self.observer_collect_idx_per_epoch()
        self.save_performance_observer()
    def get_idx_pair_of_current_concat_windows(self, last_idx_of_current_window, window_stride):
        # (begin, end) instance indices of the most recent stride-sized span.
        return (last_idx_of_current_window - window_stride), last_idx_of_current_window
    def evaluate(self):
        """Outer loop: configure checkpoint metadata, then run evaluate_ws for
        every (begin, end) window pair produced by pre_evaluation."""
        args = self.args
        self.check_point.data = args.data
        self.check_point.prefix = self.prefix
        self.check_point.bs = args.bs
        self.check_point.epoch_max = args.n_epoch
        self.check_point.ws_max = self.total_num_ws
        self.check_point.max_random_weight_range = args.max_random_weight_range
        for ws_idx_begin, ws_idx_end in self.ws_idx_pair_to_run:
            # Window boundaries must be aligned to window_size.
            assert int(ws_idx_begin/self.window_size) == ws_idx_begin/self.window_size
            ws = int(ws_idx_end/self.window_size) - 1
            self.check_point.ws_idx = ws
            self.logger.debug('-ws = {}'.format(ws))
            self.logger.info('training ws contain idx from {} to {}'.format(ws_idx_begin, ws_idx_end))
            self.logger_2.info('-ws = {}'.format(ws))
            self.set_ws_idx(ws)
            self.evaluate_ws((ws_idx_begin, ws_idx_end), self.args.bs)
    def get_ws_idx_pair(self, init_ws_end_idx, window_size, keep_last_n_window_as_window_slides=None, window_idx_to_start_with=None):
        """Return (begin, end) instance indices of the initial window span."""
        assert init_ws_end_idx/window_size == int(init_ws_end_idx/window_size)
        ws_idx_begin = 0
        ws_idx_end = init_ws_end_idx
        if window_idx_to_start_with is not None:
            ws_idx_end = window_size * window_idx_to_start_with
            assert ws_idx_end >= init_ws_end_idx
        if keep_last_n_window_as_window_slides is not None:
            ws_idx_begin = ws_idx_end - (keep_last_n_window_as_window_slides * window_size)
            assert ws_idx_begin >= 0
        return ws_idx_begin, ws_idx_end
    def get_num_batch_idx_pair(self, window_idx_pair,batch_size, keep_last_n_window_as_window_slides=None):
        """Convert a (begin, end) instance-index pair into a (first, one-past-last)
        batch-index pair; first is 0 unless a window-slide limit is set."""
        num_batch_begin = 0
        num_batch_end = math.floor((window_idx_pair[1])/batch_size)
        if keep_last_n_window_as_window_slides is not None:
            num_batch_begin = math.floor((window_idx_pair[0])/batch_size)
        return num_batch_begin, num_batch_end
    def evaluate_ws(self, idx_pair, batch_size):
        """Train + validate for one window slide.

        params
        -----
        idx_pair: tuple
            (begin, end) instance indices of the current window span.
        """
        num_batch_begin, num_batch_end = self.get_num_batch_idx_pair(idx_pair, batch_size, self.args.keep_last_n_window_as_window_slides)
        self.m_loss = []
        self.epoch_times = []
        self.total_epoch_times = []
        for epoch in range(self.n_epoch):
            self.check_point.epoch_idx = epoch
            self.check_point.get_checkpoint_path()
            self.logger.debug('--epoch = {}'.format(epoch))
            self.logger_2.info('--epoch = {}'.format(epoch))
            self.logger.info(f'max_weight = {self.args.max_random_weight_range}')
            self.set_epoch_idx(epoch)
            self.evaluate_epoch(self.model, epoch, (num_batch_begin, num_batch_end))
        self.save_checkpoint_per_ws()
        # Training has finished, we have loaded the best model, and we want to backup its current
        # memory (which has seen validation edges) so that it can also be used when testing on unseen
        if self.use_memory:
            val_memory_backup = self.model.memory.backup_memory()
class WindowSlidingEnsemble(SlidingWindow):
def __init__(self, args):
self.args = args
self.models = {}
# def add_dataset(self, full_data):
# self.full_data = full_data
def get_conditions(self):
self.prefix, self.neg_sample_method, self.neg_edges_formation, self.weighted_loss_method, self.compute_xf_iwf_with_sigmoid = get_conditions(self.args)
def get_ws_idx_pair(self, init_ws_end_idx, window_size, keep_last_n_window_as_window_slides=None, window_idx_to_start_with=None):
assert init_ws_end_idx/window_size == int(init_ws_end_idx/window_size)
ws_idx_begin = 0
ws_idx_end = init_ws_end_idx
if window_idx_to_start_with is not None:
ws_idx_end = window_size * window_idx_to_start_with
assert ws_idx_end >= init_ws_end_idx
if keep_last_n_window_as_window_slides is not None:
assert keep_last_n_window_as_window_slides <= (init_ws_end_idx/ window_size)
ws_idx_begin = ws_idx_end - (keep_last_n_window_as_window_slides * window_size)
assert ws_idx_begin >= 0
return ws_idx_begin, ws_idx_end
return init_windows_size
    def get_sliding_window_params(self, num_instance, batch_size, ws_multiplier, window_stride_multiplier=None, init_n_instances_as_multiple_of_ws=None):
        """Ensemble variant of the window/batch bookkeeping computation.

        Differs from the base class: no keep-last-n / start-window handling,
        the stride multiplier IS forwarded to get_window_slide_stride, and the
        window loop runs up to ``num_instance`` (base stops a window earlier).

        NOTE(review): if ``window_stride_multiplier`` is left as None,
        ``get_window_slide_stride`` would multiply by None and raise — callers
        are expected to pass args.window_stride_multiplier (see
        pre_evaluation); confirm this is always provided.
        """
        begin_ind, idx_to_split, _ = right_split_ind_by_batch(1, self.full_data.data_size, self.args.bs)
        batch_inds = split_a_window_into_batches(begin_ind, idx_to_split- 1, self.args.bs)
        batch_begin_inds = batch_inds[:-1]
        batch_end_inds = batch_inds[1:]
        ### Window params
        window_size = batch_size * ws_multiplier
        num_init_data = window_size
        init_train_data = self.get_init_windows_size(window_size) # default init_train_data.
        num_instances_shift = self.get_window_slide_stride(window_size, window_stride_multiplier)
        total_num_ws = math.ceil(num_instance/num_instances_shift) # 6
        init_num_ws = int(init_train_data/num_instances_shift)
        left_num_ws = total_num_ws - init_num_ws # DEPRECATED:
        # def get_ws_idx_pair(self, init_ws_end_idx, window_size, keep_last_n_window_as_window_slides=None, window_idx_to_start_with=None):
        ws_idx_pair = self.get_ws_idx_pair(init_train_data,window_size)
        # Enumerate every (begin, end) window pair by stepping the initial
        # pair forward by num_instances_shift until the data runs out.
        ws_idx_pair_to_run = []
        ws_idx_begin = ws_idx_pair[0]
        ws_idx_end = ws_idx_pair[1]
        # while ws_idx_end <= num_instance - window_size: # only include training data.
        while ws_idx_end <= num_instance: # only include training data.
            ws_idx_pair_to_run.append((ws_idx_begin, ws_idx_end))
            ws_idx_begin += num_instances_shift
            ws_idx_end += num_instances_shift
        # get init number of instances
        init_n_instances = None
        if init_n_instances_as_multiple_of_ws is not None:
            init_n_instances = init_n_instances_as_multiple_of_ws * window_size
        return window_size, num_init_data, num_instances_shift, init_train_data, total_num_ws, init_num_ws, left_num_ws, batch_inds, batch_begin_inds, batch_end_inds, ws_idx_pair_to_run, init_n_instances
def pre_evaluation(self):
    """Populate all window/batch/ensemble bookkeeping attributes before evaluation.

    Computes sliding-window parameters from self.args, then the index pairs of
    the ensemble members' training ranges.
    """
    params = self.get_sliding_window_params(
        self.full_data.data_size, self.args.bs, self.args.ws_multiplier,
        self.args.window_stride_multiplier, self.args.init_n_instances_as_multiple_of_ws)
    (self.window_size, self.num_init_data, self.num_instances_shift,
     self.init_train_data, self.total_num_ws, self.init_num_ws,
     self.left_num_ws, self.batch_inds, self.batch_begin_inds,
     self.batch_end_inds, self.ws_idx_pair_to_run, self.init_n_instances) = params
    begin_ind, idx_to_split, _ = right_split_ind_by_window(
        1, self.full_data.data_size, self.window_size, self.init_n_instances)
    # When init_n_instances_as_multiple_of_ws was not given, default the init
    # boundary to the start of the last window.
    if self.init_n_instances is None:
        self.init_n_instances = idx_to_split - self.window_size  # FIXME: something aint right
    self.ensemble_begin_inds, self.ensemble_end_inds = get_all_ensemble_training_data_inds(
        begin_ind, self.init_n_instances - 1, self.window_size,
        fix_begin_ind=self.args.fix_begin_data_ind_of_models_in_ensemble)
def add_model(self, idx):
    """Instantiate a fresh model/optimizer pair and register it under ensemble slot `idx`.

    Uses setdefault, so an already-registered member is never overwritten even
    though self.model / self.optimizer are rebound to the new instances.

    :param idx: ensemble slot key in self.models.
    """
    new_model = self.ModelClass(**self.model_kwargs)
    self.model = new_model
    self.optimizer = torch.optim.Adam(new_model.parameters(), lr=self.args.lr)
    new_model.to(self.device)
    self.models.setdefault(idx, {"model": new_model, "loss": None})
def observer_collect_idx_per_batch(self):
    """Append the current run/epoch/batch/ensemble/window indices to their
    per-batch observer dicts (each list is created on first use)."""
    trackers = (
        (self.run_observer, 'run_idx', self.run_idx),
        (self.epoch_observer, 'epoch_idx', self.epoch_idx),
        (self.batch_observer, 'batch_idx', self.batch_idx),
        (self.ensemble_observer, 'ensemble_idx', self.ensemble_idx),
        (self.ws_observer, 'ws_idx', self.ws_idx),
    )
    for store, key, value in trackers:
        store.setdefault(key, []).append(value)
def observer_collect_edges_weight(self, pos_edges_weight, neg_edges_weight):
self.logger_2.info(f'pos_edges_weight = {pos_edges_weight}')
self.logger_2.info(f'neg_edges_weight = {neg_edges_weight}')
if pos_edges_weight is not None:
# self.weight_observer.setdefault('pos_edges_weight', []).append(pos_edges_weight.numpy())
self.weight_observer.setdefault('pos_edges_weight', []).append(pos_edges_weight.cpu().detach().numpy().tolist())
else:
self.weight_observer['pos_edges_weight'] = None
if neg_edges_weight is not None:
# self.weight_observer.setdefault('neg_edges_weight', []).append(neg_edges_weight.numpy())
self.weight_observer.setdefault('neg_edges_weight', []).append(neg_edges_weight.cpu().detach().numpy().tolist())
else:
self.weight_observer['neg_edges_weight'] = None
weight_observer_path = Path(self.l_2.log_relative_path)/ f'{self.l_2.log_time}'
# weight_observer_file_name = '{}_run={}_ws={}_epoch={}_batch={}.csv'
weight_observer_file_name = 'weight_observer.pickle'
Path(weight_observer_path).mkdir(parents=True, exist_ok=True)
def save_weight_observer(self):
    """Persist the collected edge-weight series (with their run/ensemble/epoch/
    batch/window index series) to
    <l_2.log_relative_path>/<l_2.log_time>/weight_observer.pickle.

    Fix: `weight_observer_path` and `weight_observer_file_name` used to be
    locals of observer_collect_edges_weight(), so referencing them here raised
    NameError. The path is now built (and its directory created) in this
    method, mirroring save_performance_observer().
    """
    weight_observer_path = Path(self.l_2.log_relative_path) / f'{self.l_2.log_time}'
    weight_observer_file_name = 'weight_observer.pickle'
    Path(weight_observer_path).mkdir(parents=True, exist_ok=True)
    observer = {}
    observer.update(self.run_observer)
    observer.update(self.ensemble_observer)
    observer.update(self.epoch_observer)
    observer.update(self.batch_observer)
    observer.update(self.ws_observer)
    # Write after each available series; when both exist the second write
    # includes both columns (same behavior as the original sequence).
    for weight_name in ('pos_edges_weight', 'neg_edges_weight'):
        if self.weight_observer[weight_name] is not None:
            observer.update({weight_name: self.weight_observer[weight_name]})
            pd.DataFrame.from_dict(observer).to_pickle(
                str(weight_observer_path / weight_observer_file_name))
def evaluate_batch(self, ensemble_idx, k, backprop_every, checkpoint_loss):
    """Train ensemble member `ensemble_idx` on `backprop_every` consecutive
    batches starting at batch index `k`, then apply one optimizer step.

    Accumulates loss across the batch group (optionally resuming from
    `checkpoint_loss`), records edge-weight observers per batch, and stores
    the last batch boundaries plus the negative sampler on `self` for the
    validation phase in evaluate_epoch().

    :param ensemble_idx: key into self.models selecting the member to train.
    :param k: absolute index of the first batch in this backprop group.
    :param backprop_every: number of batches to accumulate before backward().
    :param checkpoint_loss: loss restored from a checkpoint, or None to start at 0.
    """
    model = self.models[ensemble_idx]["model"]
    args = self.args
    full_data = self.full_data
    # init_train_data = self.init_train_data
    NUM_NEIGHBORS = self.num_neighbors
    optimizer = self.optimizer
    neg_sample_method = self.neg_sample_method
    neg_edges_formation = self.neg_edges_formation
    device = self.device
    weighted_loss_method = self.weighted_loss_method
    compute_xf_iwf_with_sigmoid = self.compute_xf_iwf_with_sigmoid
    num_instance = full_data.data_size
    max_weight = args.max_random_weight_range
    BATCH_SIZE = args.bs
    USE_MEMORY = args.use_memory
    # Window-scoped weight dictionaries shared across batches of one window.
    ef_iwf_window_dict = self.ef_iwf_window_dict.dict_
    nf_iwf_window_dict = self.nf_iwf_window_dict.dict_
    share_selected_random_weight_per_window_dict = self.share_selected_random_weight_per_window_dict.dict_
    nf_window_dict = self.nf_window_dict.dict_
    ef_window_dict = self.ef_window_dict.dict_
    # Resume loss accumulation from a checkpoint when one was provided.
    if checkpoint_loss is None:
        loss = 0
    else:
        loss = checkpoint_loss
    optimizer.zero_grad()
    # Custom loop to allow to perform backpropagation only every a certain number of batches
    for j in range(backprop_every):
        # logger.debug('----backprop_every = {}'.format(j))
        batch_idx = k + j
        batch_ref_window_size = 0 # :NOTE: added for compatibility reason
        ## Indexing
        start_train_idx = self.batch_begin_inds[batch_idx]
        end_train_idx = self.batch_end_inds[batch_idx]
        # start_train_idx, end_train_idx = self.set_begin_end_batch_idx(self.init_train_data,batch_idx, BATCH_SIZE, batch_ref_window_size)
        end_train_hard_negative_idx = end_train_idx # :NOTE: added for compatibility reason.
        sources_batch, destinations_batch, edge_idxs_batch, timestamps_batch, _ = self.set_params_batch(start_train_idx, end_train_idx)
        ## Masking: keep every interaction strictly earlier than the end of this batch.
        # train_mask = full_data.timestamps < full_data.timestamps[end_train_idx]
        left = full_data.timestamps < full_data.timestamps[end_train_idx]
        right = full_data.timestamps[0] <= full_data.timestamps
        train_mask = np.logical_and(right, left)
        # Initialize training neighbor finder to retrieve temporal graph
        train_data = Data(full_data.sources[train_mask],
                          full_data.destinations[train_mask],
                          full_data.timestamps[train_mask],
                          full_data.edge_idxs[train_mask],
                          full_data.labels[train_mask])
        # print(train_data.n_interactions, train_data.n_unique_nodes)
        train_ngh_finder = get_neighbor_finder(train_data, args.uniform)
        model.set_neighbor_finder(train_ngh_finder)
        ## Sampler: draw negative edges for contrastive training.
        size = len(sources_batch)
        train_rand_sampler = get_sampler(train_data, batch_ref_window_size, start_train_idx, end_train_hard_negative_idx, neg_sample_method)
        negatives_src_batch, negatives_dst_batch = get_negative_nodes_batch(train_rand_sampler, BATCH_SIZE, size, neg_sample_method)
        pos_label, neg_label = init_pos_neg_labels(size, device)
        criterion = get_criterion()
        model, pos_prob, neg_prob = compute_edges_probabilities_with_custom_sampled_nodes(model, neg_edges_formation, negatives_dst_batch, negatives_src_batch, sources_batch, destinations_batch, timestamps_batch, edge_idxs_batch, NUM_NEIGHBORS)
        pos_edges_weight, neg_edges_weight = get_edges_weight(train_data,k, BATCH_SIZE,max_weight,start_train_idx, end_train_hard_negative_idx, ef_iwf_window_dict, nf_iwf_window_dict, ef_window_dict, nf_window_dict, share_selected_random_weight_per_window_dict, weighted_loss_method, sampled_nodes=negatives_src_batch, compute_xf_iwf_with_sigmoid=compute_xf_iwf_with_sigmoid, edge_weight_multiplier=args.edge_weight_multiplier, use_time_decay=args.use_time_decay, time_diffs = model.time_diffs_raw.cpu().data.numpy())
        # Record indices and weights for this batch.
        # NOTE(review): observer_collect_edges_weight also logs these two lines —
        # the logging here is duplicated; confirm before removing either.
        self.observer_collect_idx_per_batch()
        self.logger_2.info(f'pos_edges_weight = {pos_edges_weight}')
        self.logger_2.info(f'neg_edges_weight = {neg_edges_weight}')
        self.observer_collect_edges_weight(pos_edges_weight, neg_edges_weight)
        self.save_weight_observer()
        loss = compute_loss(pos_label, neg_label, pos_prob, neg_prob, pos_edges_weight, neg_edges_weight, batch_idx, criterion, loss, weighted_loss_method)
    # One optimizer step for the whole backprop group.
    loss /= backprop_every
    loss.backward()
    optimizer.step()
    self.m_loss.append(loss.item())
    # Detach memory after 'args.backprop_every' number of batches so we don't backpropagate to
    # the start of time
    if USE_MEMORY:
        model.memory.detach_memory()
    # Expose last-batch state for the validation phase.
    self.start_train_idx = start_train_idx
    self.end_train_idx = end_train_idx
    self.train_rand_sampler = train_rand_sampler
def observer_collect_idx_per_epoch(self):
    """Append the current run/epoch/batch/ensemble/window indices to their
    per-epoch (*_1) observer dicts (each list is created on first use)."""
    trackers = (
        (self.run_observer_1, 'run_idx', self.run_idx),
        (self.epoch_observer_1, 'epoch_idx', self.epoch_idx),
        (self.batch_observer_1, 'batch_idx', self.batch_idx),
        (self.ensemble_observer_1, 'ensemble_idx', self.ensemble_idx),
        (self.ws_observer_1, 'ws_idx', self.ws_idx),
    )
    for store, key, value in trackers:
        store.setdefault(key, []).append(value)
def save_performance_observer(self):
    """Write the per-epoch performance metrics, merged with their index series,
    to <l_2.log_relative_path>/<l_2.log_time>/performance_observer.pickle."""
    out_dir = Path(self.l_2.log_relative_path) / f'{self.l_2.log_time}'
    out_file = 'performance_observer.pickle'
    Path(out_dir).mkdir(parents=True, exist_ok=True)
    merged = {}
    # Merge order matches the original: indices first, then metrics, then ws.
    for part in (self.run_observer_1, self.ensemble_observer_1,
                 self.epoch_observer_1, self.batch_observer_1,
                 self.performance_observer, self.ws_observer_1):
        merged.update(part)
    pd.DataFrame.from_dict(merged).to_pickle(str(out_dir / out_file))
# def evaluate_epoch(self, ensemble_idx, epoch, num_batch):
def evaluate_epoch(self, ensemble_idx, epoch, num_batch_pair, checkpoint_loss, window_pred_pair):
    """Run one training epoch for ensemble member `ensemble_idx`, then validate
    its link predictions on the given prediction window.

    :param ensemble_idx: key into self.models of the member to train/validate.
    :param epoch: epoch number (for logging only).
    :param num_batch_pair: (first, last) batch indices to train on this epoch.
    :param checkpoint_loss: loss restored from a checkpoint, forwarded to evaluate_batch.
    :param window_pred_pair: (begin, end) instance indices of the validation window.
    :returns: last recorded training loss (self.m_loss[-1]).
    Side effects: stores pred_score/true_label on self.models[ensemble_idx],
    appends to performance observers, and persists them via save_performance_observer().
    """
    model = self.models[ensemble_idx]["model"]
    args = self.args
    full_data = self.full_data
    # init_train_data = self.init_train_data
    NUM_NEIGHBORS = self.num_neighbors
    BATCH_SIZE = args.bs
    USE_MEMORY = args.use_memory
    start_epoch = time.time()
    ### Training :DOC:
    # Reinitialize memory of the model at the start of each epoch
    if USE_MEMORY:
        model.memory.__init_memory__()
    # for k in range(0, num_batch, args.backprop_every):
    # NOTE: evalaute_epoch() only produce expected behavior if window size is equal to batch size.
    for k in range(num_batch_pair[0], num_batch_pair[1], args.backprop_every):
        # logger.debug('---batch = {}'.format(k))
        self.set_batch_idx(k)
        self.evaluate_batch(ensemble_idx, k, args.backprop_every, checkpoint_loss)
    # start_train_idx = self.start_train_idx
    # end_train_idx = self.end_train_idx
    # Sampler from the last training batch is reused to draw validation negatives.
    train_rand_sampler = self.train_rand_sampler
    begin_window_pred_idx, end_window_pred_idx = window_pred_pair
    # end_train_idx = window_pred_pair[0]
    epoch_time = time.time() - start_epoch
    self.epoch_times.append(epoch_time)
    self.logger.info('start validation...')
    # Initialize validation and test neighbor finder to retrieve temporal graph
    full_ngh_finder = get_neighbor_finder(full_data, args.uniform)
    ### Validation
    # Validation uses the full graph
    model.set_neighbor_finder(full_ngh_finder)
    if USE_MEMORY:
        # Backup memory at the end of training, so later we can restore it and use it for the
        # validation on unseen nodes
        train_memory_backup = model.memory.backup_memory()
    # VAL_BATCH_SIZE = BATCH_SIZE * 1
    # VAL_BATCH_SIZE = self.window_size
    # sources_batch, destinations_batch, edge_idxs_batch, timestamps_batch, _ = self.set_params_batch(end_train_idx, end_train_idx + VAL_BATCH_SIZE)
    sources_batch, destinations_batch, edge_idxs_batch, timestamps_batch, _ = self.set_params_batch(begin_window_pred_idx, end_window_pred_idx)
    size = len(sources_batch)
    _, negative_samples = train_rand_sampler.sample(size)
    pos_prob, neg_prob = model.compute_edge_probabilities(sources_batch, destinations_batch,
                                                          negative_samples, timestamps_batch,
                                                          edge_idxs_batch, NUM_NEIGHBORS)
    # Positive edges labeled 1, sampled negatives labeled 0.
    pred_score = np.concatenate([pos_prob.cpu().data.detach().numpy(), neg_prob.cpu().data.detach().numpy()])
    true_label = np.concatenate([np.ones(size), np.zeros(size)])
    # Stored per member for later majority voting in evaluate_ensemble().
    self.models[ensemble_idx]["pred_score"] = pred_score.reshape(-1)
    self.models[ensemble_idx]["true_label"] = true_label.reshape(-1)
    val_auc, val_ap = compute_evaluation_score(true_label,pred_score)
    if USE_MEMORY:
        val_memory_backup = model.memory.backup_memory()
        # Restore memory we had at the end of training to be used when validating on new nodes.
        # Also backup memory after validation so it can be used for testing (since test edges are
        # strictly later in time than validation edges)
        model.memory.restore_memory(train_memory_backup)
    total_epoch_time = time.time() - start_epoch
    self.total_epoch_times.append(total_epoch_time)
    mean_loss = np.mean(self.m_loss)
    self.logger.info('epoch: {} took {:.2f}s'.format(epoch, total_epoch_time))
    self.logger.info('Epoch mean loss: {}'.format(mean_loss))
    # NOTE: I switch val_ap and val_acc, so that it consistent with output from compute_evalaution_score
    # NOTE(review): val_ap is deliberately logged under 'val auc' (and vice versa)
    # per the comment above — confirm against compute_evaluation_score's return order.
    self.logger.info(
        'val auc: {}'.format(val_ap))
    self.logger.info(
        'val ap: {}'.format(val_auc))
    self.performance_observer.setdefault('Mean Loss', []).append(mean_loss)
    self.performance_observer.setdefault('AUC', []).append(val_ap)
    self.performance_observer.setdefault('Absolute Precision', []).append(val_auc)
    self.observer_collect_idx_per_epoch()
    self.save_performance_observer()
    return self.m_loss[-1] # return last loss
def evaluate_ensemble(self, n_ensemble, ws_idx):
    """Aggregate every member's stored predictions for window `ws_idx` by
    vote, persist the raw predictions, and score the ensemble.

    :param n_ensemble: number of ensemble members to aggregate.
    :param ws_idx: current window index (recorded alongside predictions).
    :returns: (precision, auc_for_ensemble) of the aggregated prediction.
    """
    ensemble_prediction_results_path = Path(self.l_3.log_relative_path)/ f'{self.l_3.log_time}'
    # weight_observer_file_name = '{}_run={}_ws={}_epoch={}_batch={}.csv'
    ensemble_prediction_results_file_name = 'ensemble_prediction_results.pickle'
    Path(ensemble_prediction_results_path).mkdir(parents=True, exist_ok=True)
    pred_val_list = []
    raw_pred_val_list = []
    for ensemble_idx in range(n_ensemble):
        # pred_score / true_label were stored per member by evaluate_epoch().
        raw_pred_val = self.models[ensemble_idx]["pred_score"].tolist()
        pred_val = convert_prob_list_to_binary_list(raw_pred_val)
        pred_val_list.append(pred_val)
        raw_pred_val_list.append(raw_pred_val)
        self.ensemble_prediction_observer.setdefault('raw_pred_val', []).append(raw_pred_val)
        self.ensemble_prediction_observer.setdefault('ws_idx', []).append(ws_idx)
        self.ensemble_prediction_observer.setdefault('ensemble_idx', []).append(ensemble_idx)
        self.ensemble_prediction_observer.setdefault('true_label', []).append(self.models[0]["true_label"])
        # tmp = {'raw_pred_val': [raw_pred_val]}
        # tmp.update({'ensemble_idx': ensemble_idx})
        # tmp.update({'true_label': [self.models[0]["true_label"]]})
        # Rewritten each iteration; the final write holds all members.
        pd.DataFrame.from_dict(self.ensemble_prediction_observer).to_pickle(
            str(ensemble_prediction_results_path/ensemble_prediction_results_file_name))
    sum_vote = np.sum(np.array(pred_val_list), axis=0)          # count of positive votes
    arg_max_vote = np.sum(np.array(raw_pred_val_list), axis=0)  # sum of raw probabilities
    mean_vote = np.mean(np.array(raw_pred_val_list), axis=0)
    voted_pred_list = []
    # for v in sum_vote:
    # Summed probability > n/2 is equivalent to mean probability > 0.5.
    for v in arg_max_vote:
        # TODO: this function should be change to support multiple class prediction.
        voted_pred = 1 if v > n_ensemble/2 else 0
        voted_pred_list.append(voted_pred)
    # NOTE(review): uses the loop variable after the loop (last member's labels),
    # while the observer above uses self.models[0] — presumably all members share
    # the same validation true_label; confirm.
    true_label = self.models[ensemble_idx]["true_label"]
    cm = confusion_matrix(true_label, voted_pred_list)
    precision = compute_precision(cm)
    auc_for_ensemble = compute_auc_for_ensemble(true_label, mean_vote)
    return precision, auc_for_ensemble
def set_ensemble_idx(self, ensemble_idx):
    """Record the ensemble member index currently being trained/evaluated;
    read later by the observer-collection helpers.

    :param ensemble_idx: int position of the member within the ensemble.
    """
    self.ensemble_idx = ensemble_idx
def get_num_batch_idx_pair(self, window_idx_pair, batch_size):
    """Convert a (begin, end) instance-index window into (begin, end) batch indices.

    :param window_idx_pair: (begin_instance_idx, end_instance_idx) tuple.
    :param batch_size: number of instances per batch.
    :returns: (num_batch_begin, num_batch_end), each floor-divided by batch_size.
    """
    begin_idx, end_idx = window_idx_pair
    return math.floor(begin_idx / batch_size), math.floor(end_idx / batch_size)
def get_ws_left_to_run(self, ensemble_end_idx_before_slide, ws_idx_pair):
    """Find the first window whose end index lies beyond the pre-slide ensemble
    boundary and return the remaining windows to run.

    :param ensemble_end_idx_before_slide: last instance index covered before sliding.
    :param ws_idx_pair: ordered list of (begin, end) window index pairs.
    :returns: (first_window_idx, n_windows_left, pairs_left_to_run, pairs_left);
        the last pair is held out of pairs_left_to_run (used as the prediction
        window). Returns None implicitly when no window qualifies.
    """
    # FIXME: this can be optimized
    boundary = ensemble_end_idx_before_slide + 1
    for pos, (_, end_idx) in enumerate(ws_idx_pair):
        if end_idx > boundary:
            remaining = len(ws_idx_pair) - pos
            pairs_left = ws_idx_pair[-remaining:]
            assert len(pairs_left) >= 2
            return pos, remaining, pairs_left[:-1], pairs_left
def evaluate(self):
    """Main window-sliding ensemble loop.

    For every remaining window: for each ensemble member, restore it from the
    previous window's checkpoint (or create it), train it for n_epoch epochs on
    its batch range, checkpoint it, then score the ensemble's joint prediction
    for the window via evaluate_ensemble().
    """
    args = self.args
    assert len(self.ensemble_begin_inds) == len(self.ensemble_end_inds)
    n_ensembles = len(self.ensemble_begin_inds)
    # self.check_point.data = args.data
    # self.check_point.prefix = self.prefix
    # self.check_point.bs = args.bs
    # self.check_point.epoch_max = args.n_epoch
    # self.check_point.ws_max = self.total_num_ws
    # self.check_point.max_random_weight_range = args.max_random_weight_range
    self.check_point_dict = {}
    # NOTE(review): every dict entry aliases the SAME self.check_point object,
    # so per-member attribute writes overwrite each other; this only works
    # because each entry's fields are re-set immediately before use — confirm
    # whether independent copies were intended.
    for i in range(n_ensembles):
        self.check_point_dict[i] = self.check_point
        self.check_point_dict[i].data = args.data
        self.check_point_dict[i].prefix = self.prefix
        self.check_point_dict[i].bs = args.bs
        self.check_point_dict[i].epoch_max = args.n_epoch
        self.check_point_dict[i].ws_max = self.total_num_ws
        self.check_point_dict[i].max_random_weight_range = args.max_random_weight_range
    ws_first_idx_before_slide, ws_left_to_run, ws_idx_pair_left_to_run, ws_idx_pair_left = self.get_ws_left_to_run(self.init_n_instances,self.ws_idx_pair_to_run) # FIXME: replace 1000 with variable.
    # for idx in range(n_ensembles):
    assert ws_idx_pair_left_to_run[0][0] == self.init_n_instances
    for ws_idx_offset, ws_idx_pair in enumerate(ws_idx_pair_left_to_run):
        self.ws_idx = ws_idx_offset + ws_first_idx_before_slide
        for idx, (ensemble_idx_begin, ensemble_idx_end) in enumerate(zip(self.ensemble_begin_inds,self.ensemble_end_inds)):
            # TODO: load model for the next round.
            self.set_ensemble_idx(idx)
            # self.check_point.ws_idx = 99999
            # self.check_point.ensemble_idx = idx
            self.check_point_dict[idx].ensemble_idx = idx
            self.check_point_dict[idx].ws_idx = self.ws_idx
            self.logger.debug('--ensemble_idx = {}'.format(idx))
            self.logger_2.info('--ensemble_idx = {}'.format(idx))
            self.logger.debug('--ensemble_idx_pair = {}'.format((ensemble_idx_begin, ensemble_idx_end)))
            self.logger_2.info('--ensemble_idx_pair = {}'.format((ensemble_idx_begin, ensemble_idx_end)))
            self.logger.debug('--ws_idx = {}'.format(self.ws_idx))
            self.logger_2.info('--ws_idx = {}'.format(self.ws_idx))
            self.logger.debug('--ws_idx_pair = {}'.format(ws_idx_pair))
            self.logger_2.info('--ws_idx_pair = {}'.format(ws_idx_pair))
            # Per-member, per-window loss/time tracking.
            self.m_loss = []
            self.epoch_times = []
            self.total_epoch_times = []
            if idx in self.models:
                # Member exists: restore its previous-window checkpoint.
                # FIXME: code is really patchy. fix it
                self.check_point_dict[idx].ws_idx = self.ws_idx - 1
                checkpoint = torch.load(self.check_point_dict[idx].get_checkpoint_path())
                self.models[idx]["model"].load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict( checkpoint['optimizer_state_dict'] )
                # NOTE(review): setdefault only stores the checkpoint loss when no
                # loss is recorded yet — confirm stale losses shouldn't be replaced.
                self.models[idx].setdefault("loss", checkpoint['loss'])
                self.check_point_dict[idx].ws_idx = self.check_point_dict[idx].ws_idx + 1
            else:
                self.add_model(idx) # FIXME: make it works with window sliding ensemble.
            # Reset window-scoped weight caches before training on a new window.
            self.ef_iwf_window_dict.dict_ = {}
            self.nf_iwf_window_dict.dict_ = {}
            self.share_selected_random_weight_per_window_dict.dict_ = {}
            # size_of_current_concat_windows = self.ensemble_end_inds[idx] - self.ensemble_begin_inds[idx] + 1
            # num_batch = math.ceil((size_of_current_concat_windows)/args.bs)
            # for ws_idx_begin, ws_idx_end in self.ws_idx_pair_to_run:
            # for ensemble_idx_begin, ensemble_idx_end in zip(self.ensemble_begin_inds,self.ensemble_end_inds):
            num_batch_begin, num_batch_end = self.get_num_batch_idx_pair((ensemble_idx_begin,ensemble_idx_end+1), self.args.bs)
            last_loss = None
            for epoch in range(self.n_epoch):
                self.check_point_dict[idx].epoch_idx = epoch
                self.check_point_dict[idx].get_checkpoint_path()
                self.logger.debug('--epoch = {}'.format(epoch))
                self.logger_2.info('--epoch = {}'.format(epoch))
                self.logger.info(f'max_weight = {self.args.max_random_weight_range}')
                self.set_epoch_idx(epoch)
                # self.evaluate_epoch(idx, epoch, num_batch)
                last_loss = self.evaluate_epoch(idx, epoch, (num_batch_begin, num_batch_end), self.models[idx]['loss'], ws_idx_pair)
            # self.set_ws_idx(ws)
            # save model
            # self.save_checkpoint_per_ws()
            assert last_loss is not None
            # if self.args.save_checkpoint:
            torch.save({
                'model_state_dict':self.models[idx]['model'].state_dict(),
                'optimizer_state_dict':self.optimizer.state_dict(),
                'loss': last_loss
            }, self.check_point_dict[idx].get_checkpoint_path())
        # raise NotImplementedError('make sure that ensemble evaluation works as expected as window slide forward.')
        precision, auc_for_ensemble = self.evaluate_ensemble(n_ensembles, self.ws_idx)
        self.logger.info(f'precision of enemble = {precision}')
        self.logger.info(f'auc of enemble = {auc_for_ensemble}')
    # self.observer_collect_idx_per_epoch()
    # self.save_performance_observer()
class WindowSlidingForwardNodeClassification(WindowSlidingForward):
def get_conditions(self):
    """Resolve the run prefix and weighted-loss method for node classification
    from the parsed CLI arguments."""
    conditions = get_conditions_node_classification(self.args)
    self.prefix, self.weighted_loss_method = conditions
def set_decoder(self):
    """Build the node-classification decoder head, its optimizer, and loss criterion.

    :returns: (decoder, decoder_optimizer, decoder_loss_criterion).
    """
    feature_dim = self.node_features.shape[1]
    label_count = self.full_data.n_unique_labels
    optimizer, decoder, loss_criterion = select_decoder_and_loss_node_classification(
        self.args, self.device, feature_dim, label_count, self.weighted_loss_method)
    return decoder, optimizer, loss_criterion
def add_model(self):
    """Create the embedding model plus decoder head and precompute batch boundaries.

    NOTE(review): the parent class defines add_model(self, idx); this override
    takes no idx — confirm no caller in this subclass passes one.
    """
    self.model = self.ModelClass(**self.model_kwargs)
    self.decoder, self.optimizer, self.criterion = self.set_decoder()
    # self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
    self.model.to(self.device)
    first_ind, split_ind, _ = right_split_ind_by_window(1, self.full_data.data_size, self.window_size)
    self.batch_inds = split_a_window_into_batches(first_ind, split_ind - 1, self.args.bs)
    self.batch_begin_inds = self.batch_inds[:-1]
    self.batch_end_inds = self.batch_inds[1:]
def set_params_mask(self, start_idx, end_idx):
    """Boolean mask over full_data selecting instances whose timestamp falls
    strictly after the end of the current batch and no later than the end of
    the next batch (end_idx + bs).

    :param start_idx: begin index of the current batch (unused in the mask math).
    :param end_idx: end index of the current batch.
    :returns: numpy boolean array aligned with full_data.timestamps.
    """
    ts = self.full_data.timestamps
    bs = self.args.bs
    after_current_batch = ts > ts[end_idx]
    within_next_batch = ts <= ts[end_idx + bs]
    return np.logical_and(after_current_batch, within_next_batch)
def evaluate_batch(self, model, k, backprop_every):
    """Train the node-classification decoder on `args.backprop_every` batches
    starting at batch index `k`, using temporal embeddings from `model`.

    :param model: embedding model providing compute_temporal_embeddings.
    :param k: absolute index of the first batch in this backprop group.
    :param backprop_every: group size hint (NOTE: the loop actually iterates
        args.backprop_every, not this parameter).
    Side effects: extends self.current_window_labels, appends to self.m_loss,
    steps the decoder optimizer, and stores start/end train indices on self.
    """
    args = self.args
    criterion = self.criterion
    full_data = self.full_data
    # init_train_data = self.init_train_data
    NUM_NEIGHBORS = self.num_neighbors
    device = self.device
    decoder_optimizer = self.optimizer
    decoder_loss_criterion = self.criterion
    WINDOW_SIZE = self.window_size
    num_instance = full_data.data_size
    max_weight = args.max_random_weight_range
    BATCH_SIZE = args.bs
    USE_MEMORY = args.use_memory
    n_unique_labels = self.full_data.n_unique_labels
    weighted_loss_method = self.weighted_loss_method
    # Window-scoped weight dictionaries shared across batches of one window.
    ef_iwf_window_dict = self.ef_iwf_window_dict.dict_
    nf_iwf_window_dict = self.nf_iwf_window_dict.dict_
    share_selected_random_weight_per_window_dict = self.share_selected_random_weight_per_window_dict.dict_
    loss = 0
    decoder_optimizer.zero_grad()
    # Custom loop to allow to perform backpropagation only every a certain number of batches
    for j in range(args.backprop_every):
        batch_idx = k + j
        # start_train_idx = batch_idx * BATCH_SIZE
        # end_train_idx = min(end_ws_idx, start_train_idx + BATCH_SIZE)
        start_train_idx = self.batch_begin_inds[batch_idx]
        end_train_idx = self.batch_end_inds[batch_idx]
        assert (self.end_ws_idx - self.begin_ws_idx) <= WINDOW_SIZE, "if false, *_batch will encounter out of bound error. Maybe intial number of data is more than BATCH_SIZE."
        assert start_train_idx < end_train_idx, "number of batch to run for each epoch was not set correctly."
        # assert len(selected_sources_ind) >= end_ws_idx
        # print(start_train_idx, end_train_idx)
        # sources_batch, destinations_batch = full_data.sources[start_train_idx:end_train_idx], \
        # full_data.destinations[start_train_idx:end_train_idx]
        # edge_idxs_batch = full_data.edge_idxs[start_train_idx: end_train_idx]
        # timestamps_batch = full_data.timestamps[start_train_idx:end_train_idx]
        # labels_batch = full_data.labels[start_train_idx:end_train_idx]
        sources_batch, destinations_batch, edge_idxs_batch, timestamps_batch, labels_batch = self.set_params_batch(start_train_idx, end_train_idx)
        # total_labels_batch = labels_batch
        self.current_window_labels.extend(labels_batch)
        # size = len(sources_batch)
        source_embedding, destination_embedding, _ = model.compute_temporal_embeddings(sources_batch, destinations_batch, destinations_batch, timestamps_batch, edge_idxs_batch, sampled_source_nodes=None, n_neighbors=NUM_NEIGHBORS)
        # labels_batch = labels_batch[self.selected_sources_ind]
        # sources_batch = sources_batch[self.selected_sources_ind]
        ## offset by self.selected_sources_ind - relative_batch_idx and select only ind within range of 0 < x < batch_size.
        # get_selected_sources_of_batch_idx_relative_to_window_idx(k, )
        # absolute_window_idx = convert_batch_idx_to_window_idx(absolute_batch_idx, ws_multiplier)
        # Translate window-relative selected-source indices into this batch's
        # [0, BATCH_SIZE) range and keep only those that fall inside it.
        relative_batch_idx = get_batch_idx_relative_to_window_idx(k, args.ws_multiplier)
        offset_val = relative_batch_idx * BATCH_SIZE
        relative_sources_ind = np.array(self.selected_sources_ind) - offset_val
        left = relative_sources_ind >= 0
        right = relative_sources_ind < BATCH_SIZE
        selected_relative_sources_ind_mask = np.logical_and(left, right)
        selected_relative_sources_ind = relative_sources_ind[selected_relative_sources_ind_mask].tolist()
        ## NOTE: only mask labels (selected from window range) that are within current batch range.
        # labels_batch = labels_batch[self.selected_sources_ind]
        # sources_batch = sources_batch[self.selected_sources_ind]
        labels_batch = labels_batch[selected_relative_sources_ind]
        sources_batch = sources_batch[selected_relative_sources_ind]
        # raise NotImplementedError("added arugment to get_nodes_weight and I haven't test it in node_classification yet.")
        nodes_weight = get_nodes_weight(full_data, batch_idx, BATCH_SIZE, max_weight, start_train_idx, end_train_idx, nf_iwf_window_dict, n_unique_labels, weighted_loss_method, share_selected_random_weight_per_window_dict)
        self.logger_2.info(f'nodes_weight = {nodes_weight}')
        # nodes_weight_batch = nodes_weight[self.selected_sources_ind]
        if nodes_weight is not None:
            nodes_weight_batch = nodes_weight[selected_relative_sources_ind]
        if full_data.n_unique_labels == 2: # :NOTE: for readability, train_data should be replaced by full_data, but I am unsure about side effect.
            raise NotImplementedError
            # Unreachable below the raise — kept from the original for reference.
            pred = self.decoder(source_embedding).sigmoid()
            labels_batch_torch = torch.from_numpy(labels_batch).float().to(device)
            decoder_loss = decoder_loss_criterion(weight=nodes_weight_batch)(pred, labels_batch_torch)
        elif full_data.n_unique_labels == 4:
            # pred = self.decoder(source_embedding[self.selected_sources_ind]).softmax(dim=1) # :BUG: I am not sure if selected_sources_ind can be appplied here without effecting model learning
            pred = self.decoder(source_embedding[selected_relative_sources_ind]).softmax(dim=1) # :BUG: I am not sure if selected_sources_ind can be appplied here without effecting model learning
            labels_batch_torch = torch.from_numpy(self.onehot_encoder.transform(pd.DataFrame(labels_batch)).toarray()).long().to(device)
            labels_batch_torch = np.argmax(labels_batch_torch, axis=1)
            if nodes_weight is None:
                decoder_loss = decoder_loss_criterion()(pred, labels_batch_torch)
            else:
                decoder_loss = decoder_loss_criterion(weight=nodes_weight_batch)(pred, labels_batch_torch)
        pred = pred_prob_to_pred_labels(pred.cpu().detach().numpy()) # :NOTE: not sure what this is used for.
        loss += decoder_loss.item()
    loss /= args.backprop_every
    # NOTE(review): backward() is called on the LAST batch's decoder_loss only
    # (the accumulated `loss` is a float of .item()s), unlike the link-prediction
    # evaluate_batch which backprops the accumulated tensor — confirm intent.
    decoder_loss.backward()
    decoder_optimizer.step()
    self.m_loss.append(decoder_loss.item())
    # Detach memory after 'args.backprop_every' number of batches so we don't backpropagate to
    # the start of time
    if USE_MEMORY:
        model.memory.detach_memory()
    self.start_train_idx = start_train_idx
    self.end_train_idx = end_train_idx
    # self.train_rand_sampler = train_rand_sampler
    self.logger.info(
        f"train labels batch distribution = {get_label_distribution(labels_batch)}")
    self.logger.info(
        f"predicted train labels batch distribution = {get_label_distribution(pred)}")
    self.logger.info(
        f"train labels batch distribution (disregard frequency of unique node) = "
        f"{get_label_distribution(get_unique_nodes_labels(labels_batch, sources_batch))}")
    self.logger.info(
        f"predicted train labels batch distribution (disregard frequency of unique node) = "
        f"{get_label_distribution(get_unique_nodes_labels(pred, sources_batch))}") # note that it is possible that model predict different labels for the same nodes. (I will omit this metric until it is shown to be needed.)
def evaluate_epoch(self, model, epoch, num_batch):
    """Train the decoder for one epoch over the current window, then validate.

    :param model: TGN-style encoder; its memory is reset at the start of the
        epoch and backed up / restored around validation when memory is used.
    :param epoch: epoch index (used for logging only).
    :param num_batch: number of batches in this window; batches are consumed
        ``args.backprop_every`` at a time by ``evaluate_batch``.
    """
    args = self.args
    full_data = self.full_data
    NUM_NEIGHBORS = self.num_neighbors
    decoder = self.decoder
    BATCH_SIZE = args.bs
    USE_MEMORY = args.use_memory
    start_epoch = time.time()
    ### Training :DOC:
    # Reinitialize memory of the model at the start of each epoch so no
    # state leaks across epochs.
    if USE_MEMORY:
        model.memory.__init_memory__()
    self.decoder = self.decoder.train()
    self.current_window_labels = []
    for k in range(0, num_batch, args.backprop_every):
        self.evaluate_batch(model, k, args.backprop_every)
    # Window boundaries left behind by the last call to evaluate_batch.
    start_train_idx = self.start_train_idx
    end_train_idx = self.end_train_idx
    self.logger.info(f"total labels distribution = {get_label_distribution(self.current_window_labels)}")
    epoch_time = time.time() - start_epoch
    self.epoch_times.append(epoch_time)
    self.logger.info(f'total number of labelled uniqued nodes = {len(self.selected_sources_to_label)}')
    self.logger.info('start validation...')
    # Initialize validation neighbor finder to retrieve the temporal graph.
    # NOTE(review): full_ngh_finder is never used below — presumably it was
    # meant to be installed on the model; confirm before removing.
    full_ngh_finder = get_neighbor_finder(full_data, args.uniform)
    ### Validation
    if USE_MEMORY:
        # Backup memory at the end of training, so later we can restore it
        # and use it for the validation on unseen nodes.
        train_memory_backup = model.memory.backup_memory()
    # There must be at least one more batch of data after the training window.
    # (BUGFIX: the original asserted this twice verbatim; kept once.)
    assert full_data.timestamps.shape[0] >= end_train_idx + BATCH_SIZE
    # Validate on one full window rather than a single training batch.
    VAL_BATCH_SIZE = self.window_size
    val_mask = self.set_params_mask(start_train_idx, end_train_idx)
    val_data = Data(full_data.sources[val_mask],
                    full_data.destinations[val_mask],
                    full_data.timestamps[val_mask],
                    full_data.edge_idxs[val_mask],
                    full_data.labels[val_mask],
                    )
    val_auc, val_acc, cm = my_eval_node_classification(self.logger,
                                                       model,
                                                       decoder,
                                                       val_data,
                                                       VAL_BATCH_SIZE,
                                                       self.selected_sources_to_label,
                                                       n_neighbors=NUM_NEIGHBORS)
    if USE_MEMORY:
        val_memory_backup = model.memory.backup_memory()
        # Restore memory we had at the end of training to be used when
        # validating on new nodes. Also backup memory after validation so it
        # can be used for testing (since test edges are strictly later in
        # time than validation edges).
        model.memory.restore_memory(train_memory_backup)
    total_epoch_time = time.time() - start_epoch
    self.total_epoch_times.append(total_epoch_time)
    self.logger.info('epoch: {} took {:.2f}s'.format(epoch, total_epoch_time))
    self.logger.info('Epoch mean loss: {}'.format(np.mean(self.m_loss)))
    self.logger.info(f'val acc: {val_acc}')
    self.logger.info(f'confusion matrix = \n{cm}')
def evaluate_ws(self, ws, size_of_current_concat_windows, batch_size):
    """Train/evaluate on window slide `ws`, then advance the window pointers.

    :param ws: index of the current window slide (not used directly here).
    :param size_of_current_concat_windows: number of instances seen so far,
        used to derive the number of batches.
    :param batch_size: instances per batch.
    """
    full_data = self.full_data
    ## Keeping track of class labels: label newly seen unique nodes (within
    ## the labelling budget) for the slice [begin_ws_idx, end_ws_idx).
    self.selected_sources_ind,self.selected_sources_to_label = label_new_unique_nodes_with_budget(self.selected_sources_to_label, full_data, (self.begin_ws_idx, self.end_ws_idx))
    # Labelled sources must remain unique.
    assert np.unique(self.selected_sources_to_label).shape[0] == len(self.selected_sources_to_label)
    num_batch = math.ceil((size_of_current_concat_windows)/batch_size)
    self.m_loss = []
    self.epoch_times = []
    self.total_epoch_times = []
    for epoch in range(self.n_epoch):
        # Point the checkpoint helper at this epoch before training it.
        self.check_point.epoch_idx = epoch
        self.check_point.get_checkpoint_path()
        self.logger.debug('--epoch = {}'.format(epoch))
        self.logger_2.info('--epoch = {}'.format(epoch))
        self.logger.info(f'max_weight = {self.args.max_random_weight_range}')
        self.evaluate_epoch(self.model, epoch, num_batch)
    self.save_checkpoint_per_ws()
    # Slide the window pointers forward for the next call.
    self.init_num_ws += 1
    self.init_train_data = self.init_num_ws * self.num_instances_shift
    self.begin_ws_idx = self.end_ws_idx
    self.end_ws_idx = min(self.init_num_ws * self.num_instances_shift, self.full_data.edge_idxs.shape[0]-1)
    # Training has finished; back up the model memory (which has seen
    # validation edges) so it can also be used when testing on unseen nodes.
    # NOTE(review): val_memory_backup is a discarded local — confirm whether
    # it should be stored on self for later restoration.
    if self.use_memory:
        val_memory_backup = self.model.memory.backup_memory()
def pre_evaluation(self):
    """Derive the sliding-window parameters and initialise encoder/model
    state before the first evaluation window runs."""
    params = self.get_sliding_window_params(
        self.full_data.data_size, self.args.bs, self.args.ws_multiplier)
    (self.window_size, self.num_init_data, self.num_instances_shift,
     self.init_train_data, self.total_num_ws, self.init_num_ws,
     self.left_num_ws) = params
    self.begin_ws_idx = 0  # pointer for first index of previously added window
    self.end_ws_idx = self.init_train_data  # pointer for last index of previously added window
    self.onehot_encoder = get_encoder(self.full_data.n_unique_labels)
    self.add_model()
def evaluate(self):
    """Run the sliding-window evaluation over all remaining windows.

    Records the run configuration on the checkpoint helper first so saved
    checkpoint files are named consistently, then delegates each window
    slide to ``evaluate_ws``.
    """
    args = self.args
    # Record run configuration on the checkpoint helper.
    self.check_point.data = args.data
    self.check_point.prefix = self.prefix
    self.check_point.bs = args.bs
    self.check_point.epoch_max = args.n_epoch
    self.check_point.ws_max = self.total_num_ws
    self.check_point.max_random_weight_range = args.max_random_weight_range
    for ws in range(self.left_num_ws):
        self.check_point.ws_idx = ws
        self.logger.debug('-ws = {}'.format(ws))
        self.logger_2.info('-ws = {}'.format(ws))
        self.set_ws_idx(ws)
        self.evaluate_ws(ws, self.init_train_data, self.args.bs)
| 1.5 | 2 |
aloobj.py | zstewar1/ALOLoader | 0 | 12757649 | <reponame>zstewar1/ALOLoader
import itertools
import numpy
def dump(alo, fp):
    """Write a parsed ALO model dict to `fp` in Wavefront OBJ format.

    Each mesh becomes an OBJ group. Vertex positions are transformed by the
    world-space matrix of the bone each mesh is connected to; normals are
    transformed by the inverse of its rotation block. Positions, texture
    coordinates and normals are de-duplicated globally (OBJ indices are
    1-based and shared across groups).

    Fixes vs. the original:
      * does not mutate `alo` (world matrices are computed locally), so the
        function can be called repeatedly on the same model;
      * a mesh with no entry in the connection table falls back to the
        identity transform instead of raising StopIteration.

    :param alo: nested dict/list structure produced by the ALO loader.
    :param fp: writable text file object.
    """
    bones = alo['skeleton'][0]['bone_container']
    # World-space (parent-chained) transform per bone, indexed like `bones`.
    # Assumes a bone's parent always precedes it in the container — the same
    # assumption the original single-pass computation made.
    pmatrices = []
    for bone in bones:
        b = bone['bone'][0]
        # The stored matrix is the 3x4 affine part; complete it to 4x4.
        local = numpy.matrix(list(b['matrix']) + [(0, 0, 0, 1)])
        if b['parent'] >= 0:
            local = pmatrices[b['parent']] * local
        pmatrices.append(local)

    # index -> value and value -> index maps for OBJ de-duplication.
    verts, vertr = {}, {}
    tex, texr = {}, {}
    norm, normr = {}, {}
    identity = numpy.matrix(numpy.identity(4))
    for meshindex, mesh in enumerate(alo['mesh']):
        print('g', mesh['mesh_name'][0]['name'], file=fp)
        # BUGFIX: default of None — the original next() without a default
        # raised StopIteration for unconnected meshes, making the identity
        # fallback unreachable.
        conn = next((c for c in alo['connections'][0]['object_connection']
                     if c['object'] == meshindex), None)
        if conn:
            matrix = pmatrices[conn['bone']]
        else:
            matrix = identity
        # Loop-invariant: inverse of the rotation block, used for normals.
        normal_matrix = matrix[:3, :3].I
        for submesh in mesh['sub_mesh_data']:
            verto = []
            texo = []
            normo = []
            for vert in submesh['vertex_buffer'][0]['buffer']:
                mpos = matrix * numpy.matrix(tuple(vert['position']) + (1,)).T
                pos = tuple(itertools.islice(mpos.flat, 3))
                verti = vertr.setdefault(pos, len(verts))
                if verti == len(verts):  # first time this position is seen
                    print('v', *pos, file=fp)
                    verts[verti] = pos
                verto.append(verti + 1)  # OBJ indices are 1-based
                texc = vert['tex_coords'][0]
                texi = texr.setdefault(texc, len(tex))
                if texi == len(tex):
                    print('vt', *texc, file=fp)
                    tex[texi] = texc
                texo.append(texi + 1)
                normv = tuple((numpy.matrix(vert['normal']) * normal_matrix).flat)
                normi = normr.setdefault(normv, len(norm))
                if normi == len(norm):
                    print('vn', *normv, file=fp)
                    norm[normi] = normv
                normo.append(normi + 1)
            ib = submesh['index_buffer'][0]['indices']
            # Emit one face line per index triple: "f v/vt/vn v/vt/vn v/vt/vn".
            for i in range(0, len(ib), 3):
                corners = ('/'.join(str(x) for x in (verto[j], texo[j], normo[j]))
                           for j in ib[i:i + 3])
                print('f', *corners, file=fp)
| 2.140625 | 2 |
scripts/hmm_parser-v1.py | ParkvilleData/MetaGenePipe | 0 | 12757650 | #!/usr/bin/env python
from collections import defaultdict
import pandas as pd
import numpy as np
import sys
import pprint
from Bio import SearchIO
import argparse
def hmmer_to_df(hmmTbl, only_top_hit=False):
    """Parse a HMMER 3 tabular output file into a pandas DataFrame.

    Columns are 'id', 'evalue' and 'KO' (the query id of each hit).
    With ``only_top_hit=True``, consecutive hits sharing the same id are
    collapsed to the first one seen.

    Adapted from https://stackoverflow.com/a/62021471
    """
    columns = ('id', 'evalue')  # hit attributes to extract; extend as needed
    records = defaultdict(list)
    last_hit_id = None
    ## open hmmTbl and extract hits
    with open(hmmTbl) as handle:
        for query in SearchIO.parse(handle, 'hmmer3-tab'):
            for hit in query.hits:
                # Only record the top hit
                if only_top_hit and hit.id == last_hit_id:
                    continue
                for column in columns:
                    records[column].append(getattr(hit, column))
                records['KO'].append(query.id)
                last_hit_id = hit.id
    return pd.DataFrame.from_dict(records)
def main():
    """Count BRITE pathway hits per hierarchy level from HMMER tables and
    write one TSV of counts (pathways x input tables) per level."""
    parser = argparse.ArgumentParser()
    parser.add_argument('brite', type=argparse.FileType('r'), help="The brite hierachy level file.")
    parser.add_argument('hmm_tbls', nargs='+', help='A list of tables from HMMER 3.')
    parser.add_argument('--consistent-pathways', action='store_true', help='Outputs all the pathways consistently across each output file even if they do not exist at that level.')
    parser.add_argument('--outprefix', help="The samplename prefix")
    args = parser.parse_args()
    levels = ["Level1", "Level2", "Level3"]
    # load brite database
    brite_df = pd.read_csv(args.brite, sep='\t')
    # Loop over the HMMER tables, accumulating per-level pathway counts.
    counts_df = []  # becomes a DataFrame after the first iteration
    for hmm_tbl in args.hmm_tbls:
        hmmer_df = hmmer_to_df( hmm_tbl, only_top_hit=False )
        # Select ids for rows with minimum e value
        idx_evalue_min = hmmer_df.groupby('id')['evalue'].idxmin()
        # Filter hmmer dataframe with these indexes
        hmmer_min_e_df = hmmer_df.loc[idx_evalue_min]
        # Keep only BRITE rows whose KO was hit in this table.
        brite_filtered = brite_df[brite_df['KO'].isin(hmmer_min_e_df.KO)]
        for level in levels:
            my_counts_df = brite_filtered[level].value_counts().rename_axis('pathway').reset_index(name='counts')
            my_counts_df['level'] = level
            my_counts_df['hmm_tbl'] = hmm_tbl
            # Store in single dataframe
            counts_df = my_counts_df if len(counts_df) == 0 else pd.concat( [counts_df, my_counts_df ], ignore_index=True)
    # Output the counts into text files, one per level.
    for level in levels:
        output_filepath = f"{level}.{args.outprefix}.counts.tsv"
        print(f"Writing to file {output_filepath}")
        with open(output_filepath, 'w') as f:
            # Get pathways for this level so that we can have consistency in
            # the output files even when the counts are zero.
            df_for_pathways = counts_df if args.consistent_pathways else counts_df[ counts_df.level == level ]
            pathways_for_level = sorted(df_for_pathways.pathway.unique())
            headers = ["Pathway"] + args.hmm_tbls
            f.write( "\t".join(headers) )
            f.write( "\n" )
            # One row per pathway, one count column per input table.
            for pathway in pathways_for_level:
                f.write(f"{pathway}")
                for hmm_tbl in args.hmm_tbls:
                    filtered = counts_df[ (counts_df.pathway == pathway) & (counts_df.level == level) & (counts_df.hmm_tbl == hmm_tbl) ]
                    count = filtered.counts.sum()
                    f.write( f"\t{count}" )
                f.write( "\n" )
if __name__ == "__main__":
    # Run the CLI entry point only when executed as a script (not on import).
    main()
| 2.59375 | 3 |