hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9271ac7529349d3b0dfa17ddd03591219a780e76 | 1,188 | py | Python | numbering-systems-final.py | Vandal2006/numbering-systems | 3057e354919433b06efe37bded2b7dbbe26543c8 | [
"MIT"
] | null | null | null | numbering-systems-final.py | Vandal2006/numbering-systems | 3057e354919433b06efe37bded2b7dbbe26543c8 | [
"MIT"
] | null | null | null | numbering-systems-final.py | Vandal2006/numbering-systems | 3057e354919433b06efe37bded2b7dbbe26543c8 | [
"MIT"
] | null | null | null | # Author: Henry Gilson
# Class: cpsc-20000
# Constants
INTRODUCTION = '''
******************************************************
Numbering System 2.0
GilsoSoft
******************************************************
This program will give you your inputted integer, as an input, as a number and as a hex.
If you did not input an integer you will be told you need to input an int to get the value.
It will also welcome you for using it.
'''
import sys
print(INTRODUCTION)
# argv[0] is always the script path; the user's number arrives as argv[1].
numberOfArgs = len(sys.argv)
print("Total arguments passed: " + str(numberOfArgs))
print("Argument 1: " + sys.argv[0])
if numberOfArgs == 2:
    print("Argument 2: " + sys.argv[1])
    try:
        # Parse strictly as a base-10 integer, then display the value in
        # decimal and hexadecimal form.
        numberAsAString = sys.argv[1]
        numberAsAnInt = int(numberAsAString, base=10)
        numberAsHex = hex(numberAsAnInt)
        print("Input: " + numberAsAString)
        print("Number: " + str(numberAsAnInt))
        print("Hex: " + numberAsHex)
        print("You're Welcome User ('.')")
    except ValueError:
        # int() raises ValueError for anything that is not a valid integer.
        print("you did not enter an integer, to get the values you must enter an integer")
print("") | 33 | 92 | 0.558923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 678 | 0.570707 |
9271ae6e568ebb47596ef419f5344965563e2e99 | 2,993 | py | Python | demo/optimization.py | Pricccvtqu4tt/ngocthinh | 03b980e7cefa085c0a3314b6f3b25275b584ff99 | [
"Apache-2.0"
] | 1 | 2020-09-24T07:16:39.000Z | 2020-09-24T07:16:39.000Z | demo/optimization.py | Pricccvtqu4tt/ngocthinh | 03b980e7cefa085c0a3314b6f3b25275b584ff99 | [
"Apache-2.0"
] | null | null | null | demo/optimization.py | Pricccvtqu4tt/ngocthinh | 03b980e7cefa085c0a3314b6f3b25275b584ff99 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import re
import numpy as np
import paddle as P
import paddle.distributed.fleet as fleet
from propeller.paddle.train.hooks import RunHook
log = logging.getLogger(__name__)
from demo.utils import create_if_not_exists, get_warmup_and_linear_decay
def optimization(
        loss,
        warmup_steps,
        num_train_steps,
        learning_rate,
        train_program,
        startup_prog,
        weight_decay,
        scheduler='linear_warmup_decay',
        use_fp16=False, ):
    """Set up the optimizer and run the backward pass for static-graph training.

    Builds an AdamW optimizer with a linear-warmup + linear-decay LR schedule,
    global-norm gradient clipping, and optional automatic mixed precision,
    then calls ``minimize`` on *loss*.

    Args:
        loss: scalar loss variable to minimize.
        warmup_steps: number of LR warmup steps.
        num_train_steps: total training steps (drives the decay schedule).
        learning_rate: peak learning rate.
        train_program: NOTE(review): accepted but never used in this body.
        startup_prog: NOTE(review): accepted but never used in this body.
        weight_decay: AdamW decoupled weight decay; must be 0 when use_fp16 is on.
        scheduler: NOTE(review): accepted but never used; the schedule is
            hard-coded to warmup + linear decay regardless of this value.
        use_fp16: enable Paddle AMP (dynamic loss scaling).

    Returns:
        tuple: (LRStepHook instance that advances the LR scheduler after each
        run, loss-scaling variable or None when AMP is disabled).
    """
    def exclude_from_weight_decay(param):
        # Return True for parameters that should NOT receive weight decay:
        # layer-norm parameters and biases (matched by name suffix).
        # NOTE(review): rstrip('.master') strips a *character set*, not the
        # literal suffix '.master' — names ending in those characters would
        # also be trimmed; confirm intended.
        name = param.rstrip('.master')
        if name.find("layer_norm") > -1:
            return True
        bias_suffix = ["_bias", "_b", ".b_0"]
        for suffix in bias_suffix:
            if name.endswith(suffix):
                return True
        return False

    # Clip gradients by their global norm (threshold 1.0).
    g_clip = P.nn.ClipGradByGlobalNorm(1.0)
    # Multiplicative LR schedule: linear warmup followed by linear decay.
    lr_scheduler = P.optimizer.lr.LambdaDecay(
        learning_rate,
        get_warmup_and_linear_decay(num_train_steps, warmup_steps))

    optimizer = P.optimizer.AdamW(
        learning_rate=lr_scheduler,
        weight_decay=weight_decay,
        grad_clip=g_clip,
        apply_decay_param_fun=exclude_from_weight_decay)

    if use_fp16:
        log.info('AMP activated')
        if weight_decay > 0.:
            # Paddle AMP silently ignores weight_decay; fail loudly instead.
            raise ValueError(
                'paddle amp will ignore `weight_decay`, see https://github.com/PaddlePaddle/Paddle/issues/29794'
            )
        #amp_list = P.fluid.contrib.mixed_precision.AutoMixedPrecisionLists(
        #    custom_white_list=['softmax', 'layer_norm', 'gelu'])
        optimizer = P.fluid.contrib.mixed_precision.decorate(
            optimizer, init_loss_scaling=3**15, use_dynamic_loss_scaling=True)
        _, param_grads = optimizer.minimize(loss)
        # Expose the dynamic loss-scaling variable created by the AMP decorator.
        loss_scaling = P.static.default_main_program().global_block().var(
            'loss_scaling_0')
    else:
        _, param_grads = optimizer.minimize(loss)
        loss_scaling = None

    class LRStepHook(RunHook):
        # Advances the LR schedule once per training step.
        def after_run(self, _, __):
            lr_scheduler.step()
            log.debug('lr step: %.5f' % lr_scheduler.get_lr())

    return LRStepHook(), loss_scaling
| 33.255556 | 112 | 0.686602 | 157 | 0.052456 | 0 | 0 | 0 | 0 | 0 | 0 | 953 | 0.31841 |
92721f275bd80706a0af8e3045e6a861b877107e | 2,136 | py | Python | test/build_bfrange2.py | trueroad/pdf-fix-tuc | b5e682c45062560d443d778e7ef31b54d85b54da | [
"BSD-2-Clause"
] | 20 | 2021-01-23T13:59:36.000Z | 2022-03-28T01:48:22.000Z | test/build_bfrange2.py | trueroad/pdf-fix-tuc | b5e682c45062560d443d778e7ef31b54d85b54da | [
"BSD-2-Clause"
] | null | null | null | test/build_bfrange2.py | trueroad/pdf-fix-tuc | b5e682c45062560d443d778e7ef31b54d85b54da | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Fix ToUnicode CMap in PDF
# https://github.com/trueroad/pdf-fix-tuc
#
# build_bfrange2.py:
# Build bfrange2 PDF.
#
# Copyright (C) 2021 Masamichi Hosoda.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import re
import sys
def main ():
    """Filter a ToUnicode CMap stream on stdin, widening every bfrange section.

    Reads binary stdin line by line.  Lines that are not valid UTF-8 are
    forwarded to binary stdout untouched.  A line of the form
    "<N> beginbfrange" is rewritten as "<N+5> beginbfrange" followed by five
    hard-coded extra bfrange entries; every other line is echoed unchanged.

    NOTE(review): decoded lines go out via print() (text stdout) while
    undecodable ones go via sys.stdout.buffer — mixing the two layers relies
    on the flush=True below to keep the output correctly ordered.
    """
    re_begin = re.compile (r"(\d+)\s+beginbfrange\r?\n?")
    for line in sys.stdin.buffer:
        try:
            line = line.decode ("utf-8")
        except UnicodeDecodeError:
            # Not text: forward the raw bytes unchanged.
            sys.stdout.buffer.write (line)
            continue
        m = re_begin.match (line)
        if m:
            # Bump the declared entry count by the 5 ranges appended below.
            print ("{} beginbfrange".format (int (m.group (1)) + 5))
            print ("""\
<0001> <0003> <2E81>
<0004> <0006> <2E99>
<0007> <0009> <2EF2>
<000A> <000C> <2EFF>
<000D> <000F> <2FD4>\
""")
            continue
        print (line, end = "", flush = True)


if __name__ == "__main__":
    main ()
| 31.880597 | 77 | 0.695225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,637 | 0.766386 |
9272ad107af4225262b4e05e8e7c329cca07cdc6 | 4,391 | py | Python | fedot/api/api_utils/initial_assumptions.py | vkirilenko/FEDOT | d287d899e47fe5aebf52c12733892e103b75842b | [
"BSD-3-Clause"
] | 1 | 2021-11-09T10:24:38.000Z | 2021-11-09T10:24:38.000Z | fedot/api/api_utils/initial_assumptions.py | vkirilenko/FEDOT | d287d899e47fe5aebf52c12733892e103b75842b | [
"BSD-3-Clause"
] | null | null | null | fedot/api/api_utils/initial_assumptions.py | vkirilenko/FEDOT | d287d899e47fe5aebf52c12733892e103b75842b | [
"BSD-3-Clause"
] | null | null | null | from typing import List, Union
from fedot.core.data.data import data_has_categorical_features, InputData
from fedot.core.data.multi_modal import MultiModalData
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode, Node
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.repository.tasks import Task, TaskTypesEnum
NOT_FITTED_ERR_MSG = 'Model not fitted yet'


class ApiInitialAssumptionsHelper:
    """Builds a reasonable starting pipeline for the given data and task."""

    def get_initial_assumption(self,
                               data: Union[InputData, MultiModalData],
                               task: Task) -> Pipeline:
        """Return an initial Pipeline guess for *data* under *task*."""
        with_categoricals = data_has_categorical_features(data)

        if isinstance(data, MultiModalData):
            final_node = self.create_multidata_pipeline(task, data, with_categoricals)
        elif isinstance(data, InputData):
            final_node = self.create_unidata_pipeline(task, with_categoricals)
        else:
            raise NotImplementedError(f"Don't handle {type(data)}")

        return Pipeline(final_node)

    def create_unidata_pipeline(self,
                                task: Task,
                                has_categorical_features: bool) -> Node:
        """Build the final node of a single-source pipeline: imputation,
        (optional encoding +) scaling, then a task-appropriate model."""
        imputation = PrimaryNode('simple_imputation')

        if task.task_type == TaskTypesEnum.ts_forecasting:
            lagged = SecondaryNode('lagged', [imputation])
            return SecondaryNode('ridge', [lagged])

        if has_categorical_features:
            encoded = SecondaryNode('one_hot_encoding', [imputation])
            preprocessing = SecondaryNode('scaling', [encoded])
        else:
            preprocessing = SecondaryNode('scaling', [imputation])

        if task.task_type == TaskTypesEnum.classification:
            return SecondaryNode('xgboost', nodes_from=[preprocessing])
        if task.task_type == TaskTypesEnum.regression:
            return SecondaryNode('xgbreg', nodes_from=[preprocessing])
        raise NotImplementedError(f"Don't have initial pipeline for task type: {task.task_type}")

    def create_multidata_pipeline(self, task: Task, data: MultiModalData, has_categorical_features: bool) -> Node:
        """Build the final node of a multi-modal pipeline: one branch per
        data source, merged into a task-appropriate head model."""
        if task.task_type == TaskTypesEnum.ts_forecasting:
            head = SecondaryNode('ridge', nodes_from=[])
            for source_name, _ in data.items():
                # Only time-series sources contribute forecasting branches.
                if not source_name.startswith('data_source_ts'):
                    continue
                imputation = SecondaryNode('simple_imputation', [PrimaryNode(source_name)])
                lagged = SecondaryNode('lagged', [imputation])
                head.nodes_from.append(SecondaryNode('ridge', [lagged]))
        elif task.task_type == TaskTypesEnum.classification:
            head = SecondaryNode('xgboost', nodes_from=[])
            head.nodes_from = self.create_first_multimodal_nodes(data, has_categorical_features)
        elif task.task_type == TaskTypesEnum.regression:
            head = SecondaryNode('xgbreg', nodes_from=[])
            head.nodes_from = self.create_first_multimodal_nodes(data, has_categorical_features)
        else:
            raise NotImplementedError(f"Don't have initial pipeline for task type: {task.task_type}")
        return head

    def create_first_multimodal_nodes(self, data: MultiModalData, has_categorical: bool) -> List[SecondaryNode]:
        """Create one preprocessed 'ridge' branch per data source."""
        branches = []
        for source_name, _ in data.items():
            imputation = SecondaryNode('simple_imputation', [PrimaryNode(source_name)])
            # Tabular sources may need categorical encoding before scaling.
            if has_categorical and source_name.startswith('data_source_table'):
                encoded = SecondaryNode('one_hot_encoding', [imputation])
                scaled = SecondaryNode('scaling', [encoded])
            else:
                scaled = SecondaryNode('scaling', [imputation])
            branches.append(SecondaryNode('ridge', [scaled]))
        return branches
| 49.337079 | 114 | 0.666818 | 4,000 | 0.910954 | 0 | 0 | 0 | 0 | 0 | 0 | 416 | 0.094739 |
9273889db8121468c67067e3c3728797e69262dd | 1,251 | py | Python | server/py/dummy.py | sreyas/Attendance-management-system | eeb57bcc942f407151b71bfab528e817c6806c74 | [
"MIT"
] | null | null | null | server/py/dummy.py | sreyas/Attendance-management-system | eeb57bcc942f407151b71bfab528e817c6806c74 | [
"MIT"
] | null | null | null | server/py/dummy.py | sreyas/Attendance-management-system | eeb57bcc942f407151b71bfab528e817c6806c74 | [
"MIT"
] | null | null | null | import numpy as np
import glob,os
import datetime
from pathlib import Path
from pymongo import MongoClient
from flask_mongoengine import MongoEngine
from bson.objectid import ObjectId
client = MongoClient(port=27017)  # local MongoDB instance
db=client.GetMeThrough;
binary =0;  # NOTE(review): assigned but never used in this snippet
# Project root relative to this file (server/py/ -> two levels up).
home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
file_names = glob.glob(home + "/known_people/*.jp*g")  # matches .jpg and .jpeg
home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"  # NOTE(review): recomputed, identical to the value above
known_encodings_file_path = home + "/data/known_encodings_file.csv"
people_file_path = home + "/data/people_file.csv"
known_encodings_file = Path(known_encodings_file_path)
date_format = "%Y-%m-%d %H:%M:%S.%f"
current_date = str(datetime.datetime.now())
# All attendance records for one hard-coded user id.
attendance_system = db.attendance.find({"user": ObjectId("5ab221040784981645037c3a")})
# NOTE(review): `unicode` and the bare `print` statement below make this file
# Python 2 only.
res = [col.encode('utf8') if isinstance(col, unicode) else col for col in attendance_system]
for attendance_doc in res:
    date_time = attendance_doc['date_time']
    time1 = datetime.datetime.strptime(date_time.encode('utf8'), date_format)
    time2 = datetime.datetime.strptime(str(datetime.datetime.now()), date_format)
    diff = time2 - time1
    # Minutes elapsed since the stored timestamp.
    # NOTE(review): diff.seconds ignores whole days; use total_seconds() if
    # gaps can exceed 24h — confirm intent.
    minutes = (diff.seconds) / 60
    print minutes
    if(minutes >=30):
| 37.909091 | 92 | 0.723421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.142286 |
927394c4cd452ef6ecd8dbe3a44abe8b81fb299c | 1,802 | py | Python | TingCheChangSystem/ParkingLot.py | scottyyf/oodoop | d254b8a355c985c7672381495ab8a44d7d55e153 | [
"MIT"
] | 2 | 2021-12-14T10:46:14.000Z | 2021-12-14T10:47:00.000Z | TingCheChangSystem/ParkingLot.py | scottyyf/oodoop | d254b8a355c985c7672381495ab8a44d7d55e153 | [
"MIT"
] | 9 | 2021-12-06T06:16:15.000Z | 2021-12-20T06:39:50.000Z | TingCheChangSystem/ParkingLot.py | scottyyf/oodoop | d254b8a355c985c7672381495ab8a44d7d55e153 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: ParkingLot.py
Author: Scott Yang(Scott)
Email: yangyingfa@skybility.com
Copyright: Copyright (c) 2021, Skybility Software Co.,Ltd. All rights reserved.
Description:
"""
from datetime import datetime
from TException import SpotOccupiedError, NoSuitableSpotsError
from Ticket import Ticket
from Utils import CalculateTime
class ParkingLot:
    """A parking lot made up of levels; reserves spots, issues tickets and
    computes parking fees."""

    def __init__(self, hourly_rate):
        """
        :param hourly_rate: price charged per hour of (non-free) parking
        """
        self._levels = []
        self._hourly_rate = hourly_rate

    def get_available_count(self):
        """Return the total number of free spots across all levels."""
        count = 0
        for level in self._levels:
            count += level.get_available_count()
        return count

    def park_vehicle(self, vehicle):
        """Reserve spots for *vehicle* and return a Ticket.

        Raises NoSuitableSpotsError when no level can host the vehicle (the
        caller may go to another lot) and SpotOccupiedError when a found spot
        was taken concurrently (the caller should re-find spots).
        """
        # NoSuitableSpotsError propagates unchanged to the caller.
        spots = self._find_spots_for_vehicle(vehicle)
        taken = []
        for spot in spots:
            try:
                spot.take_spot()
            except SpotOccupiedError:
                # Bug fix: release the spots already claimed in this loop —
                # previously a partially failed reservation leaked them as
                # occupied forever.
                for claimed in taken:
                    claimed.leave_spot()
                # Caller needs to re-find spots.
                raise
            taken.append(spot)
        return Ticket(spots, vehicle)

    def _find_spots_for_vehicle(self, vehicle):
        """Return the first level's suitable spots, or raise NoSuitableSpotsError."""
        for level in self._levels:
            spots = level.find_spots_for_vehicle(vehicle)
            if spots:
                return spots
        raise NoSuitableSpotsError('can not find spots for vehicle')

    def clear_spot(self, ticket):
        """Free every spot referenced by *ticket* (the vehicle leaves)."""
        for spot in ticket.spots():
            spot.leave_spot()

    def calculate_price(self, ticket):
        """Price for *ticket* up to now: 0 within the free period, otherwise
        hourly rate times the computed hours."""
        end_time = datetime.now()
        start_time = ticket.start_time()
        cal_inst = CalculateTime(start_time, end_time)
        if cal_inst.in_free_time():
            return 0
        return self._hourly_rate * cal_inst.get_hours()
| 25.380282 | 79 | 0.614317 | 1,421 | 0.788568 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.167592 |
92739e4159a529434e8f575d3386d3a074e34ccc | 3,998 | py | Python | hnk05_hupai.py | jcq15/mahjong | 4220a7aecdb22361052c23c1829844e1b3cb1c83 | [
"MIT"
] | 1 | 2021-07-23T02:17:42.000Z | 2021-07-23T02:17:42.000Z | hnk05_hupai.py | jcq15/mahjong | 4220a7aecdb22361052c23c1829844e1b3cb1c83 | [
"MIT"
] | null | null | null | hnk05_hupai.py | jcq15/mahjong | 4220a7aecdb22361052c23c1829844e1b3cb1c83 | [
"MIT"
] | null | null | null | import operator
from global_data import *
class HupaiCheck:
    # Decides whether a 14-tile hand wins and how it decomposes.  Two input
    # fields: self.tehai (hand as a digit+letter string) and self.numtehai
    # (the hand converted to a list of ints for easier processing).  The
    # 3-3-3-3-2 decomposition is recursive; results are stored in
    # self.hupaiway, a list nested up to three levels: empty if the hand does
    # not win, otherwise each first-level list is one winning decomposition
    # whose inner lists are the 3-3-3-3-2 groups, the pairs of a seven-pairs
    # hand, or the full content of a thirteen-orphans hand.
    # Planned improvement: allow '234m' shorthand instead of requiring 28
    # characters like '2m3m4m', and support honor-tile (winds/dragons) input.
    # Known gap: calls (chi/pon/kan) are not handled; the plan is to encode
    # them as negative numbers.
    def __init__(self, *, tehai='', numtehai=[]):
        # NOTE(review): mutable default `numtehai=[]` is shared across calls
        # that omit the argument — prefer None as the default.
        self.tehai = tehai  # hand in string (mspz) notation
        self.numtehai = numtehai  # hand recorded as a list of numbers
        # Nested list of completed winning decompositions; empty if the hand
        # does not win.  Each first-level list is one winning form; inner
        # lists are 3-3-3-3-2 groups, seven-pairs pairs, or the whole
        # thirteen-orphans content.
        # Thirteen orphans is also stored three levels deep so the format
        # stays uniform and saves special-casing downstream.
        self.hupaiway = []

    def hupai32_deal(self):
        # Recursively split the hand into four melds plus one pair (3-3-3-3-2).
        # NOTE(review): the recursion does integer arithmetic (tehai[i] + 1)
        # yet is seeded with self.tehai, which is a *string* — it almost
        # certainly should be seeded with self.numtehai (see tehaitonumtehai);
        # as written it raises TypeError on a full 14-tile hand.
        hupaidevide_unranked = []  # decompositions before sorting/dedup
        hupaidevide = []  # intermediate result during post-processing

        def hupai(tehai, sorted_):
            # Base case: two identical tiles remain — that is the pair.
            if len(tehai) == 2 and tehai[0] == tehai[1]:
                hupaidevide_unranked.append(sorted_ + [tehai])
            else:
                # Try every run (three consecutive tile values) ...
                for i in range(0, len(tehai)):
                    if tehai[i] + 1 in tehai and tehai[i] + 2 in tehai:
                        tehainext = tehai + []
                        i1 = tehainext.pop(i)
                        a = tehainext.index(tehai[i] + 1)
                        a1 = tehainext.pop(a)
                        b = tehainext.index(tehai[i] + 2)
                        b1 = tehainext.pop(b)
                        hupai(tehainext, sorted_ + [[i1, a1, b1]])
                # ... and every triplet (three identical adjacent tiles;
                # assumes the hand is sorted).
                for i in range(0, len(tehai)):
                    if i + 2 < len(tehai) and tehai[i] == tehai[i + 1] and tehai[i] == tehai[i + 2]:
                        hupai(tehai[:i] + tehai[i + 3:], sorted_ + [tehai[i:i + 3]])

        hupai(self.tehai, [])
        # Canonicalize each decomposition by its groups' first element ...
        for h in hupaidevide_unranked:
            h.sort(key=operator.itemgetter(0), reverse=False)
        # ... so duplicates reached via different recursion orders collapse.
        for i in hupaidevide_unranked:
            if i not in hupaidevide:
                hupaidevide.append(i)
        # Move the pair (the only 2-element group) to the end of each form.
        for i in hupaidevide:
            for j in range(len(i)):
                if len(i[j]) == 2:
                    i.append(i[j])
                    i.remove(i[j])
        return hupaidevide

    @staticmethod
    def tehaitonumtehai(tehai, num=14):
        # Convert a string hand (consecutive 2-char tokens like '1m') into a
        # sorted list of tile numbers via the paitonum table from global_data.
        numtehai = []
        for i in range(1, num + 1):
            numtehai.append(paitonum[tehai[:2]])
            tehai = tehai[2:]
        numtehai.sort()
        return numtehai

    # Returns the pairs of a seven-pairs hand; an empty list if not seven pairs.
    def qidui(self):
        #qiduisymbol = True
        ans = []
        for i in range(0, 7):
            # Pair i must be two equal tiles that differ from the preceding
            # tile (so four of a kind does not count as two pairs).
            if self.tehai[i * 2] != self.tehai[i * 2 + 1] or self.tehai[i * 2] == self.tehai[i * 2 - 1]:
                pass
                # qiduisymbol = False
            else:
                # NOTE(review): bare `tehai` here is an undefined global —
                # likely should be self.tehai (or self.numtehai); this branch
                # raises NameError when reached.
                ans.append([tehai[i * 2], tehai[i * 2 + 1]])
        #if qiduisymbol:
        #    return ans
        # NOTE(review): with the all-seven check above commented out, this
        # returns partial pair lists for hands that are not seven pairs.
        return ans

    def gsws(self):
        # Thirteen orphans (kokushi musou): the 13 terminal/honor tile codes.
        gswslist = [1, 9, 11, 19, 21, 29, 31, 32, 34, 35, 37, 38, 40]
        symbol = True
        for i in gswslist:
            # NOTE(review): tests an int against the *string* hand — raises
            # TypeError; this expects self.numtehai.
            if i not in self.tehai:
                symbol = False
        if symbol:
            return [self.tehai]
        else:
            return []

    def hupai_dealall(self):
        # Aggregate every winning form: standard 3-3-3-3-2, seven pairs,
        # thirteen orphans.
        self.hupaiway = self.hupai32_deal() + self.qidui() + self.gsws()
        return self.hupaiway

    def hupaiway_usersee(self):
        # Pretty-print every stored winning decomposition for the user.
        if self.hupaiway != []:
            for i in self.hupaiway:
                print('胡牌方式有:')
                for j in i:
                    for k in j:
                        print(numtopai[k], end='')
                    print(' ', end='')
                print('\n')
if __name__ == '__main__':
    # Demo hand (nine-gates-like shape plus one honor tile).
    # NOTE(review): HupaiCheck's methods largely expect the numeric hand (see
    # tehaitonumtehai); feeding the raw string here triggers the issues noted
    # inside the class.
    pai = '1m1m1m2m3m4m5m6m7m8m9m9m9m1z'
    hc = HupaiCheck(tehai=pai)
    hc.hupai_dealall()
    hc.hupaiway_usersee()
    # pai.numtehai = [1,1,2,2,2,3,3,3,4,4,5,5,6,6]
    #print(pai.hupai32_deal(pai.numtehai))
    #print(pai.hupai_dealall(pai.numtehai))
    #print(pai.hupaiway_usersee(pai.hupai_dealall(pai.numtehai)))
| 32.504065 | 104 | 0.512256 | 4,384 | 0.915622 | 0 | 0 | 238 | 0.049708 | 0 | 0 | 1,600 | 0.334169 |
927613f79cb0fe30b6c12b5c873b5fabde329804 | 2,193 | py | Python | edgelm/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | 1 | 2021-11-07T00:30:05.000Z | 2021-11-07T00:30:05.000Z | edgelm/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | null | null | null | edgelm/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | null | null | null | import kaldi_io
import numpy as np
import os
def get_parser():
    """Build the CLI parser for exporting wav2vec features to Kaldi format."""
    import argparse

    cli = argparse.ArgumentParser()
    # Three required positionals, declared data-driven.
    for arg_name, arg_help in (
            ("w2v_dir", "wav2vec feature and text directory"),
            ("tar_root", "output data directory in kaldi's format"),
            ("split", "name of the subset")):
        cli.add_argument(arg_name, help=arg_help)
    cli.add_argument("--label", default="", help="if specified, copy labels too")
    return cli
def main():
    """Convert wav2vec features (.npy + .lengths) into a Kaldi data directory.

    Writes feats.ark/feats.scp via kaldi_io (one matrix per utterance, keyed
    "utt<idx>"), a trivial utt2spk/spk2utt mapping (each utterance is its own
    speaker), and — when --label is given — a Kaldi `text` file.
    """
    parser = get_parser()
    args = parser.parse_args()

    tar_dir = os.path.join(args.tar_root, args.split)
    os.makedirs(tar_dir, exist_ok=True)

    # Per-utterance frame counts; offsets are their running prefix sums into
    # the flat feature matrix.
    lengths_path = os.path.join(args.w2v_dir, f"{args.split}.lengths")
    with open(lengths_path) as f:
        lengths = [int(line.rstrip()) for line in f]
        offsets = [0] + np.cumsum(lengths[:-1]).tolist()
    # mmap keeps memory bounded even for very large feature files.
    feats = np.load(
        os.path.join(args.w2v_dir, f"{args.split}.npy"),
        mmap_mode="r"
    )
    assert feats.shape[0] == sum(lengths), \
        f"lengths mismatch {feats.shape[0]} != {sum(lengths)}"

    ark_path = os.path.join(tar_dir, "feats.ark")
    scp_path = os.path.join(tar_dir, "feats.scp")
    # Pipe through copy-feats so the ark file is written compressed.
    wspec = f"ark:| copy-feats --compress=true ark:- ark,scp:{ark_path},{scp_path}"
    with kaldi_io.open_or_fd(wspec, "wb") as f:
        for idx, (offset, length) in enumerate(zip(offsets, lengths)):
            feat = feats[offset:offset+length]
            kaldi_io.write_mat(f, feat, key=f"utt{idx:010d}")

    u2s_path = os.path.join(tar_dir, "utt2spk")
    s2u_path = os.path.join(tar_dir, "spk2utt")
    with open(u2s_path, "w") as f_u2s, open(s2u_path, "w") as f_s2u:
        for idx in range(len(lengths)):
            f_u2s.write(f"utt{idx:010d} utt{idx:010d}\n")
            f_s2u.write(f"utt{idx:010d} utt{idx:010d}\n")

    if bool(args.label):
        lab_path = os.path.join(args.w2v_dir, f"{args.split}.{args.label}")
        txt_path = os.path.join(tar_dir, "text")
        with open(lab_path) as f_lab, open(txt_path, "w") as f_txt:
            # `line` keeps its trailing newline, so no separator is added.
            for idx, line in enumerate(f_lab):
                f_txt.write(f"utt{idx:010d} {line}")


if __name__ == "__main__":
    main()
| 38.473684 | 85 | 0.616963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 535 | 0.243958 |
927646be7e0eba265112163bf55301fe33919254 | 1,465 | py | Python | tests/tests_earthquake.py | domenicosolazzo/Earthquake | 4a9940c0f10f686bafb27521b1d9ba83a2930968 | [
"MIT"
] | null | null | null | tests/tests_earthquake.py | domenicosolazzo/Earthquake | 4a9940c0f10f686bafb27521b1d9ba83a2930968 | [
"MIT"
] | null | null | null | tests/tests_earthquake.py | domenicosolazzo/Earthquake | 4a9940c0f10f686bafb27521b1d9ba83a2930968 | [
"MIT"
] | null | null | null | import unittest
from lib.earthquake import Earthquake
class EarthquakeTestCase(unittest.TestCase):
    """Unit tests for the public surface of the Earthquake class."""

    def setUp(self):
        # Fresh instance backed by the fake service for every test.
        self.earthquake = Earthquake(service="fake")

    def test_refresh_exists(self):
        refresh_attr = getattr(self.earthquake, "refresh")
        self.assertTrue(callable(refresh_attr), "True")

    def test_refresh_does_take_at_most_one_parameter(self):
        refresh_attr = getattr(self.earthquake, "refresh")
        self.assertRaises(TypeError, refresh_attr, "json", 2)

    def test_refresh_returns_a_list(self):
        refreshed = self.earthquake.refresh("list")
        self.assertTrue(type(refreshed) == type([]), "Refresh returns a list")

    def test_earthquake_has_an_items_attribute(self):
        items_attr = getattr(self.earthquake, "items")
        self.assertFalse(callable(items_attr), "The object has an items attribute.")

    def test_items_attribute_is_a_list(self):
        self.assertTrue(type(self.earthquake.items) == type([]), "The items attribute is a list")

    def test_earthquake_contains_a_manager_attribute(self):
        manager_attr = getattr(self.earthquake, "_Earthquake__manager")
        self.assertFalse(callable(manager_attr), "Earthquake has a manager attribute")

    def test_earthquake_accepts_zero_or_one_paramenter_in_input(self):
        self.assertRaises(AssertionError, self.earthquake, service="fake", notvalid="notvalid")
        self.assertRaises(AssertionError, self.earthquake, "fake", service="lalala")
        manager_attr = getattr(self.earthquake, "_Earthquake__manager")
        self.assertFalse(callable(manager_attr), "Earthquake has a manager attribute")
| 63.695652 | 120 | 0.736519 | 1,410 | 0.962457 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.214334 |
9277085b50933d495b6b61b89bb5d25f3f2a75a8 | 4,704 | py | Python | edmunds/log/logmanager.py | LowieHuyghe/edmunds-python | 236d087746cb8802a8854b2706b8d3ff009e9209 | [
"Apache-2.0"
] | 4 | 2017-09-07T13:39:50.000Z | 2018-05-31T16:14:50.000Z | edmunds/log/logmanager.py | LowieHuyghe/edmunds-python | 236d087746cb8802a8854b2706b8d3ff009e9209 | [
"Apache-2.0"
] | 103 | 2017-03-19T15:58:21.000Z | 2018-07-11T20:36:17.000Z | edmunds/log/logmanager.py | LowieHuyghe/edmunds-python | 236d087746cb8802a8854b2706b8d3ff009e9209 | [
"Apache-2.0"
] | 2 | 2017-10-14T15:20:11.000Z | 2018-04-20T09:55:44.000Z |
from edmunds.foundation.patterns.manager import Manager
import os
class LogManager(Manager):
    """
    Log Manager

    Instantiates the log drivers configured under 'app.log.instances'.
    Each _create_* method maps one driver name to its driver class.  The
    repeated config-to-kwargs copying has been factored into _pick_options,
    and the directory resolution shared by the file-based drivers into
    _resolve_log_path.
    """

    def __init__(self, app):
        """
        Initiate the manager
        :param app: The application
        :type app: Application
        """
        super(LogManager, self).__init__(app, app.config('app.log.instances', []))

        # Default base directory for file-based log drivers.
        self._log_path = os.path.join(os.sep, 'logs')

    def _resolve_log_path(self, config):
        """
        Resolve the log directory for file-based drivers
        :param config: The config
        :type config: dict
        :return: Directory path ('directory' is taken as-is when absolute,
                 otherwise joined onto the default log path)
        :rtype: str
        """
        log_path = self._log_path
        if 'directory' in config:
            directory = config['directory']
            # Check if absolute or relative path
            if directory.startswith(os.sep):
                log_path = directory
            else:
                log_path = os.path.join(log_path, directory)
        return log_path

    @staticmethod
    def _pick_options(config, keys):
        """
        Copy the given keys from the config into a kwargs-dict
        :param config: The config
        :type config: dict
        :param keys: The keys to copy when present
        :type keys: tuple
        :return: Driver keyword arguments
        :rtype: dict
        """
        return {key: config[key] for key in keys if key in config}

    def _create_file(self, config):
        """
        Create File instance
        :param config: The config
        :type config: dict
        :return: File instance
        :rtype: File
        """
        filename = '%s.log' % 'app'  # self._app.name
        options = self._pick_options(
            config, ('prefix', 'max_bytes', 'backup_count', 'level', 'format'))

        from edmunds.log.drivers.file import File
        return File(self._app, self._resolve_log_path(config), filename, **options)

    def _create_timed_file(self, config):
        """
        Create TimedFile instance
        :param config: The config
        :type config: dict
        :return: TimedFile instance
        :rtype: TimedFile
        """
        filename = '%s.log' % 'app'  # self._app.name
        options = self._pick_options(
            config, ('prefix', 'when', 'interval', 'backup_count', 'level', 'format'))

        from edmunds.log.drivers.timedfile import TimedFile
        return TimedFile(self._app, self._resolve_log_path(config), filename, **options)

    def _create_sys_log(self, config):
        """
        Create SysLog instance
        :param config: The config
        :type config: dict
        :return: SysLog instance
        :rtype: SysLog
        """
        options = self._pick_options(
            config, ('address', 'facility', 'socktype', 'level', 'format'))

        from edmunds.log.drivers.syslog import SysLog
        return SysLog(self._app, **options)

    def _create_stream(self, config):
        """
        Create Stream instance
        :param config: The config
        :type config: dict
        :return: Stream instance
        :rtype: Stream
        """
        options = self._pick_options(config, ('stream', 'level', 'format'))

        from edmunds.log.drivers.stream import Stream
        return Stream(self._app, **options)

    def _create_google_app_engine(self, config):
        """
        Create GoogleAppEngine instance
        :param config: The config
        :type config: dict
        :return: GoogleAppEngine instance
        :rtype: GoogleAppEngine
        """
        options = self._pick_options(config, ('level',))

        from edmunds.log.drivers.googleappengine import GoogleAppEngine
        return GoogleAppEngine(self._app, **options)
| 29.772152 | 82 | 0.54358 | 4,634 | 0.985119 | 0 | 0 | 0 | 0 | 0 | 0 | 1,806 | 0.383929 |
9278e544f626a49aa388ef6db7265b805480641b | 2,220 | py | Python | mottak-arkiv-service/tests/routers/dto/test_Arkivuttrekk.py | arkivverket/mottak | 241dc86b6a012f5ac07dd5242d97df3fadfb69d0 | [
"Apache-2.0"
] | 4 | 2021-03-05T15:39:24.000Z | 2021-09-15T06:11:45.000Z | mottak-arkiv-service/tests/routers/dto/test_Arkivuttrekk.py | arkivverket/mottak | 241dc86b6a012f5ac07dd5242d97df3fadfb69d0 | [
"Apache-2.0"
] | 631 | 2020-04-27T10:39:18.000Z | 2022-03-31T14:51:38.000Z | mottak-arkiv-service/tests/routers/dto/test_Arkivuttrekk.py | arkivverket/mottak | 241dc86b6a012f5ac07dd5242d97df3fadfb69d0 | [
"Apache-2.0"
] | 3 | 2020-02-20T15:48:03.000Z | 2021-12-16T22:50:40.000Z | from datetime import date
from uuid import UUID
import pytest
from app.domain.models.Arkivuttrekk import Arkivuttrekk, ArkivuttrekkStatus, ArkivuttrekkType
from app.domain.models.Depotinstitusjoner import DepotinstitusjonerEnum
from app.routers.dto.Arkivuttrekk import ArkivuttrekkCreate as Arkivuttrekk_dto
@pytest.fixture
def _arkivuttrekk_domain() -> Arkivuttrekk:
return Arkivuttrekk(
id_=None,
obj_id=UUID("df53d1d8-39bf-4fea-a741-58d472664ce2"),
status=ArkivuttrekkStatus.OPPRETTET,
type_=ArkivuttrekkType.NOARK5,
tittel="tittel",
sjekksum_sha256="2afeec307b0573339b3292e27e7971b5b040a5d7e8f7432339cae2fcd0eb936a",
avgiver_navn="Lewis Caroll",
avgiver_epost="lewis@caroll.net",
koordinator_epost="kornat@arkivverket.no",
metadatafil_id=1,
arkiv_startdato=date.fromisoformat("1864-04-10"),
arkiv_sluttdato=date.fromisoformat("1900-05-12"),
storrelse=45620,
avtalenummer="01/12345",
depotinstitusjon=DepotinstitusjonerEnum("ARKIVVERKET"),
opprettet=None,
endret=None
)
@pytest.fixture
def _arkivuttrekk_dto() -> Arkivuttrekk_dto:
    """The matching DTO (ArkivuttrekkCreate) carrying the same field values."""
    field_values = {
        "obj_id": UUID("df53d1d8-39bf-4fea-a741-58d472664ce2"),
        "status": ArkivuttrekkStatus.OPPRETTET,
        "type": ArkivuttrekkType.NOARK5,
        "tittel": "tittel",
        "sjekksum_sha256": "2afeec307b0573339b3292e27e7971b5b040a5d7e8f7432339cae2fcd0eb936a",
        "avgiver_navn": "Lewis Caroll",
        "avgiver_epost": "lewis@caroll.net",
        "koordinator_epost": "kornat@arkivverket.no",
        "metadatafil_id": 1,
        "arkiv_startdato": date.fromisoformat("1864-04-10"),
        "arkiv_sluttdato": date.fromisoformat("1900-05-12"),
        "storrelse": 45620,
        "avtalenummer": "01/12345",
        "depotinstitusjon": "ARKIVVERKET",
    }
    return Arkivuttrekk_dto(**field_values)
def test_to_domain(_arkivuttrekk_dto, _arkivuttrekk_domain):
    """
    GIVEN an object of type ArkivuttrekkBase
    WHEN calling the internal method to_domain()
    THEN check that returned domain object Arkivuttrekk is correct
    """
    converted = _arkivuttrekk_dto.to_domain()
    # Field-by-field comparison of the two objects' attribute dicts.
    assert vars(converted) == vars(_arkivuttrekk_domain)
| 34.6875 | 93 | 0.718468 | 0 | 0 | 0 | 0 | 1,534 | 0.690991 | 0 | 0 | 608 | 0.273874 |
92799feccf4a1361a1ddbaeca85667a04610ecb8 | 1,007 | py | Python | anecAPI/anecAPI.py | suborofu/anecAPI | ddf54e63254fc4b00748e673d8b48aa59abd6526 | [
"MIT"
] | null | null | null | anecAPI/anecAPI.py | suborofu/anecAPI | ddf54e63254fc4b00748e673d8b48aa59abd6526 | [
"MIT"
] | null | null | null | anecAPI/anecAPI.py | suborofu/anecAPI | ddf54e63254fc4b00748e673d8b48aa59abd6526 | [
"MIT"
] | null | null | null | from modern_jokes import modern_jokes
from soviet_jokes import soviet_jokes
import random
import argparse
def soviet_joke():
    """Pick one joke at random from the Soviet-era collection."""
    return random.choice(soviet_jokes)


def modern_joke():
    """Pick one joke at random from the modern Russian collection."""
    return random.choice(modern_jokes)


def random_joke():
    """Pick one joke at random from the union of both collections."""
    combined = soviet_jokes + modern_jokes
    return random.choice(combined)
# TODO: Auto-generated jokes
if __name__ == "__main__":
    parser = argparse.ArgumentParser(add_help=True,
                                     description='Displays a funny (or not) USSR/Russian joke (also called anecdote).')
    # Flag definitions: (short, long, help text).
    flag_specs = (
        ("-m", "--modern", "display a modern Russian joke"),
        ("-s", "--soviet", "display an old USSR joke"),
        ("-a", "--any", "display a USSR/Russian joke (default)"),
    )
    for short_flag, long_flag, help_text in flag_specs:
        parser.add_argument(short_flag, long_flag, action="store_true", help=help_text)
    args = parser.parse_args()
    # Same precedence as the original if/elif chain: --modern wins, then --soviet.
    picker = modern_joke if args.modern else soviet_joke if args.soviet else random_joke
    print(picker())
| 27.972222 | 119 | 0.681231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.276068 |
9279acc17663b0db1a6150a3819e8cd2c5b2264d | 3,141 | py | Python | lexicon/encode.py | ishine/neural-lexicon-reader | 66f4c464a7a442812e79458759ac913ce51d1c6e | [
"MIT"
] | 4 | 2021-10-31T08:29:21.000Z | 2021-12-29T08:12:56.000Z | lexicon/encode.py | shaun95/neural-lexicon-reader | 66f4c464a7a442812e79458759ac913ce51d1c6e | [
"MIT"
] | 1 | 2021-12-12T11:22:20.000Z | 2021-12-12T11:53:47.000Z | lexicon/encode.py | mutiann/neural-lexicon-reader | 66f4c464a7a442812e79458759ac913ce51d1c6e | [
"MIT"
] | null | null | null | import transformers
from transformers import XLMRobertaTokenizerFast, XLMRobertaModel
import torch
import json
import pickle
import tqdm
import os
from matplotlib import pyplot as plt
# Token count of every text encoded so far; get_encodings() appends to this.
lengths = []
# Module-level XLM-RoBERTa tokenizer/model shared by all helpers below
# (loaded once at import time — this downloads weights on first use).
tokenizer = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-large")
model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
# Hard cap on input tokens; longer texts get shortened by shrink_text().
max_token = 256
def shrink_text(text, inputs):
    """Shorten *text* so it tokenizes to fewer than ``max_token`` tokens.

    The text is split into segments at '●' markers; tokens are removed
    proportionally from the longest segments, and each removed span is
    replaced by a single '…' character. ``inputs`` must be the tokenizer
    output for *text* with ``return_offsets_mapping=True``.
    """
    print(text)
    # Character positions of the segment boundaries ('●' markers plus the
    # final character of the text).
    char_p = []
    for i in range(len(text)):
        if text[i] == '●' and i > 0:
            char_p.append(i)
    char_p.append(len(text) - 1)
    # How many tokens must be removed to get under the cap.
    reduction = inputs.data['input_ids'].shape[1] - max_token + 1
    # Per-token (start, end) char offsets, excluding <s> and </s>.
    offset = inputs.data['offset_mapping'][0].numpy().tolist()[1:-1]
    tok_p = []    # token index of each segment boundary
    seg_len = []  # token length of each segment
    for cp in char_p:
        for i, (l, r) in enumerate(offset):
            if l <= cp < r:
                if len(tok_p) == 0:
                    seg_len.append(i)
                else:
                    seg_len.append(i - tok_p[-1])
                tok_p.append(i)
                break
    # Greedily take one token at a time from the currently longest segment.
    remove_toks = [0 for _ in seg_len]
    while reduction > 0:
        maxp = maxv = 0
        for i in range(len(seg_len)):
            if seg_len[i] > maxv:
                maxv = seg_len[i]
                maxp = i
        seg_len[maxp] -= 1
        remove_toks[maxp] += 1
        reduction -= 1
    # Cut the corresponding character spans, iterating right-to-left so the
    # earlier offsets stay valid, and mark each cut with '…'.
    for i, n_toks in reversed(list(enumerate(remove_toks))):
        if n_toks > 0:
            l = offset[tok_p[i] - n_toks][1]
            r = offset[tok_p[i]][0]
            if tok_p[i] == len(offset) - 1:
                r = len(text)
            text = text[:l] + '…' + text[r:]
    print(text)
    return text
def get_encodings(text):
    """Encode *text* with the module-level XLM-R model.

    Texts reaching ``max_token`` tokens are first shortened via shrink_text()
    and re-tokenized. Appends the final token count to the module-level
    ``lengths`` list. Returns a dict with the input embeddings, the last
    hidden states and the token strings (wrapped in '<sos>'/'<eos>').
    """
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt", return_offsets_mapping=True)
        if inputs.data['input_ids'].shape[1] >= max_token:
            text = shrink_text(text, inputs)
            inputs = tokenizer(text, return_tensors="pt", return_offsets_mapping=True)
        lengths.append(inputs.data['input_ids'].shape[1])
        # The model forward does not accept offset_mapping.
        del inputs['offset_mapping']
        outputs = model(**inputs, output_hidden_states=True, return_dict=True)
        return {'input': model.get_input_embeddings()(inputs['input_ids']).cpu().numpy(),
                'output': outputs['hidden_states'][-1].cpu().numpy(),
                'tokens': ['<sos>'] + tokenizer.tokenize(text) + ['<eos>']}
def get_all_encoding(lexicon, out_path, word_id):
    """Encode every (key, text) pair in *lexicon* and pickle each result to
    ``<out_path>/<word_id[key]>.pickle``.

    Entries whose pickle file already exists are skipped. Afterwards prints
    how many texts exceed 200/300/400 tokens and shows a length histogram.
    """
    os.makedirs(out_path, exist_ok=True)
    # skipped = {}
    # NOTE: this local list shadows the module-level `lengths` that
    # get_encodings() appends to; the statistics below use the local one.
    lengths = []
    for key, text in tqdm.tqdm(lexicon):
        length = len(tokenizer.tokenize(text))
        lengths.append(length)
        key_id = word_id[key]
        out_file = os.path.join(out_path, str(key_id) + '.pickle')
        if os.path.exists(out_file):
            continue
        # '●' marks the segment boundaries that shrink_text() cuts at.
        text = text.replace("*", '●')
        encoding = get_encodings(text)
        # BUG FIX: the file handle given to pickle.dump was never closed;
        # a context manager guarantees it is flushed and closed.
        with open(out_file, 'wb') as f:
            pickle.dump(encoding, f)
    # print(json.dumps(skipped, indent=1, ensure_ascii=False))
    print("%d %d %d" % (len([t for t in lengths if t >= 200]), len([t for t in lengths if t >= 300]), len([t for t in lengths if t >= 400])))
    plt.hist(lengths)
    plt.show()
| 36.103448 | 141 | 0.58007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 296 | 0.094058 |
927d3239663b283ae0d534d00b7151aeae2d5071 | 1,052 | py | Python | 05_reseaux_web/2_le_web/demo_web_cgi/cgi-bin/cookie.py | efloti/cours-nsi-premiere | 5b05bc81e5f8d7df47bf5785068b4bf4d1e357bb | [
"CC0-1.0"
] | null | null | null | 05_reseaux_web/2_le_web/demo_web_cgi/cgi-bin/cookie.py | efloti/cours-nsi-premiere | 5b05bc81e5f8d7df47bf5785068b4bf4d1e357bb | [
"CC0-1.0"
] | null | null | null | 05_reseaux_web/2_le_web/demo_web_cgi/cgi-bin/cookie.py | efloti/cours-nsi-premiere | 5b05bc81e5f8d7df47bf5785068b4bf4d1e357bb | [
"CC0-1.0"
] | null | null | null | #!/opt/tljh/user/bin/python
import cgitb; cgitb.enable() # pour débogage
import os
entete_http = "Content-type: text/html; charset=utf-8\n"

# HTML template with two insertion points: {message} and {suppr}.
html_tpl = """
<!DOCTYPE html>
<head>
    <title>cookie</title>
</head>
<body>
    <a href="/">retour...</a>
    <h2>{message}</h2>
    {suppr}
</body>
</html>
"""

# Link letting the visitor delete the cookie (negative Max-Age expires it).
lien_suppression = """
<a href="/" onclick="document.cookie='test=; Max-Age=-99999999;'">oublie moi ...</a>
"""

# Fetch the cookie header, if any. BUG FIX: use .get() — on the very first
# visit the browser sends no Cookie header at all, so the CGI variable
# HTTP_COOKIE is unset and os.environ["HTTP_COOKIE"] raised KeyError before
# the "first visit" branch could ever run.
cookie = os.environ.get("HTTP_COOKIE", "")

# Has our marker cookie already been stored?
if "test" in cookie:  # yes
    message = "Tu es déjà venue par ici toi..."
    suppr = lien_suppression
else:  # no
    # Ask the browser to store the cookie with this response.
    entete_http += "Set-Cookie: test=ok\n"
    message = "Première visite sur cette page?"
    suppr = ""

# Render the final page.
html = html_tpl.format(
    message=message,
    suppr=suppr,
)

# Send the response (print adds the blank line separating headers from body).
print(entete_http)
print(html)
927e320523e04b0b2afed5168444a9bde76b3d60 | 2,053 | py | Python | globe_observer_cli.py | fpaludi/GlobeObserver | d95155142304a305e9ec73276139ddcd8dd31c72 | [
"MIT"
] | null | null | null | globe_observer_cli.py | fpaludi/GlobeObserver | d95155142304a305e9ec73276139ddcd8dd31c72 | [
"MIT"
] | null | null | null | globe_observer_cli.py | fpaludi/GlobeObserver | d95155142304a305e9ec73276139ddcd8dd31c72 | [
"MIT"
] | null | null | null | from time import time
from typing import List, Optional
from pathlib import Path
from datetime import datetime, timedelta
import typer
import numpy as np
from skimage import exposure
import rasterio as rio
from rasterio import plot
from globe_observer.gee import SatelliteFactory, BaseGeeService
from globe_observer.gdrive_service import GDriveFactory
app = typer.Typer()
@app.command()
def available_satellites() -> List[str]:
    """List the names of all satellites the factory knows how to build."""
    names = SatelliteFactory.list_satellites()
    return names
@app.command()
def download_images(
    satellite_name: str,
    polygon: Path,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    bands: Optional[str] = "rgb"
):
    """Download satellite imagery over *polygon* into ``results/``.

    NOTE(review): ``bands`` is accepted but never used — confirm whether it
    should be forwarded to the GEE service.
    """
    if not polygon.exists():
        # NOTE(review): FileExistsError for a *missing* file is surprising
        # (FileNotFoundError would fit); kept because callers may catch it.
        raise FileExistsError(
            f"File {polygon} doesn't exists"
        )
    # Default window: the four weeks leading up to now.
    end_date = end_date or datetime.now()
    start_date = start_date or (end_date - timedelta(weeks=4))
    service = BaseGeeService(
        satellite_name=satellite_name,
        polygon_file=polygon,
        drive_service=GDriveFactory.build(),
    )
    collection = service.get_image_collection(
        start_date=start_date,
        end_date=end_date,
    )
    service.download_to_local(collection, "results/")
@app.command()
def show_rgb_image(
    file: Path,
):
    """Render a raster's first three bands as a contrast-stretched RGB image."""
    if not file.exists():
        raise FileExistsError(
            f"File {file} doesn't exists"
        )
    # Read and min-max normalise the R, G, B bands (1-indexed in rasterio).
    with rio.open(file) as raster:
        normalised = [min_max_norm(raster.read(band)) for band in (1, 2, 3)]
    stacked = np.nan_to_num(np.array(normalised), -1)
    # Stretch contrast between the 2nd and 98th percentiles.
    low, high = np.percentile(stacked, (2, 98))
    stretched = exposure.rescale_intensity(stacked, in_range=(low, high))
    plot.show(stretched)
def min_max_norm(image: np.ndarray):
    """Linearly rescale *image* so its values span [0, 1].

    NaNs are ignored when locating the extremes (they still propagate
    through the arithmetic into the output).
    """
    lo = np.nanmin(image)
    hi = np.nanmax(image)
    return (image - lo) / (hi - lo)
if __name__ == "__main__":
app()
| 25.6625 | 73 | 0.669264 | 0 | 0 | 0 | 0 | 1,451 | 0.706771 | 0 | 0 | 86 | 0.04189 |
927e356264be10470f903d4aeb2be8373a316cd6 | 61 | py | Python | up/utils/model/optim/__init__.py | ModelTC/EOD | 164bff80486e9ae6a095a97667b365c46ceabd86 | [
"Apache-2.0"
] | 196 | 2021-10-30T05:15:36.000Z | 2022-03-30T18:43:40.000Z | up/utils/model/optim/__init__.py | ModelTC/EOD | 164bff80486e9ae6a095a97667b365c46ceabd86 | [
"Apache-2.0"
] | 12 | 2021-10-30T11:33:28.000Z | 2022-03-31T14:22:58.000Z | up/utils/model/optim/__init__.py | ModelTC/EOD | 164bff80486e9ae6a095a97667b365c46ceabd86 | [
"Apache-2.0"
] | 23 | 2021-11-01T07:26:17.000Z | 2022-03-27T05:55:37.000Z | from .lars import LARS # noqa
from .lamb import LAMB # noqa | 30.5 | 30 | 0.721311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.196721 |
927f45b9dbdb80eca73f5bcd7ad5255da6d02422 | 10,487 | py | Python | simrd/simrd/parse/graph.py | uwsampl/dtr-prototype | eff53cc4804cc7d6246a6e5086861ce2b846f62b | [
"Linux-OpenIB"
] | 90 | 2020-06-18T05:32:06.000Z | 2022-03-28T13:05:17.000Z | simrd/simrd/parse/graph.py | merrymercy/dtr-prototype | bf40e182453a7d8d23581ea68f32a9d7d2037d62 | [
"Linux-OpenIB"
] | 5 | 2020-07-02T02:25:16.000Z | 2022-03-24T05:50:30.000Z | simrd/simrd/parse/graph.py | uwsampl/dtr-prototype | eff53cc4804cc7d6246a6e5086861ce2b846f62b | [
"Linux-OpenIB"
] | 13 | 2020-06-27T07:01:54.000Z | 2022-01-18T07:31:01.000Z | import attr
from attr import attrib, s
from typing import Tuple, List, Optional, Callable, Mapping, Union, Set
from collections import defaultdict
from ..tensor import Operator
@attr.s(auto_attribs=True)
class GOp:
  """One operator node of the computation graph.

  ``size``, ``alias`` and ``result`` are parallel tuples with one entry per
  output. ``alias[i] >= 0`` marks output i as an alias of input
  ``args[alias[i]]``; aliasing outputs own no storage (``size[i] == 0``).
  """
  cost : float  # cost of (re)computing this op
  size : Tuple[int]  # per-output storage size
  alias : Tuple[int]  # per-output index into args, or -1 for "not an alias"
  args : Tuple['GTensor']  # input tensors
  result : Tuple['GTensor']  # output tensors
  name : str  # unique name within the graph
  meta : dict  # free-form metadata, e.g. {'bwd': True}
  def __attrs_post_init__(self):
    # Invariant: every aliasing output must have zero declared size.
    assert len(self.size) == len(self.alias) == len(self.result)
    for i in range(len(self.size)):
      assert self.alias[i] == -1 or self.size[i] == 0
  def is_aliasing(self) -> bool:
    """True if at least one output aliases an input."""
    return any([a >= 0 for a in self.alias])
  def all_aliasing(self) -> bool:
    """True if every output aliases an input."""
    return all([a >= 0 for a in self.alias])
  def is_tuple(self) -> bool:
    """True if the op produces more than one output."""
    return len(self.result) > 1
  def __str__(self): return self.name
  @staticmethod
  def make(g : 'Graph',
           args : Tuple['GTensor'],
           cost : float,
           size : Tuple[int],
           alias : Tuple[int],
           name : str,
           res_names : Tuple[str],
           meta : dict,
           make_uname : bool = True) -> ('GOp', Tuple['GTensor']):
    """Create a GOp plus its output GTensors and register them in *g*.

    When *make_uname* is true the name is suffixed with a fresh graph id to
    guarantee uniqueness. Returns ``(op, result_tensors)``.
    """
    assert len(size) == len(alias) == len(res_names)
    uname = '{}/{}'.format(name, g._next_id()) if make_uname else name
    result = tuple([GTensor(None, i, res_names[i], None) for i in range(len(res_names))])
    op = GOp(cost, size, alias, args, result, uname, meta)
    for r in result:
      r.op = op
      # Aliasing outputs borrow their source tensor's storage size.
      r.storage_size = r.size() if not r.alias() else r.alias().storage_size
      assert r.storage_size is not None
    g.add_op(op)
    return op, result
GOp.CONST_NAME = 'constant'
@attr.s(auto_attribs=True)
class GTensor:
  """One output tensor of a GOp, identified by ``(op, index)``."""
  op : 'GOp'  # producing operator (patched in by GOp.make)
  index : int  # which output slot of ``op`` this tensor is
  name : str  # unique tensor name
  storage_size : int  # bytes actually owned; aliases share their source's
  meta : dict = attrib(factory=dict)
  def size(self) -> int:
    """Declared size of this output slot."""
    return self.op.size[self.index]
  def alias(self) -> Optional['GTensor']:
    """The input tensor this output aliases, or None if it owns storage."""
    a = self.op.alias[self.index]
    return self.op.args[a] if a >= 0 else None
  def __str__(self): return self.name
@attr.s(auto_attribs=True)
class GCompute:
  """Schedule command: materialize the outputs of ``op``."""
  op : 'GOp'
  def __str__(self):
    return '({},)=Compute({})'.format(
      ','.join([r.name for r in self.op.result]),
      self.op.name
    )
@attr.s(auto_attribs=True)
class GGet:
  """Schedule command: take a reference to ``tensor``; pin it when ``pin``."""
  tensor : 'GTensor'
  pin : bool  # True => Pin (keep resident), False => plain Get (refcount)
  def __str__(self):
    op = 'Pin' if self.pin else 'Get'
    return '{}({})'.format(op, self.tensor.name)
@attr.s(auto_attribs=True)
class GRelease:
  """Schedule command: drop one reference to ``tensor``."""
  tensor : 'GTensor'
  def __str__(self):
    return 'Release({})'.format(self.tensor.name)
class Graph:
  """A tensor computation graph plus a linear execution schedule.

  Ops and tensors are registered through GOp.make -> add_op; ``schedule``
  is a list of GCompute/GGet/GRelease commands that get_closure() replays
  against a runtime.
  """
  def __init__(self):
    self._id : int = 0  # counter backing _next_id()
    self.schedule : List[Union['GCompute', 'GGet', 'GRelease']] = []
    self.ops : Mapping[str, 'GOp'] = {}
    self.fwd_ops : Mapping[str, 'GOp'] = {}  # ops without meta['bwd']
    self.bwd_ops : Mapping[str, 'GOp'] = {}  # ops flagged meta['bwd']
    self.tensors : Mapping[str, 'GTensor'] = {}
    self.op_children : Mapping[str, Set[str]] = defaultdict(set)  # op -> consumer ops
    self.op_parents : Mapping[str, Set[str]] = defaultdict(set)  # op -> producers of its args
    self.meta = {
      'compute': 0  # running total of registered op costs
    }
  def _next_id(self) -> int:
    # Monotonically increasing id used to uniquify op names.
    i = self._id
    self._id += 1
    return i
  def add_op(self, op : 'GOp') -> None:
    """Register *op* and its outputs, updating parent/child maps and the
    total compute cost. All of *op*'s argument tensors must already be
    registered; its output names must be fresh."""
    assert op.name not in self.ops
    self.ops[op.name] = op
    if op.meta.get('bwd', False):
      self.bwd_ops[op.name] = op
    else:
      self.fwd_ops[op.name] = op
    for ti in op.args:
      assert ti.name in self.tensors
    op_parents = set([ti.op.name for ti in op.args])
    for ps in op_parents:
      self.op_children[ps].add(op.name)
    self.op_parents[op.name] = op_parents
    for to in op.result:
      assert to.name not in self.tensors
      self.tensors[to.name] = to
    self.meta['compute'] += op.cost
  # returns op names, not ops
  def ops_topological(self) -> List[str]:
    """DFS-based topological sort of op names (producers before consumers)."""
    visited = {v : False for v in self.ops}
    stack = []
    def visit(v):
      visited[v] = True
      for u in self.op_children[v]:
        if not visited[u]:
          visit(u)
      stack.insert(0, v)
    for v in self.ops:
      if not visited[v]:
        visit(v)
    return stack
  def get_closure(self) -> Callable[['Runtime'], None]:
    """Build a function that replays this graph's schedule on a runtime,
    translating graph tensors/ops into runtime objects on the fly."""
    def f(rt):
      tensor_map = {}  # graph tensor name -> runtime tensor
      for cmd in self.schedule:
        if isinstance(cmd, GCompute):
          # TODO: add a rematerialize cmd? this assumes once-compute only
          for x in cmd.op.args:
            assert x.name in tensor_map
          args = [tensor_map[x.name] for x in cmd.op.args]
          rt_op = Operator(
            cmd.op.cost,
            cmd.op.size,
            cmd.op.alias,
            cmd.op.name
          )
          res = rt.compute(args, rt_op, names=tuple([o.name for o in cmd.op.result]))
          for i, r in enumerate(res):
            assert cmd.op.result[i].name not in tensor_map
            tensor_map[cmd.op.result[i].name] = r
        elif isinstance(cmd, GGet):
          assert cmd.tensor.name in tensor_map
          t = tensor_map[cmd.tensor.name]
          if cmd.pin:
            # Pinning requires the tensor to be materialized first.
            if not t.defined:
              rt.rematerialize(t)
            assert t.defined
            rt.pin(t)
          else:
            rt.get(t)
        elif isinstance(cmd, GRelease):
          assert cmd.tensor.name in tensor_map
          rt.release(tensor_map[cmd.tensor.name])
    return f
def rewrite_collapse_aliases(g : 'Graph') -> 'Graph':
  """Return a copy of *g* with all purely-aliasing ops removed.

  Outputs of removed ops are redirected to the tensors they aliased, and
  each removed GCompute in the schedule becomes extra Get references on
  those tensors (preserving refcounts). Raises RuntimeError if an op mixes
  aliasing and non-aliasing outputs. Old->new name maps are stored in the
  result's ``meta``.
  """
  g_r = Graph()
  g_r.meta = g.meta.copy()
  g_r.meta['compute'] = 0  # rebuilt by add_op as ops are re-registered
  ops_topological = g.ops_topological()
  # maps old -> new
  tensor_map : Mapping[str, 'GTensor'] = {}
  op_map : Mapping[str, 'GOp'] = {}
  for op_name in ops_topological:
    op = g.ops[op_name]
    if op.is_aliasing():
      if not op.all_aliasing():
        raise RuntimeError(
          'cannot collapse aliases, {} is not all aliasing'
          .format(op)
        )
      # Redirect every aliasing output to (the replacement of) its source.
      for r in op.result:
        tensor_map[r.name] = tensor_map[r.alias().name]
    else:
      # keep operator
      args = [tensor_map[x.name] for x in op.args]
      op_new, res = GOp.make(
        g_r, args, op.cost, op.size, op.alias,
        op.name, tuple([o.name for o in op.result]), op.meta,
        make_uname=False
      )
      for r in res:
        tensor_map[r.name] = r
      op_map[op.name] = op_new
  # rewrite schedule
  for cmd in g.schedule:
    if isinstance(cmd, GCompute):
      if cmd.op.name in op_map:
        g_r.schedule.append(GCompute(op_map[cmd.op.name]))
      else:
        # aliasing op; increase refcount
        for r in cmd.op.result:
          g_r.schedule.append(GGet(tensor_map[r.name], pin=False))
    elif isinstance(cmd, GGet):
      g_r.schedule.append(GGet(tensor_map[cmd.tensor.name], pin=cmd.pin))
    elif isinstance(cmd, GRelease):
      g_r.schedule.append(GRelease(tensor_map[cmd.tensor.name]))
  g_r.meta['no_aliases'] = True
  g_r.meta['tensor_map'] = {old: new.name for old, new in tensor_map.items()}
  g_r.meta['op_map'] = {old: new.name for old, new in op_map.items()}
  return g_r
def rewrite_merge_tuples(g : 'Graph') -> 'Graph':
  """Return a copy of *g* in which every multi-output op becomes a single
  output whose size is the sum of the tuple's sizes.

  All former tuple outputs map to that one tensor; extra Gets are appended
  after each merged compute so refcounts stay consistent with the original
  per-output references. Input must be alias-free (asserted per op).
  """
  g_r = Graph()
  g_r.meta = g.meta.copy()
  g_r.meta['compute'] = 0  # rebuilt by add_op as ops are re-registered
  ops_topological = g.ops_topological()
  # maps old -> new
  tensor_map : Mapping[str, 'GTensor'] = {}
  op_map : Mapping[str, 'GOp'] = {}
  for op_name in ops_topological:
    op = g.ops[op_name]
    assert not op.is_aliasing()
    if op.is_tuple():
      # Merge all outputs into a single '+'-joined tensor of summed size.
      args = tuple([tensor_map[x.name] for x in op.args])
      op_new, res = GOp.make(
        g_r, args, op.cost, (sum(op.size),), (-1,),
        op.name, ('+'.join([o.name for o in op.result]),), op.meta,
        make_uname=False
      )
      for r in op.result:
        tensor_map[r.name] = res[0]
      op_map[op.name] = op_new
    else:
      # keep
      args = [tensor_map[x.name] for x in op.args]
      op_new, res = GOp.make(
        g_r, args, op.cost, op.size, op.alias,
        op.name, (op.result[0].name,), op.meta,
        make_uname=False
      )
      tensor_map[res[0].name] = res[0]
      op_map[op.name] = op_new
  for cmd in g.schedule:
    if isinstance(cmd, GCompute):
      op_new = op_map[cmd.op.name]
      g_r.schedule.append(GCompute(op_new))
      # need to get more refs for each missing tuple output
      for _ in range(len(cmd.op.result) - 1):
        g_r.schedule.append(GGet(op_new.result[0], pin=False))
    elif isinstance(cmd, GGet):
      g_r.schedule.append(GGet(tensor_map[cmd.tensor.name], pin=cmd.pin))
    elif isinstance(cmd, GRelease):
      g_r.schedule.append(GRelease(tensor_map[cmd.tensor.name]))
  g_r.meta['no_tuples'] = True
  g_r.meta['tensor_map'] = {old: new.name for old, new in tensor_map.items()}
  g_r.meta['op_map'] = {old: new.name for old, new in op_map.items()}
  return g_r
def rewrite_constant_elim(g : 'Graph') -> 'Graph':
  """Return a copy of *g* with all 'constant' ops (GOp.CONST_NAME) removed.

  Their total storage is tallied into ``meta['constant_ram']``; their output
  tensors disappear from the graph and the schedule, and kept ops simply
  lose those arguments. Requires an alias-free input graph; asserts that the
  removed constants carried zero compute cost.
  """
  if not g.meta.get('no_aliases', False):
    raise RuntimeError('cannot eliminate constants, input graph may have aliases')
  g_r = Graph()
  g_r.meta = g.meta.copy()
  compute_pre = g_r.meta['compute']
  g_r.meta['compute'] = 0
  g_r.meta['constant_ram'] = 0
  ops_topological = g.ops_topological()
  # maps old -> new
  tensor_map : Mapping[str, 'GTensor'] = {}
  op_map : Mapping[str, 'GOp'] = {}
  for op_name in ops_topological:
    op = g.ops[op_name]
    if op_name.split('/')[0] == GOp.CONST_NAME:
      # Constants take no (registered) inputs; only account for their RAM.
      args = [tensor_map[x.name] for x in op.args]
      assert len(args) == 0
      g_r.meta['constant_ram'] += sum(op.size)
    else:
      # keep operator
      # Arguments that were produced by constants are dropped here.
      args = [tensor_map[x.name] for x in op.args if x.name in tensor_map]
      op_new, res = GOp.make(
        g_r, args, op.cost, op.size, op.alias,
        op.name, tuple([o.name for o in op.result]), op.meta,
        make_uname=False
      )
      for r in res:
        tensor_map[r.name] = r
      op_map[op.name] = op_new
  for cmd in g.schedule:
    if isinstance(cmd, GCompute):
      if cmd.op.name in op_map:
        op_new = op_map[cmd.op.name]
        g_r.schedule.append(GCompute(op_new))
    elif isinstance(cmd, GGet):
      if cmd.tensor.name in tensor_map:
        g_r.schedule.append(GGet(tensor_map[cmd.tensor.name], pin=cmd.pin))
    elif isinstance(cmd, GRelease):
      if cmd.tensor.name in tensor_map:
        g_r.schedule.append(GRelease(tensor_map[cmd.tensor.name]))
  g_r.meta['no_constants'] = True
  g_r.meta['tensor_map'] = {old: new.name for old, new in tensor_map.items()}
  g_r.meta['op_map'] = {old: new.name for old, new in op_map.items()}
  # Eliminated constants must have carried zero cost, else totals diverge.
  assert compute_pre == g_r.meta['compute']
  return g_r
def rewrite_checkmate(g : 'Graph') -> 'Graph':
  """Run the rewrite pipeline required for Checkmate-style processing:
  collapse aliases, merge tuple outputs, then eliminate constants."""
  result = g
  for stage in (rewrite_collapse_aliases, rewrite_merge_tuples, rewrite_constant_elim):
    result = stage(result)
  return result
| 30.135057 | 89 | 0.605416 | 4,845 | 0.462001 | 0 | 0 | 2,293 | 0.218652 | 0 | 0 | 915 | 0.087251 |
927f4c79aa83e7fff26f8f45893a1eaf912e2026 | 58 | py | Python | json_schema_checker/validators/__init__.py | zorgulle/json_schema_checker | 20cac68f899528619e5059f0e1fbee0a0f7219d6 | [
"MIT"
] | null | null | null | json_schema_checker/validators/__init__.py | zorgulle/json_schema_checker | 20cac68f899528619e5059f0e1fbee0a0f7219d6 | [
"MIT"
] | null | null | null | json_schema_checker/validators/__init__.py | zorgulle/json_schema_checker | 20cac68f899528619e5059f0e1fbee0a0f7219d6 | [
"MIT"
] | null | null | null | from .validators import Int
from .validators import String | 29 | 30 | 0.844828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
927fd09597ab96c5d8e35156dd5a27ff50d0ebba | 3,653 | py | Python | samples/oxy-cope/namd_02-mini.py | sergio-marti/qm3 | 00476b05a730de79511c003dd54894c5d2cfac7c | [
"MIT"
] | 13 | 2020-12-19T02:37:39.000Z | 2022-02-09T11:26:38.000Z | samples/oxy-cope/namd_02-mini.py | zhenglz/qm3 | 356a26baf990391597bd75b971a95952e52fb977 | [
"MIT"
] | 2 | 2021-01-07T02:53:27.000Z | 2021-07-02T17:58:42.000Z | samples/oxy-cope/namd_02-mini.py | zhenglz/qm3 | 356a26baf990391597bd75b971a95952e52fb977 | [
"MIT"
] | 3 | 2020-12-20T11:30:52.000Z | 2021-07-12T02:51:44.000Z | import qm3.mol
import qm3.fio.xplor
import qm3.problem
import qm3.engines.namd
import qm3.engines.xtb
import qm3.engines.mmint
import qm3.actions.minimize
import os
import time
import pickle
class my_problem( qm3.problem.template ):
    """QM/MM problem coupling NAMD (MM, driven over a named pipe + shared
    memory) with xTB (QM) for the solvated system in 01.pdb/01.psf; total
    energies/gradients sum both engines plus a QM/MM Lennard-Jones term."""
    def __init__( self ):
        qm3.problem.template.__init__( self )
        # Load the solvated system and set the periodic box edge lengths.
        self.mol = qm3.mol.molecule( "01.pdb" )
        self.mol.boxl = [ 40., 40., 40. ]
        qm3.fio.xplor.psf_read( self.mol, "01.psf" )
        self.mol.guess_atomic_numbers()
        qm3.engines.mmint.non_bonded( self.mol, "01.non_bonded" )
        # Write the NAMD input. After "startup" the embedded Tcl loop blocks
        # on the named pipe, so single-point evaluations can be requested
        # interactively ("energy" / "gradient" / ... commands).
        os.system( "rm -vf namd.*" )
        f = open( "namd.inp", "wt" )
        f.write( """
structure 01.psf
coordinates 01.pdb
paraTypeCharmm on
parameters 01.wat.prm
parameters 01.acs.prm
fixedatoms on
fixedatomsfile 01.pdb
cellBasisVector1 40. .0 .0
cellBasisVector2 .0 40. .0
cellBasisVector3 .0 .0 40.
PME on
PMETolerance 0.000001
PMEGridSpacing 0.5
exclude scaled1-4
1-4scaling 0.5
switching on
switchdist 12.0
cutoff 14.0
pairlistdist 18.0
wrapAll off
wrapWater off
nonbondedFreq 1
fullElectFrequency 1
stepspercycle 1
temperature 0.0
outputEnergies 1
outputname namd.out
startup
############################################################
set fd [ open "namd.pipe" r ]
while { [ gets $fd cmd ] >= 0 } {
    switch $cmd {
        "energy" { run 0 }
        "gradient" { run 0; output onlyforces shm }
        "charges" { reloadCharges shm }
        "coordinates" { coorfile shmread }
        "exit" { close $fd; exit }
    }
}
""" )
        f.close()
        # Launch NAMD in the background and wait for its shared-memory
        # segment to appear before attaching to it.
        os.mkfifo( "namd.pipe" )
        os.system( "NAMD_SHM=1 ./bin/namd2 +ppn 1 +setcpuaffinity +isomalloc_sync +idlepoll namd.inp > namd.out &" )
        while( not os.path.isfile( "namd.shmid" ) ):
            time.sleep( 1 )
        time.sleep( 1 )
        self.emm = qm3.engines.namd.namd_shm()
        # QM / MM atom selections (pickled lists of atom indexes).
        f = open( "01.sele_QM.pk", "rb" )
        sqm = pickle.load( f )
        f.close()
        f = open( "01.sele_MM.pk", "rb" )
        smm = pickle.load( f )
        f.close()
        self.eqm = qm3.engines.xtb.dl_xtb( self.mol, 0, 0, sqm, smm )
        # QM/MM Lennard-Jones coupling term.
        self.fix = qm3.engines.mmint.QMLJ( self.mol, sqm, smm, [] )
        self.size = self.mol.natm * 3
        self.coor = self.mol.coor
        # Zero the MM charges of the QM atoms so NAMD does not double-count
        # the QM/MM electrostatics handled by the QM engine.
        for i in sqm:
            self.mol.chrg[i] = 0.0
        self.emm.update_chrg( self.mol )
    def get_func( self ):
        """Total energy: MM (NAMD) + QM (xTB), accumulated into mol.func."""
        self.mol.func = 0.0
        self.emm.get_func( self.mol )
        self.eqm.get_func( self.mol )
        self.func = self.mol.func
    def get_grad( self ):
        """Total energy and gradient: MM + QM + QM/MM LJ coupling."""
        self.mol.func = 0.0
        self.mol.grad = [ 0.0 for i in range( self.mol.natm * 3 ) ]
        self.emm.get_grad( self.mol )
        self.eqm.get_grad( self.mol )
        self.fix.get_grad( self.mol )
        self.func = self.mol.func
        self.grad = self.mol.grad[:]
# Minimize the system with FIRE, driving NAMD+xTB through the problem object.
obj = my_problem()
qm3.actions.minimize.fire( obj, print_frequency = 10,
    gradient_tolerance = 10.0, step_number = 1000, fire2 = True )
# QM region = segment A, residue 1; mobile region = 14 A sphere around it.
sqm = list( sorted( obj.mol.indx["A"][1].values() ) )
sel = obj.mol.sph_sel( sqm, 14.0 )
f = open( "02.sele.pk", "wb" )
pickle.dump( sel, f )
f.close()
# MM selection = sphere minus the QM atoms.
smm = list( set( sel ).difference( set( sqm ) ) )
f = open( "02.sele_MM.pk", "wb" )
pickle.dump( smm, f )
f.close()
# Everything outside the mobile MM selection is held fixed for later runs.
fix = list( set( range( obj.mol.natm ) ).difference( set( smm ) ) )
f = open( "02.fixed.pk", "wb" )
pickle.dump( fix, f )
f.close()
qm3.engines.namd.pdb_write( obj.mol, "02.prev.pdb", fix )
# Shut down the background NAMD process.
obj.emm.stop()
| 26.860294 | 116 | 0.557624 | 2,825 | 0.773337 | 0 | 0 | 0 | 0 | 0 | 0 | 1,362 | 0.372844 |
927fe9bfc9c6689c83acfebe0371f46c8bba2a23 | 1,791 | py | Python | wysdom/dom/DOMDict.py | jetavator/wysdom | 4c67c82a9df66370da5cf5347abd7450a52d3d03 | [
"Apache-2.0"
] | 1 | 2021-04-20T07:40:28.000Z | 2021-04-20T07:40:28.000Z | wysdom/dom/DOMDict.py | jetavator/wysdom | 4c67c82a9df66370da5cf5347abd7450a52d3d03 | [
"Apache-2.0"
] | 69 | 2020-05-13T07:13:49.000Z | 2021-05-06T18:26:21.000Z | wysdom/dom/DOMDict.py | jetavator/wysdom | 4c67c82a9df66370da5cf5347abd7450a52d3d03 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from typing import Generic, TypeVar, Optional, Any, Dict
from collections.abc import Mapping
from ..base_schema import Schema, SchemaAnything
from .DOMElement import DOMElement
from .DOMObject import DOMObject
from . import DOMInfo
from .DOMProperties import DOMProperties
T_co = TypeVar("T_co")
class DOMDict(DOMObject, Generic[T_co]):
"""
An object with dynamic properties (corresponding to a Python dict).
"""
def __init__(
self,
value: Optional[Mapping[str, Any]] = None,
json_dom_info: Optional[DOMInfo] = None,
item_type: Optional[Schema] = None,
) -> None:
"""
:param value: A dict (or any :class:`collections.abc.Mapping`) containing the data to populate this
object's properties.
:param json_dom_info: A :class:`~wysdom.dom.DOMInfo` named tuple containing information about this object's
position in the DOM.
:param item_type: A :class:`~wysdom.Schema` object specifying what constitutes a valid property
of this object.
"""
self.__json_schema_properties__ = DOMProperties(
additional_properties=(item_type or SchemaAnything())
)
super().__init__(value or {}, json_dom_info)
def __getitem__(self, key: str) -> T_co:
return super().__getitem__(key)
def __deepcopy__(self, memo: Dict[int, DOMElement]) -> DOMDict:
cls = self.__class__
result = cls(
value=self.to_builtin(),
json_dom_info=self.__json_dom_info__,
_item_type=self.__json_schema_properties__.additional_properties,
)
memo[id(self)] = result
return result
| 34.442308 | 115 | 0.638749 | 1,453 | 0.811279 | 0 | 0 | 0 | 0 | 0 | 0 | 592 | 0.330542 |
928094af66188381474817f8d7d387d3293b6b95 | 13,773 | py | Python | auto_updater.py | fgreinacher/homebrew-dotnet-sdk-versions | 4cf4eb88c7e2ffdb866d0ad2a4b72acef4139afb | [
"MIT"
] | null | null | null | auto_updater.py | fgreinacher/homebrew-dotnet-sdk-versions | 4cf4eb88c7e2ffdb866d0ad2a4b72acef4139afb | [
"MIT"
] | null | null | null | auto_updater.py | fgreinacher/homebrew-dotnet-sdk-versions | 4cf4eb88c7e2ffdb866d0ad2a4b72acef4139afb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import glob
import hashlib
import json
import os
import re
import requests
import urllib.request
class SdkVersion:
    """A .NET SDK version ``major.minor.Fpp`` (e.g. ``6.0.100``), where the
    third component packs a one-digit feature band and a two-digit patch.

    Orders lexicographically by (major, minor, feature, patch); equality and
    hashing are based on the full version string.
    """

    def __init__(self, version_string):
        version_split = version_string.split('.')
        self.major = int(version_split[0])
        self.minor = int(version_split[1])
        # Third component, e.g. "100": first digit is the feature band,
        # last two digits are the patch level.
        self.feature = int(version_split[2][0])
        self.patch = int(version_split[2][-2:])
        self.version_string = version_string

    def __str__(self):
        return self.version_string

    def getMajor(self):
        return '{0}'.format(self.major)

    def getMajorMinor(self):
        return '{0}.{1}'.format(self.major, self.minor)

    def getMajorMinorFeature(self):
        return '{0}.{1}.{2}'.format(self.major, self.minor, self.feature)

    def getMajorMinorFeaturePath(self):
        return self.version_string

    def _key(self):
        # Single source of truth for ordering comparisons.
        return (self.major, self.minor, self.feature, self.patch)

    def __eq__(self, other):
        return self.version_string == other.version_string

    def __ne__(self, other):
        return not self.version_string == other.version_string

    def __hash__(self):
        # Defining __eq__ removes the default hash; restore one consistent
        # with equality so instances stay usable in sets/dict keys.
        return hash(self.version_string)

    def __gt__(self, other):
        return other < self

    def __ge__(self, other):
        return not self < other

    def __le__(self, other):
        return not other < self

    def __lt__(self, other):
        # BUG FIX: the previous implementation OR-ed per-component "<" tests,
        # so e.g. 7.0.100 compared as less than 6.1.101 (because 0 < 1 on the
        # minor component). Correct ordering is lexicographic over the tuple.
        return self._key() < other._key()
class RuntimeVersion:
    """A .NET runtime version ``major.minor.patch`` (e.g. ``6.0.0``).

    Orders lexicographically by (major, minor, patch); equality and hashing
    are based on the full version string.
    """

    def __init__(self, version_string):
        version_split = version_string.split('.')
        self.major = int(version_split[0])
        self.minor = int(version_split[1])
        self.patch = int(version_split[2])
        self.version_string = version_string

    def __str__(self):
        return self.version_string

    def getMajor(self):
        return '{0}'.format(self.major)

    def getMajorMinor(self):
        return '{0}.{1}'.format(self.major, self.minor)

    def getMajorMinorPatch(self):
        return self.version_string

    def _key(self):
        # Single source of truth for ordering comparisons.
        return (self.major, self.minor, self.patch)

    def __eq__(self, other):
        return self.version_string == other.version_string

    def __ne__(self, other):
        return not self.version_string == other.version_string

    def __hash__(self):
        # Keep instances hashable (consistent with __eq__).
        return hash(self.version_string)

    def __gt__(self, other):
        return other < self

    def __ge__(self, other):
        return not self < other

    def __le__(self, other):
        return not other < self

    def __lt__(self, other):
        # BUG FIX: the previous implementation OR-ed per-component "<" tests,
        # which made e.g. 7.0.0 compare as less than 6.1.2. Lexicographic
        # tuple comparison gives the intended ordering.
        return self._key() < other._key()
class Application:
    """Checks every cask under Casks/ for a newer .NET SDK release and, when
    one exists, rewrites the cask file, updates the README and pushes a git
    branch with the change (entry point: run())."""
    # `version "6.0.100,6.0.0"`
    version_pattern = re.compile('version "([0-9.,]+)"')
    # `sha256 "d290cefddb4fbdf1215724c049d86b4ce09f5dc2c5a658e3c1645c368f34c31a"`
    sha_256_pattern = re.compile('sha256 "([0-9a-z]+)"')
    # `url "https://download.visualstudio.microsoft.com/download/pr/38102737-cb48-46c2-8f52-fb7102b50ae7/d81958d71c3c2679796e1ecfbd9cc903/dotnet-sdk-#{version.before_comma}-osx-x64.pkg"`
    url_pattern = re.compile('url "([^\s]+)"')
    # Architecture-specific stanzas used by casks that support Apple Silicon.
    sha_256_x64_pattern = re.compile('sha256_x64 = "([0-9a-z]+)"')
    sha_256_arm64_pattern = re.compile('sha256_arm64 = "([0-9a-z]+)"')
    url_x64_pattern = re.compile('url_x64 = "([^\s]+)"')
    url_arm64_pattern = re.compile('url_arm64 = "([^\s]+)"')
    # NOTE(review): presumably gates actual git pushes (dry-run when False);
    # confirm against the git helpers defined further down this class.
    really_push = False
    @staticmethod
    def run():
        """Entry point: for each cask in Casks/, find the newest SDK release
        in the cask's feature band; if it is newer than the cask's current
        version, update the cask (x64-only or x64+arm64 variant), the README,
        and push a git branch with the change."""
        for file_path in glob.glob('Casks/*.rb'):
            Application._output("------------------------------------")
            Application._output("{0}: Checking for updates ...".format(file_path))
            (sdk_version, runtime_version) = Application._find_versions(file_path)
            Application._log('sdk_version', sdk_version)
            Application._log('runtime_version', runtime_version)
            is_arm_supported = Application._cask_supports_arm(file_path)
            Application._log('is_arm_supported', is_arm_supported)
            releases_json = Application._download_release_json(sdk_version)
            (latest_sdk_release, latest_sdk_release_version) = Application._find_latest_sdk_release(sdk_version, releases_json)
            if latest_sdk_release is None:
                Application._output("No latest version found for {0}. Skipping.".format(file_path))
                continue
            if latest_sdk_release_version <= sdk_version:
                Application._output("Latest version[{0}] is not greater than current version[{1}]. Skipping".format(latest_sdk_release_version, sdk_version))
                continue
            # Prepare the branch before editing so the changes land on it.
            git_branch_name = Application._prepare_git_branch(file_path, latest_sdk_release)
            is_cask_updated = False
            if is_arm_supported:
                is_cask_updated = Application._update_intel_arm_cask(file_path, latest_sdk_release)
            else:
                is_cask_updated = Application._update_intel_only_cask(file_path, latest_sdk_release)
            if is_cask_updated:
                Application._update_read_me(sdk_version, latest_sdk_release)
                Application._push_git_branch(file_path, sdk_version, latest_sdk_release, git_branch_name)
    @staticmethod
    def _find_versions(file_path):
        """Parse the cask at *file_path* and return its current
        (SdkVersion, RuntimeVersion) pair taken from the first
        `version "sdk,runtime"` stanza; raises if none is found."""
        with open(file_path, 'r') as file:
            for line in file:
                match = Application.version_pattern.search(line.strip())
                if not match:
                    continue
                # split `6.0.100,6.0.0` on comma
                version_split = match.group(1).split(',')
                return SdkVersion(version_split[0]), RuntimeVersion(version_split[1])
        raise Exception('Cannot find version in cask: {0}'.format(file_path))
    @staticmethod
    def _download_release_json(sdk_version):
        """Fetch and parse dotnet/core's releases.json for the cask's
        major.minor channel.

        NOTE(review): the URL pins the `master` branch of dotnet/core —
        confirm it still exists upstream (default branches have been renamed
        to `main` in many repos)."""
        sdk_major_minor_version = sdk_version.getMajorMinor()
        url = 'https://raw.githubusercontent.com/dotnet/core/master/release-notes/{}/releases.json'.format(sdk_major_minor_version)
        with urllib.request.urlopen(url) as f:
            return json.loads(f.read().decode('utf-8'))
@staticmethod
def _find_latest_sdk_release(sdk_version, releases_json):
sdk_major_minor_feature_version = sdk_version.getMajorMinorFeature()
sdk_major_minor_feature_version_regex = '^' + sdk_major_minor_feature_version + '[0-9]{2}$'
latest_sdk_release = None
latest_sdk_release_version = None
releases = releases_json['releases']
for release in releases:
match = re.search(sdk_major_minor_feature_version_regex, release['sdk']['version'])
if not match:
continue
if latest_sdk_release == None:
latest_sdk_release = release
latest_sdk_release_version = SdkVersion(release['sdk']['version'])
else:
release_version = SdkVersion(release['sdk']['version'])
if release_version > latest_sdk_release_version:
latest_sdk_release = release
latest_sdk_release_version = release_version
return latest_sdk_release, latest_sdk_release_version
@staticmethod
def _cask_supports_arm(file_path):
with open(file_path, 'r') as file:
for line in file:
if 'arch = Hardware::CPU.intel? ? "x64" : "arm64"' in line.strip():
return True
return False
    @staticmethod
    def _update_intel_only_cask(file_path, latest_sdk_release):
        """Rewrite an Intel-only cask in place with the new release's version,
        sha256 and url. Returns False when the x64 download URL cannot be
        found/verified, True after a successful rewrite."""
        sdk_url, sha_256 = Application._find_download_and_verify_sdk_url(latest_sdk_release, 'x64')
        if sdk_url is None:
            return False
        with open(file_path, 'r') as file:
            content = file.read()
        # url needs to have SOME version interpolation to make brew audit happy
        url_with_interpolation = sdk_url.replace(latest_sdk_release['sdk']['version'], '#{version.before_comma}')
        new_version = 'version "{0},{1}"'.format(latest_sdk_release['sdk']['version'], latest_sdk_release['runtime']['version'])
        new_sha_256 = 'sha256 "{0}"'.format(sha_256)
        new_url = 'url "{0}"'.format(url_with_interpolation)
        Application._log('new_version', new_version)
        Application._log('new_sha_256', new_sha_256)
        Application._log('new_url', new_url)
        # Substitute the stanzas via the class-level regexes, then write back.
        content = Application.version_pattern.sub(new_version, content)
        content = Application.sha_256_pattern.sub(new_sha_256, content)
        content = Application.url_pattern.sub(new_url, content)
        with open(file_path, 'w') as file:
            file.write(content)
        return True
    @staticmethod
    def _update_intel_arm_cask(file_path, latest_sdk_release):
        """Rewrite the universal (Intel + ARM) cask at *file_path* with the
        version plus per-arch sha256/url of *latest_sdk_release*.

        Returns True when the file was rewritten, False when either arch's
        installer could not be found or verified.
        """
        x64_sdk_url, x64_sha_256 = Application._find_download_and_verify_sdk_url(latest_sdk_release, 'x64')
        arm64_sdk_url, arm64_sha_256 = Application._find_download_and_verify_sdk_url(latest_sdk_release, 'arm64')
        if x64_sdk_url is None or arm64_sdk_url is None:
            return False
        with open(file_path, 'r') as file:
            content = file.read()
        # url needs to have SOME version interpolation to make brew audit happy
        x64_url_with_interpolation = x64_sdk_url.replace(latest_sdk_release['sdk']['version'], '#{version.before_comma}')
        arm64_url_with_interpolation = arm64_sdk_url.replace(latest_sdk_release['sdk']['version'], '#{version.before_comma}')
        new_version = 'version "{0},{1}"'.format(latest_sdk_release['sdk']['version'], latest_sdk_release['runtime']['version'])
        # Universal casks store arch-specific values in Ruby locals, hence the ' = '.
        new_x64_sha_256 = 'sha256_x64 = "{0}"'.format(x64_sha_256)
        new_x64_url = 'url_x64 = "{0}"'.format(x64_url_with_interpolation)
        new_arm64_sha_256 = 'sha256_arm64 = "{0}"'.format(arm64_sha_256)
        new_arm64_url = 'url_arm64 = "{0}"'.format(arm64_url_with_interpolation)
        Application._log('new_version', new_version)
        Application._log('new_x64_sha_256', new_x64_sha_256)
        Application._log('new_x64_url', new_x64_url)
        Application._log('new_arm64_sha_256', new_arm64_sha_256)
        Application._log('new_arm64_url', new_arm64_url)
        # The *_pattern regexes are class attributes defined elsewhere in this file.
        content = Application.version_pattern.sub(new_version, content)
        content = Application.sha_256_x64_pattern.sub(new_x64_sha_256, content)
        content = Application.url_x64_pattern.sub(new_x64_url, content)
        content = Application.sha_256_arm64_pattern.sub(new_arm64_sha_256, content)
        content = Application.url_arm64_pattern.sub(new_arm64_url, content)
        with open(file_path, 'w') as file:
            file.write(content)
        return True
@staticmethod
def _update_read_me(sdk_release, latest_sdk_release):
file_path = 'README.md'
with open(file_path, 'r') as file:
content = file.read()
content = content.replace(str(sdk_release), latest_sdk_release['sdk']['version'])
with open(file_path, 'w') as file:
file.write(content)
    @staticmethod
    def _find_download_and_verify_sdk_url(sdk_release, arch):
        """Locate the *arch* installer of *sdk_release*, download it and
        verify its published sha512.

        Returns (url, sha256-of-download) on success, or (None, None) when
        the installer is missing or the downloaded bytes do not match the
        published sha512.
        """
        sdk_url, sdk_sha_512 = Application._find_sdk_url(sdk_release, arch)
        if sdk_url is None:
            Application._output("Could not find sdk url for sdk_release[{0}]. Skipping".format(sdk_release['sdk']['version']))
            return None, None
        sha_256, sha_512 = Application._download_and_calculate_sha256(sdk_url)
        # Verify integrity of the download against the hash published in the release.
        if not sdk_sha_512 == sha_512:
            Application._output("Downloaded sha512[{0}] does not match provided sha512[{1}]. Man-in-the-middle? Skipping".format(sha_512, sdk_sha_512))
            return None, None
        return sdk_url, sha_256
@staticmethod
def _find_sdk_url(sdk_release, arch):
name = 'dotnet-sdk-osx-{}.pkg'.format(arch)
for file in sdk_release['sdk']['files']:
if file['name'] == name:
return file['url'], file['hash']
return None
    @staticmethod
    def _download_and_calculate_sha256(url):
        """Stream *url* and return its (sha256, sha512) hex digests.

        The response is consumed in 8 KiB chunks so large installers are
        never held in memory.
        """
        sha256 = hashlib.sha256()
        sha512 = hashlib.sha512()
        with requests.get(url, stream = True) as r:
            r.raise_for_status()
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:  # skip keep-alive chunks
                    sha256.update(chunk)
                    sha512.update(chunk)
        return sha256.hexdigest(), sha512.hexdigest()
    @staticmethod
    def _prepare_git_branch(file_path, latest_sdk_release):
        """Create (or reuse) the update branch for *file_path* and reset it
        to origin/master. Returns the branch name.

        Shell commands only run when Application.really_push is set
        (dry-run guard).
        """
        branch_name = "update-{0}-to-{1}".format(file_path, latest_sdk_release['sdk']['version'])
        if Application.really_push:
            # Create the branch if it is new, otherwise just switch to it.
            os.system('git checkout -b "{0}" || git checkout "{0}"'.format(branch_name))
            os.system('git reset --hard origin/master')
        return branch_name
    @staticmethod
    def _push_git_branch(file_path, sdk_version, latest_sdk_release, branch_name):
        """Commit the updated cask and README, force-push *branch_name* and
        open a pull request via the `hub` CLI.

        Shell commands only run when Application.really_push is set
        (dry-run guard).
        """
        commit_message = '[Auto] update {0} from {1} to {2}'.format(file_path, str(sdk_version), latest_sdk_release['sdk']['version'])
        if Application.really_push:
            os.system('git add {0}'.format(file_path))
            os.system('git add {0}'.format('README.md'))
            os.system('git commit -m "{0}"'.format(commit_message))
            # Force push: the branch may already exist from a previous run.
            os.system('git push origin --force {0}'.format(branch_name))
            os.system('hub pull-request --base master --head "{0}" -m "{1}"'.format(branch_name, commit_message))
@staticmethod
def _log(name, value = ''):
print('{0}: {1}'.format(name, str(value)))
    @staticmethod
    def _output(message):
        """Print a user-facing message to stdout."""
        print(message)
if __name__ == "__main__":
    # Parse the --really_push safety flag before running the updater.
    parser = argparse.ArgumentParser()
    parser.add_argument("--really_push", action='store_true', default=False, help='Indicates whether we really push to git or not')
    args = parser.parse_args()
    # Stored as a class attribute so the git helpers can honour dry-run mode.
    Application.really_push = args.really_push
    Application.run()
| 38.046961 | 186 | 0.650839 | 13,326 | 0.967545 | 0 | 0 | 10,094 | 0.732883 | 0 | 0 | 2,287 | 0.16605 |
9280ceaa2b70f59e4500ed7d8d1c93dba1aab988 | 4,487 | py | Python | template_creator/reader/strategies/GoStrategy.py | VanOvermeire/sam-template-creator | 0b39440c9051ccd30fc80bfa2e4d7da40c7e50b7 | [
"MIT"
] | 3 | 2019-06-10T19:46:23.000Z | 2021-05-06T12:15:45.000Z | template_creator/reader/strategies/GoStrategy.py | VanOvermeire/sam-template-creator | 0b39440c9051ccd30fc80bfa2e4d7da40c7e50b7 | [
"MIT"
] | 2 | 2019-10-20T14:57:50.000Z | 2020-01-01T00:52:32.000Z | template_creator/reader/strategies/GoStrategy.py | VanOvermeire/sam-template-creator | 0b39440c9051ccd30fc80bfa2e4d7da40c7e50b7 | [
"MIT"
] | 2 | 2019-10-19T07:40:53.000Z | 2019-10-19T08:29:40.000Z | import re
from template_creator.reader.config.iam_config import GO_EXCEPTIONS
from template_creator.reader.strategies.language_strategy_common import find_variables_in_line_of_code, find_api, find_events
class GoStrategy:
    """Language strategy for Go Lambda sources.

    Extracts handler names, trigger events, environment variables, IAM
    permissions and imports by scanning the Go source text line by line.
    """
    def build_camel_case_name(self, dir_name, file):
        """Build a CamelCase name from *dir_name*, or from *file*'s base name
        (without '.go') when the directory is '.'."""
        if '/' in dir_name:
            dir_name = dir_name[dir_name.rfind('/') + 1:]
        elif '.' == dir_name:
            dir_name = file[file.rfind('/') + 1:file.find('.go')]
        components = dir_name.split('_')
        return ''.join(x.title() for x in components)
    def build_handler(self, directory, file, handler_line, executable):
        """Return the executable's base name, or the default 'handler'."""
        if executable:
            return executable.rsplit('/', 1)[1]
        return 'handler'
    def find_events(self, handler_line):
        """Extract the event type name that follows the 'Context, ' parameter
        in the handler signature and resolve it via the module-level
        find_events helper (which this method name shadows inside the class).
        Returns None when the signature has no such parameter."""
        try:
            lambda_full_event = handler_line[handler_line.index('Context, ') + 9:handler_line.index(')')]
            lambda_event = lambda_full_event[0:lambda_full_event.index(' ')]
            return find_events(lambda_event)
        except ValueError:
            return None
    def find_api(self, handler_line):
        """Split the handler function name preceding 'Request' at capital
        letters and resolve the parts via the module-level find_api helper.
        Returns [] when the line does not look like an API handler."""
        try:
            handler_prefix = handler_line[handler_line.index('func ') + 5:handler_line.index('Request')]
            split_prefix = re.split('(?=[A-Z])', handler_prefix)
            return find_api(split_prefix)
        except ValueError:
            return []
    def find_env_variables(self, lines):
        """Collect the distinct variable names read via os.Getenv in *lines*."""
        variables = set()
        first_regex = re.compile(r'.*os.Getenv\(.*')
        first_regex_results = list(filter(first_regex.search, lines))
        for result in first_regex_results:
            variables.update(find_variables_in_line_of_code(result, 'os.Getenv(', ')'))
        return list(variables)
    def find_permissions(self, lines):
        """Map aws-sdk-go service imports in *lines* to 'service:*' permission
        strings, applying the GO_EXCEPTIONS renames where needed."""
        clients = set()
        regex = re.compile(r'.*github.com/aws/aws-sdk-go/service/.*')
        results = list(filter(regex.search, lines))
        for result in results:
            client = result.replace('github.com/aws/aws-sdk-go/service/', '').replace('"', '').strip()
            if '/' in client:
                # Keep only the service root of nested import paths.
                client = client[:client.find('/')]
            if client in GO_EXCEPTIONS:
                client = GO_EXCEPTIONS[client]
            clients.add('{}:*'.format(client))
        return list(clients)
    @staticmethod
    def is_handler_file(lines):
        """Return (True, first line mentioning the function passed to
        lambda.Start) when the file starts a Lambda handler, else (False, None)."""
        regex = re.compile(r'\s*lambda.Start\(')
        result = list(filter(regex.search, lines))
        if result:
            first_result = result[0]
            function_name = first_result[first_result.index('lambda.Start(') + 13: first_result.index(')')]
            return True, [line for line in lines if function_name in line][0]
        return False, None
    @staticmethod
    def find_invoked_files(handler_file_lines):
        """Scan Go import statements (single and block form), skipping '//'
        comment lines, and record quoted import paths via
        __add_if_not_system_or_github. Returns a dict of path -> '*'.
        NOTE(review): the quote slicing keeps the opening '"' in *result*;
        the later split on '/' happens to drop it — confirm intended."""
        lines_without_comments = [x for x in handler_file_lines if not x.strip().startswith('//')]
        results = dict()
        i = 0
        while i < len(lines_without_comments):
            line = lines_without_comments[i].strip()
            if line.startswith('import "'):
                import_statement = line.replace('import', '')
                result = import_statement[import_statement.find('"'):import_statement.rfind('"')]
                GoStrategy.__add_if_not_system_or_github(result, results)
            elif line.startswith('import ('):
                if '"' in line:
                    # Import path on the same line as the opening parenthesis.
                    import_statement = line.replace('import (', '')
                    result = import_statement[import_statement.find('"'):import_statement.rfind('"')]
                    GoStrategy.__add_if_not_system_or_github(result, results)
                # Consume the rest of the import block until the closing ')'.
                while ')' not in line:
                    line = lines_without_comments[i].strip()
                    if '"' in line:
                        result = line[line.find('"'):line.rfind('"')]
                        GoStrategy.__add_if_not_system_or_github(result, results)
                    i += 1
            i += 1
        return results
    @staticmethod
    def __add_if_not_system_or_github(result, results):
        """Record *result* in *results* (value '*') when it is a multi-segment
        path that is not hosted on github.com; the leading segment (through the
        first '/') is stripped."""
        if '/' in result and 'github.com' not in result:
            result = result[result.find('/') + 1:]
            results[result] = '*'
    @staticmethod
    def get_executable_glob():
        """Glob used to locate compiled Go executables."""
        return '**/main'
    @staticmethod
    def remove_commented_lines(lines):
        """Drop lines that are '//' comments."""
        return [line for line in lines if not line.strip().startswith('//')]
    def __repr__(self):
        return self.__class__.__name__
| 34.251908 | 125 | 0.595944 | 4,279 | 0.953644 | 0 | 0 | 2,094 | 0.466682 | 0 | 0 | 352 | 0.078449 |
9280f582ef5d54e9016b69411e4065df25b247b7 | 2,650 | py | Python | indicators/data.py | WPRDC/community-simulacrum | c463de3dac1749c82537c6c6ac74c50eb5b9be75 | [
"MIT"
] | null | null | null | indicators/data.py | WPRDC/community-simulacrum | c463de3dac1749c82537c6c6ac74c50eb5b9be75 | [
"MIT"
] | 6 | 2020-12-18T17:21:35.000Z | 2021-03-03T21:08:44.000Z | indicators/data.py | WPRDC/community-simulacrum | c463de3dac1749c82537c6c6ac74c50eb5b9be75 | [
"MIT"
] | null | null | null | import dataclasses
import typing
from dataclasses import dataclass
from typing import List
from typing import Optional
from profiles.settings import DENOM_DKEY, VALUE_DKEY, GEOG_DKEY, TIME_DKEY
if typing.TYPE_CHECKING:
from indicators.models import CensusVariable, CKANVariable
@dataclass
class Datum:
    """A single observed value for one variable, geography and time.

    Optional fields carry the margin of error (``moe``), the denominator used
    for percentage calculations (``denom``) and the resulting ``percent``.
    """
    variable: str
    geog: str
    time: str
    value: Optional[float] = None
    moe: Optional[float] = None
    percent: Optional[float] = None
    denom: Optional[float] = None
    @staticmethod
    def from_census_response_datum(variable: 'CensusVariable', census_datum) -> 'Datum':
        """Build a Datum from one census response record (missing keys -> None)."""
        return Datum(
            variable=variable.slug,
            geog=census_datum.get('geog'),
            time=census_datum.get('time'),
            value=census_datum.get('value'),
            moe=census_datum.get('moe'),
            denom=census_datum.get('denom'),
            percent=census_datum.get('percent'), )
    @staticmethod
    def from_census_response_data(variable: 'CensusVariable', census_data: list[dict]) -> List['Datum']:
        """Build Datums for a whole list of census response records."""
        return [Datum.from_census_response_datum(variable, census_datum) for census_datum in census_data]
    @staticmethod
    def from_ckan_response_datum(variable: 'CKANVariable', ckan_datum) -> 'Datum':
        """Build a Datum from one CKAN record; derives percent when a
        denominator column is present."""
        denom, percent = None, None
        if DENOM_DKEY in ckan_datum:
            denom = ckan_datum[DENOM_DKEY]
            percent = (ckan_datum[VALUE_DKEY] / ckan_datum[DENOM_DKEY])
        return Datum(variable=variable.slug,
                     geog=ckan_datum[GEOG_DKEY],
                     time=ckan_datum[TIME_DKEY],
                     value=ckan_datum[VALUE_DKEY],
                     denom=denom,
                     percent=percent)
    @staticmethod
    def from_ckan_response_data(variable: 'CKANVariable', ckan_data: list[dict]) -> List['Datum']:
        """Build Datums for a whole list of CKAN records."""
        return [Datum.from_ckan_response_datum(variable, ckan_datum) for ckan_datum in ckan_data]
    def update(self, **kwargs):
        """ Creates new Datum similar to the instance with new values from kwargs """
        return Datum(**{**self.as_dict(), **kwargs})
    def with_denom_val(self, denom_val: Optional[float]):
        """ Merge the denom value and generate the percent.

        A ``None`` denominator — allowed by the signature — previously
        crashed the division; it now simply leaves percent/denom unset.
        """
        if denom_val is None:
            return dataclasses.replace(self, denom=None, percent=None)
        return dataclasses.replace(self, denom=denom_val, percent=(self.value / denom_val))
    def as_dict(self):
        """Full field dict, suitable for re-constructing a Datum."""
        return {'variable': self.variable, 'geog': self.geog, 'time': self.time,
                'value': self.value, 'moe': self.moe, 'percent': self.percent, 'denom': self.denom}
    def as_value_dict(self):
        """Only the numeric fields, without the variable/geog/time keys."""
        return {'value': self.value, 'moe': self.moe, 'percent': self.percent, 'denom': self.denom}
| 37.857143 | 105 | 0.655472 | 2,352 | 0.887547 | 0 | 0 | 2,363 | 0.891698 | 0 | 0 | 337 | 0.12717 |
9281d8f67c6e76e88dcbf1ff3a04ce086acc8c7f | 886 | py | Python | support.py | Soarxyn/PEF-Structural-Analysis | db6067d216539266cd3c40682f85070ccc082f89 | [
"MIT"
] | 4 | 2020-03-20T20:45:12.000Z | 2020-06-19T13:03:41.000Z | support.py | Soarxyn/PEF-Structural-Analysis | db6067d216539266cd3c40682f85070ccc082f89 | [
"MIT"
] | null | null | null | support.py | Soarxyn/PEF-Structural-Analysis | db6067d216539266cd3c40682f85070ccc082f89 | [
"MIT"
] | null | null | null | from typing import Tuple
from enum import Enum
from auxiliary.algebra import Vector3, psin, pcos
# this is an auxiliary class used for initializing the Support class's members' values
class SupportType(Enum):
SIMPLE: Tuple = (1, 0) # tuple values are the number
PINNED: Tuple = (2, 0) # of forces and the number
FIXED: Tuple = (2, 1) # of moments, in that order
# this is a class that defines one of three support types: simple, pinned or fixed
class Support:
def __init__(self, name: str, angle: float = 0):
# this member is the reaction vector from the support
# its values are used for solving the system
self.reaction: Vector3 = Vector3(0, 0, 0)
if SupportType[name].value[0] > 1:
self.reaction.x = 1
self.reaction.y = 1
else:
self.reaction.x = pcos(angle)
self.reaction.y = psin(angle)
if SupportType[name].value[1] == 1:
self.reaction.z = 1
| 32.814815 | 86 | 0.705418 | 615 | 0.694131 | 0 | 0 | 0 | 0 | 0 | 0 | 347 | 0.391648 |
9282012d5de8ff8454e76abbaa57322969482ad0 | 11,677 | py | Python | picaact.py | yindaheng98/picacomic | 167ba977192d27f6d27a911f748bf018b61d2acf | [
"Apache-2.0"
] | null | null | null | picaact.py | yindaheng98/picacomic | 167ba977192d27f6d27a911f748bf018b61d2acf | [
"Apache-2.0"
] | null | null | null | picaact.py | yindaheng98/picacomic | 167ba977192d27f6d27a911f748bf018b61d2acf | [
"Apache-2.0"
] | null | null | null | import os
import re
import logging
import sqlite3
import json
import threading
from picaapi import PicaApi
from urllib import parse
from multiprocessing.pool import ThreadPool
class PicaAction:
    """High-level workflow around PicaApi: login/token caching, mirroring the
    account's favourites, episode lists and download status into a local
    SQLite database, and downloading comic images with a thread pool.

    User-facing log messages are intentionally kept in Chinese (runtime
    strings); documentation below is in English.
    """
    def __init__(self, account, password,
                 proxies=None, threadn=5,
                 data_path=os.path.join(os.path.split(__file__)[0], "data"),
                 db_path=os.path.join(os.path.split(__file__)[0], "data", "data.db"),
                 global_url="https://picaapi.picacomic.com/",
                 api_key="C69BAF41DA5ABD1FFEDC6D2FEA56B",
                 secret_key="~d}$Q7$eIni=V)9\\RK/P.RM4;9[7|@/CA}b~OW!3?EV`:<>M7pddUBL5n|0/*Cn"):
        # Log: "PicaAction starting...". Set up the API client, the download
        # directory and the SQLite connection, then authenticate.
        logging.info("PicaAction启动中......")
        self.picaapi = PicaApi(proxies=proxies,
                               global_url=global_url,
                               api_key=api_key,
                               secret_key=secret_key)
        self.download_path = data_path
        if not os.path.exists(data_path):
            os.makedirs(data_path)
        self.db = sqlite3.connect(db_path)
        self.__login(account, password)
        self.account = account
        # Number of worker threads used per episode download.
        self.threadn = threadn
    def __ExecuteSQL(self, sql, args=None):
        """Execute *sql* (optionally parameterised with *args*), commit, and
        return all fetched rows."""
        cur = self.db.cursor()
        if args == None:
            logging.info("Executing in DB: %s" % sql)
            __res = cur.execute(sql).fetchall()
        else:
            logging.info("Executing in DB: %s,%s" % (sql, str(args)))
            __res = cur.execute(sql, args).fetchall()
        self.db.commit()
        return __res
    def __login(self, account, password):
        """Authenticate against the API, reusing a cached token from the
        `account` table when it is still valid, otherwise fetching and
        storing a fresh one."""
        logging.info("%s 登录......" % account)
        _ = self.__ExecuteSQL(
            "create table if not exists account (email text PRIMARY KEY NOT NULL, password text, token text);")
        logging.info("从数据库中查找 %s 的token......" % account)
        token = self.__ExecuteSQL("select token from account where email=?;",
                                  (account,))
        def gettoken():
            # Fetch a fresh token from the API.
            logging.info("为 %s 获取新的token......" % account)
            token = self.picaapi.login(account, password)
            logging.info("%s 的新token已获取: %s" % (account, token))
            return token
        if len(token) > 0:
            token = token[0][0]
            logging.info("数据库中有 %s 的token: %s" % (account, token))
            self.picaapi.set_authorization(token)
            # Probe the profile endpoint to see whether the cached token still works.
            logging.info("测试数据库中 %s 的token是否有效......" % account)
            profile = self.picaapi.profile()
            if profile["code"] == 200:
                logging.info("数据库中 %s 的token有效" % account)
            else:
                logging.info("数据库中 %s 的token失效" % account)
                token = gettoken()
                self.__ExecuteSQL("update account set token=? where email=?;",
                                  (token, account))
        else:
            logging.info("数据库中没有 %s 的token" % account)
            token = gettoken()
            self.__ExecuteSQL("insert into account (email, password, token)values (?, ?, ?);",
                              (account, password, token))
        self.picaapi.set_authorization(token)
    def __travel_favourites_ol(self, limit=None, order="dd"):
        """Yield favourites from the online API, page by page, stopping after
        *limit* items when given.
        NOTE(review): when pages < 2 the generator returns without yielding
        the first page at all — looks like a bug; confirm intent."""
        def islimited():
            # Decrement the remaining budget; True once it is exhausted.
            nonlocal limit
            if limit != None:
                limit -= 1
                if limit <= 0:
                    return True
            return False
        pages = self.picaapi.favourite(1, order=order)['pages']
        if pages < 2:
            return
        for i in range(1, pages+1):
            docs = self.picaapi.favourite(i, order=order)['docs']
            for favourite in docs:
                yield favourite
                if islimited():
                    return
    def __travel_favourites_db(self, limit=None):
        """Yield (data, detail) JSON pairs for favourites stored in the local
        `comics` table, up to *limit* rows when given."""
        favourites = []
        if limit != None:
            favourites = self.__ExecuteSQL(
                "select * from comics limit %d;" % limit)
        else:
            favourites = self.__ExecuteSQL("select * from comics;")
        for favourite in favourites:
            data = json.loads(favourite[1])
            detail = json.loads(favourite[2])
            yield data, detail
    def gather_favourites_ol(self, n=None, order="dd"):
        """Return up to *n* favourites fetched from the online API as a list."""
        favourites = []
        for favourite in self.__travel_favourites_ol(limit=n, order=order):
            favourites.append(favourite)
        return favourites
    def gather_favourites_db(self, n=None):
        """Return (favourites, details) lists read from the local database."""
        favourites = []
        details = []
        for favourite, detail in self.__travel_favourites_db(limit=n):
            favourites.append(favourite)
            details.append(detail)
        return favourites, details
    def __insert_favourite(self, favourite):
        """Fetch the comic detail and upsert the favourite into the `comics`
        and `favourites` tables; silently skips when the detail fetch fails."""
        detail = self.picaapi.comics(favourite["_id"])
        if detail == None:
            return
        _ = self.__ExecuteSQL("insert or REPLACE into comics (id, data, detail)values(?, ?, ?);",
                              (favourite["_id"], json.dumps(favourite), json.dumps(detail)))
        _ = self.__ExecuteSQL("insert or REPLACE into favourites (id, user)values(?, ?);",
                              (favourite["_id"], self.account))
    def init_favourites(self, n=None):
        """Create the favourites tables and import up to *n* favourites from
        the online list (oldest first, order 'da')."""
        logging.info("初始化 %s 的收藏列表......" % self.account)
        _ = self.__ExecuteSQL(
            "create table if not exists comics (id text PRIMARY KEY NOT NULL, data json, detail json);")
        _ = self.__ExecuteSQL(
            "create table if not exists favourites (id text, user text, PRIMARY KEY(id, user)," +
            "FOREIGN KEY(id) REFERENCES comics(id)," +
            "FOREIGN KEY(user) REFERENCES account(email));")
        for favourite in self.__travel_favourites_ol(limit=n, order="da"):
            self.__insert_favourite(favourite)
        logging.info("%s 的收藏列表初始化完成" % self.account)
    def append_favourites(self, n=None):
        """Write newly-added online favourites into the database. Without *n*
        the scan stops at the first favourite already present; with *n* it
        keeps scanning up to *n* items."""
        logging.info("将 %s 的收藏列表中的新增收藏写入数据库......" % self.account)
        for favourite in self.__travel_favourites_ol(limit=n):
            fs = self.__ExecuteSQL("select * from favourites where id=? and user=?;",
                                   (favourite["_id"], self.account))
            if len(fs) > 0:
                logging.info("%s 的收藏 %s 已入数据库......" %
                             (self.account, favourite["_id"]))
                if n == None:
                    break
                else:
                    logging.info("设置了更新收藏的数量为 %d,继续......" % n)
            else:
                logging.info("%s 的收藏 %s 未入数据库......" %
                             (self.account, favourite["_id"]))
                self.__insert_favourite(favourite)
        logging.info("%s 的收藏列表中的新增收藏已写入数据库" % self.account)
    def update_finish_status(self):
        """Re-fetch every stored favourite that is not yet marked finished,
        refreshing its data/detail rows."""
        logging.info("更新数据库中已有收藏的finish状态......")
        for favourite, _ in self.__travel_favourites_db():
            if not favourite["finished"]:
                self.__insert_favourite(favourite)
        logging.info("数据库中已有收藏的finish状态已更新......")
    def __travel_episodes_ol(self, id):
        """Yield the episodes of comic *id* from the online API.
        NOTE(review): the second loop iterates the tuple (2, pages+1) instead
        of range(2, pages+1), so only two extra pages are fetched — looks
        like a bug; confirm intent."""
        data = self.picaapi.eps(id, 1)
        if data is None:
            return
        pages, epss = data["pages"], data["docs"]
        for eps in epss:
            yield eps
        if pages < 2:
            return
        for i in (2, pages+1):
            epss = self.picaapi.eps(id, i)['docs']
            for eps in epss:
                yield eps
    def init_episode(self, id):
        """Upsert every episode of comic *id* into the `episodes` table."""
        logging.info("初始化漫画%s的分话列表......" % id)
        for eps in self.__travel_episodes_ol(id):
            _ = self.__ExecuteSQL("insert or REPLACE into episodes (id, data, comic)values(?, ?, ?);",
                                  (eps["_id"], json.dumps(eps), id))
        logging.info("漫画%s的分话列表初始化完成" % id)
    def init_episodes(self):
        """Create the `episodes` table and import the episode lists of every
        stored comic."""
        logging.info("初始化系统内所有漫画的分话列表......")
        _ = self.__ExecuteSQL(
            "create table if not exists episodes (id text PRIMARY KEY NOT NULL, data json, comic text," +
            "FOREIGN KEY(id) REFERENCES comics(id));")
        for favourite, _ in self.__travel_favourites_db():
            self.init_episode(favourite["_id"])
        logging.info("系统内所有漫画的分话列表初始化完成")
    def update_episodes(self):
        """Refresh the episode lists of every comic not yet marked finished."""
        logging.info("更新系统内所有未完成漫画的分话列表......")
        for favourite, _ in self.__travel_favourites_db():
            if not favourite["finished"]:
                self.init_episode(favourite["_id"])
        logging.info("系统内所有未完成漫画的分话列表初始化完成")
    def append_download_status(self):
        """Create the `status` table and add a finished=FALSE row for every
        episode that has no status record yet."""
        logging.info("为系统内新增的分话添加下载状态记录......")
        _ = self.__ExecuteSQL(
            "create table if not exists status (id text PRIMARY KEY NOT NULL, finished bool," +
            "FOREIGN KEY(id) REFERENCES episodes(id));")
        _ = self.__ExecuteSQL(
            "insert into status(id, finished) select id,FALSE from" +
            "(select * from episodes left outer join status on status.id=episodes.id)" +
            "where finished IS NULL;")
        logging.info("已为系统内新增的分话添加下载状态记录")
    def reset_download_status(self):
        """Mark every episode as not downloaded (finished=FALSE)."""
        logging.info("重置系统内的分话下载状态记录......")
        _ = self.__ExecuteSQL(
            "insert or REPLACE into status(id, finished) select id,FALSE from episodes;")
        logging.info("系统内的分话下载状态记录已重置")
    def __travel_img(self, comic, order):
        """Yield the image records of episode *order* of *comic*, page by page."""
        data = self.picaapi.pages(comic, order, 1)
        pages, docs = data['pages'], data["docs"]
        for img in docs:
            yield img
        if pages < 2:
            return
        for i in range(2, pages+1):
            docs = self.picaapi.pages(comic, order, i)['docs']
            for img in docs:
                yield img
    def __download(self, comic, eps):
        """Download all images of episode *eps* of *comic* with a thread pool
        and mark the episode finished in the `status` table."""
        order = eps["order"]
        logging.info("开始下载漫画%s的分话%s" % (comic["_id"], eps["_id"]))
        threadpool = ThreadPool(processes=self.threadn)
        for data in self.__travel_img(comic["_id"], order):
            media = data["media"]
            url = parse.urljoin(media["fileServer"],
                                "static/"+media["path"])
            path = None
            def cor_dirname(dn):
                # Strip characters that are illegal in directory names.
                dn = re.sub('[\/:*?"<>|]', '', dn)
                dn = dn.strip()
                return dn
            author = 'null'
            if 'author' in comic:
                author = cor_dirname(comic['author'])
            ctitle = cor_dirname(comic['title'])
            etitle = cor_dirname(eps['title'])
            # Single-episode finished comics skip the per-episode subdirectory.
            if comic['finished'] and comic['epsCount'] <= 1:
                path = os.path.join(self.download_path,
                                    author, ctitle,
                                    media['originalName'])
            else:
                path = os.path.join(self.download_path,
                                    author, ctitle, etitle,
                                    media['originalName'])
            threadpool.apply_async(self.picaapi.download, (url, path,))
        threadpool.close()
        threadpool.join()
        _ = self.__ExecuteSQL("update status set finished=true where id=?;",
                              (eps["_id"],))
        logging.info("漫画%s的分话%s下载完成" % (comic["_id"], eps["_id"]))
    def download_all(self):
        """Download every episode whose status row is finished=false."""
        episodes = self.__ExecuteSQL(
            "select episodes.data, comics.data from episodes inner join status on status.id=episodes.id and status.finished=false inner join comics on episodes.comic=comics.id;")
        n = len(episodes)
        logging.info("开始下载系统内所有未完成分话(共%d个)" % n)
        for eps_data, comic_data in episodes:
            eps_data, comic_data = json.loads(eps_data), json.loads(comic_data)
            self.__download(comic_data, eps_data)
            n -= 1
            logging.info("系统内未完成分话还有%d个" % n)
        logging.info("系统内所有分话下载完成")
| 41.853047 | 178 | 0.546716 | 12,260 | 0.98561 | 1,797 | 0.144465 | 0 | 0 | 0 | 0 | 3,464 | 0.278479 |
92823b64b2493a2841eef5286f4dd90b0822ea8b | 1,094 | py | Python | sharpy-sc2/sharpy/plans/require/enemy_building_exists.py | etzhang416/sharpy-bot-eco | badc68ad1aa903dfa1bbc33f6225608e433ff353 | [
"Unlicense"
] | null | null | null | sharpy-sc2/sharpy/plans/require/enemy_building_exists.py | etzhang416/sharpy-bot-eco | badc68ad1aa903dfa1bbc33f6225608e433ff353 | [
"Unlicense"
] | null | null | null | sharpy-sc2/sharpy/plans/require/enemy_building_exists.py | etzhang416/sharpy-bot-eco | badc68ad1aa903dfa1bbc33f6225608e433ff353 | [
"Unlicense"
] | null | null | null | import warnings
from sc2 import UnitTypeId
from sharpy.plans.require.require_base import RequireBase
class EnemyBuildingExists(RequireBase):
    """
    Checks if enemy has units of the type based on the information we have seen.
    """
    def __init__(self, unit_type: UnitTypeId, count: int = 1):
        # Fail fast on wrong argument types.
        assert unit_type is not None and isinstance(unit_type, UnitTypeId)
        assert count is not None and isinstance(count, int)
        super().__init__()
        self.unit_type = unit_type
        self.count = count
    def check(self) -> bool:
        # Known enemy units of this type; True once we have seen enough of them.
        seen = self.knowledge.known_enemy_units(self.unit_type).amount
        return seen is not None and seen >= self.count
class RequiredEnemyBuildingExists(EnemyBuildingExists):
    """Deprecated alias of EnemyBuildingExists, kept for backwards compatibility."""
    def __init__(self, unit_type: UnitTypeId, count: int = 1):
        # stacklevel 2 makes the warning point at the caller, not this frame.
        warnings.warn(
            "'RequiredEnemyBuildingExists' is deprecated, use 'EnemyBuildingExists' instead", DeprecationWarning, 2
        )
        super().__init__(unit_type, count)
| 28.789474 | 115 | 0.678245 | 985 | 0.900366 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.157221 |
92831eddc434876a1167f4aa57320da88b3d4e23 | 2,971 | py | Python | setup.py | biobakery/halla | 9017988664399e607590ddb030ce755b233effae | [
"MIT"
] | 6 | 2020-09-15T17:57:45.000Z | 2022-01-09T11:46:33.000Z | setup.py | biobakery/halla | 9017988664399e607590ddb030ce755b233effae | [
"MIT"
] | 2 | 2021-04-13T17:07:28.000Z | 2021-09-20T15:45:43.000Z | setup.py | biobakery/halla | 9017988664399e607590ddb030ce755b233effae | [
"MIT"
] | 2 | 2020-06-17T16:47:35.000Z | 2020-09-15T17:57:47.000Z | '''HAllA setup
To install: python setup.py install
'''
import sys
try:
import setuptools
from setuptools.command.install import install
except ImportError:
sys.exit('Please install setuptools.')
VERSION = '0.8.20'
AUTHOR = 'HAllA Development Team'
MAINTAINER_EMAIL = 'halla-users@googlegroups.com'
class PostInstallCommand(install):
    '''Post-installation for installation mode'''
    def run(self):
        """Run the normal install, then make sure the R packages 'eva' and
        'XICOR' are available, installing them from CRAN when missing."""
        install.run(self)
        # post-install script
        from rpy2.robjects.packages import importr
        try:
            eva = importr('eva')
        except Exception:  # was a bare except (E722); importr failure means 'eva' is missing
            utils = importr('utils')
            utils.chooseCRANmirror(ind=1)
            utils.install_packages('EnvStats')
            utils.install_packages('https://cran.r-project.org/src/contrib/Archive/eva/eva_0.2.5.tar.gz')
            # check if eva has been successfully installed
            eva = importr('eva')
        try:
            XICOR = importr('XICOR')
        except Exception:  # was a bare except (E722)
            utils = importr('utils')
            utils.chooseCRANmirror(ind=1)
            utils.install_packages("XICOR")
            XICOR = importr('XICOR')
# Installing requirements.txt dependencies
dependencies = []
# 'with' closes the file (it previously leaked an open handle); strip() drops
# the trailing newline from each requirement spec.
with open('requirements.txt', 'r') as requirements:
    for dependency in requirements:
        dependencies.append(dependency.strip())
setuptools.setup(
    name='HAllA',
    author=AUTHOR,
    author_email=MAINTAINER_EMAIL,
    version=VERSION,
    license='MIT',
    description='HAllA: Hierarchical All-against All Association Testing',
    long_description="Given two high-dimensional 'omics datasets X and Y (continuous and/or categorical features) from the same n biosamples, HAllA (Hierarchical All-against-All Association Testing) discovers densely-associated blocks of features in the X vs. Y association matrix where: 1) each block is defined as all associations between features in a subtree of X hierarchy and features in a subtree of Y hierarchy and 2) a block is densely associated if (1 - FNR)% of pairwise associations are FDR significant (FNR is the pre-defined expected false negative rate)",
    url='https://github.com/biobakery/halla',
    keywords=['halla', 'association testing'],
    platforms=['Linux','MacOS'],
    # Populated above from requirements.txt.
    install_requires=dependencies,
    classifiers=[
        'Programming Language :: Python',
        'Operating System :: MacOS',
        'Operating System :: Unix',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering :: Bio-Informatics'
    ],
    packages=setuptools.find_packages(),
    package_data={
        'halla': ['config.yaml']
    },
    # Command-line entry points installed alongside the package.
    entry_points={
        'console_scripts': [
            'halla = scripts.halla:main',
            'halladata = scripts.synthetic_data:main',
            'hallagram = scripts.hallagram:main',
            'hallagnostic = scripts.diagnostic_plot:main',
        ]
    },
    # PostInstallCommand additionally installs the R dependencies after install.
    cmdclass={
        'install': PostInstallCommand,
    },
    test_suite= 'tests',
)
| 35.795181 | 570 | 0.657354 | 822 | 0.276675 | 0 | 0 | 0 | 0 | 0 | 0 | 1,518 | 0.510939 |
92839f4c490a65ddecdbcfd8f4191a0b811fc3a5 | 5,087 | py | Python | all-sky-average-proper-motions/proper-motion-map.py | agabrown/gaiaedr3-proper-motion-visualizations | f3943c548e6c373badcd0a598852c71eb88643d1 | [
"MIT"
] | 5 | 2020-12-03T17:05:54.000Z | 2021-03-22T11:48:21.000Z | all-sky-average-proper-motions/proper-motion-map.py | agabrown/gaiaedr3-proper-motion-visualizations | f3943c548e6c373badcd0a598852c71eb88643d1 | [
"MIT"
] | null | null | null | all-sky-average-proper-motions/proper-motion-map.py | agabrown/gaiaedr3-proper-motion-visualizations | f3943c548e6c373badcd0a598852c71eb88643d1 | [
"MIT"
] | 2 | 2021-01-28T13:20:57.000Z | 2021-08-13T16:55:32.000Z | """
Plot an all-sky average proper motion map, using statistics downloaded from the Gaia archive with a query similar to the
following:
select
gaia_healpix_index(5, source_id) as healpix_5,
avg(pmra) as avg_pmra,
avg(pmdec) as avg_pmdec
from gaiaedr3.gaia_source
where parallax_over_error>=10
and parallax*parallax - 2*parallax - parallax_error*parallax_error < -1
group by healpix_5
Anthony Brown Oct 2020 - Dec 2020
"""
import argparse
import astropy.units as u
import astropy_healpix.healpy as hp
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
from astropy.coordinates import ICRS, Galactic
from astropy.table import Table
from matplotlib import cm
from matplotlib.gridspec import GridSpec
from matplotlib.patches import ArrowStyle
def make_plot(args):
    """
    Take the steps to make the plot.
    Parameters
    ----------
    args: dict
        Command line arguments
    Returns
    -------
    Nothing
    """
    infile = './data/' + args['inputFile']
    basename = 'PMmap-' + args['inputFile'].split('.')[0]
    default_proj = ccrs.PlateCarree()
    sky_proj = ccrs.Mollweide()
    # Background all-sky image; path is relative to this script's location.
    backgr = plt.imread('../star-trail-animation/sky-images/GaiaSky-colour-2k.png')
    nside = hp.order2nside(args['hplevel'])
    hpcol = 'healpix_{0}'.format(args['hplevel'])
    edr3data = Table.read(infile)
    # Cell-centre sky coordinates (degrees) of each nested-order healpix.
    alpha, delta = hp.pix2ang(nside, edr3data[hpcol], lonlat=True, nest=True)
    pmra = edr3data['avg_pmra']
    pmdec = edr3data['avg_pmdec']
    # Convert ICRS positions + proper motions to Galactic coordinates.
    icrs = ICRS(ra=alpha * u.degree, dec=delta * u.degree, pm_ra_cosdec=pmra * u.mas / u.yr,
                pm_dec=pmdec * u.mas / u.yr)
    galactic = icrs.transform_to(Galactic)
    # Total proper motion magnitude, used for colour/width coding.
    pmtot = np.sqrt(galactic.pm_l_cosb.value ** 2 + galactic.pm_b.value ** 2)
    fig = plt.figure(figsize=(16, 9), dpi=120, frameon=False, tight_layout={'pad': 0.01})
    gs = GridSpec(1, 1, figure=fig)
    ax = fig.add_subplot(gs[0, 0], projection=sky_proj)
    ax.imshow(np.fliplr(backgr), transform=default_proj, zorder=-1, origin='upper')
    pmcmap = cm.viridis
    veccolor = plt.cm.get_cmap('tab10').colors[9]
    linecolor = plt.cm.get_cmap('tab10').colors[9]
    if args['quiver']:
        # Vector (quiver) rendering, scaled to the median proper motion.
        vscale = np.median(pmtot) / 10
        ax.quiver(galactic.l.value, galactic.b.value, galactic.pm_l_cosb.value, galactic.pm_b.value,
                  transform=default_proj, angles='xy', scale=vscale, scale_units='dots', color=veccolor,
                  headwidth=1, headlength=3, headaxislength=2.5)
    else:
        # Streamline rendering: colour-coded, width-coded, or plain.
        if args['colourstreams']:
            ax.streamplot(galactic.l.value, galactic.b.value, galactic.pm_l_cosb.value, galactic.pm_b.value,
                          transform=default_proj, linewidth=2.0, density=2, color=pmtot, cmap=pmcmap, maxlength=0.5,
                          arrowsize=1, arrowstyle=ArrowStyle.Fancy(head_length=1.0, head_width=.4, tail_width=.4))
        elif args['lwcode'] > 0:
            ax.streamplot(galactic.l.value, galactic.b.value, galactic.pm_l_cosb.value, galactic.pm_b.value,
                          transform=default_proj, linewidth=args['lwcode'] * pmtot / np.median(pmtot), density=2,
                          color=linecolor,
                          maxlength=0.5, arrowsize=1, arrowstyle=ArrowStyle.Fancy(head_length=1.0, head_width=.4,
                                                                                  tail_width=.4))
        else:
            ax.streamplot(galactic.l.value, galactic.b.value, galactic.pm_l_cosb.value, galactic.pm_b.value,
                          transform=default_proj, linewidth=1.5, density=2, color=linecolor, maxlength=0.5, arrowsize=1,
                          arrowstyle=ArrowStyle.Fancy(head_length=1.0, head_width=.4, tail_width=.4))
    # Astronomical convention: longitude increases to the left.
    ax.invert_xaxis()
    if args['pdfOutput']:
        plt.savefig(basename + '.pdf')
    elif args['pngOutput']:
        plt.savefig(basename + '.png')
    else:
        plt.show()
def parse_command_line_arguments():
    """Define and parse the command-line interface; return the options as a dict."""
    argparser = argparse.ArgumentParser("Produce all-sky proper motion map.")
    # Positional arguments: the input table and (optionally) its healpix level.
    argparser.add_argument('inputFile', type=str, help="""VOT file with proper motion stats by Healpix.""")
    argparser.add_argument('hplevel', type=int, nargs='?', default=4, help="""Healpix level of input table.""")
    # Optional switches controlling the rendering style and output format.
    argparser.add_argument('--vectors', action="store_true", dest="quiver", help="Plot vectors instead of streamlines")
    argparser.add_argument('--colourcode', action='store_true', dest='colourstreams', help="""Plot streamlines colour coded
    by magnitude of proper motion""")
    argparser.add_argument('--lwcode', type=float, default=0.0, help="""Plot streamlines with the width indicating the
    magnitude of proper motion. Scale the widths by the factor provided""")
    argparser.add_argument("-p", action="store_true", dest="pdfOutput", help="Make PDF plot")
    argparser.add_argument("-b", action="store_true", dest="pngOutput", help="Make PNG plot")
    return vars(argparser.parse_args())
if __name__ == '__main__':
    # Was `__name__ in '__main__'`: a substring test that would also match
    # names like 'main'; equality is the intended script guard.
    cmdargs = parse_command_line_arguments()
    make_plot(cmdargs)
| 40.373016 | 120 | 0.658148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,449 | 0.284844 |
92863f36881bc64c62961311a3a4e4eae246cfd5 | 15,743 | py | Python | AWERA/wind_profile_clustering/read_requested_data.py | lthUniBonn/AWERA | fa7f210516318bcfcbe1c99abbb5954b0cbaf682 | [
"MIT"
] | null | null | null | AWERA/wind_profile_clustering/read_requested_data.py | lthUniBonn/AWERA | fa7f210516318bcfcbe1c99abbb5954b0cbaf682 | [
"MIT"
] | null | null | null | AWERA/wind_profile_clustering/read_requested_data.py | lthUniBonn/AWERA | fa7f210516318bcfcbe1c99abbb5954b0cbaf682 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import xarray as xr
import numpy as np
import sys
from os.path import join as path_join
from .era5_ml_height_calc import compute_level_heights
# FIXME what of this is still necessary?
import dask
# only as many threads as requested CPUs | only one to be requested,
# more threads don't seem to be used
# TODO better option than synchronous?
dask.config.set(scheduler='synchronous')
def read_raw_data(data_config, sel_sample_ids=None, lat0=50, lon0=0):
    """Read ERA5 wind data for adjacent years.

    Args:
        data_config: Configuration object describing the input
            (years, months, file name formats, era5_data_input_format).
        sel_sample_ids (list, optional): Time-sample indices to select;
            all samples are kept when None or empty.
        lat0 (float): Latitude used for the 'single_loc' input format.
        lon0 (float): Longitude used for the 'single_loc' input format.

    Returns:
        tuple of Dataset, ndarray, ndarray, ndarray, ndarray and int:
            Reading object of multiple wind data (netCDF) files,
            longitudes of grid, latitudes of grid, model level numbers,
            timestamps in hours since 1900-01-01 00:00:0.0, and the
            index of the highest usable model level.
    """
    if sel_sample_ids is None:
        # Avoid a mutable default argument; None means "use all samples".
        sel_sample_ids = []
    if data_config.era5_data_input_format == 'single_loc':
        ds = read_ds_single_loc_files(data_config, lat0, lon0)
    elif data_config.era5_data_input_format == 'monthly':
        # Construct the list of input NetCDF files
        ml_files = []
        sfc_files = []
        for y in range(data_config.start_year, data_config.final_year+1):
            for m in range(1, data_config.year_final_month+1):
                ml_files.append(
                    data_config.model_level_file_name_format.format(y, m))
                sfc_files.append(
                    data_config.surface_file_name_format.format(y, m))
        # Load the data from the NetCDF files.
        ds = xr.open_mfdataset(ml_files+sfc_files, decode_times=True)
    else:
        # NOTE(review): falls through with `ds` undefined and raises a
        # NameError below - kept as-is to preserve the original behavior.
        print('Wrong input data format: {}'.format(
            data_config.era5_data_input_format))

    if len(sel_sample_ids) > 0:
        ds = ds.isel(time=sel_sample_ids)

    lons = ds['longitude'].values
    lats = ds['latitude'].values

    levels = ds['level'].values.astype(int)  # Model level numbers.
    hours = ds['time'].values

    # Model levels must be consecutive up to 137 (ERA5 surface level);
    # otherwise only the consecutive top portion is usable.
    dlevels = np.diff(levels)
    if not (np.all(dlevels == 1) and levels[-1] == 137):
        i_highest_level = len(levels) - np.argmax(dlevels[::-1] > 1) - 1
        print("Not all the downloaded model levels are consecutive."
              " Only model levels up to {} are evaluated."
              .format(levels[i_highest_level]))
        levels = levels[i_highest_level:]
    else:
        i_highest_level = 0

    return ds, lons, lats, levels, hours, i_highest_level
def read_ds_single_loc_files(data_config,
                             lat, lon,
                             by_index=False,
                             time_combined=True,
                             sel_sample_ids=None):
    """Read ERA5 wind data from location wise files.

    Merges the per-location model-level file with the monthly surface
    files for the single grid point (lat, lon).

    Returns:
        Dataset: Reading object of multiple wind data (netCDF) files
    """
    if sel_sample_ids is None:
        # Avoid a mutable default argument; None means "use all samples".
        sel_sample_ids = []
    ds_ml = xr.open_dataset(data_config.latitude_ds_file_name.format(
        lat=lat, lon=lon), decode_times=True)
    ds_ml = ds_ml.sel(time=slice(str(data_config.start_year),
                                 str(data_config.final_year)))
    # Read surface pressure files
    sfc_files = []
    for y in range(data_config.start_year, data_config.final_year+1):
        for m in range(1, data_config.year_final_month+1):
            sfc_files.append(
                data_config.surface_file_name_format.format(y, m))
    # Load the data from the NetCDF files.
    ds_sfc = xr.open_mfdataset(sfc_files, decode_times=True)
    ds_sfc = ds_sfc.sel(latitude=[lat], longitude=[lon])
    # Test matching lat/lon representations: the two products may use
    # different longitude conventions (0..360 vs -180..180).
    if ds_sfc.coords['longitude'].values != ds_ml.coords['longitude'].values:
        ds_ml.coords['longitude'] = ds_ml.coords['longitude'] - 360
        if (ds_sfc.coords['longitude'].values !=
                ds_ml.coords['longitude'].values):
            raise ValueError('Mismatching longitudes')
    if ds_sfc.coords['latitude'].values != ds_ml.coords['latitude'].values:
        # NOTE(review): shifting latitude by 360 looks suspicious (latitudes
        # do not wrap by 360 degrees) - confirm against the input files.
        ds_ml.coords['latitude'] = ds_ml.coords['latitude'] - 360
        # Bug fix: re-check the *latitude* values here; the original
        # re-checked longitude (copy-paste), so a genuine latitude mismatch
        # was silently accepted after the shift.
        if (ds_sfc.coords['latitude'].values !=
                ds_ml.coords['latitude'].values):
            raise ValueError('Mismatching latitudes')
    ds = xr.merge([ds_ml, ds_sfc])
    if len(sel_sample_ids) > 0:
        ds = ds.isel(time=sel_sample_ids)
    return ds
def eval_single_loc_era5_input(data_config,
                               sel_sample_ids,
                               i_highest_level,
                               levels,
                               n_per_loc,
                               loc_i_loc,
                               ds=None,
                               use_memmap=False):
    """Interpolate ERA5 model-level winds at one grid location onto the
    configured height range.

    Args:
        data_config: Configuration object (height_range, input format, ...).
        sel_sample_ids (list): Time-sample indices to select (may be empty).
        i_highest_level (int): Index of the highest usable model level.
        levels (ndarray): Model level numbers.
        n_per_loc (int): Number of time samples per location.
        loc_i_loc (tuple): (lat, lon, i_lat, i_lon, i) - coordinates, grid
            indices and the running location counter used for memmap offsets.
        ds (Dataset, optional): Already opened dataset; re-read from disk
            for the 'single_loc' input format or when None.
        use_memmap (bool): If True, write the result slices into the shared
            tmp/ memmap files and return 0; otherwise return the arrays.

    Returns:
        0 when use_memmap is True, else a tuple
        (v_req_alt_east_loc, v_req_alt_north_loc), each of shape
        (n_per_loc, len(data_config.height_range)).
    """
    # TODO improve arguments!
    lat, lon, i_lat, i_lon, i = loc_i_loc
    if data_config.era5_data_input_format == 'single_loc' or ds is None:
        # For single location data files, always read the next file
        ds = read_ds_single_loc_files(data_config,
                                      lat, lon,
                                      sel_sample_ids=sel_sample_ids)
    # Extract wind data for single location
    v_levels_east = ds['u'][:, i_highest_level:, i_lat, i_lon].values
    v_levels_north = ds['v'][:, i_highest_level:, i_lat, i_lon].values
    t_levels = ds['t'][:, i_highest_level:, i_lat, i_lon].values
    # TODO test -- better to call values later? or all together at beginning?
    q_levels = ds['q'][:, i_highest_level:, i_lat, i_lon].values
    # Surface pressure: fall back to exp of log-surface-pressure ('lnsp')
    # when 'sp' is not present in the dataset.
    try:
        surface_pressure = ds.variables['sp'][:, i_lat, i_lon].values
    except KeyError:
        surface_pressure = np.exp(ds.variables['lnsp'][:, i_lat, i_lon].values)
    # Calculate model level height
    level_heights, density_levels = compute_level_heights(levels,
                                                          surface_pressure,
                                                          t_levels,
                                                          q_levels)
    # Determine wind at altitudes of interest by
    # means of interpolating the raw wind data.

    # Interpolation results array.
    v_req_alt_east_loc = np.zeros((n_per_loc, len(data_config.height_range)))
    v_req_alt_north_loc = np.zeros((n_per_loc, len(data_config.height_range)))
    # same = 0
    for i_hr in range(n_per_loc):
        # level_heights[i_hr, 0] is the highest model level for this sample;
        # every requested height must lie below it.
        if not np.all(level_heights[i_hr, 0] > data_config.height_range):
            raise ValueError("Requested height ({:.2f} m) is higher than \
height of highest model level."
                             .format(level_heights[i_hr, 0]))
        # np.interp needs ascending x-values, hence the [::-1] reversal of
        # the (top-down ordered) model levels.
        v_req_alt_east_loc[i_hr, :] = np.interp(data_config.height_range,
                                                level_heights[i_hr, ::-1],
                                                v_levels_east[i_hr, ::-1])
        v_req_alt_north_loc[i_hr, :] = np.interp(data_config.height_range,
                                                 level_heights[i_hr, ::-1],
                                                 v_levels_north[i_hr, ::-1])
        # Sanity check height range oversampling
        # same_hr = sum(np.diff(np.round(np.interp(data_config.height_range,
        #                                          level_heights[i_hr, ::-1],
        #                                          np.arange(level_heights.shape[1])))) == 0)
        # same += same_hr
        # print('Height Level Ids: ',
        #       np.round(np.interp(data_config.height_range,
        #                          level_heights[i_hr, ::-1],
        #                          np.arange(level_heights.shape[1]))))
    # print('Same level data used {} times.'.format(same))
    if use_memmap:
        # Write this location's slice into the shared result files; the
        # offset is in bytes, hence * int(64/8) for float64 entries.
        v_east = np.memmap('tmp/v_east.memmap', dtype='float64', mode='r+',
                           shape=(n_per_loc, len(data_config.height_range)),
                           offset=n_per_loc*i*len(data_config.height_range)
                           * int(64/8))
        v_east[:, :] = v_req_alt_east_loc
        del v_east
        v_north = np.memmap('tmp/v_north.memmap', dtype='float64', mode='r+',
                            shape=(n_per_loc, len(data_config.height_range)),
                            offset=n_per_loc*i*len(data_config.height_range)
                            * int(64/8))
        v_north[:, :] = v_req_alt_north_loc
        del v_north
        return 0
    else:
        return(v_req_alt_east_loc, v_req_alt_north_loc)
def get_wind_data_era5(config,
                       locations=None,
                       sel_sample_ids=None):
    """Read and interpolate ERA5 winds for a list of locations.

    Args:
        config: Full configuration object (Data, General, Processing).
        locations (list of tuple, optional): (lat, lon) pairs; defaults
            to [(40, 1)].
        sel_sample_ids (list, optional): Time-sample indices to select;
            all samples when None or empty.

    Returns:
        dict: wind data with keys 'wind_speed_east', 'wind_speed_north'
        (arrays of shape (n_samples, len(height_range))), sample counts,
        'datetime', 'altitude', 'years' and 'locations'.
    """
    # Avoid mutable default arguments; resolve the documented defaults here.
    if locations is None:
        locations = [(40, 1)]
    if sel_sample_ids is None:
        sel_sample_ids = []
    lat, lon = locations[0]
    ds, lons, lats, levels, hours, i_highest_level = read_raw_data(
        config.Data, sel_sample_ids=sel_sample_ids, lat0=lat, lon0=lon)
    n_per_loc = len(hours)

    if config.Data.era5_data_input_format != 'single_loc':
        # Convert lat/lon lists to indices
        lats, lons = (list(lats), list(lons))
        i_locs = [(lats.index(lat), lons.index(lon)) for lat, lon in locations]
    else:
        # Per-location files contain a single grid point each.
        i_locs = [(0, 0) for lat, lon in locations]

    # Result buffers: either disk-backed memmaps or in-memory arrays,
    # one row block of n_per_loc samples per location.
    if config.General.use_memmap:
        v_req_alt_east = np.memmap('tmp/v_east.memmap', dtype='float64',
                                   mode='w+',
                                   shape=(n_per_loc*len(locations),
                                          len(config.Data.height_range)))
        v_req_alt_north = np.memmap('tmp/v_north.memmap', dtype='float64',
                                    mode='w+',
                                    shape=(n_per_loc*len(locations),
                                           len(config.Data.height_range)))
    else:
        v_req_alt_east = np.zeros((n_per_loc*len(locations),
                                   len(config.Data.height_range)))
        v_req_alt_north = np.zeros((n_per_loc*len(locations),
                                    len(config.Data.height_range)))

    if config.Processing.parallel:
        # TODO import not here
        from multiprocessing import Pool
        from tqdm import tqdm
        loc_i_loc_combinations = [(locations[i][0], locations[i][1],
                                   i_loc[0], i_loc[1], i)
                                  for i, i_loc in enumerate(i_locs)]
        import functools
        funct = functools.partial(eval_single_loc_era5_input,
                                  config.Data, sel_sample_ids,
                                  i_highest_level, levels, n_per_loc,
                                  ds=ds,
                                  use_memmap=config.General.use_memmap)
        with Pool(config.Processing.n_cores) as p:
            if config.Processing.progress_out == 'stdout':
                file = sys.stdout
            else:
                file = sys.stderr
            results = list(tqdm(p.imap(funct, loc_i_loc_combinations),
                                total=len(loc_i_loc_combinations),
                                file=file))
            # Without a memmap the workers return arrays that are copied
            # into the combined buffers here.
            if not config.General.use_memmap:
                for i, val in enumerate(results):
                    v_req_alt_east_loc, v_req_alt_north_loc = val
                    v_req_alt_east[n_per_loc*i:n_per_loc*(i+1), :] = \
                        v_req_alt_east_loc
                    v_req_alt_north[n_per_loc*i:n_per_loc*(i+1), :] = \
                        v_req_alt_north_loc
    else:
        # Not parallelized version:
        for i, i_loc in enumerate(i_locs):
            # TODO add progress bar
            i_lat, i_lon = i_loc
            lat, lon = locations[i]
            v_req_alt_east_loc, v_req_alt_north_loc = \
                eval_single_loc_era5_input(
                    config.Data,
                    sel_sample_ids, i_highest_level,
                    levels, n_per_loc,
                    (lat, lon, i_lat, i_lon, i),
                    ds=ds)
            # TODO is this even xarray anymore?
            # check efficiency numpy, pandas, xarray
            v_req_alt_east[n_per_loc*i:n_per_loc*(i+1), :] = v_req_alt_east_loc
            v_req_alt_north[n_per_loc*i:n_per_loc*(i+1), :] = \
                v_req_alt_north_loc
    # if era5_data_input_format == 'single_loc':
    # Close dataset - free resources? #TODO
    # ds.close()

    # TODO This could get too large for a large number of locations
    # - better use an xarray/ more efficient data structure here?
    if config.General.use_memmap:
        # Reopen the memmaps read-only so the returned arrays cannot be
        # modified accidentally.
        del v_req_alt_east
        del v_req_alt_north
        v_req_alt_east = np.memmap('tmp/v_east.memmap', dtype='float64',
                                   mode='r',
                                   shape=(n_per_loc*len(locations),
                                          len(config.Data.height_range)))
        v_req_alt_north = np.memmap('tmp/v_north.memmap', dtype='float64',
                                    mode='r',
                                    shape=(n_per_loc*len(locations),
                                           len(config.Data.height_range)))
    wind_data = {
        'wind_speed_east': v_req_alt_east,
        'wind_speed_north': v_req_alt_north,
        'n_samples': n_per_loc*len(i_locs),
        'n_samples_per_loc': n_per_loc,
        'datetime': ds['time'].values,
        'altitude': config.Data.height_range,
        'years': (config.Data.start_year, config.Data.final_year),
        'locations': locations,
    }
    ds.close()  # Close the input NetCDF file.
    return wind_data
def get_wind_data(config, sel_sample_ids=None, locs=None):
    """Dispatch wind-data reading to the configured data source.

    Args:
        config: Full configuration object; config.Data.use_data selects
            the source ('DOWA', 'LIDAR', 'ERA5' or 'ERA5_1x1').
        sel_sample_ids (list, optional): Time-sample indices (ERA5 only).
        locs (list of tuple, optional): (lat, lon) pairs; defaults to all
            locations from the configuration.

    Returns:
        dict: wind data (see get_wind_data_era5 for the key layout).

    Raises:
        ValueError: If config.Data.use_data names an unknown source.
    """
    # Avoid mutable default arguments.
    if sel_sample_ids is None:
        sel_sample_ids = []
    # TODO add single sample selection for all data types
    if not locs:
        # Use all configuration locations
        locs = config.Data.locations

    if config.Data.use_data == 'DOWA':
        import os
        # HDF5 library has been updated (1.10.1)
        # (netcdf uses HDF5 under the hood) file system does not support
        # the file locking that the HDF5 library uses.
        # In order to read your hdf5 or netcdf files,
        # you need set this environment variable :
        os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
        # TODO check - is this still needed?
        # if yes - where set, needed for era5? FIX
        from .read_data.dowa import read_data
        wind_data = read_data({'mult_coords': locs}, config.Data.DOWA_data_dir)

        # Use start_year to final_year data only
        hours = wind_data['datetime']
        start_date = np.datetime64(
            '{}-01-01T00:00:00.000000000'.format(config.Data.start_year))
        end_date = np.datetime64(
            '{}-01-01T00:00:00.000000000'.format(config.Data.final_year+1))
        start_idx = list(hours).index(start_date)
        end_idx = list(hours).index(end_date)
        data_range = range(start_idx, end_idx + 1)
        for key in ['wind_speed_east', 'wind_speed_north', 'datetime']:
            wind_data[key] = wind_data[key][data_range]
        wind_data['n_samples'] = len(data_range)
        wind_data['years'] = (config.Data.start_year, config.Data.final_year)
        wind_data['locations'] = locs
        wind_data['n_samples_per_loc'] = wind_data['n_samples']/len(locs)
        print(len(hours))
        print(len(wind_data['wind_speed_east']), wind_data['n_samples'])
    elif config.Data.use_data == 'LIDAR':
        from read_data.fgw_lidar import read_data
        # FIXME config is not included here?
        wind_data = read_data()
    elif config.Data.use_data in ['ERA5', 'ERA5_1x1']:
        wind_data = get_wind_data_era5(config,
                                       locations=locs,
                                       sel_sample_ids=sel_sample_ids)
    else:
        raise ValueError("Wrong data type specified: "
                         "{} - no option to read data is executed".format(
                             config.Data.use_data))
    return wind_data
| 42.896458 | 79 | 0.56495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,981 | 0.252874 |
9289780cbba13c79f57a317c9ba7998f92d3643a | 1,113 | py | Python | bot/test_gpio.py | radiodee1/awesome-chatbot | 5e83ae577421bb1e612da57836f24544e9dc044b | [
"MIT"
] | 22 | 2018-06-22T07:10:06.000Z | 2022-03-22T05:27:58.000Z | bot/test_gpio.py | radiodee1/awesome-chatbot | 5e83ae577421bb1e612da57836f24544e9dc044b | [
"MIT"
] | 6 | 2020-05-17T16:34:48.000Z | 2022-03-02T13:56:55.000Z | bot/test_gpio.py | radiodee1/awesome-chatbot | 5e83ae577421bb1e612da57836f24544e9dc044b | [
"MIT"
] | 11 | 2018-03-29T21:47:56.000Z | 2022-03-07T22:15:06.000Z | #!/usr/bin/env python3
import time
# True when no GPIO backend is usable (e.g. on a development machine);
# the Game methods below then become no-ops.
pin_skip = False
try:
    import RPi.GPIO as GPIO
    led_pin_a = 12
    led_pin_b = 16
    print('load rpi gpio')
except Exception:
    # Deliberately broad (not just ImportError): RPi.GPIO may be installed
    # but raise at import time on non-Pi hardware - TODO confirm.
    # `except Exception` still lets KeyboardInterrupt/SystemExit through,
    # unlike the previous bare `except:`.
    try:
        import Jetson.GPIO as GPIO
        led_pin_a = 12
        led_pin_b = 16
        print('load jetson gpio')
    except Exception:
        pin_skip = True
        print('no load gpio')
class Game:
    """Drives two LEDs in opposite states; every pin call is a no-op when
    no GPIO backend was loaded (module-level pin_skip flag)."""

    def __init__(self):
        print('hello - gpio test')

    def pin_setup(self):
        """Configure both LED pins as outputs (BOARD numbering)."""
        if pin_skip:
            return
        GPIO.setmode(GPIO.BOARD)
        GPIO.setwarnings(False)
        for pin in (led_pin_a, led_pin_b):
            GPIO.setup(pin, GPIO.OUT)

    def _drive(self, level_a, level_b):
        # Set both LEDs in one call; levels are GPIO.HIGH/GPIO.LOW.
        GPIO.output(led_pin_a, level_a)
        GPIO.output(led_pin_b, level_b)

    def pin_a_on(self):
        """LED A on, LED B off."""
        if not pin_skip:
            self._drive(GPIO.HIGH, GPIO.LOW)

    def pin_a_off(self):
        """LED A off, LED B on."""
        if not pin_skip:
            self._drive(GPIO.LOW, GPIO.HIGH)
if __name__ == '__main__':
    g = Game()
    g.pin_setup()
    # Blink forever, toggling the LED pair once per second;
    # runs until interrupted (Ctrl+C).
    while True:
        print('light on')
        g.pin_a_on()
        time.sleep(1)
        print('light off')
        g.pin_a_off()
        time.sleep(1)
9289e747472df431fa07919c97c9bde9926f3780 | 636 | py | Python | 1-Algorithmic-Toolbox/week3/assignments/car_fueling.py | Helianus/Data-Structures-and-Algorithms-Coursera | 1fe8961cdd77fdeb88490f127e4369680419c428 | [
"MIT"
] | null | null | null | 1-Algorithmic-Toolbox/week3/assignments/car_fueling.py | Helianus/Data-Structures-and-Algorithms-Coursera | 1fe8961cdd77fdeb88490f127e4369680419c428 | [
"MIT"
] | null | null | null | 1-Algorithmic-Toolbox/week3/assignments/car_fueling.py | Helianus/Data-Structures-and-Algorithms-Coursera | 1fe8961cdd77fdeb88490f127e4369680419c428 | [
"MIT"
] | null | null | null | # python3
import sys
def compute_min_refills(distance, tank, stops):
    """Return the minimum number of refills needed to drive `distance`
    with a full-tank range of `tank`, refuelling only at the positions in
    `stops`, or -1 if the trip is impossible.

    Greedy strategy: from each refuelling point, drive to the farthest
    reachable stop before refuelling again.

    Fixes two defects of the previous version: it returned 0 (instead of
    -1) when there were no stops and distance > tank, and it mutated the
    caller's `stops` list by appending `distance` to it.

    Args:
        distance (int): Total trip length.
        tank (int): Maximum distance on a full tank.
        stops (list of int): Refuelling positions, sorted ascending,
            strictly between 0 and distance. Not modified.

    Returns:
        int: Minimum number of refills, or -1 if unreachable.
    """
    # Work on a copy that includes the start and the destination.
    points = [0] + list(stops) + [distance]
    n = len(points)
    refills = 0
    current = 0  # index of the point where the tank was last full
    while current < n - 1:
        last = current
        # Advance to the farthest point reachable on the current tank.
        while current < n - 1 and points[current + 1] - points[last] <= tank:
            current += 1
        if current == last:
            # Even the next point is out of range: trip impossible.
            return -1
        if current < n - 1:
            # Not at the destination yet, so a refill happens here.
            refills += 1
    return refills
if __name__ == '__main__':
    # stdin format: distance, tank capacity, number of stops (ignored),
    # then the stop positions.
    d, m, _, *stops = map(int, sys.stdin.read().split())
    print(compute_min_refills(d, m, stops))
| 22.714286 | 66 | 0.52673 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.064465 |
928b9676a71b52831a3150e851b3660ba36d9a5e | 579 | py | Python | avatar/plugins/avatar_plugin.py | gitttt/avatar-python-private | aa33ac9f37b7a1d79943ac89109f3f9e38cf3736 | [
"ECL-2.0",
"Apache-2.0"
] | 30 | 2017-09-05T18:21:57.000Z | 2022-01-13T08:23:49.000Z | avatar/plugins/avatar_plugin.py | gitttt/avatar-python-private | aa33ac9f37b7a1d79943ac89109f3f9e38cf3736 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2016-12-08T13:17:53.000Z | 2016-12-08T13:27:36.000Z | avatar/plugins/avatar_plugin.py | K-atc/avatar-python | c6d74f85f63333e3031e1f5e208d3f90e316a9e4 | [
"ECL-2.0",
"Apache-2.0"
] | 11 | 2017-09-26T07:36:50.000Z | 2020-10-31T00:47:08.000Z | class AvatarPlugin:
"""
Abstract interface for all Avatar plugins
Upon start() and stop(), plugins are expected to register/unregister
their own event handlers by the means of :func:`System.register_event_listener`
and :func:`System.unregister_event_listener`
"""
    def __init__(self, system):
        # Keep a reference to the owning avatar system so subclasses can
        # register/unregister their event handlers with it.
        self._system = system
def init(self, **kwargs):
assert(False) #Not implemented
def start(self, **kwargs):
assert(False) #Not implemented
def stop(self, **kwargs):
assert(False) #Not implemented
| 27.571429 | 83 | 0.649396 | 578 | 0.998273 | 0 | 0 | 0 | 0 | 0 | 0 | 312 | 0.53886 |
928bbe7491fdbc0a0f928ea52f2ec3bc8d5bb842 | 323 | py | Python | python/python.py | TimVan1596/ACM-ICPC | 07f7d728db1ecd09c5a3d0f05521930b14eb9883 | [
"Apache-2.0"
] | 1 | 2019-05-22T07:12:34.000Z | 2019-05-22T07:12:34.000Z | python/python.py | TimVan1596/ACM-ICPC | 07f7d728db1ecd09c5a3d0f05521930b14eb9883 | [
"Apache-2.0"
] | 3 | 2021-12-10T01:13:54.000Z | 2021-12-14T21:18:42.000Z | python/python.py | TimVan1596/ACM-ICPC | 07f7d728db1ecd09c5a3d0f05521930b14eb9883 | [
"Apache-2.0"
] | null | null | null | import xlwt
if __name__ == '__main__':
    workbook = xlwt.Workbook(encoding='utf-8')  # create the workbook object
    worksheet = workbook.add_sheet('sheet1')  # create worksheet "sheet1"
    # Write cell content: the first argument is the row, the second the
    # column, the third the cell value.
    worksheet.write(0, 0, 'hello world')
    worksheet.write(0, 1, '你好')
    workbook.save('first.xls')  # save the workbook as first.xls
| 32.3 | 63 | 0.674923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.51861 |
928bed2cda12bb44aa28928e0571f9042f5e1006 | 736 | py | Python | kiestze_django/kiestze/migrations/0004_auto_20180719_1438.py | oSoc18/kiest_ze | 842eefcf3f6002ea90c2917682f625749398b33e | [
"Apache-2.0"
] | 3 | 2018-07-11T07:59:15.000Z | 2018-07-26T19:58:44.000Z | kiestze_django/kiestze/migrations/0004_auto_20180719_1438.py | oSoc18/kiest_ze | 842eefcf3f6002ea90c2917682f625749398b33e | [
"Apache-2.0"
] | 4 | 2018-08-11T13:55:50.000Z | 2019-12-28T15:41:30.000Z | kiestze_django/kiestze/migrations/0004_auto_20180719_1438.py | oSoc18/kiest_ze | 842eefcf3f6002ea90c2917682f625749398b33e | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.7 on 2018-07-19 14:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Makes Gemeente.nis the primary key (dropping the auto 'id' field)
    # and turns Partij.nis into a foreign key to Gemeente.

    dependencies = [
        ('kiestze', '0003_gemeente'),
    ]

    operations = [
        # Remove the auto-generated pk; 'nis' takes over below.
        migrations.RemoveField(
            model_name='gemeente',
            name='id',
        ),
        migrations.AlterField(
            model_name='gemeente',
            name='nis',
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
        # Deleting a Gemeente cascades to its Partij rows.
        migrations.AlterField(
            model_name='partij',
            name='nis',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kiestze.Gemeente'),
        ),
    ]
| 25.37931 | 104 | 0.586957 | 610 | 0.828804 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.177989 |
928e8bc158a67d838d81a5ba85e0362f35eae242 | 3,182 | py | Python | analysis/get_input_message_pairs.py | Shawn-Guo-CN/EmergentNumerals | ef9786e5bd6c8c456143ad305742340e510f5edb | [
"MIT"
] | 2 | 2019-08-16T21:37:55.000Z | 2019-08-18T18:11:28.000Z | analysis/get_input_message_pairs.py | Shawn-Guo-CN/EmergentNumerals | ef9786e5bd6c8c456143ad305742340e510f5edb | [
"MIT"
] | null | null | null | analysis/get_input_message_pairs.py | Shawn-Guo-CN/EmergentNumerals | ef9786e5bd6c8c456143ad305742340e510f5edb | [
"MIT"
] | null | null | null | import torch
import numpy as np
from utils.conf import args
from models.Set2Seq2Seq import Set2Seq2Seq
from preprocesses.DataIterator import FruitSeqDataset
from preprocesses.Voc import Voc
DATA_FILE = './data/all_data.txt'
OUT_FILE = './data/input_msg_pairs.txt'
def load_data(f):
    """Return every line of the open file *f*, stripped of surrounding
    whitespace (including the trailing newline)."""
    return [raw_line.strip() for raw_line in f.readlines()]
def generate_perfect_in_msg_pairs(file_path=DATA_FILE):
    """Build the "perfect" message for every input string in *file_path*.

    The message for a string is the concatenated counts of the letters
    'A', 'B', ... (args.num_words of them) in that string.

    Returns:
        tuple: (in_strs, msgs) - the input strings and their messages,
        index-aligned.
    """
    # `with` fixes the previous file-handle leak (the file was opened but
    # never closed).
    with open(file_path, 'r') as f:
        in_strs = load_data(f)
    msgs = [
        ''.join(str(in_str.count(chr(65 + c))) for c in range(args.num_words))
        for in_str in in_strs
    ]
    return in_strs, msgs
def output_perfect_in_msg_pairs(file_path, ins, msgs):
    """Append one "<input>\\t<msg>/\\t<input>/" line per pair to *file_path*.

    Args:
        file_path (str): Output file, opened in append mode.
        ins (list of str): Input strings.
        msgs (list of str): Messages, index-aligned with *ins*.
    """
    # `with` guarantees the file is closed even if a write raises.
    with open(file_path, 'a') as out_file:
        for in_str, msg in zip(ins, msgs):
            print(in_str + '\t' + msg + '/\t' + in_str + '/', file=out_file)
def reproduce_msg_output(model, voc, data_batch, train_args):
    """Run one batch through the speaker and listener and decode both.

    Args:
        model: Set2Seq2Seq model with .speaker and .listener submodules.
        voc: Vocabulary providing index2word.
        data_batch (dict): Batch with 'input', 'input_mask', 'target',
            'target_mask' and 'target_max_len' entries.
        train_args: Training-time arguments (msg_vocsize, eos_index).

    Returns:
        tuple of str: (msg_str, output_str) - argmax-decoded message
        symbols and decoded output words, each terminated with '/'.
    """
    input_var = data_batch['input']
    input_mask = data_batch['input_mask']
    # target/target_mask are unpacked but not used below; kept for parity
    # with the batch layout.
    target_var = data_batch['target']
    target_mask = data_batch['target_mask']
    target_max_len = data_batch['target_max_len']
    message, _, msg_mask = model.speaker(input_var, input_mask)
    output = model.listener(message, msg_mask, target_max_len)
    # Greedy-decode the message; index msg_vocsize-1 is treated as the
    # end-of-message symbol, everything after it is dropped.
    message = message.squeeze().detach().cpu().numpy()
    msg_str = ''
    msg_end = False
    for r_idx in range(message.shape[0]):
        cur_v = np.argmax(message[r_idx])
        if cur_v == train_args.msg_vocsize - 1:
            msg_end = True
        if not msg_end:
            msg_str += str(cur_v)
    msg_str += '/'
    # Greedy-decode the listener output into vocabulary words, stopping
    # at the EOS index.
    output = output.squeeze().detach().cpu().numpy()
    output_str = ''
    output_end = False
    for r_idx in range(output.shape[0]):
        cur_v = np.argmax(output[r_idx])
        if cur_v == train_args.eos_index:
            output_end = True
        if not output_end:
            output_str += voc.index2word[cur_v]
    output_str += '/'
    return msg_str, output_str
def iterate_dataset(model, voc, str_set, batch_set, out_file, train_args):
    """Write one tab-separated line (input string, message, output) per
    batch to *out_file*."""
    for batch_idx, batch in enumerate(batch_set):
        msg_str, out_str = reproduce_msg_output(model, voc, batch, train_args)
        line = '\t'.join([str_set[batch_idx], msg_str, out_str])
        print(line, file=out_file)
def main():
    """Rebuild the trained Set2Seq2Seq model from args.param_file and
    append, for every input sequence in DATA_FILE, the emitted message
    and the listener's output to OUT_FILE."""
    print('building vocabulary...')
    voc = Voc()
    print('done')
    print('loading data and building batches...')
    data_set = FruitSeqDataset(voc, dataset_file_path=DATA_FILE, batch_size=1)
    str_set = data_set.load_stringset(DATA_FILE)
    print('done')
    print('rebuilding model from saved parameters in ' + args.param_file + '...')
    model = Set2Seq2Seq(voc.num_words).to(args.device)
    checkpoint = torch.load(args.param_file, map_location=args.device)
    train_args = checkpoint['args']
    model.load_state_dict(checkpoint['model'])
    # The checkpoint's vocabulary replaces the freshly built one above.
    voc = checkpoint['voc']
    print('done')
    model.eval()
    print('iterating data set...')
    # NOTE(review): out_file is never closed explicitly; consider a
    # `with` block so the output is flushed deterministically.
    out_file = open(OUT_FILE, mode='a')
    iterate_dataset(model, voc, str_set, data_set, out_file, train_args)
if __name__ == '__main__':
    main()
| 28.666667 | 82 | 0.653363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 326 | 0.102451 |
92939cd40ec3f894a9ca9fdc8baa64317efc2816 | 768 | py | Python | Deck.py | Harel92/BlackJack-Game | adba557a025d9633a2cdaca4f59bb7aa4c566f6c | [
"MIT"
] | null | null | null | Deck.py | Harel92/BlackJack-Game | adba557a025d9633a2cdaca4f59bb7aa4c566f6c | [
"MIT"
] | null | null | null | Deck.py | Harel92/BlackJack-Game | adba557a025d9633a2cdaca4f59bb7aa4c566f6c | [
"MIT"
] | null | null | null | import random
from Card import Card
# Domain of a standard 52-card deck: 4 suits x 13 ranks.
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
class Deck:
    """A full deck built from the module-level `suits` and `ranks`,
    stored top-of-deck-last in `all_cards`."""

    def __init__(self):
        # Built once per Deck: one Card for every (suit, rank) pair,
        # in suit-major order (same order as nested loops over suits/ranks).
        self.all_cards = [Card(suit, rank) for suit in suits for rank in ranks]

    def shuffle(self):
        """Shuffle the deck in place; returns None."""
        random.shuffle(self.all_cards)

    def deal(self):
        """Remove and return the card at the end of all_cards."""
        return self.all_cards.pop()
| 30.72 | 113 | 0.578125 | 557 | 0.72526 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.408854 |
9293e833f1e19f7c619dba6ff2f245f4df6c6b74 | 4,762 | py | Python | dependencies/src/4Suite-XML-1.0.2/Ft/Xml/Lib/HtmlPrettyPrinter.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/Ft/Xml/Lib/HtmlPrettyPrinter.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/Ft/Xml/Lib/HtmlPrettyPrinter.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | 1 | 2020-07-26T03:57:45.000Z | 2020-07-26T03:57:45.000Z | ########################################################################
# $Header: /var/local/cvsroot/4Suite/Ft/Xml/Lib/HtmlPrettyPrinter.py,v 1.12 2005/02/09 09:12:06 mbrown Exp $
"""
This module supports formatted document serialization in HTML syntax.
Copyright 2005 Fourthought, Inc. (USA).
Detailed license and copyright information: http://4suite.org/COPYRIGHT
Project home, documentation, distributions: http://4suite.org/
"""
from Ft.Xml import EMPTY_NAMESPACE
from HtmlPrinter import HtmlPrinter
class HtmlPrettyPrinter(HtmlPrinter):
    """
    An HtmlPrettyPrinter instance provides functions for serializing an
    XML or XML-like document to a stream, based on SAX-like event calls
    initiated by an Ft.Xml.Lib.Print.PrintVisitor instance.

    The methods in this subclass of HtmlPrinter attempt to emit a
    document conformant to the HTML 4.01 syntax, with extra whitespace
    added for visual formatting. The indent attribute is the string used
    for each level of indenting. It defaults to 2 spaces.
    """
    # The amount of indent for each level of nesting
    indent = '  '

    def __init__(self, stream, encoding):
        HtmlPrinter.__init__(self, stream, encoding)
        # Depth of the currently open elements; drives the indent width.
        self._level = 0
        # indenting control variables
        self._isInline = [1]  # prevent newline before first element
        self._inNoIndent = [0]
        # Counter (not flag): >0 while inside any no-indent element.
        self._indentForbidden = 0
        self._indentEndTag = False
        return

    def startElement(self, namespaceUri, tagName, namespaces, attributes):
        # Close a still-open start tag before emitting anything new.
        if self._inElement:
            self.writeAscii('>')
            self._inElement = False

        # Create the lookup key for the various lookup tables
        key = (namespaceUri, tagName.lower())

        # Get the inline flag for this element
        inline = key in self.inlineElements

        # Newline + indent only for block-level elements outside
        # no-indent regions.
        if not inline and not self._isInline[-1] and not self._indentForbidden:
            self.writeAscii('\n' + (self.indent * self._level))

        HtmlPrinter.startElement(self, namespaceUri, tagName, namespaces,
                                 attributes)

        # Setup indenting rules for this element
        self._isInline.append(inline)
        self._inNoIndent.append(key in self.noIndentElements)
        self._indentForbidden += self._inNoIndent[-1]
        self._level += 1
        self._indentEndTag = False
        return

    def endElement(self, namespaceUri, tagName):
        # Undo changes to indenting rules for this element
        self._level -= 1
        inline = self._isInline.pop()
        if self._inElement:
            # An empty non-null namespace element (use XML short form)
            self.writeAscii('/>')
            self._inElement = False
        else:
            # Indent the end tag only if child content already broke the line.
            if not inline and not self._indentForbidden and self._indentEndTag:
                self.writeAscii('\n' + (self.indent * self._level))
        HtmlPrinter.endElement(self, namespaceUri, tagName)
        self._indentForbidden -= self._inNoIndent.pop()
        self._indentEndTag = not inline
        return

    def processingInstruction(self, target, data):
        if self._inElement:
            self.writeAscii('>')
            self._inElement = False

        # OK to indent end-tag
        self._indentEndTag = True

        # try to indent
        if not self._isInline[-1] and not self._indentForbidden:
            self.writeAscii('\n' + (self.indent * self._level))
        HtmlPrinter.processingInstruction(self, target, data)
        return

    def comment(self, data):
        # Mirrors processingInstruction: comments also start on a new line.
        if self._inElement:
            self.writeAscii('>')
            self._inElement = False

        # OK to indent end-tag
        self._indentEndTag = True

        # try to indent
        if not self._isInline[-1] and not self._indentForbidden:
            self.writeAscii('\n' + (self.indent * self._level))
        HtmlPrinter.comment(self, data)
        return

    # Elements that should never be emitted on a new line.
    inlineElements = {}
    for name in ['tt', 'i', 'b', 'u', 's', 'strike', 'big', 'small', 'em',
                 'strong', 'dfn', 'code', 'samp', 'kbd', 'var', 'cite',
                 'abbr', 'acronym', 'a', 'img', 'applet', 'object', 'font',
                 'basefont', 'script', 'map', 'q', 'sub', 'sup', 'span',
                 'bdo', 'iframe', 'input', 'select', 'textarea', 'label',
                 'button']:
        inlineElements[(EMPTY_NAMESPACE, name)] = True

    # Elements that should never be emitted with additional
    # whitespace in their content; i.e., once you're inside
    # one, you don't do any more indenting.
    noIndentElements = {}
    for name in ['script', 'style', 'pre', 'textarea', 'xmp']:
        noIndentElements[(EMPTY_NAMESPACE, name)] = True

    # Remove the loop variable from the class namespace.
    del name
9294963c5c0383860551b27b39a0b277aa9e0e8f | 818 | py | Python | setup.py | lanius/chord | 48e129367080c95116600a80d56b310d06322b21 | [
"MIT"
] | null | null | null | setup.py | lanius/chord | 48e129367080c95116600a80d56b310d06322b21 | [
"MIT"
] | null | null | null | setup.py | lanius/chord | 48e129367080c95116600a80d56b310d06322b21 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup
# Distribution metadata for the 'chord' package (keyboard state capture).
setup(
    name='chord',
    version='0.0.1',
    url='https://github.com/lanius/chord/',
    packages=['chord'],
    license='MIT',
    author='lanius',
    author_email='lanius@nirvake.org',
    description='Captures current status of keyboard.',
    # Windows-only runtime dependencies (keyboard hooks + Win32 API).
    install_requires=['pyHook', 'pywin32'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Win32 (MS Windows)',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
)
92950fc24b8bc2b382fb2963eb44444a27e250ed | 4,669 | py | Python | modules/deepspell/baseline/symspell_gendawg.py | Klebert-Engineering/deep-spell-9 | cabfcbf8238085b139e6b4b0e43f459230ead963 | [
"MIT"
] | 3 | 2018-12-16T17:20:39.000Z | 2019-01-11T19:45:29.000Z | modules/deepspell/baseline/symspell_gendawg.py | Klebert-Engineering/deep-spell-9 | cabfcbf8238085b139e6b4b0e43f459230ead963 | [
"MIT"
] | 2 | 2021-11-30T16:35:36.000Z | 2022-01-20T12:33:10.000Z | modules/deepspell/baseline/symspell_gendawg.py | Klebert-Engineering/deep-spell-9 | cabfcbf8238085b139e6b4b0e43f459230ead963 | [
"MIT"
] | null | null | null | # (C) 2018-present Klebert Engineering
"""
Opens a TSV FTS corpus file and generates misspelled entries
for each FTS token with a given maximum edit distance.
Takes two arguments:
(1) The corpus file
(2) The output file. Two output files will be generated from this argument:
-> <output file>.refs : Contains pickled DAWG with misspelled tokens and correct token references
-> <output file>.tokens : Contains the correctly spelled tokens, where line-number=reference-index,
in the form of <token> <frequency>
"""
import codecs
import sys
import string
from hat_trie import Trie
from dawg import BytesDAWG
def generate_lookup_entries(w, max_edit_distance=0):
    """Given a word, derive the set of strings obtainable by deleting up
    to max_edit_distance characters (the word itself is always included).

    Words of length 1 are never shortened further.
    """
    result = {w}
    frontier = {w}
    for _ in range(max_edit_distance):
        next_frontier = set()
        for word in frontier:
            if len(word) <= 1:
                continue  # never delete the last remaining character
            for pos in range(len(word)):
                candidate = word[:pos] + word[pos + 1:]
                if candidate not in result:
                    result.add(candidate)
                    next_frontier.add(candidate)
        frontier = next_frontier
    return result
def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=10):
    """
    Render a single-line terminal progress bar, overwriting the previous
    one via carriage return; emits a final newline when complete.

    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        bar_length  - Optional  : character length of bar (Int)
    """
    # Nested format spec replaces the previous string-concatenated format.
    percents = '{0:.{1}f}'.format(100 * (iteration / float(total)), decimals)
    filled_length = int(round(bar_length * iteration / float(total)))
    bar = '#' * filled_length + '-' * (bar_length - filled_length)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    if iteration == total:
        # Done: move to the next line so subsequent output starts fresh.
        sys.stdout.write('\n')
    sys.stdout.flush()
# Command-line arguments: (1) TSV corpus file, (2) output base name.
input_file_path = sys.argv[1]
output_file_path = sys.argv[2]

# Maps every deletion-variant string to the packed indices of the correct
# tokens it can correct to.
bytes_for_token = Trie() # charset
# token_and_freq_for_index[i] == [token, frequency]; i is the token's index.
token_and_freq_for_index = []
longest_word_length = 0
# Each token index is packed into this many big-endian bytes.
bytes_per_index = 3

# Count total lines in corpus file
with codecs.open(input_file_path, encoding="utf-8") as corpus_file:
    total = sum(1 for _ in corpus_file)
done = 0

print("Loading completion tokens from '{}'...".format(input_file_path))
with codecs.open(input_file_path, encoding="utf-8") as input_file:
    index_for_token = Trie() # charset
    for line in input_file:
        parts = line.split("\t")
        done += 1
        print_progress(done, total)
        # Skip malformed rows; the token is expected in column 3.
        if len(parts) < 6:
            continue
        token = parts[2].lower() # unidecode.unidecode()
        # check if word is already in dictionary
        # dictionary entries are in the form: (list of suggested corrections, frequency of word in corpus)
        if token in index_for_token:
            token_index = index_for_token[token]
        else:
            token_index = len(token_and_freq_for_index)
            index_for_token[token] = token_index
            longest_word_length = max(len(token), longest_word_length)
            token_and_freq_for_index.append([token, 0])
            # first appearance of word in corpus
            # n.b. word may already be in dictionary as a derived word, but
            # counter of frequency of word in corpus is not incremented in those cases.
            deletes = generate_lookup_entries(token)
            word_index_bytes = token_index.to_bytes(bytes_per_index, 'big')
            # Append this token's packed index to every deletion variant.
            for entry in deletes:
                if entry in bytes_for_token:
                    bytes_for_token[entry] += word_index_bytes
                else:
                    bytes_for_token[entry] = word_index_bytes
        # increment count of token in corpus
        token_and_freq_for_index[token_index][1] += 1
print("\n ...done.")

print("Creating DAWG...")
dawg_dict = BytesDAWG(([token, bytes_for_token[token]] for token in bytes_for_token.iterkeys()))
print(" ...done.")

# <output>.refs holds the pickled DAWG; <output>.tokens holds one
# "<token>\t<frequency>" line per token (line number == token index).
print("Writing output files {}.refs and {}.tokens ...".format(output_file_path, output_file_path))
dawg_dict.save(output_file_path + ".refs")
with codecs.open(output_file_path + ".tokens", "w", encoding="utf-8") as output_tokens:
    for token, freq in token_and_freq_for_index:
        output_tokens.write("{}\t{}\n".format(token, freq))
print(" ...done.")
| 39.567797 | 106 | 0.644892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,754 | 0.375669 |
929548cb4fdb28eda560c96f789724f2f74e54ef | 4,090 | py | Python | Smaug/models/users.py | luviiv/Smaug | d26d837bb97ef583e05c43d16aed265356b7cf74 | [
"Apache-2.0"
] | null | null | null | Smaug/models/users.py | luviiv/Smaug | d26d837bb97ef583e05c43d16aed265356b7cf74 | [
"Apache-2.0"
] | null | null | null | Smaug/models/users.py | luviiv/Smaug | d26d837bb97ef583e05c43d16aed265356b7cf74 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
users.py
~~~~~~~~~~~
user manage
:copyright: (c) 2015 by Lu Tianchao.
:license: Apache, see LICENSE for more details.
"""
import hashlib
from datetime import datetime
from werkzeug import generate_password_hash, check_password_hash, \
cached_property
from flask.ext.sqlalchemy import BaseQuery
from flask.ext.principal import RoleNeed, UserNeed, Permission
from Smaug.extensions import db
from Smaug.permissions import null
class UserQuery(BaseQuery):
    """Query helpers for loading and authenticating :class:`User` rows."""

    def from_identity(self, identity):
        """Resolve a flask-principal ``Identity`` to a ``User``.

        On success the identity is granted the user's permission needs and
        the user object is monkeypatched onto ``identity.user``.  Returns
        ``None`` when the identity id does not map to a user.
        """
        try:
            account = self.get(int(identity.id))
        except ValueError:
            account = None
        if account is not None:
            identity.provides.update(account.provides)
        identity.user = account
        return account

    def authenticate(self, login, password):
        """Look up a user by username or e-mail and check *password*.

        Returns ``(user, ok)``; *user* may be ``None`` and *ok* is False
        when no matching user exists.
        """
        criteria = db.or_(User.username == login, User.email == login)
        account = self.filter(criteria).first()
        ok = account.check_password(password) if account else False
        return account, ok

    def authenticate_openid(self, email, openid):
        """Look up a user by e-mail and verify the hashed *openid*.

        Returns ``(user, ok)`` with the same semantics as ``authenticate``.
        """
        account = self.filter(User.email == email).first()
        ok = account.check_openid(openid) if account else False
        return account, ok
class User(db.Model):
    """ORM model for an application user with role-based permissions.

    Passwords and OpenID tokens are stored only as werkzeug hashes, exposed
    through write-hashing synonym properties.
    """
    __tablename__ = "users"
    query_class = UserQuery
    # user roles (ordered: higher value implies more privileges)
    MEMBER = 100
    MODERATOR = 200
    ADMIN = 300
    id = db.Column(db.Integer, primary_key = True)
    username = db.Column(db.Unicode(60), unique=True, nullable=False)
    email = db.Column(db.String(150), unique=True, nullable=False)
    role = db.Column(db.Integer, default = MEMBER)
    # Raw hash columns; use the `password` / `openid` synonyms below instead.
    _password = db.Column("password", db.String(80))
    _openid = db.Column("openid", db.String(80), unique=True)
    class Permissions(object):
        # Lazily-evaluated permission bundle bound to a User instance.
        def __init__(self, obj):
            self.obj = obj
        @cached_property
        def send_message(self):
            # NOTE(review): relies on `receive_email` and `friends`
            # attributes that are not defined in this chunk — presumably
            # provided elsewhere on User; confirm before relying on this.
            if not self.obj.receive_email:
                return null
            needs = [UserNeed(user_id) for user_id in self.obj.friends]
            if not needs:
                return null
            return Permission(*needs)
    def __str__(self):
        return self.username
    def __repr__(self):
        # Delegates to __str__, so this renders as "<username>".
        return "<%s>" % self
    @cached_property
    def permissions(self):
        return self.Permissions(self)
    def _get_password(self):
        return self._password
    def _set_password(self, password):
        # Assigning `user.password = "plain"` stores only the hash.
        self._password = generate_password_hash(password)
    password = db.synonym("_password",
                          descriptor=property(_get_password,
                                              _set_password))
    def check_password(self, password):
        """Return True iff *password* matches the stored hash."""
        if self.password is None:
            return False
        return check_password_hash(self.password, password)
    def _get_openid(self):
        return self._openid
    def _set_openid(self, openid):
        # OpenID tokens are hashed on assignment, like passwords.
        self._openid = generate_password_hash(openid)
    openid = db.synonym("_openid",
                        descriptor=property(_get_openid,
                                            _set_openid))
    def check_openid(self, openid):
        """Return True iff *openid* matches the stored hash."""
        if self.openid is None:
            return False
        return check_password_hash(self.openid, openid)
    @cached_property
    def provides(self):
        # Permission needs handed to flask-principal for this user.
        needs = [RoleNeed('authenticated'),
                 UserNeed(self.id)]
        if self.is_moderator:
            needs.append(RoleNeed('moderator'))
        if self.is_admin:
            needs.append(RoleNeed('admin'))
        return needs
    @property
    def is_moderator(self):
        # Admins also count as moderators because ADMIN >= MODERATOR.
        return self.role >= self.MODERATOR
    @property
    def is_admin(self):
        return self.role >= self.ADMIN
9296c38d6179d914971335cddde811e8c8cfc78b | 347 | py | Python | cfg/launcher/__main__.py | rr-/dotfiles | 4a684c43a5714a3312b42b445e5ba9ae1fab0d1a | [
"MIT"
] | 16 | 2015-06-05T12:57:44.000Z | 2021-08-05T23:49:42.000Z | cfg/launcher/__main__.py | rr-/dotfiles | 4a684c43a5714a3312b42b445e5ba9ae1fab0d1a | [
"MIT"
] | 6 | 2015-11-01T18:18:26.000Z | 2020-10-06T09:17:29.000Z | cfg/launcher/__main__.py | rr-/dotfiles | 4a684c43a5714a3312b42b445e5ba9ae1fab0d1a | [
"MIT"
] | 6 | 2015-10-31T18:53:12.000Z | 2020-11-30T18:03:06.000Z | import os
from libdotfiles.util import (
HOME_DIR,
PKG_DIR,
REPO_ROOT_DIR,
create_symlink,
run,
)
# Symlink the launcher config into ~/.config so the launcher can find it.
create_symlink(
    PKG_DIR / "launcher.json", HOME_DIR / ".config" / "launcher.json"
)
# Install the launcher package from the repo checkout into the user site.
# check=False: a pip failure will not raise (assuming subprocess.run-style
# semantics for `run` — TODO confirm against libdotfiles.util).
os.chdir(REPO_ROOT_DIR / "opt" / "launcher")
run(
    ["python3", "-m", "pip", "install", "--user", "--upgrade", "."],
    check=False,
)
9296fb6b24dc3eedf52b5f0b91836cb7ef50d404 | 2,456 | py | Python | problems/737.Sentence-Similarity-II/li.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | problems/737.Sentence-Similarity-II/li.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | problems/737.Sentence-Similarity-II/li.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | # coding=utf-8
# Author: Jianghan LI
# Question: 737.Sentence-Similarity-II
# Complexity: O(N)
# Date: 2018-05 14:50 - 14:56, 1 wrong try
class Solution(object):
    def areSentencesSimilarTwo(self, words1, words2, pairs):
        """
        :type words1: List[str]
        :type words2: List[str]
        :type pairs: List[List[str]]
        :rtype: bool
        """
        # Union-find over the transitive similarity pairs.  Words that
        # never appear in a pair act as their own representative.
        root = {}

        def representative(word):
            # Find with path halving; unknown words map to themselves.
            if word not in root:
                return word
            while root[word] != word:
                root[word] = root[root[word]]
                word = root[word]
            return word

        for left, right in pairs:
            root.setdefault(left, left)
            root.setdefault(right, right)
            rep_left, rep_right = representative(left), representative(right)
            if rep_left != rep_right:
                root[rep_left] = rep_right

        # Sentences of different lengths are never similar.
        if len(words1) != len(words2):
            return False
        # Word-by-word: both words must share a similarity class.
        return all(representative(a) == representative(b)
                   for a, b in zip(words1, words2))
############ test case ###########
# Fixed: the original used Python 2 `print` statements, which are a
# syntax error on Python 3; converted to print() calls.
s = Solution()
words1 = ["great", "acting", "skills"]
words2 = ["fine", "drama", "talent"]
pairs = [["great", "good"], ["fine", "good"], ["acting", "drama"], ["skills", "talent"]]
# Expected: True — every word pairs with a similar counterpart.
print(s.areSentencesSimilarTwo(words1, words2, pairs))
words1 = ["an", "extraordinary", "meal", "meal"]
words2 = ["one", "good", "dinner"]
pairs = [["great", "good"], ["extraordinary", "good"], ["well", "good"], ["wonderful", "good"], ["excellent", "good"], ["fine", "good"], ["nice", "good"], ["any", "one"], ["some", "one"], ["unique", "one"], ["the", "one"], ["an", "one"], ["single", "one"], ["a", "one"], ["truck", "car"], ["wagon", "car"], ["automobile", "car"], ["auto", "car"], ["vehicle", "car"], [
    "entertain", "have"], ["drink", "have"], ["eat", "have"], ["take", "have"], ["fruits", "meal"], ["brunch", "meal"], ["breakfast", "meal"], ["food", "meal"], ["dinner", "meal"], ["super", "meal"], ["lunch", "meal"], ["possess", "own"], ["keep", "own"], ["have", "own"], ["extremely", "very"], ["actually", "very"], ["really", "very"], ["super", "very"]]
# Expected: False — sentence lengths differ (4 vs 3).
print(s.areSentencesSimilarTwo(words1, words2, pairs))
############ comments ############
# 1 wrong try for case len(words1) != len(words2)
929989a699a5e40cd08c6754c513c1391f4e851e | 3,371 | py | Python | test/test_datatypes.py | panny2207/OWL-RL | 218c5779290a0fbd653e40ab377e19b4f7c394fb | [
"W3C-20150513"
] | null | null | null | test/test_datatypes.py | panny2207/OWL-RL | 218c5779290a0fbd653e40ab377e19b4f7c394fb | [
"W3C-20150513"
] | null | null | null | test/test_datatypes.py | panny2207/OWL-RL | 218c5779290a0fbd653e40ab377e19b4f7c394fb | [
"W3C-20150513"
] | null | null | null | """
Test for OWL 2 RL/RDF rules from
Table 8. The Semantics of Datatypes
https://www.w3.org/TR/owl2-profiles/#Reasoning_in_OWL_2_RL_and_RDF_Graphs_using_Rules
NOTE: The following axioms are skipped on purpose
- dt-eq
- dt-diff
"""
from rdflib import Graph, Literal, Namespace, RDF, XSD, RDFS
import owlrl
DAML = Namespace('http://www.daml.org/2002/03/agents/agent-ont#')
T = Namespace('http://test.org/')
def test_dt_type1():
    """
    Test dt-type1 rule for OWL 2 RL.
    """
    g = Graph()
    owlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(g)
    # After closure, every supported datatype must be typed rdfs:Datatype.
    expected_datatypes = (
        RDF.PlainLiteral, RDF.XMLLiteral, RDFS.Literal,
        XSD.decimal, XSD.integer,
        XSD.nonNegativeInteger, XSD.nonPositiveInteger,
        XSD.positiveInteger, XSD.negativeInteger,
        XSD.long, XSD.int, XSD.short, XSD.byte,
        XSD.unsignedLong, XSD.unsignedInt, XSD.unsignedShort, XSD.unsignedByte,
        XSD.float, XSD.double,
        XSD.string, XSD.normalizedString, XSD.token, XSD.language,
        XSD.Name, XSD.NCName, XSD.NMTOKEN,
        XSD.boolean, XSD.hexBinary, XSD.base64Binary, XSD.anyURI,
        XSD.dateTime, XSD.dateTimeStamp,
    )
    for datatype in expected_datatypes:
        assert (datatype, RDF.type, RDFS.Datatype) in g
def test_dt_type2():
    """
    Test dt-type2 rule for OWL 2 RL.
    """
    one = Literal(1, datatype=XSD.positiveInteger)
    graph = Graph()
    graph.add((T.A, T.prop, one))
    owlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(graph)
    # The original triple must survive, and the typed literal must be
    # classified under its datatype.
    assert (T.A, T.prop, one) in graph
    assert (one, RDF.type, XSD.positiveInteger) in graph
def test_dt_not_type():
    """
    Test dt-not-type rule for OWL 2 RL.
    """
    bad_literal = Literal(-1, datatype=XSD.nonNegativeInteger)
    graph = Graph()
    graph.add((T.A, T.prop, bad_literal))
    owlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(graph)
    # TODO: known failure — the ill-typed literal should not be classified
    # under xsd:nonNegativeInteger, but currently it is, so the direct
    # check stays disabled:
    #   assert (bad_literal, RDF.type, XSD.nonNegativeInteger) not in graph
    assert True
    # The reasoner must at least report the lexical mismatch via daml:error.
    result = next(graph.objects(predicate=DAML.error))
    expected = Literal(
        'Lexical value of the literal \'-1\' does not match its datatype'
        ' (http://www.w3.org/2001/XMLSchema#nonNegativeInteger)'
    )
    assert expected == result
| 35.861702 | 85 | 0.68407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 683 | 0.202611 |
929b14db4cd8be502dda71851249a5f21efe7fb5 | 7,072 | py | Python | pykin/robots/bimanual.py | jdj2261/pykin | da952b8ec023382b8a324d1095b0cfd675c7452b | [
"MIT"
] | 14 | 2021-08-09T06:59:10.000Z | 2022-03-09T13:05:46.000Z | pykin/robots/bimanual.py | jdj2261/pykin | da952b8ec023382b8a324d1095b0cfd675c7452b | [
"MIT"
] | null | null | null | pykin/robots/bimanual.py | jdj2261/pykin | da952b8ec023382b8a324d1095b0cfd675c7452b | [
"MIT"
] | 4 | 2021-12-13T03:23:36.000Z | 2022-03-09T11:34:29.000Z | import numpy as np
from pykin.robots.robot import Robot
from pykin.utils.error_utils import NotFoundError
class Bimanual(Robot):
    """
    Initializes a bimanual robot simulation object.
    Args:
        fname (str): path to the urdf file.
        offset (Transform): robot init offset
    """
    def __init__(
        self,
        fname: str,
        offset=None
    ):
        super(Bimanual, self).__init__(fname, offset)
        # Build per-arm ("right"/"left") bookkeeping dicts first, then
        # split the parent's joint limits per arm.
        self._setup_input2dict()
        self._set_joint_limits_upper_and_lower()
    def _setup_input2dict(self):
        """
        Initialize all per-arm bookkeeping dictionaries.
        """
        self._base_name = self._input2dict("")
        # NOTE(review): unlike the other attributes, _eef_name starts as a
        # plain empty dict; arm_type depends on which keys
        # setup_link_name() later adds.
        self._eef_name = {}
        self.desired_base_frame = self._input2dict(None)
        self.desired_frames = self._input2dict(None)
        self._frames = self._input2dict(None)
        self._revolute_joint_names = self._input2dict(None)
        self._target_pose = self._input2dict(None)
        self.joint_limits_lower = self._input2dict(None)
        self.joint_limits_upper = self._input2dict(None)
    def _input2dict(self, inp):
        """
        Helper function that converts an input that is either a single value or a list into a dict with keys for
        each arm: "right", "left"
        Args:
            inp (str or list or None): Input value to be converted to dict
        :Note: If inp is a list, then assumes format is [right, left]
        Returns:
            dict: Inputs mapped for each robot arm
        """
        # First, convert to list if necessary (scalar is duplicated per arm)
        if not isinstance(inp, list):
            inp = [inp for _ in range(2)]
        # Now, convert list to dict and return
        return {key: value for key, value in zip(self._arms, inp)}
    def _set_joint_limits_upper_and_lower(self):
        """
        Split self.joint_limits into per-arm lower/upper limit lists.
        """
        limits_lower = []
        limits_upper = []
        for joint, (limit_lower, limit_upper) in self.joint_limits.items():
            limits_lower.append((joint, limit_lower))
            limits_upper.append((joint, limit_upper))
        # Per-arm split relies on joint names containing the substring
        # "right" or "left".
        for arm in self._arms:
            self.joint_limits_lower[arm] = [
                limit_lower for joint, limit_lower in limits_lower if arm in joint]
            self.joint_limits_upper[arm] = [
                limit_upper for joint, limit_upper in limits_upper if arm in joint]
    def setup_link_name(self, base_name="", eef_name=None):
        """
        Sets robot's link name
        Args:
            base_name (str): reference link name
            eef_name (str): end effector name; must contain "right" and/or
                "left" so the matching arm(s) can be configured
        """
        if "right" in eef_name:
            self._base_name["right"] = base_name
            self._eef_name["right"] = eef_name
            self._set_desired_base_frame("right")
            self._set_desired_frame("right")
        if "left" in eef_name:
            self._base_name["left"] = base_name
            self._eef_name["left"] = eef_name
            self._set_desired_base_frame("left")
            self._set_desired_frame("left")
    def _set_desired_base_frame(self, arm):
        """
        Sets robot's desired base frame
        Args:
            arm (str): robot arm (right or left)
        """
        # Empty base name means start from the kinematic root; otherwise
        # look up "<base_name>_frame" (frame-name suffix convention).
        if self.base_name[arm] == "":
            self.desired_base_frame[arm] = self.root
        else:
            self.desired_base_frame[arm] = super().find_frame(self.base_name[arm] + "_frame")
    def _set_desired_frame(self, arm):
        """
        Sets robot's desired frame chain (base -> end effector) and the
        derived joint names / zeroed target pose for one arm.
        Args:
            arm (str): robot arm (right or left)
        """
        self.desired_frames[arm] = super().generate_desired_frame_recursive(
            self.desired_base_frame[arm],
            self.eef_name[arm])
        self._frames[arm] = self.desired_frames[arm]
        self._revolute_joint_names[arm] = super().get_revolute_joint_names(self._frames[arm])
        # Target pose placeholder sized to the arm's revolute joint count.
        self._target_pose[arm] = np.zeros(len(self._revolute_joint_names[arm]))
    def inverse_kin(self, current_joints, target_pose, method="LM", maxIter=1000):
        """
        Returns joint angles obtained by computing IK
        Args:
            current_joints (sequence of float): input joint angles
            target_pose (dict): goal pose per arm, keyed "right"/"left"
            method (str): two methods to calculate IK (LM: Levenberg-marquardt, NR: Newton-raphson)
            maxIter (int): Maximum number of calculation iterations
        Returns:
            joints (dict): target joint angles per arm
        """
        if not isinstance(target_pose, dict):
            raise TypeError("Be sure to input the target pose in dictionary form.")
        joints = {}
        # Reset per-arm frame caches; _set_desired_frame repopulates them
        # below for each requested arm.
        self._frames = self._input2dict(None)
        self._revolute_joint_names = self._input2dict(None)
        for arm in target_pose.keys():
            if self.eef_name[arm]:
                self._set_desired_frame(arm)
                self._target_pose[arm] = self._convert_target_pose_type_to_npy(target_pose[arm])
                joints[arm] = self.kin.inverse_kinematics(
                    self._frames[arm],
                    current_joints,
                    self._target_pose[arm],
                    method,
                    maxIter)
        return joints
    def _convert_target_pose_type_to_npy(self, value):
        """
        Convert the input to a flat numpy array.
        Args:
            value (list or tuple or np.ndarray)
        Returns:
            np.array: flattened pose vector
        """
        if isinstance(value, (list, tuple)):
            value = np.array(value)
        return value.flatten()
    def get_eef_pose(self, transformations):
        """
        Compute end effector's pose
        Args:
            transformations(OrderedDict): frame name -> transform objects
                exposing .pos and .rot (Transform-like — TODO confirm)
        Returns:
            vals(dict): per-arm concatenation of position and rotation
        """
        vals = {}
        for arm in self.arm_type:
            if self.eef_name[arm]:
                vals[arm] = np.concatenate((transformations[self.eef_name[arm]].pos, transformations[self.eef_name[arm]].rot))
        return vals
    @property
    def _arms(self):
        """
        Returns name of arms used as naming convention throughout this module
        Returns:
            2-tuple: ('right', 'left')
        """
        return ("right", "left")
    @property
    def arm_type(self):
        """
        Return arm type.
        If both end effectors are configured, return the tuple
        ("right", "left"); otherwise return a one-element list
        (["right"] or ["left"]).
        Returns:
            arm types (tuple or list)
        Raises:
            NotFoundError: when no end effector has been configured
        """
        if len(self._eef_name.keys()) == 2:
            return self._arms
        elif "right" in self.eef_name.keys():
            return ["right"]
        elif "left" in self.eef_name.keys():
            return ["left"]
        else:
            raise NotFoundError("Can not find robot's arm type")
    @property
    def base_name(self):
        # Per-arm base link names (dict keyed "right"/"left").
        return self._base_name
    @property
    def eef_name(self):
        # Per-arm end-effector names; keys present only for configured arms.
        return self._eef_name
    @property
    def active_joint_names(self):
        # Per-arm revolute joint names derived in _set_desired_frame.
        return self._revolute_joint_names
929d5cde02d733a6bdaf5707edb964059b0f2d46 | 717 | py | Python | tests/test_model.py | tteofili/python-trustyai | 407ba3d9e40b42dd97d46d8ae9077d67ffa3a147 | [
"Apache-2.0"
] | null | null | null | tests/test_model.py | tteofili/python-trustyai | 407ba3d9e40b42dd97d46d8ae9077d67ffa3a147 | [
"Apache-2.0"
] | null | null | null | tests/test_model.py | tteofili/python-trustyai | 407ba3d9e40b42dd97d46d8ae9077d67ffa3a147 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=import-error, wrong-import-position, wrong-import-order, invalid-name
"""Test model provider interface"""
from common import *
from trustyai.model import Model, feature
def foo():
    """Return a constant marker string."""
    return "works!"
def test_basic_model():
    """Test basic model"""
    def echo_model(inputs):
        # Mirror every input feature back as a numeric output.
        mirrored = [
            output(name=f.name, dtype="number", value=f.value.as_number())
            for f in inputs
        ]
        return [PredictionOutput(mirrored)]
    model = Model(echo_model)
    features = [
        feature(name=f"f-num{i}", value=i * 2.0, dtype="number")
        for i in range(5)
    ]
    prediction = model.predictAsync(features).get()
    assert len(prediction[0].outputs) == 5
929dfa2e3f9cf68cb55576bcbba4406af6eaa25b | 381 | py | Python | avazu-ctr/rf.py | ldamewood/renormalization | 9dd2293f8e39fd671abae4f4f5914c9ccbcd5519 | [
"MIT"
] | 6 | 2016-11-26T02:39:27.000Z | 2018-05-11T02:02:03.000Z | avazu-ctr/rf.py | chrinide/renormalization | 9dd2293f8e39fd671abae4f4f5914c9ccbcd5519 | [
"MIT"
] | null | null | null | avazu-ctr/rf.py | chrinide/renormalization | 9dd2293f8e39fd671abae4f4f5914c9ccbcd5519 | [
"MIT"
] | 5 | 2015-01-12T20:22:56.000Z | 2018-03-22T03:38:15.000Z | #!/usr/bin/env python
from __future__ import print_function
from sklearn.feature_extraction import FeatureHasher
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.metrics import log_loss
import ctr
# Random forest learner; n_jobs=-1 uses all available cores.
learner = RandomForestClassifier(verbose = False, n_jobs = -1)
# Stream training rows one at a time from the ctr helper module.
# NOTE(review): the loop body is empty — the learner is never fit and no
# predictions are made; training appears unimplemented (or stripped).
for ID,x,y in ctr.data(ctr.train, batchsize = 1):
    pass
929e35be958ee3633c514ae680ee6e40e159bff6 | 1,611 | py | Python | migrations/versions/f6ee6f9df554_.py | d-demirci/blockpy-server | f596f539d7809054a430a898bee877a0dbcb15b5 | [
"MIT"
] | 18 | 2019-10-14T13:56:15.000Z | 2022-03-12T23:49:14.000Z | migrations/versions/f6ee6f9df554_.py | acbart/blockpy-server | c504be4dffc624b7161c2c976d2d195f5f08cf9a | [
"MIT"
] | 26 | 2019-08-13T18:17:45.000Z | 2021-09-06T12:31:48.000Z | migrations/versions/f6ee6f9df554_.py | DigitalDerrickcs/blockpy-server | 4d3da5830ec802ec9d018e2fde91f33e2be38f10 | [
"MIT"
] | 9 | 2019-08-27T10:52:31.000Z | 2021-07-27T16:10:17.000Z | """Add Review Table
Revision ID: f6ee6f9df554
Revises:
Create Date: 2019-08-07 13:09:49.691184
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f6ee6f9df554'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the `review` table and link `submission` to `assignment_group`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('review',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.Column('date_modified', sa.DateTime(), nullable=True),
    sa.Column('comment', sa.Text(), nullable=True),
    sa.Column('tag_id', sa.Integer(), nullable=True),
    sa.Column('score', sa.Integer(), nullable=True),
    sa.Column('submission_id', sa.Integer(), nullable=True),
    sa.Column('author_id', sa.Integer(), nullable=True),
    sa.Column('assignment_version', sa.Integer(), nullable=True),
    sa.Column('submission_version', sa.Integer(), nullable=True),
    sa.Column('version', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(('author_id',), ['user.id'], ),
    sa.ForeignKeyConstraint(('submission_id',), ['submission.id'], ),
    sa.ForeignKeyConstraint(('tag_id',), ['assignment_tag.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # NOTE(review): name=None means the FK gets a backend-generated name
    # (unless a MetaData naming convention is configured); downgrade()
    # then cannot reference the constraint by name — consider naming it.
    op.create_foreign_key(None, 'submission', 'assignment_group', ['assignment_group_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the submission FK and the `review` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint(None, ...) raises unless Alembic can
    # derive the constraint name (e.g. via a naming convention) — supply
    # the real FK name before relying on this downgrade.
    op.drop_constraint(None, 'submission', type_='foreignkey')
    op.drop_table('review')
    # ### end Alembic commands ###
| 34.276596 | 98 | 0.676598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.400372 |
929e8a325b9be258a38a47e650d3a9382c02adaa | 15,486 | py | Python | regionsSP/source/summary.py | abdelhadisamir/covid-19-SEIAR | 187afb1ad4dccb1a4544b54eb7cda3d61d2c601f | [
"MIT"
] | 2 | 2020-05-12T07:32:42.000Z | 2021-07-26T09:41:17.000Z | regionsSP/source/summary.py | abdelhadisamir/covid-19-SEIAR | 187afb1ad4dccb1a4544b54eb7cda3d61d2c601f | [
"MIT"
] | null | null | null | regionsSP/source/summary.py | abdelhadisamir/covid-19-SEIAR | 187afb1ad4dccb1a4544b54eb7cda3d61d2c601f | [
"MIT"
] | null | null | null | if districtRegion1=="DRS 05 - Barretos":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 01 - Grande São Paulo":
date="2020-03-15"
#initial condition for susceptible
s0=280.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=80
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=1500
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of infected
ratioRecovered=0.1
#weigth for fitting data
weigthCases=0.6
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 04 - Baixada Santista":
date="2020-04-01"
#initial condition for susceptible
s0=8.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=150
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 06 - Bauru":
    # NOTE(review): this branch appeared twice (byte-identical) in the
    # original file; the redundant second copy has been removed.
    date="2020-04-01"
    #initial condition for susceptible
    s0=10.0e3
    #initial condition for exposed
    e0=1e-4
    #initial condition for infectious
    i0=4
    #initial condition for recovered
    r0=1e-4
    #initial condition for deaths
    k0=1e-4
    #initial condition for asymptomatic
    a0=1e-4
    #start fitting when the number of cases >= start
    start=0
    #how many days is the prediction
    prediction_days=70
    #as recovered data is not available, so recovered is in function of death
    ratioRecovered=.1
    #weigth for fitting data
    weigthCases=0.4
    weigthRecov=0.0
    #weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 17 - Taubaté":
    date="2020-04-01"
    #initial condition for susceptible
    s0=10.0e3
    #initial condition for exposed
    e0=1e-4
    #initial condition for infectious
    i0=17
    #initial condition for recovered
    r0=1e-4
    #initial condition for deaths
    k0=2
    #initial condition for asymptomatic
    a0=1e-4
    #start fitting when the number of cases >= start
    start=0
    #how many days is the prediction
    prediction_days=70
    #as recovered data is not available, so recovered is in function of death
    ratioRecovered=.08
    #weigth for fitting data
    weigthCases=0.4
    weigthRecov=0.0
    #weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 13 - Ribeirão Preto":
date="2020-03-25"
#initial condition for susceptible
s0=5.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=5
#how many days is the prediction
prediction_days=60
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.3
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 02 - Araçatuba":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=2
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 09 - Marília":
date="2020-04-01"
#initial condition for susceptible
s0=5.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=60
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 07 - Campinas":
date="2020-04-01"
#initial condition for susceptible
s0=20.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=40
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.5
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 11 - Presidente Prudente":
date="2020-04-01"
#initial condition for susceptible
s0=5.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=60
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 10 - Piracicaba":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=2
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 12 - Registro":
date="2020-04-01"
#initial condition for susceptible
s0=10.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=1e-4
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1e-4
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.08
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.0
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 14 - São João da Boa Vista":
    # NOTE(review): this branch appeared twice (byte-identical) in the
    # original file; the redundant second copy has been removed.
    date="2020-04-01"
    #initial condition for susceptible
    s0=5.0e3
    #initial condition for exposed
    e0=1e-4
    #initial condition for infectious
    i0=1e-4
    #initial condition for recovered
    r0=1e-4
    #initial condition for deaths
    k0=1e-4
    #initial condition for asymptomatic
    a0=1e-4
    #start fitting when the number of cases >= start
    start=0
    #how many days is the prediction
    prediction_days=60
    #as recovered data is not available, so recovered is in function of death
    ratioRecovered=.08
    #weigth for fitting data
    weigthCases=0.4
    weigthRecov=0.0
    #weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 15 - São José do Rio Preto":
    date="2020-04-01"
    #initial condition for susceptible
    s0=10.0e3
    #initial condition for exposed
    e0=1e-4
    #initial condition for infectious
    i0=1e-4
    #initial condition for recovered
    r0=1e-4
    #initial condition for deaths
    k0=1e-4
    #initial condition for asymptomatic
    a0=1e-4
    #start fitting when the number of cases >= start
    start=0
    #how many days is the prediction
    prediction_days=70
    #as recovered data is not available, so recovered is in function of death
    ratioRecovered=.08
    #weigth for fitting data
    weigthCases=0.4
    weigthRecov=0.0
    #weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 16 - Sorocaba":
date="2020-04-01"
#initial condition for susceptible
s0=1.0e3
#initial condition for exposed
e0=1e-4
#initial condition for infectious
i0=2
#initial condition for recovered
r0=1e-4
#initial condition for deaths
k0=1
#initial condition for asymptomatic
a0=1e-4
#start fitting when the number of cases >= start
start=0
#how many days is the prediction
prediction_days=70
#as recovered data is not available, so recovered is in function of death
ratioRecovered=.1
#weigth for fitting data
weigthCases=0.4
weigthRecov=0.1
#weightDeaths = 1 - weigthCases - weigthRecov
if districtRegion1=="DRS 03 - Araraquara":
    # NOTE(review): the original file contained this branch twice in a row
    # with conflicting values (s0=5.0e3/weigthCases=0.4, then
    # s0=2.0e3/weigthCases=0.5).  The second block always overwrote the
    # first, so only the effective (second) settings are kept here.
    date="2020-03-25"
    #initial condition for susceptible
    s0=2.0e3
    #initial condition for exposed
    e0=1e-4
    #initial condition for infectious
    i0=0
    #initial condition for recovered
    r0=1e-4
    #initial condition for deaths
    k0=1e-4
    #initial condition for asymptomatic
    a0=1e-4
    #start fitting when the number of cases >= start
    start=0
    #how many days is the prediction
    prediction_days=70
    #as recovered data is not available, so recovered is in function of death
    ratioRecovered=.1
    #weigth for fitting data
    weigthCases=0.5
    weigthRecov=0.1
    #weightDeaths = 1 - weigthCases - weigthRecov
| 32.465409 | 84 | 0.596216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,787 | 0.567013 |
929ee11fc6eb6656e8d14a65c71ef4a3ce9f586f | 4,068 | py | Python | claymore_json_api.py | bennettwarner/ClaymoreJSON-API | c171ce844540135ff8f5e85df3093c84d433c66c | [
"MIT"
] | null | null | null | claymore_json_api.py | bennettwarner/ClaymoreJSON-API | c171ce844540135ff8f5e85df3093c84d433c66c | [
"MIT"
] | null | null | null | claymore_json_api.py | bennettwarner/ClaymoreJSON-API | c171ce844540135ff8f5e85df3093c84d433c66c | [
"MIT"
] | null | null | null | # Author: Bennett Warner
# Last update: 4/16/2018
import sys
import socket
import json
import argparse
from http.server import HTTPServer, BaseHTTPRequestHandler
if sys.version_info < (3, 0):
sys.stdout.write("Sorry, Claymore JSON-API requires Python 3.x\n")
sys.exit(1)
remote_host, remote_port = '', ''
def banner():
print("""
_____ _
/ ____| |
| | | | __ _ _ _ _ __ ___ ___ _ __ ___
| | | |/ _` | | | | '_ ` _ \ / _ \| '__/ _ \
| |____| | (_| | |_| | | | | | | (_) | | | __/
\_____|_|\__,_|\__, |_| |_| |_|\___/|_| \___|
__/ |
_ _____ |___/ _ _ _____ _____
| |/ ____|/ __ \| \ | | /\ | __ \_ _|
| | (___ | | | | \| |______ / \ | |__) || |
_ | |\___ \| | | | . ` |______/ /\ \ | ___/ | |
| |__| |____) | |__| | |\ | / ____ \| | _| |_
\____/|_____/ \____/|_| \_| /_/ \_\_| |_____|
# https://github.com/bennettwarner/ClaymoreJSON-API
""")
def parser_error(errmsg):
banner()
r = '\033[91m' # red
print(r + "Usage: python3 " + sys.argv[0] + " [Options] use -h for help")
print(r + "Error: " + errmsg)
sys.exit()
def parse_args():
# parse the arguments
parser = argparse.ArgumentParser(epilog='\tExample: \r\npython3 ' + sys.argv[0] + " -rhost miningrig.local -rport 3333")
parser.error = parser_error
parser._optionals.title = "OPTIONS"
parser.add_argument('-lhost', '--local-host', help="The IP address / hostname that should be used to serve the JSON API. (Defaults to localhost)", default='localhost')
parser.add_argument('-lport', '--local-port', help='The port that this server should bind to (Defaults to port 80)', default=80)
parser.add_argument('-rhost', '--remote-host', help='The IP address / hostname of the machine running Claymore Dual Miner', required=True)
parser.add_argument('-rport', '--remote-port', help='The port that Claymore Dual Miner is hosting the API on', required=True)
return parser.parse_args()
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
self.wfile.write(get_response())
def get_response():
global remote_host, remote_port
try:
request = poll_claymore(remote_host, remote_port)
response = build_response(request)
return response.encode()
except TimeoutError:
print('Error: API Timeout')
return 'Error: API Timeout'.encode()
def poll_claymore(remote_host, remote_port):
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect((remote_host, remote_port))
send = '{"id":0,"jsonrpc":"2.0","method":"miner_getstat1"}\n'
connection.send(str.encode(send))
data = connection.recv(1024)
connection.close()
return (json.loads(data))['result']
def build_response(request):
return_json = dict()
return_json['host'] = remote_host
return_json['port'] = remote_port
return_json['hashrate'] = str((str(request[2]).split(';'))[0])
cards = str(request[3]).split(';')
card_hashrate = dict()
for i in range(0, len(cards)):
card_hashrate.update({'card'+str(i): str(cards[i])})
return_json.update({'card_hashrate': card_hashrate})
return_json['uptime'] = request[1]
return_json['mining_address'] = request[7]
print(return_json)
return json.dumps(return_json)
def main():
args = parse_args()
local_host = args.local_host
local_port = int(args.local_port)
global remote_host, remote_port
remote_host = args.remote_host
remote_port = int(args.remote_port)
banner()
httpd = HTTPServer((local_host, local_port), SimpleHTTPRequestHandler)
print('Claymore JSON API running on '+local_host+':'+str(local_port))
print()
httpd.serve_forever()
if __name__ == '__main__':
main()
| 32.544 | 171 | 0.596853 | 178 | 0.043756 | 0 | 0 | 0 | 0 | 0 | 0 | 1,662 | 0.408555 |
92a13a7837e033195b0dd69a79961f1b0bff1db1 | 7,196 | py | Python | src/mushr_pf/motion_model.py | rogeriobonatti/mushr_pf | ec02df0504566f48983cfa0421034bdb94e1283b | [
"BSD-3-Clause"
] | 4 | 2019-08-30T08:20:21.000Z | 2021-12-31T01:34:40.000Z | src/mushr_pf/motion_model.py | rogeriobonatti/mushr_pf | ec02df0504566f48983cfa0421034bdb94e1283b | [
"BSD-3-Clause"
] | 2 | 2020-06-28T21:27:29.000Z | 2020-09-10T21:04:43.000Z | src/mushr_pf/motion_model.py | rogeriobonatti/mushr_pf | ec02df0504566f48983cfa0421034bdb94e1283b | [
"BSD-3-Clause"
] | 5 | 2019-09-26T15:17:43.000Z | 2022-01-21T00:11:20.000Z | #!/usr/bin/env python
# Copyright (c) 2019, The Personal Robotics Lab, The MuSHR Team, The Contributors of MuSHR
# License: BSD 3-Clause. See LICENSE.md file in root directory.
from threading import Lock
import numpy as np
import rospy
from std_msgs.msg import Float64
from vesc_msgs.msg import VescStateStamped
# Tune these Values!
KM_V_NOISE = 0.4 # Kinematic car velocity noise std dev
KM_DELTA_NOISE = 0.2 # Kinematic car delta noise std dev
KM_X_FIX_NOISE = 3e-2 # Kinematic car x position constant noise std dev
KM_Y_FIX_NOISE = 3e-2 # Kinematic car y position constant noise std dev
KM_THETA_FIX_NOISE = 1e-1 # Kinematic car theta constant noise std dev
# #Tune these Values!
# KM_V_NOISE = 0.01 # Kinematic car velocity noise std dev
# KM_DELTA_NOISE = 0.06 # Kinematic car delta noise std dev
# KM_X_FIX_NOISE = 3e-2 # Kinematic car x position constant noise std dev
# KM_Y_FIX_NOISE = 1e-3 # Kinematic car y position constant noise std dev
# KM_THETA_FIX_NOISE = 1e-2 # Kinematic car theta constant noise std dev
# #Tune these Values!
# KM_V_NOISE = 0.015 # Kinematic car velocity noise std dev
# KM_DELTA_NOISE = 0.065 # Kinematic car delta noise std dev
# KM_X_FIX_NOISE = 1e-2 # Kinematic car x position constant noise std dev
# KM_Y_FIX_NOISE = 1e-2 # Kinematic car y position constant noise std dev
# KM_THETA_FIX_NOISE = 1e-2 # Kinematic car theta constant noise std dev
"""
Propagates the particles forward based the velocity and steering angle of the car
"""
class KinematicMotionModel:
"""
Initializes the kinematic motion model
motor_state_topic: The topic containing motor state information
servo_state_topic: The topic containing servo state information
speed_to_erpm_offset: Offset conversion param from rpm to speed
speed_to_erpm_gain: Gain conversion param from rpm to speed
steering_angle_to_servo_offset: Offset conversion param from servo position to steering angle
steering_angle_to_servo_gain: Gain conversion param from servo position to steering angle
car_length: The length of the car
particles: The particles to propagate forward
state_lock: Controls access to particles
"""
def __init__(
self,
motor_state_topic,
servo_state_topic,
speed_to_erpm_offset,
speed_to_erpm_gain,
steering_to_servo_offset,
steering_to_servo_gain,
car_length,
particles,
state_lock=None,
):
self.last_servo_cmd = None # The most recent servo command
self.last_vesc_stamp = None # The time stamp from the previous vesc state msg
self.particles = particles
self.SPEED_TO_ERPM_OFFSET = (
speed_to_erpm_offset # Offset conversion param from rpm to speed
)
self.SPEED_TO_ERPM_GAIN = (
speed_to_erpm_gain # Gain conversion param from rpm to speed
)
self.STEERING_TO_SERVO_OFFSET = steering_to_servo_offset # Offset conversion param from servo position to steering angle
self.STEERING_TO_SERVO_GAIN = steering_to_servo_gain # Gain conversion param from servo position to steering angle
self.CAR_LENGTH = car_length # The length of the car
if state_lock is None:
self.state_lock = Lock()
else:
self.state_lock = state_lock
# This subscriber just caches the most recent servo position command
self.servo_pos_sub = rospy.Subscriber(
servo_state_topic, Float64, self.servo_cb, queue_size=1
)
# Subscribe to the state of the vesc
self.motion_sub = rospy.Subscriber(
motor_state_topic, VescStateStamped, self.motion_cb, queue_size=1
)
"""
Caches the most recent servo command
msg: A std_msgs/Float64 message
"""
def servo_cb(self, msg):
self.last_servo_cmd = msg.data # Update servo command
"""
Converts messages to controls and applies the kinematic car model to the
particles
msg: a vesc_msgs/VescStateStamped message
"""
def motion_cb(self, msg):
self.state_lock.acquire()
if self.last_servo_cmd is None:
self.state_lock.release()
return
if self.last_vesc_stamp is None:
print("Vesc callback called for first time....")
self.last_vesc_stamp = msg.header.stamp
self.state_lock.release()
return
# Convert raw msgs to controls
# Note that control = (raw_msg_val - offset_param) / gain_param
curr_speed = (
msg.state.speed - self.SPEED_TO_ERPM_OFFSET
) / self.SPEED_TO_ERPM_GAIN
curr_steering_angle = (
self.last_servo_cmd - self.STEERING_TO_SERVO_OFFSET
) / self.STEERING_TO_SERVO_GAIN
dt = (msg.header.stamp - self.last_vesc_stamp).to_sec()
# Propagate particles forward in place
self.apply_motion_model(
proposal_dist=self.particles, control=[curr_speed, curr_steering_angle, dt]
)
self.last_vesc_stamp = msg.header.stamp
self.state_lock.release()
def apply_motion_model(self, proposal_dist, control):
"""
Propagates particles forward (in-place) by applying the kinematic model and adding
sampled gaussian noise
proposal_dist: The particles to propagate
control: List containing velocity, steering angle, and timer interval - [v,delta,dt]
returns: nothing
"""
# Separate control
v, delta, dt = control
# Add control noise
v = np.random.normal(loc=v, scale=KM_V_NOISE, size=proposal_dist[:, 0].shape)
delta = np.random.normal(
loc=delta, scale=KM_DELTA_NOISE, size=proposal_dist[:, 0].shape
)
# apply motion model's update rule
theta = proposal_dist[:, 2]
theta_new = theta + v / self.CAR_LENGTH * np.tan(delta) * dt
# x
proposal_dist[:, 0] += (
self.CAR_LENGTH / np.tan(delta) * (np.sin(theta_new) - np.sin(theta))
)
# y
proposal_dist[:, 1] += (
self.CAR_LENGTH / np.tan(delta) * (-np.cos(theta_new) + np.cos(theta))
)
# Add noise
proposal_dist[:, 0] = np.random.normal(
loc=proposal_dist[:, 0],
scale=KM_X_FIX_NOISE,
size=proposal_dist[:, 0].shape,
)
proposal_dist[:, 1] = np.random.normal(
loc=proposal_dist[:, 1],
scale=KM_Y_FIX_NOISE,
size=proposal_dist[:, 1].shape,
)
proposal_dist[:, 2] = np.random.normal(
loc=theta_new, scale=KM_THETA_FIX_NOISE, size=proposal_dist[:, 2].shape
)
# print 'v: %f, delta: %f, x: %f, y: %f, theta: %f'%(np.mean(v), np.mean(delta), np.mean(proposal_dist[:,0]), np.mean(proposal_dist[:,1]), np.mean(proposal_dist[:,2]))
# Limit particle rotation to be between -pi and pi
proposal_dist[proposal_dist[:, 2] < -1 * np.pi, 2] += 2 * np.pi
proposal_dist[proposal_dist[:, 2] > np.pi, 2] -= 2 * np.pi
| 38.481283 | 175 | 0.65189 | 5,615 | 0.780295 | 0 | 0 | 0 | 0 | 0 | 0 | 3,420 | 0.475264 |
92a236fcf2da463a8a913c08e7962db7d7a482fc | 2,603 | py | Python | 2018/day-22/part2.py | amochtar/adventofcode | 292e7f00a1e19d2149d00246b0a77fedfcd3bd08 | [
"MIT"
] | 1 | 2019-12-27T22:36:30.000Z | 2019-12-27T22:36:30.000Z | 2018/day-22/part2.py | amochtar/adventofcode | 292e7f00a1e19d2149d00246b0a77fedfcd3bd08 | [
"MIT"
] | null | null | null | 2018/day-22/part2.py | amochtar/adventofcode | 292e7f00a1e19d2149d00246b0a77fedfcd3bd08 | [
"MIT"
] | null | null | null | from collections import defaultdict
from heapq import heappop, heappush
class Region(object):
def __init__(self, y, x, gi, el, t):
self.y = y
self.x = x
self.gi = gi
self.el = el
self.t = t
def pos(self):
return (self.y, self.x)
tools = {
'.': set(['c', 't']),
'=': set(['c', 'n']),
'|': set(['t', 'n']),
}
moves = {
'n': lambda p: (p[0]-1,p[1]),
's': lambda p: (p[0]+1,p[1]),
'w': lambda p: (p[0],p[1]-1),
'e': lambda p: (p[0],p[1]+1),
}
def solve(depth, target):
# Mapping cave
cave = defaultdict(Region)
for y in range(1000):
for x in range(1000):
pos = (y,x)
if pos == (0,0) or pos == target:
gi = 0
elif y == 0:
gi = x * 16807
elif x == 0:
gi = y * 48271
else:
gi = cave[(y,x-1)].el * cave[(y-1,x)].el
el = (gi + depth) % 20183
if el % 3 == 0:
t = '.'
elif el % 3 == 1:
t = '='
else:
t = '|'
cave[(y,x)] = Region(y, x, gi, el, t)
# Find quickest route
pos = (0,0)
q = [(0,'t',pos, [(0,'t',pos)])]
seen = {}
while q:
time, tool, pos, steps = heappop(q)
if pos == target:
if tool == 't':
for s in steps:
print(s)
print(time)
return
else:
next_time = time+7
next_pos = pos
next_steps = steps[:]
next_steps.append((next_time, 't', next_pos))
heappush(q, (next_time, 't', next_pos, next_steps))
continue
if (tool,pos) in seen: # and seen[(tool,pos)] <= time:
continue
seen[(tool,pos)] = time
for m in moves.values():
next_pos = m(pos)
# print(next_pos)
if next_pos[0] >= 0 and next_pos[1] >= 0:
p_tools = tools[cave[pos].t]
np_tools = tools[cave[next_pos].t]
common_tools = p_tools.intersection(np_tools)
for next_tool in common_tools:
next_time = time+1
if next_tool != tool:
next_time += 7
next_steps = steps[:]
next_steps.append((next_time, next_tool, next_pos))
heappush(q, (next_time, next_tool, next_pos, next_steps))
solve(510, (10,10))
# solve(11991, (797,6)) | 26.561224 | 77 | 0.419516 | 213 | 0.081829 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.064925 |
92a2ade8e25bb89801f783432bcd0131ea069e28 | 68 | py | Python | atcoder/abc149/b.py | sugitanishi/competitive-programming | 51af65fdce514ece12f8afbf142b809d63eefb5d | [
"MIT"
] | null | null | null | atcoder/abc149/b.py | sugitanishi/competitive-programming | 51af65fdce514ece12f8afbf142b809d63eefb5d | [
"MIT"
] | null | null | null | atcoder/abc149/b.py | sugitanishi/competitive-programming | 51af65fdce514ece12f8afbf142b809d63eefb5d | [
"MIT"
] | null | null | null | a,b,k=map(int,input().split())
print(max(a-k,0),max(b-max(k-a,0),0)) | 34 | 37 | 0.602941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
92a36b9eb0f433186029fab6359b6a888ab9e2da | 6,660 | py | Python | models/baselines.py | saverymax/qdriven-chiqa-summarization | 257a00133869db47807b9dd10761a6dd3aa15306 | [
"MIT"
] | 10 | 2020-05-25T20:52:45.000Z | 2022-01-26T08:06:51.000Z | models/baselines.py | saverymax/qdriven-chiqa-summarization | 257a00133869db47807b9dd10761a6dd3aa15306 | [
"MIT"
] | null | null | null | models/baselines.py | saverymax/qdriven-chiqa-summarization | 257a00133869db47807b9dd10761a6dd3aa15306 | [
"MIT"
] | 5 | 2020-07-15T12:21:27.000Z | 2022-02-06T08:11:41.000Z | """
Script for baseline approaches:
1. Take 10 random sentences
2. Take the topk 10 sentences with highest rouge score relative to the question
3. Pick the first 10 sentences
To run
python baselines.py --dataset=chiqa
"""
import json
import numpy as np
import random
import requests
import argparse
import rouge
import spacy
def get_args():
"""
Get command line arguments
"""
parser = argparse.ArgumentParser(description="Arguments for data exploration")
parser.add_argument("--dataset",
dest="dataset",
help="Dataset to run baselines on. Only current option is MEDIQA-AnS.")
return parser
def calculate_sentence_level_rouge(question, doc_sen, evaluator):
"""
For each pair of sentences, calculate rouge score with py-rouge
"""
rouge_score = evaluator.get_scores(doc_sen, question)['rouge-l']['f']
return rouge_score
def pick_k_best_rouge_sentences(k, questions, documents, summaries):
"""
Pick the k sentences that have the highest rouge scores when compared to the question.
"""
# Initiate rouge evaluator
evaluator = rouge.Rouge(metrics=['rouge-l'],
max_n=3,
limit_length=False,
length_limit_type='words',
apply_avg=False,
apply_best=True,
alpha=1,
weight_factor=1.2,
stemming=False)
pred_dict = {
'question': [],
'ref_summary': [],
'gen_summary': []
}
for q, doc, summ, in zip(questions, documents, summaries):
# Sentencize abstract
rouge_scores = []
for sentence in doc:
rouge_score = calculate_sentence_level_rouge(q, sentence, evaluator)
rouge_scores.append(rouge_score)
if len(doc) < k:
top_k_rouge_scores = np.argsort(rouge_scores)
else:
top_k_rouge_scores = np.argsort(rouge_scores)[-k:]
top_k_sentences = " ".join([doc[i] for i in top_k_rouge_scores])
summ = summ.replace("<s>", "")
summ = summ.replace("</s>", "")
pred_dict['question'].append(q)
pred_dict['ref_summary'].append(summ)
pred_dict['gen_summary'].append(top_k_sentences)
return pred_dict
def pick_first_k_sentences(k, questions, documents, summaries):
"""
Pick the first k sentences to use as summaries
"""
pred_dict = {
'question': [],
'ref_summary': [],
'gen_summary': []
}
for q, doc, summ, in zip(questions, documents, summaries):
if len(doc) < k:
first_k_sentences = doc
else:
first_k_sentences = doc[0:k]
first_k_sentences = " ".join(first_k_sentences)
summ = summ.replace("<s>", "")
summ = summ.replace("</s>", "")
pred_dict['question'].append(q)
pred_dict['ref_summary'].append(summ)
pred_dict['gen_summary'].append(first_k_sentences)
return pred_dict
def pick_k_random_sentences(k, questions, documents, summaries):
"""
Pick k random sentences from the articles to use as summaries
"""
pred_dict = {
'question': [],
'ref_summary': [],
'gen_summary': []
}
random.seed(13)
for q, doc, summ, in zip(questions, documents, summaries):
if len(doc) < k:
random_sentences = " ".join(doc)
else:
random_sentences = random.sample(doc, k)
random_sentences = " ".join(random_sentences)
summ = summ.replace("<s>", "")
summ = summ.replace("</s>", "")
pred_dict['question'].append(q)
pred_dict['ref_summary'].append(summ)
pred_dict['gen_summary'].append(random_sentences)
return pred_dict
def load_dataset(path):
"""
Load the evaluation set
"""
with open(path, "r", encoding="utf-8") as f:
asumm_data = json.load(f)
summaries = []
questions = []
documents = []
nlp = spacy.load('en_core_web_sm')
# Split sentences
cnt = 0
for q_id in asumm_data:
questions.append(asumm_data[q_id]['question'])
tokenized_art = nlp(asumm_data[q_id]['articles'])
summaries.append(asumm_data[q_id]['summary'])
article_sentences = [s.text.strip() for s in tokenized_art.sents]
documents.append(article_sentences[0:])
return questions, documents, summaries
def save_baseline(baseline, filename):
"""
Save baseline in format for rouge evaluation
"""
with open("../evaluation/data/baselines/chiqa_eval/baseline_{}.json".format(filename), "w", encoding="utf-8") as f:
json.dump(baseline, f, indent=4)
def run_baselines():
"""
Generate the random baseline and the best rouge baseline
"""
# Load the MEDIQA-AnS datasets
datasets = [
("../data_processing/data/page2answer_single_abstractive_summ.json", "p2a-single-abs"),
("../data_processing/data/page2answer_single_extractive_summ.json", "p2a-single-ext"),
("../data_processing/data/section2answer_single_abstractive_summ.json", "s2a-single-abs"),
("../data_processing/data/section2answer_single_extractive_summ.json", "s2a-single-ext"),
]
for data in datasets:
task = data[1]
print("Running baselines on {}".format(task))
# k can be determined from averages or medians of summary types of reference summaries. Alternatively, just use Lead-3 baseline.
# Optional to use different k for extractive and abstractive summaries, as the manual summaries of the two types have different average lengths
if task == "p2a-single-abs":
k = 3
if task == "p2a-single-ext":
k = 3
if task == "s2a-single-abs":
k = 3
if task == "s2a-single-ext":
k = 3
questions, documents, summaries = load_dataset(data[0])
k_sentences = pick_k_random_sentences(k, questions, documents, summaries)
first_k_sentences = pick_first_k_sentences(k, questions, documents, summaries)
k_best_rouge = pick_k_best_rouge_sentences(k, questions, documents, summaries)
save_baseline(k_sentences, filename="random_sentences_k_{}_{}_{}".format(k, args.dataset, task))
save_baseline(first_k_sentences, filename="first_sentences_k_{}_{}_{}".format(k, args.dataset, task))
save_baseline(k_best_rouge, filename="best_rouge_k_{}_{}_{}".format(k, args.dataset, task))
if __name__ == "__main__":
args = get_args().parse_args()
run_baselines()
| 33.807107 | 151 | 0.619369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,173 | 0.326276 |
92a38133bdfe889e2dab505ef498dfffa770b831 | 4,593 | py | Python | pycsvschema/validators/types.py | crowdskout/PycsvSchema | d5d44ace9bfe84e1001577caf4d933827143d124 | [
"MIT"
] | 11 | 2018-08-29T21:34:06.000Z | 2021-11-08T12:26:59.000Z | pycsvschema/validators/types.py | crowdskout/PycsvSchema | d5d44ace9bfe84e1001577caf4d933827143d124 | [
"MIT"
] | 2 | 2018-08-30T20:57:19.000Z | 2019-10-09T19:25:27.000Z | pycsvschema/validators/types.py | crowdskout/PycsvSchema | d5d44ace9bfe84e1001577caf4d933827143d124 | [
"MIT"
] | 1 | 2020-03-17T15:06:41.000Z | 2020-03-17T15:06:41.000Z | #!/usr/bin/python
# -*-coding: utf-8 -*-
# https://github.com/frictionlessdata/tableschema-py/tree/1d9750248de06a075029c1278404c5db5311fbc5/tableschema/types
# type and format
# Support types and formats:
# string
# email
# uri
# uuid
# ipv4
# ipv6
# hostname
# datetime
# number
# integer
# boolean
#
import rfc3986
import re
import uuid
import datetime
import ipaddress
from pycsvschema import defaults
class TypeValidator(object):
def __init__(self, field_schema):
self.field_schema = field_schema
self.format = self.field_schema.get('format', defaults.FIELDS_FORMAT)
self.value = None
self.to_type = None
def try_convert_value(self, value, to_type, convertor_config=None, update=False):
if not convertor_config:
convertor_config = {}
try:
v = to_type(value, **convertor_config)
except Exception:
return False
if update:
self.value = v
else:
self.value = value
return True
def validate(self, value):
pass
class StringValidator(TypeValidator):
EMAIL_PATTERN = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
HOSTNAME_PATTERN = r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*" \
r"([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
def __init__(self, field_schema):
super().__init__(field_schema=field_schema)
self.to_type = str
self.pattern = ''
def validate(self, value):
if value is None:
return
self.value = value
if self.format == 'email':
if not re.match(self.EMAIL_PATTERN, value):
return False
elif self.format == 'uri':
if not rfc3986.is_valid_uri(value, require_scheme=True):
return False
elif self.format == 'uuid':
return self.try_convert_value(value=value, to_type=uuid.UUID, convertor_config={'version': 4})
elif self.format == 'ipv4':
return self.try_convert_value(value=value, to_type=ipaddress.IPv4Address)
elif self.format == 'ipv6':
return self.try_convert_value(value=value, to_type=ipaddress.IPv6Address)
elif self.format == 'hostname':
if not re.match(self.HOSTNAME_PATTERN, value):
return False
elif self.format == 'datetime':
self.pattern = self.field_schema.get('datetimePattern', defaults.FIELDS_FORMAT_DATETIME_PATTERN)
try:
datetime.datetime.strptime(value, self.pattern)
except Exception:
return False
elif self.field_schema.get('pattern', defaults.FIELDS_TYPE_STRING_PATTERN):
self.pattern = self.field_schema.get('pattern', defaults.FIELDS_TYPE_STRING_PATTERN)
if not re.match(self.pattern, value):
return False
return True
class NumberValidator(TypeValidator):
def __init__(self, field_schema):
super().__init__(field_schema=field_schema)
self.to_type = float
self.groupchar = self.field_schema.get('groupChar', defaults.FIELDS_GROUPCHAR)
def validate(self, value):
if value is None:
return
value = value.replace(self.groupchar, '')
return self.try_convert_value(value=value, to_type=self.to_type, update=True)
class IntegerValidator(TypeValidator):
def __init__(self, field_schema):
super().__init__(field_schema=field_schema)
self.to_type = int
self.groupchar = self.field_schema.get('groupChar', defaults.FIELDS_GROUPCHAR)
def validate(self, value):
if value is None:
return
value = value.replace(self.groupchar, '')
return self.try_convert_value(value=value, to_type=self.to_type, update=True)
class BooleanValidator(TypeValidator):
def __init__(self, field_schema):
super().__init__(field_schema=field_schema)
self.to_type = bool
self.truevalues = self.field_schema.get('trueValues', defaults.FIELDS_TRUEVALUES)
self.falsevalues = self.field_schema.get('falseValues', defaults.FIELDS_FALSEVALUES)
def validate(self, value):
if value in self.truevalues:
self.value = True
elif value in self.falsevalues:
self.value = False
else:
return False
return True
TYPE_MAPPER = {
'string': StringValidator,
'number': NumberValidator,
'integer': IntegerValidator,
'boolean': BooleanValidator,
}
| 30.62 | 116 | 0.63096 | 4,008 | 0.872632 | 0 | 0 | 0 | 0 | 0 | 0 | 654 | 0.142391 |
92a47215c9aaf111948777614daf685c1e4ab775 | 3,454 | py | Python | Identifeye/src/adj_list.py | haasm3/Identifeye | e2fec37c472f2fc9a124741f431f7d38bdb10b43 | [
"MIT"
] | null | null | null | Identifeye/src/adj_list.py | haasm3/Identifeye | e2fec37c472f2fc9a124741f431f7d38bdb10b43 | [
"MIT"
] | null | null | null | Identifeye/src/adj_list.py | haasm3/Identifeye | e2fec37c472f2fc9a124741f431f7d38bdb10b43 | [
"MIT"
] | null | null | null | from difflib import SequenceMatcher
"""
A Python program to demonstrate the adjacency
list representation of the graph
"""
# weights arreay by value
weights = [1 / 7, 1 / 7, 1 / 7, 1 / 7, 1 / 7, 1 / 7, 1 / 7, 1 / 7]
# A class to represent the adjacency list of the node
class AdjNode:
def __init__(self, data, weight):
self.vertex = data
self.weight = weight
self.next = None
# A class to represent a graph. A graph
# is the list of the adjacency lists.
# Size of the array will be the no. of the
# vertices "V"
class Graph:
def __init__(self):
self.graph = []
# Function to add an edge in an undirected graph
def add_edge(self, src, dest, weight):
# Adding the node to the source node
node = AdjNode(dest, weight)
node.next = self.graph[src]
self.graph[src] = node
# Adding the source node to the destination as
# it is the undirected graph
node = AdjNode(src, weight)
node.next = self.graph[dest]
self.graph[dest] = node
# Function to print the graph (in adj list format)
def print_graph(self):
for i in range(self.V):
# get current node
temp = self.graph[i]
# if no links skip
if(temp == None):
continue
print("Adjacency list of vertex {}\n head".format(i), end="")
while temp:
print(F" -> Node Position = {temp.vertex} : weight = {round(temp.weight,3)}", end="")
temp = temp.next
print(" \n")
# Function to print the graph (in the given data format)
def print_data_matches(self, NodeArray):
for i in range(self.V):
# get current adjancey list node
temp = self.graph[i]
# if no links skip
if (temp == None):
continue
# get current data point
current_data = NodeArray[i]
print("Account Name {}\n".format(current_data[0]), end="")
while temp:
connected_to = NodeArray[temp.vertex]
print(F" -> Connected To: = {connected_to[0]} with a confidence of {round((temp.weight*100), 1)}%", end="\n")
temp = temp.next
print(" \n")
i = i + 1
# goes through and adds edges to adjancey list
def add_edges(self, Nodes):
for i in range(len(NodeArray)):
for j in range(i + 1, len(NodeArray)):
# get weight of each node with others
w = calculate_weights(NodeArray[i], NodeArray[j])
if w >= (2 / 7):
self.add_edge(i, j, w)
# calculate the connected-ness of each node
def calculate_weights(node1, node2):
w = 0
# calculate weight - does not include play time or is banned
# Account and Character name
for i in range(0, 2):
if node1[i] == node2[i]:
w += weights[i]
else:
match = SequenceMatcher(None, node1[i], node2[i]).find_longest_match(0, len(node1[i]), 0, len(node2[i]))
if match.size >= 3:
w += weights[i] * min(weights[i], weights[i] * match.size / 10)
# IP, UUID, Location
for i in range(2, 5):
if node1[i] == node2[i]:
w += weights[i]
return w
| 30.298246 | 126 | 0.532716 | 2,349 | 0.680081 | 0 | 0 | 0 | 0 | 0 | 0 | 1,167 | 0.337869 |
92a48b86c4e76dd5956f70160e597a930611bbec | 11,633 | py | Python | scripts/flowed_plaquette_plot.py | hmvege/GluonicLQCD | 9bb7466fce408bf51cb98d65f639acd37d034d62 | [
"MIT"
] | 1 | 2019-04-12T08:12:30.000Z | 2019-04-12T08:12:30.000Z | scripts/flowed_plaquette_plot.py | hmvege/GluonicLQCD | 9bb7466fce408bf51cb98d65f639acd37d034d62 | [
"MIT"
] | 2 | 2021-03-14T12:30:39.000Z | 2021-03-14T12:33:17.000Z | scripts/flowed_plaquette_plot.py | hmvege/GluonicLQCD | 9bb7466fce408bf51cb98d65f639acd37d034d62 | [
"MIT"
] | 1 | 2020-05-14T02:12:33.000Z | 2020-05-14T02:12:33.000Z | import matplotlib.pyplot as plt, numpy as np
plaq_morningstar_ubuntu = np.array("""
0 0.61401745
1 0.62927918
2 0.64409064
3 0.65844519
4 0.67233862
5 0.68576903
6 0.69873661
7 0.71124351
8 0.72329362
9 0.73489239
10 0.74604666
11 0.75676448
12 0.76705495
13 0.77692801
14 0.78639435
15 0.79546523
16 0.80415238
17 0.81246785
18 0.82042391
19 0.82803298
20 0.83530752
21 0.84225997
22 0.84890267
23 0.85524782
24 0.86130743
25 0.86709327
26 0.87261685
27 0.87788938
28 0.88292176
29 0.88772457
30 0.89230805
31 0.89668206
32 0.90085616
33 0.90483950
34 0.90864091
35 0.91226887
36 0.91573149
37 0.91903657
38 0.92219154
39 0.92520354
40 0.92807937
41 0.93082553
42 0.93344823
43 0.93595338
44 0.93834663
45 0.94063334
46 0.94281864
47 0.94490740
48 0.94690426
49 0.94881362
50 0.95063970
51 0.95238648
52 0.95405775
53 0.95565713
54 0.95718804
55 0.95865374
56 0.96005734
57 0.96140177
58 0.96268983
59 0.96392419
60 0.96510736
61 0.96624174
62 0.96732962
63 0.96837315
64 0.96937441
65 0.97033534
66 0.97125780
67 0.97214355
68 0.97299429
69 0.97381159
70 0.97459698
71 0.97535190
72 0.97607772
73 0.97677575
74 0.97744721
75 0.97809330
76 0.97871513
77 0.97931378
78 0.97989024
79 0.98044550
80 0.98098048
81 0.98149603
82 0.98199301
83 0.98247220
84 0.98293437
85 0.98338022
86 0.98381045
87 0.98422571
88 0.98462662
89 0.98501378
90 0.98538776
91 0.98574909
92 0.98609828
93 0.98643585
94 0.98676224
95 0.98707791
96 0.98738328
97 0.98767877
98 0.98796476
99 0.98824163""".split(),dtype=float)
plaq_morningstar_mac = np.array("""
0 0.61049840
1 0.62569628
2 0.64044857
3 0.65474864
4 0.66859227
5 0.68197751
6 0.69490449
7 0.70737526
8 0.71939361
9 0.73096487
10 0.74209574
11 0.75279414
12 0.76306898
13 0.77293008
14 0.78238795
15 0.79145369
16 0.80013886
17 0.80845533
18 0.81641522
19 0.82403079
20 0.83131432
21 0.83827811
22 0.84493434
23 0.85129505
24 0.85737212
25 0.86317717
26 0.86872158
27 0.87401642
28 0.87907249
29 0.88390022
30 0.88850975
31 0.89291083
32 0.89711292
33 0.90112507
34 0.90495602
35 0.90861415
36 0.91210751
37 0.91544379
38 0.91863039
39 0.92167434
40 0.92458240
41 0.92736100
42 0.93001630
43 0.93255417
44 0.93498018
45 0.93729968
46 0.93951774
47 0.94163920
48 0.94366865
49 0.94561049
50 0.94746888
51 0.94924778
52 0.95095098
53 0.95258204
54 0.95414440
55 0.95564128
56 0.95707577
57 0.95845080
58 0.95976915
59 0.96103347
60 0.96224629
61 0.96340998
62 0.96452682
63 0.96559897
64 0.96662848
65 0.96761731
66 0.96856731
67 0.96948024
68 0.97035779
69 0.97120154
70 0.97201301
71 0.97279364
72 0.97354481
73 0.97426782
74 0.97496390
75 0.97563425
76 0.97627999
77 0.97690217
78 0.97750183
79 0.97807993
80 0.97863740
81 0.97917510
82 0.97969389
83 0.98019456
84 0.98067786
85 0.98114453
86 0.98159524
87 0.98203067
88 0.98245144
89 0.98285814
90 0.98325135
91 0.98363161
92 0.98399945
93 0.98435535
94 0.98469980
95 0.98503324
96 0.98535610
97 0.98566881
98 0.98597174
99 0.98626529""".split(),dtype=float)
plaq_luscher_mac = np.array("""
0 0.61049839
1 0.62569626
2 0.64044854
3 0.65474860
4 0.66859222
5 0.68197746
6 0.69490443
7 0.70737520
8 0.71939354
9 0.73096480
10 0.74209567
11 0.75279406
12 0.76306891
13 0.77293000
14 0.78238787
15 0.79145362
16 0.80013878
17 0.80845525
18 0.81641514
19 0.82403071
20 0.83131425
21 0.83827803
22 0.84493426
23 0.85129498
24 0.85737205
25 0.86317710
26 0.86872151
27 0.87401636
28 0.87907242
29 0.88390016
30 0.88850969
31 0.89291078
32 0.89711286
33 0.90112501
34 0.90495597
35 0.90861410
36 0.91210746
37 0.91544375
38 0.91863034
39 0.92167429
40 0.92458235
41 0.92736096
42 0.93001626
43 0.93255413
44 0.93498014
45 0.93729964
46 0.93951770
47 0.94163916
48 0.94366862
49 0.94561046
50 0.94746885
51 0.94924775
52 0.95095095
53 0.95258202
54 0.95414437
55 0.95564125
56 0.95707574
57 0.95845077
58 0.95976913
59 0.96103345
60 0.96224627
61 0.96340996
62 0.96452680
63 0.96559895
64 0.96662846
65 0.96761729
66 0.96856729
67 0.96948023
68 0.97035777
69 0.97120152
70 0.97201299
71 0.97279363
72 0.97354480
73 0.97426780
74 0.97496389
75 0.97563424
76 0.97627997
77 0.97690216
78 0.97750182
79 0.97807992
80 0.97863739
81 0.97917509
82 0.97969388
83 0.98019455
84 0.98067785
85 0.98114452
86 0.98159524
87 0.98203066
88 0.98245143
89 0.98285813
90 0.98325134
91 0.98363161
92 0.98399944
93 0.98435535
94 0.98469979
95 0.98503323
96 0.98535610
97 0.98566880
98 0.98597174
99 0.98626529""".split(),dtype=float)
plaq_taylor_mac = np.array("""
0 0.61049847
1 0.62569641
2 0.64044875
3 0.65474887
4 0.66859255
5 0.68197783
6 0.69490485
7 0.70737565
8 0.71939403
9 0.73096531
10 0.74209620
11 0.75279461
12 0.76306947
13 0.77293058
14 0.78238846
15 0.79145420
16 0.80013937
17 0.80845584
18 0.81641573
19 0.82403129
20 0.83131482
21 0.83827860
22 0.84493482
23 0.85129553
24 0.85737258
25 0.86317762
26 0.86872202
27 0.87401685
28 0.87907291
29 0.88390063
30 0.88851014
31 0.89291122
32 0.89711329
33 0.90112543
34 0.90495637
35 0.90861449
36 0.91210784
37 0.91544411
38 0.91863069
39 0.92167464
40 0.92458268
41 0.92736128
42 0.93001657
43 0.93255442
44 0.93498043
45 0.93729992
46 0.93951797
47 0.94163942
48 0.94366887
49 0.94561070
50 0.94746908
51 0.94924798
52 0.95095116
53 0.95258222
54 0.95414457
55 0.95564144
56 0.95707593
57 0.95845095
58 0.95976930
59 0.96103362
60 0.96224643
61 0.96341011
62 0.96452695
63 0.96559909
64 0.96662860
65 0.96761743
66 0.96856742
67 0.96948035
68 0.97035789
69 0.97120164
70 0.97201311
71 0.97279374
72 0.97354490
73 0.97426791
74 0.97496399
75 0.97563434
76 0.97628007
77 0.97690225
78 0.97750191
79 0.97808000
80 0.97863747
81 0.97917517
82 0.97969396
83 0.98019462
84 0.98067792
85 0.98114459
86 0.98159530
87 0.98203073
88 0.98245149
89 0.98285820
90 0.98325140
91 0.98363167
92 0.98399950
93 0.98435540
94 0.98469984
95 0.98503328
96 0.98535615
97 0.98566885
98 0.98597179
99 0.98626533""".split(),dtype=float)
plaq_taylor4_mac = np.array("""
0 0.61049840
1 0.62569628
2 0.64044857
3 0.65474864
4 0.66859227
5 0.68197751
6 0.69490449
7 0.70737526
8 0.71939361
9 0.73096487
10 0.74209574
11 0.75279414
12 0.76306898
13 0.77293008
14 0.78238795
15 0.79145369
16 0.80013886
17 0.80845533
18 0.81641522
19 0.82403079
20 0.83131432
21 0.83827811
22 0.84493434
23 0.85129505
24 0.85737212
25 0.86317717
26 0.86872158
27 0.87401642
28 0.87907249
29 0.88390022
30 0.88850975
31 0.89291083
32 0.89711292
33 0.90112507
34 0.90495602
35 0.90861415
36 0.91210751
37 0.91544379
38 0.91863039
39 0.92167434
40 0.92458240
41 0.92736100
42 0.93001630
43 0.93255417
44 0.93498018
45 0.93729968
46 0.93951774
47 0.94163920
48 0.94366865
49 0.94561049
50 0.94746888
51 0.94924778
52 0.95095098
53 0.95258204
54 0.95414440
55 0.95564128
56 0.95707577
57 0.95845080
58 0.95976915
59 0.96103347
60 0.96224629
61 0.96340998
62 0.96452682
63 0.96559897
64 0.96662848
65 0.96761731
66 0.96856731
67 0.96948024
68 0.97035779
69 0.97120154
70 0.97201301
71 0.97279364
72 0.97354481
73 0.97426782
74 0.97496390
75 0.97563425
76 0.97627999
77 0.97690217
78 0.97750183
79 0.97807993
80 0.97863740
81 0.97917510
82 0.97969389
83 0.98019456
84 0.98067786
85 0.98114453
86 0.98159524
87 0.98203067
88 0.98245144
89 0.98285814
90 0.98325135
91 0.98363161
92 0.98399945
93 0.98435535
94 0.98469980
95 0.98503324
96 0.98535610
97 0.98566881
98 0.98597174
99 0.98626529""".split(),dtype=float)
def getFlowPlaq(arr):
    """Split an interleaved [step, value, step, value, ...] array.

    Parameters
    ----------
    arr : ndarray
        Flat array alternating flow-step indices and plaquette values.

    Returns
    -------
    (tflow, plaq_flow) : tuple of ndarray
        Flow times (step index scaled by 0.01) and the corresponding
        flowed plaquette values.
    """
    steps, values = arr[0::2], arr[1::2]
    return steps * 0.01, values
# Figure 1: flowed plaquette value for each SU(3) exponentiation method,
# one subplot per method, saved to ../figures/plaquette_flow.png.
fig1 = plt.figure(dpi=300)
# Morningstar plot
ax1 = fig1.add_subplot(311)
t_morningstar, pf_morningstar = getFlowPlaq(plaq_morningstar_mac)
ax1.plot(t_morningstar, pf_morningstar, "-", label="Flowed Plaquette")
ax1.grid(True)
# ax.set_xlabel(r"Flow time $\tau$")
ax1.set_ylabel(r"$P_{Morningstar}$")
ax1.set_title("Flowed plaquette value with different SU3 exponentiating methods")
ax1.tick_params(axis='x', which='major', labelsize=8)
# Luscher plot
ax2 = fig1.add_subplot(312)
t_luscher, pf_luscher = getFlowPlaq(plaq_luscher_mac)
ax2.plot(t_luscher, pf_luscher, "-", label="Flowed Plaquette")
ax2.grid(True)
ax2.set_ylabel(r"$P_{Luscher}$")  # fixed label typo: was "Lushcer"
ax2.tick_params(axis='x', which='major', labelsize=8)
# Taylor plot
# NOTE(review): the plotted data comes from plaq_taylor4_mac; plaq_taylor_mac
# is defined above but never plotted -- confirm which series was intended.
ax3 = fig1.add_subplot(313)
t_taylor, pf_taylor = getFlowPlaq(plaq_taylor4_mac)
# Bug fix: this subplot previously plotted pf_luscher under the Taylor label.
ax3.plot(t_taylor, pf_taylor, "-", label="Flowed Plaquette")
ax3.grid(True)
ax3.set_ylabel(r"$P_{Taylor}$")
ax3.tick_params(axis='x', which='major', labelsize=8)
fig1.savefig("../figures/plaquette_flow.png")
#### Differences ####
# Figure 2: absolute pairwise differences between the exponentiation
# methods, on a log scale, saved to ../figures/exponentiation_differences.png.
fig2 = plt.figure(dpi=300)
ax4 = fig2.add_subplot(111)
pf_diff1 = np.absolute(pf_morningstar - pf_luscher)  # only used by the commented-out plot below
pf_diff2 = np.absolute(pf_morningstar - pf_taylor)
pf_diff3 = np.absolute(pf_luscher - pf_taylor)
# ax4.plot(t_luscher,pf_diff1,label=r"$|P_{Morningstar} - P_{Luscher}|$")
ax4.plot(t_luscher,pf_diff2,label=r"$|P_{Morningstar} - P_{Taylor}|$")
ax4.plot(t_luscher,pf_diff3,'--',label=r"$|P_{Luscher} - P_{Taylor}|$")
ax4.set_title(r"Differences in $SU(3)$ exponentiation methods")
# NOTE(review): `nonposy` was removed in newer matplotlib releases
# (replaced by `nonpositive='clip'`) -- confirm the pinned matplotlib version.
ax4.set_yscale("log",nonposy='clip')
ax4.set_ylabel(r"$\Delta P$")
ax4.tick_params(axis='y',which='minor',labelsize=8)
ax4.tick_params(axis='y',which='major',labelsize=8)
ax4.set_xlabel(r"Flow time $\tau$")
ax4.legend()
fig2.savefig("../figures/exponentiation_differences.png")
# Commented-out scratch code for an additional difference subplot:
# ax5 = fig2.add_subplot(312)
# ax5.set_yscale("log",nonposy='clip')
# ax5.set_ylabel(r"$\Delta P$")
# ax5.tick_params(axis='y',which='minor',labelsize=8)
# ax5.tick_params(axis='y',which='major',labelsize=8)
# ax5.legend()
# ax5 = fig2.add_subplot(312)
# ax5.set_yscale("log",nonposy='clip')
# ax5.tick_params(axis='y',which='minor',labelsize=8)
# ax5.tick_params(axis='y',which='major',labelsize=8)
# ax5.legend()
| 20.022375 | 81 | 0.63423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10,065 | 0.865211 |
92a63e6d9950d24608b3bbb002e036b82919a7bf | 2,541 | py | Python | pyalgs/data_structures/commons/queue.py | vertexproject/pyalgs | 11b9ea37afc9e9f9e38ffacc42b53f9cd96f5f83 | [
"BSD-3-Clause"
] | 12 | 2017-05-01T10:31:42.000Z | 2021-06-23T14:03:28.000Z | pyalgs/data_structures/commons/queue.py | vertexproject/pyalgs | 11b9ea37afc9e9f9e38ffacc42b53f9cd96f5f83 | [
"BSD-3-Clause"
] | 2 | 2018-08-01T10:09:09.000Z | 2020-07-16T11:41:46.000Z | pyalgs/data_structures/commons/queue.py | vertexproject/pyalgs | 11b9ea37afc9e9f9e38ffacc42b53f9cd96f5f83 | [
"BSD-3-Clause"
] | 6 | 2017-06-04T01:41:14.000Z | 2021-01-19T05:05:44.000Z | from abc import abstractmethod, ABCMeta
class Queue(object):
    """FIFO queue interface implemented by LinkedListQueue and ArrayQueue.
    """
    # NOTE(review): `__metaclass__` is Python-2 syntax; under Python 3 it is
    # ignored, so @abstractmethod is not actually enforced there.
    __metaclass__ = ABCMeta
    @abstractmethod
    def enqueue(self, item):
        """Append `item` to the back of the queue."""
        pass
    @abstractmethod
    def dequeue(self):
        """Remove and return the front item (implementations return None when empty)."""
        pass
    @abstractmethod
    def is_empty(self):
        """Return True when the queue holds no items."""
        pass
    @abstractmethod
    def size(self):
        """Return the number of items currently stored."""
        pass
    @staticmethod
    def create():
        """Factory: return the default Queue implementation."""
        return LinkedListQueue()
    @abstractmethod
    def iterate(self):
        """Yield stored items from front to back without removing them."""
        pass
class Node(object):
    """Singly linked list cell holding one queue item."""
    value = None      # payload stored in this cell
    nextNode = None   # link to the cell behind this one; None at the tail

    def __init__(self, value):
        """Wrap `value` in a fresh, unlinked node."""
        self.value = value
        self.nextNode = None
class LinkedListQueue(Queue):
    """FIFO queue backed by a singly linked list of Node objects."""
    first = None  # front node (dequeue side)
    last = None   # back node (enqueue side)
    N = 0         # number of stored items

    def size(self):
        """Return the number of items currently stored."""
        return self.N

    def iterate(self):
        """Yield items from front to back without removing them."""
        node = self.first
        while node is not None:
            item = node.value
            node = node.nextNode
            yield item

    def enqueue(self, item):
        """Append `item` at the tail in O(1)."""
        previous_tail = self.last
        self.last = Node(item)
        if previous_tail is not None:
            previous_tail.nextNode = self.last
        if self.first is None:
            # Queue was empty: the new node is both head and tail.
            self.first = self.last
        self.N += 1

    def is_empty(self):
        """Return True when no items are stored."""
        return self.N == 0

    def dequeue(self):
        """Remove and return the front item, or None when empty."""
        if self.is_empty():
            return None
        previous_head = self.first
        self.first = previous_head.nextNode
        if previous_head == self.last:
            # Removing the only node empties the queue.
            self.last = None
        self.N -= 1
        return previous_head.value
class ArrayQueue(Queue):
    """FIFO queue backed by a dynamically resized array.

    Live items occupy self.s[self.head:self.tail].  enqueue doubles the
    backing array when it fills; dequeue halves and compacts it when the
    queue is three-quarters empty.
    """

    def __init__(self, capacity=None):
        """Create an empty queue with an optional initial capacity (default 10)."""
        if capacity is None:
            capacity = 10
        # Previously head/tail/s were class attributes, which made the
        # backing list a shared mutable default; keep all state per-instance.
        self.head = 0
        self.tail = 0
        self.s = [0] * capacity

    def iterate(self):
        """Yield items from front to back without removing them."""
        if self.is_empty():
            return
        for i in range(self.head, self.tail):
            yield self.s[i]

    def enqueue(self, item):
        """Append `item`; double the backing array when it fills."""
        self.s[self.tail] = item
        self.tail += 1
        if self.tail == len(self.s):
            self.resize(len(self.s) * 2)

    def resize(self, new_size):
        """Copy live items into a new backing array of `new_size` slots."""
        tmp = [0] * new_size
        for i in range(self.head, self.tail):
            tmp[i - self.head] = self.s[i]
        self.s = tmp
        # Compact: live items now start at index 0.
        self.tail = self.tail - self.head
        self.head = 0

    def size(self):
        """Return the number of items currently stored."""
        return self.tail - self.head

    def is_empty(self):
        """Return True when no items are stored."""
        return self.size() == 0

    def dequeue(self):
        """Remove and return the front item, or None when empty."""
        if self.is_empty():
            return None
        deleted = self.s[self.head]
        self.head += 1
        # Shrink when only a quarter of the capacity is in use.
        if self.size() == len(self.s) // 4:
            self.resize(len(self.s) // 2)
        return deleted
| 19.546154 | 45 | 0.528926 | 2,489 | 0.979536 | 288 | 0.113341 | 323 | 0.127115 | 0 | 0 | 32 | 0.012593 |
92a704fccf87ceb7853e4a72b95d94d37432b6bb | 18 | py | Python | anchore_manager/version.py | Nordix/anchore-engine | f25baa5cbf9aa34d56e56b341a90f577d85e6146 | [
"Apache-2.0"
] | 110 | 2017-09-14T02:15:15.000Z | 2022-03-30T20:14:21.000Z | anchore_manager/version.py | Nordix/anchore-engine | f25baa5cbf9aa34d56e56b341a90f577d85e6146 | [
"Apache-2.0"
] | 115 | 2017-09-22T12:15:30.000Z | 2022-01-17T12:31:21.000Z | anchore_manager/version.py | Nordix/anchore-engine | f25baa5cbf9aa34d56e56b341a90f577d85e6146 | [
"Apache-2.0"
] | 56 | 2017-09-22T11:26:25.000Z | 2022-03-03T14:14:58.000Z | version = "0.9.4"
| 9 | 17 | 0.555556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.388889 |
92a76f0c28626e2a60cb856d6d530e23b266317a | 1,488 | py | Python | GaussianTrashSource.py | jkamalu/trashbots-RL | 22983749bc69ef958121101ce59702d3766b9145 | [
"Apache-2.0"
] | 1 | 2021-06-10T11:59:14.000Z | 2021-06-10T11:59:14.000Z | GaussianTrashSource.py | jkamalu/trashbots-RL | 22983749bc69ef958121101ce59702d3766b9145 | [
"Apache-2.0"
] | 1 | 2019-07-16T12:25:21.000Z | 2019-07-16T12:25:21.000Z | GaussianTrashSource.py | jkamalu/trashbots-RL | 22983749bc69ef958121101ce59702d3766b9145 | [
"Apache-2.0"
] | null | null | null | from numpy.random import multivariate_normal
class GaussianTrashSource:
    """Trash source whose (y, x) positions are drawn from a 2D Gaussian."""

    def __init__(self, mean, max_y, max_x, cov=[[1, 0], [0, 1]], id=None):
        """
        Create a trash source.

        Parameters
        ----------
        mean : (int, int), (y, x) trash hotspot; mean of the random variable.
        max_y : int, upper limit on sampled y coordinates (grid size).
        max_x : int, upper limit on sampled x coordinates (grid size).
        cov : 2x2 matrix, covariance of the random variable.
        id : optional identifier for this source.
        """
        # NOTE: the list default for `cov` is shared between instances, but
        # it is only ever read here, so this is harmless in practice.
        self.mean = mean
        self.cov = cov
        self.max_y = max_y
        self.max_x = max_x
        self.id = id

    def draw_sample_in_limits(self):
        """Draw one sample, rounded and clamped to the upper limits.

        Returns
        -------
        [y, x] : list of int
        """
        sample_y, sample_x = multivariate_normal(self.mean, self.cov, 1)[0]
        clamped_y = int(min(self.max_y, round(sample_y)))
        clamped_x = int(min(self.max_x, round(sample_x)))
        return [clamped_y, clamped_x]

    def get_trash(self, n=None):
        """Return one [y, x] sample, or a list of n samples when n is truthy."""
        if n:
            return [self.draw_sample_in_limits() for _ in range(n)]
        return self.draw_sample_in_limits()
92a781db2000e629d38e7865533c3898de0e92c8 | 1,218 | py | Python | twitter_user/__init__.py | hostinfodev/twitter-user | e85f1e884e156076e550b6ad80bfddf504d06e0f | [
"MIT"
] | null | null | null | twitter_user/__init__.py | hostinfodev/twitter-user | e85f1e884e156076e550b6ad80bfddf504d06e0f | [
"MIT"
] | null | null | null | twitter_user/__init__.py | hostinfodev/twitter-user | e85f1e884e156076e550b6ad80bfddf504d06e0f | [
"MIT"
] | null | null | null | from selenium.webdriver.firefox.options import Options
from webdriver_manager.firefox import GeckoDriverManager
from seleniumwire import webdriver
from .fetch import fetchUser as fetchUser_
class TwitterUser(object):
    """Selenium-wire Firefox session used to fetch Twitter user data.

    Constructing an instance locates/installs the geckodriver binary via
    webdriver_manager and launches a (by default headless) Firefox.
    """
    # __CONSTRUCTOR
    def __init__(self, allowed_connection_retries=20, allowed_parsing_retries=500, headless=True):
        # allowed_connection_retries: AMOUNT OF TIMES ALLOWED TO RECOVER FROM AN ERROR DURING THE WEB REQUEST/SESSION.
        # allowed_parsing_retries: AMOUNT OF TIMES ALLOWED TO RETRY PARSING THE RESPONSE.
        # # # PRIVATE METHODS/OBJECTS # # #
        # FIREFOX OPTIONS
        firefox_options = Options()
        if headless:
            firefox_options.add_argument("--headless")
        # # # PUBLIC METHODS/OBJECTS # # #
        # WEBDRIVER HARNESS (side effect: may download geckodriver on first run)
        self.driver = webdriver.Firefox(executable_path=GeckoDriverManager().install(), options=firefox_options)
        # ALLOWED CONNECTION RETRIES
        self.allowed_connection_retries = allowed_connection_retries
        # ALLOWED PARSING RETRIES
        self.allowed_parsing_retries = allowed_parsing_retries
    # FETCH-USER METHOD: delegates to fetch.fetchUser with this session.
    #@classmethod
    def fetchUser(self, screen_name):
        """Fetch data for `screen_name` using this instance's driver/limits."""
        return fetchUser_(self, screen_name)
92a9563621c7a5c44424f2a6b1c724f6506e207a | 923 | py | Python | file_operations.py | OrkunAvci/Content-Based-Image-Retrieval | 4a0a9589e9747fb8d6f14238cc2f18f17dbf5a9e | [
"MIT"
] | null | null | null | file_operations.py | OrkunAvci/Content-Based-Image-Retrieval | 4a0a9589e9747fb8d6f14238cc2f18f17dbf5a9e | [
"MIT"
] | null | null | null | file_operations.py | OrkunAvci/Content-Based-Image-Retrieval | 4a0a9589e9747fb8d6f14238cc2f18f17dbf5a9e | [
"MIT"
] | null | null | null | import cv2 as cv
from os import listdir
from os.path import isfile, join
import json
# Image-set configuration: category folders under ./data, plus the
# train/test filename split taken from the "camera" reference folder
# (the same filenames are reused across every category folder).
folders = [
	# Img sets to use
	"octopus",
	"elephant",
	"flamingo",
	"kangaroo",
	"leopards",
	"sea_horse"
]
files = [f for f in listdir("./data/camera") if isfile(join("./data/camera", f))]	# Get all file names
train_file_names = files[:20]	# First 20 is train
test_file_names = files[20:30]	# Next 10 is test
def get_train_set():
	"""Load the training images (first 20 filenames) for every category folder.

	Returns a list of lists: one row of cv images per folder.
	"""
	img_set = []
	for folder in folders:
		row = []
		for file_name in train_file_names:
			row.append(cv.imread("./data/" + folder + "/" + file_name))
		img_set.append(row)
	return img_set
def get_test_set():
	"""Load the test images (filenames 20..29) for every category folder.

	Returns a list of lists: one row of cv images per folder.
	"""
	img_set = []
	for folder in folders:
		row = []
		for file_name in test_file_names:
			row.append(cv.imread("./data/" + folder + "/" + file_name))
		img_set.append(row)
	return img_set
def save(file_name, data):
	"""Serialize `data` as JSON into ./data/<file_name>."""
	with open("./data/" + file_name, "w+") as out:
		json.dump(data, out)
def read(file_name):
	"""Load and return the JSON content of ./data/<file_name>.

	Opens read-only ("r") instead of the original "r+": nothing is written
	here, and "r+" fails on files the process cannot write.
	"""
	with open("./data/" + file_name, "r") as f:
		return json.load(f)
92a9d8e5eb6b7dfd1c874a961a7ca59965c56a76 | 278 | py | Python | inner_rpc/inner_rpc/ir_exceptions.py | chenjiee815/inner_rpc | d6b4945dfb3ea7fda7a64cfe23d19f8539f988cf | [
"Apache-2.0"
] | 1 | 2015-09-17T01:22:28.000Z | 2015-09-17T01:22:28.000Z | inner_rpc/inner_rpc/ir_exceptions.py | chenjiee815/inner_rpc | d6b4945dfb3ea7fda7a64cfe23d19f8539f988cf | [
"Apache-2.0"
] | null | null | null | inner_rpc/inner_rpc/ir_exceptions.py | chenjiee815/inner_rpc | d6b4945dfb3ea7fda7a64cfe23d19f8539f988cf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding=utf-8
"""
inner_rpc.ir_exceptions
-----------------
This module defines the shared exception hierarchy for inner_rpc.
"""
class BaseError(Exception):
    """Root of the inner_rpc exception hierarchy."""
    pass
class SocketError(BaseError):
    """Raised for socket-level communication failures."""
    pass
class SocketTimeout(SocketError):
    """Raised when a socket operation times out."""
    pass
class DataError(BaseError):
    """Raised for malformed or unexpected payload data."""
    pass
92abf94301c78871fdaeb06a8f1ddaf5bff99c2c | 268 | py | Python | floodsystem/analysis.py | AndrewKeYanzhe/part-ia-flood-warning-system | eb3a32d3722be7b45acf3924fd4d2431652d3053 | [
"MIT"
] | null | null | null | floodsystem/analysis.py | AndrewKeYanzhe/part-ia-flood-warning-system | eb3a32d3722be7b45acf3924fd4d2431652d3053 | [
"MIT"
] | null | null | null | floodsystem/analysis.py | AndrewKeYanzhe/part-ia-flood-warning-system | eb3a32d3722be7b45acf3924fd4d2431652d3053 | [
"MIT"
] | null | null | null | import matplotlib
import numpy as np
def polyfit(dates, levels, p):
    """Fit a degree-p polynomial to water levels against dates.

    Parameters
    ----------
    dates : array-like of float
        Date numbers (e.g. matplotlib date floats).
    levels : array-like of float
        Water levels matching `dates`.
    p : int
        Polynomial degree.

    Returns
    -------
    (poly, d0) : (numpy.poly1d, float)
        The fitted polynomial and the first date, usable as a plotting
        reference time.

    NOTE(review): the fit uses raw date numbers; for large matplotlib date
    values this can be numerically ill-conditioned -- shifting by dates[0]
    before fitting (as a removed variant did) may have been intended.
    """
    coefficients = np.polyfit(dates, levels, p)
    return np.poly1d(coefficients), dates[0]
92ac0dae3334701d84c09b1bf18e3dc09ab5b133 | 2,211 | py | Python | examples/example_backward_elimination.py | patricklai14/gmp_feature_selection | 5bfec610257fe80a5a5081e8aacdccf54119b771 | [
"MIT"
] | null | null | null | examples/example_backward_elimination.py | patricklai14/gmp_feature_selection | 5bfec610257fe80a5a5081e8aacdccf54119b771 | [
"MIT"
] | null | null | null | examples/example_backward_elimination.py | patricklai14/gmp_feature_selection | 5bfec610257fe80a5a5081e8aacdccf54119b771 | [
"MIT"
] | null | null | null | from ase import Atoms
from ase.calculators.emt import EMT
from ase.io.trajectory import Trajectory
from ase.io import read
import numpy as np
import pandas as pd
import argparse
import copy
import os
import pdb
import pickle
from model_eval import model_evaluation
from gmp_feature_selection import backward_elimination
def main():
    """Run GMP backward-elimination feature selection on a synthetic CuCO set.

    Builds 500 CuCO geometries with varying bond length and random z jitter,
    evaluates them with EMT, then runs backward elimination with 2-fold CV.
    Paths are hard-coded for the PACE cluster sandbox.
    """
    dir_prefix = "/storage/home/hpaceice1/plai30/sandbox"
    parallel_workspace = os.path.join(dir_prefix, "pace/parallel_workspace")
    OUTPUT_DIR = os.path.join(dir_prefix, "output")
    #setup dataset
    np.random.seed(3)
    distances = np.linspace(2, 5, 500)
    images = []
    for i in range(len(distances)):
        l = distances[i]
        # Symmetric Cu-C-O-like triatomic; outer atoms get random z offsets.
        image = Atoms(
            "CuCO",
            [
                (-l * np.sin(0.65), l * np.cos(0.65), np.random.uniform(low=-4.0, high=4.0)),
                (0, 0, 0),
                (l * np.sin(0.65), l * np.cos(0.65), np.random.uniform(low=-4.0, high=4.0))
            ],
        )
        image.set_cell([10, 10, 10])
        image.wrap(pbc=True)
        image.set_calculator(EMT())
        images.append(image)
    elements = ["Cu","C","O"]
    # Per-element Gaussian density files for the MCSH/GMP fingerprints.
    atom_gaussians = {"C": os.path.join(dir_prefix, "config/MCSH_potential/C_coredensity_5.g"),
                      "O": os.path.join(dir_prefix, "config/MCSH_potential/O_totaldensity_7.g"),
                      "Cu": os.path.join(dir_prefix, "config/MCSH_potential/Cu_totaldensity_5.g")}
    data = model_evaluation.dataset(elements, images, atom_gaussians=atom_gaussians)
    #set up evaluation parameters
    cutoff = 8
    # Five log-spaced Gaussian widths between 0.05 and 1.0.
    sigmas = (np.logspace(np.log10(0.05), np.log10(1.0), num=5)).tolist()
    model_eval_params = model_evaluation.get_model_eval_params(
        fp_type="gmp", eval_type="k_fold_cv", eval_num_folds=2, eval_cv_iters=1,
        cutoff=cutoff, sigmas=sigmas, nn_layers=3, nn_nodes=20, nn_learning_rate=1e-3,
        nn_batch_size=32, nn_epochs=1000)
    back_elim = backward_elimination.backward_elimination(data, model_eval_params)
    back_elim.run(enable_parallel=True, parallel_workspace=parallel_workspace, seed=1, output_dir=OUTPUT_DIR)
if __name__ == "__main__":
    main()
| 34.546875 | 109 | 0.642243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 294 | 0.132972 |
92acb45103ede7c9d91ff61af3f4f34d8a8e49c2 | 4,797 | py | Python | merlin/cfg.py | USGS-EROS/lcmap-merlin | 692568e7e8e0bbb236e6e801b90abde8be8a05ea | [
"Unlicense"
] | null | null | null | merlin/cfg.py | USGS-EROS/lcmap-merlin | 692568e7e8e0bbb236e6e801b90abde8be8a05ea | [
"Unlicense"
] | null | null | null | merlin/cfg.py | USGS-EROS/lcmap-merlin | 692568e7e8e0bbb236e6e801b90abde8be8a05ea | [
"Unlicense"
] | 3 | 2018-05-08T14:51:56.000Z | 2018-07-12T19:07:34.000Z | from cytoolz import assoc
from cytoolz import merge
from functools import partial
from merlin import chipmunk
from merlin import chips
from merlin import dates
from merlin import formats
from merlin import specs
import os
# Universal band ids (ubids) grouped by logical band name for each supported
# Chipmunk data source: Landsat ARD surface reflectance/QA bands, and
# auxiliary rasters (NLCD, DEM, etc.).
ubids = {'chipmunk-ard': {'reds': ['LC08_SRB4', 'LE07_SRB3', 'LT05_SRB3', 'LT04_SRB3'],
                          'greens': ['LC08_SRB3', 'LE07_SRB2', 'LT05_SRB2', 'LT04_SRB2'],
                          'blues': ['LC08_SRB2', 'LE07_SRB1', 'LT05_SRB1', 'LT04_SRB1'],
                          'nirs': ['LC08_SRB5', 'LE07_SRB4', 'LT05_SRB4', 'LT04_SRB4'],
                          'swir1s': ['LC08_SRB6', 'LE07_SRB5', 'LT05_SRB5', 'LT04_SRB5'],
                          'swir2s': ['LC08_SRB7', 'LE07_SRB7', 'LT05_SRB7', 'LT04_SRB7'],
                          'thermals': ['LC08_BTB10', 'LE07_BTB6', 'LT05_BTB6', 'LT04_BTB6'],
                          'qas': ['LC08_PIXELQA', 'LE07_PIXELQA', 'LT05_PIXELQA', 'LT04_PIXELQA']},
         'chipmunk-aux': {'nlcd': ['AUX_NLCD'],
                          'nlcdtrn': ['AUX_NLCDTRN'],
                          'posidex': ['AUX_POSIDEX'],
                          'mpw': ['AUX_MPW'],
                          'aspect': ['AUX_ASPECT'],
                          'slope': ['AUX_SLOPE'],
                          'dem': ['AUX_DEM']}
         }
def profiles(env, profile=None):
    """Retrieve a configuration profile with env applied.

    Args:
        env (dict): Environment variables
        profile (str): Name of profile to load. If no profile is supplied all
            profiles are returned.

    Returns:
        dict: Profile or profiles with env substitutions.
    """
    url = env.get('CHIPMUNK_URL', None)

    def _endpoint(fn, resource_var, default_resource):
        # Bind a chipmunk endpoint function to the configured url/resource.
        return partial(fn, url=url,
                       resource=env.get(resource_var, default_resource))

    # Chipmunk endpoints shared by every profile; only dates/specs/format
    # differ between profiles, so build the common part once.
    common = {
        'grid_fn': _endpoint(chipmunk.grid,
                             'CHIPMUNK_GRID_RESOURCE', '/grid'),
        'chips_fn': _endpoint(chipmunk.chips,
                              'CHIPMUNK_CHIPS_RESOURCE', '/chips'),
        'registry_fn': _endpoint(chipmunk.registry,
                                 'CHIPMUNK_REGISTRY_RESOURCE', '/registry'),
        'snap_fn': _endpoint(chipmunk.snap,
                             'CHIPMUNK_SNAP_RESOURCE', '/grid/snap'),
        'near_fn': _endpoint(chipmunk.near,
                             'CHIPMUNK_NEAR_RESOURCE', '/grid/near'),
    }

    __profiles = {
        'chipmunk-ard': dict(common,
                             dates_fn=dates.symmetric,
                             specs_fn=partial(specs.mapped,
                                              ubids=ubids['chipmunk-ard']),
                             format_fn=formats.pyccd),
        'chipmunk-aux': dict(common,
                             dates_fn=dates.single,
                             specs_fn=partial(specs.mapped,
                                              ubids=ubids['chipmunk-aux']),
                             format_fn=formats.aux),
    }
    return __profiles.get(profile, None) if profile else __profiles
def get(profile='chipmunk-ard', env=None):
    """Return a configuration profile.

    Args:
        profile (str): Name of profile.
        env (dict): Environment variables to override os.environ

    Returns:
        dict: A Merlin configuration
    """
    # os.environ supplies defaults; explicit `env` entries win the merge.
    p = profiles(env=merge(os.environ, env if env else {}),
                 profile=profile)
    # Record which profile was selected alongside its configuration.
    return assoc(p, 'profile', profile)
| 47.49505 | 104 | 0.488013 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,828 | 0.381072 |
92acbaaddd86f818bb28122d75c4dba5ebed8fa5 | 366 | py | Python | tests/test_core.py | blacktanktop/vivid | e85837bcd86575f8a275517250dd026aac3e451f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/test_core.py | blacktanktop/vivid | e85837bcd86575f8a275517250dd026aac3e451f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/test_core.py | blacktanktop/vivid | e85837bcd86575f8a275517250dd026aac3e451f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from vivid.core import BaseBlock, network_hash
def test_network_hash():
    """network_hash must be deterministic and sensitive to ancestry changes."""
    a = BaseBlock('a')
    b = BaseBlock('b')
    # Distinct blocks hash differently; the same block hashes stably.
    assert network_hash(a) != network_hash(b)
    assert network_hash(a) == network_hash(a)
    c = BaseBlock('c', parent=[a, b])
    hash1 = network_hash(c)
    # Mutating an ancestor's parents must change the descendant's hash.
    a._parent = [BaseBlock('z')]
    hash2 = network_hash(c)
    assert hash1 != hash2
| 24.4 | 46 | 0.642077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.032787 |
92adae391b5af1e46c2cbd108fadd2ff78e87772 | 384 | py | Python | app/app/services/forms.py | dtcooper/crazyarms | 71ea0e58958233daaceb8750043f74ef1a141079 | [
"MIT"
] | 15 | 2021-01-18T17:16:51.000Z | 2022-03-28T22:16:19.000Z | app/app/services/forms.py | dtcooper/carb | 71ea0e58958233daaceb8750043f74ef1a141079 | [
"MIT"
] | 4 | 2021-03-14T16:28:40.000Z | 2021-03-31T16:48:49.000Z | app/app/services/forms.py | dtcooper/carb | 71ea0e58958233daaceb8750043f74ef1a141079 | [
"MIT"
] | 3 | 2021-07-15T02:24:19.000Z | 2022-03-18T11:50:05.000Z | from django import forms
from .services import HarborService
class HarborCustomConfigForm(forms.Form):
    """Form exposing one optional Textarea per harbor custom-config section."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fields section1..sectionN are built dynamically from the
        # HarborService constant rather than declared statically.
        for section_number in range(1, HarborService.CUSTOM_CONFIG_NUM_SECTIONS + 1):
            self.fields[f"section{section_number}"] = forms.CharField(widget=forms.Textarea, required=False)
| 34.909091 | 108 | 0.726563 | 319 | 0.830729 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.067708 |
92adb85a4eb3efaf7dd6076d356a071ec17c7036 | 26,816 | py | Python | deepmars/models/train_model_sys.py | utplanets/deepmars | ba306aa9b25b654636b61cf952af2791b7ed0e56 | [
"MIT"
] | 2 | 2021-08-08T03:06:58.000Z | 2021-11-25T04:06:00.000Z | deepmars/models/train_model_sys.py | utplanets/deepmars | ba306aa9b25b654636b61cf952af2791b7ed0e56 | [
"MIT"
] | null | null | null | deepmars/models/train_model_sys.py | utplanets/deepmars | ba306aa9b25b654636b61cf952af2791b7ed0e56 | [
"MIT"
] | 2 | 2020-11-23T09:38:26.000Z | 2021-02-26T01:14:28.000Z | #!/usr/bin/env python
"""Convolutional Neural Network Training Functions
Functions for building and training a (UNET) Convolutional Neural Network on
images of the Mars and binary ring targets.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import h5py
from keras.models import Model
from keras.layers.core import Dropout, Reshape
from keras.regularizers import l2
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras import backend as K
import deepmars.features.template_match_target as tmt
import deepmars.utils.processing as proc
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import os
from joblib import Parallel, delayed
from tqdm import tqdm, trange
# Check Keras version - code will switch API if needed.
from keras import __version__ as keras_version
# Use TensorFlow channel ordering (channels last) for all image tensors.
K.set_image_dim_ordering('tf')
k2 = True if keras_version[0] == '2' else False
# If Keras is v2.x.x, create Keras 1-syntax wrappers so the rest of the
# module can be written against the Keras 1 API regardless of version.
if not k2:
    from keras.models import load_model
    from keras.layers import merge, Input
    from keras.layers.convolutional import (Convolution2D, MaxPooling2D,
                                            UpSampling2D)
else:
    from keras.models import load_model
    from keras.layers import Concatenate, Input
    from keras.layers.convolutional import (Conv2D, MaxPooling2D,
                                            UpSampling2D)

    def merge(layers, mode=None, concat_axis=None):
        """Wrapper for Keras 2's Concatenate class (`mode` is discarded)."""
        return Concatenate(axis=concat_axis)(list(layers))

    def Convolution2D(n_filters, FL, FLredundant, activation=None,
                      init=None, W_regularizer=None, border_mode=None):
        """Wrapper for Keras 2's Conv2D class.

        FLredundant mirrors Keras 1's separate row/column filter-size
        arguments and is intentionally ignored (Conv2D takes one size).
        """
        return Conv2D(n_filters, FL, activation=activation,
                      kernel_initializer=init,
                      kernel_regularizer=W_regularizer,
                      padding=border_mode)
# Default hyperparameters for the crater template-matching step; passed
# through t2c/get_metrics to template_match_t2c (see
# deepmars.features.template_match_target for their exact semantics).
minrad_ = 5
maxrad_ = 40
longlat_thresh2_ = 1.8
rad_thresh_ = 1.0
template_thresh_ = 0.5
target_thresh_ = 0.1
@click.group()
def dl():
    """Click command group: configure logging and environment for training.

    Sets up INFO-level logging, loads .env variables, and appends
    DM_ROOTDIR to sys.path so project modules resolve.
    """
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    import sys
    sys.path.append(os.getenv("DM_ROOTDIR"))
    pass
########################
def get_param_i(param, i):
    """Return the hyperparameter for iteration i.

    Parameters
    ----------
    param : list
        List of model hyperparameters to be iterated over.
    i : integer
        Hyperparameter iteration.

    Returns
    -------
    The i-th entry when it exists; otherwise the first entry is reused
    for every remaining iteration.
    """
    return param[i] if i < len(param) else param[0]
########################
def custom_image_generator(data, target, batch_size=32):
    """Custom image generator that manipulates image/target pairs to prevent
    overfitting in the Convolutional Neural Network.

    Applies random horizontal/vertical flips, pixel shifts (up to 15 px)
    and 90-degree rotations, identically to each image and its target.

    Parameters
    ----------
    data : array
        Input images, shape (N, H, W, channels).
    target : array
        Target images, shape (N, H, W).
    batch_size : int, optional
        Batch size for image manipulation.

    Yields
    ------
    Manipulated images and targets.
    """
    D, L, W = data.shape[0], data[0].shape[0], data[0].shape[1]
    while True:
        shuffle_index = np.arange(D)
        # only shuffle once each loop through the data
        np.random.shuffle(shuffle_index)
        for i in np.arange(0, len(data), batch_size):
            index = shuffle_index[i:i + batch_size]
            d, t = data[index].copy(), target[index].copy()
            # Bug fix: the final slice can be shorter than batch_size; size
            # all per-batch randomness to the actual batch length to avoid
            # IndexError when len(data) % batch_size != 0.
            n = len(d)
            # Random color inversion
            # for j in np.where(np.random.randint(0, 2, n) == 1)[0]:
            #     d[j][d[j] > 0.] = 1. - d[j][d[j] > 0.]
            # Horizontal/vertical flips
            for j in np.where(np.random.randint(0, 2, n) == 1)[0]:
                d[j], t[j] = np.fliplr(d[j]), np.fliplr(t[j])  # left/right
            for j in np.where(np.random.randint(0, 2, n) == 1)[0]:
                d[j], t[j] = np.flipud(d[j]), np.flipud(t[j])  # up/down
            # Random up/down & left/right pixel shifts, 90 degree rotations
            npix = 15
            # Horizontal shift
            h = np.random.randint(-npix, npix + 1, n)
            # Vertical shift
            v = np.random.randint(-npix, npix + 1, n)
            # 90 degree rotations
            r = np.random.randint(0, 4, n)
            for j in range(n):
                # Zero-pad, then take the shifted window of the original size.
                d[j] = np.pad(d[j], ((npix, npix), (npix, npix), (0, 0)),
                              mode='constant')[npix + h[j]:L + h[j] + npix,
                                               npix + v[j]:W + v[j] + npix, :]
                sh, sv = slice(npix + h[j], L + h[j] + npix),\
                    slice(npix + v[j], W + v[j] + npix)
                t[j] = np.pad(t[j], (npix,), mode='constant')[sh, sv]
                d[j], t[j] = np.rot90(d[j], r[j]), np.rot90(t[j], r[j])
            yield (d, t)
def t2c(pred, csv, i,
        minrad=minrad_,
        maxrad=maxrad_,
        longlat_thresh2=longlat_thresh2_,
        rad_thresh=rad_thresh_,
        template_thresh=template_thresh_,
        target_thresh=target_thresh_):
    """Template-match one CNN prediction against its ground-truth craters.

    Thin wrapper around template_match_t2c that prepends the image index
    `i` so rows returned by parallel workers remain identifiable.

    Parameters
    ----------
    pred : array
        CNN-predicted crater target image.
    csv : array
        Human-annotated crater coordinates for this image.
    i : int
        Image index, carried through as the first element of the result.

    Returns
    -------
    numpy.ndarray
        [i, *template_match_t2c(...)] statistics row.
    """
    # Bug fix: the comma after `maxrad=maxrad` was missing, which made this
    # function a SyntaxError in the original source.
    return np.hstack([i,
                      tmt.template_match_t2c(pred, csv,
                                             minrad=minrad,
                                             maxrad=maxrad,
                                             longlat_thresh2=longlat_thresh2,
                                             rad_thresh=rad_thresh,
                                             template_thresh=template_thresh,
                                             target_thresh=target_thresh)])
def diagnostic(res, beta):
    """Calculate the metrics from the predictions compared to the CSV.

    Parameters
    ----------
    res : list of results, one row per image, containing:
        image number, number of matched, number of existing craters, number
        of detected craters, maximum radius detected, mean error in
        longitude, mean error in latitude, mean error in radius, fraction
        of duplicates in detections.
    beta : int
        Beta value when calculating F-beta score.

    Returns
    -------
    dictionary : metrics stored in a dictionary (arrays filtered to images
        with at least one matched crater).
    """
    (counter, N_match, N_csv, N_detect,
     mrad, err_lo, err_la, err_r, frac_duplicates) = np.array(res).T
    # Keep only images with at least one matched crater; the ratios below
    # would divide by zero otherwise.  (Bug fix: a typo previously assigned
    # the filtered radius errors to an unused `errr_` variable, so `err_r`
    # was returned unfiltered.)
    w = np.where(N_match > 0)
    counter, N_match, N_csv, N_detect = (counter[w], N_match[w],
                                         N_csv[w], N_detect[w])
    mrad, err_lo, err_la, err_r = mrad[w], err_lo[w], err_la[w], err_r[w]
    frac_duplicates = frac_duplicates[w]
    precision = N_match / (N_match + (N_detect - N_match))
    recall = N_match / N_csv
    fscore = (1 + beta**2) * (recall * precision) / \
        (precision * beta**2 + recall)
    # Craters detected but not matched to an annotation are "new".
    diff = N_detect - N_match
    frac_new = diff / (N_detect + diff)
    frac_new2 = diff / (N_csv + diff)
    return dict(precision=precision,
                recall=recall,
                fscore=fscore,
                frac_new=frac_new,
                frac_new2=frac_new2,
                err_lo=err_lo,
                err_la=err_la,
                err_r=err_r,
                frac_duplicates=frac_duplicates,
                maxrad=mrad,
                counter=counter, N_match=N_match, N_csv=N_csv)
def get_metrics(data, craters_images, dim, model, name, beta=1, offset=0,
                minrad=minrad_, maxrad=maxrad_,
                longlat_thresh2=longlat_thresh2_,
                rad_thresh=rad_thresh_, template_thresh=template_thresh_,
                target_thresh=target_thresh_, rmv_oor_csvs=0):
    """Function that prints pertinent metrics at the end of each epoch.

    Parameters
    ----------
    data : hdf5
        Input images (X, Y) pair; only X is currently used.
    craters_images : tuple
        (craters, images): pandas arrays of human-counted crater data and
        the matching image names.
    dim : int
        Dimension of input images (assumes square).
    model : keras model object or array
        Keras model, or a precomputed prediction array.
    name : str
        Dataset name used when saving predictions to predictions.hdf5.
    beta : int, optional
        Beta value when calculating F-beta score. Defaults to 1.
    offset, rmv_oor_csvs :
        NOTE(review): currently unused in this function -- confirm intent.
    minrad, maxrad, longlat_thresh2, rad_thresh, template_thresh,
    target_thresh : optional
        Passed through to the template-matching step (t2c).

    Returns
    -------
    dict or None
        Metrics from diagnostic(), or None when no image yields a result.
    """
    X, Y = data[0], data[1]
    craters, images = craters_images
    # Get csvs of human-counted craters
    csvs = []
    # minrad, maxrad = 3, 50
    cutrad, n_csvs = 0.8, len(X)
    diam = 'Diameter (pix)'
    for i in range(len(X)):
        imname = images[i]  # name = "img_{0:05d}".format(i)
        found = False
        for crat in craters:
            if imname in crat:
                csv = crat[imname]
                found = True
        if not found:
            # Sentinel -2: no human annotations exist for this image.
            csvs.append([-2])
            continue
        # remove small/large/half craters
        csv = csv[(csv[diam] < 2 * maxrad) & (csv[diam] > 2 * minrad)]
        csv = csv[(csv['x'] + cutrad * csv[diam] / 2 <= dim)]
        csv = csv[(csv['y'] + cutrad * csv[diam] / 2 <= dim)]
        csv = csv[(csv['x'] - cutrad * csv[diam] / 2 > 0)]
        csv = csv[(csv['y'] - cutrad * csv[diam] / 2 > 0)]
        if len(csv) < 3:  # Exclude csvs with few craters (sentinel -1)
            csvs.append([-1])
        else:
            csv_coords = np.asarray((csv['x'], csv['y'], csv[diam] / 2)).T
            csvs.append(csv_coords)
    # Calculate custom metrics
    print("csvs: {}".format(len(csvs)))
    print("")
    print("*********Custom Loss*********")
    # NOTE(review): these accumulators are never used below (diagnostic()
    # produces the metrics); candidates for removal.
    recall, precision, fscore = [], [], []
    frac_new, frac_new2, mrad = [], [], []
    err_lo, err_la, err_r = [], [], []
    frac_duplicates = []
    if isinstance(model, Model):
        preds = None
        # print(X[6].min(),X[6].max(),X.dtype,np.percentile(X[6],99))
        preds = model.predict(X, verbose=1)
        # Save predictions for later reuse.
        # NOTE(review): h5f is never closed -- confirm whether that matters here.
        h5f = h5py.File("predictions.hdf5", 'w')
        h5f.create_dataset(name, data=preds)
        print("Successfully generated and saved model predictions.")
    else:
        # `model` is already an array of predictions.
        preds = model
    # print(csvs)
    countme = [i for i in range(n_csvs) if len(csvs[i]) >= 3]
    print("Processing {} fields".format(len(countme)))
    # preds contains a large number of predictions,
    # so we run the template code in parallel.
    res = Parallel(n_jobs=24,
                   verbose=5)(delayed(t2c)(preds[i], csvs[i], i,
                                           minrad=minrad,
                                           maxrad=maxrad,
                                           longlat_thresh2=longlat_thresh2,
                                           rad_thresh=rad_thresh,
                                           template_thresh=template_thresh,
                                           target_thresh=target_thresh)
                              for i in range(n_csvs) if len(csvs[i]) >= 3)
    if len(res) == 0:
        print("No valid results: ", res)
        return None
    # At this point we've processed the predictions with the template matching
    # algorithm, now calculate the metrics from the data.
    diag = diagnostic(res, beta)
    print(len(diag["recall"]))
    # print("binary XE score = %f" % model.evaluate(X, Y))
    if len(diag["recall"]) > 3:
        metric_data = [("N_match/N_csv (recall)", diag["recall"]),
                       ("N_match/(N_match + (N_detect-N_match)) (precision)",
                        diag["precision"]),
                       ("F_{} score".format(beta), diag["fscore"]),
                       ("(N_detect - N_match)/N_detect" +
                        "(fraction of craters that are new)",
                        diag["frac_new"]),
                       ("(N_detect - N_match)/N_csv (fraction" +
                        "of craters that are new, 2)", diag["frac_new2"])]
        for fname, data in metric_data:
            print("mean and std of %s = %f, %f" %
                  (fname, np.mean(data), np.std(data)))
        for fname, data in [("fractional longitude diff", diag["err_lo"]),
                            ("fractional latitude diff", diag["err_la"]),
                            ("fractional radius diff", diag["err_r"]),
                            ]:
            print("median and IQR %s = %f, 25:%f, 75:%f" %
                  (fname,
                   np.median(data),
                   np.percentile(data, 25),
                   np.percentile(data, 75)))
        print("""mean and std of maximum detected pixel radius in an image =
              %f, %f""" % (np.mean(diag["maxrad"]), np.std(diag["maxrad"])))
        print("""absolute maximum detected pixel radius over all images =
              %f""" % np.max(diag["maxrad"]))
        print("")
    return diag
########################
def build_model(dim, learn_rate, lmbda, drop, FL, init, n_filters):
    """Function that builds the (UNET) convolutional neural network.
    Parameters
    ----------
    dim : int
        Dimension of input images (assumes square).
    learn_rate : float
        Learning rate.
    lmbda : float
        Convolution2D regularization parameter.
    drop : float
        Dropout fraction.
    FL : int
        Filter length.
    init : string
        Weight initialization type.
    n_filters : int
        Number of filters in each layer.
    Returns
    -------
    model : keras model object
        Constructed Keras model.
    """
    print('Making UNET model...')

    def conv_pair(tensor, filters):
        # Two back-to-back regularized 'same' ReLU convolutions (Keras 1 API).
        tensor = Convolution2D(filters, FL, FL, activation='relu', init=init,
                               W_regularizer=l2(lmbda), border_mode='same')(tensor)
        return Convolution2D(filters, FL, FL, activation='relu', init=init,
                             W_regularizer=l2(lmbda), border_mode='same')(tensor)

    img_input = Input(batch_shape=(None, dim, dim, 1))

    # Contracting path: three conv stages, each followed by 2x2 max-pooling.
    a1 = conv_pair(img_input, n_filters)
    a1P = MaxPooling2D((2, 2), strides=(2, 2))(a1)
    a2 = conv_pair(a1P, n_filters * 2)
    a2P = MaxPooling2D((2, 2), strides=(2, 2))(a2)
    a3 = conv_pair(a2P, n_filters * 4)
    a3P = MaxPooling2D((2, 2), strides=(2, 2))(a3)

    # Bottleneck.
    u = conv_pair(a3P, n_filters * 4)

    # Expanding path: upsample, concatenate the matching skip connection,
    # dropout, then another conv pair with the stage's filter count.
    for skip, filters in ((a3, n_filters * 2), (a2, n_filters), (a1, n_filters)):
        u = UpSampling2D((2, 2))(u)
        u = merge((skip, u), mode='concat', concat_axis=3)
        u = Dropout(drop)(u)
        u = conv_pair(u, filters)

    # Final 1x1 convolution producing a per-pixel sigmoid ring probability.
    u = Convolution2D(1, 1, 1, activation='sigmoid', init=init,
                      W_regularizer=l2(lmbda), border_mode='same')(u)
    u = Reshape((dim, dim))(u)

    # `k2` (defined elsewhere in this file) selects the Keras 2 vs 1 Model API.
    if k2:
        model = Model(inputs=img_input, outputs=u)
    else:
        model = Model(input=img_input, output=u)
    model.compile(loss='binary_crossentropy', optimizer=Adam(lr=learn_rate))
    print(model.summary())
    return model
########################
def test_model(Data, Craters, MP, i_MP):
    """Load a pre-trained model and report metrics on the configured test set.

    Parameters
    ----------
    Data : dict
        Inputs and Target Moon data (keyed by dataset name).
    Craters : dict
        Human-counted crater data (keyed by dataset name).
    MP : dict
        Model parameters; must contain 'dim', 'model' and 'test_dataset'.
    i_MP : int
        Iteration number; kept for signature parity with
        train_and_test_model but unused here.
    """
    dim = MP['dim']
    # The per-run hyper-parameter lookups (filter length, learning rate, ...)
    # previously performed here were dead code: a saved model is loaded, so
    # none of those values are ever used. They have been removed.
    model = load_model(MP["model"])
    get_metrics(Data[MP["test_dataset"]],
                Craters[MP["test_dataset"]], dim, model, MP["test_dataset"])
def train_and_test_model(Data, Craters, MP, i_MP):
    """Function that trains, tests and saves the model, printing out metrics
    after each model.

    Parameters
    ----------
    Data : dict
        Inputs and Target Moon data.
    Craters : dict
        Human-counted crater data.
    MP : dict
        Contains all relevant parameters.
    i_MP : int
        Iteration number (when iterating over hypers).
    """
    # Static params
    dim, nb_epoch, bs = MP['dim'], MP['epochs'], MP['bs']
    # Iterating params
    FL = get_param_i(MP['filter_length'], i_MP)
    learn_rate = get_param_i(MP['lr'], i_MP)
    n_filters = get_param_i(MP['n_filters'], i_MP)
    init = get_param_i(MP['init'], i_MP)
    lmbda = get_param_i(MP['lambda'], i_MP)
    drop = get_param_i(MP['dropout'], i_MP)
    # Build model (or resume from a provided checkpoint).
    if MP["model"] is not None:
        model = load_model(MP["model"])
    else:
        model = build_model(dim, learn_rate, lmbda, drop, FL, init, n_filters)
    # Main loop: run one keras "epoch" per iteration so a checkpoint can be
    # saved (and custom metrics optionally computed) after every pass.
    n_samples = MP['n_train']
    for nb in range(nb_epoch):
        if k2:  # Keras 2 fit_generator API
            model.fit_generator(
                custom_image_generator(Data['train'][0], Data['train'][1],
                                       batch_size=bs),
                steps_per_epoch=n_samples / bs, epochs=1, verbose=1,
                # validation_data=(Data['dev'][0],Data['dev'][1]), #no gen
                validation_data=custom_image_generator(Data['dev'][0],
                                                       Data['dev'][1],
                                                       batch_size=bs),
                validation_steps=MP['n_dev'] / bs,
                callbacks=[
                    EarlyStopping(monitor='val_loss', patience=3, verbose=0)])
        else:  # Keras 1 fit_generator API
            model.fit_generator(
                custom_image_generator(Data['train'][0], Data['train'][1],
                                       batch_size=bs),
                samples_per_epoch=n_samples, nb_epoch=1, verbose=1,
                # validation_data=(Data['dev'][0],Data['dev'][1]), #no gen
                validation_data=custom_image_generator(Data['dev'][0],
                                                       Data['dev'][1],
                                                       batch_size=bs),
                # NOTE(review): validates on n_samples (the *train* count);
                # possibly MP['n_dev'] was intended -- confirm.
                nb_val_samples=n_samples,
                callbacks=[
                    EarlyStopping(monitor='val_loss', patience=3, verbose=0)])
        suffix = "{}_{}_{}_{}_{}_{}_{}.hdf5".format(learn_rate,
                                                    n_filters,
                                                    init,
                                                    lmbda,
                                                    drop,
                                                    nb,
                                                    nb_epoch)
        # BUGFIX: the format string previously had no placeholder
        # ("model_".format(suffix)), so every epoch overwrote the same file
        # "model_". Include the suffix so per-epoch checkpoints are distinct.
        model_save_name = os.path.join(MP["save_dir"],
                                       "model_{}".format(suffix))
        if MP['save_models']:
            model.save(model_save_name)
        if MP["calculate_custom_loss"]:
            get_metrics(Data['dev'], Craters['dev'], dim, model, "dev")
    if MP["save_models"] == 1:
        model.save(os.path.join(MP["save_dir"], MP["final_save_name"]))
    print("###################################")
    print("##########END_OF_RUN_INFO##########")
    print("""learning_rate=%e, batch_size=%d, filter_length=%e, n_epoch=%d
          n_train=%d, img_dimensions=%d, init=%s, n_filters=%d, lambda=%e
          dropout=%f""" % (learn_rate, bs, FL, nb_epoch, MP['n_train'],
                           MP['dim'], init, n_filters, lmbda, drop))
    if MP["calculate_custom_loss"]:
        get_metrics(Data['test'], Craters['test'], dim, model, "test")
    print("###################################")
    print("###################################")
########################
def get_models(MP):
    """Top-level function that loads data files and calls train_and_test_model.

    Parameters
    ----------
    MP : dict
        Model Parameters.
    """
    data_dir = MP['dir']  # renamed from `dir` to avoid shadowing the builtin
    n_train, n_dev, n_test = MP['n_train'], MP['n_dev'], MP['n_test']

    # Load data
    def load_files(numbers, test, this_dataset):
        """Load image/mask hdf5 files and crater stores for the given file
        indices; in test mode only the dataset under test is actually read."""
        res0 = []
        res1 = []
        files = []
        craters = []
        images = []
        npic = 0
        if not test or this_dataset:  # same as: not test or (test and this_dataset)
            for n in tqdm(numbers):
                files.append(h5py.File(os.path.join(
                    data_dir, "sys_images_{0:05d}.hdf5".format(n)), 'r'))
                # Each hdf5 file holds 1000 consecutively numbered images.
                images.extend(["img_{0:05d}".format(a)
                               for a in np.arange(n, n + 1000)])
                res0.append(files[-1]["input_images"][:].astype('float32'))
                npic = npic + len(res0[-1])
                res1.append(files[-1]["target_masks"][:].astype('float32'))
                files[-1].close()
                craters.append(pd.HDFStore(os.path.join(
                    data_dir, "sys_craters_{0:05d}.hdf5".format(n)), 'r'))
        # BUGFIX: np.vstack([]) raises "need at least one array" for the
        # datasets that are skipped in test mode; return empty arrays instead
        # so the Data dict can still be built and sliced downstream.
        if res0:
            res0 = np.vstack(res0)
            res1 = np.vstack(res1)
        else:
            res0 = np.empty((0,), dtype='float32')
            res1 = np.empty((0,), dtype='float32')
        return files, res0, res1, npic, craters, images

    (train_files, train0, train1, Ntrain,
     train_craters, train_images) = load_files(MP["train_indices"],
                                               MP["test"],
                                               MP["test_dataset"] == "train")
    print(Ntrain, n_train)
    (dev_files, dev0, dev1, Ndev,
     dev_craters, dev_images) = load_files(MP["dev_indices"],
                                           MP["test"],
                                           MP["test_dataset"] == "dev")
    print(Ndev, n_dev)
    (test_files, test0, test1, Ntest,
     test_craters, test_images) = load_files(MP["test_indices"],
                                             MP["test"],
                                             MP["test_dataset"] == "test")
    print(Ntest, n_test)
    Data = {
        "train": [train0, train1],
        "dev": [dev0, dev1],
        "test": [test0[:n_test], test1[:n_test]]
    }
    # Rescale, normalize, add extra dim
    proc.preprocess(Data)
    # Load ground-truth craters
    Craters = {
        'train': [train_craters, train_images],
        'dev': [dev_craters, dev_images],
        'test': [test_craters, test_images]
    }
    # Iterate over parameters
    if MP["test"]:
        test_model(Data, Craters, MP, 0)
        return
    for i in range(MP['N_runs']):
        train_and_test_model(Data, Craters, MP, i)
@dl.command()
@click.option("--test", is_flag=True, default=False)
@click.option("--test_dataset", default="dev")
@click.option("--model", default=None)
def train_model(test, test_dataset, model):
    """Run Convolutional Neural Network Training
    Execute the training of a (UNET) Convolutional Neural Network on
    images of the Moon and binary ring targets.
    Assembles the MP (Model Parameters) dict from fixed values plus the
    per-run hyper-parameters in runs.csv, then hands off to get_models.
    """
    # Model Parameters
    MP = {}
    # Directory of train/dev/test image and crater hdf5 files.
    # NOTE(review): assumes the DM_ROOTDIR environment variable is set;
    # os.path.join raises TypeError if os.getenv returns None -- confirm.
    MP['dir'] = os.path.join(os.getenv("DM_ROOTDIR"), 'data/processed/')
    # Image width/height, assuming square images.
    MP['dim'] = 256
    # Batch size: smaller values = less memory, less accurate gradient estimate
    MP['bs'] = 10
    # Number of training epochs.
    MP['epochs'] = 30
    # Number of train/valid/test samples, needs to be a multiple of batch size.
    # sample every even numbered image file to use in the training,
    # half of the odd number for testing.
    # half of the odd numbers for validation.
    MP['train_indices'] = list(np.arange(162000, 208000, 2000))
    MP['dev_indices'] = list(np.arange(161000, 206000, 4000))
    MP['test_indices'] = list(np.arange(163000, 206000, 4000))
    # MP['test_indices'] = 90000#list(np.arange(10000,184000,8000))
    # Each index corresponds to an hdf5 file containing 1000 images.
    MP['n_train'] = len(MP["train_indices"]) * 1000
    MP['n_dev'] = len(MP["dev_indices"]) * 1000
    MP['n_test'] = len(MP["test_indices"]) * 1000
    print(MP["n_train"], MP["n_dev"], MP["n_test"])
    # Save model (binary flag) and directory.
    MP['save_models'] = 1
    MP["calculate_custom_loss"] = False
    MP['save_dir'] = 'models'
    MP['final_save_name'] = 'model.h5'
    # initial model (checkpoint path to resume from, or None to build fresh)
    MP["model"] = model
    # testing only
    MP["test"] = test
    MP["test_dataset"] = test_dataset
    # Model Parameters (to potentially iterate over, keep in lists).
    # runs.csv looks like
    # filter_length,lr,n_filters,init,lambda,dropout
    # 3,0.0001,112,he_normal,1e-6,0.15
    #
    # each line is a new run.
    df = pd.read_csv("runs.csv")
    for na, ty in [("filter_length", int),
                   ("lr", float),
                   ("n_filters", int),
                   ("init", str),
                   ("lambda", float),
                   ("dropout", float)]:
        MP[na] = df[na].astype(ty).values
    MP['N_runs'] = len(MP['lambda'])  # Number of runs
    # NOTE(review): this overrides the filter_length column just read from
    # runs.csv with a fixed value -- confirm that is intentional.
    MP['filter_length'] = [3]  # Filter length
    # MP['lr'] = [0.0001]  # Learning rate
    # MP['n_filters'] = [112]  # Number of filters
    # MP['init'] = ['he_normal']  # Weight initialization
    # MP['lambda'] = [1e-6]  # Weight regularization
    # MP['dropout'] = [0.15]  # Dropout fraction
    # Iterating over parameters example.
    # MP['N_runs'] = 2
    # MP['lambda']=[1e-4,1e-4]
    print(MP)
    get_models(MP)
if __name__ == '__main__':
    # Entry point: invoke the click command group `dl` (defined elsewhere in
    # this file), which dispatches to commands registered via @dl.command().
    dl()
| 36.237838 | 79 | 0.545085 | 0 | 0 | 2,267 | 0.084539 | 3,367 | 0.125559 | 0 | 0 | 8,682 | 0.323762 |
92adf2c4c13c901877b09563e05bc3de942a3158 | 4,608 | py | Python | chainer_bcnn/functions/loss/noised_cross_entropy.py | yuta-hi/bayesian_unet | cce1dbd75fad9cc29b77eb1c76b33c6a3eb0ffa6 | [
"MIT"
] | 36 | 2019-12-04T02:09:25.000Z | 2022-03-31T07:18:40.000Z | chainer_bcnn/functions/loss/noised_cross_entropy.py | keisuke-uemura/bayesian_unet | cce1dbd75fad9cc29b77eb1c76b33c6a3eb0ffa6 | [
"MIT"
] | 2 | 2019-12-03T06:35:07.000Z | 2020-06-14T23:14:13.000Z | chainer_bcnn/functions/loss/noised_cross_entropy.py | keisuke-uemura/bayesian_unet | cce1dbd75fad9cc29b77eb1c76b33c6a3eb0ffa6 | [
"MIT"
] | 8 | 2020-12-07T03:43:22.000Z | 2022-02-02T03:39:40.000Z | from __future__ import absolute_import
from chainer import backend
from chainer import functions as F
from chainer.functions import sigmoid_cross_entropy
from chainer.functions import softmax_cross_entropy
from .sigmoid_soft_cross_entropy import sigmoid_soft_cross_entropy
def noised_softmax_cross_entropy(y, t, mc_iteration,
                                 normalize=True, cache_score=True, class_weight=None,
                                 ignore_label=-1, reduce='mean', enable_double_backprop=False):
    """ Softmax Cross-entropy for aleatoric uncertainty estimates.
    Averages the per-pixel softmax cross-entropy over `mc_iteration` Monte
    Carlo samples of Gaussian noise added to the logits.
    See: https://arxiv.org/pdf/1703.04977.pdf
    Args:
        y (list of ~chainer.Variable): logits and sigma
        t (~numpy.ndarray or ~cupy.ndarray): ground-truth
        mc_iteration (int): number of iteration of MCMC.
        normalize (bool, optional): Defaults to True.
        reduce (str, optional): Defaults to 'mean'.
    Returns:
        [~chainer.Variable]: Loss value.
    """
    assert isinstance(y, (list, tuple))
    logits, log_std = y
    # The noise-scale tensor must be broadcastable to the logits.
    assert logits.shape[0] == log_std.shape[0]
    assert log_std.shape[1] in (logits.shape[1], 1)
    assert logits.shape[2:] == log_std.shape[2:]
    xp = backend.get_array_module(t)
    # std = F.sqrt(F.exp(log_var))
    sigma = F.exp(log_std)
    accum = 0.
    for _ in range(mc_iteration):
        eps = sigma * xp.random.normal(0., 1., sigma.shape)
        accum += softmax_cross_entropy(logits + eps, t,
                                       normalize=False,
                                       cache_score=cache_score,
                                       class_weight=class_weight,
                                       ignore_label=ignore_label,
                                       reduce='no',
                                       enable_double_backprop=enable_double_backprop)
    if reduce != 'mean':
        return accum
    denom = (accum.size if normalize else max(1, len(accum))) * mc_iteration
    return F.sum(accum) / denom
def noised_sigmoid_cross_entropy(y, t, mc_iteration, normalize=True, reduce='mean'):
    """ Sigmoid Cross-entropy for aleatoric uncertainty estimates.
    Averages the per-pixel sigmoid cross-entropy over `mc_iteration` Monte
    Carlo samples of Gaussian noise added to the logits.
    Args:
        y (list of ~chainer.Variable): logits and sigma
        t (~numpy.ndarray or ~cupy.ndarray): ground-truth
        mc_iteration (int): number of iteration of MCMC.
        normalize (bool, optional): Defaults to True.
        reduce (str, optional): Defaults to 'mean'.
    Returns:
        [~chainer.Variable]: Loss value.
    """
    assert isinstance(y, (list, tuple))
    logits, log_std = y
    # The noise-scale tensor must be broadcastable to the logits, and the
    # targets must match the logits element-wise.
    assert logits.shape[0] == log_std.shape[0]
    assert log_std.shape[1] in (logits.shape[1], 1)
    assert logits.shape[2:] == log_std.shape[2:]
    assert logits.shape == t.shape
    xp = backend.get_array_module(t)
    # std = F.sqrt(F.exp(log_var))
    sigma = F.exp(log_std)
    accum = 0.
    for _ in range(mc_iteration):
        eps = sigma * xp.random.normal(0., 1., sigma.shape)
        accum += sigmoid_cross_entropy(logits + eps, t,
                                       normalize=False,
                                       reduce='no')
    if reduce != 'mean':
        return accum
    denom = (accum.size if normalize else max(1, len(accum))) * mc_iteration
    return F.sum(accum) / denom
def noised_sigmoid_soft_cross_entropy(y, t, mc_iteration, normalize=True, reduce='mean'):
    """ Sigmoid Soft Cross-entropy for aleatoric uncertainty estimates.
    Averages the per-pixel sigmoid soft cross-entropy over `mc_iteration`
    Monte Carlo samples of Gaussian noise added to the logits.
    Args:
        y (list of ~chainer.Variable): logits and sigma
        t (~numpy.ndarray or ~cupy.ndarray): ground-truth
        mc_iteration (int): number of iteration of MCMC.
        normalize (bool, optional): Defaults to True.
        reduce (str, optional): Defaults to 'mean'.
    Returns:
        [~chainer.Variable]: Loss value.
    """
    assert isinstance(y, (list, tuple))
    logits, log_std = y
    # Here the noise scale and the targets must both match the logits exactly.
    assert logits.shape == log_std.shape
    assert logits.shape == t.shape
    xp = backend.get_array_module(t)
    # std = F.sqrt(F.exp(log_var))
    sigma = F.exp(log_std)
    accum = 0.
    for _ in range(mc_iteration):
        eps = sigma * xp.random.normal(0., 1., sigma.shape)
        accum += sigmoid_soft_cross_entropy(logits + eps, t,
                                            normalize=False,
                                            reduce='no')
    if reduce != 'mean':
        return accum
    denom = (accum.size if normalize else max(1, len(accum))) * mc_iteration
    return F.sum(accum) / denom
| 30.516556 | 95 | 0.591146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,428 | 0.309896 |
92ae5b5d2fee593a7d7b0979f8e6f0afa7ccd217 | 160 | py | Python | malib/value_functions/__init__.py | wwxFromTju/malib | 7cd2a4af55cf1f56da8854e26ea7a4f3782ceea2 | [
"MIT"
] | 6 | 2021-05-19T10:25:36.000Z | 2021-12-27T03:30:33.000Z | malib/value_functions/__init__.py | wwxFromTju/malib | 7cd2a4af55cf1f56da8854e26ea7a4f3782ceea2 | [
"MIT"
] | 1 | 2021-05-29T04:51:37.000Z | 2021-05-30T06:18:10.000Z | malib/value_functions/__init__.py | ying-wen/malib_deprecated | 875338b81c4d87064ad31201f461ef742db05f25 | [
"MIT"
] | 1 | 2021-05-31T16:16:12.000Z | 2021-05-31T16:16:12.000Z | from malib.value_functions.value_function import (
MLPValueFunction,
CommNetValueFunction,
BiCNetValueFunction,
)
# __all__ = ["MLPValueFunction"]
| 20 | 50 | 0.7625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.2 |
92af515669514482415e4aa95b757d705f7e871b | 3,010 | py | Python | save_scummer/utils.py | JWCook/save-scummer | 5822e84fa1d87d2d3db95d69a013234d213b4322 | [
"MIT"
] | 2 | 2021-02-19T18:50:14.000Z | 2021-08-30T20:42:46.000Z | save_scummer/utils.py | JWCook/save-scummer | 5822e84fa1d87d2d3db95d69a013234d213b4322 | [
"MIT"
] | null | null | null | save_scummer/utils.py | JWCook/save-scummer | 5822e84fa1d87d2d3db95d69a013234d213b4322 | [
"MIT"
] | null | null | null | """Generic utility functions that don't depend on other modules"""
from datetime import datetime, timedelta
from dateutil.parser import parse as parse_date
from os.path import getmtime
from pathlib import Path
from typing import Dict, Iterable, Union
from pytimeparse import parse as parse_time
StrOrPath = Union[Path, str]  # Type alias: functions in this module accept str or Path
DATETIME_FORMAT = '%Y-%m-%d %H:%M'  # Timestamp display format used by format_timestamp
def format_file_size(n_bytes: int) -> str:
    """Given a number of bytes, return in human-readable format.

    Divides by 1024 per unit step, capping at TB, and formats with two
    decimals, e.g. ``2048 -> '2.00 KB'``.

    The previous implementation carried an unreachable duplicate return
    after the loop (the loop always returned on the 'TB' iteration at the
    latest); this version removes the dead code.
    """
    filesize = float(n_bytes)
    for unit in ['bytes', 'KB', 'MB', 'GB']:
        if filesize < 1024:
            return f'{filesize:.2f} {unit}'
        filesize /= 1024
    # Anything >= 1024 GB stays in TB, however large.
    return f'{filesize:.2f} TB'
def format_timestamp(dt: Union[str, datetime]) -> str:
    """Reformat a datetime string into a common format, along with time elapsed.

    Falsy input yields 'never'; string input is parsed with dateutil. The
    elapsed part is human-readable, e.g. "5 minutes ago" or "2 days ago."
    Adapted from: https://stackoverflow.com/a/1551394
    """
    if not dt:
        return 'never'
    if not isinstance(dt, datetime):
        dt = parse_date(dt)
    delta = datetime.now() - dt
    if delta.days == 0:
        secs = delta.seconds
        if secs < 60:
            time_elapsed = f'{secs} seconds ago'
        elif secs < 3600:
            time_elapsed = f'{int(secs / 60)} minutes ago'
        else:
            time_elapsed = f'{int(secs / 3600)} hours ago'
    elif delta.days == 1:
        time_elapsed = 'yesterday'
    else:
        time_elapsed = f'{delta.days} days ago'
    return f'{dt.strftime(DATETIME_FORMAT)} ({time_elapsed})'
def get_datetime_by_age(age: str) -> datetime:
    """Return the moment that lies *age* (a pytimeparse string, e.g. '2 days') in the past."""
    seconds_back = parse_time(age)
    return datetime.now() - timedelta(seconds=seconds_back)
def get_dir_files_by_date(path: Path) -> Dict[Path, datetime]:
    """Get all files in the specified directory, sorted by modification time
    (most recent first), mapped to each file's modification datetime.

    Note: the previous docstring said "creation date", but the code sorts by
    ``st_mtime`` (modification time); the documentation now matches the code.

    Returns an empty dict if the directory cannot be read.
    """
    try:
        files = list(path.iterdir())
    except IOError:
        return {}
    # Sort newest-first by modification timestamp (consistently via stat()).
    files.sort(key=lambda p: p.stat().st_mtime, reverse=True)
    return {p: datetime.fromtimestamp(p.stat().st_mtime) for p in files}
def get_dir_size(path: Path) -> str:
    """Get (non-recursive) sum of file sizes in the given directory, in human-readable format.

    Returns '0 bytes' when the directory cannot be read.
    """
    try:
        total = sum(entry.stat().st_size for entry in path.iterdir())
    except IOError:
        return '0 bytes'
    return format_file_size(total)
def get_latest_modified(paths: Iterable[Path]) -> datetime:
    """Get the most recent 'modified on' timestamp from the paths given.

    For a save directory with multiple files, this is the best indicator of
    when the save was created, as not all files may be modified with each
    save. Microseconds are stripped from the result.
    """
    newest = max(datetime.fromtimestamp(p.stat().st_mtime) for p in paths)
    return newest.replace(microsecond=0)
def normalize_path(path: StrOrPath) -> Path:
    """Expand '~' and resolve the given str/Path to an absolute Path."""
    expanded = Path(path).expanduser()
    return expanded.resolve()
| 33.820225 | 96 | 0.662791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,120 | 0.372093 |
92afbe514a83c1f439be100159e8dac9eed5d374 | 752 | py | Python | bip_utils/bip/__init__.py | djmuratb/bip_utils | 18de9a7067ca2cc3bc4727dc9e39e75db0456e1b | [
"MIT"
] | 1 | 2021-06-13T11:45:23.000Z | 2021-06-13T11:45:23.000Z | bip_utils/bip/__init__.py | djmuratb/bip_utils | 18de9a7067ca2cc3bc4727dc9e39e75db0456e1b | [
"MIT"
] | null | null | null | bip_utils/bip/__init__.py | djmuratb/bip_utils | 18de9a7067ca2cc3bc4727dc9e39e75db0456e1b | [
"MIT"
] | null | null | null | # BIP39
from bip_utils.bip.bip39_ex import Bip39InvalidFileError, Bip39ChecksumError
from bip_utils.bip.bip39 import (
Bip39WordsNum, Bip39EntropyBitLen,
Bip39EntropyGenerator, Bip39MnemonicGenerator, Bip39MnemonicValidator, Bip39SeedGenerator
)
# BIP32
from bip_utils.bip.bip32_ex import Bip32KeyError, Bip32PathError
from bip_utils.bip.bip32_utils import Bip32Utils
from bip_utils.bip.bip32_path import Bip32PathParser
from bip_utils.bip.bip32 import Bip32
# BIP44/49/84
from bip_utils.bip.bip44_base_ex import Bip44DepthError, Bip44CoinNotAllowedError
from bip_utils.bip.bip44_base import Bip44Changes, Bip44Coins, Bip44Levels
from bip_utils.bip.bip44 import Bip44
from bip_utils.bip.bip49 import Bip49
from bip_utils.bip.bip84 import Bip84
| 41.777778 | 93 | 0.855053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.035904 |
92b79331943ca8f2fc79c0b6c0f927016d96552f | 34,251 | py | Python | simulation_supervised/python/run_script.py | kkelchte/simulation_supervised | 8b55e2984cb4911b3b840090eb8b9d296252c4bd | [
"Apache-2.0"
] | 2 | 2019-02-20T14:25:23.000Z | 2021-06-14T21:58:59.000Z | simulation_supervised/python/run_script.py | kkelchte/simulation_supervised | 8b55e2984cb4911b3b840090eb8b9d296252c4bd | [
"Apache-2.0"
] | null | null | null | simulation_supervised/python/run_script.py | kkelchte/simulation_supervised | 8b55e2984cb4911b3b840090eb8b9d296252c4bd | [
"Apache-2.0"
] | 1 | 2019-02-20T14:25:27.000Z | 2019-02-20T14:25:27.000Z | #!/usr/bin/python
"""
Run_long_script governs the running of long gazebo_ros_tensorflow simulations.
The core functionality lies in:
1. parsing the correct arguments at different levels (tensorflow dnn, gazebo environment, ros supervision)
2. different crash handling when for instance starting gazebo / tensorflow fails
The script is organized in different steps:
1. Parsing arguments saved in a name space
2. launching ROS and robot related parameters
3. launching tensorflow in machine (docker/singularity/virtualenv) environment
4. launching experiment with potentially autogenerated gazebo world
Exit code:
0) normal exit code
2) tensorflow stopped working
3) communication with logfolder (Opal) is blocked
4) config file is missing
Example usage:
Let behavior arbitration fly with drone through default canyon in singularity environment 1 time while saving images.
python run_script.py --robot drone_sim --fsm oracle_drone_fsm --world canyon --reuse_default_world -n 1 -ds -p params.yaml
Author: Klaas Kelchtermans
Dependecies: simulation_supervised, pilot, klaas_robots
"""
import rospy
from std_srvs.srv import Empty as Emptyservice
from std_srvs.srv import EmptyRequest # for pausing and unpausing physics engine
from geometry_msgs.msg import Pose
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.srv import SetModelStateRequest
from gazebo_msgs.msg import ModelState
import ast # to parse startingpositions as string to list
import sys, os, os.path
import subprocess, shlex
import shutil
import time
import signal
import argparse
import yaml
import fnmatch
import numpy as np
class bcolors:
  """ANSI terminal escape sequences for colored/styled console output.
  Usage: print(bcolors.FAIL + 'message' + bcolors.ENDC)
  """
  HEADER = '\033[95m'     # bright magenta
  OKBLUE = '\033[94m'     # bright blue
  OKGREEN = '\033[92m'    # bright green
  WARNING = '\033[93m'    # bright yellow
  FAIL = '\033[91m'       # bright red
  ENDC = '\033[0m'        # reset all attributes
  BOLD = '\033[1m'        # bold text
  UNDERLINE = '\033[4m'   # underlined text
# global variables for Popen objects used for terminating sessions
ros_popen = None     # Popen handle of the ROS launch process (killed in kill_combo)
python_popen = None  # Popen handle of the tensorflow/python process (killed in kill_combo)
gazebo_popen = None  # Popen handle of the gazebo process (killed in kill_combo)
crash_number = 0     # crash counter -- presumably updated further down the script; not visible here
run_number = 0       # current run index -- presumably updated further down the script; not visible here
def myprint(message):
  """Print *message* and mirror it to <summary_dir>/<log_tag>/output.
  Output is not captured on the computing cluster, so every message is
  also appended to a logfile inside the log folder.
  """
  print(message)
  output_path = FLAGS.summary_dir + FLAGS.log_tag + '/output'
  with open(output_path, 'a') as log_file:
    log_file.write(message + '\n')
# Predefined functions.
def load_param_file(location):
  """Load a yaml parameter file and flatten it into a command-line string.
  Note that boolean entries are emitted as bare flags regardless of value:
  both `key: True` and `key: False` become `--key`. All other entries become
  `--key value` pairs.
  Args:
    location: path to the yaml file.
  Returns:
    A single string of concatenated command-line arguments.
  """
  yaml_dict = {}
  with open(location, 'r') as stream:
    try:
      # SECURITY: yaml.load without an explicit Loader can construct
      # arbitrary python objects -- only use with trusted parameter files
      # (yaml.safe_load would be the safer choice).
      # The `or {}` guards against empty files, which parse to None.
      yaml_dict = yaml.load(stream) or {}
    except yaml.YAMLError as exc:
      # BUGFIX: myprint concatenates its argument with '\n', so the
      # exception object must be stringified first.
      myprint(str(exc))
  yaml_str = ""
  for k in yaml_dict.keys():
    if isinstance(yaml_dict[k], bool):
      yaml_str = "{0} --{1}".format(yaml_str, k)
    else:
      yaml_str = "{0} --{1} {2}".format(yaml_str, k, yaml_dict[k])
  return yaml_str
def wait_for_gazebo():
  """Poll the process table until no gzserver process is left, then settle 1s."""
  def check_processes():
    # Equivalent of `ps -ef | grep gz`.
    ps_proc = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
    grep_proc = subprocess.Popen(["grep", "gz"], stdin=ps_proc.stdout, stdout=subprocess.PIPE)
    return grep_proc.communicate()[0]
  myprint("{0}: wait for gazebo".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
  while "gzserver" in check_processes():
    time.sleep(0.2)
  time.sleep(1)
def wait_for_create_dataset():
  """Poll the process table until no create_dataset process is left running."""
  def check_processes():
    # Equivalent of `ps -ef | grep create_dataset`.
    ps_proc = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
    grep_proc = subprocess.Popen(["grep", "create_dataset"], stdin=ps_proc.stdout, stdout=subprocess.PIPE)
    return grep_proc.communicate()[0]
  myprint("{0}: wait for create_dataset".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
  while "create_dataset" in check_processes():
    time.sleep(0.2)
def wait_for_ros_to_start():
  """Block until `rosparam list` succeeds (exit code != 1), polling once a second."""
  time.sleep(1)
  while subprocess.call(["rosparam", "list"], stdout=subprocess.PIPE) == 1:
    myprint("{0}: wait for ros".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
    time.sleep(1)
def kill_popen(process_name, process_popen):
  """Log, then terminate the given Popen if still running and wait for it to exit."""
  myprint("{0}: terminate {1}".format(time.strftime("%Y-%m-%d_%I:%M:%S"), process_name))
  still_running = process_popen.poll() is None
  if still_running:
    process_popen.terminate()
    process_popen.wait()
def kill_combo():
  """kill ros, python and gazebo pids and wait for them to finish"""
  global ros_popen, python_popen, gazebo_popen
  # Order matters: gazebo first (then wait until gzserver is truly gone),
  # then the python/tensorflow side, and finally ros itself.
  if gazebo_popen: kill_popen('gazebo', gazebo_popen)
  wait_for_gazebo()
  if python_popen: kill_popen('python', python_popen)
  if ros_popen: kill_popen('ros', ros_popen)
  time.sleep(5)  # settle time before anything gets relaunched
##########################################################################################################################
# STEP 1 Load Parameters
parser = argparse.ArgumentParser(description="""Run_simulation_scripts governs the running of long gazebo_ros_tensorflow simulations.
The core functionality lies in:
1. parsing the correct arguments at different levels (tensorflow dnn, gazebo environment, ros supervision)
2. different crash handling when for instance starting gazebo / tensorflow fails""")
# ==========================
# General Settings
# ==========================
parser.add_argument("--summary_dir", default='tensorflow/log/', type=str, help="Choose the directory to which tensorflow should save the summaries.")
parser.add_argument("--data_root", default='pilot_data/', type=str, help="Choose the directory to which tensorflow should save the summaries.")
parser.add_argument("--code_root", default='~', type=str, help="Choose the directory to which tensorflow should save the summaries.")
parser.add_argument("-t", "--log_tag", default='testing_on_policy', type=str, help="LOGTAG: tag used to name logfolder.")
parser.add_argument("--data_location", default='', type=str, help="Datalocation is by default the log_tag but than in data_root instead of summary_dir, otherwise FLAG should indicate relative path to data_root.")
parser.add_argument("-n", "--number_of_runs", default=-1, type=int, help="NUMBER_OF_RUNS: define the number of runs the robot will be trained/evaluated. n=1 avoids a hard stop after 5minutes.")
parser.add_argument("-g", "--graphics", action='store_true', help="Add extra nodes for visualization e.g.: Gazebo GUI, control display, depth prediction, ...")
parser.add_argument("-e", "--evaluation", action='store_true',help="This script can launch 2 modes of experiments: training (default) or evaluation.")
parser.add_argument("--evaluate_every", default=10, type=int, help="Evaluate every N runs when training.")
parser.add_argument("--final_evaluation_runs", default=5, type=int, help="Evaluate N times after training is finished..")
parser.add_argument("-ds", "--create_dataset", action='store_true',help="In case of True, sensor data is saved.")
parser.add_argument("--owr", action='store_true',help="Delete dataset if it is already there.")
parser.add_argument("--save_only_success", action='store_true',help="In case of True, sensor data is saved.")
parser.add_argument("--random_seed", type=int, help="If provided, the simulation is seeded (as good as possible).")
# ==========================
# Robot Settings
# ==========================
parser.add_argument("--robot",default='drone_sim', type=str, help="Specify the robot configuration file: turtle_sim(default), drone_sim, turtle_real, drone_real.")
parser.add_argument("-r", "--recovery", action='store_true',help="Use drone with recovery camera's attached.")
# ==========================
# Tensorflow Settings
# ==========================
parser.add_argument("-m","--checkpoint_path", type=str, help="Specify the directory of the checkpoint of the earlier trained model.")
parser.add_argument("-pe","--python_environment",default='sing', type=str, help="Define which environment should be loaded in shell when launching tensorlfow. Possibilities: sing, docker, virtualenv.")
parser.add_argument("-pp","--python_project",default='pytorch_pilot/pilot', type=str, help="Define which python module should be started with ~/tenorflow/PROJECT_NAME/main.py: q-learning/pilot, pilot/pilot, ddpg, ....")
# ==========================
# Environment Settings
# ==========================
parser.add_argument("--auto_go", action='store_true',help="Publish /go signal after few launching gazebo to start experiment automatically")
parser.add_argument("--reuse_default_world", action='store_true',help="reuse the default forest/canyon/sandbox instead of generating them on the fly.")
parser.add_argument("--one_world", action='store_true',help="Reuse one world to train in over and over again.")
# --- CLI options: world selection and spawn-pose overrides -------------------
# -w/--world may be repeated; argparse (action='append', nargs=1) collects the
# values as a nested list that is flattened further below.
parser.add_argument("-w","--world",dest='worlds', action='append', nargs=1, help="Define different worlds: corridor, canyon, forest, sandbox, esat_v1, esat_v2, ... .")
# parser.add_argument("-p","--paramfile",default='eva_params.yaml',type=str, help="Add more parameters to the command loading the DNN in tensorflow ex: eva_params.yaml or params.yaml.")
parser.add_argument("--fsm",default='nn_drone_fsm',type=str, help="Define the fsm loaded from /simsup/config/fsm: nn_turtle_fsm, console_fsm, console_nn_db_turtle_fsm, ...")
# 999 is the sentinel for 'not set': sample_new_position() only applies these
# overrides when the value differs from 999.
parser.add_argument("--x_pos",default=999,type=float, help="Specify x position.")
parser.add_argument("--x_var",default=0,type=float, help="Specify variation in x position.")
parser.add_argument("--y_pos",default=999,type=float, help="Specify y position.")
parser.add_argument("--y_var",default=0,type=float, help="Specify variation in y position.")
parser.add_argument("--z_pos",default=999,type=float, help="Specify z position.")
parser.add_argument("--z_var",default=0,type=float, help="Specify variation z position.")
parser.add_argument("--yaw_or",default=999,type=float, help="Specify yaw orientation.")
# parser.add_argument("--yaw_var",default=2*3.14,type=float, help="Specify variation in yaw orientation.")
parser.add_argument("--yaw_var",default=0,type=float, help="Specify variation in yaw orientation.")
# Unrecognized options end up in `others`; they are forwarded verbatim to the
# tensorflow main script (FLAGS.params) and to the world generator.
FLAGS, others = parser.parse_known_args()
# FLAGS=parser.parse_args()
# get simulation_supervised dir ([:-1] strips the trailing newline of the rospack output)
simulation_supervised_dir=subprocess.check_output(shlex.split("rospack find simulation_supervised"))[:-1]
# 3 main directories have to be defined in order to make it also runnable from a read-only system-installed singularity image.
if FLAGS.summary_dir[0] != '/': # 1. Tensorflow log directory for saving tensorflow logs and xterm logs
  FLAGS.summary_dir=os.environ['HOME']+'/'+FLAGS.summary_dir
if FLAGS.data_root[0] != '/': # 2. Pilot_data directory for saving data
  FLAGS.data_root=os.environ['HOME']+'/'+FLAGS.data_root
if FLAGS.code_root == '~': # 3. location for tensorflow code (and also catkin workspace though they are found with rospack)
  #no explicit directory for code is set so try to parse first from environment
  try:
    FLAGS.code_root = os.environ['CODE']
  except KeyError: # in case environment variable is not set, take home dir
    FLAGS.code_root = os.environ['HOME']
# For the throw-away 'testing_on_policy' tag, always start from a clean slate.
if FLAGS.log_tag == 'testing_on_policy':
  if os.path.isdir(FLAGS.summary_dir+FLAGS.log_tag): shutil.rmtree(FLAGS.summary_dir+FLAGS.log_tag)
  if os.path.isdir(FLAGS.data_root+FLAGS.log_tag): shutil.rmtree(FLAGS.data_root+FLAGS.log_tag)
# add default values to be able to operate
if FLAGS.worlds == None : FLAGS.worlds=['canyon']
else: #worlds are appended in a nested list (argparse action='append' with nargs=1) so flatten them.
  worlds=[]
  for w in FLAGS.worlds: worlds.append(w[0])
  FLAGS.worlds = worlds[:]
# All options argparse did not recognize are forwarded to the tensorflow main script.
FLAGS.params=others[:]
if FLAGS.random_seed:
  np.random.seed(FLAGS.random_seed)
  FLAGS.params.append('--random_seed '+str(FLAGS.random_seed))
# check if robot configuration is there:
if not os.path.isfile(simulation_supervised_dir+'/config/robot/'+FLAGS.robot+'.yaml'):
  # BUGFIX: used to report w[0], which is unbound when no -w flag was given
  # (NameError) and is a world name anyway; report the missing robot instead.
  myprint("Could not find robot configuration for {}".format(FLAGS.robot))
  sys.exit(4)
# Try to extract the condor execution host from the job classad; fall back to a
# placeholder when not running under condor (or when anything in the pipeline fails).
try:
  # BUGFIX: the pipeline needs a real shell. shlex.split+check_output passed
  # '|', 'grep', ... as literal arguments to `cat` (and the old string even had
  # a stray trailing ')'), so this always failed through to 'unknown_host'.
  FLAGS.condor_host=subprocess.check_output(
    "cat $_CONDOR_JOB_AD | grep RemoteHost | head -1 | cut -d '=' -f 2 | cut -d '@' -f 2 | cut -d '.' -f 1",
    shell=True).strip()
except:
  FLAGS.condor_host='unknown_host'
# Clear log folder if desired
if FLAGS.owr and os.path.isdir("{0}{1}".format(FLAGS.summary_dir, FLAGS.log_tag)):
  shutil.rmtree("{0}{1}".format(FLAGS.summary_dir, FLAGS.log_tag))
# Create main log folder if necessary
if not os.path.isdir("{0}{1}".format(FLAGS.summary_dir, FLAGS.log_tag)):
  os.makedirs("{0}{1}".format(FLAGS.summary_dir, FLAGS.log_tag))
else:
  # Load last position to start from if lastposition is file log folder already existed
  # (overwrites the x/y/z/yaw FLAGS with the last line of last_position.txt, so a
  # restarted job continues where the previous one stopped).
  if os.path.isfile("{0}{1}/last_position.txt".format(FLAGS.summary_dir, FLAGS.log_tag)):
    try:
      with open("{0}{1}/last_position.txt".format(FLAGS.summary_dir, FLAGS.log_tag),'r') as f:
        last_position=f.readlines()
      FLAGS.x_pos,FLAGS.y_pos,FLAGS.z_pos,FLAGS.yaw_or= [ float(x) for x in last_position[-1].strip().split(',')]
      myprint("[run_script] obtained last position as {0} {1} {2} {3}".format(FLAGS.x_pos,FLAGS.y_pos,FLAGS.z_pos,FLAGS.yaw_or))
    except:
      # best effort: a malformed or empty file simply leaves the CLI defaults in place
      myprint("[run_script] failed to obtain last position from {0}{1}/last_position.txt".format(FLAGS.summary_dir, FLAGS.log_tag))
# in case of data_creation, make data_location in ~/pilot_data
if FLAGS.create_dataset:
  if FLAGS.data_location == "":
    FLAGS.data_location = "{0}{1}".format(FLAGS.data_root, FLAGS.log_tag)
  else:
    FLAGS.data_location = "{0}{1}".format(FLAGS.data_root, FLAGS.data_location)
  if os.path.isdir(FLAGS.data_location) and (FLAGS.number_of_runs == 1 or FLAGS.owr):
    shutil.rmtree(FLAGS.data_location)
  if not os.path.isdir(FLAGS.data_location):
    os.makedirs(FLAGS.data_location)
  else:
    # check number of items already recorded
    if len(os.listdir(FLAGS.data_location)) >= 1:
      # in case there is already data recorded, parse the number of runs and continue from there
      last_run=sorted([d for d in os.listdir(FLAGS.data_location) if os.path.isdir("{0}/{1}".format(FLAGS.data_location,d))])[-1]
      run_number=int(last_run.split('_')[0]) +1 #assuming number occurs at first 5 digits xxxxx_name_of_data
      myprint("Found data from previous run so adjusted run_number to {}".format(run_number))
# display and save all settings
myprint("\nSettings:")
for f in sorted(FLAGS.__dict__): myprint("{0}: {1}".format( f, FLAGS.__dict__[f]))
with open("{0}{1}/run_conf".format(FLAGS.summary_dir, FLAGS.log_tag),'w') as c:
  c.write("Settings of Run_simulation_scripts:\n\n")
  for f in FLAGS.__dict__: c.write("{0}: {1}\n".format(f, FLAGS.__dict__[f]))
##########################################################################################################################
# STEP 2 Start ROS with ROBOT specific parameters
# ensure location for logging the xterm outputs exists.
ros_xterm_log_dir="{0}{1}/xterm_ros".format(FLAGS.summary_dir,FLAGS.log_tag)
if not os.path.isdir(ros_xterm_log_dir): os.makedirs(ros_xterm_log_dir)
def start_ros():
  """Start ros core with robot parameters loaded.

  Launches `roslaunch ... load_params.launch` inside an iconified xterm (so all
  output lands in ros_xterm_log_dir), waits until ROS is reachable and then
  pushes the run-time parameters on the parameter server.
  """
  global ros_popen
  command="roslaunch simulation_supervised load_params.launch robot_config:={0}.yaml {1}".format(FLAGS.robot, 'random_seed:='+str(FLAGS.random_seed) if FLAGS.random_seed else '')
  # BUGFIX: use FLAGS.worlds (always defined after the defaults were filled in)
  # instead of the helper list `worlds`, which is only bound when -w was given.
  if os.path.isfile(simulation_supervised_dir+'/config/environment/'+FLAGS.worlds[0]+'.yaml'):
    command="{0} world_config:={1}".format(command, simulation_supervised_dir+'/config/environment/'+FLAGS.worlds[0]+'.yaml')
  xterm_log_file='{0}/xterm_ros_{1}.txt'.format(ros_xterm_log_dir,time.strftime("%Y-%m-%d_%I%M"))
  if os.path.isfile(xterm_log_file): os.remove(xterm_log_file)
  args = shlex.split("xterm -iconic -l -lf {0} -hold -e {1}".format(xterm_log_file,command))
  ros_popen = subprocess.Popen(args)
  pid_ros = ros_popen.pid
  myprint("{0}: start_ros pid {1}\n".format(time.strftime("%Y-%m-%d_%I:%M:%S"),pid_ros))
  wait_for_ros_to_start()
  # During a pure evaluation session every run is an evaluation run.
  rospy.set_param('evaluate_every',FLAGS.evaluate_every if not FLAGS.evaluation else 1)
  rospy.set_param('recovery',FLAGS.recovery)
# Bring up roscore with the robot parameters before anything else.
start_ros()
##########################################################################################################################
# STEP 3 Start tensorflow
# ensure location for logging the xterm outputs of the python/tensorflow node exists.
python_xterm_log_dir="{0}{1}/xterm_python".format(FLAGS.summary_dir,FLAGS.log_tag)
if not os.path.isdir(python_xterm_log_dir): os.makedirs(python_xterm_log_dir)
def start_python():
  """Launch the tensorflow main script in its own xterm and wait for readiness.

  Readiness is detected through the 'nn_ready' file in the log folder: either
  its modification time changes (the file survived a previous session) or the
  file appears (fresh log folder). Exits the whole script with error code 2
  when tensorflow does not come up within `wait_time` minutes.
  """
  global python_popen
  # Add parameters forwarded to the tensorflow main script.
  # NOTE(review): FLAGS.params is mutated in place, so a second call (crash
  # restart) appends --log_tag again — confirm the receiving argument parser
  # tolerates the duplication.
  FLAGS.log_folder = "{0}{1}".format(FLAGS.summary_dir,FLAGS.log_tag)
  FLAGS.params.append("--log_tag {0}".format(FLAGS.log_tag))
  if not '--on_policy' in FLAGS.params: FLAGS.params.append("--on_policy")
  if FLAGS.checkpoint_path: FLAGS.params.append("--checkpoint_path {0}".format(FLAGS.checkpoint_path))
  # Create command
  params=""
  for p in FLAGS.params: params="{0} {1}".format(params,p)
  command="{0}/scripts/launch_python/{1}.sh {2}/tensorflow/{3}/main.py {4}".format(simulation_supervised_dir,
    FLAGS.python_environment,
    FLAGS.code_root,
    FLAGS.python_project,
    params)
  myprint("Tensorflow command:\n {}".format(command))
  xterm_log_file='{0}/xterm_python_{1}.txt'.format(python_xterm_log_dir,time.strftime("%Y-%m-%d_%I%M"))
  if os.path.isfile(xterm_log_file): os.remove(xterm_log_file)
  args = shlex.split("xterm -l -lf {0} -hold -e {1}".format(xterm_log_file, command))
  # Execute command
  python_popen = subprocess.Popen(args)
  pid_python = python_popen.pid
  myprint("{0}: start_python pid {1} \n\n".format(time.strftime("%Y-%m-%d_%I:%M:%S"),pid_python))
  # Wait for creation of tensorflow log file to know the python node is running
  start_time = time.time()
  wait_time=10
  nn_ready_file=FLAGS.log_folder+'/nn_ready'
  def _abort_on_timeout():
    # Shared timeout guard for both wait loops below (was duplicated inline).
    if time.time()-start_time > wait_time*60:
      myprint("{0}: Waited for {3}minutes on nn_ready in {2} to start, seems like tensorflow has crashed on {1} so exit with error code 2.".format(time.strftime("%Y-%m-%d_%I:%M"), FLAGS.condor_host, FLAGS.log_folder, wait_time))
      kill_combo()
      sys.exit(2)
  if os.path.isfile(nn_ready_file):
    # nn_ready survived from an earlier session: wait until its mtime changes.
    prev_stat_nn_ready=subprocess.check_output(shlex.split("stat -c %Y "+nn_ready_file))
    while prev_stat_nn_ready == subprocess.check_output(shlex.split("stat -c %Y "+nn_ready_file)):
      _abort_on_timeout()
      time.sleep(1)
  else:
    # Fresh log folder: wait until the file appears.
    while not os.path.isfile(nn_ready_file):
      time.sleep(1)
      _abort_on_timeout()
start_python()
# 'recovery' was pushed on the parameter server by start_ros(); echo it for the logs.
myprint("[runscript] set recovery to {0}".format(rospy.get_param('recovery')))
##########################################################################################################################
# STEP 4 Start gazebo environment
def create_environment(run_number, world_name):
  """Call correct python script for generating potentially new environment.

  Returns a string of 'key:=value' arguments for the launch file (always
  world_name, optionally world_file, world_config and background) to be
  concatenated to the launch command. Returns '' when --one_world is set and
  this is not the first run.
  """
  # generate world if it is possible and allowed, this also changes the loaded world file location from the default simsup_demo/worlds to log_folder
  world_file=''
  world_config=''
  background=''
  # don't create a new world if one_world is on
  if FLAGS.one_world and run_number > 0: return ''
  if world_name in ['canyon', 'forest', 'sandbox'] and not FLAGS.reuse_default_world:
    # Procedurally generate the world into the log folder.
    generator_file="{0}/python/generators/{1}_generator.py".format(subprocess.check_output(shlex.split("rospack find simulation_supervised_tools"))[:-1],world_name)
    subprocess.Popen(shlex.split("python "+generator_file+" "+FLAGS.log_folder)).wait()
    background=FLAGS.log_folder+'/'+world_name+'.png'
    world_file=FLAGS.log_folder+'/'+world_name+'.world'
  elif world_name in ['canyon', 'corridor', 'different_corridor'] and FLAGS.reuse_default_world:
    # reuse default 10 evaluation canyons or corridors
    world_file='{0}/../simulation_supervised_demo/worlds/{2}_evaluation/{1:05d}_{2}.world'.format(simulation_supervised_dir,run_number%10, world_name)
    background='{0}/../simulation_supervised_demo/worlds/{2}_evaluation/{1:05d}_{2}.png'.format(simulation_supervised_dir,run_number%10, world_name)
    if 'corridor' in world_name:
      # BUGFIX: this used to append to an undefined local `command` (NameError);
      # the yaml has to travel through `world_config`, which is folded into the
      # returned argument string below.
      world_config="{0}/config/environment/{1:05d}_{2}.yaml".format(simulation_supervised_dir,run_number%10, world_name)
  elif world_name in ['corridor'] and not FLAGS.reuse_default_world:
    generator_file="{0}/python/generators/world_generator.py".format(subprocess.check_output(shlex.split("rospack find simulation_supervised_tools"))[:-1])
    generator_command="python "+generator_file+" --output_dir "+FLAGS.log_folder+" --output_file "+world_name+"_"+str(run_number)
    # Forward all unparsed CLI options to the world generator as well.
    for p in others: generator_command="{0} {1}".format(generator_command, p)
    print("[runscript] Generate command: {0}".format(generator_command))
    return_val=subprocess.call(shlex.split(generator_command))
    if return_val != 0:
      kill_combo()
      myprint("Failed to create env {0}, return value: {1}".format(world_name, return_val))
      sys.exit(2)
    world_file=FLAGS.log_folder+'/'+world_name+"_"+str(run_number)+'.world'
    world_config=FLAGS.log_folder+'/'+world_name+"_"+str(run_number)+'.yaml'
  arguments='world_name:='+world_name
  # Append every non-empty optional argument (replaces the previous eval() lookup).
  for arg_name, arg_value in [("world_file", world_file), ("world_config", world_config), ("background", background)]:
    if len(arg_value) != 0: arguments=arguments+" "+arg_name+":="+arg_value
  return arguments
def sample_new_position(starting_positions=None):
  """Parse a new x,y,z,yaw pose for the robot.

  starting_positions: optional list of candidate poses, each a tuple of length
    2 (x,y), 3 (x,y,yaw) or 4 (x,y,z,yaw); one candidate is sampled uniformly.
  Returns positions x, y, z and the yaw orientation. (The yaw returned here is
  the raw angle; any quaternion conversion happens at the call site —
  TODO confirm the original "(1 ~ +90)" remark against the callers.)
  """
  # BUGFIX: default changed from the mutable `[]` to None (shared-default pitfall).
  if starting_positions is None: starting_positions = []
  # default with arguments
  x, y, z, yaw = 0,0,0,0
  if len(starting_positions) != 0:
    pos = starting_positions[np.random.choice(range(len(starting_positions)))]
    if len(pos) == 2:
      x, y = pos
    elif len(pos) == 3:
      x, y, yaw = pos
    elif len(pos) == 4:
      x, y, z, yaw = pos
    else:
      myprint("[run_script] failed to parse starting_position {0}".format(pos))
  # overwrite sampled starting positions if they were manually set
  if FLAGS.x_pos != 999: x=FLAGS.x_pos
  if FLAGS.y_pos != 999: y=FLAGS.y_pos
  if FLAGS.z_pos != 999: z=FLAGS.z_pos
  if FLAGS.yaw_or != 999: yaw=FLAGS.yaw_or
  # add some variation
  x += np.random.uniform(-FLAGS.x_var,FLAGS.x_var)
  y += np.random.uniform(-FLAGS.y_var,FLAGS.y_var)
  z += np.random.uniform(-FLAGS.z_var,FLAGS.z_var)
  yaw += np.random.uniform(-FLAGS.yaw_var,FLAGS.yaw_var)
  return x, y, z, yaw
# ensure location for logging the xterm outputs exists.
gazebo_xterm_log_dir="{0}{1}/xterm_gazebo".format(FLAGS.summary_dir,FLAGS.log_tag)
if not os.path.isdir(gazebo_xterm_log_dir): os.makedirs(gazebo_xterm_log_dir)
# Some local variables for running different simulations
prev_environment_arguments=''
# ROS service handles into the running gazebo instance.
reset_gazebo_service=rospy.ServiceProxy('/gazebo/reset_simulation',Emptyservice)
model_state_gazebo_service=rospy.ServiceProxy('/gazebo/set_model_state',SetModelState)
unpause_physics_client=rospy.ServiceProxy('/gazebo/unpause_physics',Emptyservice)
gazebo_popen=None
# Last observed modification times of the nn_ready and fsm log files.
prev_stat_nn_log=''
prev_stat_fsm_log=''
# fsm_log is updated externally at the end of each run (presumably by the fsm
# node — the run loop below keys on its mtime and reads its last line).
fsm_file = FLAGS.log_folder+'/fsm_log'
if not os.path.isfile(fsm_file):
  with open(fsm_file,'a') as f:
    f.write('{0}: {1}\n'.format(time.strftime("%Y-%m-%d_%I-%M-%S"), FLAGS.log_folder))
crashed=False
# Main run loop: one iteration = one simulated episode.
# NOTE(review): run_number and crash_number are initialized earlier in the
# script (not visible in this section) — confirmed used before this loop.
while (run_number < FLAGS.number_of_runs) or FLAGS.number_of_runs==-1:
  ######################################
  # 4.1 Prepare Run
  world_name = FLAGS.worlds[run_number%len(FLAGS.worlds)]
  # save current status of NN nn_ready to compare afterwards
  if os.path.isfile(FLAGS.log_folder+'/nn_ready'):
    prev_stat_nn_log=subprocess.check_output(shlex.split("stat -c %Y "+FLAGS.log_folder+'/nn_ready'))
  else: # we have lost communication with our log folder so exit with code 3
    myprint("{2}: lost communication with our log folder {0} on host {1} so exit with code 3.".format(FLAGS.log_folder, FLAGS.condor_host, time.strftime("%Y-%m-%d_%I:%M:%S")))
    kill_combo()
    sys.exit(3)
  # clean up gazebo ros folder every now and then
  if run_number%50 == 0 : shutil.rmtree("{0}/.gazebo/log".format(os.environ['HOME']),ignore_errors=True)
  evaluate=((run_number%FLAGS.evaluate_every) == 0 and run_number != 0 and FLAGS.evaluate_every != -1) or FLAGS.evaluation
  new_environment_arguments=create_environment(run_number, world_name)
  ######################################
  # 4.2 Create environment and perform next run
  if rospy.has_param('/starting_positions'):
    starting_positions = rospy.get_param('starting_positions')
    if isinstance(starting_positions,str):
      starting_positions=ast.literal_eval(starting_positions)
  else:
    starting_positions = []
  if (new_environment_arguments == prev_environment_arguments or len(new_environment_arguments) == 0) and not crashed and gazebo_popen != None:
    # 4.2.1 Reset environment for next run if possible
    # 4.2.1a Ensure correct settings
    rospy.set_param('/evaluate',evaluate)
    # 4.2.1c Change position of drone according to new selected starting position
    # BUGFIX: keep x, y and yaw in sync with the pose so starting_positions.txt
    # (section 4.3) no longer records stale values from a previous launch.
    x, y, starting_height, yaw = sample_new_position(starting_positions)
    pose=Pose()
    pose.position.x, pose.position.y = x, y
    myprint("[run_script]: x: {0}, y: {1}, z: {2}, yaw:{3}".format(pose.position.x, pose.position.y, starting_height, yaw))
    # some yaw to quaternion re-orientation code:
    pose.orientation.z=np.sin(yaw)
    pose.orientation.w=np.cos(yaw)
    pose.position.z = 0.1
    model_state = ModelState()
    model_state.model_name = 'quadrotor' if FLAGS.robot.startswith('drone') else 'turtlebot3_burger'
    model_state.pose=pose
    state_request = SetModelStateRequest()
    state_request.model_state = model_state
    retvals = model_state_gazebo_service(state_request)
    rospy.set_param('starting_height', starting_height)
    myprint("Changed pose with return values: {0}".format(retvals))
    time.sleep(5) # HAS to be 5 otherwise '/overtake' and '/ready' overlap resulting in empty images in gt_listener
    unpause_physics_client(EmptyRequest())
  else:
    # 4.2.2 Launch Gazebo again
    # 4.2.2a Ensure previous Gazebo is not running anymore
    if gazebo_popen!=None:
      kill_popen('gazebo', gazebo_popen)
      wait_for_gazebo()
    prev_environment_arguments = new_environment_arguments
    # 4.2.2b Build command with correct settings
    # remove if saving location already exists (probably due to crash previously)
    if FLAGS.create_dataset:
      data_location="{0}/{1:05d}_{2}".format(FLAGS.data_location,run_number,world_name)
      if os.path.isdir(data_location): shutil.rmtree(data_location)
      os.makedirs(data_location)
      new_environment_arguments+=" save_images:=true"
      new_environment_arguments+=" data_location:={0}".format(data_location)
      if 'world_file' in new_environment_arguments:
        world_file=[a for a in new_environment_arguments.split(' ') if 'world_file' in a][0].split(':=')[1]
        myprint("[runscript] world_file {0}".format(world_file))
        shutil.copyfile(world_file, data_location+'/'+os.path.basename(world_file))
    x,y,z,yaw=sample_new_position(starting_positions)
    # NOTE: z is forwarded as starting_height and yaw as Yspawned to the launch file.
    command="roslaunch simulation_supervised_demo {0}.launch fsm_config:={1} log_folder:={2} evaluate:={3} {4} graphics:={5} x:={6} y:={7} Yspawned:={9} starting_height:={8} {10}".format(FLAGS.robot,
      FLAGS.fsm,
      FLAGS.log_folder,
      'true' if evaluate else 'false',
      new_environment_arguments,
      'true' if FLAGS.graphics else 'false',
      x,y,z,yaw,
      'random_seed:='+str(FLAGS.random_seed) if FLAGS.random_seed else '')
    # 4.2.2c Launch command inside an iconified xterm so all output is logged.
    myprint( "gazebo_command: {0}".format(command))
    xterm_log_file='{0}/xterm_gazebo_{1}.txt'.format(gazebo_xterm_log_dir,time.strftime("%Y-%m-%d_%I-%M-%S"))
    args = shlex.split("xterm -iconic -l -lf {0} -hold -e {1}".format(xterm_log_file,command))
    gazebo_popen = subprocess.Popen(args)
    pid_gazebo = gazebo_popen.pid
  ######################################
  # 4.3 Wait for run to finish
  # on this moment the run is not crashed (yet).
  crashed=False
  crash_checked=False
  # BUGFIX: bind go_popen per run so the auto_go cleanup below cannot raise a
  # NameError (it used to be bound only when the 10.05-10.15s window was hit).
  go_popen=None
  # BUGFIX: reset the run message so a crash can neither raise a NameError on a
  # first-run crash nor re-use a stale 'FINISHED' from an earlier run.
  message=''
  #print starting positions for visualizing later.
  with open(FLAGS.log_folder+'/starting_positions.txt','a') as f:
    f.write('{0}, {1}, {2}\n'.format(x,y,yaw))
  prev_stat_fsm_log=subprocess.check_output(shlex.split("stat -c %Y "+fsm_file))
  time.sleep(0.1)
  myprint("\n{0}: started run {1} of the {2} in {4} {3} {5}".format(time.strftime("%Y-%m-%d_%I:%M:%S"),
    run_number+1,
    FLAGS.number_of_runs,
    world_name,
    bcolors.OKBLUE,
    bcolors.ENDC))
  start_time=time.time()
  time_spend=0
  # while fsm_file has not been updated, wait...
  while prev_stat_fsm_log == subprocess.check_output(shlex.split("stat -c %Y "+fsm_file)):
    # Check on job suspension:
    # if between last update and now has been more than 30 seconds (should be less than 0.1s)
    if time.time() - start_time - time_spend > 30:
      myprint("{0}: Job got suspended.".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
      time.sleep(30) #wait for big tick to update
      start_time=time.time()
    else:
      time_spend=time.time() - start_time
    # automatically start with /go after 10s; the publisher is killed ~1s later
    if FLAGS.auto_go:
      if 10.05 <= time_spend<10.15:
        go_popen=subprocess.Popen(shlex.split("rostopic pub /go std_msgs/Empty"))
      elif 11.15 <= time_spend < 11.25 and go_popen != None and go_popen.poll()==None:
        kill_popen('go', go_popen)
    # Once, 5s into the run, scan the gazebo xterm log for a crash signature.
    if time_spend > 5 and not crash_checked:
      crash_checked = True
      # check for crash
      with open(xterm_log_file, 'r') as f:
        for l in f.readlines():
          if 'process has died' in l:
            myprint("[run_script] {0}: found gz crash in {1}: {2}.".format(time.strftime("%Y-%m-%d_%I:%M:%S"), os.path.basename(xterm_log_file),l[:50]))
            crashed=True
            crash_number+=1
    if crashed:
      if crash_number < 10: #after 10 crashes its maybe time to restart everything
        kill_popen('gazebo', gazebo_popen)
      else:
        myprint("{0}: crashed for 10the time so restart everything.".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
        kill_combo()
        start_ros()
        start_python()
        crash_number = 0
      break # get out of this loop
    time.sleep(0.1)
  ######################################
  # 4.4 Clean up run
  # 4.4.1 Wait for NN framework if it is running
  if not crashed and 'nn' in FLAGS.fsm:
    # wait for nn_ready and stop in case of no tensorflow communication
    if os.path.isfile(FLAGS.log_folder+'/nn_ready'):
      current_stat=subprocess.check_output(shlex.split("stat -c %Y "+FLAGS.log_folder+'/nn_ready'))
      start_time=time.time()
      myprint("{0}: waiting for nn_ready.".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
      while current_stat == prev_stat_nn_log:
        current_stat=subprocess.check_output(shlex.split("stat -c %Y "+FLAGS.log_folder+'/nn_ready'))
        time.sleep(1)
        if time.time()-start_time > 8*60:
          myprint("{0}: waited for 8minutes on nn_ready to finish training so something went wrong on {1} exit with code 2.".format(time.strftime("%Y-%m-%d_%I:%M:%S"), FLAGS.condor_host))
          kill_combo()
          sys.exit(2)
    else:
      myprint("{2}: we have lost communication with our log folder {0} on host {1} so exit with code 3.".format(FLAGS.log_folder, FLAGS.condor_host, time.strftime("%Y-%m-%d_%I:%M:%S")))
      kill_combo()
      sys.exit(3)
  if not crashed:
    # The fsm appends the run outcome as the last line of fsm_file.
    message = open(fsm_file,'r').readlines()[-1].strip()
    myprint("{0}: ended run {1} with {3}{2}{4}".format(time.strftime("%Y-%m-%d_%I:%M:%S"), run_number+1, message, bcolors.OKGREEN if 'success' in message else bcolors.FAIL, bcolors.ENDC))
  # increment also in case of crash as drone has zero turning speed:
  run_number+=1
  if message == 'FINISHED': # make this the final run for evaluation
    FLAGS.number_of_runs=run_number+FLAGS.final_evaluation_runs
    FLAGS.evaluation=True
  time.sleep(3)
  # extra second needed to save image in gt_listener
# after all required runs are finished
kill_combo()
myprint("\n{0}: done.".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
| 49.63913 | 230 | 0.686754 | 238 | 0.006949 | 0 | 0 | 0 | 0 | 0 | 0 | 15,620 | 0.456045 |
92b84f5fd97fe6052eddaf4fb13a8cbc0248d07a | 270 | py | Python | sample/rubrik_polaris/get_storage_object_ids_ebs.py | talmo77/rubrik-polaris-sdk-for-python | 505ce03b7995e7b86206c728be594d56d4431050 | [
"MIT"
] | 2 | 2021-07-14T12:54:53.000Z | 2022-03-03T21:55:28.000Z | sample/rubrik_polaris/get_storage_object_ids_ebs.py | talmo77/rubrik-polaris-sdk-for-python | 505ce03b7995e7b86206c728be594d56d4431050 | [
"MIT"
] | 8 | 2021-03-09T13:02:15.000Z | 2022-02-24T08:46:50.000Z | sample/rubrik_polaris/get_storage_object_ids_ebs.py | talmo77/rubrik-polaris-sdk-for-python | 505ce03b7995e7b86206c728be594d56d4431050 | [
"MIT"
] | 4 | 2021-04-16T15:49:36.000Z | 2021-11-09T17:58:21.000Z | from rubrik_polaris import PolarisClient
# Placeholder credentials for this sample; replace with real values before running.
domain = 'my-company'
username = 'john.doe@example.com'
password = 's3cr3tP_a55w0R)'
# insecure=True disables TLS certificate verification on the Polaris connection.
client = PolarisClient(domain, username, password, insecure=True)
# Print the EBS storage object ids matching the given tag filter.
print(client.get_storage_object_ids_ebs(tags = {"Class": "Management"}))
| 22.5 | 72 | 0.77037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.259259 |
92b86ef8042175ced0f5be6c5fb6656b4c301369 | 452 | py | Python | lizardanalysis.py | JojoReikun/ClimbingLizardDLCAnalysis | 6cc38090217a3ffd4860ef6d06ba7967d3c10b7c | [
"MIT"
] | 1 | 2021-03-09T19:12:44.000Z | 2021-03-09T19:12:44.000Z | lizardanalysis.py | JojoReikun/ClimbingLizardDLCAnalysis | 6cc38090217a3ffd4860ef6d06ba7967d3c10b7c | [
"MIT"
] | null | null | null | lizardanalysis.py | JojoReikun/ClimbingLizardDLCAnalysis | 6cc38090217a3ffd4860ef6d06ba7967d3c10b7c | [
"MIT"
] | null | null | null | """
LizardDLCAnalysis Toolbox
© Johanna T. Schultz
© Fabian Plum
Licensed under MIT License
----------------------------------------------------------
for testing and debugging in pycharm:
---> Tools
---> Python Console
---> (with ipython installed):
IN[1]: import lizardanalysis
---> run commands:
IN[2]: lizardanalysis.command(*args, **kwargs)
"""
from lizardanalysis import cli
def main():
    """Console entry point: delegate to the package command-line interface."""
    cli.main()
if __name__ == '__main__':
    main()
| 17.384615 | 58 | 0.60177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.797357 |
92b8e094c1356c59ec88aeb2c5c608bf6256f530 | 5,221 | py | Python | run/set_model.py | Debatrix/Qtrain | 60b6ff9be1247c4f1c7f6154c4fc58915af301a4 | [
"Apache-2.0"
] | null | null | null | run/set_model.py | Debatrix/Qtrain | 60b6ff9be1247c4f1c7f6154c4fc58915af301a4 | [
"Apache-2.0"
] | null | null | null | run/set_model.py | Debatrix/Qtrain | 60b6ff9be1247c4f1c7f6154c4fc58915af301a4 | [
"Apache-2.0"
] | null | null | null | import torch
from torch.utils.data import DataLoader
from src import qmodel, rmodel
from src.loss import IQALoss, PredictLoss
from src.framework import IQAnModel, RecognitionModel, TripRecognitionModel
from src.dataset import get_eye_dataset, EyePairDataset, FaceDataset
def set_r_model(config):
    """Build the recognition model described by the configuration.

    The backbone name comes from config['r_model_name'] when present, otherwise
    from the last '_'-separated token of config['model_name']. The backbone is
    wrapped together with a PredictLoss criterion in TripRecognitionModel for a
    triplet recognition loss, in RecognitionModel otherwise.

    Raises:
        ValueError: for an unknown backbone name.
    """
    model_name = config['r_model_name'] if 'r_model_name' in config \
        else config['model_name'].split('_')[-1]
    # Backbone factories keyed by the lower-cased model name; lambdas keep the
    # config lookups (e.g. 'pretrained') lazy so only the chosen entry runs.
    builders = {
        'maxout': lambda: rmodel.Maxout(num_classes=config['num_classes']),
        'maxout_o': lambda: rmodel.MaxoutO(num_classes=config['num_classes']),
        'nlightcnn': lambda: rmodel.LightCNN(num_classes=config['num_classes'],
                                             norm=True),
        'lightcnn': lambda: rmodel.LightCNN(num_classes=config['num_classes'],
                                            norm=False),
        'embedding': lambda: rmodel.Embedding(num_classes=config['num_classes']),
        'vninet': lambda: rmodel.VniNet(num_classes=config['num_classes']),
        'resnet18': lambda: rmodel.Resnet18(num_classes=config['num_classes'],
                                            norm=False,
                                            pretrained=config['pretrained']),
        'resnet18n': lambda: rmodel.Resnet18(num_classes=config['num_classes'],
                                             norm=True,
                                             pretrained=config['pretrained']),
        'vgg11bn': lambda: rmodel.VGG11BN(num_classes=config['num_classes'],
                                          pretrained=config['pretrained']),
    }
    key = model_name.lower()
    if key not in builders:
        raise ValueError('Unsupported model: ' + model_name)
    backbone = builders[key]()
    # criterion + framework wrapper
    criterion = PredictLoss(loss_type=config['rec_loss'])
    wrapper = TripRecognitionModel if config['rec_loss'] == 'triplet' \
        else RecognitionModel
    return wrapper(backbone, criterion)
def set_q_model(config):
    """Build the quality (IQA) model described by the configuration.

    The backbone name comes from config['q_model_name'] when present, otherwise
    from the first '_'-separated token of config['model_name']. The backbone is
    wrapped with an IQALoss criterion inside IQAnModel.

    Raises:
        ValueError: for an unknown backbone name.
    """
    model_name = config['q_model_name'] if 'q_model_name' in config \
        else config['model_name'].split('_')[0]
    key = model_name.lower()
    if key == 'resunet':
        backbone = qmodel.ResUnet()
    elif key == 'unet':
        backbone = qmodel.Unet()
    else:
        raise ValueError('Unsupported model: ' + model_name)
    criterion = IQALoss(loss_type=config['qua_loss'], alpha=config['alpha'])
    return IQAnModel(backbone, criterion)
def set_eye_dataloaders(config, mode='rtrain', pdfs=None):
    """Create the (train, val) eye dataloaders plus the class count.

    For a triplet quality loss the training split is an EyePairDataset;
    otherwise it comes from get_eye_dataset. In both cases num_classes is the
    value reported by the validation split.
    NOTE(review): the branch keys on config['qua_loss'] while the loaders use
    config['r_batchsize'] — confirm that mixing quality/recognition settings
    here is intentional.
    """
    def _make_loader(dataset):
        # Both splits share identical loader settings.
        return DataLoader(dataset,
                          config['r_batchsize'],
                          shuffle=True,
                          drop_last=True,
                          pin_memory=True,
                          num_workers=config['num_workers'])

    if config['qua_loss'] == 'triplet':
        train_data = EyePairDataset(dataset=config['dataset'],
                                    mode=mode,
                                    less_data=config['less_data'],
                                    dfs=pdfs,
                                    weight=config['weight'])
    else:
        train_data, _ = get_eye_dataset(datasets=config['dataset'],
                                        mode=mode,
                                        less_data=config['less_data'],
                                        dfs=pdfs,
                                        weight=config['weight'])
    val_data, num_classes = get_eye_dataset(datasets=config['dataset'],
                                            mode='val',
                                            less_data=config['less_data'])
    return (_make_loader(train_data), _make_loader(val_data)), num_classes
def set_face_dataloaders(config, mode='qtrain', dfs=(None, None)):
    """Create the training and validation face dataloaders.

    Args:
        config: experiment configuration dict (reads 'dataset', 'less_data',
            'q_batchsize' and 'num_workers').
        mode: dataset split used for the training loader.
        dfs: optional pair (train_df, val_df) forwarded to FaceDataset.

    Returns:
        (train_data_loader, val_data_loader)
    """
    # BUGFIX: the default used to be the mutable list [None, None]; an immutable
    # tuple avoids the shared-mutable-default pitfall and stays index-compatible.
    train_data = FaceDataset(
        dataset=config['dataset'],
        mode=mode,
        less_data=config['less_data'],
        dfs=dfs[0],
    )
    train_data_loader = DataLoader(train_data,
                                   config['q_batchsize'],
                                   drop_last=True,
                                   shuffle=True,
                                   pin_memory=True,
                                   num_workers=config['num_workers'])
    val_data = FaceDataset(
        dataset=config['dataset'],
        mode='val',
        less_data=config['less_data'],
        dfs=dfs[1],
    )
    val_data_loader = DataLoader(val_data,
                                 config['q_batchsize'],
                                 shuffle=True,
                                 drop_last=True,
                                 pin_memory=True,
                                 num_workers=config['num_workers'])
    return (train_data_loader, val_data_loader)
92b908d1f507caf2b00b09a9d6cc2b497334bead | 3,535 | py | Python | logic/auth.py | enisimsar/watchtower-news | 222d2e52e76ef32ebb78eb325f4c32b64c0ba1a6 | [
"MIT"
] | 2 | 2019-02-21T18:29:09.000Z | 2021-01-27T14:52:46.000Z | logic/auth.py | enisimsar/watchtower-news | 222d2e52e76ef32ebb78eb325f4c32b64c0ba1a6 | [
"MIT"
] | 3 | 2018-11-22T08:34:04.000Z | 2021-06-01T22:47:19.000Z | logic/auth.py | enisimsar/watchtower-news | 222d2e52e76ef32ebb78eb325f4c32b64c0ba1a6 | [
"MIT"
] | 1 | 2019-06-13T10:45:46.000Z | 2019-06-13T10:45:46.000Z | import hashlib
import logging
import random
import secrets
import string
import uuid

from mongoengine import DoesNotExist, NotUniqueError

from models.Invitation import Invitation
from models.User import User
__author__ = 'Enis Simsar'
# from http://www.pythoncentral.io/hashing-strings-with-python/
def hash_password(password):
    """Return the password hashed as 'sha256hex:salt' with a fresh random salt."""
    # A uuid4 hex string (32 chars) serves as the per-password salt.
    salt = uuid.uuid4().hex
    digest = hashlib.sha256(salt.encode() + password.encode()).hexdigest()
    return '{0}:{1}'.format(digest, salt)
# from http://www.pythoncentral.io/hashing-strings-with-python/
def check_password(hashed_password, user_password):
    """Verify *user_password* against a 'digest:salt' string from hash_password()."""
    expected_digest, salt = hashed_password.split(':')
    return hashlib.sha256(salt.encode() + user_password.encode()).hexdigest() == expected_digest
def register_user(invitation_code, username, password):
    """Create a new account guarded by a single-use invitation code.

    The invitation is consumed (marked inactive) only after the user document
    was saved successfully. Returns {'response': True, 'api_token': ...} on
    success, otherwise a dict describing the failure.
    """
    logging.info("username: {0}".format(username))
    hashed = hash_password(password)
    try:
        invitation = Invitation.objects.get(code=invitation_code, is_active=True)
    except DoesNotExist:
        return {'response': False, 'error': 'Invitation Code is invalid.'}
    except Exception as e:
        logging.error("exception: {0}".format(str(e)))
        return {'error': str(e)}
    try:
        user = User(username=username, password=hashed)
        user.save()
    except NotUniqueError:
        return {'response': False, 'error': 'Username is already registered!'}
    except Exception as e:
        logging.error("exception: {0}".format(str(e)))
        return {'error': str(e)}
    # Invalidate the invitation only once the account exists.
    invitation.is_active = False
    invitation.save()
    return {'response': True, 'api_token': user.api_token}
def login_user(username, password):
    """Authenticate a user and return {'response': True, 'api_token': ...}.

    A missing user and a wrong password yield the same generic error so an
    attacker cannot tell registered usernames apart.
    """
    logging.info("username: {0}".format(username))
    bad_credentials = {'response': False, 'error': 'Credentials are not correct!'}
    try:
        user = User.objects.get(username=username)
        if not check_password(user.password, str(password)):
            return bad_credentials
        return {'response': True, 'api_token': user.api_token}
    except DoesNotExist:
        return bad_credentials
    except Exception as e:
        logging.error("exception: {0}".format(str(e)))
        return {'error': str(e)}
def get_user_profile(user_id):
    """Return the profile dict for *user_id*, or {'response': False} when absent."""
    logging.info("user_id: {0}".format(user_id))
    try:
        user = User.objects.get(id=user_id)
    except DoesNotExist:
        user = None
    except Exception as e:
        logging.error("exception: {0}".format(str(e)))
        return {'error': str(e)}
    if not user:
        return {'response': False}
    profile = user.to_dict()
    logging.info(profile)
    return profile
def refresh_api_token(user_id):
    """Generate, persist and return a fresh 40-char alphanumeric API token.

    Returns {'api_token': ..., 'response': True} on success, {'response': False}
    when the user does not exist, or {'error': ...} on unexpected failures.
    """
    logging.info("user_id: {0}".format(user_id))
    try:
        user = User.objects.get(id=user_id)
    except DoesNotExist:
        return {'response': False}
    except Exception as e:
        logging.error("exception: {0}".format(str(e)))
        return {'error': str(e)}
    if user:
        # SECURITY FIX: use the cryptographically secure `secrets` module instead
        # of `random` for token generation (same alphabet and length as before).
        new_token = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(40))
        user.api_token = new_token
        user.save()
        return {'api_token': new_token, 'response': True}
    return {'response': False}
def get_user_with_api_token(api_token):
    """Resolve an API token to {'response': True, 'id': ...} or {'response': False}."""
    try:
        user = User.objects.get(api_token=api_token)
    except DoesNotExist:
        return {'response': False}
    except Exception as e:
        logging.error("exception: {0}".format(str(e)))
        return {'error': str(e)}
    if not user:
        return {'response': False}
    return {
        'response': True,
        'id': user.id
    }
| 28.97541 | 101 | 0.647808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 665 | 0.188119 |
92b9c455c74384b0f2054ab3fdb9826c43fde780 | 3,171 | py | Python | scripts/compare_spider.py | bayanistnahtc/seq2struct | 1b5597a4b91a405f19404a5c1df6c8671b9da119 | [
"MIT"
] | 25 | 2019-07-16T22:32:44.000Z | 2022-01-25T05:23:07.000Z | scripts/compare_spider.py | bayanistnahtc/seq2struct | 1b5597a4b91a405f19404a5c1df6c8671b9da119 | [
"MIT"
] | 19 | 2018-12-17T20:42:11.000Z | 2020-02-12T21:29:51.000Z | scripts/compare_spider.py | bayanistnahtc/seq2struct | 1b5597a4b91a405f19404a5c1df6c8671b9da119 | [
"MIT"
] | 22 | 2019-03-16T05:57:27.000Z | 2020-10-25T04:34:54.000Z | # Merge outputs of infer.py and other models for comparison.
# Outputs to a CSV file.
import argparse
import csv
import json
import os
from third_party.spider import evaluation
def main():
    """Merge outputs of infer.py / raw SQL files with Spider gold queries
    and write a per-question comparison CSV.

    Each input source is scored with the Spider evaluator; the CSV carries
    one column pair (correct flag, predicted SQL) per named source.
    """
    parser = argparse.ArgumentParser()
    # Outputs of infer.py
    parser.add_argument('--infer', nargs='*', default=())
    # Files containing inferred SQL, one per line
    # in order of the items in the dev set.
    parser.add_argument('--sql', nargs='*', default=())
    # The name to output for each of the inputs, in the CSV header.
    parser.add_argument('--names', nargs='*', default=())
    parser.add_argument('--out', required=True)
    args = parser.parse_args()

    # One name per input source, in --infer order then --sql order.
    assert len(args.names) == len(args.infer) + len(args.sql)

    SPIDER_ROOT = 'data/spider-20190205'
    # Use context managers throughout: the previous code leaked every file
    # handle it opened (tables.json, dev.json, each input, and the output).
    with open(os.path.join(SPIDER_ROOT, 'tables.json')) as schema_file:
        foreign_key_maps = {
            db['db_id']: evaluation.build_foreign_key_map(db)
            for db in json.load(schema_file)
        }

    # 1. Create the evaluator
    evaluator = evaluation.Evaluator(
        os.path.join(SPIDER_ROOT, 'database'),
        foreign_key_maps,
        'match')
    # 2. Read the ground truth SQL
    with open(os.path.join(SPIDER_ROOT, 'dev.json')) as dev_file:
        dev = json.load(dev_file)

    # 3. Perform evaluation
    difficulty = {}
    inferred_per_file = []
    correct_per_file = []

    def evaluate_one(i, predicted, inferred, correct):
        # Score one prediction against dev item i and record the result;
        # shared by the --infer and --sql paths, which previously duplicated
        # this logic.
        eval_result = evaluator.evaluate_one(
            db_name=dev[i]['db_id'],
            gold=dev[i]['query'],
            predicted=predicted)
        difficulty[i] = eval_result['hardness']
        inferred[i] = predicted
        correct[i] = 1 if eval_result['exact'] else 0

    for infer_path in args.infer:
        inferred = [None] * len(dev)
        correct = [None] * len(dev)
        inferred_per_file.append(inferred)
        correct_per_file.append(correct)
        with open(infer_path) as infer_file:
            for line in infer_file:
                item = json.loads(line)
                evaluate_one(item['index'], item['beams'][0]['inferred_code'],
                             inferred, correct)

    for sql_path in args.sql:
        inferred = [None] * len(dev)
        correct = [None] * len(dev)
        inferred_per_file.append(inferred)
        correct_per_file.append(correct)
        with open(sql_path) as sql_file:
            for i, line in enumerate(sql_file):
                evaluate_one(i, line.strip(), inferred, correct)

    with open(args.out, 'w') as out_file:
        writer = csv.writer(out_file)
        header = (['DB', 'Difficulty', 'Question', 'Gold']
                  + ['{} correct'.format(c) for c in args.names]
                  + ['{} output'.format(c) for c in args.names])
        writer.writerow(header)
        for i, dev_item in enumerate(dev):
            writer.writerow(
                [dev_item['db_id'], difficulty[i], dev_item['question'],
                    dev_item['query']] +
                [x[i] for x in correct_per_file] +
                [x[i] for x in inferred_per_file])
# Allow running directly: python compare_spider.py --infer ... --out out.csv
if __name__ == '__main__':
    main()
| 31.39604 | 90 | 0.578997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 633 | 0.199622 |
92b9ef953f90ad50edd4071667fa6e30e30d0ede | 854 | py | Python | src/server.py | atomicfruitcake/colonel | 1f34ae91991146b3efddbbaba10b99d5aa20640a | [
"MIT"
] | null | null | null | src/server.py | atomicfruitcake/colonel | 1f34ae91991146b3efddbbaba10b99d5aa20640a | [
"MIT"
] | null | null | null | src/server.py | atomicfruitcake/colonel | 1f34ae91991146b3efddbbaba10b99d5aa20640a | [
"MIT"
] | null | null | null | import kore
# Handler called for /httpclient
async def server(req):
# Create an httpclient.
client = kore.httpclient("https://kore.io")
# Do a simple GET request.
print("firing off request")
status, body = await client.get()
print("status: %d, body: '%s'" % (status, body))
# Reuse and perform another GET request, returning headers too this time.
status, headers, body = await client.get(return_headers=True)
print("status: %d, headers: '%s'" % (status, headers))
# What happens if we post something?
status, body = await client.post(body=b"hello world")
print("status: %d, body: '%s'" % (status, body))
# Add some custom headers to our requests.
status, body = await client.get(
headers={
"x-my-header": "async-http"
}
)
req.response(200, b'async done')
| 29.448276 | 77 | 0.626464 | 0 | 0 | 0 | 0 | 0 | 0 | 807 | 0.944965 | 396 | 0.4637 |
2b7ba4ab1cf3dd66d4fbb9e770b9f307a58c473f | 8,055 | py | Python | UCP/discussion/functions.py | BuildmLearn/University-Campus-Portal-UCP | 3cac50bd58f2ef4365522a76a8378d0a6f599832 | [
"BSD-3-Clause"
] | 13 | 2016-04-24T10:44:28.000Z | 2020-09-07T01:23:18.000Z | UCP/discussion/functions.py | BuildmLearn/University-Campus-Portal-UCP | 3cac50bd58f2ef4365522a76a8378d0a6f599832 | [
"BSD-3-Clause"
] | 16 | 2016-09-05T10:35:41.000Z | 2018-08-25T10:27:13.000Z | UCP/discussion/functions.py | BuildmLearn/University-Campus-Portal-UCP | 3cac50bd58f2ef4365522a76a8378d0a6f599832 | [
"BSD-3-Clause"
] | 24 | 2016-06-25T08:20:12.000Z | 2018-01-11T20:46:24.000Z | """
Functions file for discussion app
consists of common functions used by both api.py and views.py file
"""
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.core.mail import send_mail
from django.shortcuts import render
from django.template import Context
from django.utils import timezone
from django.views.generic import View
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from login.models import UserProfile
import login.serializers as Serializers
from discussion.models import DiscussionThread, Reply, Attachment, Tag
from discussion.serializers import DiscussionThreadSerializer,DiscussionThreadFullSerializer, ReplySerializer, ReplyFullSerializer, TagSerializer
from UCP.constants import result, message
from UCP.settings import EMAIL_HOST_USER, BASE_URL, PAGE_SIZE
from UCP.functions import send_parallel_mail
def get_all_tags():
    """
    Return serialized data for every available Tag.
    """
    return TagSerializer(Tag.objects.all(), many=True).data
def get_top_discussions(tags):
    """
    returns top 5 recent discussions from tags followed by the user

    ``tags`` is an iterable of Tag instances; "recent" relies on the model's
    default ordering -- presumably newest first, confirm on DiscussionThread.
    """
    return DiscussionThread.objects.filter(tags__in = tags)[:5]
def add_discussion_thread(request):
    """
    Create a DiscussionThread from POST data for the logged-in user.

    Expects the serializer fields plus a comma-separated ``tag`` parameter;
    tags that do not exist yet are created on the fly.  Returns a dict with
    ``result``/``error`` keys (plus a ``message`` on success).
    """
    response = {}
    serializer = DiscussionThreadSerializer(data = request.POST)
    if serializer.is_valid():
        user_profile = UserProfile.objects.get(user = request.user)
        discussion = serializer.save(
            posted_by = user_profile,
            posted_at = timezone.now()
        )
        tags = request.POST["tag"].split(',')
        for tag_name in tags:
            # get_or_create avoids the check-then-insert race of the previous
            # exists()/get()/save() sequence (two concurrent posts could both
            # try to create the same tag).
            tag, _created = Tag.objects.get_or_create(name=tag_name)
            discussion.tags.add(tag)
        response["result"] = result.RESULT_SUCCESS
        response["error"] = []
        response["message"] = "Discussion Thread added successfully"
    else:
        response["result"] = result.RESULT_FAILURE
        response["error"] = serializer.errors
    return response
def get_discussion_list(request):
    """
    Return one page of discussion threads, optionally filtered by tag.

    Reads ``tag`` and a 1-based ``page`` from request.GET.  The response dict
    carries the serialized threads under ``data`` and the total number of
    pages under ``page_count``.
    """
    response = {}
    threads = DiscussionThread.objects.all()
    if "tag" in request.GET:
        #return a filtered list
        threads = DiscussionThread.objects.filter(tags__name = request.GET["tag"])
    #code for pagination
    count = len(threads)
    # Ceiling division; the old ``count/PAGE_SIZE + 1`` reported one page too
    # many whenever count was an exact multiple of PAGE_SIZE (e.g. 20 items
    # with PAGE_SIZE 10 gave 3 pages).  Keep a minimum of one page for an
    # empty result, matching the previous behaviour.
    page_count = max(1, (count + PAGE_SIZE - 1) // PAGE_SIZE)
    if("page" in request.GET):
        page_no = int(request.GET["page"]) - 1
    else:
        page_no = 0
    offset = page_no * PAGE_SIZE
    threads = threads[offset:offset+PAGE_SIZE]
    serializer = DiscussionThreadFullSerializer(threads, many=True)
    response["page_count"] = page_count
    response["data"] = serializer.data
    return response
def subscribe(request, pk):
    """
    Subscribe request.user to the discussion thread with id ``pk``.

    Returns ``{'result': RESULT_SUCCESS}`` on success, or a failure dict with
    an ``error`` message when the id does not exist.
    """
    response = {}
    if DiscussionThread.objects.filter(id = pk).exists():
        discussion = DiscussionThread.objects.get(id = pk)
        user_profile = UserProfile.objects.get(user = request.user)
        discussion.subscribed.add(user_profile)
        discussion.save()
        response["result"] = result.RESULT_SUCCESS
        return response
    else:
        response["result"] = result.RESULT_FAILURE
        response["error"] = "This discussion id does not exist"
        # The previous version fell off the end here and returned None,
        # so callers never saw the failure payload.
        return response
def unsubscribe(request, pk):
    """
    Unsubscribe request.user from the discussion thread with id ``pk``.

    Returns ``{'result': RESULT_SUCCESS}`` on success, or a failure dict with
    an ``error`` message when the id does not exist.
    """
    response = {}
    if DiscussionThread.objects.filter(id = pk).exists():
        discussion = DiscussionThread.objects.get(id = pk)
        user_profile = UserProfile.objects.get(user = request.user)
        discussion.subscribed.remove(user_profile)
        discussion.save()
        response["result"] = result.RESULT_SUCCESS
        return response
    else:
        response["result"] = result.RESULT_FAILURE
        response["error"] = "This discussion id does not exist"
        # The previous version fell off the end here and returned None,
        # so callers never saw the failure payload.
        return response
def get_tags(query):
    """
    Return entries for tags whose name contains ``query`` (case-insensitive).

    Each entry is a dict with ``id``, ``value`` and ``label`` keys (the latter
    two both carry the tag name).
    """
    matches = Tag.objects.filter(name__icontains=query)
    return [
        {"id": tag.id, "value": tag.name, "label": tag.name}
        for tag in matches
    ]
def get_replies(pk, request):
    """
    Return one page of replies for discussion ``pk`` plus the discussion.

    Reads a 1-based ``page`` from request.GET.  On success the response dict
    carries ``page_count`` and ``data`` (with ``discussion`` and ``replies``
    sub-keys); for an unknown id it carries an ``error`` message.
    """
    response = {}
    if DiscussionThread.objects.filter(id = pk).exists():
        discussion = DiscussionThread.objects.get(id = pk)
        replies = Reply.objects.filter(thread = discussion)
        #pagination
        # Ceiling division; the old ``len(replies)/PAGE_SIZE + 1`` reported an
        # extra page whenever the count was an exact multiple of PAGE_SIZE.
        page_count = max(1, (len(replies) + PAGE_SIZE - 1) // PAGE_SIZE)
        if("page" in request.GET):
            page_no = int(request.GET["page"]) - 1
        else:
            page_no = 0
        offset = page_no * PAGE_SIZE
        replies = replies[offset:offset+PAGE_SIZE]
        reply_serializer = ReplyFullSerializer(replies, many=True)
        discussion_serializer = DiscussionThreadFullSerializer(discussion)
        response["page_count"] = page_count
        response["data"] = {}
        response["data"]["discussion"] = discussion_serializer.data
        response["data"]["replies"] = reply_serializer.data
        return response
    else:
        response["error"] = "This discussion id does not exist"
        # The previous version fell off the end here and returned None,
        # hiding the error from callers.
        return response
def get_discussion(pk):
    """
    Return the discussion with id pk

    NOTE(review): unlike the other helpers in this module, this one does not
    guard against a missing id, so an unknown ``pk`` raises DoesNotExist --
    confirm that callers expect the exception rather than an error dict.
    """
    response = {}
    discussion = DiscussionThread.objects.get(id = pk)
    serializer = DiscussionThreadSerializer(discussion)
    response["data"] = serializer.data
    return response
def send_notification(discussion):
    """
    Email every subscriber of ``discussion`` that a new reply was posted.
    """
    # The URL and subject do not depend on the subscriber, so build them once.
    discussion_url = "http://" + BASE_URL + "/discussions/" + str(discussion.id)
    subject = discussion.title + " - new reply"
    for subscriber in discussion.subscribed.all():
        # Local name ``body`` deliberately avoids shadowing the ``message``
        # constant imported at module level.
        body = ("Hey " + subscriber.user.first_name + "!\n"
                "A new reply was added to this discussion\n"
                "To view the discussions click here - " + discussion_url)
        send_parallel_mail(subject, body, [subscriber.user.email])
def add_reply(pk, request):
    """
    Add a reply (with optional file attachments) to discussion ``pk``.

    On success the reply author is auto-subscribed to the thread, the reply
    counter is bumped, and subscribers are emailed.  Returns a result dict
    with the serialized discussion under ``data``.
    """
    response = {}
    discussion = DiscussionThread.objects.get(id = pk)
    discussion_serializer = DiscussionThreadSerializer(discussion)
    serializer = ReplySerializer(data=request.POST)
    if serializer.is_valid():
        user_profile = UserProfile.objects.get(user = request.user)
        reply = serializer.save(
            posted_by = user_profile,
            posted_at = timezone.now(),
            thread = discussion
        )
        # (leftover debug ``print _file`` removed -- it spammed stdout on
        # every uploaded attachment)
        for _file in request.FILES.getlist('attachments'):
            attachment = Attachment(
                reply = reply,
                uploaded_file = _file
            )
            attachment.save()
        discussion.no_of_replies += 1
        #automatically subscribe the person adding the reply to the discussion
        discussion.subscribed.add(user_profile)
        send_notification(discussion)
        discussion.save()
        response["result"] = result.RESULT_SUCCESS
        response["data"] = discussion_serializer.data
    else:
        response["result"] = result.RESULT_FAILURE
        response["error"] = serializer.errors
        response["data"] = discussion_serializer.data
    return response
| 31.342412 | 145 | 0.644693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,485 | 0.184358 |
2b7bc8e032cf86f470d11cf32fc4958ecf2316e4 | 22,525 | py | Python | Tama/Plugins/tama_drawer/tamaframe.py | just-drive/Tama | 7102c2855d1625a8ecef3d7d7d17680a4a0820f4 | [
"BSD-3-Clause"
] | 2 | 2021-02-08T20:55:56.000Z | 2021-03-04T02:26:17.000Z | Tama/Plugins/tama_drawer/tamaframe.py | just-drive/Tama | 7102c2855d1625a8ecef3d7d7d17680a4a0820f4 | [
"BSD-3-Clause"
] | 4 | 2021-02-02T05:34:12.000Z | 2021-10-21T06:02:26.000Z | Tama/Plugins/tama_drawer/tamaframe.py | just-drive/Tama | 7102c2855d1625a8ecef3d7d7d17680a4a0820f4 | [
"BSD-3-Clause"
] | null | null | null | from win32ctypes.pywin32 import win32api
import win32.lib.win32con as win32con
import win32.win32gui as win32gui
from wx.lib.delayedresult import startWorker
import PIL
import wx
import wx.aui as aui
import wx.adv as adv
import wx.lib.newevent
import os
import threading
import datetime
import random
import mouse
from screeninfo import get_monitors
import Plugins.tama_drawer.tama_drawer_events
from Plugins.tama_drawer.tama_drawer_events import TamaMoodEvent
from Plugins.tama_drawer.tama_drawer_events import EVT_TAMA_MOOD
#
# The two tools below were acquired from the below wxPythonWiki tutorial:
# https://wiki.wxpython.org/WorkingWithImages
#
import Plugins.tama_drawer.ImgConv # wxImage <==> PilImage
import Plugins.tama_drawer.BitmapManip # mask wxBmap <==> PilImage <== file
from PIL import Image, ImageDraw, ImageChops, ImageSequence
def GetRamdomWxColorAndInverse() :
    """Return a random dark-or-light wx.Colour paired with its RGB inverse."""
    red, green, blue = (random.randint(0, 127),
                        random.randint(0, 127),
                        random.randint(0, 127))
    # Half of the time flip the base colour into the bright half of the range
    # so that the pair always has strong contrast.
    if random.randint(0, 1) :
        red, green, blue = (255-red, 255-green, 255-blue)
    inverse = wx.Colour(255-red, 255-green, 255-blue)
    return (wx.Colour(red, green, blue), inverse)
def CreateInnerMaskBmapFromOuterMask( srcBmap ) :
    """
    Derive the inner mask wxBitmap from the Outer mask wxBitmap.
    The srcBmap must be "well behaved" in that a continuous border
    must present so that a floodfill to the perimeter area will not reach
    into the inner area. The border color must be >=128. So,
    the srcBmap consists of a transparent/BLACK perimeter, an white/opaque
    frame border and a transparent/BLACK inner area.
    When completed, the outer_area+border will be transparent/BLACK,
    the parent's frame border will be transparent/BLACK and the inner area
    will be opaque/WHITE.
    1. outer perimeter (black) --> Floodfill to white/255
    Now both perimeter and border are white).
    2. Invert the image and return as a wxBitmap..
    """
    # Start with an 'L' Pil copy of the RGB input wxBitmap.
    dstPilImage = ImgConv.PilImageFromWxBitmap( srcBmap ).convert( 'L' )
    # Make sure the image is quantized to binary.
    # NOTE(review): under Python 3, ``i / 128`` is float division, so this
    # maps pixels to floats rather than to {0, 255}; ``i // 128`` would be the
    # binary quantization the comment describes -- confirm which interpreter
    # this targets before relying on the mask being strictly binary.
    dstPilImage = dstPilImage.point(lambda i: (i / 128) * 255)
    size = dstPilImage.size
    # Flood-fill from the top-left corner so the whole outer perimeter joins
    # the (white) border, then invert: perimeter+border become black and the
    # enclosed inner area becomes white.
    ImageDraw.floodfill( dstPilImage, (0, 0), (255) )
    return ImgConv.WxBitmapFromPilImage( ImageChops.invert( dstPilImage ) )
#end def
#------------------------------------------------------------------------------
class TamaFrame(wx.Frame):
    """
    Shaped window from disk image files and optional disk transparency mask files.
    The user cannot resize the window because there are no resizing decorations !
    The entire popup is just a control-less bitmap image.
    However, all that is visible (opaque) can be repositioned by dragging.
    """
    def __init__( self, parent, image_filename=None, mask_filename=None,
                        outer_or_inner_window=1, # default to a shaped frame window
                        posn=(0, 0), bgTransparency=100 ) :
        style = ( wx.STAY_ON_TOP )
        # NOTE(review): the string below is a bare expression statement, not a
        # docstring (it follows the ``style`` assignment), so it has no effect
        # at runtime and is kept only as in-code commentary.
        """
        The TamaFrame inherits from wx.Frame, and thus receives the ability to be used in a wxpython (wx) app
        This is the window that is created for the application, Tama's actual form will be inside of this frame,
        and the frame itself is only slightly visible (This can be tweaked).
        """
        wx.Frame.__init__(self, parent, wx.ID_ANY, style=style, title = 'Tama', name = 'Tama')
        self.bgTransparency = bgTransparency
        self.SetBackgroundStyle(wx.BG_STYLE_ERASE)
        self.image_filename = image_filename
        self.image_wxBitmaps = []  # queue of bitmaps pending display; popped in DrawWindow
        self.parent = parent
        self.current_bitmap = None
        # 60 ms tick drives animation frames and autonomous movement (OnTimer).
        self.timer = wx.Timer(self, wx.ID_ANY)
        self.timer.Start(60)
        #Will be used to get locations of screens, so that the correct
        #screen is drawn to when drawing with a ScreenDC
        self.screens = []
        self.screen_positions = []
        for screen in get_monitors():
            self.screens.append(screen)
        #Bounding_boxes is a list of rect objects where bounding_boxes[0]
        #Represents the client size of screen[0]
        #This will be used to draw with a ScreenDC, which considers all
        #monitors to be one screen.
        self.bounding_boxes = []
        for screen_idx in range(len(self.screens)):
            self.bounding_boxes.append(wx.Display(screen_idx).GetClientArea())
        self.SetTitle('Tama')
        self.SetSize( (250, 250) )
        # Start on the first monitor, at the top-left of its client area.
        self.current_screen = wx.Display().GetFromPoint((self.bounding_boxes[0].GetX(), self.bounding_boxes[0].GetY()))
        self.SetPosition((self.bounding_boxes[0].GetX(), self.bounding_boxes[0].GetY()))
        self.current_mood = None
        self.last_mouse_pos = wx.Point(0,0)
        self.tama_widget = TamaWidget(self)
        self.previous_update = datetime.datetime.now()
        self.screenContext = None
        self.is_border_window = outer_or_inner_window
        self.is_inner_window = not outer_or_inner_window
        if wx.Platform == '__WXGTK__' : # GTK-only, use as an event handler.
            self.Bind( wx.EVT_WINDOW_CREATE, self.DrawWindow )
        #end if
        #------------------------------
        # This handler is always required.
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_TIMER, self.OnTimer)
        #self.Bind(wx.EVT_ERASE_BACKGROUND, self.DoNothing)
        # Enable the user to quit the app by pressing <ESC>.
        self.Bind( wx.EVT_KEY_UP, self.OnKeyDown ) # Comment this to disable.
        # Enable window dragging.
        self.Bind( wx.EVT_MOTION, self.OnMotion ) # Comment this to disable.
        self.Bind(wx.EVT_LEFT_UP, self.OnRelease)
        self.Bind(wx.EVT_CLOSE, parent.OnClose)
        #mouse.on_right_click(self.ShowRightMenu)
        self.Bind(wx.EVT_CONTEXT_MENU, self.ShowRightMenu)
        #Linux and Windows will have different ways to create this kind of transparent frame.
        if wx.Platform == '__WXMSW__':
            # Windows: use layered-window color keying so black pixels become
            # fully transparent, then make the rest semi-transparent.
            hwnd = self.GetHandle()
            extendedStyleSettings = win32gui.GetWindowLong(hwnd, win32con.GWL_EXSTYLE)
            win32gui.SetWindowLong(hwnd, win32con.GWL_EXSTYLE, extendedStyleSettings | win32con.WS_EX_LAYERED | win32con.WS_CHILD)
            win32gui.SetLayeredWindowAttributes(hwnd, 0, 255, win32con.LWA_COLORKEY)
            self.SetTransparent(190)
        elif wx.Platform == '__WXGTK__':
            pass
        else:
            pass
        self.SetDoubleBuffered(True)
        self.Layout()
        self.Show()
    #--------------------------------------------
    def SetOtherWindow( self, otherWindow ) :
        """ Allow the other ShapedWindow to be referenced in this instantiation. """
        self.otherWindow = otherWindow
    #end def
    def SetMyPosition( self, posn ) :
        """ This is for "OtherWindow" to call, never "self"."""
        self.SetPosition( posn )
    #end def
    def OnPaint( self, event ) :
        """Repaint handler: delegate to DrawWindow and let the event propagate."""
        self.DrawWindow()
        event.Skip()
        return # Very important to let all higher level handlers be called.
    #end def
    def DoNothing(self, event):
        """Swallow an event (used to suppress background-erase flicker)."""
        pass
    def SetImage(self, pil_image):
        """Convert a PIL image to a wx.Bitmap (RGB + alpha) and queue it for DrawWindow."""
        if pil_image:
            width, height = pil_image.size
            self.image_wxBitmaps.append(wx.BitmapFromBuffer(width, height, pil_image.convert('RGB').tobytes(), pil_image.convert('RGBA').getchannel('A').tobytes()))
        return
    def DrawWindow(self) :
        """Implement window drawing at any time."""
        # NOTE(review): wx.PaintDC is only valid inside an EVT_PAINT handler,
        # but on GTK this method is also bound to EVT_WINDOW_CREATE above --
        # confirm this is intentional.
        # screenContext will be drawn to after memoryContext is given the right combined bitmap
        context = wx.PaintDC(self)
        # Blit will copy the pixels from self.combined_image, which is a
        # MemoryDC that contains the current Tama Image to be displayed.
        # This image is newly generated within the Tama task system, in order to
        # reduce image display time.
        if len(self.image_wxBitmaps) and context.CanDrawBitmap():
            context.DrawBitmap(self.image_wxBitmaps.pop(), 0, 0, False)
        del context
    #end def DrawWindow
    #--------------------------------------------
    def OnTimer(self, event):
        """Per-tick update: autonomous movement (when not grabbed), queue the
        next animation frame, and request a repaint."""
        if not self.tama_widget.is_grabbed() and self.tama_widget.is_moving():
            self.move_direction()
        self.SetImage(self.tama_widget.next())
        self.Refresh()
        return
    def show_window_pinning(self, event):
        """Show the window-pinning frame (parent.frames[2])."""
        self.parent.frames[2].Show()
        return
    def show_copyx(self, event):
        """Show the Copy X frame (parent.frames[3])."""
        self.parent.frames[3].Show()
        return
    def show_macro_recorder(self, event):
        """Show the macro recorder frame (parent.frames[4])."""
        self.parent.frames[4].Show()
        return
    def show_settings(self, event):
        """Show the settings frame (parent.frames[5])."""
        self.parent.frames[5].Show()
        return
    def ShowRightMenu(self, *args) :
        """
        Create and show a Context Menu
        """
        # only do this part the first time so the events are only bound once
        if not hasattr(self, "itemOneId"):
            self.itemOneId = wx.NewId()
            self.itemTwoId = wx.NewId()
            self.itemThreeId = wx.NewId()
            self.itemFourId = wx.NewId()
            self.itemFiveId = wx.NewId()
            self.Bind(wx.EVT_MENU, self.show_window_pinning, id=self.itemOneId)
            self.Bind(wx.EVT_MENU, self.show_copyx, id=self.itemTwoId)
            self.Bind(wx.EVT_MENU, self.show_macro_recorder, id=self.itemThreeId)
            self.Bind(wx.EVT_MENU, self.show_settings, id=self.itemFourId)
            self.Bind(wx.EVT_MENU, self.parent.OnClose, id=self.itemFiveId)
        # build the menu
        menu = wx.Menu()
        itemOne = menu.Append(self.itemOneId, "Pin a Window...")
        itemTwo = menu.Append(self.itemTwoId, "Copy X...")
        itemThree = menu.Append(self.itemThreeId, "Record Mouse Events...")
        itemFour = menu.Append(self.itemFourId, "Settings")
        itemFive = menu.Append(self.itemFiveId, "Exit")
        # show the popup menu
        self.PopupMenu(menu)
        menu.Destroy()
    def OnKeyDown( self, event ) :
        """Quit the app if the user presses Q, q or Esc"""
        keyCode = event.GetKeyCode()
        quitCodes = [27, ord('Q'), ord('q')]
        event.Skip() # Allow any following event processing.
        if (keyCode in quitCodes) :
            self.Close( force=True )
        #end if
    #end def
    #--------------------------------------------
    def OnMotion( self, event ) :
        """Implement window client area dragging since this window has no frame to grab."""
        if not event.Dragging() : # Mouse is moving but no button is down.
            self.dragPosn = None
            return
        #end if
        #self.CaptureMouse()
        if self.dragPosn == None : # Previous non-dragging mouse position
            # Capture the first mouse coord after pressing any button
            self.dragPosn = event.GetPosition()
        else:
            # Mark the widget as grabbed so grabbed animations play while the
            # user drags, then move the frame by the mouse displacement.
            if not self.tama_widget.is_grabbed():
                self.tama_widget.is_grabbed(True)
            currPosn = event.GetPosition()
            self.current_screen = wx.Display().GetFromWindow(self)
            displacement = self.dragPosn - currPosn
            newPosn = self.GetPosition() - displacement
            self.SetPosition( newPosn )
            self.Update()
    def move_direction(self):
        """Step the frame 2px left or right per tick while a 'Move ...'
        animation plays; stop moving at the current screen's client edge."""
        window_pos = self.GetScreenPosition()
        if self.tama_widget.is_moving() and 'Move' in self.tama_widget.get_anim_name():
            #box represents the client area of the current screen that Tama is located on.
            #and the upper left corner does not have to be 0,0
            box = self.bounding_boxes[self.current_screen]
            if self.tama_widget.get_movement_direction() == 'Move Left':
                if self.bounding_boxes[self.current_screen].Contains(
                    wx.Point(window_pos[0]-2, window_pos[1])):
                    self.Move(window_pos[0]-2, window_pos[1])
                else:
                    self.tama_widget.is_moving(False)
            elif self.tama_widget.get_movement_direction() == 'Move Right':
                if self.bounding_boxes[self.current_screen].Contains(
                    wx.Point(window_pos[0] + self.GetSize().GetWidth(), window_pos[1])):
                    self.Move(window_pos[0]+2, window_pos[1])
                else:
                    self.tama_widget.is_moving(False)
            else:
                pass
        self.Update()
    def OnRelease(self, event):
        """Left-button release ends a drag: clear the widget's grabbed state."""
        if self.tama_widget.is_grabbed():
            self.tama_widget.is_grabbed(False)
    def needs_update(self):
        """Proxy to the widget: True when a new frame/animation is due."""
        return self.tama_widget.needs_update()
    def needs_mood(self):
        """Return True while no mood has been assigned yet."""
        if self.current_mood is None:
            return True
        return False
    def generate(self, event):
        """Pick the animation matching the event's modifiers and repaint."""
        if self.tama_widget.is_moving():
            self.tama_widget.is_moving(False, None)
        if 'Sleeping' in event.get_modifiers():
            self.tama_widget.set_animation('Sleeping')
        elif 'Eating' in event.get_modifiers():
            self.tama_widget.set_animation('Eating')
        elif 'Thinking_of_Food' in event.get_modifiers():
            self.tama_widget.set_animation('Thinking_of_Food')
        else:
            self.tama_widget.set_animation('Idle')
        self.Refresh()
        return
    def set_current_mood(self, current_mood):
        """Forward the mood to the widget and make the frame visible."""
        self.tama_widget.set_current_mood(current_mood)
        self.Show()
        return
    def get_bounding_boxes(self):
        """Return the per-monitor client-area rects collected in __init__."""
        return self.bounding_boxes
    def OnClose(self, e):
        """Allow the close event to propagate to the parent's handler."""
        e.Skip()
class TamaWidget():
    """
    Holds the processes that handle generating Tama from a stack of layers that are provided via (tama_stream)
    It will yield images from tama_generate() as the animation is changed by a current_mood update.
    """
    def __init__(self, parent):
        # Animations are GIFs under <plugin dir>/Assets/<AnimationName>/.
        self.assets_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), "Assets")
        self.parent = parent
        self.available_folders = []
        self.current_mood = None
        self.animation_duration = 0  # number of frames in the current animation
        self.frame_idx = 0           # index of the next frame to serve
        #self.tama_generator = wx.Process()
        self.current_animation_name = 'Idle'
        self.idle_animation_path = os.path.join(os.path.join(self.assets_folder, 'Idle'), 'Idle_0.gif')
        self.current_folder_animations = [self.idle_animation_path]
        #GenericAnimationCtrl is used here in order to detect when an animation is done playing.
        self.current_gif = Image.open(self.idle_animation_path)
        self.current_animation = []  # list of PIL frames for the active animation
        self.prev_animation = None   # animation to restore after Grabbed/Move ends
        self.grabbed = False
        self.moving = False
        self.direction = None        # 'Move Left' / 'Move Right' / 'Idle'
    def set_current_mood(self, current_mood):
        """Store the mood object driving future animation choices."""
        self.current_mood = current_mood
        return
    def get_movement_direction(self):
        """Return the current movement direction label (or None)."""
        return self.direction
    def needs_update(self):
        """True when the grabbed state and animation disagree, or the
        animation has reached (or passed) its last frame."""
        if (self.is_grabbed() and self.current_animation_name != 'Grabbed') \
        or (not self.is_grabbed() and self.current_animation_name == 'Grabbed'):
            return True
        elif self.frame_idx - self.animation_duration >= -1:
            return True
        return False
    def get_current_animation(self):
        """Return the frame at frame_idx, or None when no animation is loaded."""
        if self.current_animation:
            return self.current_animation[self.frame_idx]
        return None
    def get_anim_name(self):
        """Return the name of the active animation."""
        return self.current_animation_name
    # Returns the current frame and increments the frame_idx by one.
    def next(self):
        if self.frame_idx >= self.animation_duration-1:
            # Animation exhausted: re-roll (set_animation also resets frame_idx
            # and may switch into/out of a Move animation).
            self.set_animation(self.current_animation_name)
        im = self.get_current_animation()
        if im:
            self.frame_idx += 1
            return im
        else:
            return None
    def is_grabbed(self, ishe = None):
        '''
        This allows other classes to "grab" Tama as well.
        Returns whether or not 'grabbed' animations will play if used without the bool
        Sets grabbed animations to play and returns the screen position if used with the bool
        '''
        if ishe is None:
            return self.grabbed
        self.grabbed = ishe
        if self.is_moving():
            if ishe == True:
                # Remember what was playing (unless it was already a Move or
                # Grabbed animation) so it can be restored on release.
                if 'Move' not in self.current_animation_name \
                and 'Grabbed' not in self.current_animation_name:
                    self.prev_animation = self.current_animation_name
                self.set_animation('Grabbed')
                self.is_moving(False)
            if ishe == False:
                return
        else:
            if ishe == True:
                if 'Move' not in self.current_animation_name \
                and 'Grabbed' not in self.current_animation_name:
                    self.prev_animation = self.current_animation_name
                self.set_animation('Grabbed')
            if ishe == False:
                self.set_animation(self.prev_animation)
    def is_moving(self, ishe = None, dir = -1):
        '''
        This allows other classes to trigger Tama left-right movements
        Returns whether or not moving animations are playing
        Sets moving animations to play

        ``dir``: 0 = left, 1 = right, anything else cancels movement.
        (Note: the parameter name shadows the ``dir`` builtin.)
        '''
        if ishe is None:
            return self.moving
        self.moving = ishe
        if dir == 0:
            self.direction = 'Move Left'
        elif dir == 1:
            self.direction = 'Move Right'
        else:
            self.direction = "Idle"
            self.moving = False
        if not self.is_grabbed():
            self.moving = ishe
            if ishe == True:
                if 'Move' not in self.current_animation_name \
                and 'Grabbed' not in self.current_animation_name:
                    self.prev_animation = self.current_animation_name
                self.set_animation(self.direction)
            elif ishe == False:
                self.set_animation(self.prev_animation)
        else:
            # Movement is suppressed entirely while grabbed.
            self.moving = False
    def pngs_exist(self, gif_idx, anim_name):
        """Return True when pre-extracted PNG frames for GIF ``gif_idx`` of
        ``anim_name`` already exist under the animation's Gen/ cache folder."""
        if os.path.exists(os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen')):
            pngs = [file.name for file in os.scandir(os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen')) if file.is_dir() != True and '.png' in file.name.lower()]
            for png in pngs:
                if str(gif_idx) + "_" in png:
                    return True
        return False
    def set_animation(self, anim_name):
        """Load (or re-roll) the animation named ``anim_name``: pick one of its
        GIFs at random, extracting its frames to PNGs in Gen/ on first use."""
        #This has to happen every time set_animation is called, or indices will go out of range when calling self.next()
        self.frame_idx = 0
        if self.is_grabbed():
            #in the future, we can set a grabbed + anim_name animation here, and rotate the animation on user drag.
            anim_name = 'Grabbed'
        elif random.randrange(0, 2) == 0:
            # 50% chance per re-roll to toggle autonomous movement on/off.
            if not self.is_moving():
                dir = random.randrange(0, 2)
                self.is_moving(True, dir)
            else:
                self.is_moving(False)
                return
        gifs = [file.path for file in os.scandir(os.path.join(self.assets_folder, anim_name)) if file.is_dir() != True and '.gif' in file.name.lower()]
        if os.path.exists(os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen')):
            pngs = [file.path for file in os.scandir(os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen')) if file.is_dir() != True and '.png' in file.name.lower()]
        else:
            pngs = []
        if len(gifs) < 1:
            # No GIFs for this animation: fall back to the bundled Idle GIF.
            # NOTE(review): os.mkdir(..., 0) passes mode 0 (no permissions on
            # POSIX) -- confirm this is only ever run on Windows.
            self.current_animation_name = 'Idle'
            current_gif = Image.open(self.idle_animation_path)
            if not os.path.exists(os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen')):
                os.mkdir(os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen'), 0)
            for frame in ImageSequence.Iterator(current_gif):
                combined_anim_name = "" + str(0) + "_" + anim_name + "_frame" + str(self.animation_duration) + ".png"
                path_to_frame = os.path.join(os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen'), combined_anim_name)
                gif_info = frame.info
                frame.save(path_to_frame, **gif_info)
                self.current_animation.append(Image.open(path_to_frame))
                self.animation_duration += 1
        else:
            self.animation_duration = 0
            self.current_animation_name = anim_name
            self.current_animation = []
            gif_idx = random.randrange(0, len(gifs), 1)
            #if there aren't any pngs yet for this animation, create them
            if self.pngs_exist(gif_idx, anim_name):
                # Cached PNG frames exist: load them in frame order.
                pngs = [file.path for file in os.scandir(os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen')) if file.is_dir() != True and '.png' in file.name.lower() and str(gif_idx) + "_" + anim_name + "_frame" in file.name]
                for png in pngs:
                    combined_anim_name = "" + str(gif_idx) + "_" + anim_name + "_frame" + str(self.animation_duration) + ".png"
                    path_to_frame = os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen')
                    self.current_animation.append(Image.open(pngs[pngs.index(os.path.join(path_to_frame, combined_anim_name))]))
                    self.animation_duration += 1
            else:
                # First use of this GIF: split it into PNG frames under Gen/.
                current_gif = Image.open(gifs[gif_idx])
                if not os.path.exists(os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen')):
                    os.mkdir(os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen'), 0)
                for frame in ImageSequence.Iterator(current_gif):
                    combined_anim_name = "" + str(gif_idx) + "_" + anim_name + "_frame" + str(self.animation_duration) + ".png"
                    path_to_frame = os.path.join(os.path.join(os.path.join(self.assets_folder, anim_name), 'Gen'), combined_anim_name)
                    gif_info = frame.info
                    frame.save(path_to_frame, **gif_info)
                    self.current_animation.append(Image.open(path_to_frame))
                    self.animation_duration += 1
        return
| 42.024254 | 239 | 0.610699 | 19,952 | 0.885771 | 0 | 0 | 0 | 0 | 0 | 0 | 5,653 | 0.250966 |
2b7ebd9c78bff500ac5913c95ab0dfa1a49349f8 | 1,940 | py | Python | examples/08_compute_shader.py | dougbrion/ModernGL | 6de8938ccd0042c1389a32b697af5f9c9d279e41 | [
"MIT"
] | null | null | null | examples/08_compute_shader.py | dougbrion/ModernGL | 6de8938ccd0042c1389a32b697af5f9c9d279e41 | [
"MIT"
] | null | null | null | examples/08_compute_shader.py | dougbrion/ModernGL | 6de8938ccd0042c1389a32b697af5f9c9d279e41 | [
"MIT"
] | 1 | 2020-07-10T23:26:36.000Z | 2020-07-10T23:26:36.000Z |
'''
example of using compute shader.
requirements:
- numpy
- imageio (for output)
'''
import os
import moderngl
import numpy as np
import imageio # for output
def source(uri, consts):
    """Load a GLSL source file and substitute every ``%%NAME%%`` placeholder.

    ``consts`` maps placeholder names to values; each value is stringified
    before substitution.  Returns the patched shader source.
    """
    with open(uri, 'r') as shader_file:
        text = shader_file.read()
    # feed constant values
    for name, value in consts.items():
        text = text.replace("%%{}%%".format(name), str(value))
    return text
# W = X * Y // for each run, handles a row of pixels
# execute compute shader for H times to complete
W = 512
H = 256
X = W
Y = 1
Z = 1
# Constants substituted into the %%...%% placeholders of the GLSL source.
consts = {
    "W": W,
    "H": H,
    "X": X + 1,
    "Y": Y,
    "Z": Z,
}
FRAMES = 50
OUTPUT_DIRPATH = "./output"
if not os.path.isdir(OUTPUT_DIRPATH):
    os.makedirs(OUTPUT_DIRPATH)
# Headless GL context; OpenGL 4.3 is required for compute shaders.
context = moderngl.create_standalone_context(require=430)
compute_shader = context.compute_shader(source('./gl/median_5x5.gl', consts))
# init buffers
buffer_a_data = np.random.uniform(0.0, 1.0, (H, W, 4)).astype('f4')
buffer_a = context.buffer(buffer_a_data)
buffer_b_data = np.zeros((H, W, 4)).astype('f4')
buffer_b = context.buffer(buffer_b_data)
imgs = []
last_buffer = buffer_b
# Ping-pong the two SSBOs: each frame reads from one buffer and writes the
# other, swapping input/output bindings on alternating iterations.
for i in range(FRAMES):
    toggle = True if i % 2 else False
    buffer_a.bind_to_storage_buffer(1 if toggle else 0)
    buffer_b.bind_to_storage_buffer(0 if toggle else 1)
    # toggle 2 buffers as input and output
    last_buffer = buffer_a if toggle else buffer_b
    # local invocation id x -> pixel x
    # work groupid x -> pixel y
    # eg) buffer[x, y] = gl_LocalInvocationID.x + gl_WorkGroupID.x * W
    compute_shader.run(group_x=H, group_y=1)
# print out
# NOTE(review): only the final buffer is read back, so ``imgs`` holds a single
# frame even though FRAMES iterations ran -- confirm whether a per-frame
# read-back inside the loop was intended for the GIF.
output = np.frombuffer(last_buffer.read(), dtype=np.float32)
output = output.reshape((H, W, 4))
output = np.multiply(output, 255).astype(np.uint8)
imgs.append(output)
# if you don't want to use imageio, remove this line
imageio.mimwrite(f"./{OUTPUT_DIRPATH}/debug.gif", imgs, "GIF", duration=0.15)
| 24.25 | 77 | 0.659794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 605 | 0.311856 |
2b809f699739184b5a2cfa529c9aff5901c0adf0 | 2,816 | py | Python | src/third_party/pcap2har/main.py | ashumeow/pcaphar | 68d20b74bb16fa1b4183ebe04753ffcecf9daedf | [
"Apache-2.0"
] | null | null | null | src/third_party/pcap2har/main.py | ashumeow/pcaphar | 68d20b74bb16fa1b4183ebe04753ffcecf9daedf | [
"Apache-2.0"
] | null | null | null | src/third_party/pcap2har/main.py | ashumeow/pcaphar | 68d20b74bb16fa1b4183ebe04753ffcecf9daedf | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command line tool to convert PCAP file to HAR format.
This is command line to for pcaphar app engine. A user can convert a PCAP file
to HAR format file.
"""
__author__ = 'lsong@google.com (Libo Song)'
import os
import sys
# add third_party directory to sys.path for global import
path = os.path.join(os.path.dirname(__file__), "..")
sys.path.append(os.path.abspath(path))
dpkt_path = os.path.join(path, "dpkt")
sys.path.append(os.path.abspath(dpkt_path))
import heapq
import logging
import StringIO
import time
import convert
def PrintUsage():
  """Print command-line usage for the pcap-to-har converter to stdout."""
  print __file__, "[options] <pcap file> [<har file>]"
  print "options: -l[diwe] log level"
  print " --port filter out port"
def main(argv=None):
logging_level = logging.WARNING
filter_port = -1
if argv is None:
argv = sys.argv
filenames = []
idx = 1
while idx < len(argv):
if argv[idx] == '-h' or argv[idx] == '--help':
PrintUsage()
return 0
elif argv[idx] == '--port':
idx += 1
if idx >= len(argv):
PrintUsage()
return 1
filter_port = int(argv[idx])
elif argv[idx] == '-ld':
logging_level = logging.DEBUG
elif argv[idx] == '-li':
logging_level = logging.INFO
elif argv[idx] == '-lw':
logging_level = logging.WARN
elif argv[idx] == '-le':
logging_level = logging.ERROR
elif argv[idx][0:1] == '-':
print "Unknow option:", argv[idx]
PrintUsage()
return 1
else:
filenames.append(argv[idx])
idx += 1
# set the logging level
logging.basicConfig(level=logging_level)
if len(filenames) == 1:
pcap_file = filenames[0]
har_file = pcap_file + ".har"
elif len(filenames) == 2:
pcap_file = filenames[0]
har_file = filenames[1]
else:
PrintUsage()
return 1
# If excpetion raises, do not catch it to terminate the program.
inf = open(pcap_file, 'r')
pcap_in = inf.read()
inf.close
har_out = StringIO.StringIO()
options = convert.Options()
#options.remove_cookie = False
convert.convert(pcap_in, har_out, options)
har_out_str = har_out.getvalue()
outf = open(har_file, 'w')
outf.write(har_out_str)
outf.close()
if __name__ == "__main__":
sys.exit(main())
| 25.834862 | 78 | 0.667259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,137 | 0.403764 |
2b832fc4edfc9eb726a140832521968248c37fd7 | 1,223 | py | Python | chapter_2/name_cases.py | superbe/PythonCrashCourse | c8781f68b0e9e68e54d48cce5224ecb6a5625ae2 | [
"MIT"
] | null | null | null | chapter_2/name_cases.py | superbe/PythonCrashCourse | c8781f68b0e9e68e54d48cce5224ecb6a5625ae2 | [
"MIT"
] | null | null | null | chapter_2/name_cases.py | superbe/PythonCrashCourse | c8781f68b0e9e68e54d48cce5224ecb6a5625ae2 | [
"MIT"
] | null | null | null | name = 'eric Pearson'
# Упражнение 3.
message = f'Hello {name}, would you like to learn some Python today?'
print(message)
# Упражнение 4.
message = f'Hello {name.lower()}, would you like to learn some Python today?'
print(message)
message = f'Hello {name.upper()}, would you like to learn some Python today?'
print(message)
message = f'Hello {name.title()}, would you like to learn some Python today?'
print(message)
# Упражнение 5.
message = f'Albert Einstein once said, "A person who never made a mistake never tried anything new."'
print(message)
# Упражнение 6.
famous_person = 'Albert Einstein'
message = f'{famous_person.title()} once said, "A person who never made a mistake never tried anything new."'
print(message)
# Упражнение 7.
famous_person = ' \t\nAlbert Einstein \t\n'
print(f'|{famous_person} once said, "A person who never made a mistake never tried anything new."|')
print(f'|{famous_person.lstrip()} once said, "A person who never made a mistake never tried anything new."|')
print(f'|{famous_person.rstrip()} once said, "A person who never made a mistake never tried anything new."|')
print(f'|{famous_person.strip()} once said, "A person who never made a mistake never tried anything new."|')
| 38.21875 | 109 | 0.732625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,031 | 0.809898 |
2b841955f5296b5807aadaca8de6d7eb61f48da2 | 1,891 | py | Python | ccdproc/tests/test_ccdproc_logging.py | cdeil/ccdproc | 1bcfb0142669243325bfce05b4f2fc45ea013f02 | [
"BSD-3-Clause"
] | null | null | null | ccdproc/tests/test_ccdproc_logging.py | cdeil/ccdproc | 1bcfb0142669243325bfce05b4f2fc45ea013f02 | [
"BSD-3-Clause"
] | null | null | null | ccdproc/tests/test_ccdproc_logging.py | cdeil/ccdproc | 1bcfb0142669243325bfce05b4f2fc45ea013f02 | [
"BSD-3-Clause"
] | null | null | null | from astropy.extern import six
from astropy.tests.helper import pytest
import astropy.units as u
from ..ccdproc import create_variance, Keyword
@pytest.mark.parametrize('key', [
    'short',
    'toolongforfits'])
def test_log_string(ccd_data, key):
    # Covers both a short FITS keyword and one longer than 8 characters
    # (stored via the HIERARCH convention).
    add_key = key
    new = create_variance(ccd_data, readnoise=3 * ccd_data.unit,
                          add_keyword=add_key)
    # Keys should be added to new but not to ccd_data and should have
    # no value.
    assert add_key in new.meta
    assert add_key not in ccd_data.meta
    # Long keyword names should be accessible with just the keyword name
    # without HIERARCH -- is it?
    assert new.meta[add_key] == ''
def test_log_keyword(ccd_data):
    # A Keyword object passed as add_keyword should log name and value.
    key = 'filter'
    key_val = 'V'
    kwd = Keyword(key, value=key_val)
    new = create_variance(ccd_data, readnoise=3 * ccd_data.unit,
                          add_keyword=kwd)
    # Was the Keyword added with the correct value?
    assert kwd.name in new.meta
    assert kwd.name not in ccd_data.meta
    assert new.meta[kwd.name] == key_val
def test_log_dict(ccd_data):
    # A dict passed as add_keyword should log every key/value pair.
    keys_to_add = {
        'process': 'Added variance',
        'n_images_input': 1,
        'current_temp': 42.9
    }
    new = create_variance(ccd_data, readnoise=3 * ccd_data.unit,
                          add_keyword=keys_to_add)
    for k, v in six.iteritems(keys_to_add):
        # Were all dictionary items added?
        assert k in new.meta
        assert k not in ccd_data.meta
        assert new.meta[k] == v
def test_log_bad_type_fails(ccd_data):
    add_key = 15  # anything not string and not dict-like will work here
    # Do we fail with non-string, non-Keyword, non-dict-like value?
    with pytest.raises(AttributeError):
        create_variance(ccd_data, readnoise=3 * ccd_data.unit,
                        add_keyword=add_key)
| 33.175439 | 73 | 0.64146 | 0 | 0 | 0 | 0 | 575 | 0.304072 | 0 | 0 | 466 | 0.24643 |
2b85a8cfcce972531e03d79d1f89a1b7627c598a | 3,102 | py | Python | harvester/sharekit/tests/factories.py | surfedushare/search-portal | 708a0d05eee13c696ca9abd7e84ab620d3900fbe | [
"MIT"
] | 2 | 2021-08-19T09:40:59.000Z | 2021-12-14T11:08:20.000Z | harvester/sharekit/tests/factories.py | surfedushare/search-portal | 708a0d05eee13c696ca9abd7e84ab620d3900fbe | [
"MIT"
] | 159 | 2020-05-14T14:17:34.000Z | 2022-03-23T10:28:13.000Z | harvester/sharekit/tests/factories.py | nppo/search-portal | aedf21e334f178c049f9d6cf37cafd6efc07bc0d | [
"MIT"
] | 1 | 2021-11-11T13:37:22.000Z | 2021-11-11T13:37:22.000Z | import os
import factory
from datetime import datetime
from urllib.parse import quote
from django.conf import settings
from django.utils.timezone import make_aware
from sharekit.models import SharekitMetadataHarvest
class SharekitMetadataHarvestFactory(factory.django.DjangoModelFactory):
    """Builds SharekitMetadataHarvest fixtures whose response bodies come
    from canned Sharekit API JSON files on disk."""

    class Meta:
        model = SharekitMetadataHarvest
        strategy = factory.BUILD_STRATEGY

    class Params:
        # Knobs that shape the fake harvest; not stored on the model itself.
        set = "edusources"
        is_initial = True       # initial harvest vs. delta harvest
        is_empty = False        # select the "empty" fixture file
        number = 0              # page / fixture sequence number
        is_restricted = False   # use the private (restricted) channel

    # Initial harvests start at the epoch; deltas at a fixed reference time.
    since = factory.Maybe(
        "is_initial",
        make_aware(datetime(year=1970, month=1, day=1)),
        make_aware(datetime(year=2020, month=2, day=10, hour=13, minute=8, second=39, microsecond=315000))
    )
    # NOTE(review): this literal appears to be shadowed by the
    # lazy_attribute of the same name defined below — confirm intent.
    set_specification = "edusources"
    status = 200
    head = {
        "content-type": "application/json"
    }

    @factory.lazy_attribute
    def set_specification(self):
        # Restricted fixtures target the private Sharekit channel.
        return "edusourcesprivate" if self.is_restricted else "edusources"

    @factory.lazy_attribute
    def uri(self):
        # Rebuild the harvest URL exactly as the harvester would request it,
        # with URL-encoded filter and pagination query parameters.
        base = f"api.acc.surfsharekit.nl/api/jsonapi/channel/v1/{self.set_specification}/repoItems?"
        modified_parameter = quote(f"filter[modified][GE]={self.since:%Y-%m-%dT%H:%M:%SZ}", safe="=")
        page_size_parameter = quote("page[size]=25", safe="=")
        page_number_parameter = quote(f"page[number]={self.number+1}", safe="=")
        if self.is_initial and self.number > 0:
            params = [modified_parameter, page_number_parameter, page_size_parameter]
        else:
            params = [modified_parameter, page_size_parameter]
        return base + "&".join(params)

    @factory.lazy_attribute
    def request(self):
        # Serialized representation of the outgoing HTTP request.
        return {
            "args": [self.set_specification, f"{self.since:%Y-%m-%dT%H:%M:%SZ}"],
            "kwargs": {},
            "method": "get",
            "url": "https://" + self.uri,
            "headers": {},
            "data": {}
        }

    @factory.lazy_attribute
    def body(self):
        # Load the canned JSON response matching the requested fixture.
        if self.is_empty:
            response_type = "empty"
        elif self.is_initial:
            response_type = "initial"
        else:
            response_type = "delta"
        response_file = f"sharekit-api.{response_type}.{self.number}.json"
        response_file_path = os.path.join(settings.BASE_DIR, "sharekit", "fixtures", response_file)
        with open(response_file_path, "r") as response:
            response_string = response.read()
        # We modify the reference to the own link to indicate the link has restricted materials if necessary
        if self.is_restricted:
            return response_string.replace("/edusources/", "/edusourcesprivate/")
        return response_string

    @classmethod
    def create_common_sharekit_responses(cls, include_delta=False, is_restricted=False):
        # Convenience helper: the two initial pages, plus an optional delta.
        cls.create(is_initial=True, number=0, is_restricted=is_restricted)
        cls.create(is_initial=True, number=1, is_restricted=is_restricted)
        if include_delta:
            cls.create(is_initial=False, number=0, is_restricted=is_restricted)
2b85b84ca70ad679c297d70925380a546779f12d | 588 | py | Python | kunai/torch_utils/seed.py | mjun0812/kunai | 6a457c7242ed98dadb29f7002a3c0385ae6c1701 | [
"MIT"
] | null | null | null | kunai/torch_utils/seed.py | mjun0812/kunai | 6a457c7242ed98dadb29f7002a3c0385ae6c1701 | [
"MIT"
] | null | null | null | kunai/torch_utils/seed.py | mjun0812/kunai | 6a457c7242ed98dadb29f7002a3c0385ae6c1701 | [
"MIT"
] | null | null | null | import random
import numpy as np
import torch
def worker_init_fn(worker_id):
    """Re-seed numpy inside a PyTorch DataLoader worker.

    Offsets the current base numpy seed by the worker id so that the
    workers do not all produce identical random streams.

    Args:
        worker_id (int): index of the DataLoader worker.
    """
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)
def fix_seed(seed):
    """Seed the random, numpy and torch RNGs with the same value.

    Args:
        seed (int): seed value.

    Returns:
        int: the seed that was applied.
    """
    # Apply the same seed to every RNG the project relies on.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    torch.cuda.manual_seed_all(seed)
    return seed
| 17.294118 | 59 | 0.634354 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.462585 |
2b8606d666e17ddbca292ed6f0207c8119aff103 | 5,393 | py | Python | paper/plots.py | vishakad/animate | 0da18be596bf7e462d47bef86aff4c71fc9cffbb | [
"MIT"
] | 5 | 2019-01-23T18:03:51.000Z | 2019-11-22T23:06:04.000Z | paper/plots.py | vishakad/animate | 0da18be596bf7e462d47bef86aff4c71fc9cffbb | [
"MIT"
] | 5 | 2019-03-27T21:47:04.000Z | 2019-08-07T00:22:34.000Z | paper/plots.py | vishakad/animate | 0da18be596bf7e462d47bef86aff4c71fc9cffbb | [
"MIT"
] | 4 | 2019-02-14T23:18:24.000Z | 2019-05-08T04:08:32.000Z | import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import colors as mplcolors
import CBcm
import numpy as np
import util
rcParams['svg.fonttype'] = 'none'
def makeColours(vals, cmapStr, returnDict=False):
    """Map an array of scalar values onto RGBA colours from a colormap.

    Args:
        vals: numpy array of scalar values to colour-code.
        cmapStr: matplotlib colormap (name or object) to sample.
        returnDict: when True, also return a {colour: value} mapping.

    Returns:
        A list of RGBA tuples, or [colours, {colour: value}] when
        returnDict is True.
    """
    origVals = np.copy(vals)
    if vals.max() > 1:
        # Rescale to [0, 1] before building the normaliser.
        vals = vals / vals.max()
    # vmax is shrunk slightly so the top value does not saturate the map.
    norm = mplcolors.Normalize(vmin=vals.min(), vmax=vals.max() * 0.95)
    mapper = cm.ScalarMappable(norm=norm, cmap=cmapStr)
    colours = [mapper.to_rgba(v) for v in vals]
    if not returnDict:
        return colours
    # Pair each unique original value with the colour of its scaled twin.
    uniqueOrigVals = np.unique(origVals)
    uniqueVals = np.unique(vals)
    valColourDict = {}
    for val, colour in zip(uniqueOrigVals,
                           (mapper.to_rgba(v) for v in uniqueVals)):
        valColourDict[colour] = val
    return [colours, valColourDict]
def manyErrorBarsPlot( x, yMeans, yStds, lineLevels, cmap=CBcm.CB2cm['YeBl'], xIsMean=False, cbarText='', xAxisText='', yAxisText='', titleText='', ylim=[], fontsize=10, plotCbar=True, ls='solid' ):
    """Plot one errorbar line per entry of lineLevels, coloured by level.

    Args:
        x: shared x values for every line.
        yMeans, yStds: 2D arrays of means and error magnitudes; which
            axis indexes the lines depends on xIsMean.
        lineLevels: list/array of level values, one per line; also used
            to label the colorbar.
        cmap: colormap used to colour the lines.
        xIsMean: when True, lines are the columns of yMeans/yStds,
            otherwise the rows.
        plotCbar: when True, draw a colorbar in a second axis.
        ylim: optional (lo, hi) y-axis limits.
            NOTE(review): mutable default is never mutated here, but a
            tuple default would be safer.

    Returns:
        [axis, colorbar] when plotCbar is True, else [axis, colours].
    """
    if plotCbar:
        # Two-panel layout: main plot (8/9 width) plus colorbar strip.
        plt.figure()
        gs = mpl.gridspec.GridSpec(1, 2, width_ratios=[8, 1])
        axes = [plt.subplot( gsAx ) for gsAx in gs]
    else:
        axes = [plt.gca()]
    if isinstance( lineLevels, list):
        lineLevels = np.array( lineLevels )
    colours = makeColours( lineLevels, cmap )
    if xIsMean:
        for idx in range(yMeans.shape[1]):
            axes[0].errorbar( x, yMeans[:,idx], yerr=yStds[:,idx], capthick=2, color=colours[idx], ms=2, marker='.', ls=ls )
    else:
        for idx in range(yMeans.shape[0]):
            axes[0].errorbar( x, yMeans[idx,:], yerr=yStds[idx,:], capthick=2, color=colours[idx], ms=2, marker='.', ls=ls )
    axes[0].set_xlabel( xAxisText, fontsize=fontsize )
    axes[0].set_ylabel( yAxisText, fontsize=fontsize )
    axes[0].set_title( titleText )
    if len(ylim) > 0:
        axes[0].set_ylim(ylim)
    if plotCbar:
        # Linear colorbar spanning the range of lineLevels, with one
        # centred tick per level.
        norm = mpl.colors.Normalize( vmin=min(lineLevels), vmax=max(lineLevels) )
        cbl = mpl.colorbar.ColorbarBase( axes[1], norm=norm, cmap=cmap,
                                         orientation='vertical', values=lineLevels )
        tickBoundaries = np.arange( len(lineLevels) )*1.0/len(lineLevels)
        tickBoundaries = np.append( tickBoundaries, 1 )
        tickLocs = []
        for idx in range(len(tickBoundaries)-1):
            tickLocs.append( 0.5 * (tickBoundaries[idx] + tickBoundaries[idx+1]))
        cbl.ax.yaxis.set_ticks( tickLocs )
        cbl.ax.yaxis.set_ticklabels( lineLevels )
        cbl.ax.yaxis.set_label_text( cbarText )
    plt.tight_layout()
    if plotCbar:
        return [axes[0],cbl]
    else:
        return [axes[0],colours]
def getTpr(genome, fprTarget):
    """Binary-search a read-count threshold whose false-positive rate
    matches fprTarget, and report the resulting rates.

    Args:
        genome: pandas DataFrame with a numeric 'ratio' column and a
            'binding' column where "false-positive" marks negatives.
            Its 'inference' column is overwritten as a side effect.
        fprTarget: desired false-positive rate.

    Returns:
        [fpr, tpr, threshold] for the threshold found.
    """
    if fprTarget > 1 - 1e-6:
        # Degenerate request: call everything as binding.
        return [1, 1, -np.inf]
    readCounts = genome['ratio'].values
    lowerBound, upperBound = min(readCounts), max(readCounts)
    numFP = genome.query('binding == "false-positive"').shape[0]
    numTP = genome.shape[0] - numFP
    fpr = 1.0
    iteration, maxIterations = 0, 100
    while abs(fpr - fprTarget) > 0.01 * fprTarget and iteration < maxIterations:
        threshold = (lowerBound + upperBound) / 2.0
        # Re-label every row against the candidate threshold.
        genome.loc[:, 'inference'] = 'no-binding'
        genome.loc[readCounts > threshold, 'inference'] = 'binding'
        if threshold < 1e-8:
            # Threshold collapsed to zero: accept the target as-is.
            fpr = fprTarget
            print( [fpr,fprTarget,threshold])
            break
        fpr = genome.query('(inference == "binding") & (binding == "false-positive")').shape[0] * 1.0 / numFP
        if fpr > fprTarget:
            lowerBound = threshold
        else:
            upperBound = threshold
        iteration += 1
    tpr = genome.query('(inference == "binding") & (binding != "false-positive")').shape[0] * 1.0 / numTP
    return [fpr, tpr, threshold]
def makeROC( genome, color, ax=None, ls='solid', fprTargets=[], label="" ):
    """Plot an ROC curve for `genome` by sweeping target FPR values.

    Args:
        genome: pandas DataFrame consumed by getTpr/util.findAuROC.
        color: line colour.
        ax: axis to draw on; a new figure is created when None.
        fprTargets: FPR values to evaluate; defaults to 0.0..1.0 in 0.1
            steps when empty (the default list is only rebound, never
            mutated).
        label: legend label; when empty, only the auROC value is shown.

    Returns:
        [axis, auROC, readCountThresholds].
    """
    if len( fprTargets ) == 0:
        fprTargets = [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
    fprVals = []
    tprVals = []
    readCountThresholds = []
    for fprTarget in fprTargets:
        if abs( fprTarget ) < 1e-6:
            # FPR 0 corresponds to the maximal threshold: nothing called.
            fpr = 0
            tpr = 0
            readCountThreshold = max( genome['ratio'].values )
        else:
            fpr, tpr, readCountThreshold = getTpr( genome, fprTarget )
        fprVals.append( fpr )
        tprVals.append( tpr )
        readCountThresholds.append( readCountThreshold )
    itr = 0  # NOTE(review): unused; left in place to keep behavior identical
    if ax is None:
        fig = plt.figure()
        ax = plt.gca()
    auROC = util.findAuROC( genome )
    # The auROC is folded into the legend label either way.
    if label == "":
        ax.plot( fprVals, tprVals, color=color, lw=0.75, ls=ls, label='{:.2f}'.format(auROC), marker='o' , markersize=2 )
    else:
        ax.plot( fprVals, tprVals, color=color, lw=0.75, ls=ls, label=label + ' ({:.2f})'.format(auROC), marker='o', markersize=2 )
    ax.set_xlim(0,1)
    ax.grid('on',ls='dashed')
    ax.legend(fontsize=8)
    return [ax,auROC,readCountThresholds]
| 34.350318 | 198 | 0.595031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 473 | 0.087706 |
2b8700d29d570214a09d942f5f9f7ee6ff8ff4f6 | 4,195 | py | Python | tests/world/test_world.py | Finistere/antidote | 97751e0e6a1b8bd638a1c33212345c7a84ad97b8 | [
"MIT"
] | 52 | 2017-12-17T19:52:37.000Z | 2022-03-29T10:24:04.000Z | tests/world/test_world.py | Finistere/antidote | 97751e0e6a1b8bd638a1c33212345c7a84ad97b8 | [
"MIT"
] | 32 | 2018-11-02T08:49:16.000Z | 2022-03-25T22:23:30.000Z | tests/world/test_world.py | Finistere/antidote | 97751e0e6a1b8bd638a1c33212345c7a84ad97b8 | [
"MIT"
] | 5 | 2019-05-17T18:26:14.000Z | 2021-12-25T23:13:31.000Z | from typing import Callable
import pytest
from antidote import From, FromArg, Get, Service, factory, world
from antidote._compatibility.typing import Annotated
from antidote._internal.world import LazyDependency
from antidote._providers import FactoryProvider, ServiceProvider
from antidote.exceptions import DependencyNotFoundError, FrozenWorldError
from .utils import DummyIntProvider
@pytest.fixture(autouse=True)
def empty_world():
    # Every test in this module runs inside a fresh, empty test world.
    with world.test.empty():
        yield
class A:
    # Minimal dependency class used as a registration target in these tests.
    pass
def test_get():
    # Register the service provider and a pre-built singleton instance.
    world.provider(ServiceProvider)
    a = A()
    world.test.singleton(A, a)
    assert world.get(A) is a

    class B(Service):
        pass

    assert world.get[A](A) is a
    assert world.get[B]() is world.get(B)
    b = B()
    # An existing dependency wins over the supplied default.
    assert world.get(B, default=b) is world.get(B)
    assert world.get[B](default=b) is world.get(B)
    with pytest.raises(DependencyNotFoundError):
        world.get(Service)
    with pytest.raises(DependencyNotFoundError):
        world.get[Service]()
    # The default is only used for unknown dependencies.
    assert world.get[Service](default=b) is b
def test_get_type_safety():
    # Register a value whose type does not match the declared dependency.
    x = object()
    world.test.singleton(A, x)
    assert world.get(A) is x  # without type hints, it should not fail
    with pytest.raises(TypeError):
        world.get[A]()

    class B:
        pass

    # The same type check applies to defaults.
    assert world.get(B, default=x) is x
    with pytest.raises(TypeError, match=".*default.*"):
        world.get[B](default=x)
def test_get_factory():
    world.provider(FactoryProvider)

    @factory
    def build_a() -> A:
        return A()

    # Both spellings of a factory-built dependency resolve identically.
    assert world.get[A] @ build_a is world.get(A @ build_a)
@pytest.mark.parametrize('getter', [
    pytest.param(world.get, id='get'),
    pytest.param(world.get[A], id='get[A]'),
    pytest.param(lambda x: world.lazy(x).get(), id='lazy'),
    pytest.param(lambda x: world.lazy[A](x).get(), id='lazy[A]')
])
def test_annotation_support(getter: Callable[[object], object]):
    # All four retrieval styles must honour Annotated[...] markers.
    class Maker:
        def __rmatmul__(self, other):
            return 'maker'

    world.test.singleton({
        A: A(),
        'a': A(),
        'maker': A()
    })
    assert getter(Annotated[A, object()]) is world.get(A)
    assert getter(Annotated[A, Get('a')]) is world.get('a')  # noqa: F821
    assert getter(Annotated[A, From(Maker())]) is world.get('maker')

    # Multiple markers and FromArg are rejected outside injection context.
    with pytest.raises(TypeError):
        getter(Annotated[A, Get('a'), Get('a')])  # noqa: F821

    with pytest.raises(TypeError):
        getter(Annotated[A, FromArg(lambda a: a)])  # noqa: F821
def test_lazy():
    world.test.singleton({
        'x': object(),
        A: A()
    })
    # world.lazy wraps the dependency without resolving it immediately.
    lazy = world.lazy('x')
    assert isinstance(lazy, LazyDependency)
    assert lazy.unwrapped == 'x'
    assert lazy.get() == world.get('x')
    lazy = world.lazy[A](A)
    assert isinstance(lazy, LazyDependency)
    assert lazy.unwrapped == A
    assert lazy.get() == world.get(A)
    # With no argument, lazy[A] defaults to the class itself.
    assert world.lazy[A]().get() is world.get(A)
def test_lazy_type_safety():
    # Type checking happens at .get() time, not at wrapping time.
    x = object()
    world.test.singleton(A, x)
    assert world.lazy(A).get() is x
    with pytest.raises(TypeError):
        world.lazy[A]().get()
def test_lazy_factory():
    world.provider(FactoryProvider)

    @factory
    def build_a() -> A:
        return A()

    # Lazy resolution of a factory dependency matches eager resolution.
    assert (world.lazy[A] @ build_a).get() is world.get(A @ build_a)
def test_freeze():
    world.provider(ServiceProvider)
    provider = world.get[ServiceProvider]()

    class Service:
        pass

    # After freezing, neither singletons nor registrations are allowed.
    world.freeze()
    with pytest.raises(FrozenWorldError):
        world.test.singleton("test", "x")
    with pytest.raises(FrozenWorldError):
        provider.register(Service, scope=None)
def test_add_provider():
    # DummyIntProvider doubles integer dependencies.
    world.provider(DummyIntProvider)
    assert world.get(10) == 20
def test_no_duplicate_provider():
    world.provider(DummyIntProvider)
    assert world.get(10) == 20
    # Registering the same provider class twice must be rejected.
    with pytest.raises(ValueError, match=".*already exists.*"):
        world.provider(DummyIntProvider)
@pytest.mark.parametrize('p, expectation', [
    (object(), pytest.raises(TypeError, match=".*RawProvider.*")),
    (A, pytest.raises(TypeError, match=".*RawProvider.*"))
])
def test_invalid_add_provider(p, expectation):
    # Only RawProvider subclasses may be registered as providers.
    with expectation:
        world.provider(p)
| 23.971429 | 73 | 0.647676 | 172 | 0.041001 | 61 | 0.014541 | 1,347 | 0.321097 | 0 | 0 | 252 | 0.060072 |
2b871dae8f560cf13fae7b1febeb4db0dbbc9651 | 2,355 | py | Python | MessageBoard.py | dntoll/pycom-lora-mesh-with-ap | 445ea5871b96f46e887514cdbe79940ca76d0023 | [
"MIT"
] | 1 | 2020-05-28T16:50:13.000Z | 2020-05-28T16:50:13.000Z | MessageBoard.py | dntoll/pycom-lora-mesh-with-ap | 445ea5871b96f46e887514cdbe79940ca76d0023 | [
"MIT"
] | null | null | null | MessageBoard.py | dntoll/pycom-lora-mesh-with-ap | 445ea5871b96f46e887514cdbe79940ca76d0023 | [
"MIT"
] | 1 | 2021-03-07T06:30:45.000Z | 2021-03-07T06:30:45.000Z | #!/usr/bin/env python
#
# Copyright (c) 2019, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
__version__ = '1'
from Message import Message
class MessageBoard:
    """Keeps track of mesh messages in three queues: received messages,
    messages waiting to be sent, and messages confirmed as sent."""

    def __init__(self, meshState):
        self.received = []   # messages addressed to us (or broadcasts)
        self.toBeSent = []   # outgoing messages not yet confirmed
        self.sent = []       # outgoing messages confirmed as delivered
        self.meshState = meshState

    def sendMessage(self, message):
        # Queue the message; actual transmission is driven externally via
        # getMessagesToBeSent()/sendCompleted().
        self.toBeSent.append(message)

    def receiveMessage(self, message):
        # We only care about messages directed to us, plus broadcasts.
        if self.meshState.isDirectedToMe(message.target):
            if message.isACK:
                # An acknowledgement for one of our outgoing messages.
                self._receivedAccMessageForMe(message);
            else:
                # Acknowledge and store a normal message addressed to us.
                self.sendAcc(message)
                self.received.append(message)
        elif message.isBroadCast():
            if message.isDecoration():
                # Decorations update shared mesh state instead of the inbox.
                self.meshState.updateOthersDecorations(message)
            else:
                self.received.append(message)

    def _receivedAccMessageForMe(self, message):
        # An ACK arrived: move the matching outgoing message from the
        # to-be-sent queue into the sent queue.
        newSendList = []
        for sent in self.toBeSent:
            if message.isAccForMessage(sent) == False:
                newSendList.append(sent)
            else:
                self.sent.append(sent)
        self.toBeSent = newSendList  # remove the acknowledged message

    def sendCompleted(self):
        # Called after a transmission round: ACKs and broadcasts need no
        # acknowledgement, so they leave the queue immediately; unicast
        # data messages stay queued until their ACK arrives.
        newSendList = []
        for sent in self.toBeSent:
            if sent.isACK == False and sent.isBroadCast() == False:
                newSendList.append(sent)
            else:
                if sent.isDecoration():
                    # Decorations are fire-and-forget; not archived.
                    pass
                else:
                    self.sent.append(sent)
        self.toBeSent = newSendList

    def getReceivedMessages(self):
        return self.received

    def getMessagesToBeSent(self):
        return self.toBeSent

    def getMessagesSent(self):
        return self.sent

    def sendAcc(self, message):
        # Note: sender and target swap places here — the ACK goes back
        # to whoever sent us the original message.
        accMessage = Message(message.content, message.sender, message.target, 0, True, False, False);
        self.sendMessage(accMessage)
2b8826183338e351fe4dbe3ffb007fda739ce1c8 | 467 | py | Python | datastrucutre/array/find_given_sum_in_array.py | abhishektyagi2912/python-dsa | 8f51f15a091ee76e00fb34abc232c23cb68440cb | [
"MIT"
] | 1 | 2021-05-02T05:43:34.000Z | 2021-05-02T05:43:34.000Z | datastrucutre/array/find_given_sum_in_array.py | abhishektyagi2912/python-dsa | 8f51f15a091ee76e00fb34abc232c23cb68440cb | [
"MIT"
] | null | null | null | datastrucutre/array/find_given_sum_in_array.py | abhishektyagi2912/python-dsa | 8f51f15a091ee76e00fb34abc232c23cb68440cb | [
"MIT"
] | null | null | null | def find_sum(arr, s):
curr_sum = arr[0]
start = 0
n = len(arr) - 1
i = 1
while i <= n:
while curr_sum > s and start < i:
curr_sum = curr_sum - arr[start]
start += 1
if curr_sum == s:
return "Found between {} and {}".format(start, i - 1)
curr_sum = curr_sum + arr[i]
i += 1
return "Sum not found"
arr = [15, 2, 4, 8, 9, 5, 10, 23]
print(find_sum(arr, 6))
| 18.68 | 65 | 0.466809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.085653 |
2b88c5258ed3a32b0f868be74797639d674d4da1 | 256 | py | Python | Part_3_advanced/m08_abstract_protocol/abstract_class/homework_1_start/new_movies/rental_directory.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m08_abstract_protocol/abstract_class/homework_1_start/new_movies/rental_directory.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m08_abstract_protocol/abstract_class/homework_1_start/new_movies/rental_directory.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | from new_movies.random_data_utility import random_generator
available_movies = random_generator.generate_random_movies(movies_number=15)
available_games = random_generator.generate_random_games()
def add_movie(movie):
available_movies.append(movie)
| 28.444444 | 76 | 0.859375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2b8975fc52de46e8e473c2014db381061743a8e1 | 598 | py | Python | mall/apps/goods/views.py | codedaliu/meiduo | 909d75eb55e52f9dae8879c5ad1d48ffb52fa498 | [
"MIT"
] | null | null | null | mall/apps/goods/views.py | codedaliu/meiduo | 909d75eb55e52f9dae8879c5ad1d48ffb52fa498 | [
"MIT"
] | null | null | null | mall/apps/goods/views.py | codedaliu/meiduo | 909d75eb55e52f9dae8879c5ad1d48ffb52fa498 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from rest_framework.views import APIView
from contents.serializers import HotSKUListSerializer
from goods.models import SKU
class HomeAPIView(APIView):
pass
'''
列表数据
热销数据:应该是到哪个分类去获取哪个分类的热销数据中
1.获取分类id
2.根据id获取数据
3.将数据转化为字典
4返回相应
'''
from rest_framework.generics import ListAPIView
class HotSKUListAPIView(ListAPIView):
def get_queryset(self):
category_id = self.kwargs['category_id']
return SKU.objects.filter(category_id=category_id).order_by('-sales')[:2]
serializer_class = HotSKUListSerializer | 19.290323 | 81 | 0.779264 | 277 | 0.382597 | 0 | 0 | 0 | 0 | 0 | 0 | 250 | 0.345304 |
2b8a843980224263fdf89972e3e7e0691943f2f7 | 216 | py | Python | tests/test_winterspringbl.py | lamter/slavewg | c3a4098b6c2cbfd232f8ed2290141b7f61d7db6f | [
"Apache-2.0"
] | 3 | 2020-08-13T15:04:33.000Z | 2021-03-12T16:12:39.000Z | tests/test_winterspringbl.py | lamter/slavewg | c3a4098b6c2cbfd232f8ed2290141b7f61d7db6f | [
"Apache-2.0"
] | null | null | null | tests/test_winterspringbl.py | lamter/slavewg | c3a4098b6c2cbfd232f8ed2290141b7f61d7db6f | [
"Apache-2.0"
] | null | null | null | import slavewg
from threading import Event
from queue import Queue
def test_runLoop():
q = Queue()
s = Event()
s.set()
lbl = slavewg.LootBlackLotus(s, q)
lbl.do(lbl.pos_winterspring_mountain)
| 15.428571 | 41 | 0.680556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2b8bd9a6050ec09b335cae1c2fa6fb5dd7da1961 | 16,475 | py | Python | src/sdk/python/TeraSdk.py | yvxiang/tera | 55fd460efb33a96b5f72527469843189c04b5410 | [
"BSD-3-Clause"
] | null | null | null | src/sdk/python/TeraSdk.py | yvxiang/tera | 55fd460efb33a96b5f72527469843189c04b5410 | [
"BSD-3-Clause"
] | null | null | null | src/sdk/python/TeraSdk.py | yvxiang/tera | 55fd460efb33a96b5f72527469843189c04b5410 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tera Python SDK. It needs a libtera_c.so
TODO(taocipian) __init__.py
"""
from ctypes import CFUNCTYPE, POINTER
from ctypes import byref, cdll, string_at
from ctypes import c_bool, c_char_p, c_void_p
from ctypes import c_int32, c_int64, c_ubyte, c_uint64
class ScanDescriptor(object):
    """Descriptor for a scan operation.

    Scans all data in [start_key, end_key); by default each cell
    returns only its latest version.
    """

    def __init__(self, start_key):
        """
        Args:
            start_key(string): start position of the scan (inclusive).
        """
        self.desc = lib.tera_scan_descriptor(start_key,
                                             c_uint64(len(start_key)))

    def SetEnd(self, end_key):
        """Set the (exclusive) end position of the scan.

        When this method is not called, end_key defaults to "infinity".

        Args:
            end_key(string): end position; not included in the results.
        """
        lib.tera_scan_descriptor_set_end(self.desc, end_key,
                                         c_uint64(len(end_key)))

    def SetMaxVersions(self, versions):
        """Set how many versions of each cell to return at most.

        When not called, only the latest version is scanned.

        Args:
            versions(long): maximum number of versions per cell.
        """
        lib.tera_scan_descriptor_set_max_versions(self.desc, versions)

    def SetBufferSize(self, buffer_size):
        # Server-side buffer size used when packing scan results.
        lib.tera_scan_descriptor_set_buffer_size(self.desc, buffer_size)

    def SetIsAsync(self, is_async):
        # Toggle asynchronous (streaming) scan mode.
        lib.tera_scan_descriptor_set_is_async(self.desc, is_async)

    def SetPackInterval(self, interval):
        # Interval used when packing scan results into batches.
        lib.tera_scan_descriptor_set_pack_interval(self.desc, interval)

    def AddColumn(self, cf, qu):
        # Restrict the scan to column <cf>:<qu>.
        lib.tera_scan_descriptor_add_column(self.desc, cf,
                                            qu, c_uint64(len(qu)))

    def AddColumnFamily(self, cf):
        # Restrict the scan to every column of family <cf>.
        lib.tera_scan_descriptor_add_column_family(self.desc, cf)

    def IsAsync(self):
        # Whether asynchronous scan mode is enabled.
        return lib.tera_scan_descriptor_is_async(self.desc)

    def SetFilterString(self, filter_string):
        # Server-side filter expression applied during the scan.
        lib.tera_scan_descriptor_set_filter_string(self.desc, filter_string)

    def SetSnapshot(self, sid):
        # Scan against snapshot <sid> instead of live data.
        lib.tera_scan_descriptor_set_snapshot(self.desc, sid)

    def SetTimeRange(self, start, end):
        # Only return cell versions whose timestamp lies in [start, end].
        lib.tera_scan_descriptor_set_time_range(self.desc, start, end)
class ResultStream(object):
    """Output stream returned by a scan operation."""

    def __init__(self, stream):
        self.stream = stream

    def Done(self):
        """Whether this stream has been fully consumed.

        Returns:
            (bool) True if exhausted, False otherwise.
        """
        err = c_char_p()
        return lib.tera_result_stream_done(self.stream, byref(err))

    def Next(self):
        """Advance to the next cell."""
        lib.tera_result_stream_next(self.stream)

    def RowName(self):
        """
        Returns:
            (string) the rowkey of the current cell
        """
        value = POINTER(c_ubyte)()
        vallen = c_uint64()
        lib.tera_result_stream_row_name(self.stream,
                                        byref(value), byref(vallen))
        return copy_string_to_user(value, long(vallen.value))

    def Family(self):
        """
        Returns:
            (string) the ColumnFamily of the current cell
        """
        value = POINTER(c_ubyte)()
        vallen = c_uint64()
        lib.tera_result_stream_family(self.stream, byref(value), byref(vallen))
        return copy_string_to_user(value, long(vallen.value))

    def Qualifier(self):
        """
        Returns:
            (string) the Qualifier of the current cell
        """
        value = POINTER(c_ubyte)()
        vallen = c_uint64()
        lib.tera_result_stream_qualifier(self.stream,
                                         byref(value), byref(vallen))
        return copy_string_to_user(value, long(vallen.value))

    def ColumnName(self):
        """
        Returns:
            (string) the ColumnName of the current cell
            (i.e. ColumnFamily:Qualifier)
        """
        value = POINTER(c_ubyte)()
        vallen = c_uint64()
        lib.tera_result_stream_column_name(self.stream,
                                           byref(value), byref(vallen))
        return copy_string_to_user(value, long(vallen.value))

    def Value(self):
        """
        Returns:
            (string) the value of the current cell
        """
        value = POINTER(c_ubyte)()
        vallen = c_uint64()
        lib.tera_result_stream_value(self.stream, byref(value), byref(vallen))
        return copy_string_to_user(value, long(vallen.value))

    def Timestamp(self):
        """
        Returns:
            (long) the timestamp of the current cell, in Unix time
        """
        return lib.tera_result_stream_timestamp(self.stream)
class Client(object):
    """Entry point for accessing one Tera cluster.

    Recommended usage: one Client per cluster; create multiple Client
    objects if multiple clusters need to be accessed.
    """

    def __init__(self, conf_path, log_prefix):
        """
        Raises:
            TeraSdkException: failed to create the Client object
        """
        err = c_char_p()
        self.client = lib.tera_client_open(conf_path, log_prefix, byref(err))
        if self.client is None:
            raise TeraSdkException("open client failed:" + str(err.value))

    def OpenTable(self, name):
        """Open the table named <name>.

        Args:
            name(string): the table name

        Returns:
            (Table) the opened Table object

        Raises:
            TeraSdkException: failed to open the table
        """
        err = c_char_p()
        table_ptr = lib.tera_table_open(self.client, name, byref(err))
        if table_ptr is None:
            raise TeraSdkException("open table failed:" + err.value)
        return Table(table_ptr)
MUTATION_CALLBACK = CFUNCTYPE(None, c_void_p)
class RowMutation(object):
    """A batch of mutations applied to a single row.

    None of its operations (e.g. Put/DeleteColumn) takes effect until
    Table.ApplyMutation() is called.
    """

    def __init__(self, mutation):
        self.mutation = mutation

    def Put(self, cf, qu, value):
        """Write (or overwrite) the cell at ColumnFamily <cf>,
        Qualifier <qu> on this row with <value>.

        Args:
            cf(string): ColumnFamily name
            qu(string): Qualifier name
            value(string): cell value
        """
        lib.tera_row_mutation_put(self.mutation, cf,
                                  qu, c_uint64(len(qu)),
                                  value, c_uint64(len(value)))

    def DeleteColumn(self, cf, qu):
        """Delete the cell at ColumnFamily <cf>, Qualifier <qu> on this row.

        Args:
            cf(string): ColumnFamily name
            qu(string): Qualifier name
        """
        lib.tera_row_mutation_delete_column(self.mutation, cf,
                                            qu, c_uint64(len(qu)))

    def RowKey(self):
        """
        Returns:
            (string): the rowkey of this RowMutation
            (useful inside callbacks, for example)
        """
        value = POINTER(c_ubyte)()
        vallen = c_uint64()
        lib.tera_row_mutation_rowkey(self.mutation,
                                     byref(value), byref(vallen))
        return copy_string_to_user(value, long(vallen.value))

    def SetCallback(self, callback):
        """Set a completion callback.

        Calling this makes the mutation asynchronous
        (Table.ApplyMutation() returns immediately); otherwise it is
        synchronous (Table.ApplyMutation() blocks until the write
        completes).

        Args:
            callback(MUTATION_CALLBACK): user callback; it is always
                invoked eventually, regardless of outcome.
        """
        lib.tera_row_mutation_set_callback(self.mutation, callback)
class Table(object):
    """All reads, writes and scans on a table start from this class.

    Obtain a Table object via Client.OpenTable().
    """

    def __init__(self, table):
        self.table = table

    def NewRowMutation(self, rowkey):
        """Create a RowMutation object for <rowkey>.

        Args:
            rowkey(string): the rowkey to mutate

        Returns:
            (RowMutation): the RowMutation object
        """
        return RowMutation(lib.tera_row_mutation(self.table, rowkey,
                                                 c_uint64(len(rowkey))))

    def ApplyMutation(self, mutation):
        """Apply a mutation.

        Asynchronous if SetCallback() was called on it beforehand,
        synchronous otherwise.

        Args:
            mutation(RowMutation): the RowMutation object
        """
        lib.tera_table_apply_mutation(self.table, mutation.mutation)

    def IsPutFinished(self):
        """Whether *all* asynchronous writes on this table have finished.

        Returns:
            (bool) True if all finished, otherwise False.
        """
        return lib.tera_table_is_put_finished(self.table)

    def Get(self, rowkey, cf, qu, snapshot):
        """Synchronously get the value of one cell.

        Args:
            rowkey(string): the rowkey
            cf(string): ColumnFamily name
            qu(string): Qualifier name
            snapshot(long): snapshot id; pass 0 if snapshots are not used

        Raises:
            TeraSdkException: the read operation failed
        """
        err = c_char_p()
        value = POINTER(c_ubyte)()
        vallen = c_uint64()
        result = lib.tera_table_get(
            self.table, rowkey, c_uint64(len(rowkey)), cf,
            qu, c_uint64(len(qu)), byref(value), byref(vallen), byref(err),
            c_uint64(snapshot)
        )
        if not result:
            raise TeraSdkException("get record failed:" + err.value)
        return copy_string_to_user(value, long(vallen.value))

    def Put(self, rowkey, cf, qu, value):
        """Synchronously put the value of one cell.

        Args:
            rowkey(string): the rowkey
            cf(string): ColumnFamily name
            qu(string): Qualifier name
            value(string): the cell value

        Raises:
            TeraSdkException: the write operation failed
        """
        err = c_char_p()
        result = lib.tera_table_put(
            self.table, rowkey, c_uint64(len(rowkey)), cf,
            qu, c_uint64(len(qu)), value, c_uint64(len(value)), byref(err)
        )
        if not result:
            raise TeraSdkException("put record failed:" + err.value)

    def Delete(self, rowkey, cf, qu):
        """Synchronously delete one cell.

        Args:
            rowkey(string): the rowkey
            cf(string): ColumnFamily name
            qu(string): Qualifier name
        """
        lib.tera_table_delete(
            self.table, rowkey, c_uint64(len(rowkey)),
            cf, qu, c_uint64(len(qu))
        )

    def Scan(self, desc):
        """Start a scan operation.

        Args:
            desc(ScanDescriptor): the scan descriptor

        Raises:
            TeraSdkException: the scan failed
        """
        err = c_char_p()
        stream = lib.tera_table_scan(
            self.table,
            desc.desc,
            byref(err)
        )
        if stream is None:
            raise TeraSdkException("scan failed:" + err.value)
        return ResultStream(stream)
class TeraSdkException(Exception):
    """Error raised when a call into the Tera SDK fails.

    The human-readable failure description is kept in ``reason`` and is
    also what ``str()`` of the exception returns.
    """

    def __init__(self, reason):
        # Keep the description on the instance; __str__ echoes it back.
        self.reason = reason

    def __str__(self):
        return self.reason
def init_function_prototype():
    """Declare argtypes/restype for every libtera_c function this module uses.

    ctypes defaults every foreign function's restype to C ``int`` and does no
    argument checking, so each function must be declared here right after the
    library is loaded.  In particular, any function returning a pointer
    (client/table/descriptor/stream handles) must have ``restype = c_void_p``,
    or the pointer would be truncated to an int on 64-bit platforms.
    """
    ######################
    # scan result stream #
    ######################
    lib.tera_result_stream_done.argtypes = [c_void_p,
                                            POINTER(c_char_p)]
    lib.tera_result_stream_done.restype = c_bool

    lib.tera_result_stream_timestamp.argtypes = [c_void_p]
    lib.tera_result_stream_timestamp.restype = c_int64

    lib.tera_result_stream_column_name.argtypes = [c_void_p,
                                                   POINTER(POINTER(c_ubyte)),
                                                   POINTER(c_uint64)]
    lib.tera_result_stream_column_name.restype = None

    lib.tera_result_stream_family.argtypes = [c_void_p,
                                              POINTER(POINTER(c_ubyte)),
                                              POINTER(c_uint64)]
    lib.tera_result_stream_family.restype = None

    lib.tera_result_stream_next.argtypes = [c_void_p]
    lib.tera_result_stream_next.restype = None

    lib.tera_result_stream_qualifier.argtypes = [c_void_p,
                                                 POINTER(POINTER(c_ubyte)),
                                                 POINTER(c_uint64)]
    lib.tera_result_stream_qualifier.restype = None

    lib.tera_result_stream_row_name.argtypes = [c_void_p,
                                                POINTER(POINTER(c_ubyte)),
                                                POINTER(c_uint64)]
    lib.tera_result_stream_row_name.restype = None

    lib.tera_result_stream_value.argtypes = [c_void_p,
                                             POINTER(POINTER(c_ubyte)),
                                             POINTER(c_uint64)]
    lib.tera_result_stream_value.restype = None

    ###################
    # scan descriptor #
    ###################
    lib.tera_scan_descriptor.argtypes = [c_char_p, c_uint64]
    lib.tera_scan_descriptor.restype = c_void_p

    lib.tera_scan_descriptor_add_column.argtypes = [c_void_p, c_char_p,
                                                    c_void_p, c_uint64]
    lib.tera_scan_descriptor_add_column.restype = None

    lib.tera_scan_descriptor_add_column_family.argtypes = [c_void_p, c_char_p]
    lib.tera_scan_descriptor_add_column_family.restype = None

    lib.tera_scan_descriptor_is_async.argtypes = [c_void_p]
    lib.tera_scan_descriptor_is_async.restype = c_bool

    lib.tera_scan_descriptor_set_buffer_size.argtypes = [c_void_p, c_int64]
    lib.tera_scan_descriptor_set_buffer_size.restype = None

    lib.tera_scan_descriptor_set_end.argtypes = [c_void_p, c_void_p, c_uint64]
    lib.tera_scan_descriptor_set_end.restype = None

    lib.tera_scan_descriptor_set_filter_string.argtypes = [c_void_p, c_char_p]
    lib.tera_scan_descriptor_set_filter_string.restype = None

    # Fixed: the first parameter is the scan-descriptor handle and must be
    # declared c_void_p like every other descriptor function; it was
    # previously (incorrectly) declared as c_char_p.
    lib.tera_scan_descriptor_set_pack_interval.argtypes = [c_void_p, c_int64]
    lib.tera_scan_descriptor_set_pack_interval.restype = None

    lib.tera_scan_descriptor_set_is_async.argtypes = [c_void_p, c_bool]
    lib.tera_scan_descriptor_set_is_async.restype = None

    lib.tera_scan_descriptor_set_max_versions.argtypes = [c_void_p, c_int32]
    lib.tera_scan_descriptor_set_max_versions.restype = None

    lib.tera_scan_descriptor_set_snapshot.argtypes = [c_void_p, c_uint64]
    lib.tera_scan_descriptor_set_snapshot.restype = None

    lib.tera_scan_descriptor_set_time_range.argtypes = [c_void_p,
                                                        c_int64, c_int64]
    lib.tera_scan_descriptor_set_time_range.restype = None

    ##########
    # client #
    ##########
    lib.tera_client_open.argtypes = [c_char_p, c_char_p, POINTER(c_char_p)]
    lib.tera_client_open.restype = c_void_p

    lib.tera_table_open.argtypes = [c_void_p, c_char_p, POINTER(c_char_p)]
    lib.tera_table_open.restype = c_void_p

    ################
    # row_mutation #
    ################
    lib.tera_row_mutation_put.argtypes = [c_void_p, c_char_p,
                                          c_char_p, c_uint64,
                                          c_char_p, c_uint64]
    lib.tera_row_mutation_put.restype = None

    lib.tera_row_mutation_set_callback.argtypes = [c_void_p, MUTATION_CALLBACK]
    lib.tera_row_mutation_set_callback.restype = None

    lib.tera_row_mutation_delete_column.argtypes = [c_void_p, c_char_p,
                                                    c_char_p, c_uint64]
    lib.tera_row_mutation_delete_column.restype = None

    lib.tera_row_mutation_rowkey.argtypes = [c_void_p,
                                             POINTER(POINTER(c_ubyte)),
                                             POINTER(c_uint64)]
    lib.tera_row_mutation_rowkey.restype = None

    #########
    # table #
    #########
    lib.tera_table_get.argtypes = [c_void_p, c_char_p, c_uint64,
                                   c_char_p, c_char_p, c_uint64,
                                   POINTER(POINTER(c_ubyte)),
                                   POINTER(c_uint64),
                                   POINTER(c_char_p), c_uint64]
    lib.tera_table_get.restype = c_bool

    lib.tera_table_put.argtypes = [c_void_p, c_char_p, c_uint64, c_char_p,
                                   c_char_p, c_uint64, c_char_p, c_uint64,
                                   POINTER(c_char_p)]
    lib.tera_table_put.restype = c_bool

    lib.tera_table_delete.argtypes = [c_void_p, c_char_p, c_uint64,
                                      c_char_p, c_char_p, c_uint64]
    lib.tera_table_delete.restype = None

    lib.tera_table_apply_mutation.argtypes = [c_void_p, c_void_p]
    lib.tera_table_apply_mutation.restype = None

    lib.tera_table_is_put_finished.argtypes = [c_void_p]
    lib.tera_table_is_put_finished.restype = c_bool

    # Added: tera_table_scan is called by Table.Scan() but was never
    # declared, so its restype defaulted to C int.  That truncates the
    # returned stream pointer on 64-bit platforms and makes the
    # ``stream is None`` NULL-check in Table.Scan() never fire.
    lib.tera_table_scan.argtypes = [c_void_p, c_void_p, POINTER(c_char_p)]
    lib.tera_table_scan.restype = c_void_p

    lib.tera_row_mutation.argtypes = [c_void_p, c_char_p, c_uint64]
    lib.tera_row_mutation.restype = c_void_p
def copy_string_to_user(value, size):
    """Copy ``size`` bytes out of the C buffer ``value`` and free the buffer.

    The buffer was malloc'ed by the Tera C library and ownership is
    transferred to the caller, so it is released here with libc ``free``
    after the bytes have been copied into a Python string.

    Args:
        value: ctypes pointer to the C-allocated buffer.
        size: number of bytes to copy.

    Returns:
        A Python (byte) string holding a copy of the buffer contents.
    """
    result = string_at(value, size)
    libc = copy_string_to_user._libc
    if libc is None:
        # Load and configure libc only once and cache the handle; the
        # original implementation re-ran dlopen("libc.so.6") on every call.
        libc = cdll.LoadLibrary('libc.so.6')
        libc.free.argtypes = [c_void_p]
        libc.free.restype = None
        copy_string_to_user._libc = libc
    libc.free(value)
    return result


# One-time cache slot for the libc handle used above.
copy_string_to_user._libc = None
# Load the Tera C shared library from the working directory and declare all
# of its foreign-function signatures before anything else touches it.
lib = cdll.LoadLibrary('./libtera_c.so')
init_function_prototype()
| 31.990291 | 79 | 0.588407 | 11,030 | 0.629243 | 0 | 0 | 0 | 0 | 0 | 0 | 4,834 | 0.275772 |
2b8cd12ab30cf14546818a1c84e84c64173a25c5 | 2,218 | py | Python | thesis_scripts/train_probs_plot.py | jizongFox/kaggle-seizure-prediction | 9439d1c8ca9b4282fcd630a20d0d7946b790e218 | [
"MIT"
] | 55 | 2015-03-17T16:54:49.000Z | 2022-03-10T13:49:08.000Z | thesis_scripts/train_probs_plot.py | jizongFox/kaggle-seizure-prediction | 9439d1c8ca9b4282fcd630a20d0d7946b790e218 | [
"MIT"
] | 2 | 2018-05-12T20:29:52.000Z | 2021-08-10T11:27:05.000Z | thesis_scripts/train_probs_plot.py | jizongFox/kaggle-seizure-prediction | 9439d1c8ca9b4282fcd630a20d0d7946b790e218 | [
"MIT"
] | 18 | 2015-03-20T03:03:17.000Z | 2022-03-10T13:49:20.000Z | import numpy as np
import json
import cPickle
import matplotlib.pyplot as plt
from theano import config
import matplotlib.cm as cmx
import matplotlib.colors as colors
from sklearn.metrics import roc_curve
from utils.loader import load_train_data
from utils.config_name_creator import *
from utils.data_scaler import scale_across_features, scale_across_time
from cnn.conv_net import ConvNet
config.floatX = 'float32'
def get_cmap(N):
    """Return a function mapping an integer index in [0, N) to a distinct
    RGBA color drawn from matplotlib's 'hsv' colormap."""
    normalizer = colors.Normalize(vmin=0, vmax=N - 1)
    mappable = cmx.ScalarMappable(norm=normalizer, cmap='hsv')
    return lambda index: mappable.to_rgba(index)
def plot_train_probs(subject, data_path, model_path):
    """Load the trained CNN for *subject*, score its training data and
    scatter-plot the predicted probabilities colored by true label.

    Also prints the decision threshold closest to the ideal ROC corner
    (fpr=0, tpr=1).

    Args:
        subject: subject identifier, e.g. 'Dog_1'.
        data_path: directory holding the preprocessed FFT training data.
        model_path: directory holding the pickled model state per subject.
    """
    with open(model_path + '/' + subject + '.pickle', 'rb') as f:
        state_dict = cPickle.load(f)
    cnn = ConvNet(state_dict['params'])
    cnn.set_weights(state_dict['weights'])
    scalers = state_dict['scalers']
    d = load_train_data(data_path, subject)
    x, y = d['x'], d['y']
    # Apply exactly the scaling that was fitted at training time.
    x, _ = scale_across_time(x, x_test=None, scalers=scalers) if state_dict['params']['scale_time'] \
        else scale_across_features(x, x_test=None, scalers=scalers)
    cnn.batch_size.set_value(x.shape[0])
    probs = cnn.get_test_proba(x)
    fpr, tpr, threshold = roc_curve(y, probs)
    # Pick the threshold minimizing the Euclidean distance to (fpr=0, tpr=1).
    c = np.sqrt((1-tpr)**2+fpr**2)
    opt_threshold = threshold[np.where(c==np.min(c))[0]]
    print opt_threshold
    # Jitter x coordinates (fixed seed) so overlapping points stay visible.
    x_coords = np.zeros(len(y), dtype='float64')
    rng = np.random.RandomState(42)
    x_coords += rng.normal(0.0, 0.08, size=len(x_coords))
    plt.scatter(x_coords, probs, c=y, s=60)
    plt.title(subject)
    plt.show()
if __name__ == '__main__':
    # Resolve data/model directories from SETTINGS.json, then plot the
    # training-set probabilities for every subject in the competition.
    with open('SETTINGS.json') as f:
        settings_dict = json.load(f)
    data_path = settings_dict['path']['processed_data_path'] + '/' + create_fft_data_name(settings_dict)
    model_path = settings_dict['path']['model_path'] + '/' + create_cnn_model_name(settings_dict)
    subjects = ['Patient_1', 'Patient_2', 'Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5']
    for subject in subjects:
        print '***********************', subject, '***************************'
        plot_train_probs(subject, data_path, model_path)
2b8d3ea059089d5ddd2f32278051ab1cf1181bdf | 2,464 | py | Python | tms_ts/tms_ts_smach/scripts/20141120_193027.py | SigmaHayashi/ros_tms_for_smart_previewed_reality | 4ace908bd3da0519246b3c45d0230cbd02e49da0 | [
"BSD-3-Clause"
] | null | null | null | tms_ts/tms_ts_smach/scripts/20141120_193027.py | SigmaHayashi/ros_tms_for_smart_previewed_reality | 4ace908bd3da0519246b3c45d0230cbd02e49da0 | [
"BSD-3-Clause"
] | null | null | null | tms_ts/tms_ts_smach/scripts/20141120_193027.py | SigmaHayashi/ros_tms_for_smart_previewed_reality | 4ace908bd3da0519246b3c45d0230cbd02e49da0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import roslib; roslib.load_manifest('tms_ts_smach')
import rospy
import smach
import smach_ros
from smach_ros import ServiceState
from smach import Concurrence
from tms_msg_rp.srv import *
from tms_msg_ts.srv import *
def smc0():
    """Build a smach Concurrence running the 'random move' (task 9006) and
    'sensing' (task 9007) service calls in parallel.

    The concurrence succeeds only when both children succeed; the first
    child to terminate preempts the other.
    """
    outcome_map = {'succeeded': {'0random_move': 'succeeded',
                                 '1sensing': 'succeeded'}}
    concurrence = smach.Concurrence(outcomes=['succeeded', 'aborted'],
                                    default_outcome='aborted',
                                    outcome_map=outcome_map,
                                    child_termination_cb=lambda arg: True)
    with concurrence:
        smach.Concurrence.add(
            '0random_move',
            ServiceState('rp_cmd', rp_cmd,
                         request=rp_cmdRequest(9006, True, 2005, [0])))
        smach.Concurrence.add(
            '1sensing',
            ServiceState('rp_cmd', rp_cmd,
                         request=rp_cmdRequest(9007, True, 2005, [0])))
    return concurrence
def main():
    """Assemble and execute the top-level ROS-TMS task state machine.

    Runs the concurrent move/sensing stage, then a fixed sequence of
    rp_cmd / ts_state_control service calls, while publishing the state
    machine to a smach introspection server for visualization.
    """
    rospy.init_node('tms_ts_smach_executive1')
    sm_root = smach.StateMachine(['succeeded','aborted','preempted'])
    with sm_root:
        smach.StateMachine.add('smc0', smc0(), transitions={'succeeded':'control0'})
        # State-control check before moving (mode 2).
        smach.StateMachine.add('control0',
                               ServiceState('ts_state_control',
                                            ts_state_control,
                                            request = ts_state_controlRequest(0, 0, 0, 2, "")),
                               transitions={'succeeded':'move1', 'aborted':'aborted'})
        # Move command (task 9001) on robot 2005.
        smach.StateMachine.add('move1',
                               ServiceState('rp_cmd',
                                            rp_cmd,
                                            request = rp_cmdRequest(9001, True, 2005, [0])),
                               transitions={'succeeded':'control1'})
        # Final state-control call (mode 0) to finish the task.
        smach.StateMachine.add('control1',
                               ServiceState('ts_state_control',
                                            ts_state_control,
                                            request = ts_state_controlRequest(0, 0, 0, 0, "")),
                               transitions={'succeeded':'succeeded', 'aborted':'aborted'})
    sis = smach_ros.IntrospectionServer('tms_ts_smach_test', sm_root, '/ROS_TMS')
    sis.start()
    outcome = sm_root.execute()
    rospy.spin()
    sis.stop()
# Script entry point: build and run the task state machine.
if __name__ == '__main__':
    main()
| 36.235294 | 110 | 0.505276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.184659 |
2b8d46e20478051b98cf25878001dd95e0c89cd8 | 47,862 | py | Python | google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py | ryanyuan/python-bigtable | e55ca07561f9c946276f3bde599e69947769f560 | [
"Apache-2.0"
] | null | null | null | google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py | ryanyuan/python-bigtable | e55ca07561f9c946276f3bde599e69947769f560 | [
"Apache-2.0"
] | null | null | null | google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py | ryanyuan/python-bigtable | e55ca07561f9c946276f3bde599e69947769f560 | [
"Apache-2.0"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2,
)
from google.cloud.bigtable_admin_v2.proto import (
table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
# NOTE: protoc/grpc generated code ("DO NOT EDIT") — only comments added here.
class BigtableTableAdminStub(object):
    """Service for creating, configuring, and deleting Cloud Bigtable tables.

    Provides access to the table schemas only, not the data stored within
    the tables.
    """

    def __init__(self, channel):
        """Constructor.

        Binds one unary-unary callable per RPC method of the
        BigtableTableAdmin service onto this stub, each with the matching
        protobuf request serializer and response deserializer.

        Args:
            channel: A grpc.Channel.
        """
        # --- Table RPCs ---
        self.CreateTable = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString,
        )
        self.CreateTableFromSnapshot = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.ListTables = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString,
        )
        self.GetTable = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString,
        )
        self.DeleteTable = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.ModifyColumnFamilies = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString,
        )
        self.DropRowRange = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.GenerateConsistencyToken = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString,
        )
        self.CheckConsistency = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString,
        )
        # --- Snapshot RPCs (private alpha) ---
        self.SnapshotTable = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.GetSnapshot = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString,
        )
        self.ListSnapshots = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString,
        )
        self.DeleteSnapshot = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        # --- Backup / restore RPCs ---
        self.CreateBackup = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.GetBackup = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString,
        )
        self.UpdateBackup = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString,
        )
        self.DeleteBackup = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.ListBackups = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString,
        )
        self.RestoreTable = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable",
            request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        # --- IAM RPCs ---
        self.GetIamPolicy = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.SetIamPolicy = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.TestIamPermissions = channel.unary_unary(
            "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
        )
class BigtableTableAdminServicer(object):
    """Service for creating, configuring, and deleting Cloud Bigtable tables.

    Provides access to the table schemas only, not the data stored within
    the tables.  Every handler below is a placeholder that reports
    UNIMPLEMENTED to the client; a real server subclasses this class and
    overrides the methods it supports.
    """

    def _unimplemented(self, context):
        # Shared placeholder behavior for every RPC handler: tell the
        # client the method is unimplemented, then raise locally.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def CreateTable(self, request, context):
        """Creates a new table in the specified instance, optionally with a
        full set of initial column families given in the request."""
        self._unimplemented(context)

    def CreateTableFromSnapshot(self, request, context):
        """Creates a new table from the specified snapshot; the target table
        must not exist and must share the snapshot's instance.  (Private
        alpha feature, no SLA or deprecation policy.)"""
        self._unimplemented(context)

    def ListTables(self, request, context):
        """Lists all tables served from a specified instance."""
        self._unimplemented(context)

    def GetTable(self, request, context):
        """Gets metadata information about the specified table."""
        self._unimplemented(context)

    def DeleteTable(self, request, context):
        """Permanently deletes a specified table and all of its data."""
        self._unimplemented(context)

    def ModifyColumnFamilies(self, request, context):
        """Performs a series of column family modifications on the specified
        table; either all or none occur before returning, but concurrent
        readers may observe partially applied changes."""
        self._unimplemented(context)

    def DropRowRange(self, request, context):
        """Permanently drops/deletes a row range from a specified table:
        either all rows, or those matching a particular prefix."""
        self._unimplemented(context)

    def GenerateConsistencyToken(self, request, context):
        """Generates a consistency token for a Table, usable with
        CheckConsistency; tokens remain valid for 90 days."""
        self._unimplemented(context)

    def CheckConsistency(self, request, context):
        """Checks replication consistency based on a consistency token and
        the conditions in the check request."""
        self._unimplemented(context)

    def SnapshotTable(self, request, context):
        """Creates a new snapshot in the specified cluster from the specified
        source table in the same instance.  (Private alpha feature, no SLA
        or deprecation policy.)"""
        self._unimplemented(context)

    def GetSnapshot(self, request, context):
        """Gets metadata information about the specified snapshot.  (Private
        alpha feature, no SLA or deprecation policy.)"""
        self._unimplemented(context)

    def ListSnapshots(self, request, context):
        """Lists all snapshots associated with the specified cluster.
        (Private alpha feature, no SLA or deprecation policy.)"""
        self._unimplemented(context)

    def DeleteSnapshot(self, request, context):
        """Permanently deletes the specified snapshot.  (Private alpha
        feature, no SLA or deprecation policy.)"""
        self._unimplemented(context)

    def CreateBackup(self, request, context):
        """Starts creating a new Cloud Bigtable Backup; the returned
        long-running operation tracks creation (metadata:
        CreateBackupMetadata, response: Backup) and cancelling it stops
        creation and deletes the backup."""
        self._unimplemented(context)

    def GetBackup(self, request, context):
        """Gets metadata on a pending or completed Cloud Bigtable Backup."""
        self._unimplemented(context)

    def UpdateBackup(self, request, context):
        """Updates a pending or completed Cloud Bigtable Backup."""
        self._unimplemented(context)

    def DeleteBackup(self, request, context):
        """Deletes a pending or completed Cloud Bigtable backup."""
        self._unimplemented(context)

    def ListBackups(self, request, context):
        """Lists Cloud Bigtable backups, both completed and pending."""
        self._unimplemented(context)

    def RestoreTable(self, request, context):
        """Creates a new table by restoring from a completed backup in the
        same instance; the returned long-running operation (metadata:
        RestoreTableMetadata, response: Table) tracks progress and can be
        cancelled."""
        self._unimplemented(context)

    def GetIamPolicy(self, request, context):
        """Gets the access control policy for a resource; returns an empty
        policy if the resource exists without one."""
        self._unimplemented(context)

    def SetIamPolicy(self, request, context):
        """Sets the access control policy on a Table or Backup resource,
        replacing any existing policy."""
        self._unimplemented(context)

    def TestIamPermissions(self, request, context):
        """Returns permissions that the caller has on the specified table
        resource."""
        self._unimplemented(context)
def add_BigtableTableAdminServicer_to_server(servicer, server):
    """Register every BigtableTableAdmin RPC handler of *servicer* on *server*.

    All RPCs of this service are unary-unary, so the handler table is built
    from a single (rpc name, request message, response message) spec list.
    """
    # Local shorthands for the generated protobuf modules imported at the
    # top of this file.
    _admin_pb2 = google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2
    _table_pb2 = google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2
    _operations_pb2 = google_dot_longrunning_dot_operations__pb2
    _empty_pb2 = google_dot_protobuf_dot_empty__pb2
    _iam_pb2 = google_dot_iam_dot_v1_dot_iam__policy__pb2
    _policy_pb2 = google_dot_iam_dot_v1_dot_policy__pb2
    # Order matters only for readability; dict insertion order mirrors the
    # original generated file.
    rpc_specs = [
        ("CreateTable", _admin_pb2.CreateTableRequest, _table_pb2.Table),
        ("CreateTableFromSnapshot", _admin_pb2.CreateTableFromSnapshotRequest, _operations_pb2.Operation),
        ("ListTables", _admin_pb2.ListTablesRequest, _admin_pb2.ListTablesResponse),
        ("GetTable", _admin_pb2.GetTableRequest, _table_pb2.Table),
        ("DeleteTable", _admin_pb2.DeleteTableRequest, _empty_pb2.Empty),
        ("ModifyColumnFamilies", _admin_pb2.ModifyColumnFamiliesRequest, _table_pb2.Table),
        ("DropRowRange", _admin_pb2.DropRowRangeRequest, _empty_pb2.Empty),
        ("GenerateConsistencyToken", _admin_pb2.GenerateConsistencyTokenRequest, _admin_pb2.GenerateConsistencyTokenResponse),
        ("CheckConsistency", _admin_pb2.CheckConsistencyRequest, _admin_pb2.CheckConsistencyResponse),
        ("SnapshotTable", _admin_pb2.SnapshotTableRequest, _operations_pb2.Operation),
        ("GetSnapshot", _admin_pb2.GetSnapshotRequest, _table_pb2.Snapshot),
        ("ListSnapshots", _admin_pb2.ListSnapshotsRequest, _admin_pb2.ListSnapshotsResponse),
        ("DeleteSnapshot", _admin_pb2.DeleteSnapshotRequest, _empty_pb2.Empty),
        ("CreateBackup", _admin_pb2.CreateBackupRequest, _operations_pb2.Operation),
        ("GetBackup", _admin_pb2.GetBackupRequest, _table_pb2.Backup),
        ("UpdateBackup", _admin_pb2.UpdateBackupRequest, _table_pb2.Backup),
        ("DeleteBackup", _admin_pb2.DeleteBackupRequest, _empty_pb2.Empty),
        ("ListBackups", _admin_pb2.ListBackupsRequest, _admin_pb2.ListBackupsResponse),
        ("RestoreTable", _admin_pb2.RestoreTableRequest, _operations_pb2.Operation),
        ("GetIamPolicy", _iam_pb2.GetIamPolicyRequest, _policy_pb2.Policy),
        ("SetIamPolicy", _iam_pb2.SetIamPolicyRequest, _policy_pb2.Policy),
        ("TestIamPermissions", _iam_pb2.TestIamPermissionsRequest, _iam_pb2.TestIamPermissionsResponse),
    ]
    rpc_method_handlers = {
        name: grpc.unary_unary_rpc_method_handler(
            getattr(servicer, name),
            request_deserializer=request_cls.FromString,
            response_serializer=response_cls.SerializeToString,
        )
        for name, request_cls, response_cls in rpc_specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.bigtable.admin.v2.BigtableTableAdmin", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class BigtableTableAdmin(object):
    """Service for creating, configuring, and deleting Cloud Bigtable tables.
    Provides access to the table schemas only, not the data stored within
    the tables.

    Every public static method issues the corresponding unary-unary RPC via
    the shared private helper ``_invoke``.
    """

    # Private shorthands for the generated protobuf modules imported at the
    # top of this file; referenced by the per-RPC serializer lookups below.
    _admin_pb2 = google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2
    _table_pb2 = google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2
    _operations_pb2 = google_dot_longrunning_dot_operations__pb2
    _empty_pb2 = google_dot_protobuf_dot_empty__pb2
    _iam_pb2 = google_dot_iam_dot_v1_dot_iam__policy__pb2
    _policy_pb2 = google_dot_iam_dot_v1_dot_policy__pb2

    @staticmethod
    def _invoke(rpc_name, request_serializer, response_deserializer, request,
                target, options, channel_credentials, call_credentials,
                compression, wait_for_ready, timeout, metadata):
        """Issue the unary-unary call *rpc_name* on this service."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/google.bigtable.admin.v2.BigtableTableAdmin/" + rpc_name,
            request_serializer,
            response_deserializer,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )

    @staticmethod
    def CreateTable(request, target, options=(), channel_credentials=None,
                    call_credentials=None, compression=None,
                    wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "CreateTable",
            BigtableTableAdmin._admin_pb2.CreateTableRequest.SerializeToString,
            BigtableTableAdmin._table_pb2.Table.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CreateTableFromSnapshot(request, target, options=(),
                                channel_credentials=None,
                                call_credentials=None, compression=None,
                                wait_for_ready=None, timeout=None,
                                metadata=None):
        return BigtableTableAdmin._invoke(
            "CreateTableFromSnapshot",
            BigtableTableAdmin._admin_pb2.CreateTableFromSnapshotRequest.SerializeToString,
            BigtableTableAdmin._operations_pb2.Operation.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListTables(request, target, options=(), channel_credentials=None,
                   call_credentials=None, compression=None,
                   wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "ListTables",
            BigtableTableAdmin._admin_pb2.ListTablesRequest.SerializeToString,
            BigtableTableAdmin._admin_pb2.ListTablesResponse.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetTable(request, target, options=(), channel_credentials=None,
                 call_credentials=None, compression=None,
                 wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "GetTable",
            BigtableTableAdmin._admin_pb2.GetTableRequest.SerializeToString,
            BigtableTableAdmin._table_pb2.Table.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DeleteTable(request, target, options=(), channel_credentials=None,
                    call_credentials=None, compression=None,
                    wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "DeleteTable",
            BigtableTableAdmin._admin_pb2.DeleteTableRequest.SerializeToString,
            BigtableTableAdmin._empty_pb2.Empty.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ModifyColumnFamilies(request, target, options=(),
                             channel_credentials=None, call_credentials=None,
                             compression=None, wait_for_ready=None,
                             timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "ModifyColumnFamilies",
            BigtableTableAdmin._admin_pb2.ModifyColumnFamiliesRequest.SerializeToString,
            BigtableTableAdmin._table_pb2.Table.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DropRowRange(request, target, options=(), channel_credentials=None,
                     call_credentials=None, compression=None,
                     wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "DropRowRange",
            BigtableTableAdmin._admin_pb2.DropRowRangeRequest.SerializeToString,
            BigtableTableAdmin._empty_pb2.Empty.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GenerateConsistencyToken(request, target, options=(),
                                 channel_credentials=None,
                                 call_credentials=None, compression=None,
                                 wait_for_ready=None, timeout=None,
                                 metadata=None):
        return BigtableTableAdmin._invoke(
            "GenerateConsistencyToken",
            BigtableTableAdmin._admin_pb2.GenerateConsistencyTokenRequest.SerializeToString,
            BigtableTableAdmin._admin_pb2.GenerateConsistencyTokenResponse.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CheckConsistency(request, target, options=(),
                         channel_credentials=None, call_credentials=None,
                         compression=None, wait_for_ready=None, timeout=None,
                         metadata=None):
        return BigtableTableAdmin._invoke(
            "CheckConsistency",
            BigtableTableAdmin._admin_pb2.CheckConsistencyRequest.SerializeToString,
            BigtableTableAdmin._admin_pb2.CheckConsistencyResponse.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def SnapshotTable(request, target, options=(), channel_credentials=None,
                      call_credentials=None, compression=None,
                      wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "SnapshotTable",
            BigtableTableAdmin._admin_pb2.SnapshotTableRequest.SerializeToString,
            BigtableTableAdmin._operations_pb2.Operation.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetSnapshot(request, target, options=(), channel_credentials=None,
                    call_credentials=None, compression=None,
                    wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "GetSnapshot",
            BigtableTableAdmin._admin_pb2.GetSnapshotRequest.SerializeToString,
            BigtableTableAdmin._table_pb2.Snapshot.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListSnapshots(request, target, options=(), channel_credentials=None,
                      call_credentials=None, compression=None,
                      wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "ListSnapshots",
            BigtableTableAdmin._admin_pb2.ListSnapshotsRequest.SerializeToString,
            BigtableTableAdmin._admin_pb2.ListSnapshotsResponse.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DeleteSnapshot(request, target, options=(), channel_credentials=None,
                       call_credentials=None, compression=None,
                       wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "DeleteSnapshot",
            BigtableTableAdmin._admin_pb2.DeleteSnapshotRequest.SerializeToString,
            BigtableTableAdmin._empty_pb2.Empty.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CreateBackup(request, target, options=(), channel_credentials=None,
                     call_credentials=None, compression=None,
                     wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "CreateBackup",
            BigtableTableAdmin._admin_pb2.CreateBackupRequest.SerializeToString,
            BigtableTableAdmin._operations_pb2.Operation.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetBackup(request, target, options=(), channel_credentials=None,
                  call_credentials=None, compression=None,
                  wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "GetBackup",
            BigtableTableAdmin._admin_pb2.GetBackupRequest.SerializeToString,
            BigtableTableAdmin._table_pb2.Backup.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def UpdateBackup(request, target, options=(), channel_credentials=None,
                     call_credentials=None, compression=None,
                     wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "UpdateBackup",
            BigtableTableAdmin._admin_pb2.UpdateBackupRequest.SerializeToString,
            BigtableTableAdmin._table_pb2.Backup.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DeleteBackup(request, target, options=(), channel_credentials=None,
                     call_credentials=None, compression=None,
                     wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "DeleteBackup",
            BigtableTableAdmin._admin_pb2.DeleteBackupRequest.SerializeToString,
            BigtableTableAdmin._empty_pb2.Empty.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListBackups(request, target, options=(), channel_credentials=None,
                    call_credentials=None, compression=None,
                    wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "ListBackups",
            BigtableTableAdmin._admin_pb2.ListBackupsRequest.SerializeToString,
            BigtableTableAdmin._admin_pb2.ListBackupsResponse.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def RestoreTable(request, target, options=(), channel_credentials=None,
                     call_credentials=None, compression=None,
                     wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "RestoreTable",
            BigtableTableAdmin._admin_pb2.RestoreTableRequest.SerializeToString,
            BigtableTableAdmin._operations_pb2.Operation.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetIamPolicy(request, target, options=(), channel_credentials=None,
                     call_credentials=None, compression=None,
                     wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "GetIamPolicy",
            BigtableTableAdmin._iam_pb2.GetIamPolicyRequest.SerializeToString,
            BigtableTableAdmin._policy_pb2.Policy.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def SetIamPolicy(request, target, options=(), channel_credentials=None,
                     call_credentials=None, compression=None,
                     wait_for_ready=None, timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "SetIamPolicy",
            BigtableTableAdmin._iam_pb2.SetIamPolicyRequest.SerializeToString,
            BigtableTableAdmin._policy_pb2.Policy.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def TestIamPermissions(request, target, options=(),
                           channel_credentials=None, call_credentials=None,
                           compression=None, wait_for_ready=None,
                           timeout=None, metadata=None):
        return BigtableTableAdmin._invoke(
            "TestIamPermissions",
            BigtableTableAdmin._iam_pb2.TestIamPermissionsRequest.SerializeToString,
            BigtableTableAdmin._iam_pb2.TestIamPermissionsResponse.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)
| 43.869844 | 166 | 0.704672 | 38,489 | 0.804166 | 0 | 0 | 18,379 | 0.384 | 0 | 0 | 10,329 | 0.215808 |
2b8eb6f4aea8446268376f43354cab9bc858fe41 | 9,313 | py | Python | jaeun.py | catubc/ensembles | cf4bd380a6ac300e477d555a6b708363100f974b | [
"MIT"
] | null | null | null | jaeun.py | catubc/ensembles | cf4bd380a6ac300e477d555a6b708363100f974b | [
"MIT"
] | null | null | null | jaeun.py | catubc/ensembles | cf4bd380a6ac300e477d555a6b708363100f974b | [
"MIT"
] | null | null | null | # Kang Miller et al 2014 method for computing ensembles
#
#
#
import numpy as np
import matplotlib.pyplot as plt
import os
import scipy.stats
def PCA(X, n_components):
from sklearn import decomposition
#pca = decomposition.SparsePCA(n_components=3, n_jobs=1)
pca = decomposition.PCA(n_components=n_components)
print "... fitting PCA ..."
pca.fit(X)
for k in range (n_components):
print "... explained variance: ", pca.explained_variance_[k]
print "... pca transform..."
return pca.transform(X)
def corr2_coeff(A,B):
    """Pearson correlation between every row of A and every row of B.

    Returns an (A.shape[0], B.shape[0]) matrix where entry (i, j) is the
    correlation coefficient of A[i] with B[j], computed via the vectorized
    centered-dot-product formulation.
    """
    # Center each row about its own mean.
    centered_a = A - A.mean(1)[:, None]
    centered_b = B - B.mean(1)[:, None]
    # Per-row sums of squares (the normalizers).
    sum_sq_a = (centered_a ** 2).sum(1)
    sum_sq_b = (centered_b ** 2).sum(1)
    numerator = np.dot(centered_a, centered_b.T)
    denominator = np.sqrt(np.dot(sum_sq_a[:, None], sum_sq_b[None]))
    return numerator / denominator
def jaeun_detect(rasters, list_filename):
    """Detect candidate ensembles from a binary raster matrix.

    Frames whose summed population activity exceeds 4 standard deviations
    are treated as ensemble events; the population vectors of those frames
    are clustered with spectral clustering on a Fisher-z matrix.
    Returns (ensembles, ids).

    NOTE(review): `list_filename` is accepted but never used in this body.
    Side effects: prints progress and opens two blocking matplotlib windows.
    """
    print rasters.shape
    # Summed activity per column; columns are presumably frames and rows
    # neurons (rasters[:, index] below is read as a population vector) --
    # TODO confirm against the caller.
    activity = np.sum(rasters,axis=0)
    #plt.plot(activity)
    # Event threshold: 4 standard deviations of the population activity.
    std = np.std(activity)*4
    #plt.plot([0,len(rasters[0])],[std,std])
    #determine neurons in each ensemble:
    indexes = np.where(activity>std)[0]
    ensembles = []
    vectors = []
    for index in indexes:
        # Active-neuron indices of this supra-threshold frame.
        ensembles.append(np.where(rasters[:,index]>0)[0])
        print len(ensembles)
        # Full population vector of the frame (used for correlation below).
        vectors.append(rasters[:,index])
        #plt.plot([index,index],[0,40])
    #plt.show()
    #Normalize vectors
    # Each event vector is scaled to unit sum; divisions by a zero sum yield
    # NaN and are zeroed by nan_to_num below.
    vec_matrix = np.float32(np.vstack(vectors)).T
    print vec_matrix.shape
    for k in range(len(vec_matrix)):
        vec_matrix[k] = vec_matrix[k]/np.sum(vec_matrix[k])
    vec_matrix = np.nan_to_num(vec_matrix).T
    plt.imshow(vec_matrix)
    plt.show()
    # NOTE(review): vec_matrix is displayed but the clustering below uses the
    # raw (un-normalized) vectors -- confirm that is intended.
    #Compute correlation:
    corr_matrix = np.zeros((len(vectors),len(vectors)),dtype=np.float32)
    corr_array =[]
    for e in range(len(vectors)):
        for f in range(0,len(vectors),1):
            print len(vectors[e]),len(vectors[f])
            print scipy.stats.pearsonr(vectors[e],vectors[f])
            # NOTE(review): pearsonr returns (r, p-value); index [1] is the
            # p-value, yet the Fisher z-transform below is the formula for a
            # correlation coefficient ([0]) -- confirm which was intended.
            r = scipy.stats.pearsonr(vectors[e],vectors[f])[1]
            corr_matrix[e,f] = 0.5*np.log((1+r)/(1-r))
            corr_array.append(0.5*np.log((1+r)/(1-r)))
    print corr_matrix
    print np.min(corr_matrix), np.max(corr_matrix)
    plt.imshow(corr_matrix)
    plt.show()
    #from scipy.cluster.hierarchy import linkage, dendrogram
    #linkage_matrix = linkage(vectors, 'single')
    #dendogram = dendrogram(linkage_matrix,truncate_mode='none')
    # Spectral clustering into a hard-coded 10 clusters on the z matrix.
    from sklearn.cluster import SpectralClustering
    mat = corr_matrix
    ids = SpectralClustering(10).fit_predict(mat)
    #print img
    # The two `if False:` branches below are disabled exploratory code
    # (PCA scatter and hyperangle distances); kept for reference.
    if False: #Use PCA to cluster highly-active frames:
        data = PCA(vectors,3)
        plt.scatter(data[:,0], data[:,1])
        plt.show()
    if False: #Use hyperangles to compute difference
        matrix = np.zeros((len(vectors),len(vectors)), dtype=np.float32)
        vector0 = np.zeros(len(vectors[0]),dtype=np.float32)+1
        dists = []
        for e in range(len(vectors)):
            p = np.dot(vectors[e],vector0)/np.linalg.norm(vectors[e])/np.linalg.norm(vector0) # -> cosine of the angle
            dists.append(np.degrees(np.arccos(np.clip(p, -1, 1))))
            for f in range(len(vectors)):
                c = np.dot(vectors[e],vectors[f])/np.linalg.norm(vectors[e])/np.linalg.norm(vectors[f]) # -> cosine of the angle
                #dists =
                #print c
                #c = np.degrees(np.arccos(np.clip(c, -1, 1)))
                matrix[e,f] = np.degrees(np.arccos(np.clip(c, -1, 1)))
                #matrix[e,f] = c
                print "...angle: ", matrix[e,f]
                #temp_angle_array.append(c)
        plt.imshow(matrix)
        plt.show()
        bin_width = 10 # histogram bin width in usec
        y = np.histogram(dists, bins = np.arange(0,90,bin_width))
        plt.bar(y[1][:-1], y[0], bin_width, color='b', alpha=1)
        plt.show()
    if False: #Use SVD on all data;
        pass
    #quit()
    return ensembles, ids
# --- Top-level script: load ROI contours, then plot ensembles per recording.
#Load ROI countour data first
roi_filename = '/media/cat/250GB/in_vivo/alejandro/G2M4/joint/all_registered_processed_ROIs.npz'
data_in = np.load(roi_filename, encoding= 'latin1', mmap_mode='c')
Bmat_array = data_in['Bmat_array']
cm = data_in['cm'] #Centre of mass
thr_array = data_in['thr_array'] #Threshold array original data, usually 0.2
traces = data_in['traces'] #
x_array = data_in['x_array']
y_array = data_in['y_array']
colors='b'
list_filename = '/media/cat/250GB/in_vivo/alejandro/G2M4/ch1_file_list.txt'
filenames = np.loadtxt(list_filename, dtype='str')
# Fixed contour threshold used for every plotted ROI.
thr_fixed=.5
ctr=0
modularity_levels = np.arange(0,25,1)
# NOTE(review): `colors` is first set to 'b' above and immediately shadowed
# by this palette list.
colors = ['gold','mediumslateblue','grey','thistle','teal','palegreen','violet','deepskyblue','blue','green','cyan','orange','red']
for s, filename in enumerate(filenames):
    print (filename)
    # Files containing '000' are treated as spontaneous recordings.
    if '000' in filename:
        print ctr, (ctr*7)%21+int(ctr/3.)+1
        print ("... spontaneous recording ...")
        rasters = np.load(filename[:-4]+"_rasters.npy")
        ensembles, ids = jaeun_detect(rasters, list_filename)
        print ensembles
        print ids
        #frame_array, weight_array = luis_detect(rasters, list_filename)
        #main_ensembles, other_ensembles = luis_ensembles (frame_array, weight_array, rasters)
        # NOTE(review): `main_ensembles`/`other_ensembles` are only defined by
        # the commented-out luis_ensembles() call above, so this branch raises
        # NameError as written -- confirm intent before running.
        print len(main_ensembles)
        for k in range(len(main_ensembles)):
            print "...plotting ensemble: ", k
            ax=plt.subplot(3,3,k+1)
            plt.title("Ensemble: "+str(k), fontsize=20)
            ax.set_xticks([]); ax.set_yticks([])
            ##Draw all neurons first
            #for i, (y,x,Bmat,thr) in enumerate(zip(y_array,x_array,Bmat_array,thr_array)):
            #    cs = plt.contour(y, x, Bmat, [thr_fixed], colors='black',alpha=0.3)
            #Draw neurons at each modularity
            unique_indexes = main_ensembles[k] #Select neurons at this level
            for i in unique_indexes:
                #cs = plt.contour(y_array[i], x_array[i], Bmat_array[i], [thr_fixed], colors=colors[k%13],linewidth=15, alpha=1)
                cs = plt.contour(y_array[i], x_array[i], Bmat_array[i], [thr_fixed], colors='red',linewidth=15, alpha=1)
            #Draw background neurons
            unique_indexes = other_ensembles[k] #Select neurons at this level
            for i in unique_indexes:
                #cs = plt.contour(y_array[i], x_array[i], Bmat_array[i], [thr_fixed], colors=colors[k%13],linewidth=15, alpha=1)
                cs = plt.contour(y_array[i], x_array[i], Bmat_array[i], [thr_fixed], colors='black',linewidth=15, alpha=0.5)
        plt.show()
    # Files containing '001' are treated as stimulus recordings.
    if '001' in filename:
        print ("... stim recording...")
        #Draw stim1 ensembles
        network_stim1 = np.load(filename[:-4]+"_networks_stim1.npy")
        ax=plt.subplot(3,7,(ctr*7)%21+int(ctr/3.)+1)
        ax.set_xticks([]); ax.set_yticks([])
        if ctr==1:
            plt.ylabel("Horizontal",fontsize=15)
        ctr+=1
        plt.title(os.path.split(filename)[1][:-4].replace('_C1V1_GCaMP6s','')+", #: "+str(len(network_stim1)),fontsize=9)
        #Draw all neurons first
        for i, (y,x,Bmat,thr) in enumerate(zip(y_array,x_array,Bmat_array,thr_array)):
            cs = plt.contour(y, x, Bmat, [thr_fixed], colors='black',alpha=0.3)
        #Draw neurons at each modularity
        for k in modularity_levels:
            if k>(len(network_stim1)-1): break
            index_array = network_stim1[k] #Select neurons at this level
            unique_indexes=np.unique(index_array) #THIS is redundant as neurons are uniquely asigned to modularity levels
            print ("... modularity: ", k, " # neurons: ", len(unique_indexes))
            for i in unique_indexes:
                cs = plt.contour(y_array[i], x_array[i], Bmat_array[i], [thr_fixed], colors=colors[k],linewidth=15, alpha=1)
        #Draw stim2 ensembles
        network_stim2 = np.load(filename[:-4]+"_networks_stim2.npy")
        ax=plt.subplot(3,7,(ctr*7)%21+int(ctr/3.)+1)
        ax.set_xticks([]); ax.set_yticks([])
        if ctr==2:
            plt.ylabel("Vertical",fontsize=15)
        ctr+=1
        plt.title(os.path.split(filename)[1][:-4].replace('_C1V1_GCaMP6s','')+", #: "+str(len(network_stim2)),fontsize=9)
        #Draw all neurons first
        for i, (y,x,Bmat,thr) in enumerate(zip(y_array,x_array,Bmat_array,thr_array)):
            cs = plt.contour(y, x, Bmat, [thr_fixed], colors='black',alpha=0.3)
        #Draw neurons at each modularity
        for k in modularity_levels:
            if k>(len(network_stim2)-1): break
            index_array = network_stim2[k] #Select neurons at this level
            unique_indexes=np.unique(index_array) #THIS is redundant as neurons are uniquely asigned to modularity levels
            print ("... modularity: ", k, " # neurons: ", len(unique_indexes))
            for i in unique_indexes:
                cs = plt.contour(y_array[i], x_array[i], Bmat_array[i], [thr_fixed], colors=colors[k],linewidth=15, alpha=1)
plt.show()
2b8f14ec9702e12886e533477032b0cf4a2e6d8f | 1,918 | py | Python | tests/test_analysis_status_response.py | s0b0lev/mythx-models | 0fc14fef9e41a68a7d97e0bb170fd0eca5693d9a | [
"MIT"
] | null | null | null | tests/test_analysis_status_response.py | s0b0lev/mythx-models | 0fc14fef9e41a68a7d97e0bb170fd0eca5693d9a | [
"MIT"
] | null | null | null | tests/test_analysis_status_response.py | s0b0lev/mythx-models | 0fc14fef9e41a68a7d97e0bb170fd0eca5693d9a | [
"MIT"
] | null | null | null | import json
import pytest
from mythx_models.exceptions import ValidationError
from mythx_models.response import Analysis, AnalysisStatusResponse
from mythx_models.util import serialize_api_timestamp
from . import common as testdata
def assert_analysis_data(expected, analysis: Analysis):
assert expected["apiVersion"] == analysis.api_version
assert expected["maruVersion"] == analysis.maru_version
assert expected["mythrilVersion"] == analysis.mythril_version
assert expected["harveyVersion"] == analysis.harvey_version
assert expected["queueTime"] == analysis.queue_time
assert expected["runTime"] == analysis.run_time
assert expected["status"] == analysis.status
assert expected["submittedAt"] == serialize_api_timestamp(analysis.submitted_at)
assert expected["submittedBy"] == analysis.submitted_by
assert expected["uuid"] == analysis.uuid
def test_analysis_list_from_valid_json():
resp = AnalysisStatusResponse.from_json(
json.dumps(testdata.ANALYSIS_STATUS_RESPONSE_DICT)
)
assert_analysis_data(testdata.ANALYSIS_STATUS_RESPONSE_DICT, resp.analysis)
def test_analysis_list_from_empty_json():
with pytest.raises(ValidationError):
AnalysisStatusResponse.from_json("{}")
def test_analysis_list_from_valid_dict():
resp = AnalysisStatusResponse.from_dict(testdata.ANALYSIS_STATUS_RESPONSE_DICT)
assert_analysis_data(testdata.ANALYSIS_STATUS_RESPONSE_DICT, resp.analysis)
def test_analysis_list_from_empty_dict():
with pytest.raises(ValidationError):
AnalysisStatusResponse.from_dict({})
def test_analysis_list_to_dict():
d = testdata.ANALYSIS_STATUS_RESPONSE_OBJECT.to_dict()
assert d == testdata.ANALYSIS_STATUS_RESPONSE_DICT
def test_analysis_list_to_json():
json_str = testdata.ANALYSIS_STATUS_RESPONSE_OBJECT.to_json()
assert json.loads(json_str) == testdata.ANALYSIS_STATUS_RESPONSE_DICT
| 34.872727 | 84 | 0.792492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.062565 |