max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
franki/exceptions.py | cr0hn/franki | 1 | 12762851 | <reponame>cr0hn/franki
class FrankiException(Exception):
    """Base class for all franki-specific errors.

    Catching this type catches every exception raised by the package.
    """
    pass


class FrankiInvalidFormatException(FrankiException):
    """Raised when input data does not match the expected format."""
    pass


class FrankiFileNotFound(FrankiException):
    """Raised when a required file cannot be located."""
    pass


class FrankiInvalidFileFormat(FrankiException):
    """Raised when a file exists but its format is not supported."""
    pass


# BUGFIX: the three specific exceptions previously derived directly from
# Exception, so `except FrankiException` could not catch them.  They now
# form a proper hierarchy rooted at FrankiException (still backward
# compatible: all remain Exception subclasses).
__all__ = ("FrankiInvalidFormatException", "FrankiFileNotFound",
           "FrankiInvalidFileFormat", "FrankiException")
| 1.710938 | 2 |
baselines/mend/efk_hparams.py | kmeng01/rome | 38 | 12762852 | <filename>baselines/mend/efk_hparams.py
from util.hparams import HyperParams
class EFKHyperParams(HyperParams):
    """Hyper-parameter container for the EFK baseline."""
    # Names of the hyper-parameters this config accepts; presumably the
    # HyperParams base class uses this list when loading/validating a
    # config file -- TODO confirm against util.hparams.
    KEYS = ["lr_scale", "n_toks", "model_name", "counterfact", "zsre"]
| 1.414063 | 1 |
auth.py | zendikit/wantan | 7 | 12762853 | import getpass
import keyring
KEYRING_SERVICE = "wantan"
def setup_args(args):
    """Register this sub-command's arguments on the given parser group."""
    # Exactly one of --dump / --store must be chosen.
    verbs = args.add_mutually_exclusive_group(required=True)
    verbs.add_argument(
        "--dump",
        action="store_true",
        help="Dump the API key associated with a username",
    )
    verbs.add_argument(
        "--store",
        action="store_true",
        help="Store an API key associated with a username",
    )
    args.add_argument("--key", help="The API key to store")
    args.add_argument("user", help="The username associated with the API key")
    # Dispatch to this module's main() when the sub-command is selected.
    args.set_defaults(func=main)
def get_key(user):
    """Return the API key stored for *user* (None if keyring has no entry)."""
    return keyring.get_password(KEYRING_SERVICE, user)
def main(args):
    """Dump or store the API key for ``args.user``, per the chosen verb."""
    if args.dump:
        print(get_key(args.user))
    elif args.store:
        # Fall back to an interactive prompt when --key was not given.
        key = args.key or getpass.getpass(prompt="API key: ")
        keyring.set_password(KEYRING_SERVICE, args.user, key)
| 2.78125 | 3 |
ffmpegNormaIUse.py | boollfd/forSelfUse | 0 | 12762854 | # -*-coding = utf-8 -*-
import json
import os
import subprocess
if __name__ == "__main__":
    # Merge downloaded bilibili cache segments (video.m4s + audio.m4s) into
    # .mp4 files using ffmpeg.  Fill in the three paths below before running.
    whereFfmpeg = ""   # path to the ffmpeg executable
    toDir = ""         # destination root for the merged .mp4 files
    beginPath = ""     # root of the download cache (one sub-dir per show)
    videoName = "video.m4s"
    audioName = "audio.m4s"
    for path in os.listdir(beginPath):
        secondPath = os.path.join(beginPath, path)
        dirName = os.path.join(toDir, path)
        if not os.path.exists(dirName):
            os.mkdir(dirName)
        for i in os.listdir(secondPath):
            thirdPath = os.path.join(secondPath, i)
            # entry.json carries the human-readable episode title.
            with open(os.path.join(thirdPath, "entry.json"), encoding="utf-8") as file:
                finalName = json.load(file)["page_data"]["download_subtitle"].replace(" ", "")
            # The media segments live in the first sub-directory found.
            finalPath = ""
            for j in os.listdir(thirdPath):
                finalPath = os.path.join(thirdPath, j)
                if os.path.isdir(finalPath):
                    break
            # BUGFIX: build the command as an argument list instead of a
            # single formatted string -- the old form broke (and was shell
            # injectable) whenever a path contained spaces or quotes.
            cmd = [
                whereFfmpeg,
                "-i", os.path.join(finalPath, videoName),
                "-i", os.path.join(finalPath, audioName),
                "-c:v", "copy", "-c:a", "aac", "-strict", "experimental",
                os.path.join(dirName, finalName + ".mp4"),
            ]
            print(cmd)
            subprocess.run(cmd)
| 2.28125 | 2 |
generate_sv_parameters.py | EngRaff92/RDL_REG_GEN | 2 | 12762855 | #!/opt/homebrew/bin/python3.9
##
## Icebreaker and IceSugar RSMB5 project - RV32I for Lattice iCE40
## With complete open-source toolchain flow using:
## -> yosys
## -> icarus verilog
## -> icestorm project
##
## Tests are written in several languages
## -> Systemverilog Pure Testbench (Vivado)
## -> UVM testbench (Vivado)
## -> PyUvm (Icarus)
## -> Formal either using SVA and PSL (Vivado) or cuncurrent assertions with Yosys
##
## Copyright (c) 2021 <NAME> (<EMAIL>)
##
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included
## in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
## CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
## TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
## SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
############################################################################
#### import main packages
############################################################################
import json as j
import pandas as pd
import sys
import template as temp
from string import Template
############################################################################
############################################################################
#### Classes and functions
############################################################################
regfile_type = "regfile"
memory_type = "memory"
# Use to open the JSON file and get the dictionary back
def parse_json() -> dict:
data = {}
with open("./output_all/reg.json", "r") as f:
data = j.load(f)
f.close()
return data
def gen_lists_and_csv(data):
name = []
t_reg = []
address = []
sub_data = data['children']
sw_rd_mask = []
hw_rd_mask = []
sw_wr_mask = []
hw_wr_mask = []
reset_p = []
res = {}
res2 = {}
global is_regfile
global is_memory
for reg in sub_data:
# Check the register aggregation type
if reg['type'] == regfile_type:
is_regfile = True
is_memory = False
elif reg['type'] == memory_type:
is_regfile = False
is_memory = True
# according to the result we create the parameters
t_reg.append(reg['type'])
## check if Memory so that we can print the start and end
if ((not is_regfile) & is_memory):
address.append(reg['memory_adress_start'])
name.append("memory_adress_start")
else:
address.append(reg['absolute_adress'])
name.append(reg['inst_name'])
## Look Inside for children
for x in reg['children']:
t_reg.append(x['type'])
name.append(x['inst_name'])
if ((not is_memory) & is_regfile):
## Get the masks
sw_rd_mask.append(x['sw_read_mask'])
hw_rd_mask.append(x['hw_read_mask'])
sw_wr_mask.append(x['sw_write_mask'])
hw_wr_mask.append(x['hw_write_mask'])
reset_p.append(x['global_reset_value'])
if (x['type'] != "field"):
address.append(x['address_offset'])
if ((not is_regfile) & is_memory):
t_reg.append(memory_type)
name.append("memory_adress_end")
address.append(reg['memory_adress_end'])
## Generate the final dicationary
res = dict(zip(name, address))
res2 = dict(zip(name, t_reg))
rest_dict = dict(zip(name, reset_p))
hwwr_dict = dict(zip(name, hw_wr_mask))
hwrd_dict = dict(zip(name, hw_rd_mask))
swwr_dict = dict(zip(name, sw_wr_mask))
swrd_dict = dict(zip(name, sw_rd_mask))
df = pd.DataFrame(data={"TYPE": t_reg, "NAME": name, "ADDRESS": address})
with open ('./output_all/reg.csv', 'x') as f:
df.to_csv("./output_all/reg.csv", sep=',',index=False)
f.close()
t = Template(temp.param_template+'\n')
d = Template(temp.define_template+'\n')
p = Template(temp.python_const_template+'\n')
with open('./output_all/reg_param.svh', 'x') as f:
## Fristly write the header
f.write(temp.header)
## Start with Params
for x in res.keys():
if res2[x] == regfile_type:
a=t.substitute({'name' : "{}_{}".format(res2[x],x), 'value' : res[x].replace('0x',"32'h")})
elif res2[x] == memory_type:
a=t.substitute({'name' : "{}".format(x), 'value' : res[x].replace('0x',"32'h")})
else:
a=t.substitute({'name' : "register_{}".format(x), 'value' : res[x].replace('0x',"32'h")})
f.write(a)
## Start with Defines
for x in res.keys():
if res2[x] == regfile_type:
b=d.substitute({'name' : "{}_{}".format(res2[x],x), 'value' : res[x].replace('0x',"32'h")})
elif res2[x] == memory_type:
b=d.substitute({'name' : "{}".format(x), 'value' : res[x].replace('0x',"32'h")})
else:
b=d.substitute({'name' : "register_{}".format(x), 'value' : res[x].replace('0x',"32'h")})
f.write(b)
## Start for the Mask
for x in hwwr_dict.keys():
b=d.substitute({'name' : "mask_hwwr_{}".format(x), 'value' : hwwr_dict[x].replace('0x',"32'h")})
f.write(b)
for x in hwrd_dict.keys():
b=d.substitute({'name' : "mask_hwrd_{}".format(x), 'value' : hwrd_dict[x].replace('0x',"32'h")})
f.write(b)
for x in swwr_dict.keys():
b=d.substitute({'name' : "mask_swwr_{}".format(x), 'value' : swwr_dict[x].replace('0x',"32'h")})
f.write(b)
for x in swrd_dict.keys():
b=d.substitute({'name' : "mask_swrd_{}".format(x), 'value' : swrd_dict[x].replace('0x',"32'h")})
f.write(b)
## Start for Resert
for x in rest_dict.keys():
b=d.substitute({'name' : "{}_POR_VALUE".format(x), 'value' : rest_dict[x].replace('0x',"32'h")})
f.write(b)
f.close()
with open('./output_all/reg_python_const.py', 'x') as f:
## Fristly write the header
f.write(temp.header_python)
for x in res.keys():
if res2[x] == regfile_type:
c=p.substitute({'name' : "{}_{}".format(res2[x],x), 'value' : res[x]})
elif res2[x] == memory_type:
c=p.substitute({'name' : "{}".format(x), 'value' : res[x]})
else:
c=p.substitute({'name' : "register_{}".format(x), 'value' : res[x]})
f.write(c)
## Start for the Mask
for x in hwwr_dict.keys():
c=p.substitute({'name' : "mask_hwwr_{}".format(x), 'value' : hwwr_dict[x]})
f.write(c)
for x in hwrd_dict.keys():
c=p.substitute({'name' : "mask_hwrd_{}".format(x), 'value' : hwrd_dict[x]})
f.write(c)
for x in swwr_dict.keys():
c=p.substitute({'name' : "mask_swwr_{}".format(x), 'value' : swwr_dict[x]})
f.write(c)
for x in swrd_dict.keys():
c=p.substitute({'name' : "mask_swrd_{}".format(x), 'value' : swrd_dict[x]})
f.write(c)
## Start for Resert
for x in rest_dict.keys():
c=p.substitute({'name' : "{}_POR_VALUE".format(x), 'value' : rest_dict[x]})
f.write(c)
f.close()
def main():
data_f = parse_json()
gen_lists_and_csv(data_f)
if __name__ == '__main__':
main()
| 1.335938 | 1 |
Sessions/S05/json_from_api.py | paulbordea/session3 | 0 | 12762856 | import json
import requests
# POST: create a fake post and verify the API acknowledges it with 201.
new_post = {'userId': 6622, 'title': 'Something', 'body': 'some body'}
post_response = requests.post(
    'https://jsonplaceholder.typicode.com/posts',
    data=json.dumps(new_post),
)
assert post_response.status_code == 201, f'POST: Received status code {post_response.status_code}'
print(post_response.text)

# GET: fetch the todo list and decode the JSON payload.
todos_response = requests.get('https://jsonplaceholder.typicode.com/todos')
assert todos_response.status_code == 200, f'GET: Received status code {todos_response.status_code}'
todos = json.loads(todos_response.text)
print(todos)
print(type(todos))
| 2.859375 | 3 |
tests/test_CatalogQuery.py | AmpelProject/extcats | 2 | 12762857 | <filename>tests/test_CatalogQuery.py
import pytest
from extcats.CatalogQuery import CatalogQuery
@pytest.fixture
def catq(milliquas, mongo_client):
    """CatalogQuery bound to the 'milliquas' test catalog."""
    # `milliquas` is requested only for its side effect of populating the
    # test database -- TODO confirm against conftest.
    return CatalogQuery("milliquas", dbclient=mongo_client)
# Run each decorated test once per spatial-index strategy.
with_index_methods = pytest.mark.parametrize(
    "method",
    ["2dsphere", "healpix"],
)
@with_index_methods
def test_findwithin(catq, method):
    # Exactly one known source within radius 10 of this position; none at
    # the other (south-pole) position.
    assert len(catq.findwithin(185.453, -89.241, 10, method=method)) == 1
    assert catq.findwithin(0, -90, 10, method=method) is None
@with_index_methods
def test_findclosest(catq, method):
    match, dist = catq.findclosest(185.453, -89.241, 10, method=method)
    assert dist == pytest.approx(3.5758, abs=1e-4)
    # NOTE(review): this line looks copy-pasted from test_findwithin --
    # the negative case probably meant to call findclosest (and pass
    # method=); confirm the intended behaviour before changing.
    assert catq.findwithin(0, -90, 10) is None
@with_index_methods
def test_binarysearch(catq, method):
    # 'binaryserach' mirrors the (misspelled) method name in extcats'
    # CatalogQuery API -- do not "fix" the spelling here alone.
    assert catq.binaryserach(185.453, -89.241, 10, method=method) is True
    assert catq.binaryserach(0, -90, 10, method=method) is False
| 2.421875 | 2 |
scripts/Qubit/Analysis/histogram.py | sourav-majumder/qtlab | 0 | 12762858 | <reponame>sourav-majumder/qtlab<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# Sweep over drive powers; each power level has a saved two-channel scope
# waveform (';'-separated CSV) that is loaded here.  The exploratory
# scatter/2-D-histogram plotting that used to follow was commented out and
# has been removed; the loaded channels are currently unused.
powers = [*np.arange(8.3, 8.71, 0.01), 8.75]
base_path = r"D:\data Summary\Tanmoy\Time domain at different Power"
for power in powers:
    wfm_file = base_path + "\\RefCurve_pw-" + "%.2f" % power + ".Wfm.csv"
    ch1, ch2 = np.loadtxt(wfm_file, delimiter=";", unpack=True)
scripts/slave/recipe_modules/buildbucket/config.py | bopopescu/chromium-build | 0 | 12762859 | <reponame>bopopescu/chromium-build<gh_stars>0
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.config import config_item_context, ConfigGroup
from recipe_engine.config import Single, Static
def BaseConfig(PLATFORM='default'):
    """Schema for the buildbucket recipe module's configuration."""
    return ConfigGroup(
        # Hostname of the buildbucket service to talk to.
        buildbucket_host = Single(str, required=True),
        # Absolute path to the buildbucket CLI binary on the bot.
        buildbucket_client_path = Single(str, required=True),
        # Platform name baked in at config-creation time (e.g. 'win').
        PLATFORM = Static(str(PLATFORM))
    )
config_ctx = config_item_context(BaseConfig)
@config_ctx(is_root=True)
def BASE(c):
    """Root config: select the platform-specific client binary location."""
    win_path = 'C:\\infra-tools\\buildbucket.exe'
    posix_path = '/opt/infra-tools/buildbucket'
    c.buildbucket_client_path = win_path if c.PLATFORM == 'win' else posix_path
@config_ctx(group='host')
def production_buildbucket(c):
    # Production instance -- the one real builds should use.
    c.buildbucket_host = 'cr-buildbucket.appspot.com'
@config_ctx(group='host')
def test_buildbucket(c):
    # Staging/test instance.
    c.buildbucket_host = 'cr-buildbucket-test.appspot.com'
@config_ctx(group='host')
def dev_buildbucket(c):
    # Development instance.
    c.buildbucket_host = 'cr-buildbucket-dev.appspot.com'
| 1.835938 | 2 |
openstack/network/v2/port_forwarding.py | horion/openstacksdk | 99 | 12762860 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
class PortForwarding(resource.Resource):
    """A port forwarding rule attached to a floating IP (Neutron API)."""
    name_attribute = "floating_ip_port_forwarding"
    resource_name = "port forwarding"
    resource_key = 'port_forwarding'
    resources_key = 'port_forwardings'
    # Nested under the owning floating IP in the REST URL.
    base_path = '/floatingips/%(floatingip_id)s/port_forwardings'
    _allow_unknown_attrs_in_body = True
    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True
    # Server-side filters accepted when listing.
    _query_mapping = resource.QueryParameters(
        'internal_port_id', 'external_port', 'protocol'
    )
    # Properties
    #: The ID of Floating IP address
    floatingip_id = resource.URI('floatingip_id')
    #: The ID of internal port
    internal_port_id = resource.Body('internal_port_id')
    #: The internal IP address
    internal_ip_address = resource.Body('internal_ip_address')
    #: The internal TCP/UDP/other port number
    internal_port = resource.Body('internal_port', type=int)
    #: The external TCP/UDP/other port number
    external_port = resource.Body('external_port', type=int)
    #: The protocol
    protocol = resource.Body('protocol')
    #: The description
    description = resource.Body('description')
| 2.09375 | 2 |
disassembler.py | mkurzmann/soldec | 6 | 12762861 | <reponame>mkurzmann/soldec
from bytecodeblock import *
from ceptions import *
from opcodes import *
from bytecodes import *
from itertools import groupby
import sys, re
class Disassembler(object):
    """Disassembles an EVM bytecode hex string.

    Pipeline (run by the constructor):
      1. locate the swarm-hash / metadata trailer,
      2. decode the hex string into raw bytes and ByteCode objects,
      3. group the bytecodes into basic blocks,
      4. fold INVALID/REVERT successor blocks into assert/require markers.
    """

    def __init__(self, binary):
        """Parse *binary*, a hex string without the ``0x`` prefix.

        Raises InputError on empty input or non-hex characters.
        """
        if len(binary) == 0:
            raise InputError("empty hex string")
        # Trailing pad byte; presumably guards decoding when the final PUSH
        # operand is truncated -- TODO confirm.
        binary += "00"
        self.__detect_swarm_hash(binary)
        self.raw_bytes = list()
        for i in range(0, len(binary), 2):
            try:
                byte = int(binary[i:i + 2], 16)
            except ValueError:
                raise InputError("illegal hex character")
            self.raw_bytes.append(byte)
        self.bytecodes = dict()
        self.__decode_bytecodes()
        self.__block_count = 0
        self.__basic_blocks = dict()
        self.__addresses = dict()
        self.jump_dests = dict()
        self.__create_basic_blocks()
        self.__simplify_assertions()

    def __detect_swarm_hash(self, binary):
        """Set ``self.swarm_hash_address`` to the byte offset of the metadata trailer."""
        binary_length = len(binary)
        if binary_length % 2 != 0:
            raise InputError("odd length binary")
        # 0xa1 0x65 'b' 'z' 'z' 'r' '0'
        swarm_pattern = re.compile("a165627a7a7230")
        match = re.search(swarm_pattern, binary)
        if not match:
            # BUGFIX: use integer division.  The old ``binary_length / 2``
            # produced a float under Python 3, which later crashes the list
            # slicing in get_swarm_hash_bytes().
            self.swarm_hash_address = binary_length // 2
        else:
            self.swarm_hash_address = int(match.start() / 2)

    def __decode_data(self, begin, end):
        """Read raw bytes [begin, end) as one big-endian integer (0 on failure)."""
        data = self.raw_bytes[begin: end]
        data = [hex(d)[2:] for d in data]
        data = [d.zfill(2) for d in data]
        data = "".join(data)
        try:
            return int(data, 16)
        except ValueError:
            return 0

    def __decode_bytecodes(self):
        """Decode every byte before the swarm hash into a ByteCode object."""
        address = 0
        while address < self.swarm_hash_address:
            raw_byte = self.raw_bytes[address]
            if raw_byte in opcodes:
                opcode = opcodes[raw_byte]
            else:
                # Unknown byte: keep the raw value so it stays traceable.
                opcode = raw_byte
            bytecode = self.decode_bytecode(opcode, address, raw_byte)
            self.bytecodes[address] = bytecode
            if opcode in push_ops:
                # PUSH operands are inline data: attach them, then skip over.
                gamma = actions[opcode][-1]
                data = self.__decode_data(address + 1, address + gamma + 1)
                bytecode.set_dependency(0, data)
                address += gamma
            address += 1

    @staticmethod
    def decode_bytecode(opcode, address, raw_byte):
        """Create the ByteCode subclass matching *opcode*'s arity."""
        if opcode in push_ops:
            bytecode = PushByteCode(opcode, raw_byte, address)
        elif opcode in bin_ops:
            bytecode = BinOpByteCode(opcode, raw_byte, address)
        elif opcode in mono_ops:
            bytecode = MonoOpByteCode(opcode, raw_byte, address)
        else:
            bytecode = ByteCode(opcode, raw_byte, address)
        return bytecode

    def __create_basic_blocks(self):
        """Partition bytecodes into basic blocks.

        A new block starts at every JUMPDEST and right after every exit or
        jump instruction.
        """
        header_addresses, split = set(), False
        for address in sorted(self.bytecodes):
            bytecode = self.bytecodes[address]
            if bytecode.is_jumpdest() or split:
                header_addresses.add(address)
                split = False
            if bytecode.opcode in exit_ops \
                    or bytecode.opcode in jump_ops:
                split = True
        basic_block = BytecodeBlock(self.__block_count)
        for address in sorted(self.bytecodes):
            bytecode = self.bytecodes[address]
            if address in header_addresses and address != 0:
                self.__basic_blocks[basic_block.get_id()] = basic_block
                self.__block_count += 1
                basic_block = BytecodeBlock(self.__block_count)
            if bytecode.is_jumpdest():
                self.jump_dests[address] = basic_block.get_id()
            basic_block.append(bytecode)
            self.__addresses[address] = basic_block.get_id()
        self.__basic_blocks[basic_block.get_id()] = basic_block

    def __simplify_assertions(self):
        """Fold INVALID/REVERT fall-through blocks into assert/require markers."""
        block_ids = sorted(self.__basic_blocks.keys())
        for i in range(len(block_ids) - 1):
            id_0, id_1 = block_ids[i:i + 2]
            block_0 = self.__basic_blocks[id_0]
            block_1 = self.__basic_blocks[id_1]
            address = block_0.get_jumpi_address()
            # if the next block is INVALID, insert an assert-statement
            if address is not None and block_1.is_invalid_block():
                block_0.insert_assert()
            # if the next block is REVERT, insert a require-statement
            if address is not None and block_1.is_revert_block():
                block_0.insert_require()

    def debug_bytecodes(self):
        """Print every basic block (debugging aid)."""
        for block_id in self.__basic_blocks:
            basic_block = self.__basic_blocks[block_id]
            basic_block.debug_block()

    def get_raw_bytes(self, b=0, e=-1):
        """Return raw bytes ``b..e`` (the whole tail when ``e == -1``)."""
        if e == -1:
            return self.raw_bytes[b::]
        return self.raw_bytes[b:e]

    def get_swarm_hash_bytes(self):
        """Return the raw bytes of the metadata (swarm hash) trailer."""
        return self.raw_bytes[self.swarm_hash_address:]

    def get_blocks(self):
        """Return the ``block id -> BytecodeBlock`` mapping."""
        return self.__basic_blocks

    def get_opcode_bytes(self):
        """Return the decoded instructions' opcode bytes as one hex string."""
        opcode_bytes = list()
        for address in sorted(self.bytecodes):
            instruction = self.bytecodes[address]
            opcode_byte = instruction.raw_byte
            opcode_byte = hex(opcode_byte)[2:].zfill(2)
            opcode_bytes.append(opcode_byte)
        opcode_bytes = "".join(opcode_bytes)
        return opcode_bytes

    def get_block_trace(self, program_counters):
        """Collapse a program-counter trace into the sequence of visited block ids."""
        block_trace = [self.__addresses[pc] for pc in program_counters]
        block_trace = [x[0] for x in groupby(block_trace)]
        return block_trace
if __name__ == "__main__":
    # Usage: disassembler.py <file whose first line is a hex bytecode string>
    with open(sys.argv[1]) as source:
        hex_line = source.readline().strip()
    Disassembler(hex_line).debug_bytecodes()
| 2.75 | 3 |
gen3/cli/dict.py | fantix/gen3 | 0 | 12762862 | import glob
import json
import os
import aiohttp
from ruamel.yaml import YAML
from . import with_loop, log
from ..dictionary import loader
@with_loop
async def main(args, loop):
    """Load a data dictionary and hand it to the dictionary loader.

    ``args.url`` selects the source:
      * http(s) URL      -> download the dictionary JSON,
      * existing file    -> read it as JSON,
      * anything else    -> treat it as a directory of ``*.yaml`` schemas.
    """
    if args.url.startswith("http"):
        log.critical("Downloading dictionary JSON...")
        async with aiohttp.ClientSession(loop=loop) as session:
            async with session.get(args.url) as resp:
                data = await resp.json()
    elif os.path.isfile(args.url):
        log.critical("Reading dictionary JSON...")
        with open(args.url) as f:
            data = json.load(f)
    else:
        log.critical("Reading dictionary YAML source...")
        data = {}
        yaml = YAML(typ="safe")
        # Some schema files repeat keys; don't let ruamel raise on them.
        yaml.allow_duplicate_keys = True
        for path in glob.glob(f"{args.url}/*.yaml"):
            with open(path) as f:
                data[os.path.basename(path)] = yaml.load(f)
    loader.load(data)
| 2.59375 | 3 |
books_app/urls.py | srinidhibhat/booknotes | 0 | 12762863 | <reponame>srinidhibhat/booknotes
from django.urls import path
from .views import (
BookListView,
BookDetailView,
BookCreateView,
BookUpdateView,
BookDeleteView,
UserBookListView
)
from . import views
# URL routes for the books app.  Class-based views serve the CRUD pages;
# plain function views serve the static pages.
urlpatterns = [
    path('', BookListView.as_view(), name = 'books_app-home'),
    # Books filtered to a single user, looked up by username.
    path('user/<str:username>/', UserBookListView.as_view(), name = 'books_app-user_books'),
    # Detail/update/delete routes are keyed by the book's primary key.
    path('book/<int:pk>/', BookDetailView.as_view(), name = 'books_app-detail'),
    path('book/new', BookCreateView.as_view(), name = 'books_app-create'),
    path('book/<int:pk>/update', BookUpdateView.as_view(), name = 'books_app-update'),
    path('book/<int:pk>/delete', BookDeleteView.as_view(), name = 'books_app-delete'),
    path('about/', views.about, name = 'books_app-about'),
]
| 2.515625 | 3 |
scripts/heatmap/custom_idw/gdf_interpol_and_tiff_converter.py | PhilipeRLeal/xarray_case_studies | 1 | 12762864 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 14:27:05 2020
@author: ricardoguimaraes
"""
import numpy as np
import pandas as pd
import geopandas as gpd
from gdf_heatmap import gdf_heatmap
from array_to_tiff import array_to_tiff
if __name__ == '__main__':
    # Demo: interpolate a random point cloud and export it as a GeoTIFF.
    from shapely.geometry import Point
    import matplotlib.pyplot as plt

    samples = pd.DataFrame({
        'x': np.random.normal(-45, 8, size=(100)),
        'y': np.random.normal(-4, 8, size=(100)),
        'z': np.random.normal(-40, 4, size=(100)),
    })
    samples['geometry'] = samples.apply(lambda row: Point(row['x'], row['y']), axis=1)
    point_cloud = gpd.GeoDataFrame(samples)

    interpolated = gdf_heatmap(point_cloud, df_column='z',
                               dx=0.5, dy=0.5, verbose=True,
                               smooth=0.3,
                               function='gaussian')
    array_to_tiff(interpolated['array'], interpolated['x'],
                  interpolated['y'], interpolated['dx'], interpolated['dy'],
                  to_file=r'C:\Users\lealp\Downloads\Temp\My_tiff')

    input('Press any to close')
    plt.close('all')
    del interpolated
    del point_cloud
src/resources/organization.py | null-none/fhir-faker | 5 | 12762865 | <gh_stars>1-10
import random
import uuid
from common.base import Base
class Organization(Base):
    """Organization

    https://www.hl7.org/fhir/organization.html

    A formally recognized grouping of people or organizations formed for the
    purpose of achieving some form of collective action (FHIR ``Organization``
    resource).  Generates fake administrative data for one organization.

    Attributes:
        "resourceType" : "Organization",
        "identifier" : [{ Identifier }], // C? Identifies this organization across multiple systems
        "active" : <boolean>, // Whether the organization's record is still in active use
        "name" : "<string>", // C? Name used for the organization
    """

    def __init__(self, faker):
        """Init Organization Resource"""
        self.resourceType = "Organization"
        # Random hex string standing in for a FHIR Identifier.
        self.identifier = uuid.uuid4().hex
        # Coin flip for the active flag.
        self.active = bool(random.getrandbits(1))
        self.name = faker.company()

    def attributes(self):
        """Returns attribute descriptions for this resource type."""
        # BUGFIX: resourceType previously said "Patient" (and the class
        # docstring carried the Patient description) -- copy-paste from the
        # Patient resource.  Corrected to "Organization".
        return {
            "resourceType:": "Organization",
            "identifier:": "[{ Identifier }], // C? Identifies this organization across multiple systems",
            "active:": "<boolean>, // Whether the organization's record is still in active use",
            "name:": "'<string>', // C? Name used for the organization",
        }
| 2.90625 | 3 |
nn_models.py | neuropil/sleep_ann | 1 | 12762866 | from keras.layers import Input, Dense, Dropout, LSTM
from keras.models import Model, Sequential
from keras.optimizers import SGD
from keras import regularizers
def feedforward(layer_spec=[64],num_labels=5,activ='sigmoid',
                optim='adam',loss='categorical_crossentropy',
                droprate=None,loss_weights=None,reg_weight=0.01):
    """Build and compile a dense softmax classifier over 8 input features.

    ``layer_spec`` gives the width of each hidden layer; every hidden layer
    is L1-regularised and optionally followed by dropout.
    """
    model = Sequential()
    for idx, width in enumerate(layer_spec):
        # Only the first layer declares the input shape.
        extra = {'input_shape': (8,)} if idx == 0 else {}
        model.add(Dense(width, activation=activ,
                        kernel_regularizer=regularizers.l1(reg_weight),
                        **extra))
        if droprate is not None:
            model.add(Dropout(droprate))
    model.add(Dense(num_labels, activation='softmax'))
    model.compile(optimizer=optim,
                  loss=loss,
                  loss_weights=loss_weights,
                  metrics=['categorical_accuracy'])
    return model
# TODO: Finish the function below
def basic_rnn(timesteps,output_dim=4):
    """Skeleton LSTM model over (timesteps, 8)-shaped input.

    Unfinished per the TODO above: ``output_dim`` is not used yet and the
    model is returned uncompiled.
    """
    model = Sequential()
    model.add( LSTM(10, input_shape=(timesteps,8), unroll=True, return_sequences=True) )
    model.add( Dense(10) )
    return model
tests/app/test_search_ctl.py | marcosnr/search-cli-app | 0 | 12762867 | <filename>tests/app/test_search_ctl.py
import pytest
from search_ctl import SearchApp
from models import OrganizationDAO, UserDAO, TicketDAO
@pytest.fixture(scope="function")
def app_init():
    # Fresh app per test: DAOs constructed but no data loaded.
    return SearchApp()
@pytest.fixture(scope="module")
def app():
    # Shared app with data loaded once for the whole test module.
    app = SearchApp()
    app.load_data()
    return app
def test_ctl_init(app_init):
    # A freshly constructed app wires up one DAO per entity type.
    assert type(app_init.org_dao) is OrganizationDAO
    assert type(app_init.user_dao) is UserDAO
    assert type(app_init.ticket_dao) is TicketDAO
def test_ctl_load(app):
    # After load_data() the DAOs are still the expected types.
    assert isinstance(app.org_dao, OrganizationDAO)
    assert isinstance(app.user_dao, UserDAO)
    assert isinstance(app.ticket_dao, TicketDAO)
def test_ctl_org_by_id(app):
    org_result = app.search_organisations("_id", 101)
    assert org_result.item['name'] == 'Enthaze'
    # Unknown ids are expected to raise rather than return empty results.
    with pytest.raises(Exception):
        app.search_organisations("_id", 99)
def test_ctl_user_by_id(app):
    org_result = app.search_users("_id", 1)
    # NOTE(review): '<NAME>' looks like a redacted fixture value; confirm
    # the expected user name against the test data set.
    assert org_result.item['name'] == '<NAME>'
    with pytest.raises(Exception):
        app.search_users("_id", -1)
def test_ctl_ticket_by_id(app):
    org_result = app.search_tickets("_id", "436bf9b0-1147-4c0a-8439-6f79833bff5b")
    assert org_result.item['type'] == 'incident'
    with pytest.raises(Exception):
        app.search_tickets("_id", 100)
vodscrepe/aliases.py | dawsonbooth/vodscrepe | 2 | 12762868 | import re
stages = {
"Battlefield": re.compile(r"battle", flags=re.I),
"Dream Land N64": re.compile(r"land", flags=re.I),
"Final Destination": re.compile(r"final|fd", flags=re.I),
"Fountain of Dreams": re.compile(r"fount|fod", flags=re.I),
"Yoshi's Story": re.compile(r"yoshi", flags=re.I),
"Pokemon Stadium": re.compile(r"pokemon|stadium|ps", flags=re.I),
}
characters = {
"Bowser": re.compile(r"bowser", flags=re.I),
"Capt<NAME>": re.compile(r"falcon|cf", flags=re.I),
"<NAME>": re.compile(r"donkey|kong|dk", flags=re.I),
"Dr. Mario": re.compile(r"doc|dr", flags=re.I),
"Falco": re.compile(r"falco\b", flags=re.I),
"Fox": re.compile(r"fox", flags=re.I),
"Ganondorf": re.compile(r"ganon", flags=re.I),
"Ice Climbers": re.compile(r"ic", flags=re.I),
"Jigglypuff": re.compile(r"jig|puff", flags=re.I),
"Kirby": re.compile(r"kirby", flags=re.I),
"Link": re.compile(r"(?!y)link", flags=re.I),
"Luigi": re.compile(r"luigi", flags=re.I),
"Mario": re.compile(r"(?!d)mario", flags=re.I),
"Marth": re.compile(r"marth", flags=re.I),
"Mewtwo": re.compile(r"mew", flags=re.I),
"Mr. Game & Watch": re.compile(r"game|&", flags=re.I),
"Ness": re.compile(r"ness", flags=re.I),
"Peach": re.compile(r"peach|daisy", flags=re.I),
"Pichu": re.compile(r"pichu", flags=re.I),
"Pikachu": re.compile(r"pika", flags=re.I),
"Roy": re.compile(r"roy", flags=re.I),
"Samus": re.compile(r"samus", flags=re.I),
"Sheik": re.compile(r"sh", flags=re.I),
"<NAME>": re.compile(r"y.*link", flags=re.I),
"Yoshi": re.compile(r"yoshi", flags=re.I),
"Zelda": re.compile(r"zelda", flags=re.I),
}
rounds = {
"Winners Quarters": re.compile(r"winner.*quarter|wq", flags=re.I),
"Winners Semis": re.compile(r"winner.*semi|ws", flags=re.I),
"Winners Finals": re.compile(r"winner.*final|wf", flags=re.I),
"Losers Eighths": re.compile(r"loser.*eight", flags=re.I),
"Losers Quarters": re.compile(r"loser.*quarter|lq", flags=re.I),
"Losers Semis": re.compile(r"loser.*semi|ls", flags=re.I),
"Losers Finals": re.compile(r"loser.*final|lf", flags=re.I),
"Grand Finals": re.compile(r"grand.*final|gf", flags=re.I),
}
sponsors = {
"Team Liquid": re.compile(r"liquid|tl", flags=re.I),
"Alliance": re.compile(r"\[A\]|alliance", flags=re.I),
"Counter Logic Gaming": re.compile(r"clg|counter.*logic", flags=re.I),
"Cloud 9": re.compile(r"c9|cloud", flags=re.I),
}
def guess_character(ch):
for item, prog in characters.items():
if prog.search(ch):
return item
return None
def guess_stage(s):
for item, prog in stages.items():
if prog.search(s):
return item
return None
def guess_round(r):
for item, prog in rounds.items():
if prog.search(r):
return item
return None
def guess_sponsor(s):
for item, prog in sponsors.items():
if prog.search(s):
return item
return None
| 2.25 | 2 |
pytorchrl/agent/algorithms/a2c.py | PyTorchRL/pytorchrl | 20 | 12762869 | <reponame>PyTorchRL/pytorchrl<filename>pytorchrl/agent/algorithms/a2c.py
import torch
import itertools
import torch.nn as nn
import torch.optim as optim
import pytorchrl as prl
from pytorchrl.agent.algorithms.base import Algorithm
from pytorchrl.agent.algorithms.policy_loss_addons import PolicyLossAddOn
from pytorchrl.agent.algorithms.utils import get_gradients, set_gradients
class A2C(Algorithm):
"""
Algorithm class to execute A2C, from Mnih et al. 2016 (https://arxiv.org/pdf/1602.01783.pdf).
Parameters
----------
device : torch.device
CPU or specific GPU where class computations will take place.
actor : Actor
Actor_critic class instance.
lr_v : float
Value network learning rate.
lr_pi : float
Policy network learning rate.
gamma : float
Discount factor parameter.
num_test_episodes : int
Number of episodes to complete in each test phase.
max_grad_norm : float
Gradient clipping parameter.
test_every : int
Regularity of test evaluations in actor updates.
num_test_episodes : int
Number of episodes to complete in each test phase.
policy_loss_addons : list
List of PolicyLossAddOn components adding loss terms to the algorithm policy loss.
"""
    def __init__(self,
                 device,
                 actor,
                 lr_v=1e-4,
                 lr_pi=1e-4,
                 gamma=0.99,
                 test_every=5000,
                 max_grad_norm=0.5,
                 num_test_episodes=5,
                 policy_loss_addons=[]):
        # NOTE(review): the mutable default for policy_loss_addons is never
        # mutated in place (it is rebound below), so it is currently safe,
        # but `None` + rebinding would be the conventional form.
        # ---- General algo attributes ----------------------------------------
        # Discount factor
        self._gamma = gamma
        # Number of steps collected with initial random policy
        self._start_steps = int(0)  # Default to 0 for On-policy algos
        # Times data in the buffer is re-used before data collection proceeds
        self._num_epochs = int(1)
        # Number of data samples collected between network update stages
        self._update_every = None  # Depends on storage capacity
        # Number mini batches per epoch
        self._num_mini_batch = int(1)
        # Size of update mini batches
        self._mini_batch_size = None  # Depends on storage capacity
        # Number of network updates between test evaluations
        self._test_every = int(test_every)
        # Number of episodes to complete when testing
        self._num_test_episodes = int(num_test_episodes)
        # ---- A2C-specific attributes ----------------------------------------
        self.iter = 0
        self.device = device
        self.actor = actor
        self.max_grad_norm = max_grad_norm
        assert hasattr(self.actor, "value_net1"), "A2C requires value critic (num_critics=1)"
        # ----- Optimizer -----------------------------------------------------
        # Separate optimizers (and learning rates) for policy and value nets.
        self.pi_optimizer = optim.Adam(self.actor.policy_net.parameters(), lr=lr_pi)
        self.v_optimizer = optim.Adam(self.actor.value_net1.parameters(), lr=lr_v)
        # ----- Policy Loss Addons --------------------------------------------
        # Sanity check, policy_loss_addons is a PolicyLossAddOn instance
        # or a list of PolicyLossAddOn instances
        assert isinstance(policy_loss_addons, (PolicyLossAddOn, list)),\
            "A2C policy_loss_addons parameter should be a PolicyLossAddOn instance " \
            "or a list of PolicyLossAddOn instances"
        if isinstance(policy_loss_addons, list):
            for addon in policy_loss_addons:
                assert isinstance(addon, PolicyLossAddOn), \
                    "A2C policy_loss_addons parameter should be a PolicyLossAddOn" \
                    " instance or a list of PolicyLossAddOn instances"
        else:
            # Normalize a single addon to a one-element list.
            policy_loss_addons = [policy_loss_addons]
        self.policy_loss_addons = policy_loss_addons
        for addon in self.policy_loss_addons:
            addon.setup(self.device)
@classmethod
def create_factory(cls,
lr_v=1e-4,
lr_pi=1e-4,
gamma=0.99,
test_every=5000,
max_grad_norm=0.5,
num_test_episodes=5,
policy_loss_addons=[]):
"""
Returns a function to create new A2C instances.
Parameters
----------
lr_v : float
Value network learning rate.
lr_pi : float
Policy network learning rate.
gamma : float
Discount factor parameter.
num_test_episodes : int
Number of episodes to complete in each test phase.
max_grad_norm : float
Gradient clipping parameter.
test_every : int
Regularity of test evaluations in actor updates.
num_test_episodes : int
Number of episodes to complete in each test phase.
policy_loss_addons : list
List of PolicyLossAddOn components adding loss terms to the algorithm policy loss.
Returns
-------
create_algo_instance : func
Function that creates a new A2C class instance.
algo_name : str
Name of the algorithm.
"""
def create_algo_instance(device, actor):
return cls(lr_pi=lr_pi,
lr_v=lr_v,
gamma=gamma,
device=device,
actor=actor,
test_every=test_every,
max_grad_norm=max_grad_norm,
num_test_episodes=num_test_episodes,
policy_loss_addons=policy_loss_addons)
return create_algo_instance, prl.A2C
    @property
    def gamma(self):
        """Returns discount factor gamma (read-only, set in the constructor)."""
        return self._gamma
    @property
    def start_steps(self):
        """Returns the number of steps to collect with initial random policy.

        Always 0 for this on-policy algorithm (see the constructor).
        """
        return self._start_steps
    @property
    def num_epochs(self):
        """
        Returns the number of times the whole buffer is re-used before data
        collection proceeds. Always 1 for A2C (see the constructor).
        """
        return self._num_epochs
    @property
    def update_every(self):
        """
        Returns the number of data samples collected between
        network update stages (None: depends on storage capacity).
        """
        return self._update_every
    @property
    def num_mini_batch(self):
        """
        Returns the number of mini batches per epoch.
        (Docstring fixed: it previously duplicated the num_epochs text.)
        """
        return self._num_mini_batch
    @property
    def mini_batch_size(self):
        """
        Returns the size of update mini batches
        (None: depends on storage capacity).
        """
        return self._mini_batch_size
    @property
    def test_every(self):
        """Returns the number of network updates between test evaluations."""
        return self._test_every
    @property
    def num_test_episodes(self):
        """
        Returns the number of episodes to complete when testing.
        """
        return self._num_test_episodes
def acting_step(self, obs, rhs, done, deterministic=False):
"""
A2C acting function.
Parameters
----------
obs: torch.tensor
Current world observation
rhs: torch.tensor
RNN recurrent hidden state (if policy is not a RNN, rhs will contain zeroes).
done: torch.tensor
1.0 if current obs is the last one in the episode, else 0.0.
deterministic: bool
Whether to randomly sample action from predicted distribution or take the mode.
Returns
-------
action: torch.tensor
Predicted next action.
clipped_action: torch.tensor
Predicted next action (clipped to be within action space).
rhs: torch.tensor
Policy recurrent hidden state (if policy is not a RNN, rhs will contain zeroes).
other: dict
Additional A2C predictions, value score and action log probability.
"""
with torch.no_grad():
(action, clipped_action, logp_action, rhs,
entropy_dist, dist) = self.actor.get_action(
obs, rhs, done, deterministic)
value_dict = self.actor.get_value(obs, rhs, done)
value = value_dict.get("value_net1")
rhs = value_dict.get("rhs")
other = {prl.VAL: value, prl.LOGP: logp_action}
return action, clipped_action, rhs, other
def compute_loss(self, data):
"""
Calculate A2C loss
Parameters
----------
data: dict
Data batch dict containing all required tensors to compute A2C loss.
Returns
-------
loss : torch.tensor
A2C loss.
"""
o, rhs, a, old_v = data[prl.OBS], data[prl.RHS], data[prl.ACT], data[prl.VAL]
r, d, old_logp, adv = data[prl.RET], data[prl.DONE], data[prl.LOGP], data[prl.ADV]
# Policy loss
logp, dist_entropy, dist = self.actor.evaluate_actions(o, rhs, d, a)
pi_loss = - (logp * adv).mean()
# Extend policy loss with addons
for addon in self.policy_loss_addons:
pi_loss += addon.compute_loss_term(self.actor, dist, data)
# Value loss
new_v = self.actor.get_value(o, rhs, d).get("value_net1")
value_loss = (r - new_v).pow(2).mean()
return pi_loss, value_loss
    def compute_gradients(self, batch, grads_to_cpu=True):
        """
        Compute loss and compute gradients but don't do optimization step,
        return gradients instead.

        Parameters
        ----------
        batch: dict
            data batch containing all required tensors to compute A2C loss.
        grads_to_cpu: bool
            If gradient tensor will be sent to another node, need to be in CPU.

        Returns
        -------
        grads: dict of lists of tensors
            Policy ("pi_grads") and value ("v_grads") gradients.
        info: dict
            Dict containing current A2C iteration information.
        """

        # Compute A2C losses
        action_loss, value_loss = self.compute_loss(batch)

        # Compute policy gradients.
        # retain_graph=True: presumably value_loss.backward() below traverses
        # part of the same graph -- confirm before simplifying.
        self.pi_optimizer.zero_grad()
        action_loss.backward(retain_graph=True)

        # Temporarily disable grads on the policy net so the value backward
        # pass below does not accumulate gradients into it.
        for p in self.actor.policy_net.parameters():
            p.requires_grad = False

        # Compute value gradients
        self.v_optimizer.zero_grad()
        value_loss.backward()

        # Re-enable policy gradients (order matters: must happen after the
        # value backward pass).
        for p in self.actor.policy_net.parameters():
            p.requires_grad = True

        # Clip gradients to max value
        nn.utils.clip_grad_norm_(self.actor.parameters(), self.max_grad_norm)

        pi_grads = get_gradients(self.actor.policy_net, grads_to_cpu=grads_to_cpu)
        v_grads = get_gradients(self.actor.value_net1, grads_to_cpu=grads_to_cpu)
        grads = {"pi_grads": pi_grads, "v_grads": v_grads}

        info = {
            "value_loss": value_loss.item(),
            "action_loss": action_loss.item(),
        }

        return grads, info
    def apply_gradients(self, gradients=None):
        """
        Take an optimization step, previously setting new gradients if provided.

        Parameters
        ----------
        gradients: dict of lists of tensors or None
            Dict with "pi_grads" (policy) and "v_grads" (value) gradient
            lists. If None, the gradients already stored in the networks
            (e.g. from compute_gradients) are used.
        """
        if gradients is not None:
            # Load externally computed gradients into the local networks.
            set_gradients(
                self.actor.policy_net,
                gradients=gradients["pi_grads"], device=self.device)
            set_gradients(
                self.actor.value_net1,
                gradients=gradients["v_grads"], device=self.device)

        self.pi_optimizer.step()
        self.v_optimizer.step()

        # Count this optimization step.
        self.iter += 1
    def set_weights(self, actor_weights):
        """
        Update actor with the given weights.

        Parameters
        ----------
        actor_weights: dict of tensors
            Dict containing actor weights to be set.
        """
        self.actor.load_state_dict(actor_weights)
        # A weight sync also counts as an update iteration.
        self.iter += 1
def update_algorithm_parameter(self, parameter_name, new_parameter_value):
"""
If `parameter_name` is an attribute of the algorithm, change its value
to `new_parameter_value value`.
Parameters
----------
parameter_name : str
Worker.algo attribute name
new_parameter_value : int or float
New value for `parameter_name`.
"""
if hasattr(self, parameter_name):
setattr(self, parameter_name, new_parameter_value)
if parameter_name == "lr_v":
for param_group in self.v_optimizer.param_groups:
param_group['lr'] = new_parameter_value
elif parameter_name == "lr_pi":
for param_group in self.pi_optimizer.param_groups:
param_group['lr'] = new_parameter_value
| 2.40625 | 2 |
great_url_shortener/main/views.py | theodor85/url-shortener | 0 | 12762870 | <reponame>theodor85/url-shortener
from django.http import HttpResponseRedirect
from rest_framework import viewsets, mixins
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
from .models import Url
from .serializers import UrlSerializer
from .hash_generator import generate_url_hash
class UrlListViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
    """Read-only endpoint listing all stored Url objects (GET list only)."""
    serializer_class = UrlSerializer
    queryset = Url.objects.all()
@api_view(['POST'])
def url_shortener(request):
    """Create (or look up) a short URL for the POSTed ``origin_uri``.

    Responses:
        201 -- a new mapping was created,
        200 -- the URL was already known,
        400 -- ``origin_uri`` missing from the request body.
    """
    if 'origin_uri' not in request.data:
        return Response(status=status.HTTP_400_BAD_REQUEST)
    origin_uri = request.data['origin_uri']

    url, is_created = Url.objects.get_or_create(url=origin_uri)
    if is_created:
        # Freshly stored URL: attach a hash before exposing the short form.
        url.url_hash = generate_url_hash()
        url.save()

    response_status = (status.HTTP_201_CREATED if is_created
                       else status.HTTP_200_OK)
    return Response(
        data={
            'origin_uri': origin_uri,
            'short_url': url.short_url,
        },
        status=response_status,
    )
@api_view(['GET'])
def url_redirector(request, hash_):
    """Redirect a short-URL hash to its original URL (404 if unknown)."""
    try:
        entry = Url.objects.get(url_hash=hash_)
    except Url.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    return HttpResponseRedirect(entry.url)
| 2.328125 | 2 |
flake8_declarative_dict/_checker.py | joelschutz/flake8-declarative-dict | 0 | 12762871 | import ast
from typing import Tuple
from flake8_plugin_utils import Error
from .error import DCD001, DCD002, DCD003
def _get_large_dict(node: ast.Dict, dict_size_limit: int, allow_nested_empty_dicts: bool) -> Tuple[Error, int]:
    '''Report a dict literal whose number of keys exceeds the allowed limit.'''
    # A negative limit disables the check entirely.
    if dict_size_limit < 0:
        return None, None

    dict_size = len(node.keys)
    if dict_size <= dict_size_limit:
        return None, None

    # Optionally tolerate oversized dicts whose values are ALL empty dict
    # literals (declarative skeletons).
    only_empty_dicts = all(
        isinstance(value, ast.Dict) and not value.values
        for value in node.values
    )
    if allow_nested_empty_dicts and only_empty_dicts:
        return None, None
    return DCD001, dict_size
def _get_to_many_args(node: ast.Call, args_size_limit: int) -> Tuple[Error, int]:
    '''Report a function call passing more arguments than the allowed limit.'''
    # A limit of zero or a negative value disables the check.
    if args_size_limit < 1:
        return None, None

    starred = [arg for arg in node.args if isinstance(arg, ast.Starred)]
    total_args = len(node.keywords) + len(node.args)

    too_many = (
        len(starred) > 1
        or (total_args > args_size_limit and len(node.args) > len(starred))
        or total_args - len(starred) > args_size_limit
    )
    if too_many:
        return DCD002, total_args
    return None, None
def _get_large_list(node: ast.List, list_size_limit: int, allow_nested_empty_lists: bool) -> Tuple[Error, int]:
    '''Report a list literal whose number of elements exceeds the allowed limit.'''
    # A negative limit disables the check entirely.
    if list_size_limit < 0:
        return None, None

    list_size = len(node.elts)
    if list_size <= list_size_limit:
        return None, None

    # Optionally tolerate oversized lists whose elements are ALL empty list
    # literals (declarative skeletons).
    only_empty_lists = all(
        isinstance(elt, ast.List) and not elt.elts
        for elt in node.elts
    )
    if allow_nested_empty_lists and only_empty_lists:
        return None, None
    return DCD003, list_size
| 2.578125 | 3 |
tfu/costs.py | diogo149/tfu | 3 | 12762872 | import tensorflow as tf
from . import utils
from . import base
def l2(params=None):
    """Sum of TF l2 losses (0.5 * sum of squares) over ``params``.

    When ``params`` is None, all weight variables returned by
    ``base.find_variables(weight=True)`` are used.
    """
    if params is None:
        params = base.find_variables(weight=True)
    losses = [tf.nn.l2_loss(p) for p in params]
    return utils.smart_sum(losses)
| 2.6875 | 3 |
pycqed/measurement/openql_experiments/multi_qubit_oql.py | ZW7436/PycQED_py3 | 1 | 12762873 | <reponame>ZW7436/PycQED_py3
import numpy as np
import openql.openql as ql
import pycqed.measurement.openql_experiments.openql_helpers as oqh
from pycqed.utilities.general import int2base, suppress_stdout
from os.path import join
def single_flux_pulse_seq(qubit_indices: tuple,
                          platf_cfg: str):
    """Play a single flux pulse on the edge formed by the first two qubits
    in ``qubit_indices``, after initialization and CW_00 gates on qubits 0-6.

    Args:
        qubit_indices: tuple of qubit indices; the flux pulse targets
            (qubit_indices[0], qubit_indices[1]).
        platf_cfg: filepath of the OpenQL platform config file.
    """
    p = oqh.create_program("single_flux_pulse_seq", platf_cfg)

    k = oqh.create_kernel("main", p)
    for idx in qubit_indices:
        # Triple prepz to ensure enough separation in timing.
        for _ in range(3):
            k.prepz(idx)

    for qubit in range(7):
        k.gate('CW_00', [qubit])
    # Align all channels before the flux pulse.
    k.gate("wait", [0, 1, 2, 3, 4, 5, 6], 0)
    k.gate('fl_cw_02', [qubit_indices[0], qubit_indices[1]])

    p.add_kernel(k)
    return oqh.compile(p)
def flux_staircase_seq(platf_cfg: str):
    """Staircase of all eight flux codewords on edges (2, 0) and (3, 1).

    Args:
        platf_cfg: filepath of the OpenQL platform config file.
    """
    p = oqh.create_program("flux_staircase_seq", platf_cfg)

    k = oqh.create_kernel("main", p)
    k.prepz(0)  # to ensure enough separation in timing
    k.gate('CW_00', [0])
    k.gate('CW_00', [6])

    for codeword in range(8):
        flux_gate = 'fl_cw_{:02d}'.format(codeword)
        k.gate(flux_gate, [2, 0])
        k.gate(flux_gate, [3, 1])

    k.gate("wait", [0, 1, 2, 3], 200)  # because scheduling is wrong.

    p.add_kernel(k)
    return oqh.compile(p)
def multi_qubit_off_on(qubits: list, initialize: bool,
                       second_excited_state: bool, platf_cfg: str):
    """
    Performs an 'off_on' sequence on the qubits specified.
        off: (RO) - prepz -      -     - RO
        on:  (RO) - prepz - x180 -     - RO
        2nd  (RO) - prepz - X180 - X12 - RO  (if second_excited_state == True)

    Will cycle through all combinations of off and on. Last qubit in the list
    is considered the Least Significant Qubit (LSQ).

    Args:
        qubits (list) : list of integers denoting the qubits to use
        initialize (bool): if True does an extra initial measurement to
            allow post selecting data.
        second_excited_state (bool): if True includes the 2-state in the
            combinations.
        platf_cfg (str) : filepath of OpenQL platform config file
    """
    if second_excited_state:
        base = 3
    else:
        base = 2

    # All base-N strings of length len(qubits), one kernel per combination;
    # e.g. ['00', '01', '10', '11'] for two qubits in base 2.
    combinations = [int2base(i, base=base, fixed_length=len(qubits)) for
                    i in range(base**len(qubits))]

    p = oqh.create_program("multi_qubit_off_on", platf_cfg)

    for i, comb in enumerate(combinations):
        k = oqh.create_kernel('Prep_{}'.format(comb), p)

        # 1. Prepare qubits in 0
        for q in qubits:
            k.prepz(q)

        # 2. post-selection extra init readout
        if initialize:
            for q in qubits:
                k.measure(q)
            k.gate('wait', qubits, 0)

        # 3. prepare desired state
        for state, target_qubit in zip(comb, qubits):  # N.B. last is LSQ
            if state == '0':
                pass
            elif state == '1':
                k.gate('rx180', [target_qubit])
            elif state == '2':
                k.gate('rx180', [target_qubit])
                k.gate('rx12', [target_qubit])
        # 4. measurement of all qubits
        k.gate('wait', qubits, 0)
        # Used to ensure timing is aligned
        for q in qubits:
            k.measure(q)
        k.gate('wait', qubits, 0)
        p.add_kernel(k)
    p = oqh.compile(p)
    return p
def Ramsey_msmt_induced_dephasing(qubits: list, angles: list, platf_cfg: str):
    """
    Ramsey sequence that varies azimuthal phase instead of time. Works for
    a single qubit or multiple qubits. The coherence of the LSQ (qubits[-1])
    is measured, while the whole list of qubits is measured.

    Writes output files to the directory specified in openql.
    Output directory is set as an attribute to the program for convenience.

    note: executes the measurement between gates to measure the measurement
    induced dephasing

    Input pars:
        qubits:    list specifying the targeted qubit MSQ, and the qubit
                   of which the coherence is measured LSQ.
        angles:    the list of angles for each Ramsey element; the last 4
                   entries are reserved for the calibration points added
                   below.
        platf_cfg: filename of the platform config file
    Returns:
        p:         OpenQL Program object containing
    """
    p = oqh.create_program("Ramsey_msmt_induced_dephasing", platf_cfg)

    for i, angle in enumerate(angles[:-4]):
        # Map 20-degree phase steps onto codewords cw_09, cw_10, ...
        # -- presumably matching the LutMan phase table; TODO confirm.
        cw_idx = angle//20 + 9
        k = oqh.create_kernel("Ramsey_azi_"+str(angle), p)
        for qubit in qubits:
            k.prepz(qubit)
        k.gate('rx90', [qubits[-1]])
        # Measurement deliberately happens inside the Ramsey arms.
        for qubit in qubits:
            k.measure(qubit)
        k.gate('cw_{:02}'.format(cw_idx), [qubits[-1]])
        p.add_kernel(k)
    # adding the calibration points
    oqh.add_single_qubit_cal_points(p, qubit_idx=qubits[-1])
    p = oqh.compile(p)
    return p
def echo_msmt_induced_dephasing(qubits: list, angles: list, platf_cfg: str,
                                wait_time: float=0):
    """
    Echo sequence that varies azimuthal phase instead of time. Works for
    a single qubit or multiple qubits. The coherence of the LSQ (qubits[-1])
    is measured, while the whole list of qubits is measured.

    Writes output files to the directory specified in openql.
    Output directory is set as an attribute to the program for convenience.

    note: executes the measurement between gates to measure the measurement
    induced dephasing

    Input pars:
        qubits:    list specifying the targeted qubit MSQ, and the qubit
                   of which the coherence is measured LSQ.
        angles:    the list of angles for each Ramsey element; the last 4
                   entries are reserved for the calibration points added
                   below.
        platf_cfg: filename of the platform config file
        wait_time: wait time to acount for the measurement time for the
                   second arm of the echo in s
    Returns:
        p:         OpenQL Program object containing
    """
    p = oqh.create_program('echo_msmt_induced_dephasing', platf_cfg)

    for i, angle in enumerate(angles[:-4]):
        # Map 20-degree phase steps onto codewords cw_09, cw_10, ...
        # -- presumably matching the LutMan phase table; TODO confirm.
        cw_idx = angle//20 + 9
        k = oqh.create_kernel('echo_azi_{}'.format(angle), p)
        for qubit in qubits:
            k.prepz(qubit)
        k.gate('rx90', [qubits[-1]])
        # Measurement happens in the first echo arm.
        for qubit in qubits:
            k.measure(qubit)
        k.gate('rx180', [qubits[-1]])
        # Balance the echo: idle in the second arm for the measurement time.
        k.gate("wait", [qubits[-1]], round(wait_time*1e9))
        k.gate('cw_{:02}'.format(cw_idx), [qubits[-1]])
        p.add_kernel(k)
    # adding the calibration points
    p = oqh.add_single_qubit_cal_points(p, qubit_idx=qubits[-1])
    p = oqh.compile(p)
    return p
def two_qubit_off_on(q0: int, q1: int, platf_cfg: str):
    '''
    off_on sequence on two qubits.

    # FIXME: input arg should be "qubits" as a list

    Args:
        q0, q1      (int) : target qubits for the sequence
        platf_cfg: str
    '''
    p = oqh.create_program('two_qubit_off_on', platf_cfg)
    # The four computational-basis preparations (00/01/10/11) are exactly
    # the standard two-qubit calibration points, so reuse that helper.
    p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1)
    p = oqh.compile(p)
    return p
def two_qubit_tomo_cardinal(q0: int, q1: int, cardinal: int, platf_cfg: str):
    '''
    Cardinal tomography for two qubits.

    Args:
        cardinal        (int) : index of prep gate, decoded as a base-6
                                number (q0 digit is least significant)
        q0, q1          (int) : target qubits for the sequence
        platf_cfg       (str) : filepath of OpenQL platform config file
    '''
    tomo_pulses = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']
    tomo_list_q0 = tomo_pulses
    tomo_list_q1 = tomo_pulses

    # Decode `cardinal` in base 6: least significant digit selects the q0
    # preparation pulse, the next digit selects q1's.
    prep_index_q0 = int(cardinal % len(tomo_list_q0))
    prep_index_q1 = int(((cardinal - prep_index_q0) / len(tomo_list_q0) %
                         len(tomo_list_q1)))

    prep_pulse_q0 = tomo_list_q0[prep_index_q0]
    prep_pulse_q1 = tomo_list_q1[prep_index_q1]

    p = oqh.create_program('two_qubit_tomo_cardinal', platf_cfg)

    # Tomography pulses: all 36 pre-rotation combinations.
    i = 0
    for p_q1 in tomo_list_q1:
        for p_q0 in tomo_list_q0:
            i += 1
            kernel_name = '{}_{}_{}'.format(i, p_q0, p_q1)
            k = oqh.create_kernel(kernel_name, p)
            k.prepz(q0)
            k.prepz(q1)
            k.gate(prep_pulse_q0, [q0])
            k.gate(prep_pulse_q1, [q1])
            k.gate(p_q0, [q0])
            k.gate(p_q1, [q1])
            k.measure(q0)
            k.measure(q1)
            p.add_kernel(k)
    # every calibration point is repeated 7 times. This is copied from the
    # script for Tektronix driven qubits. I do not know if this repetition
    # is important or even necessary here.
    # NOTE(review): q0/q1 are passed swapped to the cal-point helper
    # (q0=q1, q1=q0) -- confirm this is the intended channel ordering.
    p = oqh.add_two_q_cal_points(p, q0=q1, q1=q0, reps_per_cal_pt=7)
    p = oqh.compile(p)
    return p
def two_qubit_AllXY(q0: int, q1: int, platf_cfg: str,
                    sequence_type='sequential',
                    replace_q1_pulses_X180: bool=False,
                    double_points: bool=False):
    """
    AllXY sequence on two qubits.
    Has the option of replacing pulses on q1 with pi pulses

    Args:
        q0, q1         (str) : target qubits for the sequence
        sequence_type  (str) : Describes the timing/order of the pulses.
            options are: sequential | interleaved | simultaneous | sandwiched
                       q0|q0|q1|q1   q0|q1|q0|q1     q01|q01       q1|q0|q0|q1
            describes the order of the AllXY pulses
        replace_q1_pulses_X180 (bool) : if True replaces all pulses on q1 with
            X180 pulses.

        double_points (bool) : if True measures each point in the AllXY twice
    """
    p = oqh.create_program('two_qubit_AllXY', platf_cfg)

    # The 21 canonical AllXY pulse pairs.
    pulse_combinations = [['i', 'i'], ['rx180', 'rx180'], ['ry180', 'ry180'],
                          ['rx180', 'ry180'], ['ry180', 'rx180'],
                          ['rx90', 'i'], ['ry90', 'i'], ['rx90', 'ry90'],
                          ['ry90', 'rx90'], ['rx90', 'ry180'],
                          ['ry90', 'rx180'],
                          ['rx180', 'ry90'], ['ry180', 'rx90'],
                          ['rx90', 'rx180'],
                          ['rx180', 'rx90'], ['ry90', 'ry180'],
                          ['ry180', 'ry90'],
                          ['rx180', 'i'], ['ry180', 'i'], ['rx90', 'rx90'],
                          ['ry90', 'ry90']]

    # Tiled (42-entry) copy built BEFORE the optional doubling below, so the
    # q1 list is long enough when double_points doubles the q0 list
    # (zip truncates to the shorter list otherwise).
    pulse_combinations_tiled = pulse_combinations + pulse_combinations
    if double_points:
        pulse_combinations = [val for val in pulse_combinations
                              for _ in (0, 1)]

    pulse_combinations_q0 = pulse_combinations
    pulse_combinations_q1 = pulse_combinations_tiled

    if replace_q1_pulses_X180:
        pulse_combinations_q1 = [['rx180']*2 for val in pulse_combinations]

    i = 0
    for pulse_comb_q0, pulse_comb_q1 in zip(pulse_combinations_q0,
                                            pulse_combinations_q1):
        i += 1
        k = oqh.create_kernel('AllXY_{}'.format(i), p)
        k.prepz(q0)
        k.prepz(q1)
        # N.B. The identity gates are there to ensure proper timing
        if sequence_type == 'interleaved':
            k.gate(pulse_comb_q0[0], [q0])
            k.gate('i', [q1])

            k.gate('i', [q0])
            k.gate(pulse_comb_q1[0], [q1])

            k.gate(pulse_comb_q0[1], [q0])
            k.gate('i', [q1])

            k.gate('i', [q0])
            k.gate(pulse_comb_q1[1], [q1])

        elif sequence_type == 'sandwiched':
            k.gate('i', [q0])
            k.gate(pulse_comb_q1[0], [q1])

            k.gate(pulse_comb_q0[0], [q0])
            k.gate('i', [q1])
            k.gate(pulse_comb_q0[1], [q0])
            k.gate('i', [q1])

            k.gate('i', [q0])
            k.gate(pulse_comb_q1[1], [q1])

        elif sequence_type == 'sequential':
            k.gate(pulse_comb_q0[0], [q0])
            k.gate('i', [q1])
            k.gate(pulse_comb_q0[1], [q0])
            k.gate('i', [q1])
            k.gate('i', [q0])
            k.gate(pulse_comb_q1[0], [q1])
            k.gate('i', [q0])
            k.gate(pulse_comb_q1[1], [q1])

        elif sequence_type == 'simultaneous':
            k.gate(pulse_comb_q0[0], [q0])
            k.gate(pulse_comb_q1[0], [q1])
            k.gate(pulse_comb_q0[1], [q0])
            k.gate(pulse_comb_q1[1], [q1])
        else:
            raise ValueError("sequence_type {} ".format(sequence_type) +
                             "['interleaved', 'simultaneous', " +
                             "'sequential', 'sandwiched']")
        k.measure(q0)
        k.measure(q1)
        p.add_kernel(k)

    p = oqh.compile(p)
    return p
def residual_coupling_sequence(times, q0: int, q1: int, platf_cfg: str):
    """
    Sequence to measure the residual (ZZ) interaction between two qubits.
    Procedure is described in M18TR.

        (q0) --X90--(tau/2)-Y180-(tau/2)-Xm90--RO
        (q1) --X180-(tau/2)-X180-(tau/2)-------RO

    Input pars:
        times:     the list of waiting times in s for each Echo element; the
                   last 4 entries are reserved for the calibration points
                   added below.
        q0         Phase measurement is performed on q0
        q1         Excitation is put in and removed on q1
        platf_cfg: filename of the platform config file
    Returns:
        p:         OpenQL Program object containing
    """
    p = oqh.create_program("residual_coupling_sequence", platf_cfg)

    for i, time in enumerate(times[:-4]):
        k = oqh.create_kernel("residual_coupling_seq_{}".format(i), p)
        k.prepz(q0)
        k.prepz(q1)
        # Half the total evolution time: each echo arm idles for tau/2.
        wait_nanoseconds = int(round(time/1e-9/2))
        k.gate('rx90', [q0])
        k.gate('rx180', [q1])
        k.gate("wait", [q0, q1], wait_nanoseconds)
        k.gate('ry180', [q0])
        k.gate('rx180', [q1])
        k.gate("wait", [q0, q1], wait_nanoseconds)
        k.gate('rxm90', [q0])
        k.measure(q0)
        k.measure(q1)
        # Sync both qubits at the end of the kernel.
        k.gate("wait", [q0, q1], 0)
        p.add_kernel(k)

    # adding the calibration points
    p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1)
    p = oqh.compile(p)
    return p
def Cryoscope(qubit_idx: int, buffer_time1=0, buffer_time2=0,
              flux_cw: str='fl_cw_02',
              platf_cfg: str=''):
    """
    Cryoscope sequence: a Ramsey experiment around a flux pulse, measured
    in both the X and Y quadratures (one kernel each).

    Input pars:
        qubit_idx:    int specifying the target qubit (starting at 0)
        buffer_time1: idle time before the flux pulse (s)
        buffer_time2: idle time after the flux pulse (s)
        flux_cw:      name of the flux codeword to play
        platf_cfg:    filename of the platform config file
    Returns:
        p:            OpenQL Program object
    """
    p = oqh.create_program("Cryoscope", platf_cfg)

    buffer_nanoseconds1 = int(round(buffer_time1/1e-9))
    buffer_nanoseconds2 = int(round(buffer_time2/1e-9))

    # Two kernels identical up to the final recovery pulse, which selects
    # the measured quadrature (X: rx90, Y: ry90).
    for kernel_name, recovery_gate in (("RamZ_X", 'rx90'),
                                       ("RamZ_Y", 'ry90')):
        k = oqh.create_kernel(kernel_name, p)
        k.prepz(qubit_idx)
        k.gate('rx90', [qubit_idx])
        k.gate("wait", [qubit_idx], buffer_nanoseconds1)
        k.gate(flux_cw, [2, 0])
        k.gate("wait", [qubit_idx], buffer_nanoseconds2)
        k.gate(recovery_gate, [qubit_idx])
        k.measure(qubit_idx)
        p.add_kernel(k)

    return oqh.compile(p)
def CryoscopeGoogle(qubit_idx: int, buffer_time1, times, platf_cfg: str):
    """
    A Ramsey sequence with varying waiting times `times` around a flux pulse.
    Generates 2 x len(times) measurements (t1-x, t1-y, t2-x, t2-y, etc.).

    Input pars:
        qubit_idx:    int specifying the target qubit (starting at 0)
        buffer_time1: idle time before the flux pulse (s)
        times:        list of waiting times (s) after the flux pulse
        platf_cfg:    filename of the platform config file
    Returns:
        p:            OpenQL Program object
    """
    p = oqh.create_program("CryoscopeGoogle", platf_cfg)

    buffer_nanoseconds1 = int(round(buffer_time1/1e-9))

    for t in times:
        t_nanoseconds = int(round(t/1e-9))
        # For each waiting time, measure both quadratures (X: rx90, Y: ry90).
        for kernel_name, recovery_gate in (("RamZ_X", 'rx90'),
                                           ("RamZ_Y", 'ry90')):
            k = oqh.create_kernel(kernel_name, p)
            k.prepz(qubit_idx)
            k.gate('rx90', [qubit_idx])
            k.gate("wait", [qubit_idx], buffer_nanoseconds1)
            k.gate('fl_cw_02', [2, 0])
            k.gate("wait", [qubit_idx], t_nanoseconds)
            k.gate(recovery_gate, [qubit_idx])
            k.measure(qubit_idx)
            p.add_kernel(k)

    return oqh.compile(p)
def fluxed_ramsey(qubit_idx: int, wait_time: float,
                  flux_cw: str='fl_cw_02',
                  platf_cfg: str=''):
    """
    Single qubit Ramsey sequence with a flux pulse played between the two
    pi/2 pulses (X and Y recovery variants, one kernel each).

    Writes output files to the directory specified in openql.
    Output directory is set as an attribute to the program for convenience.

    Input pars:
        qubit_idx: int specifying the target qubit (starting at 0)
        wait_time: idle time between the flux pulse and the recovery
                   pulse, in seconds
        flux_cw:   name of the flux codeword to play
        platf_cfg: filename of the platform config file
    Returns:
        p:         OpenQL Program object containing
    """
    p = oqh.create_program('OpenQL_Platform', platf_cfg)

    # k.gate waits are specified as an integer number of nanoseconds,
    # consistent with the other sequences in this file.
    wait_nanoseconds = int(round(wait_time/1e-9))

    k = oqh.create_kernel("fluxed_ramsey_1", p)
    k.prepz(qubit_idx)
    # Bug fix: qubit operands must be passed as lists; the previous code
    # passed bare ints (and unpacked ints for the flux edge), which does
    # not match the k.gate call convention used everywhere else.
    k.gate('rx90', [qubit_idx])
    k.gate(flux_cw, [2, 0])
    k.gate("wait", [qubit_idx], wait_nanoseconds)
    k.gate('rx90', [qubit_idx])
    k.measure(qubit_idx)
    p.add_kernel(k)

    k = oqh.create_kernel("fluxed_ramsey_2", p)
    k.prepz(qubit_idx)
    k.gate('rx90', [qubit_idx])
    k.gate(flux_cw, [2, 0])
    k.gate("wait", [qubit_idx], wait_nanoseconds)
    k.gate('ry90', [qubit_idx])
    k.measure(qubit_idx)
    p.add_kernel(k)

    # adding the calibration points
    # add_single_qubit_cal_points(p, platf=platf, qubit_idx=qubit_idx)

    with suppress_stdout():
        p.compile()

    # attribute get's added to program to help finding the output files
    p.output_dir = ql.get_option('output_dir')
    p.filename = join(p.output_dir, p.name + '.qisa')
    return p
# FIXME: merge into the real chevron seq
def Chevron_hack(qubit_idx: int, qubit_idx_spec,
                 buffer_time, buffer_time2, platf_cfg: str):
    """
    Chevron-style sequence: pi pulse, flux pulse, pi pulse on the target
    qubit, with a pi/2 pulse on the spectator qubit.

    Input pars:
        qubit_idx:      int specifying the target qubit (starting at 0)
        qubit_idx_spec: int specifying the spectator qubit
        buffer_time:    idle time before the flux pulse (s)
        buffer_time2:   idle time after the flux pulse (s)
        platf_cfg:      filename of the platform config file
    Returns:
        p:              OpenQL Program object containing
    """
    p = oqh.create_program("Chevron_hack", platf_cfg)

    buffer_nanoseconds = int(round(buffer_time/1e-9))
    # Bug fix: this previously used buffer_time instead of buffer_time2,
    # so the post-flux buffer argument was silently ignored.
    buffer_nanoseconds2 = int(round(buffer_time2/1e-9))

    k = oqh.create_kernel("Chevron_hack", p)
    k.prepz(qubit_idx)

    k.gate('rx90', [qubit_idx_spec])
    k.gate('rx180', [qubit_idx])
    k.gate("wait", [qubit_idx], buffer_nanoseconds)
    k.gate('fl_cw_02', [2, 0])
    k.gate('wait', [qubit_idx], buffer_nanoseconds2)
    k.gate('rx180', [qubit_idx])
    k.measure(qubit_idx)
    k.measure(qubit_idx_spec)
    p.add_kernel(k)

    p = oqh.compile(p)
    return p
def Chevron(qubit_idx: int, qubit_idx_spec: int,
            buffer_time, buffer_time2, flux_cw: int, platf_cfg: str,
            target_qubit_sequence: str='ramsey'):
    """
    Chevron sequence: pi pulse on the target qubit, a flux pulse, and a
    second pi pulse, while the spectator qubit is prepared according to
    `target_qubit_sequence`.

        q0    -x180-flux-x180-RO-
        qspec --x90-----------RO-   (target_qubit_sequence='ramsey')
        qspec -x180-----------RO-   (target_qubit_sequence='excited')
        qspec -----------------RO-  (target_qubit_sequence='ground')

    Input pars:
        qubit_idx:             target qubit index (starting at 0)
        qubit_idx_spec:        spectator qubit index
        buffer_time:           idle time before the flux pulse (s)
        buffer_time2:          idle time after the flux pulse (s)
        flux_cw:               flux codeword number (2 if None)
        platf_cfg:             filename of the platform config file
        target_qubit_sequence: one of 'ramsey' | 'excited' | 'ground'
    Returns:
        p: OpenQL Program object
    """
    p = oqh.create_program("Chevron", platf_cfg)

    buffer_ns = int(round(buffer_time/1e-9))
    buffer_ns2 = int(round(buffer_time2/1e-9))
    if flux_cw is None:
        flux_cw = 2

    # Initial gate played on the spectator qubit, keyed by sequence type.
    spec_gates = {'ramsey': 'rx90', 'excited': 'rx180', 'ground': 'i'}

    k = oqh.create_kernel("Chevron", p)
    k.prepz(qubit_idx)

    if target_qubit_sequence not in spec_gates:
        raise ValueError("target_qubit_sequence not recognized")
    k.gate(spec_gates[target_qubit_sequence], [qubit_idx_spec])

    k.gate('rx180', [qubit_idx])
    if buffer_ns > 0:
        k.gate("wait", [qubit_idx], buffer_ns)
    k.gate('fl_cw_{:02}'.format(flux_cw), [2, 0])
    if buffer_ns2 > 0:
        k.gate('wait', [qubit_idx], buffer_ns2)
    k.gate('rx180', [qubit_idx])

    k.measure(qubit_idx)
    k.measure(qubit_idx_spec)
    # Align both qubits at the end of the kernel.
    k.gate("wait", [qubit_idx, qubit_idx_spec], 0)
    p.add_kernel(k)

    return oqh.compile(p)
def two_qubit_ramsey(times, qubit_idx: int, qubit_idx_spec: int,
                     platf_cfg: str, target_qubit_sequence: str='excited'):
    """
    Ramsey sequence on `qubit_idx` while a spectator qubit is prepared
    according to `target_qubit_sequence`; one kernel per idle time.

        q0    --x90-wait-x90-RO-
        qspec --x90----------RO-   (target_qubit_sequence='ramsey')
        qspec -x180----------RO-   (target_qubit_sequence='excited')
        qspec ----------------RO-  (target_qubit_sequence='ground')

    Input pars:
        times:                 list of idle times (s), one kernel each
        qubit_idx:             qubit on which the Ramsey is performed
        qubit_idx_spec:        spectator qubit index
        platf_cfg:             filename of the platform config file
        target_qubit_sequence: one of 'ramsey' | 'excited' | 'ground'
    Returns:
        p: OpenQL Program object
    """
    p = oqh.create_program("two_qubit_ramsey", platf_cfg)

    # Initial gate played on the spectator qubit, keyed by sequence type.
    spec_gates = {'ramsey': 'rx90', 'excited': 'rx180', 'ground': 'i'}

    for idle_time in times:
        k = oqh.create_kernel("two_qubit_ramsey", p)
        k.prepz(qubit_idx)

        if target_qubit_sequence not in spec_gates:
            raise ValueError('target_qubit_sequence not recognized.')
        k.gate(spec_gates[target_qubit_sequence], [qubit_idx_spec])

        k.gate('rx90', [qubit_idx])
        idle_ns = int(round(idle_time/1e-9))
        k.gate("wait", [qubit_idx, qubit_idx_spec], idle_ns)
        k.gate('i', [qubit_idx_spec])
        k.gate('rx90', [qubit_idx])

        k.measure(qubit_idx)
        k.measure(qubit_idx_spec)
        # Align both qubits at the end of the kernel.
        k.gate("wait", [qubit_idx, qubit_idx_spec], 0)
        p.add_kernel(k)

    # adding the calibration points
    oqh.add_two_q_cal_points(p, qubit_idx, qubit_idx_spec, reps_per_cal_pt=2)
    return oqh.compile(p)
def two_qubit_tomo_bell(bell_state, q0, q1,
                        platf_cfg, wait_after_flux: float=None):
    '''
    Two qubit bell state tomography.

    Args:
        bell_state      (int): index of prepared bell state
        q0, q1          (str): names of the target qubits
        wait_after_flux (float): wait time in seconds after the flux pulse
            and after-rotation, before tomographic rotations
    '''
    tomo_gates = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']

    # Choose a bell state and set the corresponding preparation pulses
    if bell_state == 0:  # |Phi_m>=|00>-|11>
        prep_pulse_q0, prep_pulse_q1 = 'ry90', 'ry90'
    elif bell_state % 10 == 1:  # |Phi_p>=|00>+|11>
        prep_pulse_q0, prep_pulse_q1 = 'rym90', 'ry90'
    elif bell_state % 10 == 2:  # |Psi_m>=|01>-|10>
        prep_pulse_q0, prep_pulse_q1 = 'ry90', 'rym90'
    elif bell_state % 10 == 3:  # |Psi_p>=|01>+|10>
        prep_pulse_q0, prep_pulse_q1 = 'rym90', 'rym90'
    else:
        raise ValueError('Bell state {} is not defined.'.format(bell_state))

    # Recovery pulse is the same for all Bell states
    after_pulse_q1 = 'rym90'

    # # Define compensation pulses
    # # FIXME: needs to be added
    # print('Warning: not using compensation pulses.')

    p = oqh.create_program("two_qubit_tomo_bell", platf_cfg)
    # All 36 combinations of tomographic pre-rotations.
    for p_q1 in tomo_gates:
        for p_q0 in tomo_gates:
            k = oqh.create_kernel(
                "BellTomo_{}{}_{}{}".format(q1, p_q1, q0, p_q0), p)
            # next experiment
            k.prepz(q0)  # to ensure enough separation in timing
            k.prepz(q1)  # to ensure enough separation in timing
            # pre-rotations
            k.gate(prep_pulse_q0, [q0])
            k.gate(prep_pulse_q1, [q1])
            # FIXME hardcoded edge because of
            # brainless "directed edge recources" in compiler
            k.gate('fl_cw_01', [2, 0])
            # after-rotations
            k.gate(after_pulse_q1, [q1])
            # possibly wait
            if wait_after_flux is not None:
                k.gate("wait", [q0,q1], round(wait_after_flux*1e9))
            # tomo pulses
            # NOTE(review): p_q0 is applied to q1 and p_q1 to q0 -- looks
            # swapped relative to the loop variable names; confirm the
            # intended qubit/channel mapping before changing.
            k.gate(p_q0, [q1])
            k.gate(p_q1, [q0])
            # measure
            k.measure(q0)
            k.measure(q1)
            # sync barrier before tomo
            # k.gate("wait", [q0, q1], 0)
            # k.gate("wait", [2, 0], 0)
            p.add_kernel(k)
    # 7 repetitions is because of assumptions in tomo analysis
    p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1, reps_per_cal_pt=7)
    p = oqh.compile(p)
    return p
def two_qubit_tomo_bell_by_waiting(bell_state, q0, q1,
                                   platf_cfg, wait_time: int=20):
    '''
    Two qubit (bell) state tomography. There are no flux pulses applied,
    only waiting time. It is supposed to take advantage of residual ZZ to
    generate entanglement.

    Args:
        bell_state (int): index of prepared bell state (0..3, taken mod 10
                          for indices > 9)
        q0, q1     (str): names of the target qubits
        wait_time  (int): waiting time (ns) in which residual ZZ acts
                          on qubits

    Returns:
        compiled OpenQL Program: one kernel per pair of tomographic
        rotations (6x6) followed by two-qubit calibration points.
    '''
    tomo_gates = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']

    # Choose a bell state and set the corresponding preparation pulses
    if bell_state == 0:  # |Phi_m>=|00>-|11>
        prep_pulse_q0, prep_pulse_q1 = 'ry90', 'ry90'
    elif bell_state % 10 == 1:  # |Phi_p>=|00>+|11>
        prep_pulse_q0, prep_pulse_q1 = 'rym90', 'ry90'
    elif bell_state % 10 == 2:  # |Psi_m>=|01>-|10>
        prep_pulse_q0, prep_pulse_q1 = 'ry90', 'rym90'
    elif bell_state % 10 == 3:  # |Psi_p>=|01>+|10>
        prep_pulse_q0, prep_pulse_q1 = 'rym90', 'rym90'
    else:
        raise ValueError('Bell state {} is not defined.'.format(bell_state))

    # Recovery pulse is the same for all Bell states
    after_pulse_q1 = 'rym90'

    p = oqh.create_program("two_qubit_tomo_bell_by_waiting", platf_cfg)

    for p_q1 in tomo_gates:
        for p_q0 in tomo_gates:
            k = oqh.create_kernel("BellTomo_{}{}_{}{}".format(
                q1, p_q1, q0, p_q0), p)
            # next experiment
            k.prepz(q0)  # to ensure enough separation in timing
            k.prepz(q1)  # to ensure enough separation in timing
            # pre-rotations
            k.gate(prep_pulse_q0, [q0])
            k.gate(prep_pulse_q1, [q1])
            if wait_time > 0:
                k.wait([q0, q1], wait_time)

            k.gate(after_pulse_q1, [q1])

            # tomo pulses. NOTE(review): p_q1 is applied to q0 and p_q0 to
            # q1 — same cross mapping as two_qubit_tomo_bell; confirm this
            # matches the tomography analysis convention.
            k.gate(p_q1, [q0])
            k.gate(p_q0, [q1])
            # measure
            k.measure(q0)
            k.measure(q1)
            # sync barrier before tomo
            # k.gate("wait", [q0, q1], 0)
            # hardcoded flux tuple [2, 0] barrier (openQL workaround)
            k.gate("wait", [2, 0], 0)
            p.add_kernel(k)

    # 7 repetitions is because of assumptions in tomo analysis
    p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1, reps_per_cal_pt=7)
    p = oqh.compile(p)
    return p
def two_qubit_DJ(q0, q1, platf_cfg):
    '''
    Two qubit Deutsch-Josza.

    Four kernels are generated, one per oracle: the two constant oracles
    (DJ1, DJ2) use only single-qubit rotations; the two balanced oracles
    (DJ3, DJ4) additionally trigger a hardcoded flux (CZ) pulse.

    Args:
        q0, q1    (str): names of the target qubits

    Returns:
        compiled OpenQL Program, including two-qubit calibration points.
    '''

    p = oqh.create_program("two_qubit_DJ", platf_cfg)

    # experiments
    # 1: constant oracle, no oracle gates between pre- and post-rotations
    k = oqh.create_kernel("DJ1", p)
    k.prepz(q0)  # to ensure enough separation in timing
    k.prepz(q1)  # to ensure enough separation in timing
    # prerotations
    k.gate('ry90', [q0])
    k.gate('rym90', [q1])
    # post rotations
    k.gate('ry90', [q0])
    k.gate('ry90', [q1])
    # measure
    k.measure(q0)
    k.measure(q1)
    p.add_kernel(k)

    # 2: constant oracle with a bit flip on the ancilla
    k = oqh.create_kernel("DJ2", p)
    k.prepz(q0)  # to ensure enough separation in timing
    k.prepz(q1)  # to ensure enough separation in timing
    # prerotations
    k.gate('ry90', [q0])
    k.gate('rym90', [q1])
    # rotations
    k.gate('rx180', [q1])
    # post rotations
    k.gate('ry90', [q0])
    k.gate('ry90', [q1])
    # measure
    k.measure(q0)
    k.measure(q1)
    p.add_kernel(k)

    # 3: balanced oracle built from single-qubit rotations around a CZ
    k = oqh.create_kernel("DJ3", p)
    k.prepz(q0)  # to ensure enough separation in timing
    k.prepz(q1)  # to ensure enough separation in timing
    # prerotations
    k.gate('ry90', [q0])
    k.gate('rym90', [q1])
    # rotations
    k.gate('ry90', [q1])
    k.gate('rx180', [q0])
    k.gate('rx180', [q1])

    # Hardcoded flux pulse, FIXME use actual CZ
    k.gate('wait', [2, 0], 100)
    k.gate('fl_cw_01', [2, 0])
    # FIXME hardcoded extra delays
    k.gate('wait', [2, 0], 200)

    k.gate('rx180', [q0])
    k.gate('ry90', [q1])

    # post rotations
    k.gate('ry90', [q0])
    k.gate('ry90', [q1])
    # measure
    k.measure(q0)
    k.measure(q1)
    p.add_kernel(k)

    # 4: second balanced oracle
    k = oqh.create_kernel("DJ4", p)
    k.prepz(q0)  # to ensure enough separation in timing
    k.prepz(q1)  # to ensure enough separation in timing
    # prerotations
    k.gate('ry90', [q0])
    k.gate('rym90', [q1])
    # rotations
    k.gate('rym90', [q1])
    # Hardcoded flux pulse, FIXME use actual CZ
    k.gate('wait', [2, 0], 100)
    k.gate('fl_cw_01', [2, 0])
    # FIXME hardcoded extra delays
    k.gate('wait', [2, 0], 200)

    k.gate('rx180', [q1])
    k.gate('rym90', [q1])

    # post rotations
    k.gate('ry90', [q0])
    k.gate('ry90', [q1])
    # measure
    k.measure(q0)
    k.measure(q1)
    p.add_kernel(k)

    # 7 repetitions is because of assumptions in tomo analysis
    p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1, reps_per_cal_pt=7)
    p = oqh.compile(p)
    return p
def two_qubit_repeated_parity_check(qD: int, qA: int, platf_cfg: str,
                                    number_of_repetitions: int = 10,
                                    initialization_msmt: bool=False,
                                    initial_states=[0, 1]):
    """
    Implements a circuit for repeated parity checks.

    Circuit looks as follows:

    Data    (M)|------0------- | ^N- M
               |      |        |
    Ancilla (M)|--y90-0-y90-M- | - M

    The initial "M" measurement is optional, the circuit is repated N times
    At the end both qubits are measured.

    Arguments:
        qD :        Data qubit, this is the qubit that the repeated parity
                    check will be performed on.
        qA :        Ancilla qubit, qubit that the parity will be mapped onto.
        platf_cfg:  filename of the platform config file
        number_of_repetitions: number of times to repeat the circuit
        initialization_msmt : whether to start with an initial measurement
                    to prepare the starting state.
        initial_states: computational states (0/1) in which the data qubit
                    is prepared; one kernel is generated per state.
                    NOTE(review): mutable default argument — not mutated
                    here, but callers should not rely on its identity.

    Returns:
        compiled OpenQL Program object.
    """
    p = oqh.create_program("two_qubit_repeated_parity_check", platf_cfg)

    for initial_state in initial_states:
        k = oqh.create_kernel(
            'repeated_parity_check_{}'.format(initial_state), p)
        k.prepz(qD)
        k.prepz(qA)

        if initialization_msmt:
            k.measure(qA)
            k.measure(qD)
            # long wait to let the readout resonator ring down
            k.gate('wait', [2, 0], 500)
        if initial_state == 1:
            k.gate('rx180', [qD])
        for i in range(number_of_repetitions):
            # hardcoded barrier because of openQL #104
            k.gate('wait', [2, 0], 0)
            # k.gate('wait', [qA, qD], 0)
            # parity map: basis change on ancilla around the CZ
            k.gate('ry90', [qA])
            # flux tuple hardcoded to [2, 0] (see other sequences here)
            k.gate('fl_cw_01', [2, 0])
            # k.gate('fl_cw_01', qA, qD)
            k.gate('ry90', [qA])
            k.measure(qA)

        k.measure(qD)
        # hardcoded barrier because of openQL #104
        k.gate('wait', [2, 0], 0)
        k.gate('wait', [qA, qD], 0)
        p.add_kernel(k)

    p = oqh.compile(p)
    return p
def conditional_oscillation_seq(q0: int, q1: int, platf_cfg: str,
                                CZ_disabled: bool=False,
                                angles=np.arange(0, 360, 20),
                                wait_time_between: int=0,
                                wait_time_after: int=0,
                                add_cal_points: bool=True,
                                CZ_duration: int=260,
                                nr_of_repeated_gates: int =1,
                                fixed_max_nr_of_repeated_gates: int=None,
                                cases: list=('no_excitation', 'excitation'),
                                flux_codeword: str='fl_cw_01'):
    '''
    Sequence used to calibrate flux pulses for CZ gates.

    q0 is the oscilating qubit
    q1 is the spectator qubit

    Timing of the sequence:
        q0:   --   X90  C-Phase  Rphi90   --       RO
        q1: (X180)  --     --       --   (X180)    RO

    Args:
        q0, q1      (str): names of the addressed qubits
        RO_target   (str): can be q0, q1, or 'all'
        CZ_disabled (bool): disable CZ gate (replaced by an equivalent wait)
        angles      (array): angles (deg) of the recovery pulse; must be
                             multiples of 20 (or exactly 0/90) because they
                             map onto hardcoded lutman codewords
        wait_time_between (int): wait time in ns added after each flux pulse
        wait_time_after   (int): wait time in ns after triggering all flux
                             pulses
        nr_of_repeated_gates (int): how many flux pulses per kernel
        cases       (list): which preparation cases to run; 'excitation'
                             sandwiches the sequence in X180s on q1

    Returns:
        compiled OpenQL Program; ``p.sweep_points`` holds the recovery
        angle per segment (each angle repeated len(cases) times) plus
        calibration-point indices 361..364 when add_cal_points is True.

    NOTE(review): ``fixed_max_nr_of_repeated_gates`` is accepted but unused
    in this body.
    '''
    p = oqh.create_program("conditional_oscillation_seq", platf_cfg)

    # These angles correspond to special pi/2 pulses in the lutman
    for i, angle in enumerate(angles):
        for case in cases:
            # cw_idx corresponds to special hardcoded angles in the lutman
            cw_idx = angle//20 + 9

            k = oqh.create_kernel("{}_{}".format(case, angle), p)
            k.prepz(q0)
            k.prepz(q1)
            if case == 'excitation':
                k.gate('rx180', [q1])
            k.gate('rx90', [q0])
            if not CZ_disabled:
                for j in range(nr_of_repeated_gates):
                    if wait_time_between > 0:
                        k.gate('wait', [2, 0], wait_time_between)
                    k.gate(flux_codeword, [2, 0])
            else:
                # keep the timing identical when the CZ is disabled
                for j in range(nr_of_repeated_gates):
                    k.gate('wait', [2, 0], wait_time_between + CZ_duration)
            if wait_time_after > 0:
                k.gate('wait', [2, 0], (wait_time_after))
            # hardcoded angles, must be uploaded to AWG
            if angle == 90:
                # special because the cw phase pulses go in mult of 20 deg
                k.gate('ry90', [q0])
            elif angle == 0:
                k.gate('rx90', [q0])
            else:
                k.gate('cw_{:02}'.format(cw_idx), [q0])
            if case == 'excitation':
                k.gate('rx180', [q1])

            k.measure(q0)
            k.measure(q1)
            k.gate('wait', [q1, q0], 0)
            # Implements a barrier to align timings
            # k.gate('wait', [q0, q1], 0)
            # hardcoded barrier because of openQL #104
            # k.gate('wait', [2, 0], 0)
            p.add_kernel(k)
    if add_cal_points:
        p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1)
    p = oqh.compile(p)

    if add_cal_points:
        cal_pts_idx = [361, 362, 363, 364]
    else:
        cal_pts_idx = []

    p.sweep_points = np.concatenate(
        [np.repeat(angles, len(cases)), cal_pts_idx])
    # FIXME: remove try-except, when we depend hardly on >=openql-0.6
    try:
        p.set_sweep_points(p.sweep_points)
    except TypeError:
        # openql-0.5 compatibility
        p.set_sweep_points(p.sweep_points, len(p.sweep_points))
    return p
def grovers_two_qubit_all_inputs(q0: int, q1: int, platf_cfg: str,
                                 precompiled_flux: bool=True,
                                 second_CZ_delay: int=0,
                                 CZ_duration: int=260,
                                 add_echo_pulses: bool=False,
                                 cal_points: bool=True):
    """
    Writes the OpenQL sequence for Grover's algorithm on two qubits.

    Sequence:
        q0: G0 -       - mY90 -    - mY90  - RO
                 CZ_ij          CZ
        q1: G1 -       - mY90 -    - mY90  - RO

    with all combinations of (ij) = omega.
    G0 and G1 are Y90 or mY90, depending on the (ij).

    Args:
        q0, q1 (int):
            Indices of the qubits to which the sequence is applied.
        precompiled_flux (bool):
            Determines if the full waveform for the flux pulses is
            precompiled, thus only needing one trigger at the start,
            or if every flux pulse should be triggered individually.
        second_CZ_delay (int): wait (ns) split around the optional echo
            pulses before the second CZ slot.
        CZ_duration (int): wait (ns) standing in for the second CZ.
        add_echo_pulses (bool): if True adds echo pulses before the
            second CZ gate.
        cal_points (bool):
            Whether to add calibration points.

    Returns:
        compiled OpenQL Program object.
    """
    if not precompiled_flux:
        raise NotImplementedError('Currently only precompiled flux pulses '
                                  'are supported.')

    p = oqh.create_program("grovers_two_qubit_all_inputs", platf_cfg)

    for G0 in ['ry90', 'rym90']:
        for G1 in ['ry90', 'rym90']:
            k = oqh.create_kernel('Gr{}_{}'.format(G0, G1), p)
            k.prepz(q0)
            k.prepz(q1)
            k.gate(G0, [q0])
            k.gate(G1, [q1])
            k.gate('fl_cw_03', [2, 0])  # flux cw03 is the multi_cz pulse
            k.gate('ry90', [q0])
            k.gate('ry90', [q1])
            # k.gate('fl_cw_00', 2,0)
            k.gate('wait', [2, 0], second_CZ_delay//2)
            if add_echo_pulses:
                k.gate('rx180', [q0])
                k.gate('rx180', [q1])
            k.gate('wait', [2, 0], second_CZ_delay//2)
            if add_echo_pulses:
                k.gate('rx180', [q0])
                k.gate('rx180', [q1])

            # second CZ slot is realised as a wait of the CZ duration here
            k.gate('wait', [2, 0], CZ_duration)

            k.gate('ry90', [q0])
            k.gate('ry90', [q1])
            k.measure(q0)
            k.measure(q1)
            # barrier on the hardcoded flux tuple
            k.gate('wait', [2, 0], 0)
            p.add_kernel(k)

    if cal_points:
        p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1)
    p = oqh.compile(p)
    return p
def grovers_tomography(q0: int, q1: int, omega: int, platf_cfg: str,
                       precompiled_flux: bool=True,
                       cal_points: bool=True, second_CZ_delay: int=260,
                       CZ_duration: int=260,
                       add_echo_pulses: bool=False):
    """
    Tomography sequence for Grover's algorithm.

    Args:
        omega (int): index (0..3) of the state the oracle prepares;
            selects the (G0, G1) preparation pair below.
        remaining args: as in grovers_two_qubit_all_inputs.

    Returns:
        compiled OpenQL Program: one kernel per pair of tomographic
        rotations (6x6), plus two-qubit calibration points (7 reps each,
        as assumed by the tomography analysis).
    """
    if not precompiled_flux:
        raise NotImplementedError('Currently only precompiled flux pulses '
                                  'are supported.')

    p = oqh.create_program("grovers_tomography",
                           platf_cfg)

    tomo_gates = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']

    if omega == 0:
        G0 = 'ry90'
        G1 = 'ry90'
    elif omega == 1:
        G0 = 'ry90'
        G1 = 'rym90'
    elif omega == 2:
        G0 = 'rym90'
        G1 = 'ry90'
    elif omega == 3:
        G0 = 'rym90'
        G1 = 'rym90'
    else:
        raise ValueError('omega must be in [0, 3]')

    for p_q1 in tomo_gates:
        for p_q0 in tomo_gates:
            k = oqh.create_kernel('Gr{}_{}_tomo_{}_{}'.format(
                G0, G1, p_q0, p_q1), p)
            k.prepz(q0)
            k.prepz(q1)

            # Oracle
            k.gate(G0, [q0])
            k.gate(G1, [q1])
            k.gate('fl_cw_03', [2, 0])  # flux cw03 is the multi_cz pulse
            # Grover's search
            k.gate('ry90', [q0])
            k.gate('ry90', [q1])
            # k.gate('fl_cw_00', 2[,0])
            k.gate('wait', [2, 0], second_CZ_delay//2)
            if add_echo_pulses:
                k.gate('rx180', [q0])
                k.gate('rx180', [q1])
            k.gate('wait', [2, 0], second_CZ_delay//2)
            if add_echo_pulses:
                k.gate('rx180', [q0])
                k.gate('rx180', [q1])
            k.gate('wait', [2, 0], CZ_duration)

            k.gate('ry90', [q0])
            k.gate('ry90', [q1])

            # tomo pulses. NOTE(review): p_q1 is applied to q0 and p_q0 to
            # q1 — same cross mapping as the bell tomography sequences.
            k.gate(p_q1, [q0])
            k.gate(p_q0, [q1])

            k.measure(q0)
            k.measure(q1)
            k.gate('wait', [2, 0], 0)
            p.add_kernel(k)

    p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1, reps_per_cal_pt=7)
    p = oqh.compile(p)
    return p
def CZ_poisoned_purity_seq(q0, q1, platf_cfg: str,
                           nr_of_repeated_gates: int,
                           cal_points: bool=True):
    """
    Creates the |00> + |11> Bell state and does a partial tomography in
    order to determine the purity of both qubits.

    Args:
        q0, q1: target qubit indices
        nr_of_repeated_gates: number of consecutive CZ (fl_cw_01) pulses
            applied inside the Bell-state preparation
        cal_points: whether to append |00> and |11> calibration kernels

    Returns:
        compiled OpenQL Program: one kernel per purity-tomography pulse
        ('rxm90', 'rym90', 'i'), optionally followed by the two
        calibration kernels.
    """
    p = oqh.create_program("CZ_poisoned_purity_seq",
                           platf_cfg)
    tomo_list = ['rxm90', 'rym90', 'i']

    for p_pulse in tomo_list:
        k = oqh.create_kernel("{}".format(p_pulse), p)
        k.prepz(q0)
        k.prepz(q1)

        # Create a Bell state:  |00> + |11>
        k.gate('rym90', [q0])
        k.gate('ry90', [q1])
        for i in range(nr_of_repeated_gates):
            # flux tuple hardcoded to [2, 0] as elsewhere in this file
            k.gate('fl_cw_01', [2, 0])
        k.gate('rym90', [q1])

        # Perform pulses to measure the purity of both qubits
        k.gate(p_pulse, [q0])
        k.gate(p_pulse, [q1])

        k.measure(q0)
        k.measure(q1)
        # Implements a barrier to align timings
        # k.gate('wait', [q0, q1], 0)
        # hardcoded because of openQL #104
        k.gate('wait', [2, 0], 0)

        p.add_kernel(k)
    if cal_points:
        # FIXME: replace with standard add cal points function
        k = oqh.create_kernel("Cal 00", p)
        k.prepz(q0)
        k.prepz(q1)
        k.measure(q0)
        k.measure(q1)
        k.gate('wait', [2, 0], 0)
        p.add_kernel(k)
        k = oqh.create_kernel("Cal 11", p)
        k.prepz(q0)
        k.prepz(q1)
        k.gate("rx180", [q0])
        k.gate("rx180", [q1])
        k.measure(q0)
        k.measure(q1)
        k.gate('wait', [2, 0], 0)
        p.add_kernel(k)
    p = oqh.compile(p)
    return p
def CZ_state_cycling_light(q0: str, q1: str, N: int=1):
    """Permutation over the two-qubit computational states (light version).

    The cycling unitary U is expected to act as:

        U(|00>) -> |01>    U(|01>) -> |11>
        U(|10>) -> |00>    U(|11>) -> |10>

    and this light version would run the experiment for all four input
    states.

    Args:
        q0: name of qubit q0
        q1: name of qubit q1
        N:  number of times to apply U

    Raises:
        NotImplementedError: always — the legacy QASM-file generator has
        not been ported to OpenQL yet (see repository history for the
        original qasm_file implementation).
    """
    raise NotImplementedError()
def CZ_restless_state_cycling(q0: str, q1: str, N: int=1):
    """Restless permutation over the two-qubit computational states.

    The cycling unitary U is expected to act as:

        U(|00>) -> |01>    U(|01>) -> |11>
        U(|10>) -> |00>    U(|11>) -> |10>

    Args:
        q0: name of qubit q0
        q1: name of qubit q1
        N:  number of times to apply U

    Raises:
        NotImplementedError: always — the legacy QASM-file generator has
        not been ported to OpenQL yet (see repository history for the
        original qasm_file implementation).
    """
    raise NotImplementedError()
def Chevron_first_manifold(qubit_idx: int, qubit_idx_spec: int,
                           buffer_time, buffer_time2, flux_cw: int, platf_cfg: str):
    """
    Single-excitation Chevron sequence: pi pulse, buffered flux pulse,
    then readout of both the target and the spectator qubit.

    Writes output files to the directory specified in openql.
    Output directory is set as an attribute to the program for convenience.

    Input pars:
        qubit_idx:      int specifying the target qubit (starting at 0)
        qubit_idx_spec: int specifying the spectator qubit
        buffer_time :   wait (s) between the pi pulse and the flux pulse
        buffer_time2 :  wait (s) between the flux pulse and readout
        flux_cw:        flux codeword index; defaults to 2 when None
        platf_cfg:      filename of the platform config file
    Returns:
        p:              compiled OpenQL Program object
    """
    p = oqh.create_program("Chevron_first_manifold", platf_cfg)

    # buffer times arrive in seconds; the hardware wants integer ns
    buffer_nanoseconds = int(round(buffer_time/1e-9))
    buffer_nanoseconds2 = int(round(buffer_time2/1e-9))
    if flux_cw is None:
        flux_cw = 2

    k = oqh.create_kernel("Chevron", p)
    k.prepz(qubit_idx)
    k.gate('rx180', [qubit_idx])
    k.gate("wait", [qubit_idx], buffer_nanoseconds)
    # flux tuple hardcoded to [2, 0] as elsewhere in this file
    k.gate('fl_cw_{:02}'.format(flux_cw), [2, 0])
    k.gate('wait', [qubit_idx], buffer_nanoseconds2)
    k.measure(qubit_idx)
    k.measure(qubit_idx_spec)
    k.gate("wait", [qubit_idx, qubit_idx_spec], 0)
    p.add_kernel(k)

    p = oqh.compile(p)
    return p
def partial_tomography_cardinal(q0: int, q1: int, cardinal: int, platf_cfg: str,
                                precompiled_flux: bool=True,
                                cal_points: bool=True, second_CZ_delay: int=260,
                                CZ_duration: int=260,
                                add_echo_pulses: bool=False):
    """
    Partial tomography of a two-qubit cardinal state.

    Args:
        q0, q1 (int): target qubit indices.
        cardinal (int): value in [0, 35] encoding the prepared cardinal
            state; decoded base-6 into indices into ``cardinal_gates`` for
            the two single-qubit preparation pulses.
        platf_cfg (str): filename of the platform config file.
        precompiled_flux / second_CZ_delay / CZ_duration / add_echo_pulses:
            accepted for interface compatibility; only ``precompiled_flux``
            is checked here (the others are unused in this body).
        cal_points (bool): accepted for interface compatibility (the two
            calibration points are always added).

    Returns:
        compiled OpenQL Program: one kernel per tomography-gate pair, plus
        two-qubit calibration points (2 reps each).
    """
    if not precompiled_flux:
        raise NotImplementedError('Currently only precompiled flux pulses '
                                  'are supported.')

    p = oqh.create_program("partial_tomography_cardinal",
                           platf_cfg)

    cardinal_gates = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']

    if (cardinal > 35 or cardinal < 0):
        raise ValueError('cardinal must be in [0, 35]')

    idx_p0 = cardinal % 6
    idx_p1 = ((cardinal - idx_p0)//6) % 6
    # Preparation pulses depend only on `cardinal`; hoisted out of the loop.
    SP0 = cardinal_gates[idx_p0]
    SP1 = cardinal_gates[idx_p1]

    tomo_gates = [('i', 'i'), ('i', 'rx180'), ('rx180', 'i'), ('rx180', 'rx180'),
                  ('ry90', 'ry90'), ('rym90', 'rym90'), ('rx90', 'rx90'), ('rxm90', 'rxm90')]

    for gates in tomo_gates:
        # strings denoting the tomography gates (cross mapping: gates[0]
        # goes to q0, gates[1] to q1, matching the original behavior)
        t_q0 = gates[1]
        t_q1 = gates[0]
        # Bug fix: the kernel name previously omitted the tomography gates,
        # so every kernel in this loop received the identical name
        # 'PT_{cardinal}_tomo_{idx_p0}_{idx_p1}'.
        k = oqh.create_kernel(
            'PT_{}_tomo_{}_{}'.format(cardinal, t_q1, t_q0), p)
        k.prepz(q0)
        k.prepz(q1)

        # Cardinal state preparation
        k.gate(SP0, [q0])
        k.gate(SP1, [q1])
        # tomo pulses
        k.gate(t_q1, [q0])
        k.gate(t_q0, [q1])

        k.measure(q0)
        k.measure(q1)
        # barrier on the hardcoded flux tuple (openQL workaround)
        k.gate('wait', [2, 0], 0)
        p.add_kernel(k)

    p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1, reps_per_cal_pt=2)
    p = oqh.compile(p)
    return p
def two_qubit_VQE(q0: int, q1: int, platf_cfg: str):
    """
    VQE tomography for two qubits.

    Args:
        q0, q1 (int): target qubit indices for the sequence; note that the
            calibration points are added with the qubits swapped
            (q0=q1, q1=q0), matching the downstream analysis.

    Returns:
        compiled OpenQL Program: one kernel per pair of tomography pulses
        (6x6) plus two-qubit calibration points (7 reps each).
    """
    tomo_pulses = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']
    tomo_list_q0 = tomo_pulses
    tomo_list_q1 = tomo_pulses

    p = oqh.create_program("two_qubit_VQE", platf_cfg)

    # Tomography pulses
    i = 0
    for p_q1 in tomo_list_q1:
        for p_q0 in tomo_list_q0:
            i += 1
            kernel_name = '{}_{}_{}'.format(i, p_q0, p_q1)
            k = oqh.create_kernel(kernel_name, p)
            k.prepz(q0)
            k.prepz(q1)
            k.gate('ry180', [q0])  # Y180 gate without compilation
            k.gate('i', [q0])  # Y180 gate without compilation
            k.gate("wait", [q1], 40)
            # flux tuple hardcoded to [2, 0] as elsewhere in this file
            k.gate('fl_cw_02', [2, 0])
            k.gate("wait", [q1], 40)
            k.gate(p_q0, [q0])  # compiled z gate+pre_rotation
            k.gate(p_q1, [q1])  # pre_rotation
            k.measure(q0)
            k.measure(q1)
            p.add_kernel(k)
    # every calibration point is repeated 7 times. This is copied from the
    # script for Tektronix driven qubits. I do not know if this repetition
    # is important or even necessary here.
    p = oqh.add_two_q_cal_points(p, q0=q1, q1=q0, reps_per_cal_pt=7)
    p = oqh.compile(p)
    return p
def sliding_flux_pulses_seq(
        qubits: list, platf_cfg: str,
        angles=np.arange(0, 360, 20), wait_time: int=0,
        flux_codeword_a: str='fl_cw_01', flux_codeword_b: str='fl_cw_01',
        ramsey_axis: str='x',
        add_cal_points: bool=True):
    """
    Experiment to measure the effect of flux pulses on each other.

    Timing of the sequence:
        q0:   -- flux_a -- wait -- X90 -- flux_b -- Rphi90 -- RO
        q1:   -- flux_a --      --     -- flux_b --        -- RO

    N.B. q1 only exists to satisfy flux tuples notion in CCL
    N.B.2 flux-tuples are now hardcoded to always be tuple [2,0] again
        because of OpenQL.

    All angle segments are placed in a single kernel; ``p.sweep_points``
    labels the segments (angles, then cal-point indices 361..364).

    Args:
        qubits      : list of qubits, LSQ (q0) is last entry in list
        platf_cfg   : openQL platform config
        angles      : angles (deg) along which to do recovery pulses; must
                      be multiples of 20 (or exactly 90) because they map
                      onto hardcoded lutman codewords
        wait_time   : time in ns after the first flux pulse and before the
                      first microwave pulse.
        flux_codeword_a : flux codeword of the stimulus (1st) pulse
        flux_codeword_b : flux codeword of the spectator (2nd) pulse
        ramsey_axis : chooses between doing x90 or y90 rotation at the
                      beginning of Ramsey sequence
        add_cal_points : if True adds calibration points at the end

    Returns:
        compiled OpenQL Program.
    """
    p = oqh.create_program("sliding_flux_pulses_seq", platf_cfg)
    # single kernel containing every angle segment
    k = oqh.create_kernel("sliding_flux_pulses_seq", p)
    q0 = qubits[-1]
    q1 = qubits[-2]

    for i, angle in enumerate(angles):
        # cw_idx corresponds to hardcoded phase pulses in the lutman
        cw_idx = angle//20 + 9

        k.prepz(q0)
        k.gate(flux_codeword_a, [2, 0])
        # hardcoded because of flux_tuples, [q1, q0])
        k.gate('wait', [q0, q1], wait_time)

        if ramsey_axis == 'x':
            k.gate('rx90', [q0])
        elif ramsey_axis == 'y':
            k.gate('ry90', [q0])
        else:
            raise ValueError('ramsey_axis must be "x" ot "y"')
        k.gate(flux_codeword_b, [2, 0])
        k.gate('wait', [q0, q1], 60)
        # hardcoded because of flux_tuples, [q1, q0])

        # hardcoded angles, must be uploaded to AWG
        if angle == 90:
            # special because the cw phase pulses go in mult of 20 deg
            k.gate('ry90', [q0])
        else:
            k.gate('cw_{:02}'.format(cw_idx), [q0])
        k.measure(q0)
        k.measure(q1)
        # Implements a barrier to align timings
        # k.gate('wait', [q0, q1], 0)
        # hardcoded barrier because of openQL #104
        k.gate('wait', [2, 0], 0)
    p.add_kernel(k)

    if add_cal_points:
        p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1)
    p = oqh.compile(p)

    if add_cal_points:
        cal_pts_idx = [361, 362, 363, 364]
    else:
        cal_pts_idx = []

    p.sweep_points = np.concatenate([angles, cal_pts_idx])
    # FIXME: remove try-except, when we depend hardly on >=openql-0.6
    try:
        p.set_sweep_points(p.sweep_points)
    except TypeError:
        # openql-0.5 compatibility
        p.set_sweep_points(p.sweep_points, len(p.sweep_points))
    return p
| 2.15625 | 2 |
twitter.py | mr-karan/kayako-cust-serv | 0 | 12762874 | <gh_stars>0
#!/usr/bin/env python
import tweepy
import os
from config import(consumer_key, consumer_secret,access_token,\
access_token_secret )
class TwitterAPI(object):
    '''
    Authenticate requests to Twitter API using Tweepy and fetch tweets.
    '''

    def __init__(self):
        '''
        Setup Twitter API Authentication.
        Get your keys from ``https://apps.twitter.com/app/new``
        '''
        self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        self.auth.set_access_token(access_token, access_token_secret)
        self.api = tweepy.API(self.auth)

    def fetch_tweets(self, max_id = -1):
        '''
        Fetches tweets with #custserv and RT count >=1 in a chronological order.

        Input: ``max_id`` optional parameter.
               Extract up to 100 tweets older than this ``max_id``.
        Returns ``result``: list of dicts (id, text, image, name, time)
        for every relevant tweet; empty list on API error.
        '''
        result = []
        seen_ids = set()  # tweet ids already collected in this call
        querystring = '#custserv'  # Query parameter, to search in tweets.
        max_tweets = 100  # 100 is the max Twitter API permits in one
                          # request.
        max_id = int(max_id)  # max_id is obtained from `app.py`
        try:
            if max_id <= 0:
                # If ``max_id`` isn't provided, default value = -1
                new_tweets = self.api.search(q=querystring, count=max_tweets)
            else:
                new_tweets = self.api.search(q=querystring, count=max_tweets,
                                             max_id=str(max_id - 1))
            # Iterate through all tweets and extract relevant ones.
            for tweet in new_tweets:
                # Bug fix: the previous dedup check was ``tweet not in
                # result``, which compared a tweepy Status object against
                # the dicts stored in ``result`` and therefore never
                # filtered anything. Deduplicate by tweet id instead.
                if tweet.retweet_count > 0 and tweet.id not in seen_ids:
                    seen_ids.add(tweet.id)
                    result.append(
                        {'id': tweet.id,
                         'text': tweet.text,
                         'image': tweet.user.profile_image_url_https,
                         'name': tweet.user.screen_name,
                         'time': tweet.created_at}
                    )
        except tweepy.TweepError as e:
            # best-effort: report and return whatever was collected so far
            print("Error : " + str(e))
        return result
| 3.390625 | 3 |
docs/conf.py | oAGoulart/sphinx-bulma | 0 | 12762875 | # -*- coding: utf-8 -*-
# Sphinx build configuration for the Sphinx Bulma Theme documentation.
import sys
import os
from datetime import datetime

# Make the project root and this directory importable for extensions.
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('.'))

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
]

templates_path = ['_templates']

source_suffix = '.rst'

master_doc = 'index'

project = u'Sphinx Bulma Theme'
# Copyright year tracks the build date.
year = datetime.now().year
copyright = u'%d <NAME>' % year

exclude_patterns = ['_build']

html_logo = 'static/logo.png'
html_favicon = 'static/favicon.ico'

# Theme under test: the local sphinx-bulma theme in ../src.
html_theme = 'sphinx-bulma'

# "Edit on GitHub" link configuration consumed by the theme templates.
html_theme_options = {
    'display_git': True,
    'git_host': 'github.com',
    'git_user': 'oAGoulart',
    'git_repo': 'sphinx-bulma',
    'git_version': 'master/docs/',
    'git_icon': 'github-circled',
    'git_desc': 'Edit on GitHub'
}

html_theme_path = ["../src"]
| 1.554688 | 2 |
hltv/models/match/lib.py | kiobu/hltv-py | 0 | 12762876 | from hltv.api.match.request import MatchRequest
from hltv.models.team.lib import Team
from hltv.libs.helper import *
class Match:
    """Wrapper around a single HLTV match page.

    Fetches the match page via MatchRequest and collects the header data
    plus the two participating Team objects into ``self.results``.
    """

    def __init__(self, match_id: int):
        # Parsed page body returned by MatchRequest.__call__ (presumably a
        # BeautifulSoup tree, given the find_all usage below — confirm
        # against hltv.api.match.request).
        self.body: Any = MatchRequest(match_id)()
        self.results: dict = dict()
        self.match_id = match_id

        self._canonicalize_body(self.body)

    @staticmethod
    def get(match_id: int):
        # Convenience alternate constructor mirroring Team.get.
        return Match(match_id)

    def _canonicalize_body(self, body: Any):
        # Merge the page-header fields, then attach the two teams.
        head = get_head_data(body)
        self.results.update(head)
        # NOTE(review): assumes _get_teams finds exactly two "/team/"
        # links; the unpacking raises ValueError otherwise — verify
        # against the HLTV page layout.
        self.results['team_one'], self.results['team_two'] = self._get_teams()

    def _get_teams(self):
        ret = list()

        for div in self.body.find_all("div", attrs={"class": "standard-box teamsBox"}):
            for a in div.find_all("a"):
                if "/team/" in a['href']:
                    ret.append(Team.get(get_id_from_link(a['href'])))

        return tuple(ret)
| 2.421875 | 2 |
openingangle/Calculatefunction.py | DingdingLuan/PycharmProjects | 0 | 12762877 | import numpy as np
from scipy.integrate import quad
import pandas as pd
# calculate the k-corrention in erg.s-1.cm-2:
def NE(E, Epeak, alpha, beita):
    """Band photon spectrum N(E) (unnormalised, pivot energy 100 keV).

    Below the break energy (alpha-beita)*Epeak/(2+alpha) a cutoff power
    law with index alpha is used; above it, a power law with index beita
    whose prefactor enforces continuity at the break.
    """
    e_break = (alpha - beita) * Epeak / (2 + alpha)
    if E <= e_break:
        return (E / 100) ** alpha * np.exp(-E * (2 + alpha) / Epeak)
    return (((alpha - beita) * Epeak / (100 * (2 + alpha))) ** (alpha - beita)
            * np.exp(beita - alpha) * (E / 100) ** beita)
def k(Epeak, Z, alpha, beita, bandmin, bandmax):
    """Energy-flux k-correction for a Band spectrum.

    Ratio of the rest-frame 1 keV - 10 MeV energy flux to the energy flux
    in the instrument band [bandmin, bandmax] (observer frame, keV).
    """
    bolometric, _ = quad(lambda E: E * NE(E, Epeak, alpha, beita),
                         1 / (1 + Z), 10 ** 4 / (1 + Z))
    in_band, _ = quad(lambda E: E * NE(E, Epeak, alpha, beita),
                      bandmin, bandmax)
    return bolometric / in_band
# calculate the k-corrention in photons.s-1.cm-2:
def nk(Epeak, Z, alpha, beita, bandmin, bandmax):
    """k-correction for photon-counting instruments, in erg.

    Rest-frame 1 keV - 10 MeV energy integral divided by the in-band
    photon integral; the 1.6e-9 factor converts keV to erg.
    """
    bolometric, _ = quad(lambda E: E * NE(E, Epeak, alpha, beita),
                         1 / (1 + Z), 10 ** 4 / (1 + Z))
    photons, _ = quad(lambda E: NE(E, Epeak, alpha, beita),
                      bandmin, bandmax)
    ratio = bolometric / photons
    return ratio * 1.6 * 10 ** (-9)  # transform kev to erg
# Cosmological parameters (flat LambdaCDM) used for the luminosity distance.
omegal=0.734   # dark-energy density parameter
omegam=0.266   # matter density parameter
h=0.71         # dimensionless Hubble parameter
H0=1/(3.09*10**17)   # inverse Hubble time in s^-1
H0yr=1/(9.78*10**9)  # inverse Hubble time in yr^-1
# H0=70*10**5
c=2.99792458*10**8   # speed of light in m/s
def dl(Z):
    """Luminosity distance to redshift Z in cm (flat LambdaCDM)."""
    inv_E = lambda x: 1 / np.sqrt(omegam * (1 + x) ** 3 + omegal)
    comoving_integral, _ = quad(inv_E, 0, Z)
    # dl =c/H0*integrateportion[0]
    distance_m = c * (1 + Z) / (h * H0) * comoving_integral
    return distance_m * 10 ** 2  # transform m to cm
#Calculate the opening angle
def seita(z, ep, s, alpha, beita, bandmin, bandmax):
    """Jet opening angle in degrees from energy fluence s (Band spectrum).

    Combines the isotropic-equivalent energy with the empirical
    Ep-Egamma relation used throughout this module.
    """
    e_iso = 4 * np.pi * dl(z) ** 2 * s * k(ep, z, alpha, beita, bandmin, bandmax) / (1 + z)
    e_gamma = (ep * (1 + z) / 10 ** 2.57) ** (1 / 0.61) * 3.8 * 10 ** 50
    theta_rad = np.arccos(1 - e_gamma / e_iso)
    return theta_rad / (2 * np.pi) * 360
# calculate seita for photons.s-1.cm-2
def pseita(z, ep, s, alpha, beita, bandmin, bandmax):
    """Jet opening angle in degrees from photon fluence s (Band spectrum)."""
    e_iso = 4 * np.pi * dl(z) ** 2 * s * nk(ep, z, alpha, beita, bandmin, bandmax) / (1 + z)
    e_gamma = (ep * (1 + z) / 10 ** 2.57) ** (1 / 0.61) * 3.8 * 10 ** 50
    theta_rad = np.arccos(1 - e_gamma / e_iso)
    return theta_rad / (2 * np.pi) * 360
#Calculate the Egamma
def egamma(z, ep):
    """Collimation-corrected energy (erg) from the empirical Ep-Egamma relation."""
    rest_frame_ep = ep * (1 + z)
    return (rest_frame_ep / 10 ** 2.57) ** (1 / 0.61) * 3.8 * 10 ** 50
#Calculate the Eiso
def eiso(z, ep, s, alpha, beita, bandmin, bandmax):
    """Isotropic-equivalent energy (erg) from energy fluence s with the Band k-correction."""
    return 4 * np.pi * dl(z) ** 2 * s * k(ep, z, alpha, beita, bandmin, bandmax) / (1 + z)
#Define a new spectrum calculate method @2018.6.20 [the cases only contain 'alpha']
def alphaNE(E, Epeak, alpha):
    """Cutoff power-law photon spectrum (no high-energy branch, pivot 100 keV)."""
    return (E / 100) ** alpha * np.exp(-(2 + alpha) * E / Epeak)
def alphaek(Epeak, alpha, Z, bandmin, bandmax):
    """Energy-flux k-correction for the cutoff power-law spectrum."""
    bolometric, _ = quad(lambda E: E * alphaNE(E, Epeak, alpha),
                         1 / (1 + Z), 10 ** 4 / (1 + Z))
    in_band, _ = quad(lambda E: E * alphaNE(E, Epeak, alpha),
                      bandmin, bandmax)
    return bolometric / in_band
def alphapk(Epeak, alpha, Z, bandmin, bandmax):
    """Photon-flux k-correction for the cutoff power-law spectrum, in erg."""
    bolometric, _ = quad(lambda E: E * alphaNE(E, Epeak, alpha),
                         1 / (1 + Z), 10 ** 4 / (1 + Z))
    photons, _ = quad(lambda E: alphaNE(E, Epeak, alpha),
                      bandmin, bandmax)
    ratio = bolometric / photons
    # return k
    return ratio * 1.6 * 10 ** (-9)  # transform kev to erg
def seitaerg6_20(z, ep, s, alpha, bandmin, bandmax):
    """Opening angle for energy-fluence data with a cutoff power-law spectrum.

    Returns:
        (theta_deg, z, Egamma): opening angle in degrees, the input
        redshift, and the collimation-corrected energy, so callers can
        tabulate all three at once.
    """
    e_iso = 4 * np.pi * dl(z) ** 2 * s * alphaek(ep, alpha, z, bandmin, bandmax) / (1 + z)
    e_gamma = (ep * (1 + z) / 10 ** 2.57) ** (1 / 0.61) * 3.8 * 10 ** 50
    theta_deg = np.arccos(1 - e_gamma / e_iso) / (2 * np.pi) * 360
    return theta_deg, z, e_gamma
def seitaphoton6_20(z, ep, s, alpha, bandmin, bandmax):
    """Opening angle for photon-fluence data with a cutoff power-law spectrum.

    Returns:
        (theta_deg, z, Egamma), as in seitaerg6_20.
    """
    e_iso = 4 * np.pi * dl(z) ** 2 * s * alphapk(ep, alpha, z, bandmin, bandmax) / (1 + z)
    e_gamma = (ep * (1 + z) / 10 ** 2.57) ** (1 / 0.61) * 3.8 * 10 ** 50
    theta_deg = np.arccos(1 - e_gamma / e_iso) / (2 * np.pi) * 360
    return theta_deg, z, e_gamma
#refer the 6.11 work:
def erg6_11():
    """Load the 'erg' burst sample from a local spreadsheet and compute, per
    burst, the jet opening angle (deg) and collimation-corrected energy.

    Returns:
        (seita1, ez, eegamma): opening angles, redshifts and Egamma values,
        index-aligned with the spreadsheet rows.

    NOTE(review): the spreadsheet path is hardcoded to the author's
    machine; this function only runs in that environment.
    """
    df = pd.read_excel("/Users/dingding/Desktop/calculate/6.9/erg.xlsx")
    ebandmin = df['bandmin']
    ebandmax = df['bandmax']
    egrbname = df['GRB']
    ez = df['z']
    eep = df['ep']
    ealpha = df['alpha']
    ebeta = df['beta']
    efluence = df['fluence']
    i = 0
    seita1 = []
    eegamma = []
    for i in range(len(egrbname)):
        seita1 = np.append(seita1, seita(ez[i], eep[i], efluence[i], ealpha[i], ebeta[i], ebandmin[i], ebandmax[i]))
        eegamma = np.append(eegamma, egamma(ez[i], eep[i]))
    return seita1, ez, eegamma
def photon6_11():
    """Load the 'photons' burst sample from a local spreadsheet and compute,
    per burst, the jet opening angle (deg, via the photon-fluence
    k-correction) and the collimation-corrected energy.

    Returns:
        (seita2, pz, pegamma): opening angles, redshifts and Egamma values,
        index-aligned with the spreadsheet rows.

    NOTE(review): the spreadsheet path is hardcoded to the author's
    machine; this function only runs in that environment.
    """
    dp = pd.read_excel("/Users/dingding/Desktop/calculate/6.9/photons.xlsx")
    pbandmin = dp['bandmin']
    pbandmax = dp['bandmax']
    pgrbname = dp['GRB']
    pz = dp['z']
    pep = dp['ep']
    palpha = dp['alpha']
    pbeta = dp['beta']
    pfluence = dp['fluence']
    i = 0
    seita2 = []
    pegamma = []
    for i in range(len(pgrbname)):
        seita2 = np.append(seita2, pseita(pz[i], pep[i], pfluence[i], palpha[i], pbeta[i], pbandmin[i], pbandmax[i]))
        pegamma = np.append(pegamma, egamma(pz[i], pep[i]))
    return seita2, pz, pegamma
#Calculate the Linear regression equation:
def linearregressionEQ(series1, series2):
    """Ordinary least-squares fit y = a + b*x.

    Args:
        series1: x values (sequence of numbers)
        series2: y values (same length)

    Returns:
        (a, b): intercept and slope.

    Bug fix: the original subtracted ``n * xmean * ymean`` (and
    ``n * xmean**2``) inside the per-element loop, i.e. n times in total,
    instead of once from the summed products — giving a wrong slope and
    intercept for any non-collinear data.
    """
    x = np.asarray(series1, dtype=float)
    y = np.asarray(series2, dtype=float)
    n = len(x)
    xmean = np.mean(x)
    ymean = np.mean(y)
    # standard normal-equation form: subtract the mean terms exactly once
    b = (np.sum(x * y) - n * xmean * ymean) / (np.sum(x ** 2) - n * xmean ** 2)
    a = ymean - b * xmean
    return a, b
def linearnew(series1, series2):
    """Ordinary least-squares fit y = a0 + a1*x via the normal equations.

    Returns:
        (a0, a1): intercept and slope.
    """
    x = np.asarray(series1, dtype=float)
    y = np.asarray(series2, dtype=float)
    n = len(x)

    sum_x = np.sum(x)
    sum_y = np.sum(y)
    sum_xx = np.sum(x * x)
    sum_xy = np.sum(x * y)

    # intercept: (Sxx*Sy - Sx*Sxy) / (n*Sxx - Sx^2)
    a0 = (sum_xx * sum_y - sum_x * sum_xy) / (sum_xx * n - sum_x ** 2)
    # slope: (n*Sxy - Sx*Sy) / (n*Sxx - Sx^2)
    a1 = (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x ** 2)
    return a0, a1
# 8.31
# Define a model to describe the distribution of GRB with redshift z
# define the complete gamma function:
def comGammaFunc(v):
    """Complete gamma function Gamma(v), by numerical quadrature."""
    value, _ = quad(lambda t: t ** (v - 1) * np.e ** (-t), 0, float("inf"))
    return value
#define the incomplete gamma function:
def incomGammaFunc(v, z):
    """Incomplete gamma functions at (v, z).

    Returns:
        (upper, lower): the upper incomplete gamma Gamma(v, z) and the
        lower incomplete gamma gamma(v, z), in that order.
    """
    integrand = lambda u: u ** (v - 1) * np.e ** (-u)
    lower, _ = quad(integrand, 0, z)
    upper, _ = quad(integrand, z, float('inf'))
    return upper, lower
#and define the Seitafunction:
def SeitaFunc(eps, z, alpha, beta):
    """Normalised lower incomplete gamma fraction used in the GRB rate model.

    Returns gamma(alpha+2, eps^beta * 10^(0.15*beta*z)) / Gamma(alpha+2).
    """
    cutoff = eps ** beta * 10 ** (0.15 * beta * z)
    _, lower = incomGammaFunc(alpha + 2, cutoff)
    return lower / comGammaFunc(alpha + 2)
# define the star formation rate segment function:
def RSFR(z):
    """Star-formation-rate factor: (1+z)^3.44 up to z=1, then held flat."""
    zpeak = 1
    return (1 + min(z, zpeak)) ** (3.44)
# define the grb rate function:
def RGRB(z, eps, alpha, beta, rho):
    """Comoving GRB rate: normalised SFR times the detectability fraction."""
    A = 1 / (33.30270146296203)  # precomputed normalisation constant
    return A * rho * RSFR(z) * SeitaFunc(eps, z, alpha, beta)
#define a number calculate function without duration T
def N(z, eps, alpha, beta, rho, zmax):
    """Expected number of GRBs between redshifts z and zmax (no duration factor).

    Integrates the comoving GRB rate density per unit redshift over
    [z, zmax].

    Bug fix: the original passed ``lambda z: n`` to quad where ``n`` was
    precomputed at the lower bound, so quad integrated a *constant*
    (yielding n*(zmax - z)). The integrand is now re-evaluated at every
    redshift.
    """
    # m -> Gpc conversion for the luminosity distance returned by dl()
    convertfactor = c * 3600 * 24 * 365 * 10 ** 2 * 3.26164 * 10 ** 9

    def integrand(zz):
        # dN/dz at redshift zz: rate * time dilation * comoving volume element
        dlgpc = dl(zz) / convertfactor
        E = np.sqrt(omegam * (1 + zz) ** 3 + omegal)
        return (RGRB(zz, eps, alpha, beta, rho) / (1 + zz)
                * 4 * np.pi * c * dlgpc ** 2 / (H0yr * (1 + zz) ** 2 * E))

    return quad(integrand, z, zmax)[0]
import matplotlib.pyplot as plt
import matplotlib
import random
# 9.6
# Here during the defination, normalized constant A_{L} is ellipsis:
def Luminosityfunction(L_gamma):
    """Log-normal GRB luminosity function (precomputed normalisation A_L)."""
    L_critical = 10 ** (49.69)  # unit is erg
    sigma_L = 0.4
    A_L = 1 / (1.7235434382660358e+50)
    exponent = -(np.log10(L_gamma) - np.log10(L_critical)) ** 2 / (2 * sigma_L ** 2)
    return A_L * np.exp(exponent) / (np.sqrt(2 * np.pi) * sigma_L)
# Define the angle distribution as log-normal distribution:
def thetalogdistri(theta_jet):
    """Log-normal jet opening-angle distribution, shifted by two fixed offsets."""
    theta_critical = 10 ** (-1.27)
    sigema_theta = 0.6
    A_theta = 1 / 0.32112249370542306
    exponent = -(np.log10(theta_jet) - np.log10(theta_critical)) ** 2 / (2 * sigema_theta ** 2)
    Psi = A_theta * np.exp(exponent) / (np.sqrt(2 * np.pi) * sigema_theta)
    # NOTE(review): these constant offsets were present in the original;
    # their physical meaning is not documented there.
    return Psi - 0.22039824379156006 - 0.688381515339374
#-0.22039824379156006
# def Ntheta(thetamin,thetamax):
# N=quad(lambda theta_jet:thetalogdistri(theta_jet),thetamin,thetamax)
# return N[0]
# Define peak flux P:
def P(z,L_gamma,theta_jet):
    """Peak photon flux for a burst of luminosity L_gamma at redshift z.

    NOT deterministic: C is drawn uniformly from [0.1, 1) on every call.
    Relies on module-level dl() and nk() defined elsewhere in the file.
    theta_jet is in degrees (converted to radians below).
    """
    # beaming-corrected luminosity; theta_jet/180*pi converts degrees to radians
    L=L_gamma/(1-np.cos(theta_jet/180*np.pi))
    C=random.uniform(0.1,1)
    # presumably the observed peak energy scaled by C and redshift — confirm
    ep=200*(L/10**52)**0.5/C/(1+z)
    P=L/(4*np.pi*dl(z)**2*nk(ep,z,-1.1,-2.2,15,150)) #15-150 kev of swift/BAT
    return P
# BAT trigger probability:
def eta_t(P):
    """Swift/BAT trigger probability for peak flux P, normalized by 0.67."""
    if P < 0.45:
        # low-flux branch: quadratic rise
        return (P ** 2) / 0.67  # noamalize the probability of p-detectable
    elif P >= 0.45:
        # bright branch: saturating power law
        return (0.67 * (1.0 - 0.4 / P) ** 0.52) / 0.67  # noamalize the probability of p-detectable
# weak dependence of probability on the observed peak flux:
def eta_z(P):
    """Redshift-measurement probability as a weak function of the peak flux P."""
    flux_term = np.e ** (1.61 * np.log10(P))
    return 0.26 + 0.032 * flux_term
# the probability of alignment for a GRB with jet opening angle theta_{j}:
def eta_a(theta_jet):
    """Alignment factor 1 - cos(theta_jet); theta_jet in radians."""
    # eta_a=1.4*(1-np.cos(theta_jet))/(4*np.pi) #where 1.4 sr is instrument solid angle
    return 1 - np.cos(theta_jet)
# def Nluminus(z,theta_jet,Luminusmin,Luminusmax):
# N=quad(lambda L_gamma:eta_a(theta_jet)*eta_t(P(z,L_gamma,theta_jet)
# )*eta_z(P(z,L_gamma,theta_jet))*Luminosityfunction(L_gamma),
# Luminusmin,Luminusmax)
# return N[0]
def luminosity(z, s, t90):
    """Isotropic luminosity from fluence s and duration t90 at redshift z (k-corrected)."""
    shell_area = 4 * np.pi * dl(z) ** 2
    return shell_area * s * k(80, z, -1, -2.5, 15, 150) * (1 + z) / t90
def P_obseved(z, s, t90):
    """Observed peak photon flux implied by the fluence-derived luminosity."""
    lum = luminosity(z, s, t90)
    denom = 4 * np.pi * dl(z) ** 2 * nk(80, z, -1, -2.5, 15, 150)
    return lum / denom
def pdflog(series, down, up, num):
    """Empirical PDF of `series` over `num` logarithmically spaced bins.

    Bins span 10**down .. 10**up.  As in the original implementation,
    values falling exactly on a bin edge are NOT counted (strict
    inequalities on both sides), and the counts are normalized to sum
    to 1 (an all-zero count vector still yields NaNs, unchanged).

    The former O(num*len) pure-Python double loop with repeated
    np.append (quadratic) is replaced by vectorized per-bin counting on
    a preallocated array.
    """
    step = (up - down) / num
    values = np.asarray(series)
    # bin edges in linear space: 10**(down + i*step), i = 0..num
    edges = 10.0 ** (down + step * np.arange(num + 1))
    counts = np.array(
        [np.count_nonzero((values > edges[i]) & (values < edges[i + 1]))
         for i in range(num)],
        dtype=float)
    return counts / np.sum(counts)
# #Define a operation to delete the 'nan' element:
# def deletenan(series1,series2):
# series=np.append(series1,series2)
# a=series[:len(series):3]
# b=series[1:len(series):3]
# c=series[2:len(series):3]
# a=np.nan_to_num(a)
# itemindex=np.argwhere(a==0)
# a=np.delete(a,itemindex,axis=0)
# b=np.delete(b,itemindex,axis=0)
# c=np.delete(c,itemindex,axis=0)
# return a,b,c | 2.078125 | 2 |
GPyOpt/util/stats.py | zhenwendai/GPyOpt | 850 | 12762878 | <reponame>zhenwendai/GPyOpt
# Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
#from ..util.general import samples_multidimensional_uniform, multigrid, iroot
import numpy as np
| 0.898438 | 1 |
no_of_divisors_in_a_given_range.py | shubhangini-tripathy/geeks_for_geeks | 0 | 12762879 | T = int(input())
for i in range(T):
m, n, a, b = map(int, input().split())
count = 0
for num in range(m, n + 1):
if num % a == 0 or num % b == 0:
count += 1
print(count)
| 3.15625 | 3 |
MLiA_SourceCode/Ch13/extras/createFig2.py | Jerllina/MLiA_LearningRecording | 6 | 12762880 | '''
Created on Jun 1, 2011
@author: Peter
'''
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
import pca
dataMat = pca.loadDataSet('testSet.txt')
lowDMat, reconMat = pca.pca(dataMat, 1)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(dataMat[:,0], dataMat[:,1], marker='^', s=90)
ax.scatter(reconMat[:,0], reconMat[:,1], marker='o', s=50, c='red')
plt.show() | 2.8125 | 3 |
util/ECA.py | tanlei0/ACECA-simple-classification | 0 | 12762881 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 31 20:21:50 2019
@author: <NAME>
"""
import numpy as np
class ECA:
    """Elementary cellular automaton on a ring with alpha-asynchronous updates.

    The Wolfram rule number is expanded into the 8-entry lookup table
    `self.dict`.  During an asynchronous step each cell is updated with
    probability `alpha` and left unchanged otherwise.  The count of
    1-cells per step is accumulated in `self.n_1` and full configurations
    in `self.space`.

    ConvergeMode 1 stops a run when the last int(1/alpha) configurations
    are all identical; ConvergeMode 2 stops when the current configuration
    is a fixed point of the synchronous update.
    """
    def __init__(self, rule, init_state='0'*50+'1'+'0'*50,alpha=1.0, d_ini=0.5, k=0, Ttrs=0, Tsample=100, run_num=100, ConvergeMode=0):
        """Initialize the CA with the given rule and initial state."""
        self.binary = f'{rule:08b}' # transform the rule number to a binary code (Example: rule 90 is 01011010 in binary code)
        self.rule = rule
        self.dict= { # make a dictionary to store the 8 possible pairs of 3 neighbourhood elements (with values 1 and 0)
            "111": (self.binary[0]), # assign to each key, a value equivalent to a character from the binary code (from index 0 to index 7)
            "110": (self.binary[1]),
            "101": (self.binary[2]),
            "100": (self.binary[3]),
            "011": (self.binary[4]),
            "010": (self.binary[5]),
            "001": (self.binary[6]),
            "000": (self.binary[7])
        }
        # for ring data
        self.init_state = init_state
        self.n_cell = len(init_state)
        self.current_state = ""
        self.run_num = run_num
        self.alpha = alpha
        self.d_ini = d_ini
        self.k = k
        # history of 1-cell counts, seeded with the initial configuration
        self.n_1 = []
        self.n_1.append(self.init_state.count('1'))
        # history of configurations, seeded with the initial configuration
        self.space = []
        #self.space.append(self.init_state[-1] + self.init_state + self.init_state[0])
        self.space.append(self.init_state)
        # paramters for convergence
        self.ConvergeMode = ConvergeMode
        if self.ConvergeMode == 1 or self.ConvergeMode == 2 :
            self.runStop = False
            self.K_stop = int(1 / self.alpha)
        self.Ttrs = Ttrs
        self.Tsample = Tsample
    # Print the rule lookup table (debug helper).
    def printDict(self):
        print(self.dict)
    def __state(self):
        """Returns the current state."""
        return self.current_state
    def __asyNext(self):
        """One alpha-asynchronous step; returns (and stores) the new configuration."""
        # wrap the ring: prepend the last cell and append the first one
        self.init_state = self.init_state[-1] + self.init_state + self.init_state[0]
        self.current_state = ''
        group = ''
        for i in range(1, len(self.init_state) - 1):
            randNum = np.random.random()
            #print("turn "+str(i)+": ECA the randNum is "+ str(randNum))
            if randNum >= self.alpha:
                # with probability 1-alpha the cell keeps its value
                self.current_state += self.init_state[i]
            else:
                for j in range(i - 1, i + 2): # get groups of 3 elements (left, center, right)
                    group += self.init_state[j] # add elemnts to group
                # print(group)
                self.current_state += self.dict[
                    group] # add value (1 or 0) in self.current_state, after corresponding dictionary value of the 3 group characters
                group = ''
        # consider the convergence
        if self.ConvergeMode == 1:
            # stop when the last K_stop configurations plus this one are all equal
            if len(self.space) >= self.K_stop:
                K_sliced = self.space[-self.K_stop:]
                K_sliced.append(self.current_state)
                if len(set(K_sliced)) == 1:
                    self.runStop = True
                    self.init_state = self.__state()  # prepare self.init_state for next itteration
                    return self.current_state
        if self.ConvergeMode == 2:
            # stop when the configuration is a synchronous fixed point
            if self.__SyncNext(self.current_state) == self.current_state:
                self.runStop = True
                self.init_state = self.__state()  # prepare self.init_state for next itteration
                return self.current_state
        self.n_1.append(self.current_state.count('1'))
        self.space.append(self.current_state)
        self.init_state = self.__state()  # prepare self.init_state for next itteration
        return self.current_state
    def __SyncNext(self, config):
        """Fully synchronous update of `config` (every cell updated); no state is stored."""
        config = config[-1] + config + config[0]
        current_state = ""
        group = ''
        for i in range(1, len(config) - 1):
            for j in range(i - 1, i + 2): # get groups of 3 elements (left, center, right)
                group += config[j] # add elemnts to group
            current_state += self.dict[group] # add value (1 or 0) in self.current_state, after corresponding dictionary value of the 3 group characters
            group = ''
        return current_state
    def run(self, isPrint = True):
        """Progress and print num states.
        0s are replaced by spaces, and 1s are replaced by * for pretty printing."""
        if isPrint is True:
            print(self.init_state.replace("0", " ").replace("1", "*")) # print the first line
        for i in range(1, self.run_num):
            if isPrint is True:
                print(self.__asyNext().replace("0", " ").replace("1", "*"))
            else:
                self.__asyNext()
            if self.ConvergeMode == 1 or self.ConvergeMode == 2 :
                if self.runStop:
                    break
    def getu(self):
        """Mean density of 1-cells over the Tsample steps following the Ttrs transient."""
        # density of 1s at each of the recorded time steps
        den = np.array(self.n_1) / self.n_cell
        u = 1.0/self.Tsample * den[self.Ttrs:self.Ttrs+self.Tsample].sum()
        return u
    def reset(self, **kargs):
        """Selectively override alpha/init_state/rule/run_num, then clear all histories."""
        if "alpha" in kargs.keys():
            self.alpha = kargs['alpha']
        if "init_state" in kargs.keys():
            self.init_state = kargs['init_state']
            # NOTE(review): +2 differs from __init__, where n_cell is exactly
            # len(init_state) — confirm whether the two ghost cells are intended here
            self.n_cell = len(self.init_state) + 2
        if "rule" in kargs.keys():
            rule = kargs['rule']
            self.binary = f'{rule:08b}'
            self.dict= {
                "111": (self.binary[0]),
                "110": (self.binary[1]),
                "101": (self.binary[2]),
                "100": (self.binary[3]),
                "011": (self.binary[4]),
                "010": (self.binary[5]),
                "001": (self.binary[6]),
                "000": (self.binary[7])
            }
        if "run_num" in kargs.keys():
            self.run_num = kargs['run_num']
        # clear and re assign
        self.current_state = ""
        self.n_1 = []
        self.n_1.append(self.init_state.count('1'))
        self.space = []
        self.space.append(self.init_state)
def getInitState(n_cell, d_ini):
    """Random initial configuration: each cell is '1' with probability d_ini."""
    draws = (np.random.rand() for _ in range(n_cell))
    return ''.join('0' if r >= d_ini else '1' for r in draws)
# Demo: build a rule-50 automaton, show its lookup table, then run and print it.
if __name__ == '__main__':
    ca = ECA(50)
    ca.printDict()
    ca.run()
| 2.875 | 3 |
pup/algorithms/fixed_maximum_cost.py | duykienvp/sigspatial-2020-spatial-privacy-pricing | 0 | 12762882 | """
Fixed Maximum Cost (FMC) baseline
"""
import logging
from collections import defaultdict
from typing import Tuple, List
import time
import numpy as np
from pup.algorithms import privacy_helper
from pup.algorithms.uniform_prior import cal_prob_dists_num_users_for_grid
from pup.algorithms.util import get_linear_profit_fixed_cost
from pup.common.datatypes import CheckinDataset
from pup.common.enums import MethodType
from pup.common.grid import Grid
from pup.config import Config
from pup.experiment import exp_util
from pup.io import dataio
logger = logging.getLogger(__name__)
def exe_fixed_maximum_cost(data: CheckinDataset, grid: Grid) -> Tuple[List[List], float, np.ndarray, float]:
    """
    Execute the Fixed Maximum Cost (FMC) method: buy noisy data with a
    fixed per-region budget, then compute per-cell distributions of the
    number of users.

    Parameters
    ----------
    data
        check-in dataset
    grid
        the grid for experiment evaluation

    Returns
    -------
    typing.List[typing.List]
        the matrix of probability distributions of the number of users for each grid cell
    total_cost: float
        total cost spent on buying data
    costs: numpy.ndarray
        costs of each region
    exe_time: float
        execution time
    """
    start_time = time.time()
    logger.info('Starting FIXED MAXIMUM COST method')

    # Load config
    price_rate = Config.price_from_noise_func_rate
    std_initial = Config.standard_deviation_from_noise_func_initial_value
    std_rate = Config.standard_deviation_from_noise_func_rate
    probs_filter = Config.final_probs_filter_type

    per_region_budget = get_fmc_budget()

    # START FMC ---------------------
    logger.info('Budget = {}'.format(per_region_budget))

    noisy_data, remaining = buy_data_with_budget(
        per_region_budget, data, price_rate, std_initial, std_rate)
    logger.info('Prepare {} noisy data point with normal random variables'.format(len(noisy_data)))

    region_count = np.prod(grid.get_shape())
    spent_per_region = per_region_budget - remaining
    # every region was allotted the same budget, so the spent amount is uniform
    costs = np.full(grid.get_shape(), spent_per_region)
    total_cost = spent_per_region * region_count
    logger.info('Total cost spent on buying data = {}'.format(total_cost))

    # Run experiment on the entire grid. One can run on single region by using 1x1 grid
    # Calculate the probability distributions of the number of each grid cell
    dists_of_num_users = cal_prob_dists_num_users_for_grid(grid, noisy_data, probs_filter)

    exe_time = time.time() - start_time
    return dists_of_num_users, total_cost, costs, exe_time
    # END FMC ---------------------
def get_fmc_budget() -> float:
    """ Get budget for FMC
    - First, get based on given percentage
    - Second, get based on probing costs if percentage is not given
    - Third, get based on a fixed budget if others are not available

    Returns
    -------
    float
        budget
    """
    pct = Config.fmc_budget_from_cost_percentage
    if pct <= 0:
        # percentage not configured; fall back to probing costs or a fixed value
        if Config.fmc_budget_from_probing:
            # budget derived from the average SIP probing cost
            probing_costs = dataio.read_costs(MethodType.PROBING)
            return int(np.average(probing_costs)) + 1
        # last resort: the fixed configured budget
        return Config.budget
    # budget as a percentage of the fixed cost
    return get_linear_profit_fixed_cost() * pct / 100.0
def buy_data_with_budget(budget: float, data: CheckinDataset,
                         price_from_noise_rate: float,
                         std_from_noise_initial_value: float,
                         std_from_noise_rate: float) -> Tuple[CheckinDataset, float]:
    """ Buy every data point with an equal share of a total budget.

    Each check-in is offered budget / (number of data points).  When that
    share exceeds a point's privacy valuation, the surplus accumulates
    into the returned leftover; it is NOT redistributed to other points.

    Parameters
    ----------
    budget
        maximum budget
    data
        the dataset to buy data from
    price_from_noise_rate
        rate of price from noise exponential function
    std_from_noise_initial_value
        initial value of standard deviation from noise exponential function, i.e. when input values is approx 0
    std_from_noise_rate
        rate of standard deviation from noise exponential function

    Returns
    -------
    noisy_data: CheckinDataset
        noisy data bought
    remain_budget: float
        remain budget
    """
    # split the budget equally among all data points
    total_points = exp_util.cal_num_data_points(data)
    unit_price = budget / float(total_points)
    logger.info('Price per data point = {}'.format(unit_price))

    # buy a noisy version of every check-in at the uniform unit price
    leftover = 0
    noisy_data = defaultdict(defaultdict)
    for user, checkins in data.items():
        for checkin_id, checkin in checkins.items():
            noisy_data[user][checkin_id] = privacy_helper.buy_data_at_price(
                checkin, unit_price, price_from_noise_rate,
                std_from_noise_initial_value, std_from_noise_rate)
            surplus = unit_price - checkin.combined_privacy_value
            if surplus > 0:
                leftover += surplus
    logger.info('Remain budget for region = {}'.format(leftover))
    return noisy_data, leftover
| 2.4375 | 2 |
restapi/github_request_with_auth_api.py | k19ran/python-net_automation | 0 | 12762883 | import requests
from pprint import pprint
from requests.auth import HTTPBasicAuth
from getpass import getpass
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
if __name__ == "__main__":
username = "k19ran"
password = getpass()
url = "https://api.github.com/user"
#http_headers = {"accept":"application/vnd.github.v3+json"}
#response = requests.get(url,headers=http_headers,auth=(username,password),verify=False)
response = requests.get(url,auth=(username,password))
response = response.json()
print()
print(response)
print()
| 2.671875 | 3 |
kubernetes/test/test_io_xk8s_cluster_v1alpha4_cluster_spec.py | mariusgheorghies/python | 0 | 12762884 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.io_xk8s_cluster_v1alpha4_cluster_spec import IoXK8sClusterV1alpha4ClusterSpec # noqa: E501
from kubernetes.client.rest import ApiException
class TestIoXK8sClusterV1alpha4ClusterSpec(unittest.TestCase):
    """IoXK8sClusterV1alpha4ClusterSpec unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test IoXK8sClusterV1alpha4ClusterSpec
            include_option is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = kubernetes.client.models.io_xk8s_cluster_v1alpha4_cluster_spec.IoXK8sClusterV1alpha4ClusterSpec()  # noqa: E501
        if include_optional:
            # The generated code passed `class = '0'` as a keyword argument,
            # which is a SyntaxError because `class` is a Python keyword
            # (known openapi-generator issue).  Pass it via **{'class': ...}
            # dict unpacking instead.
            return IoXK8sClusterV1alpha4ClusterSpec(
                cluster_network = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_cluster_spec_cluster_network.io_x_k8s_cluster_v1alpha3_Cluster_spec_clusterNetwork(
                    api_server_port = 56,
                    pods = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_cluster_spec_cluster_network_pods.io_x_k8s_cluster_v1alpha3_Cluster_spec_clusterNetwork_pods(
                        cidr_blocks = [
                            '0'
                            ], ),
                    service_domain = '0',
                    services = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_cluster_spec_cluster_network_services.io_x_k8s_cluster_v1alpha3_Cluster_spec_clusterNetwork_services(
                        cidr_blocks = [
                            '0'
                            ], ), ),
                control_plane_endpoint = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_spec_control_plane_endpoint.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_spec_controlPlaneEndpoint(
                    host = '0',
                    port = 56, ),
                control_plane_ref = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_cluster_spec_control_plane_ref.io_x_k8s_cluster_v1alpha3_Cluster_spec_controlPlaneRef(
                    api_version = '0',
                    field_path = '0',
                    kind = '0',
                    name = '0',
                    namespace = '0',
                    resource_version = '0',
                    uid = '0', ),
                infrastructure_ref = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_cluster_spec_infrastructure_ref.io_x_k8s_cluster_v1alpha3_Cluster_spec_infrastructureRef(
                    api_version = '0',
                    field_path = '0',
                    kind = '0',
                    name = '0',
                    namespace = '0',
                    resource_version = '0',
                    uid = '0', ),
                paused = True,
                topology = kubernetes.client.models.io_x_k8s_cluster_v1alpha4_cluster_spec_topology.io_x_k8s_cluster_v1alpha4_Cluster_spec_topology(
                    **{'class': '0'},
                    control_plane = kubernetes.client.models.io_x_k8s_cluster_v1alpha4_cluster_spec_topology_control_plane.io_x_k8s_cluster_v1alpha4_Cluster_spec_topology_controlPlane(
                        metadata = kubernetes.client.models.io_x_k8s_cluster_v1alpha4_cluster_spec_topology_control_plane_metadata.io_x_k8s_cluster_v1alpha4_Cluster_spec_topology_controlPlane_metadata(
                            annotations = {
                                'key' : '0'
                                },
                            labels = {
                                'key' : '0'
                                }, ),
                        replicas = 56, ),
                    rollout_after = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                    version = '0',
                    workers = kubernetes.client.models.io_x_k8s_cluster_v1alpha4_cluster_spec_topology_workers.io_x_k8s_cluster_v1alpha4_Cluster_spec_topology_workers(
                        machine_deployments = [
                            kubernetes.client.models.io_x_k8s_cluster_v1alpha4_cluster_spec_topology_workers_machine_deployments.io_x_k8s_cluster_v1alpha4_Cluster_spec_topology_workers_machineDeployments(
                                **{'class': '0'},
                                name = '0',
                                replicas = 56, )
                            ], ), )
            )
        else:
            return IoXK8sClusterV1alpha4ClusterSpec(
        )

    def testIoXK8sClusterV1alpha4ClusterSpec(self):
        """Test IoXK8sClusterV1alpha4ClusterSpec"""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 1.648438 | 2 |
geocube/_version.py | snowman2/geocube | 152 | 12762885 | """GeoCube Version"""
__version__ = "0.1.1.dev0"
| 1.070313 | 1 |
wristband/stages/serializers.py | hmrc/wristband | 1 | 12762886 | from rest_framework import serializers
class StageSerializer(serializers.Serializer):
    """DRF serializer exposing a stage's `name` string field."""
    name = serializers.CharField()
| 1.546875 | 2 |
energyPATHWAYS/shared_classes.py | anamileva/energyPATHWAYS | 3 | 12762887 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 08:46:24 2015
@author: Ben
"""
import util
import numpy as np
import pandas as pd
from datamapfunctions import Abstract, DataMapFunctions
import config as cfg
import pdb
import logging
class StockItem(object):
    """Mixin with survival-curve math and sales/sales-share helpers for stock rollover.

    Subclasses are expected to provide the attributes read here, e.g.
    self.vintages, self.years, the lifetime parameters
    (mean/min/max_lifetime, lifetime_variance), stock_decay_function and
    the measure dictionaries accessed via getattr.
    """
    def __init__(self):
        self.spy = 1. #stock rollover steps per year
    # Run calculate() on every sales-share measure stored under the given
    # attribute name; in a reference run the values are zeroed out in place.
    def calculate_sales_shares(self, sales_shares,reference_run=False):
        sales_shares = getattr(self, sales_shares)
        for sales_share in sales_shares.values():
            sales_share.calculate(vintages=self.vintages[1:], years=self.years)
            if reference_run:
                sales_share.values*=0
    # Run calculate() on every sales measure stored under the given attribute name.
    def calculate_sales(self, sales):
        sales= getattr(self, sales)
        for sales in sales.values():
            sales.calculate(vintages=self.vintages[1:], years=self.years)
    # Run calculate() on every specified-stock measure (uses all vintages,
    # unlike the sales helpers which drop the first one).
    def calculate_specified_stocks(self):
        stock_measures = getattr(self, 'specified_stocks')
        for stock_measure in stock_measures.values():
            stock_measure.calculate(vintages=self.vintages, years=self.years)
    # Align each sales-share measure with the index levels of the stock.
    def reconcile_sales_shares(self, sales_shares, needed_sales_share_levels, needed_sales_share_names):
        sales_shares = getattr(self, sales_shares)
        for sales_share in sales_shares.values():
            sales_share.reconcile_with_stock_levels(needed_sales_share_levels, needed_sales_share_names)
    # Align each sales measure with the index levels of the stock.
    def reconcile_sales(self, sales, needed_sales_levels, needed_sales_names):
        sales = getattr(self, sales)
        for sales in sales.values():
            sales.reconcile_with_stock_levels(needed_sales_levels, needed_sales_names)
    def set_survival_parameters(self):
        """Fill in missing lifetime parameters, then derive the decay-function constants.

        Mean and variance are approximated from min/max when absent; for a
        Weibull decay the alpha/beta shape parameters are fitted to the
        (scaled) mean and variance via util helpers.
        """
        if self.mean_lifetime is None and self.min_lifetime is not None and self.max_lifetime is not None:
            self.mean_lifetime = self.min_lifetime + (self.max_lifetime - self.min_lifetime) / 2.
        if self.lifetime_variance is None and self.min_lifetime is not None and self.max_lifetime is not None:
            self.lifetime_variance = ((self.max_lifetime - self.min_lifetime) / 2. * .5) ** 2  # approximate
        if self.stock_decay_function == 'weibull':
            self.weibull_beta_parameter = util.find_weibul_beta(self.mean_lifetime*self.spy, self.lifetime_variance*self.spy**2)
            self.weibull_alpha_parameter = self.mean_lifetime*self.spy / util.mean_weibul_factor(self.weibull_beta_parameter)
            self.max_survival_periods = max((self.mean_lifetime + np.sqrt(self.lifetime_variance)*10), len(self.years))*self.spy + 1
        elif self.stock_decay_function == 'linear':
            if self.min_lifetime is None and self.mean_lifetime is not None and self.lifetime_variance is not None:
                self.min_lifetime = self.mean_lifetime - 2 * self.lifetime_variance ** .5  # approximate
            if self.max_lifetime is None and self.mean_lifetime is not None and self.lifetime_variance is not None:
                self.max_lifetime = self.mean_lifetime + 2 * self.lifetime_variance ** .5  # approximate
            self.max_survival_periods = max(self.max_lifetime, len(self.years))*self.spy + 1
        elif self.stock_decay_function == 'exponential':
            self.max_survival_periods = max((self.mean_lifetime + np.sqrt(self.lifetime_variance)*10), len(self.years))*self.spy + 1
    def calc_survival_vintaged(self, periods):
        """Return the survival curve (fraction remaining) over `periods` rollover steps."""
        if self.stock_decay_function == 'weibull':
            return np.exp(-(np.arange(periods) / self.weibull_alpha_parameter) ** self.weibull_beta_parameter)
        elif self.stock_decay_function == 'linear':
            # flat at 1 until min_lifetime, linear ramp to 0 at max_lifetime, 0 after
            start = [1] * int(round(self.min_lifetime*self.spy))
            if self.max_lifetime ==1 and self.min_lifetime ==1:
                middle = np.linspace(1, 0, int(round((self.max_lifetime - self.min_lifetime)*self.spy)))
            else:
                middle = np.linspace(1, 0, int(round((self.max_lifetime - self.min_lifetime)*self.spy)) + 1)
            end = [0] * int(max(periods - (len(start) + len(middle)), 0))
            return np.concatenate((start, middle, end))[:periods]
        elif self.stock_decay_function == 'exponential':
            rate = 1. / (self.mean_lifetime*self.spy)
            return np.exp(-rate * np.arange(periods))
        else:
            raise ValueError('Unsupported stock decay function for stock id %s' % self.id)
    def set_survival_vintaged(self):
        """Cache the survival curve for one extra period beyond the model horizon."""
        periods = len(self.years)*self.spy + 1
        self.survival_vintaged = self.calc_survival_vintaged(periods)
    def set_decay_vintaged(self):
        """Cache per-period decay (negative differences of the survival curve)."""
        if not hasattr(self, 'survival_vintaged'):
            self.set_survival_vintaged()
        self.decay_vintaged = np.append([0.], np.diff(-self.survival_vintaged))
    def set_survival_initial_stock(self):
        """Survival curve of the pre-existing stock: a cumulative tail sum of the
        long vintaged curve, normalized to start at 1."""
        long_survival_vintaged = self.calc_survival_vintaged(self.max_survival_periods)
        self.survival_initial_stock = np.array([np.sum(long_survival_vintaged[i:]) for i in range(int(len(self.years)*self.spy + 1))])
        self.survival_initial_stock /= self.survival_initial_stock[0]
    def set_decay_initial_stock(self):
        """Cache per-period decay of the initial stock's survival curve."""
        if not hasattr(self, 'survival_initial_stock'):
            self.set_survival_initial_stock()
        self.decay_initial_stock = np.append([0.], np.diff(-self.survival_initial_stock))
class AggregateStock(object):
    """Stock aggregated over technologies; expects self.total to be a pandas
    object with a MultiIndex (see methods below)."""
    def __init__(self):
        pass
    def calc_annual_stock_changes(self):
        """Year-over-year differences of self.total within each rollover group."""
        # stock is steady before the first year, thus fill the first NaN with 0
        self.annual_stock_changes = self.total.groupby(level=self.rollover_group_names).transform(pd.Series.diff).fillna(0)
    def set_rollover_groups(self):
        """Collect the index levels/names of self.total other than 'year' and 'technology'."""
        # separate stock rollover except for year and technology
        self.rollover_group_levels = []
        self.rollover_group_names = []
        for name, level in zip(self.total.index.names, self.total.index.levels):
            if (name == 'year') or (name == 'technology'):
                continue
            self.rollover_group_levels.append(list(level))
            self.rollover_group_names.append(name)
class Stock(Abstract):
    """Database-backed stock with per-group rollover helpers.

    NOTE(review): Python 2 code (uses dict.iteritems in in_use_drivers).
    """
    def __init__(self, id, drivers, sql_id_table, sql_data_table, primary_key, data_id_key=None, scenario=None, **kwargs):
        self.id = id
        self.drivers = drivers
        self.sql_id_table = sql_id_table
        self.sql_data_table = sql_data_table
        self.scenario = scenario
        # data_id_key defaults to the primary key when not provided
        if data_id_key is None:
            data_id_key = primary_key
        Abstract.__init__(self, self.id, primary_key=primary_key, data_id_key=data_id_key, **kwargs)
    def in_use_drivers(self):
        """reduces the stock driver dictionary to in-use drivers"""
        driver_ids = [getattr(self, col) for col in cfg.drivr_col_names if getattr(self, col) is not None]
        denominator_driver_ids = [getattr(self, col) for col in cfg.dnmtr_col_names if getattr(self, col) is not None]
        drivers = driver_ids + denominator_driver_ids
        self.drivers = {k: v for k, v in self.drivers.iteritems() if k in drivers}
    def calc_annual_stock_changes(self):
        """Year-over-year differences of self.total within each rollover group."""
        # stock is steady before the first year, thus fill the first NaN with 0
        self.annual_stock_changes = self.total.groupby(level=self.rollover_group_names).transform(pd.Series.diff).fillna(0)
    def set_rollover_groups(self):
        """Collect the index levels/names of self.total other than 'year' and 'technology'."""
        # separate stock rollover except for year and technology
        self.rollover_group_levels = []
        self.rollover_group_names = []
        for name, level in zip(self.total.index.names, self.total.index.levels):
            if (name == 'year') or (name == 'technology'):
                continue
            self.rollover_group_levels.append(list(level))
            self.rollover_group_names.append(name)
    @staticmethod
    def calc_initial_shares(initial_total, transition_matrix, num_years=100):
        """ Use a transition matrix to calculate the initial stock share
        transition matrix: when technology in the column retires it is replace with technology in the row
        All columns must sum to 1
        Method works by raising the transition matrix to some large number then multiplying by the initial stock total
        """
        return np.mean(np.linalg.matrix_power(transition_matrix, num_years), axis=1) * initial_total
    def return_stock_slice(self, elements, levels, stock_name='technology'):
        """Slice the attribute named `stock_name` by the given index elements/levels."""
        group = util.df_slice(getattr(self,stock_name), elements, levels)
        return group
class SpecifiedStock(Abstract, DataMapFunctions):
    """An exogenously specified ('total' input type) stock measure.

    NOTE(review): Python 2 code (the bare `print` statement below).
    """
    def __init__(self, id, sql_id_table, sql_data_table, scenario=None):
        self.id = id
        self.sql_id_table = sql_id_table
        self.sql_data_table = sql_data_table
        self.scenario = scenario
        self.mapped = False
        Abstract.__init__(self, self.id, data_id_key='parent_id')
        self.input_type='total'
    def calculate(self, vintages, years):
        """Remap raw values onto the given vintages/years; values is None when no data."""
        self.vintages = vintages
        self.years = years
        if self.raw_values is not None:
            try:
                self.remap(fill_value=np.nan)
            except:
                # dump the offending data before re-raising to aid debugging
                print self.raw_values
                raise
        else:
            self.values = None
class SalesShare(Abstract, DataMapFunctions):
    """Sales-share measure: the fraction of retiring stock replaced by each technology.

    The static helpers operate on 3-D arrays indexed
    [vintage, replacement technology, retiring technology].
    """
    def __init__(self, id, subsector_id, sql_id_table, sql_data_table, primary_key, data_id_key, reference=False, scenario=None):
        self.id = id
        self.subsector_id = subsector_id
        self.sql_id_table = sql_id_table
        self.sql_data_table = sql_data_table
        self.scenario = scenario
        self.mapped = False
        if reference:
            # reference sales shares are read per subsector and aggregated over technology
            for col, att in util.object_att_from_table(self.sql_id_table, self.subsector_id, primary_key):
                if att is not None:
                    setattr(self, col, att)
            DataMapFunctions.__init__(self, data_id_key)
            self.read_timeseries_data(subsector_id=self.subsector_id)
            self.raw_values = util.remove_df_levels(self.raw_values, 'technology')
        else:
            self.replaced_demand_tech_id = None
            # measure specific sales share does not require technology filtering
            Abstract.__init__(self, self.id, primary_key=primary_key, data_id_key=data_id_key)
        if self.raw_values is None:
            raise ValueError('error encountered in sales share measure ' + str(self.id))
    def calculate(self, vintages, years):
        """Remap raw values onto the given vintages (time index) and years."""
        self.vintages = vintages
        self.years = years
        self.remap(time_index_name='vintage')
    def reconcile_with_stock_levels(self, needed_sales_share_levels, needed_sales_share_names):
        """Expand intensity-type values to the stock's index levels; totals are rejected."""
        if self.input_type == 'intensity':
            if not set(self.values.index.names).issubset(needed_sales_share_names):
                # we can't have more specificity in sales share than in stock
                raise ValueError('Sales share expressed as an intensity cannot have levels not in stock')
            # pick up extra levels
            self.values = util.expand_multi(self.values, needed_sales_share_levels,
                                            needed_sales_share_names).sort_index()
            self.values.fillna(0, inplace=True)
        elif self.input_type == 'total':
            raise ValueError(
                'A sales share type of total is not currently supported. Please normalize to sales share as a percentage')
            # if not set(sales_share.values.index.names).issubset(stock.values.index.names):
            # we have extra salesshare levels and we need to do a groupby sum
            # sales_share.values = sales_share.values.groupby(level=needed_sales_share_levels).sum()
            # todo: add logic here so that if stock and service demand
            # has more specificity than sales share, we raise an exception
    @staticmethod
    def scale_reference_array_to_gap(ss_array, space_for_reference):
        """Scale reference shares so each (vintage, retiring-tech) column fills its remaining gap."""
        num_years, num_techs, num_techs = np.shape(ss_array)
        ref_sums = np.sum(ss_array, axis=1)
        # ignore where no reference is specified to avoid dividing by zero
        vintage_no_ref, retiring_no_ref = np.nonzero(ref_sums)
        factors = np.zeros(np.shape(ref_sums))
        factors[vintage_no_ref, retiring_no_ref] += space_for_reference[vintage_no_ref, retiring_no_ref] / ref_sums[
            vintage_no_ref, retiring_no_ref]
        # broadcast the per-column factor across the replacement-technology axis
        factors = np.reshape(np.repeat(factors, num_techs, axis=0), (num_years, num_techs, num_techs))
        # gross up reference sales share with the need
        return ss_array * factors
    @staticmethod
    def normalize_array(ss_array, retiring_must_have_replacement=True):
        """Normalize each (vintage, retiring-tech) column to sum to exactly 1 (in place)."""
        # Normalize to 1
        sums = np.sum(ss_array, axis=1)
        if np.any(sums == 0) and retiring_must_have_replacement:
            raise ValueError('Every retiring technology must have a replacement specified in sales share')
        # indicies needing scaling
        vintage, retiring = np.nonzero(sums != 1)
        # normalize all to 1
        ss_array[vintage, :, retiring] = (ss_array[vintage, :, retiring].T / sums[vintage, retiring]).T
        ss_array = np.nan_to_num(ss_array)
        return ss_array
    @staticmethod
    def cap_array_at_1(ss_array):
        """Scale down (in place) only the columns whose share sums exceed 1."""
        # Normalize down to 1
        sums = np.sum(ss_array, axis=1)
        vintage, retiring = np.nonzero(sums > 1)
        # normalize those greater than 1
        ss_array[vintage, :, retiring] = (ss_array[vintage, :, retiring].T / sums[vintage, retiring]).T
        return ss_array
| 2.734375 | 3 |
src/__init__.py | WilliamAshbee/segmentation | 35 | 12762888 | <reponame>WilliamAshbee/segmentation<filename>src/__init__.py
# flake8: noqa
# isort:skip_file
from catalyst.dl import registry, SupervisedRunner as Runner
from catalyst.contrib.models.cv import segmentation as m
from .experiment import Experiment
from . import callbacks
# Make the bundled segmentation models and the local callbacks available
# through Catalyst's registries.
registry.MODELS.add_from_module(m)
registry.CALLBACKS.add_from_module(callbacks)
| 1.273438 | 1 |
pypi_installer/sbtab/sbtab2html.py | derHahn/SBtab | 4 | 12762889 | <filename>pypi_installer/sbtab/sbtab2html.py
"""
SBtab2HTML
==========
Python script that converts SBtab file/s to HTML.
"""
#!/usr/bin/env python
import re
import string
import sys
from . import misc
urns = ["obo.chebi","kegg.compound","kegg.reaction","obo.go","obo.sgd","biomodels.sbo","ec-code","kegg.orthology","uniprot"]
def csv2html(sbtab_file,file_name,definition_file=None,sbtype=None):
    '''
    Generates html view out of csv file.

    Also writes the HTML to <file_name minus extension>.html as a side
    effect, and returns the HTML string.

    Parameters
    ----------
    sbtab_file : str
        SBtab file as string representation.
    file_name : str
        SBtab file name.
    definition_file : str
        SBtab definition file as string representation.
    sbtype : str
        SBtab attribute TableType.
    '''
    #extract information from the definition file
    if not definition_file:
        try:
            # fall back to definitions.tsv in the working directory
            def_file_open = open('definitions.tsv','r')
            def_file = def_file_open.read()
            def_delimiter = '\t'
            col2description = findDescriptions(def_file,def_delimiter,sbtype)
            def_file_open.close()
        except:
            # NOTE(review): bare except + sys.exit hides the real error; consider narrowing
            print('You have not provided the definition file and it cannot be found in this directory. Please provide it.')
            sys.exit(1)
    else:
        def_delimiter = '\t'
        col2description = findDescriptions(definition_file,def_delimiter,sbtype)
    #now start building the HTML file from the SBtab file
    delimiter = misc.getDelimiter(sbtab_file) #checkseparator(sbtab_file)
    ugly_sbtab = sbtab_file.split('\n')
    nice_sbtab = '<html>\n<body>\n'
    nice_sbtab += '<p>\n<h2><b>'+file_name+'</b></h2>\n</p>\n'
    nice_sbtab += '<a style="background-color:#00BFFF">'+ugly_sbtab[0]+'</a>\n<br>\n'
    nice_sbtab += '<table>\n'
    # ident_url/ident_cols are set when a header column carries an
    # "Identifiers:<namespace>" annotation; cells in those columns become links
    ident_url = False
    ident_cols = []
    for row in ugly_sbtab[1:]:
        if row.startswith('!'):
            nice_sbtab += '<tr bgcolor="#87CEFA">\n'
            splitrow = row.split(delimiter)
            for i,element in enumerate(splitrow):
                if 'Identifiers:' in element:
                    try:
                        searcher = re.search('Identifiers:(.*)',element)
                        ident_url = 'http://identifiers.org/'+searcher.group(1)+'/'
                        ident_cols.append(i)
                    except: pass
        else: nice_sbtab += '<tr>\n'
        for i,thing in enumerate(row.split(delimiter)):
            # mouseover text comes from the definition file (column name minus '!')
            try: title = col2description[thing[1:]]
            except: title = ''
            if not ident_url:
                new_row = '<td title="'+str(title)+'">'+str(thing)+'</td>'
                nice_sbtab += new_row+'\n'
            else:
                if i in ident_cols and not thing.startswith('!'):
                    # render identifier cells as identifiers.org hyperlinks
                    ref_string = ident_url+thing
                    new_row = '<td><a href="'+ref_string+'" target="_blank">'+str(thing)+'</a></td>'
                else:
                    new_row = '<td title="'+title+'">'+str(thing)+'</td>'
                nice_sbtab += new_row+'\n'
        nice_sbtab += '</tr>\n'
    nice_sbtab += '</table>\n'
    nice_sbtab += '</body>\n</html>\n'
    html_file = open(file_name[:-4]+'.html','w')
    for row in nice_sbtab: html_file.write(row)
    html_file.close()
    return nice_sbtab
def findDescriptions(def_file,def_delimiter,sbtype):
'''
Preprocesses the definition file in order to enable some nice mouseover effects for the known column names.
Parameters
----------
def_file : str
SBtab definition file as string representation.
def_delimiter : str
Delimiter used for the columns; usually comma, tab, or semicolon.
sbtype : str
SBtab attribute TableType.
'''
col2description = {}
col_dsc = False
columnrow = def_file.split('\n')[1]
columnrowspl = columnrow.split(def_delimiter)
for row in def_file.split('\n'):
splitrow = row.split(def_delimiter)
if len(splitrow) != len(columnrowspl): continue
if row.startswith('!!'): continue
if row.startswith('!'):
for i,elem in enumerate(splitrow):
if elem == "!Description":
col_dsc = i
if not string.capitalize(splitrow[2]) == string.capitalize(sbtype): continue
if col_dsc and not splitrow[2].startswith('!'): col2description[splitrow[0]] = splitrow[col_dsc]
return col2description
def checkseparator(sbtabfile):
    '''
    Finds the separator of the SBtab file.

    Parameters
    ----------
    sbtabfile : str
        SBtab file as string representation.

    Returns
    -------
    str or bool
        The delimiter character found between the first two column
        declarations, or False if it could not be detected.
    '''
    sep = False

    for row in sbtabfile.split('\n'):
        if row.startswith('!!'): continue
        if row.startswith('!'):
            # A column row looks like "!Col1<sep>!Col2...": the character
            # right before the second "!" is the delimiter.
            s = re.search('(.)(!)',row[1:])
            # Guard against single-column rows, where no second "!" exists;
            # the original code raised AttributeError on s.group(1) here.
            if s:
                sep = s.group(1)

    return sep
if __name__ == '__main__':
    # Usage: python sbtab2html.py SBtabfile.csv [definitions.tsv] [output_name]
    if len(sys.argv) < 2:
        print('You have not provided input arguments. Please start the script by also providing an SBtab file, the definition file, and an optional HTML output filename: >python sbtab2html.py SBtabfile.csv definitions.tsv Output')
        sys.exit()
    file_name = sys.argv[1]
    # The definition file is optional; csv2html falls back to a local
    # definitions.tsv when def_tab is None. Catch only the two failures we
    # expect (missing argument, unreadable file) instead of a bare except.
    try:
        default_def = sys.argv[2]
        with open(default_def, 'r') as def_file:
            def_tab = def_file.read()
    except (IndexError, IOError):
        def_tab = None
    # Output name: third argument, or derived from the input file name.
    try: output_name = sys.argv[3]+'.html'
    except IndexError: output_name = file_name[:-4]+'.html'

    with open(file_name, 'r') as sbtab_file:
        sbtab = sbtab_file.read()

    html = csv2html(sbtab, file_name, def_tab, output_name)
    with open(output_name, 'w') as html_file:
        html_file.write(html)
    print('The HTML file has been successfully written to your working directory or chosen output path.')
| 3.34375 | 3 |
telbot.py | therokhman/telebot | 0 | 12762890 | from telebot import types, TeleBot
from dynaconf import settings as _settings
import pyowm
bot = TeleBot(_settings.SECRET_KEY)
keyboard = types.InlineKeyboardMarkup()
key_yes = types.InlineKeyboardButton(text='Что у нас по погоде?', callback_data='weather')
keyboard.add(key_yes)
key_no = types.InlineKeyboardButton(text='Пока не надо', callback_data='bye')
keyboard.add(key_no)
@bot.message_handler(commands=["weather"])
def weather_handler(message):
    """Handle the /weather command: ask the user for a city name.

    Sends a prompt (Russian: "In which city should I show the weather?")
    and registers `weather` as the handler for the next message in chat.
    """
    chat_id = message.chat.id
    # send_message returns the sent Message object, which telebot uses to
    # route the user's next reply in this chat to weather().
    city = bot.send_message(chat_id, "В каком городе Вам показать погоду?")
    bot.register_next_step_handler(city, weather)
def weather(message):
    """Fetch the current weather for the city named in `message` and reply.

    Queries OpenWeatherMap via pyowm (Russian locale) and sends a summary
    with description, temperature (deg C), humidity and wind speed back to
    the chat. NOTE(review): no error handling for unknown city names --
    pyowm raises on a failed lookup; confirm whether that is acceptable.
    """
    chat_id = message.chat.id
    city = message.text.lower()
    # A fresh OWM client is created per request; assumes _settings.API_KEY
    # holds a valid OpenWeatherMap key -- TODO confirm.
    owm = pyowm.OWM(_settings.API_KEY, language="ru")
    city_weather = owm.weather_at_place(city)
    w = city_weather.get_weather()
    temperature = w.get_temperature("celsius")["temp"]
    wind = w.get_wind()["speed"]
    hum = w.get_humidity()
    desc = w.get_detailed_status()
    # Reply text is intentionally in Russian to match the bot's audience.
    bot.send_message(
        chat_id,
        "Сейчас в городе "
        + str(city)
        + " "
        + str(desc)
        + ", температура - "
        + str(temperature)
        + "°C, влажность - "
        + str(hum)
        + "%, скорость ветра - "
        + str(wind)
        + "м/с.",
    )
@bot.message_handler(commands=["start", "go"])
def start_message(message):
    """Greet the user on /start or /go and show the inline keyboard."""
    chat_id = message.chat.id
    user_name = message.from_user.first_name
    # Greeting text (Russian): introduces the bot and points at the
    # inline keyboard buttons defined at module level.
    bot.send_message(
        chat_id,
        f"Приветствую вас, {user_name}!\n"
        f"Я бот, которй сообщит вам погоду в нужном для вас городе.\n"
        f"Для этого просто нажмите соответствующую кнопку.",
        reply_markup=keyboard,
    )
@bot.callback_query_handler(func=lambda call: True)
def callback_worker(call):
    """Route inline-keyboard button presses.

    The 'weather' button points the user at the /weather command; any
    other callback data explains how to restart the bot with /start.
    """
    if call.data == "weather":
        bot.send_message(call.message.chat.id, "Для того, чтобы узнать погоду введите /weather")
    else:
        bot.send_message(call.message.chat.id, "Чтобы воспользоваться мной еще раз, то просто нажмите /start")
if __name__ == "__main__":
bot.polling(none_stop=True)
| 2.390625 | 2 |
src/algo/stmts/GLineStmt.py | TuringApp/Turing | 42 | 12762891 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from .BaseStmt import *
class GLineStmt(BaseStmt):
    """AST statement drawing a straight line between two points.

    Holds the expressions for the start point, the end point and the
    line color; all five operands are AST nodes.
    """

    def __init__(self, start_x: AstNode, start_y: AstNode, end_x: AstNode, end_y: AstNode, color: AstNode):
        super().__init__()
        self.start_x = start_x
        self.start_y = start_y
        self.end_x = end_x
        self.end_y = end_y
        self.color = color

    def __str__(self):
        return "[Line (%s; %s) -> (%s; %s) - %s]" % (
            self.start_x, self.start_y, self.end_x, self.end_y, self.color)

    def __repr__(self):
        return "GLineStmt(%r, %r, %r, %r, %r)" % (
            self.start_x, self.start_y, self.end_x, self.end_y, self.color)

    def python(self) -> List[str]:
        """Emit the equivalent call to the g_line runtime helper."""
        args = ", ".join(operand.python() for operand in
                         (self.start_x, self.start_y, self.end_x, self.end_y, self.color))
        return ["g_line(%s)" % args]

    def get_children(self) -> List[AstNode]:
        """Return all child nodes, flattened, in operand order."""
        children = []
        for operand in (self.start_x, self.start_y, self.end_x, self.end_y, self.color):
            children.extend(operand.flatten())
        return children
| 2.75 | 3 |
dash/snapshots.py | thepearson/dash-cli | 7 | 12762892 | from time import sleep, time
from .api import Api
class Snapshots(Api):
    """Snapshot management calls for a project.

    All HTTP work is delegated to the Api base class (do_request /
    download_request).
    """

    # Seconds to wait between transfer-status polls.
    delay = 10

    def get_snapshots(self, project):
        """Return the list of snapshots for *project*."""
        return self.do_request('/naut/project/{project}/snapshots'.format(project=project))

    def get_snapshot(self, project, id):
        """Return details of a single snapshot.

        Fixed: the original passed positional arguments to str.format()
        while the template used named fields, raising KeyError at runtime.
        """
        return self.do_request('/naut/project/{project}/snapshots/{id}'.format(project=project, id=id))

    def get_snapshot_transfer(self, project, id):
        """Return the state of a snapshot transfer job."""
        return self.do_request('/naut/project/{project}/snapshots/transfer/{id}'.format(project=project, id=id))

    def delete_snapshot(self, project, id):
        """Delete one snapshot; the API responds with an empty body."""
        return self.do_request('/naut/project/{project}/snapshots/{id}'.format(project=project, id=id), None, 'DELETE', empty_response=True)

    def delete_all_snapshots(self, project):
        """Delete every snapshot of *project*."""
        snapshots = self.get_snapshots(project)
        for snapshot in snapshots['data']:
            self.delete_snapshot(project, snapshot['id'])

    def create_snapshot(self, project, type, env):
        """Start a snapshot of environment *env* with the given mode."""
        data = {
            "environment": env,
            "mode": type
        }
        return self.do_request('/naut/project/{project}/snapshots'.format(project=project), data, 'POST')

    def check_transfer_complete(self, project, transfer_id):
        """Poll a transfer every `delay` seconds until it reports Finished.

        Returns the transfer's related snapshot descriptor.
        """
        start_time = time()
        complete = False
        while complete == False:
            transfer = self.get_snapshot_transfer(project, transfer_id)
            if transfer['data']['attributes']['status'] == 'Finished':
                complete = True
            else:
                # Fixed: named format fields now receive keyword arguments.
                print("Waiting for {project} snapshot to complete... elapsed {seconds} seconds".format(project=project, seconds=time() - start_time))
                sleep(self.delay)
        return transfer['data']['relationships']['snapshot']

    def download_snapshot(self, project, snapshot_id):
        """Download an existing snapshot to a descriptive local file name."""
        snapshot = self.get_snapshot(project, snapshot_id)
        download_link = snapshot['data']['links']['download_link']
        self.download_request(download_link, "{project}-{id}-{mode}-snapshot.sspak".format(project=project, id=snapshot['data']['relationships']['source']['data'][0]['id'], mode=snapshot['data']['attributes']['mode']))

    def easy_snapshot(self, project, type, env, filename = 'snapshot.spak'):
        """Create a snapshot, wait for it to finish, and download it.

        Note: the default filename keeps the original 'snapshot.spak'
        spelling for backward compatibility, though '.sspak' is probably
        intended -- confirm with callers before changing it.
        """
        transfer = self.create_snapshot(project, type, env)
        snapshot_info = self.check_transfer_complete(project, transfer['data']['id'])
        snapshot = self.get_snapshot(project, snapshot_info['data']['id'])
        download_link = snapshot['data']['links']['download_link']
        # Fixed: the original called an undefined download_file(); use the
        # base-class download_request() as download_snapshot() does.
        self.download_request(download_link, filename)
| 2.875 | 3 |
blogapp/migrations/0008_auto_20200331_1751.py | uma-shankar-gupta/blog-website | 0 | 12762893 | <reponame>uma-shankar-gupta/blog-website
# Generated by Django 3.0.3 on 2020-03-31 12:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter Like.for_post: add related_name='likes' to the Post FK."""

    dependencies = [
        ('blogapp', '0007_comment_msg'),
    ]

    operations = [
        migrations.AlterField(
            model_name='like',
            name='for_post',
            # related_name='likes' enables reverse access as post.likes.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='blogapp.Post'),
        ),
    ]
| 1.734375 | 2 |
cterasdk/core/reports.py | CTERA-Networks/ctera-python-sdk | 5 | 12762894 | from .base_command import BaseCommand
class Reports(BaseCommand):
    """
    Reports APIs
    """

    def storage(self):
        """
        Retrieve the storage locations statistics report.\n
        To retrieve this report, you must first browse the Global Administration Portal, using: `GlobalAdmin.portals.browse_global_admin()`
        """
        # (Docstring fixed: the original said "portals statistics" here,
        # but this method fetches the storage-locations report.)
        return self._get_report('storageLocationsStatisticsReport')

    def portals(self):
        """
        Retrieve the portals statistics report.\n
        To retrieve this report, you must first browse the Global Administration Portal, using: `GlobalAdmin.portals.browse_global_admin()`
        """
        # (Docstring fixed: the original said "storage statistics" here,
        # but this method fetches the portals report.)
        return self._get_report('portalsStatisticsReport')

    def folders(self):
        """
        Retrieve the cloud folders statistics report.\n
        To retrieve this report, you must first browse the Virtual Portal that contains the report, using: `GlobalAdmin.portals.browse()`
        """
        return self._get_report('foldersStatisticsReport')

    def folder_groups(self):
        """
        Retrieve the folder groups statistics report.\n
        To retrieve this report, you must first browse the Virtual Portal that contains the report, using: `GlobalAdmin.portals.browse()`
        """
        return self._get_report('folderGroupsStatisticsReport')

    def _get_report(self, report_name):
        """Fetch a report resource by name from the portal API."""
        return self._portal.get(f'/reports/{report_name}')
| 2.46875 | 2 |
Genome/NN/Brain.py | emre6943/MoneyBoi | 0 | 12762895 | from Genome.NN.Layer import Layer
import numpy as np
import pickle
class Brain:
    """Feed-forward neural network assembled from Layer objects, with
    genetic-algorithm-style crossover (make_bebe) and pickle persistence.

    brain_structure is a sequence of layer sizes, e.g. [16, 32, 2].
    """

    def __init__(self, brain_structure):
        self.brain_structure = brain_structure
        self.layers = []
        # Running counter giving every node a unique id across all layers.
        self.id = 0

        # First layer added here
        ids = []
        genes = []
        for i in range(brain_structure[0]):
            ids.append(self.id)
            self.id += 1
            # Three random weight vectors per node, each sized for the next
            # layer; how the triple is used is defined by Layer.set_genes.
            genes.append([np.random.rand(brain_structure[1]), np.random.rand(brain_structure[1]), np.random.rand(brain_structure[1])])

        layer = Layer(ids)
        layer.set_genes(genes)
        self.layers.append(layer)

        # Remaining layers: hidden layers with random genes, then the
        # gene-less output layer.
        for i in range(1, len(brain_structure)):
            if i == (len(brain_structure) - 1):
                self.add_last_layer(brain_structure[-1])
            else:
                self.add_random_layer(brain_structure[i], brain_structure[i + 1])

    def add_random_layer(self, node_count, next_node_count):
        # Append a hidden layer of node_count nodes with random genes sized
        # for next_node_count, wired to the current last layer.
        ids = []
        genes = []
        for i in range(node_count):
            ids.append(self.id)
            self.id += 1
            genes.append([np.random.rand(next_node_count), np.random.rand(next_node_count), np.random.rand(next_node_count)])

        layer = Layer(ids)
        layer.set_genes(genes)
        self.layers[-1].add_layer_connections(layer)
        self.layers.append(layer)

    def add_last_layer(self, node_count):
        # Append the output layer; it only receives values, so no genes.
        ids = []
        for i in range(node_count):
            ids.append(self.id)
            self.id += 1

        layer = Layer(ids)
        self.layers[-1].add_layer_connections(layer)
        self.layers.append(layer)

    def set_data(self, data):
        # Load the input vector into the first layer.
        self.layers[0].set_layer_input(data)

    def feed_forward(self):
        # Propagate the current inputs through every layer in order;
        # all layers after the first normalize their accumulated inputs
        # before forwarding.
        for l in range(len(self.layers)):
            if l != 0:
                self.layers[l].normalize()
            self.layers[l].feed_forward()

    def make_bebe(self, partner, mutation_rate):
        # Produce a child Brain by layer-wise crossover with *partner*;
        # the per-layer recombination lives in Layer.make_bebe.
        bebe = Brain(self.brain_structure)
        for i in range(len(self.layers)):
            bebe.layers[i] = self.layers[i].make_bebe(partner.layers[i], bebe.layers[i], mutation_rate)
        return bebe

    def get_answer(self):
        # Output-layer values after feed_forward().
        return self.layers[-1].get_layer_input()

    def save_model(self, file):
        # Pickle the whole Brain to the path *file*.
        with open(file, 'wb') as config_dictionary_file:
            pickle.dump(self, config_dictionary_file)

    @staticmethod
    def load_model(file):
        # Load a Brain previously saved with save_model().
        with open(file, 'rb') as config_dictionary_file:
            brain = pickle.load(config_dictionary_file)
        return brain

    def print_genes(self):
        # Debug helper: dump each layer's genes to stdout.
        print("The genes od the brain")
        for layer in self.layers:
            print(layer.get_genes())
#
# brain = Brain(16, 32)
# brain.add_random_layer(32, 32)
# brain.add_random_layer(32, 48)
# brain.add_last_layer(2)
# brain.save_model("../Models/first_baby")
# brain = Brain.load_model("../Models/first_baby")
# print(len(brain.layers))
# brain.print_genes()
#
# brain.set_data(list(range(0, 16)))
# brain.feed_forward()
# print(brain.get_answer())
| 2.640625 | 3 |
Official_Model_For_Paper/ModelTraining/TwoStep_Algorithm1_Clip/run.py | hyliang96/ICNN | 0 | 12762896 | <reponame>hyliang96/ICNN<gh_stars>0
import os
# rlaunch = 'rlaunch --cpu=4 --memory=4096 --gpu=1 --preemptible=no '
datasets = ['VOCpart']
depths = [152]
gpu_id = '0'
batchsize = 32
epoch = 300
# exp_dir = '/data/ouyangzhihao/Exp/ICNN/LearnableMask/tb_dir/learnable_mask_baseline/Debug'
optim = 'adam'
lr = '1e-5' # finetune resnet152: 1e-5
lr_reg = '1e-3'
img_size = 128
lambda_reg = '1e-3' # reg. coef.
frozen = 'True'
ifmask = 'False'
train = 'False'
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
exp_dir_root = '/home/haoyu/mfs/project/CDCNN/ICNN_exp/VOCPart_train0.7_%d_pretrained/'%img_size
# os.system('rm -r ' + exp_dir)
for data in datasets:
for depth in depths:
# exp_dir = exp_dir_root + '%sres%d_bs%d_%s_lr%s_lrreg%s_lmd%s_%s_frz5:4_pre0-6:3' % ('naive_' if not ifmask == 'True' else '', depth, batchsize, optim, lr, lr_reg, lambda_reg,
# 'frozen' if frozen == 'True' else ''
# )
model_name = 'naive_res152_bs32_adam_lr1e-5_lrreg1e-3_lmd1e-3_frozen_3:2'
# model_name = 'res152_bs32_adam_lr1e-5_lrreg1e-3_lmd1e-3_frozen_frz5:4_pre10-3:2'
exp_dir = exp_dir_root+'/'+model_name
res = exp_dir + '/res.txt'
print('run ', exp_dir.split('/')[-1])
# cmd = rlaunch + '-- python3 ./train.py --dataset %s --depth %d --res %s --gpu-ids %s --batch_size %d --epoch %d --exp_dir %s' \
# %(data,depth,res,gpu_id,batchsize,epoch,exp_dir)
cmd = 'python3 ./train.py --dataset %s --depth %d --res %s --ifmask %s --gpu-ids %s --optim %s \
--batch_size %d --epoch %d --exp_dir %s --lr %s --img_size %d --lambda_reg %s --frozen %s --lr_reg %s --train %s' \
% (data, depth, res,ifmask, gpu_id, optim, batchsize, epoch, exp_dir, lr ,img_size, lambda_reg, frozen, lr_reg, train)
os.system(cmd)
| 1.679688 | 2 |
doc/Programs/LifeScience/Hudson_Bay.py | anacost/MachineLearning | 1 | 12762897 | <reponame>anacost/MachineLearning
import numpy as np
import matplotlib.pyplot as plt
def solver(m, H0, L0, dt, a, b, c, d, t0):
    """Solve the hare/lynx difference equations over m years.

    Explicit Euler stepping of the Lotka-Volterra style model with time
    step dt (measured in years).

    Parameters
    ----------
    m : number of years to simulate
    H0, L0 : initial hare / lynx populations
    dt : time step in years
    a, b, c, d : model coefficients (hare growth, predation loss,
        lynx death, lynx growth per predation)
    t0 : start year

    Returns
    -------
    (H, L, t) : numpy arrays of hare counts, lynx counts and time points.
    """
    num_intervals = int(m/float(dt))
    t = np.linspace(t0, t0 + m, num_intervals+1)
    H = np.zeros(t.size)
    L = np.zeros(t.size)
    # The original used a Python 2 print statement, which is a SyntaxError
    # under Python 3; the print() function works on both.
    print('Init:', H0, L0, dt)
    H[0] = H0
    L[0] = L0
    # Explicit Euler steps of the predator-prey model.
    for n in range(0, len(t)-1):
        H[n+1] = H[n] + a*dt*H[n] - b*dt*H[n]*L[n]
        L[n+1] = L[n] + d*dt*H[n]*L[n] - c*dt*L[n]
    return H, L, t
# Load in data file (expects Hudson_Bay.csv in the working directory with
# a header row and columns: year, hares, lynx)
data = np.loadtxt('Hudson_Bay.csv', delimiter=',', skiprows=1)

# Make arrays containing x-axis and hares and lynx populations
t_e = data[:,0]
H_e = data[:,1]
L_e = data[:,2]

# Simulate using the model.
# NOTE(review): coefficients a-d are presumably fitted to the 1900-1920
# Hudson Bay fur records -- confirm against the source of the data set.
H, L, t = solver(m=20, H0=34.91, L0=3.857, dt=0.1,
                 a=0.4807, b=0.02482, c=0.9272, d=0.02756,
                 t0=1900)

# Visualize simulations and data: measured curves with markers,
# simulated curves dashed.
plt.plot(t_e, H_e, 'b-+', t_e, L_e, 'r-o', t, H, 'm--', t, L, 'k--')
plt.xlabel('Year')
plt.ylabel('Numbers of hares and lynx')
plt.axis([1900, 1920, 0, 140])
plt.title(r'Population of hares and lynx 1900-1920 (x1000)')
plt.legend(('H_e', 'L_e', 'H', 'L'), loc='upper left')
plt.savefig('Hudson_Bay_sim.pdf')
plt.savefig('Hudson_Bay_sim.png')
plt.show()
| 3.59375 | 4 |
tests/commands/source/test_sql.py | atviriduomenys/spinta | 2 | 12762898 | <filename>tests/commands/source/test_sql.py
import collections
import pytest
import sqlalchemy as sa
from spinta.testing.datasets import pull
SQL = collections.namedtuple('SQL', ('engine', 'schema'))
@pytest.fixture
def sql(rc):
    """Yield an (engine, schema) pair bound to the default backend DSN.

    Tables registered on the yielded MetaData during the test are dropped
    on teardown.
    """
    dsn = rc.get('backends', 'default', 'dsn', required=True)
    engine = sa.create_engine(dsn)
    schema = sa.MetaData(engine)
    yield SQL(engine, schema)
    schema.drop_all()
@pytest.mark.skip('datasets')
def test_sql(rc, cli, sql, app):
    """Integration test: pull rows from a SQL source into a dataset.

    Seeds a tests_country table, pulls it twice (second pull should find
    nothing new), then checks the data is only visible through the
    dataset-qualified endpoint.
    """
    dsn = rc.get('backends', 'default', 'dsn', required=True)
    # Point the 'sql' dataset resource at the same database as the backend.
    rc = rc.fork({'datasets.default.sql.db': dsn})
    country = sa.Table(
        'tests_country', sql.schema,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('kodas', sa.Text),
        sa.Column('pavadinimas', sa.Text),
    )
    sql.schema.create_all()
    with sql.engine.begin() as conn:
        conn.execute(country.insert(), [
            {'kodas': 'lt', 'pavadinimas': 'Lietuva'},
            {'kodas': 'lv', 'pavadinimas': 'Latvija'},
            {'kodas': 'ee', 'pavadinimas': 'Estija'},
        ])

    # First pull imports all three rows; a repeated pull is a no-op.
    assert len(pull(cli, rc, 'sql')) == 3
    assert len(pull(cli, rc, 'sql')) == 0

    app.authorize(['spinta_getall', 'spinta_search'])
    # The plain model endpoint is empty; data lives under the dataset.
    assert app.getdata('/country') == []
    assert app.getdata('/country/:dataset/sql?select(code,title)&sort(+code)') == [
        {'code': 'ee', 'title': 'Estija'},
        {'code': 'lt', 'title': 'Lietuva'},
        {'code': 'lv', 'title': 'Latvija'},
    ]
| 2.140625 | 2 |
lib-src/lv2/sratom/waflib/TaskGen.py | Marcusz97/CILP_Facilitatore_Audacity | 24 | 12762899 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import copy,re,os
from waflib import Task,Utils,Logs,Errors,ConfigSet,Node
feats=Utils.defaultdict(set)
class task_gen(object):
mappings={}
prec=Utils.defaultdict(list)
def __init__(self,*k,**kw):
self.source=''
self.target=''
self.meths=[]
self.prec=Utils.defaultdict(list)
self.mappings={}
self.features=[]
self.tasks=[]
if not'bld'in kw:
self.env=ConfigSet.ConfigSet()
self.idx=0
self.path=None
else:
self.bld=kw['bld']
self.env=self.bld.env.derive()
self.path=self.bld.path
try:
self.idx=self.bld.idx[id(self.path)]=self.bld.idx.get(id(self.path),0)+1
except AttributeError:
self.bld.idx={}
self.idx=self.bld.idx[id(self.path)]=1
for key,val in kw.items():
setattr(self,key,val)
def __str__(self):
return"<task_gen %r declared in %s>"%(self.name,self.path.abspath())
def __repr__(self):
lst=[]
for x in self.__dict__.keys():
if x not in['env','bld','compiled_tasks','tasks']:
lst.append("%s=%s"%(x,repr(getattr(self,x))))
return"bld(%s) in %s"%(", ".join(lst),self.path.abspath())
def get_name(self):
try:
return self._name
except AttributeError:
if isinstance(self.target,list):
lst=[str(x)for x in self.target]
name=self._name=','.join(lst)
else:
name=self._name=str(self.target)
return name
def set_name(self,name):
self._name=name
name=property(get_name,set_name)
def to_list(self,val):
if isinstance(val,str):return val.split()
else:return val
def post(self):
if getattr(self,'posted',None):
return False
self.posted=True
keys=set(self.meths)
self.features=Utils.to_list(self.features)
for x in self.features+['*']:
st=feats[x]
if not st:
if not x in Task.classes:
Logs.warn('feature %r does not exist - bind at least one method to it'%x)
keys.update(list(st))
prec={}
prec_tbl=self.prec or task_gen.prec
for x in prec_tbl:
if x in keys:
prec[x]=prec_tbl[x]
tmp=[]
for a in keys:
for x in prec.values():
if a in x:break
else:
tmp.append(a)
tmp.sort()
out=[]
while tmp:
e=tmp.pop()
if e in keys:out.append(e)
try:
nlst=prec[e]
except KeyError:
pass
else:
del prec[e]
for x in nlst:
for y in prec:
if x in prec[y]:
break
else:
tmp.append(x)
if prec:
raise Errors.WafError('Cycle detected in the method execution %r'%prec)
out.reverse()
self.meths=out
Logs.debug('task_gen: posting %s %d'%(self,id(self)))
for x in out:
try:
v=getattr(self,x)
except AttributeError:
raise Errors.WafError('%r is not a valid task generator method'%x)
Logs.debug('task_gen: -> %s (%d)'%(x,id(self)))
v()
Logs.debug('task_gen: posted %s'%self.name)
return True
def get_hook(self,node):
name=node.name
for k in self.mappings:
if name.endswith(k):
return self.mappings[k]
for k in task_gen.mappings:
if name.endswith(k):
return task_gen.mappings[k]
raise Errors.WafError("File %r has no mapping in %r (did you forget to load a waf tool?)"%(node,task_gen.mappings.keys()))
def create_task(self,name,src=None,tgt=None):
task=Task.classes[name](env=self.env.derive(),generator=self)
if src:
task.set_inputs(src)
if tgt:
task.set_outputs(tgt)
self.tasks.append(task)
return task
def clone(self,env):
newobj=self.bld()
for x in self.__dict__:
if x in['env','bld']:
continue
elif x in['path','features']:
setattr(newobj,x,getattr(self,x))
else:
setattr(newobj,x,copy.copy(getattr(self,x)))
newobj.posted=False
if isinstance(env,str):
newobj.env=self.bld.all_envs[env].derive()
else:
newobj.env=env.derive()
return newobj
def declare_chain(name='',rule=None,reentrant=None,color='BLUE',ext_in=[],ext_out=[],before=[],after=[],decider=None,scan=None,install_path=None,shell=False):
ext_in=Utils.to_list(ext_in)
ext_out=Utils.to_list(ext_out)
if not name:
name=rule
cls=Task.task_factory(name,rule,color=color,ext_in=ext_in,ext_out=ext_out,before=before,after=after,scan=scan,shell=shell)
def x_file(self,node):
ext=decider and decider(self,node)or cls.ext_out
if ext_in:
_ext_in=ext_in[0]
tsk=self.create_task(name,node)
cnt=0
keys=list(self.mappings.keys())+list(self.__class__.mappings.keys())
for x in ext:
k=node.change_ext(x,ext_in=_ext_in)
tsk.outputs.append(k)
if reentrant!=None:
if cnt<int(reentrant):
self.source.append(k)
else:
for y in keys:
if k.name.endswith(y):
self.source.append(k)
break
cnt+=1
if install_path:
self.bld.install_files(install_path,tsk.outputs)
return tsk
for x in cls.ext_in:
task_gen.mappings[x]=x_file
return x_file
def taskgen_method(func):
setattr(task_gen,func.__name__,func)
return func
def feature(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for name in k:
feats[name].update([func.__name__])
return func
return deco
def before_method(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for fun_name in k:
if not func.__name__ in task_gen.prec[fun_name]:
task_gen.prec[fun_name].append(func.__name__)
return func
return deco
before=before_method
def after_method(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for fun_name in k:
if not fun_name in task_gen.prec[func.__name__]:
task_gen.prec[func.__name__].append(fun_name)
return func
return deco
after=after_method
def extension(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for x in k:
task_gen.mappings[x]=func
return func
return deco
@taskgen_method
def to_nodes(self,lst,path=None):
tmp=[]
path=path or self.path
find=path.find_resource
if isinstance(lst,self.path.__class__):
lst=[lst]
for x in Utils.to_list(lst):
if isinstance(x,str):
node=find(x)
else:
node=x
if not node:
raise Errors.WafError("source not found: %r in %r"%(x,self))
tmp.append(node)
return tmp
@feature('*')
def process_source(self):
self.source=self.to_nodes(getattr(self,'source',[]))
for node in self.source:
self.get_hook(node)(self,node)
@feature('*')
@before_method('process_source')
def process_rule(self):
if not getattr(self,'rule',None):
return
name=str(getattr(self,'name',None)or self.target or getattr(self.rule,'__name__',self.rule))
try:
cache=self.bld.cache_rule_attr
except AttributeError:
cache=self.bld.cache_rule_attr={}
cls=None
if getattr(self,'cache_rule','True'):
try:
cls=cache[(name,self.rule)]
except KeyError:
pass
if not cls:
cls=Task.task_factory(name,self.rule,getattr(self,'vars',[]),shell=getattr(self,'shell',True),color=getattr(self,'color','BLUE'),scan=getattr(self,'scan',None))
if getattr(self,'scan',None):
cls.scan=self.scan
elif getattr(self,'deps',None):
def scan(self):
nodes=[]
for x in self.generator.to_list(getattr(self.generator,'deps',None)):
node=self.generator.path.find_resource(x)
if not node:
self.generator.bld.fatal('Could not find %r (was it declared?)'%x)
nodes.append(node)
return[nodes,[]]
cls.scan=scan
if getattr(self,'update_outputs',None):
Task.update_outputs(cls)
if getattr(self,'always',None):
Task.always_run(cls)
for x in['after','before','ext_in','ext_out']:
setattr(cls,x,getattr(self,x,[]))
if getattr(self,'cache_rule','True'):
cache[(name,self.rule)]=cls
tsk=self.create_task(name)
if getattr(self,'target',None):
if isinstance(self.target,str):
self.target=self.target.split()
if not isinstance(self.target,list):
self.target=[self.target]
for x in self.target:
if isinstance(x,str):
tsk.outputs.append(self.path.find_or_declare(x))
else:
x.parent.mkdir()
tsk.outputs.append(x)
if getattr(self,'install_path',None):
self.bld.install_files(self.install_path,tsk.outputs)
if getattr(self,'source',None):
tsk.inputs=self.to_nodes(self.source)
self.source=[]
if getattr(self,'cwd',None):
tsk.cwd=self.cwd
@feature('seq')
def sequence_order(self):
if self.meths and self.meths[-1]!='sequence_order':
self.meths.append('sequence_order')
return
if getattr(self,'seq_start',None):
return
if getattr(self.bld,'prev',None):
self.bld.prev.post()
for x in self.bld.prev.tasks:
for y in self.tasks:
y.set_run_after(x)
self.bld.prev=self
re_m4=re.compile('@(\w+)@',re.M)
class subst_pc(Task.Task):
def run(self):
if getattr(self.generator,'is_copy',None):
self.outputs[0].write(self.inputs[0].read('rb'),'wb')
if getattr(self.generator,'chmod',None):
os.chmod(self.outputs[0].abspath(),self.generator.chmod)
return
code=self.inputs[0].read(encoding=getattr(self.generator,'encoding','ISO8859-1'))
if getattr(self.generator,'subst_fun',None):
code=self.generator.subst_fun(self,code)
if code:
self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
return
code=code.replace('%','%%')
lst=[]
def repl(match):
g=match.group
if g(1):
lst.append(g(1))
return"%%(%s)s"%g(1)
return''
code=re_m4.sub(repl,code)
try:
d=self.generator.dct
except AttributeError:
d={}
for x in lst:
tmp=getattr(self.generator,x,'')or self.env.get_flat(x)or self.env.get_flat(x.upper())
d[x]=str(tmp)
code=code%d
self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
self.generator.bld.raw_deps[self.uid()]=self.dep_vars=lst
try:delattr(self,'cache_sig')
except AttributeError:pass
if getattr(self.generator,'chmod',None):
os.chmod(self.outputs[0].abspath(),self.generator.chmod)
def sig_vars(self):
bld=self.generator.bld
env=self.env
upd=self.m.update
if getattr(self.generator,'subst_fun',None):
upd(Utils.h_fun(self.generator.subst_fun))
vars=self.generator.bld.raw_deps.get(self.uid(),[])
act_sig=bld.hash_env_vars(env,vars)
upd(act_sig)
lst=[getattr(self.generator,x,'')for x in vars]
upd(Utils.h_list(lst))
return self.m.digest()
@extension('.pc.in')
def add_pcfile(self,node):
tsk=self.create_task('subst_pc',node,node.change_ext('.pc','.pc.in'))
self.bld.install_files(getattr(self,'install_path','${LIBDIR}/pkgconfig/'),tsk.outputs)
class subst(subst_pc):
pass
@feature('subst')
@before_method('process_source','process_rule')
def process_subst(self):
src=Utils.to_list(getattr(self,'source',[]))
if isinstance(src,Node.Node):
src=[src]
tgt=Utils.to_list(getattr(self,'target',[]))
if isinstance(tgt,Node.Node):
tgt=[tgt]
if len(src)!=len(tgt):
raise Errors.WafError('invalid number of source/target for %r'%self)
for x,y in zip(src,tgt):
if not x or not y:
raise Errors.WafError('null source or target for %r'%self)
a,b=None,None
if isinstance(x,str)and isinstance(y,str)and x==y:
a=self.path.find_node(x)
b=self.path.get_bld().make_node(y)
if not os.path.isfile(b.abspath()):
b.sig=None
b.parent.mkdir()
else:
if isinstance(x,str):
a=self.path.find_resource(x)
elif isinstance(x,Node.Node):
a=x
if isinstance(y,str):
b=self.path.find_or_declare(y)
elif isinstance(y,Node.Node):
b=y
if not a:
raise Errors.WafError('cound not find %r for %r'%(x,self))
has_constraints=False
tsk=self.create_task('subst',a,b)
for k in('after','before','ext_in','ext_out'):
val=getattr(self,k,None)
if val:
has_constraints=True
setattr(tsk,k,val)
if not has_constraints and b.name.endswith('.h'):
tsk.before=[k for k in('c','cxx')if k in Task.classes]
inst_to=getattr(self,'install_path',None)
if inst_to:
self.bld.install_files(inst_to,b,chmod=getattr(self,'chmod',Utils.O644))
self.source=[]
| 2.125 | 2 |
we-dap/__init__.py | jeremyleung521/WE_dap | 1 | 12762900 | """
Author: <NAME>
Date of Creation: September 13th, 2021
Description:
"""
# Welcome to the WE-dap module!
| 0.96875 | 1 |
jobs/api.py | imcodingideas/CS10-job-board | 0 | 12762901 | <filename>jobs/api.py<gh_stars>0
from .models import JobPost, User, UserMembership, Membership, Payment
from rest_framework import serializers
# Customization of UserSerializer
from rest_framework.utils import model_meta
from .seralizer_helpers import raise_errors_on_nested_writes
from django.contrib.auth.hashers import make_password
# Tag serializer for JobPostSerializer
from taggit_serializer.serializers import (TagListSerializerField, TaggitSerializer)
# Serializers for API representation
class JWTSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'is_employer')
class UserRegistrationSerializer(serializers.ModelSerializer):
# Encrypts password with create_user=Django default create user method
def create(self, validated_data):
user = User.objects.create_user(**validated_data)
return user
class Meta:
model = User
fields = (
'email',
'password',
'is_employer',
'first_name',
'last_name',
)
class UserViewSerializer(serializers.ModelSerializer):
    """Serializer for viewing and updating User instances.

    update() special-cases password (which must be hashed before saving)
    and email; everything else falls through to DRF's default
    attribute/relation assignment.
    """

    def update(self, user, validated_data):
        raise_errors_on_nested_writes('update', self, validated_data)
        info = model_meta.get_field_info(user)

        if 'password' in validated_data:
            # Hash the new password before persisting it. The original
            # code contained an undefined/redacted call here, which was
            # not even valid Python; make_password is imported at the top
            # of this module for exactly this purpose.
            user.password = make_password(
                validated_data.get('password')
            )
            user.save()
            return user
        elif 'email' in validated_data:
            user.email = validated_data.get('email')
            user.save()
            return user

        # From: Django Serializers: https://github.com/encode/django-rest-framework/blob/master/rest_framework/serializers.py#L969
        for attr, value in validated_data.items():
            if attr in info.relations and info.relations[attr].to_many:
                field = getattr(user, attr)
                field.set(value)
            else:
                setattr(user, attr, value)
        user.save()
        return user

    class Meta:
        model = User
        fields = (
            'id',
            'is_employer',
            'email',
            'password',
            'is_active',
            'company_name',
            'company_logo',
            'company_summary',
            'application_inbox',
            'first_name',
            'last_name',
            'profile_photo',
            'created_date'
        )
class UserIDSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id',)
class JobPostSerializer(TaggitSerializer, serializers.ModelSerializer):
# url = serializers.HyperlinkedIdentityField(
# view_name='posts-api:detail',
# lookup_field='slug'
# )
tags = TagListSerializerField()
class Meta:
model = JobPost
fields = '__all__'
class JobPreviewSerializer(serializers.ModelSerializer):
class Meta:
model = JobPost
fields = ('id', 'title', 'company_logo', 'description', 'min_salary', 'max_salary')
class MembershipSerializer(serializers.ModelSerializer):
class Meta:
model = Membership
fields = ('membership_type', 'price')
class UserMembershipSerializer(serializers.ModelSerializer):
class Meta:
model = UserMembership
fields = ('user', 'stripe_customer_id', 'membership')
class PaymentViewSerializer(serializers.ModelSerializer):
class Meta:
model = Payment
fields = '__all__'
| 2.15625 | 2 |
nps.py | BenDelgado/now-playing-spotify | 0 | 12762902 | <filename>nps.py
import spotipy
def artist(x):
    """Print the given value; the calls below pass a string, an int and a
    variable to demonstrate that any object is accepted."""
    print(x)
radiohead = 'Radiohead'
artist('Radiohead')
artist(123)
artist(radiohead)
| 1.710938 | 2 |
receipt_parser_core/enhancer.py | Dielee/receipt-parser-legacy | 611 | 12762903 | # !/usr/bin/python3
# coding: utf-8
# Copyright 2015-2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import cv2
import numpy as np
from PIL import Image
from pytesseract import pytesseract
from wand.image import Image as WandImage
from scipy.ndimage import interpolation as inter
from receipt_parser_core import Receipt
from receipt_parser_core.config import read_config
BASE_PATH = os.getcwd()
INPUT_FOLDER = os.path.join(BASE_PATH, "data/img")
TMP_FOLDER = os.path.join(BASE_PATH, "data/tmp")
OUTPUT_FOLDER = os.path.join(BASE_PATH, "data/txt")
ORANGE = '\033[33m'
RESET = '\033[0m'
def prepare_folders():
    """
    :return: void
        Creates the input/tmp/output folders if they do not exist yet.
    """
    for folder in [
        INPUT_FOLDER, TMP_FOLDER, OUTPUT_FOLDER
    ]:
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() / os.makedirs() pair.
        os.makedirs(folder, exist_ok=True)
def find_images(folder):
    """
    :param folder: str
        Path to folder to search
    :return: generator of str
        Names of files in folder that PIL can identify as images
    """
    for file in os.listdir(folder):
        full_path = os.path.join(folder, file)
        if os.path.isfile(full_path):
            try:
                # Open as a context manager so the file handle is closed
                # even when identification fails; the original leaked the
                # handle and swallowed every exception with a bare except.
                with Image.open(full_path):
                    pass
            except OSError:
                # Not a readable image (covers IOError and Pillow's
                # UnidentifiedImageError) -- skip it.
                continue
            yield file
def rotate_image(input_file, output_file, angle=90):
    """
    :param input_file: str
        Path to image to rotate
    :param output_file: str
        Path to output image
    :param angle: float
        Angle to rotate (only applied to landscape images)
    :return: void
        Rotates image and saves result
    """
    with WandImage(filename=input_file) as img:
        width, height = img.size
        if width < height:
            # Portrait images are assumed to already be upright; skip rotation.
            angle = 0

        print(ORANGE + '\t~: ' + RESET + 'Rotate image by: ' + str(angle) + "°" + RESET)
        with img.clone() as rotated:
            rotated.rotate(angle)
            rotated.save(filename=output_file)
def deskew_image(image, delta=1, limit=5):
    """Straighten a slightly skewed scan by brute-force angle search.

    Every candidate angle in [-limit, limit] (step ``delta`` degrees) is
    scored on the Otsu-thresholded image: the score is the sum of squared
    differences between adjacent row sums, which peaks when text lines are
    horizontal. The image is then rotated by the best-scoring angle.

    :param image: BGR image array (OpenCV format)
    :param delta: step in degrees between candidate angles
    :param limit: maximum absolute skew considered, in degrees
    :return: the deskewed image array
    """
    def determine_score(arr, angle):
        # Rotate without reshaping so row sums stay comparable across angles.
        data = inter.rotate(arr, angle, reshape=False, order=0)
        histogram = np.sum(data, axis=1)
        score = np.sum((histogram[1:] - histogram[:-1]) ** 2)
        # NOTE(review): the histogram return value is unused by the caller.
        return histogram, score

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

    scores = []
    angles = np.arange(-limit, limit + delta, delta)
    for angle in angles:
        histogram, score = determine_score(thresh, angle)
        scores.append(score)

    best_angle = angles[scores.index(max(scores))]

    (h, w) = image.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, best_angle, 1.0)

    print(ORANGE + '\t~: ' + RESET + 'Deskew image by: ' + str(best_angle) + ' angle' + RESET)
    rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, \
                             borderMode=cv2.BORDER_REPLICATE)

    return rotated
def run_tesseract(input_file, output_file, language="deu"):
    """
    :param input_file: str
        Path to image to OCR
    :param output_file: str
        Path to output file
    :param language: str
        Tesseract language pack to use (default: German)
    :return: void
        Runs tesseract on image and saves result
    """
    print(ORANGE + '\t~: ' + RESET + 'Parse image using pytesseract' + RESET)
    print(ORANGE + '\t~: ' + RESET + 'Parse image at: ' + input_file + RESET)
    print(ORANGE + '\t~: ' + RESET + 'Write result to: ' + output_file + RESET)

    # Round-trip through an in-memory buffer: wand re-encodes the file,
    # then PIL re-opens it for pytesseract.
    with io.BytesIO() as transfer:
        with WandImage(filename=input_file) as img:
            img.save(transfer)

        with Image.open(transfer) as img:
            # --psm 6: assume a single uniform block of text; 60 s timeout.
            image_data = pytesseract.image_to_string(img, lang=language, timeout=60, config="--psm 6")

            out = open(output_file, "w", encoding='utf-8')
            out.write(image_data)
            out.close()
def rescale_image(img):
    """Upscale *img* by 20 % in both axes (helps OCR on small scans)."""
    print(ORANGE + '\t~: ' + RESET + 'Rescale image' + RESET)
    img = cv2.resize(img, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)
    return img
def grayscale_image(img):
    """Convert a BGR image to single-channel grayscale."""
    print(ORANGE + '\t~: ' + RESET + 'Grayscale image' + RESET)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return img
def remove_noise(img):
    """Binarise the image and suppress speckle noise before OCR.

    Applies a 1x1 dilate/erode pass, then Otsu thresholding on a
    Gaussian-blurred and a bilateral-filtered copy, and finally an
    adaptive Gaussian threshold.

    :param img: single-channel (grayscale) image array
    :return: binarised image array
    """
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.dilate(img, kernel, iterations=1)
    img = cv2.erode(img, kernel, iterations=1)
    print(ORANGE + '\t~: ' + RESET + 'Applying gaussianBlur and medianBlur' + RESET)
    img = cv2.threshold(cv2.GaussianBlur(img, (5, 5), 0), 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    img = cv2.threshold(cv2.bilateralFilter(img, 5, 75, 75), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    img = cv2.adaptiveThreshold(cv2.bilateralFilter(img, 9, 75, 75), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 31, 2)
    return img
def remove_shadows(img):
    """Flatten uneven illumination/shadows channel by channel.

    For each colour plane: dilate, median-blur to estimate the background,
    then take the inverted absolute difference from the original plane.

    :param img: BGR image array
    :return: shadow-normalised image array
    """
    rgb_planes = cv2.split(img)
    result_planes = []
    # NOTE(review): the normalised planes are computed below but never
    # merged or returned -- result_norm_planes is effectively dead.
    result_norm_planes = []
    for plane in rgb_planes:
        dilated_img = cv2.dilate(plane, np.ones((7,7), np.uint8))
        bg_img = cv2.medianBlur(dilated_img, 21)
        diff_img = 255 - cv2.absdiff(plane, bg_img)
        norm_img = cv2.normalize(diff_img,None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
        result_planes.append(diff_img)
        result_norm_planes.append(norm_img)

    result = cv2.merge(result_planes)

    return result
def detect_orientation(image):
    """Log the rotation angle of the content's minimum-area bounding box.

    NOTE(review): the angle is only printed; the image is returned
    unchanged, so this function has no effect on the pipeline output.
    """
    coords = np.column_stack(np.where(image > 0))
    angle = cv2.minAreaRect(coords)[-1]

    print(ORANGE + '\t~: ' + RESET + 'Get rotation angle:' + str(angle) + RESET)

    return image
def enhance_image(img, tmp_path ,high_contrast=True, gaussian_blur=True, rotate=True):
    """Run the full pre-OCR clean-up pipeline on *img*.

    Steps: rescale, optional rotate+deskew (via a round-trip through
    *tmp_path* on disk, because rotation is done with wand on a file),
    shadow removal, optional grayscale conversion, optional denoising.

    :param img: BGR image array (OpenCV format)
    :param tmp_path: str, scratch file used for the rotation round-trip
    :param high_contrast: bool, convert to grayscale when True
    :param gaussian_blur: bool, denoise/binarise when True
    :param rotate: bool, rotate and deskew when True
    :return: the enhanced image array
    """
    img = rescale_image(img)

    if rotate:
        cv2.imwrite(tmp_path, img)
        rotate_image(tmp_path, tmp_path)

        img = cv2.imread(tmp_path)
        img = deskew_image(img)

    img = remove_shadows(img)

    if high_contrast:
        img = grayscale_image(img)

    if gaussian_blur:
        img = remove_noise(img)

    return img
def process_receipt(config, filename, rotate=True, grayscale=True, gaussian_blur=True):
    """Enhance and OCR a single receipt image from the input folder.

    :param config: parsed config object (must expose ``language``)
    :param filename: image file name inside ``data/img``
    :param rotate: rotate/deskew the image before OCR
    :param grayscale: convert to grayscale before OCR
    :param gaussian_blur: denoise/binarise before OCR
    :return: Receipt built from the OCR text (empty if the image is unreadable)
    """
    input_path = INPUT_FOLDER + "/" + filename
    output_path = OUTPUT_FOLDER + "/" + filename.split(".")[0] + ".txt"

    print(ORANGE + '~: ' + RESET + 'Process image: ' + ORANGE + input_path + RESET)
    prepare_folders()

    # Bug fix: cv2.imread() never raises FileNotFoundError -- it returns
    # None for a missing/unreadable file -- so the old try/except was dead.
    img = cv2.imread(input_path)
    if img is None:
        return Receipt(config=config, raw="")

    tmp_path = os.path.join(
        TMP_FOLDER, filename
    )
    # Bug fix: the caller's `rotate` flag was previously ignored
    # (enhance_image always fell back to its own default).
    img = enhance_image(img, tmp_path, grayscale, gaussian_blur, rotate)

    print(ORANGE + '~: ' + RESET + 'Temporary store image at: ' + ORANGE + tmp_path + RESET)
    cv2.imwrite(tmp_path, img)

    run_tesseract(tmp_path, output_path, config.language)
    print(ORANGE + '~: ' + RESET + 'Store parsed text at: ' + ORANGE + output_path + RESET)

    # Close the result file deterministically (it was left to the GC before).
    with open(output_path, 'r') as result_file:
        raw = result_file.readlines()
    return Receipt(config=config, raw=raw)
def main():
    """CLI entry point: enhance and OCR every image found in data/img."""
    prepare_folders()

    dir_path = os.getcwd()
    config = read_config(config=dir_path + "/config.yml")
    images = list(find_images(INPUT_FOLDER))

    print(ORANGE + '~: ' + RESET + 'Found: ' + ORANGE + str(len(images)),
          RESET + ' images in: ' + ORANGE + INPUT_FOLDER + RESET)

    i = 1
    for image in images:
        input_path = os.path.join(
            INPUT_FOLDER,
            image
        )
        tmp_path = os.path.join(
            TMP_FOLDER,
            image
        )
        out_path = os.path.join(
            OUTPUT_FOLDER,
            image + ".txt"
        )
        if i != 1: print()
        print(ORANGE + '~: ' + RESET + 'Process image (' + ORANGE + str(i) + '/' + str(
            len(images)) + RESET + ') : ' + input_path + RESET)

        # Enhance with all defaults (rotate/deskew, grayscale, denoise),
        # persist the intermediate image, then OCR it to a .txt file.
        img = cv2.imread(input_path)
        img = enhance_image(img, tmp_path)
        cv2.imwrite(tmp_path, img)
        run_tesseract(tmp_path, out_path, config.language)
        i = i + 1
if __name__ == '__main__':
main()
| 2.65625 | 3 |
tests/view/test_images.py | hnesk/ocrd-browser | 14 | 12762904 | import unittest
from unittest.mock import MagicMock
from ocrd_browser.view import ViewImages
from ocrd_browser.ui import MainWindow
from tests import TestCase
class ViewImagesTestCase(TestCase):
    """Smoke test: a ViewImages view can be constructed against a mocked MainWindow."""

    def setUp(self):
        # MagicMock(spec=MainWindow) stands in for a real window; only
        # construction is exercised here, no rendering.
        self.vx = ViewImages('unique', MagicMock(spec=MainWindow))

    def test_can_construct(self):
        self.assertIsNotNone(self.vx)
if __name__ == '__main__':
unittest.main()
| 2.25 | 2 |
migrations/versions/6c5a2b1c24f3_seventh_migration.py | Maryan23/1-MIN-OF-YOU | 0 | 12762905 | <gh_stars>0
"""Seventh migration
Revision ID: 6c5a2b1c24f3
Revises: <PASSWORD>
Create Date: 2021-11-10 12:13:42.087615
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6c5a2b1c24f3'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the ``upvote``/``downvote`` association
    tables and add foreign keys linking comments and pitches to users."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('downvote',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('pitch_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('upvote',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('pitch_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_foreign_key(None, 'comments', 'pitches', ['pitch_id'], ['id'])
    op.add_column('pitches', sa.Column('user_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'pitches', 'users', ['user_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the added foreign keys/column and the
    ``upvote``/``downvote`` tables (reverse order of ``upgrade``)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'pitches', type_='foreignkey')
    op.drop_column('pitches', 'user_id')
    op.drop_constraint(None, 'comments', type_='foreignkey')
    op.drop_table('upvote')
    op.drop_table('downvote')
    # ### end Alembic commands ###
| 1.75 | 2 |
Problem_9_hard.py | alucardthefish/DailyCodingProblem | 0 | 12762906 | # This problem was asked by Airbnb.
#
# Given a list of integers, write a function that returns the largest sum of non-adjacent numbers.
# Numbers can be 0 or negative.
# For example, [2, 4, 6, 2, 5] should return 13, since we pick 2, 6, and 5. [5, 1, 1, 5] should return 10,
# since we pick 5 and 5.
# Follow-up: Can you do this in O(N) time and constant space?
# Input examples
input_1 = [2, 4, 6, 2, 5] # return 13
input_2 = [5, 1, 1, 5] # return 10
input_3 = [2, 14, 6, 2, 15] # return 29
input_4 = [2, 5, 11, 8, 3] # return 16
input_5 = [90, 15, 10, 30, 100] # return 200
input_6 = [29, 51, 8, 10, 43, 28] # return 94
def largest_sum_adj(arr):
    """Return the largest sum obtainable from non-adjacent elements of *arr*.

    Classic dynamic-programming solution in O(n) time and O(1) extra space:
    walk the list once, tracking the best sum of a selection that includes
    the current element (``incl``) and the best that excludes it (``excl``).
    The selection may be empty, so the result is never negative.

    Fixes over the previous version: the input list is no longer mutated,
    and lists of length 0, 1 or 2 are handled instead of printing an error
    and returning 0.

    :param arr: list of integers (values may be 0 or negative)
    :return: maximum sum of non-adjacent elements (0 if nothing is picked)
    """
    incl = 0  # best sum ending in a selection that includes the previous item
    excl = 0  # best sum of a selection that excludes the previous item
    for value in arr:
        # Picking `value` forbids the previous element, so extend `excl`.
        incl, excl = excl + value, max(incl, excl)
    return max(incl, excl)
| 4.125 | 4 |
Includes/include/python3.py | WikiLibs/Parser | 0 | 12762907 | <gh_stars>0
import python31 as p3
from python31 import Test as t3
import sys
print('Python', sys.version)
print('\ntiming range()')
print('Python', sys.version)
print('Hello, World!')
print("some text,", end="")
print(' print more text on the same line')
print('Python', sys.version)
print('3 / 2 =', 3 / 2)
print('3 // 2 =', 3 // 2)
print('3 / 2.0 =', 3 / 2.0)
print('3 // 2.0 =', 3 // 2.0)
print('Python', sys.version)
print('strings are now utf-8 \u03BCnico\u0394é!')
print('and Python', sys.version, end="")
print('Python', sys.version)
try:
let_us_cause_a_NameError
except NameError as err:
print(err, '--> our error message')
p3.test_range(10)
t31 = t3()
t31.Do("Heyyyy")
| 2.546875 | 3 |
manhattan/tests/test_timerotating_log.py | cartlogic/manhattan | 1 | 12762908 | from __future__ import absolute_import, division, print_function
import types
import time
from threading import Thread
from manhattan.record import Record, PageRecord, GoalRecord
from manhattan.log.timerotating import TimeRotatingLog
from .base import BaseTest, work_path
def set_fake_name(log, index):
    """Monkey-patch *log* so ``log_name_for`` ignores the timestamp and
    always yields ``<log.path>.<index>`` -- lets tests pin the file name."""
    def _fixed_name(self, _timestamp):
        return '%s.%s' % (self.path, index)

    log.log_name_for = types.MethodType(_fixed_name, log)
def make_thread_consumer(log_r, process_from=None):
    """Start a background thread that drains *log_r* continuously.

    Returns ``(records, thread, pointer_box)``: *records* is a live list of
    parsed Record objects, *pointer_box* a single-element list holding the
    most recently processed log pointer.
    """
    records = []
    pointer_box = [None]
    log_r.sleep_delay = 0.001  # poll quickly so tests stay fast

    def _drain(log):
        for raw, pointer in log.process(stay_alive=True, process_from=process_from):
            records.append(Record.from_list(raw))
            pointer_box[0] = pointer

    worker = Thread(target=_drain, args=(log_r,))
    worker.start()
    return records, worker, pointer_box
class TimeRotatingLogTest(BaseTest):
    """Integration tests for TimeRotatingLog: one-shot processing, rotation
    across multiple files, live (stay_alive) tailing via a consumer thread,
    unicode payloads, and resuming from a saved log pointer.

    Timing note: the live tests sleep for 10x the consumer's poll delay to
    give the background thread time to pick up each write.
    """

    def test_basic(self):
        # Write one record, then read it back with a one-shot processor.
        path = work_path('trl-basic')
        log_w = TimeRotatingLog(path)
        log_w.write(PageRecord(url='/foo').to_list())
        log_r = TimeRotatingLog(path)
        records = list(log_r.process(stay_alive=False))
        self.assertEqual(len(records), 1)
        rec = Record.from_list(records[0][0])
        self.assertEqual(rec.url, '/foo')

    def test_multiple_logs(self):
        # Force two distinct file names (via set_fake_name) and verify the
        # reader stitches them together in order.
        path = work_path('trl-multi')
        log_w = TimeRotatingLog(path)
        set_fake_name(log_w, '001')
        log_w.write(PageRecord(url='/foo').to_list())
        set_fake_name(log_w, '004')
        log_w.write(PageRecord(url='/bar').to_list())
        log_r = TimeRotatingLog(path)
        records = list(log_r.process(stay_alive=False))
        self.assertEqual(len(records), 2)
        self.assertEqual(Record.from_list(records[0][0]).url, '/foo')
        self.assertEqual(Record.from_list(records[1][0]).url, '/bar')

    def test_stay_alive_single(self):
        # Live tailing: records written after the consumer starts appear.
        path = work_path('trl-stayalive')
        log_r = TimeRotatingLog(path)
        log_r.sleep_delay = 0.001
        consumed, consumer, _ = make_thread_consumer(log_r)
        try:
            self.assertEqual(len(consumed), 0)
            log_w = TimeRotatingLog(path)
            log_w.write(PageRecord(url='/baz').to_list())
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/baz')
            log_w.write(PageRecord(url='/herp').to_list())
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 2)
            self.assertEqual(consumed[1].url, '/herp')
        finally:
            # Always stop the consumer thread, even on assertion failure.
            log_r.killed.set()

    def test_stay_alive_multiple(self):
        # Live tailing across a file rotation (two fake file names).
        path = work_path('trl-stayalive-multi')
        log_r = TimeRotatingLog(path)
        log_r.sleep_delay = 0.001
        consumed, consumer, _ = make_thread_consumer(log_r)
        try:
            self.assertEqual(len(consumed), 0)
            log_w = TimeRotatingLog(path)
            set_fake_name(log_w, '357')
            log_w.write(PageRecord(url='/baz').to_list())
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/baz')
            set_fake_name(log_w, '358')
            log_w.write(PageRecord(url='/herp').to_list())
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 2)
            self.assertEqual(consumed[1].url, '/herp')
        finally:
            log_r.killed.set()

    def test_stay_alive_nofiles(self):
        # A consumer over a path with no log files must start and stop cleanly.
        log_r = TimeRotatingLog(work_path('trl-stayalive-none'))
        log_r.sleep_delay = 0.001
        consumed, consumer, _ = make_thread_consumer(log_r)
        log_r.killed.set()

    def test_unicode_names(self):
        # Non-ASCII goal names must round-trip through the log unchanged.
        path = work_path('trl-unicode')
        log_w = TimeRotatingLog(path)
        goal_name = u'Goo\xf6aa\xe1llll!!!'
        rec = GoalRecord(name=goal_name,
                         value='',
                         value_type='',
                         value_format='')
        log_w.write(rec.to_list())
        log_r = TimeRotatingLog(path)
        records = list(log_r.process(stay_alive=False))
        self.assertEqual(len(records), 1)
        rec = Record.from_list(records[0][0])
        self.assertEqual(rec.name, goal_name)

    def test_resume(self):
        # A second consumer started from the first consumer's last pointer
        # must see only records written after that pointer.
        path = work_path('trl-resume')
        log_w = TimeRotatingLog(path)

        # Create a thread consumer
        log_r1 = TimeRotatingLog(path)
        consumed, consumer, ptr_container = make_thread_consumer(log_r1)
        try:
            # Write one record
            log_w.write(PageRecord(url='/herp').to_list())
            time.sleep(log_r1.sleep_delay * 10)

            # Check that one record was read.
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/herp')
        finally:
            # Kill the thread
            log_r1.killed.set()

        # Wait for it to die.
        time.sleep(log_r1.sleep_delay * 10)

        last_pointer = ptr_container[0]
        self.assertIsNotNone(last_pointer)

        try:
            # Write one record
            log_w.write(PageRecord(url='/derp').to_list())
            time.sleep(log_r1.sleep_delay * 10)

            # Create a new thread consumer
            log_r2 = TimeRotatingLog(path)
            consumed, consumer, _ = \
                make_thread_consumer(log_r2, process_from=last_pointer)
            time.sleep(log_r2.sleep_delay * 10)

            # Check that the second record was read.
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/derp')
        finally:
            log_r2.killed.set()
| 2.234375 | 2 |
lib/python3.6/site-packages/trello/card.py | HaiBinh/slack_for_trello | 0 | 12762909 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import with_statement, print_function, absolute_import
from dateutil import parser as dateparser
from trello.checklist import Checklist
from trello.label import Label
class Card(object):
    """
    Class representing a Trello card. Card attributes are stored on
    the object as set by ``from_json``/``fetch``.
    """

    # --- Thin snake_case aliases over the raw camelCase attribute names
    # --- that the Trello JSON API uses.
    @property
    def member_id(self):
        return self.idMembers

    @property
    def short_id(self):
        return self.idShort

    @property
    def list_id(self):
        return self.idList

    @property
    def board_id(self):
        return self.idBoard

    @property
    def description(self):
        return self.desc

    @property
    def date_last_activity(self):
        return self.dateLastActivity

    @description.setter
    def description(self, value):
        self.desc = value

    @property
    def idLabels(self):
        return self.label_ids

    @idLabels.setter
    def idLabels(self, values):
        self.label_ids = values

    @property
    def list_labels(self):
        # Returns the Label objects attached to this card, or None when empty.
        if self.labels:
            return self.labels
        return None

    @property
    def comments(self):
        """
        Lazily loads and returns the comments.
        AttributeError means the cache slot was never created (card not
        fetched yet); it is initialised to None in that case.
        """
        try:
            if self._comments is None:
                self._comments = self.fetch_comments()
        except AttributeError:
            self._comments = None
        return self._comments

    @property
    def checklists(self):
        """
        Lazily loads and returns the checklists (same caching scheme as
        ``comments``).
        """
        try:
            if self._checklists is None:
                self._checklists = self.fetch_checklists()
        except AttributeError:
            self._checklists = None
        return self._checklists

    def __init__(self, parent, card_id, name=''):
        """
        :parent: reference to the parent List (or a Board-like object)
        :card_id: ID for this card
        :name: optional display name
        """
        if isinstance(parent, List):
            self.trello_list = parent
            self.board = parent.board
        else:
            self.board = parent

        self.client = parent.client
        self.id = card_id
        self.name = name

    @classmethod
    def from_json(cls, parent, json_obj):
        """
        Deserialize the card json object to a Card object

        :parent: the list (or board) object that the card belongs to
        :json_obj: json object
        """
        if 'id' not in json_obj:
            raise Exception("key 'id' is not in json_obj")
        # NOTE(review): .encode('utf-8') is a Python 2 leftover -- under
        # Python 3 this makes `name` a bytes object; confirm intended.
        card = cls(parent,
                   json_obj['id'],
                   name=json_obj['name'].encode('utf-8'))
        card.desc = json_obj.get('desc', '')
        card.closed = json_obj['closed']
        card.url = json_obj['url']
        card.member_ids = json_obj['idMembers']
        card.idLabels = json_obj['idLabels']
        card.idList = json_obj['idList']
        card.labels = Label.from_json_list(card.board, json_obj['labels'])
        return card

    def __repr__(self):
        return '<Card %s>' % self.name

    def fetch(self, eager=True):
        """
        Fetch all attributes for this card
        :param eager: If eager is true comments and checklists will be fetched immediately, otherwise on demand
        """
        json_obj = self.client.fetch_json(
            '/cards/' + self.id,
            query_params={'badges': False})
        self.id = json_obj['id']
        # NOTE(review): same Python 2 .encode() leftover as in from_json.
        self.name = json_obj['name'].encode('utf-8')
        self.desc = json_obj.get('desc', '')
        self.closed = json_obj['closed']
        self.url = json_obj['url']
        self.idMembers = json_obj['idMembers']
        self.idShort = json_obj['idShort']
        self.idList = json_obj['idList']
        self.idBoard = json_obj['idBoard']
        self.idLabels = json_obj['idLabels']
        self.labels = Label.from_json_list(self.board, json_obj['labels'])
        self.badges = json_obj['badges']
        self.pos = json_obj['pos']
        if json_obj.get('due', ''):
            self.due = json_obj.get('due', '')
        else:
            self.due = ''
        self.checked = json_obj['checkItemStates']
        self.dateLastActivity = dateparser.parse(json_obj['dateLastActivity'])

        self._checklists = self.fetch_checklists() if eager else None
        self._comments = self.fetch_comments() if eager else None

    def fetch_comments(self, force=False):
        """Return this card's comments sorted by date; skips the HTTP call
        when the badge counter says there are none (unless *force*)."""
        comments = []

        if (force is True) or (self.badges['comments'] > 0):
            comments = self.client.fetch_json(
                '/cards/' + self.id + '/actions',
                query_params={'filter': 'commentCard'})
            return sorted(comments, key=lambda comment: comment['date'])

        return comments

    def get_list(self):
        """Fetch and return the List object this card currently belongs to."""
        obj = self.client.fetch_json('/lists/' + self.idList)
        return List.from_json(board=self, json_obj=obj)

    def get_comments(self):
        """Alias for fetch_comments for backward compatibility. Always contact server"""
        return self.fetch_comments(force=True)

    def fetch_checklists(self):
        """Fetch this card's checklists from the server, ordered by position."""
        checklists = []
        json_obj = self.client.fetch_json(
            '/cards/' + self.id + '/checklists', )
        # Thanks https://github.com/HuffAndPuff for noticing checklist were not sorted
        json_obj = sorted(json_obj, key=lambda checklist: checklist['pos'])
        for cl in json_obj:
            checklists.append(Checklist(self.client, self.checked, cl,
                                        trello_card=self.id))
        return checklists

    def fetch_actions(self, action_filter='createCard'):
        """
        Fetch actions for this card; can give more argv to action_filter,
        split by ','. Result is stored on ``self.actions`` as a list.
        """
        json_obj = self.client.fetch_json(
            '/cards/' + self.id + '/actions',
            query_params={'filter': action_filter})
        self.actions = json_obj

    def attriExp(self, multiple):
        """
        Provides the option to explore what comes from trello
        :multiple is one of the attributes of GET /1/cards/[card id or shortlink]/actions
        """
        self.fetch_actions(multiple)
        return self.actions

    def listCardMove_date(self):
        """
        Will return the history of transitions of a card from one list to another
        The lower the index the more resent the historical item
        It returns a list of lists. The sublists are triplates of
        starting list, ending list and when the transition occured.
        """
        self.fetch_actions('updateCard:idList')
        res = []
        for idx in self.actions:
            date_str = idx['date']
            dateDate = dateparser.parse(date_str)
            strLst = idx['data']['listBefore']['name']
            endLst = idx['data']['listAfter']['name']
            res.append([strLst, endLst, dateDate])
        return res

    @property
    def latestCardMove_date(self):
        """
        returns the date of the last card transition
        """
        self.fetch_actions('updateCard:idList')
        date_str = self.actions[0]['date']
        return dateparser.parse(date_str)

    @property
    def create_date(self):
        """Will return the creation date of the card.

        WARNING: if the card was create via convertion of a checklist item
        it fails. attriExp('convertToCardFromCheckItem') allows to
        test for the condition.
        """
        self.fetch_actions()
        date_str = self.actions[0]['date']
        return dateparser.parse(date_str)

    @property
    def due_date(self):
        # Returns a datetime when a due date is set, else the empty string.
        return dateparser.parse(self.due) if self.due else ''

    def set_name(self, new_name):
        """
        Update the name on the card to :new_name:
        """
        self._set_remote_attribute('name', new_name)
        self.name = new_name

    def set_description(self, description):
        """Update the card description both remotely and locally."""
        self._set_remote_attribute('desc', description)
        self.desc = description

    def set_due(self, due):
        """Set the due time for the card

        :due: a datetime object
        """
        datestr = due.strftime('%Y-%m-%dT%H:%M:%S')
        self._set_remote_attribute('due', datestr)
        self.due = datestr

    def set_pos(self, pos):
        """
        Update card position in list

        :pos: 'top', 'bottom' or int
        """
        self._set_remote_attribute('pos', pos)
        self.pos = pos

    def set_closed(self, closed):
        """Archive (True) or unarchive (False) the card."""
        self._set_remote_attribute('closed', closed)
        self.closed = closed

    def delete(self):
        # Delete this card permanently
        self.client.fetch_json(
            '/cards/' + self.id,
            http_method='DELETE')

    def assign(self, member_id):
        """Add the member with *member_id* to this card."""
        self.client.fetch_json(
            '/cards/' + self.id + '/members',
            http_method='POST',
            post_args={'value': member_id})

    def unassign(self, member_id):
        """Remove the member with *member_id* from this card."""
        self.client.fetch_json(
            '/cards/' + self.id + '/idMembers/' + member_id,
            http_method='DELETE')

    def subscribe(self):
        """Subscribe the authenticated user to notifications for this card."""
        self.client.fetch_json(
            '/cards/' + self.id + '/subscribed',
            http_method='PUT',
            post_args={'value': True})

    def comment(self, comment_text):
        """Add a comment to a card."""
        self.client.fetch_json(
            '/cards/' + self.id + '/actions/comments',
            http_method='POST',
            post_args={'text': comment_text})

    def add_label(self, label):
        """Attach an existing Label object to this card."""
        self.client.fetch_json(
            '/cards/' + self.id + '/idLabels',
            http_method='POST',
            post_args={'value': label.id})

    def attach(self, name=None, mimeType=None, file=None, url=None):
        """
        Add an attachment to the card. The attachment can be either a
        file or a url. Setting the name and/or mime type is optional.
        :param name: The name of the attachment
        :param mimeType: mime type for the attachement
        :param file: a file-like, binary object that supports read()
        :param url: a URL pointing to the resource to be attached
        """
        if (file and url) or (not file and not url):
            raise Exception('Please provide either a file or url, and not both!')

        kwargs = {}
        if file:
            kwargs['files'] = dict(file=(name, file, mimeType))
        else:
            kwargs['name'] = name
            kwargs['mimeType'] = mimeType
            kwargs['url'] = url

        self._post_remote_data(
            'attachments', **kwargs
        )

    def change_list(self, list_id):
        """Move this card to another list on the same board."""
        self.client.fetch_json(
            '/cards/' + self.id + '/idList',
            http_method='PUT',
            post_args={'value': list_id})

    def change_board(self, board_id, list_id=None):
        """Move this card to another board (optionally into a specific list)."""
        args = {'value': board_id}
        if list_id is not None:
            args['idList'] = list_id
        self.client.fetch_json(
            '/cards/' + self.id + '/idBoard',
            http_method='PUT',
            post_args=args)

    def add_checklist(self, title, items, itemstates=None):
        """Add a checklist to this card

        :title: title of the checklist
        :items: a list of the item names
        :itemstates: a list of the state (True/False) of each item
        :return: the checklist
        """
        if itemstates is None:
            itemstates = []

        json_obj = self.client.fetch_json(
            '/cards/' + self.id + '/checklists',
            http_method='POST',
            post_args={'name': title}, )

        cl = Checklist(self.client, [], json_obj, trello_card=self.id)
        for i, name in enumerate(items):
            try:
                # Items beyond the length of `itemstates` default to unchecked.
                checked = itemstates[i]
            except IndexError:
                checked = False
            cl.add_checklist_item(name, checked)

        # Re-fetch so local state reflects the new checklist.
        self.fetch()
        return cl

    def _set_remote_attribute(self, attribute, value):
        # PUT a single attribute value to the card resource.
        self.client.fetch_json(
            '/cards/' + self.id + '/' + attribute,
            http_method='PUT',
            post_args={'value': value}, )

    def _post_remote_data(self, attribute, files=None, **kwargs):
        # POST arbitrary form data (and optional files) to a card sub-resource.
        self.client.fetch_json(
            '/cards/' + self.id + '/' + attribute,
            http_method='POST',
            files=files,
            post_args=kwargs)
from trello.trellolist import List
| 2.953125 | 3 |
tlib/es/index_stress.py | txu2008/TXLIB | 1 | 12762910 | <reponame>txu2008/TXLIB
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/11 14:02
# @Author : Tao.Xu
# @Email : <EMAIL>
"""Elasticsearch Stress
FYI: https://github.com/logzio/elasticsearch-stress-test
"""
import random
import string
import time
import sys
import json
# import urllib3
import threading
from threading import Lock, Thread, Condition, Event
from concurrent.futures import ThreadPoolExecutor, as_completed
from elasticsearch import Elasticsearch
from elasticsearch.connection import create_ssl_context
from elasticsearch.exceptions import TransportError
from tlib import log
from tlib.retry import retry
from tlib.utils import util
# =============================
# --- Global
# =============================
logger = log.get_logger()
# urllib3.disable_warnings()
ES_CONN_TIMEOUT = 10800 # 180 min = 180 * 60 = 10800
ES_OPERATION_TIMEOUT = '180m'
class ElasticsearchObj(object):
    """Thin wrapper around the official Elasticsearch client: lazy
    connection, index CRUD helpers and threaded bulk index management."""

    # Lazily-initialised client; created on first access of ``conn``.
    _conn = None

    def __init__(self, esaddress, username, password, port, cafile, no_verify):
        super(ElasticsearchObj, self).__init__()
        self.esaddress = esaddress
        self.username = username
        self.password = password
        self.port = port
        self.cafile = cafile          # optional CA bundle for TLS
        self.no_verify = no_verify    # True disables certificate verification

    @retry(tries=5, delay=3)
    def connect(self):
        """
        Initiate the elasticsearch session, We increase the timeout here from the default value (10 seconds)
        to ensure we wait for requests to finish even if the cluster is overwhelmed and
        it takes a bit longer to process one bulk.
        :return: connected Elasticsearch client
        """
        try:
            # NOTE(review): this logs the password in plain text -- consider
            # redacting it.
            logger.info(
                "Connect to ES({0},{1},{2},{3})...".format(self.esaddress, self.username, self.password, self.port))
            context = create_ssl_context(cafile=self.cafile) if self.cafile else ''
            auth = (self.username, self.password) if self.username and self.password else ()
            es_conn = Elasticsearch(self.esaddress, http_auth=auth, verify_certs=(not self.no_verify),
                                    ssl_context=context, port=self.port, timeout=ES_CONN_TIMEOUT)
            return es_conn
        except Exception as e:
            raise Exception("Failed:Connect to ES!\n{0}".format(e))

    @property
    def conn(self):
        # Lazy singleton: connect once, reuse for the object's lifetime.
        if self._conn is None:
            self._conn = self.connect()
        return self._conn

    def get_cat_index_info(self, index_name=None):
        """Parse ``_cat/indices`` output into dict(s) keyed by the header row.

        Returns a list of dicts when *index_name* is None (all indices),
        otherwise a single dict for that index.
        """
        cat_result_list = self.conn.cat.indices(index=index_name, v=True).split('\n')
        index_info = dict()
        if cat_result_list:
            if index_name is None:
                index_info = []
                for i in range(1, len(cat_result_list)):
                    index_info.append(dict(zip(cat_result_list[0].split(), cat_result_list[i].split())))
            else:
                index_info = dict(zip(cat_result_list[0].split(), cat_result_list[1].split()))
        return index_info

    @property
    def es_indices_names(self):
        """All index names from ``_cat/indices`` (3rd whitespace column)."""
        # return [es_indices.split()[2] for es_indices in self.conn.cat.indices().strip().split('\n')]
        es_indices_names = []
        for es_indices in self.conn.cat.indices().strip().split('\n'):
            es_indices_info = es_indices.split()
            if len(es_indices_info) > 3:
                es_indices_names.append(es_indices_info[2])
        return es_indices_names

    @retry(tries=3, delay=3, jitter=1, raise_exception=False)
    def delete_indices(self, index):
        """
        delete index from indices
        :param index: index name
        :return: True on success (missing indices are ignored via 400/404)
        """
        try:
            logger.info("Delete indices:{0} ...".format(index))
            self.conn.indices.delete(index=index, ignore=[400, 404])
            return True
        except Exception as e:
            raise Exception("Failed:delete index {0}. Continue anyway..\n{1}".format(index, e))

    @retry(tries=20, delay=3, jitter=1)
    def create_indices(self, index, shards, replicas):
        """Create *index* with the given shard/replica counts; an
        already-existing index is treated as success."""
        try:
            # And create it in ES with the shard count and replicas
            logger.info("Create indices:index={0},shards={1}, replicas={2} ...".format(index, shards, replicas))
            self.conn.indices.create(index=index, body={"settings": {"number_of_shards": shards,
                                                                     "number_of_replicas": replicas}})
            return True
        except TransportError as e:
            if 'exists' in e.error:
                logger.warning(e)
                return True
            raise Exception("Failed:Create index!\n{0}".format(e))

    def multi_delete_indices(self, index_list, name_start=None):
        """Delete many indices concurrently; *name_start* optionally filters
        to names with that prefix. Returns False if any deletion failed."""
        pool = ThreadPoolExecutor(max_workers=100)
        futures = []
        for index in index_list:
            if name_start and not index.startswith(name_start):
                continue
            futures.append(pool.submit(self.delete_indices, index))
        pool.shutdown()
        future_result = [future.result() for future in as_completed(futures)]
        result = False if False in future_result else True
        return result

    def multi_create_indices(self, index_list, shards, replicas):
        """Create many indices concurrently; returns False on any failure."""
        pool = ThreadPoolExecutor(max_workers=100)
        futures = []
        for index in index_list:
            futures.append(pool.submit(self.create_indices, index, shards, replicas))
        pool.shutdown()
        future_result = [future.result() for future in as_completed(futures)]
        result = False if False in future_result else True
        return result

    @retry(tries=30, delay=10)
    def wait_for_green(self):
        """Block (with retries) until cluster health reaches 'green'."""
        try:
            self.conn.cluster.health(wait_for_status='green', master_timeout='600s', timeout='600s')
            return True
        except Exception as e:
            raise Exception(e)
class ESIndexStress(ElasticsearchObj):
"""
Elasticsearch Stress
FYI: https://github.com/logzio/elasticsearch-stress-test
"""
def __init__(self, esaddress, username, password, port, cafile, no_verify, indices, documents, clients, seconds,
number_of_shards, number_of_replicas, bulk_size, max_fields_per_document, max_size_per_field, cleanup,
stats_frequency, green, index_name=None):
super(ESIndexStress, self).__init__(esaddress, username, password, port, cafile, no_verify)
self.esaddress = esaddress
self.indices = indices
self.documents = documents
self.clients = clients
self.seconds = seconds
self.number_of_shards = number_of_shards
self.number_of_replicas = number_of_replicas
self.bulk_size = bulk_size
self.max_fields_per_document = max_fields_per_document
self.max_size_per_field = max_size_per_field
self.cleanup = cleanup # cleanup index after test complete, if True
self.stats_frequency = stats_frequency
self.green = green
self.index_name = index_name
# Placeholders
self.start_timestamp = 0
self.success_bulks = 0
self.failed_bulks = 0
self.total_size = 0
# Thread safe
self.success_lock = Lock()
self.fail_lock = Lock()
self.size_lock = Lock()
self.shutdown_event = Event()
# Helper functions
def increment_success(self):
# First, lock
self.success_lock.acquire()
try:
self.success_bulks += 1
finally: # Just in case
# Release the lock
self.success_lock.release()
def increment_failure(self):
# First, lock
self.fail_lock.acquire()
try:
self.failed_bulks += 1
finally: # Just in case
# Release the lock
self.fail_lock.release()
def increment_size(self, size):
# First, lock
self.size_lock.acquire()
try:
self.total_size += size
finally: # Just in case
# Release the lock
self.size_lock.release()
def has_timeout(self, start_timestamp):
# Match to the timestamp
if (start_timestamp + self.seconds) > int(time.time()):
return False
return True
# Just to control the minimum value globally (though its not configurable)
@staticmethod
def generate_random_int(max_size):
try:
return random.randint(1, max_size)
except Exception as e:
print("Not supporting {0} as valid sizes!".format(max_size))
raise e
# Generate a random string with length of 1 to provided param
def generate_random_string(self, max_size):
return ''.join(random.choice(string.ascii_lowercase) for _ in range(self.generate_random_int(max_size)))
# Create a document template
def generate_document(self):
temp_doc = {}
# Iterate over the max fields
for _ in range(self.generate_random_int(self.max_fields_per_document)):
# Generate a field, with random content
temp_doc[self.generate_random_string(10)] = self.generate_random_string(self.max_size_per_field)
# Return the created document
return temp_doc
def fill_documents(self, documents_templates):
"""
fill document with random string from template
:param documents_templates:
:return:
"""
document_list = []
# Generating 10 random subsets
for _ in range(10):
# Get a temp document
random_doc = random.choice(documents_templates)
# Populate the fields
temp_doc = {}
for field in random_doc:
temp_doc[field] = self.generate_random_string(self.max_size_per_field)
document_list.append(temp_doc)
return document_list
def client_worker(self, indices, document_list):
    """Index random bulks in a loop until timeout or shutdown is signalled."""
    thread_id = threading.current_thread()
    logger.info("Perform the bulk operation, bulk_size:{0} ({1})...".format(self.bulk_size, thread_id))
    while not (self.has_timeout(self.start_timestamp) or self.shutdown_event.is_set()):
        # Build the newline-delimited bulk body: action line + document line
        # per operation, each terminated by a newline.
        lines = []
        for _ in range(self.bulk_size):
            lines.append(json.dumps({"index": {"_index": random.choice(indices),
                                               "_type": "stresstest"}}))
            lines.append(json.dumps(random.choice(document_list)))
        curr_bulk = "\n".join(lines) + "\n"
        try:
            # Perform the bulk operation against the cluster.
            self.conn.bulk(body=curr_bulk, timeout=ES_OPERATION_TIMEOUT)
            self.increment_success()
            # Track throughput in bytes.
            self.increment_size(sys.getsizeof(str(curr_bulk)))
        except Exception as e:
            self.increment_failure()
            logger.error(e)
def generate_clients(self, indices, document_list):
    """Build (but do not start) one daemon worker thread per configured client."""
    workers = []
    for _ in range(self.clients):
        worker = Thread(target=self.client_worker, args=[indices, document_list])
        worker.daemon = True
        workers.append(worker)
    return workers
def generate_documents(self):
    """Produce the configured number of document templates."""
    return [self.generate_document() for _ in range(self.documents)]
def generate_indices(self):
    """Create the configured number of indices and return their names.

    Names are '<index_name>_<n>' when a base name is set, otherwise random.
    """
    names = []
    for idx in range(self.indices):
        if self.index_name:
            names.append('{0}_{1}'.format(self.index_name, idx))
        else:
            names.append(self.generate_random_string(16))
    self.multi_create_indices(names, self.number_of_shards, self.number_of_replicas)
    return names
def print_stats(self):
    """Log elapsed time, bulk success/failure counts and indexing throughput."""
    elapsed_time = (int(time.time()) - self.start_timestamp)
    size_mb = self.total_size / 1024 / 1024
    # Guard against division by zero when called immediately after start.
    mbs = 0 if elapsed_time == 0 else size_mb / float(elapsed_time)
    logger.info("Elapsed time: {0} seconds".format(elapsed_time))
    logger.info("Successful bulks: {0} ({1} documents)".format(self.success_bulks, (self.success_bulks * self.bulk_size)))
    logger.info("Failed bulks: {0} ({1} documents)".format(self.failed_bulks, (self.failed_bulks * self.bulk_size)))
    logger.info("Indexed approximately {0} MB which is {1:.2f} MB/s".format(size_mb, mbs))
    logger.info("")
def print_stats_worker(self):
    """Log stats every stats_frequency seconds until timeout or shutdown.

    Uses a Condition wait instead of sleep to avoid blocking shutdown.
    """
    lock = Condition()
    lock.acquire()
    while not (self.has_timeout(self.start_timestamp) or self.shutdown_event.is_set()):
        lock.wait(self.stats_frequency)
        # Avoid a duplicate final print once the test has timed out.
        if not self.has_timeout(self.start_timestamp):
            self.print_stats()
def run(self):
    """Drive the whole stress test: build data, create indices, start the
    worker threads, print periodic stats, and clean up when done or on
    Ctrl-C.
    """
    clients = []
    all_indices = []
    # Set the timestamp; all timeout checks are relative to this.
    self.start_timestamp = int(time.time())
    logger.info("")
    logger.info("Starting initialization of {0} ...".format(self.esaddress))
    logger.info("Generate docs ...")
    # Templates define field names; fill_documents produces concrete docs.
    documents_templates = self.generate_documents()
    document_list = self.fill_documents(documents_templates)
    logger.info("Done!")
    logger.info("Creating indices.. ")
    indices = self.generate_indices()
    all_indices.extend(indices)
    logger.info("Done!")
    if self.green:
        # Optionally wait for the cluster to report green health first.
        logger.info('Check es cluster health ...')
        self.wait_for_green()
        logger.info("Done!")
    logger.info("Generating documents and workers.. ")  # Generate the clients
    clients.extend(self.generate_clients(indices, document_list))
    logger.info("Done!")
    logger.info("Starting the test. Will print stats every {0} seconds.".format(self.stats_frequency))
    logger.info("The test would run for {0} seconds, but it might take a bit more "
                "because we are waiting for current bulk operation to complete.".format(self.seconds))
    # Snapshot thread count before starting workers so we can later tell
    # how many bulk threads are still alive.
    original_active_count = threading.active_count()
    # Run the clients!
    for d in clients:
        d.start()
    # Create and start the print stats thread
    stats_thread = Thread(target=self.print_stats_worker)
    stats_thread.daemon = True
    stats_thread.start()
    for c in clients:
        while c.is_alive():
            try:
                # Short join timeout keeps the main thread responsive to
                # KeyboardInterrupt.
                c.join(timeout=0.1)
            except KeyboardInterrupt:
                logger.info("")
                logger.info("Ctrl-c received! Sending kill to threads...")
                self.shutdown_event.set()
                # set loop flag true to get into loop
                flag = True
                while flag:
                    # sleep 2 secs that we don't loop to often
                    time.sleep(2)
                    '''
                    # set loop flag to false. If there is no thread still alive it will stay false
                    flag = False
                    # loop through each running thread and check if it is alive
                    for t in threading.enumerate():
                        # if one single thread is still alive repeat the loop
                        if t.isAlive():
                            flag = True
                    '''
                    # wait the bulk threads complete!
                    bulk_active_count = threading.active_count() - original_active_count
                    if bulk_active_count > 0:
                        print('bulk_active_count: {0}'.format(bulk_active_count))
                        flag = True
                    else:
                        flag = False
                if self.cleanup:
                    logger.info("Cleaning up created indices.. ")
                    self.multi_delete_indices(all_indices)
    logger.info('')
    logger.info("Test is done! Final results:")
    self.print_stats()
    if self.cleanup:
        logger.info("Cleaning up created indices.. ")
        self.multi_delete_indices(all_indices)
    logger.info("Done!")
| 2.1875 | 2 |
flask_helloworld_apache2/workflow_handler.py | sidhshar/gcp-explore | 0 | 12762911 | <filename>flask_helloworld_apache2/workflow_handler.py<gh_stars>0
import datetime
import time
import localsettings as ls
import sqlite3
class Singleton(type):
    """Metaclass that caches and reuses a single instance per class."""

    # Maps each class to its one cached instance.
    _instances = {}

    def __call__(cls, *args, **kwargs):
        cached = cls._instances.get(cls)
        if cached is None:
            cached = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = cached
        return cached
class DBHandler(object):
    """Thin wrapper around the sqlite3 audit-trail database."""

    def __init__(self):
        self.connection = sqlite3.connect(ls.DBSTORE)
        # If you want autocommit mode, then set isolation_level to None.
        self.connection.isolation_level = None
        self.create_table()

    def execute_query_via_cursor(self, query):
        """Run a statement with no parameters, discarding any results."""
        cur = self.connection.cursor()
        cur.execute(query)
        cur.close()

    def execute_parameterised_query_via_cursor(self, query, parameters):
        """Run a parameterised statement, discarding any results."""
        # TODO: Create cursor pool
        cur = self.connection.cursor()
        cur.execute(query, parameters)
        cur.close()

    def execute_parameterised_query_via_cursor_with_results(self, query, parameters):
        """Run a parameterised statement and return all fetched rows."""
        cur = self.connection.cursor()
        cur.execute(query, parameters)
        rows = cur.fetchall()
        cur.close()
        return rows

    def create_table(self):
        """Create the EVENT_PROCESSOR table if it does not already exist."""
        create_query = '''CREATE TABLE IF NOT EXISTS EVENT_PROCESSOR
        (ID INTEGER PRIMARY KEY, REMOTE_ADDR text, ua text, ph text, ts timestamp, vulassessment int, retrycount int, retry_timestamps text)'''
        self.execute_query_via_cursor(create_query)

    def write_to_db(self, ip, ua, ph, ts, cvss, rcount, rts):
        """Insert one audit row for a processed request."""
        now = datetime.datetime.now()
        insert_query = "INSERT INTO EVENT_PROCESSOR(REMOTE_ADDR, ua, ph, ts, vulassessment, retrycount, retry_timestamps) values (?, ?, ?, ?, ?, ?, ?)"
        insert_values = (ip, ua, ph, ts, cvss, rcount, rts)
        self.execute_parameterised_query_via_cursor(insert_query, insert_values)

    def perform_select_on_ip(self, ip):
        """Return all audit rows previously recorded for the given IP."""
        selectvalues = (ip,)
        select_query = 'SELECT * FROM EVENT_PROCESSOR WHERE REMOTE_ADDR=?'
        return self.execute_parameterised_query_via_cursor_with_results(select_query, selectvalues)

    def close(self):
        """Close the underlying sqlite connection."""
        self.connection.close()
class RequestItem(object):
    """Tracks one in-flight request as it moves through the workflow states."""

    def __init__(self, ip, ua):
        self.ip = ip
        self.ua_from_istio = ua
        self.state = ls.WORKFLOW_STATES[ls.RECEIVED_FROM_ISTIO]
        self.retry_count = 0
        self.retry_timestamps = []
        self.ts = datetime.datetime.now()
        self.cvss_score = 0
        self.ph = None

    def get_ip(self):
        """Return the source IP for this request."""
        return self.ip

    def get_verbose_state(self):
        """Return the human-readable name of the current state."""
        return ls.WORKFLOW_STATES_VERBOSE[self.state]

    def increment_retry_count(self):
        """Record one more retry together with its timestamp."""
        self.retry_count += 1
        stamp = datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p")
        self.retry_timestamps.append(stamp)

    def change_to_nvm_event_recieved(self):
        """Advance to the webhook-trigger-received state."""
        self.state = ls.WORKFLOW_STATES[ls.RECIEVED_WEBHOOK_TRIGGER]

    def mark_vul_assess_done(self):
        """Advance to the vulnerability-assessment-complete state."""
        self.state = ls.WORKFLOW_STATES[ls.VUL_ASSESS_COMPLETE]

    def mark_complete(self):
        """Advance to the fully-processed terminal state."""
        self.state = ls.WORKFLOW_STATES[ls.ALL_PROCESSING_DONE]

    def is_complete(self):
        """Return True when all processing for this request is done."""
        return self.state == ls.WORKFLOW_STATES[ls.ALL_PROCESSING_DONE]

    def set_cvss_score(self, cvss):
        self.cvss_score = cvss

    def set_process_hash(self, ph):
        self.ph = ph

    def save_audit_trail(self):
        """Persist this request's details to the audit database."""
        handle = DBHandler()
        handle.write_to_db(self.ip, self.ua_from_istio, self.ph, self.ts,
                           self.cvss_score, self.retry_count,
                           "+".join(self.retry_timestamps))
        handle.close()
class WorkflowManager(object):
    """Coordinates pending requests keyed by source IP (singleton)."""
    __metaclass__ = Singleton

    def __init__(self):
        # Maps source IP -> RequestItem awaiting completion.
        self.pending = {}

    def get_pending_by_ip(self, ip):
        """Return the pending request for `ip`, or None when absent."""
        if ip in self.pending:
            return self.pending[ip]

    def create_new_request(self, ip, ua):
        """Return the tracked request for `ip`, creating one if needed.

        An existing request just gets its retry counter bumped.
        """
        pending = self.get_pending_by_ip(ip)
        if pending is not None:
            pending.increment_retry_count()
            return pending
        item = RequestItem(ip, ua)
        self.pending[ip] = item
        return item

    def mark_nvm_flow_arrival(self, ip, ph):
        """Handle the NVM webhook for `ip`: assess, audit and complete.

        Returns False (request ignored) when no workflow is pending for ip.
        """
        pending = self.get_pending_by_ip(ip)
        if pending is None:
            print('Did not find workflow object with IP: %s hash: %s. IGNORE Request.' % (ip, ph))
            return False
        pending.change_to_nvm_event_recieved()
        pending.set_process_hash(ph)
        pending.set_cvss_score(self.make_vul_assess_call(ph))
        pending.mark_vul_assess_done()
        # Write object details to DB and drop the tracked entry.
        pending.save_audit_trail()
        pending.mark_complete()
        self.pending.pop(ip)
        return True

    def make_vul_assess_call(self, ph):
        """Stubbed vulnerability assessment: fixed score after a delay."""
        time.sleep(2)
        return ls.TEST_CVSS_SCORE
def wait_for_complete_state(reqitem):
    """Poll until the request reaches its terminal state.

    Returns True on completion, False once the timeout budget is spent.
    """
    waited = 0
    while waited <= ls.TIMEOUT_IN_SECS:
        if reqitem.is_complete():
            return True
        time.sleep(ls.SLEEP_TIME_SLICE)
        waited += ls.SLEEP_TIME_SLICE
    return False
def invoke_step1(ip, ua):
    """Step 1: decide whether to insert the trust header for this client.

    Looks up a cached CVSS score for `ip` in the audit DB; otherwise starts
    a workflow and waits for the assessment. Returns a one-entry dict
    {ls.INSERT_HEADER_NAME: bool}.

    NOTE: this module is Python 2 (print statements).
    """
    print 'Invoke step1'
    cvss_score = 0
    dbhandle = DBHandler()
    results = dbhandle.perform_select_on_ip(ip)
    if results:
        # Assume 1 result as of now. TODO
        result = results[0]
        # Column 4 is 'vulassessment' (the stored CVSS score).
        cvss_score = result[4]
        print 'Got data from DB'
    else:
        print 'Starting Workflow Manager..'
        wobj = WorkflowManager()
        reqitem = wobj.create_new_request(ip, ua)
        # Block until the webhook-driven workflow finishes or times out.
        status = wait_for_complete_state(reqitem)
        if not status:
            # Timeout occured. Return negative response
            print 'Timeout in Step1'
            return { ls.INSERT_HEADER_NAME: False }
        cvss_score = reqitem.cvss_score
    if ls.TEST_CVSS_THRESHOLD <= cvss_score:
        # Return Positive response
        response = { ls.INSERT_HEADER_NAME: True }
    else:
        # Return negative response
        response = { ls.INSERT_HEADER_NAME: False }
    print 'Step 1 response: ',response
    return response
def invoke_step2(host, ph):
    """Step 2: forward the NVM webhook (host, process hash) to the manager.

    Returns True when a pending workflow existed and was completed,
    False when the event was ignored.

    NOTE: this module is Python 2 (print statements).
    """
    print 'Invoke step2'
    wobj = WorkflowManager()
    response = wobj.mark_nvm_flow_arrival(host, ph)
    print 'Step 2 response: ',response
    return response
def invoke_test_step1():
    """Exercise step 1 with a canned Istio-style request."""
    addr_header = '172.16.17.32, 192.168.3.11'
    ua_header = 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'
    istio_request = {'X-Initiator-Remote-Addr-1': addr_header,
                     'X-Initiator-Ua': ua_header}
    # Only the first address in the comma-separated chain is the client.
    remote_addr = istio_request['X-Initiator-Remote-Addr-1'].split(',')[0]
    result = invoke_step1(remote_addr, istio_request['X-Initiator-Ua'])
def invoke_test_step2():
    """Exercise step 2 with a canned Splunk webhook payload."""
    host = '172.16.17.32'
    process_hash = '072041FA70BB351030C516E1B6F7F21D15495DA158F3890826BA5B978AF8900E'
    splunk_webhook_data = { 'host': host, 'ph': process_hash }
    invoke_step2(splunk_webhook_data['host'], splunk_webhook_data['ph'])
if __name__ == '__main__':
    # Manual smoke tests; step 1 is currently disabled.
    #invoke_test_step1()
    #time.sleep(2)
    invoke_test_step2()
| 2.421875 | 2 |
reproductions/offline/discrete_cql.py | ningyixue/AIPI530_Final_Project | 1 | 12762912 | import argparse
import d3rlpy
def main():
    """Train DiscreteCQL on a 1% Atari transition dataset, evaluating online."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--game', type=str, default='breakout')
    arg_parser.add_argument('--seed', type=int, default=1)
    arg_parser.add_argument('--gpu', type=int)
    args = arg_parser.parse_args()

    d3rlpy.seed(args.seed)

    # 1% of transitions; asterix uses the second dataset index.
    dataset, env = d3rlpy.datasets.get_atari_transitions(
        args.game,
        fraction=0.01,
        index=1 if args.game == "asterix" else 0,
    )
    env.seed(args.seed)

    algo = d3rlpy.algos.DiscreteCQL(
        learning_rate=5e-5,
        optim_factory=d3rlpy.models.optimizers.AdamFactory(eps=1e-2 / 32),
        batch_size=32,
        alpha=4.0,
        q_func_factory=d3rlpy.models.q_functions.QRQFunctionFactory(
            n_quantiles=200),
        scaler="pixel",
        n_frames=4,
        target_update_interval=2000,
        reward_scaler=d3rlpy.preprocessing.ClipRewardScaler(-1.0, 1.0),
        use_gpu=args.gpu)

    env_scorer = d3rlpy.metrics.evaluate_on_environment(env, epsilon=0.001)

    algo.fit(dataset,
             eval_episodes=[None],
             n_steps=50000000 // 4,
             n_steps_per_epoch=125000,
             scorers={
                 'environment': env_scorer,
             },
             experiment_name=f"DiscreteCQL_{args.game}_{args.seed}")
# Script entry point.
if __name__ == '__main__':
    main()
| 2.125 | 2 |
main.py | riandeoliveira/mern-stack-project-starter | 0 | 12762913 | <reponame>riandeoliveira/mern-stack-project-starter
# GUI-automation script: scaffolds a MERN project by driving the Windows
# desktop (browser, Git Bash, MongoDB shell, VSCode) with pyautogui.
# Timing is critical: every sleep gives the target application time to
# respond before the next keystroke.
import pyautogui
import pyperclip
import time

print('========== MERN STACK PROJECT STARTER ==========\n')
print('by <NAME>\n')

# Collect the required data from the user (prompts are in Portuguese).
USERNAME = input('Nome de usuário do GitHub: ')
PROJECT_NAME = input('Nome do projeto: ')
DB_NAME = input('Nome do banco de dados: ')
PROJECT_LOCATION = input('Local onde ficará o projeto na sua máquina: ')

print('\nIniciando projeto, aguarde...')
print('Por favor, não mexa no teclado e mouse enquanto o programa estiver rodando.')

# Open the browser (Microsoft Edge) and maximize it.
time.sleep(5)
pyautogui.hotkey('win', 'd')
time.sleep(0.5)
pyautogui.hotkey('win')
time.sleep(0.5)
pyautogui.write('Microsoft Edge')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')

# Navigate to GitHub's new-repository page.
time.sleep(0.5)
pyautogui.write('https://github.com/new')
time.sleep(0.5)
pyautogui.press('enter')

# Create a new repository (tab through the form to the submit button).
time.sleep(2)
pyautogui.press('tab')
time.sleep(0.5)
pyautogui.write(PROJECT_NAME)
time.sleep(0.5)
for i in range(9):
    pyautogui.press('tab')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(5)
pyautogui.hotkey('win', 'd')

# Open a terminal (Git Bash).
time.sleep(0.5)
pyautogui.press('win')
time.sleep(0.5)
pyautogui.write('Git Bash')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(2)
pyautogui.hotkey('win', 'up')

# Start the MongoDB database server.
time.sleep(0.5)
pyautogui.write('mongod')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(3)
pyautogui.hotkey('win', 'd')

# Open a second terminal (Git Bash).
time.sleep(0.5)
pyautogui.press('win')
time.sleep(0.5)
pyautogui.write('Git Bash')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(2)
pyautogui.hotkey('win', 'up')

# Enter the MongoDB shell.
time.sleep(0.5)
pyautogui.write('mongo')
time.sleep(0.5)
pyautogui.press('enter')

# Create a new database and an example collection, then inspect them.
time.sleep(0.5)
pyautogui.write('use ' + DB_NAME)
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('db.example.insertOne({ "name" : "example" })')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('show dbs')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('db.example.find().pretty()')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(3)
pyautogui.hotkey('ctrl', 'c')

# Create the project directory (path pasted via clipboard to survive
# accented characters).
time.sleep(0.5)
pyperclip.copy('cd ' + PROJECT_LOCATION)
time.sleep(0.5)
pyautogui.hotkey('ctrl', 'v')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('mkdir ' + PROJECT_NAME)
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('cd ' + PROJECT_NAME)
time.sleep(0.5)
pyautogui.press('enter')

# Open VSCode in the project directory.
time.sleep(0.5)
pyautogui.write('code .')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(5)
pyautogui.hotkey('win', 'up')

# Clone the GitHub repository containing the template to be used
# (into the VSCode integrated terminal).
time.sleep(2)
pyautogui.hotkey('ctrl', '"')
time.sleep(2)
pyautogui.write('git clone https://github.com/riandeoliveira/mern-stack-template .')
time.sleep(0.5)
pyautogui.press('enter')

# Remove the template's own git repository.
time.sleep(10)
pyautogui.write('bash')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('rm -rf .git')
time.sleep(0.5)
pyautogui.press('enter')

# Initialize a fresh git repository.
time.sleep(0.5)
pyautogui.write('git init')
time.sleep(0.5)
pyautogui.press('enter')

# Rename the default branch from master to main.
time.sleep(5)
pyautogui.write('git branch -M main')
time.sleep(0.5)
pyautogui.press('enter')

# Add the project's GitHub repository as the origin remote.
time.sleep(1)
pyautogui.write("git remote add origin 'https://github.com/" + USERNAME + "/" + PROJECT_NAME + "'")
time.sleep(0.5)
pyautogui.press('enter')

# Stage the whole template in the local repository.
time.sleep(3)
pyautogui.write('git add .')
time.sleep(0.5)
pyautogui.press('enter')

# Commit the whole project.
time.sleep(5)
pyautogui.write("git commit -m 'Initial commit'")
time.sleep(0.5)
pyautogui.press('enter')

# Push the project to GitHub.
time.sleep(5)
pyautogui.write('git push origin main')
time.sleep(0.5)
pyautogui.press('enter')

# Install the back-end dependencies.
time.sleep(5)
pyautogui.write('cd server')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('npm install')
time.sleep(0.5)
pyautogui.press('enter')

# Install the front-end dependencies.
time.sleep(30)
pyautogui.write('cd ..')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('cd client')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('npm install')
time.sleep(0.5)
pyautogui.press('enter')

print('\n================================================')
src/main/python/trajectory/utils/prereqs.py | jrouly/trajectory | 4 | 12762914 | """
trajectory/utils/prereqs.py
Author: <NAME>
Define a collection of useful utility functions for analyzing course and
departmental prerequisite structures.
"""
def get_prereq_graph(course_id, format=None):
"""
Generate a graph of prerequisites within a course. If format is not
requested, simply return a NetworkX graph object.
couse_id: the ID of the requested course
format: what format to return in (optional)
node: json formatted as node-link style
adjacency: json formatted as adjacency style
tree: json formatted as tree style
"""
from trajectory.models import Department, Course
from trajectory.models.meta import session
from trajectory.utils.common import row2dict
from networkx.readwrite import json_graph
import networkx as nx
import json
if format not in [None, "node", "adjacency", "tree"]:
raise RuntimeError("Unknown requested data format %s" % format)
# Initialize a new NetworkX graph.
G = nx.DiGraph()
# Attempt to look up the requested course.
course = session.query(Course).get(course_id)
if course is None:
return None
# Recursively add course ids in a subtree to the graph.
def add_tree(G, tree, parent=None):
cid = tree[0] # unpack information
prereqs = tree[1] # unpack information
course = session.query(Course).get(cid)
# Insert all known data, including department abbreviation.
node_data = row2dict(course)
node_data['dept'] = course.department.abbreviation
# Identify the primary course in the graph (the requested).
if str(cid) == str(course_id):
node_data['prime'] = True
else:
node_data['prime'] = False
# If the course has already been added, generate a unique ID for it
# based on its parent, and add it anyway. But don't recurse into
# its list of prereqs.
seen = False
if cid in G.nodes():
cid = str(parent) + "-" + str(cid)
seen = True
# Add course and an edge from its parent, if relevant.
G.add_node(cid, node_data)
if parent is not None:
G.add_edge(parent, cid)
# Recurse through the prerequisite tree and add in subtrees.
if not seen:
for prereq in prereqs:
add_tree(G, prereq, cid)
# Navigate the prerequisite tree and add the course ids as nodes, and
# prerequisite relationships as unweighted edges.
prereq_tree = get_prereq_tree(course_id)
add_tree(G, prereq_tree)
if G is None:
return G
# Calculate and apply a basic layout.
pos = nx.spring_layout(G)
for node in G.nodes():
G.node[node]["viz"] = {
'position': {
'x': pos[node][0],
'y': pos[node][1]
}
}
# Apply any requested data output formatting.
if format == "node":
return json.dumps(json_graph.node_link_data(G))
elif format == "adjacency":
return json.dumps(json_graph.adjacency_data(G))
elif format == "tree":
return json.dumps(json_graph.tree_data(G, int(course_id)))
else:
return G
def get_prereq_tree(course_id, parents=set()):
    """
    Recursively build the prerequisite chain of a course, rooted at the
    requested course, as nested (course_id, [subtrees]) tuples.

    Returns None for an unknown course. Cycles are broken by tracking the
    ids already on the current path; `parents` is never mutated (a new set
    is built per level), so the shared default is safe.
    """
    from trajectory.models import Course
    from trajectory.models.meta import session

    course = session.query(Course).get(course_id)
    if course is None:
        return None

    # Cycle guard: stop if this course is already on the current path.
    if course_id in parents:
        return None
    parents = parents | {course_id}

    children = []
    for prerequisite in course.prerequisites:
        subtree = get_prereq_tree(prerequisite.id, parents)
        if subtree is not None:
            children.append(subtree)
    return (course.id, children)
def get_prereq_set(course_id):
    """
    Return the flat set of prerequisite course ids for a course (no
    repeats), excluding the course itself. Empty set if unknown.
    """
    tree = get_prereq_tree(course_id)
    if tree is None:
        return set()

    # Yield every scalar id from the arbitrarily nested tuple/list tree.
    def _flatten(node):
        for item in node:
            if isinstance(item, (list, tuple)):
                for sub in _flatten(item):
                    yield sub
            else:
                yield item

    return set(_flatten(tree)) - {course_id}
| 2.890625 | 3 |
blusky/transforms/apply_father_wavelet_2d.py | fogoke/blusky | 3 | 12762915 | import re
import keras.backend as keras_backend
from keras.layers import DepthwiseConv2D
import numpy as np
from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property
from blusky.wavelets.i_wavelet_2d import IWavelet2D
class ApplyFatherWavlet2D(HasStrictTraits):
    """
    Provides a "convolution" method that will apply a father wavelet to
    the endpoints of a cascade. Be sure to first apply layers to remove
    any of the padding.

    Assuming the input to the cascade is a power of 2 in shape, the result
    will be a set of scattering coefficients at all orders of the transform
    sampled regularly throughout the image. Every set of coefficients is
    computed at the center of a tile, whose shape is determined by the "J"
    parameter: (2**(J+2), 2**(J+2)) — a region over which the texture of
    the image can be considered stationary.

    The degree to which tiles overlap is controlled by "overlap_log_2".
    With default decimation (oversamples=1), overlap_log_2 can be up to
    J - 1; each unit of overlap costs an additional unit of oversampling.
    """

    #: (J) The "J" scale parameter of the father wavelet used in the
    # transform.
    J = Int(2)

    #: (M) Defines the overlap of the tiles: 0 -> no overlap,
    # 1 -> 50% overlap, 2 -> 75%, etc.
    overlap_log_2 = Int(0)

    #: Size of the image input to the Cascade_2d. This needs to be padded
    # to a power of "2" to ensure that the coefficients are consistent.
    img_size = Tuple

    #: The sample rate of the input data.
    sample_rate = Float

    #: Wavelet to use in convolution.
    wavelet = Instance(IWavelet2D)

    #: Equivalent tile size derived from the log scale J:
    # J = round(log2(min(tile_size))) - 2
    _tile_size = Property(Int, depends_on="J")

    def _get__tile_size(self):
        """Return the (square) tile shape implied by J.

        Raises RuntimeError when the tile would exceed the image size.
        """
        size = 2 ** (self.J + 2)
        if size > self.img_size[0] or size > self.img_size[1]:
            mn = min(self.img_size)
            msg = "For image {} by {}, max J is {}".format(
                self.img_size[0], self.img_size[1], np.log2(mn) - 2
            )
            raise RuntimeError(msg)
        return (2 ** (self.J + 2), 2 ** (self.J + 2))

    def _convolve(self, input_layer, trainable=False):
        """
        Apply a father-wavelet convolution to one layer. The decimation
        applied so far is derived from the layer's shape relative to the
        original image shape; the stride is chosen so coefficients land on
        a regular (optionally overlapping) tiling, provided the original
        input shape is a power of 2.

        Parameters
        ----------
        input_layer - Keras Layer
            A layer to apply the father wavelet to.
        trainable - Bool (optional)
            Toggle setting the convolution to be trainable. Either way it
            is initialized with the father wavelet.

        Returns
        -------
        conv - Keras Layer
            A Keras layer applying the convolution to the input.
        """
        # Create a convenient name: strip everything after the first
        # '_' or '/' and tag with "phi" (the father wavelet symbol).
        name = re.sub("[_/].*", "", input_layer.name) + "phi"

        _, nh, nw, _ = input_layer.shape

        # Amount of decimation applied to reach this layer.
        factor_1 = self.img_size[0] // nh
        factor_2 = self.img_size[1] // nw

        # How much to decimate the wavelet to the required bandwidth.
        wavelet_stride = min(factor_1, factor_2)

        # Need to guarantee this; ideally crop the wavelet to a
        # power of "2".
        wav = self.wavelet.kernel(
            0.0, shape=(2 ** (self.J + 2) - 1, 2 ** (self.J + 2) - 1)
        )
        wav = wav[::wavelet_stride, ::wavelet_stride]

        # The depthwise kernel weights need to be real-valued.
        if np.iscomplexobj(wav):
            wav = wav.real

        # Helper initializing every channel's kernel with the wavelet.
        def init_weights(shape, **kwargs):
            dtype = np.float32
            weights = np.zeros(shape, dtype=dtype)
            for ichan in range(shape[2]):
                weights[:, :, ichan, 0] = wav.astype(dtype)
            return keras_backend.variable(value=weights, dtype=dtype)

        # Use the father wavelet tile size (scaled by the overlap and the
        # decimation already applied) as the convolution stride.
        conv_stride = (
            max(
                2 ** (-self.overlap_log_2) * self._tile_size[0] // factor_1, 1
            ),
            max(
                2 ** (-self.overlap_log_2) * self._tile_size[1] // factor_2, 1
            ),
        )
        # BUG FIX: the original read conv_stride[0] for both components,
        # discarding the stride computed for the second axis.
        conv_stride = (int(conv_stride[0]), int(conv_stride[1]))

        conv = DepthwiseConv2D(
            name=name,
            kernel_size=wav.shape,
            depth_multiplier=1,
            data_format="channels_last",
            padding="valid",
            strides=conv_stride,
            trainable=trainable,
            depthwise_initializer=init_weights,
        )

        return conv(input_layer)

    def convolve(self, end_points):
        """
        Apply the father wavelet convolution to each cascade end-point.

        Parameters
        ----------
        end_points - List(Keras Layers)
            Typically the multiple end-points of the 2-D Cascade.

        Returns
        -------
        scattering_transform - List(Keras Layers)
            The father wavelet applied to each end-point. The stride and
            padding produce a consistent set of coefficients at each scale
            when the original image shape is a power of 2.
        """
        return [self._convolve(i) for i in end_points]
| 3.078125 | 3 |
controller.py | elsayed5454/Signal-Flow-Graph-Solver | 1 | 12762916 | import networkx as nx
from matplotlib.pyplot import draw, show, clf
from mason import mason
def add_node(g, node):
    """Add a named node to the graph; reject an empty name."""
    if node != "":
        g.add_node(node)
        return "Node added successfully"
    return "Add name to the node"
def remove_node(g, node):
    """Remove a node if present; report whether it existed."""
    if not g.has_node(node):
        return "Node doesn't exist in graph"
    g.remove_node(node)
    return "Node removed successfully"
def add_edge(g, from_node, to_node, weight):
    """Add a weighted edge between two existing nodes.

    `weight` is a string; an empty string defaults to '1' and it must
    parse as a positive integer. Returns a status message.
    """
    if len(weight) == 0:
        weight = '1'
    if g.has_node(from_node) and g.has_node(to_node):
        if weight.isdigit():
            g.add_weighted_edges_from([(from_node, to_node, int(weight))])
            return "Edge added successfully\nDefault weight is 1"
        else:
            # BUG FIX: the original built this message as a bare
            # expression and never returned it (caller got None).
            return "The weight must be positive integer"
    else:
        return "One of the nodes is not in the graph"
def remove_edge(g, from_node, to_node, weight):
    """Remove an edge between two nodes.

    With a single edge between the nodes the weight is ignored; with
    multiple (multigraph) edges the weight selects which ones to remove.
    Returns a status message.
    """
    if not (g.has_node(from_node) and g.has_node(to_node)):
        return "One of the nodes is not in the graph"
    # BUG FIX: networkx get_edge_data returns None (not {}) when no edge
    # exists, so the original len(...) call crashed; normalize to {}.
    edge_data = g.get_edge_data(from_node, to_node) or {}
    if len(edge_data) == 0:
        return "No edge exists"
    if len(edge_data) == 1:
        # BUG FIX: the original called g.remove_edge_clicked(), which is
        # not a graph method.
        g.remove_edge(from_node, to_node)
        return "Edge removed successfully (Weight is neglected because it's the only edge between the nodes)"
    if len(weight) == 0:
        return "There are multiple edges, specify the weight"
    try:
        # BUG FIX: the original matched on weight alone, which would
        # delete every edge of that weight anywhere in the graph; also
        # match the endpoints.
        to_remove = [(u, v, k) for u, v, k in g.edges(data=True)
                     if u == from_node and v == to_node
                     and k['weight'] == int(weight)]
        g.remove_edges_from(to_remove)
    except Exception:
        return "An exception occurred"
    return "Edge removed successfully"
def refresh(g):
    """Redraw the graph: spring layout, curved edges, edge-weight labels."""
    clf()
    pos = nx.spring_layout(g)
    nx.draw(g, pos, with_labels=True, connectionstyle='arc3, rad=0.1')
    labels = {(u, v): data['weight'] for u, v, data in g.edges(data=True)}
    nx.draw_networkx_edge_labels(g, pos, edge_labels=labels, label_pos=0.3)
    draw()
    show()
def solve(g, source, sink):
    """Run Mason's gain formula on the graph.

    Empty source/sink strings default to the first/last node respectively.
    Returns the mason() result or an error message string.
    """
    nodes = list(g.nodes)
    if not nodes:
        return "The graph is empty"
    if len(source) == 0:
        source = nodes[0]
    if len(sink) == 0:
        sink = nodes[-1]
    if g.has_node(source) and g.has_node(sink):
        return mason(g, source, sink)
    return "One of the nodes is not in the graph"
| 3.578125 | 4 |
imagepath.py | gitwipo/path_utils | 0 | 12762917 | #!/usr/bin/env python
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# # http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
file=imagepath.py
Separate image into parts.
Get image values
Get/Set base name
Get/Set frame
Get/Set version
"""
__version__ = '3.0.0'
__author__ = '<NAME>'
# MODULES
import os.path
import re
class Image(object):
    """
    Parses and manipulates the parts of a VFX image path
    (base name, frame token, extension).
    """
    # Raw input path; overwritten per-instance in __init__.
    IMAGE = None
    # Parsed value dict; overwritten per-instance in __init__.
    IMAGE_DICT = None
def __init__(self, image=None):
    """
    Init the Image class.
    It sets up all basic variables for the input image.

    :params image: path to an image file
    :type image: str
    """
    # Init internal vars
    self.image_path = None
    self.image_name = None
    self.name = None
    self.ext = None
    # Assign init parm
    self.IMAGE = image
    # Assign internal vars: directory / file name / base name / extension
    self._get_basic_parts()
    # private vars: [basename, frame_prefix, frame, ...] parts of the name
    self._name_list = self._split_name()
    # Assign global class vars
    self.IMAGE_DICT = self.get_image_values()
# REGEX FUNCTIONS
def _regex_version(self):
"""
Create version regex string.
:return: re_major_version, re_prefix_major_version,
re_prefix_major_minor_version
:rtype: tuple(str)
"""
re_major_version = r'^([v|V])(\d+)'
re_prefix_major_version = r'([.|_|-])([v|V])(\d+)*'
re_prefix_major_minor_version = r'([.|_|-])([v|V])(\d+)([.|_|-])(\d+)'
return (re_major_version, re_prefix_major_version,
re_prefix_major_minor_version)
def _regex_frame(self):
"""
Create frame regex string
:return: re_frame, re_frame_only
:rtype: tuple(str)
"""
re_frame = r'([.|_|-])((\d+)|(%0\dd)|(#+))\Z'
re_frame_only = r'^((\d+)|(%0\dd)|(#+))\Z'
return re_frame, re_frame_only
def _re_compile_version(self):
"""
Compile re version object.
:return: re_major_version, re_prefix_major_version,
re_prefix_major_minor_version
:rtype: tuple(re object)
"""
re_major_version = re.compile(self._regex_version()[0])
re_prefix_major_version = re.compile(self._regex_version()[1])
re_prefix_major_minor_version = re.compile(self._regex_version()[2])
return (re_major_version, re_prefix_major_version,
re_prefix_major_minor_version)
def _re_compile_frame(self):
"""
Compile re frame object.
:return: re_frame, re_frame_only
:rtype: tuple(re object)
"""
re_frame = re.compile(self._regex_frame()[0])
re_frame_only = re.compile(self._regex_frame()[1])
return re_frame, re_frame_only
# HELPER FUNCTIONS
def _set_padded_number(self, number, padding):
"""
Set padded number.
:params number:
:type number: int
:params padding:
:type padding: int
:return: padded number string
:rtype: str
"""
return '%0{}d'.format(padding) % number
# FUNCTIONS
def _get_basic_parts(self):
"""
Get path, name, ext
:return: [dirname, name, ext]
:rtype: list(str)
"""
self.image_path = os.path.dirname(self.IMAGE)
self.image_name = os.path.basename(self.IMAGE)
self.name, self.ext = os.path.splitext(self.image_name)
def _split_name(self):
"""
Split image into base name, prefix & frame part
:return: [basename, frame_prefix, frame]
or if frame_parts=True:
[basename, frame_prefix, frame,
frame_digit, frame_notation, frame_hash]
:rtype: list
"""
re_frame, re_frame_only = self._re_compile_frame()
self._get_basic_parts()
name_list = []
try:
name_list = re_frame.split(self.name)
if len(name_list) == 1:
name_list = re_frame_only.split(self.name)
if len(name_list) > 1:
name_list.insert(0, None)
else:
name_list.extend([None, None])
name_list = name_list[:6]
except IndexError:
pass
name_list = [None if v is '' else v for v in name_list]
return name_list
def get_b_name(self):
"""
Get image base name.
:return: base name
:rtype: str
"""
return self._name_list[0]
def set_b_name(self, new_name):
"""
Set image base name.
:params new_name: base name to use for the rename
:type new_name: str
:return: image
:rtype: str
"""
name_list = self._name_list
name_list = ['' if v is None else v for v in name_list]
new_name = new_name + ''.join(name_list[1:3])
self.IMAGE = os.path.join(self.image_path, new_name) + self.ext
self._name_list = self._split_name()
return self.IMAGE
def get_frame(self):
"""
Get image frame values.
Option name=True adds name value pair to dict.
:return: frame_dict = {'frame_prefix': frame_prefix,
'frame': frame,
'frame_padding': padding,
'frame_digit': frame_digit,
'frame_notation':frame_notation,
'frame_hash': frame_hash
}
:rtype: dict
"""
frame_prefix, frame = None, None
frame_digit, frame_notation, frame_hash = None, None, None
if self._name_list[2]:
frame_prefix = self._name_list[1]
frame = self._name_list[2]
frame_digit = self._name_list[3]
frame_notation = self._name_list[4]
frame_hash = self._name_list[5]
# GET FRAME PADDING
padding = None
if frame_digit:
padding = len(frame)
elif frame_notation:
padding = int(frame_notation[2])
elif frame_hash:
padding = len(frame_hash)
# FRAME NOTATION, HASH
if padding:
if frame:
frame_notation = '%0' + str(padding) + 'd'
frame_hash = '#' * padding
elif frame_notation:
frame_hash = '#' * padding
elif frame_hash:
frame_notation = '%0' + str(padding) + 'd'
frame_dict = {'frame_prefix': frame_prefix,
'frame': frame,
'frame_padding': padding,
'frame_digit': frame_digit,
'frame_notation':frame_notation,
'frame_hash': frame_hash
}
return frame_dict
def set_frame(self, new_frame, prefix=None):
"""
Set image frame value. Can also set the prefix if given.
:params new_frame: new frame number
:type new_frame: str
:params prefix: character to use before the frame e.g. _
:type prefix: str
:return: image
:rtype: str
"""
new_frame = str(new_frame)
re_frame, re_frame_only = self._re_compile_frame()
name_list = self._name_list
# Check input values
parm = None
value = None
if not re_frame_only.search(new_frame):
parm = 'new_frame'
value = new_frame
error_msg = '{} \"{}\" must be given as frame hash/frame,\
notation/digit.'.format(parm, value)
raise ValueError(error_msg)
elif prefix and not isinstance(prefix, str):
parm = 'prefix'
value = str(prefix)
error_msg = '{} \"{}\" must be given as string.'.format(parm, value)
raise ValueError(error_msg)
# CONVERT NONE TO EMPTY STRING
name_list = ['' if v is None else v for v in name_list]
frame_prefix = None
if name_list[1]:
frame_prefix = name_list[1]
elif prefix:
frame_prefix = prefix
else:
frame_prefix = ''
# Assign with existing frame
self.name = name_list[0] + frame_prefix + new_frame
self.IMAGE = os.path.join(self.image_path, self.name) + self.ext
# Replace values in internal var
self._name_list = self._split_name()
self.IMAGE_DICT = self.get_image_values()
return self.IMAGE
def get_version(self, major_minor=False):
"""
Get all version strings.
:params major_minor: Set to True if the image is using two style version
convention; default to False
:type major_minor: bool
:return: version_dict = {'version_folder_level': version_folder_level,
'version_folder_prefix': version_folder_prefix,
'version_folder': version_folder,
'version_prefix': version_prefix,
'version': version,
'version_sep': version_sep
}
:rtype: dict
"""
re_version_all = self._re_compile_version()
re_version_only = re_version_all[0]
re_version = re_version_all[1]
re_major_minor_version = re_version_all[2]
version_folder_prefix = None
version_folder = None
version_prefix = None
version = None
version_sep = None
def get_version_result(value):
"""
Inside method fetching version from input value.
:param value: image base name
:type value: str
:return: version_prefix, version
:rtype: tuple(str)
"""
re_version_result = re_version.search(value)
version_prefix = ''.join(re_version_result.group(1, 2))
version = re_version_result.group(3)
return version_prefix, version
def get_version_only_result(value):
"""
Inside method fetching version from input value
if the name may only consist of the version.
:param value: image base name
:type value: str
:return: version_prefix, version
:rtype: tuple(str)
"""
re_version_result = re_version_only.search(value)
version_prefix = re_version_result.group(1)
version = re_version_result.group(2)
return version_prefix, version
# Get file version
if major_minor:
try:
re_version_result_image = re_major_minor_version.search(self.name)
version_prefix = ''.join(re_version_result_image.group(1, 2))
version = re_version_result_image.group(3, 5)
version_sep = re_version_result_image.group(4)
except AttributeError:
pass
else:
try:
version_prefix, version = get_version_result(self.name)
except AttributeError:
try:
version_prefix, version = get_version_only_result(self.name)
except AttributeError:
pass
# Get folder version
level = 1
while level < len(self.image_path.split(os.sep))-1:
image_folder = self.image_path.split(os.sep)[-level]
try:
version_folder_prefix, version_folder = get_version_result(image_folder)
except AttributeError:
try:
version_folder_prefix, version_folder = get_version_only_result(image_folder)
except AttributeError:
pass
if version_folder:
break
level += 1
if not version_folder:
level = None
version_dict = {'version_folder_level': level,
'version_folder_prefix': version_folder_prefix,
'version_folder': version_folder,
'version_prefix': version_prefix,
'version': version,
'version_sep': version_sep
}
return version_dict
def set_version(self, new_version, set_folder=True, major_minor=False,
prefix=None, sep=None):
"""
Set the given version.
:params new_version: version as a string without the prefix
:type new_version: str
:params set_folder: Set the version in the folder
:type set_folder: bool
:params major_minor: Set to True if the version is using
major, minor version style
:type major_minor: bool
:params prefix: character to use before the version
:type prefix: str
:params sep: separator to use for major, minor version style
:type sep: str
:return: image
:rtype: str
"""
# Init self.regex
re_version_all = self._re_compile_version()
re_version_only = re_version_all[0]
re_version = re_version_all[1]
re_major_minor_version = re_version_all[2]
# Get current version
version_dict = self.get_version(major_minor)
version_folder_level = version_dict['version_folder_level']
version_folder_prefix = version_dict['version_folder_prefix']
version_folder = version_dict['version_folder']
version_prefix = version_dict['version_prefix']
version = version_dict['version']
version_sep = version_dict['version_sep']
if version_folder_level > 1:
folder_split = self.image_path.split(os.sep)
image_root = os.sep.join(folder_split[:-(version_folder_level)])
image_folder = folder_split[-version_folder_level]
sub_folder = os.sep.join(folder_split[-(version_folder_level-1):])
else:
image_root = os.path.dirname(self.image_path)
image_folder = os.path.basename(self.image_path)
sub_folder = ''
# Assign input parameter
if prefix:
version_prefix = prefix
if version_folder_prefix:
version_folder_prefix = prefix
if sep:
version_sep = sep
# Set version
try:
# Set version in file
if version:
if major_minor:
if isinstance(new_version, (list, tuple)):
sub_major = version_prefix + str(new_version[0])
sub_minor = version_sep + str(new_version[1])
substition = sub_major + sub_minor
self.name = re_major_minor_version.sub(substition, self.name)
else:
substition = version_prefix + str(new_version)
self.name = re_major_minor_version.sub(substition, self.name)
else:
if re_version.search(self.name):
substition = version_prefix + str(new_version)
self.name = re_version.sub(substition, self.name)
elif re_version_only.search(self.name):
substition = version_prefix + str(new_version)
self.name = re_version_only.sub(substition, self.name)
# Set version in folder
if set_folder:
if isinstance(new_version, (list, tuple)):
new_version = new_version[0]
if version_folder:
if re_version.search(image_folder):
substition = version_folder_prefix + str(new_version)
image_folder = re_version.sub(substition, image_folder)
elif re_version_only.search(image_folder):
substition = version_folder_prefix + str(new_version)
image_folder = re_version_only.sub(substition, image_folder)
# Generate image string
self.image_path = os.path.join(image_root, image_folder, sub_folder)
self.IMAGE = os.path.join(self.image_path, self.name) + self.ext
self._name_list = self._split_name()
return self.IMAGE
except (AttributeError, TypeError) as err:
error_msg = 'Wrong input. Error: {}'.format(err)
raise ValueError(error_msg)
def get_image_values(self, major_minor=False):
"""
Get all image part values.
:params major_minor: Set to True if the version is using
major, minor version style
:type major_minor: bool
:return: image_dict = {'path': image_path,
'name': b_name,
'ext': ext,
'version_folder_level': version_folder_level,
'version_folder_prefix': version_folder_prefix,
'version_folder': version_folder,
'version_prefix': version_prefix,
'version': version,
'version_sep': version_sep,
'frame_prefix': frame_prefix,
'frame': frame,
'frame_padding': padding,
'frame_notation': frame_notation,
'frame_hash': frame_hash
}
:rtype: dict
"""
# FRAME
frame_dict = self.get_frame()
# VERSION
version_dict = self.get_version(major_minor)
# GENERATE IMAGE DICT
image_dict = {'path': self.image_path,
'name': self._name_list[0],
'ext': self.ext,
'version_folder_level': version_dict['version_folder_level'],
'version_folder_prefix': version_dict['version_folder_prefix'],
'version_folder': version_dict['version_folder'],
'version_prefix': version_dict['version_prefix'],
'version': version_dict['version'],
'version_sep': version_dict['version_sep'],
'frame_prefix': frame_dict['frame_prefix'],
'frame': frame_dict['frame'],
'frame_padding': frame_dict['frame_padding'],
'frame_notation': frame_dict['frame_notation'],
'frame_hash': frame_dict['frame_hash']
}
return image_dict
| 2.859375 | 3 |
statusbar.py | tushar176/Notepad- | 1 | 12762918 | <reponame>tushar176/Notepad-
from tkinter import BOTTOM
from tkinter import ttk
'''Here statusbar and functionality of linebar is defined '''
class Statusbar:
    """Status bar showing line/character/word counts, plus the line-number bar
    refresh logic for the text editor."""

    def __init__(self, windows, text_write, linebar):
        self.root = windows
        self.text_write = text_write
        self.linebar = linebar
        self.status_bar = ttk.Label(self.root, text='Status Bar')
        self.status_bar.pack(side=BOTTOM)
        # text_changed -> consulted by exit() in menu.py to warn on unsaved text
        self.text_changed = False
        # '<<Modified>>' fires on any change to the text editor content
        self.text_write.text_editor.bind('<<Modified>>', self.changed)

# ----------------------------------------------- Functions -----------------------------------------------
    # Build the line-number column content ("1\n2\n...") for the current text.
    def get_line_numbers(self):
        output = ''
        row, col = self.text_write.text_editor.index("end").split('.')  # row = number of rows in the text
        for i in range(1, int(row)):
            output += str(i) + '\n'  # one line number per line, newline separated, for display
        return output

    # Display the line numbers in the line-bar text field.
    def update_line_numbers(self):
        line_numbers = self.get_line_numbers()
        self.linebar.line_number_bar.config(state='normal')
        # Match font family/size to the text editor; otherwise the alignment drifts.
        self.linebar.line_number_bar.config(font=(self.text_write.current_font_family,self.text_write.current_font_size,'normal'))
        self.linebar.line_number_bar.delete('1.0', 'end')
        self.linebar.line_number_bar.insert('1.0', line_numbers)
        self.linebar.line_number_bar.config(state='disabled')

    # <<Modified>> callback: refresh status bar and line bar on any text change.
    def changed(self, event=None):
        if self.text_write.text_editor.edit_modified():
            self.text_changed = True  # text editor content has changed
            # NOTE(review): result of this get().split() is discarded — looks
            # like leftover debugging; the counts below recompute it.
            self.text_write.text_editor.get(1.0, 'end-1c').split()  # ('end-1c' drops the trailing newline)
            # Word count via split() on whitespace.
            words = len(self.text_write.text_editor.get(1.0, 'end-1c').split())
            # Character count.
            characters = len(self.text_write.text_editor.get(1.0, 'end-1c'))
            # Line count from the index of the end position.
            row, col = self.text_write.text_editor.index("end").split('.')
            # Show lines, characters and words in the status bar.
            self.status_bar.config(text=f'Lines: {int(row)-1} Characters: {characters} Words: {words}')
            self.status_bar.config(anchor='e')
            self.update_line_numbers()
            # Reset the modified flag so the next change re-triggers this callback.
            self.text_write.text_editor.edit_modified(False)
| 3.578125 | 4 |
tests/API1/testutils.py | sdrees/param | 90 | 12762919 | <gh_stars>10-100
import datetime as dt
import param
import pytest
from param import guess_param_types
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
# Fixed timestamps reused in the Date-related cases below.
now = dt.datetime.now()
today = dt.date.today()
# Parametrize table: id -> (input value, expected param.Parameter subclass
# that guess_param_types should infer for it).
guess_param_types_data = {
    'Parameter': (param.Parameter(), param.Parameter),
    'Date': (today, param.Date),
    'Datetime': (now, param.Date),
    'Boolean': (True, param.Boolean),
    'Integer': (1, param.Integer),
    'Number': (1.2, param.Number),
    'String': ('test', param.String),
    'Dict': (dict(a=1), param.Dict),
    'NumericTuple': ((1, 2), param.NumericTuple),
    'Tuple': (('a', 'b'), param.Tuple),
    'DateRange': ((dt.date(2000, 1, 1), dt.date(2001, 1, 1)), param.DateRange),
    'List': ([1, 2], param.List),
    'Unsupported_None': (None, param.Parameter),
}
# Extra cases only when the optional dependencies are importable.
if np:
    guess_param_types_data.update({
        'Array':(np.ndarray([1, 2]), param.Array),
    })
if pd:
    guess_param_types_data.update({
        'DataFrame': (pd.DataFrame(data=dict(a=[1])), param.DataFrame),
        'Series': (pd.Series([1, 2]), param.Series),
    })
})
@pytest.mark.parametrize('val,p', guess_param_types_data.values(), ids=guess_param_types_data.keys())
def test_guess_param_types(val, p):
    """guess_param_types maps each sample value to the expected Parameter
    subclass, keeping the value as a constant default (except for the
    plain-Parameter fallback cases)."""
    # Fix: avoid shadowing the builtin `input`.
    kwargs = {'key': val}
    output = guess_param_types(**kwargs)
    assert isinstance(output, dict)
    assert len(output) == 1
    assert 'key' in output
    out_param = output['key']
    assert isinstance(out_param, p)
    # Exact-type check on purpose: the plain-Parameter fallback has no
    # meaningful default/constant to assert.
    if type(out_param) is not param.Parameter:
        assert out_param.default is val
        assert out_param.constant
| 2.28125 | 2 |
cs15211/VerifyinganAlienDictionary.py | JulyKikuAkita/PythonPrac | 1 | 12762920 | <reponame>JulyKikuAkita/PythonPrac
# coding=utf-8
__source__ = 'https://leetcode.com/problems/verifying-an-alien-dictionary/'
# Time: O(N)
# Space: O(1)
#
# Description: Leetcode # 953. Verifying an Alien Dictionary
#
# In an alien language, surprisingly they also use english lowercase letters,
# but possibly in a different order.
# The order of the alphabet is some permutation of lowercase letters.
#
# Given a sequence of words written in the alien language, and the order of the alphabet,
# return true if and only if the given words are sorted lexicographicaly in this alien language.
#
# Example 1:
#
# Input: words = ["hello","leetcode"], order = "hlabcdefgijkmnopqrstuvwxyz"
# Output: true
# Explanation: As 'h' comes before 'l' in this language, then the sequence is sorted.
# Example 2:
#
# Input: words = ["word","world","row"], order = "worldabcefghijkmnpqstuvxyz"
# Output: false
# Explanation: As 'd' comes after 'l' in this language, then words[0] > words[1],
# hence the sequence is unsorted.
# Example 3:
#
# Input: words = ["apple","app"], order = "abcdefghijklmnopqrstuvwxyz"
# Output: false
# Explanation: The first three characters "app" match,
# and the second string is shorter (in size.)
# According to lexicographical rules "apple" > "app",
# because 'l' > '∅', where '∅' is defined as the blank character
# which is less than any other character (More info).
#
#
# Note:
#
# 1 <= words.length <= 100
# 1 <= words[i].length <= 20
# order.length == 26
# All characters in words[i] and order are english lowercase letters.
#
import unittest
# 28ms 100%
class Solution(object):
    def isAlienSorted(self, words, order):
        """
        Return True iff `words` is sorted lexicographically under the
        alphabet given by `order`.

        :type words: List[str]
        :type order: str
        :rtype: bool

        Time O(C) over total characters, space O(1) beyond the 26-entry map.
        """
        # Map each letter to its rank in the alien alphabet for O(1) compares.
        # Fix: use range() instead of the Python-2-only xrange() below.
        order_index = {c: i for i, c in enumerate(order)}

        for i in range(len(words) - 1):
            word1 = words[i]
            word2 = words[i + 1]

            # Find the first difference word1[k] != word2[k].
            for k in range(min(len(word1), len(word2))):
                # If they compare badly, it's not sorted.
                if word1[k] != word2[k]:
                    if order_index[word1[k]] > order_index[word2[k]]:
                        return False
                    break
            else:
                # No differing character found: the shorter word must come
                # first, as in ("app", "apple").
                if len(word1) > len(word2):
                    return False
        return True
class TestMethods(unittest.TestCase):
    # Placeholder sanity test; no real assertions against Solution yet.
    def test_Local(self):
        self.assertEqual(1, 1)
# Run the unittest suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/verifying-an-alien-dictionary/solution/
Approach 1: Check Adjacent Words
Complexity Analysis
Time Complexity: O(C), where C is the total content of words.
Space Complexity: O(1)
# 4ms 100%
class Solution {
public boolean isAlienSorted(String[] words, String order) {
int[] map = new int[26];
for (int i = 0; i < 26; i++) {
map[order.charAt(i) - 'a'] = i;
}
if (words == null || words.length <= 1) return true;
for (int i = 1; i < words.length; i++) {
if (comp(words[i - 1], words[i], map)) { // true if words[i-1] > words[i]
return false;
}
}
return true;
}
private boolean comp(String a, String b, int[] map) {
int alen = a.length(), blen = b.length(), minlen = Math.min(alen, blen);
char[] as = a.toCharArray(), bs = b.toCharArray();
for (int i = 0; i < minlen; i++) {
if (map[as[i] - 'a'] < map[bs[i] - 'a']) return false;
else if (map[as[i] - 'a'] == map[bs[i] - 'a']) continue;
else return true;
}
return alen > blen;
}
}
''' | 3.8125 | 4 |
ptart/migrations/0011_label_deprecated.py | Fisjkars/sh00t | 17 | 12762921 | # Generated by Django 2.2.24 on 2022-04-20 16:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean `deprecated` flag (default False) to the Label model."""

    dependencies = [
        ('ptart', '0010_project_archived'),
    ]

    operations = [
        migrations.AddField(
            model_name='label',
            name='deprecated',
            field=models.BooleanField(default=False),
        ),
    ]
| 1.390625 | 1 |
src/ion/process/test/manage_system.py | scionrep/scioncc_new | 2 | 12762922 | <filename>src/ion/process/test/manage_system.py
#!/usr/bin/env python
""" call system_management_service actions. use as a proof of concept or the world's worst UI.
invoke with commands like this:
bin/pycc -x ion.process.test.manage_system.ChangeLogLevel logger=ion.process.bootstrap.ion_logger level=DEBUG
bin/pycc -x ion.process.test.manage_system.ReportStats
bin/pycc -x ion.process.test.manage_system.ClearStats
"""
__author__ = '<NAME>, <NAME>, <NAME>'
from pyon.core.bootstrap import get_service_registry
from pyon.public import ImmediateProcess
from interface.objects import AllContainers, ReportStatistics, ClearStatistics
class ChangeLogLevel(ImmediateProcess):
    """Immediate process: set a logger's level via the system management service."""

    def on_start(self):
        # Pull the target logger, level and recursion flag from the process config.
        target_logger = self.CFG.get("logger")
        target_level = self.CFG.get("level")
        recurse = self.CFG.get("recursive", False)
        client = get_service_registry().services['system_management'].client(process=self)
        client.set_log_level(logger=target_logger, level=target_level, recursive=recurse)
class ReportStats(ImmediateProcess):
    """Immediate process: ask all containers to report their statistics."""

    def on_start(self):
        client = get_service_registry().services['system_management'].client(process=self)
        everywhere = AllContainers()
        client.perform_action(predicate=everywhere, action=ReportStatistics())
class ClearStats(ImmediateProcess):
    """Immediate process: ask all containers to clear their statistics."""

    def on_start(self):
        client = get_service_registry().services['system_management'].client(process=self)
        everywhere = AllContainers()
        client.perform_action(predicate=everywhere, action=ClearStatistics())
| 2 | 2 |
utility.py | syaoming/molle | 0 | 12762923 | from z3 import *
from operator import or_
from pprint import pprint
def _sorted_inters(inter_list, sp):
''' Sorts the inter_list = [('from', 'to', 'positive'), ...] into a dict,
where keyvalue is a couple of number tuples, wich integer codes as keys.
e.g.
{1: ( (2, 3), (5, 9) )} : species 1 is activated by 2 and 3, and repressed
by 5 and 9.
'''
d = dict([(c, ([], [])) for c in range(len(sp))]) # initialization
for i in inter_list:
f, t = i[:2]
if 'negative' in i: idx = 1
elif 'positive' in i: idx = 0
else:
print 'no +/- assigend to interactions %d'%(inter_list)
raise(Error)
tcode, fcode = sp.index(t), sp.index(f)
d.setdefault(tcode, ([], []))[idx].append(fcode)
return d
def readModel(f, opmt = True):
    ''' Take a file object as input, return a tuple of 5 objects
    (the original docstring said 6):

    species: a list of gene names.
    logics : a dict. { gene_name: tuple_of_allowed_logic_numbers }
    kofe   : a dict. { "FE": list_of_FEable_genes, "KO": list_of_KOable_genes }
    defI   : a dict of defined interactions, see _sorted_inters().
    optI   : a dict of optional interactions.

    NOTE(review): `opmt` is accepted for interface compatibility but unused.
    '''
    species = []
    logics = {}
    kofe = {'KO': [], 'FE': []}
    def_inters_list = []
    opt_inters_list = []
    # Read the components line, e.g. "A+(0 1), B-".
    for c in f.readline().strip().split(','):
        # Get the gene name (with optional +/- marks and allowed-logic list).
        if '(' in c:
            gene_ = c[:c.index('(')].strip()
        else:
            gene_ = c.strip()
        # Fix: the original used bare filter() results as strings, which only
        # works on Python 2; join the filtered characters explicitly.
        gene = ''.join(ch for ch in gene_ if ch not in '+-')
        mark = ''.join(ch for ch in gene_ if ch in '+-')
        # '+' marks a force-expressible gene, '-' a knock-outable one.
        if '+' in mark:
            kofe['FE'].append(gene)
        if '-' in mark:
            kofe['KO'].append(gene)
        # Record the allowed logics; if none given, default to range(18).
        if '(' in c:
            left, right = c.index('('), c.index(')')
            rules = tuple(int(i) for i in c[left+1:right].split())
        else:
            rules = tuple(range(18))
        logics[gene] = rules
        species.append(gene)
    # Read the interaction lines: "from to sign [optional]".
    for line in f.readlines():
        l = line.strip().split()
        if not l:
            continue  # skip empty line
        if 'optional' in l:
            opt_inters_list.append(tuple(l[:3]))
        else:
            def_inters_list.append(tuple(l[:3]))
    defI = _sorted_inters(def_inters_list, species)
    optI = _sorted_inters(opt_inters_list, species)
    return (species, logics, kofe, defI, optI)
# kept from old version
def _addExp(d, name, time_point, state_names_list):
d.setdefault(name, []).append( (int(time_point), state_names_list) )
# kept from old version
def _addState(d, state_name, gene, value):
d.setdefault(state_name, []).append( (gene, int(value)) )
# kept from old version
def readExp(f):
    '''
    Take the file of experiment constraints, return two dicts:
    exps:   the experimental constraints for every experiment
    states: the mapping of shortcut names to node states
    '''
    exps = dict()
    states = dict()
    # Non-empty while parsing inside a "let <name> { ... }" block.
    shortcut = ''
    for l in f.readlines():
        l = l.strip();
        if(not l): continue; # skip empty line
        try: l = l[:l.index('"')] # strip trailing comment starting with "
        except ValueError: None
        try: l = l[:l.index(';')] # strip trailing ;
        except ValueError: None
        if(shortcut): # inside the bracket { }
            if(l[0] == '{'): continue # skip left bracket
            elif(l[0] == '}'): shortcut = '' # exit the bracket
            else:
                # "name = value" assignment inside the state block.
                (left, right) = l.split('=');
                name = left.strip();
                value = right.split()[0];
                _addState(states, shortcut, name, value); # record configuration
        l = l.split();
        if(l[0] == "//"): continue # comment line
        elif(l[0] == "under"): _addExp(exps, l[1], l[3], l[4:]) # record experiment
        elif(l[0] == "let"):
            shortcut = l[1]; # ready to enter the bracket
            # Drop an optional trailing ':' qualifier from the shortcut name.
            try: shortcut = shortcut[:shortcut.index(':')]
            except ValueError: None
    return (exps, states);
def compati(l, actn, repn):
    ''' Speed up the solving.
    Prune a list of candidate logic numbers to those compatible with the
    activator count (actn) and repressor count (repn); (-1,) marks "none".
    Not sure with the validity when actn == 0 of such approach.

    NOTE(review): filter() here returns a list/tuple only on Python 2; on
    Python 3 it returns a lazy (always-truthy) iterator, so the `or (-1,)`
    fallback never triggers — confirm before running under Python 3.
    '''
    if len(l) < 16: return l
    if actn == 0:
        if repn == 0: return (-1, )
        else: # only repressors: logics 16/17 are repressor-only
            return filter(lambda x: x > 15, l) or (-1, )
    elif repn == 0: # only activators: logics 0/1 are activator-only
        return filter(lambda x: x < 2, l) or (-1, )
    else:
        return l
zero = BitVecVal(0, 1)
def Any(bvs):
    """Bitwise-OR fold over a list of 1-bit vectors; empty input yields `zero`.

    Fix: the original used the bare builtin `reduce`, which only exists on
    Python 2 (it moved to functools in Python 3); an explicit loop works on
    both and is behaviorally identical.
    """
    result = zero
    for bv in bvs:
        result = or_(result, bv)
    return result
def _concat(bvs):
    # Concatenate bit-vectors into one; z3's Concat requires >= 2 operands,
    # so a single-element list is returned as-is.
    if len(bvs) == 1: return bvs[0]
    else: return Concat(bvs)
def _create_bit_rule(num, act_list, rep_list, A, R):
    ''' Create the update rule that return bit-vector of length 1.

    num      : logic function number in [-1, 17]; -1 means "always False"
    act_list : 1-bit vectors for the candidate activators
    rep_list : 1-bit vectors for the candidate repressors
    A, R     : selection bit-vectors choosing which activators/repressors
               are actually wired in
    Returns a z3 boolean expression. The commented-out returns are kept as
    the stricter historical variants of each case.
    '''
    if num == -1: return BoolVal(False) # special case
    # initialization: concatenate current states; if no candidates, force
    # both the states and the selection vector to the 1-bit zero constant.
    if act_list: act = _concat(act_list)
    else: act = A = zero
    if rep_list: rep = _concat(rep_list)
    else: rep = R = zero
    # creating result: one branch per logic function number
    if num == 0:
        return And(R == 0, A != 0, A & act == A)
    elif num == 1:
        return And(R == 0, A & act != 0)
        #return And(R == 0, A != 0, A & act != 0)
    elif num == 2:
        return Or( And(R == 0, A != 0, A & act == A),
                   And(R != 0, rep & R == 0, A & act != 0) )
        #return Or( And(R == 0, A != 0, A & act == A),
        #           And(R != 0, A != 0, rep & R == 0, A & act != 0) )
    elif num == 3:
        return And(A & act != 0, rep & R == 0)
    elif num == 4:
        return And( A != 0, A & act == A,
                    Or(R == 0, rep & R != R) )
        #return Or( And(R == 0, A != 0, A & act == A),
        #           And(A != 0, A & act == A, rep & R != R) )
        #return Or( And(R == 0, A != 0, A & act == A),
        #           And(R != 0, A != 0, A & act == A, rep & R != R) )
    elif num == 5:
        return Or( And(R == 0, act & A != 0),
                   And(A != 0, act & A == A, rep & R != R) )
        #return Or( And(R == 0, A != 0, act & A != 0),
        #           And(R != 0, A != 0, act & A == A, rep & R != R) )
    elif num == 6:
        return Or( And(R == 0, A != 0, act & A == A),
                   And(act & A != 0, rep & R != R) )
        #return Or( And(R == 0, A != 0, act & A == A),
        #           And(R != 0, A != 0, act & A != 0, rep & R != R) )
    elif num == 7:
        return Or( And(R == 0, act & A != 0),
                   And(act & A != 0, rep & R != R) )
        #return Or( And(R == 0, A != 0, act & A != 0),
        #           And(R != 0, A != 0, act & A != 0, rep & R != R) )
    elif num == 8:
        return And(A != 0, act & A == A)
        #return Or( And(R == 0, A != 0, act & A == A),
        #           And(R != 0, A != 0, act & A == A) )
    elif num == 9:
        return Or( And(R == 0, act & A != 0),
                   And(R != 0, A != 0, act & A == A) )
        #return Or( And(R == 0, A != 0, act & A != 0),
        #           And(R != 0, A != 0, act & A == A) )
    elif num == 10:
        return Or( And(A != 0, act & A == A),
                   And(R != 0, act & A != 0, rep & R == 0) )
        #return Or( And(R == 0, A != 0, act & A == A),
        #           And(R != 0, A != 0, Or(act & A == A,
        #                                  And(act & A != 0, rep & R == 0))) )
    elif num == 11:
        return Or( And(R == 0, A != 0, act & A != 0),
                   And(R != 0, A != 0, Or(act & A == A,
                                          And(act & A != 0, rep & R == 0))) )
    elif num == 12:
        return Or( And(A != 0, act & A == A),
                   And(act & A != 0, rep & R != R) )
        #return Or( And(R == 0, A != 0, act & A == A),
        #           And(R != 0, A != 0, Or(act & A == A,
        #                                  And(act & A != 0, rep & R != R))) )
    elif num == 13:
        return Or( And(R == 0, A != 0, act & A != 0),
                   And(R != 0, A != 0, Or(act & A == A,
                                          And(act & A != 0, rep & R != R))) )
    elif num == 14:
        return Or( And(R == 0, A != 0, act & A == A),
                   And(R != 0, act & A != 0) )
        #return Or( And(R == 0, A != 0, act & A == A),
        #           And(R != 0, A != 0, act & A != 0) )
    elif num == 15:
        return act & A != 0
        #return Or( And(R == 0, A != 0, act & A != 0),
        #           And(R != 0, A != 0, act & A != 0) )
    elif num == 16:
        return And(A == 0, rep & R != 0, rep & R != R)
        #return And(A == 0, R != 0, rep & R != 0, rep & R != R)
    elif num == 17:
        return And(A == 0, R != 0, rep & R == 0)
    else:
        print "Strange Num"
        raise ValueError
def _with_kofe(kofe_idx, ko, fe, expr):
    # Wrap an update expression with knock-out / force-expression overrides:
    # FE forces the result True, KO forces it False, otherwise `expr` holds.
    # kofe_idx = (ko_bit_index, fe_bit_index), where 0 means "not applicable".
    koc, fec = kofe_idx
    if koc:
        ko = Extract(koc-1,koc-1,ko) == 1 # a trick to avoid 0 == False
        if fec:
            fe = Extract(fec-1,fec-1,fe) == 1
            return Or(fe, And(Not(ko), expr))
        else: return And(Not(ko), expr)
    elif fec:
        fe = Extract(fec-1,fec-1,fe) == 1
        return Or(fe, expr)
    else: return expr
def makeFunction(acts, reps, kofe_index, logic, A, R):
    ''' Makes a function that takes q, ko, fe and returns a corresponding
    z3 expr. A is the activator-selecting bit-vector, R for repressors;
    acts/reps are bit indices into the state vector q.
    '''
    return lambda q, ko, fe: simplify(
        _with_kofe(kofe_index, ko, fe,
                   _create_bit_rule(logic,
                                    [Extract(i,i,q) for i in acts],
                                    [Extract(i,i,q) for i in reps],
                                    A, R)))
def isExpOf2(bvv):
    """Return True if the bit-vector value has exactly one bit set
    (i.e. it is a power of two).

    Fix: the original used len(filter(...)), which only works on Python 2
    where filter returns a list; str.count is equivalent on both versions.
    """
    return bin(bvv.as_long()).count('1') == 1
### Output Utilities ###
#########################
boolf = BoolVal(False)
def conv_time(secs, th = 300):
    """Format a duration in seconds; above the threshold `th`, show minutes."""
    if secs <= th:
        return '%.1f sec' % secs
    return '%.1f min' % (secs / 60)
def _Or(l):
    # z3 Or over a list of symbols; empty list yields the False constant.
    if(not l): return boolf
    if(len(l) == 1): return l[0]
    else: return Or(l);
def _And(l):
    # z3 And over a list of symbols; empty list yields the False constant.
    # NOTE(review): the empty case returns False rather than the logical
    # identity True — callers appear to guard non-empty lists; confirm.
    if(not l): return boolf
    if(len(l) == 1): return l[0]
    else: return And(l);
def _create_sym_rule(num, act, rep):
    # Symbolic (named-Bool) counterpart of _create_bit_rule, used only for
    # pretty-printing the solved model; act/rep are lists of node names.
    if num < -1 or num > 17:
        return Bool('Strang, num=%d, act=%s, rep=%s'%(num,str(act), str(rep)))
    if num == -1: return boolf
    if act:
        actt = [Bool(node) for node in act]
    if rep:
        rept = [Bool(node) for node in rep]
    if act:
        if not rep:
            # Without repressors: even logics require all activators,
            # odd logics require at least one.
            if num%2 == 0: return _And(actt)
            else: return _Or(actt)
        elif num == 0: return boolf
        elif num == 1: return boolf
        elif(num < 4): return And(_Or(actt), Not(_Or(rept)))
        elif(num < 6): return And(_And(actt), Not(_And(rept)));
        elif(num < 8): return And(_Or(actt), Not(_And(rept)))
        elif(num < 10): return _And(actt)
        elif(num < 12): return Or(_And(actt), And(_Or(actt), Not(_Or(rept))))
        elif(num < 14): return Or(_And(actt), And(_Or(actt), Not(_And(rept))))
        elif(num < 16): return _Or(actt)
        else: return boolf
    if rep:
        # Repressor-only logics 16 and 17.
        if num == 16: return And(_Or(rept), Not(_And(rept)))
        elif num==17: return Not(_Or(rept));
        else: return boolf
    else: return boolf # no act no rep
def checkBit(i, bv):
    # True if bit i of the (constant) bit-vector bv is set.
    # simplify is necessary to reduce the Extract to a numeral first.
    return simplify(Extract(i, i, bv)).as_long() == 1
def bv2logic(lbvv, llist):
    ''' Convert a one-hot bit-vector value to an integer logic function
    number, by mapping the position of its single set bit into llist.'''
    assert isExpOf2(lbvv)
    # Position of the set bit = length of the binary string after stripping
    # the leading '0b' characters, minus one.
    lcode = len(bin(lbvv.as_long()).lstrip('0b')) - 1
    return llist[lcode]
def bv2inters(ibvv, ilist, species):
    # Decode a selection bit-vector into the list of selected species names;
    # bit (size-1 - i) corresponds to candidate ilist[i].
    if is_true(simplify(ibvv == 0)): return []
    assert is_false(simplify(ibvv == 0))
    l = ibvv.size() - 1
    return [species[c] for i, c in enumerate(ilist) if checkBit(l-i, ibvv)]
def getDetail(m, A_, R_, L_, species, inters, logics):
    # Extract per-species activators (A), repressors (R) and logic number (L)
    # from a solved z3 model m, given the selection variables A_/R_/L_.
    A = {}; R = {}; L = {}
    for c, s in enumerate(species):
        L[s] = bv2logic(m[L_[s]], logics[s])
        # `or zero` covers selection variables left unconstrained by the model.
        if A_[s]: A[s] = bv2inters(m[A_[s]] or zero, inters[c][0], species)
        else: A[s] = []
        if R_[s]: R[s] = bv2inters(m[R_[s]] or zero, inters[c][1], species)
        else: R[s] = []
    return (A, R, L)
def printModel(species, A, R, L, config = True, model = True):
    ''' Print the solved model nicely: per-species configuration
    (logic number, activators "<-", repressors "|-") and the resulting
    symbolic update rules. Python-2 print statements. '''
    # printing the model
    if config:
        print ">>\tConfigurations: "
        for s in species:
            print ">>\t\t%s:%d%s%s" \
                %(s, L[s],
                  A[s] and '\t<- ' + ','.join(A[s]) or '',
                  R[s] and '\t|- ' + ','.join(R[s]) or '')
    if model:
        print ">>\tModel: "
        for s in species: print ">>\t\t%s' = %s" \
            %(s,simplify( _create_sym_rule(L[s], A[s], R[s]) ))
from smtplib import SMTP, SMTPAuthenticationError
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def mailMe(addr, pw, content, title = 'Computation Finished'):
    # Send a plain-text notification email to self via the hardcoded
    # smtp.qq.com server, logging in with addr/pw.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = title
    msg['From'] = msg['To'] = addr
    msg.attach(MIMEText(content, 'plain'))
    server = SMTP('smtp.qq.com')
    try:
        server.login(addr, pw)
        server.sendmail(addr, addr, msg.as_string())
        server.quit()
    except SMTPAuthenticationError:
        # NOTE(review): this prints the password in clear text on failure —
        # a security concern worth removing.
        print ">> SMTP: login fail with %s:%s"%(addr, pw)
| 3.171875 | 3 |
functional_tests/test_book.py | PMPL-Arieken/django-locallibrary-tutorial | 1 | 12762924 | import time
from .base import FunctionalTest
from catalog.models import Book
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
bookCatalogLink = '/catalog/books/'
bookDetailsLink = '/catalog/book/'
class TestBookPage(FunctionalTest):
submit_selector = 'input[type=submit]'
    def setUp(self):
        # Delegate browser setup to the FunctionalTest base class.
        return super().setUp()
    def tearDown(self):
        # Delegate browser teardown to the FunctionalTest base class.
        return super().tearDown()
def test_book_page_empty(self):
self.browser.get(self.live_server_url + bookCatalogLink)
self.assertEqual(self.browser.title, 'Local Library')
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertEqual(header_text, 'Book List')
list_text = self.browser.find_element_by_tag_name('p').text
self.assertEqual(list_text, 'There are no books in the library.')
def test_book_page_filled(self):
self.setUpBooks()
self.browser.get(self.live_server_url + bookCatalogLink)
time.sleep(1)
book_list = self.browser.find_element_by_id('book-list')
rows = book_list.find_elements_by_tag_name('li')
self.assertIn('Book Title (<NAME>)', [row.text for row in rows])
    def test_book_page_create(self):
        """Fill the create-book form and verify we land on the detail page."""
        self.login(self.admin)
        self.setUpBooks()
        self.browser.get(self.live_server_url + '/book/create/')
        # NOTE(review): a fixed 10s sleep makes this test slow; presumably it
        # waits for the form page -- consider an explicit WebDriverWait.
        time.sleep(10)
        title = self.browser.find_element_by_css_selector('input[name=title]')
        author_box = Select(self.browser.find_element_by_name('author'))
        summary = self.browser.find_element_by_css_selector('textarea[name=summary]')
        isbn = self.browser.find_element_by_css_selector('input[name=isbn]')
        genre_box = Select(self.browser.find_element_by_name('genre'))
        language = Select(self.browser.find_element_by_name('language'))
        submit = self.browser.find_element_by_css_selector(self.submit_selector)
        title.send_keys('Book Title 2')
        author_box.select_by_visible_text('<NAME>')
        summary.send_keys('Summary of Book 2')
        isbn.send_keys('1234567890123')
        genre_box.select_by_visible_text('Fantasy')
        language.select_by_visible_text('English')
        submit.send_keys(Keys.ENTER)
        time.sleep(1)  # allow the redirect to the detail page
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertEqual(header_text, 'Title: Book Title 2')
    def test_book_page_delete(self):
        """Deleting the only book returns the catalog to the empty state."""
        self.setUpBooks()
        book = Book.objects.all()[0]
        self.login(self.admin)
        self.browser.get(self.live_server_url + bookDetailsLink + str(book.id))
        delete_button = self.browser.find_element_by_link_text('Delete')
        delete_button.click()
        # Confirm the deletion on the follow-up form.
        submit = self.browser.find_element_by_css_selector(self.submit_selector)
        submit.send_keys(Keys.ENTER)
        time.sleep(1)  # allow the redirect to complete
        self.browser.get(self.live_server_url + bookCatalogLink)
        list_text = self.browser.find_element_by_tag_name('p').text
        self.assertEqual(list_text, 'There are no books in the library.')
def test_book_page_update(self):
self.setUpBooks()
book = Book.objects.all()[0]
self.login(self.admin)
self.browser.get(self.live_server_url + bookDetailsLink + str(book.id))
delete_button = self.browser.find_element_by_link_text('Update')
delete_button.click()
title = self.browser.find_element_by_css_selector('input[name=title]')
title.clear()
title.send_keys('Laskar')
submit = self.browser.find_element_by_css_selector(self.submit_selector)
submit.send_keys(Keys.ENTER)
time.sleep(1)
self.browser.get(self.live_server_url + bookCatalogLink)
book_list = self.browser.find_element_by_id('book-list')
rows = book_list.find_elements_by_tag_name('li')
self.assertIn('Laskar (<NAME>)', [row.text for row in rows]) | 2.46875 | 2 |
koans/segunda_semana/acerca_de_funciones.py | benjymb/python_koans_pachaqtec | 0 | 12762925 | <filename>koans/segunda_semana/acerca_de_funciones.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
def mi_funcion_global(a,b):
    # Koan fixture: a global function returning the sum of its two arguments.
    return a + b
def funcion_que_no_retorna_nada(a, b):
    # Deliberately has no return statement, so it returns None.
    resultado = a + b
def funcion_con_argumentos_que_tienen_valor_por_defecto(
    a, b='Soy el valor por defecto de la variable b'
):
    # Koan fixture: demonstrates a default parameter value (b defaults to a
    # Spanish sentence meaning "I am b's default value").
    return [a, b]
def funcion_con_cantidad_variable_de_argumentos_no_nombrados(*args):
    # Koan fixture: returns its positional arguments as a tuple.
    return args
def funcion_vacia():
    """Empty function.

    REMEMBER: the reserved word ``pass`` marks an intentionally
    empty code block.
    """
    pass
def funcion_vacia_que_hace_trampa():
    # Despite the name ("empty function that cheats"): ``pass`` is not a
    # return, so execution falls through and the function returns "XD".
    pass
    return "XD"
class AcercaDeFunciones(Koan):
    # Koan exercises about functions: the __ / ____ placeholders are the
    # student's exercise and must remain unfilled here.

    def test_llamando_a_funcion_global(self):
        self.assertEqual(__, mi_funcion_global(2,3))

    """
    # NOTA: Llamar a una funcion con una cantidad de parametros diferentes
    # a lo esperado, no lanzara una excepcion SyntaxisError.
    # Es un error en tiempo de ejecucion.
    """
    def test_llamando_funciones_con_el_numero_de_parametros_inadecuados(self):
        try:
            mi_funcion_global()
        except TypeError as exception:
            mensaje = exception.args[0]
            self.assertRegex(mensaje,
                r'mi_funcion_global\(\) missing 2 required positional arguments')

        try:
            mi_funcion_global(1, 2, 3)
        except Exception as e:
            mensaje = e.args[0]
            """
            # NOTA: Cuidado con los parentesis, necesitan ser escapados.
            # r'\(\)'
            """
            self.assertRegex(mensaje, __)

    def test_llamando_funcion_que_no_retorna_nada(self):
        self.assertEqual(__, funcion_que_no_retorna_nada(1, 2))

    def test_llamando_funcion_con_argumentos_que_tienen_valor_por_defecto(self):
        self.assertEqual(__, funcion_con_argumentos_que_tienen_valor_por_defecto(1))
        self.assertEqual(__, funcion_con_argumentos_que_tienen_valor_por_defecto(1, 2))

    def test_llamando_funcion_con_cantidad_variable_de_argumentos_no_nombrados(self):
        self.assertEqual(__, funcion_con_cantidad_variable_de_argumentos_no_nombrados())
        self.assertEqual(('uno',), funcion_con_cantidad_variable_de_argumentos_no_nombrados('uno'))
        self.assertEqual(__, funcion_con_cantidad_variable_de_argumentos_no_nombrados('uno', 'dos'))

    def test_llamando_funcion_vacia(self):
        self.assertEqual(__, funcion_vacia())

    def test_llamando_funcion_vacia_que_hace_trampa(self):
        self.assertEqual(____, "XD" != funcion_vacia_que_hace_trampa())
| 2.9375 | 3 |
rpisec/telegram_bot/commands/status.py | marclr/rpi-security | 0 | 12762926 | <gh_stars>0
def status(bot, update, webcontrol):
    """Reply in the chat with the detection status, or a fallback on failure.

    Sends the webcontrol-reported text only when it answers HTTP 200;
    any other status code yields the generic "Try it later" message.
    """
    chat_id = update.message.chat_id
    code, text = webcontrol.execute('detection', 'status')
    reply = text if code == 200 else "Try it later"
    bot.sendMessage(chat_id=chat_id, text=reply)
| 2.171875 | 2 |
from model_boundd import MIPwithBounds
import networkx as nx
def get_vars_and_coefficients(elements, start=3):
    """Split an .rlv line (already ``line.split()``) into variable names and
    float coefficients.

    Coefficients occupy the even offsets from *start*; each variable name
    immediately follows its coefficient. Returns (names, coefficients).
    """
    names = list(elements[start + 1::2])
    coefficients = [float(raw) for raw in elements[start::2]]
    return names, coefficients
def check_sol(filepath, value_dict, eps=1e-8, print_values=False):
    """Check solution given by input variables for feasibility.

    Args:
        filepath: str, path to .rlv file with AssertOut for output constraints
        value_dict: dict, mapping input variables names (str) to values of the solution
            (mutated in place: every internal node's computed value is added)
        eps: float, tolerance for checking
        print_values: bool, print every computed node value when True
    Returns:
        true, if solution is valid, false otherwise
    """
    # Build the network as a DiGraph; node-type sets drive evaluation later.
    graph = nx.DiGraph()
    relu_nodes = set()
    max_pool_nodes = set()
    linear_nodes = set()
    relu_in_nodes = set()

    mip = MIPwithBounds(filepath, 1e-7)
    model, vars = mip.read_file_into_graph()
    # vars is a dict of the input nodes
    output_cons = []
    input_cons = []
    input_bounds = {}

    # Parse the .rlv file a second time (independently of MIPwithBounds).
    with open(filepath, "r") as f:
        for line in f:
            if line.startswith("#"):
                continue
            elements = line.split()
            if elements[0] == "Input":
                input_bounds[elements[1]] = {"lb": None, "ub": None}
                graph.add_node(elements[1], node_type="input")
            if elements[0] == "ReLU":
                # Split each ReLU into a weighted-sum "<name>_in" node and the
                # actual ReLU node it feeds.
                bias = float(elements[2])
                variables, coeffs = get_vars_and_coefficients(elements)
                relu_nodes.add(elements[1])
                graph.add_node(elements[1] + "_in", bias=bias)
                graph.add_edge(elements[1] + "_in", elements[1])
                relu_in_nodes.add(elements[1] + "_in")
                for v, w in zip(variables, coeffs):
                    graph.add_edge(v, elements[1] + "_in", weight=w)
            if elements[0] == "Linear":
                linear_nodes.add(elements[1])
                bias = float(elements[2])
                variables, coeffs = get_vars_and_coefficients(elements)
                graph.add_node(elements[1], bias=bias)
                for v, w in zip(variables, coeffs):
                    graph.add_edge(v, elements[1], weight=w)
            if elements[0] == "MaxPool":
                max_pool_nodes.add(elements[1])
                graph.add_node(elements[1], node_type="max_pool")
                graph.add_edges_from(((v, elements[1]) for v in elements[2:]), weight=1)
            if elements[0] == "AssertOut":
                output_cons.append((float(elements[2]), elements[1], get_vars_and_coefficients(elements)))
            if elements[0] == "Assert":
                input_cons.append((float(elements[2]), elements[1], get_vars_and_coefficients(elements)))
                """if len(elements) == 5 and elements[-1] in input_bounds:
                    if elements[1] == "<=":
                        new_lb = float(elements[2]) / float(elements[3])
                        if input_bounds[elements[-1]]["lb"] is None or input_bounds[elements[-1]]["lb"] < new_lb:
                            input_bounds[elements[-1]]["lb"] = new_lb
                    elif elements[1] == ">=":
                        new_ub = float(elements[2]) / float(elements[3])
                        if input_bounds[elements[-1]]["ub"] is None or input_bounds[elements[-1]]["ub"] > new_ub:
                            input_bounds[elements[-1]]["ub"] = new_ub"""

    # First check the input constraints: each is (lhs, direction, (vars, coeffs))
    # meaning lhs <= / >= sum(coeff * value).
    val = True
    for lhs, direction, (variables, coeffs) in input_cons:
        if direction == "<=":
            if lhs > sum(c * value_dict[v] for v, c in zip(variables, coeffs)) + eps:
                val = False
                print(lhs, direction, variables, coeffs)
                break
        elif direction == ">=":
            if lhs < sum(c * value_dict[v] for v, c in zip(variables, coeffs)) - eps:
                val = False
                print(lhs, direction, variables, coeffs)
                break
        else:
            raise NotImplementedError

    if not val:  # input constraints do not hold
        print("input constraints not fulfilled")
        return False
    else:
        if print_values:
            print("input constraints hold")

    # Forward-evaluate the network in topological order, recording per-ReLU
    # phases (1 = active, 0 = inactive; -1 = not yet visited).
    nodes_sorted = list(nx.topological_sort(graph))
    relu_phases = {x: -1 for x in relu_nodes}
    relu_phases_all = {x: 0 for x in relu_nodes}
    for node in nodes_sorted:
        if node in vars:
            continue  # skip the input nodes
        new_value = 0
        if node in linear_nodes or node in relu_in_nodes:
            for n in graph.predecessors(node):
                new_value += graph.edges[n, node]["weight"] * value_dict[n]
            # NOTE(review): graph.node is the NetworkX <2.4 attribute API;
            # newer releases require graph.nodes -- confirm the pinned version.
            new_value += graph.node[node]["bias"]
        elif node in max_pool_nodes:
            new_value = max(value_dict[n] for n in graph.predecessors(node))
        elif node in relu_nodes:
            pred = list(graph.predecessors(node))
            assert len(pred) == 1
            if value_dict[pred[0]] > 0:  # apply ReLU here
                new_value = value_dict[pred[0]]
                relu_phases[node] = 1
            else:
                relu_phases[node] = 0
        value_dict[node] = new_value

    # Every ReLU must have been visited exactly once (phase 0 or 1).
    for relu, phase in relu_phases.items():
        assert phase >= 0
        relu_phases_all[relu] += phase

    if print_values:
        for s in value_dict.items():
            print(s)

    val = True
    # check the ouput constraints
    #print(output_cons)
    for lhs, direction, (variables, coeffs) in output_cons:
        if direction == "<=":
            if lhs > sum(c * value_dict[v] for v, c in zip(variables, coeffs)) + eps:
                val = False
                break
        elif direction == ">=":
            if lhs < sum(c * value_dict[v] for v, c in zip(variables, coeffs)) - eps:
                val = False
                break
        else:
            raise NotImplementedError
    return val
if __name__ == "__main__":
    # Ad-hoc driver: the benchmark paths and the hand-written value_dict below
    # are experiment-specific; edit before running.
    directory = "../benchmarks/collisionDetection/"
    directory2 = "../../benchmarks/scip/ACAS/"
    directory3 = "../benchmarks/twinladder/"
    directory5_out = "../benchmarks/mnist/"

    filepath = directory2 + "property2/5_3.rlv"
    #filepath = directory2 + "property5/property.rlv"
    #filepath = directory2 + "property_3.rlv"
    file = "../logs/neurify_11_10_0_adv"
    with open(file, "r") as f:
        list_of_pixels = [float(x) for x in f.readline()[:-1].split()]
    #value_dict = {"in_" + str(i): x*255 for i, x in enumerate(list_of_pixels)}
    # NOTE(review): list_of_pixels is read but unused; the hard-coded ACAS
    # input below is what actually gets checked.
    value_dict = {'in_0': 55947.69100, 'in_1': 0.198666, 'in_2': -3.051407, 'in_3': 1145.0000, 'in_4': 50.768384}
    if check_sol(filepath, value_dict=value_dict, eps=1e-2, print_values=True):
        print("valid solution found -> SAT")
    else:
        print("the solution is not valid")
| 2.6875 | 3 |
#! python3
import random

# Count streaks of six identical coin flips across 10,000 experiments of
# 100 flips each.
# Fix: removed a stray text-extraction artifact (' | 3.84375 | 4 |') fused
# onto the final print line, which broke the syntax.
numberOfStreaks = 0
for experimentNumber in range(10000):
    # Code that creates a list of 100 'heads' or 'tails' values.
    randomList = []
    for listEntry in range(100):
        randomList.append('H' if random.randint(0, 1) == 1 else 'T')

    # Code that checks if there is a streak of 6 heads or tails in a row.
    # Counters reset after each detected streak so runs are not double-counted.
    counterH, counterT = 0, 0
    for flip in randomList:
        if flip == 'H':
            counterH += 1
            counterT = 0
        else:
            counterT += 1
            counterH = 0
        if counterH == 6 or counterT == 6:
            numberOfStreaks += 1
            counterH, counterT = 0, 0

# NOTE: one experiment can contribute several streaks, so this figure is the
# mean number of streaks per 100 experiments rather than a true probability.
print('Chance of streak: %s%%' % (numberOfStreaks / 100))
from django.shortcuts import render, redirect
from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
"""
SSO Views
"""
@login_required
def sso_callback(request):
    """EVE SSO OAuth callback: exchange the auth code for a token and attach
    it to the matching EveCharacter.

    Creates the EveToken, resolves its scopes, (re)links the character, and
    makes it the user's primary character if they have none yet.
    """
    code = request.GET.get('code', None)
    eve_client = EveClient.get_instance()

    # verify token
    esi_security = EveClient.get_esi_security()
    esi_token = esi_security.auth(code)
    esi_character = esi_security.verify()

    # create new token
    new_token = EveToken.objects.get_or_create(
        access_token=esi_token['access_token'],
        refresh_token=esi_token['refresh_token'],
        expires_in=esi_token['expires_in'],
        user=request.user
    )[0]

    # set scopes M2M
    scopes = EveScope.objects.filter(name__in=esi_character['scp'])
    if scopes.count() != len(esi_character['scp']):
        # A scope came back that our EveScope table does not know about.
        logger.error(
            f"Whoa there. Somehow we added a scope we don't know about. Pass this to Krypted Developers: \n ${esi_character['scp']}")
    new_token.scopes.set(scopes)

    # find or create character
    # NOTE(review): 'sub' presumably looks like 'CHARACTER:EVE:<id>'; the
    # character id is the last colon-separated field -- confirm with ESI docs.
    if EveCharacter.objects.filter(external_id=esi_character['sub'].split(":")[-1]).exists():
        character = EveCharacter.objects.get(
            external_id=esi_character['sub'].split(":")[-1])
        if character.token:
            # Replace any previous token for this character.
            old_token = character.token
            old_token.delete()
        character.token = new_token
        character.save()
    else:
        character = EveCharacter.objects.create(
            external_id=esi_character['sub'].split(":")[-1],
            name=esi_character['name'],
            token=new_token,
        )

    # if no primary user, set
    if not PrimaryEveCharacterAssociation.objects.filter(user=request.user).exists():
        PrimaryEveCharacterAssociation.objects.create(
            user=request.user,
            character=character
        )

    return redirect('/')
@login_required
def add_sso_token(request):
    """Redirect the user to the EVE SSO login URL; fall back to home with a
    warning when the EVE settings are misconfigured."""
    try:
        return redirect(EveClient.get_instance().get_sso_url())
    except Exception:
        logger.exception("Failed to get SSO url from EveClient")
        messages.warning(
            request, "Eve Settings are not configured correctly. Contact your administrator.")
        return redirect('/')
@login_required
def update_sso_token(request, token_id):
    """Send the user to SSO to (re)consent the scopes requested for a token."""
    eve_token = EveToken.objects.get(pk=token_id)
    scope_list = EveScope.convert_to_list(eve_token.requested_scopes.all())
    return redirect(EveClient.get_instance().get_sso_url(scope_list))
@login_required
def remove_sso_token(request, pk):
    """Delete an EveToken (and its primary-character link) owned by the requester.

    Fix: the success message used to be emitted unconditionally, so a user
    refused with "You cannot delete someone elses token." ALSO saw the
    success message; the success path is now confined to the owner branch.
    """
    eve_token = EveToken.objects.get(pk=pk)
    if request.user == eve_token.user:
        try:
            # Drop the primary-character association before the token goes away.
            if PrimaryEveCharacterAssociation.objects.filter(character=eve_token.evecharacter).exists():
                PrimaryEveCharacterAssociation.objects.filter(
                    character=eve_token.evecharacter).delete()
        except Exception:
            logger.exception(
                "Encountered error when deleting token character associations")
        eve_token.delete()
        messages.success(
            request, "Successfully deleted EVE Online token and character data")
    else:
        messages.error(request, "You cannot delete someone elses token.")
    return redirect("/")
| 1.9375 | 2 |
from project.server.common.escape import cleanify
class TestReplacements:
    """Unit tests for cleanify's German umlaut/eszett transliteration."""

    def test_ae(self):
        # Empty input must pass through unchanged.
        assert cleanify("") == ""
        assert cleanify("Äpfel") == "Aepfel"
        assert cleanify("äpfel") == "aepfel"
        assert cleanify("Äpfel Äpfel äpfel") == "Aepfel Aepfel aepfel"

    def test_oe(self):
        assert cleanify("Ömel") == "Oemel"
        assert cleanify("ömel") == "oemel"
        assert cleanify("Ömel ömel Ömel") == "Oemel oemel Oemel"

    def test_ue(self):
        assert cleanify("Ümel") == "Uemel"
        assert cleanify("ümel") == "uemel"
        assert cleanify("Ümel ümel Ümel") == "Uemel uemel Uemel"

    def test_ss(self):
        # ß has no uppercase variant to cover here.
        assert cleanify("Scheiße") == "Scheisse"
| 2.34375 | 2 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for creating a new deployment config."""
import json
import os
import shutil
import subprocess
import sys
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient import discovery
import google_auth_httplib2
import httplib2
from local.butler import appengine
from local.butler import common
_REQUIRED_SERVICES = (
'appengineflex.googleapis.com',
'bigquery-json.googleapis.com',
'cloudapis.googleapis.com',
'cloudbuild.googleapis.com',
'clouddebugger.googleapis.com',
'clouderrorreporting.googleapis.com',
'cloudprofiler.googleapis.com',
'cloudresourcemanager.googleapis.com',
'compute.googleapis.com',
'containerregistry.googleapis.com',
'datastore.googleapis.com',
'deploymentmanager.googleapis.com',
'file.googleapis.com',
'iam.googleapis.com',
'iamcredentials.googleapis.com',
'logging.googleapis.com',
'monitoring.googleapis.com',
'oslogin.googleapis.com',
'pubsub.googleapis.com',
'redis.googleapis.com',
'replicapool.googleapis.com',
'replicapoolupdater.googleapis.com',
'resourceviews.googleapis.com',
'siteverification.googleapis.com',
'sourcerepo.googleapis.com',
'stackdriver.googleapis.com',
'storage-api.googleapis.com',
'storage-component.googleapis.com',
'vpcaccess.googleapis.com',
)
_NUM_RETRIES = 2
_ENABLE_SERVICE_BATCH_SIZE = 19
class DomainVerifier(object):
  """Domain verifier.

  Thin wrapper over the Google Site Verification API, authenticated via an
  interactive OAuth installed-app flow.
  """

  def __init__(self, oauth_client_secrets_path):
    # Run the console OAuth flow to obtain site-verification credentials.
    flow = InstalledAppFlow.from_client_secrets_file(
        oauth_client_secrets_path,
        scopes=['https://www.googleapis.com/auth/siteverification'])

    credentials = flow.run_console()
    http = google_auth_httplib2.AuthorizedHttp(
        credentials, http=httplib2.Http())

    self.api = discovery.build('siteVerification', 'v1', http=http)

  def get_domain_verification_tag(self, domain):
    """Get the domain verification meta tag."""
    response = self.api.webResource().getToken(
        body={
            'verificationMethod': 'FILE',
            'site': {
                'identifier': domain,
                'type': 'SITE',
            }
        }).execute(num_retries=_NUM_RETRIES)
    return response['token']

  def verify(self, domain):
    """Verify the domain verification meta tag."""
    self.api.webResource().insert(
        body={
            'site': {
                'identifier': domain,
                'type': 'SITE',
            }
        },
        verificationMethod='FILE').execute(num_retries=_NUM_RETRIES)

  def add_owner(self, domain, email):
    """Add a new domain owner (no-op if *email* is already an owner)."""
    response = self.api.webResource().get(id=domain).execute(
        num_retries=_NUM_RETRIES)

    if email not in response['owners']:
      response['owners'].append(email)

    self.api.webResource().update(
        id=domain, body=response).execute(num_retries=_NUM_RETRIES)
def get_numeric_project_id(gcloud, project_id):
  """Get the numeric project ID for *project_id* via gcloud."""
  description = gcloud.run('projects', 'describe', project_id,
                           '--format=json')
  return json.loads(description)['projectNumber']
def app_engine_service_account(project_id):
"""Get the default App Engine service account."""
return project_id + '@appspot.gserviceaccount.com'
def compute_engine_service_account(gcloud, project_id):
  """Get the default compute engine service account.

  Fix: restores the suffix literal (destroyed by a redaction artifact) --
  the default Compute Engine SA is
  <project-number>[email protected].
  """
  return (get_numeric_project_id(gcloud, project_id) +
          '[email protected]')
def enable_services(gcloud):
  """Enable required services in batches of _ENABLE_SERVICE_BATCH_SIZE.

  Fix: the slice upper bound was ``i + end`` where ``end`` was already
  ``i + _ENABLE_SERVICE_BATCH_SIZE``, producing oversized, overlapping
  batches for any list longer than two batches; each call now enables
  exactly the next batch of services.
  """
  for i in range(0, len(_REQUIRED_SERVICES), _ENABLE_SERVICE_BATCH_SIZE):
    batch = _REQUIRED_SERVICES[i:i + _ENABLE_SERVICE_BATCH_SIZE]
    gcloud.run('services', 'enable', *batch)
def replace_file_contents(file_path, replacements):
  """Apply (find, replace) pairs to a file; rewrite it only when changed."""
  with open(file_path) as f:
    original = f.read()

  new_contents = original
  for find, replace in replacements:
    new_contents = new_contents.replace(find, replace)

  # Skip the write (and the mtime bump) when nothing changed.
  if new_contents != original:
    with open(file_path, 'w') as f:
      f.write(new_contents)
def project_bucket(project_id, bucket_name):
  """Return a project-specific bucket name (<name>.<project>.appspot.com)."""
  return '%s.%s.appspot.com' % (bucket_name, project_id)
def create_new_config(gcloud, project_id, new_config_dir,
                      domain_verification_tag, bucket_replacements,
                      gae_location, gce_zone, firebase_api_key):
  """Create a new config directory by copying configs/test and substituting
  project-specific values into every file."""
  if os.path.exists(new_config_dir):
    print('Overwriting existing directory.')
    shutil.rmtree(new_config_dir)

  gae_region = appengine.region_from_location(gae_location)
  # Order matters: the more specific 'test-clusterfuzz-service-account-email'
  # must be replaced before the 'test-clusterfuzz' prefix it contains.
  replacements = [
      ('test-clusterfuzz-service-account-email',
       compute_engine_service_account(gcloud, project_id)),
      ('test-clusterfuzz', project_id),
      ('test-project', project_id),
      ('domain-verification-tag', domain_verification_tag),
      ('gae-region', gae_region),
      ('gce-zone', gce_zone),
      ('firebase-api-key', firebase_api_key),
  ]
  replacements.extend(bucket_replacements)

  shutil.copytree(os.path.join('configs', 'test'), new_config_dir)
  # Apply the substitutions to every file in the copied tree.
  for root_dir, _, filenames in os.walk(new_config_dir):
    for filename in filenames:
      file_path = os.path.join(root_dir, filename)
      replace_file_contents(file_path, replacements)
def deploy_appengine(gcloud, config_dir, appengine_location):
  """Deploy to App Engine."""
  # EAFP: 'app describe' fails when no App Engine app exists yet.
  try:
    gcloud.run('app', 'describe')
  except common.GcloudError:
    # Create new App Engine app if it does not exist.
    gcloud.run('app', 'create', '--region=' + appengine_location)

  subprocess.check_call([
      'python', 'butler.py', 'deploy', '--force', '--targets', 'appengine',
      '--prod', '--config-dir', config_dir
  ])
def deploy_zips(config_dir):
  """Deploy source zips via butler."""
  command = [
      'python', 'butler.py', 'deploy', '--force', '--targets', 'zips',
      '--prod', '--config-dir', config_dir
  ]
  subprocess.check_call(command)
def create_buckets(project_id, buckets):
  """Create any GCS buckets in *buckets* that do not already exist."""
  gsutil = common.Gsutil()
  for bucket in buckets:
    bucket_url = 'gs://' + bucket
    # EAFP: probing the storage class fails for a missing bucket.
    try:
      gsutil.run('defstorageclass', 'get', bucket_url)
    except common.GsutilError:
      gsutil.run('mb', '-p', project_id, bucket_url)
def set_cors(config_dir, buckets):
  """Apply the project's CORS configuration to each bucket."""
  gsutil = common.Gsutil()
  cors_path = os.path.join(config_dir, 'gae', 'cors.json')
  for bucket in buckets:
    target = 'gs://' + bucket
    gsutil.run('cors', 'set', cors_path, target)
def add_service_account_role(gcloud, project_id, service_account, role):
  """Grant an IAM *role* to *service_account* on *project_id*."""
  member = 'serviceAccount:' + service_account
  gcloud.run('projects', 'add-iam-policy-binding', project_id,
             '--member', member, '--role', role)
def execute(args):
  """Create a new config directory and deployment.

  End-to-end flow: enable APIs, verify domain ownership, write the config
  dir, deploy App Engine, grant SA roles, create buckets, deploy zips.
  """
  # Check this early on, as the deployment at the end would fail otherwise.
  if common.is_git_dirty():
    print('Your checkout contains uncommitted changes. Cannot proceed.')
    sys.exit(1)
  verifier = DomainVerifier(args.oauth_client_secrets_path)

  gcloud = common.Gcloud(args.project_id)
  enable_services(gcloud)

  # Get tag for domain verification.
  appspot_domain = 'https://' + args.project_id + '.appspot.com/'
  domain_verification_tag = verifier.get_domain_verification_tag(appspot_domain)

  blobs_bucket = project_bucket(args.project_id, 'blobs')
  deployment_bucket = project_bucket(args.project_id, 'deployment')
  bucket_replacements = (
      ('test-blobs-bucket', blobs_bucket),
      ('test-deployment-bucket', deployment_bucket),
      ('test-bigquery-bucket', project_bucket(args.project_id, 'bigquery')),
      ('test-backup-bucket', project_bucket(args.project_id, 'backup')),
      ('test-coverage-bucket', project_bucket(args.project_id, 'coverage')),
      ('test-fuzzer-logs-bucket', project_bucket(args.project_id,
                                                 'fuzzer-logs')),
      ('test-corpus-bucket', project_bucket(args.project_id, 'corpus')),
      ('test-quarantine-bucket', project_bucket(args.project_id, 'quarantine')),
      ('test-shared-corpus-bucket',
       project_bucket(args.project_id, 'shared-corpus')),
      ('test-fuzz-logs-bucket', project_bucket(args.project_id, 'fuzz-logs')),
      ('test-mutator-plugins-bucket',
       project_bucket(args.project_id, 'mutator-plugins')),
  )

  # Write new configs.
  create_new_config(gcloud, args.project_id, args.new_config_dir,
                    domain_verification_tag, bucket_replacements,
                    args.appengine_location, args.gce_zone,
                    args.firebase_api_key)
  prev_dir = os.getcwd()
  os.chdir(args.new_config_dir)

  # Deploy App Engine and finish verification of domain.
  os.chdir(prev_dir)
  deploy_appengine(
      gcloud, args.new_config_dir, appengine_location=args.appengine_location)
  verifier.verify(appspot_domain)

  # App Engine service account requires:
  # - Domain ownership to create domain namespaced GCS buckets
  # - Datastore export permission for periodic backups.
  # - Service account signing permission for GCS uploads.
  service_account = app_engine_service_account(args.project_id)
  verifier.add_owner(appspot_domain, service_account)
  add_service_account_role(gcloud, args.project_id, service_account,
                           'roles/datastore.importExportAdmin')
  add_service_account_role(gcloud, args.project_id, service_account,
                           'roles/iam.serviceAccountTokenCreator')

  # Create buckets now that domain is verified.
  create_buckets(args.project_id, [bucket for _, bucket in bucket_replacements])

  # Set CORS settings on the buckets.
  set_cors(args.new_config_dir, [blobs_bucket])

  # Set deployment bucket for the cloud project.
  gcloud.run('compute', 'project-info', 'add-metadata',
             '--metadata=deployment-bucket=' + deployment_bucket)

  # Deploy source zips.
  deploy_zips(args.new_config_dir)
| 1.265625 | 1 |
#!/usr/local/bin/python3.5 -u
import sys

# Prompt without a trailing newline, then classify the number read from stdin.
sys.stdout.write("Enter a number: ")
a = float(sys.stdin.readline())
if a < 0:
    print("negative")
elif a == 0:
    print("zero")
elif a < 10:
    print("small")
else:
    print("large")
| 3.859375 | 4 |
# Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Jinja folklore wrappers and handling of inline copy usage.
"""
from .Importing import importFromInlineCopy
environments = {}
def unlikely_if(value):
    """Return the C branch hint "unlikely" for a truthy *value*, else ""."""
    return "unlikely" if value else ""
def unlikely_or_likely_from(value):
    """Return "unlikely" for a truthy *value* and "likely" otherwise."""
    return "unlikely" if value else "likely"
def getEnvironment(module_name):
    """Return (and cache per module) the Jinja2 environment for the module's
    'templates' package."""
    if module_name not in environments:
        # Import dependencies, sadly we get to manage this ourselves.
        importFromInlineCopy("markupsafe", must_exist=True)
        jinja2 = importFromInlineCopy("jinja2", must_exist=True)

        # NOTE(review): this plain import immediately re-binds the name
        # assigned above; presumably the inline copy put jinja2 on sys.path
        # -- confirm whether the assignment is needed at all.
        import jinja2

        env = jinja2.Environment(
            loader=jinja2.PackageLoader(module_name, "templates"),
            # extensions=["jinja2.ext.do"],
            trim_blocks=True,
            lstrip_blocks=True,
        )

        # For shared global functions.
        env.globals.update(
            {
                "unlikely_if": unlikely_if,
                "unlikely_or_likely_from": unlikely_or_likely_from,
            }
        )

        # Fail loudly on undefined template variables.
        env.undefined = jinja2.StrictUndefined

        environments[module_name] = env

    return environments[module_name]
def getTemplate(module_name, template_name):
    """Load *template_name* from the cached environment of *module_name*."""
    environment = getEnvironment(module_name)
    return environment.get_template(template_name)
| 1.789063 | 2 |
tests/models/programdb/control/control_unit_test.py | weibullguy/ramstk | 4 | 12762934 | <gh_stars>1-10
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.models.programdb.control.control_unit_test.py is part of The RAMSTK
# Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Class for testing FMEA Control algorithms and models."""
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKControlRecord
from ramstk.models.dbtables import RAMSTKControlTable
from tests import (
MockDAO,
UnitTestDeleteMethods,
UnitTestGetterSetterMethods,
UnitTestInsertMethods,
UnitTestSelectMethods,
)
@pytest.mark.usefixtures("test_record_model", "unit_test_table_model")
class TestCreateControlModels:
    """Class for unit testing Control model __init__() methods.

    Because each table model contains unique attributes, these methods must be
    local to the module being tested.
    """

    @pytest.mark.unit
    def test_record_model_create(self, test_record_model):
        """Return a Control record model instance."""
        assert isinstance(test_record_model, RAMSTKControlRecord)

        # Verify class attributes are properly initialized.
        assert test_record_model.__tablename__ == "ramstk_control"
        assert test_record_model.description == "Test FMEA Control #1 for Cause ID #3."
        assert test_record_model.type_id == "Detection"

    @pytest.mark.unit
    def test_table_model_create(self, unit_test_table_model):
        """Return a Control table model instance."""
        assert isinstance(unit_test_table_model, RAMSTKControlTable)
        assert isinstance(unit_test_table_model.tree, Tree)
        assert isinstance(unit_test_table_model.dao, MockDAO)
        # Table/column wiring and tree bookkeeping defaults.
        assert unit_test_table_model._db_id_colname == "fld_control_id"
        assert unit_test_table_model._db_tablename == "ramstk_control"
        assert unit_test_table_model._tag == "control"
        assert unit_test_table_model._root == 0
        assert unit_test_table_model._revision_id == 0
        assert unit_test_table_model._parent_id == 0
        assert unit_test_table_model.last_id == 0
        # The table model must be subscribed to every CRUD/attribute message.
        assert pub.isSubscribed(
            unit_test_table_model.do_select_all, "selected_revision"
        )
        assert pub.isSubscribed(
            unit_test_table_model.do_get_attributes, "request_get_control_attributes"
        )
        assert pub.isSubscribed(
            unit_test_table_model.do_set_attributes, "request_set_control_attributes"
        )
        assert pub.isSubscribed(
            unit_test_table_model.do_set_attributes, "wvw_editing_control"
        )
        assert pub.isSubscribed(
            unit_test_table_model.do_update, "request_update_control"
        )
        assert pub.isSubscribed(
            unit_test_table_model.do_get_tree, "request_get_control_tree"
        )
        assert pub.isSubscribed(
            unit_test_table_model.do_delete, "request_delete_control"
        )
        assert pub.isSubscribed(
            unit_test_table_model.do_insert, "request_insert_control"
        )
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestSelectControl(UnitTestSelectMethods):
    """Class for unit testing Control table do_select() and do_select_all()."""

    # Shared select tests from the base class are enabled and parametrized
    # by the record class and tag below.
    __test__ = True

    _record = RAMSTKControlRecord
    _tag = "control"
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestInsertControl(UnitTestInsertMethods):
    """Class for unit testing Control table do_insert() method."""

    # Shared insert tests from the base class, parametrized below.
    __test__ = True

    _next_id = 0
    _record = RAMSTKControlRecord
    _tag = "control"

    @pytest.mark.skip(reason="Control records are non-hierarchical.")
    def test_do_insert_child(self, test_attributes, unit_test_table_model):
        """Should not run because Controls are not hierarchical."""
        pass
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestDeleteControl(UnitTestDeleteMethods):
    """Class for unit testing Control table do_delete() method."""

    # Shared delete tests from the base class, parametrized below.
    __test__ = True

    _next_id = 0
    _record = RAMSTKControlRecord
    _tag = "control"
@pytest.mark.usefixtures("test_attributes", "test_record_model")
class TestGetterSetterControl(UnitTestGetterSetterMethods):
    """Class for unit testing Control table methods that get or set."""

    __test__ = True

    # ID columns excluded from the shared attribute round-trip checks.
    _id_columns = [
        "revision_id",
        "hardware_id",
        "mode_id",
        "mechanism_id",
        "cause_id",
        "control_id",
    ]

    # Attribute (and its default) used by the shared set/default tests.
    _test_attr = "type_id"
    _test_default_value = ""

    @pytest.mark.unit
    def test_get_record_model_attributes(self, test_record_model):
        """Should return a dict of attribute key:value pairs.

        This method must be local because the attributes are different for each
        database record model.
        """
        _attributes = test_record_model.get_attributes()

        assert isinstance(_attributes, dict)
        assert _attributes["description"] == "Test FMEA Control #1 for Cause ID #3."
        assert _attributes["type_id"] == "Detection"
| 2.234375 | 2 |
tools/my_infer.py | flying0712/reid-strong-baseline | 0 | 12762935 | <reponame>flying0712/reid-strong-baseline
from data.datasets.dataset_loader import read_image  # image-reading helper; you can write your own, this one ships with the baseline
import os
import torch
import numpy as np
import json
from utils.re_ranking import re_ranking

os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # select which GPU to use
# Fall back to CPU when CUDA is not available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Root directory containing the query_b/ and gallery_b/ image folders.
data_root = '/root/code/my_data/'
def my_inference(model, transform, batch_size):  # model, preprocessing transform, batch size
    """Run re-id inference and write a top-200 retrieval submission file.

    Loads every query and gallery image from ``data_root``, extracts features
    with ``model`` in batches, re-ranks the query-to-gallery distance matrix,
    and dumps the 200 closest gallery file names per query image to
    ``submission_B_4.json``.

    Args:
        model: a torch feature-extraction network (moved to ``device`` here).
        transform: preprocessing callable applied to each PIL image and
            returning a tensor.
        batch_size: number of images per forward pass.
    """
    query_list = list()
    # with open(data_root + 'query_a_list.txt', 'r') as f:
    #     # txt file listing the test-set queries
    #     lines = f.readlines()
    #     for i, line in enumerate(lines):
    #         data = line.split(" ")
    #         image_name = data[0].split("/")[1]
    #         img_file = os.path.join(data_root + 'query_b', image_name)  # test-set query folder
    #         query_list.append(img_file)
    query_list = [os.path.join(data_root + 'query_b', x) for x in  # test-set query folder
                  os.listdir(data_root + 'query_b')]
    gallery_list = [os.path.join(data_root + 'gallery_b', x) for x in  # test-set gallery folder
                    os.listdir(data_root + 'gallery_b')]
    query_num = len(query_list)

    # Read and preprocess all images; queries first so that the first
    # query_num rows of the feature matrix belong to the queries.
    img_list = list()
    for q_img in query_list:
        q_img = read_image(q_img)
        q_img = transform(q_img)
        img_list.append(q_img)
    for g_img in gallery_list:
        g_img = read_image(g_img)
        g_img = transform(g_img)
        img_list.append(g_img)

    # img_list = img_list[:1000]
    # Number of batches, rounded up to cover the remainder.
    iter_n = int(len(img_list)/batch_size)
    if len(img_list) % batch_size != 0:
        iter_n += 1
    # img_list = img_list[0:iter_n*batch_size]
    print(iter_n)
    # NOTE(review): this stacks ALL images into one GPU tensor up front —
    # memory use scales with the full dataset, not the batch size.
    img_data = torch.Tensor([t.numpy() for t in img_list]).cuda()
    # img_data = torch.Tensor([t.numpy() for t in img_list]).cpu
    model = model.to(device)
    model.eval()

    # Extract features batch by batch under no_grad to save memory.
    all_feature = list()
    for i in range(iter_n):
        print("batch ----%d----" % (i))
        batch_data = img_data[i*batch_size:(i+1)*batch_size]
        with torch.no_grad():
            batch_feature = model(batch_data).detach().cpu()
            # print(batch_feature)
            # batch_feature = model( batch_data ).detach().cuda()
        all_feature.append(batch_feature)
    print('done')

    all_feature = torch.cat(all_feature)
    gallery_feat = all_feature[query_num:]
    query_feat = all_feature[:query_num]

    distmat = re_ranking(query_feat, gallery_feat, k1=20, k2=6, lambda_value=0.3)  # re-ranking step
    # distmat = distmat  # if using euclidean_dist without re-ranking, change to: distamt = distamt.numpy()
    num_q, num_g = distmat.shape
    print(num_q)
    # For each query, take the indices of the 200 nearest gallery images.
    indices = np.argsort(distmat, axis=1)
    max_200_indices = indices[:, :200]
    print(max_200_indices)

    res_dict = dict()
    for q_idx in range(num_q):
        print(query_list[q_idx])
        # Map back from feature-row indices to bare file names.
        filename = query_list[q_idx][query_list[q_idx].rindex("/")+1:]
        max_200_files = [gallery_list[i][gallery_list[i].rindex("/")+1:] for i in max_200_indices[q_idx]]
        res_dict[filename] = max_200_files

    with open(r'submission_B_4.json', 'w' ,encoding='utf-8') as f:  # submission file
        json.dump(res_dict, f)
# if __name__ == '__main__':
# my_inference()
| 2.375 | 2 |
tests/optimizers/test_binary.py | jole6826/pyswarms | 1 | 12762936 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import modules
import pytest
import numpy as np
# Import from package
from pyswarms.discrete import BinaryPSO
@pytest.mark.parametrize(
    "options",
    [
        {"c2": 0.7, "w": 0.5, "k": 2, "p": 2},
        {"c1": 0.5, "w": 0.5, "k": 2, "p": 2},
        {"c1": 0.5, "c2": 0.7, "k": 2, "p": 2},
        {"c1": 0.5, "c2": 0.7, "w": 0.5, "p": 2},
        {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2},
    ],
)
def test_keyword_exception(options):
    """Tests if exceptions are thrown when keywords are missing"""
    # Each parametrized dict omits exactly one of the required option keys
    # (c1, c2, w, k, p), so constructing the optimizer must raise KeyError.
    with pytest.raises(KeyError):
        BinaryPSO(5, 2, options)
@pytest.mark.parametrize(
    "options",
    [
        {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": -1, "p": 2},
        {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 6, "p": 2},
        {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 5},
    ],
)
def test_invalid_k_or_p_values(options):
    """Tests if exception is thrown when passing
    an invalid value for k or p"""
    # k must be within [0, n_particles] and p must be 1 or 2; the three
    # cases cover negative k, k larger than the swarm, and an invalid p.
    with pytest.raises(ValueError):
        BinaryPSO(5, 2, options)
@pytest.mark.parametrize("velocity_clamp", [[1, 3], np.array([1, 3])])
def test_vclamp_type_exception(velocity_clamp, options):
    """Tests if exception is raised when velocity_clamp type is not a
    tuple"""
    # list and ndarray stand in for "any non-tuple sequence".
    with pytest.raises(TypeError):
        BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
@pytest.mark.parametrize("velocity_clamp", [(1, 1, 1), (2, 3, 1)])
def test_vclamp_shape_exception(velocity_clamp, options):
    """Tests if exception is raised when velocity_clamp's size is not equal
    to 2"""
    # A velocity clamp must be a (min, max) pair; 3-tuples are rejected.
    with pytest.raises(IndexError):
        BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
@pytest.mark.parametrize("velocity_clamp", [(3, 2), (10, 8)])
def test_vclamp_maxmin_exception(velocity_clamp, options):
    """Tests if the max velocity_clamp is less than min velocity_clamp and
    vice-versa"""
    # Both cases have min > max, which must be rejected.
    with pytest.raises(ValueError):
        BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
def test_reset_default_values(binary_reset):
    """Tests if best cost and best pos are set properly when the reset()
    method is called"""
    # binary_reset: fixture presumably yielding an optimizer after reset() —
    # confirm in conftest.  After a reset the best cost must be +inf and the
    # best position empty.
    assert binary_reset.swarm.best_cost == np.inf
    assert set(binary_reset.swarm.best_pos) == set(np.array([]))
@pytest.mark.parametrize(
    "history, expected_shape",
    [
        ("cost_history", (1000,)),
        ("mean_pbest_history", (1000,)),
        ("mean_neighbor_history", (1000,)),
        ("pos_history", (1000, 10, 2)),
        ("velocity_history", (1000, 10, 2)),
    ],
)
def test_training_history_shape(binary_history, history, expected_shape):
    """Test if training histories are of expected shape"""
    # binary_history: fixture presumably yielding an optimizer after a
    # 1000-iteration run with 10 particles in 2 dimensions — confirm in
    # conftest; the expected shapes above encode exactly those sizes.
    pso = vars(binary_history)
    assert np.array(pso[history]).shape == expected_shape
| 2.46875 | 2 |
pytest_faker/plugin.py | pytest-dev/pytest-faker | 37 | 12762937 | """pytest-faker plugin."""
import pytest
from faker import Factory
@pytest.fixture(scope='session')
def faker_locale():
    """Faker locale.

    None by default which means faker's default locale.
    """
    # NOTE: test suites can override this fixture to force a specific locale;
    # the `faker` fixture below consumes it.
    return None
@pytest.fixture(scope='session')
def faker(faker_locale):
    """Faker factory object."""
    # Session scope: one Faker instance shared by all tests, built with the
    # (possibly overridden) locale from the faker_locale fixture.
    return Factory.create(faker_locale)
| 2.203125 | 2 |
deepvariant/core/variantutils.py | rose-brain/deepvariant | 1 | 12762938 | # Copyright 2017 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Variant utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
from deepvariant.core.genomics import struct_pb2
from deepvariant.core.genomics import variants_pb2
from deepvariant.core import ranges
# The alternate allele string for reference (no alt), i.e. the VCF missing
# value '.'.
NO_ALT_ALLELE = '.'
# The alternate allele string for the gVCF "any" alternate allele.
GVCF_ALT_ALLELE = '<*>'
def set_variantcall_gq(variant_call, gq):
  """Sets the 'GQ' info entry of variant_call to the single value gq.

  Any existing 'GQ' values are removed first, so the field ends up holding
  exactly one number_value.

  Args:
    variant_call: third_party.nucleus.protos.VariantCall, modified in place.
    gq: numeric genotype quality to store.
  """
  if 'GQ' in variant_call.info:
    del variant_call.info['GQ']
  variant_call.info['GQ'].values.extend([struct_pb2.Value(number_value=gq)])
def decode_variants(encoded_iter):
  """Yields a genomics.Variant from encoded_iter.

  Args:
    encoded_iter: An iterable that produces binary encoded
      third_party.nucleus.protos.Variant strings.

  Yields:
    A parsed third_party.nucleus.protos.Variant for each
    encoded element of encoded_iter
    in order.
  """
  # Generator: each serialized proto is parsed lazily, one element at a time.
  for encoded in encoded_iter:
    yield variants_pb2.Variant.FromString(encoded)
def variant_position(variant):
  """Returns a new Range at the start position of variant.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A new Range with the same reference_name as variant and start but an end
    that is start + 1. This produces a range that is the single basepair of the
    start of variant, hence the name position.
  """
  # Note: deliberately a 1-bp range [start, start + 1), not variant.end.
  return ranges.make_range(variant.reference_name, variant.start,
                           variant.start + 1)
def variant_range(variant):
  """Returns a new Range covering variant.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A new Range with the same reference_name, start, and end as variant.
  """
  # Unlike variant_position(), this preserves the variant's full span.
  return ranges.make_range(variant.reference_name, variant.start, variant.end)
def variant_range_tuple(variant):
  """Returns a (reference_name, start, end) tuple for variant.

  A common use case for this function is to sort variants by chromosomal
  location, e.g. `sorted(variants, key=variant_range_tuple)`.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A three-tuple with the same reference_name, start, and end as variant.
  """
  chrom = variant.reference_name
  span_start = variant.start
  span_end = variant.end
  return (chrom, span_start, span_end)
@enum.unique
class GenotypeType(enum.Enum):
  """An enumeration of the types of genotypes."""

  # Each enum value is a (full_name, example_gt, class_id) tuple, unpacked
  # into attributes by __init__ below.
  hom_ref = ('homozygous reference', [0, 0], 0)
  het = ('heterozygous', [0, 1], 1)
  hom_var = ('homozygous non-reference', [1, 1], 2)
  no_call = ('no call', [-1, -1], -1)

  def __init__(self, full_name, example_gt, class_id):
    # Human-readable description of the genotype class.
    self.full_name = full_name
    # A representative GT vector for this class (e.g. [0, 1] for het).
    self.example_gt = example_gt
    # Integer label, usable e.g. as a classifier target (-1 for no-call).
    self.class_id = class_id
@enum.unique
class VariantType(enum.Enum):
  """An enumeration of the types of variants."""

  # A variant.proto where there is no alt allele.
  ref = 0
  # A non-reference variant.proto where all ref and alt alleles
  # are single basepairs.
  snp = 1
  # A non-reference variant.proto where at least one of ref or alt alleles
  # are longer than 1 bp.
  indel = 2
def format_filters(variant):
  """Gets a human-readable string showing the filters applied to variant.

  Returns the variant's filter values joined by commas, or '.' when the
  filter field is unset.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A string.
  """
  if not variant.filter:
    return '.'
  return ','.join(variant.filter)
def format_alleles(variant):
  """Gets a string representation of the variant's alleles.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A string of the form ref_bases/alt1,alt2,...
  """
  joined_alts = ','.join(variant.alternate_bases)
  return '{}/{}'.format(variant.reference_bases, joined_alts)
def format_position(variant):
  """Gets a string representation of the variant's position.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A string 'chrom:pos' where pos is start + 1 (start is zero-based).
  """
  one_based_start = variant.start + 1
  return '{}:{}'.format(variant.reference_name, one_based_start)
def is_snp(variant):
  """Is variant a SNP?

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if all alleles of variant are 1 bp in length.
  """
  # Reference-only records are never SNPs.
  if is_ref(variant):
    return False
  if len(variant.reference_bases) != 1:
    return False
  alts = variant.alternate_bases
  return bool(alts) and all(len(alt) == 1 for alt in alts)
def is_indel(variant):
  """Is variant an indel?

  An indel event is simply one where the size of at least one of the alleles
  is > 1.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if the alleles in variant indicate an insertion/deletion event
    occurs at this site.
  """
  # redacted
  # redacted
  # Any allele (ref or alt) longer than one base marks an indel; reference
  # records (no alts / missing alt) are excluded up front.
  return (not is_ref(variant) and
          (len(variant.reference_bases) > 1 or
           any(len(alt) > 1 for alt in variant.alternate_bases)))
def is_biallelic(variant):
  """Returns True if variant has exactly one alternate allele."""
  alt_count = len(variant.alternate_bases)
  return alt_count == 1
def is_multiallelic(variant):
  """Does variant have multiple alt alleles?

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if variant has more than one alt allele.
  """
  alt_count = len(variant.alternate_bases)
  return alt_count > 1
def is_ref(variant):
  """Returns true if variant is a reference record.

  Variant protos can encode sites that aren't actually mutations in the
  sample. For example, the record ref='A', alt='.' indicates that there is
  no mutation present (i.e., alt is the missing value).

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A boolean.
  """
  alts = list(variant.alternate_bases)
  if not alts:
    return True
  # A single '.' alt is the VCF missing value, i.e. still a reference record.
  return alts == ['.']
def variant_type(variant):
  """Gets the VariantType of variant.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    VariantType indicating the type of this variant.
  """
  # Order matters: reference records are classified first, then SNPs;
  # everything else (some allele longer than 1 bp) is treated as an indel.
  if is_ref(variant):
    return VariantType.ref
  elif is_snp(variant):
    return VariantType.snp
  else:
    return VariantType.indel
def is_transition(allele1, allele2):
  """Is the pair of single bp alleles a transition?

  Transitions are the purine<->purine (A<->G) and pyrimidine<->pyrimidine
  (C<->T) substitutions.

  Args:
    allele1: A string of the first allele, must be 1 bp in length.
    allele2: A string of the second allele, must be 1 bp in length.

  Returns:
    True if allele1/allele2 are a transition SNP.

  Raises:
    ValueError: if allele1 and allele2 are equal or aren't 1 bp in length.
  """
  if allele1 == allele2:
    raise ValueError('Alleles must be unique:', allele1, allele2)
  for allele in (allele1, allele2):
    if len(allele) != 1:
      raise ValueError('Alleles must be 1 bp in length.', allele)
  pair = {allele1, allele2}
  return pair == {'A', 'G'} or pair == {'C', 'T'}
def is_insertion(ref, alt):
  """Is alt an insertion w.r.t. ref?

  Args:
    ref: A string of the reference allele.
    alt: A string of the alternative allele.

  Returns:
    True if alt is an insertion w.r.t. ref.
  """
  # An insertion means the alternate allele is strictly longer than ref.
  return len(alt) > len(ref)
def is_deletion(ref, alt):
  """Is alt a deletion w.r.t. ref?

  Args:
    ref: A string of the reference allele.
    alt: A string of the alternative allele.

  Returns:
    True if alt is a deletion w.r.t. ref.
  """
  # A deletion means the alternate allele is strictly shorter than ref.
  return len(alt) < len(ref)
def has_insertion(variant):
  """Does variant have an insertion?

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if the alleles in variant indicate an insertion event
    occurs at this site.
  """
  if not is_indel(variant):
    return False
  ref = variant.reference_bases
  return any(is_insertion(ref, alt) for alt in variant.alternate_bases)
def has_deletion(variant):
  """Does variant have a deletion?

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if the alleles in variant indicate a deletion event
    occurs at this site.
  """
  if not is_indel(variant):
    return False
  ref = variant.reference_bases
  return any(is_deletion(ref, alt) for alt in variant.alternate_bases)
@enum.unique
class AlleleMismatchType(enum.Enum):
  """An enumeration of the types of allele mismatches we detect."""

  # Duplicate alleles in the eval variant.
  duplicate_eval_alleles = 1
  # Duplicate alleles in the truth variant.
  duplicate_true_alleles = 2
  # Truth has an allele that doesn't match any allele in eval.
  unmatched_true_alleles = 3
  # Eval has an allele that doesn't match any allele in truth.
  unmatched_eval_alleles = 4
def allele_mismatches(evalv, truev):
  """Determines the set of allele mismatch discordances between evalv and truev.

  Compares the alleles present in evalv and truev to determine if there are any
  disagreements between the set of called alleles in the two Variant protos. The
  type of differences basically boil down to:

  -- Are there duplicate alt alleles?
  -- Can we find a matching allele in the truev for each allele in evalv, and
     vice versa?

  Two alleles A and B match when they would produce the same sequence of bases
  in ref and alt haplotypes starting at the same position. So CA=>TA is the same
  as C=>T (position is the same, replacing A by A is a noop) but AC=>AT isn't
  the same as C=>T because the former event changes bases 1 bp further along in
  the reference genome than the C=>T allele.

  Args:
    evalv: A third_party.nucleus.protos.Variant.
    truev: A third_party.nucleus.protos.Variant.

  Returns:
    A set of AlleleMismatchType values.
  """
  unmatched_eval_alleles = []
  # Using set() removes duplicate alleles in truth and eval variants.
  allele_matches = {alt: [] for alt in set(truev.alternate_bases)}
  for eval_alt in set(evalv.alternate_bases):
    # Loop over each possible alt allele, adding eval_alt to each matching alt
    # allele.
    found_match = False
    for true_alt in allele_matches:
      if (simplify_alleles(evalv.reference_bases, eval_alt) == simplify_alleles(
          truev.reference_bases, true_alt)):
        # We are a match to true_alt, so record that fact in allele_matches.
        allele_matches[true_alt].append(eval_alt)
        found_match = True
    if not found_match:
      # We never found a match for eval_alt.
      unmatched_eval_alleles.append(eval_alt)

  # At this point we've checked every alt against every eval allele, and are
  # ready to summarize the differences using our AlleleMismatchType enum.
  types = set()
  if len(set(evalv.alternate_bases)) != len(evalv.alternate_bases):
    types.add(AlleleMismatchType.duplicate_eval_alleles)
  if len(set(truev.alternate_bases)) != len(truev.alternate_bases):
    types.add(AlleleMismatchType.duplicate_true_alleles)
  if unmatched_eval_alleles:
    types.add(AlleleMismatchType.unmatched_eval_alleles)
  # Python 3 fix: dict.itervalues() was removed in Py3; .values() behaves
  # identically here and also works under Python 2.
  if any(len(match) != 1 for match in allele_matches.values()):
    types.add(AlleleMismatchType.unmatched_true_alleles)
  return types
def simplify_alleles(*alleles):
  """Simplifies alleles by stripping off common postfix bases.

  For example, simplify("AC", "GC") would produce the tuple "A", "G" as the "C"
  base is a common postfix of both alleles. But simplify("AC", "GT") would
  produce "AC", "GT" as there is no common postfix.

  Note this function will never simplify any allele down to the empty string.
  So if alleles = ['CACA', 'CA'], the longest common postfix is 'CA' but we
  will not produce ['CA', ''] as this is an invalid Variant allele encoding.
  Instead we produce ['CAC', 'C'].

  Args:
    *alleles: A tuple of bases, each as a string, to simplify.

  Returns:
    A tuple, one for each allele in alleles in order, with any common postfix
    bases stripped off.
  """
  # We may strip at most shortest-1 bases so every allele keeps >= 1 base.
  max_strip = min(len(allele) for allele in alleles) - 1
  strip = 0
  # Count consecutive identical trailing bases across all alleles.
  while strip < max_strip and len({a[-(strip + 1)] for a in alleles}) == 1:
    strip += 1
  if strip == 0:
    # Fast path: no shared postfix, return the inputs untouched.
    return alleles
  return tuple(allele[:-strip] for allele in alleles)
def is_filtered(variant):
  """Returns True if variant has a non-PASS filter field, or False otherwise."""
  passing = ('PASS', '.')
  for filter_name in variant.filter:
    if filter_name not in passing:
      return True
  return False
def is_variant_call(variant,
                    require_non_ref_genotype=True,
                    no_calls_are_variant=False):
  """Is variant a non-reference call?

  A Variant proto doesn't always imply that there's a variant present in the
  genome. The call may not have alternate bases, may be filtered, may a have
  hom-ref genotype, etc. This function looks for all of those configurations
  and returns true iff the variant is asserting that a mutation is present
  in the same.

  Note that this code allows a variant without a calls field to be variant,
  but one with a genotype call must have a non-reference genotype to be
  considered variant (if require_non_ref_genotype is True, the default). If
  False, a variant that passes all of the site-level requirements for being
  a variant_call will return a True value, regardless of the genotypes, which
  means that we'll consider a site with a sample with a hom-ref or no-call site
  a variant call.

  Args:
    variant: third_party.nucleus.protos.Variant.
    require_non_ref_genotype: Should we require a site with a genotype call to
      have a non-reference (het, hom-var) genotype for the site to be considered
      a variant call?
    no_calls_are_variant: If a site has genotypes, should we consider no_call
      genotypes as being variant or not?

  Returns:
    True if variant is really a mutation call.

  Raises:
    ValueError: If variant has more than one call (i.e., is multi-sample).
  """
  # Site-level checks first: no alts or a non-PASS filter disqualify the site.
  if not variant.alternate_bases:
    return False
  elif is_filtered(variant):
    return False
  elif not variant.calls or not require_non_ref_genotype:
    return True
  # All tests after this point should only look at genotype-based fields, as
  # we may have aborted out in the prev. line due to require_non_ref_genotype.
  elif len(variant.calls) > 1:
    raise ValueError('Unsupported: multiple genotypes found at', variant)
  elif any(g > 0 for g in variant.calls[0].genotype):
    # At least one non-reference allele index in the genotype.
    return True
  elif no_calls_are_variant:
    # Only a fully no-call genotype (all -1) counts as variant in this mode.
    return all(g == -1 for g in variant.calls[0].genotype)
  else:
    return False
def has_genotypes(variant):
  """Does variant have genotype calls?

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if variant has genotype calls.
  """
  # bool() forces a plain boolean rather than exposing the calls container.
  return bool(variant.calls)
def genotype_type(variant):
  """Gets the GenotypeType for variant.

  If variant doesn't have genotypes, returns no_call. Otherwise
  returns one of no_call, hom_ref, het, or hom_var depending on the
  status of the genotypes in the call field of variant.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A GenotypeType.

  Raises:
    ValueError: If variant has more than one call (i.e., is multi-sample).
  """
  if not has_genotypes(variant):
    return GenotypeType.no_call
  elif len(variant.calls) > 1:
    raise ValueError('Unsupported: multiple genotypes found at', variant)
  else:
    # Classify on the SET of allele indices: {-1} is no-call, {0} hom-ref,
    # any set with two distinct values is het, a single non-zero value is
    # hom-var.
    # NOTE(review): a mixed genotype such as [0, -1] has a two-element set
    # and therefore classifies as het here — confirm this is intended.
    gt = set(variant.calls[0].genotype)
    if gt == {-1}:
      return GenotypeType.no_call
    elif gt == {0}:
      return GenotypeType.hom_ref
    elif len(gt) > 1:
      return GenotypeType.het
    else:
      return GenotypeType.hom_var
def genotype_as_alleles(variant):
  """Gets genotype of the sample in variant as a list of actual alleles.

  Returns the alleles specified by the genotype indices of variant.calls[0].
  For example, if variant.reference_bases = 'A' and variant.alternative_bases
  = ['C'] and the genotypes are [0, 1], this function will return
  ['A', 'C'].

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A list of allele (string) from variant, one for each genotype in
    variant.calls[0], in order.

  Raises:
    ValueError: If variant doesn't have genotypes.
    ValueError: If variant has more than one call (i.e., is multi-sample).
  """
  if not has_genotypes(variant):
    raise ValueError('Not genotypes present in', variant)
  elif len(variant.calls) > 1:
    raise ValueError('Unsupported: multiple genotypes found at', variant)
  else:
    # Genotypes are encoded as integers, where 0 is the reference allele,
    # indices > 0 refer to alt alleles, and the no-call genotype is encoded
    # as -1 in the genotypes. This code relies on this encoding to quickly
    # reference into the alleles by adding 1 to the genotype index, with
    # '.' prepended so that index -1 maps to the missing-value string.
    alleles = ['.', variant.reference_bases] + list(variant.alternate_bases)
    return [alleles[i + 1] for i in variant.calls[0].genotype]
def genotype_quality(variant, default=None):
  """Gets the genotype quality (GQ) value the genotype call in variant.

  If variant doesn't have genotypes, returns default, otherwise tries
  to retrieve the GQ field of the call field, returning that value if
  present otherwise returning default if it's absent.

  Args:
    variant: third_party.nucleus.protos.Variant.
    default: The value for GQ to return if variant has no genotypes or
      if GQ is not present in the genotype call record.

  Returns:
    The GQ value (may be a string or whatever value default is).
  """
  if not has_genotypes(variant):
    return default
  # Only the first call is inspected (single-sample assumption, consistent
  # with the other genotype helpers in this module).
  call = variant.calls[0]
  if 'GQ' in call.info:
    return call.info['GQ'].values[0].number_value
  else:
    return default
def is_gvcf(variant):
  """Returns true if variant encodes a standard gVCF reference block.

  This means in practice that variant has a single alternate allele that is
  the canonical gVCF allele, the GVCF_ALT_ALLELE constant exported here.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    Boolean. True if variant is a gVCF record, False otherwise.
  """
  alts = list(variant.alternate_bases)
  return len(alts) == 1 and alts[0] == GVCF_ALT_ALLELE
def _genotype_order_in_likelihoods(num_alts, ploidy=2):
  """Yields tuples of `ploidy` ints for the given number of alt alleles.

  https://samtools.github.io/hts-specs/VCFv4.1.pdf
  "If A is the allele in REF and B,C,... are the alleles as ordered in ALT,
  the ordering of genotypes for the likelihoods is given by:
  F(j/k) = (k*(k+1)/2)+j. In other words, for biallelic sites the ordering is:
  AA,AB,BB; for triallelic sites the ordering is: AA,AB,BB,AC,BC,CC, etc."

  The biallelic sites in our case are 0/0, 0/1, 1/1.
  The triallelic sites are 0/0, 0/1, 1/1, 0/2, 1/2, 2/2.
  This wiki page has more information that generalizes to different ploidy.
  http://genome.sph.umich.edu/wiki/Relationship_between_Ploidy,_Alleles_and_Genotypes

  Args:
    num_alts: int. The number of alternate alleles at the site.
    ploidy: int. The ploidy for which to return genotypes.

  Yields:
    Tuples of `ploidy` ints representing allele indices in the order they
    appear in the corresponding genotype likelihood array.

  Raises:
    NotImplementedError: if ploidy is not 1 or 2.
  """
  if ploidy == 1:
    index = 0
    while index <= num_alts:
      yield (index,)
      index += 1
  elif ploidy == 2:
    # VCF ordering: outer loop over the larger allele index, inner loop over
    # the smaller one.
    for high in range(num_alts + 1):
      low = 0
      while low <= high:
        yield (low, high)
        low += 1
  else:
    raise NotImplementedError('Only haploid and diploid supported.')
def genotype_ordering_in_likelihoods(variant):
  """Yields (i, j, allele_i, allele_j) for the genotypes ordering in GLs.

  https://samtools.github.io/hts-specs/VCFv4.1.pdf
  "If A is the allele in REF and B,C,... are the alleles as ordered in ALT,
  the ordering of genotypes for the likelihoods is given by:
  F(j/k) = (k*(k+1)/2)+j. In other words, for biallelic sites the ordering is:
  AA,AB,BB; for triallelic sites the ordering is: AA,AB,BB,AC,BC,CC, etc."
  The biallelic sites in our case are 0/0, 0/1, 1/1.
  The triallelic sites are 0/0, 0/1, 1/1, 0/2, 1/2, 2/2.
  This wiki page has more information that generalizes to different ploidy.
  http://genome.sph.umich.edu/wiki/Relationship_between_Ploidy,_Alleles_and_Genotypes

  Currently this function only implements for diploid cases.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Yields:
    allele indices and strings (i, j, allele_i, allele_j) in the correct order.
  """
  # Index 0 is the reference allele; indices >= 1 map into alternate_bases.
  alleles = [variant.reference_bases] + list(variant.alternate_bases)
  for i, j in _genotype_order_in_likelihoods(
      len(variant.alternate_bases), ploidy=2):
    yield i, j, alleles[i], alleles[j]
def genotype_likelihood_index(allele_indices):
  """Returns the genotype likelihood index for the given allele indices.

  Implements the VCF ordering formula F(j/k) = k*(k+1)/2 + j for diploid
  genotypes, and the identity mapping for haploid genotypes.

  Args:
    allele_indices: list(int). The list of allele indices for a given genotype.
      E.g. diploid homozygous reference is represented as [0, 0].

  Returns:
    The index into the associated genotype likelihood array corresponding to
    the likelihood of this list of alleles.

  Raises:
    NotImplementedError: The allele_indices are more than diploid.
  """
  ploidy = len(allele_indices)
  if ploidy == 1:
    # Haploid: the allele index is the GL index.
    return allele_indices[0]
  if ploidy == 2:
    low, high = sorted(allele_indices)
    return high * (high + 1) // 2 + low
  raise NotImplementedError(
      'Genotype likelihood index only supports haploid and diploid: {}'.
      format(allele_indices))
def allele_indices_for_genotype_likelihood_index(gl_index, ploidy=2):
  """Returns a tuple of allele_indices corresponding to the given GL index.

  This is the inverse function to `genotype_likelihood_index`.

  Args:
    gl_index: int. The index within a genotype likelihood array for which to
      determine the associated alleles.
    ploidy: int. The ploidy of the result.

  Returns:
    A tuple of `ploidy` ints representing the allele indices at this GL index.

  Raises:
    NotImplementedError: The requested allele indices are more than diploid.
  """
  if ploidy == 1:
    # NOTE(review): for haploid input this returns the bare int gl_index, not
    # a 1-tuple as the docstring states — confirm which callers depend on it.
    return gl_index
  elif ploidy == 2:
    # redacted
    # https://genome.sph.umich.edu/wiki/Relationship_between_Ploidy,_Alleles_and_Genotypes
    # rather than creating all genotypes explicitly.
    # Grow num_alts until the last diploid GL index (hom-var of the highest
    # alt) reaches gl_index, then enumerate and index into the ordering.
    num_alts = 1
    while genotype_likelihood_index([num_alts, num_alts]) < gl_index:
      num_alts += 1
    genotypes = list(_genotype_order_in_likelihoods(num_alts, ploidy=ploidy))
    return genotypes[gl_index]
  else:
    raise NotImplementedError(
        'Allele calculations only supported for haploid and diploid.')
def genotype_likelihood(variantcall, allele_indices):
  """Returns the genotype likelihood for the given allele indices.

  Args:
    variantcall: third_party.nucleus.protos.VariantCall. The VariantCall from
      which to extract the genotype likelihood of the allele indices.
    allele_indices: list(int). The list of allele indices for a given genotype.
      E.g. diploid heterozygous alternate can be represented as [0, 1].

  Returns:
    The float value of the genotype likelihood of this set of alleles.
  """
  gl_index = genotype_likelihood_index(allele_indices)
  return variantcall.genotype_likelihood[gl_index]
def allele_indices_with_num_alts(variant, num_alts, ploidy=2):
  """Returns a list of allele indices configurations with `num_alts` alternates.

  Args:
    variant: third_party.nucleus.protos.Variant. The variant of interest, which
      defines the candidate alternate alleles that can be used to generate
      allele indices configurations.
    num_alts: int in [0, `ploidy`]. The number of non-reference alleles for
      which to create the allele indices configurations.
    ploidy: int. The ploidy for which to return allele indices configurations.

  Returns: A list of tuples. Each tuple is of length `ploidy` and represents
    the allele indices of all `ploidy` genotypes that contain `num_alts`
    non-reference alleles.

  Raises:
    ValueError: The domain of `num_alts` is invalid.
    NotImplementedError: `ploidy` is not diploid.
  """
  if ploidy != 2:
    raise NotImplementedError(
        'allele_indices_with_num_alts only supports diploid.')
  if not 0 <= num_alts <= ploidy:
    raise ValueError(
        'Invalid number of alternate alleles requested: {} for ploidy {}'.
        format(num_alts, ploidy))

  num_alt_alleles = len(variant.alternate_bases)
  if num_alts == 0:
    # Only the homozygous-reference configuration.
    return [(0, 0)]
  if num_alts == 1:
    # Reference paired with each candidate alt allele.
    return [(0, alt) for alt in range(1, num_alt_alleles + 1)]
  # Both alleles non-reference: all unordered alt/alt pairs.
  return [(first, second)
          for first in range(1, num_alt_alleles + 1)
          for second in range(first, num_alt_alleles + 1)]
| 1.226563 | 1 |
Engine/test_engine.py | spineki/CrazyDiamond | 0 | 12762939 | <filename>Engine/test_engine.py
from Engine.engine import Engine
def test_react_to_keyword():
    """Engine.react_to_keyword: registered and truncated keywords match."""
    e = Engine()
    e.reactive_keyword = ["apple", "banana"]
    # Exact registered keyword matches.  (Fixed E712: compare truthiness
    # directly instead of `== True` / `== False`.)
    assert e.react_to_keyword("apple")
    # Truncated form of "banana" also matches — presumably prefix matching;
    # confirm against Engine.react_to_keyword.
    assert e.react_to_keyword("ban")
    # A string longer than any registered keyword must not match.
    assert not e.react_to_keyword("bananana")
def test_print_v(capsys):
    """print_v writes to stdout only when verbose, but always records a log."""
    # Construct the Engine with capturing disabled so its start-up output
    # (two log entries, see the logs[2:] slice below) is not captured.
    with capsys.disabled():
        e = Engine()
    e.verbose = True
    e.print_v("test")
    captured = capsys.readouterr()
    # Verbose mode prints the message with a "; " suffix.
    assert captured.out == "test; \n"
    e.verbose = False
    e.print_v("second_test")
    captured = capsys.readouterr()
    # Non-verbose mode prints nothing...
    assert captured.out == ""
    logs = e.log
    # ...but both messages are still logged (skipping the two entries
    # presumably produced by Engine() itself — confirm in Engine.__init__).
    assert logs[2:] == ['test; ','second_test; ']
def test_get_logs():
    """get_logs joins recorded entries using a configurable separator."""
    e = Engine()
    e.print_v("test1", "test2")
    e.print_v("test3")
    # Default separator is a newline.
    logs = e.get_logs()
    assert logs == "test1 test2\ntest3"
    # A custom separator replaces it.
    logs = e.get_logs(sep="_")
    assert logs == "test1 test2_test3"
def test_purify_name():
    """purify_name replaces filesystem-unsafe characters with underscores."""
    e = Engine()
    # A safe name passes through unchanged.
    purified = e.purify_name("test")
    assert purified == "test"
    # Each forbidden character (>, |, <, ?, !) becomes a single underscore.
    purified = e.purify_name("test>test|test<test?test!test")
    assert purified == "test_test_test_test_test_test"
varats/varats/plots/case_study_overview.py | se-passau/VaRA-Tool-Suite | 8 | 12762940 | <gh_stars>1-10
"""Generate plots that show a detailed overview of the state of one case-
study."""
import typing as tp
import matplotlib.pyplot as plt
from matplotlib import style
from pandas import DataFrame
from varats.data.databases.file_status_database import FileStatusDatabase
from varats.data.reports.empty_report import EmptyReport
from varats.mapping.commit_map import CommitMap, get_commit_map
from varats.paper.case_study import CaseStudy
from varats.plot.plot import Plot
from varats.plot.plot_utils import find_missing_revisions
from varats.plot.plots import PlotGenerator
from varats.project.project_util import (
get_project_cls_by_name,
get_local_project_git_path,
)
from varats.report.report import FileStatusExtension, BaseReport
from varats.ts_utils.cli_util import CLIOptionTy, make_cli_option
from varats.ts_utils.click_param_types import (
REQUIRE_REPORT_TYPE,
REQUIRE_CASE_STUDY,
)
from varats.utils.git_util import ShortCommitHash, FullCommitHash
# RGB colours (0-1 floats) used to mark each revision status in the event plot.
SUCCESS_COLOR = (0.5568627450980392, 0.7294117647058823, 0.25882352941176473)
BLOCKED_COLOR = (0.20392156862745098, 0.5411764705882353, 0.7411764705882353)
FAILED_COLOR = (0.8862745098039215, 0.2901960784313726, 0.2)
# NOTE(review): compile-error uses the same red as FAILED_COLOR — presumably
# intentional, but the two categories are visually indistinguishable; confirm.
COMPILE_ERROR_COLOR = (0.8862745098039215, 0.2901960784313726, 0.2)
MISSING_COLOR = (0.984313725490196, 0.7568627450980392, 0.3686274509803922)
BACKGROUND_COLOR = (0.4666666666666667, 0.4666666666666667, 0.4666666666666667)

# CLI flag: draw revisions whose file status is BLOCKED (default: shown).
OPTIONAL_SHOW_BLOCKED: CLIOptionTy = make_cli_option(
    "--show-blocked/--hide-blocked",
    type=bool,
    default=True,
    required=False,
    metavar="show_blocked",
    help="Shows/hides blocked revisions."
)

# CLI flag: additionally draw revisions the project itself marks as blocked,
# even if they are outside the case study (default: hidden).
OPTIONAL_SHOW_ALL_BLOCKED: CLIOptionTy = make_cli_option(
    "--show-all-blocked/--hide-all-blocked",
    type=bool,
    default=False,
    required=False,
    metavar="show_all_blocked",
    help="Shows/hides all blocked revisions."
)
def _gen_overview_data(tag_blocked: bool,
                       **kwargs: tp.Any) -> tp.Dict[str, tp.List[int]]:
    """Collect the commit-map time-ids of a case study's revisions, grouped by
    file status.

    Args:
        tag_blocked: whether blocked revisions should be tagged as such when
            querying the file-status database
        **kwargs: must contain ``case_study``; may contain ``report_type``
            (defaults to :class:`EmptyReport`)

    Returns:
        mapping from status category ("background", "blocked", "blocked_all",
        "compile_error", "failed", "missing", "success") to the list of
        time-ids falling into that category
    """
    case_study: CaseStudy = kwargs["case_study"]
    project_name = case_study.project_name
    commit_map: CommitMap = get_commit_map(project_name)
    project = get_project_cls_by_name(project_name)
    if 'report_type' in kwargs:
        result_file_type: tp.Type[BaseReport] = kwargs['report_type']
    else:
        result_file_type = EmptyReport

    positions: tp.Dict[str, tp.List[int]] = {
        "background": [],
        "blocked": [],
        "blocked_all": [],
        "compile_error": [],
        "failed": [],
        "missing": [],
        "success": []
    }

    # Revisions outside the case study form the background; project-level
    # blocked revisions among them are tracked separately for "show all".
    for c_hash, index in commit_map.mapping_items():
        if not case_study.has_revision(ShortCommitHash(c_hash)):
            positions["background"].append(index)
            if hasattr(project, "is_blocked_revision"
                      ) and project.is_blocked_revision(c_hash)[0]:
                positions["blocked_all"].append(index)

    revisions = FileStatusDatabase.get_data_for_project(
        project_name, ["revision", "time_id", "file_status"],
        commit_map,
        case_study,
        result_file_type=result_file_type,
        tag_blocked=tag_blocked
    )

    def _time_ids(status: FileStatusExtension) -> tp.List[int]:
        """Time-ids of all case-study revisions with the given file status."""
        return revisions[revisions["file_status"] ==
                         status.get_status_extension()]["time_id"].tolist()

    positions["success"] = _time_ids(FileStatusExtension.SUCCESS)
    positions["failed"] = _time_ids(FileStatusExtension.FAILED)
    positions["blocked"] = _time_ids(FileStatusExtension.BLOCKED)
    positions["blocked_all"].extend(_time_ids(FileStatusExtension.BLOCKED))
    positions["missing"] = _time_ids(FileStatusExtension.MISSING)
    positions["compile_error"] = _time_ids(FileStatusExtension.COMPILE_ERROR)

    return positions
class CaseStudyOverviewPlot(Plot, plot_name="case_study_overview_plot"):
    """Plot showing an overview of all revisions within a case study."""

    NAME = 'case_study_overview_plot'

    def plot(self, view_mode: bool) -> None:
        """Render one coloured event line per revision, keyed by file status."""
        style.use(self.plot_config.style())
        data = _gen_overview_data(
            self.plot_kwargs["show_blocked"], **self.plot_kwargs
        )
        # Fixed figure width; the per-event line width is derived below so the
        # event lines tile the full commit range.
        fig_width = 4
        dot_to_inch = 0.01389
        line_width = 0.75

        _, axis = plt.subplots(1, 1, figsize=(fig_width, 1))
        commit_map: CommitMap = get_commit_map(
            self.plot_kwargs["case_study"].project_name
        )
        linewidth = (
            fig_width / len(commit_map.mapping_items())
        ) / dot_to_inch * line_width
        axis.eventplot(
            data["background"], linewidths=linewidth, colors=BACKGROUND_COLOR
        )
        axis.eventplot(
            data["success"], linewidths=linewidth, colors=SUCCESS_COLOR
        )
        axis.eventplot(
            data["failed"], linewidths=linewidth, colors=FAILED_COLOR
        )
        axis.eventplot(
            data["missing"], linewidths=linewidth, colors=MISSING_COLOR
        )
        axis.eventplot(
            data["compile_error"],
            linewidths=linewidth,
            colors=COMPILE_ERROR_COLOR
        )
        # Either only the case study's blocked revisions or every revision the
        # project marks as blocked, depending on the CLI flag.
        if self.plot_kwargs["show_all_blocked"]:
            axis.eventplot(
                data["blocked_all"], linewidths=linewidth, colors=BLOCKED_COLOR
            )
        else:
            axis.eventplot(
                data["blocked"], linewidths=linewidth, colors=BLOCKED_COLOR
            )
        axis.set_axis_off()

    def calc_missing_revisions(
        self, boundary_gradient: float
    ) -> tp.Set[FullCommitHash]:
        """Suggest revisions to sample where adjacent revisions differ in
        file status (``boundary_gradient`` is unused by this plot)."""
        case_study: CaseStudy = self.plot_kwargs["case_study"]
        project_name: str = case_study.project_name
        commit_map: CommitMap = get_commit_map(project_name)

        def gen_revision_df(**plot_kwargs: tp.Any) -> DataFrame:
            """Load per-revision file-status data for the case study."""
            result_file_type: tp.Type[BaseReport] = plot_kwargs.get(
                "report_type", EmptyReport
            )
            # load data
            frame = FileStatusDatabase.get_data_for_project(
                project_name, ["revision", "time_id", "file_status"],
                commit_map,
                case_study,
                result_file_type=result_file_type,
                tag_blocked=True
            )
            return frame

        revision_df = gen_revision_df(**self.plot_kwargs)
        revision_df.sort_values(by=['time_id'], inplace=True)

        def head_cm_neighbours(
            lhs_cm: ShortCommitHash, rhs_cm: ShortCommitHash
        ) -> bool:
            # True if the two revisions are adjacent in the commit map.
            return commit_map.short_time_id(
                lhs_cm
            ) + 1 == commit_map.short_time_id(rhs_cm)

        def should_insert_revision(last_row: tp.Any,
                                   row: tp.Any) -> tp.Tuple[bool, float]:
            # Insert a new revision wherever the file status changes.
            return last_row["file_status"] != row["file_status"], 1.0

        def get_commit_hash(row: tp.Any) -> ShortCommitHash:
            return ShortCommitHash(str(row["revision"]))

        return find_missing_revisions(
            revision_df.iterrows(), get_local_project_git_path(project_name),
            commit_map, should_insert_revision, get_commit_hash,
            head_cm_neighbours
        )
class CaseStudyOverviewGenerator(
    PlotGenerator,
    generator_name="cs-overview-plot",
    options=[
        REQUIRE_REPORT_TYPE, REQUIRE_CASE_STUDY, OPTIONAL_SHOW_BLOCKED,
        OPTIONAL_SHOW_ALL_BLOCKED
    ]
):
    """Generates a case study overview plot."""

    def generate(self) -> tp.List[Plot]:
        """Instantiate the single overview plot this generator produces."""
        overview_plot = CaseStudyOverviewPlot(
            self.plot_config, **self.plot_kwargs
        )
        return [overview_plot]
| 2.21875 | 2 |
old/test/dist.py | dominickeehan/bayesian-microlensing | 1 | 12762941 | import MulensModel as mm
import Functions as mc
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib
from scipy.stats import truncnorm, loguniform, uniform
#plt.style.use('ggplot')
# Exploratory: list the matplotlib styles available on this machine.
print(plt.style.available)
#print(plt.rcParams["font.family"].available)
#print(matplotlib.get_cachedir())
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
#rc('font',**{'family':'serif','serif':['Times New Roman']})
#rc('text', usetex=True)
#plt.rcParams["font.family"] = "serif"
#print(plt.rcParams.keys())
#plt.rcParams['font.size'] = 12
# Prior distributions for the microlensing parameters (from Functions module).
s_pi = mc.logUniDist(0.2, 5)
q_pi = mc.logUniDist(10e-6, 1)
alpha_pi = mc.uniDist(0, 360)
u0_pi = mc.uniDist(0, 2)
t0_pi = mc.uniDist(0, 72)
tE_pi = mc.truncatedLogNormDist(1, 100, 10**1.15, 10**0.45)
rho_pi = mc.logUniDist(10**-4, 10**-2)
# Evaluate the Einstein-time prior's PDF on a grid and accumulate a crude
# Riemann-sum estimate of its mean.
distr = tE_pi
y=[]
x=np.linspace(1, 100, 1000)
mu=0
for i in x:
    mu+=np.exp(distr.log_PDF(i))*i
    y.append(np.exp(distr.log_PDF(i)))
print(mu/len(x))
#print(y)
# Styling for the PDF figure.
plt.rcParams["font.family"] = "serif"
plt.rcParams['font.size'] = 12
plt.style.use('seaborn-bright')
plt.rcParams["legend.edgecolor"] = '0'
plt.rcParams["legend.framealpha"] = 1
plt.rcParams["legend.title_fontsize"] = 10
plt.rcParams["legend.fontsize"] = 9
plt.rcParams["grid.linestyle"] = 'dashed'
plt.rcParams["grid.alpha"] = 0.25
plt.plot(x, y, label='Probability\nDensity')
plt.xlabel(r'Parameter [$\chi$]')
plt.ylabel(r'Probability Density [$\rho$]')
plt.title('Probability Density Function')
plt.legend(title='Entries')#, framealpha=1.0, edgecolor='0.0') #
#plt.axis('scaled')
plt.tight_layout()
plt.grid()
plt.savefig('Plots/pdf-test.png')
def centre_offsets_pointilism(supset_model, subset_model, symbols, name = '', dpi = 100):
    """Corner-style scatter plot of sampled-state offsets from each model's centre.

    Overlays the subset model's offsets (winter colormap) with the superset
    model's offsets (spring colormap) in the lower triangle of a corner plot,
    then saves the figure to ``results/<name>-centreed-pointilism.png``.

    NOTE(review): this function references ``corner`` and ``style()``, which
    are not imported or defined anywhere in this file — it appears to have
    been copied from a module that provides them. Verify before calling.
    """
    supset_offsets = (supset_model.sampled.states_array(scaled = True) - supset_model.centre.scaled[:, np.newaxis])
    subset_offsets = (subset_model.sampled.states_array(scaled = True) - subset_model.centre.scaled[:, np.newaxis])
    n_dim = subset_model.D
    style()
    # construct shape with corner
    figure = corner.corner(subset_offsets.T)
    # font/visibility
    plt.rcParams['font.size'] = 8
    plt.rcParams['axes.titlesize'] = 14
    plt.rcParams['axes.labelsize'] = 14
    # extract the axes
    axes = np.array(figure.axes).reshape((n_dim, n_dim))
    # Loop over the diagonal to remove from plot
    for i in range(n_dim):
        ax = axes[i, i]
        ax.cla()
        ax.patch.set_alpha(0.0)
        ax.axis('off')
        ax.axes.get_xaxis().set_ticklabels([])
        ax.axes.get_yaxis().set_ticklabels([])
    # loop over lower triangle
    for yi in range(n_dim):
        for xi in range(yi):
            ax = axes[yi, xi]
            ax.cla()
            # overlay points
            ax.scatter(subset_offsets[xi, :], subset_offsets[yi, :], c = np.linspace(0.0, 1.0, subset_model.sampled.n), cmap = 'winter', alpha = 0.15, marker = ".", s = 20, linewidth = 0.0)
            ax.scatter(supset_offsets[xi, :], supset_offsets[yi, :], c = np.linspace(0.0, 1.0, supset_model.sampled.n), cmap = 'spring', alpha = 0.15, marker = ".", s = 20, linewidth = 0.0)
            # axis labels only on the outer edge of the triangle
            if yi == n_dim - 1: # last row
                ax.set_xlabel(symbols[xi])
                ax.tick_params(axis = 'x', labelrotation = 45)
            else:
                ax.axes.get_xaxis().set_ticklabels([])
            if xi == 0: # first column
                ax.set_ylabel(symbols[yi])
                ax.tick_params(axis = 'y', labelrotation = 45)
            else:
                ax.axes.get_yaxis().set_ticklabels([])
    figure.savefig('results/' + name + '-centreed-pointilism.png', bbox_inches = "tight", dpi = dpi, transparent=True)
    figure.clf()
    return
s3parcp_download/miniwdl_s3parcp.py | chanzuckerberg/miniwdl-s3parcp | 1 | 12762942 | <gh_stars>1-10
"""
miniwdl download plugin for s3:// URIs using s3parcp -- https://github.com/chanzuckerberg/s3parcp
Requires s3parcp docker image tag supplied in miniwdl configuration, either via custom cfg file
(section s3parcp, key docker_image) or environment variable MINIWDL__S3PARCP__DOCKER_IMAGE.
Inherits AWS credentials from miniwdl's environment (as detected by boto3).
The plugin is installed using the "entry points" mechanism in setup.py. Furthermore, the miniwdl
configuration [plugins] section has options to enable/disable installed plugins. Installed &
enabled plugins can be observed using miniwdl --version and/or miniwdl run --debug.
"""
import os
import tempfile
import boto3
def main(cfg, logger, uri, **kwargs):
    """miniwdl download-plugin coroutine for s3:// URIs.

    First yields a WDL task (plus its inputs) that runs s3parcp inside docker;
    miniwdl sends the task outputs back in, which are then yielded unchanged.
    AWS credentials are taken from boto3 and passed to the task through a
    temp file that is deleted when the ``with`` block exits.
    """
    # get AWS credentials from boto3
    b3 = boto3.session.Session()
    b3creds = b3.get_credentials()
    aws_credentials = {
        "AWS_ACCESS_KEY_ID": b3creds.access_key,
        "AWS_SECRET_ACCESS_KEY": b3creds.secret_key,
    }
    if b3creds.token:
        aws_credentials["AWS_SESSION_TOKEN"] = b3creds.token

    # s3parcp (or perhaps underlying golang AWS lib) seems to require region set to match the
    # bucket's; in contrast to awscli which can conveniently 'figure it out'
    aws_credentials["AWS_REGION"] = b3.region_name if b3.region_name else "us-west-2"

    # format them as env vars to be sourced in the WDL task command
    aws_credentials = "\n".join(f"export {k}='{v}'" for (k, v) in aws_credentials.items())

    # write them to a temp file that'll self-destruct automatically
    temp_dir = "/mnt"
    if cfg.has_option("s3parcp", "dir"):
        temp_dir = cfg["s3parcp"]["dir"]
    with tempfile.NamedTemporaryFile(
        prefix="miniwdl_download_s3parcp_credentials_", delete=True, mode="w", dir=temp_dir
    ) as aws_credentials_file:
        print(aws_credentials, file=aws_credentials_file, flush=True)

        # make file group-readable to ensure it'll be usable if the docker image runs as non-root
        os.chmod(aws_credentials_file.name, os.stat(aws_credentials_file.name).st_mode | 0o40)

        # yield WDL task and inputs (followed by outputs as well)
        recv = yield {
            "task_wdl": wdl,
            "inputs": {
                "uri": uri,
                "aws_credentials": aws_credentials_file.name,
                "docker": cfg["s3parcp"]["docker_image"],
            },
        }

    # yield task outputs (unchanged)
    yield recv
# WDL task source code
wdl = """
task s3parcp {
input {
String uri
File aws_credentials
String docker
Int cpu = 4
}
command <<<
set -euo pipefail
source "~{aws_credentials}"
mkdir __out
cd __out
# allocating one hardware thread to two concurrent part xfers
s3parcp --checksum -c ~{cpu*2} "~{uri}" .
>>>
output {
File file = glob("__out/*")[0]
}
runtime {
cpu: cpu
memory: "~{cpu}G"
docker: docker
}
}
"""
| 1.984375 | 2 |
projects/urls.py | ahmedbatty/portfolio | 0 | 12762943 | <filename>projects/urls.py<gh_stars>0
from django.urls import path
from .views import ProjectView, AboutView, ContactView
# URL routes for the portfolio app.
urlpatterns = [
    # ex: /project/5/my-project-slug
    path('project/<int:project_id>/<slug:project_slug>', ProjectView.as_view(), name='project'),
    # ex: /about/
    path('about/', AboutView.as_view(), name='about'),
    # ex: /contact/
    path('contact/', ContactView.as_view(), name='contact'),
]
| 1.929688 | 2 |
pyiron_atomistics/gpaw/pyiron_ase.py | dgehringer/pyiron_atomistics | 0 | 12762944 | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from ase import Atoms
from ase.constraints import dict2constraint
import copy
import importlib
import numpy as np
from pyiron_atomistics.atomistics.job.interactive import GenericInteractive
from pyiron_atomistics.atomistics.structure.atoms import pyiron_to_ase, Atoms as PAtoms
try:
from ase.cell import Cell
except ImportError:
Cell = None
__author__ = "<NAME>"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
__date__ = "Sep 1, 2018"
def ase_structure_todict(structure):
    """Serialize an ASE ``Atoms`` object into a plain, HDF5-friendly dict.

    Optional per-atom arrays and an attached calculator are only included
    when present on the structure.
    """
    atoms_dict = {
        "symbols": structure.get_chemical_symbols(),
        "positions": structure.get_positions(),
        "pbc": structure.get_pbc(),
        "celldisp": structure.get_celldisp(),
        "constraint": [c.todict() for c in structure.constraints],
        "info": copy.deepcopy(structure.info),
    }
    # Older ASE versions have no Cell class; fall back to the raw cell array.
    atoms_dict["cell"] = (
        structure.get_cell().todict() if Cell is not None else structure.get_cell()
    )
    # Optional per-atom arrays: (array name on the structure, dict key, getter)
    optional_arrays = (
        ("tags", "tags", structure.get_tags),
        ("masses", "masses", structure.get_masses),
        ("momenta", "momenta", structure.get_momenta),
        ("initial_magmoms", "magmoms", structure.get_initial_magnetic_moments),
        ("initial_charges", "charges", structure.get_initial_charges),
    )
    for array_name, dict_key, getter in optional_arrays:
        if structure.has(array_name):
            atoms_dict[dict_key] = getter()
    if structure.calc is not None:
        calc_dict = structure.calc.todict()
        # Store the dotted class path so the calculator can be re-instantiated.
        calc_dict["calculator_class"] = (
            str(structure.calc.__class__).replace("'", " ").split()[1]
        )
        calc_dict["label"] = structure.calc.label
        atoms_dict["calculator"] = calc_dict
    return atoms_dict
def ase_calculator_fromdict(class_path, class_dict):
    """Instantiate a class from its dotted path and a dict of keyword args.

    Args:
        class_path: fully qualified name, e.g. ``"package.module.ClassName"``
        class_dict: keyword arguments forwarded to the class constructor
    """
    module_path, _, class_name = class_path.rpartition(".")
    module = importlib.import_module(module_path)
    return getattr(module, class_name)(**class_dict)
def ase_structure_fromdict(atoms_dict):
    """Rebuild an ASE ``Atoms`` object from the dict written by
    :func:`ase_structure_todict`.

    Restores the cell, constraints and (if present) the calculator, then
    reloads the calculator state from its label.
    """

    def _build_cell(cell_dict):
        cell_dict.pop("pbc", None)
        # Older ASE versions have no Cell class; keep the raw dict then.
        return Cell(**cell_dict) if Cell is not None else cell_dict

    params = copy.deepcopy(atoms_dict)
    if "calculator" in params:
        calc_kwargs = params["calculator"]
        calc_path = calc_kwargs.pop("calculator_class")
        params["calculator"] = ase_calculator_fromdict(calc_path, calc_kwargs)
    if "constraint" in params:
        params["constraint"] = [
            dict2constraint(const_dict) for const_dict in params["constraint"]
        ]
    params["cell"] = _build_cell(params["cell"])
    atoms = Atoms(**params)
    if atoms.calc is not None:
        atoms.calc.read(atoms.calc.label)
    return atoms
class AseJob(GenericInteractive):
    """pyiron job that drives an ASE calculator through the interactive
    job interface.

    Subclasses must implement :meth:`set_calculator` to attach a concrete
    ASE calculator to the structure before the job runs.
    """

    def __init__(self, project, job_name):
        super(AseJob, self).__init__(project, job_name)

        self.__name__ = "AseJob"
        self.__version__ = (
            None  # Reset the version number to the executable is set automatically
        )

    @property
    def structure(self):
        return GenericInteractive.structure.fget(self)

    @structure.setter
    def structure(self, structure):
        # Accept pyiron Atoms as well; convert them to ASE Atoms internally.
        if isinstance(structure, PAtoms):
            structure = pyiron_to_ase(structure)
        GenericInteractive.structure.fset(self, structure)

    def to_hdf(self, hdf=None, group_name=None):
        """Store the job (incl. the ASE structure as a dict) in HDF5."""
        super(AseJob, self).to_hdf(hdf=hdf, group_name=group_name)
        with self.project_hdf5.open("input") as hdf_input:
            hdf_input["structure"] = ase_structure_todict(self._structure)

    def from_hdf(self, hdf=None, group_name=None):
        """Restore the job (incl. the ASE structure) from HDF5."""
        super(AseJob, self).from_hdf(hdf=hdf, group_name=group_name)
        with self.project_hdf5.open("input") as hdf_input:
            self.structure = ase_structure_fromdict(hdf_input["structure"])

    def run_static(self):
        # A static run is realized as a one-shot interactive run; the
        # original run mode is restored afterwards.
        pre_run_mode = self.server.run_mode
        self.server.run_mode.interactive = True
        self.run_if_interactive()
        self.interactive_close()
        self.server.run_mode = pre_run_mode

    def run_if_interactive(self):
        if self.structure.calc is None:
            self.set_calculator()
        super(AseJob, self).run_if_interactive()
        self.interactive_collect()

    def set_calculator(self):
        """Attach an ASE calculator to the structure (subclass hook)."""
        raise NotImplementedError(
            "The _set_calculator function is not implemented for this code."
        )

    def interactive_structure_setter(self, structure):
        self.structure.calc.calculate(structure)

    def interactive_positions_setter(self, positions):
        self.structure.positions = positions

    def interactive_initialize_interface(self):
        self.status.running = True
        self._structure.calc.set_label(self.working_directory + "/")
        self._interactive_library = True

    def interactive_close(self):
        """Close the interactive session and copy interactive output to
        the generic output group."""
        if self.interactive_is_activated():
            super(AseJob, self).interactive_close()
            with self.project_hdf5.open("output") as h5:
                if "interactive" in h5.list_groups():
                    for key in h5["interactive"].list_nodes():
                        h5["generic/" + key] = h5["interactive/" + key]

    def interactive_forces_getter(self):
        return self.structure.get_forces()

    def interactive_pressures_getter(self):
        # ASE returns stress; pyiron convention uses the negative (pressure).
        return -self.structure.get_stress(voigt=False)

    def interactive_energy_pot_getter(self):
        return self.structure.get_potential_energy()

    def interactive_energy_tot_getter(self):
        return self.structure.get_potential_energy()

    def interactive_indices_getter(self):
        # Map chemical symbols to indices in the alphabetically sorted
        # species list.
        element_lst = sorted(list(set(self.structure.get_chemical_symbols())))
        return np.array(
            [element_lst.index(el) for el in self.structure.get_chemical_symbols()]
        )

    def interactive_positions_getter(self):
        return self.structure.positions.copy()

    def interactive_steps_getter(self):
        return len(self.interactive_cache[list(self.interactive_cache.keys())[0]])

    def interactive_time_getter(self):
        return self.interactive_steps_getter()

    def interactive_volume_getter(self):
        return self.structure.get_volume()

    def interactive_cells_getter(self):
        return self.structure.cell.copy()

    def write_input(self):
        # No input files needed: the ASE calculator is driven in-process.
        pass

    def collect_output(self):
        # Output is collected via interactive_collect(); nothing to parse.
        pass

    def run_if_scheduler(self):
        self._create_working_directory()
        super(AseJob, self).run_if_scheduler()

    def interactive_index_organizer(self):
        """Re-send species indices to the calculator if the chemical
        composition changed between the previous and current structure."""
        index_merge_lst = self._interactive_species_lst.tolist() + list(
            np.unique(self._structure_current.get_chemical_symbols())
        )
        el_lst = sorted(set(index_merge_lst), key=index_merge_lst.index)
        current_structure_index = [
            el_lst.index(el) for el in self._structure_current.get_chemical_symbols()
        ]
        previous_structure_index = [
            el_lst.index(el) for el in self._structure_previous.get_chemical_symbols()
        ]
        if not np.array_equal(
            np.array(current_structure_index),
            np.array(previous_structure_index),
        ):
            self._logger.debug("Generic library: indices changed!")
            self.interactive_indices_setter(self._structure_current.indices)

    def _get_structure(self, frame=-1, wrap_atoms=True):
        """Reconstruct the structure at a given output frame.

        Returns None when no output is available for the requested frame.
        """
        if (
            self.server.run_mode.interactive
            or self.server.run_mode.interactive_non_modal
        ):
            # Warning: We only copy symbols, positions and cell information - no tags.
            if self.output.indices is not None and len(self.output.indices) != 0:
                indices = self.output.indices[frame]
            else:
                return None
            if len(self._interactive_species_lst) == 0:
                el_lst = list(np.unique(self._structure_current.get_chemical_symbols()))
            else:
                el_lst = self._interactive_species_lst.tolist()
            if indices is not None:
                if wrap_atoms:
                    positions = self.output.positions[frame]
                else:
                    if len(self.output.unwrapped_positions) > max([frame, 0]):
                        positions = self.output.unwrapped_positions[frame]
                    else:
                        positions = (
                            self.output.positions[frame]
                            + self.output.total_displacements[frame]
                        )
                atoms = Atoms(
                    symbols=np.array([el_lst[el] for el in indices]),
                    positions=positions,
                    cell=self.output.cells[frame],
                    pbc=self.structure.pbc,
                )
                # Update indicies to match the indicies in the cache.
                atoms.indices = indices
                return atoms
            else:
                return None
        else:
            if (
                self.get("output/generic/cells") is not None
                and len(self.get("output/generic/cells")) != 0
            ):
                return super()._get_structure(frame=frame, wrap_atoms=wrap_atoms)
            else:
                return None
return None
class AseAdapter(object):
    """Adapter exposing a pyiron interactive job (``ham``) through an
    ASE-``Atoms``-like interface, so ASE dynamics/optimizers can drive it.

    With ``fast_mode`` the job is kept open interactively and positions,
    potential energies etc. are cached locally; otherwise every force call
    triggers a full job run.
    """

    def __init__(self, ham, fast_mode=False):
        self._ham = ham
        self._fast_mode = fast_mode
        if self._ham.server.run_mode.interactive and fast_mode:
            # fast mode: cache everything locally and bypass the job's own
            # interactive cache
            self.interactive_cache = {
                "velocities": [],
                "energy_kin": [],
                "momenta": [],
                "positions": [],
                "energy_tot": [],
                "energy_pot": [],
            }
            self._ham.run()
            self._ham.interactive_cache = {}
        elif self._ham.server.run_mode.interactive:
            self.interactive_cache = {"velocities": [], "energy_kin": [], "momenta": []}
        self.constraints = []
        try:
            self.arrays = {
                "positions": self._ham.structure.positions.copy(),
                "numbers": self._ham.structure.numbers,
            }
        except AttributeError:
            # pyiron structures expose get_atomic_numbers() instead of .numbers
            self.arrays = {
                "positions": self._ham.structure.positions.copy(),
                "numbers": self._ham.structure.get_atomic_numbers(),
            }

    @property
    def communicator(self):
        # No MPI communicator: serial operation (mirrors the ASE attribute).
        return None

    def get_masses(self):
        return np.array(self._ham.structure.get_masses())

    def get_positions(self):
        return self.arrays["positions"]

    def set_positions(self, positions):
        self.arrays["positions"] = positions

    def get_forces(self, md=True):
        """Evaluate forces at the current positions via the wrapped job."""
        if self._fast_mode:
            self._ham.interactive_positions_setter(self.arrays["positions"])
            self.interactive_cache["positions"].append(self.arrays["positions"])
            self._ham.interactive_execute()
            self.interactive_cache["energy_pot"].append(
                self._ham.interactive_energy_pot_getter()
            )
            return np.array(self._ham.interactive_forces_getter())
        else:
            self._ham.structure.positions = self.arrays["positions"]
            if self._ham.server.run_mode.interactive:
                self._ham.run()
            else:
                self._ham.run(delete_existing_job=True)
            return self._ham.output.forces[-1]

    def interactive_close(self):
        """Flush the local caches into the job's output and close it.

        NOTE(review): the ``[::2]`` strides suggest energies are recorded
        twice per MD step in the underlying output — confirm against the
        pyiron interactive protocol.
        """
        self._ham.interactive_store_in_cache(
            "velocities", self.interactive_cache["velocities"]
        )
        self._ham.interactive_store_in_cache(
            "energy_kin", self.interactive_cache["energy_kin"]
        )
        if self._fast_mode:
            self._ham.interactive_store_in_cache(
                "positions", self.interactive_cache["positions"]
            )
            self._ham.interactive_store_in_cache(
                "energy_pot", self.interactive_cache["energy_pot"][::2]
            )
            self._ham.interactive_store_in_cache(
                "energy_tot",
                (
                    np.array(self.interactive_cache["energy_pot"][::2])
                    + np.array(self.interactive_cache["energy_kin"])
                ).tolist(),
            )
        else:
            self._ham.interactive_store_in_cache(
                "energy_tot",
                (
                    np.array(self._ham.output.energy_pot)[::2]
                    + np.array(self.interactive_cache["energy_kin"])
                ).tolist(),
            )
        self._ham.interactive_close()

    def get_number_of_atoms(self):
        return self._ham.structure.get_number_of_atoms()

    # ASE functions
    def get_kinetic_energy(self):
        """Get the kinetic energy."""
        momenta = self.arrays.get("momenta")
        if momenta is None:
            return 0.0
        return 0.5 * np.vdot(momenta, self.get_velocities())

    def set_momenta(self, momenta, apply_constraint=True):
        """Set momenta."""
        if apply_constraint and len(self.constraints) > 0 and momenta is not None:
            momenta = np.array(momenta)  # modify a copy
            for constraint in self.constraints:
                if hasattr(constraint, "adjust_momenta"):
                    constraint.adjust_momenta(self, momenta)
        self.set_array("momenta", momenta, float, (3,))
        # Record the resulting velocities/kinetic energy for later storage.
        self.interactive_cache["velocities"].append(self.get_velocities())
        self.interactive_cache["energy_kin"].append(self.get_kinetic_energy())

    def set_velocities(self, velocities):
        """Set the momenta by specifying the velocities."""
        self.set_momenta(self.get_masses()[:, np.newaxis] * velocities)

    def get_momenta(self):
        """Get array of momenta."""
        if "momenta" in self.arrays:
            return self.arrays["momenta"].copy()
        else:
            return np.zeros((len(self), 3))

    def set_array(self, name, a, dtype=None, shape=None):
        """Update array.

        If *shape* is not *None*, the shape of *a* will be checked.
        If *a* is *None*, then the array is deleted."""
        b = self.arrays.get(name)
        if b is None:
            if a is not None:
                self.new_array(name, a, dtype, shape)
        else:
            if a is None:
                del self.arrays[name]
            else:
                a = np.asarray(a)
                if a.shape != b.shape:
                    raise ValueError(
                        "Array has wrong shape %s != %s." % (a.shape, b.shape)
                    )
                b[:] = a

    def get_angular_momentum(self):
        """Get total angular momentum with respect to the center of mass."""
        com = self.get_center_of_mass()
        positions = self.get_positions()
        positions -= com  # translate center of mass to origin
        return np.cross(positions, self.get_momenta()).sum(0)

    def new_array(self, name, a, dtype=None, shape=None):
        """Add new array.

        If *shape* is not *None*, the shape of *a* will be checked."""
        if dtype is not None:
            a = np.array(a, dtype, order="C")
            if len(a) == 0 and shape is not None:
                a.shape = (-1,) + shape
        else:
            if not a.flags["C_CONTIGUOUS"]:
                a = np.ascontiguousarray(a)
            else:
                a = a.copy()
        if name in self.arrays:
            raise RuntimeError
        # All per-atom arrays must have the same length as the existing ones;
        # checking against any one of them suffices.
        for b in self.arrays.values():
            if len(a) != len(b):
                raise ValueError("Array has wrong length: %d != %d." % (len(a), len(b)))
            break
        if shape is not None and a.shape[1:] != shape:
            raise ValueError(
                "Array has wrong shape %s != %s." % (a.shape, (a.shape[0:1] + shape))
            )
        self.arrays[name] = a

    def has(self, name):
        """Check for existence of array.

        name must be one of: 'tags', 'momenta', 'masses', 'initial_magmoms',
        'initial_charges'."""
        # XXX extend has to calculator properties
        return name in self.arrays

    def get_center_of_mass(self, scaled=False):
        """Get the center of mass.

        If scaled=True the center of mass in scaled coordinates
        is returned."""
        m = self.get_masses()
        com = np.dot(m, self.arrays["positions"]) / m.sum()
        if scaled:
            if self._fast_mode:
                return np.linalg.solve(self._ham.structure.cells[-1].T, com)
            else:
                return np.linalg.solve(self._ham.output.cells[-1].T, com)
        else:
            return com

    def get_velocities(self):
        """Get array of velocities."""
        momenta = self.arrays.get("momenta")
        if momenta is None:
            return None
        m = self.get_masses()
        # m = self.arrays.get('masses')
        # if m is None:
        #     m = atomic_masses[self.arrays['numbers']]
        return momenta / m.reshape(-1, 1)

    def __len__(self):
        return len(self._ham.structure)
| 1.875 | 2 |
digsby/src/tests/testgui/uberdemos/UberProgressBarDemo.py | ifwe/digsby | 35 | 12762945 | import wx
from gui.uberwidgets.UberProgressBar import UberProgressBar
from gui import skin as skincore
# NOTE: legacy Python 2 / classic wxPython code (print statement, PySimpleApp).
class F(wx.Frame):
    """Demo frame: a skinned UberProgressBar driven by a wx.Slider."""

    def __init__(self):
        wx.Frame.__init__(self, None, wx.NewId(), "Progress Bar sampler",(0,0),(600,250))
        self.Bind(wx.EVT_SLIDER, self.on_slide)

        self.content = wx.BoxSizer(wx.VERTICAL)

        # 0-100 progress bar using the 'progressbar' skin element
        self.g = UberProgressBar(self,wx.NewId(),100,'progressbar',showlabel=True,size=(300,20))
        self.s = wx.Slider(self, -1, 0, 0, 100, (0,0), (300, 50))

        self.content.Add(self.g,0,wx.ALIGN_CENTER_HORIZONTAL)
        self.content.Add(self.s,0,wx.ALIGN_CENTER_HORIZONTAL)

        self.SetSizer(self.content)

    def on_slide(self,e):
        # mirror the slider position into the progress bar
        self.g.SetValue(self.s.GetValue())
        print self.s.GetValue()
if __name__=='__main__':
    # Stand-alone demo: initialize the skin system and show the frame.
    a = wx.PySimpleApp( 0 )
    skincore.skininit('../../../../res')
    f=F()
    f.Show(True)
    a.MainLoop()
| 2.5625 | 3 |
mps_database/models/threshold_fault.py | slaclab/mps_database | 0 | 12762946 | from sqlalchemy import Column, Integer, Float, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship, backref
from mps_database.models import Base
class ThresholdFault(Base):
    """
    ThresholdFault class (threshold_faults table)

    Describe an analog fault, which is generated by an AnalogDevice.
    The AnalogDevice provides a compressed analog value from the device,
    the compressed value is expressed a reduced number of bits (e.g. 12).
    The value read from the device is compared to the threshold stored
    here. The conversion from the threshold to analog value is done
    via the threshold_values_map and threshold_values tables.

    Properties:
      name: short fault description
      greater_than: if true, if the AnalogDevice value is larger than the
                    compressed_threshold then a ThresholdFault is generated
                    if false, if the AnalogDevice value is smaller than the
                    compressed threshold then a ThresholdFault is generated

    References:
      analog_device_id: defines the type of analog device related to this
                        fault
      threshold_value_id: defines which threshold value is used when calculating
                          if a fault happened

    Relationships:
      threshold_fault_state: through the ThresholdFaultStates this
                             ThresholdFault is linked to an AllowedClass (allowed beam class)
    """
    __tablename__ = 'threshold_faults'

    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)

    analog_device_id = Column(Integer, ForeignKey('analog_devices.id'), nullable=False)

    #If greater_than is true, a value larger than the threshold will generate a fault.
    #If greater_than is false, a value smaller than the threshold will generate a fault.
    greater_than = Column(Boolean, nullable=False)

    threshold_fault_state = relationship("ThresholdFaultState", uselist=False, backref="threshold_fault")
    threshold_value_id = Column(Integer, ForeignKey('threshold_values.id'), nullable=False)

    @property
    def less_than(self):
        # Convenience inverse of greater_than for readability at call sites.
        return not self.greater_than
| 2.8125 | 3 |
SR/model/SRCNN.py | AntonyYX/Super-Resolution | 0 | 12762947 | <gh_stars>0
import math
from torch import nn
import torch
from torch.nn.modules.activation import ReLU
from torchvision import transforms
from PIL import Image
class SRCNN(nn.Module):
    """Three-layer SRCNN-style super-resolution network.

    9x9 -> 5x5 -> 5x5 convolutions (64 -> 32 -> in_channel feature maps),
    each followed by an in-place ReLU. 'Same' padding keeps the spatial
    size of the input unchanged.
    """

    def __init__(self, in_channel: int = 3):
        super(SRCNN, self).__init__()
        layers = [
            nn.Conv2d(in_channels=in_channel, out_channels=64,
                      kernel_size=9, padding=9 // 2),
            nn.ReLU(True),
            nn.Conv2d(in_channels=64, out_channels=32,
                      kernel_size=5, padding=5 // 2),
            nn.ReLU(True),
            nn.Conv2d(in_channels=32, out_channels=in_channel,
                      kernel_size=5, padding=5 // 2),
            nn.ReLU(True),
        ]
        self.body = nn.Sequential(*layers)

    def forward(self, inputs):
        return self.body(inputs)
class SRCNN_BN(nn.Module):
    """SRCNN variant with batch normalization after the first two ReLUs.

    Identical layout to :class:`SRCNN` plus BatchNorm2d on the 64- and
    32-channel feature maps. 'Same' padding preserves spatial size.
    """

    def __init__(self, in_channel: int = 3):
        super(SRCNN_BN, self).__init__()
        layers = [
            nn.Conv2d(in_channels=in_channel, out_channels=64,
                      kernel_size=9, padding=9 // 2),
            nn.ReLU(True),
            nn.BatchNorm2d(64),
            nn.Conv2d(in_channels=64, out_channels=32,
                      kernel_size=5, padding=5 // 2),
            nn.ReLU(True),
            nn.BatchNorm2d(32),
            nn.Conv2d(in_channels=32, out_channels=in_channel,
                      kernel_size=5, padding=5 // 2),
            nn.ReLU(True),
        ]
        self.body = nn.Sequential(*layers)

    def forward(self, inputs):
        return self.body(inputs)
if __name__ == "__main__":
    # Smoke test: the output shape must match the input shape (same padding).
    model = SRCNN_BN(3)
    img = torch.rand((1, 3, 600, 600))
    print(model(img).shape)
| 2.734375 | 3 |
assignments/assignment-6/src/GOT_classification.py | PeterThramkrongart/cds-language-portfolio | 1 | 12762948 | <reponame>PeterThramkrongart/cds-language-portfolio
#!/usr/bin/env python
# system tools
import os
import sys
sys.path.append(os.path.join(".."))
# pandas, numpy
import pandas as pd
import numpy as np
# import my classifier utility functions
import utils.classifier_utils as clf
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
# Machine learning stuff from sklearn
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
# tools from tensorflow
import tensorflow as tf
from tensorflow.random import set_seed
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense,
Dropout,
BatchNormalization,
)
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import backend as K
from tensorflow.keras.utils import plot_model
from tensorflow.keras.regularizers import L2
# matplotlib
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("Agg")
# Shared seed reused for the sklearn train/test split and both models below.
random_state = 42
# set seeds for reproducibility of TensorFlow and NumPy randomness
set_seed(random_state)
np.random.seed(random_state)
def plot_history(H, epochs, out_path=None):
    """Plot training/validation loss and accuracy curves and save the figure.

    Args:
        H: Keras History object returned by ``model.fit``; its ``history``
            dict must contain "loss", "val_loss", "accuracy" and
            "val_accuracy".
        epochs: number of epochs the model was trained for (x-axis length).
        out_path: where to save the figure; defaults to
            ``../models/nn_training_history.png`` for backward compatibility.
    """
    if out_path is None:
        out_path = os.path.join("..", "models", "nn_training_history.png")
    plt.style.use("fivethirtyeight")
    plt.figure()
    # Hoist the shared x-axis out of the four plot calls.
    epoch_range = np.arange(0, epochs)
    plt.plot(epoch_range, H.history["loss"], label="train_loss")
    plt.plot(epoch_range, H.history["val_loss"], label="val_loss")
    plt.plot(epoch_range, H.history["accuracy"], label="train_acc")
    plt.plot(epoch_range, H.history["val_accuracy"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.tight_layout()
    plt.draw()
    plt.savefig(out_path)
def _save_report(path, report):
    """Write a classification-report string to *path*, closing the file safely."""
    with open(path, "w") as text_file:
        text_file.write(report)


def main():
    """
    Classify Game of Thrones dialogue by season with three models, saving
    all metrics (and training plots) under ../models:

    1. a LogisticRegression baseline on tf-idf features,
    2. a small feed-forward neural network,
    3. a grid-searched SGDClassifier.
    """
    # loading data
    data = pd.read_csv(os.path.join("..", "data", "raw", "Game_of_Thrones_Script.csv"))
    # gather all lines spoken by a character within a season/episode to add
    # context and improve the model's accuracy
    data = data.groupby(["Season", "Episode", "Name"])
    data = data["Sentence"].agg(lambda x: " ".join(x)).to_frame()
    data = data.reset_index().rename(columns={"Sentence": "Text"})  # resetting index

    # train and test split using sklearn
    X_train, X_test, y_train, y_test = train_test_split(data.Text,
                                                        data["Season"],
                                                        test_size=0.1,
                                                        random_state=random_state)
    print("Data loaded and split")

    ### a baseline model of a logistic regression ###
    print("fitting baseline LogReg model")
    pipe = Pipeline(steps=[
        ('tfidf', TfidfVectorizer()),
        ('clf', LogisticRegression(solver="liblinear", random_state=random_state))
    ])

    # fit and report model metrics
    classifier = pipe.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)
    classifier_metrics_lr = metrics.classification_report(y_test, y_pred)
    print(classifier_metrics_lr)
    # save the classification report (file now closed via context manager)
    _save_report(os.path.join("..", "models", "LG_metrics.txt"), classifier_metrics_lr)

    ### Building network ###
    # season labels to one-hot vectors; the binarizer is fitted on the
    # training labels only and merely *transforms* the test labels
    # (previously fit_transform was re-run on the test set, which can
    # silently reorder/relearn the classes)
    lb = LabelBinarizer()
    y_train_bin = lb.fit_transform(y_train)
    y_test_bin = lb.transform(y_test)

    # the nn will have a vocabulary size of 15000
    maxlen = 15000
    vectorizer = TfidfVectorizer(ngram_range=(1, 2), max_features=maxlen)
    X_train_feats = vectorizer.fit_transform(X_train).toarray()
    X_test_feats = vectorizer.transform(X_test).toarray()

    # l2 regularization
    l2 = L2(0.00001)

    # a new neural network
    model = Sequential()
    model.add(Dense(64, activation='relu', kernel_regularizer=l2, input_shape=(maxlen,)))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(Dense(8, activation='softmax'))

    # compiler
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(learning_rate=.01),
                  metrics=['accuracy'])
    epochs = 10
    print(model.summary())

    achitecture_path = os.path.join("..", "models", "nn_model_architecture.png")
    # plot model
    plot_model(model, to_file=achitecture_path, show_shapes=True, show_layer_names=True)
    print(f"Image of model architecture saved in {achitecture_path}")

    print("fitting nn-model")
    # a fit history of the network
    history = model.fit(X_train_feats, y_train_bin,
                        epochs=epochs,
                        verbose=True,
                        validation_data=(X_test_feats, y_test_bin))
    # plot history
    plot_history(history, epochs=epochs)

    predictions = model.predict(X_test_feats, verbose=True)
    # map each sample's highest-probability class index back to a season label
    # via the binarizer's class order; this replaces re-fitting a LabelEncoder
    # on the test labels alone, which silently assumed every season occurs in
    # the test split in the same alphabetical order
    y_pred = lb.classes_[np.argmax(predictions, axis=1)]

    # get the classification report
    metrics_nn = metrics.classification_report(y_test, y_pred)
    print(metrics_nn)
    # save metrics
    _save_report(os.path.join("..", "models", "NN_metrics.txt"), metrics_nn)

    print("We will now use grid search and crossvalidation to find a better model using an SGD-classifier")
    # Grid Search for SGD Classifier (stochastic gradient classifier)
    ## pipeline combining the tf-idf embedding and the classifier so both can
    ## be tuned together
    pipe = Pipeline(steps=[
        ('tfidf', TfidfVectorizer()),
        ('clf', SGDClassifier(random_state=random_state))
    ])

    ## hyper-parameter grid to search over
    parameters = {
        'tfidf__ngram_range': [(1, 1), (1, 2), (1, 3)],
        'tfidf__max_df': [1.0, 0.95, 0.9, 0.85],
        'tfidf__min_df': [0.0, 0.05],
        'clf__alpha': [1e-3, 1e-2, 1e-1],  # learning rate
        'clf__penalty': ['l2'],
    }

    search = GridSearchCV(pipe, parameters, n_jobs=-1, verbose=1, refit=True)
    gs_clf = search.fit(X_train, y_train)
    # fixed message: the original f-string ran the label into the number
    print(f"The best score: {gs_clf.best_score_}")
    print(f"The best model hyper parameters: {gs_clf.best_params_}")

    y_pred = gs_clf.predict(X_test)
    classifier_metrics_sgd = metrics.classification_report(y_test, y_pred)
    print(classifier_metrics_sgd)
    # save the classification report
    _save_report(os.path.join("..", "models", "SGD_metrics.txt"), classifier_metrics_sgd)
if __name__=="__main__":
    # Only run the full training pipeline when executed as a script.
    main()
| 2.34375 | 2 |
main.py | JAMJU/KernelMethod | 0 | 12762949 | import numpy as np
from logistic_regression import logistic_kernel_regression, compute_label
from kernel_creation import convert_spectral_kernel_quad, convert_spectral_kernel_quint, convert_spectral_kernel_trig
from kernel_creation import convert_acid_kernel, convert_acid_quad, convert_mismatch_lev, convert_lect_trig, get_mismatch_dict
from kernel_creation import get_correspondances, convert_mismatch_dico, get_full_corres, convert_encode
from kernel_creation import compute_test_matrix, compute_K_matrix, convert_lect_acid, compute_K_gaussian
from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted
from SVM import SVM, svm_compute_label
# DNA alphabet used to enumerate every possible k-mer.
list_letters = ["A", "C", "G", "T"]
# All 4**k substrings of length 3, 4, 5 and 6: the feature vocabularies for
# the spectrum/mismatch kernel conversions.
list_trig = [a + b + c for a in list_letters for b in list_letters for c in list_letters]
list_quad = [a + b + c + d for a in list_letters for b in list_letters for c in list_letters for d in list_letters]
list_quint = [a + b + c + d + e for a in list_letters for b in list_letters for c in list_letters for d in list_letters for e in list_letters]
list_six = [a + b + c + d + e + f for a in list_letters for b in list_letters for c in list_letters for d in list_letters for e in list_letters for f in list_letters]
# Codon table grouping codons by the amino acid they encode (keys partly in
# French, e.g. "Acide aspartique").
# NOTE(review): codons are written in the RNA alphabet (with 'U') while the
# sequences use DNA letters (with 'T') -- confirm the convert_acid_* helpers
# translate between the two alphabets.
dico_acid = {'Alanine': [ 'GCU', 'GCC', 'GCA', 'GCG'], 'Arginine': ['CGU', 'CGC', 'CGA', 'CGG' , 'AGA', 'AGG'],
             'Asparagine': ['AAU', 'AAC'], 'Acide aspartique': ['GAU', 'GAC'],
             'Cysteine': ['UGU', 'UGC'], 'Glutamine': ['CAA', 'CAG'], 'Acide glutamique':['GAA', 'GAG'],
             'Glycine':['GGU', 'GGC', 'GGA', 'GGG'], 'Histidine': ['CAU', 'CAC'], 'Isoleucine': ['AUU', 'AUC', 'AUA'],
             'Leucine': ['UUA', 'UUG' , 'CUU', 'CUC', 'CUA', 'CUG'], 'Lysine': ['AAA', 'AAG'],
             'Methionine': ['AUG'], 'Phenylalanine':['UUU', 'UUC'], 'Proline' :['CCU', 'CCC', 'CCA', 'CCG'],
             'Pyrrolysine': ['UAG'], 'Selenocysteine':['UGA'], 'Serine':['UCU', 'UCC', 'UCA', 'UCG' , 'AGU', 'AGC'],
             'Threonine':['ACU', 'ACC', 'ACA', 'ACG'], 'Tryptophane':['UGG'], 'Tyrosine':['UAU', 'UAC'],
             'Valine':['GUU', 'GUC', 'GUA', 'GUG'], 'Initiation': ['AUG'], 'Terminaison': ['UAG', 'UAA', 'UGA']}
def is_pos_def(x):
    """Return True when every eigenvalue of the matrix *x* is strictly positive."""
    eigenvalues = np.linalg.eigvals(x)
    return np.all(eigenvalues > 0)
## Parameters
# regularisation strength for the kernel logistic regression
lamb_log = 0.0000001
# regularisation strength for the SVM
lamb_svm = 0.00001
# bandwidth of the (optional, currently commented-out) Gaussian kernel
sigma = 0.8
# ridge added to the diagonal of K so that K_add is positive definite
add_param = 10.**(-10)
# k-mer vocabulary handed to the mismatch/"lect" conversions below
list_seq_id = list_six
# the following flags select which (expensive) lookup tables to precompute;
# only the tables for the conversion actually used need to be built
mis_lev = False
if mis_lev:
    dict_mismatch = get_mismatch_dict(list_seq_id)
mis_dic = False
# k-mer length and number of allowed mismatches for the mismatch kernels
size_seq = 6
nb_mis = 0
# offset passed to the convert_lect_* conversions (presumably a start index
# for the reading frame -- confirm against kernel_creation)
beg = 0
if mis_dic:
    dict_corres = get_correspondances(list_seq_id, nb_mis, list_letters)
    list_mis_corres = dict_corres.keys()
    print(list_mis_corres)
mis_dic_full = False
if mis_dic_full:
    dict_corres = get_full_corres(list_seq_id, nb_mis, list_letters)
    list_mis_corres = dict_corres.keys()
##
# predicted labels accumulated across the three datasets, then saved at the end
list_labels_log = []
list_labels_svm = []
# For each of the three datasets: load and featurise the sequences, centre the
# features, hold out 10% of training for validation, build the kernel matrix,
# train an SVM and predict labels for the corresponding test sequences.
for name in [ "0", "1","2"]:
    print ("beginning loading of the data")
    # Training data -- the commented lines are the alternative feature
    # conversions that were tried; only the 5-mer spectrum kernel is active.
    sequences = read_csv_file_data("data/Xtr"+ name+ ".csv")
    #list_converted = convert_spectral_kernel_trig(sequences, list_seq_id)
    #list_converted = convert_spectral_kernel_quad(sequences, list_quad)
    list_converted = convert_spectral_kernel_quint(sequences, list_quint)
    #list_converted = convert_spectral_kernel_quint(sequences, list_quint)
    #list_converted = convert_acid_kernel(sequences, dico_acid)
    #list_converted = convert_acid_quad(sequences, dico_acid, list_quad
    #list_converted = convert_mismatch_lev(sequences, list_seq_id, dict_mismatch, size_seq, nb_mis)
    #list_converted = convert_lect_trig(sequences, list_seq_id, beg)
    #list_converted = convert_lect_acid(sequences, dico_acid, beg)
    #list_converted = convert_mismatch_dico(sequences, dict_corres,list_mis_corres, list_seq_id)
    #list_converted = convert_encode(sequences, list_letters)
    training = np.asarray(list_converted, dtype = float)
    # to avoid huge values and to save time for the logistic regression :
    # NOTE(review): every row is divided by sm[0], the feature sum of the
    # *first* sample only -- confirm a per-row normalisation (training/sm[:,None])
    # was not intended.
    sm = np.sum(training, axis= 1)
    training = training/sm[0]
    # centre the features (the same mean is reused for the test data below)
    mean = np.mean(training, axis= 0)
    training = training - mean
    #vst = np.std(training, axis= 0)
    #training = training / vst
    #save_data_converted("spectral_kernel/Xtr"+ name+ ".csv", training)
    # label training data
    label = read_csv_file_label("data/Ytr"+ name+ ".csv")
    label= np.asarray(label).reshape((len(label), ))
    # hold out the first 10% of samples as a validation set
    size_test = int(training.shape[0]/10)
    test_train = training[0:size_test]
    label_test_train = label[0:size_test]
    print( label_test_train.shape)
    size_total = training.shape[0]
    training = training[size_test:size_total]
    label_train = label[size_test:size_total]
    print (label_train.shape)
    # Test data, converted with the same features and the same scaling/centring
    # statistics as the training data
    sequences_test = read_csv_file_data("data/Xte"+ name+ ".csv")
    #list_converted_test = convert_spectral_kernel_trig(sequences_test, list_seq_id)
    #list_converted_test = convert_spectral_kernel_quad(sequences_test, list_quad)
    list_converted_test = convert_spectral_kernel_quint(sequences_test, list_quint)
    #list_converted_test = convert_acid_kernel(sequences_test, dico_acid)
    #list_converted_test = convert_acid_quad(sequences_test, dico_acid, list_quad)
    #list_converted_test = convert_mismatch_lev(sequences_test, list_seq_id, dict_mismatch, size_seq, nb_mis)
    #list_converted_test = convert_lect_trig(sequences_test, list_seq_id, beg )
    #list_converted_test = convert_lect_acid(sequences_test, dico_acid, beg)
    #list_converted_test = convert_mismatch_dico(sequences_test, dict_corres,list_mis_corres, list_seq_id)
    #list_converted_test = convert_encode(sequences, list_letters)
    testing = np.asarray(list_converted_test, dtype = float)
    # to avoid huge values and to save time for the logistic regression :
    testing = testing/sm[0]
    testing = testing - mean
    #testing = testing/ vst
    # param for each dataset (the string below is disabled per-dataset tuning):
    """if name=="0":
        lamb_svm = 0.000008
        add_param = 10. ** (-10)
    if name=="1":
        lamb_svm = 0.00001
        add_param = 10.**(-10)
    if name == "2":
        lamb_svm = 0.000005
        add_param=10.**(-9)"""
    # dataset "2" keeps a larger diagonal ridge
    if name=="2":
        add_param = 10**(-9)
    print ("data loaded")
    # Computing the kernel matrix on the (reduced) training set
    print ("beginning computing K")
    K = compute_K_matrix(training)
    add = add_param*np.identity(K.shape[0])
    K_add = K + add # to make it positive definite
    #K = compute_K_gaussian(training, sigma)
    #K_add = K
    print(K)
    print("K shape", K.shape)
    print(is_pos_def(K_add))
    # cross-kernel between the training samples and the held-out validation set
    K_test_train = compute_test_matrix(training, test_train)
    print (K_test_train.shape)
    print ("K computed")
    """#Training : kernel logistic regression
    alpha = logistic_kernel_regression(K, label_train, lamb_log, 15, K_test_train, label_test_train)
    # Testing : kernel logistic regression
    Ktest = compute_test_matrix(training, testing)
    labels_test = compute_label(Ktest, alpha)
    list_labels_log = list_labels_log + labels_test"""
    # Training : SVM
    alpha = SVM(K_add, label_train, lamb_svm, K_test_train, label_test_train)
    print(alpha)
    # Testing : SVM on the competition test sequences
    Ktest = compute_test_matrix(training, testing)
    labels_test = svm_compute_label(Ktest, alpha)
    list_labels_svm = list_labels_svm + labels_test
# Write the concatenated predictions for all three datasets to one CSV.
save_label(0, list_labels_svm,"results/SVM-quint-centered-mixed.csv" )
| 2.109375 | 2 |
test/test_algorithm.py | ehigham/broad-challenge | 0 | 12762950 | # -*- coding: utf-8 -*-
import unittest
import networkx
from challenge.algorithm import dijkstras_shortest_path
class TestAlgorithmModule(unittest.TestCase):
    """Unit tests for challenge.algorithm.dijkstras_shortest_path."""

    def test_empty_graph(self):
        """Endpoints that are not members of graph.nodes raise ValueError"""
        graph = networkx.Graph()
        with self.assertRaises(ValueError):
            dijkstras_shortest_path(graph, 'A', 'C')
    def test_disjoint_graph(self):
        """Two nodes with no connecting edge yield an empty path"""
        graph = networkx.Graph()
        graph.add_nodes_from(['A', 'B'])
        path = dijkstras_shortest_path(graph, 'A', 'B')
        self.assertListEqual(path, [])
    def test_path_to_itself(self):
        """The path from a node to itself is empty"""
        graph = networkx.Graph()
        graph.add_edges_from([('A', 'B'), ('B', 'C')])
        path = dijkstras_shortest_path(graph, 'A', 'A')
        self.assertListEqual(path, [])
    def test_simple_shortest_path(self):
        """A - B - C """
        graph = networkx.Graph()
        graph.add_edges_from([('A', 'B'), ('B', 'C')])
        path = dijkstras_shortest_path(graph, 'A', 'C')
        self.assertListEqual(path, ['A', 'B', 'C'])
    def test_shortcut_path(self):
        """The shorter branch through G must be preferred:
        A - B - C - D - E - F
             \\         /
              --- G ---
        """
        graph = networkx.Graph()
        graph.add_edges_from([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'E'), ('E', 'F')])
        graph.add_edges_from([('B', 'G'), ('G', 'E')])
        path = dijkstras_shortest_path(graph, 'A', 'F')
        self.assertListEqual(path, ['A', 'B', 'G', 'E', 'F'])
    def test_cyclic_graph_path(self):
        """A cycle (B - C - G) must not trap or lengthen the search:
        A - B - C - D - E
            |   |
            - G -
        """
        graph = networkx.Graph()
        graph.add_edges_from([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'E')])
        graph.add_edges_from([('C', 'G'), ('G', 'B')])
        path = dijkstras_shortest_path(graph, 'A', 'E')
        self.assertListEqual(path, ['A', 'B', 'C', 'D', 'E'])
if __name__ == '__main__':
    # Allow running this test module directly: python test_algorithm.py
    unittest.main()
| 3.71875 | 4 |