# exc. 5.3.5
def distance(num1, num2, num3):
ans1 = abs(num2 - num1)
ans2 = abs(num2 - num3)
ans3 = abs(num3 - num1)
    ans4 = abs(num2 - num3)
    if ans1 == 1 or ans3 == 1:
        if (ans1 >= 2 and ans2 >= 2) or (ans3 >= 2 and ans4 >= 2):
return True
return False
print(distance(4, 5, 3))
|
#!/usr/bin/python
#Created : Wed 05 Nov 2008 08:31:22 PM GMT
#Last Modified : Wed 05 Nov 2008 09:37:17 PM GMT
#qpy:2
#qpy:console
import site
import os
import sys
#from time import strftime
from pyPdf import PdfFileWriter, PdfFileReader
output = PdfFileWriter()
if len(sys.argv) < 5:
    print "Usage: %s source_file start_page end_page new_file" % sys.argv[0]
sys.exit(1)
fail_asal = sys.argv[1]
muka_mula = sys.argv[2]
muka_akhir = sys.argv[3]
fail_baru = sys.argv[4]
input1 = PdfFileReader(file(fail_asal, "rb"))
mula = int(muka_mula)-1
akhir = int(muka_akhir)
for i in range(mula, akhir):
output.addPage(input1.getPage(i))
namafailbaru = os.path.dirname(fail_asal)+"/"+fail_baru+"-"+os.path.basename(fail_asal)
outputStream = file(namafailbaru, "wb")
output.write(outputStream)
outputStream.close()
print "Dokumen asal, %s mengandungi %s mukasurat." % (fail_asal,input1.getNumPages())
input2 = PdfFileReader(file(namafailbaru, "rb"))
print "Dokumen baru, %s mengandungi %s mukasurat." % (namafailbaru,input2.getNumPages())
|
from django.utils.deprecation import MiddlewareMixin
from django.utils import timezone
from accounts.models.user_profile import ClubUserProfile
from accounts.models import Messages
class ManageMiddleware(MiddlewareMixin):
def process_request(self, request):
if request.user.is_authenticated:
try:
profile = ClubUserProfile.objects.filter(phone_number=request.user.phone_number, is_active=True)
request.profile = profile[0]
            except (IndexError, ClubUserProfile.DoesNotExist):
request.profile = ClubUserProfile.objects.create(phone_number=request.user.phone_number, is_active=True,
gender=request.user.gender, job="anonymous",
joined_date=timezone.now().date())
class MessageMiddleware(MiddlewareMixin):
def process_request(self, request):
if request.user.is_authenticated:
if Messages.objects.filter(to_user=request.profile, is_read=False).exists():
request.have_message = True
else:
request.have_message = False
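# Registration sketch (not from the original project): both middlewares assume
# request.user is populated, so in settings.MIDDLEWARE they would go after
# Django's AuthenticationMiddleware, with ManageMiddleware listed before
# MessageMiddleware because the latter reads request.profile. The dotted path
# "accounts.middleware" is an assumed location for this module.
#
# MIDDLEWARE = [
#     # ...
#     "django.contrib.auth.middleware.AuthenticationMiddleware",
#     "accounts.middleware.ManageMiddleware",
#     "accounts.middleware.MessageMiddleware",
# ]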
|
from setuptools import setup, find_packages
setup(
name='sampi',
version='0.1.0',
author='Jonathan Lindbloom',
author_email='jonathan@lindbloom.com',
license='LICENSE',
packages=find_packages(),
description='A personal Python package for computational math-related things.',
long_description=open('README.md').read(),
)
|
import bnd_rebuilder
import unittest
from bnd_rebuilder import unpack_bnd
from bnd_rebuilder import repack_bnd
# Consume byte unit tests
class consume_byte_unit_tests(unittest.TestCase):
def setUp(self):
self.consume_byte_instance = bnd_rebuilder.consume_byte
return super().setUp()
def tearDown(self):
return super().tearDown()
class is_content_byte(consume_byte_unit_tests):
def runTest(self):
content = b'DCX\x00'
assert self.consume_byte_instance(content, 0, b'D', 1) == 1, \
"Expected: 1"
class is_content_is_not_byte(consume_byte_unit_tests):
def runTest(self):
content = b'DCX\x00'
self.assertRaises(ValueError, self.consume_byte_instance, content, 0, b'D', 3)
# Extract STRZ unit tests
class extract_strz_unit_test(unittest.TestCase):
def setUp(self):
self.extract_strz_instance = bnd_rebuilder.extract_strz
return super().setUp()
def tearDown(self):
return super().tearDown()
class extract_strz_string(extract_strz_unit_test):
def runTest(self):
        content = b'hello\x00'  # need the trailing \x00, otherwise infinite loop
        assert self.extract_strz_instance(content, 0) == 'hello', \
            "Expected: 'hello'"
# Appears bnd unit tests
class appears_bnd_unit_tests(unittest.TestCase):
def setUp(self):
self.appears_bnd_instance = bnd_rebuilder.appears_bnd
return super().setUp()
def tearDown(self):
return super().tearDown()
class is_bnd(appears_bnd_unit_tests):
def runTest(self):
assert self.appears_bnd_instance("BND3") == True, \
"This is not an instance of bnd"
class is_byte_garbage(appears_bnd_unit_tests):
def runTest(self):
assert self.appears_bnd_instance(b"MEMES") == False, \
"This is an instance of bnd"
class is_normal_array(appears_bnd_unit_tests):
def runTest(self):
assert self.appears_bnd_instance([70, 38, 39, 40, 21]) == False, \
"This is an instance of bnd"
# Offset next multiple unit tests
class offset_to_next_multiple_unit_tests(unittest.TestCase):
def setUp(self):
self.offset_to_next_multiple_instance = bnd_rebuilder.offset_to_next_multiple
return super().setUp()
def tearDown(self):
return super().tearDown()
class next_mult_is_zero(offset_to_next_multiple_unit_tests):
def runTest(self):
assert self.offset_to_next_multiple_instance(15, 0) == 0, \
"Expected: 0"
class next_mult(offset_to_next_multiple_unit_tests):
def runTest(self):
assert self.offset_to_next_multiple_instance(15, 30) == 15, \
"Expected: 15"
class next_mult_equals_offset(offset_to_next_multiple_unit_tests):
def runTest(self):
        assert self.offset_to_next_multiple_instance(15, 15) == 0, \
            "Expected: 0"
class unpack_bnd_unit_test(unittest.TestCase):
def setUp(self):
self.unpack_bnd_instance = bnd_rebuilder.unpack_bnd
return super().setUp()
def tearDown(self):
return super().tearDown()
class repack_bnd_unit_test(unittest.TestCase):
def setUp(self):
self.repack_bnd_instance = bnd_rebuilder.repack_bnd
return super().setUp()
def tearDown(self):
return super().tearDown()
class unpack_test(unpack_bnd_unit_test):
def runTest(self):
pack = repack_bnd([[3, '2', b'1']])
assert self.unpack_bnd_instance(pack) == [(3, '2', b'1')], \
"repack pass"
# class repack_test(repack_bnd_unit_test):
# def runTest(self):
# unpack = unpack_bnd(b' ttttttt\100 ')
# assert self.repack_bnd_instance(unpack) == [[3],"filepathlmao", "dummy"], \
# "repack pass"
if __name__ == "__main__":
unittest.main()
|
import os
import logging
import time
import string
import traceback
from buttons import BTNS
from pyrogram import Client, filters
import datetime
from pyrogram.errors import UserNotParticipant, ChatAdminRequired, UsernameNotOccupied
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from config import Config
@Client.on_callback_query()
async def cd_handler(bot, update):
if update.data =="subject_cd":
await update.message.edit_text(
text=f"Hey! {update.from_user.mention} Choose the Subject",
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(BTNS.SUBJECT_BTN)
)
elif update.data =="biology_cd":
await update.message.edit_text(
text=f"Hey! {update.from_user.mention} Choose the Chapter which you want to study in Biology",
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(BTNS.BCHAPTER_BTN)
)
elif update.data == "physics_cd":
await update.message.edit_text(
text=f"Hey! {update.from_user.mention} Choose the Chapter which you want to study in Physics",
disable_web_page_preview=True,
            reply_markup=InlineKeyboardMarkup(BTNS.PCHAPTER_BTN)
)
elif update.data =="chemistry_cd":
await update.message.edit_text(
text=f"Hey! {update.from_user.mention} Choose the Chapter which you wany to study in Chemistry",
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(CCHAPTER_BTN)
)
elif update.data =="bch1_cd":
await update.message.edit_text(
text=f"""<b>Lakshya NEET Batch PW🎯🎯🎯
Biology
Microbes in Human Welfare
Lecture 03
Lecture🔰
👉 https://d1d34p8vz63oiq.cloudfront.net/9eb5c7a1-611c-40ac-b7a8-d07dc56b5304/master.m3u8
Class Notes🔰
👉 https://d2bps9p1kiy4ka.cloudfront.net/5eb393ee95fab7468a79d189/50c941fa-52bc-4714-9cc3-e1af8b86b590.pdf
DPP🔰
👉 https://d2bps9p1kiy4ka.cloudfront.net/5eb393ee95fab7468a79d189/5b193fd6-9d46-4f43-80fe-530f4cd767e6.pdf
🦧 Use @bryll_urluploader_bot to download Lecture. 🦦
For any help contact💬 @bryll_helpdesk_bot</b>
""",
disable_web_page_preview=True,
            reply_markup=InlineKeyboardMarkup(BTNS.BCHAPTER_BTN)
)
elif update.data =="refresh":
try:
chat = await bot.get_chat_member(Config.FORCE_SUB, update.from_user.id)
if chat.status=='kicked':
return await update.reply_text('Hey you are kicked from my update channel. So, you are not able to use me🤣🤣🤣😂😂😂', quote=True)
except UserNotParticipant:
button = [[
InlineKeyboardButton('BRYLL Bots Updates Channel', url=f'https://t.me/bryllbots')
],[
InlineKeyboardButton('Updates Channel', url=f'https://t.me/{Config.FORCE_SUB}')
],[
InlineKeyboardButton('🔄 Refresh 🔄', callback_data='refresh')
]]
markup = InlineKeyboardMarkup(button)
return await update.answer(text=f"I told you to join my updates channel. Join the updates channel then send me message.", show_alert=True)
await update.message.edit_text(
text=f"Hello {message.from_user.mention},\nI'm a telegram Education Bot From BRYLL EDUCATION bot by @bryllbots",
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(START_BTN))
elif update.data =="close":
await update.message.delete()
|
# Copyright 2020 Tony Astolfi
#
import csv
import matplotlib.pyplot as plt
import matplotlib
with open('avail_model_85.csv') as fp:
r = csv.reader(fp)
d85 = [d for d in r]
d85 = [(int(n), float(p)) for n, p in d85[1:]]
(n_odd, a85_odd) = zip(*d85[:25])
(n_even, a85_even) = zip(*d85[25:])
with open('avail_model_94.csv') as fp:
r = csv.reader(fp)
d94 = [d for d in r]
a94 = [(int(n), float(p)) for n, p in d94[1:]]
(_, a94) = zip(*(a94 + [(0, 1.0)] * 12))
print(a94)
print(len(a94))
with open('avail_model_97.csv') as fp:
r = csv.reader(fp)
d97 = [d for d in r]
a97 = [(int(n), float(p)) for n, p in d97[1:]]
(_, a97) = zip(*(a97 + [(0, 1.0)] * 16))
print(a97)
print(len(a97))
clip = 10
n_odd = n_odd[:clip]
n_even = n_even[:clip]
a85_odd = a85_odd[:clip]
a85_even = a85_even[:clip]
a94 = a94[:clip]
a97 = a97[:clip]
plt.ylabel('Availability (log10)')
plt.xlabel('Cluster Size (N)')
plt.yscale('logit')
plt.scatter(n_odd, a85_odd)
plt.scatter(n_even, a85_even)
plt.scatter(n_odd, a94)
plt.scatter(n_odd, a97)
plt.grid()
#plt.ylim([0.5, 1.0])
line_a85_odd, = plt.plot(n_odd, a85_odd, label='Available(Host)=85% (odd N)')
line_a85_even, = plt.plot(n_even, a85_even, label='Available(Host)=85% (even N)')
line_a94, = plt.plot(n_odd, a94, label='Available(Host)=94% (3 wk/yr)')
line_a97, = plt.plot(n_odd, a97, label='Available(Host)=97% (1 wk/yr)')
#ax = plt.axes()
#ax.grid();
#ax.ticklabel_format(axis='y', style='plain')
plt.legend(handles=[line_a85_odd, line_a85_even, line_a94, line_a97])
plt.show()
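# Sketch (not part of the original plotting script) of the majority-quorum
# availability model that the avail_model_*.csv files appear to tabulate: a
# cluster of N hosts is counted as available when a strict majority of hosts is
# up, each host being up independently with probability p. This is an
# assumption about how the CSVs were generated, included only to make the
# plotted quantity concrete.
from math import comb

def cluster_availability(n, p):
    """Probability that a strict majority of n hosts (each up with probability p) is up."""
    majority = n // 2 + 1
    return sum(comb(n, k) * p**k * (1 - p)**(n - k) for k in range(majority, n + 1))

# e.g. cluster_availability(3, 0.97) ~= 0.9973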
|
#!/usr/bin/env python3
# JM: 30 Aug 2018
# process the *scalar.nc files (because I couldn't get XIOS to spit out just
# a number for whatever reason...)
import netCDF4
import glob, sys
#--------------------------------------------------------
# define the argument parser
import argparse
parser = argparse.ArgumentParser(description = "Process the scalar netcdf files and output as text")
# fixed arguments
parser.add_argument("data_dir", type = str,
help = "specify data directory")
# optional arguments
parser.add_argument("--lquery",
help = "print out the variables available", action = "store_true")
# collect arguments
args = parser.parse_args()
#--------------------------------------------------------
# Main commands
# grab the relevant filenames
file_list = []
for file in glob.glob(args.data_dir + "*scalar*.nc"):
file_list.append(file)
if not file_list:
    sys.exit("no files grabbed, are you in the right directory?")
# sort it according to the timestamps
file_list.sort()
# cycle through the files
for i in range(len(file_list)):
# grab output time in years
# assumes file format is $EXP_$PERIOD_$START_$END_scalar.nc
# so pulls out the $START and $END and keeps only the first four entries
# string here for use in output
start_time = file_list[i].replace(args.data_dir, "").split("_")[2][0:4]
end_time = file_list[i].replace(args.data_dir, "").split("_")[3][0:4]
data = netCDF4.Dataset(file_list[i])
t = data.variables["time_centered"][:]
if args.lquery:
for name, variable in data.variables.items():
for attrname in variable.ncattrs():
if attrname == "standard_name":
print("{} -- {}".format(name, getattr(variable, attrname)))
data.close()
print(" ")
sys.exit("finished query, exiting scalar_to_txt...")
else:
# ?? could do something like the query loop above to pull out all keys
# and dump accordingly; leaving it as manual for now
# pull out the data written in 2d field for some reason and write it out
txt_file = open(args.data_dir + file_list[i].replace(args.data_dir, "").replace(".nc", ".txt"), "w")
txt_file.write("%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n"
% ("time",
"scvoltot", "scsshtot", "scsshste", "scsshtst", "sctemtot",
"scsaltot",
"bgtemper", "bgsaline", "bgheatco", "bgsaltco", "bgvolssh",
"bgvole3t", "bgfrcvol", "bgfrctem", "bgfrcsal"
)
)
for kt in range(len(t)):
print("processing %s at index %i / %i..."
% (file_list[i].replace(args.data_dir, ""), kt, len(t))
)
# global mean/totals
time = (t[kt] - t[0]) / (3600 * 24 * 365) + int(start_time)
scvoltot = data.variables["scvoltot"][kt, 0, 0] # sea water volume, m^3
scsshtot = data.variables["scsshtot"][kt, 0, 0] # mean ssh, m
scsshste = data.variables["scsshste"][kt, 0, 0] # mean ssh steric, m
scsshtst = data.variables["scsshtst"][kt, 0, 0] # mean ssh thermosteric, m
sctemtot = data.variables["sctemtot"][kt, 0, 0] # mean temperature, C
scsaltot = data.variables["scsaltot"][kt, 0, 0] # mean salinity, psu
#global drifts wrt time step 1
bgtemper = data.variables["bgtemper"][kt, 0, 0] # mean temperature, C
bgsaline = data.variables["bgsaline"][kt, 0, 0] # mean saline, psu
bgheatco = data.variables["bgheatco"][kt, 0, 0] # mean heat content, 1e+20 J
bgsaltco = data.variables["bgsaltco"][kt, 0, 0] # mean salt content, 1e-3 km^3
bgvolssh = data.variables["bgvolssh"][kt, 0, 0] # mean ssh volume, km^3
bgvole3t = data.variables["bgvole3t"][kt, 0, 0] # mean volume variation, km^3
bgfrcvol = data.variables["bgfrcvol"][kt, 0, 0] # mean volume from forcing, km^3
bgfrctem = data.variables["bgfrctem"][kt, 0, 0] # mean heat content from forcing, 1e+20 J
bgfrcsal = data.variables["bgfrcsal"][kt, 0, 0] # mean salt content from forcing, 1e-3 km^3
txt_file.write("%.2f %.8e %.8e %.8e %.8e %.8e %.8e %.8e %.8e %.8e %.8e %.8e %.8e %.8e %.8e %.8e\n"
% (time,
scvoltot, scsshtot, scsshste, scsshtst, sctemtot,
scsaltot,
bgtemper, bgsaline, bgheatco, bgsaltco, bgvolssh,
bgvole3t, bgfrcvol, bgfrctem, bgfrcsal
)
)
txt_file.close()
data.close()
print("finished processing, exiting scalar_to_txt...")
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2020
# --------------------------------------------------------------------------
# gendoc: ignore
import importlib
import platform
import sys
import warnings
from sys import version_info
def check_import(mname):
try:
importlib.import_module(mname)
return True
except ImportError:
return False
def check_platform():
platform_error_msg = "docplex is not compatible with this version of Python: only 64 bits on Windows, Linux, Darwin and AIX, with Python 2.7.9+, 3.4+ are supported."
platform_system = platform.system()
if platform_system in ('Darwin', 'Linux', 'Windows', 'Microsoft', 'AIX'):
if version_info[0] == 3:
if version_info < (3, 4, 0):
warnings.warn(platform_error_msg)
elif version_info[0] == 2:
if version_info[1] != 7:
warnings.warn(platform_error_msg)
else:
warnings.warn(platform_error_msg)
else:
print("docplex is not officially supported on this platform. Use it at your own risk.", RuntimeWarning)
is_64bits = sys.maxsize > 2 ** 32
if is_64bits is False:
warnings.warn("docplex is not officially supported on 32 bits. Use it at your own risk.", RuntimeWarning)
def run_docplex_check_list():
check_platform()
from docplex.version import latest_cplex_major, latest_cplex_minor
cplex_latest_version_as_tuple = (latest_cplex_major, latest_cplex_minor)
diagnostics = []
# check requirements
for rm in ["six", "enum", "cloudpickle"]:
if not check_import(rm):
diagnostics.append("Module {0} is missing, run: pip install {0}".format(rm))
# check pandas
try:
import pandas as pd # @UnusedImport
# noinspection PyUnresolvedReferences
from pandas import DataFrame
DataFrame({})
except ImportError:
print("-- pandas is not present, some features might be unavailable.")
from docplex.mp.environment import Environment
Environment().print_information()
# check cplex
try:
# noinspection PyUnresolvedReferences
from cplex import Cplex
cpx = Cplex()
cpxv = cpx.get_version()
cpxvt = tuple(float(x) for x in cpx.get_version().split("."))[:2]
lcpxv = ".".join(str(z) for z in cplex_latest_version_as_tuple)
if cpxvt < cplex_latest_version_as_tuple:
print("Warning: Your cplex version {0} is not the latest, {1} is available".format(cpxv, lcpxv))
elif cpxvt > cplex_latest_version_as_tuple:
print("* Your cplex version {0} is ahead of the latest DOcplex-compatible version {1}, this might not be compatible.".format(cpxv, lcpxv))
else:
print("* Your cplex version {0} is the latest available".format(cpxv))
cpx.end()
except ImportError as ie:
Cplex = None
diagnostics.append("No local installation of CPLEX has been found.")
print("Cplex DLL not found, error importing cplex: {0!s}".format(ie))
check_python_path(diagnostics)
# check creation of an empty model...
try:
if Cplex:
# noinspection PyUnresolvedReferences
from docplex.mp.model import Model
Model()
# promotional?
if Model.is_cplex_ce():
print("! Cplex promotional version, limited to 1000 variables, 1000 constraints")
diagnostics.append("Your local CPLEX edition is limited. Consider purchasing a full license.")
except ImportError:
print("Docplex is not present: cannot import class docplex.mp.model")
diagnostics.append("Your installation of DOcplex may be corrupted.")
except Exception as e:
print("Exception raised when creating one model instance: {0!s}".format(e))
diagnostics.append("Your installation of DOcplex may be corrupted.")
if diagnostics:
print("\n!! diagnostics: {0}".format(len(diagnostics)))
for s in diagnostics:
print(" -- {0}".format(s))
else:
print("> No problem found: you're all set!")
def cplex_system_dir():
platform_system = platform.system().lower()
if 'windows' in platform_system:
return 'x64_win64'
elif 'linux' in platform_system:
return 'x86-64_linux'
elif 'darwin' in platform_system:
return ''
else:
return None
def check_python_path(diagnostics):
import os
pypaths = os.environ.get('PYTHONPATH')
if not pypaths:
print("No PYTHONPATH is set, you must add cplex Python to the PYTHONPATH to solve")
else:
expected_sysname = cplex_system_dir()
if expected_sysname:
platform_version = platform.python_version_tuple()
py_version2 = "%s.%s" % platform_version[:2]
for ppath in pypaths.split(';'):
base, last = os.path.split(ppath)
if last == expected_sysname and py_version2 not in base:
diagnostics.append("Possible version mismatch in PYTHONPATH: {0}".format(ppath))
if __name__ == "__main__":
run_docplex_check_list()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Copyright 2015 RAPP
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# Authors: Manos Tsardoulias
# contact: etsardou@iti.gr
import sys
import time
import os
from pylab import *
from scipy.io import wavfile
## @class EnergyDenoise
# Performs energy denoising on an audio file
class EnergyDenoise:
## Performs energy-based denoising
#
# Handles service callback
# rapp_audio_processing.AudioProcessing#energyDenoiseCallback
#
# @param audio_file [string] Audio file path
# @param scale [float] Energy denoise scale
# @param denoised_audio_file [string] Path to write denoised audio file
def energyDenoise(self, audio_file, scale, denoised_audio_file, energy_denoising_debug):
if not os.path.isfile(audio_file):
return False
samp_freq, signal = wavfile.read(audio_file)
samples = signal.shape[0]
sq_signal = signal * 1.0
if energy_denoising_debug:
timearray = arange(0, samples*1.0, 1)
timearray /= samp_freq
timearray *= 1000.0
subplot(3,1,1)
plot(timearray, signal, color = 'k')
for i in range(0, len(sq_signal)):
sq_signal[i] *= sq_signal[i]
mean_sq = mean(sq_signal)
for i in range(0, len(sq_signal)):
if sq_signal[i] < scale * mean_sq:
signal[i] = 0
if energy_denoising_debug:
timearray = arange(0, samples*1.0, 1)
timearray /= samp_freq
timearray *= 1000.0
subplot(3,1,2)
plot(timearray, signal, color = 'k')
if energy_denoising_debug:
show()
wavfile.write(denoised_audio_file, samp_freq, signal)
return True
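# Sketch (not part of the original class): the element-wise loops above can be
# expressed with numpy vectorized operations. This helper applies the same
# thresholding rule (zero out samples whose squared value is below
# scale * mean(signal**2)) to an in-memory array, without plotting or file I/O.
def energy_denoise_array(signal, scale):
    sq_signal = signal.astype(float) ** 2   # square in float to avoid int overflow
    denoised = signal.copy()                # keep the original dtype for writing back
    denoised[sq_signal < scale * sq_signal.mean()] = 0
    return denoised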
|
#coding:utf-8
'''
filename:comprehension.py
chap:4
subject:18
conditions:
solution:
'''
# Find the numbers less than 0 in [2,4,-7,19,-2,-1,45]
lst = [2,4,-7,19,-2,-1,45]
print([i for i in lst if i<0])
# Find the subjects in {'python':89,'java':58,'physics':65,'math':87,'chinese':74,'english':60} with a score above the average
scores = {'python':89,'java':58,'physics':65,'math':87,'chinese':74,'english':60}
print([k for k,v in scores.items() if v>sum(scores.values())/len(scores)])
# Compute the sum of the squares of the integers from 1 to 100
print(sum([i**2 for i in range(1,101)]))
s=0
i = 1
while i<=100:
s+=i**2
i+=1
print(s)
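# Alternative (sketch): a generator expression gives the same sum as the list
# comprehension above without building the intermediate list.
print(sum(i**2 for i in range(1, 101)))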
|
"""
A utility function for walking a simplified document
"""
from inspect import signature
def walk(document, fun, TYPE="document", no_iter=None):
"""
    Walk a document tree and apply a function to matching nodes
:param document: Simplified Docx element to walk
:type document:object
    :param fun: A function to apply at each node in the document. If ``fun``
        takes just one parameter, it is passed the current element;
        otherwise it is passed the current element, the containing element,
        and the position of the current element within the containing
        element, which is an integer if the current element is contained in
        a list of ``VALUE``s and ``None`` if the current element is the
        parent's ``VALUE``.
:type fun: Callable
:param TYPE: The node ``TYPE``s at which to apply the function ``fun``
:type TYPE: str
    :param no_iter: Optional. A list of element ``TYPE``s that the walker
        should not walk into. For example, setting
        ``no_iter=["paragraph"]`` would prevent the walker from traversing
        the children (``VALUE``s) of paragraph nodes.
:type no_iter: Sequence[str]
:return: ``None``
:return type: None
"""
_sig = signature(fun)
_params = _sig.parameters
has_multiple_parameters = len(_params) > 1 or any(
param.kind in (param.VAR_KEYWORD, param.VAR_POSITIONAL)
for param in _params.values()
)
stack = [(document, None)]
while True:
try:
current, index = stack.pop()
except IndexError:
break
if index is None:
# CURRENT IS AN OBJECT:
# APPLY THE FUNCTION
if TYPE is None or current.get("TYPE", None) == TYPE:
if has_multiple_parameters:
try:
parent, parent_index = stack[-1]
except IndexError:
out = fun(current, None, None)
else:
out = fun(current, parent, parent_index - 1)
else:
out = fun(current)
if out is not None:
return out
val = current.get("VALUE", None)
if isinstance(val, dict) and current.get("TYPE", None):
                # CHILD IS AN ELEMENT TO BE WALKED
stack.append((val, None))
continue
if (
isinstance(val, list)
and val
and val[0].get("TYPE", None)
and (no_iter is None or current["TYPE"] not in no_iter)
):
# CHILD IS A LIST OF ELEMENTS
stack.append((val, 0))
continue
else:
# CURRENT IS A LIST
try:
nxt = current[index]
except IndexError:
pass
else:
stack.append((current, index + 1))
stack.append((nxt, None))
del nxt
del current, index
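if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module), assuming a
    # simplified-docx-style tree of {"TYPE": ..., "VALUE": ...} nodes.
    sample = {
        "TYPE": "document",
        "VALUE": [
            {"TYPE": "paragraph", "VALUE": [
                {"TYPE": "text", "VALUE": "Hello"},
                {"TYPE": "text", "VALUE": "world"},
            ]},
        ],
    }
    texts = []
    # Collect the VALUE of every "text" node; returning None keeps the walk going.
    walk(sample, lambda node: texts.append(node["VALUE"]), TYPE="text")
    print(texts)  # ['Hello', 'world']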
|
"""
:mod:`frbr` Models for FRBR Redis datastore
"""
__author__ = "Jeremy Nelson"
import datetime,os,logging
import redis,urllib2
import namespaces as ns
import common
from lxml import etree
FRBR_RDF_URL = 'http://metadataregistry.org/schema/show/id/5.rdf'
def load_rdf(rdf_url=FRBR_RDF_URL):
"""
Function takes an URL to a RDF file and creates a FRBR Redis
datastore using key syntax of **frbr.reg_name**
:param rdf_url: URL of FRBR RDF, default is FRBR_RDF_URL
constant.
"""
raw_frbr_rdf = urllib2.urlopen(rdf_url).read()
frbr_rdf = etree.XML(raw_frbr_rdf)
rdf_descriptions = frbr_rdf.findall('{%s}Description' % \
ns.RDF)
for element in rdf_descriptions:
about_url = element.attrib['{%s}about' % ns.RDF]
rdf_type = element.find('{%s}type' % ns.RDF)
rdfs_label = element.find('{%s}label' % ns.RDFS)
reg_name = element.find('{%s}name' % ns.REG)
if reg_name is not None:
redis_key = 'frbr.%s' % reg_name.text
elif rdfs_label is not None:
            redis_key = 'frbr.%s' % rdfs_label.text.strip()
else:
redis_key = None
skos_definition = element.find('{%s}definition' % ns.SKOS)
if rdf_type is not None:
            if ('{%s}resource' % ns.RDF) in rdf_type.attrib:
resource_type = rdf_type.attrib['{%s}resource' % ns.RDF]
if resource_type == 'http://www.w3.org/2002/07/owl#Class':
common.redis_server.set("%s:label" % redis_key,
rdfs_label.text)
common.redis_server.set("%s:definition" % redis_key,
skos_definition.text)
print("Added %s with key %s to datastore" % (rdfs_label,
redis_key))
class Expression(common.BaseModel):
"""
:class:`Expression` class includes attributes and roles with other Entities in
the datastore.
"""
def __init__(self,**kwargs):
"""
Creates an instance of :class:`Expression`
:param redis_key: Redis key for FRBR Expression, default is frbr:Expression
"""
if not kwargs.has_key("redis_key"):
kwargs['redis_key'] = 'frbr:Expression'
common.BaseModel.__init__(self,**kwargs)
class Item(common.BaseModel):
"""
:class:`Item` class includes attributes and roles with other Entities in
the datastore.
"""
def __init__(self,**kwargs):
"""
Creates an instance of :class:`Item`
:param redis_key: Redis key for FRBR Item, default is
frbr:Item
"""
if not kwargs.has_key("redis_key"):
kwargs['redis_key'] = 'frbr:Item'
common.BaseModel.__init__(self,**kwargs)
class Manifestation(common.BaseModel):
"""
:class:`Manifestation` class includes attributes and roles with other Entities in
the datastore.
"""
def __init__(self,**kwargs):
"""
Creates an instance of :class:`Manifestation`
:param redis_key: Redis key for FRBR Manifestation, default is frbr:Manifestation
"""
if not kwargs.has_key("redis_key"):
kwargs['redis_key'] = 'frbr:Manifestation'
common.BaseModel.__init__(self,**kwargs)
class Work(common.BaseModel):
"""
:class:`Work` class includes attributes and roles with other Entities in
the datastore.
"""
def __init__(self,**kwargs):
"""
Creates an instance of :class:`Work`
:param redis_key: Redis key for FRBR Work, default is frbr:Work
"""
if not kwargs.has_key("redis_key"):
kwargs['redis_key'] = 'frbr:Work'
common.BaseModel.__init__(self,**kwargs)
|
from spider.collector.data_collector import DataRecord
from spider.conf import SpiderConfig
from spider.conf.observe_meta import EntityType, ObserveMetaMgt
from spider.data_process.prometheus_processor import PrometheusProcessor
from spider.entity_mgt import ObserveEntityCreator
from .common import assert_data_record, assert_observe_entity
from .common import gen_instant_resp_data, gen_task_metric_item, gen_tcp_link_metric_item, append_metric_items
from .common import init_spider_config, init_observe_meta_mgt
def setup_module():
init_spider_config()
init_observe_meta_mgt()
class TestPrometheusProcessor:
def setup_class(self):
self.prometheus_processor = PrometheusProcessor()
# test data
self.metric_id1 = "gala_gopher_tcp_link_rx_bytes"
self.resp_data1 = gen_instant_resp_data()
self.metric_item11 = gen_tcp_link_metric_item('rx_bytes', machine_id='machine1')
self.metric_item12 = gen_tcp_link_metric_item('rx_bytes', machine_id='machine2')
append_metric_items(self.resp_data1, [self.metric_item11, self.metric_item12])
self.metric_id2 = "gala_gopher_tcp_link_tx_bytes"
self.resp_data2 = gen_instant_resp_data()
self.metric_item21 = gen_tcp_link_metric_item('tx_bytes', machine_id='machine1')
self.metric_item22 = gen_tcp_link_metric_item('tx_bytes', machine_id='machine2')
append_metric_items(self.resp_data2, [self.metric_item21, self.metric_item22])
self.metric_id3 = "gala_gopher_task_fork_count"
self.resp_data3 = gen_instant_resp_data()
self.metric_item31 = gen_task_metric_item('fork_count', pid=1)
self.metric_item32 = gen_task_metric_item('fork_count', pid=2)
append_metric_items(self.resp_data3, [self.metric_item31, self.metric_item32])
def _mock_requests(self, requests_mock):
spider_config = SpiderConfig()
prometheus_conf = spider_config.prometheus_conf
url = prometheus_conf.get("base_url") + prometheus_conf.get("instant_api")
requests_mock.get(url, json={})
requests_mock.get(url + "?query={}".format(self.metric_id1), json=self.resp_data1)
requests_mock.get(url + "?query={}".format(self.metric_id2), json=self.resp_data2)
requests_mock.get(url + "?query={}".format(self.metric_id3), json=self.resp_data3)
def test_collect_observe_entity(self, requests_mock):
observe_meta_mgt = ObserveMetaMgt()
self._mock_requests(requests_mock)
observe_meta = observe_meta_mgt.observe_meta_map.get(EntityType.TCP_LINK.value)
res = self.prometheus_processor.collect_observe_entity(observe_meta, 0)
assert isinstance(res, list)
expect_res = [self.metric_item11, self.metric_item12, self.metric_item21, self.metric_item22]
assert len(res) == len(expect_res)
for ret_item, expect_item in zip(res, expect_res):
assert isinstance(ret_item, DataRecord)
assert_data_record(ret_item, expect_item)
def test_collect_observe_entities(self, requests_mock):
self._mock_requests(requests_mock)
res = self.prometheus_processor.collect_observe_entities(0)
assert isinstance(res, dict)
assert len(res) == 2
assert EntityType.TASK.value in res
assert EntityType.TCP_LINK.value in res
assert len(res[EntityType.TASK.value]) == 2
assert len(res[EntityType.TCP_LINK.value]) == 4
def test_aggregate_entities_by_label(self, requests_mock):
self._mock_requests(requests_mock)
observe_entities = self.prometheus_processor.collect_observe_entities(0)
expect_res = {
"task": [
{"__name__": "gala_gopher_task_fork_count", "pid": 1, "fork_count": 1, "timestamp": 0,
"task_name": "task0", "machine_id": "machine0"},
{"__name__": "gala_gopher_task_fork_count", "pid": 2, "fork_count": 1, "timestamp": 0,
"task_name": "task0", "machine_id": "machine0"},
]
}
res = self.prometheus_processor.aggregate_entities_by_label(observe_entities)
assert isinstance(res, dict)
assert "task" in res
assert len(res.get('task')) == len(expect_res.get('task'))
for ret_item, expect_item in zip(res.get('task'), expect_res.get('task')):
for key in ret_item:
assert ret_item[key] == expect_item[key]
def test_get_observe_entities(self, requests_mock):
observe_meta_mgt = ObserveMetaMgt()
self._mock_requests(requests_mock)
task_meta = observe_meta_mgt.get_observe_meta('task')
task_data = {'machine_id': 'machine0', 'pid': 1, 'fork_count': 1, 'task_name': 'task0', 'timestamp': 0}
task_data1 = {'machine_id': 'machine0', 'pid': 2, 'fork_count': 1, 'task_name': 'task0', 'timestamp': 0}
expect_res = [
ObserveEntityCreator.create_observe_entity('task', task_data, task_meta),
ObserveEntityCreator.create_observe_entity('task', task_data1, task_meta),
]
res = self.prometheus_processor.get_observe_entities(0)
for expect_entity in expect_res:
is_exist = False
for ret_entity in res:
if ret_entity.id == expect_entity.id:
assert_observe_entity(ret_entity, expect_entity)
is_exist = True
break
assert is_exist
|
from __future__ import absolute_import, division, print_function
import sys
from subprocess import check_call
from glue.tests.helpers import requires_qt
from .._deps import Dependency, categories
class TestDependency(object):
def test_installed(self):
d = Dependency('math', 'the math module')
assert d.installed
def test_uninstalled(self):
d = Dependency('asdfasdf', 'Non-existent module')
assert not d.installed
def test_installed_str(self):
d = Dependency('math', 'info')
assert str(d) == " math:\tINSTALLED (unknown version)"
def test_noinstalled_str(self):
d = Dependency('asdfasdf', 'info')
assert str(d) == " asdfasdf:\tMISSING (info)"
def test_failed_str(self):
d = Dependency('asdfasdf', 'info')
d.failed = True
assert str(d) == " asdfasdf:\tFAILED (info)"
@requires_qt
def test_optional_dependency_not_imported():
"""
Ensure that a GlueApplication instance can be created without
importing any non-required dependency
"""
optional_deps = categories[5:]
    deps = [dep.module for category, deps in optional_deps for dep in deps]
code = """
class ImportDenier(object):
__forbidden = set(%s)
def find_module(self, mod_name, pth=None):
if pth:
return
if mod_name in self.__forbidden:
return self
def load_module(self, mod_name):
raise ImportError("Importing %%s" %% mod_name)
import sys
sys.meta_path.append(ImportDenier())
from glue.app.qt import GlueApplication
from glue.core import data_factories
ga = GlueApplication()
""" % deps
cmd = [sys.executable, '-c', code]
check_call(cmd)
|
# -*- coding: utf-8 -*-
import mock
from typing import Any
from zerver.lib.attachments import user_attachments
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Attachment
class AttachmentsTests(ZulipTestCase):
def setUp(self):
# type: () -> None
user_profile = self.example_user('cordelia')
self.attachment = Attachment.objects.create(
file_name='test.txt', path_id='foo/bar/test.txt', owner=user_profile)
def test_list_by_user(self):
# type: () -> None
user_profile = self.example_user('cordelia')
self.login(user_profile.email)
result = self.client_get('/json/attachments')
self.assert_json_success(result)
attachments = user_attachments(user_profile)
self.assertEqual(result.json()['attachments'], attachments)
def test_remove_attachment_exception(self):
# type: (Any) -> None
user_profile = self.example_user('cordelia')
self.login(user_profile.email)
with mock.patch('zerver.lib.attachments.delete_message_image', side_effect=Exception()):
result = self.client_delete('/json/attachments/{id}'.format(id=self.attachment.id))
self.assert_json_error(result, "An error occured while deleting the attachment. Please try again later.")
@mock.patch('zerver.lib.attachments.delete_message_image')
def test_remove_attachment(self, ignored):
# type: (Any) -> None
user_profile = self.example_user('cordelia')
self.login(user_profile.email)
result = self.client_delete('/json/attachments/{id}'.format(id=self.attachment.id))
self.assert_json_success(result)
attachments = user_attachments(user_profile)
self.assertEqual(attachments, [])
def test_list_another_user(self):
# type: () -> None
user_profile = self.example_user('iago')
self.login(user_profile.email)
result = self.client_get('/json/attachments')
self.assert_json_success(result)
self.assertEqual(result.json()['attachments'], [])
def test_remove_another_user(self):
# type: () -> None
user_profile = self.example_user('iago')
self.login(user_profile.email)
result = self.client_delete('/json/attachments/{id}'.format(id=self.attachment.id))
self.assert_json_error(result, 'Invalid attachment')
user_profile_to_remove = self.example_user('cordelia')
attachments = user_attachments(user_profile_to_remove)
self.assertEqual(attachments, [self.attachment.to_dict()])
def test_list_unauthenticated(self):
# type: () -> None
result = self.client_get('/json/attachments')
self.assert_json_error(result, 'Not logged in: API authentication or user session required', status_code=401)
def test_delete_unauthenticated(self):
# type: () -> None
result = self.client_delete('/json/attachments/{id}'.format(id=self.attachment.id))
self.assert_json_error(result, 'Not logged in: API authentication or user session required', status_code=401)
|
# -*- coding: utf-8 -*-
import random
from src.sprites.characters.behaviours.RavenBehaviourState import *
from src.sprites.Character import *
from src.sprites.MySprite import *
from src.sprites.EnemyRange import *
# ------------------------------------------------------------------------------
# RavenFollowPlayerState class
class RavenFollowPlayerState(RavenBehaviourState):
def __init__(self, previousState):
RavenBehaviourState.__init__(self)
self.delayTime = random.randint(4, 6)*1000
self.elapseTime = 0
self.previousState = previousState
def move_ai(self, enemy, player):
        # Get the positions of the enemy and the player
(enemyX, enemyY) = enemy.rect.center
(playerX, playerY) = player.rect.center
        # Get the angle between the enemy and the player
angle = int(math.degrees(math.atan2(enemyY-playerY, playerX-enemyX)))
        # Correction when the angle is between 180 and 360
if angle < 0:
angle = 360 + angle
        # Work out which way the character has to move
lookAt, move = EnemyRange.discretice_angle(angle)
        # Update the character's movement
Character.move(enemy, move)
        # Check whether we are colliding with the player, in order to go back
        # to the previous state
if pygame.sprite.collide_mask(player, enemy):
enemy.change_behaviour(self.previousState)
self.previousState.angle = int(angle+180)
if self.previousState.angle > 360:
self.previousState.angle -= 360
def update(self, enemy, time, mapRect, mapMask):
        # Update the character's movement
Character.update_movement(enemy, time)
enemy.speed = (enemy.speed[0]*1.25, enemy.speed[1]*1.25)
MySprite.update(enemy, time)
self.elapseTime += time
if self.elapseTime > self.delayTime or not mapRect.inflate(-48, -48).contains(enemy.rect):
enemy.change_behaviour(self.previousState)
|
#!/usr/bin/env python
# Written by Ken Yin
import os
import pydot
import psutil
import subprocess
import importlib
import pkgutil
import inspect
import apt
import argparse
import random
import string
from simulator.builders import BuilderBase, BuilderSelector
from simulator.utilities.ImageDepot import ImageDepot
from collections import OrderedDict
#from logging import getLogger
import logging
import sys
from simulator.utilities.LogWrapper import getLogger
log = getLogger(__name__)
class DotSimulator(object):
def __init__(self, **kwargs):
# Set the simulation directory
if 'sim_dir' in kwargs:
self.sim_dir = kwargs['sim_dir']
else:
unique_id = "".join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(5))
if not os.path.exists('/tmp/{0}'.format(os.getlogin())):
os.mkdir('/tmp/{0}'.format(os.getlogin()))
self.sim_dir = '/tmp/{0}/{1}-{2}/'.format(os.getlogin(), self.__class__.__name__, unique_id)
os.mkdir(self.sim_dir)
# Set the Image Depot directory
if 'image_depot' in kwargs:
self.image_depot_dir = kwargs['image_depot']
else:
self.image_depot_dir = '/media/psf/image_depot'
self.image_depot = ImageDepot(self.image_depot_dir)
# The class inheriting DotSimulator should also be
# inheriting from DotTopo. This is where self.graph
# is defined.
self.builder = BuilderSelector(self, self.sim_dir, self.image_depot).builder
def configure(self):
pass
def run(self):
self.builder.run()
def stop(self):
self.builder.stop()
def run_from_cmdline(self):
parser = argparse.ArgumentParser(description='Start/Stop PyDotSimulator')
parser.add_argument('--info', action='store_true', help='Display the PyDot topology', default=None)
parser.add_argument('--start', action='store_true', help='Start the PyDot topology', default=None)
parser.add_argument('--stop', action='store_true', help='Stop the PyDot topology', default=None)
parser.add_argument('--loglevel', help='Set the logging level of the output', choices=['DEBUG', 'INFO', 'WARN', 'ERROR'], default='INFO')
parser.add_argument('--dir', help='Directory that the simulation run/stores info', default=None)
parser.add_argument('--image-depot', help='Directory that stores all the base VM images', default=None)
args = parser.parse_args()
if args.loglevel == 'DEBUG':
level = logging.DEBUG
elif args.loglevel == 'ERROR':
level = logging.ERROR
elif args.loglevel == 'WARN':
level = logging.WARN
else:
level = logging.INFO
# Set Logger
logging.getLogger().setLevel(level)
FORMAT = "%(asctime)s:%(levelname)7s:%(name)24s: %(message)s"
logging.basicConfig(format=FORMAT)
if args.image_depot:
if os.path.exists(args.image_depot):
self.image_depot = args.image_depot
else:
log.info('The image depot {0} isn\'t a directory'.format(args.image_depot))
sys.exit(1)
if args.info:
log.info(self.show())
if args.start and (not args.stop):
log.debug("Starting Simulation in the directory: {0}".format(self.sim_dir))
self.run()
elif args.stop and (not args.start) and args.dir:
            log.debug('Stopping Simulation in {0}'.format(args.dir))
self.sim_dir = args.dir
# Check if the simulation directory ends with '/'
if not self.sim_dir.endswith('/'):
self.sim_dir += '/'
self.builder.sim_dir = self.sim_dir
self.stop()
|
from django.conf import settings
DRF_HAYSTACK_NEGATION_KEYWORD = getattr(settings, "DRF_HAYSTACK_NEGATION_KEYWORD", "not")
GEO_SRID = getattr(settings, "GEO_SRID", 4326)
|
'''OpenGL extension NV.pixel_data_range
Overview (from the spec)
The vertex array range extension is intended to improve the
efficiency of OpenGL vertex arrays. OpenGL vertex arrays' coherency
model and ability to access memory from arbitrary locations in memory
prevented implementations from using DMA (Direct Memory Access)
operations.
Many image-intensive applications, such as those that use dynamically
generated textures, face similar problems. These applications would
like to be able to sustain throughputs of hundreds of millions of
pixels per second through DrawPixels and hundreds of millions of
texels per second through TexSubImage.
However, the same restrictions that limited vertex throughput also
limit pixel throughput.
By the time that any pixel operation that reads data from user memory
returns, OpenGL requires that it must be safe for the application to
start using that memory for a different purpose. This coherency
model prevents asynchronous DMA transfers directly out of the user's
buffer.
There are also no restrictions on the pointer provided to pixel
operations or on the size of the data. To facilitate DMA
implementations, the driver needs to know in advance what region of
the address space to lock down.
Vertex arrays faced both of these restrictions already, but pixel
operations have one additional complicating factor -- they are
bidirectional.  Vertex array data is always being transferred from the
application to the driver and the HW, whereas pixel operations
sometimes transfer data to the application from the driver and HW.
Note that the types of memory that are suitable for DMA for reading
and writing purposes are often different. For example, on many PC
platforms, DMA pulling is best accomplished with write-combined
(uncached) AGP memory, while pushing data should use cached memory so
that the application can read the data efficiently once it has been
read back over the AGP bus.
This extension defines an API where an application can specify two
pixel data ranges, which are analogous to vertex array ranges, except
that one is for operations where the application is reading data
(e.g. glReadPixels) and one is for operations where the application
is writing data (e.g. glDrawPixels, glTexSubImage2D, etc.). Each
pixel data range has a pointer to its start and a length in bytes.
When the pixel data range is enabled, and if the pointer specified
as the argument to a pixel operation is inside the corresponding
pixel data range, the implementation may choose to asynchronously
pull data from the pixel data range or push data to the pixel data
range. Data pulled from outside the pixel data range is undefined,
while pushing data to outside the pixel data range produces undefined
results.
The application may synchronize with the hardware in one of two ways:
by flushing the pixel data range (or causing an implicit flush) or by
using the NV_fence extension to insert fences in the command stream.
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/NV/pixel_data_range.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_NV_pixel_data_range'
GL_WRITE_PIXEL_DATA_RANGE_NV = constant.Constant( 'GL_WRITE_PIXEL_DATA_RANGE_NV', 0x8878 )
GL_READ_PIXEL_DATA_RANGE_NV = constant.Constant( 'GL_READ_PIXEL_DATA_RANGE_NV', 0x8879 )
GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV = constant.Constant( 'GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV', 0x887A )
glget.addGLGetConstant( GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV, (1,) )
GL_READ_PIXEL_DATA_RANGE_LENGTH_NV = constant.Constant( 'GL_READ_PIXEL_DATA_RANGE_LENGTH_NV', 0x887B )
glget.addGLGetConstant( GL_READ_PIXEL_DATA_RANGE_LENGTH_NV, (1,) )
GL_WRITE_PIXEL_DATA_RANGE_POINTER_NV = constant.Constant( 'GL_WRITE_PIXEL_DATA_RANGE_POINTER_NV', 0x887C )
GL_READ_PIXEL_DATA_RANGE_POINTER_NV = constant.Constant( 'GL_READ_PIXEL_DATA_RANGE_POINTER_NV', 0x887D )
glPixelDataRangeNV = platform.createExtensionFunction(
'glPixelDataRangeNV', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLsizei, ctypes.c_void_p,),
doc = 'glPixelDataRangeNV( GLenum(target), GLsizei(length), c_void_p(pointer) ) -> None',
argNames = ('target', 'length', 'pointer',),
)
glFlushPixelDataRangeNV = platform.createExtensionFunction(
'glFlushPixelDataRangeNV', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,),
doc = 'glFlushPixelDataRangeNV( GLenum(target) ) -> None',
argNames = ('target',),
)
def glInitPixelDataRangeNV():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
|
"""Run the trade functions based on information gained from the other scripts."""
import configparser
import ccxt
# import time
# from threading import Timer
# import release_trader.check_availability as ca
# import logging
# import gate_api
config = configparser.ConfigParser()
config.read_file(open(r"../.user.cfg"))
LOGIN_Gv4 = {
"API_KEY": config.get("gateio_user_config", "api_key"),
"SECRET_KEY": config.get("gateio_user_config", "secret_key"),
}
def ratelimit():
"""Print the rate limit of API calls."""
gateio = ccxt.gateio(
{"apiKey": LOGIN_Gv4["API_KEY"], "secret": LOGIN_Gv4["SECRET_KEY"]}
) # gateio
print(gateio.rateLimit)
def buy_crypto():
"""Place a buy order with a limit sell order on new coins."""
pass
# gateio = ccxt.gateio(
# {"apiKey": LOGIN_Gv4["API_KEY"], "secret": LOGIN_Gv4["SECRET_KEY"]}
# ) # gateio
# crypto = ca.new_crypto() # crypto
# Here we want to buy the currency we found as a market buy order with a
# stop loss. We can potentially (actually it is very likely) buy several
# coins at the same time (might as well trash all but one?)
# for symbol in crypto:
# # time.sleep(gateio.rateLimit / 1000) # The rateLimit is 1000
# time.sleep(1)
# OLD
# for symbol in crypto:
# time.sleep(gateio.rateLimit / 1000)
# gateio.create_market_buy_order(
# symbol, # symbol
# 100, # amount (100 USDT)
# type='stop_loss_limit',
# params={'stopPrice': .95 * }
# )
# After we have bought something we want to check if the price of our new
# coin(s) has increased with at least 10 %
# while not gateio.has['fetch_balance']:
# time.sleep(.5)
# bal = gateio.fetch_balance() # balance
# for trade in gateio.fetch_my_trades(symbol='LTC/BTC'):
# print(trade['info']['rate'])
# rate = trade['info']['rate'] * 1.1
# if rate < the_current_price:
# gateio.create_limit_sell_order('coin_that_is_available')
# print(bal['LTC'])
# print(bal['BTC'])
# gateio.create_limit_sell_order(symbol, amount, price)
# This will call this function again in five seconds.
# Timer(5, buy_crypto).start()
if __name__ == "__main__":
ratelimit()
# buy_crypto()
|
# Dependencies
from src.dataset import MNIST, ToTensor, train_test_split
from torch.utils.data import DataLoader
from torch.nn.functional import mse_loss, binary_cross_entropy
from torch import nn, optim
import torch
import matplotlib.pyplot as plt
import numpy as np
import tqdm
import time
import os
class AutoEncoder(nn.Module):
""" Auto Encoder (AE)
Classic auto encoder with convolutional layers and symmetrical encoder and
decoder sides, useful for digits reconstruction.
"""
def __init__(self, latent_dim=2):
""" Constructor
Args
latent_dim (int) Dimension of the encoded latent space
Raise
(ValueError) In case latent dimension is not valid (not
integer or lower than one)
"""
# Call parent constructor
super().__init__()
# Check latent space dimension
if not isinstance(latent_dim, int) or latent_dim < 1:
# Raise exception
raise ValueError('given latent space dimension is not valid')
# Define encoding convolutional layer
self.encoder_cnn = nn.Sequential(
nn.Conv2d(1, 8, 3, stride=2, padding=1),
nn.ReLU(True),
nn.Conv2d(8, 16, 3, stride=2, padding=1),
nn.ReLU(True),
nn.Conv2d(16, 32, 3, stride=2, padding=0),
nn.ReLU(True)
)
# Define encoding linear layer
self.encoder_lin = nn.Sequential(
nn.Linear(3 * 3 * 32, 64),
nn.ReLU(True),
nn.Linear(64, latent_dim)
)
# Define decoding linear layer
self.decoder_lin = nn.Sequential(
nn.Linear(latent_dim, 64),
nn.ReLU(True),
nn.Linear(64, 3 * 3 * 32),
nn.ReLU(True)
)
# Define decoding convolutional layer
self.decoder_cnn = nn.Sequential(
nn.ConvTranspose2d(32, 16, 3, stride=2, output_padding=0),
nn.ReLU(True),
nn.ConvTranspose2d(16, 8, 3, stride=2, padding=1, output_padding=1),
nn.ReLU(True),
nn.ConvTranspose2d(8, 1, 3, stride=2, padding=1, output_padding=1)
)
# Store latent space dimension
self.latent_dim = latent_dim
def forward(self, x):
""" Fed input to the network
Args
x (torch.Tensor) Input images batch as 3d tensor
Return
(torch.tensor) Reconstructed input images batch as 3d tensor
"""
# Go through encoding side
x = self.encode(x)
# Go through decoding side
x = self.decode(x)
# Return reconstructed images batch
return x
def encode(self, x):
""" Fed raw input to encoding side
Args
x (torch.Tensor) Input images batch as 3d tensor
Return
(torch.tensor) Encoded input images batch as 2d tensor
"""
# Feed input through convolutional layers
x = self.encoder_cnn(x)
# Flatten convolutional layers output
x = torch.flatten(x, 1)
# x = x.view([x.size(0), -1])
# Feed input through linear layers
return self.encoder_lin(x)
def decode(self, x):
""" Fed encoded input to decoding side
Args
x (torch.Tensor) Encoded input images batch as 2d tensor
Return
(torch.tensor) Decoded input images batch as 3d tensor
"""
# Feed encoded input through linear decoder
x = self.decoder_lin(x)
# Reshape linear decoder output
x = x.view([-1, 32, 3, 3])
# Feed convolutional decoder with reshaped input
x = self.decoder_cnn(x)
# Apply decision layer (sigmoid)
return torch.sigmoid(x)
@property
def device(self):
return next(self.parameters()).device
def train_batch(self, batch, loss_fn, optim=None, ret_images=False, eval=False):
""" Train network on a single batch
Args
batch (tuple) Tuple containing output labels tensor, input
images tensor and output images tensor
loss_fn (nn.Module) Loss function instance
optim (nn.Module) Optimizer used during weights update
            ret_images (bool) Whether to also return the reconstructed images
            eval (bool) Whether to run evaluation (test) instead of training
        Return
            (float) Current batch loss
            (torch.Tensor) Reconstructed images, returned only when ``ret_images`` is True
Raise
(ValueError) In case training mode has been chosen without
defining an optimizer instance
"""
# Check that optimizer has been set in training mode
if (not eval) and (optim is None):
# Raise exception
raise ValueError('optimizer must be set for training')
# Retrieve device
device = self.device
# Retrieve output labels, input image and output image
out_labels, in_images, out_images = batch
# Move input and output images to device
in_images, out_images = in_images.to(device), out_images.to(device)
# Make forward pass
net_images = self(in_images)
# Compute loss
loss = loss_fn(net_images, out_images)
# Training mode
if not eval:
# Clean previous optimizer state
optim.zero_grad()
# Make backward pass (update weights)
loss.backward()
# Update weights
optim.step()
# Case images have been required
if ret_images:
# Return either loss and images
return float(loss.data), net_images
# Return loss
return float(loss.data)
def test_batch(self, batch, loss_fn, ret_images=False):
""" Test network on a single batch
Args
batch (tuple) Tuple containing output labels tensor, input
images tensor and output images tensor
loss_fn (nn.Module) Loss function instance
            ret_images (bool) Whether to also return the reconstructed images
        Return
            (float) Current batch loss
            (torch.Tensor) Reconstructed images, returned only when ``ret_images`` is True
"""
return self.train_batch(batch, loss_fn, optim=None, ret_images=ret_images, eval=True)
def train_epoch(self, dataloader, loss_fn, optim=None, eval=False):
""" Train network on all the batches in a single epoch
Args
dataloader (Dataloader) Dataloader allowing to iterate over batches
loss_fn (nn.Module) Loss function instance
optim (nn.Module) Optimizer used during weights update
            eval (bool) Whether to run evaluation (test) instead of training
Return
(float) Current epoch mean loss
(float) Current epoch total time, in seconds
Raise
(ValueError) In case training mode has been chosen without
defining an optimizer instance
"""
# Initialize losses and times
epoch_losses, epoch_times = [], []
# Set network in training/evaluation mode
self.eval() if eval else self.train()
# Loop through each batch in given dataloader
for batch in dataloader:
# Initialize batch timer
batch_start = time.time()
# Get current batch loss
batch_loss = self.train_batch(batch, loss_fn=loss_fn, optim=optim, eval=eval)
# Store current batch loss
epoch_losses.append(batch_loss)
# Store current batch time
epoch_times.append(time.time() - batch_start)
# Return mean loss and total time
return sum(epoch_losses) / len(epoch_losses), sum(epoch_times)
def test_epoch(self, dataloader, loss_fn):
""" Test network on all the batches in a single epoch
Args
dataloader (Dataloader) Dataloader allowing to iterate over batches
loss_fn (nn.Module) Loss function instance
Return
(float) Current epoch mean loss
(float) Current epoch total time, in seconds
Raise
(ValueError) In case training mode has been chosen without
defining an optimizer instance
"""
return self.train_epoch(dataloader, loss_fn, optim=None, eval=True)
class VariationalAutoEncoder(AutoEncoder):
""" Variational Auto Encoder (VAE)
This AutoEncoder is similar to the default one, except that it encodes to
a distribution in the latent space, not to a point. This should provide
latent space with either continuity and completeness, while reducing
overfitting.
"""
def __init__(self, latent_dim=2):
""" Constructor
Args
latent_dim (int) Dimension of the encoded latent space
Raise
(ValueError) In case latent dimension is not valid (not
integer or lower than one)
"""
# Call parent constructor
super().__init__(latent_dim=latent_dim)
# Remove linear encoder
del self.encoder_lin
# Define linear encoder for mean
self.encoder_mu = nn.Sequential(
nn.Linear(3 * 3 * 32, 64),
nn.ReLU(True),
nn.Linear(64, latent_dim)
)
# Define linear encoder for (log transformed) variance
self.encoder_logvar = nn.Sequential(
nn.Linear(3 * 3 * 32, 64),
nn.ReLU(True),
nn.Linear(64, latent_dim)
)
def forward(self, x):
""" Fed input to the network
Args
x (torch.Tensor) Input images batch as 3d tensor
Return
(torch.Tensor) Reconstructed input images batch as 3d tensor
(torch.Tensor) Latent space encoded mean
(torch.Tensor) Latent space encoded (log transformed) variance
"""
        # Encode the mean and the (log transformed) variance
mu, logvar = self.encode(x)
# Apply reparametrisation trick
z = self.reparameterize(mu, logvar)
        # Return the decoded reconstruction together with mu and logvar
return self.decode(z), mu, logvar
def encode(self, x):
""" Fed raw input to encoding side
Args
x (torch.Tensor) Input images batch as 3d tensor
Return
(torch.Tensor) Encoded input mean
(torch.Tensor) Encoded input (log transformed) variance
"""
# Feed input through convolutional layers
x = self.encoder_cnn(x)
# Flatten convolutional layers output
x = torch.flatten(x, 1)
# Encode mean
mu = self.encoder_mu(x)
# Encode (log transformed) variance
logvar = self.encoder_logvar(x)
# Return encoded mean and variance
return mu, logvar
def reparameterize(self, mu, logvar):
""" Reparametrisation trick
Args
mu (torch.Tensor) Values of the mean
logvar (torch.Tensor) Values of the (log transformed) variance
Return
(torch.Tensor) Values sampled according to given parameters
"""
# Compute standard deviation (Monte-Carlo expectation approximation)
std = torch.exp(0.5 * logvar)
        # Sample from a standard normal (eps shape will match std's)
        eps = torch.randn_like(std)
        # Return the reparameterized sample z = mu + eps * std
        return mu + eps * std
@staticmethod
def loss_fn(x_pred, x_true, mu, logvar):
""" Variational autoencoder loss function
Args
x_pred (torch.Tensor) Reconstructed image tensor
x_true (torch.Tensor) Original image tensor
mu (torch.Tensor) Mean terms
logvar (torch.Tensor) Variance terms (log transformed)
Return
(torch.Tensor) Computed losses
"""
# Reconstruction loss: BCE
bce = binary_cross_entropy(x_pred, x_true, reduction='sum')
# # Reconstruction loss: MSE
# mse = mse_loss(x_pred, x_true, reduction='mean')
# Regularization term: 0.5 * sum(1 + log(var) - mu^2 - var), i.e. the negative KL divergence from the unit Gaussian prior
kld = 0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
# Return regularized loss (subtracting the negative divergence adds the KL penalty to the reconstruction term)
return bce - kld
# return mse - kld
def train_batch(self, batch, loss_fn, optim=None, ret_images=False, eval=False):
""" Train network on a single batch
Args
batch (tuple) Tuple containing output labels tensor, input
images tensor and output images tensor
loss_fn (nn.Module) Loss function instance
optim (nn.Module) Optimizer used during weights update
ret_images (bool) Whether to also return the reconstructed images
eval (bool) Whether to run evaluation (test) instead of training
Return
(float) Current batch loss
(torch.Tensor) Reconstructed images, returned only when ret_images is True
Raise
(ValueError) In case training mode has been chosen without
defining an optimizer instance
"""
# Check that optimizer has been set in training mode
if (not eval) and (optim is None):
# Raise exception
raise ValueError('optimizer must be set for training')
# Retrieve device
device = self.device
# Retrieve output labels, input image and output image
out_labels, in_images, out_images = batch
# Move input and output images to device
in_images, out_images = in_images.to(device), out_images.to(device)
# Make forward pass
net_images, mu, logvar = self(in_images)
# Case loss function is not the regularized one (e.g. MSE)
if loss_fn != self.loss_fn:
# Give only reconstructed images to loss
loss = loss_fn(net_images, out_images)
# Case loss function is the regularized one
if loss_fn == self.loss_fn:
# Compute loss using the distribution parameters as well
loss = loss_fn(net_images, out_images, mu, logvar)
# Training mode
if not eval:
# Clean previous optimizer state
optim.zero_grad()
# Make backward pass (compute gradients)
loss.backward()
# Update weights
optim.step()
# Case images have been required
if ret_images:
# Return both loss and images
return float(loss.data), net_images
# Return loss
return float(loss.data)
def train_test_epoch(net, loss_fn, optim, train_data, test_data):
""" Train and test the network over a single epoch
Args
net (nn.Module) Network to be trained and tested
loss_fn (nn.Module) Loss function instance
optim (nn.Module) Optimizer used during weights update
train_data (DataLoader) Training dataset loader
test_data (DataLoader) Test dataset loader
Return
(float) Training loss for current epoch
(float) Training time for current epoch
(float) Test loss for current epoch
(float) Test time for current epoch
"""
# # Check given number of epochs
# if not isinstance(num_epochs, int) or num_epochs < 1:
# # Raise exception
# raise ValueError('given number of epochs is not valid')
# # Check givem step
# if not isinstance(step_epochs, int) or step_epochs < 1:
# # Raise exception
# raise ValueError('given epochs step is not valid')
# # Loop through each epoch
# for i in range(0, num_epochs, step_epochs):
# # Initialize list of epoch training losses and times
# train_losses, train_times = [], []
# # Initialize list of epoch test losses and times
# test_losses, test_times = [], []
# # Loop through each epoch in current step
# for j in range(i, min(num_epochs, i + step_epochs)):
# Make training, retrieve mean loss and total time
train_loss, train_time = net.train_epoch(
dataloader=train_data,
loss_fn=loss_fn,
optim=optim
)
# # Store training loss and time
# train_losses.append(train_loss)
# train_times.append(train_time)
# Disable gradient computation
with torch.no_grad():
# Make evaluation, retrieve mean loss and total time
test_loss, test_time = net.test_epoch(
dataloader=test_data,
loss_fn=loss_fn
)
# # Store test loss and time
# test_losses.append(test_loss)
# test_times.append(test_time)
# # Yield results
# yield j, train_losses, train_times, test_losses, test_times
# Return results
return train_loss, train_time, test_loss, test_time
# Test
if __name__ == '__main__':
# Define project root path
ROOT_PATH = os.path.dirname(__file__) + '/..'
# Define data folder path
DATA_PATH = ROOT_PATH + '/data'
# Define MNIST dataset path
MNIST_PATH = DATA_PATH + '/MNIST.mat'
# Retrieve best device
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Load dataset
dataset = MNIST.from_mat(MNIST_PATH)
# Add transformation
dataset.transform = ToTensor()
# Split dataset in training and test
train_iter, test_iter = train_test_split(dataset, train_perc=0.6)
# Make lists out of iterators
train_dataset, test_dataset = list(train_iter), list(test_iter)
# Define autoencoder instance
net = AutoEncoder(latent_dim=2)
net.to(device)
# Define loss function
loss_fn = nn.MSELoss()
# Define optimizer
optimizer = optim.Adam(net.parameters(), lr=1e-3, weight_decay=1e-5)
# Define a sample image index
k = np.random.choice(len(train_dataset))
# Retrieve image and label
label, image, _ = train_dataset[k]
# Add batch size, move to device
image = image.unsqueeze(0).to(device)
# Show shape
print('Retrieved image has shape (AE):', image.shape)
# Try encoding (add batch size)
encoded = net.encode(image)
# Show encoded shape
print('Encoded image shape (AE):', encoded.shape)
# Try decoding
decoded = net.decode(encoded)
# Show decoded shape
print('Decoded image shape (AE):', decoded.shape)
print()
# # Initialize results table
# results = {'train_loss': [], 'train_time': [], 'test_loss': [], 'test_time': []}
# # Define iterator
# step_iter = tqdm.tqdm(desc='Training', iterable=train_test_epochs(
# net=net, loss_fn=loss_fn, optim=optimizer, num_epochs=10,
# train_data=DataLoader(train_dataset, batch_size=1000, shuffle=True),
# test_data=DataLoader(test_dataset, batch_size=1000, shuffle=False)
# ))
# # Make training and evaluation
# for step_results in step_iter:
# # Store current results
# results['train_loss'] += step_results[0]
# results['train_time'] += step_results[1]
# results['test_loss'] += step_results[2]
# results['test_time'] += step_results[3]
#
# # Initialize plot: show loss
# fig, ax = plt.subplots(figsize=(25, 5))
# # Retrieve y train and y test
# y_train = results['train_loss']
# y_test = results['test_loss']
# # Plot train loss
# ax.plot(range(1, len(y_train) + 1), y_train, '-')
# ax.plot(range(1, len(y_test) + 1), y_test, '-')
# # Add title and labels
# ax.set_title('Loss per epoch: train vs test')
# ax.set_ylabel('Loss')
# ax.set_xlabel('Epoch')
# # Show plot
# plt.show()
# Define variational autoencoder
vae = VariationalAutoEncoder(latent_dim=2)
vae.to(device)
# Define a sample image index
k = np.random.choice(len(train_dataset))
# Retrieve image and label
label, image, _ = train_dataset[k]
# Add batch size, move to device
image = image.unsqueeze(0).to(device)
# Show shape
print('Retrieved image has shape (VAE):', image.shape)
# Encode to mean and variance (add batch size)
mu, logvar = vae.encode(image)
# Sample a latent value using the reparametrisation trick
encoded = vae.reparameterize(mu, logvar)
# Show encoded shape
print('Encoded mean has shape (VAE):', mu.shape)
print('Encoded (log transformed) variance has shape (VAE):', logvar.shape)
print('Sampled point in latent space has shape (VAE):', encoded.shape)
# Decode
decoded = vae.decode(encoded)
# Show decoded shape
print('Decoded image shape (VAE):', decoded.shape)
# Compute loss
loss = vae.loss_fn(decoded, image, mu, logvar).cpu()
# Show computed loss
print('Computed regularized loss (VAE):', loss.item())
print()
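# A minimal sketch, not part of the original demo: since the VAE is trained to pull
# the latent posterior towards a standard normal prior, decoding points drawn from
# N(0, 1) should produce plausible images once training has been run. The network
# here is untrained, so this only verifies shapes.
# Sample 16 points from the 2D standard normal prior
z = torch.randn(16, 2, device=device)
# Decode the sampled latent points into images (detach to drop gradient tracking)
samples = vae.decode(z).detach()
# Show sampled images shape
print('Images decoded from prior samples have shape (VAE):', samples.shape)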
|
"""
Provide computer status info.
To get network upload and download speeds, speedtest-cli app should be installed
"""
#!/usr/bin/env python
import operator
import os
import threading
import time
from collections import deque
from pprint import pprint
import psutil
import platform
from stream2py import SourceReader
from stream2py.utility.typing_hints import ComparableType, Any
__all__ = ['StatusInfo', 'StatusInfoReader']
DFLT_STATUS_INFO_READ_INTERVAL = 1000 # in ms
class StatusInfo:
SPEEDTEST_CMD = 'speedtest'
@staticmethod
def disk_free_bytes(path: str = '/') -> dict:
return {'val': int(psutil.disk_usage(path).free), 'unit': 'bytes'}
@staticmethod
def disk_used_percents(path: str = '/') -> dict:
return {'val': float(psutil.disk_usage(path).percent), 'unit': '%'}
@staticmethod
def cpu_used_percents():
return {'val': psutil.cpu_percent(), 'unit': '%'}
@staticmethod
def cpu_temp():
_t = -1
try:
with open('/sys/class/thermal/thermal_zone0/temp', 'r') as ftemp:
_t = ftemp.readline()
_t = float(int(_t) / 1000)
except Exception as ex:
_t = -1
return {'val': _t, 'unit': 'C'}
@staticmethod
def mem_total():
"""
- total:
total physical memory available.
"""
return {'val': psutil.virtual_memory().total, 'unit': 'bytes'}
@staticmethod
def mem_available():
"""
- available:
the memory that can be given instantly to processes without the
system going into swap.
This is calculated by summing different memory values depending
on the platform and it is supposed to be used to monitor actual
memory usage in a cross platform fashion.
"""
return {'val': psutil.virtual_memory().available, 'unit': 'bytes'}
@staticmethod
def mem_used_percent():
"""
- used:
the percentage usage calculated as (total - available) / total * 100
"""
return {'val': psutil.virtual_memory().percent, 'unit': '%'}
@staticmethod
def mem_used_bytes():
"""
- used:
memory used, calculated differently depending on the platform and
designed for informational purposes only:
macOS: active + wired
BSD: active + wired + cached
Linux: total - free
"""
return {'val': psutil.virtual_memory().used, 'unit': 'bytes'}
@staticmethod
def mem_free():
"""
- free:
memory not being used at all (zeroed) that is readily available;
note that this doesn't reflect the actual memory available
(use 'available' instead)
"""
return {'val': psutil.virtual_memory().free, 'unit': 'bytes'}
@staticmethod
def platform():
_info = platform.uname()
if _info is None or len(_info._fields) == 0:
return {'val': None, 'unit': None}
_values = dict()
for _f in _info._fields:
_values[_f] = _info.__getattribute__(_f)
return {'val': _values, 'unit': 'json'}
@staticmethod
def network_download_speed() -> dict:
"""
Method to get download speed by testing real network speed.
It requires the speedtest-cli application to be installed.
WARNING: The method is very slow
:return: dict with the measured download speed value and unit
"""
try:
with os.popen(
StatusInfo.SPEEDTEST_CMD + ' --no-upload --simple '
) as speedtest_output:
for line in speedtest_output:
label, value, unit = line.split()
if 'download' in label.lower():
return {'val': float(value), 'unit': unit}
except Exception as ex:
return {'val': float(0), 'unit': 'not installed'}
@staticmethod
def network_upload_speed() -> dict:
"""
Method to get upload speed by testing real network speed.
It requires the speedtest-cli application to be installed.
WARNING: The method is very slow
:return: dict with the measured upload speed value and unit
"""
try:
with os.popen(
StatusInfo.SPEEDTEST_CMD + ' --no-download --simple '
) as speedtest_output:
for line in speedtest_output:
label, value, unit = line.split()
if 'upload' in label.lower():
return {'val': float(value), 'unit': unit}
except Exception as ex:
return {'val': float(0), 'unit': 'not installed'}
@staticmethod
def all(
include_network_download_speed: bool = False,
include_network_upload_speed: bool = False,
):
_info = {
'memory': {
'total': StatusInfo.mem_total(),
'available': StatusInfo.mem_available(),
'free': StatusInfo.mem_free(),
'used_bytes': StatusInfo.mem_used_bytes(),
'used_percents': StatusInfo.mem_used_percent(),
},
'disk': {
'free': StatusInfo.disk_free_bytes(),
'used': StatusInfo.disk_used_percents(),
},
'cpu': {
'used': StatusInfo.cpu_used_percents(),
'temp': StatusInfo.cpu_temp(),
},
'platform': StatusInfo.platform(),
}
if include_network_download_speed or include_network_upload_speed:
_info['network'] = dict()
if include_network_download_speed:
_info['network']['download'] = StatusInfo.network_download_speed()
if include_network_upload_speed:
_info['network']['upload'] = StatusInfo.network_upload_speed()
return _info
_ITEMGETTER_0 = operator.itemgetter(0)
class SyncQueue:
def __init__(self):
self.lock = threading.Lock()
self.queue = deque()
def len(self):
with self.lock:
return len(self.queue)
def popleft(self):
with self.lock:
return self.queue.popleft()
def popleft_no_block(self):
with self.lock:
if len(self.queue):
return self.queue.popleft()
def clear(self):
with self.lock:
return self.queue.clear()
def append(self, item):
with self.lock:
return self.queue.append(item)
class StatusInfoReader(SourceReader, threading.Thread):
_index: int = 0
_data: SyncQueue = SyncQueue()
_stop: threading.Event = threading.Event()
_bt: int = None
def __init__(
self,
read_interval_ms=DFLT_STATUS_INFO_READ_INTERVAL,
include_network_download_speed: bool = False,
include_network_upload_speed: bool = False,
):
self.read_interval_ms = read_interval_ms
self.include_network_download_speed = include_network_download_speed
self.include_network_upload_speed = include_network_upload_speed
threading.Thread.__init__(self, daemon=True)
def open(self):
self._data.clear()
self._bt = self.get_timestamp()
self._index = 0
self._stop.clear()
self.start()
def read(self):
"""Returns one data item
:return: (index, timestamp, status info dict)
"""
return self._data.popleft_no_block()
def close(self):
self._stop.set()
@property
def info(self) -> dict:
return {'bt': self._bt}
def key(self, data: Any) -> ComparableType:
"""
:param data: (index, timestamp, status info dict)
:return: index
"""
return _ITEMGETTER_0(data)
def run(self):
try:
while not self._stop.is_set():
self._data.append(
(
self._index,
self.get_timestamp(),
StatusInfo.all(
self.include_network_download_speed,
self.include_network_upload_speed,
),
)
) # (index, timestamp, status info dict)
self._index += 1
if self.read_interval_ms > 0:
time.sleep(self.read_interval_ms / 1000)
except Exception:
self.close()
raise
def main():
with StatusInfoReader() as source:
print('Ctrl+C to exit')
time.sleep(3)
while True:
try:
data = source.read()
if data is not None:
index, timestamp, info = data
pprint(f'{index}.{timestamp}: {info}')
except KeyboardInterrupt as kb:
break
print('Done!')
if __name__ == '__main__':
main()
# As separated info
# print(f"Memory:\n---------------------------------")
# print(f"\tmem total {StatusInfo.mem_total()}")
# print(f"\tmem available {StatusInfo.mem_available()}")
# print(f"\tmem free {StatusInfo.mem_free()}")
# print(f"\tmem used bytes {StatusInfo.mem_used_bytes()}")
# print(f"\tmem used percents {StatusInfo.mem_used_percent()}")
#
# print(f"\nDisk:\n---------------------------------")
# print(f"\tdisk free {StatusInfo.disk_free_bytes()}")
# print(f"\tdisk used {StatusInfo.disk_used_percents()}")
#
# print(f"\nCPU:\n---------------------------------")
# print(f"\tcpu used {StatusInfo.cpu_used_percents()}")
# print(f"\tcpu temp {StatusInfo.cpu_temp()}")
#
# print(f"\nPlatform:\n---------------------------------")
# print(f"\tplatform info {StatusInfo.platform()}")
#
# print(f"\nNetwork:\n---------------------------------")
# print(f"\tDownload speed {StatusInfo.network_download_speed()}")
# print(f"\tUpload speed {StatusInfo.network_upload_speed()}")
# or as a single JSON
# print(StatusInfo.all(include_network_download_speed=True, include_network_upload_speed=True))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import django.conf.locale
from .base import * # noqa isort:skip @UnusedWildImport
LANGUAGES = [("ach-ug", "In-context translation")] # noqa
EXTRA_LANG_INFO = {
"ach-ug": {
"bidi": False,
"code": "ach-ug",
"name": "In-context translation",
"name_local": "Language Name",
}
}
LANGUAGE_CODE = "ach-ug"
django.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)
|
import sys
import traceback
from django.core.management.base import BaseCommand, CommandParser
from devproject.core.selectors import get_registered_developers
from devproject.core.services import sync_developer
ERROR_CODE_MISSING_INPUT = 1
class Command(BaseCommand):
help = "creates or updates local developer information from Github"
requires_migrations_checks = True
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument("logins", nargs="*", type=str)
parser.add_argument(
"-r",
"--registered",
action="store_true",
help="use registered developers as input",
)
def handle(self, *args, **options) -> None:
if options["registered"]:
options["logins"] = get_registered_developers().values_list(
"login", flat=True
)
if len(options["logins"]) == 0:
self.stdout.write(
self.style.ERROR(
"either provide an space-separated list of logins or pass --registered flag"
)
)
sys.exit(ERROR_CODE_MISSING_INPUT)
for login in options["logins"]:
self.stdout.write(f"syncing developer '{login}' ... ", ending="")
try:
sync_developer(login=login)
self.stdout.write(self.style.SUCCESS("SUCCESS"))
except Exception as e: # noqa
self.stdout.write(self.style.ERROR("FAILED"))
self.stderr.write(traceback.format_exc())
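# Example invocations (illustrative; the actual command name is the file name of
# this module under management/commands/, assumed here to be sync_developers):
#
#   python manage.py sync_developers torvalds gvanrossum
#   python manage.py sync_developers --registered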
|
from marshmallow import fields, Schema
from . import db
from .x import course_x_group, teacher_x_course
from .db_class_base import db_class_base
class study_course(db.Model, db_class_base):
__tablename__ = 'study_courses'
_id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), nullable=False)
description = db.Column(db.String(500), nullable=True)
study_groups = db.relationship(
'study_group',
secondary=course_x_group,
lazy=True)
teacher = db.relationship(
'user_model',
secondary=teacher_x_course,
backref='study_courses',
lazy=True)
def __init__(self, data):
self.name = data.get('name')
self.description = data.get('description')
def add_group(self, group):
self.study_groups.append(group)
db.session.commit()
def add_teacher(self, teacher):
self.teacher.append(teacher)
db.session.commit()
@staticmethod
def get_one_course(_id):
return study_course.query.get(_id)
def __repr__(self):
return '<_id {}>'.format(self._id)
class StudyCourseShema(Schema):
_id = fields.Int(dump_only=True)
name = fields.Str(required=True)
description = fields.Str(required=False)
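# A minimal usage sketch (illustrative only; assumes an application context with
# `db` initialised and the related study_group / user_model records created):
#
#   course = study_course({'name': 'Algebra', 'description': 'Intro course'})
#   db.session.add(course)
#   db.session.commit()
#   course.add_teacher(some_teacher)
#   serialized = StudyCourseShema().dump(course)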
|
from flask import request, jsonify
from .base import Base
from .json_validate import SCHEMA
from jybase.utils import validate_hash_key, create_md5_key, create_hash_key
from config import config
class StoreSessions(Base):
def post(self):
# TODO: add sms login
is_valid, data = self.get_params_from_request(
request, SCHEMA['store_sessions_post'])
if not is_valid:
return self.error_msg(self.ERR['invalid_body_content'], data)
mobile = data['mobile']
password = data['password']
flag, store = self.db.find_by_condition('stores', {'mobile': mobile})
if not flag:
return '', 500
if len(store) == 0:
return self.error_msg(self.ERR['user_not_found'])
store = store[0]
store_id = store['id']
password_from_db = store['password']
salt = create_md5_key(config['secret'])
if not validate_hash_key(password, password_from_db, salt):
return self.error_msg(self.ERR['password_verification_failed'])
token = self.create_jwt({'accountId': store_id}, salt)
result = self.get_data_with_keys(
store, ('mobile', 'id', 'address', 'storeName'),
{'token': token.decode()})
return jsonify(result), 201
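# Example exchange (illustrative only; the request fields follow the code above
# and SCHEMA['store_sessions_post'], the response keys follow get_data_with_keys):
#   POST body: {"mobile": "13800000000", "password": "secret"}
#   201 reply: {"mobile": "...", "id": "...", "address": "...",
#               "storeName": "...", "token": "<jwt>"}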
|
from django.urls import path, re_path, include
from .ajax_util import get_all_ajax_functions
from .views import (
get_boxes_codes,
get_clerk_codes,
get_counter_commands,
checkout_view,
overseer_view,
vendor_view,
accept_terms,
get_items,
all_to_print,
item_add,
item_hide,
item_to_printed,
item_to_not_printed,
item_update_name,
item_update_type,
item_update_price,
remove_item_from_receipt,
get_boxes,
box_add,
box_content,
box_hide,
box_print,
stats_view,
type_stats_view,
statistical_stats_view,
lost_and_found_list,
)
from .views.frontpage import front_page
from .checkout_api import checkout_js
from .views.mobile import index as mobile_index, logout as mobile_logout
from .views.vendors import change_vendor, create_vendor
from .views.accounting import accounting_receipt_view
from .views.item_dump import dump_items_view
__author__ = 'jyrkila'
app_name = "kirppu"
event_urls = [
path(r'accounting/', accounting_receipt_view, name="accounting"),
path(r'itemdump/', dump_items_view, name="item_dump"),
path(r'clerks/', get_clerk_codes, name='clerks'),
path(r'boxes/', get_boxes_codes, name="box_codes"),
path(r'checkout/', checkout_view, name='checkout_view'),
path(r'overseer/', overseer_view, name='overseer_view'),
path(r'stats/', stats_view, name='stats_view'),
path(r'stats/type/<str:type_id>', type_stats_view, name='type_stats_view'),
path(r'stats/statistical/', statistical_stats_view, name='statistical_stats_view'),
path(r'', vendor_view, name='vendor_view'),
path(r'vendor/', vendor_view),
path(r'vendor/accept_terms', accept_terms, name='accept_terms'),
path(r'vendor/items/', get_items, name='page'),
path(r'vendor/items/move_to_print', all_to_print, name='all_to_print'),
path(r'vendor/item/', item_add, name='item_add'),
path(r'vendor/item/<str:code>/to_printed', item_to_printed, name='item_to_printed'),
path(r'vendor/item/<str:code>/price', item_update_price, name='item_update_price'),
path(r'vendor/item/<str:code>/name', item_update_name, name='item_update_name'),
path(r'vendor/item/<str:code>/type', item_update_type, name='item_update_type'),
path(r'vendor/item/<str:code>/to_not_printed', item_to_not_printed, name='item_to_not_printed'),
path(r'vendor/item/<str:code>/hide', item_hide, name='item_hide'),
path(r'remove_item', remove_item_from_receipt, name='remove_item_from_receipt'),
path(r'lost_and_found/', lost_and_found_list, name='lost_and_found'),
path(r'vendor/boxes/', get_boxes, name='vendor_boxes'),
path(r'vendor/box/', box_add, name='box_add'),
path(r'vendor/box/<str:box_id>/content', box_content, name='box_content'),
path(r'vendor/box/<str:box_id>/hide', box_hide, name='box_hide'),
path(r'vendor/box/<str:box_id>/print', box_print, name='box_print'),
path(r'vendor/status/', mobile_index, name='mobile'),
path(r'vendor/status/logout/', mobile_logout, name='mobile_logout'),
path('vendor/change', change_vendor, name="change_vendor"),
path('vendor/create', create_vendor, name="create_vendor"),
path('api/checkout.js', checkout_js, name='checkout_js'),
path(r'commands/', get_counter_commands, name='commands'),
]
common_urls = [
path(r'', front_page, name="front_page"),
]
event_urls.extend([
re_path(func.url, func.func, name=func.view_name)
for _, func in get_all_ajax_functions()
])
urlpatterns = [path(r'<slug:event_slug>/', include(event_urls))] + common_urls
|
from TschunkView import *
from CodeView import *
import time
# threading and pyglet are used below; imported explicitly here even though the
# star imports above may already expose them
import threading
import pyglet
t = TschunkView(TschunkMap1())
#c = CodeView(TschunkMap1())
direction = 0
def simple_main():
print 'thread started!'
direction = -1
while True:
time.sleep(1)
if not t.move((0,direction)):
direction *= -1
def move():
time.sleep(1)
t.move()
def drop():
time.sleep(1)
t.drop()
def rotateLeft():
time.sleep(1)
global direction
directions = [(0, -1), (1, 0), (0, 1), (-1, 0)]
direction = (direction + 1) % 4
t.setDirection(directions[direction])
def main():
print 'thread started!'
for i in range(0,4):
# round i:
move()
move()
rotateLeft()
move()
move()
drop()
rotateLeft()
rotateLeft()
move()
move()
rotateLeft()
move()
if __name__ == '__main__':
print 'starting thread ...'
thread = threading.Thread(target=main)
thread.setDaemon(True)
thread.start()
pyglet.app.run()
|
"""Test for the IP functions."""
import pytest
from netutils import ip
IP_TO_HEX = [
{
"sent": {"ip": "10.1.1.1"},
"received": "a010101",
},
{
"sent": {"ip": "2001:db8:3333:4444:5555:6666:7777:8888"},
"received": "20010db8333344445555666677778888",
},
]
IP_ADDITION = [
{
"sent": {"ip": "10.1.1.1", "val": 10},
"received": "10.1.1.11",
},
{
"sent": {"ip": "2001:db8:3333:4444:5555:6666:7777:8888", "val": 10},
"received": "2001:db8:3333:4444:5555:6666:7777:8892",
},
]
IP_FIRST_USABLE = [
{
"sent": {"ip_network": "10.1.1.0/24"},
"received": "10.1.1.1",
},
{
"sent": {"ip_network": "10.1.1.0/255.255.255.0"},
"received": "10.1.1.1",
},
{
"sent": {"ip_network": "10.1.1.0/31"},
"received": "10.1.1.0",
},
{
"sent": {"ip_network": "2001:db8:3c4d:15::/64"},
"received": "2001:db8:3c4d:15::1",
},
{
"sent": {"ip_network": "2001:db8:3c4d:15::/127"},
"received": "2001:db8:3c4d:15::",
},
]
IP_TO_BIN = [
{
"sent": {
"ip": "10.1.1.1",
},
"received": "1010000000010000000100000001",
},
{
"sent": {
"ip": "2001:db8:3333:4444:5555:6666:7777:8888",
},
"received": "100000000000010000110110111000001100110011001101000100010001000101010101010101011001100110011001110111011101111000100010001000",
},
]
IP_SUBTRACT = [
{
"sent": {"ip": "10.1.1.1", "val": 10},
"received": "10.1.0.247",
},
{
"sent": {"ip": "2001:db8:3333:4444:5555:6666:7777:8888", "val": 10},
"received": "2001:db8:3333:4444:5555:6666:7777:887e",
},
]
IS_IP = [
{
"sent": {
"ip": "10.1.1.1",
},
"received": True,
},
{
"sent": {
"ip": "255.255.255.255",
},
"received": True,
},
{
"sent": {
"ip": "2001:db8:3333:4444:5555:6666:7777:8888",
},
"received": True,
},
{
"sent": {
"ip": "NOT AN IP",
},
"received": False,
},
{
"sent": {
"ip": "255.255.255.256",
},
"received": False,
},
]
GET_BROADCAST_ADDRESS = [
{
"sent": {"ip_network": "10.1.1.0/24"},
"received": "10.1.1.255",
},
{
"sent": {"ip_network": "10.1.1.0/255.255.255.0"},
"received": "10.1.1.255",
},
{
"sent": {"ip_network": "10.1.1.0/31"},
"received": "10.1.1.1",
},
{
"sent": {"ip_network": "2001:db8:3c4d:15::/64"},
"received": "2001:db8:3c4d:15:ffff:ffff:ffff:ffff",
},
{
"sent": {"ip_network": "2001:db8:3c4d:15::/127"},
"received": "2001:db8:3c4d:15::1",
},
]
GET_ALL_HOST = [
{
"sent": {"ip_network": "10.1.1.0/30"},
"received": ["10.1.1.1", "10.1.1.2"],
},
{
"sent": {"ip_network": "10.1.1.0/255.255.255.252"},
"received": ["10.1.1.1", "10.1.1.2"],
},
{
"sent": {"ip_network": "10.1.1.0/31"},
"received": ["10.1.1.0", "10.1.1.1"],
},
{
"sent": {"ip_network": "2001:db8:3c4d:15::/126"},
"received": ["2001:db8:3c4d:15::1", "2001:db8:3c4d:15::2", "2001:db8:3c4d:15::3"],
},
{
"sent": {"ip_network": "2001:db8:3c4d:15::/127"},
"received": ["2001:db8:3c4d:15::", "2001:db8:3c4d:15::1"],
},
]
USABLE_RANGE = [
{
"sent": {"ip_network": "10.1.1.0/24"},
"received": "10.1.1.1 - 10.1.1.254",
},
{
"sent": {"ip_network": "10.1.1.0/255.255.255.0"},
"received": "10.1.1.1 - 10.1.1.254",
},
{
"sent": {"ip_network": "10.1.1.0/31"},
"received": "10.1.1.0 - 10.1.1.1",
},
{
"sent": {"ip_network": "2001:db8:3c4d:15::/64"},
"received": "2001:db8:3c4d:15::1 - 2001:db8:3c4d:15:ffff:ffff:ffff:fffe",
},
{
"sent": {"ip_network": "2001:db8:3c4d:15::/127"},
"received": "2001:db8:3c4d:15:: - 2001:db8:3c4d:15::1",
},
]
IS_NETMASK = [
{"sent": {"netmask": "255.255.255.0"}, "received": True},
{"sent": {"netmask": "255.192.0.0"}, "received": True},
{"sent": {"netmask": "255.266.0.0"}, "received": False},
{"sent": {"netmask": "255.0.128.0"}, "received": False},
{"sent": {"netmask": "44"}, "received": False},
{"sent": {"netmask": "mynetmask"}, "received": False},
{"sent": {"netmask": "dead:beef:cafe::"}, "received": False},
{"sent": {"netmask": "ff00::"}, "received": True},
{"sent": {"netmask": "ffff:ffff:ffff:ffff:ffff::"}, "received": True},
]
NETMASK_CIDR = [
{"sent": {"netmask": "255.255.255.0"}, "received": 24},
{"sent": {"netmask": "255.192.0.0"}, "received": 10},
{"sent": {"netmask": "255.255.255.252"}, "received": 30},
{"sent": {"netmask": "ff00::"}, "received": 8},
{"sent": {"netmask": "ffff:ffff:ffff:ffff:ffff::"}, "received": 80},
]
CIDR_NETMASK = [
{"sent": {"cidr": 24}, "received": "255.255.255.0"},
{"sent": {"cidr": 28}, "received": "255.255.255.240"},
{"sent": {"cidr": 10}, "received": "255.192.0.0"},
{"sent": {"cidr": 17}, "received": "255.255.128.0"},
]
CIDR_NETMASK6 = [
{"sent": {"cidr": 8}, "received": "ff00::"},
{"sent": {"cidr": 80}, "received": "ffff:ffff:ffff:ffff:ffff::"},
]
COUNT_BITS = [
{"sent": 0, "received": 0},
{"sent": 234, "received": 5},
{"sent": 255, "received": 8},
{"sent": 0xFFFFFFFFFFFFFFFF, "received": 64},
]
@pytest.mark.parametrize("data", IP_TO_HEX)
def test_ip_to_hex(data):
assert ip.ip_to_hex(**data["sent"]) == data["received"]
@pytest.mark.parametrize("data", IP_ADDITION)
def test_ip_addition(data):
assert ip.ip_addition(**data["sent"]) == data["received"]
@pytest.mark.parametrize("data", IP_TO_BIN)
def test_ip_to_bin(data):
assert ip.ip_to_bin(**data["sent"]) == data["received"]
@pytest.mark.parametrize("data", IP_SUBTRACT)
def test_ip_subtract(data):
assert ip.ip_subtract(**data["sent"]) == data["received"]
@pytest.mark.parametrize("data", IS_IP)
def test_is_ip(data):
assert ip.is_ip(**data["sent"]) == data["received"]
@pytest.mark.parametrize("data", GET_ALL_HOST)
def test_get_all_host(data):
assert list(ip.get_all_host(**data["sent"])) == data["received"]
@pytest.mark.parametrize("data", GET_BROADCAST_ADDRESS)
def test_get_broadcast_address(data):
assert ip.get_broadcast_address(**data["sent"]) == data["received"]
@pytest.mark.parametrize("data", IP_FIRST_USABLE)
def test_get_first_usable(data):
assert ip.get_first_usable(**data["sent"]) == data["received"]
@pytest.mark.parametrize("data", USABLE_RANGE)
def test_get_usable_range(data):
assert ip.get_usable_range(**data["sent"]) == data["received"]
@pytest.mark.parametrize("data", IS_NETMASK)
def test_is_netmask(data):
assert ip.is_netmask(**data["sent"]) == data["received"]
@pytest.mark.parametrize("data", NETMASK_CIDR)
def test_netmask_to_cidr(data):
assert ip.netmask_to_cidr(**data["sent"]) == data["received"]
def test_netmask_to_cidr_fail():
with pytest.raises(ValueError, match=r"Subnet mask is not valid"):
data = {"netmask": "255.266.0.0"}
ip.netmask_to_cidr(**data)
@pytest.mark.parametrize("data", CIDR_NETMASK)
def test_cidr_to_netmask(data):
assert ip.cidr_to_netmask(**data["sent"]) == data["received"]
@pytest.mark.parametrize("data", CIDR_NETMASK6)
def test_cidr_to_netmaskv6(data):
assert ip.cidr_to_netmaskv6(**data["sent"]) == data["received"]
def test_cidr_to_netmask_fail():
with pytest.raises(ValueError, match=r"Parameter must be an integer between 0 and 32."):
data = {"cidr": 37}
ip.cidr_to_netmask(**data)
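# A few direct calls mirroring the parametrised data above; the expected values
# are taken verbatim from CIDR_NETMASK, NETMASK_CIDR and IS_IP.
def test_ip_examples_from_data():
assert ip.cidr_to_netmask(24) == "255.255.255.0"
assert ip.netmask_to_cidr("255.192.0.0") == 10
assert not ip.is_ip("255.255.255.256")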
|
from __future__ import absolute_import
from ScopeFoundry.base_app import BaseMicroscopeApp, BaseApp
from .measurement import Measurement
from .hardware import HardwareComponent
from .logged_quantity import LoggedQuantity, LQRange, LQCollection
|
import RPi.GPIO as GPIO
from time import sleep
import time, math,sys,os
dist_meas= 0.0
km_per_hour=0
rpm=0
pulse=0
sensor=2
elapse=0
rate=0
start_time= time.time()
def init_GPIO():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(sensor,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)
def calculate_time(channel):
global pulse, start_time, elapse
pulse=pulse+1
elapse= time.time()-start_time
start_time=time.time()
def calculate_speed(r_cm):
global pulse, elapse, rpm, dist_km, dist_meas, km_per_second, km_per_hour
if elapse!=0:
rpm=1/elapse*60
circ_cm=(2*math.pi)*r_cm
# Convert the wheel circumference from cm to km (1 km = 100,000 cm)
dist_km=circ_cm/100000
# Distance per revolution divided by the time per revolution gives km/s
km_per_second= dist_km/elapse
# 3600 seconds per hour
km_per_hour=km_per_second*3600
dist_meas=(dist_km*pulse)*1000
return km_per_hour
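# Worked example (assuming the corrected conversions above): with r_cm = 3 the
# circumference is 2 * pi * 3 ~= 18.85 cm, i.e. ~0.0001885 km per revolution; if
# one revolution takes elapse = 0.5 s, speed ~= 0.000377 km/s ~= 1.36 km/h.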
def init_interrupt():
GPIO.add_event_detect(sensor,GPIO.FALLING,callback = calculate_time,bouncetime =20)
if __name__=='__main__':
init_GPIO()
init_interrupt()
while True:
if GPIO.input(sensor)== 0:
rpm=0
km_per_hour=0
dist_meas=0
pulse=0
elapse=0
try:
calculate_speed(3)
print('rpm:{0:.0f}-RPM kmh:{1:.0f}-KMH time:{2}'.format(rpm,km_per_hour,elapse*3600))
sleep(1)
except KeyboardInterrupt:
GPIO.cleanup()
sys.exit()
|
import secrets
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
class HouseEnv(gym.Env):
def __init__(self, X, y, seed=None):
self.seed(seed)
self.X = X
self.y = y
self.random = secrets.SystemRandom(seed)
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
done = False
reward = 0.
price = self.y[self.steps_counter]
self.steps_counter += 1
reward = 1. / abs(price - action)
if self.steps_counter == len(self.y) - 1:
done = True
self.state = self.X.iloc[[self.steps_counter]]
return self.state, reward, done, {}
def reset(self):
self.steps_counter = 0
self.state = [0] * len(self.X.columns)
return self.state
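# A minimal usage sketch (not part of the original module): drive the environment
# with a tiny pandas frame and a constant "price guess" action. Column names and
# prices below are made up purely for illustration.
if __name__ == '__main__':
import pandas as pd
X_demo = pd.DataFrame({'rooms': [2, 3, 4], 'area': [50., 70., 90.]})
y_demo = np.array([100000., 150000., 200000.])
env = HouseEnv(X_demo, y_demo, seed=0)
state = env.reset()
done = False
while not done:
# Reward is the inverse absolute error between the guess and the true price
state, reward, done, _ = env.step(action=120000.)
print('reward:', reward)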
|
# coding: utf-8
"""
Experimental Looker API 3.1 Preview
This API 3.1 is in active development. Breaking changes are likely to occur to some API functions in future Looker releases until API 3.1 is officially launched and upgraded to beta status. If you have time and interest to experiment with new or modified services exposed in this embryonic API 3.1, we welcome your participation and feedback! For large development efforts or critical line-of-business projects, we strongly recommend you stick with the API 3.0 while API 3.1 is under construction. # noqa: E501
OpenAPI spec version: 3.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import looker_client_31
from looker_client_31.api.role_api import RoleApi # noqa: E501
from looker_client_31.rest import ApiException
class TestRoleApi(unittest.TestCase):
"""RoleApi unit test stubs"""
def setUp(self):
self.api = looker_client_31.api.role_api.RoleApi() # noqa: E501
def tearDown(self):
pass
def test_all_model_sets(self):
"""Test case for all_model_sets
Get All Model Sets # noqa: E501
"""
pass
def test_all_permission_sets(self):
"""Test case for all_permission_sets
Get All Permission Sets # noqa: E501
"""
pass
def test_all_permissions(self):
"""Test case for all_permissions
Get All Permissions # noqa: E501
"""
pass
def test_all_roles(self):
"""Test case for all_roles
Get All Roles # noqa: E501
"""
pass
def test_create_model_set(self):
"""Test case for create_model_set
Create Model Set # noqa: E501
"""
pass
def test_create_permission_set(self):
"""Test case for create_permission_set
Create Permission Set # noqa: E501
"""
pass
def test_create_role(self):
"""Test case for create_role
Create Role # noqa: E501
"""
pass
def test_delete_model_set(self):
"""Test case for delete_model_set
Delete Model Set # noqa: E501
"""
pass
def test_delete_permission_set(self):
"""Test case for delete_permission_set
Delete Permission Set # noqa: E501
"""
pass
def test_delete_role(self):
"""Test case for delete_role
Delete Role # noqa: E501
"""
pass
def test_model_set(self):
"""Test case for model_set
Get Model Set # noqa: E501
"""
pass
def test_permission_set(self):
"""Test case for permission_set
Get Permission Set # noqa: E501
"""
pass
def test_role(self):
"""Test case for role
Get Role # noqa: E501
"""
pass
def test_role_groups(self):
"""Test case for role_groups
Get Role Groups # noqa: E501
"""
pass
def test_role_users(self):
"""Test case for role_users
Get Role Users # noqa: E501
"""
pass
def test_set_role_groups(self):
"""Test case for set_role_groups
Update Role Groups # noqa: E501
"""
pass
def test_set_role_users(self):
"""Test case for set_role_users
Update Role Users # noqa: E501
"""
pass
def test_update_model_set(self):
"""Test case for update_model_set
Update Model Set # noqa: E501
"""
pass
def test_update_permission_set(self):
"""Test case for update_permission_set
Update Permission Set # noqa: E501
"""
pass
def test_update_role(self):
"""Test case for update_role
Update Role # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
from __future__ import unicode_literals
from subprocess import call
from itertools import izip
import argparse
import json
import csv
import pysolr
import gzip
INDEX_NAME = 'entityawareindex'
INDEX_MAP = ["ID", "TITLE", "URL", "PUBLISHER", "CATEGORY", "STORY", "HOSTNAME", "TIMESTAMP"]
SOLR_URL = 'http://localhost:8983/solr'
# Location, Time, Person, Organization, Money, Percent, Date (Stanford NER)
# Person, Norp (Nationalities or religious or political groups.), Facility, Org, GPE (Countries, cities, states.)
# Loc (Non GPE Locations ex. mountain ranges, water), Product (Objects, vehicles, foods, etc. (Not services.),
# EVENT (Named hurricanes, battles, wars, sports events, etc.), WORK_OF_ART (Titles of books, songs, etc), LANGUAGE
# Refer to https://spacy.io/docs/usage/entity-recognition (SPACY NER)
def create_document(record, ner_tags):
"""
This function creates a representation for the document to be
put in the solr index.
"""
document = {}
for idx, field in enumerate(INDEX_MAP):
if field.lower() == 'id':
document[field.lower()] = record[idx]
elif field.lower() == 'title':
for ner_tag, tokens in ner_tags.iteritems():
# generate field like title_<ner_tag> ex. title_person, title_organization etc.
document["_news_%s_%s" % (field.lower(), ner_tag.lower())] \
= " ".join(map(lambda x:x.lower(), tokens))
document["_news_%s" % (field.lower())] = record[idx].lower()
else:
document["_news_%s" % (field.lower())] = record[idx].lower()
return document
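# For example (illustrative values only), a record whose TITLE is "Fed raises rates"
# with ner_tags like {"ORGANIZATION": ["Fed"]} would roughly produce:
#   {'id': ..., '_news_title': 'fed raises rates',
#    '_news_title_organization': 'fed', '_news_url': ..., ...}
# The exact NER tag names depend on the tagger used (Stanford NER vs. spaCy).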
def index(input_file, ner_tags_filename, num_records):
"""
Creates a representation of the document and puts the document
in the solr index. The index name is defined as a part of the url.
"""
# create the solr core
call(["./../../resources/solr-6.6.0/bin/solr", "create", "-c", INDEX_NAME])
solr_interface = pysolr.Solr(url="%s/%s" % (SOLR_URL, INDEX_NAME))
with open(input_file) as csvfile, gzip.open(ner_tags_filename) as ner_tags_file:
records = csv.reader(csvfile, delimiter=b'\t')
ner_tags = csv.reader(ner_tags_file, delimiter=b'\t')
batched_documents = []
for idx, (record, ner_tag_serialized) in enumerate(izip(records, ner_tags)):
if idx == num_records:
break
if len(record) != 8:
continue
if idx % 5000 == 0:
solr_interface.add(batched_documents)
batched_documents = []
print 'Added %d documents to the %s index' % (idx, INDEX_NAME)
ner_tag = json.loads(ner_tag_serialized[1])
batched_documents.append(create_document(record, ner_tag))
# Add any documents left in the final partial batch, then commit the changes
if batched_documents:
solr_interface.add(batched_documents)
solr_interface.commit()
print 'Finished adding the documents to the solr index'
return
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--input', action='store', dest='input', )
arg_parser.add_argument('--ner_tags', action='store', dest='ner_tags', )
arg_parser.add_argument('--num_records', action='store', dest='num_records', default=250000)
args = arg_parser.parse_args()
index(input_file=args.input, ner_tags_filename=args.ner_tags, num_records=args.num_records)
if __name__ == '__main__':
main()
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from graphite.render.datalib import TimeSeries, timestamp
from graphite.render.attime import parseTimeOffset
from itertools import izip
import math
import re
#Utility functions
def safeSum(values):
safeValues = [v for v in values if v is not None]
if not safeValues: return None
return sum(safeValues)
def safeDiff(values):
safeValues = [v for v in values if v is not None]
if not safeValues: return None
values = map(lambda x: x*-1, safeValues[1:])
values.insert(0, safeValues[0])
return sum(values)
def safeLen(values):
return len([v for v in values if v is not None])
def safeDiv(a,b):
if a is None: return None
if b in (0,None): return None
return float(a) / float(b)
def safeMul(a,b):
if a is None or b is None: return None
return float(a) * float(b)
def safeLast(values):
for v in reversed(values):
if v is not None: return v
def safeMin(values):
safeValues = [v for v in values if v is not None]
if safeValues:
return min(safeValues)
def safeMax(values):
safeValues = [v for v in values if v is not None]
if safeValues:
return max(safeValues)
def lcm(a,b):
'least common multiple'
if a == b: return a
if a < b: (a,b) = (b,a) #ensure a > b
for i in xrange(1,a * b):
if a % (b * i) == 0 or (b * i) % a == 0: #probably inefficient
return max(a,b * i)
return a * b
def normalize(seriesLists):
seriesList = reduce(lambda L1,L2: L1+L2,seriesLists)
step = reduce(lcm,[s.step for s in seriesList])
for s in seriesList:
s.consolidate( step / s.step )
start = min([s.start for s in seriesList])
end = max([s.end for s in seriesList])
end -= (end - start) % step
return (seriesList,start,end,step)
# Series Functions
#NOTE: Some of the functions below use izip, which may be problematic.
#izip stops when it hits the end of the shortest series
#in practice this *shouldn't* matter because all series will cover
#the same interval, despite having possibly different steps...
def sumSeries(requestContext, *seriesLists):
try:
(seriesList,start,end,step) = normalize(seriesLists)
except:
return []
#name = "sumSeries(%s)" % ','.join((s.name for s in seriesList))
name = "sumSeries(%s)" % ','.join(set([s.pathExpression for s in seriesList]))
values = ( safeSum(row) for row in izip(*seriesList) )
series = TimeSeries(name,start,end,step,values)
series.pathExpression = name
return [series]
def sumSeriesWithWildcards(requestContext, seriesList, *position): #XXX
if type(position) is int:
positions = [position]
else:
positions = position
newSeries = {}
for series in seriesList:
newname = '.'.join(map(lambda x: x[1], filter(lambda i: i[0] not in positions, enumerate(series.name.split('.')))))
if newname in newSeries.keys():
newSeries[newname] = sumSeries(requestContext, (series, newSeries[newname]))[0]
else:
newSeries[newname] = series
newSeries[newname].name = newname
return newSeries.values()
def averageSeriesWithWildcards(requestContext, seriesList, *position): #XXX
if type(position) is int:
positions = [position]
else:
positions = position
result = []
matchedList = {}
for series in seriesList:
newname = '.'.join(map(lambda x: x[1], filter(lambda i: i[0] not in positions, enumerate(series.name.split('.')))))
if not matchedList.has_key(newname):
matchedList[newname] = []
matchedList[newname].append(series)
for name in matchedList.keys():
result.append( averageSeries(requestContext, (matchedList[name]))[0] )
result[-1].name = name
return result
def diffSeries(requestContext, *seriesLists):
(seriesList,start,end,step) = normalize(seriesLists)
name = "diffSeries(%s)" % ','.join(set([s.pathExpression for s in seriesList]))
values = ( safeDiff(row) for row in izip(*seriesList) )
series = TimeSeries(name,start,end,step,values)
series.pathExpression = name
return [series]
def averageSeries(requestContext, *seriesLists):
(seriesList,start,end,step) = normalize(seriesLists)
#name = "averageSeries(%s)" % ','.join((s.name for s in seriesList))
name = "averageSeries(%s)" % ','.join(set([s.pathExpression for s in seriesList]))
values = ( safeDiv(safeSum(row),safeLen(row)) for row in izip(*seriesList) )
series = TimeSeries(name,start,end,step,values)
series.pathExpression = name
return [series]
def minSeries(requestContext, *seriesLists):
(seriesList, start, end, step) = normalize(seriesLists)
pathExprs = list( set([s.pathExpression for s in seriesList]) )
name = "minSeries(%s)" % ','.join(pathExprs)
values = ( safeMin(row) for row in izip(*seriesList) )
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
return [series]
def maxSeries(requestContext, *seriesLists):
(seriesList, start, end, step) = normalize(seriesLists)
pathExprs = list( set([s.pathExpression for s in seriesList]) )
name = "maxSeries(%s)" % ','.join(pathExprs)
values = ( safeMax(row) for row in izip(*seriesList) )
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
return [series]
def keepLastValue(requestContext, seriesList):
for series in seriesList:
series.name = "keepLastValue(%s)" % (series.name)
for i,value in enumerate(series):
if value is None and i != 0:
value = series[i-1]
series[i] = value
return seriesList
def asPercent(requestContext, seriesList1, seriesList2orNumber):
assert len(seriesList1) == 1, "asPercent series arguments must reference *exactly* 1 series"
series1 = seriesList1[0]
if type(seriesList2orNumber) is list:
assert len(seriesList2orNumber) == 1, "asPercent series arguments must reference *exactly* 1 series"
series2 = seriesList2orNumber[0]
name = "asPercent(%s,%s)" % (series1.name,series2.name)
series = (series1,series2)
step = reduce(lcm,[s.step for s in series])
for s in series:
s.consolidate( step / s.step )
start = min([s.start for s in series])
end = max([s.end for s in series])
end -= (end - start) % step
values = ( safeMul( safeDiv(v1,v2), 100.0 ) for v1,v2 in izip(*series) )
else:
number = float(seriesList2orNumber)
name = "asPercent(%s,%.1f)" % (series1.name,number)
step = series1.step
start = series1.start
end = series1.end
values = ( safeMul( safeDiv(v,number), 100.0 ) for v in series1 )
series = TimeSeries(name,start,end,step,values)
series.pathExpression = name
return [series]
def divideSeries(requestContext, dividendSeriesList, divisorSeriesList):
if len(divisorSeriesList) != 1:
raise ValueError("divideSeries second argument must reference exactly 1 series")
divisorSeries = divisorSeriesList[0]
results = []
for dividendSeries in dividendSeriesList:
name = "divideSeries(%s,%s)" % (dividendSeries.name, divisorSeries.name)
bothSeries = (dividendSeries, divisorSeries)
step = reduce(lcm,[s.step for s in bothSeries])
for s in bothSeries:
s.consolidate( step / s.step )
start = min([s.start for s in bothSeries])
end = max([s.end for s in bothSeries])
end -= (end - start) % step
values = ( safeDiv(v1,v2) for v1,v2 in izip(*bothSeries) )
quotientSeries = TimeSeries(name, start, end, step, values)
quotientSeries.pathExpression = name
results.append(quotientSeries)
return results
def scale(requestContext, seriesList, factor):
for series in seriesList:
series.name = "scale(%s,%.1f)" % (series.name,float(factor))
for i,value in enumerate(series):
series[i] = safeMul(value,factor)
return seriesList
def offset(requestContext, seriesList, factor):
for series in seriesList:
series.name = "offset(%s,%.1f)" % (series.name,float(factor))
for i,value in enumerate(series):
if value is not None:
series[i] = value + factor
return seriesList
def movingAverage(requestContext, seriesList, windowSize):
for seriesIndex, series in enumerate(seriesList):
newName = "movingAverage(%s,%.1f)" % (series.name, float(windowSize))
newSeries = TimeSeries(newName, series.start, series.end, series.step, [])
newSeries.pathExpression = newName
windowIndex = windowSize - 1
for i in range( len(series) ):
if i < windowIndex: # Pad the beginning with None's since we don't have enough data
newSeries.append( None )
else:
window = series[i - windowIndex : i + 1]
nonNull = [ v for v in window if v is not None ]
if nonNull:
newSeries.append( sum(nonNull) / len(nonNull) )
else:
newSeries.append(None)
seriesList[ seriesIndex ] = newSeries
return seriesList
def cumulative(requestContext, seriesList):
for series in seriesList:
series.consolidationFunc = 'sum'
series.name = 'cumulative(%s)' % series.name
return seriesList
def derivative(requestContext, seriesList):
results = []
for series in seriesList:
newValues = []
prev = None
for val in series:
if None in (prev,val):
newValues.append(None)
prev = val
continue
newValues.append(val - prev)
prev = val
newName = "derivative(%s)" % series.name
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
def integral(requestContext, seriesList):
results = []
for series in seriesList:
newValues = []
current = 0.0
for val in series:
if val is None:
newValues.append(None)
else:
current += val
newValues.append(current)
newName = "integral(%s)" % series.name
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
def nonNegativeDerivative(requestContext, seriesList, maxValue=None):
results = []
for series in seriesList:
newValues = []
prev = None
for val in series:
if None in (prev, val):
newValues.append(None)
prev = val
continue
diff = val - prev
if diff >= 0:
newValues.append(diff)
elif maxValue is not None and maxValue >= val:
newValues.append( (maxValue - prev) + val + 1 )
else:
newValues.append(None)
prev = val
newName = "nonNegativeDerivative(%s)" % series.name
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
def alias(requestContext, seriesList, newName):
for series in seriesList:
series.name = newName
return seriesList
def substr(requestContext, seriesList, start=0, stop=0):
for series in seriesList:
left = series.name.rfind('(') + 1
right = series.name.find(')')
if right < 0:
right = len(series.name)+1
cleanName = series.name[left:right:]
if int(stop) == 0:
series.name = '.'.join(cleanName.split('.')[int(start)::])
else:
series.name = '.'.join(cleanName.split('.')[int(start):int(stop):])
return seriesList
def log(requestContext, seriesList, base=10):
results = []
for series in seriesList:
newValues = []
for val in series:
if val is None:
newValues.append(None)
elif val <= 0:
newValues.append(None)
else:
newValues.append(math.log(val, base))
newName = "log(%s, %s)" % (series.name, base)
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
def maximumAbove(requestContext, seriesList, n):
results = []
for series in seriesList:
if max(series) >= n:
results.append(series)
return results
def maximumBelow(requestContext, seriesList, n):
result = []
for series in seriesList:
if max(series) <= n:
result.append(series)
return result
def highestCurrent(requestContext, seriesList, n):
return sorted( seriesList, key=safeLast )[-n:]
def highestMax(requestContext, seriesList, n):
"""Returns upto n seriesList members where the respective series has a max member is in the top-n."""
result_list = sorted( seriesList, key=lambda s: max(s) )[-n:]
return sorted(result_list, key=lambda s: max(s), reverse=True)
def lowestCurrent(requestContext, seriesList, n):
return sorted( seriesList, key=safeLast )[:n]
def currentAbove(requestContext, seriesList, n):
return [ series for series in seriesList if safeLast(series) >= n ]
def currentBelow(requestContext, seriesList, n):
return [ series for series in seriesList if safeLast(series) <= n ]
def highestAverage(requestContext, seriesList, n):
return sorted( seriesList, key=lambda s: safeDiv(safeSum(s),safeLen(s)) )[-n:]
def lowestAverage(requestContext, seriesList, n):
return sorted( seriesList, key=lambda s: safeDiv(safeSum(s),safeLen(s)) )[:n]
def averageAbove(requestContext, seriesList, n):
return [ series for series in seriesList if safeDiv(safeSum(series),safeLen(series)) >= n ]
def averageBelow(requestContext, seriesList, n):
return [ series for series in seriesList if safeDiv(safeSum(series),safeLen(series)) <= n ]
def percentileOrdinal(n, series):
result = int( safeDiv(n * len(series), 100) + 0.5 )
return result
def nPercentile(requestContext, seriesList, n):
"""Returns n-percent of each series in the seriesList."""
assert n, 'The requested percent is required to be greater than 0'
results = []
for s in seriesList:
# Create a sorted copy of the TimeSeries excluding None values in the values list.
s_copy = TimeSeries( s.name, s.start, s.end, s.step, sorted( [item for item in s if item is not None] ) )
if not s_copy:
continue # Skip this series because it is empty.
pord = percentileOrdinal( n, s_copy )
if pord > 0:
i = pord - 1
else:
i = pord
perc_val = s_copy[i]
if perc_val:
results.append( TimeSeries( '%dth Percentile(%s, %.1f)' % ( n, s_copy.name, perc_val ),
s_copy.start, s_copy.end, s_copy.step, [perc_val] ) )
return results
def limit(requestContext, seriesList, n):
return seriesList[0:n]
def sortByMaxima(requestContext, seriesList):
def compare(x,y):
return cmp(max(y), max(x))
seriesList.sort(compare)
return seriesList
def sortByMinima(requestContext, seriesList):
def compare(x,y):
return cmp(min(x), min(y))
newSeries = [series for series in seriesList if max(series) > 0]
newSeries.sort(compare)
return newSeries
def mostDeviant(requestContext, n, seriesList):
deviants = []
for series in seriesList:
mean = safeDiv( safeSum(series), safeLen(series) )
if mean is None: continue
square_sum = sum([ (value - mean) ** 2 for value in series if value is not None ])
sigma = safeDiv(square_sum, safeLen(series))
if sigma is None: continue
deviants.append( (sigma, series) )
deviants.sort(key=lambda i: i[0], reverse=True) #sort by sigma
return [ series for (sigma,series) in deviants ][:n] #return the n most deviant series
# returns a two-element tuple
# the first element is the std dev, the second is the new sum of squares
def doStdDev(sumOfSquares, first, new, n, avg):
newSumOfSquares = sumOfSquares - (first * first) + (new * new)
return (math.sqrt((newSumOfSquares / float(n)) - (avg * avg)), newSumOfSquares)
def stdev(requestContext, seriesList, time):
count = 0
for series in seriesList:
stddevs = TimeSeries("stddev(%s,%.1f)" % (series.name, float(time)), series.start, series.end, series.step, [])
stddevs.pathExpression = "stddev(%s,%.1f)" % (series.name, float(time))
avg = safeDiv(safeSum(series[:time]), time)
if avg is not None:
sumOfSquares = sum(map(lambda(x): x * x, [v for v in series[:time] if v is not None]))
(sd, sumOfSquares) = doStdDev(sumOfSquares, 0, 0, time, avg)
stddevs.append(sd)
else:
stddevs.append(None)
for (index, el) in enumerate(series[time:]):
if el is None:
continue
toDrop = series[index]
if toDrop is None:
toDrop = 0
s = safeSum([safeMul(time, avg), el, -toDrop])
avg = safeDiv(s, time)
if avg is not None:
(sd, sumOfSquares) = doStdDev(sumOfSquares, toDrop, series[index+time], time, avg)
stddevs.append(sd)
else:
stddevs.append(None)
for i in range(0, time-1):
stddevs.insert(0, None)
seriesList[count] = stddevs
count = count + 1
return seriesList
def drawAsInfinite(requestContext, seriesList):
for series in seriesList:
series.options['drawAsInfinite'] = True
series.name = 'drawAsInfinite(%s)' % series.name
return seriesList
def lineWidth(requestContext, seriesList, width):
for series in seriesList:
series.options['lineWidth'] = width
return seriesList
def dashed(requestContext, *seriesList):
if len(seriesList) == 2:
dashLength = seriesList[1]
else:
dashLength = 5
for series in seriesList[0]:
series.name = 'dashed(%s, %d)' % (series.name, dashLength)
series.options['dashed'] = dashLength
return seriesList[0]
def timeShift(requestContext, seriesList, timeShift):
delta = abs( parseTimeOffset(timeShift) )
myContext = requestContext.copy()
myContext['startTime'] = requestContext['startTime'] - delta
myContext['endTime'] = requestContext['endTime'] - delta
series = seriesList[0] # if len(seriesList) > 1, they will all have the same pathExpression, which is all we care about.
results = []
for shiftedSeries in evaluateTarget(myContext, series.pathExpression):
shiftedSeries.name = 'timeShift(%s, %s)' % (shiftedSeries.name, timeShift)
shiftedSeries.start = series.start
shiftedSeries.end = series.end
results.append(shiftedSeries)
return results
def constantLine(requestContext, value):
start = timestamp( requestContext['startTime'] )
end = timestamp( requestContext['endTime'] )
step = end - start
series = TimeSeries(str(value), start, end, step, [value])
return [series]
def threshold(requestContext, value, label=None, color=None):
series = constantLine(requestContext, value)[0]
if label:
series.name = label
if color:
series.color = color
return [series]
def group(requestContext, *seriesLists):
seriesGroup = []
for s in seriesLists:
seriesGroup.extend(s)
return seriesGroup
def exclude(requestContext, seriesList, pattern):
regex = re.compile(pattern)
return [s for s in seriesList if not regex.search(s.name)]
def summarize(requestContext, seriesList, intervalString):
results = []
delta = parseTimeOffset(intervalString)
interval = delta.seconds + (delta.days * 86400)
for series in seriesList:
buckets = {}
timestamps = range( int(series.start), int(series.end), int(series.step) )
datapoints = zip(timestamps, series)
for (timestamp, value) in datapoints:
bucketInterval = timestamp - (timestamp % interval)
if bucketInterval not in buckets:
buckets[bucketInterval] = []
if value is not None:
buckets[bucketInterval].append(value)
newStart = series.start - (series.start % interval)
newEnd = series.end - (series.end % interval) + interval
newValues = []
for timestamp in range(newStart, newEnd, interval):
bucket = buckets.get(timestamp, [])
if bucket:
newValues.append( sum(bucket) )
else:
newValues.append( None )
newName = "summarize(%s, \"%s\")" % (series.name, intervalString)
newSeries = TimeSeries(newName, newStart, newEnd, interval, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
def hitcount(requestContext, seriesList, intervalString):
"""Estimate hit counts from a list of time series.
This function assumes the values in each time series represent
hits per second. It calculates hits per some larger interval
such as per day or per hour. This function is like summarize(),
except that it compensates automatically for different time scales
(so that a similar graph results from using either fine-grained
or coarse-grained records) and handles rarely-occurring events
gracefully.
"""
results = []
delta = parseTimeOffset(intervalString)
interval = int(delta.seconds + (delta.days * 86400))
for series in seriesList:
length = len(series)
step = int(series.step)
bucket_count = int(math.ceil(float(series.end - series.start) / interval))
buckets = [[] for _ in range(bucket_count)]
newStart = int(series.end - bucket_count * interval)
for i, value in enumerate(series):
if value is None:
continue
start_time = int(series.start + i * step)
start_bucket, start_mod = divmod(start_time - newStart, interval)
end_time = start_time + step
end_bucket, end_mod = divmod(end_time - newStart, interval)
if end_bucket >= bucket_count:
end_bucket = bucket_count - 1
end_mod = interval
if start_bucket == end_bucket:
# All of the hits go to a single bucket.
if start_bucket >= 0:
buckets[start_bucket].append(value * (end_mod - start_mod))
else:
# Spread the hits among 2 or more buckets.
if start_bucket >= 0:
buckets[start_bucket].append(value * (interval - start_mod))
hits_per_bucket = value * interval
for j in range(start_bucket + 1, end_bucket):
buckets[j].append(hits_per_bucket)
if end_mod > 0:
buckets[end_bucket].append(value * end_mod)
newValues = []
for bucket in buckets:
if bucket:
newValues.append( sum(bucket) )
else:
newValues.append(None)
newName = 'hitcount(%s, "%s")' % (series.name, intervalString)
newSeries = TimeSeries(newName, newStart, series.end, interval, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
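# Worked example (hypothetical numbers): with a 60s series step and an interval of 3600s,
# a value v in hits/second contributes v*60 hits in total; if its 60s span straddles a
# bucket boundary, the loop above splits those hits proportionally between the two buckets
# (v*(interval - start_mod) hits into the first bucket and v*end_mod into the second).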
def pieAverage(requestContext, series):
return safeDiv(safeSum(series),safeLen(series))
def pieMaximum(requestContext, series):
return max(series)
def pieMinimum(requestContext, series):
return min(series)
PieFunctions = {
'average' : pieAverage,
'maximum' : pieMaximum,
'minimum' : pieMinimum,
}
SeriesFunctions = {
# Combine functions
'sumSeries' : sumSeries,
'sum' : sumSeries,
'diffSeries' : diffSeries,
'divideSeries' : divideSeries,
'averageSeries' : averageSeries,
'avg' : averageSeries,
'sumSeriesWithWildcards': sumSeriesWithWildcards,
'averageSeriesWithWildcards': averageSeriesWithWildcards,
'minSeries' : minSeries,
'maxSeries' : maxSeries,
# Transform functions
'scale' : scale,
'offset' : offset,
'derivative' : derivative,
'integral' : integral,
'nonNegativeDerivative' : nonNegativeDerivative,
'log' : log,
'timeShift': timeShift,
'summarize' : summarize,
'hitcount' : hitcount,
# Calculate functions
'movingAverage' : movingAverage,
'stdev' : stdev,
'asPercent' : asPercent,
'pct' : asPercent,
# Filter functions
'mostDeviant' : mostDeviant,
'highestCurrent' : highestCurrent,
'lowestCurrent' : lowestCurrent,
'highestMax' : highestMax,
'currentAbove' : currentAbove,
'currentBelow' : currentBelow,
'highestAverage' : highestAverage,
'lowestAverage' : lowestAverage,
'averageAbove' : averageAbove,
'averageBelow' : averageBelow,
'maximumAbove' : maximumAbove,
'maximumBelow' : maximumBelow,
'nPercentile' : nPercentile,
'limit' : limit,
'sortByMaxima' : sortByMaxima,
'sortByMinima' : sortByMinima,
# Special functions
'alias' : alias,
'cumulative' : cumulative,
'keepLastValue' : keepLastValue,
'drawAsInfinite' : drawAsInfinite,
'lineWidth' : lineWidth,
'dashed' : dashed,
'substr' : substr,
'group' : group,
'exclude' : exclude,
'constantLine' : constantLine,
'threshold' : threshold,
}
#Avoid import circularity
from graphite.render.evaluator import evaluateTarget
|
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as pb
import random
from datetime import datetime
import time
def dist(x, y, pos):
return math.sqrt((pos[0]-x)**2 + (pos[1]-y)**2)
areaSize=(30, 30)
node_positions = (areaSize[0]+6,areaSize[1]+6)
node_pos=[(-node_positions[0],node_positions[1]),(node_positions[0],node_positions[1]),(node_positions[0],-node_positions[1]),(-node_positions[0],-node_positions[1])]
initial_pos=(0,0)
NOISE_LEVEL=1
RESOLUTION=10
STEP_SIZE=1/RESOLUTION
random.seed(datetime.now())
n = 3
rss0 = 20 + 20 * math.log10(3 / (4 * math.pi * 2.4 * 10))
rss0 = rss0-NOISE_LEVEL*random.random()
def gen_wifi(freq=2.4, power=20, trans_gain=0, recv_gain=0, size=areaSize, pos=(5,5), shadow_dev=2, n=3,noise=NOISE_LEVEL):
if pos is None:
pos = (random.randrange(size[0]), random.randrange(size[1]))
normal_dist = np.random.normal(0, shadow_dev, size=[size[0]+1, size[1]+1])
rss = []
random.seed(datetime.now())
for x in range(0,4):
distance = dist(node_pos[x][0], node_pos[x][1], pos)
val =rss0 - 10 * n * math.log10(distance) + normal_dist[int(pos[0])][int(pos[1])]
rss.append(val-noise*random.random())
return rss
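# Model note (hedged): the value above follows the standard log-distance path-loss model,
#   rss(d) = rss0 - 10*n*log10(d) + X_sigma,
# where X_sigma is the Gaussian shadowing term drawn from normal_dist. For example, with
# the path-loss exponent n=3 used here, a node 10 units away sees a deterministic drop of
# 10*3*log10(10) = 30 dB below rss0 (plus shadowing and the random noise subtracted above).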
# plt.ion()
# plt.title("Robot Trajectory")
# plt.ylim(-areaSize[1],areaSize[1])
# plt.xlim(-areaSize[0],areaSize[0])
# plt.plot( node_pos[0],node_pos[1], 'ro', markersize=5, clip_on=False, zorder=100)
# plt.plot( node_pos[2],node_pos[3], 'ro', markersize=5, clip_on=False, zorder=100)
rss_pos=[(node_pos[0][0]+3,node_pos[0][1]-1),(node_pos[1][0]-25,node_pos[1][1]-1),(node_pos[2][0]-25,node_pos[2][1]-1),(node_pos[3][0]+3,node_pos[3][1]-1)]
text=[]
overall_rss=[]
original_tragectory=[]
Previous_pos = initial_pos
for i in range(0,4):
text.append(plt.text(rss_pos[i][0],rss_pos[i][1], 'RSS'+str(i), fontsize=10))
def move(pos):
x,y=pos[0],pos[1]
original_tragectory.append((x,y))
# plt.plot([Previous_pos[0],x],[Previous_pos[1],y],'g-',linewidth=2,clip_on=False)
rss = gen_wifi(pos=(x,y))
overall_rss.append(rss)
for i in range(0,4):
text[i].set_text('RSS'+str(i+1)+'='+str(round(rss[i], 2)))
# plt.draw()
# plt.pause(0.0001)
x=0
for y in np.arange(0,areaSize[1],STEP_SIZE):
move((x,y))
Previous_pos = (x,y)
y=areaSize[1]
for x in np.arange(0,areaSize[0],STEP_SIZE):
move((x,y))
Previous_pos = (x,y)
x=areaSize[0]
for y in np.arange(areaSize[1],-areaSize[1],-STEP_SIZE):
move((x,y))
Previous_pos = (x,y)
y=-areaSize[1]
for x in np.arange(areaSize[0],-areaSize[0],-STEP_SIZE):
move((x,y))
Previous_pos = (x,y)
x=-areaSize[0]
for y in np.arange(-areaSize[1],areaSize[1],STEP_SIZE):
move((x,y))
Previous_pos = (x,y)
y=areaSize[1]
for x in np.arange(-areaSize[0],0,STEP_SIZE):
move((x,y))
Previous_pos = (x,y)
# plt.show(block=False)
# plt.savefig('PF_tragectory_boundry.png')
# plt.pause(3)
# plt.clf()
# plt.close()
def getDistanceFromRSS(rssi):
return math.pow(10,((rss0-rssi)/(10*n)))
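# This inverts the same log-distance model: d = 10**((rss0 - rssi) / (10*n)). For instance,
# a reading 30 dB below rss0 with n=3 maps back to 10**(30/30) = 10 distance units.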
dist_i = []
candidate_pos = []
for i in range(0,len(overall_rss)):
dist_j = []
for j in range(0,4):
dist_j.append(getDistanceFromRSS(overall_rss[i][j]))
dist_i.append(dist_j)
# print(dist_j)
candidate_pos_j =[]
for j in range(0,4):
y = node_pos[j][1]-dist_j[j]
x = node_pos[j][0]
# print(x,y)
candidate_inter_pos = []
while y < node_pos[j][1]:
x_inter = math.sqrt(abs((dist_j[j]**2) - ((y-node_pos[j][1])**2)))
candidate_inter_pos.append((x_inter+x,y))
candidate_inter_pos.append((x_inter+x,-y))
candidate_inter_pos.append((-x_inter+x,y))
candidate_inter_pos.append((-x_inter+x,-y))
y+=1
candidate_pos_j.append(candidate_inter_pos)
candidate_pos.append(candidate_pos_j)
# plt.ion()
# plt.ylim(-areaSize[1],areaSize[1])
# plt.xlim(-areaSize[0],areaSize[0])
distance_error =[]
start_time = time.time()
for i in range(0,len(original_tragectory)):
positions =[]
errors=[]
for j in range(0,4):
for k in range(len(candidate_pos[i][j])):
position = candidate_pos[i][j][k]
error = 0
for l in range(0,4):
error_inter = math.sqrt(((position[0]-node_pos[l][0])**2) + ((position[1]-node_pos[l][1])**2))
error = error + math.pow((error_inter - dist_i[i][l]),2)
errors.append(error)
positions.append(position)
min_error = min(errors)
min_index = errors.index(min_error)
predicted_pos = positions[min_index]
distance_error.append(dist(predicted_pos[0],predicted_pos[1],original_tragectory[i]))
# plt.show(block=False)
# plt.savefig('PF_predicted_trajectory_boundry.png')
print("--- Computation Time: %s seconds ---" % (time.time() - start_time))
distcumulativeError=np.sum(distance_error)/10
distmeanError=np.average(distance_error)/10
distStandardDeviationError=np.std(distance_error)/10
print("DIST_ERROR: Cumulative Error: " + str(distcumulativeError)+"\tMean Error: "+str(distmeanError)+"\tStandard Deviation: "+str(distStandardDeviationError))
resultFile = open("error_boundry_LSE.csv", "a") # append mode
resultFile.write(str(distcumulativeError)+","+str(distmeanError)+","+str(distStandardDeviationError)+"\n")
resultFile.close()
# plt.pause(3)
# plt.clf()
# plt.close()
|
#!/usr/bin/env python
'''
Module containing simple statistical calculations
Author: Sid Narayanan < sidn AT mit DOT edu >
'''
import ROOT as root
#import root_numpy as rnp
import numpy as np
from PandaCore.Tools.Misc import *
from RooFitUtils import *
from numpyUtils import *
class SimpleVar(object):
'''
Simple wrapper object to hold a formula and its bounds.
Automatically creates a RooRealVar.
'''
def __init__(self,name,lo,hi,nbins=None,title=None):
self.name=name
self.lo=lo
self.hi=hi
self.title = title if title else name
self.rvar = root.RooRealVar(self.name,self.title,self.lo,self.hi)
self.nbins = nbins if nbins else 50 # is a dummy value if only used in unbinned context
def calcUnbinnedKSFromMC(svars,mcTree,dataTree,mcCut,mcWeight,dataCut=None,returnPlot=False):
'''
Calculate unbinned Kolmogorov Smirnoff probability.
Given TTrees of data and MC simulation,
builds a KDE pdf out of the simulated data points,
then calculates the KS stat
'''
if not dataCut:
dataCut=mcCut
svar=svars[0]
kde = treeToKDE(mcTree,[s.rvar for s in svars],mcCut,mcWeight)
obs = sorted(list(treeToArr(dataTree,[svar.name],dataCut)[svar.name]))
cdf = kde.createCdf(root.RooArgSet(svar.rvar))
ks = 0
nD = len(obs)
for iD in xrange(nD):
x = obs[iD]
y_data = 1.*iD/nD
svar.rvar.setVal(x)
y_mc = cdf.getVal()
ks = max(ks,abs(y_data-y_mc))
# convert ks to a probability
mD = mcTree.GetEntries(mcCut)
    prob = root.TMath.KolmogorovProb( ks * np.sqrt(float(mD)*nD/(mD+nD)) )
if not returnPlot:
return prob
else:
PError("PandaCore.SimpleStats.calcUnbinnedKSFromMC","Returning plots is not implemented yet")
return prob
def calcBinnedKS(svar,mcTree,dataTree,mcCut,mcWeight,dataCut=None,returnPlot=False):
'''
Calculate binned KS statistic.
Given TTrees of data and MC simulation,
builds a binned PDF (histogram) and then
calculates the KS stat
'''
if not dataCut:
        dataCut=mcCut
    hdata = root.TH1D('hdata','hdata',svar.nbins,svar.lo,svar.hi)
hmc = hdata.Clone('hmc')
mcTree.Draw('%s>>hmc'%(svar.name),tTIMES(mcWeight,mcCut))
dataTree.Draw('%s>>hdata'%(svar.name),dataCut)
prob = hmc.KolmogorovTest(hdata)
if not returnPlot:
return prob
else:
PError("PandaCore.SimpleStats.calcBinnedKS","Returning plots is not implemented yet")
return prob
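# Note (hedged sketch of the statistic used above): both functions estimate the Kolmogorov-
# Smirnov distance D = sup_x |F_data(x) - F_mc(x)| between the empirical CDF of the data and
# the CDF of the simulation. The unbinned version computes D explicitly and converts it to a
# probability with TMath::KolmogorovProb(D * sqrt(m*n/(m+n))), where m and n are the sample
# sizes; the binned version delegates the equivalent calculation to TH1::KolmogorovTest.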
|
import numpy as np
from scipy import linalg as sciLinalg
from dataio import DataIO
class ClosedForm(object):
""" Closed Form solution of regression problem.
Polynomial Fitting
y(x, w) = w0 + w1 x^1 + w2 x^2 ... wM x^M (order M)
-> compute weights wj
SOLVE
- read input and target vector
- compute weights of M order polynomial
- write weights to file
FUNCTION
- create input and target vector to draw M order polynomial
1) create x (length N), loop through bounded interval
2) compute N time Y(x, w)
3) Write to file (option xy)
YVALUE
- given x value, return y(x, w)
"""
X_LB = 0.0 # Lower Bound
X_UB = 1.0 # Upper Bound
NLARGE = 100001 # decritization number of y(x, w) (vizualization)
def __init__(self, M):
""" Initializes ClosedForm object. """
self.M = M
self.A = np.empty([self.M+1, self.M+1])
self.T = np.empty([self.M+1])
self.W = None
        self._polynomial = None
self.RMSE_train = None
self.RMSE_test = None
def solve(self, file_name, file_out, ln_lambda=None):
""" Import data, compute weights.
:param file_name - str, name of data file.
:param file_out - str, name of the data file to store the weights.
:param ln_lambda - flaot, regularization constant.
:output None - weights are computed and exported.
"""
data = DataIO.read_data(file_name)
x, t, e = data
self._populate_A(x, ln_lambda)
self._populate_T(x, t)
self.W = sciLinalg.solve(self.A, self.T)
# store polynomial function
self._polynomial = self._vectorize_polynomial()
title = 'weights'
DataIO.write_data([self.W], file_out, title)
# compute Ermse
y = self._polynomial(x)
self.RMSE_train = np.sqrt( np.mean( (y-t)**2 ) )
def function(self, file_name):
x = np.linspace(self.X_LB, self.X_UB, self.NLARGE)
y = self._polynomial(x)
title = 'input\tpolynomial'
DataIO.write_data([x,y], file_name, title)
def test(self, file_name):
""" Compute RMSE test data set. """
data = DataIO.read_data(file_name)
x, t, e = data
y = self._polynomial(x)
self.RMSE_test = np.sqrt( np.mean( (y-t)**2 ) )
def yvalue(self, x):
""" return y value, given x value. """
return self._polynomial(x)
def _vectorize_polynomial(self):
""" Return fitted polynomial function.
:output def - function is returned.
"""
def polynomial(x):
y = sum([wj*x**j for j, wj in enumerate(self.W)])
return y
return np.vectorize(polynomial)
def _populate_A(self, x, ln_lambda):
""" Construct symmetric A matrix.
:param x - input vector of length N.
"""
if ln_lambda is not None:
l = np.exp(ln_lambda)
else:
l = 0
for i in range(self.M+1):
for j in range(i, self.M+1):
Aij = sum([xn**(i+j) for xn in x])
if i !=j:
self.A[i,j] = Aij
self.A[j,i] = Aij
else:
self.A[i,i] = Aij + l
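    # Note: together, _populate_A and _populate_T set up the (ridge-)regularized normal
    # equations A w = T of least-squares polynomial fitting, with
    #   A[i][j] = sum_n x_n**(i+j) + exp(ln_lambda)*delta_ij   and   T[i] = sum_n t_n * x_n**i,
    # which solve() then hands to scipy's linear solver.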
def _populate_T(self, x, t):
""" Construct vector T.
:param x - input vector of length N.
:param t - target vector of length N.
"""
for i in range(self.M+1):
Ti = sum([xn**i*tn for xn, tn in zip(x, t)])
self.T[i] = Ti
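# Hedged usage sketch (the data file names below are hypothetical; DataIO.read_data is
# expected to return (x, t, e) arrays as assumed by solve() and test() above):
if __name__ == "__main__":
    cf = ClosedForm(M=3)
    cf.solve("train.dat", "weights.dat", ln_lambda=-18.0)   # fit and export the weights
    cf.function("polynomial.dat")                           # export the fitted curve on [0, 1]
    cf.test("test.dat")                                     # RMSE on a held-out set
    print("train RMSE:", cf.RMSE_train, "test RMSE:", cf.RMSE_test)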
|
val = 100
print(val.__hash__())
print("falcon".__hash__())
print((1,).__hash__())
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the add os command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
os_defaults = {
'aquilon': {
'solaris': {
'11.1-x86_64': {},
},
},
'aurora': {
'linux': {
'generic': {},
},
},
'f5': {
'f5': {
'generic': {},
},
},
'filer': {
'ontap': {
'7.3.3p1': {},
},
},
'netinfra': {
'generic': {
'generic': {},
},
},
'utappliance': {
'utos': {
'1.0': {},
},
},
'utarchetype1': {
'utos': {
'1.0': {},
},
},
'utarchetype2': {
'utos2': {
'1.0': {},
},
},
'vmhost': {
'esxi': {
'5.0.0': {},
'5.0.2': {},
},
},
'windows': {
'windows': {
'generic': {},
'nt61e': {},
},
},
}
class TestAddOS(TestBrokerCommand):
linux_version_prev = None
linux_version_curr = None
@classmethod
def setUpClass(cls):
super(TestAddOS, cls).setUpClass()
cls.linux_version_prev = cls.config.get("unittest",
"linux_version_prev")
cls.linux_version_curr = cls.config.get("unittest",
"linux_version_curr")
cls.proto = cls.protocols['aqdsystems_pb2']
cls.lifecycle_type = cls.proto.OperatingSystem.DESCRIPTOR.fields_by_name["lifecycle"].enum_type
def test_100_add_aquilon_prev(self):
self.noouttest(["add_os", "--archetype", "aquilon", "--osname", "linux",
"--osversion", self.linux_version_prev])
def test_105_add_aquilon_new(self):
self.noouttest(["add_os", "--archetype", "aquilon", "--osname", "linux",
"--osversion", self.linux_version_curr])
def test_110_add_aurora_prev(self):
self.noouttest(["add_os", "--archetype", "aurora", "--osname", "linux",
"--osversion", self.linux_version_prev])
def test_120_add_default_oses(self):
for arch, osdefs in os_defaults.items():
for osname, versions in osdefs.items():
for osver, params in versions.items():
command = ["add_os", "--archetype", arch,
"--osname", osname, "--osversion", osver]
self.noouttest(command)
def test_121_show_utos(self):
command = "show os --archetype utarchetype1 --osname utos --osversion 1.0"
out = self.commandtest(command.split(" "))
self.output_equals(out, """
Operating System: utos
Version: 1.0
Archetype: utarchetype1
Lifecycle: evaluation
""", command)
def test_121_show_utos_proto(self):
command = ["show_os", "--archetype=utarchetype1", "--osname=utos",
"--osversion=1.0", "--format=proto"]
utos = self.protobuftest(command, expect=1)[0]
self.assertEqual(utos.archetype.name, "utarchetype1")
self.assertEqual(utos.name, "utos")
self.assertEqual(utos.version, "1.0")
self.assertEqual(utos.lifecycle, self.proto.EVALUATION)
def test_121_verify_os_only(self):
command = "show os --osname utos --archetype utarchetype1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Operating System: utos", command)
self.matchclean(out, "linux", command)
def test_121_verify_vers_only(self):
command = "show os --osversion 1.0 --archetype utarchetype1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Version: 1.0", command)
self.matchclean(out, "linux", command)
def test_200_add_existing(self):
command = ["add_os", "--archetype", "aquilon", "--osname", "linux",
"--osversion", self.linux_version_prev]
out = self.badrequesttest(command)
self.matchoutput(out, "Operating System linux, version %s, "
"archetype aquilon already exists." %
self.linux_version_prev,
command)
def test_200_add_bad_name(self):
command = "add os --archetype aquilon --osname oops@!" \
" --osversion 1.0"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out, "'oops@!' is not a valid value for --osname.",
command)
def test_200_add_bad_version(self):
command = "add os --archetype aquilon --osname newos" \
" --osversion oops@!"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out, "'oops@!' is not a valid value for --osversion.",
command)
def test_200_show_not_found(self):
command = "show os --osname os-does-not-exist --osversion foobar --archetype aquilon"
self.notfoundtest(command.split(" "))
def test_300_verify_all(self):
command = "show os --all"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Operating System: utos", command)
self.matchoutput(out, "Operating System: linux", command)
self.matchoutput(out, "Archetype: utarchetype1", command)
self.matchoutput(out, "Archetype: aquilon", command)
def test_310_verify_all_proto(self):
command = "show os --all --format=proto"
oslist = self.protobuftest(command.split(" "))
found_aquilon_new = False
found_ut = False
for os in oslist:
if os.archetype.name == 'aquilon' and \
os.name == 'linux' and os.version == self.linux_version_curr:
found_aquilon_new = True
if os.archetype.name == 'utarchetype1' and \
os.name == 'utos' and os.version == '1.0':
found_ut = True
self.assertTrue(found_aquilon_new,
"Missing proto output for aquilon/linux/%s" %
self.linux_version_curr)
self.assertTrue(found_ut,
"Missing proto output for utarchetype1/utos/1.0")
def test_400_update_os_comments(self):
command = ["update_os", "--osname", "windows", "--osversion", "nt61e",
"--archetype", "windows",
"--comments", "Windows 7 Enterprise (x86)"]
self.noouttest(command)
def test_410_verify_os_comments(self):
command = "show os --archetype windows --osname windows --osversion nt61e"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Comments: Windows 7 Enterprise (x86)", command)
def test_420_update_os_lifecycle(self):
command = ["add_os", "--osname", "lctestos", "--osversion", "1.0",
"--archetype", "aquilon"]
self.noouttest(command)
stages = ['pre_prod', 'early_prod',
'production', 'pre_decommission',
'inactive', 'decommissioned']
for lifecycle in stages:
command = ["update_os", "--osname", "lctestos",
"--osversion", "1.0", "--archetype", "aquilon",
"--lifecycle", lifecycle]
self.noouttest(command)
command = "show os --archetype aquilon --osname lctestos --osversion 1.0"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Lifecycle: %s" % lifecycle, command)
command = command + " --format=proto"
os = self.protobuftest(command.split(), expect=1)[0]
val = self.lifecycle_type.values_by_name[lifecycle.upper()]
self.assertEqual(os.lifecycle, val.number)
command = ["del_os", "--osname", "lctestos", "--osversion", "1.0",
"--archetype", "aquilon"]
self.noouttest(command)
def test_430_update_os_lifecycle(self):
command = ["add_os", "--osname", "lctestos", "--osversion", "1.0",
"--archetype", "aquilon"]
self.noouttest(command)
lifecycle = 'withdrawn'
command = ["update_os", "--osname", "lctestos",
"--osversion", "1.0", "--archetype", "aquilon",
"--lifecycle", lifecycle]
self.noouttest(command)
command = "show os --archetype aquilon --osname lctestos --osversion 1.0"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Lifecycle: %s" % lifecycle, command)
command = command + " --format=proto"
os = self.protobuftest(command.split(), expect=1)[0]
val = self.lifecycle_type.values_by_name[lifecycle.upper()]
self.assertEqual(os.lifecycle, val.number)
command = ["del_os", "--osname", "lctestos", "--osversion", "1.0",
"--archetype", "aquilon"]
self.noouttest(command)
def test_440_update_os_status_invalid(self):
command = ["update_os", "--osname", "windows", "--osversion", "nt61e",
"--archetype", "windows",
"--lifecycle", "invalidstat"]
out = self.badrequesttest(command)
self.matchoutput(out, "Unknown asset lifecycle"
" 'invalidstat'. The valid values are:"
" decommissioned, early_prod, evaluation,"
" inactive, pre_decommission, pre_prod,"
" production, withdrawn.",
command)
def test_450_update_os_lifecycle_invalid_transition(self):
command = ["update_os", "--osname", "windows", "--osversion", "nt61e",
"--archetype", "windows",
"--lifecycle", "production"]
out = self.badrequesttest(command)
self.matchoutput(out, "Cannot change lifecycle stage to production"
" from evaluation. Legal states are: pre_prod"
", withdrawn",
command)
def test_500_fix_os_lifecycle(self):
# TODO: This should be integrated better
self.noouttest(["update_os", "--archetype", "aquilon", "--osname", "linux",
"--osversion", self.linux_version_prev,
"--lifecycle", "pre_prod"])
self.noouttest(["update_os", "--archetype", "aquilon", "--osname", "linux",
"--osversion", self.linux_version_prev,
"--lifecycle", "early_prod"])
self.noouttest(["update_os", "--archetype", "aquilon", "--osname", "linux",
"--osversion", self.linux_version_curr,
"--lifecycle", "pre_prod"])
self.noouttest(["update_os", "--archetype", "aquilon", "--osname", "linux",
"--osversion", self.linux_version_curr,
"--lifecycle", "early_prod"])
self.noouttest(["update_os", "--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.1-x86_64", "--lifecycle", "pre_prod"])
self.noouttest(["update_os", "--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.1-x86_64", "--lifecycle", "early_prod"])
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestAddOS)
unittest.TextTestRunner(verbosity=2).run(suite)
|
import homework.divisor_master as dm
do_this = True
while do_this:
    a = int(input("Enter the lower bound of the range "))
    b = int(input("Enter the upper bound of the range "))
    c = int(input(f"Enter the number to process within the range from {a} to {b} "))
    num = dm.set_num_range(c, a, b)
    if dm.is_prime(num):
        print(f"The number {num} is prime")
    else:
        print(f'The number {num} is not prime')
    print("List of integer divisors:")
    print(dm.get_num_int_divisors(num))
    print("Largest divisor:")
    print(dm.get_max_divisor(num))
    print("Largest prime divisor:")
    print(dm.get_max_prime_divisor(num))
    print("Smallest prime divisor:")
    print(dm.get_min_prime_divisor(num))
    print("Prime factorization of the number")
    print(dm.decompose_on_prime(num))
    print()
    print("Continue? (Yes - y) (Exit - press any other key...")
    d = input()
    if d != "y":
        do_this = False
        print("All the best, we hope to see you again!")
|
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.richmedia.reply import TemplateReplyNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphReplyTests(TemplateGraphTestClient):
def test_text_reply_node_from_xml(self):
template = ET.fromstring("""
<template>
<reply>
<text>Servusai.com</text>
</reply>
</template>
""")
root = self._graph.parse_template_expression(template)
self.assertIsNotNone(root)
self.assertIsInstance(root, TemplateNode)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 1)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateReplyNode)
self.assertIsNotNone(node._text)
self.assertIsNone(node._postback)
def test_text_postback_reply_node_from_xml(self):
template = ET.fromstring("""
<template>
<reply>
<text>Servusai.com</text>
<postback>SERVUSAI DOT COM</postback>
</reply>
</template>
""")
root = self._graph.parse_template_expression(template)
self.assertIsNotNone(root)
self.assertIsInstance(root, TemplateNode)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 1)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateReplyNode)
self.assertIsNotNone(node._text)
self.assertIsNotNone(node._postback)
|
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
class SmlConan(ConanFile):
name = "sml"
description = "[Boost].SML: C++14 State Machine Library"
topics = ("conan", "sml", "state-machine")
url = "https://github.com/bincrafters/conan-sml"
homepage = "https://github.com/boost-experimental/sml"
license = "BSL-1.0"
settings = "compiler"
no_copy_source = True
@property
def _source_subfolder(self):
return os.path.join(self.source_folder, "source_subfolder")
def configure(self):
minimal_cpp_standard = "14"
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, minimal_cpp_standard)
minimal_version = {
"gcc": "5",
"clang": "3.4",
"apple-clang": "10",
"Visual Studio": "14"
}
compiler = str(self.settings.compiler)
if compiler not in minimal_version:
self.output.warn(
"%s recipe lacks information about the %s compiler standard version support." % (self.name, compiler))
self.output.warn(
"%s requires a compiler that supports at least C++%s." % (self.name, minimal_cpp_standard))
return
version = tools.Version(self.settings.compiler.version)
if version < minimal_version[compiler]:
raise ConanInvalidConfiguration("%s requires a compiler that supports at least C++%s." % (self.name, minimal_cpp_standard))
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _extract_license(self):
header = tools.load(os.path.join(self._source_subfolder, "include", "boost", "sml.hpp"))
license_contents = header[0:header.find("#", 2)]
tools.save("LICENSE", license_contents)
def package(self):
self._extract_license()
self.copy("LICENSE", dst="licenses")
include_folder = os.path.join(self._source_subfolder, "include")
self.copy(pattern="*", dst="include", src=include_folder)
def package_id(self):
self.info.header_only()
|
"""
TODO
"""
from pyderl.utils.data_structures.segment_tree import SegmentTree
from pyderl.utils.data_structures.segment_tree import SumSegmentTree
from pyderl.utils.data_structures.segment_tree import MinSegmentTree
|
import io
import logging
import logging.config
import os
import yaml
class Logger:
YAML_PATH = os.path.join(os.path.dirname(__file__), 'config', 'mylogger_config.yml')
string_io = io.StringIO()
def __init__(self) -> None:
"""
1. Create the logger based on the configuration provided if applicable.
2. Point stream from string_io handler to a io.StringIO() object.
"""
config = self.load_yaml_config()
# Create the logger as per the configuration says.
self.logger = self.create_logger(config)
        # Manually point the stringio handler's stream at an io.StringIO() object.
self.point_stringio_stream_to_stringio_variable()
@staticmethod
def load_yaml_config(file: str = YAML_PATH) -> dict:
"""
Load the config based on the arguments provided.
Returns: dict
dictionary which will be used for configuring the logger, handlers, etc.
"""
with open(file, 'r') as config_yaml:
return yaml.safe_load(config_yaml.read())
@staticmethod
def create_logger(config: dict) -> logging.getLogger():
"""
Create the log based on a config dictionary
Returns: logging.getLogger()
logger object to be used like logger.error, logger.info etc.
"""
logging.config.dictConfig(config)
return logging.getLogger(__name__)
def point_stringio_stream_to_stringio_variable(self):
"""
        The io.StringIO() stream does not appear to be directly configurable in the yml file itself, so the
        'stringio' handler must be configured ad hoc so that its stream points to an io.StringIO() object.
Returns: None
just adds the handler.
"""
for h in self.logger.parent.handlers:
if h.name == 'stringio':
h.stream = self.string_io
def flush_buffer(self):
"""
Return the errors/exceptions logged in memory throughout the execution.
Returns: str
buffer contents (log errors/exceptions) stored in the io.StringIO() object.
"""
with self.string_io:
return self.string_io.getvalue().strip()
if __name__ == '__main__':
pass
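    # Hedged usage sketch (assumes config/mylogger_config.yml defines the 'stringio'
    # handler described above): log an error and read it back from the in-memory buffer.
    log = Logger()
    log.logger.error("something went wrong")
    print(log.flush_buffer())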
|
import json
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State, MATCH
import plotly.express as px
import pandas as pd
## DATA FROM https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
data_urls = {
"cases": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv",
"death": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv",
"recovery": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv",
}
def _read_and_melt(url):
df = pd.read_csv(url).drop("Province/State", axis=1)
df = df.melt(id_vars=["Country/Region", "Lat", "Long"])
df["variable"] = pd.to_datetime(df["variable"])
return df
def _update_data(df):
data = pd.DataFrame()
for country in df["Country/Region"].unique():
df_country = df[df["Country/Region"] == country].copy().reset_index(drop=True)
first_value = df_country["value"][0]
counts_list = list(df_country["value"].diff().values)
counts_list[0] = first_value
df_country["counts"] = counts_list
data = pd.concat([data, df_country])
return data
def read_john_data(url):
df = _read_and_melt(url)
df = _update_data(df)
return df
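# Data-shaping note (hedged, hypothetical numbers): the source CSVs hold cumulative totals per
# country and date, so _update_data derives daily counts via diff(); e.g. cumulative values
# 10, 12, 15 on consecutive days become counts 10, 2, 3 (the first diff is replaced by the
# first cumulative value).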
def _mapdata_to_weekly(df):
df = df.set_index("variable")
df = df.resample("W").last()
df = df.drop("counts", axis=1)
df = df.reset_index()
df["variable"] = df["variable"].astype("str")
return df
def mapdata(df):
data = pd.DataFrame()
for country in df["Country/Region"].unique():
country_df = df[df["Country/Region"] == country]
country_df = _mapdata_to_weekly(country_df)
data = pd.concat([data, country_df])
return data
cases = read_john_data("data/cases.csv")
death = read_john_data("data/death.csv")
recovery = read_john_data("data/recovery.csv")
cases_map = mapdata(cases)
death_map = mapdata(death)
recovery_map = mapdata(recovery)
cases_map["data_type"] = "cases"
death_map["data_type"] = "death"
recovery_map["data_type"] = "recovery"
all_data = pd.concat([cases_map, death_map, recovery_map])
mapbox = "your-token"
px.set_mapbox_access_token(mapbox)
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.layout = html.Div(
[
html.Div(
[
html.H1("COVID-19 Time series Data"),
html.P(
"Data from Johns Hopkins University: ", style={"fontSize": "2.5rem"}
),
html.A(
"https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series",
href="https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series",
),
],
style={
"marginBottom": "2%",
"backgroundColor": "#5feb4d",
"padding": "3%",
"borderRadius": "20px",
},
),
html.Div(
[
html.H3("国ごとのデータ"),
html.Div(
[
dcc.Dropdown(
id="drop1",
options=[
{"label": i, "value": i}
for i in ["感染者数", "死亡者数", "回復者数"]
],
value="感染者数",
),
dcc.Dropdown(
id="drop2",
options=[
{"label": i, "value": i}
for i in cases["Country/Region"].unique()
],
value=["Japan"],
multi=True,
),
dcc.RadioItems(
id="graph_radio",
options=[{"label": s, "value": s} for s in ["新規", "累計"]],
value="新規",
),
]
),
dcc.Graph(id="first_graph"),
dcc.Graph(
id="map_graph", style={"width": "65%", "display": "inline-block"}
),
dcc.Graph(
id="callback_graph",
style={"width": "35%", "display": "inline-block"},
),
html.H1(id="test"),
],
style={
"marginBottom": "2%",
"backgroundColor": "#5feb4d",
"padding": "3%",
"borderRadius": "20px",
},
),
# html.Div(
# [
# html.Div(
# [
# html.H3("国ごとのデータ(パターン・マッチング・コールバック)"),
# html.Button(id="junku_button", children="PUSHME", n_clicks=0),
# html.Div(id="add_layout", children=[]),
# ]
# )
# ],
# style={
# "backgroundColor": "#5feb4d",
# "padding": "3%",
# "borderRadius": "20px",
# },
# ),
],
style={"padding": "5%", "backgroundColor": "#17be06"},
)
@app.callback(
Output("first_graph", "figure"),
Output("map_graph", "figure"),
Input("drop1", "value"),
Input("drop2", "value"),
Input("graph_radio", "value"),
)
def update_graph(type_select, cnt_select, graph_select):
if type_select == "死亡者数":
death_data = death[death["Country/Region"].isin(cnt_select)]
if graph_select == "新規":
return (
px.line(death_data, x="variable", y="counts", color="Country/Region"),
px.scatter_mapbox(
death_map,
lat="Lat",
lon="Long",
size="value",
animation_frame="variable",
color="value",
hover_name="Country/Region",
zoom=1,
size_max=60,
color_continuous_scale=px.colors.cyclical.IceFire,
height=800,
title=f"マップ表示(累計値: {type_select})",
template={"layout": {"clickmode": "event+select"}},
),
)
else:
return (
px.line(death_data, x="variable", y="value", color="Country/Region"),
px.scatter_mapbox(
death_map,
lat="Lat",
lon="Long",
size="value",
animation_frame="variable",
color="value",
hover_name="Country/Region",
zoom=1,
size_max=60,
color_continuous_scale=px.colors.cyclical.IceFire,
height=800,
title=f"マップ表示(累計値: {type_select})",
template={"layout": {"clickmode": "event+select"}},
),
)
elif type_select == "回復者数":
recovery_data = recovery[recovery["Country/Region"].isin(cnt_select)]
if graph_select == "新規":
return (
px.line(
recovery_data, x="variable", y="counts", color="Country/Region"
),
px.scatter_mapbox(
recovery_map,
lat="Lat",
lon="Long",
size="value",
animation_frame="variable",
color="value",
hover_name="Country/Region",
zoom=1,
size_max=60,
color_continuous_scale=px.colors.cyclical.IceFire,
height=800,
title=f"マップ表示(累計値: {type_select})",
template={"layout": {"clickmode": "event+select"}},
),
)
else:
return (
px.line(recovery_data, x="variable", y="value", color="Country/Region"),
px.scatter_mapbox(
recovery_map,
lat="Lat",
lon="Long",
size="value",
animation_frame="variable",
color="value",
hover_name="Country/Region",
zoom=1,
size_max=60,
color_continuous_scale=px.colors.cyclical.IceFire,
height=800,
title=f"マップ表示(累計値: {type_select})",
template={"layout": {"clickmode": "event+select"}},
),
)
else:
cases_data = cases[cases["Country/Region"].isin(cnt_select)]
if graph_select == "新規":
return (
px.line(cases_data, x="variable", y="counts", color="Country/Region"),
px.scatter_mapbox(
cases_map,
lat="Lat",
lon="Long",
size="value",
animation_frame="variable",
color="value",
hover_name="Country/Region",
zoom=1,
size_max=60,
color_continuous_scale=px.colors.cyclical.IceFire,
height=800,
title=f"マップ表示(累計値: {type_select})",
template={"layout": {"clickmode": "event+select"}},
),
)
else:
return (
px.line(cases_data, x="variable", y="value", color="Country/Region"),
px.scatter_mapbox(
cases_map,
lat="Lat",
lon="Long",
size="value",
animation_frame="variable",
color="value",
hover_name="Country/Region",
zoom=1,
size_max=60,
color_continuous_scale=px.colors.cyclical.IceFire,
height=800,
title=f"マップ表示(累計値: {type_select})",
template={"layout": {"clickmode": "event+select"}},
),
)
@app.callback(
Output("callback_graph", "figure"),
Input("map_graph", "selectedData"),
Input("drop1", "value"),
)
def update_graph(selectedData, selected_value):
if selectedData is None:
selectedData = {"points": [{"hovertext": "Japan"}]}
country_list = list()
for one_dict in selectedData["points"]:
country_list.append(one_dict["hovertext"])
if selected_value == "死亡者数":
death_df = death[death["Country/Region"].isin(country_list)]
return px.line(
death_df,
x="variable",
y="value",
color="Country/Region",
title=f"選択国の{selected_value}(累計値: SHIFT+クリック)",
height=800,
)
elif selected_value == "回復者数":
recovery_df = recovery[recovery["Country/Region"].isin(country_list)]
return px.line(
recovery_df,
x="variable",
y="value",
color="Country/Region",
title=f"選択国の{selected_value}(累計値: SHIFT+クリック)",
height=800,
)
else:
cases_df = cases[cases["Country/Region"].isin(country_list)]
return px.line(
cases_df,
x="variable",
y="value",
color="Country/Region",
title=f"選択国の{selected_value}(累計値: SHIFT+クリック)",
height=800,
)
# @app.callback(
# Output("add_layout", "children"),
# Input("junku_button", "n_clicks"),
# State("add_layout", "children"),
# )
# def update_layout(n_clicks, layout_children):
# append_layout = html.Div(
# [
# dcc.Dropdown(
# id={"type": "count_select_drop", "index": n_clicks},
# options=[
# {"value": i, "label": i} for i in cases["Country/Region"].unique()
# ],
# value=cases["Country/Region"].unique()[n_clicks],
# ),
# dcc.RadioItems(
# id={"type": "count_select_radio", "index": n_clicks},
# options=[{"value": i, "label": i} for i in ["Linear", "Log"]],
# value="Linear",
# ),
# dcc.Graph(id={"type": "count_select_graph", "index": n_clicks}),
# ],
# style={"width": "46%", "padding": "2%", "display": "inline-block"},
# )
# layout_children.append(append_layout)
# return layout_children
# @app.callback(
# Output({"type": "count_select_graph", "index": MATCH}, "figure"),
# Input({"type": "count_select_drop", "index": MATCH}, "value"),
# Input({"type": "count_select_radio", "index": MATCH}, "value"),
# )
# def update_country_graph(selected_country, selected_radio_value):
# if selected_country is None:
# dash.exceptions.PreventUpdate
# selected_country_data = all_data[all_data["Country/Region"] == selected_country]
# if selected_radio_value == "Log":
# return px.line(
# selected_country_data,
# x="variable",
# y="value",
# color="data_type",
# log_y=True,
# )
# return px.line(selected_country_data, x="variable", y="value", color="data_type",)
if __name__ == "__main__":
app.run_server(debug=True)
|
from PyQt5.QtCore import Qt, QPointF, QRectF
from PyQt5.QtWidgets import QGraphicsItem, QGraphicsRectItem
from PyQt5.QtGui import QTransform, QPen
from .tile import Cell
class Piece(QGraphicsItem):
tileStates = [1, 2, 3, 1, 2, 3, 1]
coords = [
[
[[0, 1], [0, 0], [1, 0], [-1, 0]],
[[0, 1], [0, 0], [1, 0], [0, -1]],
[[-1, 0], [0, 0], [1, 0], [0, -1]],
[[0, 1], [0, 0], [0, -1], [-1, 0]]
],
[
[[-1, 0], [0, 0], [1, 0], [1, 1]],
[[0, -1], [1, -1], [0, 0], [0, 1]],
[[-1, -1], [-1, 0], [0, 0], [1, 0]],
[[0, -1], [0, 0], [-1, 1], [0, 1]]
],
[
[[-1, 0], [0, 0], [0, 1], [1, 1]],
[[1, -1], [0, 0], [1, 0], [0, 1]]
],
[
[[-1, 0], [0, 0], [-1, 1], [0, 1]]
],
[
[[0, 0], [1, 0], [-1, 1], [0, 1]],
[[0, -1], [0, 0], [1, 0], [1, 1]]
],
[
[
[-1, 0], [0, 0], [1, 0], [-1, 1]],
[[0, -1], [0, 0], [0, 1], [1, 1]],
[[1, -1], [-1, 0], [0, 0], [1, 0]],
[[-1, -1], [0, -1], [0, 0], [0, 1]]
],
[
[[-2, 0], [-1, 0], [0, 0], [1, 0]],
[[0, -2], [0, -1], [0, 0], [0, 1]]
]
]
def __init__(self, type, orient, opacity=127, level=8, *args, **kwargs):
super().__init__(*args, **kwargs)
self.opacity = opacity
self.level = level
self.setType(type, orient)
def setType(self, type, orient=0):
self.prepareGeometryChange()
self.type = type
self.orient = orient
self.items = []
state = Piece.tileStates[type]
for coord in Piece.coords[type][orient]:
item = Cell(self.level)
item.setState(state)
item.setOffset(8 * coord[0], 8 * coord[1])
item.setOpacity(self.opacity)
self.items.append(item)
    def cw(self):
        self.setType(self.type, (self.orient+1) % len(Piece.coords[self.type]))
    def ccw(self):
        self.setType(self.type, (self.orient-1) % len(Piece.coords[self.type]))
def updatePalette(self, level):
self.level = level
for item in self.items:
item.updatePalette(level)
item.setOpacity(self.opacity)
self.update()
def updateOffset(self, x, y):
# self.prepareGeometryChange()
self.setTransform(QTransform().translate(x, y))
def paint(self, painter, option, widget):
for tile in self.items:
tile.paint(painter, option, widget)
# self.bound.paint(painter, option, widget)
def boundingRect(self):
if len(self.items) == 0:
self.bound = QGraphicsRectItem(QRectF())
self.bound.setPen(QPen(Qt.red, 1))
return QRectF()
# All items contain the center tile, so this is probably alright
minx,miny,maxx,maxy=0,0,8,8
for tile in self.items:
minx = min(minx, tile.boundingRect().x())
miny = min(miny, tile.boundingRect().y())
maxx = max(maxx, tile.boundingRect().x()+tile.boundingRect().width())
maxy = max(maxy, tile.boundingRect().y()+tile.boundingRect().height())
self.bound = QGraphicsRectItem(QRectF(QPointF(minx, miny), QPointF(maxx, maxy)))
self.bound.setPen(QPen(Qt.red, 1))
# print(QRectF(QPointF(minx, miny), QPointF(maxx, maxy)))
return QRectF(QPointF(minx, miny), QPointF(maxx, maxy))
|
from bs4 import BeautifulSoup
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
def main():
options = Options()
options.add_argument('--incognito')
options.headless = True
options.page_load_strategy = 'normal'
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
url = f'https://www.amazon.com/s?k={keyword}&ref=nb_sb_noss'
driver.get(url)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
results = soup.find_all('div', {'data-component-type': 's-search-result'})
master_list = []
for result in results:
data_dict = {}
try:
data_dict['name'] = result.find('span', {'class': 'a-size-base-plus a-color-base a-text-normal'}).text
except:
data_dict['name'] = result.find('span', {'class': 'a-size-medium a-color-base a-text-normal'}).text
try:
price = result.find('span', {'class': 'a-offscreen'}).text
price = price.replace('$', '')
price = float(price)
data_dict['price_$'] = price
except:
data_dict['price_$'] = float(0)
try:
rating = result.find('span', {'class': 'a-icon-alt'}).text
rating = rating.split()
rating = float(rating[0])
data_dict['rating_out_of_5'] = rating
except:
data_dict['rating_out_of_5'] = 'None'
master_list.append(data_dict)
driver.quit()
amazon_df = pd.DataFrame(master_list)
amazon_df.to_csv(f'ws_amazon_example_{keyword}.csv', index=False)
if __name__ == '__main__':
print('write search keyword')
keyword = input()
main()
|
from collections.abc import Sequence
from pyball.fundamentals.base_fundamental import BaseFundamental
class CompetitionNews(BaseFundamental):
def __init__(self, soup):
super(CompetitionNews, self).__init__(soup)
self.news_list = []
self.process()
def process(self):
news = self.soup.find("div", class_="sport_top_list")
self.news_list = NewsList(news)
class NewsList(Sequence):
def __getitem__(self, index):
return self.news[index]
def __len__(self):
return len(self.news)
def __init__(self, data):
self.data = data
self.news = []
self.process()
def process(self):
odds = self.data.find_all("a", class_="odd")
evens = self.data.find_all("a", class_="even")
news = odds + evens
for d in news:
self.news.append(NewsSingle(d))
class NewsSingle:
def __init__(self, data):
self.title = data.text
self.text = data['title']
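# Hedged usage sketch (the HTML file name is hypothetical; any soup containing a
# div.sport_top_list with a.odd / a.even links will do):
#
#   from bs4 import BeautifulSoup
#   with open("competition_page.html", encoding="utf-8") as fh:
#       soup = BeautifulSoup(fh.read(), "html.parser")
#   news = CompetitionNews(soup)
#   for item in news.news_list:
#       print(item.title, "-", item.text)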
|
__author__ = 'huyheo'
#*****************
#intent definition
#*****************
TO_GO_SOMEWHERE = 'go_to_some_where'
TIME_INFO = 'time_info'
DISTANCE_INFO ='distance_info'
DURATION_INFO = 'duration_info'
METHOD_TO_GO = 'method_to_go'
DOING_SMTH = 'doing_smth'
AGE_INFO = 'age_info'
#personal assistant
ASK_WHAT_SHOULD_I_DO = 'ask_what_should_i_do'
ASK_WHY = 'ask_why'
ASK_IF_WANT_TO_DO_SMTH = 'ask_if_want_to_do_smth'
ASK_IF_DOING_SMTH = 'ask_if_doing_smth'
#ask
ASK_AGE = 'ask_age'
ASK_NAME = 'ask_name'
ASK_JOB = 'ask_job'
ASK_OPINION_ABOUT_SOMETHING = 'ask_opinion_about_something'
ASK_TIME = 'ask_time'
ASK_WHAT_TO_DO = 'ask_what_to_do'
ASK_HOBBY = 'ask_hobby'
#ask distance, duration
ASK_DURATION= 'ask_duration_of_doing_smth'
ASK_DISTANCE= 'ask_distance'
ASK_CONTACT_INFO = 'ask_contact_info'
ASK_WHAT_ARE_U_DOING = 'ask_what_are_u_doing'
ASK_WHERE_ARE_U_FROM ='ask_where_from'
ASK_WHY_LIKE = 'ask_why_like'
ASK_HOW_TO_DO = 'ask_how_to_do'
COME_FROM_INFO = 'come_from_info'
YES_NO_INFO = 'yes_no_info'
#greeting
GREETING = 'greeting'
#common
OFFER_HELP = 'offer_help'
SAY_THANK = 'thank_you'
NICE_TO_MEET_YOU = 'nice_to_meet_you'
DONT_NO ='say_dont_no'
#introduce
INTRODUCE_MYSELF = 'introduce_myself'
COMPLIMENT = 'compliment'
#stop conversation
STOP_CONVERSATION = 'stop_conversation'
MOTIVATION_INFO = 'motivation_info'
PRACTISE_INFO = 'practise_info'
#emotion
EMOTIONAL_EXPRESSION = 'emotional_expression'
#express
LIKE_SMTH = 'like_smth'
#activity
READING_ACT = 'reading_act'
WAITING_ACT = 'waiting_act'
STUDYING_ACT = 'studying_act'
RUNNING_ACT = 'running'
WORKING_ACT = 'working_act'
GREETING_ACT ='greeting_act'
ASK_HEALTH_STS = 'ask_health_sts'
#digital assistant
ASK_WHAT_MAKE_HAPPY = 'ask_what_make_happy'
#time
IT_IS_SMTH = 'it_is_smth'
#subject
IT_SUBJECT = 'it'
#adjective
ADJECTIVE_INFO = 'adjective_info'
#wit.ai
#*************
#entity definition
#*************
TARGET_NAME = 'target_name'
DATETIME = 'datetime'
ACTIVITY_INFO = 'activity_info'
CONTACT_TYPE = 'contact'
DISTANCE ='distance'
AGE_OF_PERSON= 'age_of_person'
COUNTRY_INFO = 'country_info'
FEELING = 'feeling'
LEVEL_OF_EXPRESSION = 'level_of_expression'
GREETING_LEVEL ='greeting_level'
#time
START_STOP_INFO = 'start_stop_info'
#introduce
NAME_INFO = 'contact'
JOB_INFO = 'job_info'
#flag
ASK_FLAG = 'ask'
ANSWER_FLAG = 'asnwer'
#****** End entity **********
#define object
HEALTH_OBJ = 'health_obj'
HOBBY_TOPIC = 'hobby_topic'
JOB_TOPIC = 'job_topic'
USER_TOPIC = 'user_topic'
|
# -*- coding: utf-8 -*-
import requests
url="http://127.0.0.1:8080/predict"
files = {'file':('image.jpg',open("images/7.jpg",'rb'),' image/jpeg')}
r = requests.post(url,files=files)
print(r.text)
|
from taichi.type.annotations import *
from taichi.type.primitive_types import *
|
__all__ = ['get_orientation',
'reorient_image2',
'get_possible_orientations',
'get_center_of_mass']
import numpy as np
from tempfile import mktemp
from . import apply_transforms
from .. import utils
from ..core import ants_image as iio
_possible_orientations = ['RIP','LIP', 'RSP', 'LSP', 'RIA', 'LIA',
'RSA', 'LSA', 'IRP', 'ILP', 'SRP', 'SLP', 'IRA', 'ILA', 'SRA',
'SLA', 'RPI', 'LPI', 'RAI', 'LAI', 'RPS', 'LPS', 'RAS', 'LAS',
'PRI', 'PLI', 'ARI', 'ALI', 'PRS', 'PLS', 'ARS', 'ALS', 'IPR',
'SPR', 'IAR', 'SAR', 'IPL', 'SPL', 'IAL', 'SAL', 'PIR', 'PSR',
'AIR', 'ASR', 'PIL', 'PSL', 'AIL', 'ASL']
def get_possible_orientations():
return _possible_orientations
def get_orientation(image):
direction = image.direction
orientation = []
for i in range(3):
row = direction[:,i]
idx = np.where(np.abs(row)==np.max(np.abs(row)))[0][0]
if idx == 0:
if row[idx] < 0:
orientation.append('L')
else:
orientation.append('R')
elif idx == 1:
if row[idx] < 0:
orientation.append('P')
else:
orientation.append('A')
elif idx == 2:
if row[idx] < 0:
orientation.append('S')
else:
orientation.append('I')
return ''.join(orientation)
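# Example of the mapping above: for an identity direction matrix, each column's largest
# entry is positive and lies on the diagonal, so the loop yields 'R', 'A', 'I' in turn and
# get_orientation returns 'RAI'; flipping the sign of the third column would give 'RAS'.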
def reorient_image2(image, orientation='RAS'):
"""
Reorient an image.
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni2 = mni.reorient_image2()
"""
if image.dimension != 3:
raise ValueError('image must have 3 dimensions')
inpixeltype = image.pixeltype
ndim = image.dimension
if image.pixeltype != 'float':
image = image.clone('float')
libfn = utils.get_lib_fn('reorientImage2')
itkimage = libfn(image.pointer, orientation)
new_img = iio.ANTsImage(pixeltype='float', dimension=ndim,
components=image.components, pointer=itkimage)#.clone(inpixeltype)
if inpixeltype != 'float':
new_img = new_img.clone(inpixeltype)
return new_img
def get_center_of_mass(image):
"""
Compute an image center of mass in physical space which is defined
as the mean of the intensity weighted voxel coordinate system.
ANTsR function: `getCenterOfMass`
Arguments
---------
image : ANTsImage
image from which center of mass will be computed
Returns
-------
    tuple of floats (the center-of-mass coordinates)
Example
-------
>>> fi = ants.image_read( ants.get_ants_data("r16"))
>>> com1 = ants.get_center_of_mass( fi )
>>> fi = ants.image_read( ants.get_ants_data("r64"))
>>> com2 = ants.get_center_of_mass( fi )
"""
if image.pixeltype != 'float':
image = image.clone('float')
libfn = utils.get_lib_fn('centerOfMass%s' % image._libsuffix)
com = libfn(image.pointer)
return tuple(com)
|
# user-agent (www.useragentstring.com) : Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36
import requests
from bs4 import BeautifulSoup # beautifulsoup : python library for pulling data from html and xml files. it works with a parser.
import bs4.element
import datetime
# Create a BeautifulSoup object
def get_soup_obj(url):
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
res = requests.get(url, headers=headers) # type(res) : <class 'requests.models.Response'> //type(res.text) : <class 'str'>
    soup = BeautifulSoup(res.text, 'lxml') # type(soup) : <class 'bs4.BeautifulSoup'> // the standard-library html.parser would also work, but lxml is faster
return soup
# Fetch the basic info for the top news articles
def get_top3_news_info(sec,sid):
    # placeholder image
    default_img = "https://search.naver.com/search.naver?where=image&sm=tab_jum&query=naver#"
    # URL of the top news list for the given section
    sec_url = "https://news.naver.com/main/list.nhn?mode=LSD&mid=sec&sid1=" + sid
    # print("********************************************************")
    print("section url : ", sec_url)
    # fetch the HTML of the section's top news page
    soup = get_soup_obj(sec_url) # soup : the response for sec_url parsed with lxml and wrapped in a BeautifulSoup object so the needed data can be extracted
    # grab the top three news items for the section
    news_list3 = []
    lis3=soup.find('ul', class_='type06_headline').find_all("li", limit=3) # within the ul tag whose class is type06_headline, find up to three li tags
# print(lis3)
for li in lis3:
        # title : news headline, news_url : article url, image_url : image url
        # news_info : a new dictionary holding the info for each individual article
news_info = {
"title": li.img.attrs.get('alt') if li.img
else li.a.text.replace("\n", "").replace("\t", "").replace("\r", ""),
"date": li.find(class_="date").text,
"news_url": li.a.attrs.get('href'),
"image_url": li.img.attrs.get('src') if li.img else default_img
}
#print(news_info)
news_list3.append(news_info)
# print("//////////////////////////////////////////////////////////////")
# print(lis3)
    return news_list3 # news_list3 : a list whose elements are the news_info dictionaries
# Fetch the news article body
def get_news_contents(url):
soup=get_soup_obj(url)
body=soup.find('div',class_="_article_body_contents")
news_contents=''
for content in body:
if type(content) is bs4.element.NavigableString and len(content) > 50:
            # content.strip() : remove surrounding whitespace
            # leave a space after each '.' so sentences stay separated for later news summarization
            news_contents += content.strip() + ' '
return news_contents
#Crawl the top three news articles in the politics, economy and society sections
def get_naver_news_top3():
    #dictionary that will hold the news results
    news_dic=dict() # dict and list are mutable, so data can be added later // tuple : immutable
    #sections : politics, economy, society
    sections=["pol","eco","soc"]
    #section_ids : id of each news section as used in the url
    section_ids=["100","101","102"]
    for sec, sid in zip(sections,section_ids): # zip(...) pairs the two lists; each pair is unpacked into sec, sid here
        #fetch the basic info for the news articles
        news_info=get_top3_news_info(sec,sid) # news_info : list of news_info dictionaries (news_list3 returned above)
        #print(news_info)
        for news in news_info: # news : dictionary, one element of the news_info list
            #fetch the article body
            news_url=news['news_url'] # news_url is the address of each individual article.
            news_contents=get_news_contents(news_url) # request the article url again to fetch its contents
            #build the dictionary that stores the news info
            news['news_contents']=news_contents
        news_dic[sec]=news_info
    return news_dic # news_dic : dictionary, key = section, value = list holding the section's three articles, each as a dictionary
#call the function - crawl the top three articles for politics, economy and society
news_dic=get_naver_news_top3()
#check the first result for the economy section
print("-------------------------------------------------------------")
# print(news_dic)
print(news_dic['eco'][0])
|
str = "this is a string example... wow!!"
print(str.find('example'))
|
import pytest
from cryptokey.backend.cryptography import hashes as backend
from hashvectors import check_vector, hash_vectors
@pytest.mark.parametrize("vector", hash_vectors)
def test_vectors(vector) -> None:
try:
check_vector(vector, backend, backend.CryptographyHash)
except NotImplementedError:
pass
|
import os
import github3
from github3 import repos
from datetime import datetime
from tests.utils import (BaseCase, load, mock)
class TestRepository(BaseCase):
def __init__(self, methodName='runTest'):
super(TestRepository, self).__init__(methodName)
self.repo = repos.Repository(load('repo'))
def setUp(self):
super(TestRepository, self).setUp()
self.repo = repos.Repository(self.repo.to_json(), self.g)
self.api = 'https://api.github.com/repos/sigmavirus24/github3.py/'
def test_add_collaborator(self):
self.response('', 204)
self.put(self.api + 'collaborators/sigmavirus24')
self.conf = {'data': None}
self.assertRaises(github3.GitHubError, self.repo.add_collaborator,
'foo')
self.login()
assert self.repo.add_collaborator(None) is False
assert self.repo.add_collaborator('sigmavirus24')
self.mock_assertions()
def test_archive(self):
headers = {'content-disposition': 'filename=foo'}
self.response('archive', 200, **headers)
self.get(self.api + 'tarball/master')
self.conf.update({'stream': True})
assert self.repo.archive(None) is False
assert os.path.isfile('foo') is False
assert self.repo.archive('tarball')
assert os.path.isfile('foo')
os.unlink('foo')
self.mock_assertions()
self.request.return_value.raw.seek(0)
self.request.return_value._content_consumed = False
assert os.path.isfile('path_to_file') is False
assert self.repo.archive('tarball', 'path_to_file')
assert os.path.isfile('path_to_file')
os.unlink('path_to_file')
self.request.return_value.raw.seek(0)
self.request.return_value._content_consumed = False
self.get(self.api + 'zipball/randomref')
assert self.repo.archive('zipball', ref='randomref')
os.unlink('foo')
self.request.return_value.raw.seek(0)
self.request.return_value._content_consumed = False
o = mock.mock_open()
with mock.patch('{0}.open'.format(__name__), o, create=True):
with open('archive', 'wb+') as fd:
self.repo.archive('tarball', fd)
o.assert_called_once_with('archive', 'wb+')
fd = o()
fd.write.assert_called_once_with(b'archive_data')
def test_blob(self):
self.response('blob')
sha = '3ceb856e2f14e9669fed6384e58c9a1590a2314f'
self.get(self.api + 'git/blobs/' + sha)
blob = self.repo.blob(sha)
assert isinstance(blob, github3.git.Blob)
assert repr(blob).startswith('<Blob')
self.mock_assertions()
def test_branch(self):
self.response('branch')
self.get(self.api + 'branches/master')
b = self.repo.branch('master')
assert isinstance(b, repos.branch.Branch)
self.mock_assertions()
assert repr(b) == '<Repository Branch [master]>'
def test_commit(self):
self.response('commit')
sha = '76dcc6cb4b9860034be81b7e58adc286a115aa97'
self.get(self.api + 'commits/' + sha)
assert isinstance(self.repo.commit(sha), repos.commit.RepoCommit)
self.mock_assertions()
def test_commit_comment(self):
self.response('commit_comment')
comment_id = 1380832
self.get(self.api + 'comments/{0}'.format(comment_id))
assert isinstance(self.repo.commit_comment(comment_id),
repos.comment.RepoComment)
self.mock_assertions()
def test_compare_commits(self):
self.response('comparison')
base = 'a811e1a270f65eecb65755eca38d888cbefcb0a7'
head = '76dcc6cb4b9860034be81b7e58adc286a115aa97'
self.get(self.api + 'compare/{0}...{1}'.format(base, head))
assert isinstance(self.repo.compare_commits(base, head),
repos.comparison.Comparison)
self.mock_assertions()
def test_contents(self):
self.response('contents')
filename = 'setup.py'
self.get(self.api + 'contents/' + filename)
assert isinstance(self.repo.contents(filename),
repos.contents.Contents)
self.mock_assertions()
self.response('', 404)
assert self.repo.contents(filename) is None
self.response('contents', _iter=True)
files = self.repo.contents(filename)
assert isinstance(files, dict)
self.mock_assertions()
def test_contents_ref(self):
self.response('contents')
filename = 'setup.py'
self.get(self.api + 'contents/' + filename)
self.conf = {'params': {'ref': 'foo'}}
assert isinstance(self.repo.contents(filename, ref='foo'),
repos.contents.Contents)
self.mock_assertions()
def test_create_blob(self):
self.response('blob', 201)
content = 'VGVzdCBibG9i\n'
encoding = 'base64'
sha = '30f2c645388832f70d37ab2b47eb9ea527e5ae7c'
self.post(self.api + 'git/blobs')
self.conf = {'data': {'content': content, 'encoding': encoding}}
self.assertRaises(github3.GitHubError, self.repo.create_blob,
content, encoding)
self.login()
assert self.repo.create_blob(None, None) == ''
assert self.repo.create_blob(content, encoding) == sha
self.mock_assertions()
def test_create_comment(self):
self.response('commit_comment', 201)
body = ('Late night commits are never a good idea. I refactored a '
'bit. `User` objects and `Organization` objects share a lot '
'of common attributes. I turned those common attributes into '
'one `BaseAccount` class to make things simpler. ')
sha = 'd41566090114a752eb3a87dbcf2473eb427ef0f3'
self.post(self.api + 'commits/{0}/comments'.format(sha))
self.conf = {'data': {'body': body, 'line': 1}}
self.assertRaises(github3.GitHubError, self.repo.create_comment,
body, sha)
self.login()
assert self.repo.create_comment(None, None) is None
assert self.repo.create_comment(body, sha, line=0) is None
assert isinstance(self.repo.create_comment(body, sha),
repos.comment.RepoComment)
self.mock_assertions()
def test_create_commit(self):
self.response('commit', 201)
data = {'message': 'My commit message',
'author': {
'name': 'Ian Cordasco',
'email': 'foo@example.com',
'date': '2008-07-09T16:13:30+12:00',
},
'committer': {},
'parents': [
'7d1b31e74ee336d15cbd21741bc88a537ed063a0'
],
'tree': '827efc6d56897b048c772eb4087f854f46256132',
}
self.conf = {'data': data}
self.post(self.api + 'git/commits')
self.assertRaises(github3.GitHubError, self.repo.create_commit, **data)
self.login()
assert self.repo.create_commit(None, None, None) is None
assert isinstance(self.repo.create_commit(**data), github3.git.Commit)
self.mock_assertions()
def test_create_fork(self):
self.response('repo', 202)
self.conf = {'data': None}
self.post(self.api + 'forks')
self.assertRaises(github3.GitHubError, self.repo.create_fork)
self.login()
assert isinstance(self.repo.create_fork(), repos.Repository)
self.mock_assertions()
self.conf['data'] = {'organization': 'github3py'}
assert isinstance(self.repo.create_fork('github3py'), repos.Repository)
self.mock_assertions()
def test_create_hook(self):
self.response('hook', 201)
self.post(self.api + 'hooks')
self.conf = {
'data': {
'name': 'Hookname',
'config': {
'foo': 'bar'
}
}
}
self.assertRaises(github3.GitHubError, self.repo.create_hook,
None, None)
self.login()
assert self.repo.create_hook(None, {'foo': 'bar'}) is None
assert self.repo.create_hook('name', None) is None
assert self.repo.create_hook('name', 'bar') is None
self.not_called()
h = self.repo.create_hook(**self.conf['data'])
assert isinstance(h, repos.hook.Hook)
self.mock_assertions()
def test_create_issue(self):
self.response('issue', 201)
title = 'Construct _api attribute on our own'
self.post(self.api + 'issues')
self.conf = {'data': {'title': title}}
self.assertRaises(github3.GitHubError, self.repo.create_issue, title)
self.login()
assert self.repo.create_issue(None) is None
assert isinstance(self.repo.create_issue(title), github3.issues.Issue)
self.mock_assertions()
body = 'Fake body'
#self.conf['data'].update(body=body)
assert isinstance(self.repo.create_issue(title, body),
github3.issues.Issue)
self.mock_assertions()
assignee, mile, labels = 'sigmavirus24', 1, ['bug', 'enhancement']
#self.conf['data'].update({'assignee': assignee, 'milestone': mile,
# 'labels': labels})
issue = self.repo.create_issue(title, body, assignee, mile, labels)
assert isinstance(issue, github3.issues.Issue)
self.mock_assertions()
def test_create_key(self):
self.response('key', 201)
self.post(self.api + 'keys')
self.conf = {'data': {'key': 'ssh-rsa foobarbogus',
'title': 'Fake key'}}
self.assertRaises(github3.GitHubError, self.repo.create_key,
**self.conf['data'])
self.login()
assert self.repo.create_key(None, None) is None
self.not_called()
assert isinstance(self.repo.create_key(**self.conf['data']),
github3.users.Key)
self.mock_assertions()
def test_create_label(self):
self.response('label', 201)
self.post(self.api + 'labels')
self.conf = {'data': {'name': 'foo', 'color': 'f00f00'}}
self.assertRaises(github3.GitHubError, self.repo.create_label,
**self.conf['data'])
self.login()
assert self.repo.create_label(None, None) is None
self.not_called()
assert isinstance(self.repo.create_label(**self.conf['data']),
github3.issues.label.Label)
self.mock_assertions()
def test_create_milestone(self):
self.response('milestone', 201)
self.post(self.api + 'milestones')
self.conf = {'data': {'title': 'foo'}}
self.assertRaises(github3.GitHubError, self.repo.create_milestone,
**self.conf['data'])
self.login()
assert self.repo.create_milestone(None) is None
self.not_called()
assert isinstance(self.repo.create_milestone('foo'),
github3.issues.milestone.Milestone)
self.mock_assertions()
def test_create_pull(self):
self.response('pull', 201)
self.post(self.api + 'pulls')
self.conf = {'data': {'title': 'Fake title', 'base': 'master',
'head': 'feature_branch'}}
self.assertRaises(github3.GitHubError, self.repo.create_pull,
**self.conf['data'])
self.login()
assert self.repo.create_pull(None, None, None) is None
self.not_called()
assert isinstance(self.repo.create_pull(**self.conf['data']),
github3.pulls.PullRequest)
self.mock_assertions()
def test_create_pull_from_issue(self):
self.response('pull', 201)
self.post(self.api + 'pulls')
self.conf = {'data': {'issue': 1, 'base': 'master',
'head': 'feature_branch'}}
self.assertRaises(github3.GitHubError,
self.repo.create_pull_from_issue,
**self.conf['data'])
self.login()
assert self.repo.create_pull_from_issue(0, 'foo', 'bar') is None
self.not_called()
pull = self.repo.create_pull_from_issue(**self.conf['data'])
assert isinstance(pull, github3.pulls.PullRequest)
self.mock_assertions()
def test_create_ref(self):
self.response('ref', 201)
self.post(self.api + 'git/refs')
self.conf = {'data': {'ref': 'refs/heads/master', 'sha': 'fakesha'}}
self.assertRaises(github3.GitHubError, self.repo.create_ref,
'foo', 'bar')
self.login()
assert self.repo.create_ref('foo/bar', None) is None
assert isinstance(self.repo.create_ref(**self.conf['data']),
github3.git.Reference)
self.mock_assertions()
def test_create_status(self):
self.response('status', 201)
self.post(self.api + 'statuses/fakesha')
self.conf = {'data': {'state': 'success'}}
self.assertRaises(github3.GitHubError, self.repo.create_status,
'fakesha', 'success')
self.login()
assert self.repo.create_status(None, None) is None
s = self.repo.create_status('fakesha', 'success')
assert isinstance(s, repos.status.Status)
assert repr(s) > ''
self.mock_assertions()
def test_create_tag(self):
self.response('tag', 201)
self.post(self.api + 'git/tags')
data = {
'tag': '0.3', 'message': 'Fake message', 'object': 'fakesha',
'type': 'commit', 'tagger': {
'name': 'Ian Cordasco', 'date': 'Not a UTC date',
'email': 'graffatcolmingov@gmail.com'
}
}
self.conf = {'data': data.copy()}
data['obj_type'] = data['type']
data['sha'] = data['object']
del(data['type'], data['object'])
self.assertRaises(github3.GitHubError, self.repo.create_tag,
None, None, None, None, None)
self.login()
with mock.patch.object(repos.Repository, 'create_ref'):
assert self.repo.create_tag(None, None, None, None,
None) is None
tag = self.repo.create_tag(**data)
assert isinstance(tag, github3.git.Tag)
assert repr(tag).startswith('<Tag')
self.mock_assertions()
with mock.patch.object(repos.Repository, 'create_ref') as cr:
self.repo.create_tag('tag', '', 'fakesha', '', '',
lightweight=True)
cr.assert_called_once_with('refs/tags/tag', 'fakesha')
def test_create_tree(self):
self.response('tree', 201)
self.post(self.api + 'git/trees')
data = {'tree': [{'path': 'file1', 'mode': '100755',
'type': 'tree',
'sha': '75b347329e3fc87ac78895ca1be58daff78872a1'}],
'base_tree': ''}
self.conf = {'data': data}
self.assertRaises(github3.GitHubError, self.repo.create_tree, **data)
self.login()
assert self.repo.create_tree(None) is None
assert self.repo.create_tree({'foo': 'bar'}) is None
self.not_called()
assert isinstance(self.repo.create_tree(**data), github3.git.Tree)
self.mock_assertions()
def test_delete(self):
self.response('', 204)
self.delete(self.api[:-1])
self.conf = {}
self.assertRaises(github3.GitHubError, self.repo.delete)
self.login()
assert self.repo.delete()
self.mock_assertions()
def test_delete_key(self):
self.response('', 204)
self.delete(self.api + 'keys/2')
self.conf = {}
self.assertRaises(github3.GitHubError, self.repo.delete_key, 2)
self.login()
assert self.repo.delete_key(-2) is False
self.not_called()
assert self.repo.delete_key(2)
self.mock_assertions()
def test_delete_subscription(self):
self.response('', 204)
self.delete(self.api + 'subscription')
self.assertRaises(github3.GitHubError, self.repo.delete_subscription)
self.not_called()
self.login()
assert self.repo.delete_subscription()
self.mock_assertions()
def test_edit(self):
self.response('repo')
self.patch(self.api[:-1])
self.conf = {'data': {'name': 'foo'}}
self.assertRaises(github3.GitHubError, self.repo.edit, 'Foo')
self.login()
assert self.repo.edit(None) is False
self.not_called()
assert self.repo.edit('foo')
self.mock_assertions()
self.conf['data']['description'] = 'bar'
assert self.repo.edit(**self.conf['data'])
self.mock_assertions()
def test_is_collaborator(self):
self.response('', 204)
self.get(self.api + 'collaborators/user')
assert self.repo.is_collaborator(None) is False
self.not_called()
assert self.repo.is_collaborator('user')
self.mock_assertions()
def test_git_commit(self):
self.response('git_commit')
self.get(self.api + 'git/commits/fakesha')
assert self.repo.git_commit(None) is None
self.not_called()
assert isinstance(self.repo.git_commit('fakesha'), github3.git.Commit)
self.mock_assertions()
def test_hook(self):
self.response('hook')
self.get(self.api + 'hooks/2')
self.assertRaises(github3.GitHubError, self.repo.hook, 2)
self.login()
assert self.repo.hook(-2) is None
self.not_called()
assert isinstance(self.repo.hook(2), repos.hook.Hook)
self.mock_assertions()
def test_is_assignee(self):
self.response('', 204)
self.get(self.api + 'assignees/login')
assert self.repo.is_assignee(None) is False
self.not_called()
assert self.repo.is_assignee('login')
self.mock_assertions()
def test_issue(self):
self.response('issue')
self.get(self.api + 'issues/2')
assert self.repo.issue(-2) is None
self.not_called()
assert isinstance(self.repo.issue(2), github3.issues.Issue)
self.mock_assertions()
def test_key(self):
self.response('key')
self.get(self.api + 'keys/2')
self.assertRaises(github3.GitHubError, self.repo.key, 2)
self.login()
assert self.repo.key(-2) is None
self.not_called()
assert isinstance(self.repo.key(2), github3.users.Key)
self.mock_assertions()
def test_label(self):
self.response('label')
self.get(self.api + 'labels/name')
assert self.repo.label(None) is None
self.not_called()
assert isinstance(self.repo.label('name'), github3.issues.label.Label)
self.mock_assertions()
def test_iter_assignees(self):
self.response('user', _iter=True)
self.get(self.api + 'assignees')
self.conf = {'params': {'per_page': 100}}
u = next(self.repo.iter_assignees())
assert isinstance(u, github3.users.User)
self.mock_assertions()
def test_iter_branches(self):
self.response('branch', _iter=True)
self.get(self.api + 'branches')
self.conf = {'params': {'per_page': 100}}
b = next(self.repo.iter_branches())
assert isinstance(b, repos.branch.Branch)
self.mock_assertions()
def test_iter_collaborators(self):
self.response('user', _iter=True)
self.get(self.api + 'collaborators')
self.conf = {'params': {'per_page': 100}}
u = next(self.repo.iter_collaborators())
assert isinstance(u, github3.users.User)
self.mock_assertions()
def test_iter_comments(self):
self.response('repo_comment', _iter=True)
self.get(self.api + 'comments')
self.conf = {'params': {'per_page': 100}}
c = next(self.repo.iter_comments())
assert isinstance(c, repos.comment.RepoComment)
self.mock_assertions()
def test_iter_comments_on_commit(self):
self.response('repo_comment', _iter=True)
self.get(self.api + 'commits/fakesha/comments')
self.conf = {'params': {'per_page': 1}}
c = next(self.repo.iter_comments_on_commit('fakesha'))
assert isinstance(c, repos.comment.RepoComment)
self.mock_assertions()
def test_iter_commits(self):
self.response('commit', _iter=True)
self.get(self.api + 'commits')
self.conf = {'params': {'per_page': 100}}
c = next(self.repo.iter_commits())
assert isinstance(c, repos.commit.RepoCommit)
self.mock_assertions()
self.conf = {'params': {'sha': 'fakesha', 'path': '/',
'per_page': 100}}
c = next(self.repo.iter_commits('fakesha', '/'))
self.mock_assertions()
since = datetime(2013, 6, 1, 0, 0, 0)
until = datetime(2013, 6, 2, 0, 0, 0)
self.conf = {'params': {'since': '2013-06-01T00:00:00',
'until': '2013-06-02T00:00:00',
'per_page': 100}}
c = next(self.repo.iter_commits(since=since, until=until))
self.mock_assertions()
since = '2013-06-01T00:00:00'
until = '2013-06-02T00:00:00'
self.conf = {'params': {'since': '2013-06-01T00:00:00',
'until': '2013-06-02T00:00:00',
'per_page': 100}}
c = next(self.repo.iter_commits(since=since, until=until))
self.mock_assertions()
def test_iter_contributors(self):
self.response('user', _iter=True)
self.get(self.api + 'contributors')
self.conf = {'params': {'per_page': 100}}
u = next(self.repo.iter_contributors())
assert isinstance(u, github3.users.User)
self.mock_assertions()
self.conf = {'params': {'anon': True, 'per_page': 100}}
next(self.repo.iter_contributors(True))
self.mock_assertions()
next(self.repo.iter_contributors('true value'))
self.mock_assertions()
def test_iter_events(self):
self.response('event', _iter=True)
self.get(self.api + 'events')
self.conf = {'params': {'per_page': 100}}
e = next(self.repo.iter_events())
assert isinstance(e, github3.events.Event)
self.mock_assertions()
def test_iter_forks(self):
self.response('repo', _iter=True)
self.get(self.api + 'forks')
self.conf = {'params': {'per_page': 100}}
r = next(self.repo.iter_forks())
assert isinstance(r, repos.Repository)
self.mock_assertions()
self.conf['params']['sort'] = 'newest'
forks_params = self.conf['params'].copy()
forks_params.pop('per_page')
next(self.repo.iter_forks(**forks_params))
self.mock_assertions()
def test_iter_hooks(self):
self.response('hook', _iter=True)
self.get(self.api + 'hooks')
self.conf = {'params': {'per_page': 100}}
self.assertRaises(github3.GitHubError, self.repo.iter_hooks)
self.login()
h = next(self.repo.iter_hooks())
assert isinstance(h, repos.hook.Hook)
self.mock_assertions()
def test_iter_issues(self):
self.response('issue', _iter=True)
self.get(self.api + 'issues')
params = {'per_page': 100}
self.conf = {'params': params}
i = next(self.repo.iter_issues())
assert isinstance(i, github3.issues.Issue)
self.mock_assertions()
params['milestone'] = 'none'
next(self.repo.iter_issues('none'))
self.mock_assertions()
params['state'] = 'open'
request_params = params.copy()
request_params.pop('per_page')
next(self.repo.iter_issues(**request_params))
self.mock_assertions()
def test_iter_issue_events(self):
self.response('issue_event', _iter=True)
self.get(self.api + 'issues/events')
self.conf = {'params': {'per_page': 100}}
e = next(self.repo.iter_issue_events())
assert isinstance(e, github3.issues.event.IssueEvent)
self.mock_assertions()
def test_iter_keys(self):
self.response('key', _iter=True)
self.get(self.api + 'keys')
self.assertRaises(github3.GitHubError, self.repo.iter_keys)
self.login()
k = next(self.repo.iter_keys())
assert isinstance(k, github3.users.Key)
self.mock_assertions()
def test_iter_labels(self):
self.response('label', _iter=True)
self.get(self.api + 'labels')
l = next(self.repo.iter_labels())
assert isinstance(l, github3.issues.label.Label)
self.mock_assertions()
def test_iter_languages(self):
#: repos/:login/:repo/languages is just a dictionary, so _iter=False
self.response('language')
self.get(self.api + 'languages')
l = next(self.repo.iter_languages())
assert isinstance(l, tuple)
self.assertNotIn('ETag', l)
self.assertNotIn('Last-Modified', l)
self.mock_assertions()
def test_iter_milestones(self):
self.response('milestone', _iter=True)
self.get(self.api + 'milestones')
m = next(self.repo.iter_milestones())
assert isinstance(m, github3.issues.milestone.Milestone)
self.mock_assertions()
def test_iter_network_events(self):
self.response('event', _iter=True)
self.get(self.api.replace('repos', 'networks', 1) + 'events')
e = next(self.repo.iter_network_events())
assert isinstance(e, github3.events.Event)
self.mock_assertions()
def test_iter_notifications(self):
self.response('notification', _iter=True)
self.get(self.api + 'notifications')
self.conf.update(params={'per_page': 100})
self.assertRaises(github3.GitHubError, self.repo.iter_notifications)
self.login()
n = next(self.repo.iter_notifications())
assert isinstance(n, github3.notifications.Thread)
self.mock_assertions()
def test_iter_pulls(self):
self.response('pull', _iter=True)
self.get(self.api + 'pulls')
base_params = {'per_page': 100, 'sort': 'created', 'direction': 'desc'}
self.conf.update(params=base_params)
p = next(self.repo.iter_pulls())
assert isinstance(p, github3.pulls.PullRequest)
self.mock_assertions()
next(self.repo.iter_pulls('foo'))
self.mock_assertions()
params = {'state': 'open'}
params.update(base_params)
self.conf.update(params=params)
next(self.repo.iter_pulls('Open'))
self.mock_assertions()
params = {'head': 'user:branch'}
params.update(base_params)
self.conf.update(params=params)
next(self.repo.iter_pulls(head='user:branch'))
self.mock_assertions()
params = {'base': 'branch'}
params.update(base_params)
self.conf.update(params=params)
next(self.repo.iter_pulls(base='branch'))
self.mock_assertions()
def test_iter_refs(self):
self.response('ref', _iter=True)
self.get(self.api + 'git/refs')
r = next(self.repo.iter_refs())
assert isinstance(r, github3.git.Reference)
self.mock_assertions()
self.get(self.api + 'git/refs/subspace')
r = next(self.repo.iter_refs('subspace'))
assert isinstance(r, github3.git.Reference)
self.mock_assertions()
def test_iter_stargazers(self):
self.response('user', _iter=True)
self.get(self.api + 'stargazers')
u = next(self.repo.iter_stargazers())
assert isinstance(u, github3.users.User)
self.mock_assertions()
def test_iter_subscribers(self):
self.response('user', _iter=True)
self.get(self.api + 'subscribers')
u = next(self.repo.iter_subscribers())
assert isinstance(u, github3.users.User)
self.mock_assertions()
def test_iter_statuses(self):
self.response('status', _iter=True)
self.get(self.api + 'statuses/fakesha')
with self.assertRaises(StopIteration):
next(self.repo.iter_statuses(None))
self.not_called()
s = next(self.repo.iter_statuses('fakesha'))
assert isinstance(s, repos.status.Status)
self.mock_assertions()
def test_iter_tags(self):
self.response('tag', _iter=True)
self.get(self.api + 'tags')
t = next(self.repo.iter_tags())
assert isinstance(t, repos.tag.RepoTag)
self.mock_assertions()
assert repr(t).startswith('<Repository Tag')
assert str(t) > ''
def test_iter_teams(self):
self.response('team', _iter=True)
self.get(self.api + 'teams')
self.assertRaises(github3.GitHubError, self.repo.iter_teams)
self.not_called()
self.login()
t = next(self.repo.iter_teams())
assert isinstance(t, github3.orgs.Team)
self.mock_assertions()
def test_mark_notifications(self):
self.response('', 205)
self.put(self.api + 'notifications')
self.conf = {'data': {'read': True}}
self.assertRaises(github3.GitHubError, self.repo.mark_notifications)
self.not_called()
self.login()
assert self.repo.mark_notifications()
self.mock_assertions()
assert self.repo.mark_notifications('2013-01-18T19:53:04Z')
self.conf['data']['last_read_at'] = '2013-01-18T19:53:04Z'
self.mock_assertions()
def test_merge(self):
self.response('commit', 201)
self.post(self.api + 'merges')
self.conf = {'data': {'base': 'master', 'head': 'sigma/feature'}}
self.assertRaises(github3.GitHubError, self.repo.merge, 'foo', 'bar')
self.not_called()
self.login()
assert isinstance(self.repo.merge('master', 'sigma/feature'),
repos.commit.RepoCommit)
self.mock_assertions()
self.conf['data']['commit_message'] = 'Commit message'
self.repo.merge('master', 'sigma/feature', 'Commit message')
self.mock_assertions()
def test_milestone(self):
self.response('milestone', 200)
self.get(self.api + 'milestones/2')
assert self.repo.milestone(0) is None
self.not_called()
assert isinstance(self.repo.milestone(2),
github3.issues.milestone.Milestone)
self.mock_assertions()
def test_parent(self):
json = self.repo.to_json().copy()
json['parent'] = json.copy()
r = repos.Repository(json)
assert isinstance(r.parent, repos.Repository)
def test_pull_request(self):
self.response('pull', 200)
self.get(self.api + 'pulls/2')
assert self.repo.pull_request(0) is None
self.not_called()
assert isinstance(self.repo.pull_request(2), github3.pulls.PullRequest)
self.mock_assertions()
def test_readme(self):
self.response('readme', 200)
self.get(self.api + 'readme')
assert isinstance(self.repo.readme(), repos.contents.Contents)
self.mock_assertions()
def test_ref(self):
self.response('ref', 200)
self.get(self.api + 'git/refs/fakesha')
assert self.repo.ref(None) is None
self.not_called()
assert isinstance(self.repo.ref('fakesha'), github3.git.Reference)
self.mock_assertions()
def test_remove_collaborator(self):
self.response('', 204)
self.delete(self.api + 'collaborators/login')
self.assertRaises(github3.GitHubError, self.repo.remove_collaborator,
None)
self.not_called()
self.login()
assert self.repo.remove_collaborator(None) is False
self.not_called()
assert self.repo.remove_collaborator('login')
self.mock_assertions()
def test_repr(self):
assert repr(self.repo) == '<Repository [sigmavirus24/github3.py]>'
def test_source(self):
json = self.repo.to_json().copy()
json['source'] = json.copy()
r = repos.Repository(json)
assert isinstance(r.source, repos.Repository)
def test_set_subscription(self):
self.response('subscription')
self.put(self.api + 'subscription')
self.conf = {'data': {'subscribed': True, 'ignored': False}}
self.assertRaises(github3.GitHubError, self.repo.set_subscription,
True, False)
self.not_called()
self.login()
s = self.repo.set_subscription(True, False)
assert isinstance(s, github3.notifications.Subscription)
self.mock_assertions()
def test_subscription(self):
self.response('subscription')
self.get(self.api + 'subscription')
self.assertRaises(github3.GitHubError, self.repo.subscription)
self.not_called()
self.login()
s = self.repo.subscription()
assert isinstance(s, github3.notifications.Subscription)
self.mock_assertions()
def test_tag(self):
self.response('tag')
self.get(self.api + 'git/tags/fakesha')
assert self.repo.tag(None) is None
self.not_called()
assert isinstance(self.repo.tag('fakesha'), github3.git.Tag)
self.mock_assertions()
def test_tree(self):
self.response('tree')
self.get(self.api + 'git/trees/fakesha')
assert self.repo.tree(None) is None
self.not_called()
assert isinstance(self.repo.tree('fakesha'), github3.git.Tree)
self.mock_assertions()
def test_update_label(self):
self.response('label')
self.patch(self.api + 'labels/Bug')
self.conf = {'data': {'name': 'big_bug', 'color': 'fafafa'}}
self.assertRaises(github3.GitHubError, self.repo.update_label,
'foo', 'bar')
self.not_called()
self.login()
with mock.patch.object(repos.Repository, 'label') as l:
l.return_value = None
assert self.repo.update_label('foo', 'bar') is False
self.not_called()
with mock.patch.object(repos.Repository, 'label') as l:
l.return_value = github3.issues.label.Label(load('label'), self.g)
assert self.repo.update_label('big_bug', 'fafafa')
self.mock_assertions()
def test_equality(self):
assert self.repo == repos.Repository(load('repo'))
def test_create_file(self):
self.response('create_content', 201)
self.put(self.api + 'contents/setup.py')
self.conf = {'data': {'message': 'Foo bar',
'content': 'Zm9vIGJhciBib2d1cw==',
'branch': 'develop',
'author': {'name': 'Ian', 'email': 'foo'},
'committer': {'name': 'Ian', 'email': 'foo'}}}
self.assertRaises(github3.GitHubError, self.repo.create_file,
None, None, None)
self.not_called()
self.login()
ret = self.repo.create_file('setup.py', 'Foo bar', b'foo bar bogus',
'develop',
{'name': 'Ian', 'email': 'foo'},
{'name': 'Ian', 'email': 'foo'})
assert isinstance(ret, dict)
assert isinstance(ret['commit'], github3.git.Commit)
assert isinstance(ret['content'], repos.contents.Contents)
self.mock_assertions()
def test_update_file(self):
self.response('create_content', 200)
self.put(self.api + 'contents/setup.py')
self.conf = {
'data': {
'message': 'foo',
'content': 'Zm9vIGJhciBib2d1cw==',
'sha': 'ae02db',
}
}
self.assertRaises(github3.GitHubError, self.repo.update_file,
None, None, None, None)
self.not_called()
self.login()
ret = self.repo.update_file('setup.py', 'foo', b'foo bar bogus',
'ae02db')
assert isinstance(ret, dict)
assert isinstance(ret['commit'], github3.git.Commit)
assert isinstance(ret['content'], repos.contents.Contents)
self.mock_assertions()
def test_delete_file(self):
self.response('create_content', 200)
self.delete(self.api + 'contents/setup.py')
self.conf = {'data': {'message': 'foo', 'sha': 'ae02db'}}
self.assertRaises(github3.GitHubError, self.repo.delete_file,
'setup.py', None, None)
self.not_called()
self.login()
ret = self.repo.delete_file('setup.py', 'foo', 'ae02db')
assert isinstance(ret, github3.git.Commit)
self.mock_assertions()
def test_weekly_commit_count(self):
self.response('weekly_commit_count', ETag='"foobarbogus"')
self.request.return_value.headers['Last-Modified'] = 'foo'
self.get(self.api + 'stats/participation')
w = self.repo.weekly_commit_count()
self.assertTrue(w.get('owner') is not None)
self.assertTrue(w.get('all') is not None)
self.mock_assertions()
self.response('', 202)
w = self.repo.weekly_commit_count()
self.assertEqual(w, {})
self.mock_assertions()
def test_iter_commit_activity(self):
self.response('commit_activity', _iter=True)
self.get(self.api + 'stats/commit_activity')
w = next(self.repo.iter_commit_activity())
assert isinstance(w, dict)
self.mock_assertions()
def test_iter_contributor_statistics(self):
self.response('contributor_statistics', _iter=True)
self.get(self.api + 'stats/contributors')
s = next(self.repo.iter_contributor_statistics())
assert isinstance(s, repos.stats.ContributorStats)
self.mock_assertions()
def test_iter_code_frequency(self):
self.response('code_frequency', _iter=True)
self.get(self.api + 'stats/code_frequency')
s = next(self.repo.iter_code_frequency())
assert isinstance(s, list)
self.mock_assertions()
class TestContents(BaseCase):
def __init__(self, methodName='runTest'):
super(TestContents, self).__init__(methodName)
self.contents = repos.contents.Contents(load('readme'))
self.api = self.contents._api
def setUp(self):
super(TestContents, self).setUp()
self.contents = repos.contents.Contents(self.contents.to_json(),
self.g)
def test_equality(self):
contents = repos.contents.Contents(load('readme'))
assert self.contents == contents
contents.sha = 'fakesha'
assert self.contents != contents
def test_git_url(self):
assert self.contents.links['git'] == self.contents.git_url
def test_html_url(self):
assert self.contents.links['html'] == self.contents.html_url
def test_repr(self):
assert repr(self.contents) == '<Content [{0}]>'.format('README.rst')
def test_delete(self):
self.response('create_content', 200)
self.delete(self.api)
self.conf = {
'data': {
'message': 'foo',
'sha': self.contents.sha,
}
}
self.assertRaises(github3.GitHubError, self.contents.delete, None)
self.not_called()
self.login()
c = self.contents.delete('foo')
assert isinstance(c, github3.git.Commit)
self.mock_assertions()
def test_update(self):
self.response('create_content', 200)
self.put(self.api)
self.conf = {
'data': {
'message': 'foo',
'content': 'Zm9vIGJhciBib2d1cw==',
'sha': self.contents.sha,
}
}
self.assertRaises(github3.GitHubError, self.contents.update,
None, None)
self.not_called()
self.login()
ret = self.contents.update('foo', b'foo bar bogus')
assert isinstance(ret, github3.git.Commit)
self.mock_assertions()
class TestHook(BaseCase):
def __init__(self, methodName='runTest'):
super(TestHook, self).__init__(methodName)
self.hook = repos.hook.Hook(load('hook'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"hooks/292492")
def setUp(self):
super(TestHook, self).setUp()
self.hook = repos.hook.Hook(self.hook.to_json(), self.g)
def test_equality(self):
h = repos.hook.Hook(load('hook'))
assert self.hook == h
h._uniq = 1
assert self.hook != h
def test_repr(self):
assert repr(self.hook) == '<Hook [readthedocs]>'
def test_delete(self):
self.response('', 204)
self.delete(self.api)
self.assertRaises(github3.GitHubError, self.hook.delete)
self.not_called()
self.login()
assert self.hook.delete()
self.mock_assertions()
def test_edit(self):
self.response('hook', 200)
self.patch(self.api)
data = {
'config': {'push': 'http://example.com'},
'events': ['push'],
'add_events': ['fake_ev'],
'rm_events': ['fake_ev'],
'active': True,
}
self.conf = {'data': data.copy()}
self.conf['data']['remove_events'] = data['rm_events']
del(self.conf['data']['rm_events'])
self.assertRaises(github3.GitHubError, self.hook.edit, **data)
self.login()
self.not_called()
assert self.hook.edit(**data)
self.mock_assertions()
def test_edit_failed(self):
self.response('', 404)
self.patch(self.api)
self.conf = {}
self.login()
assert self.hook.edit() is False
self.mock_assertions()
def test_test(self):
# Funny name, no?
self.response('', 204)
self.post(self.api + '/tests')
self.conf = {}
self.assertRaises(github3.GitHubError, self.hook.test)
self.not_called()
self.login()
assert self.hook.test()
self.mock_assertions()
def test_ping(self):
# Funny name, no?
self.response('', 204)
self.post(self.api + '/pings')
self.conf = {}
self.assertRaises(github3.GitHubError, self.hook.ping)
self.not_called()
self.login()
assert self.hook.ping()
self.mock_assertions()
class TestRepoComment(BaseCase):
def __init__(self, methodName='runTest'):
super(TestRepoComment, self).__init__(methodName)
self.comment = repos.comment.RepoComment(load('repo_comment'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"comments/1380832")
def setUp(self):
super(TestRepoComment, self).setUp()
self.comment = repos.comment.RepoComment(self.comment.to_json(),
self.g)
def test_delete(self):
self.response('', 204)
self.delete(self.api)
self.assertRaises(github3.GitHubError, self.comment.delete)
self.not_called()
self.login()
assert self.comment.delete()
self.mock_assertions()
def test_repr(self):
assert repr(self.comment).startswith('<Repository Comment')
def test_update(self):
self.post(self.api)
self.response('repo_comment', 200)
self.conf = {'data': {'body': 'This is a comment body'}}
self.assertRaises(github3.GitHubError, self.comment.update, 'foo')
self.login()
assert self.comment.update(None) is False
self.not_called()
assert self.comment.update('This is a comment body')
self.mock_assertions()
class TestRepoCommit(BaseCase):
def __init__(self, methodName='runTest'):
super(TestRepoCommit, self).__init__(methodName)
self.commit = repos.commit.RepoCommit(load('commit'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"commits/76dcc6cb4b9860034be81b7e58adc286a115aa97")
def test_equality(self):
c = repos.commit.RepoCommit(load('commit'))
assert self.commit == c
c._uniq = 'fake'
assert self.commit != c
def test_repr(self):
assert repr(self.commit).startswith('<Repository Commit')
def test_diff(self):
self.response('archive', 200)
self.get(self.api)
self.conf.update(headers={'Accept': 'application/vnd.github.diff'})
assert self.commit.diff().startswith(b'archive_data')
self.mock_assertions()
def test_patch(self):
self.response('archive', 200)
self.get(self.api)
self.conf.update(headers={'Accept': 'application/vnd.github.patch'})
assert self.commit.patch().startswith(b'archive_data')
self.mock_assertions()
class TestComparison(BaseCase):
def __init__(self, methodName='runTest'):
super(TestComparison, self).__init__(methodName)
self.comp = repos.comparison.Comparison(load('comparison'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"compare/a811e1a270f65eecb65755eca38d888cbefcb0a7..."
"76dcc6cb4b9860034be81b7e58adc286a115aa97")
def test_repr(self):
assert repr(self.comp).startswith('<Comparison ')
def test_equality(self):
comp = repos.comparison.Comparison(load('comparison'))
assert self.comp == comp
comp.commits.pop(0)
assert self.comp != comp
def test_diff(self):
self.response('archive', 200)
self.get(self.api)
self.conf.update(headers={'Accept': 'application/vnd.github.diff'})
assert self.comp.diff().startswith(b'archive_data')
self.mock_assertions()
def test_patch(self):
self.response('archive', 200)
self.get(self.api)
self.conf.update(headers={'Accept': 'application/vnd.github.patch'})
assert self.comp.patch().startswith(b'archive_data')
self.mock_assertions()
class TestAsset(BaseCase):
def __init__(self, methodName='runTest'):
super(TestAsset, self).__init__(methodName)
self.asset = repos.release.Asset(load('asset'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"releases/assets/37945")
def test_repr(self):
assert repr(self.asset) == '<Asset [github3.py-0.7.1.tar.gz]>'
def test_download(self):
headers = {'content-disposition': 'filename=foo'}
self.response('archive', 200, **headers)
self.get(self.api)
self.conf.update({
'stream': True,
'allow_redirects': False,
'headers': {'Accept': 'application/octet-stream'}
})
# 200, to default location
assert os.path.isfile('foo') is False
assert self.asset.download()
assert os.path.isfile('foo')
os.unlink('foo')
self.mock_assertions()
self.request.return_value.raw.seek(0)
self.request.return_value._content_consumed = False
# 200, to path
assert os.path.isfile('path_to_file') is False
assert self.asset.download('path_to_file')
assert os.path.isfile('path_to_file')
os.unlink('path_to_file')
self.mock_assertions()
self.request.return_value.raw.seek(0)
self.request.return_value._content_consumed = False
# 200, to file-like object
o = mock.mock_open()
with mock.patch('{0}.open'.format(__name__), o, create=True):
with open('download', 'wb+') as fd:
self.asset.download(fd)
o.assert_called_once_with('download', 'wb+')
fd = o()
fd.write.assert_called_once_with(b'archive_data')
self.mock_assertions()
self.request.return_value.raw.seek(0)
self.request.return_value._content_consumed = False
# 302, to file-like object
r = self.request.return_value
target = 'http://github.s3.example.com/foo'
self.response('', 302, location=target)
self.get(target)
self.request.side_effect = [self.request.return_value, r]
self.conf['headers'].update({
'Authorization': None,
'Content-Type': None,
})
del self.conf['allow_redirects']
o = mock.mock_open()
with mock.patch('{0}.open'.format(__name__), o, create=True):
with open('download', 'wb+') as fd:
self.asset.download(fd)
o.assert_called_once_with('download', 'wb+')
fd = o()
fd.write.assert_called_once_with(b'archive_data')
self.mock_assertions()
# 404
self.response('', 404)
self.request.side_effect = None
assert self.asset.download() is False
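# Note on TestAsset.test_download above: the 302 branch checks that when GitHub
# redirects an asset download to external storage, the follow-up request is made
# without the Authorization and Content-Type headers (both are expected to be
# None in the request kwargs) and without allow_redirects.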
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
def main():
setup(
name="spentfuelgpr",
version="0.1",
description="Python part of the MIsoEnrichment module",
author="Nuclear Verification and Disarmament Group, RWTH Aachen University",
url="https://github.com/maxschalz/miso_enrichment/",
license="BSD-3-Clause",
packages=["spentfuelgpr"],
classifiers=["License :: OSI Approved :: BSD-3-Clause License",
"Programming Language :: Python :: 3"],
install_requires=["numpy", "scipy"]
)
return
if __name__=="__main__":
main()
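# Typical install (assumption, standard setuptools workflow): running
#   pip install .
# from the directory containing this setup.py installs the 'spentfuelgpr'
# package together with its numpy and scipy dependencies.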
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Client for communicating with the IBM Quantum API via websocket."""
import json
import logging
from abc import ABC
from typing import Dict, Any
from websocket import WebSocketApp, STATUS_NORMAL
from qiskit_ibm_provider.apiconstants import ApiJobStatus, API_JOB_FINAL_STATES
from qiskit_ibm_provider.utils.utils import filter_data
from .base import BaseWebsocketClient, WebsocketClientCloseCode
from ..exceptions import (
WebsocketError,
WebsocketIBMProtocolError,
WebsocketAuthenticationError,
)
from ..rest.utils.data_mapper import map_job_status_response
logger = logging.getLogger(__name__)
class WebsocketMessage(ABC):
"""Container for a message sent or received via websockets."""
def __init__(self, type_: str, data: Any) -> None:
"""WebsocketMessage constructor.
Args:
type_: Message type.
data: Message data
"""
self._type = type_
self._data = data
@property
def data(self) -> Any:
"""Return message data."""
return self._data
@property
def type(self) -> str:
"""Return message type."""
return self._type
def as_json(self) -> str:
"""Return a JSON representation of the message."""
return json.dumps({"type": self._type, "data": self._data})
class WebsocketAuthenticationMessage(WebsocketMessage):
"""Container for an authentication message sent via websockets."""
def __init__(self, access_token: str) -> None:
"""WebsocketAuthenticationMessage constructor.
Args:
access_token: Access token.
"""
super().__init__(type_="authentication", data=access_token)
class WebsocketResponseMethod(WebsocketMessage):
"""Container for a message received via websockets."""
@classmethod
def from_json(cls, json_string: str) -> "WebsocketResponseMethod":
"""Instantiate a message from a JSON response."""
try:
parsed_dict = json.loads(json_string)
except (ValueError, AttributeError) as ex:
exception_to_raise = WebsocketIBMProtocolError(
"Unable to parse the message received from the server: {!r}".format(
json_string
)
)
logger.info(
'An exception occurred. Raising "%s" from "%s"',
repr(exception_to_raise),
repr(ex),
)
raise exception_to_raise from ex
return cls(parsed_dict["type"], parsed_dict.get("data", None))
class WebsocketClient(BaseWebsocketClient):
"""Client for websocket communication with the IBM Quantum API."""
_API_STATUS_INTERNAL_ERROR = 4001
_API_STATUS_JOB_DONE = 4002
_API_STATUS_JOB_NOT_FOUND = 4003
def on_open(self, wsa: WebSocketApp) -> None:
"""Called when websocket connection established.
Args:
wsa: WebSocketApp object.
"""
super().on_open(wsa)
# Authenticate against the server.
auth_request = WebsocketAuthenticationMessage(access_token=self._access_token)
self._ws.send(auth_request.as_json())
def _handle_message(self, message: str) -> None:
"""Handle received message.
Args:
message: Message received.
"""
if not self._authenticated:
# First message is an auth ACK
self._handle_auth_response(message)
else:
self._handle_status_response(message)
def _handle_auth_response(self, message: str) -> None:
"""Handle authentication response.
Args:
message: Authentication response message.
"""
auth_response = WebsocketResponseMethod.from_json(message)
if auth_response.type != "authenticated":
self._error = message
self.disconnect(WebsocketClientCloseCode.PROTOCOL_ERROR)
else:
self._authenticated = True
def _handle_status_response(self, message: str) -> None:
"""Handle status response.
Args:
message: Status response message.
"""
response = WebsocketResponseMethod.from_json(message)
        if logger.getEffectiveLevel() == logging.DEBUG:
logger.debug(
"Received message from websocket: %s", filter_data(response.data)
)
self._last_message = map_job_status_response(response.data)
if self._message_queue is not None:
self._message_queue.put(self._last_message)
self._current_retry = 0
job_status = response.data.get("status")
if job_status and ApiJobStatus(job_status) in API_JOB_FINAL_STATES:
self.disconnect()
def get_job_status(
self, retries: int = 5, backoff_factor: float = 0.5
) -> Dict[str, str]:
"""Return the status of a job.
Read status messages from the server, which are issued at regular
intervals. When a final state is reached, the server
closes the socket. If the websocket connection is closed without
a reason, the exponential backoff algorithm is used as a basis to
re-establish the connection. The steps are:
1. When a connection closes, sleep for a calculated backoff
time.
2. Try to make a new connection and increment the retry
counter.
3. Attempt to get the job status.
- If the connection is closed, go back to step 1.
- If the job status is read successfully, reset the retry
counter.
4. Continue until the job reaches a final state or the maximum
number of retries is met.
Args:
retries: Max number of retries.
backoff_factor: Backoff factor used to calculate the
time to wait between retries.
Returns:
The final API response for the status of the job, as a dictionary that
contains at least the keys ``status`` and ``id``.
Raises:
WebsocketError: If the websocket connection ended unexpectedly.
WebsocketTimeoutError: If the timeout has been reached.
"""
url = "{}/jobs/{}/status/v/1".format(self._websocket_url, self._job_id)
return self.stream(url=url, retries=retries, backoff_factor=backoff_factor)
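    # Illustrative note (assumption): the reconnect/retry loop described in the
    # docstring above is implemented by BaseWebsocketClient.stream(), which is
    # not shown here. With a typical exponential backoff the wait before retry n
    # is roughly backoff_factor * (2 ** (n - 1)), so the defaults
    # (retries=5, backoff_factor=0.5) give waits of about 0.5s, 1s, 2s, 4s, 8s.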
def _handle_stream_iteration(self) -> None:
"""Handle a streaming iteration."""
if not self._authenticated:
raise WebsocketAuthenticationError(
f"Failed to authenticate against the server: {self._error}"
)
if self._server_close_code == self._API_STATUS_JOB_DONE:
self._server_close_code = STATUS_NORMAL
if self._server_close_code == self._API_STATUS_JOB_NOT_FOUND:
raise WebsocketError(
f"Connection with websocket closed with code {self._server_close_code}: "
f"Job ID {self._job_id} not found."
)
|
"""
URL route definitions for api_v2
Of importance (and perhaps in error) the OAuth2 endpoints are
including underneath the `/api_v2` url segment.
In theory this will allow us to use a different OAuth2 mechanism
for future versions of the API with less friction. We will see
how that plays out in practice.
"""
from django.conf.urls import include
from django.urls import path
from .users import user_endpoints
from .workflows import workflow_endpoints
from ..views import workflow_api_root
from django_workflow_system.views import get_user_input_type_helper
urlpatterns = [
path(
"step_user_input_type_helper/",
get_user_input_type_helper,
name="step_user_input_type_helper",
),
path("workflow_system/users/", include(user_endpoints)),
path("workflow_system/", include(workflow_endpoints)),
path("", workflow_api_root),
]
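# Resulting routes (relative to wherever this urlconf is included), for example:
#   step_user_input_type_helper/  -> get_user_input_type_helper
#   workflow_system/users/...     -> user_endpoints
#   workflow_system/...           -> workflow_endpoints
#   "" (the bare prefix)          -> workflow_api_root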
|
##############################################################################################################################
# Author: Roni Haas
# Main goal: Takes several pileup files and for each one prints to a new file only unique sites (that is, only sites that are
# not in any of the other pileup files
##############################################################################################################################
from Utility.generators_utilities import class_generator
from Utility.parallel_generator import parallel_generator
from Utility.Pileup_class import Pileup_line
from Processing.pileup_sorting import pileup_sort
from docopt import docopt
import tempfile
import os
import shutil
def get_candidate_nucl(pileup_line):
clean_str=pileup_line.split()[4]
    clean_str = [char for char in clean_str if char not in ('^', '$')]
    clean_str = ''.join(clean_str)
    # '^' and '$' are already stripped above; map reverse-strand matches (',')
    # onto '.' so both strands are counted the same way below
    sense_string = clean_str.replace(',', '.')
    # NOTE: an earlier version assumed the read letters were upper case only;
    # both cases are counted in nucl_changes below, so no case folding is needed.
    # sense_string_upper = sense_string.upper()
# Find the candidate nucleotide reads
nucl_changes = {"A": 0, "C": 0, "G": 0, "T": 0, "a": 0, "c": 0, "g": 0, "t": 0}
for nucl in list(sense_string):
if nucl in nucl_changes.keys():
nucl_changes[nucl] = nucl_changes[nucl]+1
else:
continue
# get the maximal nucleous change, key is the value of the dict
(candidate_nucl, candidate_nucl_reads) = max(nucl_changes.items(), key=lambda x: x[1])
return(candidate_nucl)
def filter_for_specific_node_XtoY_editing_sites(pileup_file, node_name):
from_nuc = node_name.split("_")[2]
to_nuc = node_name.split("_")[3]
nuc_pair_list = {"A":"T", "C":"G", "T":"A", "G":"C"}
original_pileup_file = os.path.join(pileup_file + "_before_" + from_nuc + "_" + to_nuc + "_filtering")
shutil.copyfile(pileup_file, original_pileup_file)
with open (original_pileup_file,'r') as pileup:
with open (pileup_file,'w') as new_pileup:
for line in pileup:
reference_nucl = line.split()[2]
reference_nucl = reference_nucl.upper()
if reference_nucl == from_nuc and get_candidate_nucl(line) == to_nuc or \
reference_nucl == to_nuc and get_candidate_nucl(line) == from_nuc or \
reference_nucl == nuc_pair_list[to_nuc] and get_candidate_nucl(line) == nuc_pair_list[from_nuc].lower() or \
reference_nucl == nuc_pair_list[from_nuc] and get_candidate_nucl(line) == nuc_pair_list[to_nuc].lower():
new_pileup.write(str(line))
return pileup_file
def filter_hyper_non_relevant_editing_sites(pileup_filename_list):
for file in pileup_filename_list:
is_hyper = False
node_name = file.split("/")[-3]
try:
is_hyper = (node_name.split("_")[1] == "hyper")
        except IndexError:
            is_hyper = False
if is_hyper:
filter_for_specific_node_XtoY_editing_sites(file, node_name)
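# Illustrative example (hypothetical pileup line, not from the original data):
# for "chr1  1000  A  5  .,TtT  IIIII", get_candidate_nucl() takes the read
# column ".,TtT", maps ',' onto '.', counts {'T': 2, 't': 1} among the candidate
# nucleotides and returns 'T', the most frequent mismatch at that site.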
|
# 821. Shortest Distance to a Character
# ttungl@gmail.com
# Given a string S and a character C, return an array of integers representing the shortest distance from the character C in the string.
# Example 1:
# Input: S = "loveleetcode", C = 'e'
# Output: [3, 2, 1, 0, 1, 0, 0, 1, 2, 2, 1, 0]
# Note:
# S string length is in [1, 10000].
# C is a single character, and guaranteed to be in string S.
# All letters in S and C are lowercase.
class Solution(object):
def shortestToChar(self, S, C):
"""
:type S: str
:type C: str
:rtype: List[int]
"""
        # sol 1 (sols 2 and 3 below are kept for reference only; they never run
        # because of this early return):
        # runtime: 955ms
        return [min(abs(i - li) for li in [j for j, v in enumerate(S) if v == C]) for i in range(len(S))]
        # sol 2: precompute the matched-character positions once
        lst = [i for i, v in enumerate(S) if v == C]  # matched character list
        return [min(abs(i - j) for j in lst) for i in range(len(S))]
        # sol 3: two sweeps, left-to-right then right-to-left
        # runtime: 62ms
        n = len(S)
        res = [n] * n
        pos = -n
        for i in list(range(n)) + list(range(n))[::-1]:
            if S[i] == C:
                pos = i
            res[i] = min(res[i], abs(i - pos))
        return res
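# Quick check using the example from the problem statement above:
#   Solution().shortestToChar("loveleetcode", "e")
#   -> [3, 2, 1, 0, 1, 0, 0, 1, 2, 2, 1, 0]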
|
import numpy as np
from pathlib import Path
from sklearn.preprocessing import LabelEncoder
DATA_DIR = Path('data')
def create_cleaned_df(df, class_label_str):
"""Transform the wide-from Dataframe (df) from main.xlsx into one with
unique row names, values 0-1001 as the column names and a label column
containing the class label as an int.
Parameters
----------
df : pandas DataFrame
A DataFrame read in from main.xlsx. It must have columns 'Name',
'Analyte' and 'Concentration'.
class_label_str: str (len 2)
The class label for the dataframe. It must be two characters long and
one of 'Cu', 'Cd', 'Pb' or 'Sw'.
Returns
-------
pandas DataFrame
Wide-form dataframe with unique row and column names and a label column.
"""
# Replace spaces with underscores in Concentration column
df['Concentration'] = df['Concentration'].str.replace(' ', '_')
# Create new column (we will use this to extract unique names later on)
df['metal_concentration'] = df['Analyte'] + '_' + df['Concentration']
df = df.drop(columns=['Name', 'Analyte', 'Concentration'])
    # Build unique row names, set them as the index, and relabel the spectral columns 0-1001
df['metal_concentration'] = [f'{name}_{i}' for i, name in enumerate(df['metal_concentration'])]
df = df.set_index('metal_concentration')
df.index.name = None
df.columns = range(0, 1002)
class_label_to_int_mapping = get_class_label_to_int_mapping()
df['label'] = class_label_to_int_mapping[class_label_str]
return df
def get_class_label_to_int_mapping():
"""Create mapping from str labels to int labels (PyTorch expects labels
to be ints).
Returns
-------
Dict
Dict mapping str 2-letter labels to ints
"""
labels_str = ['Cu', 'Cd', 'Pb', 'Sw']
label_enc = LabelEncoder()
labels_int = label_enc.fit_transform(labels_str)
label_to_int_mapping = dict(zip(labels_str, labels_int))
return label_to_int_mapping
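# Note: LabelEncoder assigns integer codes in sorted label order, so the mapping
# returned above is {'Cd': 0, 'Cu': 1, 'Pb': 2, 'Sw': 3}; create_cleaned_df()
# writes these integers into the 'label' column.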
|
import sys
import json
import numpy as np
from tqdm import tqdm
import utils.load_info_for_model
from candidate_generator import random_walk
def evaluate(playlists, candidate_generator):
sizes = [10, 100, 500, 1000, 5000, 10000, 20000, 30000]
metrics = [[], [], [], [], [], [], [], []]
for i, playlist in enumerate(tqdm(playlists[:500])):
candidates = candidate_generator.create_candidates(playlist)
# print(candidates)
tracks = set(playlist["deleted_track"])
holdouts = len(playlist["deleted_track"])
candidates = [int(x in tracks) for x in candidates]
# print(len(candidates))
candidates = np.cumsum(candidates)
# print(len(candidates))
for metric, size in zip(metrics, sizes):
if size > len(candidates):
metric.append(0)
continue
value = candidates[size - 1] / np.min([holdouts, size])
metric.append(value)
with open(sys.argv[3], "w") as output_file:
output_file.write("{}\t{}\t{:.1f}\n".format(candidate_generator._candidate_size, prob, visits))
for metric, size in zip(metrics, sizes):
output_file.write("{}\t{:.8f}\n".format(size, np.average(metric)))
if __name__ == '__main__':
playlists = utils.load_info_for_model.load_lines_json(sys.argv[1])
print("TEST READ")
output = []
with open(sys.argv[2], "r") as file:
for line in tqdm(file):
output.append(json.loads(line))
train = output
print("TRAIN READ")
size = int(sys.argv[4])
prob = float(sys.argv[5])
visits = int(sys.argv[6])
print(size, prob, visits)
candidate_generator = random_walk.RandomWalkCandidates(prob, train, size, visits)
evaluate(playlists, candidate_generator)
|
from datasets import DataProvider
if __name__ == "__main__":
hparams = {
"dataset": {
'name': "Urban100",
'test_only': True,
'patch_size': 96,
'ext': 'sep',
'scale': 2,
"batch_size": 16,
'test_bz': 1,
'train_bz': 1,
'repeat': 2,
},
}
provider = DataProvider(hparams['dataset'])
train_ds = provider.train_dl
for x, y, _ in train_ds:
print(x.shape, y.shape)
for x, y, _ in provider.test_dl.dataset:
print(x.shape, y.shape)
|
from flask import Blueprint
from . import is_api
from methods import content
from methods.board import if_board_exist
from constants import messages
ai_content_blueprint = Blueprint('AI_Content', __name__)
@ai_content_blueprint.route('/test', methods=['GET'])
def test():
return if_board_exist('1. 머신러닝')
@ai_content_blueprint.route('/add_content', methods=['POST'])
@is_api(required_keys=['board_string', 'content'], input_type='json')
def add_content(data):
if not if_board_exist(data['board_string']):
return {"error": messages.no_exists.format('board')}, 404
status, message, status_code = content.add_content(**data)
if not status:
return {'error': message}, status_code
else:
return {'message': message}
@ai_content_blueprint.route('/delete_content/<int:content_id>', methods=['DELETE'])
def delete_content(content_id: int):
if not content.if_content_exist(content_id):
return {"error": messages.no_exists.format('content')}, 404
status, message, status_code = content.delete_content(content_id)
if not status:
return {'error': message}, status_code
else:
return {'message': message}
@ai_content_blueprint.route('/modify_content/<int:content_id>', methods=['PUT'])
@is_api(acceptable_keys=['board_string', 'content'], input_type='json')
def modify_content(data, content_id: int):
if not content.if_content_exist(content_id):
return {"error": messages.no_exists.format('content')}, 404
if not if_board_exist(data['board_string']):
return {"error": messages.no_exists.format('board')}, 404
status, message, status_code = content.modify_contents(content_id, **data)
if not status:
return {'error': message}, status_code
else:
return {'message': message}
@ai_content_blueprint.route('/get_content/<string:board_string>', methods=['GET'])
@is_api(acceptable_keys=[])
def get_content(data, board_string: str):
if not if_board_exist(board_string):
return {"error": messages.no_exists.format('board')}, 404
return content.get_contents(board_string)
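# Example request (illustrative; the URL prefix depends on where
# ai_content_blueprint is registered):
#   POST <prefix>/add_content with JSON {"board_string": "...", "content": "..."}
# responds with {"message": ...} on success, or {"error": ...} plus an HTTP
# error status (e.g. 404 when the board does not exist).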
|
from lgsf.councillors.scrapers import ModGovCouncillorScraper
class Scraper(ModGovCouncillorScraper):
base_url = "http://applications.huntingdonshire.gov.uk/moderngov/"
|
""" DEMO text adventure game """
from text_adventure_parser.text_util import list_to_text
from demo_game.engine import Engine
DIVIDER = "\n" + ("-" * 50) + "\n"
def main():
"""The game loop"""
game = Engine()
print(DIVIDER)
print("Welcome to the game brave adventurer!")
print("Type 'help' or '?' to see what words I understand.")
while True:
if game.redisplay_room:
print(DIVIDER)
print(game.current_room.description)
print("\nItems : " + list_to_text(game.current_room.items))
print("Exits : " + list_to_text(game.current_room.exits))
command = input("\nWhat now?\n")
game.handle_user_command(command)
print("\n" + game.response)
|
import numpy as np
from anndata import AnnData
from scanpy import settings  # assumed import locations; these were missing from this snippet
def mnn_concatenate(*adatas, geneset=None, k=20, sigma=1, n_jobs=None, **kwds):
"""Merge AnnData objects and correct batch effects using the MNN method.
Batch effect correction by matching mutual nearest neighbors [Haghverdi18]_
has been implemented as a function 'mnnCorrect' in the R package
`scran <https://bioconductor.org/packages/release/bioc/html/scran.html>`__
This function provides a wrapper to use the mnnCorrect function when
concatenating Anndata objects by using the Python-R interface `rpy2
<https://pypi.org/project/rpy2/>`__.
Parameters
----------
adatas : :class:`~anndata.AnnData`
AnnData matrices to concatenate with. Each dataset should generally be
log-transformed, e.g., log-counts. Datasets should have the same number
        of genes, or at least have all the genes in geneset.
geneset : `list`, optional (default: `None`)
A list specifying the genes with which distances between cells are
calculated in mnnCorrect, typically the highly variable genes.
All genes are used if no geneset provided. See the `scran manual
<https://bioconductor.org/packages/release/bioc/html/scran.html>`__ for
details.
    k : `int`, optional (default: 20)
See the `scran manual <https://bioconductor.org/packages/release/bioc/html/scran.html>`__
for details.
    sigma : `int`, optional (default: 1)
See the `scran manual <https://bioconductor.org/packages/release/bioc/html/scran.html>`__
for details.
n_jobs : `int` or `None` (default: `sc.settings.n_jobs`)
Number of jobs.
kwds :
Keyword arguments passed to Anndata.concatenate
Returns
-------
An :class:`~anndata.AnnData` object with MNN corrected data matrix X.
Example
-------
>>> adata1
AnnData object with n_obs × n_vars = 223 × 33694
obs: 'n_genes', 'percent_mito', 'n_counts', 'Sample', 'Donor', 'Tissue'
var: 'gene_ids', 'n_cells'
>>> adata2
AnnData object with n_obs × n_vars = 1457 × 33694
obs: 'n_genes', 'percent_mito', 'n_counts', 'Sample', 'Donor', 'Tissue'
var: 'gene_ids', 'n_cells'
>>> adata3 = sc.pp.mnnconcatenate(adata2, adata1, geneset = hvgs)
"""
from rpy2.robjects.packages import importr
from rpy2.robjects import numpy2ri
adata = AnnData.concatenate(*adatas, **kwds)
if geneset is None:
datamats = tuple([adata.X.T for adata in adatas])
else:
datamats = tuple([adata[:, geneset].X.T for adata in adatas])
n_jobs = settings.n_jobs if n_jobs is None else n_jobs
numpy2ri.activate()
rbase = importr('base')
rscran = importr('scran')
bpparam = importr('BiocParallel').MulticoreParam(
workers = n_jobs) if n_jobs > 1 else importr('BiocParallel').SerialParam()
mnn_result = rscran.mnnCorrect(*datamats, k=k, sigma=sigma, BPPARAM = bpparam)
corrected = np.asarray(rbase.do_call(rbase.cbind, mnn_result[0])).T
numpy2ri.deactivate()
    if geneset is not None:
        # keep only the geneset genes so the shape matches the corrected matrix
        adata = adata[:, geneset]
adata.X = corrected
return adata
|
import asyncio
import functools
import re
from . import constants as const
from .pool import MemcachePool
from .exceptions import ClientException, ValidationException
__all__ = ['Client']
def acquire(func):
@asyncio.coroutine
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
conn = yield from self._pool.acquire()
try:
return (yield from func(self, conn, *args, **kwargs))
except Exception as exc:
conn[0].set_exception(exc)
raise
finally:
self._pool.release(conn)
return wrapper
class Client(object):
def __init__(self, host, port=11211, *,
pool_size=2, pool_minsize=None, loop=None):
if not pool_minsize:
pool_minsize = pool_size
self._pool = MemcachePool(
host, port, minsize=pool_minsize, maxsize=pool_size, loop=loop)
# key supports ascii sans space and control chars
    # \x21 is !, right after space, and \x7e is ~, right before DEL
# also 1 <= len <= 250 as per the spec
_valid_key_re = re.compile(b'^[\x21-\x7e]{1,250}$')
def _validate_key(self, key):
if not isinstance(key, bytes): # avoid bugs subtle and otherwise
raise ValidationException('key must be bytes', key)
m = self._valid_key_re.match(key)
if m:
# in python re, $ matches either end of line or right before
# \n at end of line. We can't allow latter case, so
# making sure length matches is simplest way to detect
if len(m.group(0)) != len(key):
raise ValidationException('trailing newline', key)
else:
raise ValidationException('invalid key', key)
return key
@asyncio.coroutine
def _execute_simple_command(self, conn, raw_command):
response, line = bytearray(), b''
conn.writer.write(raw_command)
yield from conn.writer.drain()
while not line.endswith(b'\r\n'):
line = yield from conn.reader.readline()
response.extend(line)
return response[:-2]
@asyncio.coroutine
def close(self):
"""Closes the sockets if its open."""
yield from self._pool.clear()
@asyncio.coroutine
def _multi_get(self, conn, *keys, with_cas=True):
# req - get <key> [<key> ...]\r\n
# resp - VALUE <key> <flags> <bytes> [<cas unique>]\r\n
# <data block>\r\n (if exists)
# [...]
# END\r\n
if not keys:
return {}, {}
[self._validate_key(key) for key in keys]
if len(set(keys)) != len(keys):
raise ClientException('duplicate keys passed to multi_get')
cmd = b'gets ' if with_cas else b'get '
conn.writer.write(cmd + b' '.join(keys) + b'\r\n')
received = {}
cas_tokens = {}
line = yield from conn.reader.readline()
while line != b'END\r\n':
terms = line.split()
if terms[0] == b'VALUE': # exists
key = terms[1]
flags = int(terms[2])
length = int(terms[3])
if flags != 0:
raise ClientException('received non zero flags')
val = (yield from conn.reader.readexactly(length+2))[:-2]
if key in received:
raise ClientException('duplicate results from server')
received[key] = val
cas_tokens[key] = int(terms[4]) if with_cas else None
else:
raise ClientException('get failed', line)
line = yield from conn.reader.readline()
if len(received) > len(keys):
raise ClientException('received too many responses')
return received, cas_tokens
@acquire
def delete(self, conn, key):
"""Deletes a key/value pair from the server.
:param key: is the key to delete.
        :return: True if the value was deleted, or False to indicate
        that the item with this key was not found.
"""
assert self._validate_key(key)
command = b'delete ' + key + b'\r\n'
response = yield from self._execute_simple_command(conn, command)
if response not in (const.DELETED, const.NOT_FOUND):
raise ClientException('Memcached delete failed', response)
return response == const.DELETED
@acquire
def get(self, conn, key, default=None):
"""Gets a single value from the server.
:param key: ``bytes``, is the key for the item being fetched
:param default: default value if there is no value.
:return: ``bytes``, is the data for this specified key.
"""
values, _ = yield from self._multi_get(conn, key)
return values.get(key, default)
@acquire
def gets(self, conn, key, default=None):
"""Gets a single value from the server together with the cas token.
:param key: ``bytes``, is the key for the item being fetched
:param default: default value if there is no value.
        :return: ``bytes``, ``int`` tuple with the value and the cas token
"""
values, cas_tokens = yield from self._multi_get(
conn, key, with_cas=True)
return values.get(key, default), cas_tokens.get(key)
@acquire
def multi_get(self, conn, *keys):
"""Takes a list of keys and returns a list of values.
:param keys: ``list`` keys for the item being fetched.
:return: ``list`` of values for the specified keys.
:raises:``ValidationException``, ``ClientException``,
and socket errors
"""
values, _ = yield from self._multi_get(conn, *keys)
return tuple(values.get(key) for key in keys)
@acquire
def stats(self, conn, args=None):
"""Runs a stats command on the server."""
# req - stats [additional args]\r\n
# resp - STAT <name> <value>\r\n (one per result)
# END\r\n
if args is None:
args = b''
conn.writer.write(b''.join((b'stats ', args, b'\r\n')))
result = {}
resp = yield from conn.reader.readline()
while resp != b'END\r\n':
terms = resp.split()
if len(terms) == 2 and terms[0] == b'STAT':
result[terms[1]] = None
elif len(terms) == 3 and terms[0] == b'STAT':
result[terms[1]] = terms[2]
elif len(terms) >= 3 and terms[0] == b'STAT':
result[terms[1]] = b' '.join(terms[2:])
else:
raise ClientException('stats failed', resp)
resp = yield from conn.reader.readline()
return result
@asyncio.coroutine
def _storage_command(self, conn, command, key, value,
flags=0, exptime=0, cas=None):
# req - set <key> <flags> <exptime> <bytes> [noreply]\r\n
# <data block>\r\n
# resp - STORED\r\n (or others)
# req - set <key> <flags> <exptime> <bytes> <cas> [noreply]\r\n
# <data block>\r\n
# resp - STORED\r\n (or others)
# typically, if val is > 1024**2 bytes server returns:
# SERVER_ERROR object too large for cache\r\n
# however custom-compiled memcached can have different limit
# so, we'll let the server decide what's too much
assert self._validate_key(key)
if not isinstance(exptime, int):
raise ValidationException('exptime not int', exptime)
elif exptime < 0:
raise ValidationException('exptime negative', exptime)
args = [str(a).encode('utf-8') for a in (flags, exptime, len(value))]
_cmd = b' '.join([command, key] + args)
if cas:
_cmd += b' ' + str(cas).encode('utf-8')
cmd = _cmd + b'\r\n' + value + b'\r\n'
resp = yield from self._execute_simple_command(conn, cmd)
if resp not in (
const.STORED, const.NOT_STORED, const.EXISTS, const.NOT_FOUND):
            raise ClientException('storage command {} failed'.format(command), resp)
return resp == const.STORED
@acquire
def set(self, conn, key, value, exptime=0):
"""Sets a key to a value on the server
with an optional exptime (0 means don't auto-expire)
:param key: ``bytes``, is the key of the item.
:param value: ``bytes``, data to store.
:param exptime: ``int``, is expiration time. If it's 0, the
item never expires.
:return: ``bool``, True in case of success.
"""
flags = 0 # TODO: fix when exception removed
resp = yield from self._storage_command(
conn, b'set', key, value, flags, exptime)
return resp
@acquire
def cas(self, conn, key, value, cas_token, exptime=0):
"""Sets a key to a value on the server
with an optional exptime (0 means don't auto-expire)
only if value hasn't change from first retrieval
:param key: ``bytes``, is the key of the item.
:param value: ``bytes``, data to store.
:param exptime: ``int``, is expiration time. If it's 0, the
item never expires.
:param cas_token: ``int``, unique cas token retrieve from previous
``gets``
:return: ``bool``, True in case of success.
"""
flags = 0 # TODO: fix when exception removed
resp = yield from self._storage_command(
conn, b'cas', key, value, flags, exptime, cas=cas_token)
return resp
@acquire
def add(self, conn, key, value, exptime=0):
"""Store this data, but only if the server *doesn't* already
hold data for this key.
:param key: ``bytes``, is the key of the item.
:param value: ``bytes``, data to store.
:param exptime: ``int`` is expiration time. If it's 0, the
item never expires.
:return: ``bool``, True in case of success.
"""
flags = 0 # TODO: fix when exception removed
return (yield from self._storage_command(
conn, b'add', key, value, flags, exptime))
@acquire
def replace(self, conn, key, value, exptime=0):
"""Store this data, but only if the server *does*
already hold data for this key.
:param key: ``bytes``, is the key of the item.
:param value: ``bytes``, data to store.
:param exptime: ``int`` is expiration time. If it's 0, the
item never expires.
:return: ``bool``, True in case of success.
"""
flags = 0 # TODO: fix when exception removed
return (yield from self._storage_command(
conn, b'replace', key, value, flags, exptime))
@acquire
def append(self, conn, key, value, exptime=0):
"""Add data to an existing key after existing data
:param key: ``bytes``, is the key of the item.
:param value: ``bytes``, data to store.
:param exptime: ``int`` is expiration time. If it's 0, the
item never expires.
:return: ``bool``, True in case of success.
"""
flags = 0 # TODO: fix when exception removed
return (yield from self._storage_command(
conn, b'append', key, value, flags, exptime))
@acquire
def prepend(self, conn, key, value, exptime=0):
"""Add data to an existing key before existing data
:param key: ``bytes``, is the key of the item.
:param value: ``bytes``, data to store.
:param exptime: ``int`` is expiration time. If it's 0, the
item never expires.
:return: ``bool``, True in case of success.
"""
flags = 0 # TODO: fix when exception removed
return (yield from self._storage_command(
conn, b'prepend', key, value, flags, exptime))
@asyncio.coroutine
def _incr_decr(self, conn, command, key, delta):
delta_byte = str(delta).encode('utf-8')
cmd = b' '.join([command, key, delta_byte]) + b'\r\n'
resp = yield from self._execute_simple_command(conn, cmd)
if not resp.isdigit() or resp == const.NOT_FOUND:
raise ClientException(
'Memcached {} command failed'.format(str(command)), resp)
return int(resp) if resp.isdigit() else None
@acquire
def incr(self, conn, key, increment=1):
"""Command is used to change data for some item in-place,
incrementing it. The data for the item is treated as decimal
representation of a 64-bit unsigned integer.
:param key: ``bytes``, is the key of the item the client wishes
to change
:param increment: ``int``, is the amount by which the client
wants to increase the item.
:return: ``int``, new value of the item's data,
after the increment or ``None`` to indicate the item with
this value was not found
"""
assert self._validate_key(key)
resp = yield from self._incr_decr(
conn, b'incr', key, increment)
return resp
@acquire
def decr(self, conn, key, decrement=1):
"""Command is used to change data for some item in-place,
decrementing it. The data for the item is treated as decimal
representation of a 64-bit unsigned integer.
:param key: ``bytes``, is the key of the item the client wishes
to change
:param decrement: ``int``, is the amount by which the client
wants to decrease the item.
:return: ``int`` new value of the item's data,
        after the decrement or ``None`` to indicate the item with
this value was not found
"""
assert self._validate_key(key)
resp = yield from self._incr_decr(
conn, b'decr', key, decrement)
return resp
@acquire
def touch(self, conn, key, exptime):
"""The command is used to update the expiration time of
an existing item without fetching it.
:param key: ``bytes``, is the key to update expiration time
:param exptime: ``int``, is expiration time. This replaces the existing
expiration time.
:return: ``bool``, True in case of success.
"""
assert self._validate_key(key)
_cmd = b' '.join([b'touch', key, str(exptime).encode('utf-8')])
cmd = _cmd + b'\r\n'
resp = yield from self._execute_simple_command(conn, cmd)
if resp not in (const.TOUCHED, const.NOT_FOUND):
raise ClientException('Memcached touch failed', resp)
return resp == const.TOUCHED
@acquire
def version(self, conn):
"""Current version of the server.
        :return: ``bytes``, memcached version of the current server.
"""
command = b'version\r\n'
response = yield from self._execute_simple_command(
conn, command)
if not response.startswith(const.VERSION):
raise ClientException('Memcached version failed', response)
version, number = response.split()
return number
@acquire
def flush_all(self, conn):
"""Its effect is to invalidate all existing items immediately"""
command = b'flush_all\r\n'
response = yield from self._execute_simple_command(
conn, command)
if const.OK != response:
raise ClientException('Memcached flush_all failed', response)
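# Hedged usage sketch (not part of the original module; assumes a memcached server
# listening on localhost:11211). It exercises the coroutine API defined above using
# the same pre-3.5 ``yield from`` style as the rest of this file.
if __name__ == '__main__':
    @asyncio.coroutine
    def _demo():
        client = Client('localhost')
        yield from client.set(b'greeting', b'hello', exptime=60)  # store for 60 seconds
        value = yield from client.get(b'greeting')                # -> b'hello'
        print(value)
        yield from client.close()
    asyncio.get_event_loop().run_until_complete(_demo())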
|
class Matrix:
def __init__(self, data):
self.matrix = []
self.m = 0 #number of rows
self.n = 0 #number of columns
if ";" in data: #if ";" exists, there is more than 1 row in the matrix
self.m = len(data.split(";")) #count the number of rows in the input
self.n = len(data[0:data.find(";")].split()) #count the number of element per row
for data_row in data.split(";"): #append each splited row into the matrix
self.matrix.append([int(i) for i in data_row.split()])
#append each element into a list (row) and then to matrix
else: #input has only 1 row
self.m = 1; #set number of rows to 1
self.n = len(data.split()) #split data into single elements and count how many there are
self.matrix.append([int(i) for i in data.split()]) #append the single row to matrix
def swap_rows(self, index1, index2):
        if index1 < self.m and index2 < self.m: #only swap when both indexes are valid row numbers
temp_row = self.matrix[index1]
self.matrix[index1] = self.matrix[index2]
self.matrix[index2] = temp_row
    def row_addition(self, index1, index2): #add row at index2 into row at index1
for i in range(0, self.n): #for each element from 0 to number of columns
self.matrix[index1][i] += self.matrix[index2][i] #add current element in row1 to current element in row2
def row_scale_up(self, row, scalar): #scale up a specific row by a given scalar
for i in range(0, self.n):
self.matrix[row][i] *= scalar #multiply current element by scalar
def row_scale_down(self, row, scalar): #scale down a specific row by given scalar
for i in range(0, self.n):
self.matrix[row][i] /= scalar #divide current element by scalar
def print_matrix(self): #print matrix, only works well with 1 digit numbers
out = "" #initialize out string for matrix contents
for i, row in enumerate(self.matrix): #iterate through m size
for j in range(0, self.n): #iterate through each element in row
out += str(row[j]) + " " #append row element to out string, add space
print(out) #print the matrix, create new line
out = "" #clear out string
|
'''
Created on May 26, 2017
@author: Tim
'''
import os
from os.path import join
from praatio import tgio
from praatio import audioio
from praatio import praatio_scripts
root = r"C:\Users\Tim\Dropbox\workspace\praatIO\examples\files"
audioFN = join(root, "mary.wav")
tgFN = join(root, "mary.TextGrid")
outputPath = join(root, "splice_example")
outputAudioFN = join(outputPath, "barry_spliced.wav")
outputTGFN = join(outputPath, "barry_spliced.TextGrid")
tierName = "phone"
if not os.path.exists(outputPath):
os.mkdir(outputPath)
# Find the region to replace and the region that we'll replace it with
tg = tgio.openTextgrid(tgFN)
tier = tg.tierDict[tierName]
mEntry = tier.entryList[tier.find('m')[0]]
bEntry = tier.entryList[tier.find('b')[0]]
sourceAudioObj = audioio.openAudioFile(audioFN)
mAudioObj = sourceAudioObj.getSubsegment(mEntry[0], mEntry[1])
bAudioObj = sourceAudioObj.getSubsegment(bEntry[0], bEntry[1])
# Replace 'm' with 'b'
audioObj, tg = praatio_scripts.audioSplice(sourceAudioObj,
bAudioObj,
tg,
tierName,
"b",
mEntry[0],
mEntry[1])
# Replace 'b' with 'm'
# The times are now different, so we have to get them again
bEntry = tg.tierDict[tierName].entryList[tier.find('b')[0]]
audioObj, tg = praatio_scripts.audioSplice(audioObj,
mAudioObj,
tg,
tierName,
"m",
bEntry[0],
bEntry[1])
audioObj.save(outputAudioFN)
tg.save(outputTGFN)
|
from django.core.files.uploadedfile import UploadedFile
from formtools.wizard.storage.exceptions import NoFileStorageConfigured
from formtools.wizard.storage.session import SessionStorage
class MultiFileSessionStorage(SessionStorage):
"""Custom formtools storage to handle multiple file uploads.
The `formtools` extension provides a wizard view that is used to split
forms into several steps.
The WizardView subclass uses a Storage utility class to store data between
steps. Unfortunately, this class does not handle the case
when multiple files are uploaded for a single input[type=file] field with
`multiple=true`.
See https://github.com/jazzband/django-formtools/issues/98
This class is a tweaked version to handle such a use case.
    Note: it closely resembles the solution available here:
https://github.com/astahlhofen/formtools-wizard-multiple-fileupload
"""
def set_step_files(self, step, files):
if files and not self.file_storage:
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads."
)
if step not in self.data[self.step_files_key]:
self.data[self.step_files_key][step] = {}
if not files:
return
for field in files.keys():
field_files = files.getlist(field)
file_dicts = []
for field_file in field_files:
tmp_filename = self.file_storage.save(field_file.name, field_file)
file_dict = {
"tmp_name": tmp_filename,
"name": field_file.name,
"content_type": field_file.content_type,
"size": field_file.size,
"charset": field_file.charset,
}
file_dicts.append(file_dict)
self.data[self.step_files_key][step][field] = file_dicts
def get_step_files(self, step):
wizard_files = self.data[self.step_files_key].get(step, {})
if wizard_files and not self.file_storage:
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads."
)
files = {}
for field, field_files in wizard_files.items():
files[field] = []
for field_dict in field_files:
field_dict = field_dict.copy()
tmp_name = field_dict.pop("tmp_name")
if (step, field) not in self._files:
self._files[(step, field)] = UploadedFile(
file=self.file_storage.open(tmp_name), **field_dict
)
files[field] = self._files[(step, field)]
return files or None
def reset(self):
# Store unused temporary file names in order to delete them
# at the end of the response cycle through a callback attached in
# `update_response`.
wizard_files = self.data[self.step_files_key]
for step_files in wizard_files.values():
for step_field_files in step_files.values():
for step_file in step_field_files:
self._tmp_files.append(step_file["tmp_name"])
self.init_data()
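# Hedged usage sketch (the module path "myapp.storage", the form and the upload
# directory below are hypothetical, not taken from the original project): a wizard
# view opts into this storage via `storage_name` and must define `file_storage` so
# uploads persist between steps. Depending on the Django version, the file field
# would also need a widget that allows multiple selection; that detail is omitted.
from django import forms
from django.core.files.storage import FileSystemStorage
from formtools.wizard.views import WizardView
class UploadStepForm(forms.Form):
    attachments = forms.FileField(required=False)
class UploadWizard(WizardView):
    storage_name = "myapp.storage.MultiFileSessionStorage"
    file_storage = FileSystemStorage(location="/tmp/wizard-uploads")
    form_list = [UploadStepForm]
    def done(self, form_list, **kwargs):
        # Collected data/files would be processed here; left out of this sketch.
        raise NotImplementedError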
|
kernel_forward = '''
extern "C"
__global__ void roi_forward(const float* const bottom_data,const float* const bottom_rois,
float* top_data, int* argmax_data,
const double spatial_scale,const int channels,const int height,
const int width, const int pooled_height,
const int pooled_width,const int NN
){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx>=NN)
return;
const int pw = idx % pooled_width;
const int ph = (idx / pooled_width) % pooled_height;
const int c = (idx / pooled_width / pooled_height) % channels;
int num = idx / pooled_width / pooled_height / channels;
const int roi_batch_ind = bottom_rois[num * 5 + 0];
const int roi_start_w = round(bottom_rois[num * 5 + 1] * spatial_scale);
const int roi_start_h = round(bottom_rois[num * 5 + 2] * spatial_scale);
const int roi_end_w = round(bottom_rois[num * 5 + 3] * spatial_scale);
const int roi_end_h = round(bottom_rois[num * 5 + 4] * spatial_scale);
// Force malformed ROIs to be 1x1
const int roi_width = max(roi_end_w - roi_start_w + 1, 1);
const int roi_height = max(roi_end_h - roi_start_h + 1, 1);
const float bin_size_h = static_cast<float>(roi_height)
/ static_cast<float>(pooled_height);
const float bin_size_w = static_cast<float>(roi_width)
/ static_cast<float>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<float>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<float>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<float>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<float>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
float maxval = is_empty ? 0 : -1E+37;
// If nothing is pooled, argmax=-1 causes nothing to be backprop'd
int maxidx = -1;
const int data_offset = (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[data_offset + bottom_index] > maxval) {
maxval = bottom_data[data_offset + bottom_index];
maxidx = bottom_index;
}
}
}
top_data[idx]=maxval;
argmax_data[idx]=maxidx;
}
'''
kernel_backward = '''
extern "C"
__global__ void roi_backward(const float* const top_diff,
const int* const argmax_data,const float* const bottom_rois,
float* bottom_diff, const int num_rois,
const double spatial_scale, int channels,
int height, int width, int pooled_height,
int pooled_width,const int NN)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Important: use >= instead of > here
if(idx>=NN)
return;
int w = idx % width;
int h = (idx / width) % height;
int c = (idx/ (width * height)) % channels;
int num = idx / (width * height * channels);
float gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
// Skip if ROI's batch index doesn't match num
if (num != static_cast<int>(bottom_rois[roi_n * 5])) {
continue;
}
int roi_start_w = round(bottom_rois[roi_n * 5 + 1]
* spatial_scale);
int roi_start_h = round(bottom_rois[roi_n * 5 + 2]
* spatial_scale);
int roi_end_w = round(bottom_rois[roi_n * 5 + 3]
* spatial_scale);
int roi_end_h = round(bottom_rois[roi_n * 5 + 4]
* spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height
* pooled_width;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
float bin_size_h = static_cast<float>(roi_height)
/ static_cast<float>(pooled_height);
float bin_size_w = static_cast<float>(roi_width)
/ static_cast<float>(pooled_width);
int phstart = floor(static_cast<float>(h - roi_start_h)
/ bin_size_h);
int phend = ceil(static_cast<float>(h - roi_start_h + 1)
/ bin_size_h);
int pwstart = floor(static_cast<float>(w - roi_start_w)
/ bin_size_w);
int pwend = ceil(static_cast<float>(w - roi_start_w + 1)
/ bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int index_ = ph * pooled_width + pw + offset;
if (argmax_data[index_] == (h * width + w)) {
gradient += top_diff[index_];
}
}
}
}
bottom_diff[idx] = gradient;
}
'''
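# Hedged launch sketch (not part of the original file): the strings above are plain
# CUDA C, so one way to run the forward kernel is through cupy.RawKernel. The dtypes
# and shapes assumed here (float32 NCHW feature map, (R, 5) ROI matrix) are
# assumptions about how the kernel is meant to be fed, not something this file states.
import cupy as cp
import numpy as np
_roi_forward = cp.RawKernel(kernel_forward, "roi_forward")
def roi_pool_forward(bottom_data, bottom_rois, spatial_scale, pooled_h, pooled_w):
    # bottom_data: cp.float32 array of shape (batch, channels, height, width)
    # bottom_rois: cp.float32 array of shape (num_rois, 5) -> (batch_idx, x1, y1, x2, y2)
    n_rois, channels = bottom_rois.shape[0], bottom_data.shape[1]
    height, width = bottom_data.shape[2], bottom_data.shape[3]
    total = n_rois * channels * pooled_h * pooled_w
    top_data = cp.empty((n_rois, channels, pooled_h, pooled_w), dtype=cp.float32)
    argmax_data = cp.empty((n_rois, channels, pooled_h, pooled_w), dtype=cp.int32)
    threads = 1024
    blocks = (total + threads - 1) // threads
    _roi_forward(
        (blocks,), (threads,),
        (bottom_data, bottom_rois, top_data, argmax_data,
         np.float64(spatial_scale), np.int32(channels), np.int32(height),
         np.int32(width), np.int32(pooled_h), np.int32(pooled_w), np.int32(total)))
    return top_data, argmax_data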
|
from drf_yasg.openapi import Parameter
from drf_yasg.utils import swagger_auto_schema
from rest_framework import authentication, permissions, viewsets, mixins
from rest_framework.decorators import action
from rest_framework.response import Response
from core.models import Notebook, Member
from notebook.serializers.folder import FolderSerializer
from notebook.serializers.member import MemberSerializer
from notebook.serializers.notebook import NotebookSerializer
from notebook.serializers.search import SearchResult, SearchResultSerializer
class NotebookRolePermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj: Notebook):
if request.method in permissions.SAFE_METHODS:
return True
if view.action == 'destroy':
if request.user != obj.owner:
return False
else:
membership = obj.members.get(user=request.user)
if membership.role != Member.Roles.ADMIN:
return False
return True
class NotebookViewSet(viewsets.GenericViewSet, mixins.CreateModelMixin, mixins.ListModelMixin,
mixins.RetrieveModelMixin, mixins.DestroyModelMixin, mixins.UpdateModelMixin):
serializer_class = NotebookSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated, NotebookRolePermission)
queryset = Notebook.objects.all()
def get_queryset(self):
if self.request.user.is_anonymous:
return self.queryset
return Notebook.objects.filter(member__user=self.request.user,
member__is_active=True)
@swagger_auto_schema(
responses={200: MemberSerializer(many=True)}
)
@action(detail=True, methods=['get'])
def members(self, request, pk=None):
instance: Notebook = self.get_object()
serializer = MemberSerializer(instance.members.filter(is_active=True), many=True)
return Response(serializer.data)
@swagger_auto_schema(
responses={200: FolderSerializer()}
)
@action(detail=True, methods=['get'])
def root(self, request, pk=None):
instance: Notebook = self.get_object()
serializer = FolderSerializer(instance.root_folder)
return Response(serializer.data)
@swagger_auto_schema(
manual_parameters=[Parameter('q', 'query', required=True, type='string',
                                     description='search _query_ (can also be passed via the `query` parameter)')],
responses={200: SearchResultSerializer()}
)
@action(detail=True, methods=['get'])
def search(self, request, pk=None):
instance: Notebook = self.get_object()
query = request.query_params.get('q', None) or request.query_params.get('query', '')
serializer = SearchResultSerializer(SearchResult(instance, query))
return Response(serializer.data)
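# Hedged wiring sketch (the URLconf placement is an assumption, not from the original
# project): registering the viewset with a DRF router exposes the custom actions as
# /notebooks/<pk>/members/, /notebooks/<pk>/root/ and /notebooks/<pk>/search/?q=...
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'notebooks', NotebookViewSet, basename='notebook')
urlpatterns = router.urls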
|
from pySDC.core.Errors import TransferError
from pySDC.core.SpaceTransfer import space_transfer
from pySDC.implementations.datatype_classes.parallel_mesh import parallel_mesh, parallel_imex_mesh
from mpi4py_fft import PFFT, newDistArray
import numpy as np
class fft_to_fft(space_transfer):
"""
    Custom base_transfer class, implements Transfer.py
    This implementation can restrict and prolong between parallel meshes (PMESH datatypes) with FFT for periodic boundaries
"""
def __init__(self, fine_prob, coarse_prob, params):
"""
Initialization routine
Args:
fine_prob: fine problem
coarse_prob: coarse problem
params: parameters for the transfer operators
"""
# invoke super initialization
super(fft_to_fft, self).__init__(fine_prob, coarse_prob, params)
assert self.fine_prob.params.spectral == self.coarse_prob.params.spectral
self.spectral = self.fine_prob.params.spectral
Nf = list(self.fine_prob.fft.global_shape())
Nc = list(self.coarse_prob.fft.global_shape())
self.ratio = [int(nf / nc) for nf, nc in zip(Nf, Nc)]
axes = tuple(range(len(Nf)))
self.fft_pad = PFFT(self.coarse_prob.params.comm, Nc, padding=self.ratio, axes=axes,
dtype=self.coarse_prob.fft.dtype(False),
slab=True)
def restrict(self, F):
"""
Restriction implementation
Args:
F: the fine level data (easier to access than via the fine attribute)
"""
if isinstance(F, parallel_mesh):
if self.spectral:
G = self.coarse_prob.dtype_u(self.coarse_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpF = newDistArray(self.fine_prob.fft, False)
tmpF = self.fine_prob.fft.backward(F[..., i], tmpF)
tmpG = tmpF[::int(self.ratio[0]), ::int(self.ratio[1])]
G[..., i] = self.coarse_prob.fft.forward(tmpG, G[..., i])
else:
tmpF = self.fine_prob.fft.backward(F)
tmpG = tmpF[::int(self.ratio[0]), ::int(self.ratio[1])]
G[:] = self.coarse_prob.fft.forward(tmpG, G)
else:
G = self.coarse_prob.dtype_u(self.coarse_prob.init)
G[:] = F[::int(self.ratio[0]), ::int(self.ratio[1])]
else:
raise TransferError('Unknown data type, got %s' % type(F))
return G
def prolong(self, G):
"""
Prolongation implementation
Args:
G: the coarse level data (easier to access than via the coarse attribute)
"""
if isinstance(G, parallel_mesh):
if self.spectral:
F = self.fine_prob.dtype_u(self.fine_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpF = self.fft_pad.backward(G[..., i])
F[..., i] = self.fine_prob.fft.forward(tmpF, F[..., i])
else:
tmpF = self.fft_pad.backward(G)
F[:] = self.fine_prob.fft.forward(tmpF, F)
else:
F = self.fine_prob.dtype_u(self.fine_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
G_hat = self.coarse_prob.fft.forward(G[..., i])
F[..., i] = self.fft_pad.backward(G_hat, F[..., i])
else:
G_hat = self.coarse_prob.fft.forward(G)
F[:] = self.fft_pad.backward(G_hat, F)
elif isinstance(G, parallel_imex_mesh):
if self.spectral:
F = self.fine_prob.dtype_f(self.fine_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
tmpF = self.fft_pad.backward(G.impl[..., i])
F.impl[..., i] = self.fine_prob.fft.forward(tmpF, F.impl[..., i])
tmpF = self.fft_pad.backward(G.expl[..., i])
F.expl[..., i] = self.fine_prob.fft.forward(tmpF, F.expl[..., i])
else:
tmpF = self.fft_pad.backward(G.impl)
F.impl[:] = self.fine_prob.fft.forward(tmpF, F.impl)
tmpF = self.fft_pad.backward(G.expl)
F.expl[:] = self.fine_prob.fft.forward(tmpF, F.expl)
else:
F = self.fine_prob.dtype_f(self.fine_prob.init)
if hasattr(self.fine_prob, 'ncomp'):
for i in range(self.fine_prob.ncomp):
G_hat = self.coarse_prob.fft.forward(G.impl[..., i])
F.impl[..., i] = self.fft_pad.backward(G_hat, F.impl[..., i])
G_hat = self.coarse_prob.fft.forward(G.expl[..., i])
F.expl[..., i] = self.fft_pad.backward(G_hat, F.expl[..., i])
else:
G_hat = self.coarse_prob.fft.forward(G.impl)
F.impl[:] = self.fft_pad.backward(G_hat, F.impl)
G_hat = self.coarse_prob.fft.forward(G.expl)
F.expl[:] = self.fft_pad.backward(G_hat, F.expl)
else:
raise TransferError('Unknown data type, got %s' % type(G))
return F
|
import utils
import functions as func
from commands.base import Cmd
help_text = [
[
("Usage:", "<PREFIX><COMMAND> `NEW WORD`"),
("Description:",
"If you use `@@game_name@@` in your channel name templates, when no game is detected or "
"there are multiple games being played, the word \"General\" is used instead of any game name.\n"
"Use this command to change \"General\" to something else, like \"Party\", \"Lounge\", etc."),
("Example:", "<PREFIX><COMMAND> Lounge"),
]
]
async def execute(ctx, params):
params_str = ' '.join(params)
guild = ctx['guild']
settings = ctx['settings']
author = ctx['message'].author
new_word = params_str.replace('\n', ' ') # Can't have newlines in channel name.
new_word = utils.strip_quotes(new_word)
previous_word = "General" if 'general' not in settings else func.esc_md(settings['general'])
if not new_word:
return False, ("You need to define a new word, e.g. `{}general Lounge` to make "
"**Lounge** shown instead of **{}**.".format(ctx['print_prefix'], previous_word))
settings['general'] = new_word
utils.set_serv_settings(guild, settings)
e_new_word = func.esc_md(new_word)
await func.server_log(
guild,
"🎮 {} (`{}`) set the server's \"General\" word to **{}**".format(
func.user_hash(author), author.id, e_new_word
), 2, settings)
return True, ("Done! From now on I'll use **{}** instead of **{}**.".format(e_new_word, previous_word))
command = Cmd(
execute=execute,
help_text=help_text,
params_required=1,
gold_required=True,
admin_required=True,
)
|
import os, sys, subprocess, getpass, paramiko, re
from collections import OrderedDict
# Log helpers
class colors:
RED = "\033[31m"
GREEN = "\033[32m"
YELLOW = "\033[33m"
BLUE = "\033[34m"
PURPLE = "\033[35m"
CYAN = "\033[36m"
GRAY = "\033[90m"
ENDC = "\033[0m"
TAG_PROMPT = colors.GREEN + "[ PROMPT ]" + colors.ENDC
TAG_INFO = colors.YELLOW + "[ INFO ]" + colors.ENDC
TAG_DEBUG = colors.BLUE + "[ DEBUG ]" + colors.ENDC
TAG_ERROR = colors.RED + "[ ERROR ]" + colors.ENDC
class Log:
def log(self, tag, *args):
message = ""
for message_part in args:
message += "{0} ".format(message_part)
print("{0} {1}".format(tag, message))
def p(self, *args):
self.log(TAG_PROMPT, *args)
def i(self, *args):
self.log(TAG_INFO, *args)
def d(self, *args):
self.log(TAG_DEBUG, *args)
def e(self, *args):
self.log(TAG_ERROR, *args)
def std(self, prefix, std, print_out = True):
out = ""
for line in iter(lambda: std.readline(2048), ""):
line = "{0}{1}".format(prefix, line)
out += line
if print_out:
print(line, end="")
return out
def stds(self, prefix, stdout, stderr):
self.std(prefix, stderr)
self.std(prefix, stdout)
log = Log()
# Get Edison credentials
edison = {}
edison["host"] = os.environ.get("EDISON_HOST") or "192.168.2.15"
edison["user"] = os.environ.get("EDISON_USER") or "root"
edison["password"] = os.environ.get("EDISON_PASSWORD")
skip_cli_prompt = os.environ.get("EDISON_CLI_SKIP_PROMPT") == "1"
# Prompt helpers
def should_prompt_for(env_key):
if skip_cli_prompt:
return False
elif os.environ.get(env_key) == None:
return True
return False
def prompt_for(env_key, key, display_value = True):
if should_prompt_for(env_key):
log.p("Enter Edison {0}".format(key))
edison[key] = input() or edison[key]
elif display_value == False:
log.p("Edison {0} is set".format(key))
else:
log.p("Edison {0} is".format(key), edison[key])
def traverse_commands(commands, command_defs):
for command_def in command_defs:
if type(command_defs[command_def]) == OrderedDict:
if command_def in commands:
for deep_command in command_defs[command_def]:
traverse_commands(deep_command, command_defs[command_def])
else:
traverse_commands(commands, command_defs[command_def])
elif command_def in commands:
command_defs[command_def]()
# Command helpers
prompted = False
def prompt_for_edison_info():
global prompted
if prompted == False:
prompt_for("EDISON_HOST", "host")
prompt_for("EDISON_USER", "user")
prompt_for("EDISON_PASSWORD", "password", False)
prompted = True
ssh = None
def setup_ssh_client():
global ssh
if ssh == None:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.load_host_keys(os.path.expanduser(os.path.join("~", ".ssh", "known_hosts")))
ssh.connect(edison["host"], username=edison["user"], password=edison["password"])
sshpass_checked = False
def check_sshpass():
global sshpass_checked
if sshpass_checked == False:
# Check for ssh-pass
install_sshpass = False
try:
sshpass_p = subprocess.Popen(["sshpass", "-V"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
sshpass_p.communicate()
install_sshpass = sshpass_p.returncode != 0
except:
install_sshpass = True
if install_sshpass:
log.i("Installing \"sshpass\"")
os.system("sudo apt-get install sshpass")
        sshpass_checked = True
def run_ssh_command(command):
run_helpers("edison_info", "check_sshpass")
return os.system("sshpass -p {0} ssh {1}@{2} \"{3}\"".format(edison["password"], edison["user"], edison["host"], command))
def run_helpers(*args):
if "edison_info" in args:
prompt_for_edison_info()
if "ssh_client" in args:
setup_ssh_client()
if "check_sshpass":
check_sshpass()
# Commands
def command_clean():
run_helpers("edison_info", "ssh_client")
log.i("Cleaning \"imc-server\"")
run_ssh_command("rm -rf /home/root/imc-server")
def command_push():
run_helpers("edison_info", "ssh_client")
log.i("Pushing \"imc-server\" to Edison")
sftp = ssh.open_sftp()
for (dir_path, dir_names, filenames) in os.walk("imc-server"):
for filename in filenames:
local_path = os.path.join(dir_path, filename)
server_path = os.path.join("/home/root", local_path)
try:
sftp.mkdir("/home/root/{0}".format(dir_path))
except:
pass
try:
sftp.put(local_path, server_path)
except FileNotFoundError:
log.e("Failed to push \"{0}\", does not exist".format(local_path))
sftp.close()
log.i("Push success")
def command_compile():
run_helpers("edison_info", "ssh_client")
log.i("Compiling \"imc-server\"")
compile_exit_code = run_ssh_command("mkdir -p /home/root/imc-server/build && cd /home/root/imc-server/build && /home/root/cmake-3.3.1-Linux-i386/bin/cmake .. && make")
if compile_exit_code != 0:
log.e("Failed to compile \"imc-server\"")
sys.exit(0)
def command_run():
run_helpers("edison_info", "ssh_client")
log.i("Running \"imc-server\"")
print("--------------------")
run_ssh_command("cd /home/root/imc-server/build && ./imc-server")
def command_kill():
log.i("Killing \"imc-server\"")
netstat_p = subprocess.Popen(
[
"sshpass",
"-p",
edison["password"],
"ssh",
"{0}@{1}".format(edison["user"], edison["host"]),
"netstat",
"-lpn"
],
shell=False,
stdout=subprocess.PIPE)
(netstat_out, netstat_err) = netstat_p.communicate()
netstat_out = netstat_out.decode("utf-8")
pid_exp = re.compile("(\d{0,4})\/imc-server")
pid_match = re.search(pid_exp, netstat_out)
try:
pid = pid_match.group(1)
run_ssh_command("kill -9 {0}".format(pid))
log.i("\"imc-server\" killed successfully")
except:
log.i("\"imc-server\" not running")
def command_ssh():
run_helpers("edison_info", "check_sshpass")
log.i("Accessing Edison via SSH")
run_ssh_command("")
def command_serial():
log.i("Accessing Edison via serial")
if not os.path.isfile("/dev/ttyUSB0"):
log.e("Can not find Edison on \"/dev/ttyUSB0\"")
return
os.system("sudo screen /dev/ttyUSB0 115200")
# Get commands
commands = sys.argv
commands.pop(0)
command_defs = OrderedDict([
("clean", command_clean),
("build",
OrderedDict([
("push", command_push),
("compile", command_compile),
("run", command_run),
("kill", command_kill)
])
),
("ssh", command_ssh),
("serial", command_serial)
])
traverse_commands(commands, command_defs)
if ssh != None:
ssh.close()
|
#!/usr/bin/env python
import json
import subprocess
def run(apps_path, out_path):
# Generate www content
cmd = ["python", "-m", "trame.tools.www", "--output", out_path]
subprocess.run(cmd)
# Generate app files index.html => {app_name}.html
with open(apps_path, "r") as rf:
apps_dict = json.load(rf) # noqa
for app_name, config in apps_dict.items():
# handle custom modules for www
web_modules = config.get("www_modules")
if web_modules is not None:
cmd = [
"python",
"-m",
"trame.tools.www",
"--output",
out_path,
*web_modules,
]
subprocess.run(cmd)
# Create app.html file from index.html
cmd = [
"python",
"-m",
"trame.tools.app",
"--input",
out_path,
"--name",
app_name,
]
subprocess.run(cmd)
if __name__ == "__main__":
apps_path = "/opt/trame/apps.json"
out_path = "/deploy/server/www"
run(apps_path, out_path)
|
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Bar, Timeline, Pie
df = pd.read_csv('G:/PythonFIle/appRank/clearData/tabName.csv', encoding='gbk')
sep = df.shape
l, h = sep[1], sep[0]
print(l, h)
ys = []
x = []
time = []
head = df.columns
for i in range(0, l - 1):
x.append(head[i + 1])
for i in range(0, h):
y = df.iloc[i][1:].tolist()
y2 = map(lambda x: int(x), y)
ys.append(list(y2))
time.append(df.iloc[i][0])
tl = Timeline()
i=1
data_pair = [list(z) for z in zip(x, ys[i])]
a = [39,16,61,100,0,98,13,12,64,80,99,96,0,0,26,13,78,2,52]
print(ys[i],type(ys[i]))
bar = (
Bar()
.add_xaxis(xaxis_data=x)
.add_yaxis(series_name=time[i], y_axis=ys[i])
.set_global_opts(
xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-30)),
)
)
bar.render()
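# Hedged follow-up sketch (not in the original script): the Timeline `tl` created above
# is never filled, so only a single frame gets rendered. One way to use it is to build
# one Bar per date row, attach each to the timeline, then render the whole animation.
for j in range(h):
    frame = (
        Bar()
        .add_xaxis(xaxis_data=x)
        .add_yaxis(series_name=str(time[j]), y_axis=ys[j])
        .set_global_opts(
            xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-30)),
        )
    )
    tl.add(frame, str(time[j]))
tl.render("timeline_bar.html")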
|
#Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
#This program is free software; you can redistribute it and/or modify it under the terms of the BSD 3-Clause License.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD 3-Clause License for more details.
import os
from lenet import LeNet5
import resnet
import torch
from torch.autograd import Variable
from torchvision.datasets.mnist import MNIST
from torchvision.datasets import CIFAR10
from torchvision.datasets import CIFAR100
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import argparse
from my_utils import LogPrint, set_up_dir, get_CodeID
parser = argparse.ArgumentParser(description='train-teacher-network')
# Basic model parameters.
parser.add_argument('--dataset', type=str, default='MNIST', choices=['MNIST','cifar10','cifar100'])
parser.add_argument('--data', type=str, default='/home4/wanghuan/Projects/20180918_KD_for_NST/TaskAgnosticDeepCompression/Bin_CIFAR10/data_MNIST')
parser.add_argument('--output_dir', type=str, default='/home4/wanghuan/Projects/DAFL/MNIST_teacher_model/')
parser.add_argument('-p', '--project_name', type=str, default='')
parser.add_argument('--resume', type=str, default='')
parser.add_argument('--CodeID', type=str, default='')
parser.add_argument('--debug', action="store_true")
parser.add_argument('--which_net', type=str, default="")
args = parser.parse_args()
# set up log dirs
TimeID, ExpID, rec_img_path, weights_path, log = set_up_dir(args.project_name, args.resume, args.debug)
args.output_dir = weights_path
logprint = LogPrint(log, ExpID)
args.ExpID = ExpID
args.CodeID = get_CodeID()
logprint(args.__dict__)
os.makedirs(args.output_dir, exist_ok=True)
acc = 0
acc_best = 0
if args.dataset == 'MNIST':
data_train = MNIST(args.data,
transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
data_test = MNIST(args.data,
train=False,
transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
data_train_loader = DataLoader(data_train, batch_size=256, shuffle=True, num_workers=8)
data_test_loader = DataLoader(data_test, batch_size=1024, num_workers=8)
net = LeNet5().cuda()
criterion = torch.nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
if args.dataset == 'cifar10':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
data_train = CIFAR10(args.data,
transform=transform_train)
data_test = CIFAR10(args.data,
train=False,
transform=transform_test)
data_train_loader = DataLoader(data_train, batch_size=128, shuffle=True, num_workers=8)
data_test_loader = DataLoader(data_test, batch_size=100, num_workers=0)
if args.which_net == "embed":
net = resnet.ResNet34_2neurons().cuda()
else:
net = resnet.ResNet34().cuda()
criterion = torch.nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
if args.dataset == 'cifar100':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
data_train = CIFAR100(args.data,
transform=transform_train)
data_test = CIFAR100(args.data,
train=False,
transform=transform_test)
data_train_loader = DataLoader(data_train, batch_size=128, shuffle=True, num_workers=0)
data_test_loader = DataLoader(data_test, batch_size=128, num_workers=0)
net = resnet.ResNet34(num_classes=100).cuda()
criterion = torch.nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
def adjust_learning_rate(optimizer, epoch):
"""For resnet, the lr starts from 0.1, and is divided by 10 at 80 and 120 epochs"""
if epoch < 80:
lr = 0.1
elif epoch < 120:
lr = 0.01
else:
lr = 0.001
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def train(epoch):
if args.dataset != 'MNIST':
adjust_learning_rate(optimizer, epoch)
global cur_batch_win
net.train()
loss_list, batch_list = [], []
for i, (images, labels) in enumerate(data_train_loader):
images, labels = Variable(images).cuda(), Variable(labels).cuda()
optimizer.zero_grad()
output = net(images)
loss = criterion(output, labels)
loss_list.append(loss.data.item())
batch_list.append(i+1)
if i == 1:
logprint('Train - Epoch %d, Batch: %d, Loss: %f' % (epoch, i, loss.data.item()))
loss.backward()
optimizer.step()
def test():
global acc, acc_best
net.eval()
total_correct = 0
avg_loss = 0.0
with torch.no_grad():
for i, (images, labels) in enumerate(data_test_loader):
images, labels = Variable(images).cuda(), Variable(labels).cuda()
output = net(images)
avg_loss += criterion(output, labels).sum()
pred = output.data.max(1)[1]
total_correct += pred.eq(labels.data.view_as(pred)).sum()
avg_loss /= len(data_test)
acc = float(total_correct) / len(data_test)
if acc_best < acc:
acc_best = acc
logprint('Test Avg. Loss: %f, Accuracy: %f' % (avg_loss.data.item(), acc))
def train_and_test(epoch):
train(epoch)
test()
def main():
if args.dataset == 'MNIST':
epoch = 10
else:
epoch = 200
for e in range(1, epoch):
train_and_test(e)
if args.which_net == "embed":
torch.save(net,args.output_dir + '/teacher_embed')
else:
torch.save(net,args.output_dir + '/teacher')
if __name__ == '__main__':
main()
|
from uuid import uuid4
SECRET_KEY = str(uuid4())
LOGGER_NAME = 'http'
HOST = 'localhost'
PORT = 5000
|
import io
import os.path
from msgpack import packb
import pytest
from .hashindex import H
from .key import TestKey
from ..archive import Statistics
from ..cache import AdHocCache
from ..compress import CompressionSpec
from ..crypto.key import RepoKey
from ..hashindex import ChunkIndex, CacheSynchronizer
from ..helpers import Manifest
from ..repository import Repository
class TestCacheSynchronizer:
@pytest.fixture
def index(self):
return ChunkIndex()
@pytest.fixture
def sync(self, index):
return CacheSynchronizer(index)
def test_no_chunks(self, index, sync):
data = packb({
'foo': 'bar',
'baz': 1234,
'bar': 5678,
'user': 'chunks',
'chunks': []
})
sync.feed(data)
assert not len(index)
def test_simple(self, index, sync):
data = packb({
'foo': 'bar',
'baz': 1234,
'bar': 5678,
'user': 'chunks',
'chunks': [
(H(1), 1, 2),
(H(2), 2, 3),
]
})
sync.feed(data)
assert len(index) == 2
assert index[H(1)] == (1, 1, 2)
assert index[H(2)] == (1, 2, 3)
def test_multiple(self, index, sync):
data = packb({
'foo': 'bar',
'baz': 1234,
'bar': 5678,
'user': 'chunks',
'chunks': [
(H(1), 1, 2),
(H(2), 2, 3),
]
})
data += packb({
'xattrs': {
'security.foo': 'bar',
'chunks': '123456',
},
'stuff': [
(1, 2, 3),
]
})
data += packb({
'xattrs': {
'security.foo': 'bar',
'chunks': '123456',
},
'chunks': [
(H(1), 1, 2),
(H(2), 2, 3),
],
'stuff': [
(1, 2, 3),
]
})
data += packb({
'chunks': [
(H(3), 1, 2),
],
})
data += packb({
'chunks': [
(H(1), 1, 2),
],
})
part1 = data[:70]
part2 = data[70:120]
part3 = data[120:]
sync.feed(part1)
sync.feed(part2)
sync.feed(part3)
assert len(index) == 3
assert index[H(1)] == (3, 1, 2)
assert index[H(2)] == (2, 2, 3)
assert index[H(3)] == (1, 1, 2)
@pytest.mark.parametrize('elem,error', (
({1: 2}, 'Unexpected object: map'),
(bytes(213), [
'Unexpected bytes in chunks structure', # structure 2/3
'Incorrect key length']), # structure 3/3
(1, 'Unexpected object: integer'),
(1.0, 'Unexpected object: double'),
(True, 'Unexpected object: true'),
(False, 'Unexpected object: false'),
(None, 'Unexpected object: nil'),
))
@pytest.mark.parametrize('structure', (
lambda elem: {'chunks': elem},
lambda elem: {'chunks': [elem]},
lambda elem: {'chunks': [(elem, 1, 2)]},
))
def test_corrupted(self, sync, structure, elem, error):
packed = packb(structure(elem))
with pytest.raises(ValueError) as excinfo:
sync.feed(packed)
if isinstance(error, str):
error = [error]
possible_errors = ['cache_sync_feed failed: ' + error for error in error]
assert str(excinfo.value) in possible_errors
@pytest.mark.parametrize('data,error', (
# Incorrect tuple length
({'chunks': [(bytes(32), 2, 3, 4)]}, 'Invalid chunk list entry length'),
({'chunks': [(bytes(32), 2)]}, 'Invalid chunk list entry length'),
# Incorrect types
({'chunks': [(1, 2, 3)]}, 'Unexpected object: integer'),
({'chunks': [(1, bytes(32), 2)]}, 'Unexpected object: integer'),
({'chunks': [(bytes(32), 1.0, 2)]}, 'Unexpected object: double'),
))
def test_corrupted_ancillary(self, index, sync, data, error):
packed = packb(data)
with pytest.raises(ValueError) as excinfo:
sync.feed(packed)
assert str(excinfo.value) == 'cache_sync_feed failed: ' + error
def make_index_with_refcount(self, refcount):
index_data = io.BytesIO()
index_data.write(b'BORG_IDX')
# num_entries
index_data.write((1).to_bytes(4, 'little'))
# num_buckets
index_data.write((1).to_bytes(4, 'little'))
# key_size
index_data.write((32).to_bytes(1, 'little'))
# value_size
index_data.write((3 * 4).to_bytes(1, 'little'))
index_data.write(H(0))
index_data.write(refcount.to_bytes(4, 'little'))
index_data.write((1234).to_bytes(4, 'little'))
index_data.write((5678).to_bytes(4, 'little'))
index_data.seek(0)
index = ChunkIndex.read(index_data)
return index
def test_corrupted_refcount(self):
index = self.make_index_with_refcount(ChunkIndex.MAX_VALUE + 1)
sync = CacheSynchronizer(index)
data = packb({
'chunks': [
(H(0), 1, 2),
]
})
with pytest.raises(ValueError) as excinfo:
sync.feed(data)
assert str(excinfo.value) == 'cache_sync_feed failed: invalid reference count'
def test_refcount_max_value(self):
index = self.make_index_with_refcount(ChunkIndex.MAX_VALUE)
sync = CacheSynchronizer(index)
data = packb({
'chunks': [
(H(0), 1, 2),
]
})
sync.feed(data)
assert index[H(0)] == (ChunkIndex.MAX_VALUE, 1234, 5678)
def test_refcount_one_below_max_value(self):
index = self.make_index_with_refcount(ChunkIndex.MAX_VALUE - 1)
sync = CacheSynchronizer(index)
data = packb({
'chunks': [
(H(0), 1, 2),
]
})
sync.feed(data)
# Incremented to maximum
assert index[H(0)] == (ChunkIndex.MAX_VALUE, 1234, 5678)
sync.feed(data)
assert index[H(0)] == (ChunkIndex.MAX_VALUE, 1234, 5678)
class TestAdHocCache:
@pytest.yield_fixture
def repository(self, tmpdir):
self.repository_location = os.path.join(str(tmpdir), 'repository')
with Repository(self.repository_location, exclusive=True, create=True) as repository:
repository.put(H(1), b'1234')
repository.put(Manifest.MANIFEST_ID, b'5678')
yield repository
@pytest.fixture
def key(self, repository, monkeypatch):
monkeypatch.setenv('BORG_PASSPHRASE', 'test')
key = RepoKey.create(repository, TestKey.MockArgs())
key.compressor = CompressionSpec('none').compressor
return key
@pytest.fixture
def manifest(self, repository, key):
Manifest(key, repository).write()
return Manifest.load(repository, key=key, operations=Manifest.NO_OPERATION_CHECK)[0]
@pytest.fixture
def cache(self, repository, key, manifest):
return AdHocCache(repository, key, manifest)
def test_does_not_contain_manifest(self, cache):
assert not cache.seen_chunk(Manifest.MANIFEST_ID)
def test_does_not_delete_existing_chunks(self, repository, cache):
assert cache.seen_chunk(H(1)) == ChunkIndex.MAX_VALUE
cache.chunk_decref(H(1), Statistics())
assert repository.get(H(1)) == b'1234'
def test_does_not_overwrite(self, cache):
with pytest.raises(AssertionError):
cache.add_chunk(H(1), b'5678', Statistics(), overwrite=True)
def test_seen_chunk_add_chunk_size(self, cache):
assert cache.add_chunk(H(1), b'5678', Statistics()) == (H(1), 4, 0)
def test_deletes_chunks_during_lifetime(self, cache, repository):
"""E.g. checkpoint archives"""
cache.add_chunk(H(5), b'1010', Statistics())
assert cache.seen_chunk(H(5)) == 1
cache.chunk_decref(H(5), Statistics())
assert not cache.seen_chunk(H(5))
with pytest.raises(Repository.ObjectNotFound):
repository.get(H(5))
def test_files_cache(self, cache):
assert cache.file_known_and_unchanged(bytes(32), None) == (False, None)
assert cache.cache_mode == 'd'
assert cache.files is None
def test_txn(self, cache):
assert not cache._txn_active
cache.seen_chunk(H(5))
assert cache._txn_active
assert cache.chunks
cache.rollback()
assert not cache._txn_active
assert not hasattr(cache, 'chunks')
def test_incref_after_add_chunk(self, cache):
assert cache.add_chunk(H(3), b'5678', Statistics()) == (H(3), 4, 47)
assert cache.chunk_incref(H(3), Statistics()) == (H(3), 4, 47)
def test_existing_incref_after_add_chunk(self, cache):
"""This case occurs with part files, see Archive.chunk_file."""
assert cache.add_chunk(H(1), b'5678', Statistics()) == (H(1), 4, 0)
assert cache.chunk_incref(H(1), Statistics()) == (H(1), 4, 0)
|
import unittest
import numpy as np
from geeksw.utils.data_loader_tools import make_data_loader
class Test(unittest.TestCase):
def test_data_loader_tools(self):
data = {"a": np.array([1, 2, 3]), "b": np.array([2, 3, 4]), "c": np.array([5, 6, 7])}
funcs = {
"d": lambda df: df["a"] + df["b"],
"e": lambda df: df["b"] * df["c"],
"f": lambda df: df["d"] - df["e"],
}
# Test if we can load columns directly in the data
load_df = make_data_loader(["a", "b", "c"])
df = load_df(data)
np.testing.assert_equal(df["a"], data["a"])
np.testing.assert_equal(df["b"], data["b"])
np.testing.assert_equal(df["c"], data["c"])
# Test if we can get columns which are directly derived from the available columns
load_df = make_data_loader(["a", "b", "c", "d", "e"], producers=funcs)
df = load_df(data)
np.testing.assert_equal(df["a"], data["a"])
np.testing.assert_equal(df["b"], data["b"])
np.testing.assert_equal(df["c"], data["c"])
np.testing.assert_equal(df["d"], data["a"] + data["b"])
np.testing.assert_equal(df["e"], data["b"] * data["c"])
# Test if we can load columns which are "second-order derived" without loading anything else
load_df = make_data_loader(["f"], producers=funcs)
df = load_df(data)
np.testing.assert_equal(df["f"], data["a"] + data["b"] - data["b"] * data["c"])
if __name__ == "__main__":
unittest.main(verbosity=2)
|
from os import system
from calcupy import settings
from calcupy import commands
from calcupy import evaluator
from calcupy.variables import NumberVariables, FunctionVariables
from math import sqrt, sin, cos, tan
from math import pi, e
class Calculator:
DEFAULT_NUMBER_VARIABLES = {"pi": pi, "e": e}
DEFAULT_FUNCTION_VARIABLES = {"sqrt": sqrt, "sin": sin, "cos": cos, "tan": tan}
def __init__(
self,
number_variables: dict = None,
function_variables: dict = None,
title: str = settings.DEFAULT_TITLE,
bullet: str = settings.DEFAULT_BULLET,
):
self._number_variables = NumberVariables()
self._function_variables = FunctionVariables()
self._title = title
self._bullet = bullet
self._command_handler = commands.CommandHandler()
self._set_default_number_variables()
self._set_default_function_variables()
if number_variables:
self._set_number_variables(number_variables)
if function_variables:
self._set_function_variables(function_variables)
def _set_default_number_variables(self) -> None:
for identifier, value in self.DEFAULT_NUMBER_VARIABLES.items():
self._number_variables.add_variable(identifier, value)
def _set_default_function_variables(self) -> None:
for identifier, function in self.DEFAULT_FUNCTION_VARIABLES.items():
self._function_variables.add_builtins(identifier, function)
def _set_number_variables(self, number_variables: dict) -> None:
for identifier, value in number_variables.items():
self._number_variables.add_variable(identifier, value)
def _set_function_variables(self, function_variables: dict) -> None:
for identifier, function in function_variables.items():
self._function_variables.add_variable(identifier, function)
def _update_ans(self, value) -> None:
self._number_variables.add_variable("ans", value)
def _setting_commands(self, input_) -> None:
if not input_:
return True
if input_.lower() == "clear" or input_.lower() == "cls":
system("cls")
print(self._title)
return True
if input_.lower() == "exit" or input_.lower() == "stop":
exit()
return None
def start(self):
system("cls")
print(self._title)
while True:
input_ = input(f"\n{self._bullet} ").strip()
if self._setting_commands(input_):
continue
try:
command = self._command_handler.get_command(
input_, self._function_variables
)
if command:
command.handle(self._number_variables)
continue
result = evaluator.evaluate(
input_, self._number_variables, self._function_variables
)
self._update_ans(result)
print(result)
except Exception as exception:
print(f"Error: {exception}")
|
from __init__ import *
class DarkTheme:
@staticmethod
def button(obj: QPushButton):
obj.setStyleSheet("""
QPushButton{
color: #E0E0E0;
border: none;
outline: none;
background-color: #424242;
selection-background-color: #424242;
}
""")
@staticmethod
def widget(obj):
obj.setStyleSheet("""
QWidget{
background-color: #616161;
}
""")
@staticmethod
def group_box(obj):
obj.setStyleSheet("""
QGroupBox{
color: #606060;
border: none;
outline: none;
background-color: #A2A2A2;
selection-background-color: #A2A2A2;
}
""")
@staticmethod
def title_label(obj):
obj.setStyleSheet("""
QLabel{
color: #212121;
font: medium Ubuntu;
font-size: 30px;
}
""")
@staticmethod
def info_label(obj):
obj.setStyleSheet("""
QLabel{
color: #212121;
font: medium Ubuntu;
font-size: 14px;
}
""")
@staticmethod
def tile_style(obj):
obj.setStyleSheet("""
QLabel{
color: #E0E0E0;
border: none;
outline: none;
background-color: #424242;
selection-background-color: #424242;
}
""")
class LightTheme:
@staticmethod
def button(obj: QPushButton):
obj.setStyleSheet("""
QPushButton{
color: #202020;
border: none;
outline: none;
background-color: #C2C2C2;
selection-background-color: #C2C2C2;
}
""")
@staticmethod
def button_unavaliable(obj: QPushButton):
obj.setStyleSheet("""
QPushButton{
color: #202020;
border: none;
outline: none;
background-color: #A2A2A2;
selection-background-color: #A2A2A2;
}
""")
@staticmethod
def group_box(obj):
obj.setStyleSheet("""
QGroupBox{
color: #202020;
border: none;
outline: none;
background-color: #C2C2C2;
selection-background-color: #C2C2C2;
}
""")
@staticmethod
def widget(obj):
obj.setStyleSheet("""
QWidget{
}
""")
@staticmethod
def title_label(obj):
obj.setStyleSheet("""
QLabel{
color: #202020;
font: medium Ubuntu;
font-size: 30px;
}
""")
@staticmethod
def info_label(obj):
obj.setStyleSheet("""
QLabel{
color: #202020;
font: medium Ubuntu;
font-size: 14px;
}
""")
@staticmethod
def tile_style(obj):
obj.setStyleSheet("""
QLabel{
color: #202020;
border: none;
outline: none;
background-color: #A2A2A2;
selection-background-color: #A2A2A2;
}
""")
class Theme:
DarkTheme = DarkTheme()
LightTheme = LightTheme()
|
import unittest
from unittest import TestCase
from zmodulo.plot.line.end_point import EndPoint
__author__ = 'aruff'
class TestArrow(TestCase):
def test_integer_assignment(self):
"""
Tests the integer initialization of the EndPoint class
"""
arrow = EndPoint(0, 0)
self.assertEqual(arrow.to_str(), "\tx2 = 0;\n\ty2 = 0;\n")
def test_string_assignment(self):
"""
Tests the string initialization of the EndPoint class
"""
arrow = EndPoint("0", "0")
self.assertEqual(arrow.to_str(), "\tx2 = 0;\n\ty2 = 0;\n")
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -
#
# Copyright (C) 2011 by Saúl Ibarra Corretgé
#
# This file is part of gaffer. See the NOTICE for more information.
import pyuv
import datetime
import errno
import logging
import time
try:
import _thread as thread
except ImportError:
import thread
from collections import deque
import six
from tornado import ioloop, stack_context
class Waker(object):
def __init__(self, loop):
self._async = pyuv.Async(loop, lambda x: None)
self._async.unref()
def wake(self):
self._async.send()
class IOLoop(object):
NONE = ioloop.IOLoop.NONE
READ = ioloop.IOLoop.READ
WRITE = ioloop.IOLoop.WRITE
ERROR = ioloop.IOLoop.ERROR
_instance_lock = thread.allocate_lock()
def __init__(self, impl=None, _loop=None):
if impl is not None:
raise RuntimeError('When using pyuv the poller implementation cannot be specified')
self._loop = _loop or pyuv.Loop()
self._poll_handles = {}
self._handlers = {}
self._callbacks = deque()
self._callback_lock = thread.allocate_lock()
self._timeouts = set()
self._running = False
self._stopped = False
self._thread_ident = None
self._cb_handle = pyuv.Prepare(self._loop)
self._waker = Waker(self._loop)
@staticmethod
def instance():
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this IOLoop object as the singleton instance.
This is normally not necessary as `instance()` will create
an IOLoop on demand, but you may want to call `install` to use
a custom subclass of IOLoop.
"""
assert not IOLoop.initialized()
IOLoop._instance = self
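# A minimal sketch of the pattern described in the docstring above (the
# subclass name is hypothetical): install a custom IOLoop before anything
# calls instance(), so instance() returns your subclass from then on.
#
#     class MyIOLoop(IOLoop):
#         pass
#
#     MyIOLoop().install()
#     assert IOLoop.instance() is IOLoop._instance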
def _close_loop_handles(self):
def cb(handle):
try:
handle.close()
except Exception:
pass
self._loop.walk(cb)
def close(self, all_fds=False, all_handlers=False):
if all_fds:
for fd in self._handlers:
poll, stack = self._handlers[fd]
if not poll.closed:
poll.close()
self._handlers = {}
if all_handlers:
self._close_loop_handles()
# Run the loop so the close callbacks are fired and memory is freed
# It will not block because all handles are closed
assert not self._loop.run_once(), "there are pending handles"
def add_handler(self, fd, handler, events):
if fd in self._handlers:
raise IOError("fd %d already registered" % fd)
poll = pyuv.Poll(self._loop, fd)
poll.fd = fd
self._handlers[fd] = (poll, stack_context.wrap(handler))
poll_events = 0
if (events & IOLoop.READ):
poll_events |= pyuv.UV_READABLE
if (events & IOLoop.WRITE):
poll_events |= pyuv.UV_WRITABLE
poll.start(poll_events, self._handle_poll_events)
def update_handler(self, fd, events):
poll, _ = self._handlers[fd]
poll_events = 0
if (events & IOLoop.READ):
poll_events |= pyuv.UV_READABLE
if (events & IOLoop.WRITE):
poll_events |= pyuv.UV_WRITABLE
poll.start(poll_events, self._handle_poll_events)
def remove_handler(self, fd):
poll, _ = self._handlers.pop(fd, (None, None))
if poll is not None and not poll.closed:
poll.close()
def set_blocking_signal_threshold(self, seconds, action):
raise NotImplementedError
def set_blocking_log_threshold(self, seconds):
raise NotImplementedError
def log_stack(self, signal, frame):
raise NotImplementedError
def start(self, run_loop=True):
if self._stopped:
self._stopped = False
return
self._thread_ident = thread.get_ident()
self._running = True
if run_loop:
while self._running:
# We should use run() here, but we need to have break() for that
self._loop.run_once()
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def running(self):
"""Returns true if this IOLoop is currently running."""
return self._running
def add_timeout(self, deadline, callback):
timeout = _Timeout(deadline, stack_context.wrap(callback), io_loop=self)
self._timeouts.add(timeout)
return timeout
def remove_timeout(self, timeout):
self._timeouts.remove(timeout)
timer = timeout._timer
if timer.active:
timer.stop()
def add_callback(self, callback):
with self._callback_lock:
was_active = self._cb_handle.active
self._callbacks.append(stack_context.wrap(callback))
if not was_active:
self._cb_handle.start(self._prepare_cb)
if not was_active or thread.get_ident() != self._thread_ident:
self._waker.wake()
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the IOLoop
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in sys.exc_info.
"""
logging.error("Exception in callback %r", callback, exc_info=True)
def _run_callback(self, callback):
try:
callback()
except Exception:
self.handle_callback_exception(callback)
def _handle_poll_events(self, handle, poll_events, error):
events = 0
if error is not None:
# TODO: do I need to do anything else here?
events |= IOLoop.ERROR
if (poll_events & pyuv.UV_READABLE):
events |= IOLoop.READ
if (poll_events & pyuv.UV_WRITABLE):
events |= IOLoop.WRITE
fd = handle.fd
try:
self._handlers[fd][1](fd, events)
except (OSError, IOError) as e:
if e.args[0] == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
logging.error("Exception in I/O handler for fd %s", fd, exc_info=True)
except Exception:
logging.error("Exception in I/O handler for fd %s", fd, exc_info=True)
def _prepare_cb(self, handle):
self._cb_handle.stop()
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = deque()
while callbacks:
self._run_callback(callbacks.popleft())
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'io_loop', '_timer']
def __init__(self, deadline, callback, io_loop=None):
if (isinstance(deadline, six.integer_types)
or isinstance(deadline, float)):
self.deadline = deadline
elif isinstance(deadline, datetime.timedelta):
self.deadline = time.time() + _Timeout.timedelta_to_seconds(deadline)
else:
raise TypeError("Unsupported deadline %r" % deadline)
self.callback = callback
self.io_loop = io_loop or IOLoop.instance()
timeout = max(self.deadline - time.time(), 0)
self._timer = pyuv.Timer(self.io_loop._loop)
self._timer.start(self._timer_cb, timeout, 0.0)
def _timer_cb(self, handle):
self._timer.close()
self._timer = None
self.io_loop._timeouts.remove(self)
self.io_loop._run_callback(self.callback)
self.io_loop = None
@staticmethod
def timedelta_to_seconds(td):
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
class PeriodicCallback(object):
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
self.callback_time = callback_time / 1000.0
self.io_loop = io_loop or IOLoop.instance()
self._timer = pyuv.Timer(self.io_loop._loop)
self._running = False
def _timer_cb(self, timer):
try:
self.callback()
except Exception:
logging.error("Error in periodic callback", exc_info=True)
def start(self):
if self._running:
return
self._running = True
self._timer.start(self._timer_cb, self.callback_time, self.callback_time)
self._timer.repeat = self.callback_time
def stop(self):
if not self._running:
return
self._running = False
self._timer.stop()
def install():
# Patch Tornado's classes with ours
ioloop.IOLoop = IOLoop
ioloop._Timeout = _Timeout
ioloop.PeriodicCallback = PeriodicCallback
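# A minimal usage sketch (assuming pyuv, six and tornado are importable): call
# install() once at startup, before any tornado code creates an IOLoop, so that
# tornado's ioloop module resolves to the pyuv-backed classes defined above.
#
#     install()
#     loop = ioloop.IOLoop.instance()
#     loop.add_callback(lambda: print("running on pyuv"))
#     loop.add_timeout(time.time() + 0.1, loop.stop)
#     loop.start()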
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
from __future__ import print_function
"""
XML-RPC servers for parent and children
Following typical XmlRpcNode code, code is divided into:
a) Handlers: these actually define and execute the XML-RPC API
b) Nodes: these run the XML-RPC server
In this code you'll find 'Parent' and 'Child' code. The parent node
is the original roslaunch process. The child nodes are the child
processes it launches in order to handle remote launching (or
execution as a different user).
"""
import errno
import logging
import os
import socket
import sys
import time
import traceback
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from xmlrpc.client import ServerProxy
except ImportError:
from xmlrpclib import ServerProxy
import rosgraph.network as network
import rosgraph.xmlrpc as xmlrpc
import roslaunch.config
from roslaunch.pmon import ProcessListener, Process
import roslaunch.xmlloader
from roslaunch.launch import ROSLaunchRunner
from roslaunch.core import RLException, \
add_printlog_handler, add_printerrlog_handler, printlog, printerrlog, printlog_bold
#For using Log message level constants
from rosgraph_msgs.msg import Log
# interface class so that we don't have circular deps
class ChildROSLaunchProcess(Process):
"""
API for remote roslaunch processes
"""
def __init__(self, name, args, env):
super(ChildROSLaunchProcess, self).__init__('roslaunch', name, args, env, False)
self.uri = None
def set_uri(self, uri):
self.uri = uri
class ROSLaunchBaseHandler(xmlrpc.XmlRpcHandler):
"""
Common XML-RPC API for the roslaunch server and child node
"""
def __init__(self, pm):
self.pm = pm
self.logger = logging.getLogger('roslaunch.server')
if self.pm is None:
raise RLException("cannot create xmlrpc handler: pm is not initialized")
#TODO: kill process, restart (with optional prefix). list active, list dead. CPU usage
def list_processes(self):
"""
@return: code, msg, process list.
The process list holds two lists: the first contains active process
names along with the number of times each process has been spawned;
the second contains dead process names and their spawn counts.
@rtype: int, str, [[(str, int),], [(str,int),]]
"""
return 1, "processes on parent machine", self.pm.get_process_names_with_spawn_count()
def process_info(self, process_name):
"""
@return: dictionary of metadata about process. Keys vary by implementation
@rtype: int, str, dict
"""
p = self.pm.get_process(process_name)
if p is None:
return -1, "no process by that name", {}
else:
return 1, "process info", p.get_info()
def get_pid(self):
"""
@return: code, msg, pid
@rtype: int, str, int
"""
pid = os.getpid()
return 1, str(pid), pid
def get_node_names(self):
"""
@return: code, msg, list of node names
@rtype: int, str, [str]
"""
if self.pm is None:
return 0, "uninitialized", []
return 1, "node names", self.pm.get_active_names()
def _shutdown(self, reason):
"""
xmlrpc.XmlRpcHandler API: inform handler of shutdown
@param reason: human-readable shutdown reason
@type reason: str
"""
return 1, '', 1
# Uses camel-case network-API naming conventions
class ROSLaunchParentHandler(ROSLaunchBaseHandler):
"""
XML-RPC API for the roslaunch server node
"""
def __init__(self, pm, child_processes, listeners):
"""
@param child_processes: Map of remote processes so that server can update processes
with information as children register. Handler will not modify
keys.
@type child_processes: {name : ChildROSLaunchProcess}.
@param listeners [ProcessListener]: list of
listeners to notify when process_died events occur.
"""
super(ROSLaunchParentHandler, self).__init__(pm)
self.child_processes = child_processes
self.listeners = listeners
def register(self, client, uri):
"""
Registration callback from newly launched roslaunch clients
@param client: name of client
@type client: str
@param uri: XML-RPC URI of client
@type uri: str
@return: code, msg, ignore
@rtype: int, str, int
"""
if client not in self.child_processes:
self.logger.error("Unknown child [%s] registered with server", client)
return -1, "unknown child [%s]"%client, 0
else:
self.logger.info("child [%s] registered with server, uri[%s]", client, uri)
self.child_processes[client].set_uri(uri)
return 1, "registered", 1
def list_children(self):
"""
List the roslaunch child processes.
@return int, str, [str]: code, msg, list of the roslaunch children URIS
"""
return 1, 'roslaunch children', [v.uri for v in self.child_processes.values() if v.uri is not None]
def process_died(self, process_name, exit_code):
"""
Inform roslaunch server that a remote process has died
@param process_name: name of process that died
@type process_name: str
@param exit_code: exit code of remote process
@type exit_code: int
@return: code, msg, ignore
@rtype: int, str, int
"""
for l in self.listeners:
try:
l.process_died(process_name, exit_code)
except:
self.logger.error(traceback.format_exc())
return 1, '', 0
def log(self, client, level, message):
"""
Report a log message to the server
@param client: name of client
@type client: str
@param level: log level (uses rosgraph_msgs.msg.Log levels)
@type level: int
@param message: message to log
@type message: str
"""
try:
if level >= Log.ERROR:
printerrlog("[%s]: %s"%(client, message))
else:
#hack due to the fact that we only have one INFO level
if 'started with pid' in message:
printlog_bold("[%s]: %s"%(client, message))
else:
printlog("[%s]: %s"%(client, message))
except:
# can't trust the logging system at this point, so just dump to screen
traceback.print_exc()
return 1, '', 1
class ROSLaunchChildHandler(ROSLaunchBaseHandler):
"""
XML-RPC API implementation for child roslaunches
NOTE: the client handler runs a process monitor so that
it can track processes across requests
"""
def __init__(self, run_id, name, server_uri, pm):
"""
@param server_uri: XML-RPC URI of server
@type server_uri: str
@param pm: process monitor to use
@type pm: L{ProcessMonitor}
@raise RLException: If parameters are invalid
"""
super(ROSLaunchChildHandler, self).__init__(pm)
if server_uri is None:
raise RLException("server_uri is not initialized")
self.run_id = run_id
# parse the URI to make sure it's valid
_, urlport = network.parse_http_host_and_port(server_uri)
if urlport <= 0:
raise RLException("ERROR: roslaunch server URI is not a valid XML-RPC URI. Value is [%s]"%m.uri)
self.name = name
self.pm = pm
self.server_uri = server_uri
self.server = ServerProxy(server_uri)
def _shutdown(self, reason):
"""
xmlrpc.XmlRpcHandler API: inform handler of shutdown
@param reason: human-readable shutdown reason
@type reason: str
"""
if self.pm is not None:
self.pm.shutdown()
self.pm.join()
self.pm = None
def shutdown(self):
"""
@return: code, msg, ignore
@rtype: int, str, int
"""
self._shutdown("external call")
return 1, "success", 1
def _log(self, level, message):
"""
log message to log file and roslaunch server
@param level: log level
@type level: int
@param message: message to log
@type message: str
"""
try:
if self.logger is not None:
self.logger.debug(message)
if self.server is not None:
self.server.log(str(self.name), level, str(message))
except:
self.logger.error(traceback.format_exc())
def launch(self, launch_xml):
"""
Launch the roslaunch XML file. Because this is a child
roslaunch, it will not set parameters nor manipulate the
master. The call blocks until the launch is complete.
@param launch_xml: roslaunch XML file to launch
@type launch_xml: str
@return: code, msg, [ [ successful launches], [failed launches] ]
@rtype: int, str, [ [str], [str] ]
"""
if self.pm is None:
return 0, "uninitialized", -1
rosconfig = roslaunch.config.ROSLaunchConfig()
try:
roslaunch.xmlloader.XmlLoader().load_string(launch_xml, rosconfig)
except roslaunch.xmlloader.XmlParseException as e:
return -1, "ERROR: %s"%e, [[], []]
# won't actually do anything other than local, but still required
rosconfig.assign_machines()
try:
# roslaunch clients try to behave like normal roslaunches as much as possible. It's
# mainly the responsibility of the roslaunch server to not give us any XML that might
# cause conflict (e.g. master tags, param tags, etc...).
self._log(Log.INFO, "launching nodes...")
runner = ROSLaunchRunner(self.run_id, rosconfig, server_uri=self.server_uri, pmon=self.pm)
succeeded, failed = runner.launch()
self._log(Log.INFO, "... done launching nodes")
# enable the process monitor to exit if all processes die
self.pm.registrations_complete()
return 1, "launched", [ succeeded, failed ]
except Exception as e:
return 0, "ERROR: %s"%traceback.format_exc(), [[], []]
_STARTUP_TIMEOUT = 5.0 #seconds
class ROSLaunchNode(xmlrpc.XmlRpcNode):
"""
Base XML-RPC server for roslaunch parent/child processes
"""
def __init__(self, handler):
"""
@param handler: xmlrpc api handler
@type handler: L{ROSLaunchBaseHandler}
"""
super(ROSLaunchNode, self).__init__(0, handler)
def start(self):
"""
Startup roslaunch server XML-RPC services
@raise RLException: if server fails to start
"""
logger = logging.getLogger('roslaunch.server')
logger.info("starting roslaunch XML-RPC server")
super(ROSLaunchNode, self).start()
# wait for node thread to initialize
timeout_t = time.time() + _STARTUP_TIMEOUT
logger.info("waiting for roslaunch XML-RPC server to initialize")
while not self.uri and time.time() < timeout_t:
time.sleep(0.01)
if not self.uri:
raise RLException("XML-RPC initialization failed")
# Make sure our xmlrpc server is actually up. We've seen very
# odd cases where remote nodes are unable to contact the
# server but have been unable to prove this is the cause.
server_up = False
while not server_up and time.time() < timeout_t:
try:
code, msg, val = ServerProxy(self.uri).get_pid()
if val != os.getpid():
raise RLException("Server at [%s] did not respond with correct PID. There appears to be something wrong with the networking configuration"%self.uri)
server_up = True
except IOError:
# presumably this can occur if we call in a small time
# interval between the server socket port being
# assigned and the XMLRPC server initializing, but it
# is highly unlikely and unconfirmed
time.sleep(0.1)
except socket.error as e:
if e.errno == errno.EHOSTUNREACH:
p = urlparse(self.uri)
raise RLException("Unable to contact the address [%s], which should be local.\nThis is generally caused by:\n * bad local network configuration\n * bad ROS_IP environment variable\n * bad ROS_HOSTNAME environment variable\nCan you ping %s?"%(self.uri, p.hostname))
else:
time.sleep(0.1)
if not server_up:
p = urlparse(self.uri)
raise RLException("""Unable to contact my own server at [%s].
This usually means that the network is not configured properly.
A common cause is that the machine cannot ping itself. Please check
for errors by running:
\tping %s
For more tips, please see
\thttp://wiki.ros.org/ROS/NetworkSetup
"""%(self.uri, p.hostname))
printlog_bold("started roslaunch server %s"%(self.uri))
def run(self):
"""
run() should not be called by higher-level code. ROSLaunchNode
overrides underlying xmlrpc.XmlRpcNode implementation in order
to log errors.
"""
try:
super(ROSLaunchNode, self).run()
except:
logging.getLogger("roslaunch.remote").error(traceback.format_exc())
print("ERROR: failed to launch XML-RPC server for roslaunch", file=sys.stderr)
class ROSLaunchParentNode(ROSLaunchNode):
"""
XML-RPC server for parent roslaunch.
"""
def __init__(self, rosconfig, pm):
"""
@param rosconfig: ROSConfig launch configuration
@type rosconfig: L{ROSConfig}
@param pm: process monitor
@type pm: L{ProcessMonitor}
"""
self.rosconfig = rosconfig
self.listeners = []
self.child_processes = {} #{ child-name : ChildROSLaunchProcess}.
if pm is None:
raise RLException("cannot create parent node: pm is not initialized")
handler = ROSLaunchParentHandler(pm, self.child_processes, self.listeners)
super(ROSLaunchParentNode, self).__init__(handler)
def add_child(self, name, p):
"""
@param name: child roslaunch's name. NOTE: \a name is not
the same as the machine config key.
@type name: str
@param p: process handle of child
@type p: L{Process}
"""
self.child_processes[name] = p
def add_process_listener(self, l):
"""
Listen to events about remote processes dying. Not
threadsafe. Must be called before processes started.
@param l: Process listener
@type l: L{ProcessListener}
"""
self.listeners.append(l)
class _ProcessListenerForwarder(ProcessListener):
"""
Simple listener that forwards ProcessListener events to a roslaunch server
"""
def __init__(self, server):
self.server = server
def process_died(self, process_name, exit_code):
try:
self.server.process_died(process_name, exit_code)
except Exception as e:
logging.getLogger("roslaunch.remote").error(traceback.format_exc())
class ROSLaunchChildNode(ROSLaunchNode):
"""
XML-RPC server for roslaunch child processes
"""
def __init__(self, run_id, name, server_uri, pm):
"""
Startup roslaunch remote client XML-RPC services.
@param run_id: roslaunch run ID
@type run_id: str
@param name: name of remote client
@type name: str
@param server_uri: XML-RPC URI of roslaunch server
@type server_uri: str
@param pm: process monitor
@type pm: L{ProcessMonitor}
"""
self.logger = logging.getLogger("roslaunch.server")
self.run_id = run_id
self.name = name
self.server_uri = server_uri
self.pm = pm
if self.pm is None:
raise RLException("cannot create child node: pm is not initialized")
handler = ROSLaunchChildHandler(self.run_id, self.name, self.server_uri, self.pm)
super(ROSLaunchChildNode, self).__init__(handler)
def _register_with_server(self):
"""
Register child node with server
"""
name = self.name
self.logger.info("attempting to register with roslaunch parent [%s]"%self.server_uri)
try:
server = ServerProxy(self.server_uri)
code, msg, _ = server.register(name, self.uri)
if code != 1:
raise RLException("unable to register with roslaunch server: %s"%msg)
except Exception as e:
self.logger.error("Exception while registering with roslaunch parent [%s]: %s"%(self.server_uri, traceback.format_exc()))
# fail
raise RLException("Exception while registering with roslaunch parent [%s]: %s"%(self.server_uri, traceback.format_exc()))
self.logger.debug("child registered with server")
# register printlog handler so messages are funneled to remote
def serverlog(msg):
server.log(name, Log.INFO, msg)
def servererrlog(msg):
server.log(name, Log.ERROR, msg)
add_printlog_handler(serverlog)
add_printerrlog_handler(servererrlog)
# register process listener to forward process death events to main server
self.pm.add_process_listener(_ProcessListenerForwarder(server))
def start(self):
"""
Initialize child. Must be called before run
"""
self.logger.info("starting roslaunch child process [%s], server URI is [%s]", self.name, self.server_uri)
super(ROSLaunchChildNode, self).start()
self._register_with_server()
|
from .HOG import hog
|
from copy import deepcopy
import requests
from django.conf import settings
from .utils import check_required_settings
REQUIRED_SETTINGS_POST = ('OPEN311_API_KEY', 'OPEN311_API_SERVICE_CODE', 'OPEN311_API_BASE_URL')
REQUIRED_SETTINGS_GET = ('OPEN311_API_BASE_URL',)
class Open311Exception(Exception):
pass
def _get_api_base_url():
url = settings.OPEN311_API_BASE_URL
if not url.endswith('/'):
url += '/'
return url
# This function sends the feedback to the Helsinki feedback API
def create_ticket(feedback):
feedback = deepcopy(feedback)
feedback['api_key'] = settings.OPEN311_API_KEY
feedback['service_code'] = settings.OPEN311_API_SERVICE_CODE
try:
new_ticket = post_service_request_to_api(feedback)
except requests.RequestException as e:
raise Open311Exception(e)
for entry in new_ticket:
if 'code' in entry:
raise Open311Exception(
'Got error code {}, description: {}'.format(entry['code'], entry['description'])
)
elif 'service_request_id' in entry:
break
else:
raise Open311Exception('Something wrong with api data, entry: {}'.format(entry))
try:
new_ticket_id = new_ticket[0]['service_request_id']
except KeyError:
raise Open311Exception("New data doesn't contain service_request_id, ticket {}".format(new_ticket))
return new_ticket_id
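# Illustrative call (the field names are assumptions -- whatever the Open311
# endpoint expects, e.g. a description and contact details -- plus the API key
# and service code that create_ticket() fills in from Django settings):
#
#     ticket_id = create_ticket({
#         "description": "Broken street light on Example Street 1",
#         "first_name": "Jane",
#         "last_name": "Doe",
#     })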
def post_service_request_to_api(data):
check_required_settings(REQUIRED_SETTINGS_POST)
try:
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
response = requests.post('{}requests.json'.format(_get_api_base_url()), data=data, headers=headers)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
raise Open311Exception(e)
def get_service_request_from_api(ticket_id):
check_required_settings(REQUIRED_SETTINGS_GET)
try:
response = requests.get('{}requests/{}.json'.format(_get_api_base_url(), ticket_id))
response.raise_for_status()
return response.json()
except requests.RequestException as e:
raise Open311Exception(e)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mail', '0039_auto_20160203_1417'),
]
operations = [
migrations.AddField(
model_name='checksettings',
name='c_night_spam_score_max',
field=models.FloatField(default=4.0, help_text='\u665a\u4e0a(19:00--07:00)\u5982\u679cspam\u68c0\u6d4b\u5206\u6570\u8d85\u8fc7\u8be5\u9600\u503c, \u5219\u88ab\u8ba4\u4e3a\u662f\u5783\u573e\u90ae\u4ef6, \u9ed8\u8ba4\u4e3a5.0', verbose_name='\u7f51\u5173spam\u68c0\u6d4b\u5206\u6570\u9600\u503c(\u665a\u4e0a)'),
),
migrations.AddField(
model_name='checksettings',
name='c_spam_score_max',
field=models.FloatField(default=5.0, help_text='\u767d\u5929(07:00--19:00)\u5982\u679cspam\u68c0\u6d4b\u5206\u6570\u8d85\u8fc7\u8be5\u9600\u503c, \u5219\u88ab\u8ba4\u4e3a\u662f\u5783\u573e\u90ae\u4ef6, \u9ed8\u8ba4\u4e3a5.0', verbose_name='\u7f51\u5173spam\u68c0\u6d4b\u5206\u6570\u9600\u503c(\u767d\u5929)'),
),
migrations.AlterField(
model_name='checksettings',
name='night_spam_score_max',
field=models.FloatField(default=4.0, help_text='\u665a\u4e0a(19:00--07:00)\u5982\u679cspam\u68c0\u6d4b\u5206\u6570\u8d85\u8fc7\u8be5\u9600\u503c, \u5219\u88ab\u8ba4\u4e3a\u662f\u5783\u573e\u90ae\u4ef6, \u9ed8\u8ba4\u4e3a5.0', verbose_name='\u4e2d\u7ee7spam\u68c0\u6d4b\u5206\u6570\u9600\u503c(\u665a\u4e0a)'),
),
migrations.AlterField(
model_name='checksettings',
name='spam_score_max',
field=models.FloatField(default=5.0, help_text='\u767d\u5929(07:00--19:00)\u5982\u679cspam\u68c0\u6d4b\u5206\u6570\u8d85\u8fc7\u8be5\u9600\u503c, \u5219\u88ab\u8ba4\u4e3a\u662f\u5783\u573e\u90ae\u4ef6, \u9ed8\u8ba4\u4e3a5.0', verbose_name='\u4e2d\u7ee7spam\u68c0\u6d4b\u5206\u6570\u9600\u503c(\u767d\u5929)'),
),
]
|
import yaml
import subprocess
import glob
import pandas as pd
stream = open('rais/configuration.yaml')
config = yaml.safe_load(stream)
def create_folder_tmp():
path = config['path_output_data']
create_folder(path, 'tmp')
def create_folder_year(year):
path = config['path_output_data'] + 'tmp/'
create_folder(path, str(year))
def create_folder_inside_year(year, name_folder):
path = config['path_output_data'] + 'tmp/' + str(year) + '/'
create_folder(path, name_folder)
def get_all_original_files_year(year):
path = config['path_input_data'] + str(year) + '/'
extension = get_extension(year)
files = get_all_files(path, extension)
return files
def get_all_tmp_files(year, directory, extension):
path = config['path_output_data'] + 'tmp/' + str(year) + '/' + directory + '/'
files = get_all_files(path, extension)
return files
def get_file_name(file):
file = file.split('/')[-1]
return file.split('.')[0]
#------------------------------------------------------------------------------------------------
def create_folder(path, folder_name):
command = 'mkdir ' + path + folder_name
subprocess.run(command, shell=True)
def change_file_format(file, file_format):
names = file.split('.')
names[-1] = file_format
names = '.'.join(names)
return names
def change_folder_name(file, folder):
names = file.split('/')
names[-2] = folder
names = '/'.join(names)
return names
def get_year_path(year, path):
path_year = path + str(year) + '/'
return path_year
def get_all_files(path, extension):
files = glob.glob(path + '*.' + extension)
return files
def get_extension(year):
if year <= 2010:
return 'TXT'
else:
return 'txt'
|
import argparse
from ..submission import submit_any_ingestion
from ..base import UsingCGAPKeysFile
EPILOG = __doc__
@UsingCGAPKeysFile
def main(simulated_args_for_testing=None):
parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid
description="Submits a gene list",
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'genelist_filename',
help='a local Excel or txt filename that is the gene list'
)
parser.add_argument(
'--institution', '-i',
help='institution identifier',
default=None
)
parser.add_argument(
'--project', '-p',
help='project identifier',
default=None
)
parser.add_argument(
'--server', '-s',
help="an http or https address of the server to use",
default=None
)
parser.add_argument(
'--env', '-e',
help="a CGAP beanstalk environment name for the server to use",
default=None
)
parser.add_argument(
'--validate-only', '-v',
action="store_true",
help="whether to stop after validating without submitting",
default=False
)
args = parser.parse_args(args=simulated_args_for_testing)
return submit_any_ingestion(
ingestion_filename=args.genelist_filename,
ingestion_type='genelist',
institution=args.institution,
project=args.project,
server=args.server,
env=args.env,
validate_only=args.validate_only,
)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from datetime import datetime, timedelta
def read_period_file(file):
""" Opens the .txt file of the period calendar app. This function is meant
for the app Period Tracker of Simple Design Ltd."""
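# Expected input, based on the parsing below: one tab-separated event per line,
# for example (dates are made up):
#   01 Jan, 2020<TAB>Period Starts
#   05 Jan, 2020<TAB>Period Ends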
period_cal = []
periods = []
# read the text file to a list as start-end pairs with datetime
with open(file, "r", encoding="utf-8") as f:
for line in f:
newline = line.split("\t")
if line.strip().split("\t")[1] == "Period Starts":
period_cal.append([])
# add start of period
period_cal[-1].append(datetime.strptime(newline[0], "%d %b, %Y"))
elif line.strip().split("\t")[1] == "Period Ends":
# add end of period
period_cal[-1].append(datetime.strptime(newline[0], "%d %b, %Y"))
# make list of cycle and menstruation times
# enumerate avoids list.index(), which returns the wrong position for duplicate entries
for num, period in enumerate(period_cal[1:], start=1):
if num > 0:
lengths = []
# add first day
lengths.append(period_cal[num][0])
# add length of cycle
lengths.append((period_cal[num][0] - period_cal[num - 1][0]).days)
# add length of menstruation
lengths.append((period_cal[num][1] - period_cal[num][0]).days + 1)
periods.append(lengths)
return periods
def sliding_windows(periods):
""" Split into training and test sets, augment the data. """
x = []
y = []
for p_index, period in enumerate(periods[:-3]):
x.append([])
x[-1].append([period[-2], period[-1]])
x[-1].append([periods[p_index + 1][-2], periods[p_index + 1][-1]])
x[-1].append([periods[p_index + 2][-2], periods[p_index + 2][-1]])
y.append([periods[p_index + 3][-2], periods[p_index + 3][-1]])
assert len(x) == len(y)
return x, y
def make_train_test_sets(periods):
""" Split into training and test sets, augment the data. """
x, y = sliding_windows(periods)
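# "Augmentation" here is plain replication: the window list is repeated 5 times
# before the 80/20 train/test split below.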
x = x * 5
y = y * 5
train_size = int(len(y) * 0.8)
train_x = np.array(x[0:train_size])
train_y = np.array(y[0:train_size])
test_x = np.array(x[train_size : len(x)])
test_y = np.array(y[train_size : len(y)])
# the last period of the train set, so that we can print a date on the
# predicted periods of the test set
last_known_period = (periods*5)[train_size][0]
return train_x, train_y, test_x, test_y, last_known_period
def evaluate_predictions(test_y, predictions):
""" Evaluate on the test set. """
assert len(test_y) == len(predictions)
right_cycle = 0
right_menstr = 0
for idx, y in enumerate(test_y):
if y[0] == predictions[idx][0]:
right_cycle += 1
if y[1] == predictions[idx][1]:
right_menstr += 1
return right_cycle / len(test_y), right_menstr / len(test_y)
def print_predictions(last_known_period, predictions):
# add the first predicted period
next_periods = [[
last_known_period + timedelta(days = predictions[0][0]),
last_known_period + timedelta(days = predictions[0][0] + predictions[0][1]),
predictions[0][1]
]]
# add the next ones
for period in predictions[1:]:
last_period = next_periods[-1]
next_periods.append([
last_period[0] + timedelta(days = period[0]),
last_period[0] + timedelta(days = period[0] + period[1]),
period[1]
])
for num, period in enumerate(next_periods):
print(str(num) + ". From " + period[0].strftime('%d.%m.%Y') + \
" to " + period[1].strftime('%d.%m.%Y') + ", length: " + str(period[2]))
return next_periods
|
import numpy as np
from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
from pymoo.operators.sampling.random_sampling import FloatRandomSampling
from pymoo.problems.single import Rastrigin
problem = Rastrigin(n_var=30)
crossover = SimulatedBinaryCrossover(eta=20)
pop = FloatRandomSampling().do(problem, 2)
parents = np.array([[0, 1]])
off = crossover.do(problem, pop, parents)
print(off)
ind_a = pop[0]
ind_b = pop[1]
off = crossover.do(problem, ind_a, ind_b)
print(off)
|
>>> import sqlite3
>>> con = sqlite3.connect('population2.db')
>>> cur=con.cursor()
>>> cur.execute('''CREATE TABLE PopByRegion (
... Region TEXT NOT NULL,
... Population INTEGER NOT NULL,
... PRIMARY KEY (Region))''')
<sqlite3.Cursor object at 0x7fc0c81907a0>
>>> cur.execute('''
... CREATE TABLE PopByCountry(
... Region TEXT NOT NULL,
... Country TEXT NOT NULL,
... Population INTEGER NOT NULL,
... CONSTRAINT CountryKey PRIMARY KEY (Region, Country))''')
>>> con.commit()
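>>> # Illustrative follow-up (the row values are made up):
>>> cur.execute('INSERT INTO PopByRegion VALUES (?, ?)', ("Central Africa", 330993))
<sqlite3.Cursor object at 0x7fc0c81907a0>
>>> cur.execute('SELECT Region, Population FROM PopByRegion')
<sqlite3.Cursor object at 0x7fc0c81907a0>
>>> cur.fetchall()
[('Central Africa', 330993)]
>>> con.commit()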
>>> con.close()
|
import time
def format_seconds(seconds):
return time.strftime("%H:%M:%S", time.gmtime(seconds))
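# e.g. format_seconds(3661) -> "01:01:01"; values of a day or more wrap around,
# since strftime/gmtime only expose the time-of-day fields.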
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import filter
from future import standard_library
standard_library.install_aliases()
from vcfx import util
from vcfx.field.nodes import Unknown
M_NAMES = [
"address", "calendar", "communication",
"custom", "explanatory", "general",
"geographical", "identification",
"organizational", "security"
]
modules = util.get_modules(M_NAMES, __path__, __name__)
all_nodes = []
for m in modules:
all_nodes += util.filter_defined_cls(m)
all_nodes.append(Unknown)
def get_field_by_key(key=None):
found = list(filter(lambda x: x.KEY == key, all_nodes))
if len(found) == 0:
return Unknown
return found[0]
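# Example (the key value is hypothetical -- whatever KEY a node class in the
# submodules defines, e.g. "FN" for a formatted-name field):
#
#     node_cls = get_field_by_key("FN")   # falls back to Unknown if no match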
__all__ = [
"all_nodes",
"get_field_by_key",
]
|