| code (string, lengths 2 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
from direct.directnotify import DirectNotifyGlobal
from toontown.parties.DistributedPartyActivityAI import DistributedPartyActivityAI
from direct.task import Task
import PartyGlobals
class DistributedPartyJukeboxActivityBaseAI(DistributedPartyActivityAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedPartyJukeboxActivityBaseAI")
def __init__(self, air, parent, activityTuple):
DistributedPartyActivityAI.__init__(self, air, parent, activityTuple)
self.music = PartyGlobals.PhaseToMusicData40
self.queue = []
self.owners = []
self.currentToon = 0
self.playing = False
def delete(self):
taskMgr.remove('playSong%d' % self.doId)
DistributedPartyActivityAI.delete(self)
def setNextSong(self, song):
avId = self.air.getAvatarIdFromSender()
phase = self.music.get(song[0])
if avId != self.currentToon:
self.air.writeServerEvent('suspicious',avId,'Toon tried to set song without using the jukebox!')
if not phase:
self.air.writeServerEvent('suspicious',avId,'Toon supplied invalid phase for song!')
return
if not phase.has_key(song[1]):
self.air.writeServerEvent('suspicious',avId,'Toon supplied invalid song name!')
return
if avId in self.owners:
self.queue[self.owners.index(avId)] = song
else:
self.queue.append(song)
self.owners.append(avId)
for toon in self.toonsPlaying:
self.sendUpdateToAvatarId(toon, 'setSongInQueue', [song])
if not self.playing:
#stop default party music...
self.d_setSongPlaying([0, ''], 0)
self.__startPlaying()
def __startPlaying(self):
if len(self.queue) == 0:
#start default party music!
self.d_setSongPlaying([13, 'party_original_theme.ogg'], 0)
self.playing = False
return
self.playing = True
#get song information....
details = self.queue.pop(0)
owner = self.owners.pop(0)
songInfo = self.music[details[0]][details[1]]
#play song!
self.d_setSongPlaying(details, owner)
taskMgr.doMethodLater(songInfo[1]*PartyGlobals.getMusicRepeatTimes(songInfo[1]), self.__pause, 'playSong%d' % self.doId, extraArgs=[])
def __pause(self):
#stop music!
self.d_setSongPlaying([0, ''], 0)
#and hold.
taskMgr.doMethodLater(PartyGlobals.MUSIC_GAP, self.__startPlaying, 'playSong%d' % self.doId, extraArgs=[])
def toonJoinRequest(self):
avId = self.air.getAvatarIdFromSender()
if self.currentToon:
self.sendUpdateToAvatarId(avId, 'joinRequestDenied', [1])
return
self.currentToon = avId
taskMgr.doMethodLater(PartyGlobals.JUKEBOX_TIMEOUT, self.__removeToon, 'removeToon%d' % self.doId, extraArgs=[])
self.toonsPlaying.append(avId)
self.updateToonsPlaying()
def toonExitRequest(self):
pass
def toonExitDemand(self):
avId = self.air.getAvatarIdFromSender()
if avId != self.currentToon:
return
taskMgr.remove('removeToon%d' % self.doId)
self.currentToon = 0
self.toonsPlaying.remove(avId)
self.updateToonsPlaying()
def __removeToon(self):
if not self.currentToon:
return
self.toonsPlaying.remove(self.currentToon)
self.updateToonsPlaying()
self.currentToon = 0
def d_setSongPlaying(self, details, owner):
self.sendUpdate('setSongPlaying', [details, owner])
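# Note: following the usual Panda3D distributed-object convention, the d_ prefix
# means "send this field over the wire"; sendUpdate broadcasts to every client with
# interest in this object, while sendUpdateToAvatarId targets a single toon.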
def queuedSongsRequest(self):
avId = self.air.getAvatarIdFromSender()
if avId in self.owners:
index = self.owners.index(avId)
else:
index = -1
self.sendUpdateToAvatarId(avId, 'queuedSongsResponse', [self.queue, index])
def moveHostSongToTopRequest(self):
avId = self.air.getAvatarIdFromSender()
if avId != self.currentToon:
self.air.writeServerEvent('suspicious',avId,'Toon tried to set song without using the jukebox!')
host = self.air.doId2do[self.parent].hostId
if avId != host:
self.air.writeServerEvent('suspicious',avId,'Toon tried to move the host\'s song to the top!')
return
if not host in self.owners:
self.air.writeServerEvent('suspicious',avId,'Host tried to move non-existent song to the top of the queue!')
return
index = self.owners.index(host)
self.owners.remove(host)
song = self.queue.pop(index)
self.owners.insert(0, host)
self.queue.insert(0, song)
for toon in self.toonsPlaying:
self.sendUpdateToAvatarId(toon, 'moveHostSongToTop', [])
| ToonTownInfiniteRepo/ToontownInfinite | toontown/parties/DistributedPartyJukeboxActivityBaseAI.py | Python | mit | 4,954 |
import sys, unittest, os
sys.path.append(os.path.realpath(os.path.dirname(__file__))+'/lib')
tests_folder = os.path.realpath(os.path.dirname(__file__))+'/tests'
from tests import ConsoleTestRunner
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
pattern='test_*.py'
for dirname, dirnames, filenames in os.walk(tests_folder):
for path in dirnames:
path=dirname+'/'+path
for all_test_suite in unittest.defaultTestLoader.discover(path, pattern=pattern, top_level_dir=path):
for test_suite in all_test_suite:
suite.addTest(test_suite)
return suite
if __name__ == '__main__':
os.environ['ENVIRONMENT'] = 'test'
unittest.main(verbosity=2, exit=False, testRunner=ConsoleTestRunner)
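# Usage note: running "python test.py" walks ./tests for test_*.py modules and
# executes them with the ConsoleTestRunner bundled under ./lib.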
| creative-workflow/pi-setup | test.py | Python | mit | 748 |
"""
WSGI config for bball_intel project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bball_intel.settings.base")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
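# Deployment sketch (assumption: gunicorn is installed and the command is run from
# the outer bball_intel directory so the package is importable):
#   gunicorn bball_intel.wsgi:application
# Cling wraps the WSGI callable so dj-static can also serve static files.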
| ltiao/basketball-intelligence | bball_intel/bball_intel/wsgi.py | Python | mit | 437 |
import re
import sys
import requests
from django.conf import settings
from django.core.management import BaseCommand
from requests.exceptions import InvalidJSONError, RequestException
from .import_measures import BadRequest
from .import_measures import Command as ImportMeasuresCommand
from .import_measures import ImportLog, relativedelta
class Command(BaseCommand):
def handle(self, github_url, **options):
try:
measure_id = import_preview_measure(github_url)
except BadRequest as e:
# We want these errors to be visble to users who run via ebmbot but the only
# way to achieve that is to write them to stderr and exit 0 :(
self.stdout.write(
f"Importing measure preview failed for {github_url}\n\n{e.message}"
)
sys.exit(0)
measure_url = f"https://openprescribing.net/measure/{measure_id}/"
self.stdout.write(
f"Measure can be previewed at:\n{measure_url}\n\n"
f"When you've finished remember to delete the preview with:\n"
f"@ebmbot op measures delete_preview {measure_id}"
)
def add_arguments(self, parser):
parser.add_argument("github_url")
def import_preview_measure(github_url):
measure_id, json_url = get_id_and_json_url(github_url)
measure_def = fetch_measure_def(json_url)
measure_def["id"] = measure_id
measure_def = make_preview_measure(measure_def)
import_measure(measure_def)
return measure_def["id"]
def get_id_and_json_url(github_url):
match = re.match(
r"^"
r"https://github\.com/ebmdatalab/openprescribing/blob/"
r"(?P<git_ref>[^/\.]+)"
r"/openprescribing/measure_definitions/"
r"(?P<measure_id>[^/\.]+)"
r"\.json"
r"$",
github_url,
)
if not match:
raise BadRequest(
"Expecting a URL in the format:\n"
"https://github.com/ebmdatalab/openprescribing/blob/<GIT_REF>/"
"openprescribing/measure_definitions/<MEASURE_ID>.json\n"
"\n"
"You can get this URL by finding the measure file in your branch on "
"Github:\n"
"https://github.com/ebmdatalab/openprescribing/branches\n"
"\n"
"Or if you have a PR open you can go to the Files tab, click the three "
"dots next to the measure filename and select 'View file'"
)
git_ref = match.group("git_ref")
measure_id = match.group("measure_id")
json_url = (
f"https://raw.githubusercontent.com/ebmdatalab/openprescribing/"
f"{git_ref}/openprescribing/measure_definitions/{measure_id}.json"
)
return measure_id, json_url
def fetch_measure_def(json_url):
try:
response = requests.get(json_url)
response.raise_for_status()
except RequestException as e:
raise BadRequest(f"Failed to fetch measure JSON, got error:\n{e}")
try:
measure_def = response.json()
except InvalidJSONError as e:
raise BadRequest(f"Measure definition was not valid JSON, got error:\n{e}")
return measure_def
def make_preview_measure(measure_def):
measure_def = measure_def.copy()
measure_def["id"] = settings.MEASURE_PREVIEW_PREFIX + measure_def["id"]
measure_def["name"] = f"PREVIEW: {measure_def['name']}"
measure_def["tags"] = []
measure_def["include_in_alerts"] = False
return measure_def
def import_measure(measure_def):
end_date = ImportLog.objects.latest_in_category("prescribing").current_at
start_date = end_date - relativedelta(years=5)
command = ImportMeasuresCommand()
command.check_definitions([measure_def], start_date, end_date, verbose=False)
command.build_measures(
[measure_def],
start_date,
end_date,
verbose=False,
options={
"measure": measure_def["id"],
"definitions_only": False,
"bigquery_only": False,
},
)
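# Illustration of the URL transform above (hypothetical branch and measure id):
# >>> get_id_and_json_url(
# ...     "https://github.com/ebmdatalab/openprescribing/blob/my-branch/"
# ...     "openprescribing/measure_definitions/my_measure.json")
# ('my_measure',
#  'https://raw.githubusercontent.com/ebmdatalab/openprescribing/my-branch/openprescribing/measure_definitions/my_measure.json')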
| ebmdatalab/openprescribing | openprescribing/frontend/management/commands/preview_measure.py | Python | mit | 4,017 |
import unittest
from fire_risk.models import DIST
class TestDISTModel(unittest.TestCase):
def test_dist_import(self):
floor_extent = False
results = {'floor_of_origin': 126896L,
'beyond': 108959L,
'object_of_origin': 383787L,
'room_of_origin': 507378L,
'building_of_origin': 529300L}
dist = DIST(floor_extent=floor_extent, **results)
self.assertAlmostEqual(dist.gibbs_sample(), 32.0, delta=4)
if __name__ == '__main__':
unittest.main()
| garnertb/fire-risk | tests/models/tests.py | Python | mit | 559 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
from srxraylib.plot.gol import plot
from oasys.util.oasys_util import write_surface_file
from srxraylib.metrology.profiles_simulation import slopes
# def transform_data(file_name):
#
# """First chapuza to create a file similar to FEA"""
#
# df = pd.read_csv(file_name, sep=';', header=None, skiprows=23)
# # new columns #
# df.columns = ['x(m)', 'y(m)', 'uz(m)']
#
# new_col = ['z(m)','ux(m)','uy(m)']
# # adding zeros for each new column
# for col in new_col:
# df[col] = 0.0
#
# # reordering the columns #
#
# cols = df.columns.tolist()
#
# # order to be like FEA ESRF #
# cols = cols[:2]+cols[3:4]+cols[-2:]+cols[2:3]
#
# df = df[cols]
#
# return df
#
# def get_line(file_name, row = 'central'):
# """Function to get a profile file for a given Sagittal line
# of a mirror 2D measurements"""
#
# df = pd.read_csv(file_name, sep=';', header=None, skiprows=23)
#
# df.columns = ['x(m)', 'y(m)', 'z(m)']
#
# #sagittal_rows = df[df.duplicated(['y(m)'])]
# #print(sagittal_rows)
#
# rows_shape = df.pivot_table(columns=['y(m)'], aggfunc='size')
#
# n_rows = rows_shape.size
#
# if row == 'central':
# n = int(n_rows/2)
# elif (isinstance(row, int) == True) and (row < n_rows):
# n = row
# else:
# raise RuntimeError(f'ERROR: {row} is not an integer number or is higher than the number of rows {n_rows}')
#
# #print(rows_shape.index[n])
#
# sub_df = df[df['y(m)'] == rows_shape.index[n]]
#
# return sub_df
def get_shadow_h5(file_name):
"""Function to get an h5 file with OASYS structure
from 2D measurements """
df = pd.read_csv(file_name, sep=';', header=None, comment='#', skiprows=1)
df.columns = ['x(m)', 'y(m)', 'z(m)']
# this part is to get the ordinates and the number of abscissas for each
rows_shape = df.pivot_table(columns=['y(m)'], aggfunc='size')
#print(rows_shape)
#n_rows = rows_shape.size
#print(n_rows)
x_coors = []
x_mins = []
x_maxs = []
z_heights = []
for i,y in enumerate(rows_shape.index):
sub_df = df[df['y(m)'] == y]
x_coors.append(np.array(sub_df['x(m)']))
x_mins.append(x_coors[i][0])
x_maxs.append(x_coors[i][-1])
z_heights.append(np.array(sub_df['z(m)']))
# checking that all coordinates along the mirror have the same steps #
if (all(x==x_mins[0] for x in x_mins)) and (all(x==x_maxs[0] for x in x_maxs)):
print("All elements in x_coors are the same")
x = x_coors[0]
y = rows_shape.index
else:
#TODO: define coordinates along the mirror and interpolate all#
#z for all y coord #
pass
#print(z_heights)
return np.array(x), np.array(y), np.array(z_heights)
# def app_gaussian(z, sigma_0= 10, sigma_1 = 10):
#
# """Copy paste of Manolos filtering function"""
#
# filtered_z = gaussian_filter(z, (sigma_0,sigma_1), order=0, output=None, mode='nearest', cval=0.0, truncate=4.0)
#
# return filtered_z
#
# def scale_profile(surface, factor):
# """Brief function just to rescale the full surface"""
# z2 = np.copy(surface)
# z2 *= factor
#
# return z2
#
#
# def detrend_best_circle(x,y,z,fitting_domain_ratio=0.5, plotting = False):
#
# """Almost copy paste of Manolos detrend best circle function"""
#
# xm = x.copy()
# zm = z[y.size//2,:]
# print(f'Medium line at {y.size//2}')
# zm.shape = -1
#
# icut = np.argwhere(np.abs(xm) <= fitting_domain_ratio)
# if len(icut) <=5:
# raise Exception("Not enough points for fitting.")
#
# xcut = xm[icut]
# #print(len(xm),len(xcut))
# zmcut = zm[icut]
#
# #print(len(zm), len(zmcut))
#
# xcut.shape = -1
# zmcut.shape = -1
#
# if plotting:
# plot(xm, zm, legend=["original"])
#
# print( np.argwhere(np.isnan(z)))
# print("Fitting interval: [%g,%g] (using %d points)" % (xcut[0],xcut[-1],xcut.size))
#
# coeff = np.polyfit(xcut, np.gradient(zmcut,xcut), deg=1)
#
# # # zfit = coeff[0] * xm + coeff[1]
# radius = 1 / coeff[0]
# #print("Detrending straight line on sloped (axis=%d): zfit = %g * coordinate + %g " % (axis, coeff[1], coeff[0]))
# print("Radius of curvature: %g m" % (1.0 / coeff[0]))
#
# if radius >= 0:
# zfit = radius - np.sqrt(radius ** 2 - xm ** 2)
# else:
# zfit = radius + np.sqrt(radius ** 2 - xm ** 2)
# if plotting:
# plot(xm, zfit, legend=["fit"])
# #plot(xcut, zmcut, xm, zfit, legend=["cut","fit"])
#
# #print(len(zfit))
#
# plot(xm, zm-zfit, legend=["detrended"])
#
# for i in range(z.shape[0]):
# z[i,:] -= zfit
#
#
# nx, ny = z.shape
# z = z - (z[nx//2,ny//2])
#
# # print(f" Slope error is {round(z[:, 0].std(), 6)}")
#
# return xm, z
def plot2d(x,y,data):
plt.pcolormesh(x,y,data, cmap=plt.cm.viridis)
plt.colorbar().ax.tick_params(axis='y',labelsize=12)
plt.ylabel("Vertical [mm]",fontsize=12)
plt.xlabel("Horizontal [mm]",fontsize=12)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.show()
if __name__ == '__main__':
file_name = 'ring256_TypbeB_F127001_frontside_ontissue_meas2__avg_2D.txt'
x, y, z = get_shadow_h5(file_name)
print(z.shape, x.shape, y.shape, z.min(), z.max())
from srxraylib.plot.gol import plot_image
plot_image(z*1e6, y*1e3, x*1e3, aspect="auto")
# x,z = detrend_best_circle(x,y,z,fitting_domain_ratio=0.5, plotting=True)
#
# print(z.shape)
# #plot2d(x,y,z)
#
# z2 = app_gaussian(z, sigma_0= 6, sigma_1 = 2)
#
# z3 = scale_profile(z2,1)
#
# #plot2d(x,y,z)
slp = slopes(z, y, x, silent=0, return_only_rms=0)
#
# slp_y = np.round(slp[1][1]*1e6, 3)
output_filename = f'ring256.h5'
# plot(x,z[y.size//2,:],x,z[y.size//2,:],legend=["detrended","Gauss_filtered"])
#
# plot(x,np.gradient(z[y.size//2,:],x), legend=["Slope errors"])
write_surface_file(z.T, y, x, output_filename, overwrite=True)
print("write_h5_surface: File for OASYS " + output_filename + " written to disk.")
print(">>>>>", z.T.shape, y.shape, x.shape,)
| srio/shadow3-scripts | METROLOGY/surface2d_to_hdf5.py | Python | mit | 6,588 |
from rest_framework.decorators import list_route
from rest_framework.permissions import IsAuthenticated
from rest_framework import viewsets
from .models import User
from .serializers import UserSerializer
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
http_method_names = ['get', 'patch']
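# Wiring sketch (assumption: this goes in the project urls.py; DefaultRouter is the
# standard DRF router):
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'users', UserViewSet)
#   urlpatterns = router.urls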
| mnazim/django-rest-kickstart | users/views.py | Python | mit | 362 |
''' Wrapper around session access. Keep track of session madness as the app grows. '''
import logging
logger = logging.getLogger("pyfb")
FB_ACCESS_TOKEN = "fb_access_token"
FB_EXPIRES = "fb_expires"
FB_USER_ID = "fb_user_id"
def get_fb_access_token(request, warn=True):
token = request.session.get(FB_ACCESS_TOKEN)
if not token and warn:
logger.warn("pyfb: No access token found in session")
return token
def get_fb_expires(request, warn=True):
expires = request.session.get(FB_EXPIRES)
if not expires and warn:
logger.warn("pyfb: No 'expires' found in session")
return expires
def get_fb_user_id(request, warn=True):
user_id = request.session.get(FB_USER_ID)
if not user_id and warn:
logger.warn("pyfb: No user_id found in session")
return user_id
def set_fb_access_token(request, access_token):
request.session[FB_ACCESS_TOKEN] = access_token
def set_fb_expires(request, expires):
request.session[FB_EXPIRES] = expires
def set_fb_user_id(request, user_id):
request.session[FB_USER_ID] = user_id
| bdelliott/pyfb | pyfb_django/session.py | Python | mit | 1,100 |
#!/usr/bin/python
"""test_Read.py to test the Read class.
Requires:
python 2 (https://www.python.org/downloads/)
nose 1.3 (https://nose.readthedocs.org/en/latest/)
Joy-El R.B. Talbot Copyright (c) 2014
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from nose.tools import raises
from Read import Read
from MetageneError import MetageneError
##TODO: test set_sam_tag method
##TODO: test set_chromosome_sizes
cigar_string = {}
bad_cigar_string = {}
bitwise_flag = {}
bad_bitwise_flag = {}
good_input = {}
bad_input = {}
chromosome_conversion = {"1": "chr1", "2": "chr2"}
def setup():
"""Create fixtures"""
# define cigar strings; value: ((args for build_positions), expected_result)
cigar_string['full_match'] = ((1, "10M", "*"), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
cigar_string['insertion'] = ((1, "5M4I5M", "*"), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
cigar_string['deletion'] = ((1, "5M4D5M", "*"), [1, 2, 3, 4, 5, 10, 11, 12, 13, 14])
cigar_string['gapped_match'] = ((1, "5M3N5M", "*"), [1, 2, 3, 4, 5, 9, 10, 11, 12, 13])
cigar_string['softclipped_match'] = ((4, "3S5M", "*"), [4, 5, 6, 7, 8])
cigar_string['hardclipped_match'] = ((4, "3H5M3H", "*"), [4, 5, 6, 7, 8])
cigar_string['padded_match'] = ((1, "3P5M", "*"), [4, 5, 6, 7, 8])
cigar_string['mismatch'] = ((1, "5=1X3=", "*"), [1, 2, 3, 4, 5, 6, 7, 8, 9])
cigar_string['no_cigar_match'] = ((1, "*", "aaaaa"), [1, 2, 3, 4, 5])
bad_cigar_string['unknown_length'] = ((1, "*", "*"), "raise MetageneError")
bad_cigar_string['illegal_cigar'] = ((1, "5M4B", "*"), "raise MetageneError")
bad_cigar_string['misordered_cigar'] = ((1, "M5N4M5", "*"), "raise MetageneError")
# define bitwise flags; value: ((args for parse_sam_bitwise_flag), expected_result(count?, reverse_complemented?))
bitwise_flag['unmapped'] = ((int("0b000000000100", 2),), (False, False))
bitwise_flag['unmapped_withflags'] = ((int("0b100111011101", 2),), (False, True))
bitwise_flag['plus_strand'] = ((int("0b000000000000", 2),), (True, False))
bitwise_flag['minus_strand'] = ((int("0b000000010000", 2),), (True, True))
bitwise_flag['multiple_segments'] = ((int("0b000000000001", 2),), (True, False))
# try various default and user-changed boolean flags
bitwise_flag['count_secondary_alignment'] = ((int("0b000100000000", 2),), (True, False))
bitwise_flag['skip_secondary_alignment'] = (
(int("0b000100000000", 2), False, False, False, True, False, False), (False, False))
bitwise_flag['skip_failed_quality_control'] = ((int("0b001000000000", 2),), (False, False))
bitwise_flag['count_failed_quality_control'] = (
(int("0b001000000000", 2), True, True, False, True, False, False), (True, False))
bitwise_flag['skip_PCR_optical_duplicate'] = ((int("0b010000000000", 2),), (False, False))
bitwise_flag['count_PCR_optical_duplicate'] = (
(int("0b010000000000", 2), True, False, True, True, False, False), (True, False))
bitwise_flag['count_supplementary_alignment'] = ((int("0b100000000000", 2),), (True, False))
bitwise_flag['skip_supplementary_alignment'] = (
(int("0b100000000000", 2), True, False, False, False, False, False), (False, False))
bitwise_flag['count_only_start_success'] = (
(int("0b000001000001", 2), True, False, False, True, True, False), (True, False))
bitwise_flag['count_only_start_fail'] = (
(int("0b000000000001", 2), True, False, False, True, True, False), (False, False))
bitwise_flag['count_only_end_success'] = (
(int("0b000010000001", 2), True, False, False, True, False, True), (True, False))
bitwise_flag['count_only_end_fail'] = (
(int("0b000000000001", 2), True, False, False, True, False, True), (False, False))
bad_bitwise_flag['count_only_both'] = (
(int("0b000011000001", 2), True, False, False, True, True, True), ("Raise MetageneError",))
# define good and bad samline inputs
good_input['no_tags'] = (0, "chr1", 200, "10M", 10, 1, 1, "+")
good_input['plus_strand_match'] = (0, "chr1", 200, "10M", 10, 2, 4, "+")
good_input['minus_strand_match'] = (16, "chr1", 200, "10M", 10, 2, 4, "-")
good_input['no_match'] = (4, "*", 0, "*", 10, 1, 1, ".")
sample = ["NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4"]
Read.process_set_sam_tag(sample, count_tag=True, tag_regex='NA:i:(\d+)')
Read.process_set_sam_tag(sample, count_tag=True, tag_regex='NH:i:(\d+)')
def test_build_positions():
for test in cigar_string:
yield (check_build_positions, test, cigar_string[test])
def check_build_positions(test, (values, expected)):
position_array = Read.build_positions(*values)
test_description = "\nTest: \t{}\n".format(test)
test_description += "Expected:\t{}\n".format(expected)
test_description += "Position:\t{}\n".format(position_array)
assert position_array == expected, "{}Error: \tDid not create the expected position array.".format(
test_description)
def test_catch_bad_cigar_input():
for test in bad_cigar_string:
yield (check_catch_bad_cigar_input, test, bad_cigar_string[test])
@raises(MetageneError)
def check_catch_bad_cigar_input(test, (values, expected)):
print Read.build_positions(*values)
def test_parse_sam_bitwise_flag():
for test in bitwise_flag:
yield (check_parse_sam_bitwise_flag, test, bitwise_flag[test])
def check_parse_sam_bitwise_flag(test, (values, expected)):
bitwise_result = Read.parse_sam_bitwise_flag(*values)
test_description = "\nTest: \t{}\n".format(test)
test_description += "Expected:\t{}\n".format(expected)
test_description += "Position:\t{}\n".format(bitwise_result)
assert bitwise_result == expected, "{}Error: \tDid not parse bitwise flag as expected.".format(test_description)
def test_catch_bad_bitwise_input():
for test in bad_bitwise_flag:
yield (check_catch_bad_bitwise_input, test, bad_bitwise_flag[test])
@raises(MetageneError)
def check_catch_bad_bitwise_input(test, (values, expected)):
print Read.parse_sam_bitwise_flag(*values)
def build_samline(bitcode, chromosome, start, cigar, length, abundance, mappings):
"""Return a SAM format line"""
string = "a" * length
return "read\t{}\t{}\t{}\t255\t{}\t*\t0\t0\t{}\t{}\tNH:i:{}\tNA:i:{}".format(
bitcode,
chromosome,
start,
cigar,
string,
string,
mappings,
abundance)
def test_create_read():
for test in good_input:
yield (check_create_read, test, good_input[test])
def check_create_read(test, values):
# create expected result
if int(values[0]) == 4:
expected = "Non-aligning read"
else:
start = int(values[2])
end = int(values[2]) + int(values[4]) - 1
if values[7] == "-":
start = end
end = int(values[2])
expected = "Read at {0}:{1}-{2} on {3} strand; counts for {4:2.3f}:".format(
values[1], # chromosome
start,
end,
values[7], # strand
float(values[5]) / float(values[6])) # abundance / mappings
# build input to test
samline = build_samline(*values[0:-1]) # exclude final value
(created, read) = Read.create_from_sam(samline, chromosome_conversion.values(), count_method='all')
output = str(read).split("\t")[0]
# create description in case test fails
test_description = "\nTest: \t{}\n".format(test)
test_description += "Abundance:\t{}\n".format(Read.has_sam_tag["NA"])
test_description += "Mappings:\t{}\n".format(Read.has_sam_tag["NH"])
test_description += "Sam Line:\t{}\n".format(samline)
test_description += "Expected:\t{}\n".format(expected)
test_description += "Position:\t{}\n".format(output)
assert output == expected, "{}Error: \tDid not create expected read.".format(test_description)
def test_catch_bad_input():
for test in bad_input:
yield (check_catch_bad_input, test, bad_input[test])
@raises(MetageneError)
def check_catch_bad_input(test, samline):
print Read(samline)
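# Usage note (the header docstring assumes Python 2 with nose 1.3):
#   nosetests test_Read.py
# Each test_* function is a nose generator that yields one check per fixture
# built in setup().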
| Joy-El/metagene_analysis | test_Read.py | Python | mit | 9,434 |
"""
@file
@brief This extension contains various functionalities to help unittesting.
"""
import os
import stat
import sys
import re
import warnings
import time
import importlib
from contextlib import redirect_stdout, redirect_stderr
from io import StringIO
def _get_PyLinterRunV():
# Separate function to speed up import.
from pylint.lint import Run as PyLinterRun
from pylint import __version__ as pylint_version
if pylint_version >= '2.0.0':
PyLinterRunV = PyLinterRun
else:
PyLinterRunV = lambda *args, do_exit=False: PyLinterRun( # pylint: disable=E1120, E1123
*args, exit=do_exit) # pylint: disable=E1120, E1123
return PyLinterRunV
def get_temp_folder(thisfile, name=None, clean=True, create=True,
persistent=False, path_name="tpath"):
"""
Creates and returns a local temporary folder to store files
when unit testing.
@param thisfile use ``__file__`` or the function which runs the test
@param name name of the temporary folder
@param clean if True, clean the folder first; it can also be a function
called to determine whether or not the folder should be
cleaned
@param create if True, creates it (empty if clean is True)
@param persistent if True, create a folder at root level to reduce path length,
the function checks the ``MAX_PATH`` variable and
shortens the test folder if *max_path* is True on :epkg:`Windows`;
on :epkg:`Linux`, it creates a folder three levels up
@param path_name test path used when *max_path* is True
@return temporary folder
The function extracts the file which runs this test and will name
the temporary folder based on the name of the method; *name* must be None in that case.
Parameter *clean* can be a function.
Signature is ``def clean(folder)``.
"""
if name is None:
name = thisfile.__name__
if name.startswith("test_"):
name = "temp_" + name[5:]
elif not name.startswith("temp_"):
name = "temp_" + name
if not isinstance(thisfile, str):  # a string is already a path (``__file__``)
    thisfile = os.path.abspath(thisfile.__func__.__code__.co_filename)
final = os.path.split(name)[-1]
if not final.startswith("temp_") and not final.startswith("temp2_"):
raise NameError( # pragma: no cover
"the folder '{0}' must begin with temp_".format(name))
local = os.path.join(
os.path.normpath(os.path.abspath(os.path.dirname(thisfile))), name)
if persistent:
if sys.platform.startswith("win"): # pragma: no cover
from ctypes.wintypes import MAX_PATH
if MAX_PATH <= 300:
local = os.path.join(os.path.abspath("\\" + path_name), name)
else:
local = os.path.join(
local, "..", "..", "..", "..", path_name, name)
else:
local = os.path.join(local, "..", "..", "..",
"..", path_name, name)
local = os.path.normpath(local)
if name == local:
raise NameError( # pragma: no cover
"The folder '{0}' must be relative, not absolute".format(name))
if not os.path.exists(local):
if create:
os.makedirs(local)
mode = os.stat(local).st_mode
nmode = mode | stat.S_IWRITE
if nmode != mode:
os.chmod(local, nmode) # pragma: no cover
else:
if (callable(clean) and clean(local)) or (not callable(clean) and clean):
# delayed import to speed up import time of pycode
from ..filehelper.synchelper import remove_folder
remove_folder(local)
time.sleep(0.1)
if create and not os.path.exists(local):
os.makedirs(local)
mode = os.stat(local).st_mode
nmode = mode | stat.S_IWRITE
if nmode != mode:
os.chmod(local, nmode) # pragma: no cover
return local
def _extended_refactoring(filename, line): # pragma: no cover
"""
Private function which performs extra checks
when refactoring :epkg:`pyquickhelper`.
@param filename filename
@param line line
@return None or error message
"""
if "from pyquickhelper import fLOG" in line:
if "test_code_style" not in filename:
return "issue with fLOG"
if "from pyquickhelper import noLOG" in line:
if "test_code_style" not in filename:
return "issue with noLOG"
if "from pyquickhelper import run_cmd" in line:
if "test_code_style" not in filename:
return "issue with run_cmd"
if "from pyquickhelper import get_temp_folder" in line:
if "test_code_style" not in filename:
return "issue with get_temp_folder"
return None
class PEP8Exception(Exception):
"""
Code or style issues.
"""
pass
def check_pep8(folder, ignore=('E265', 'W504'), skip=None,
complexity=-1, stop_after=100, fLOG=None,
pylint_ignore=('C0103', 'C1801',
'R0201', 'R1705',
'W0108', 'W0613',
'W0107', 'C0415',
'C0209'),
recursive=True, neg_pattern=None, extended=None,
max_line_length=143, pattern=".*[.]py$",
run_lint=True, verbose=False, run_cmd_filter=None):
"""
Checks that :epkg:`PEP8` is respected:
the function calls command :epkg:`pycodestyle`
on a specific folder.
@param folder folder to look into
@param ignore list of warnings to skip when raising an exception if
:epkg:`PEP8` is not verified, see also
`Error Codes <http://pep8.readthedocs.org/en/latest/intro.html#error-codes>`_
@param pylint_ignore ignore :epkg:`pylint` issues, see
:epkg:`pylint error codes`
@param complexity see `check_file <https://pycodestyle.pycqa.org/en/latest/api.html>`_
@param stop_after stop after *stop_after* issues
@param skip skip a warning if a substring in this list is found
@param neg_pattern skip files verifying this regular expressions
@param extended list of tuple (name, function), see below
@param max_line_length maximum allowed length of a line of code
@param recursive look into subfolder
@param pattern only file matching this pattern will be checked
@param run_lint run :epkg:`pylint`
@param verbose :epkg:`pylint` is slow, tells which file is
investigated (but it is even slower)
@param run_cmd_filter some files makes :epkg:`pylint` crashes (``import yaml``),
the test for this is run in a separate process
if the function *run_cmd_filter* returns True for the filename,
*verbose* is set to True in that case
@param fLOG logging function
@return output
Functions mentioned in *extended* take two parameters (file name and line)
and return None, an error message, or a tuple (position in the line, error message).
When the return is not empty, a warning will be added to the ones
printed by :epkg:`pycodestyle`.
A few codes to ignore:
* *E501*: line too long (?? characters)
* *E265*: block comments should have a space after #
* *W504*: line break after binary operator, this one is raised
after the code is modified by @see fn remove_extra_spaces_and_pep8.
The full list is available at :epkg:`PEP8 codes`. In addition,
the function adds its own codes:
* *ECL1*: line too long for a specific reason.
Some errors to disable with :epkg:`pylint`:
* *C0103*: variable name is not conform
* *C0111*: missing function docstring
* *C1801*: do not use `len(SEQUENCE)` to determine if a sequence is empty
* *R0201*: method could be a function
* *R0205*: Class '?' inherits from object, can be safely removed from bases in python3 (pylint)
* *R0901*: too many ancestors
* *R0902*: too many instance attributes
* *R0911*: too many return statements
* *R0912*: too many branches
* *R0913*: too many arguments
* *R0914*: too many local variables
* *R0915*: too many statements
* *R1702*: too many nested blocks
* *R1705*: unnecessary "else" after "return"
* *W0107*: unnecessary pass statements
* *W0108*: Lambda may not be necessary
* *W0613*: unused argument
The full list is available at :epkg:`pylint error codes`.
:epkg:`pylint` was added to check the code.
It produces the list of errors documented at
:epkg:`pylint error codes`.
If *neg_pattern* is empty, it populates with a default value
which skips unnecessary folders:
``".*[/\\\\\\\\]((_venv)|([.]git)|(__pycache__)|(temp_)).*"``.
"""
# delayed import to speed up import time of pycode
import pycodestyle
from ..filehelper.synchelper import explore_folder_iterfile
if fLOG is None:
from ..loghelper.flog import noLOG # pragma: no cover
fLOG = noLOG # pragma: no cover
def extended_checkings(fname, content, buf, extended):
for i, line in enumerate(content):
for name, fu in extended:
r = fu(fname, line)
if isinstance(r, tuple):
c, r = r
else:
c = 1
if r is not None:
buf.write("{0}:{1}:{4} F{2} {3}\n".format(
fname, i + 1, name, r, c))
def fkeep(s):
if len(s) == 0:
return False
if skip is not None:
for kip in skip:
if kip in s:
return False
return True
if max_line_length is not None:
if extended is None:
extended = []
else:
extended = extended.copy()
def check_lenght_line(fname, line):
if len(line) > max_line_length and not line.lstrip().startswith('#'):
if ">`_" in line:
return "line too long (link) {0} > {1}".format(len(line), max_line_length)
if ":math:`" in line:
return "line too long (:math:) {0} > {1}".format( # pragma: no cover
len(line), max_line_length)
if "ERROR: " in line:
return "line too long (ERROR:) {0} > {1}".format( # pragma: no cover
len(line), max_line_length)
return None
extended.append(("[ECL1]", check_lenght_line))
if ignore is None:
ignore = tuple()
elif isinstance(ignore, list):
ignore = tuple(ignore)
if neg_pattern is None:
neg_pattern = ".*[/\\\\]((_venv)|([.]git)|(__pycache__)|(temp_)|([.]egg)|(bin)).*"
try:
regneg_filter = None if neg_pattern is None else re.compile(
neg_pattern)
except re.error as e: # pragma: no cover
raise ValueError("Unable to compile '{0}'".format(neg_pattern)) from e
# pycodestyle
fLOG("[check_pep8] code style on '{0}'".format(folder))
files_to_check = []
skipped = []
buf = StringIO()
with redirect_stdout(buf):
for file in explore_folder_iterfile(folder, pattern=pattern,
recursive=recursive):
if regneg_filter is not None:
if regneg_filter.search(file):
skipped.append(file)
continue
if file.endswith("__init__.py"):
ig = ignore + ('F401',)
else:
ig = ignore
if file is None:
raise RuntimeError( # pragma: no cover
"file cannot be None")
if len(file) == 0:
raise RuntimeError( # pragma: no cover
"file cannot be empty")
# code style
files_to_check.append(file)
try:
style = pycodestyle.StyleGuide(
ignore=ig, complexity=complexity, format='pylint',
max_line_length=max_line_length)
res = style.check_files([file])
except TypeError as e: # pragma: no cover
ext = "This is often due to an instruction from . import... The imported module has no name."
raise TypeError("Issue with pycodesyle for module '{0}' ig={1} complexity={2}\n{3}".format(
file, ig, complexity, ext)) from e
if extended is not None:
with open(file, "r", errors="ignore") as f:
content = f.readlines()
extended_checkings(file, content, buf, extended)
if res.total_errors + res.file_errors > 0:
res.print_filename = True
lines = [_ for _ in buf.getvalue().split("\n") if fkeep(_)]
if len(lines) > stop_after:
raise PEP8Exception( # pragma: no cover
"{0} lines\n{1}".format(len(lines), "\n".join(lines)))
lines = [_ for _ in buf.getvalue().split("\n") if fkeep(_)]
if len(lines) > 10:
raise PEP8Exception( # pragma: no cover
"{0} lines\n{1}".format(len(lines), "\n".join(lines)))
if len(files_to_check) == 0:
mes = skipped[0] if skipped else "-no skipped file-"
raise FileNotFoundError( # pragma: no cover
"No file found in '{0}'\n pattern='{1}'\nskipped='{2}'".format(
folder, pattern, mes))
# pylint
if not run_lint:
return "\n".join(lines)
fLOG("[check_pep8] pylint with {0} files".format(len(files_to_check)))
memout = sys.stdout
try:
fLOG('', OutputStream=memout)
regular_print = False
except TypeError: # pragma: no cover
regular_print = True
def myprint(s):
"local print, chooses the right function"
if regular_print: # pragma: no cover
memout.write(s + "\n")
else: # pragma: no cover
fLOG(s, OutputStream=memout)
neg_pat = ".*temp[0-9]?_.*,doc_.*"
if neg_pattern is not None:
neg_pat += ',' + neg_pattern
if run_cmd_filter is not None:
verbose = True # pragma: no cover
PyLinterRunV = _get_PyLinterRunV()
sout = StringIO()
serr = StringIO()
with redirect_stdout(sout):
with redirect_stderr(serr):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
opt = ["--ignore-patterns=" + neg_pat, "--persistent=n",
'--jobs=1', '--suggestion-mode=n', "--score=n",
'--max-args=30', '--max-locals=50', '--max-returns=30',
'--max-branches=50', '--max-parents=25',
'--max-attributes=50', '--min-public-methods=0',
'--max-public-methods=100', '--max-bool-expr=10',
'--max-statements=200',
'--msg-template={abspath}:{line}: {msg_id}: {msg} (pylint)']
if pylint_ignore:
opt.append('--disable=' + ','.join(pylint_ignore))
if max_line_length:
opt.append("--max-line-length=%d" % max_line_length)
if verbose: # pragma: no cover
for i, name in enumerate(files_to_check):
cop = list(opt)
cop.append(name)
if run_cmd_filter is None or not run_cmd_filter(name):
myprint(
"[check_pep8] lint file {0}/{1} - '{2}'\n".format(i + 1, len(files_to_check), name))
PyLinterRunV(cop, do_exit=False)
else:
# delayed import to speed up import time of pycode
from ..loghelper import run_cmd
# runs from command line
myprint(
"[check_pep8] cmd-lint file {0}/{1} - '{2}'\n".format(i + 1, len(files_to_check), name))
cmd = "{0} -m pylint {1}".format(
sys.executable, " ".join('"{0}"'.format(_) for _ in cop))
out = run_cmd(cmd, wait=True)[0]
lines.extend(_ for _ in out.split(
'\n') if _.strip('\r '))
else:
opt.extend(files_to_check)
PyLinterRunV(opt, do_exit=False)
pylint_lines = sout.getvalue().split('\n')
pylint_lines = [
_ for _ in pylint_lines if (
'(pylint)' in _ and fkeep(_) and _[0] != ' ' and len(_.split(':')) > 2)]
pylint_lines = [_ for _ in pylint_lines if not _.startswith(
"except ") and not _.startswith("else:") and not _.startswith(
"try:") and "# noqa" not in _]
lines.extend(pylint_lines)
if len(lines) > 0:
raise PEP8Exception(
"{0} lines\n{1}".format(len(lines), "\n".join(lines)))
return "\n".join(lines)
def add_missing_development_version(names, root, hide=False):
"""
Looks for development version of a given module and add paths to
``sys.path`` after having checked they are working.
@param names name or names of the module to import
@param root folder where to look (assuming all modules location
at the same place in a flat hierarchy)
@param hide hide warnings when importing a module (might be a lot)
@return added paths
"""
# delayed import to speed up import time
from ..loghelper import sys_path_append
if not isinstance(names, list):
names = [names]
root = os.path.abspath(root)
if os.path.isfile(root):
root = os.path.dirname(root)
if not os.path.exists(root):
raise FileNotFoundError(root) # pragma: no cover
spl = os.path.split(root)
py27 = False
if spl[-1].startswith("ut_"):
if "dist_module27" in root:
# python 27
py27 = True
newroot = os.path.join(root, "..", "..", "..", "..")
else:
newroot = os.path.join(root, "..", "..", "..")
else:
newroot = root
newroot = os.path.normpath(os.path.abspath(newroot))
found = os.listdir(newroot)
dirs = [os.path.join(newroot, _) for _ in found]
paths = []
for name in names:
exc = None
try:
if hide:
with warnings.catch_warnings(record=True):
importlib.import_module(name)
else:
importlib.import_module(name)
continue
except ImportError as e: # pragma: no cover
# it requires a path
exc = e
if name not in found:
raise FileNotFoundError( # pragma: no cover
"Unable to find a subfolder '{0}' in '{1}' (py27={3})\nFOUND:\n{2}\nexc={4}".format(
name, newroot, "\n".join(dirs), py27, exc))
if py27: # pragma: no cover
this = os.path.join(newroot, name, "dist_module27", "src")
if not os.path.exists(this):
this = os.path.join(newroot, name, "dist_module27")
else: # pragma: no cover
this = os.path.join(newroot, name, "src")
if not os.path.exists(this):
this = os.path.join(newroot, name)
if not os.path.exists(this): # pragma: no cover
raise FileNotFoundError(
"unable to find a subfolder '{0}' in '{1}' (*py27={3})\nFOUND:\n{2}".format(
this, newroot, "\n".join(dirs), py27))
with sys_path_append(this): # pragma: no cover
if hide:
with warnings.catch_warnings(record=True):
importlib.import_module(name)
else:
importlib.import_module(name)
paths.append(this) # pragma: no cover
return paths
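# Hedged usage sketch for check_pep8 (assumes pyquickhelper, pycodestyle and pylint
# are installed; "src" is a placeholder for your own package folder):
#   report = check_pep8("src", run_lint=False, max_line_length=120,
#                       stop_after=50, fLOG=print)
#   print(report)
# With run_lint=False only the pycodestyle pass runs; a PEP8Exception is raised as
# soon as the collected issues exceed the configured thresholds.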
| sdpython/pyquickhelper | src/pyquickhelper/pycode/utils_tests_helper.py | Python | mit | 20,810 |
import re
from LUIObject import LUIObject
from LUISprite import LUISprite
from LUILabel import LUILabel
from LUIInitialState import LUIInitialState
from LUILayouts import LUIHorizontalStretchedLayout
__all__ = ["LUIInputField"]
class LUIInputField(LUIObject):
""" Simple input field, accepting text input. This input field supports
entering text and navigating. Selecting text is (currently) not supported.
The input field also supports various keyboard shortcuts:
[pos1] Move to the beginning of the text
[end] Move to the end of the text
[arrow_left] Move one character to the left
[arrow_right] Move one character to the right
[ctrl] + [arrow_left] Move to the left, skipping over words
[ctrl] + [arrow_right] Move to the right, skipping over words
[escape] Un-focus input element
"""
re_skip = re.compile(r"\W*\w+\W")
def __init__(self, parent=None, width=200, placeholder=u"Enter some text ..", value=u"", **kwargs):
""" Constructs a new input field. An input field always needs a width specified """
LUIObject.__init__(self, x=0, y=0, solid=True)
self.set_width(width)
self._layout = LUIHorizontalStretchedLayout(parent=self, prefix="InputField", width="100%")
# Container for the text
self._text_content = LUIObject(self)
self._text_content.margin = (5, 7, 5, 7)
self._text_content.clip_bounds = (0,0,0,0)
self._text_content.set_size("100%", "100%")
# Scroller for the text, so we can move right and left
self._text_scroller = LUIObject(parent=self._text_content)
self._text_scroller.center_vertical = True
self._text = LUILabel(parent=self._text_scroller, text="")
# Cursor for the current position
self._cursor = LUISprite(self._text_scroller, "blank", "skin", x=0, y=0, w=2, h=15)
self._cursor.color = (0.5, 0.5, 0.5)
self._cursor.margin.top = 2
self._cursor.z_offset = 20
self._cursor_index = 0
self._cursor.hide()
self._value = value
# Placeholder text, shown when out of focus and no value exists
self._placeholder = LUILabel(parent=self._text_content, text=placeholder, shadow=False,
center_vertical=True, alpha=0.2)
# Various states
self._tickrate = 1.0
self._tickstart = 0.0
self._render_text()
if parent is not None:
self.parent = parent
LUIInitialState.init(self, kwargs)
@property
def value(self):
""" Returns the value of the input field """
return self._value
@value.setter
def value(self, new_value):
""" Sets the value of the input field """
self._value = new_value
self._render_text()
self.trigger_event("changed", self._value)
def clear(self):
""" Clears the input value """
self.value = u""
@property
def cursor_pos(self):
""" Set the cursor position """
return self._cursor_index
@cursor_pos.setter
def cursor_pos(self, pos):
""" Set the cursor position """
if pos >= 0:
self._cursor_index = max(0, min(len(self._value), pos))
else:
self._cursor_index = max(len(self._value) + pos + 1, 0)
self._reset_cursor_tick()
self._render_text()
def on_tick(self, event):
""" Tick handler, gets executed every frame """
frame_time = globalClock.get_frame_time() - self._tickstart
show_cursor = frame_time % self._tickrate < 0.5 * self._tickrate
if show_cursor:
self._cursor.color = (0.5, 0.5, 0.5, 1)
else:
self._cursor.color = (1, 1, 1, 0)
def on_click(self, event):
""" Internal on click handler """
self.request_focus()
def on_mousedown(self, event):
""" Internal mousedown handler """
local_x_offset = self._text.text_handle.get_relative_pos(event.coordinates).x
self.cursor_pos = self._text.text_handle.get_char_index(local_x_offset)
def _reset_cursor_tick(self):
""" Internal method to reset the cursor tick """
self._tickstart = globalClock.get_frame_time()
def on_focus(self, event):
""" Internal focus handler """
self._cursor.show()
self._placeholder.hide()
self._reset_cursor_tick()
self._layout.color = (0.9, 0.9, 0.9, 1)
def on_keydown(self, event):
""" Internal keydown handler. Processes the special keys, and if none are
present, redirects the event """
key_name = event.message
if key_name == "backspace":
self._value = self._value[:max(0, self._cursor_index - 1)] + self._value[self._cursor_index:]
self.cursor_pos -= 1
self.trigger_event("changed", self._value)
elif key_name == "delete":
post_value = self._value[min(len(self._value), self._cursor_index + 1):]
self._value = self._value[:self._cursor_index] + post_value
self.cursor_pos = self._cursor_index
self.trigger_event("changed", self._value)
elif key_name == "arrow_left":
if event.get_modifier_state("alt") or event.get_modifier_state("ctrl"):
self.cursor_skip_left()
else:
self.cursor_pos -= 1
elif key_name == "arrow_right":
if event.get_modifier_state("alt") or event.get_modifier_state("ctrl"):
self.cursor_skip_right()
else:
self.cursor_pos += 1
elif key_name == "escape":
self.blur()
elif key_name == "home":
self.cursor_pos = 0
elif key_name == "end":
self.cursor_pos = len(self.value)
self.trigger_event(key_name, self._value)
def on_keyrepeat(self, event):
""" Internal keyrepeat handler """
self.on_keydown(event)
def on_textinput(self, event):
""" Internal textinput handler """
self._value = self._value[:self._cursor_index] + event.message + \
self._value[self._cursor_index:]
self.cursor_pos = self._cursor_index + len(event.message)
self.trigger_event("changed", self._value)
def on_blur(self, event):
""" Internal blur handler """
self._cursor.hide()
if len(self._value) < 1:
self._placeholder.show()
self._layout.color = (1, 1, 1, 1)
def _render_text(self):
""" Internal method to render the text """
self._text.set_text(self._value)
self._cursor.left = self._text.left + \
self._text.text_handle.get_char_pos(self._cursor_index) + 1
max_left = self.width - 15
if self._value:
self._placeholder.hide()
else:
if not self.focused:
self._placeholder.show()
# Scroll if the cursor is outside of the clip bounds
rel_pos = self.get_relative_pos(self._cursor.get_abs_pos()).x
if rel_pos >= max_left:
self._text_scroller.left = min(0, max_left - self._cursor.left)
if rel_pos <= 0:
self._text_scroller.left = min(0, - self._cursor.left - rel_pos)
def cursor_skip_left(self):
""" Moves the cursor to the left, skipping the previous word """
left_hand_str = ''.join(reversed(self.value[0:self.cursor_pos]))
match = self.re_skip.match(left_hand_str)
if match is not None:
self.cursor_pos -= match.end() - 1
else:
self.cursor_pos = 0
def cursor_skip_right(self):
""" Moves the cursor to the right, skipping the next word """
right_hand_str = self.value[self.cursor_pos:]
match = self.re_skip.match(right_hand_str)
if match is not None:
self.cursor_pos += match.end() - 1
else:
self.cursor_pos = len(self.value)
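# Minimal usage sketch based only on the API defined above (assumption: `root` is
# an existing LUI parent object inside a running Panda3D/LUI setup):
#   field = LUIInputField(parent=root, width=250, placeholder=u"Search ..")
#   field.value = u"hello world"   # sets the text and fires the "changed" event
#   field.cursor_pos = -1          # a negative index places the cursor at the end
#   field.clear()                  # resets the value to the empty string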
| tobspr/LUI | Builtin/LUIInputField.py | Python | mit | 8,056 |
# -----------
# User Instructions:
#
# Modify the search function so that it returns
# a shortest path as follows:
#
# [['>', 'v', ' ', ' ', ' ', ' '],
# [' ', '>', '>', '>', '>', 'v'],
# [' ', ' ', ' ', ' ', ' ', 'v'],
# [' ', ' ', ' ', ' ', ' ', 'v'],
# [' ', ' ', ' ', ' ', ' ', '*']]
#
# Where '>', '<', '^', and 'v' refer to right, left,
# up, and down motions. Note that the 'v' should be
# lowercase. '*' should mark the goal cell.
#
# You may assume that all test cases for this function
# will have a path from init to goal.
# ----------
grid = [[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0]]
init = [0, 0]
goal = [len(grid)-1, len(grid[0])-1]
cost = 1
delta = [[-1, 0 ], # go up
[ 0, -1], # go left
[ 1, 0 ], # go down
[ 0, 1 ]] # go right
delta_name = ['^', '<', 'v', '>']
def search(grid,init,goal,cost):
# ----------------------------------------
# modify code below
# ----------------------------------------
closed = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
closed[init[0]][init[1]] = 1
g_grid = [[' ' for row in range(len(grid[0]))] for col in range(len(grid))] #fill with g values
expand = [[' ' for row in range(len(grid[0]))] for col in range(len(grid))]
x = init[0]
y = init[1]
g = 0
g_grid[x][y] = g
open = [[g, x, y]]
found = False # flag that is set when search is complete
resign = False # flag set if we can't find expand
while not found and not resign:
if len(open) == 0:
resign = True
return 'fail'
else:
open.sort()
open.reverse()
next = open.pop()
x = next[1]
y = next[2]
g = next[0]
if x == goal[0] and y == goal[1]:
found = True
expand[x][y] = '*'
else:
for i in range(len(delta)):
x2 = x + delta[i][0]
y2 = y + delta[i][1]
if x2 >= 0 and x2 < len(grid) and y2 >=0 and y2 < len(grid[0]):
if closed[x2][y2] == 0 and grid[x2][y2] == 0:
g2 = g + cost
g_grid[x2][y2] = g2
open.append([g2, x2, y2])
closed[x2][y2] = 1
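# Backtrack from the goal: walk the g-values downward and, at each predecessor
# cell, stamp the arrow pointing toward the cell we came from; delta_name[(i+2) % 4]
# flips up<->down and left<->right in the delta ordering defined above.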
for n in range(g2, -1, -1):
for i in range(len(delta)):
x2 = x + delta[i][0]
y2 = y + delta[i][1]
if x2 >= 0 and x2 < len(grid) and y2 >=0 and y2 < len(grid[0]):
if g_grid[x2][y2] == (n-1):
expand[x2][y2] = delta_name[(i+2)%4]
x = x2
y = y2
return expand # make sure you return the shortest path
result = search(grid,init,goal,cost)
for row in result:
print(row)
| jjaviergalvez/CarND-Term3-Quizzes | search/print-path.py | Python | mit | 2,943 |
# Mostly from http://peterdowns.com/posts/first-time-with-pypi.html
from distutils.core import setup
setup(
name = 'pmdp',
packages = ['pmdp'],
version = '0.3',
description = 'A poor man\'s data pipeline',
author = 'Dan Goldin',
author_email = 'dangoldin@gmail.com',
url = 'https://github.com/dangoldin/poor-mans-data-pipeline',
download_url = 'https://github.com/dangoldin/poor-mans-data-pipeline/tarball/0.3',
keywords = ['data', 'data-pipeline'],
classifiers = [],
)
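# Release sketch from the linked post (legacy distutils flow; twine is the modern
# replacement):
#   python setup.py sdist upload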
| dangoldin/poor-mans-data-pipeline | setup.py | Python | mit | 491 |
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import pytest
from dataproperty import get_integer_digit, get_number_of_digit
nan = float("nan")
inf = float("inf")
class Test_get_integer_digit:
@pytest.mark.parametrize(
["value", "expected"],
[
[0, 1],
[-0, 1],
[0.99, 1],
[-0.99, 1],
[".99", 1],
["-.99", 1],
[1.01, 1],
[-1.01, 1],
[9.99, 1],
[-9.99, 1],
["9.99", 1],
["-9.99", 1],
["0", 1],
["-0", 1],
[10, 2],
[-10, 2],
[99.99, 2],
[-99.99, 2],
["10", 2],
["-10", 2],
["99.99", 2],
["-99.99", 2],
[100, 3],
[-100, 3],
[999.99, 3],
[-999.99, 3],
["100", 3],
["-100", 3],
["999.99", 3],
["-999.99", 3],
[10000000000000000000, 20],
[-10000000000000000000, 20],
# float not enough precision
[10000000000000000000.99, 20],
[-10000000000000000000.99, 20],
["10000000000000000000", 20],
["-10000000000000000000", 20],
["99999999999999099999.99", 20],
["-99999999999999099999.99", 20],
],
)
def test_normal(self, value, expected):
assert get_integer_digit(value) == expected
@pytest.mark.parametrize(
["value", "expected"],
[
[999999999999999999999999999999.9999999999, 31],
[-999999999999999999999999999999.9999999999, 31],
["999999999999999999999999999999.9999999999", 30],
["-999999999999999999999999999999.9999999999", 30],
],
)
def test_abnormal(self, value, expected):
assert get_integer_digit(value) == expected
@pytest.mark.parametrize(
["value", "exception"],
[
[True, ValueError],
[False, ValueError],
[None, ValueError],
["test", ValueError],
["a", ValueError],
["0xff", ValueError],
[nan, ValueError],
[inf, ValueError],
],
)
def test_exception(self, value, exception):
with pytest.raises(exception):
get_integer_digit(value)
class Test_get_number_of_digit:
@pytest.mark.parametrize(
["value", "expected"],
[
[0, (1, 0)],
[-0, (1, 0)],
["0", (1, 0)],
["-0", (1, 0)],
[10, (2, 0)],
[-10, (2, 0)],
["10", (2, 0)],
["-10", (2, 0)],
[10.1, (2, 1)],
[-10.1, (2, 1)],
["10.1", (2, 1)],
["-10.1", (2, 1)],
[10.01, (2, 2)],
[-10.01, (2, 2)],
[10.001, (2, 3)],
[-10.001, (2, 3)],
[100.1, (3, 1)],
[-100.1, (3, 1)],
[100.01, (3, 2)],
[-100.01, (3, 2)],
[0.1, (1, 1)],
[-0.1, (1, 1)],
["0.1", (1, 1)],
["-0.1", (1, 1)],
[0.99, (1, 2)],
[-0.99, (1, 2)],
[".99", (1, 2)],
["-.99", (1, 2)],
[0.01, (1, 2)],
[-0.01, (1, 2)],
["0.01", (1, 2)],
["-0.01", (1, 2)],
[0.001, (1, 3)],
[-0.001, (1, 3)],
["0.001", (1, 3)],
["-0.001", (1, 3)],
[0.0001, (1, 4)],
[-0.0001, (1, 4)],
["0.0001", (1, 4)],
["-0.0001", (1, 4)],
[0.00001, (1, 5)],
[-0.00001, (1, 5)],
["0.00001", (1, 5)],
["-0.00001", (1, 5)],
[2e-05, (1, 5)],
[-2e-05, (1, 5)],
["2e-05", (1, 5)],
["-2e-05", (1, 5)],
["0.000000000000001", (1, 15)],
["1e+15", (16, 0)],
],
)
def test_normal(self, value, expected):
assert get_number_of_digit(value) == expected
@pytest.mark.parametrize(
["value", "max_decimal_places", "expected"],
[
[0, 5, (1, 0)],
["0.000000000000001", 5, (1, 5)],
],
)
def test_normal_max_decimal_places(self, value, max_decimal_places, expected):
assert get_number_of_digit(value, max_decimal_places=max_decimal_places) == expected
@pytest.mark.parametrize(
["value"], [[None], [True], [inf], [nan], ["0xff"], ["test"], ["いろは".encode()]]
)
def test_nan(self, value):
integer_digits, decimal_places = get_number_of_digit(value)
assert integer_digits is None
assert decimal_places is None
| thombashi/DataProperty | test/test_function.py | Python | mit | 4,806 |
#We don't use sagenb.notebook.run_notebook because we want the server in the same python environment as our app so we have access to the Notebook and Worksheet objects.
#########
# Flask #
#########
import os, random
from guru.globals import GURU_PORT, GURU_NOTEBOOK_DIR
import sagenb.notebook.notebook as notebook
from sagenb.misc.misc import find_next_available_port
import flask_server.base as flask_base
def startServer(notebook_to_use=None, open_browser=False, debug_mode=False):
#notebook_directory = os.path.join(DOT_SAGENB, "sage_notebook.sagenb")
#Setup the notebook.
if notebook_to_use is None:
#We assume the notebook is empty.
notebook_to_use = notebook.load_notebook(notebook_directory)
notebook_to_use.user_manager().add_user('admin', 'admin','rljacobson@gmail.com',force=True)
notebook_to_use.save() #Write out changes to disk.
notebook_directory = notebook_to_use._dir
#Setup the flask app.
opts={}
opts['startup_token'] = '{0:x}'.format(random.randint(0, 2**128))
startup_token = opts['startup_token']
flask_base.notebook = notebook_to_use
#create_app will now use notebook_to_use instead of the provided location.
flask_app = flask_base.create_app(interface="localhost", port=8081,secure=False, **opts)
sagenb_pid = os.path.join(notebook_directory, "sagenb.pid")
with open(sagenb_pid, 'w') as pidfile:
pidfile.write(str(os.getpid()))
#What does this block even do?
import logging
logger=logging.getLogger('werkzeug')
logger.setLevel(logging.WARNING)
#logger.setLevel(logging.INFO) # to see page requests
#logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
port = find_next_available_port('localhost', GURU_PORT)
notebook_to_use.port = port
#MAKE THIS HAPPEN IN flask_base: g.username = session['username'] = 'admin'
if open_browser:
from sagenb.misc.misc import open_page
open_page('localhost', port, False, '/?startup_token=%s' % startup_token)
try:
if debug_mode:
flask_app.run(host='localhost', port=port, threaded=True,
ssl_context=None, debug=True, use_reloader=False)
else:
flask_app.run(host='localhost', port=port, threaded=True,
ssl_context=None, debug=False)
finally:
#save_notebook(flask_base.notebook)
os.unlink(sagenb_pid)
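# Usage sketch (assumption: a configured Sage notebook environment is available):
#   startServer(open_browser=True, debug_mode=True)
# The startup_token appended to the URL appears to let the first local browser
# session authenticate with the freshly started server.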
| rljacobson/Guru | guru/RunFlask.py | Python | mit | 2,460 |
"""
search.py
"""
from flask import Flask, request, redirect, abort, make_response
from flask import render_template, flash
import bibserver.dao
from bibserver import auth
import json, httplib
from bibserver.config import config
import bibserver.util as util
import logging
from logging.handlers import RotatingFileHandler
LOG_FILENAME="./app.log"
log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=10000000, backupCount=5)
handler.setFormatter(formatter)
log.addHandler(handler)
class Search(object):
def __init__(self,path,current_user):
self.path = path.replace(".json","")
self.current_user = current_user
# facets -> convert to aggs
self.search_options = {
'search_url': '/query?',
'search_index': 'elasticsearch',
'paging': { 'from': 0, 'size': 10 },
#'predefined_filters': {},
#'facets': config['search_facet_fields'],
'result_display': config['search_result_display'],
'search_sortby': [{'display':'year', 'field':'year.exact'},
{'display':'author','field':'author.name'},
{'display':'journal','field':'journal.name'}],
'searchbox_fieldselect': [
{'display':'author','field':'author.name'},
{'display':'journal','field':'journal.name'}]#,
#'addremovefacets': config['add_remove_facets'] # (full list could also be pulled from DAO)
}
self.parts = self.path.strip('/').split('/')
def find(self):
log.debug(self.parts[0])
log.debug(self.parts)
log.debug(len(self.parts))
if bibserver.dao.Account.get(self.parts[0]):
if len(self.parts) == 1:
return self.account() # user account
elif len(self.parts) == 2:
if self.parts[1] == "collections":
return self.collections()
else:
return self.collection() # get a collection
elif len(self.parts) == 3:
return self.record() # get a record in collection
elif self.parts[0] == 'collections':
return self.collections() # get search list of all collections
elif len(self.parts) == 1:
if self.parts[0] != 'search':
self.search_options['q'] = self.parts[0]
return self.default() # get search result of implicit search term
elif len(self.parts) == 2:
return self.implicit_facet() # get search result of implicit facet filter
else:
abort(404)
def default(self):
# default search page
if util.request_wants_json():
res = bibserver.dao.Record.query()
resp = make_response(
json.dumps([i['_source'] for i in res._hits],
sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
return render_template('search/index.html',
current_user=self.current_user,
search_options=json.dumps(self.search_options),
collection=None
)
# TODO: convert facet => aggs
def implicit_facet(self):
self.search_options['predefined_filters'][self.parts[0]+config['facet_field']] = self.parts[1]
# remove the implicit facet from facets
for count,facet in enumerate(self.search_options['facets']):
if facet['field'] == self.parts[0]+config['facet_field']:
del self.search_options['facets'][count]
if util.request_wants_json():
res = bibserver.dao.Record.query(terms=self.search_options['predefined_filters'])
resp = make_response( json.dumps([i['_source'] for i in res._hits], sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
return render_template('search/index.html',
current_user=self.current_user,
search_options=json.dumps(self.search_options),
collection=None,
implicit=self.parts[0]+': ' + self.parts[1]
)
def collections(self):
if len(self.parts) == 1:
if util.request_wants_json():
res = bibserver.dao.Collection.query(size=1000000)
colls = [i['_source'] for i in res._hits]
resp = make_response( json.dumps(colls, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
# search collection records
self.search_options['search_url'] = '/query/collection?'
self.search_options['facets'] = [{'field':'owner','size':100},{'field':'_created','size':100}]
self.search_options['result_display'] = [[{'pre':'<h3>','field':'label','post':'</h3>'}],[{'field':'description'}],[{'pre':'created by ','field':'owner'}]]
self.search_options['result_display'] = config['collections_result_display']
return render_template('collection/index.html', current_user=self.current_user, search_options=json.dumps(self.search_options), collection=None)
elif len(self.parts) == 2:
if self.parts[0] == "collections":
acc = bibserver.dao.Account.get(self.parts[1])
else:
acc = bibserver.dao.Account.get(self.parts[0])
if acc:
resp = make_response( json.dumps([coll.data for coll in acc.collections], sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
abort(404)
elif len(self.parts) == 3:
coll = bibserver.dao.Collection.get_by_owner_coll(self.parts[1],self.parts[2])
if coll:
coll.data['records'] = len(coll)
resp = make_response( json.dumps(coll.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
abort(404)
else:
abort(404)
def record(self):
found = None
res = bibserver.dao.Record.query(terms = {
'owner'+config['facet_field']:self.parts[0],
'collection'+config['facet_field']:self.parts[1],
'id'+config['facet_field']:self.parts[2]
})
if res.total == 0:
rec = bibserver.dao.Record.get(self.parts[2])
if rec: found = 1
elif res.total == 1:
rec = bibserver.dao.Record.get(res._hits[0]['_id'])
found = 1
else:
found = 2
if not found:
abort(404)
elif found == 1:
collection = bibserver.dao.Collection.get_by_owner_coll(rec.data['owner'],rec.data['collection'])
if request.method == 'DELETE':
if rec:
if not auth.collection.update(self.current_user, collection):
abort(401)
rec.delete()
abort(404)
else:
abort(404)
elif request.method == 'POST':
if rec:
if not auth.collection.update(self.current_user, collection):
abort(401)
rec.data = request.json
rec.save()
resp = make_response( json.dumps(rec.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
if util.request_wants_json():
resp = make_response( json.dumps(rec.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
admin = True if auth.collection.update(self.current_user, collection) else False
# make a list of all the values in the record, for autocomplete on the search field
searchvals = []
def valloop(obj):
if isinstance(obj,dict):
for item in obj:
valloop(obj[item])
elif isinstance(obj,list):
for thing in obj:
valloop(thing)
else:
searchvals.append(obj)
valloop(rec.data)
# get fuzzy like this
host = str(config['ELASTIC_SEARCH_HOST']).rstrip('/')
db_path = config['ELASTIC_SEARCH_DB']
fullpath = '/' + db_path + '/record/' + rec.id + '/_mlt?mlt_fields=title&min_term_freq=1&percent_terms_to_match=1&min_word_len=3'
c = httplib.HTTPConnection(host)
c.request('GET', fullpath)
resp = c.getresponse()
res = json.loads(resp.read())
mlt = [i['_source'] for i in res['hits']['hits']]
# get any notes
notes = bibserver.dao.Note.about(rec.id)
# check service core for more data about the record
# TODO: should maybe move this into the record dao or something
# TODO: also, add in any other calls to external APIs
servicecore = ""
apis = config['external_apis']
if apis['servicecore']['key']:
try:
servicecore = "not found in any UK repository"
addr = apis['servicecore']['url'] + rec.data['title'].replace(' ','%20') + "?format=json&api_key=" + apis['servicecore']['key']
import urllib2
response = urllib2.urlopen( addr )
data = json.loads(response.read())
if 'ListRecords' in data and len(data['ListRecords']) != 0:
record = data['ListRecords'][0]['record']['metadata']['oai_dc:dc']
servicecore = "<h3>Availability</h3><p>This article is openly available in an institutional repository:</p>"
servicecore += '<p><a target="_blank" href="' + record["dc:source"] + '">' + record["dc:title"] + '</a><br />'
if "dc:description" in record:
servicecore += record["dc:description"] + '<br /><br />'
servicecore += '</p>'
except:
pass
# render the record with all extras
return render_template('record.html',
record=json.dumps(rec.data),
prettyrecord=self.prettify(rec.data),
objectrecord = rec.data,
searchvals=json.dumps(searchvals),
admin=admin,
notes=notes,
servicecore=servicecore,
mlt=mlt,
searchables=json.dumps(config["searchables"], sort_keys=True)
)
else:
if util.request_wants_json():
resp = make_response( json.dumps([i['_source'] for i in res._hits], sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
return render_template('record.html', multiple=[i['_source'] for i in res._hits])
def account(self):
self.search_options['predefined_filters']['owner'+config['facet_field']] = self.parts[0]
acc = bibserver.dao.Account.get(self.parts[0])
if request.method == 'DELETE':
if not auth.user.update(self.current_user,acc):
abort(401)
if acc: acc.delete()
return ''
elif request.method == 'POST':
if not auth.user.update(self.current_user,acc):
abort(401)
info = request.json
if info.get('_id',False):
if info['_id'] != self.parts[0]:
acc = bibserver.dao.Account.get(info['_id'])
else:
info['api_key'] = acc.data['api_key']
info['_created'] = acc.data['_created']
info['collection'] = acc.data['collection']
                    info['owner'] = acc.data['owner']
acc.data = info
if 'password' in info and not info['password'].startswith('sha1'):
acc.set_password(info['password'])
acc.save()
resp = make_response( json.dumps(acc.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
if util.request_wants_json():
if not auth.user.update(self.current_user,acc):
abort(401)
resp = make_response( json.dumps(acc.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
admin = True if auth.user.update(self.current_user,acc) else False
recordcount = bibserver.dao.Record.query(terms={'owner':acc.id}).total
collcount = bibserver.dao.Collection.query(terms={'owner':acc.id}).total
return render_template('account/view.html',
current_user=self.current_user,
search_options=json.dumps(self.search_options),
record=json.dumps(acc.data),
recordcount=recordcount,
collcount=collcount,
admin=admin,
account=acc,
superuser=auth.user.is_super(self.current_user)
)
def collection(self):
# show the collection that matches parts[1]
self.search_options['predefined_filters']['owner'] = self.parts[0]
self.search_options['predefined_filters']['collection'] = self.parts[1]
# remove the collection facet
for count,facet in enumerate(self.search_options['facets']):
if facet['field'] == 'collection'+config['facet_field']:
del self.search_options['facets'][count]
# look for collection metadata
metadata = bibserver.dao.Collection.get_by_owner_coll(self.parts[0],self.parts[1])
if request.method == 'DELETE':
if metadata != None:
if not auth.collection.update(self.current_user, metadata):
abort(401)
else: metadata.delete()
return ''
else:
if not auth.collection.create(self.current_user, None):
abort(401)
else:
size = bibserver.dao.Record.query(terms={'owner':self.parts[0],'collection':self.parts[1]}).total
for rid in bibserver.dao.Record.query(terms={'owner':self.parts[0],'collection':self.parts[1]},size=size)._hits:
record = bibserver.dao.Record.get(rid['_id'])
if record: record.delete()
return ''
elif request.method == 'POST':
if metadata != None:
metadata.data = request.json
metadata.save()
return ''
else: abort(404)
else:
if util.request_wants_json():
out = {"metadata":metadata.data,"records":[]}
out['metadata']['records'] = len(metadata)
out['metadata']['query'] = request.url
for rec in metadata.records:
out['records'].append(rec.data)
resp = make_response( json.dumps(out, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
admin = True if metadata != None and auth.collection.update(self.current_user, metadata) else False
if metadata and '_display_settings' in metadata:
self.search_options.update(metadata['_display_settings'])
users = bibserver.dao.Account.query(size=1000000) # pass the userlist for autocomplete admin addition (could be ajax'd)
userlist = [i['_source']['_id'] for i in users['hits']['hits']]
return render_template('search/index.html',
current_user=self.current_user,
search_options=json.dumps(self.search_options),
collection=metadata.data,
record = json.dumps(metadata.data),
userlist=json.dumps(userlist),
request=request,
admin=admin
)
def prettify(self,record):
result = '<p>'
# given a result record, build how it should look on the page
img = False
if img:
result += '<img class="thumbnail" style="float:left; width:100px; margin:0 5px 10px 0; max-height:150px;" src="' + img[0] + '" />'
# add the record based on display template if available
display = config['search_result_display']
lines = ''
for lineitem in display:
line = ''
for obj in lineitem:
thekey = obj['field']
parts = thekey.split('.')
if len(parts) == 1:
res = record
elif len(parts) == 2:
res = record.get(parts[0],'')
elif len(parts) == 3:
res = record[parts[0]][parts[1]]
counter = len(parts) - 1
if res and isinstance(res, dict):
thevalue = res.get(parts[counter],'') # this is a dict
else:
thevalue = []
for row in res:
thevalue.append(row[parts[counter]])
if thevalue and len(thevalue):
line += obj.get('pre','')
if isinstance(thevalue, list):
for index,val in enumerate(thevalue):
if index != 0 and index != len(thevalue)-1: line += ', '
line += val
else:
line += thevalue
line += obj.get('post','')
if line:
lines += line + "<br />"
if lines:
result += lines
else:
result += json.dumps(record,sort_keys=True,indent=4)
result += '</p>'
return result
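# Hedged usage sketch (not part of the original module): Search takes the request path and the
# logged-in user, and find() dispatches to the matching handler above. The catch-all route and
# the current_user lookup below are illustrative assumptions, not bibserver's actual wiring.
#
# @app.route('/<path:path>', methods=['GET', 'POST', 'DELETE'])
# def dispatch(path):
#     return Search(path=path, current_user=None).find()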
|
jasonzou/MyPapers
|
bibserver/search.py
|
Python
|
mit
| 19,340
|
import json
import os
import avasdk
from zipfile import ZipFile, BadZipFile
from avasdk.plugins.manifest import validate_manifest
from avasdk.plugins.hasher import hash_plugin
from django import forms
from django.core.validators import ValidationError
from .validators import ZipArchiveValidator
class PluginArchiveField(forms.FileField):
default_validators = [ZipArchiveValidator()]
label = 'Plugin .zip'
def get_prefix(self, archive):
files = archive.namelist()
return os.path.commonpath(files)
def get_manifest(self, archive):
try:
with ZipFile(archive.temporary_file_path()) as plugin:
prefix = self.get_prefix(plugin)
prefix = prefix + '/' if len(prefix) else ''
with plugin.open('{}manifest.json'.format(prefix)) as myfile:
manifest = json.loads(myfile.read())
validate_manifest(manifest)
return manifest
except BadZipFile:
raise ValidationError('Bad .zip format')
except FileNotFoundError:
raise ValidationError('Error with upload, please try again')
except KeyError:
raise ValidationError('No manifest.json found in archive')
except json.JSONDecodeError:
raise ValidationError('Error with manifest.json, bad Json Format')
except avasdk.exceptions.ValidationError as e:
raise ValidationError('Error in manifest.json ({})'.format(e))
def get_readme(self, archive):
try:
with ZipFile(archive.temporary_file_path()) as plugin:
prefix = self.get_prefix(plugin)
prefix = prefix + '/' if len(prefix) else ''
                with plugin.open('{}README.md'.format(prefix)) as myfile:
readme = myfile.read()
return readme
except FileNotFoundError:
raise ValidationError('Error with upload, please try again')
except KeyError:
return None
def clean(self, data, initial=None):
f = super().clean(data, initial)
manifest = self.get_manifest(f)
readme = self.get_readme(f)
return {
'zipfile': f,
'manifest': manifest,
'readme': readme,
'checksum': hash_plugin(f.temporary_file_path()),
}
class UploadPluginForm(forms.Form):
archive = PluginArchiveField()
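# Hedged usage sketch (not part of the original module): standard Django form handling for the
# form above. Only the 'archive' field name and the keys of the dict returned by
# PluginArchiveField.clean() come from this file; the request handling itself is illustrative.
#
# form = UploadPluginForm(request.POST, request.FILES)
# if form.is_valid():
#     plugin = form.cleaned_data['archive']
#     plugin['manifest'], plugin['readme'], plugin['checksum']  # parsed archive metadata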
|
ava-project/ava-website
|
website/apps/plugins/forms.py
|
Python
|
mit
| 2,440
|
from django.core.management.base import BaseCommand
from optparse import make_option
from django.utils import timezone
from orcamentos.proposal.models import Proposal
class Command(BaseCommand):
help = ''' Conclui orçamento. '''
option_list = BaseCommand.option_list + (
make_option('--num', help='número do orçamento'),
make_option('--price', help='preço'),
)
def handle(self, num, price, *args, **kwargs):
proposal = Proposal.objects.get(num_prop=num)
        # If the status is 'approved', the proposal can no longer be concluded.
if proposal.status == 'a':
print('Este orçamento já virou contrato.')
else:
            # Check that the new price is valid and positive.
            if price is None or float(price) <= 0:
print('O valor deve ser positivo.')
else:
proposal.price = price
proposal.status = 'co'
proposal.date_conclusion = timezone.now()
proposal.save()
print('Orçamento concluído com sucesso.')
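# Hedged usage sketch (not part of the original command): given the --num and --price options
# defined above, an invocation would look roughly like this (the values are placeholders).
#
#   python manage.py conclude_proposal --num 42 --price 1500.00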
|
rg3915/orcamentos
|
orcamentos/core/management/commands/conclude_proposal.py
|
Python
|
mit
| 1,080
|
"""
Django settings for paulpruitt_net project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from secrets import SECRET_KEY, DB_USER, DB_PASSWORD
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'taggit',
'pblog'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'paulpruitt_net.urls'
WSGI_APPLICATION = 'paulpruitt_net.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE' : 'django.db.backends.postgresql_psycopg2',
'NAME' : 'site',
'USER' : DB_USER,
'PASSWORD': DB_PASSWORD,
'HOST' : '127.0.0.1',
'PORT' : '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/srv/www/site/static'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# Do not allow IFrames
X_FRAME_OPTIONS = 'DENY'
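# Hedged sketch (not part of the settings file): the "from secrets import ..." line above
# implies a sibling secrets.py of roughly this shape; the values are placeholders, not the
# project's real credentials.
#
# SECRET_KEY = 'replace-me'
# DB_USER = 'db-user'
# DB_PASSWORD = 'replace-me'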
|
ppruitt/site
|
site/paulpruitt_net/settings.py
|
Python
|
mit
| 2,273
|
"""
HTTP handler to serve specific endpoint requests like
http://myserver:9004/endpoints/mymodel
For how generic endpoint requests are served, look
at endpoints_handler.py
"""
import json
import logging
import shutil
from tabpy.tabpy_server.common.util import format_exception
from tabpy.tabpy_server.handlers import ManagementHandler
from tabpy.tabpy_server.handlers.base_handler import STAGING_THREAD
from tabpy.tabpy_server.management.state import get_query_object_path
from tabpy.tabpy_server.psws.callbacks import on_state_change
from tabpy.tabpy_server.handlers.util import AuthErrorStates
from tornado import gen
class EndpointHandler(ManagementHandler):
def initialize(self, app):
super(EndpointHandler, self).initialize(app)
def get(self, endpoint_name):
if self.should_fail_with_auth_error() != AuthErrorStates.NONE:
self.fail_with_auth_error()
return
self.logger.log(logging.DEBUG, f"Processing GET for /endpoints/{endpoint_name}")
self._add_CORS_header()
if not endpoint_name:
self.write(json.dumps(self.tabpy_state.get_endpoints()))
else:
if endpoint_name in self.tabpy_state.get_endpoints():
self.write(json.dumps(self.tabpy_state.get_endpoints()[endpoint_name]))
else:
self.error_out(
404,
"Unknown endpoint",
info=f"Endpoint {endpoint_name} is not found",
)
@gen.coroutine
def put(self, name):
if self.should_fail_with_auth_error() != AuthErrorStates.NONE:
self.fail_with_auth_error()
return
self.logger.log(logging.DEBUG, f"Processing PUT for /endpoints/{name}")
try:
if not self.request.body:
self.error_out(400, "Input body cannot be empty")
self.finish()
return
try:
request_data = json.loads(self.request.body.decode("utf-8"))
except BaseException as ex:
self.error_out(
400, log_message="Failed to decode input body", info=str(ex)
)
self.finish()
return
# check if endpoint exists
endpoints = self.tabpy_state.get_endpoints(name)
if len(endpoints) == 0:
self.error_out(404, f"endpoint {name} does not exist.")
self.finish()
return
new_version = int(endpoints[name]["version"]) + 1
self.logger.log(logging.INFO, f"Endpoint info: {request_data}")
err_msg = yield self._add_or_update_endpoint(
"update", name, new_version, request_data
)
if err_msg:
self.error_out(400, err_msg)
self.finish()
else:
self.write(self.tabpy_state.get_endpoints(name))
self.finish()
except Exception as e:
err_msg = format_exception(e, "update_endpoint")
self.error_out(500, err_msg)
self.finish()
@gen.coroutine
def delete(self, name):
if self.should_fail_with_auth_error() != AuthErrorStates.NONE:
self.fail_with_auth_error()
return
self.logger.log(logging.DEBUG, f"Processing DELETE for /endpoints/{name}")
try:
endpoints = self.tabpy_state.get_endpoints(name)
if len(endpoints) == 0:
self.error_out(404, f"endpoint {name} does not exist.")
self.finish()
return
# update state
try:
endpoint_info = self.tabpy_state.delete_endpoint(name)
except Exception as e:
self.error_out(400, f"Error when removing endpoint: {e.message}")
self.finish()
return
# delete files
if endpoint_info["type"] != "alias":
delete_path = get_query_object_path(
self.settings["state_file_path"], name, None
)
try:
yield self._delete_po_future(delete_path)
except Exception as e:
self.error_out(400, f"Error while deleting: {e}")
self.finish()
return
self.set_status(204)
self.finish()
except Exception as e:
err_msg = format_exception(e, "delete endpoint")
self.error_out(500, err_msg)
self.finish()
on_state_change(
self.settings, self.tabpy_state, self.python_service, self.logger
)
@gen.coroutine
def _delete_po_future(self, delete_path):
future = STAGING_THREAD.submit(shutil.rmtree, delete_path)
ret = yield future
raise gen.Return(ret)
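# Hedged usage sketch (not part of the original handler): with the URL scheme from the module
# docstring, the GET/PUT/DELETE methods above correspond to requests of roughly this shape.
# Host, port, endpoint name, and the PUT payload are illustrative assumptions.
#
#   curl http://myserver:9004/endpoints/mymodel
#   curl -X PUT -d '{"description": "updated"}' http://myserver:9004/endpoints/mymodel
#   curl -X DELETE http://myserver:9004/endpoints/mymodel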
|
tableau/TabPy
|
tabpy/tabpy_server/handlers/endpoint_handler.py
|
Python
|
mit
| 4,926
|
# PPPPPPPP
# PP PP t hh
# PP - PP tt hh
# PP PP yy - yy tttttttt hh hhhh ooooooo nn nnnnn --------------
# PPPPPPP yy yy tt hhh hh oo - oo nnn nn -------------
# PP -- yy yy - tt - hh - hh oo - oo nn - nn -------------
# PP ------- yyy -- tt - hh - hh oo - oo nn - nn -------------
# PP -------- yy --- tt - hh - hh oo - oo nn - nn -------------
# PP ------- yy ----- tt - hh - hh oo - oo nn - nn -------------
# PP ----- yy ------- tt hh - hh ooooooo nn - nn -------------
# <== comment
input('Press \'Enter\' to exit') # Pause so the console window stays open
"""> python -m idlelib.idle """ # launch IDLE from the command line for the current folder
"""> cd 1D\Python\ """
# pip => installs Python packages
"""
pip --proxy http://192.168.1.37:2718
pip install --proxy=http://192.168.1.37:2718 package
pip install --proxy=http://192.168.1.37:2718 xlwings # exl
python get-pip.py --proxy="http://192.168.1.37:2718"
"""
# Run Python versions
"""
C:\Python33\python
py
py -2
py -3.1
"""
# PART 1 Getting Started
# Module Imports and Reloads
import script1 # one time import file
reload(script1) # reload scripts
import imp
imp.reload()
from imp import reload
reload(module)
script1.X # > Spam!
from script1 import X # recommend always using import instead of from
X # > Spam!
exec(open('script1').read()) # Run Module Files (код запущенного модуля вставляется в код, так он может перезаписать старые значения)
# PART 2 Types and Operations
# CHAPTER 4 Introducing Python Object Types
# Python's Core Data Types
"""
#Object type
Numbers 1234, 3.1415, 3+4j, 0b111, Decimal(), Fraction()
Strings 'spam', "Bob's", b'a\x01c', u'sp\xc4m'
Lists [1, [2, 'three'], 4.5], list(range(10))
Dictionaries {'food': 'spam', 'taste': 'yum'}, dict(hours=10)
Tuples (1, 'spam', 4, 'U'), tuple('spam'), namedtuple
Files open('eggs.txt'), open(r'C:\ham.bin', 'wb')
Sets set('abc'), {'a', 'b', 'c'}
Other core types Booleans, types, None
Program unit types Functions, modules, classes
Implementation-related types Compiled code, stack tracebacks
"""
# Numbers
print(3.1415 * 2) # => 6.283
123 + 222 # => 345
1.5 * 4 # => 6.0
2 ** 100 # => 126765060022~~~
len(str(2 ** 1000000)) # => 301030 How many digits in really BIG number
import math
math.pi # => 3.141592653589793
math.sqrt(85) # => 9.219544457292887
import random
random.random() # => 0.7335334012811705
random.choice([1, 2, 3, 4]) # => 3
# Strings
# Sequence Operations
S = 'Spam'
len(S) # => 4
S[0] # => 'S'
S[1] # => 'p'
S[-1] # => 'm'
S[len(S)-1] # => 'm' Negative indexing, the hard way
S[-2] # => 'a'
S[1:3] # => 'pa'
S[1:] # => 'pam' [1:len(S)]
S[0:3] # => 'Spa'
S[:3] # => 'Spa' [0:3]
S[:-1] # => 'Spa' [0:-1]
S[:] # => 'Spam' [0:len(S)]
S + 'xyz' # => 'Spamxyz' Concatenation
S * 4 # => 'SpamSpamSpamSpam' Repetition
# Immutability
S[0] = 'z' # => Error
S = 'z' + S[1:]
S # => 'zpam'
S.find('pa') # => 1
S.replace('pa', 'XYZ') # => 'zXYZm'
S # => 'zpam'
line = 'aaa,bbb,ccccc,dd\n'
line.split(',') # => ['aaa', 'bbb', 'ccccc', 'dd\n']
line.rstrip() # => 'aaa,bbb,ccccc,dd'
line.rstrip().split(',') # => ['aaa', 'bbb', 'ccccc', 'dd']
S.upper() # => ZPAM
S.isalpha() # => True
S.isdigit() # => False
S = 'shrubbery'
L = list(S)
L # => ['s', 'h', 'r', 'u', 'b', 'b', 'e', 'r', 'y']
L[1] = 'c'
''.join(L) # => 'scrubbery'
B = bytearray(b'spam')
B.extend(b'eggs')
B # => bytearray(b'spameggs')
B.decode() # => 'spameggs'
# Formatting
'%s, eggs, and %s' % ('spam', 'SPAM!') # => 'spam, eggs, and SPAM!' #
'{0}, eggs, and {1}'.format('spam', 'SPAM!') # => 'spam, eggs, and SPAM!'
'{}, eggs, and {}'.format('spam', 'SPAM!') # => 'spam, eggs, and SPAM!'
# numeric reports
'{:,.2f}'.format(296999.2567) # => '296,999.26' # Separators, decimal digits
'%.2f | %+05d' % (3.14159, -42) # => '3.14 | -0042'
# Getting Help
dir(S)
help(S.replace)
help(S)
# data type str, list, dict
# Other Ways to Code Strings
S = 'A\nB\tC'
# \n is end-of-line
# \t is tab
len(S) # => 5 # Each stands for just one character
ord('\n') # => 10 # binary value in ASCII
S = 'A\0B\0C'
len(S) # => 5
S # => 'A\x00B\x00C' # Non-printables are displayed as \xNN hex escapes
msg = """
aaaaaaaaaaaaa
bbb'''bbbbbbb""bbbbbb'bbb
ccccccccccc
"""
msg # => '\naaaaaaaaaaaaa\nbbb\'\'\'bbbbbbb""bbbbbb\'bbb\nccccccccccc\n'
# raw string literal
r'C:\text\new'
# Unicode Strings
'sp\xc4m' # => 'spÄm' # normal str string are Unicode text
b'a\x01c' # => b'a\x01c' # bytes strings are byte-based data
u'sp\u00c4m' # => 'spÄm' # The 2.X Unicode literal works in 3.3+: just str
'spam'.encode('utf8') # => b'spam'
'spam'.encode('utf16') # => b'\xff\xfes\x00p\x00a\x00m\x00'
'spam'.encode('ASCII') # => b'spam'
'sp\xc4\u00c4\U000000c4m' # => 'spÄÄÄm'
'x' + b'y'.decode()
'x'.encode() + b'y'
# Pattern Matching
import re
match = re.match('Hello[ \t]*(.*)world', 'Hello Python world')
match.group(1) # => 'Python '
match = re.match('[/:](.*)[/:](.*)[/:](.*)', '/usr/home:lumberjack')
match.groups() # => ('usr', 'home', 'lumberjack')
re.split('[/:]', '/usr/home:lumberjack') # => ['', 'usr', 'home', 'lumberjack']
a = 0
# Lists
# Sequence Operations
L = [123, 'spam', 1.23]
len(L) # => 3
L[0] # => 123
L[:-1] # => [123, 'spam']
L + [4, 5, 6] # => [123, 'spam', 1.23, 4, 5, 6]
L * 2 # => [123, 'spam', 1.23, 123, 'spam', 1.23]
L # => [123, 'spam', 1.23]
# Type-Specific Operations
L.append('NI')
L # => [123, 'spam', 1.23, 'NI']
L.pop(2) # => 1.23
L # => [123, 'spam', 'NI']
M = ['bb', 'aa', 'cc']
M.sort()
M # => ['aa', 'bb', 'cc']
M.reverse()
M # => ['cc', 'bb', 'aa']
# Nesting
M = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
M # => [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
M[1] # => [4, 5, 6]
M[1][2] # => 6
# Comprehensions
col2 = [row[1] for row in M] # Collect the items in column 2
col2 # => [2, 5, 8]
[row[1] + 1 for row in M] # => [3, 6, 9] # Add 1 to each item in column 2
[row[1] for row in M if row[1] % 2 == 0] # => [2, 8] # Filter out odd items
diag = [M[i][i] for i in [0, 1, 2]] # Collect a diagonal from matrix
diag # => [1, 5, 9]
doubles = [c * 2 for c in 'spam'] # Repeat each character in the string
doubles # => ['ss', 'pp', 'aa', 'mm']
list(range(4)) # => [0, 1, 2, 3]
list(range(-6, 7, 2)) # => [-6, -4, -2, 0, 2, 4, 6]
[[x ** 2, x ** 3] for x in range(4)] # => [[0, 0], [1, 1], [4, 8], [9, 27]]
[[x, x /2, x * 2] for x in range(-6, 7, 2) if x > 0] # => [[2, 1.0, 4], [4, 2.0, 8], [6, 3.0, 12]]
G = (sum(row) for row in M) # Create a generator of row sums
next(G) # => 6
next(G) # => 15 # Run the iteration protocol with next()
list(map(sum, M)) # => [6, 15, 24] # Map sum over items in M
# Dictionaries
# Mapping Operations
D = {'food': 'Spam', 'quantity': 4, 'color': 'pink'}
D['food'] # => 'Spam'
D['quantity'] += 1 # add 1 to 'quantity' value
D # => {'color': 'pink', 'food': 'Spam', 'quantity': 5}
D = {}
D['name'] = 'Bob'
D['job'] = 'dev'
D['age'] = 40
D # => {'name': 'Bob', 'age': 40, 'job': 'dev'}
print(D['name']) # => Bob
bob1 = dict(name='Bob', job='dev', age=40)
bob1 # => {'name': 'Bob', 'age': 40, 'job': 'dev'}
bob2 = dict(zip(['name', 'job', 'age'], ['Bob', 'dev', 40])) # Zipping
bob2 # => {'name': 'Bob', 'age': 40, 'job': 'dev'}
# Nesting Revisited
rec = {'name':{'first': 'Bob', 'last': 'Smith'},
'jobs': ['dev', 'mgr'],
'age' : 40.5}
rec['name'] # => {'last': 'Smith', 'first': 'Bob'}
rec['name']['last'] # => 'Smith'
rec['jobs'] # => ['dev', 'mgr']
rec['jobs'][-1] # => 'mgr'
rec['jobs'].append('janitor')
rec # => {'jobs': ['dev', 'mgr', 'janitor'], 'name': {'last': 'Smith', 'first': 'Bob'}, 'age': 40.5}
# Sorting Keys:for loops
D = {'a': 1, 'b':2, 'c':3}
D # => {'b': 2, 'c': 3, 'a': 1}
Ks = list(D.keys())
Ks # => ['b', 'c', 'a']
Ks.sort()
Ks # => ['a', 'b', 'c']
for key in Ks:
print(key, '=>', D[key]) # => a => 1
# b => 2
# c => 3
for key in sorted(D):
print(key, '=>', D[key]) # => a => 1
# b => 2
# c => 3
# Tuples
T = (1, 2, 3, 4)
len(T) # => 4
T + (5, 6) # => (1, 2, 3, 4, 5, 6)
T[0] # => 1
T.index(4) # => 3
T.count(4) # => 1
T[0] = 2 # => ...error...
T = (2, ) + T[1:]
T # => (2, 2, 3, 4)
# Files
f = open('data.txt', 'w')
f.write('Hello\n') # => 6
f.write('world\n') # => 6
f.close()
# Binary Bytes Files
import struct
packed = struct.pack('>i4sh', 7, b'spam', 8)
packed # => b'\x00\x00\x00\x07spam\x00\x08'
file = open('data.bin', 'wb')
file.write(packed) # => 10
file.close()
data = open('data.bin', 'rb').read()
data # => b'\x00\x00\x00\x07spam\x00\x08'
data[4:8] # => b'spam'
list(data) # => [0, 0, 0, 7, 115, 112, 97, 109, 0, 8]
struct.unpack('>i4sh', data) # => (7, b'spam', 8)
# Unicode Text Files
S = 'sp\xc4m'
S # => 'spÄm'
S[2] # => 'Ä'
file = open('unidata.txt', 'w', encoding='utf-8')
file.write(S) # => 4
file.close()
text = open('unidata.txt',encoding='utf-8').read()
text # => 'spÄm'
len(text) # => 4
# Other Core Types
import decimal
d = decimal.Decimal('3.141')
d + 1 # => Decimal('4.141')
from fractions import Fraction
f = Fraction(2, 3)
f + 1 # => Fraction(5, 3)
1 > 2, 1 < 2 # => (False, True)
bool('spam') # => True
# User-Defined Classes
class Worker:
def __init__(self, name, pay):
self.name = name
self.pay = pay
def lastName(self):
return self.name.split()[-1]
def giveRaise(self, percent):
self.pay *= (1.0 + percent)
bob = Worker('Bob Smith', 50000)
sue = Worker('Sue Jones', 60000)
bob.lastName() # => 'Smith'
sue.lastName() # => 'Jones'
sue.giveRaise(.1)
sue.pay # => 66000.0
# CHAPTER 5 Numeric Types
# Numeric Type Basics
# Python Expression Operators
yield x # Generator function send protocol
lambda args: expression # Anonymous function generation
x if y else z # Ternary selection (x is evaluated only if y is true)
x or y # Logical OR (y is evaluated only if x is false)
x and y # Logical AND (y is evaluated only if x is true)
not x # Logical negation
x in y,x not in y # Membership (iterables, sets)
x is y, x is not y # Object identity tests
x < y, x <= y, x > y, x >= y # Magnitude comparison, set subset and superset;
x == y, x != y # Value equality operators
x | y # Bitwise OR, set union
x ^ y # Bitwise XOR, set symmetric difference
x & y # Bitwise AND, set intersection
x << y, x >> y # Shift x left or right by y bits
x + y # Addition, concatenation
x - y # Subtraction, set difference
x * y # Multiplication, repetition
x % y # Remainder, format
x / y, x // y # Division: true and floor
-x, +x # Negation, identity
~x # Bitwise NOT (inversion)
x ** y # Power (exponentiation)
x[i] # Indexing (sequence, mapping, others)
x[i:j:k] # Slicing
x(...) # Call (function, method, class, other callable)
a.attr # Attribute reference
(...) # Tuple, expression, generator expression
[...] # List, list comprehension
{...} # Dictionary, set, set and dictionary comprehensions
# Numbers in Action
# Comparisons: Normal and Chained
1.1 + 2.2 == 3.3 # => False
1.1 + 2.2 # => 3.3000000000000003
int(1.1 + 2.2) == int(3.3) # => True
# Floor versus truncation
import math
math.floor(2.5) # => 2
math.floor(-2.5) # => -3
math.trunc(2.5) # => 2
math.trunc(-2.5) # => -2
# Complex Numbers
1j * 1J # => (-1+0j)
2 + 1j * 3 # => (2+3j)
# Hex, Octal, Binary: Literals and Conversions
oct(64), hex(64), bin(64) # => ('0o100', '0x40', '0b1000000')
64, 0o100, 0x40, 0b1000000 # => (64, 64, 64, 64)
int('64'), int('100', 8), int('40', 16), int('1000000', 2) # => (64, 64, 64, 64)
int('0x40', 16), int('0b1000000', 2) # => (64, 64)
eval('64'), eval('0o100'), eval('0x40'), eval('0b1000000') # => (64, 64, 64, 64)
'{0:o}, {1:x}, {2:b}'.format(64, 64, 64) # => '100, 40, 1000000'
'%o, %x, %x, %X' % (64, 64, 255, 255) # => '100, 40, ff, FF'
# Other Built-in Numeric Tools
import math
math.pi, math.e # =>(3.141592653589793, 2.718281828459045)
math.sin(2 * math.pi / 180) # => 0.03489949670250097
math.sqrt(144), math.sqrt(2) # => (12.0, 1.4142135623730951)
pow(2, 4), 2 ** 4, 2.0 ** 4.0 # => (16, 16, 16.0)
min(3, 1, 2, 4), max(3, 1, 2, 4) # => (1, 4)
math.floor(2.567), math.floor(-2.567) # => (2, -3)
math.trunc(2.567), math.trunc(-2.567) # => (2, -2)
int(2.567), int(-2.567) # => (2, -2)
round(2.567), round(2.467), round(2.567, 2) # => (3, 2, 2.57)
'%.1f' % 2.567, '{0:.2f}'.format(2.567) # => ('2.6', '2.57')
import random
random.random() # => 0.9726485651691155
random.randint(1, 10) # => 1
random.choice(['life of Brian', 'Holy Grail', 'Meaning of Life']) # => 'Holy Grail'
suits = ['hearts', 'clubs', 'diamonds', 'spades']
random.shuffle(suits)
suits # => ['clubs', 'diamonds', 'hearts', 'spades']
# Other Numeric Types
# Decimal Type
# Decimal basics
0.1 + 0.1 + 0.1 - 0.3 # => 5.551115123125783e-17
from decimal import Decimal
Decimal('0.1') + Decimal('0.1') + Decimal('0.1') - Decimal('0.3') # => Decimal('0.0')
# Fraction Type
# Fraction basics
from fractions import Fraction
x = Fraction(1, 3)
y = Fraction(4, 6)
x, y # => (Fraction(1, 3), Fraction(2, 3))
print(x, y) # => 1/3 2/3
print(x + y, x - y, x * y) # => 1 -1/3 2/9
Fraction('.25') # => Fraction(1, 4)
# Sets
engineers = {'bob', 'sue', 'ann', 'vic'}
managers = {'tom', 'sue'}
'bob' in engineers # => True
engineers & managers # => {'sue'}
engineers | managers # => {'bob', 'sue', 'ann', 'vic', 'tom'}
engineers - managers # => {'vic', 'bob', 'ann'}
managers - engineers # => {'tom'}
engineers > managers # => False
{'bob', 'sue'} < engineers # =>True
(managers | engineers) > managers # => True
managers ^ engineers # => {'vic', 'bob', 'ann', 'tom'}
(managers | engineers) - (managers ^ engineers) # => {'sue'}
# Booleans O_o
type(True) # => <class 'bool'>
isinstance(True, int) # => True
True == 1 # => True
True is 1 # => False
True or False # => True
True + 4 # => 5
# CHAPTER 6 The Dynamic Typing Interlude
# CHAPTER 7 String Fundamentals
# String Basics
S = '' # Empty string
S = "spam's" # Double quotes, same as single
S = 's\np\ta\x00m' # Escape sequences
S = """...multiline...""" # Triple-quoted block string
S = r'\temp\spam' # Raw string (no escapes)
B = b'sp\xc4m' # Byte strings
U = u'sp\u00c4m' # Unicode strings
S1 + S2, S * 3 # Concatenate, repeat
S[i], S[i:j], len(S) # Index, slice, length
"a %s parrot" % kind # String formatting учзкуыышщт
"a {0} parrot".format(kind) # String formatting method
S.find('pa') # String methods: search
S.rstrip() # remove whitespace
S.replace('pa', 'xx') # replacement
S.split(',') # split on delimiter
S.isdigit() # content test
S.lower() # case conversion
S.endswith('spam') # end test
'spam'.join(strlist) # delimiter join
S.encode('latin-1') # Unicode encoding
B.decode('utf8') # Unicode decoding
for x in S: print(x) # Iteration, membership
'spam' in S
[c * 2 for c in S]
map(ord, S)
re.match('sp(.*)am', line) # Pattern matching: library module
# String Literals
# Escape Sequences Represent Special Characters
# String backslash characters # Like in C, C++ and other
"\newline" # Ignored (continuation line)
"\\" # Backslash (stores one \)
"\'"
"\""
"\a" # Bell
"\b" # Backspace
"\f" # Formfeed
"\n" # Newline (linefeed)
"\r" # Carriage return
"\t" # Horizontal tab
"\v" # Vertical tab
"\xhh" # Cgaracter with hex value hh (exactly 2 digits)
"\ooo" # Character with octal value ooo (up to 3 digits)
"\0" # Null: binary 0 character (doesn't end string)
"\N{ id }" # Unicode database ID
"\uhhh" # Unicode character with 16-bit hex value
"\Uhhhhhh" # Unicode character with 32-bit hex value
"\other" # Not an escape (keeps both \ and other)
# Strings in Action
# Changing Strings
S = 'spam'
S[0] = 'x' # => TypeError # Raises an error!
S = S + 'SPAM!' # To change a string, make a new one
S # => 'spamSPAM!'
S = S[:4] + 'Burger' + S[-1]
S # => 'spamBurger!'
S = 'splot'
S = S.replace('pl', 'pamal')
S # => 'spamalot'
# String Methods
str = 'String Methods'
str.capitalize() # => 'String methods'
str.casefold() # => 'string methods'
str.center(30, '-') # => '--------String Methods--------'
str.count('t',0,-1) # => 2 # (sub[,start[,end]])
# String Formatting Expressions
'My {1[king]} runs {0.platform}'.format(sys, {'king': 'laptop'}) # => 'My laptop runs win32'
'My {map[kind]} runs {sys.platform}'.format(sys=sys, map={'kind': 'laptop'}) # => 'My laptop runs win32'
# CHAPTER 8 Lists and Dictionaries
# Lists
L = [] # An empty list
L = [123, 'abc', 1.23, {}] # Four items: indexes 0..3
L = ['Bob', 40.0, ['dev', 'mgr']] # Nested sublists
L = list('spam') # List of an iterable's items
L = list(range(-4, 4)) # list of successive integers
L[i] # Index
L[i][j] # Index of index
L[i:j] # slice
len(L) # length
L1 + L2 # Concatenate
L * 3 # repeat
for x in L: print(x) # Iteration
3 in L # membership
L.append(4) # Methods: growing
L.extend([5,6,7])
L.insert(i, X)
L.index(X) # Methods: searching
L.count(X)
L.sort() # Methods: sorting, reversing
L.reverse()
L.copy()
L.clear()
L.pop(i) # Methods, statements: shrinking
L.remove(X)
del L[i]
del L[i:j]
L[i:j] = []
L[i] = 3 # Index assignment, slice assignment
L[i:j] = [4,5,6]
L = [x**2 for x in range(5)] # List comprehensions and maps
list(map(ord, 'spam'))
# Dictionares
D = {} # Empty dictionary
D = {'name': 'Bob', 'age': 40} # Two-item dictionary
E = {'cto': {'name': 'Bob', 'age': 40}} # Nesting
D = dict(name='Bob', age=40) # Alternative construction techniques:
D = dict([('name', 'Bob'), ('age', 40)]) # keywords, key/value pairs, zipped key/value pairs, key lists
D = dict(zip(keyslist, valueslist))
D = dict.fromkeys(['name', 'age'])
D['name'] # Indexing by key
E['cto']['age']
'age' in D # Membership: key present test
D.keys() # Methods: all keys,
D.values() # all values,
D.items() # all key+value tuples,
D.copy() # copy (top-level),
D.clear() # clear (remove all items),
D.update(D2) # merge by keys,
D.get(key, default?) # fetch by key, if absent default (or None),
D.pop(key, default?) # remove by key, if absent default (or error)
D.setdefault(key, default?) # fetch by key, if absent set default (or None),
D.popitem() # remove/return any (key, value) pair; etc.
len(D) # Length: number of stored entries
D[key] = 42 # Adding/changing keys
del D[key] # Deleting entries by key
list(D.keys()) # Dictionary views (Python 3.X)
D1.keys() & D2.keys()
D.viewkeys(), D.viewvalues() # Dictionary views (Python 2.7)
D = {x: x*2 for x in range(10)} # Dictionary comprehensions (Python 3.X, 2.7)
# CHAPTER 9 Tuples, Files, and Everything Else
# Tuples
() # An empty tuple
T = (0,) # A one-item tuple (not an expression)
T = (0, 'Ni', 1.2, 3) # A four-item tuple
T = 0, 'Ni', 1.2, 3 # Another four-item tuple (same as prior line)
T = ('Bob', ('dev', 'mgr')) # Nested tuples
T = tuple('spam') # Tuple of items in an iterable
T[i] # Index,
T[i][j] # index of index,
T[i:j] # slice,
len(T) # length
T1 + T2 # Concatenate
T * 3 # repeat
for x in T: print(x) # Iteration, membership
'spam' in T
[x ** 2 for x in T]
T.index('Ni') # Methods in 2.6, 2.7, and 3.X: search, count
T.count('Ni')
namedtuple('Emp', ['name', 'jobs']) # Named tuple extension type
# Files
output = open(r'C:\spam', 'w') # Create output file ('w' means write)
input = open('data', 'r') # Create input file ('r' means read)
input = open('data')
open('testjson.txt', encoding='utf-8') # Same as prior line ('r' is the default)
aString = input.read() # Read entire file into a single string
aString = input.read(N) # Read up to next N characters (or bytes) into a string
aString = input.readline() # Read next line (including \n newline) into a string
aList = input.readlines() # Read entire file into list of line strings (with \n)
output.write(aString) # Write a string of characters (or bytes) into file
output.writelines(aList) # Write all line strings in a list into file
output.close() # Manual close (done for you when file is collected)
output.flush() # Flush output buffer to disk without closing
anyFile.seek(N) # Change file position to offset N for next operation
for line in open('data'): # use line File iterators read line by line
open('f.txt', encoding='latin-1') # Python 3.X Unicode text files (str strings)
open('f.bin', 'rb') # Python 3.X bytes files (bytes strings)
codecs.open('f.txt', encoding='utf8') # Python 2.X Unicode text files (unicode strings)
open('f.bin', 'rb') # Python 2.X bytes files (str strings)
# PART 3 Statements and Syntax
# CHAPTER 10 Introducing Python Statements
# Python statements
# \/ Example # \/ Statement # \/ Role
a, b = 'good', 'bad' # Assignment # Creating references
log.write("spam, ham") # Calls and other expressions # Running functions
print('The Killer', joke) # print calls # Printing objects
if "python" in text: # if/elif/else # Selecting actions
print(text)
for x in mylist: # for/else # Iteration
print(x)
while X > Y: # while/else # General loops
print('hello')
while True: # pass # Empty placeholder
pass
while True: # break # Loop exit
if exittest(): break
while True: # continue # Loop continue
if skiptest(): continue
def f(a, b, c=1, *d): # def # Functions and methods
print(a+b+c+d[0])
def f(a, b, c=1, *d): # return # Functions results
return a+b+c+d[0]
def gen(n): # yield # Generator functions
for i in n: yield i*2
x = 'old' # global # Namespaces
def function():
global x, y; x = 'new'
def outer(): # nonlocal # Namespaces (3.X)
x = 'old'
def function():
nonlocal x; x = 'new'
import sys # import # Module access
from sys import stdin # from # Attribute access
class Subclass(Superclass): # class # Building objects
staticData = []
def method(self): pass
try: # try/except/finally # Catching exceptions
action()
except:
print('action error')
raise EndSearch(location) # raise # Triggering exceptions
assert X > Y, 'X too small' # assert # Debugging checks
with open('data') as myfile:# with/as # Context managers
process(myfile)
del data[k] # del # Deleting references
del data[i:j]
del obj.attr
del variable
# CHAPTER 11 Assignments, Expressions, and Prints
# Assignment statement forms
spam = 'Spam' # Basic form
spam, ham = 'yum', 'YUM' # Tuple assignment (positional)
[spam, ham] = ['yum', 'YUM'] # List assignment (positional)
a, b, c, d = 'spam' # Sequence assignment, generalized
a, *b = 'spam' # Extended sequence unpacking (Python 3.X)
spam = ham = 'lunch' # Multiple-target assignment
spams += 42 # Augmented assignment (equivalent to spams = spams + 42)
# Augmented assignment statements
X += Y, X &= Y, X -= Y, X |= Y
X *= Y, X ^= Y, X /= Y, X >>=Y
X %= Y, X <<=Y, X **=Y, X //=Y
# Python reserved words
False; class; finally; is; return;
None; continue; for; lambda; try;
True; def; from; nonlocal; while;
and; del; global; not; with;
as; elif; if; or; yield;
assert; else; import; pass;
break; except; in; raise;
# Common Python expression statements
spam(eggs, ham) # Function calls
spam.ham(eggs) # Method calls
spam # Printing variables in the interactive interpreter
print(a, b, c, sep='') # Printing operations in Python 3.X
yield x ** 2 # Yielding expression statements
# Printing, the hard way
import sys
sys.stdout.write('hello world\n') # => hello world
sys.stdout = open('log.txt', 'a') # Redirects prints to a file
...
print(x, y, z) # Shows up in log.txt
log = open('log.txt', 'a') # 3.X
print(x, y, z, file=log) # Print to a file-like object
# CHAPTER 12 if Tests and Syntax Rules
# if Tests and Syntax Rules
if test1: # if test
statements1 # Associated block
elif test2: # Optional elifs
statements2
else: # Optional else
statements3
# Loops
while test: # Loop test
statements # Loop body
else: # Optional else
statements # Run if didn't exit loop with break
break # Jumps out of the closest enclosing loop (past the entire loop statement)
continue # Jumps to the top of the closest enclosing loop (to the loop's header line)
pass # Does nothing at all: it's an empty statement placeholder
... # Alternative to pass
Loop else block # Runs if and only if the loop is exited normally (without hitting a break)
# CHAPTER 13 while and for Loops
# while Loops
while test:
statements
if test: break # Exit loop now, skip else if present
if test: continue # Go to top of loop now, to test1
else:
statements # Run if we didn't hit a 'break'
# for Loops
for target in object: # Assign object items to target
statements # Repeated loop body: use target
else: # Optional else part
statements # If we didn't hit a 'break'
[a for a in dir(list) if not a.startswith('__')]
[x for x in ['spam', '', 'ni'] if bool(x)]
[x for x in ['spam', '', 'ni'] if x]
# Parallel Traversals: zip and map
L1 = [1,2,3,4]
L2 = [5,6,7,8]
zip(L1, L2) # => <zip object at 0x026523C8>
list(zip(L1, L2)) # => [(1, 5), (2, 6), (3, 7), (4, 8)] # list() required in 3.X, not 2.X
# CHAPTER 14 Iterations and Comprehensions
# iteration
L = [1, 2, 3, 4, 5]
for i in range(len(L)):
L[i] += 10
L # => [11, 12, 13, 14, 15]
L = [x + 10 for x in L]
L # => [21, 22, 23, 24, 25]
[x + y for x in 'abc' for y in 'lmn']
# => ['al', 'am', 'an', 'bl', 'bm', 'bn', 'cl', 'cm', 'cn']
res = []
for x in 'abc':
for y in 'lmn':
res.append(x + y)
res
# => ['al', 'am', 'an', 'bl', 'bm', 'bn', 'cl', 'cm', 'cn']
''' need to learn more'''
# CHAPTER 15 The Documentation Interlude
# Documentation
# Python documentation sources
# comments # In-file documentation
'The dir function '# Lists of attributes available in objects
'Docstrings:__doc__ '# In-file documentation attached to objects
'PyDoc: the help function '# Interactive help for objects
'PyDoc: HTML reports '# Module documentation in a browser
'Sphinx third-party tool '# Richer documentation for larger projects
'The standard manual set '# Official language and library descriptions
'Web resources '# Online tutorials, examples, and so on
'Published books '# Commercially polished reference texts
# PyDoc
"""
c:\code>
python -m pydoc -b
py -3 -m pydoc -b
C:\python33\python -m pydoc -b
"""# => Server ready at http://localhost:62135
# PART 4 Functions and Generators
# CHAPTER 16 Function Basics
# Examples # Statement or expression
myfunc('spam', 'eggs', meat=ham, *rest) # Call expressions
def printer(message): # def
print('Hello ' + str(message))
def adder(a, b=1, *c): # return
return a + b + c[0]
x = 'old' # global
def changer():
global x; x = 'new'
def outer(): # nonlocal (3.X)
x = 'old'
def changer():
nonlocal x; x = 'new'
def squares(x): # yield
for i in range(x): yield i ** 2
funcs = [lambda x: x**2, lambda x: x**3] # lambda
# def Statements
def name(arg1, arg2, ... argN):
statements
...
return value
for x in xrange(1,10):
pass
# def Executes at Runtime
if test:
def func(): # Define func this way
...
else:
def func(): # Or else this way
...
...
func() # Call the version selected and built
othername = func # Assign function object
othername() # Call func again
def func(): ... # Create function object
func() # Call object
func.attr = value # Attach attributes
#Definition
def intersect(seq1, seq2):
res = [] # Start empty
for x in seq1: # Scan seq1
if x in seq2: # Common item?
res.append(x) # Add to end
return res
s1 = "SPAM"
s2 = "SCAM"
intersect(s1, s2) # String
# => ['S', 'A', 'M']
[x for x in s1 if x in s2]
# => ['S', 'A', 'M']
x = intersect([1, 2, 3], (1, 4)) # Mixed types
x # Saved result object
# => [1]
# CHAPTER 17 Scopes
# Python Scope Basics
X = 99 # Global (module) scope X
def func():
X = 88 # Local (function) scope X: a different variable
# Name Resolution: The LEGB Rule
"""
Built-in (Python)
Names preassigned in the built-in names modules: open, range,
SyntaxError...
Global (module)
Names assigned at the top level of a module file, or declared
global in a def within the file.
Enclosing function locals
Names in the local scope of any and all enclosing functions
(def or lambda), from inner to outer.
Local (function)
Names assigned in any way within a function (def
or lambda), and not declared global in that function.
"""
# Scope Example
# Global scope
X = 99 # X and func assigned in module: global
def func(Y): # Y and Z assigned in function: locals
# Local scope
Z = X + Y # X is a global
return Z
func(1) # func in module: result=100
# The global Statement
X = 88 # Global X
def func():
global X
X = 99 # Global X: outside def
func()
print(X) # Prints 99
y, z = 1, 2 # Global variables in module
def all_global():
global x # Declare globals assigned
x = y + z # No need to declare y, z: LEGB rule
# Other Ways to Access Globals
# thismod.py
var = 99
def local():
var = 0 # Change local var
def glob1():
global var # Declare global (normal)
var += 1 # Change global var
def glob2():
var = 0 # Change local var
import thismod # Import myself
thismod.var += 1 # Change global var
def glob3():
var = 0 # Change local var
import sys # Import system table
glob = sys.modules['thismod'] # Get module object (or use __name__)
glob.var += 1 # Change global var
def test():
print(var)
local(); glob1(); glob2(); glob3()
print(var)
# consol
import thismod
thismod.test() # => 99 102
thismod.var # => 102
# Scopes and Nested Functions
# Function factory (a.k.a. closures)
# A simple function factory
def maker(N):
def action(X):
return X ** N # Make and return action
return action # action retains N from enclosing
f = maker(2)
g = maker(3) # g remembers 3, f remembers 2
g(4) # => 64 # 4 ** 3
f(4) # => 16 # 4 ** 2
def maker(N):
return lambda X: X ** N # lambda functions retain state too
# Retaining Enclosing Scope State with Defaults
def f1():
x = 88
def f2(x=x): # Remember enclosing scope X with defaults
print(x)
f2()
f1() # => 88
def f1():
x = 88
f2(x)
def f2(x):
print(x)
f1() # => 88
# The nonlocal Statement in 3.X
# nonlocal Basics
nonlocal # skip my local scope entirely
# CHAPTER 18 Arguments
# Argument-Passing Basics
# Special Argument-Matching Modes
# Argument Matching Syntax
# Syntax # Location # Interpretation
func(value) # Caller # Normal argument: matched by position
func(name=value) # Caller # Keyword argument: matched by name
func(*iterable) # Caller # Pass all objects in iterable as individual positional arguments
func(**dict) # Caller # Pass all key/value pairs in dict as individual keyword arguments
def func(name): # Function # Normal argument: matches any passed value by position or name
def func(name=value): # Function # Default argument value, if not passed in the call
def func(*name): # Function # Matches and collects remaining positional arguments in a tuple
def func(**name): # Function # Matches and collects remaining keyword arguments in a dictionary
def func(*other, name): # Function # Arguments that must be passed by keyword only in calls (3.X)
def func(*, name=value):# Function # Arguments that must be passed by keyword only in calls (3.X)
# Combining keywords and defaults
def func(spam, eggs, toast=0, ham=0):
    print((spam, eggs, toast, ham))
func(1, 2) # => (1, 2, 0, 0)
func(1, ham=1, eggs=0) # => (1, 0, 0, 1)
func(spam=1, eggs=0) # => (1, 0, 0, 0)
func(toast=1, eggs=2, spam=3) # => (3, 2, 1, 0)
func(1, 2, 3, 4) # => (1, 2, 3, 4)
# Arbitrary Arguments Examples
# Headers: Collecting arguments
def f(a, *pargs, **kargs): print(a, pargs, kargs)
f(1, 2, 3, 4, x=5, y=6) # => 1 (2, 3, 4) {'y': 6, 'x': 5}
# ┌──────────────────────────────────────────┐
# │ Learning Python 5E -- 546 (598 /1594) │
# ├──────────────────────────────────────────┤
# │ Изучаем Python 4E -- (463 / 1280) │
# └──────────────────────────────────────────┘
|
SonyStone/pylib
|
Python_tutorial.py
|
Python
|
mit
| 36,258
|
from boto3.dynamodb.conditions import Attr
def update_users_followers(username, follower_id, table, remove=False):
'''
Find all the users that %username% follows and
update their "followers" list and "followers_count" amount
'''
item = table.get_item(Key={'username': username}).get('Item', False)
item['followers'].remove(follower_id) if remove else item['followers'].append(follower_id)
table.update_item(
Key={
'username': username
},
UpdateExpression='SET followers = :val1',
ExpressionAttributeValues={
':val1': item['followers'],
},
)
def follow_user(username, user_id, table):
item = table.get_item(Key={'username': username})['Item']
new_follow = set([user_id]) - set(item['follow']) - set([username])
if not new_follow:
return False
new_item = table.update_item(
Key={
'username': username
},
UpdateExpression='SET follow = list_append(follow, :val1), follow_count = follow_count + :val2',
ExpressionAttributeValues={
':val1': list(new_follow),
':val2': len(new_follow)
},
ReturnValues="UPDATED_NEW"
)
update_users_followers(user_id, username, table, remove=False)
return True
# update_user_real_follow_count(username)
def get_followers_list(username, table):
user_following = table.get_item(Key={'username': username})['Item']['follow']
return table.scan(
FilterExpression=Attr('username').is_in(user_following)
)['Items']
def unfollow_user(username, user_id, table):
item = table.get_item(Key={'username': username})['Item']
item['follow'].remove(user_id)
table.update_item(
Key={
'username': username
},
UpdateExpression='SET follow = :val1',
ExpressionAttributeValues={
':val1': item['follow'],
}
)
update_users_followers(user_id, username, table, remove=True)
def create_user(update, table):
username = str(update['message']['chat']['id'])
followers = table.scan(
FilterExpression=Attr('follow').contains(username)
)
table.put_item(
Item={
'username': username,
'first_name': update.message.from_user.first_name.upper(),
'last_name': update.message.from_user.last_name.upper() if update.message.from_user.last_name else None,
'follow': [],
'follow_count': 0,
'followers': [x['username'] for x in followers['Items']],
'photo_id': 0
}
)
def update_user(update, table):
username = str(update['message']['chat']['id'])
followers = table.scan(
FilterExpression=Attr('follow').contains(username)
)
item = table.get_item(Key={'username': username})['Item']
item['first_name'] = update.message.from_user.first_name.upper()
if update.message.from_user.last_name:
item['last_name'] = update.message.from_user.last_name.upper()
item['follow_count'] = len(item['follow'])
item['followers'] = [x['username'] for x in followers['Items']]
item['photo_id'] = item.get('photo_id', 0)
table.put_item(Item=item)
def update_user_photo(photo, username, table):
table.update_item(
Key={
'username': username
},
UpdateExpression='SET photo_id = :val1',
ExpressionAttributeValues={
':val1': photo[-1]['file_id'],
},
)
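# Hedged usage sketch (not part of the original module): every helper above takes a boto3
# DynamoDB Table resource as its last argument. The table name and region below are
# illustrative assumptions.
#
# import boto3
# table = boto3.resource('dynamodb', region_name='us-east-1').Table('users')
# follow_user(username='111111', user_id='222222', table=table)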
|
vz10/secretBot
|
db_actions.py
|
Python
|
mit
| 3,577
|
# -*- coding: utf-8 -*-
# author: bambooom
'''
My Diary Web App - CLI for client
'''
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import requests
from bs4 import BeautifulSoup
import re
HELP = '''
Input h/help/? for help.
Input q/quit to quit the process.
Input s/sync to sync the diary log.
Input lt/ListTags to list all tags.
Input st:TAG to set or delete tags
Input FLUSH to clear all diary entries.
'''
url = "http://bambooomdiary.sinaapp.com/"
def get_log_all():
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
log = ''
for i in soup.find_all('pre'):
log += i.get_text()+'\n'
return log
def get_log_bytag(tags):
response = requests.get(url)
soup = BeautifulSoup(response.text,"html.parser")
ti=list(soup.find_all('i', class_='etime'))
ta=list(soup.find_all('i', class_='tags'))
di=list(soup.find_all('pre',class_='diary'))
for i in range(len(list(ti))):
if ta[i].get_text() == 'TAG:'+tags:
print "%s %s" %(ti[i].get_text(),di[i].get_text())
def get_tags():
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
temp =[]
for i in soup.find_all('i', class_='tags'):
temp.append(i.get_text())
tag_set = list(set(temp))
for i in tag_set:
print i
def delete_log():
res = raw_input('ARE YOU SURE?(y/n)>')
if res.lower() == 'y':
response = requests.delete(url)
print "All clear!Restart a new diary!"
else:
print "Well, keep going on!"
def write_log(message, tags):
values = {'newdiary':message,'tags':tags}
response = requests.post(url, data=values)
def client():
print HELP
tags=''
while True:
print 'TAG:'+tags
message = raw_input('Input>')
if message in ['h','help','?']:
print HELP
elif message in ['s','sync']:
get_log_bytag(tags)
elif message in ['q','quit']:
print 'Bye~'
break
elif message in ['lt','ListTags']:
get_tags()
elif message.startswith('st:'):
tags = message[3:]
elif message == 'FLUSH':
delete_log()
else:
write_log(message,tags)
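
# --- Hedged non-interactive sketch (editor's addition) -----------------------
# Drives the helpers above without the interactive loop; the message and the
# 'demo' tag are illustrative only.
def demo_post_and_sync():
    write_log('hello from the demo', 'demo')
    print get_log_all()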
if __name__ == '__main__':
client()
|
bambooom/OMOOC2py
|
_src/om2py5w/5wex0/client.py
|
Python
|
mit
| 2,054
|
from pytest import fixture
from functional.core import (
builder,
PreparedImagesOutputChecker,
PDFDocumentChecker,
DjVuDocumentChecker)
@fixture()
def checker_classes():
""" Run all checkers in one test for optimization reason. """
return [
PreparedImagesOutputChecker,
PDFDocumentChecker,
DjVuDocumentChecker]
@fixture()
def toc_checker_classes():
return [PDFDocumentChecker, DjVuDocumentChecker]
def put_transform_contents(builder, directory):
builder.save_transform_ini(
directory,
"[transform]\n" +
"justconvert: yes\n")
def check_all_valid(builder, checkers):
for class_ in checkers:
assert builder.valid(class_)
def check_all_invalid(builder, checkers):
for class_ in checkers:
assert not builder.valid(class_)
def test_checker_valid_page(builder, checker_classes):
builder.create_unused_image("000-001", "0001.jpg")
builder.create_used_image("001-002", "0001.jpg")
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
put_transform_contents(builder, "001-002")
builder.run_program()
check_all_valid(builder, checker_classes)
def test_checker_invalid_page(builder, checker_classes):
builder.create_used_image("000-001", "0001.jpg")
builder.create_unused_image("001-002", "0001.jpg")
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
put_transform_contents(builder, "001-002")
builder.run_program()
check_all_invalid(builder, checker_classes)
def test_checker_valid_order(builder, checker_classes):
builder.create_used_image("000-001", "0000.jpg")
builder.create_used_image("000-001", "0001.jpg")
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
builder.run_program()
check_all_valid(builder, checker_classes)
def test_checker_valid_reference_override(builder, checker_classes):
builder.create_used_image("000-001", "0000.jpg")
builder.override_reference_image()
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
builder.run_program()
check_all_valid(builder, checker_classes)
def test_checker_invalid_reference_override(builder, checker_classes):
(builder.create_used_image("000-001", "0000.jpg")
.add_border(20, 20, 20, 20, (0, 0, 0)))
(builder.override_reference_image()
.add_border(50, 50, 50, 50, (0, 0, 0)))
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
builder.run_program()
check_all_invalid(builder, checker_classes)
def test_checker_invalid_order(builder, checker_classes):
builder.create_used_image("000-001", "0001.jpg")
builder.create_used_image("000-001", "0000.jpg")
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-001")
builder.run_program()
check_all_invalid(builder, checker_classes)
def test_checker_invalid_count(builder, checker_classes):
builder.create_used_image("000-002", "0000.jpg")
builder.create_used_image("000-002", "0001.jpg")
builder.create_unused_image("000-002", "0002.jpg")
builder.save_images()
builder.save_toc([])
put_transform_contents(builder, "000-002")
builder.run_program()
check_all_invalid(builder, checker_classes)
def prepare_three_images(builder):
for i in range(1, 4):
builder.create_used_image("001-003", "%04d.jpg" % i)
builder.save_images()
put_transform_contents(builder, "001-003")
def test_checker_valid_toc(builder, toc_checker_classes):
prepare_three_images(builder)
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"],
[0, 3, "Page 3"]
])
builder.run_program()
check_all_valid(builder, toc_checker_classes)
def test_checker_invalid_level_toc(builder, toc_checker_classes):
prepare_three_images(builder)
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"]
])
builder.run_program()
builder.save_toc([
[0, 1, "Page 1"],
[0, 2, "Page 2"]
])
check_all_invalid(builder, toc_checker_classes)
def test_checker_invalid_pagenum_toc(builder, toc_checker_classes):
prepare_three_images(builder)
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"]
])
builder.run_program()
builder.save_toc([
[0, 1, "Page 1"],
[1, 3, "Page 2"]
])
check_all_invalid(builder, toc_checker_classes)
def test_checker_invalid_description_toc(builder, toc_checker_classes):
prepare_three_images(builder)
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"]
])
builder.run_program()
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2 2 2"]
])
check_all_invalid(builder, toc_checker_classes)
def test_checker_invalid_toc_extra_line(builder, toc_checker_classes):
prepare_three_images(builder)
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"]
])
builder.run_program()
builder.save_toc([
[0, 1, "Page 1"],
[1, 2, "Page 2"],
[2, 3, "Page 3"]
])
check_all_invalid(builder, toc_checker_classes)
|
atrosinenko/lecture-notes-compiler
|
tests/functional/core_test.py
|
Python
|
mit
| 5,334
|
import collections
import contextlib
import copy
import json
import warnings
import numpy
import six
from chainer.backends import cuda
from chainer import configuration
from chainer import serializer as serializer_module
from chainer import variable
def _copy_variable(value):
if isinstance(value, variable.Variable):
return copy.copy(value)
return value
class Reporter(object):
"""Object to which observed values are reported.
Reporter is used to collect values that users want to watch. The reporter
object holds a mapping from value names to the actually observed values.
We call this mapping `observations`.
When a value is passed to the reporter, an object called `observer` can be
optionally attached. In this case, the name of the observer is added as the
prefix of the value name. The observer name should be registered
beforehand.
See the following example::
>>> from chainer import Reporter, report, report_scope
>>>
>>> reporter = Reporter()
>>> observer = object() # it can be an arbitrary (reference) object
>>> reporter.add_observer('my_observer', observer)
>>> observation = {}
>>> with reporter.scope(observation):
... reporter.report({'x': 1}, observer)
...
>>> observation
{'my_observer/x': 1}
    There is also a global API to add values::
>>> observation = {}
>>> with report_scope(observation):
... report({'x': 1}, observer)
...
>>> observation
{'my_observer/x': 1}
The most important application of Reporter is to report observed values
from each link or chain in the training and validation procedures.
:class:`~chainer.training.Trainer` and some extensions prepare their own
Reporter object with the hierarchy of the target link registered as
observers. We can use :func:`report` function inside any links and chains
to report the observed values (e.g., training loss, accuracy, activation
statistics, etc.).
Attributes:
observation: Dictionary of observed values.
"""
def __init__(self):
self._observer_names = {}
self.observation = {}
def __enter__(self):
"""Makes this reporter object current."""
_reporters.append(self)
def __exit__(self, exc_type, exc_value, traceback):
"""Recovers the previous reporter object to the current."""
_reporters.pop()
@contextlib.contextmanager
def scope(self, observation):
"""Creates a scope to report observed values to ``observation``.
This is a context manager to be passed to ``with`` statements. In this
scope, the observation dictionary is changed to the given one.
It also makes this reporter object current.
Args:
observation (dict): Observation dictionary. All observations
reported inside of the ``with`` statement are written to this
dictionary.
"""
old = self.observation
self.observation = observation
self.__enter__()
yield
self.__exit__(None, None, None)
self.observation = old
def add_observer(self, name, observer):
"""Registers an observer of values.
Observer defines a scope of names for observed values. Values observed
with the observer are registered with names prefixed by the observer
name.
Args:
name (str): Name of the observer.
observer: The observer object. Note that the reporter distinguishes
                the observers by their object ids (i.e., ``id(observer)``), rather
than the object equality.
"""
self._observer_names[id(observer)] = name
def add_observers(self, prefix, observers):
"""Registers multiple observers at once.
This is a convenient method to register multiple objects at once.
Args:
prefix (str): Prefix of each name of observers.
observers: Iterator of name and observer pairs.
"""
for name, observer in observers:
self._observer_names[id(observer)] = prefix + name
def report(self, values, observer=None):
"""Reports observed values.
The values are written with the key, prefixed by the name of the
observer object if given.
.. note::
As of v2.0.0, if a value is of type :class:`~chainer.Variable`, the
variable is copied without preserving the computational graph and
the new variable object purged from the graph is stored to the
observer. This behavior can be changed by setting
``chainer.config.keep_graph_on_report`` to ``True``.
Args:
values (dict): Dictionary of observed values.
observer: Observer object. Its object ID is used to retrieve the
observer name, which is used as the prefix of the registration
name of the observed value.
"""
if not configuration.config.keep_graph_on_report:
values = {k: _copy_variable(v) for k, v in six.iteritems(values)}
if observer is not None:
observer_id = id(observer)
if observer_id not in self._observer_names:
raise KeyError(
'Given observer is not registered to the reporter.')
observer_name = self._observer_names[observer_id]
for key, value in six.iteritems(values):
name = '%s/%s' % (observer_name, key)
self.observation[name] = value
else:
self.observation.update(values)
_reporters = []
def get_current_reporter():
"""Returns the current reporter object."""
return _reporters[-1]
def report(values, observer=None):
"""Reports observed values with the current reporter object.
Any reporter object can be set current by the ``with`` statement. This
    function calls the :meth:`Reporter.report` method of the current reporter.
If no reporter object is current, this function does nothing.
.. admonition:: Example
The most typical example is a use within links and chains. Suppose that
a link is registered to the current reporter as an observer (for
example, the target link of the optimizer is automatically registered to
the reporter of the :class:`~chainer.training.Trainer`). We can report
some values from the link as follows::
class MyRegressor(chainer.Chain):
def __init__(self, predictor):
super(MyRegressor, self).__init__(predictor=predictor)
def __call__(self, x, y):
# This chain just computes the mean absolute and squared
# errors between the prediction and y.
pred = self.predictor(x)
abs_error = F.sum(F.abs(pred - y)) / len(x)
loss = F.mean_squared_error(pred, y)
# Report the mean absolute and squared errors.
report({'abs_error': abs_error, 'squared_error': loss}, self)
return loss
If the link is named ``'main'`` in the hierarchy (which is the default
name of the target link in the
:class:`~chainer.training.updaters.StandardUpdater`),
these reported values are
named ``'main/abs_error'`` and ``'main/squared_error'``. If these values
are reported inside the :class:`~chainer.training.extension.Evaluator`
extension, ``'validation/'`` is added at the head of the link name, thus
the item names are changed to ``'validation/main/abs_error'`` and
``'validation/main/squared_error'`` (``'validation'`` is the default
name of the Evaluator extension).
Args:
values (dict): Dictionary of observed values.
observer: Observer object. Its object ID is used to retrieve the
observer name, which is used as the prefix of the registration name
of the observed value.
"""
if _reporters:
current = _reporters[-1]
current.report(values, observer)
@contextlib.contextmanager
def report_scope(observation):
"""Returns a report scope with the current reporter.
This is equivalent to ``get_current_reporter().scope(observation)``,
except that it does not make the reporter current redundantly.
"""
current = _reporters[-1]
old = current.observation
current.observation = observation
yield
current.observation = old
def _get_device(x):
if numpy.isscalar(x):
return cuda.DummyDevice
else:
return cuda.get_device_from_array(x)
class Summary(object):
"""Online summarization of a sequence of scalars.
Summary computes the statistics of given scalars online.
"""
def __init__(self):
self._x = 0
self._x2 = 0
self._n = 0
def add(self, value, weight=1):
"""Adds a scalar value.
Args:
value: Scalar value to accumulate. It is either a NumPy scalar or
a zero-dimensional array (on CPU or GPU).
weight: An optional weight for the value. It is a NumPy scalar or
a zero-dimensional array (on CPU or GPU).
Default is 1 (integer).
"""
with _get_device(value):
self._x += weight * value
self._x2 += weight * value * value
self._n += weight
def compute_mean(self):
"""Computes the mean."""
x, n = self._x, self._n
with _get_device(x):
return x / n
def make_statistics(self):
"""Computes and returns the mean and standard deviation values.
Returns:
tuple: Mean and standard deviation values.
"""
x, n = self._x, self._n
xp = cuda.get_array_module(x)
with _get_device(x):
mean = x / n
var = self._x2 / n - mean * mean
std = xp.sqrt(var)
return mean, std
def serialize(self, serializer):
try:
self._x = serializer('_x', self._x)
self._x2 = serializer('_x2', self._x2)
self._n = serializer('_n', self._n)
except KeyError:
warnings.warn('The previous statistics are not saved.')
class DictSummary(object):
"""Online summarization of a sequence of dictionaries.
``DictSummary`` computes the statistics of a given set of scalars online.
It only computes the statistics for scalar values and variables of scalar
values in the dictionaries.
"""
def __init__(self):
self._summaries = collections.defaultdict(Summary)
def add(self, d):
"""Adds a dictionary of scalars.
Args:
d (dict): Dictionary of scalars to accumulate. Only elements of
scalars, zero-dimensional arrays, and variables of
zero-dimensional arrays are accumulated. When the value
is a tuple, the second element is interpreted as a weight.
"""
summaries = self._summaries
for k, v in six.iteritems(d):
w = 1
if isinstance(v, tuple):
w = v[1]
v = v[0]
if isinstance(w, variable.Variable):
w = w.array
if not numpy.isscalar(w) and not getattr(w, 'ndim', -1) == 0:
raise ValueError(
'Given weight to {} was not scalar.'.format(k))
if isinstance(v, variable.Variable):
v = v.array
if numpy.isscalar(v) or getattr(v, 'ndim', -1) == 0:
summaries[k].add(v, weight=w)
def compute_mean(self):
"""Creates a dictionary of mean values.
It returns a single dictionary that holds a mean value for each entry
added to the summary.
Returns:
dict: Dictionary of mean values.
"""
return {name: summary.compute_mean()
for name, summary in six.iteritems(self._summaries)}
def make_statistics(self):
"""Creates a dictionary of statistics.
It returns a single dictionary that holds mean and standard deviation
values for every entry added to the summary. For an entry of name
``'key'``, these values are added to the dictionary by names ``'key'``
and ``'key.std'``, respectively.
Returns:
dict: Dictionary of statistics of all entries.
"""
stats = {}
for name, summary in six.iteritems(self._summaries):
mean, std = summary.make_statistics()
stats[name] = mean
stats[name + '.std'] = std
return stats
def serialize(self, serializer):
if isinstance(serializer, serializer_module.Serializer):
names = list(self._summaries.keys())
serializer('_names', json.dumps(names))
for index, name in enumerate(names):
self._summaries[name].serialize(
serializer['_summaries'][str(index)])
else:
self._summaries.clear()
try:
names = json.loads(serializer('_names', ''))
except KeyError:
warnings.warn('The names of statistics are not saved.')
return
for index, name in enumerate(names):
self._summaries[name].serialize(
serializer['_summaries'][str(index)])
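
# --- Hedged usage sketch (editor's addition, not part of the library) --------
# A minimal demonstration of Reporter and DictSummary as described in the
# docstrings above; the observer name 'my_observer' and the 'loss' key are
# illustrative only.
def _reporter_usage_example():
    reporter = Reporter()
    observer = object()
    reporter.add_observer('my_observer', observer)
    observation = {}
    with reporter.scope(observation):
        reporter.report({'x': 1}, observer)
    assert observation == {'my_observer/x': 1}

    # DictSummary accumulates per-key statistics over several observations.
    summary = DictSummary()
    summary.add({'loss': 0.5})
    summary.add({'loss': 0.3})
    return summary.compute_mean()  # {'loss': 0.4}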
|
rezoo/chainer
|
chainer/reporter.py
|
Python
|
mit
| 13,600
|
import scrapenhl_globals
import scrape_game
def scrape_games(season, games, force_overwrite = False, pause = 1, marker = 10):
"""
Scrapes the specified games.
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
games : iterable of ints (e.g. list)
The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
force_overwrite : bool
If True, will overwrite previously raw html files. If False, will not scrape if files already found.
pause : float or int
The time to pause between requests to the NHL API. Defaults to 1 second
marker : float or int
The number of times to print progress. 10 will print every 10%; 20 every 5%.
"""
import time
import datetime
starttime = time.time()
games = sorted(list(games))
marker_i = [len(games)//marker * i for i in range(marker)]
marker_i[-1] = len(games) - 1
marker_i_set = set(marker_i)
for i in range(len(games)):
game = games[i]
newscrape = scrape_game.scrape_game(season, game, force_overwrite)
if newscrape: #only sleep if had to scrape a new game
time.sleep(pause)
if i in marker_i_set:
print('Done through', season, game, ' ~ ', round((marker_i.index(i)) * 100/marker), '% in',
str(datetime.timedelta(seconds = time.time() - starttime)))
print('Done scraping games in', season)
def scrape_season(season, startgame = None, endgame = None, force_overwrite = False, pause = 1):
"""
Scrapes games for the specified season.
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
startgame : int
The game id at which scraping will start. For example, midway through a season, this can be the last game
scraped.
This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
        The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
    endgame : int
        The game id at which scraping will end (inclusive); later games are skipped.
force_overwrite : bool
If True, will overwrite previously raw html files. If False, will not scrape if files already found.
pause : float or int
The time to pause between requests to the NHL API. Defaults to 1 second
"""
if season != 2012:
games = [20000 + x for x in range(1, 1231)]
else:
games = [20000 + x for x in range(1, 721)]
for round in range(1, 5):
for series in range(1, 8//round + 1):
for game in range(1, 8):
games.append(int('30{0:d}{1:d}{2:d}'.format(round, series, game)))
if startgame is not None:
games = [g for g in games if g >= startgame]
if endgame is not None:
games = [g for g in games if g <= endgame]
scrape_games(season, games, force_overwrite, pause, 10)
def get_team_pbplog_filename(season, team):
return '{0:s}Team logs/{2:s}{1:d}_pbp.feather'.format(scrapenhl_globals.SAVE_FOLDER, season, team)
def get_team_toilog_filename(season, team):
return '{0:s}Team logs/{2:s}{1:d}_toi.feather'.format(scrapenhl_globals.SAVE_FOLDER, season, team)
def update_teamlogs(season, force_overwrite = False):
import os
import feather
import pandas as pd
import os.path
basic_gamelog = scrapenhl_globals.get_quick_gamelog_file()
teams = {x for x in \
basic_gamelog.query('Season == {0:d}'.format(season))['Home'].drop_duplicates()} | \
{x for x in \
basic_gamelog.query('Season == {0:d}'.format(season))['Away'].drop_duplicates()}
temp = basic_gamelog
### List files in correct format
allfiles = os.listdir(scrapenhl_globals.get_season_folder(season))
pbpfiles = {int(x[:5]): x for x in allfiles if x[-12:] == '_parsed.zlib'}
toifiles = {int(x[:5]): x for x in allfiles if x[-19:] == '_shifts_parsed.zlib'}
for team in teams:
teamgames = {int(g) for g in basic_gamelog.query('Season == {0:d} & (Home == "{1:s}" | Away == "{1:s}")'.format(
season, team))['Game'].values}
current_pbp = None
games_already_done = set()
if os.path.exists(get_team_pbplog_filename(season, team)):
current_pbp = feather.read_dataframe(get_team_pbplog_filename(season, team))
games_already_done = {x for x in current_pbp.Game}
dflist = []
if not force_overwrite and current_pbp is not None:
dflist.append(current_pbp)
teamgames = {int(g) for g in teamgames if g not in games_already_done}
### TODO do I need to flip any columns?
#if force_overwrite:
for game in teamgames:
try:
df = pd.read_hdf(scrape_game.get_parsed_save_filename(season, game))
df = df.assign(Game = game)
if df is not None:
dflist.append(df)
except FileNotFoundError:
pass
if len(dflist) > 0:
new_pbp = pd.concat(dflist)
for col in new_pbp.columns:
if new_pbp[col].dtype == 'object':
new_pbp[col] = new_pbp[col].astype(str)
feather.write_dataframe(new_pbp, get_team_pbplog_filename(season, team))
current_toi = None
games_already_done = set()
if os.path.exists(get_team_toilog_filename(season, team)):
current_toi = feather.read_dataframe(get_team_toilog_filename(season, team))
games_already_done = {x for x in current_toi.Game}
### TODO issues here
dflist = []
        if not force_overwrite and current_toi is not None:
            dflist.append(current_toi)
teamgames = {g for g in teamgames if g not in games_already_done}
#if force_overwrite:
for game in teamgames:
try:
df = pd.read_hdf(scrape_game.get_parsed_shifts_save_filename(season, game))
df = df.assign(Game = game)
cols_to_replace = {col for col in df.columns if str.isdigit(col[-1]) if col[:3] != team}
df.rename(columns = {col: 'Opp' + col[3:] for col in cols_to_replace}, inplace = True)
if df is not None:
dflist.append(df)
except FileNotFoundError:
pass
import pandas as pd
dflist = [df for df in dflist if df is not None]
if len(dflist) > 0:
new_toi = pd.concat(dflist)
for col in new_toi.columns:
if new_toi[col].dtype == 'object':
new_toi[col] = new_toi[col].astype(str)
feather.write_dataframe(new_toi, get_team_toilog_filename(season, team))
def get_team_toilog(season, team):
import feather
return feather.read_dataframe(get_team_toilog_filename(season, team))
def get_team_pbplog(season, team):
import feather
return feather.read_dataframe(get_team_pbplog_filename(season, team))
def get_season_schedule_url(season):
return 'https://statsapi.web.nhl.com/api/v1/schedule?startDate={0:d}-09-01&endDate={1:d}-06-25'.format(season,
season + 1)
def parse_games(season, games, force_overwrite = False, marker = 10):
"""
Parses the specified games.
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
games : iterable of ints (e.g. list)
The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
force_overwrite : bool
        If True, will overwrite previously parsed files. If False, will not parse if files already found.
marker : float or int
The number of times to print progress. 10 will print every 10%; 20 every 5%.
"""
import time
import datetime
starttime = time.time()
games = sorted(list(games))
marker_i = [len(games) // marker * i for i in range(marker)]
marker_i[-1] = len(games) - 1
marker_i_set = set(marker_i)
for i in range(len(games)):
game = games[i]
scrape_game.parse_game(season, game, force_overwrite)
if i in marker_i_set:
print('Done through', season, game, ' ~ ', round((marker_i.index(i)) * 100 / marker), '% in',
str(datetime.timedelta(seconds=time.time() - starttime)))
print('Done parsing games in', season)
def autoupdate(season = scrapenhl_globals.MAX_SEASON):
"""
Scrapes unscraped games for the specified season.
This is a convenience function that finds the highest completed game in a year and scrapes up to that point only.
This reduces unnecessary requests for unplayed games.
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
"""
import urllib.request
url = get_season_schedule_url(season)
with urllib.request.urlopen(url) as reader:
page = reader.read().decode('latin-1')
import json
jsonpage = json.loads(page)
completed_games = set()
for gameday in jsonpage['dates']:
for game in gameday['games']:
if game['status']['abstractGameState'] == 'Final':
completed_games.add(int(str(game['gamePk'])[-5:]))
scrape_games(season, completed_games)
parse_games(season, completed_games)
def read_completed_games_from_url(season):
import urllib.request
url = get_season_schedule_url(season)
with urllib.request.urlopen(url) as reader:
page = reader.read().decode('latin-1')
import json
jsonpage = json.loads(page)
completed_games = set()
for gameday in jsonpage['dates']:
for game in gameday['games']:
if game['status']['abstractGameState'] == 'Final':
completed_games.add(int(str(game['gamePk'])[-5:]))
return completed_games
def reparse_season(season = scrapenhl_globals.MAX_SEASON):
"""
Re-parses entire season.
:param season: int
The season of the game. 2007-08 would be 2007.
:return:
"""
completed_games = read_completed_games_from_url(season)
parse_games(season, completed_games, True)
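
# --- Hedged usage sketch (editor's addition) ----------------------------------
# A typical driver for the functions above; the season and game-id range are
# illustrative only.
def _example_partial_update():
    # Scrape and parse only regular-season games 20400-20500 of 2016-17,
    # leaving already-downloaded files untouched.
    scrape_season(2016, startgame=20400, endgame=20500, force_overwrite=False)
    parse_games(2016, range(20400, 20501))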
def rewrite_globals(start_from_scratch = True, seasons = None):
"""
Recreates global files: PLAYER_IDS, BASIC_GAMELOG, TEAM_IDS, CORRECTED_PLAYERNAMES
Parameters
-----------
seasons : list of int or None
The seasons of the games. 2007-08 would be 2007. Should only be provided when start_from_scratch is False.
start_from_scratch: bool
If True, will search through all files; if False, will look only at missing games in BASIC_GAMELOG.
False not yet implemented.
"""
import os.path
import zlib
import json
import pandas as pd
import time
import datetime
if seasons is None:
seasons = [i for i in range(2007, scrapenhl_globals.MAX_SEASON + 1)]
elif isinstance(seasons, int):
seasons = [seasons]
if start_from_scratch:
import os
try:
os.remove(scrapenhl_globals.PLAYER_ID_FILE)
except FileNotFoundError:
pass
try:
os.remove(scrapenhl_globals.TEAM_ID_FILE)
except FileNotFoundError:
pass
try:
os.remove(scrapenhl_globals.BASIC_GAMELOG_FILE)
except FileNotFoundError:
pass
for season in seasons:
starttime = time.time()
games = read_completed_games_from_url(season)
marker = 20
games = sorted(list(games))
marker_i = [len(games) // marker * i for i in range(marker)]
marker_i[-1] = len(games) - 1
marker_i_set = set(marker_i)
for i in range(len(games)):
game = games[i]
#print(season, game)
filename = scrape_game.get_parsed_save_filename(season, game)
if os.path.exists(scrape_game.get_json_save_filename(season, game)):
r = open(scrape_game.get_json_save_filename(season, game), 'rb')
page = r.read()
r.close()
page = zlib.decompress(page)
try:
data = json.loads(page.decode('latin-1'))
teamdata = data['liveData']['boxscore']['teams']
scrape_game.update_team_ids_from_json(teamdata)
scrape_game.update_player_ids_from_json(teamdata)
scrape_game.update_quick_gamelog_from_json(data)
except json.JSONDecodeError:
pass
if i in marker_i_set:
print('Done through', season, game, ' ~ ', round((marker_i.index(i)) * 100 / marker), '% in ',
str(datetime.timedelta(seconds = time.time() - starttime)))
print('Done with', season)
if __name__ == "__main__":
for season in range(2015, 2017):
autoupdate(season)
update_teamlogs(season, True) #have an error at some point in 2013 #and 2014
pass
|
muneebalam/scrapenhl
|
scrapenhl/scrape_season.py
|
Python
|
mit
| 13,272
|
#!/usr/bin/env python
import os
import sys
sys.path.append('/var/www/vehicle-journal/vjournal')
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vjournal.settings.prod")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
cslansing/vehicle-journal
|
manage.py
|
Python
|
mit
| 310
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""build query for doclistview and return results"""
import frappe, json
import frappe.permissions
from frappe.model.db_query import DatabaseQuery
from frappe import _
@frappe.whitelist()
def get():
args = get_form_params()
data = compress(execute(**args), args = args)
return data
def execute(doctype, *args, **kwargs):
return DatabaseQuery(doctype).execute(*args, **kwargs)
def get_form_params():
	"""Parse GET request parameters (JSON-encoded filters, fields, etc.) into Python objects."""
data = frappe._dict(frappe.local.form_dict)
del data["cmd"]
if isinstance(data.get("filters"), basestring):
data["filters"] = json.loads(data["filters"])
if isinstance(data.get("fields"), basestring):
data["fields"] = json.loads(data["fields"])
if isinstance(data.get("docstatus"), basestring):
data["docstatus"] = json.loads(data["docstatus"])
if isinstance(data.get("save_user_settings"), basestring):
data["save_user_settings"] = json.loads(data["save_user_settings"])
else:
data["save_user_settings"] = True
# queries must always be server side
data.query = None
return data
def compress(data, args = {}):
"""separate keys and values"""
from frappe.desk.query_report import add_total_row
if not data: return data
values = []
keys = data[0].keys()
for row in data:
new_row = []
for key in keys:
new_row.append(row[key])
values.append(new_row)
if args.get("add_total_row"):
meta = frappe.get_meta(args.doctype)
values = add_total_row(values, keys, meta)
return {
"keys": keys,
"values": values
}
@frappe.whitelist()
def save_report():
"""save report"""
data = frappe.local.form_dict
if frappe.db.exists('Report', data['name']):
d = frappe.get_doc('Report', data['name'])
else:
d = frappe.new_doc('Report')
d.report_name = data['name']
d.ref_doctype = data['doctype']
d.report_type = "Report Builder"
d.json = data['json']
frappe.get_doc(d).save()
frappe.msgprint(_("{0} is saved").format(d.name))
return d.name
@frappe.whitelist()
def export_query():
"""export from report builder"""
form_params = get_form_params()
form_params["limit_page_length"] = None
form_params["as_list"] = True
doctype = form_params.doctype
add_totals_row = None
file_format_type = form_params["file_format_type"]
del form_params["doctype"]
del form_params["file_format_type"]
if 'add_totals_row' in form_params and form_params['add_totals_row']=='1':
add_totals_row = 1
del form_params["add_totals_row"]
frappe.permissions.can_export(doctype, raise_exception=True)
db_query = DatabaseQuery(doctype)
ret = db_query.execute(**form_params)
if add_totals_row:
ret = append_totals_row(ret)
data = [['Sr'] + get_labels(db_query.fields, doctype)]
for i, row in enumerate(ret):
data.append([i+1] + list(row))
if file_format_type == "CSV":
# convert to csv
import csv
from cStringIO import StringIO
f = StringIO()
writer = csv.writer(f)
for r in data:
# encode only unicode type strings and not int, floats etc.
writer.writerow(map(lambda v: isinstance(v, unicode) and v.encode('utf-8') or v, r))
f.seek(0)
frappe.response['result'] = unicode(f.read(), 'utf-8')
frappe.response['type'] = 'csv'
frappe.response['doctype'] = doctype
elif file_format_type == "Excel":
from frappe.utils.xlsxutils import make_xlsx
xlsx_file = make_xlsx(data, doctype)
frappe.response['filename'] = doctype + '.xlsx'
frappe.response['filecontent'] = xlsx_file.getvalue()
frappe.response['type'] = 'binary'
def append_totals_row(data):
if not data:
return data
data = list(data)
totals = []
totals.extend([""]*len(data[0]))
for row in data:
for i in xrange(len(row)):
if isinstance(row[i], (float, int)):
totals[i] = (totals[i] or 0) + row[i]
data.append(totals)
return data
def get_labels(fields, doctype):
"""get column labels based on column names"""
labels = []
for key in fields:
key = key.split(" as ")[0]
if "." in key:
parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
else:
parenttype = doctype
			fieldname = key.strip("`")
df = frappe.get_meta(parenttype).get_field(fieldname)
label = df.label if df else fieldname.title()
if label in labels:
label = doctype + ": " + label
labels.append(label)
return labels
@frappe.whitelist()
def delete_items():
"""delete selected items"""
import json
il = json.loads(frappe.form_dict.get('items'))
doctype = frappe.form_dict.get('doctype')
for i, d in enumerate(il):
try:
frappe.delete_doc(doctype, d)
if len(il) >= 5:
frappe.publish_realtime("progress",
dict(progress=[i+1, len(il)], title=_('Deleting {0}').format(doctype)),
user=frappe.session.user)
except Exception:
pass
@frappe.whitelist()
def get_sidebar_stats(stats, doctype, filters=[]):
cat_tags = frappe.db.sql("""select tag.parent as category, tag.tag_name as tag
from `tabTag Doc Category` as docCat
INNER JOIN tabTag as tag on tag.parent = docCat.parent
where docCat.tagdoc=%s
ORDER BY tag.parent asc,tag.idx""",doctype,as_dict=1)
return {"defined_cat":cat_tags, "stats":get_stats(stats, doctype, filters)}
@frappe.whitelist()
def get_stats(stats, doctype, filters=[]):
"""get tag info"""
import json
tags = json.loads(stats)
if filters:
filters = json.loads(filters)
stats = {}
columns = frappe.db.get_table_columns(doctype)
for tag in tags:
if not tag in columns: continue
tagcount = frappe.get_list(doctype, fields=[tag, "count(*)"],
#filters=["ifnull(`%s`,'')!=''" % tag], group_by=tag, as_list=True)
filters = filters + ["ifnull(`%s`,'')!=''" % tag], group_by = tag, as_list = True)
if tag=='_user_tags':
stats[tag] = scrub_user_tags(tagcount)
stats[tag].append(["No Tags", frappe.get_list(doctype,
fields=[tag, "count(*)"],
filters=filters +["({0} = ',' or {0} is null)".format(tag)], as_list=True)[0][1]])
else:
stats[tag] = tagcount
return stats
@frappe.whitelist()
def get_filter_dashboard_data(stats, doctype, filters=[]):
"""get tags info"""
import json
tags = json.loads(stats)
if filters:
filters = json.loads(filters)
stats = {}
columns = frappe.db.get_table_columns(doctype)
for tag in tags:
if not tag["name"] in columns: continue
tagcount = []
if tag["type"] not in ['Date', 'Datetime']:
tagcount = frappe.get_list(doctype,
fields=[tag["name"], "count(*)"],
filters = filters + ["ifnull(`%s`,'')!=''" % tag["name"]],
group_by = tag["name"],
as_list = True)
if tag["type"] not in ['Check','Select','Date','Datetime','Int',
'Float','Currency','Percent'] and tag['name'] not in ['docstatus']:
stats[tag["name"]] = list(tagcount)
if stats[tag["name"]]:
data =["No Data", frappe.get_list(doctype,
fields=[tag["name"], "count(*)"],
filters=filters + ["({0} = '' or {0} is null)".format(tag["name"])],
as_list=True)[0][1]]
if data and data[1]!=0:
stats[tag["name"]].append(data)
else:
stats[tag["name"]] = tagcount
return stats
def scrub_user_tags(tagcount):
"""rebuild tag list for tags"""
rdict = {}
tagdict = dict(tagcount)
for t in tagdict:
if not t:
continue
alltags = t.split(',')
for tag in alltags:
if tag:
if not tag in rdict:
rdict[tag] = 0
rdict[tag] += tagdict[t]
rlist = []
for tag in rdict:
rlist.append([tag, rdict[tag]])
return rlist
# used in building query in queries.py
def get_match_cond(doctype):
cond = DatabaseQuery(doctype).build_match_conditions()
return ((' and ' + cond) if cond else "").replace("%", "%%")
def build_match_conditions(doctype, as_condition=True):
match_conditions = DatabaseQuery(doctype).build_match_conditions(as_condition=as_condition)
if as_condition:
return match_conditions.replace("%", "%%")
else:
return match_conditions
def get_filters_cond(doctype, filters, conditions):
if filters:
flt = filters
if isinstance(filters, dict):
filters = filters.items()
flt = []
for f in filters:
if isinstance(f[1], basestring) and f[1][0] == '!':
flt.append([doctype, f[0], '!=', f[1][1:]])
else:
value = frappe.db.escape(f[1]) if isinstance(f[1], basestring) else f[1]
flt.append([doctype, f[0], '=', value])
query = DatabaseQuery(doctype)
query.filters = flt
query.conditions = conditions
query.build_filter_conditions(flt, conditions)
cond = ' and ' + ' and '.join(query.conditions)
else:
cond = ''
return cond
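
# --- Hedged illustration (editor's addition, not part of Frappe) --------------
# compress() above turns a list of row dicts into a {"keys": ..., "values": ...}
# payload consumed by the list view; the invoice-like sample rows are made up.
def _compress_shape_example():
	rows = [{"name": "INV-0001", "total": 100}, {"name": "INV-0002", "total": 250}]
	# Roughly {"keys": ["name", "total"], "values": [["INV-0001", 100],
	# ["INV-0002", 250]]} (key order follows the dict's iteration order).
	return compress(rows)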
|
rohitwaghchaure/frappe
|
frappe/desk/reportview.py
|
Python
|
mit
| 8,532
|
import datetime
import time
import calendar
from django.core.urlresolvers import reverse
from django.http import Http404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from rolepermissions.decorators import has_role_decorator
from models import McEvent
from forms import McEventForm
# Create your views here.
month_names = 'January February March April May June July August September October November December'.split()
#Default calendar view
@login_required
def index(request):
cur_year, cur_month = time.localtime()[:2]
events = McEvent.objects.all()
context = {
'events': events,
}
return redirect('/calendar/%s/%s/' % (cur_year, cur_month))
@login_required
def event_list(request):
#only want events that occur this month; will change later
cur_year, cur_month, cur_day = time.localtime()[:3]
events = McEvent.objects.filter(start_date__year=cur_year, start_date__month=cur_month)
context = {
'events': events,
}
return render(request, 'mccalendar/event_list.html', context)
@login_required
def month(request, year=None, month=None, change=None):
year = int(year) if year else time.localtime()[0]
month = int(month) if month else time.localtime()[1]
#apply next/previous if applicable
if change in ('next', 'prev'):
if change=='next':
month = month+1
if month == 13:
month = 1
year += 1
elif change=='prev':
month = month-1
if month == 0:
month = 12
year -= 1
return redirect('/calendar/%s/%s/' % (year, month))
cur_year, cur_month, cur_day = time.localtime()[:3]
cal = calendar.Calendar()
cal.setfirstweekday(calendar.SUNDAY)
month_days = cal.itermonthdays(year, month)
events = []
lst=[[]]
week = 0
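    # Build the grid row by row: each cell is (day_of_month, events, is_today).
    # itermonthdays() yields 0 for padding days outside the month, and a new
    # row is started after every 7 cells.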
for day in month_days:
entries = current = False
if day:
events = McEvent.objects.filter(
start_date__lte=datetime.date(year, month, day),
end_date__gte=datetime.date(year, month, day))
if day == cur_day and year == cur_year and month == cur_month:
current = True
lst[week].append((day, events, current))
if len(lst[week]) == 7:
lst.append([])
week += 1
context = {
'year': year,
'month': month,
'day': day,
'month_name': month_names[month-1],
'month_days': lst,
}
return render(request, 'mccalendar/month.html', context)
@login_required
def day(request, year=None, month=None, day=None):
year = int(year) if year else time.localtime()[0]
    month = int(month) if month else time.localtime()[1]
    day = int(day) if day else time.localtime()[2]
events = McEvent.objects.filter(
start_date__lte=datetime.date(year, month, day),
end_date__gte=datetime.date(year, month, day))
context = {
'year': year,
'month': month,
'day': day,
'month_name': month_names[month-1],
'events': events,
}
return render(request, 'mccalendar/day.html', context)
@login_required
@has_role_decorator('staff')
def create_event(request):
if request.method == 'POST':
event = McEvent(owner=request.user.mcuser)
form = McEventForm(request.POST, instance=event)
if (form.is_valid()):
form.save()
return redirect(reverse('mccalendar:event_detail', args=[event.id]))
else:
form = McEventForm(request.POST, instance=event)
else:
form = McEventForm()
context = {
'form':form,
'form_url': reverse('mccalendar:create_event')
}
return render(request, 'mccalendar/edit_event.html', context)
@login_required
@has_role_decorator('staff')
def edit_event(request, event_id=None):
try:
event = McEvent.objects.get(id=event_id)
except McEvent.DoesNotExist:
return redirect('mccalendar:create_event')
if request.method == 'POST':
form = McEventForm(request.POST, instance=event)
if (form.is_valid()):
form.save()
return redirect(reverse('mccalendar:event_detail', args=[event.id]))
else:
form = McEventForm(request.POST, instance=event)
else:
form = McEventForm(instance=event)
context = {
'form':form,
'form_url': reverse('mccalendar:edit_event', args=[event_id])
}
return render(request, 'mccalendar/edit_event.html', context)
@login_required
def event_detail(request, event_id):
try:
event = McEvent.objects.get(id=event_id)
except McEvent.DoesNotExist:
raise Http404('Event %s does not exist' %event_id)
else:
context = {
'event': event,
}
return render(request, 'mccalendar/event_detail.html', context)
|
mcdermott-scholars/mcdermott
|
mccalendar/views.py
|
Python
|
mit
| 4,577
|
'''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_splits`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import Normalize
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/examples/svm/plot_rbf_parameters.py
|
Python
|
mit
| 8,095
|
import pytest
from named_dates.named_dates import\
day_of_nth_weekday, NoNthWeekdayError
# For reference throughout these tests, October 1, 2015 is
# a Thursday (weekday = 3).
def test_weekday_equals_first_of_month():
# Tests that day_of_nth_weekday works when the requested weekday is the
    # first weekday of the month.
assert day_of_nth_weekday(2015, 10, 3, nth=1) == 1
assert day_of_nth_weekday(2015, 10, 3, nth=2) == 8
assert day_of_nth_weekday(2015, 10, 3, nth=3) == 15
assert day_of_nth_weekday(2015, 10, 3, nth=4) == 22
assert day_of_nth_weekday(2015, 10, 3, nth=5) == 29
with pytest.raises(NoNthWeekdayError):
day_of_nth_weekday(2015, 10, 3, nth=0)
with pytest.raises(NoNthWeekdayError):
day_of_nth_weekday(2015, 10, 3, nth=6)
def test_weekday_greater_than_first_of_month():
# Tests that day_of_nth_weekday works when the requested weekday is
# greater than the first weekday of the month.
assert day_of_nth_weekday(2015, 10, 5, nth=1) == 3
assert day_of_nth_weekday(2015, 10, 5, nth=2) == 10
assert day_of_nth_weekday(2015, 10, 5, nth=5) == 31
with pytest.raises(NoNthWeekdayError):
day_of_nth_weekday(2015, 10, 5, nth=6)
def test_weekday_less_than_first_of_month():
# Tests that day_of_nth_weekday works when the requested weekday is
# less than the first weekday of the month.
assert day_of_nth_weekday(2015, 10, 1, nth=1) == 6
assert day_of_nth_weekday(2015, 10, 1, nth=2) == 13
assert day_of_nth_weekday(2015, 10, 1, nth=3) == 20
assert day_of_nth_weekday(2015, 10, 1, nth=4) == 27
with pytest.raises(NoNthWeekdayError):
day_of_nth_weekday(2015, 10, 1, nth=5)
def test_from_end():
# October 31 is a Saturday (day 5)
assert day_of_nth_weekday(2015, 10, 5, nth=1, from_end=True) == 31
assert day_of_nth_weekday(2015, 10, 5, nth=2, from_end=True) == 24
assert day_of_nth_weekday(2015, 10, 5, nth=5, from_end=True) == 3
with pytest.raises(NoNthWeekdayError):
assert day_of_nth_weekday(2015, 10, 5, nth=6, from_end=True)
assert day_of_nth_weekday(2015, 10, 3, nth=1, from_end=True) == 29
assert day_of_nth_weekday(2015, 10, 3, nth=2, from_end=True) == 22
assert day_of_nth_weekday(2015, 10, 3, nth=5, from_end=True) == 1
with pytest.raises(NoNthWeekdayError):
assert day_of_nth_weekday(2015, 10, 3, nth=6, from_end=True)
assert day_of_nth_weekday(2015, 10, 6, nth=1, from_end=True) == 25
assert day_of_nth_weekday(2015, 10, 6, nth=2, from_end=True) == 18
assert day_of_nth_weekday(2015, 10, 6, nth=4, from_end=True) == 4
with pytest.raises(NoNthWeekdayError):
assert day_of_nth_weekday(2015, 10, 6, nth=5, from_end=True)
def test_bad_kwargs_disallowed():
with pytest.raises(TypeError):
day_of_nth_weekday(2015, 1, 1, bad_kwarg=1)
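
# --- Hedged illustration (editor's addition) ----------------------------------
# The same signature exercised above can express real "named dates"; e.g. US
# Thanksgiving is the 4th Thursday (weekday 3) of November, which fell on
# November 26 in 2015.
def test_thanksgiving_2015_illustration():
    assert day_of_nth_weekday(2015, 11, 3, nth=4) == 26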
|
pschoenfelder/named-dates
|
tests/test_day_of_nth_weekday.py
|
Python
|
mit
| 2,862
|
#!/usr/bin/env python3
# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f:
SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(
r"^/Satoshi:("
r"0.14.(0|1|2|3|99)|"
r"0.15.(0|1|2|99)|"
r"0.16.(0|1|2|3|99)|"
r"0.17.(0|0.1|1|2|99)|"
r"0.18.(0|1|99)|"
r"0.19.(0|1|99)|"
r"0.20.(0|1|99)|"
r"0.21.99"
r")")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if sline[1] == '0':  # the field is read as a string, so compare as a string
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def dedup(ips):
'''deduplicate by address,port'''
d = {}
for ip in ips:
d[ip['ip'],ip['port']] = ip
return list(d.values())
def filtermultiport(ips):
    '''Filter out hosts with more than one node per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
def lookup_asn(net, ip):
'''
Look up the asn for an IP (4 or 6) address by querying cymru.com, or None
if it could not be found.
'''
try:
if net == 'ipv4':
ipaddr = ip
prefix = '.origin'
else: # http://www.team-cymru.com/IP-ASN-mapping.html
res = str() # 2001:4860:b002:23::68
for nb in ip.split(':')[:4]: # pick the first 4 nibbles
for c in nb.zfill(4): # right padded with '0'
res += c + '.' # 2001 4860 b002 0023
ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
prefix = '.origin6'
asn = int([x.to_text() for x in dns.resolver.resolve('.'.join(
reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
'TXT').response.answer][0].split('\"')[1].split(' ')[0])
return asn
except Exception:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n')
return None
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_per_net):
# Sift out ips by type
ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv46 by ASN, and limit to max_per_net per network
result = []
net_count = collections.defaultdict(int)
asn_count = collections.defaultdict(int)
for ip in ips_ipv46:
if net_count[ip['net']] == max_per_net:
continue
asn = lookup_asn(ip['net'], ip['ip'])
if asn is None or asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
net_count[ip['net']] += 1
result.append(ip)
# Add back Onions (up to max_per_net)
result.extend(ips_onion[0:max_per_net])
return result
def ip_stats(ips):
hist = collections.defaultdict(int)
for ip in ips:
if ip is not None:
hist[ip['net']] += 1
return '%6d %6d %6d' % (hist['ipv4'], hist['ipv6'], hist['onion'])
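
# --- Hedged illustration (editor's addition, not real seeder output) ----------
# parseline() above reads one whitespace-separated line per node; the sample
# below is fabricated only to show the field positions used (0: address,
# 2: last success, 7: uptime%, 8: blocks, 9: service bits hex, 10: protocol
# version, 11: quoted user agent).
def _parseline_example():
    sample = ('203.0.113.5:8333 86400 1612345678 - - - - 100.00% '
              '650000 0000040d 70015 "/Satoshi:0.20.1/"')
    return parseline(sample)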
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
print('%s Initial' % (ip_stats(ips)), file=sys.stderr)
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
print('%s Skip entries with invalid address' % (ip_stats(ips)), file=sys.stderr)
# Skip duplicates (in case multiple seeds files were concatenated)
ips = dedup(ips)
print('%s After removing duplicates' % (ip_stats(ips)), file=sys.stderr)
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
print('%s Skip entries from suspicious hosts' % (ip_stats(ips)), file=sys.stderr)
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
print('%s Enforce minimal number of blocks' % (ip_stats(ips)), file=sys.stderr)
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
print('%s Require service bit 1' % (ip_stats(ips)), file=sys.stderr)
# Require at least 50% 30-day uptime for clearnet, 10% for onion.
req_uptime = {
'ipv4': 50,
'ipv6': 50,
'onion': 10,
}
ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]]
print('%s Require minimum uptime' % (ip_stats(ips)), file=sys.stderr)
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
print('%s Require a known and recent user agent' % (ip_stats(ips)), file=sys.stderr)
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
print('%s Filter out hosts with multiple bitcoin ports' % (ip_stats(ips)), file=sys.stderr)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
print('%s Look up ASNs and limit results per ASN and per net' % (ip_stats(ips)), file=sys.stderr)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
|
deeponion/deeponion
|
contrib/seeds/makeseeds.py
|
Python
|
mit
| 8,058
|
class Node:
def __init__(self, value):
self.value = value
self.next = None
self.prev = None
class OldestUnique:
def __init__(self):
self.uniq = {}
self.seen = set()
self.head = None
self.tail = None
def feed(self, value):
if value in self.uniq:
            # value is no longer unique: unlink it and drop it from uniq
            # (it stays in `seen`, so later occurrences are ignored)
            node = self.uniq.pop(value)
if node.prev is not None:
node.prev.next = node.next
else:
self.head = node.next
if node.next is not None:
node.next.prev = node.prev
else:
self.tail = node.prev
elif value not in self.seen:
node = Node(value)
if self.head is None:
self.tail = node
else:
node.next = self.head
self.head.prev = node
self.head = node
self.uniq[value] = node
self.seen.add(value)
def query(self):
if self.tail is not None:
return self.tail.value
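# Illustrative usage sketch (not part of the original file): feed a stream of
# values and query the oldest value that has, so far, appeared exactly once.
if __name__ == '__main__':
    ou = OldestUnique()
    for v in [3, 1, 3, 2]:
        ou.feed(v)
    print(ou.query())  # 3 was repeated, so the oldest remaining unique value is 1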
|
frasertweedale/drill
|
py/oldest_unique.py
|
Python
|
mit
| 1,106
|
"""
MIT License
Copyright (c) 2017 cgalleguillosm, AlessioNetti
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
from sys import maxsize
from random import seed
from abc import abstractmethod, ABC
from sortedcontainers.sortedlist import SortedListWithKey
from enum import Enum
from copy import deepcopy
from accasim.base.resource_manager_class import ResourceManager
from accasim.base.allocator_class import AllocatorBase
class DispatcherError(Exception):
pass
class JobVerification(Enum):
REJECT = -1 # All jobs are rejected
NO_CHECK = 0 # No verification
CHECK_TOTAL = 1 # Total requested resources are verified
CHECK_REQUEST = 2 # Each node x resources are verified
class SchedulerBase(ABC):
"""
    This class allows dispatching methods to be implemented by integrating an allocator (:class:`accasim.base.allocator_class.AllocatorBase`) with an implementation of this class.
    An implementation of this class could also serve as an entire dispatching method if the allocator class is not used by default (:class:`.allocator` = None), but then the resource manager must
    be set on the allocator using :func:`accasim.base.allocator_class.AllocatorBase.set_resource_manager`.
"""
MAXSIZE = maxsize
ALLOW_MAPPING_SAME_NODE = True
def __init__(self, _seed, allocator=None, job_check=JobVerification.CHECK_REQUEST, **kwargs):
"""
Construct a scheduler
        :param _seed: Seed for the random state
        :param resource_manager: A Resource Manager object for dealing with system resources.
        :param allocator: Allocator object used by the scheduler to allocate jobs after schedule generation. If no allocator is defined, the scheduler class must generate the entire dispatching plan.
        :param job_check: A job may be rejected depending on the verification mode:
            - JobVerification.REJECT: Any job is rejected
            - JobVerification.NO_CHECK: All jobs are accepted
            - JobVerification.CHECK_TOTAL: The job is rejected if it requests more resources than are available in the whole system.
            - JobVerification.CHECK_REQUEST: The job is rejected if an individual per-node request asks for more resources than any node provides.
        :param kwargs:
            - skip_jobs_on_allocation: If the allocator is predefined and this parameter is true, the allocator will try to allocate as many jobs as possible.
                Otherwise, the allocation will stop after the first failure.
"""
seed(_seed)
self._counter = 0
self.allocator = None
self._logger = logging.getLogger('accasim')
self._system_capacity = None
self._nodes_capacity = None
self.resource_manager = None
if allocator:
assert isinstance(allocator, AllocatorBase), 'Allocator not valid for scheduler'
self.allocator = allocator
# self.set_resource_manager(resource_manager)
assert(isinstance(job_check, JobVerification)), 'job_check invalid type. {}'.format(job_check.__class__)
if job_check == JobVerification.REJECT:
print('All jobs will be rejected, and for performance purposes the rejection messages will be omitted.')
self._job_check = job_check
# Check resources
        self._min_required_availability = kwargs.pop('min_resources', None)  # e.g. ['core', 'mem']
# Skip jobs during allocation
self.skip_jobs_on_allocation = kwargs.pop('skip_jobs_on_allocation', False)
@property
def name(self):
"""
        Name of the scheduling method
"""
raise NotImplementedError
@abstractmethod
def get_id(self):
"""
Must return the full ID of the scheduler, including policy and allocator.
:return: the scheduler's id.
"""
raise NotImplementedError
@abstractmethod
    def scheduling_method(self, cur_time, es, es_dict):
        """
        This function must map the queued events to available nodes at the current time.
        :param cur_time: current time
        :param es: events to be scheduled
        :param es_dict: dictionary with full data of the job events
        :return: a tuple of (time to schedule, event id, list of assigned nodes), and an array of rejected job ids
        """
        raise NotImplementedError('This function must be implemented!')
def set_resource_manager(self, resource_manager):
"""
Set a resource manager.
:param resource_manager: An instantiation of a resource_manager class or None
"""
if resource_manager:
if self.allocator:
self.allocator.set_resource_manager(resource_manager)
assert isinstance(resource_manager, ResourceManager), 'Resource Manager not valid for scheduler'
self.resource_manager = resource_manager
else:
self.resource_manager = None
def schedule(self, cur_time, es_dict, es):
"""
Method for schedule. It calls the specific scheduling method.
:param cur_time: current time
:param es_dict: dictionary with full data of the events
:param es: events to be scheduled
:return: a tuple of (time to schedule, event id, list of assigned nodes), array of rejected job ids.
"""
        assert(self.resource_manager is not None), 'The resource manager is not defined. It must be defined prior to running the simulation.'
self._counter += 1
self._logger.debug("{} Dispatching: #{} decision".format(cur_time, self._counter))
self._logger.debug('{} Dispatching: {} queued jobs'.format(cur_time, len(es)))
self._logger.debug('{} Dispatching: {}'.format(cur_time, self.resource_manager.current_usage))
rejected = []
        # A job needs at least 1 core and some memory to run
        if self._min_required_availability and any([self.resource_manager.resources.full[res] for res in self._min_required_availability]):
            self._logger.debug("One of the minimum required resources to run a job is not available. The dispatching process will be delayed until there are enough resources.")
return [(None, e, []) for e in es], rejected
accepted = []
# Verify jobs with the defined Job Policy
for e in es:
job = es_dict[e]
if not job.get_checked() and not self._check_job_request(job):
if self._job_check != JobVerification.REJECT:
self._logger.warning('{} has been rejected by the dispatcher. ({})'.format(e, self._job_check))
rejected.append(e)
continue
accepted.append(job)
to_allocate = []
# On accepted jobs by policy, try to schedule with the scheduling policy
if accepted:
to_allocate, to_reject = self.scheduling_method(cur_time, accepted, es_dict)
rejected += to_reject
for e in to_reject:
self._logger.warning('{} has been rejected by the dispatcher. (Scheduling policy)'.format(e))
# If there are scheduled jobs and an allocator defined, try to allocate the scheduled jobs.
if to_allocate and self.allocator:
dispatching_plan = self.allocator.allocate(to_allocate, cur_time, skip=self.skip_jobs_on_allocation)
else:
dispatching_plan = to_allocate
return dispatching_plan, rejected
def _check_job_request(self, _job):
"""
Simple method that checks if the loaded _job violates the system's resource constraints.
:param _job: Job object
:return: True if the _job is valid, false otherwise
"""
_job.set_checked(True)
if self._job_check == JobVerification.REJECT:
return False
elif self._job_check == JobVerification.NO_CHECK:
return True
elif self._job_check == JobVerification.CHECK_TOTAL:
# We verify that the _job does not violate the system's resource constraints by comparing the total
if not self._system_capacity:
self._system_capacity = self.resource_manager.system_capacity('total')
return not any([_job.requested_resources[res] * _job.requested_nodes > self._system_capacity[res] for res in _job.requested_resources.keys()])
elif self._job_check == JobVerification.CHECK_REQUEST:
if not self._nodes_capacity:
self._nodes_capacity = self.resource_manager.system_capacity('nodes')
# We verify the _job request can be fitted in the system
_requested_resources = _job.requested_resources
_requested_nodes = _job.requested_nodes
_fits = 0
_diff_node = 0
for _node, _attrs in self._nodes_capacity.items():
            # How many times the request fits on the node
_nfits = min([_attrs[_attr] // req for _attr, req in _requested_resources.items() if req > 0 ])
# Update current number of times the current job fits in the nodes
if _nfits > 0:
_fits += _nfits
_diff_node += 1
if self.ALLOW_MAPPING_SAME_NODE:
# Since _fits >> _diff_node this logical comparison is omitted.
if _fits >= _requested_nodes:
return True
else:
if _diff_node >= _requested_nodes:
return True
return False
raise DispatcherError('Invalid option.')
def __str__(self):
return self.get_id()
class SimpleHeuristic(SchedulerBase):
"""
    Simple scheduler that sorts the events according to the chosen policy.
    If a single job allocation fails, all subsequent jobs fail too.
    The policy is defined by a name and the sorting-function parameters.
"""
def __init__(self, seed, allocator, name, sorting_parameters, **kwargs):
SchedulerBase.__init__(self, seed, allocator, **kwargs)
self.name = name
self.sorting_parameters = sorting_parameters
def get_id(self):
"""
Returns the full ID of the scheduler, including policy and allocator.
:return: the scheduler's id.
"""
return '-'.join([self.__class__.__name__, self.name, self.allocator.get_id()])
def scheduling_method(self, cur_time, jobs, es_dict):
"""
This function must map the queued events to available nodes at the current time.
:param cur_time: current time
        :param jobs: jobs to be scheduled
        :param es_dict: dictionary with full data of the events
        :return: a tuple of (time to schedule, event id, list of assigned nodes), and an array of rejected job ids
"""
to_reject = []
to_schedule = SortedListWithKey(jobs, **self.sorting_parameters)
return to_schedule, to_reject
class FirstInFirstOut(SimpleHeuristic):
"""
**FirstInFirstOut scheduling policy.**
The first come, first served (commonly called FirstInFirstOut ‒ first in, first out)
process scheduling algorithm is the simplest process scheduling algorithm.
"""
name = 'FIFO'
""" Name of the Scheduler policy. """
sorting_arguments = {
'key': lambda x: x.queued_time
}
""" This sorting function allows to sort the jobs in relation of the scheduling policy. """
def __init__(self, _allocator, _seed=0, **kwargs):
"""
FirstInFirstOut Constructor
"""
SimpleHeuristic.__init__(self, _seed, _allocator, self.name, self.sorting_arguments, **kwargs)
class LongestJobFirst(SimpleHeuristic):
"""
**LJF scheduling policy.**
Longest Job First (LJF) sorts the jobs, where the longest jobs are preferred over the shortest ones.
"""
name = 'LJF'
""" Name of the Scheduler policy. """
sorting_arguments = {
'key': lambda x:-x.expected_duration
}
""" This sorting function allows to sort the jobs in relation of the scheduling policy. """
def __init__(self, _allocator, _resource_manager=None, _seed=0, **kwargs):
"""
LJF Constructor
"""
SimpleHeuristic.__init__(self, _seed, _allocator, self.name, self.sorting_arguments, **kwargs)
class ShortestJobFirst(SimpleHeuristic):
"""
**SJF scheduling policy.**
Shortest Job First (SJF) sorts the jobs, where the shortest jobs are preferred over the longest ones.
"""
name = 'SJF'
""" Name of the Scheduler policy. """
sorting_arguments = {
'key': lambda x: x.expected_duration
}
""" This sorting function allows to sort the jobs in relation of the scheduling policy. """
def __init__(self, _allocator, _resource_manager=None, _seed=0, **kwargs):
"""
SJF Constructor
"""
SimpleHeuristic.__init__(self, _seed, _allocator, self.name, self.sorting_arguments, **kwargs)
class EASYBackfilling(SchedulerBase):
"""
EASY Backfilling scheduler.
Whenever a job cannot be allocated, a reservation is made for it. After this, the following jobs are used to
backfill the schedule, not allowing them to use the reserved nodes.
    This dispatching method includes its own calls to the allocator during the dispatching process,
    so it does not use the automatic allocator call after schedule generation.
"""
name = 'EBF'
""" Name of the Scheduler policy. """
def __init__(self, allocator, seed=0, **kwargs):
"""
Easy BackFilling Constructor
"""
SchedulerBase.__init__(self, seed, allocator=None, **kwargs)
self._blocked_job_id = None
self._reserved_slot = (None, [],)
self.nonauto_allocator = allocator
self.allocator_rm_set = False
# self.nonauto_allocator.set_resource_manager(resource_manager)
def get_id(self):
"""
Returns the full ID of the scheduler, including policy and allocator.
:return: the scheduler's id.
"""
return '-'.join([self.name, self.nonauto_allocator.name])
def scheduling_method(self, cur_time, queued_jobs, es_dict):
"""
This function must map the queued events to available nodes at the current time.
:param cur_time: current time
:param queued_jobs: Jobs to be dispatched
:param es_dict: dictionary with full data of the events
:return: a list of tuples (time to schedule, event id, list of assigned nodes), and a list of rejected job ids
"""
if not self.allocator_rm_set:
self.nonauto_allocator.set_resource_manager(self.resource_manager)
self.allocator_rm_set = True
avl_resources = self.resource_manager.current_availability
self.nonauto_allocator.set_resources(avl_resources)
to_dispatch = []
to_reject = []
_to_fill = []
_prev_blocked = None
_time_reached = False
if self._reserved_slot[0] and self._reserved_slot[0] <= cur_time:
_time_reached = True
# Tries to allocate the blocked job
self._logger.trace('There is a blocked job {} with {}'.format(self._blocked_job_id, self._reserved_slot))
# assert(self._blocked_job_id == queued_jobs[0].id), 'The first element is not the blocked one. ({} != {})'.format(self._blocked_job_id, queued_jobs[0].id)
blocked_job = queued_jobs[0]
queued_jobs = queued_jobs[1:]
allocation = self.nonauto_allocator.allocating_method(blocked_job, cur_time, skip=False)
if allocation[-1]:
self._logger.trace('{}: {} blocked job can be allocated. Unblocking'.format(cur_time, self._blocked_job_id))
self._blocked_job_id = None
self._reserved_slot = (None, [])
_prev_blocked = [allocation]
else:
# There are jobs still using the reserved nodes
self._logger.trace('{} job is still blocked. Reservation {}'.format(self._blocked_job_id, self._reserved_slot))
# Add the current allocation for the (un)blocked job.
to_dispatch += [allocation]
if self._blocked_job_id is None and queued_jobs:
# Tries to perform a FIFO allocation if there is no blocked job
# Returns the (partial) allocation and the idx for the blocked job, also sets the self._blocked_job_id var
_allocated_jobs, blocked_idx = self._try_fifo_allocation(queued_jobs, cur_time)
# There is a blocked job
if not (blocked_idx is None):
                # If there is no reservation yet, calculate it for the blocked job
if not self._reserved_slot[0]:
blocked_job = queued_jobs[blocked_idx]
self._logger.trace('Blocked {} Job: Calculate the reservation'.format(self._blocked_job_id))
# Current reservation (future time, reserved nodes)
self._reserved_slot = self._calculate_slot(cur_time, deepcopy(avl_resources), _allocated_jobs[:blocked_idx], _prev_blocked, blocked_job, es_dict)
self._logger.trace('Blocked {} Job: Nodes {} are reserved at {}'.format(self._blocked_job_id, self._reserved_slot[1], self._reserved_slot[0]))
# Include the blocked job
to_dispatch += _allocated_jobs[:blocked_idx + 1]
_to_fill = queued_jobs[blocked_idx + 1:]
else:
to_dispatch += _allocated_jobs
else:
if not _time_reached:
# The blocked job
to_dispatch += [(None, self._blocked_job_id, [])]
# All the remaining queued jobs
_to_fill = queued_jobs[1:]
else:
# The remaining queued jobs
_to_fill = queued_jobs
if _to_fill:
self._logger.trace('Blocked job {}. {} jobs candidates to fill the gap'.format(self._blocked_job_id, len(_to_fill)))
# Filling the gap between cur_time and res_time
(reserved_time, reserved_nodes) = self._reserved_slot
filling_allocation = self.nonauto_allocator.allocating_method(_to_fill, cur_time, \
reserved_time=reserved_time,
                                                                          reserved_nodes=reserved_nodes,
skip=True
)
# Include the remaining jobs
to_dispatch += filling_allocation
return to_dispatch, to_reject
def _try_fifo_allocation(self, queued_jobs, cur_time):
"""
Allocates as many jobs as possible using the FIFO approach. As soon as one allocation fails, all subsequent jobs fail too.
Then, the return tuple contains info about the allocated jobs (assigned nodes and such) and also the position of the blocked job.
:param queued_jobs: List of job objects
:param cur_time: current time
:return job allocation, and position of the blocked job in the list
"""
# Try to allocate jobs as in FIFO
_allocated_jobs = self.nonauto_allocator.allocating_method(queued_jobs, cur_time, skip=False)
# Check if there is a blocked job (a job without an allocation)
blocked_idx = None
for i, (_, job_id, allocated_nodes) in enumerate(_allocated_jobs):
if not allocated_nodes:
self._blocked_job_id = job_id
blocked_idx = i
break
return _allocated_jobs, blocked_idx
def _calculate_slot(self, cur_time, avl_resources, decided_allocations, prev_blocked, blocked_job, es_dict):
"""
Computes a reservation for the blocked job, by releasing incrementally the resources used by the running
events and recently allocated jobs. The earliest slot in which blocked_job fits is chosen.
        :param cur_time: current time
        :param avl_resources: Actual available resources
        :param decided_allocations: Allocated jobs on the current iteration.
        :param prev_blocked: Allocation corresponding to the previous blocked job which has been unblocked during this iteration
        :param blocked_job: Event to be fitted in the time slot
        :param es_dict: Job dictionary
        :return: a tuple with the slot time and the reserved nodes
"""
current_allocations = self.resource_manager.current_allocations
        # Creates a list of the jobs sorted by soonest ending time first
future_endings = SortedListWithKey(key=lambda x:x[1])
# Running jobs
for job_id, resources in current_allocations.items():
future_endings.add((job_id, es_dict[job_id].start_time + es_dict[job_id].expected_duration, resources))
# Previous blocked job has been scheduled
if prev_blocked:
decided_allocations += prev_blocked
# Current allocated job
for (_, job_id, nodes) in decided_allocations:
_dec_alloc = {}
for node in nodes:
if not(node in _dec_alloc):
_dec_alloc[node] = {k:v for k, v in es_dict[job_id].requested_resources.items()}
else:
for res, v in es_dict[job_id].requested_resources.items():
_dec_alloc[node][res] += v
future_endings.add((job_id, cur_time + es_dict[job_id].expected_duration, _dec_alloc))
_required_alloc = blocked_job.requested_nodes
_requested_resources = blocked_job.requested_resources
_partial_alloc = {}
# Calculate the partial allocation on the current system state
for node, resources in avl_resources.items():
new_alloc = min([resources[req] // _requested_resources[req] for req in _requested_resources])
if new_alloc > 0:
_partial_alloc[node] = new_alloc
# Calculate the partial allocation on the next future endings
for (job_id, res_time, used_nodes) in future_endings:
for node, used_resources in used_nodes.items():
if not(node in avl_resources):
avl_resources[node] = {r:0 for r in _requested_resources}
for r, v in used_resources.items():
avl_resources[node][r] += v
cur_alloc = _partial_alloc.get(node, 0)
new_alloc = min([avl_resources[node][req] // _requested_resources[req] for req in _requested_resources])
_diff = new_alloc - cur_alloc
if _diff > 0:
_partial_alloc[node] = _partial_alloc.get(node, 0) + _diff
# At this point the blocked job can be allocated
if sum(_partial_alloc.values()) >= _required_alloc:
ctimes = 0
nodes = []
for node, times in _partial_alloc.items():
ctimes += times
nodes.append(node)
if ctimes >= _required_alloc:
break
return (res_time, nodes,)
raise DispatcherError('Can\'t find the slot.... no end? :(')
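# Illustrative sketch (not part of the original file): the SimpleHeuristic docstring above
# describes a policy as a name plus sorting-function parameters, so a new policy only needs
# those two pieces; job verification and allocation are inherited. The policy name 'SmJF'
# and the use of requested_nodes as the sort key are examples, not part of the accasim API.
class SmallestJobFirst(SimpleHeuristic):
    """
    Example policy: jobs requesting fewer nodes are preferred.
    """
    name = 'SmJF'
    """ Name of the Scheduler policy. """
    sorting_arguments = {
        'key': lambda x: x.requested_nodes
    }
    """ This sorting function sorts the jobs according to the scheduling policy. """
    def __init__(self, _allocator, _seed=0, **kwargs):
        """
        SmallestJobFirst Constructor
        """
        SimpleHeuristic.__init__(self, _seed, _allocator, self.name, self.sorting_arguments, **kwargs)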
|
cgalleguillosm/accasim
|
accasim/base/scheduler_class.py
|
Python
|
mit
| 26,009
|
# -*- coding: utf-8 -*-
u"""歌詞コーパスの取得
"""
import argparse
import requests
import logging
import urllib, urllib2
import re
import time
from BeautifulSoup import BeautifulSoup
verbose = False
logger = None
def init_logger():
global logger
logger = logging.getLogger('GetLyrics')
logger.setLevel(logging.DEBUG)
log_fmt = '%(asctime)s/%(name)s[%(levelname)s]: %(message)s'
logging.basicConfig(format=log_fmt)
def getSimilarArtist(artist):
u"""類似アーティストのリストを取得
Last.fm APIを使用
"""
params = {"method":"artist.getSimilar", "artist":artist,
"api_key":"1d4a537bc937e81b88719933eed12ea0"}
r = requests.get("http://ws.audioscrobbler.com/2.0/", params=params)
soup = BeautifulSoup(r.content)
artist_list = soup("name")
p = re.compile(r"<[^>]*?>")
for i, v in enumerate(artist_list):
artist_list[i] = p.sub("", str(v))
if verbose:
logger.info("Retrieved " + str(len(artist_list)) \
+ " similar artists")
return artist_list
def getArtistId(artist):
u"""j-lyrics.netでのアーティストのIDを取得
"""
params = {"ka": artist,}
baseurl = "http://search.j-lyric.net/index.php"
r = requests.get(baseurl, params=params)
soup = BeautifulSoup(r.content)
urls = soup.find("div", id="lyricList").findAll("a")
r = re.compile(r'http://j-lyric.net/artist/\w+/')
for url in urls:
href = url.get("href")
if href.startswith("http://j-lyric.net/artist/"):
return href.split("/")[-2]
if verbose:
logger.warning(artist + ": Not found")
return None
def getLyricUrlList(artist):
u"""アーティストのすべての歌詞ページのUrlを取得
j-lyrics.net
"""
artist_id = getArtistId(artist)
baseurl = "http://j-lyric.net/artist/" + artist_id + "/"
r = requests.get(baseurl)
soup = BeautifulSoup(r.content)
a_tags = soup.find("div", id="lyricList").findAll("a")
urls = map(lambda t: (t.string, t.get("href")), a_tags)
    # Drop links that are not lyric pages
urls = filter(lambda t: t[1].startswith('/artist/'), urls)
urls = map(lambda url: (url[0], "http://j-lyric.net" + url[1]),
urls)
return urls
def getLyricText(url):
u"""歌詞を取得して返す
"""
r = requests.get(url)
soup = BeautifulSoup(r.content)
# TODO: refactoring
text = str(soup.find("p", id="lyricBody"))
text = text.replace('<p id="lyricBody">', '').replace('</p>', '')
text = text.replace('\r', '').replace('\n', '')
return text.replace('\n', '').replace('<br />', '<BR>')
def printArtistList(artist_list):
u"""Last.fmから取得したアーティストのリストを取得
"""
for artist_name in artist_list:
print(artist_name)
def main(args):
global verbose
verbose = args.verbose
artist_list = getSimilarArtist(args.artist)
artist_list = [args.artist,] + artist_list
print("artist\ttitle\ttext")
for artist in artist_list[:args.n_artists]:
urls = getLyricUrlList(artist)
if verbose:
logger.info('{}: {} songs'.format(artist, len(urls)))
for i, url in enumerate(urls, start=1):
if verbose:
if i%10 == 0: logger.info("Wrote " + str(i) + " songs")
lyric = getLyricText(url[1])
print("{artist}\t{title}\t{text}".format(
artist=artist,
title=url[0].encode("utf-8"),
text=lyric))
time.sleep(1.0) # Wait one second
if __name__ == '__main__':
init_logger()
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--artist", default="湘南乃風", help="artist name")
parser.add_argument("-n", "--n-artists", dest="n_artists",
default=10,
help="max number of artists")
parser.add_argument("-v", "--verbose", action="store_true", default=False,
help="verbose output")
args = parser.parse_args()
main(args)
|
jntkym/rappers
|
getlyrics.py
|
Python
|
mit
| 3,885
|
#!/usr/bin/env python3
import sys  # used by OutputParser.parse_gff3 for sys.exit()
class OutputParser(object):
def __init__(self, fmt, file_to_parse):
self.fmt = fmt.upper()
self.file_to_parse = file_to_parse
def parse(self):
""" Wrapper function in case I want to be able to parse other formats at some time """
if self.fmt == "GFF3":
return self.parse_gff3()
def parse_gff3(self):
""" Function to handle parsing of the GFF3 file format, returns a list of Query objects """
queries = {}
with open(self.file_to_parse, 'r') as IN:
for line in IN:
if line.startswith("##"):
if line.startswith("##FASTA"):
break
continue
#elements = line[:-1].split("\t")
# make a dictionary of the elements in the line
hit_dict = {key: value for key, value in zip(["query", "source", "type", "start", "end", "score", "strand", "phase", "attributes"], line[:-1].split("\t"))}
if hit_dict["type"] == "polypeptide":
queries[hit_dict["query"]] = Query(hit_dict)
if hit_dict["score"] == ".":
continue
if hit_dict["query"] in queries:
queries[hit_dict["query"]].add_hit(hit_dict)
else:
print("WARNING! Not found in dict")
sys.exit()
return queries.values()
class Query(object):
def __init__(self, hit_dict):
self.length = int(hit_dict["end"]) - int(hit_dict["start"])
self.name = hit_dict["query"]
self.hits = []
def add_hit(self, hit_dict):
""" Need to move most of this processing to the GFF stuff """
elements = hit_dict["attributes"].split(";")
## Need to make sure all the params I want here are defined so I don't run into issues later
hit_dict["go_terms"] = []
for element in elements:
if element.startswith("Name="):
hit_dict["subject"] = element[5:]
if element.startswith("signature_desc="):
hit_dict["desc"] = element[15:]
if element.startswith("Ontology_term="):
element = element.replace("\"", "")
                    hit_dict["go_terms"] = element[14:].split(",")
# convert all number like things to numbers
for key in hit_dict:
if not isinstance(hit_dict[key], list):
try:
hit_dict[key] = float(hit_dict[key])
except ValueError:
continue
self.hits.append(hit_dict)
def remove_bad_hits(self, max_e=.05, min_cov=.5):
hits_to_keep = []
for hit in self.hits:
if hit["score"] > max_e:
continue
if (hit["end"] - hit["start"]) < min_cov * self.length:
continue
hits_to_keep.append(hit)
self.hits = hits_to_keep
def get_all_GO_terms(self):
""" Returns a list with all the GO terms for the hits. Do this after remove_bad_hits """
terms = {}
for hit in self.hits:
terms[hit["go_terms"]] == 1
return list(terms.keys())
def get_all_pfams(self):
""" Returns a list with all the pfams the query has hits to """
terms = {}
for hit in self.hits:
if hit["source"] == "Pfam":
terms[hit["subject"]] = 1
return list(terms.keys())
class InterproScanner(object):
def __init__(self,
# interproscan vars
fasta_in,
out_path="interproscan_results.txt",
fmt="GFF3",
bin="/proj/dangl_lab/apps/my_interproscan/interproscan-5.3-46.0/interproscan.sh",
othr_args="",
# bsub vars
threads=8,
queue="week",
stdout="interproscanner_bsub.out",
stderr="interproscanner_bsub.err",
# batch vars
seqs_per_file=25
):
self.ips_vars = {
'fasta_in': [fasta_in],
'out_path': out_path,
'fmt': fmt,
'bin': bin,
'other_args': othr_args
}
self.bsub_vars = {
'threads': threads,
'queue': queue,
'stdout': stdout,
'stderr': stderr
}
self.batch_vars = {
            'seqs_per_file': seqs_per_file
}
def run_interproscan(self):
# count sequences
seqs = 0
with open(self.ips_vars["fasta_in"][0], 'r') as IN:
for line in IN:
if line.startswith(">"):
seqs += 1
# split fasta if needed
        fas_files = self.ips_vars["fasta_in"]  # fall back to the original file if no split is needed
if seqs > self.batch_vars["seqs_per_file"]:
fas_files = self._split_fasta()
# run command on each fasta
for fas_file in fas_files:
command = self._build_command(fas_file)
print("Executing: " + command)
def _split_fasta(self):
""" Splits a the fasta into multiple parts and changes the fasta in to the parts """
counter = 1
file_indx = 1
fastas = []
with open(self.ips_vars["fasta_in"][0], 'r') as IN:
OUT = open("tmp_iproscanner_{}.fasta".format(file_indx), 'w')
fastas.append("tmp_iproscanner_{}.fasta".format(file_indx))
for line in IN:
# increment counter for each header
if line.startswith(">"):
counter += 1
# reset and open new file if counter is enough
if counter > self.batch_vars["seqs_per_file"]:
counter = 1
file_indx += 1
OUT.close()
OUT = open("tmp_iproscanner_{}.fasta".format(file_indx), 'w')
fastas.append("tmp_iporscanner_{}.fasta".format(file_indx))
OUT.write(line)
self.fasta_in = fastas
def _build_command(self, fasta_file):
""" Builds a command to run interproscan for a given fasta file """
        # shell with out and err
        command = "bsub -o {} -e {}".format(self.bsub_vars["stdout"], self.bsub_vars["stderr"])
        # add threading
        hosts = self.bsub_vars["threads"] // 8  # use as few hosts as possible
        command += " -n {} -R 'span[hosts={}]'".format(self.bsub_vars["threads"], hosts)
        # add interpro with base options and file
        command += " {} --goterms -dp -i {}".format(self.ips_vars["bin"], fasta_file)
        # add output options
        command += " -o {} -f {}".format(self.ips_vars["out_path"], self.ips_vars["fmt"])
        # add any other options
        command += " " + self.ips_vars["other_args"]
return command
def go_term_enrichment(queries):
term_counts = {}
for qry in queries:
qry.remove_bad_hits(max_e=.05, min_cov=.01)
terms = qry.get_all_GO_terms()
for term in terms:
term_counts[term] = term_counts.get(term, 0) + 1
for term in sorted(term_counts, key=lambda key: term_counts[key], reverse=True):
print(term + "\t" + str(term_counts[term]))
def pfam_dump(queries):
for qry in queries:
qry.remove_bad_hits(max_e=.05, min_cov=.01)
#print("Getting pfams for " + qry.name)
pfams = qry.get_all_pfams()
for pfam in pfams:
print(pfam + "\t" + qry.name)
if __name__ == "__main__":
import sys
my_file = sys.argv[1]
outp = OutputParser(fmt="GFF3", file_to_parse=my_file)
queries = outp.parse()
pfam_dump(queries)
|
hunter-cameron/Bioinformatics
|
python/interproscanner.py
|
Python
|
mit
| 7,965
|
# This module file is for parsing a GitHub project.
from AutoDoApp.parser.ParserCommunicator import ParserCommunicator
import os
import codecs
import git
import stat
from django.conf import settings
class Parser(ParserCommunicator):
def __init__(self):
self.git_dir = ""
self.dir_dict = {}
self.class_dict = {} # key: full path file_name, value: a list containing class names
self.method_dict = {} # key: class name, value: a list containing method names defined in the class
self.instance_dict = {} # key: class name, value: a list containing instance names inside the class
self.variable_dict = {} # key: instance name, value : a list containing invoked methods
self.file_list = []
self.req_list = []
self.license = ""
def task_request(self, project_id, user_id):
raise NotImplementedError("Implement this method!")
def task_complete(self, project_id, user_id):
raise NotImplementedError("Implement this method!")
def parse_api(self):
return self.method_dict
def parse_readme(self):
raise NotImplementedError("Implement this method!")
def parse_graph(self):
raise NotImplementedError("Implement this method!")
def parse_project(self, git_url):
self.__init__()
self.__clone_repository(git_url=git_url)
self.__parse_directory_structure()
self.__traverse_directories()
# Output formatting
graph = []
for callee_class_name in self.variable_dict:
for caller_class_name, instance_name in self.variable_dict[callee_class_name]:
for invoked_method in self.instance_dict[instance_name]:
graph.append((caller_class_name, callee_class_name, invoked_method))
name = "".join(git_url.split('/')[-1:])
tu = [graph, name, self.parse_api(), self.req_list, self.license]
return tuple(tu)
def __clone_repository(self, git_url):
if "github.com" not in git_url:
raise ValueError("Wrong Github Address!")
git_dir = os.path.join(settings.BASE_DIR, "git_project")
git_dir = os.path.join(git_dir, "".join(git_url.split('/')[-1:]))
self.git_dir = git_dir
if os.path.isdir(git_dir): # If there is a directory
for root, dirs, files in os.walk(top=git_dir, topdown=False):
for name in files:
os.chmod(os.path.join(root, name), stat.S_IWRITE)
os.remove(os.path.join(root, name))
for name in dirs:
os.chmod(os.path.join(root, name), stat.S_IWRITE)
os.rmdir(os.path.join(root, name))
os.rmdir(git_dir)
os.mkdir(git_dir) # Building a directory
repo = git.Repo.init(git_dir)
origin = repo.create_remote('origin', git_url)
origin.fetch()
origin.pull(origin.refs[0].remote_head)
def __parse_directory_structure(self):
# Root directory setup
root_dir = self.git_dir
# Traverse each directory to parse sub-directories and their files
for dir_name, subdir_list, file_list in os.walk(root_dir):
r_index = dir_name.rfind("/")
if dir_name.startswith(".", r_index+1) or dir_name.startswith("_", r_index+1):
continue
tmp_list = []
for f_name in file_list: # Check suffix of python project
if f_name[-3:] == ".py":
tmp_list.append(f_name)
elif "requirement" in f_name and ".txt" in f_name:
self.req_list = self.__parse_requirements(f_name)
elif "LICENSE" in f_name:
self.license = self.__parse_license(f_name)
if len(tmp_list) > 0: # We will not add empty directory into the dictionary
dir_name = dir_name.replace("\\", "/")
self.dir_dict[dir_name] = tmp_list
def __traverse_directories(self):
# To traverse each source file, searching directories
for key in self.dir_dict:
for item in self.dir_dict[key]:
if key.endswith("/"):
path = key + item
else:
path = key + "/" + item
self.file_list.append(path)
for path in self.file_list:
self.__traverse_source_file_path1(path=path)
for path in self.file_list:
self.__traverse_source_file_path2(path=path)
def __traverse_source_file_path1(self, path):
# traverse each python file
f = codecs.open(path, mode='r', encoding='utf-8')
class_list = []
lines = f.readlines()
# Path1: traverse class name
for line in lines:
line = line.strip()
if not line.startswith("class"):
continue
else:
tokens = line.split("class")
cls_name = tokens[1].strip()
cls_name = cls_name.replace(":", "")
class_list.append(cls_name)
if len(class_list) > 0:
self.class_dict[path] = class_list
# Path2: traverse method name
method_list = []
cur_context = ""
for line in lines:
line = line.strip()
if line.startswith("class"):
if len(method_list) > 0:
self.method_dict[cur_context] = method_list
method_list.clear()
tokens = line.split("class")
cur_context = tokens[1].strip()
cur_context = cur_context.replace(":", "")
if not line.startswith("def"):
continue
else:
tokens = line.split("def")
method_name = tokens[1].strip()
method_name = method_name.replace(":", "")
method_list.append(method_name)
if len(method_list) > 0:
self.method_dict[cur_context] = method_list
def __traverse_source_file_path2(self, path):
f = codecs.open(path, mode='r', encoding='utf-8')
lines = f.readlines()
# Path3: traverse source file to parse instance variables and its call relationships
# traverse information is stored at self.instance_dict.
# Structure: first dict : {class name : second_dict}
# second dict : {instance_variable : invoked method list}
invoked_method_dict = {}
cur_context = {}
cls_name = ""
for line in lines:
line = line.strip()
if "." in line and "(" in line: # Just method call
tokens = line.split(".")
instance_name = tokens[0]
if len(tokens) > 1 and "." in tokens[1]:
method_name = tokens[1].split(".")[0].strip()
method_name = method_name.split("(")[0]
elif len(tokens) > 1:
method_name = tokens[1].split("(")[0]
if 'method_name' in locals(): # At least method call
if instance_name not in self.instance_dict:
self.instance_dict[instance_name] = [method_name+tokens[1]]
else:
self.instance_dict[instance_name].append(method_name+tokens[1])
elif "(" in line and "=" in line: # Possible to instance creation
tokens = line.split("=")
instance_name = tokens[0].strip()
if len(tokens) > 1:
tokens = tokens[1].split("(")
if len(tokens) > 1:
instance_type = tokens[0].strip()
for files in self.class_dict:
for class_name in self.class_dict[files]:
if class_name.split("(")[0] == instance_type:
if instance_type not in self.variable_dict:
self.variable_dict[instance_type] = [(cls_name, instance_name)] # Callee class
else:
self.variable_dict[instance_type].append((cls_name, instance_name))
elif "class" in line:
tokens = line.split("class")
cls_name = tokens[1].strip()
cls_name = cls_name.replace(":", "")
def __parse_requirements(self, f_name):
req_loc = os.path.join(self.git_dir, f_name)
f = open(req_loc, 'r')
req = f.readlines()
return req
def __parse_license(self, f_name):
li_loc = os.path.join(self.git_dir, f_name)
f = open(li_loc, 'r')
return f.readline()
def prev_parse_project(self):
raise NotImplementedError("Implement this method!")
def calculate_diff_between(self, curr, prev):
raise NotImplementedError("Implement this method!")
def test(self):
self.parse_project("https")
if __name__ == "__main__":
p = Parser()
p.test()
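# Illustrative note (not part of the original file): parse_project returns a tuple of
# (call-graph edges, project name, method dict, requirements list, license line), where each
# graph edge is (caller class, callee class, invoked method), as assembled above in parse_project.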
|
AutoDo/AutoDo
|
AutoDoApp/parser/Parser.py
|
Python
|
mit
| 9,117
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Grouper establishes a group of parenting nodes for which
each level is set in an equivalent hdf5 structure.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Hdformaters.Hdformater"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import functools
from ShareYourSystem.Standards.Classors import Doer
from ShareYourSystem.Functers import Switcher
#</ImportSpecificModules>
#<DefineFunctions>
def getGroupedPathStrWithPathStrsList(_PathStrsList):
#Reduce
PathStr=functools.reduce(
lambda _TotalPathStr,_PathStr:
_TotalPathStr+_PathStr
if (len(_TotalPathStr)>0 and _TotalPathStr[-1]=='/') and (len(_PathStr)>0 and _PathStr[0]!='/'
) or (len(_TotalPathStr)>0 and _TotalPathStr[-1]!='/') and (len(_PathStr)>0 and _PathStr[0]=='/')
else
_TotalPathStr[:-1]+_PathStr
if (len(_TotalPathStr)>0 and _TotalPathStr[-1]=='/') and (len(_PathStr)>0 and _PathStr[0]=='/'
)
else _TotalPathStr+'/'+_PathStr
if '/' not in [_PathStr,_TotalPathStr]
else "",
_PathStrsList
)
#Maybe add / at the beginning
if (len(PathStr)>0 and PathStr[0]!='/') or PathStr=="":
PathStr='/'+PathStr
#Return
return PathStr
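#Illustrative examples (not part of the original file) of the reduce above:
# getGroupedPathStrWithPathStrsList(['/a/', '/b']) returns '/a/b'
# getGroupedPathStrWithPathStrsList(['a', 'b']) also returns '/a/b'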
#</DefineFunctions>
#<DefineClass>
@DecorationClass()
class GrouperClass(BaseClass):
#Definition
RepresentingKeyStrsList=[
'GroupedParentVariable',
'GroupedInt',
'GroupedKeyStr',
'GroupedDeriveParentersList',
'GroupedPathStrsList',
'GroupedPathStr'
]
#@Hooker.HookerClass(**{'HookingAfterVariablesList':[{'CallingVariable':BaseClass.__init__}]})
def default_init(
self,
_GroupedParentVariable=None,
_GroupedInt=-1,
_GroupedKeyStr="",
_GroupedDeriveParentersList=None,
_GroupedPathStrsList=None,
_GroupedPathStr="/",
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
#set
self.HdformatingFileKeyStr=SYS.InflectEngine.plural(
Doer.getDoStrWithDoerStr(
self.__class__.NameStr
)
)+'.hdf5'
def do_group(self):
#debug
'''
self.debug(('self.',self,['ParentingNodeStr']))
'''
#Parent
self.parent()
#Check
if len(self.ParentedDeriveParentersList)>0:
UppestParentPointer=self.ParentedDeriveParentersList[-1]
else:
UppestParentPointer=self
#Then get also from the UppestParentPointer its UppestGroupedParentVariable
if hasattr(UppestParentPointer,'GroupedDeriveParentersList'):
if len(UppestParentPointer.GroupedDeriveParentersList)>0:
				UppestGroupedParentVariable=UppestParentPointer.GroupedDeriveParentersList[-1]
else:
UppestGroupedParentVariable=UppestParentPointer
#Definition of the Link
		HdformatedFileVariableKeyStr="HdformatedFileVariable"
#debug
#self.debug('UppestParentPointer.GroupingPathStr is '+UppestParentPointer.GroupingPathStr)
#Point on the FilePointer of the uppest grouped Parent
self.__setattr__(
HdformatedFileVariableKeyStr,
getattr(
UppestGroupedParentVariable,
"HdformatedFileVariable"
)
)
#Get it definitely !
FilePointer=getattr(self,HdformatedFileVariableKeyStr)
#debug
#print('FilePointer is ',FilePointer)
#Create a group in the hdf5 file
if FilePointer!=None:
#debug
'''
self.debug(('self.',self,['NodedPathStr']))
'''
#set the GroupedPathStr
self.GroupedPathStr=getGroupedPathStrWithPathStrsList(
[
UppestGroupedParentVariable.GroupedPathStr,
self.ParentedNodePathStr
]
)
#debug
'''
self.debug(('self.',self,['GroupedPathStr']))
'''
#Check if the Path exists
if self.GroupedPathStr not in FilePointer:
#set all the intermediate Paths before
PathStrsList=self.GroupedPathStr.split('/')[1:]
ParsingChildPathStr="/"
#set the PathStr from the top to the down (integrativ loop)
for ChildPathStr in PathStrsList:
#Go deeper
NewParsingChildPathStr=ParsingChildPathStr+ChildPathStr
#Create the group if not already
if NewParsingChildPathStr not in FilePointer:
if self.HdformatingModuleStr=="tables":
FilePointer.create_group(ParsingChildPathStr,ChildPathStr)
elif self.HdformatingModuleStr=="h5py":
Group=FilePointer[ParsingChildPathStr]
Group.create_group(ChildPathStr)
#Prepare the next group
ParsingChildPathStr=NewParsingChildPathStr+'/'
#Return self
return self
#</DefineClass>
|
Ledoux/ShareYourSystem
|
Pythonlogy/draft/Noders/Grouper/Drafts/__init__ copy.py
|
Python
|
mit
| 4,755
|
"""
Problem: given an array arr of positive integers, the "minimum unformable sum" is defined as follows:
1) Summing the elements of each subset of arr produces many values; call the smallest min and the largest max.
2) If some value in the interval [min, max] cannot be obtained as the sum of any subset of arr,
   the smallest such value is the minimum unformable sum of arr.
3) If every value in [min, max] can be obtained as the sum of some subset of arr,
   then max + 1 is the minimum unformable sum of arr.
Write a function that returns the minimum unformable sum of the positive array arr.
Examples:
arr = [3, 2, 5]: the subset {2} gives min = 2 and the subset {3, 2, 5} gives max = 10. In [2, 10],
4, 6 and 9 cannot be formed by any subset, and 4 is the minimum unformable sum of arr.
arr = [1, 2, 4]: the subset {1} gives min = 1 and the subset {1, 2, 4} gives max = 7. Every value
in [1, 7] can be formed by some subset, so 8 is the minimum unformable sum of arr.
Follow-up:
If the array arr is known to contain the number 1, can the minimum unformable sum be found faster?
"""
import sys
class UnformedSum:
@classmethod
def unformed_sum(cls, arr):
if not arr:
return 1
my_set = set()
cls.process(arr, 0, 0, my_set)
min_value = sys.maxsize
for i in arr:
min_value = min([min_value, i])
i = min_value
while True:
if i not in my_set:
return i
i += 1
@classmethod
def process(cls, arr, index, cur_sum, cur_set):
if index == len(arr):
cur_set.add(cur_sum)
return
cls.process(arr, index+1, cur_sum+arr[index], cur_set)
cls.process(arr, index+1, cur_sum, cur_set)
@classmethod
def unformed_sum_dp(cls, arr):
if not arr:
return 1
max_sum = sum(arr)
min_value = min(arr)
dp = [False for _ in range(max_sum+1)]
dp[0] = True
        for num in arr:
            # iterate candidate sums in descending order so each element is used at most once
            for j in range(max_sum-num, -1, -1):
                if dp[j]:
                    dp[j+num] = True
for i in range(min_value, len(dp)):
if not dp[i]:
return i
return max_sum+1
@classmethod
def unformed_sum_contined_1(cls, arr):
if not arr:
return 1
sorted_arr = sorted(arr)
sum_val = 0
for i in range(len(sorted_arr)):
if sum_val + 1 < sorted_arr[i]:
return sum_val + 1
            sum_val += sorted_arr[i]
return sum_val + 1
if __name__ == '__main__':
print(UnformedSum.unformed_sum_dp([3, 2, 5]))
print(UnformedSum.unformed_sum_dp([1, 2, 4]))
print(UnformedSum.unformed_sum_contined_1([1, 2, 4]))
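# Illustrative note (not part of the original file): unformed_sum_contined_1 relies on the
# invariant that, after scanning the sorted prefix, every value in [1, sum_val] is formable.
# If the next sorted element exceeds sum_val + 1, that value can never be formed. For
# [1, 2, 4], sum_val grows 1 -> 3 -> 7 with no gap, so the answer is 8, as in the docstring.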
|
ResolveWang/algrithm_qa
|
other/q14.py
|
Python
|
mit
| 2,801
|
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
General EasyBuild support for installing FastQC
@author: Emilio Palumbo
"""
import os
import stat
from easybuild.tools.filetools import run_cmd
from easybuild.easyblocks.generic.packedbinary import PackedBinary
class EB_FastQC(PackedBinary):
"""Easyblock implementing the build step for FastQC,
this is just give execution permission to the `fastqc` binary before installing.
"""
def install_step(self):
"""Overwrite install_step from PackedBinary"""
os.chdir(self.builddir)
os.chmod("FastQC/fastqc", os.stat("FastQC/fastqc").st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
super(EB_FastQC, self).install_step()
|
ULHPC/modules
|
easybuild/easybuild-easyblocks/easybuild/easyblocks/f/fastqc.py
|
Python
|
mit
| 1,735
|
# -*- coding: utf-8 -*-
"""
object file io is a Python object to single file I/O framework. The word
'framework' means you can use any serialization/deserialization algorithm here.
- dump: dump python object to a file.
- safe_dump: add atomic writing guarantee for ``dump``.
- load: load python object from a file.
Features:
1. ``compress``: built-in compress/decompress options.
2. ``overwrite``: an option to prevent from overwrite existing file.
3. ``verbose``: optional built-in logger can display help infomation.
Usage:
suppose you have a function (dumper function, has to take python object as
input, and return a binary object) can dump python object to binary::
import pickle
def dump(obj):
return pickle.dumps(obj)
def load(binary):
return pickle.loads(binary)
You just need to add a decorator (parametrized with the serializer type), and the new
function will do all the magic for you:
    from obj_file_io import dump_func, load_func
    @dump_func("binary")
    def dump(obj):
        return pickle.dumps(obj)
    @load_func("binary")
    def load(binary):
        return pickle.loads(binary)
object file io is a framework for I/O between a Python object and a single local file.
"""
import os
import time
import zlib
import logging
import inspect
from atomicwrites import atomic_write
# logging util
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
def prt_console(message, verbose):
"""Print message to console, if ``verbose`` is True.
"""
if verbose:
logger.info(message)
def _check_serializer_type(serializer_type):
if serializer_type not in ["binary", "str"]:
raise ValueError("serializer_type has to be one of 'binary' or 'str'!")
# dump, load
def _dump(obj, abspath, serializer_type,
dumper_func=None,
compress=True,
overwrite=False,
verbose=False,
**kwargs):
"""Dump object to file.
:param abspath: The file path you want dump to.
:type abspath: str
:param serializer_type: 'binary' or 'str'.
:type serializer_type: str
:param dumper_func: A dumper function that takes an object as input, return
binary or string.
:type dumper_func: callable function
    :param compress: default ``True``. If True, then compress binary.
:type compress: bool
:param overwrite: default ``False``, If ``True``, when you dump to
existing file, it silently overwrite it. If ``False``, an alert
message is shown. Default setting ``False`` is to prevent overwrite
file by mistake.
:type overwrite: boolean
    :param verbose: default ``False``, help-message-display trigger.
:type verbose: boolean
"""
_check_serializer_type(serializer_type)
if not inspect.isfunction(dumper_func):
raise TypeError("dumper_func has to be a function take object as input "
"and return binary!")
prt_console("\nDump to '%s' ..." % abspath, verbose)
if os.path.exists(abspath):
if not overwrite:
prt_console(
" Stop! File exists and overwrite is not allowed",
verbose,
)
return
st = time.clock()
b_or_str = dumper_func(obj, **kwargs)
    if serializer_type == "str":
b = b_or_str.encode("utf-8")
else:
b = b_or_str
if compress:
b = zlib.compress(b)
with atomic_write(abspath, overwrite=overwrite, mode="wb") as f:
f.write(b)
elapsed = time.clock() - st
prt_console(" Complete! Elapse %.6f sec." % elapsed, verbose)
    if serializer_type == "str":
return b_or_str
else:
return b
def _load(abspath, serializer_type,
loader_func=None,
decompress=True,
verbose=False,
**kwargs):
"""load object from file.
:param abspath: The file path you want load from.
:type abspath: str
:param serializer_type: 'binary' or 'str'.
:type serializer_type: str
:param loader_func: A loader function that takes binary as input, return
an object.
:type loader_func: callable function
    :param decompress: default ``True``. If True, then decompress binary.
:type decompress: bool
    :param verbose: default ``False``, help-message-display trigger.
:type verbose: boolean
"""
_check_serializer_type(serializer_type)
if not inspect.isfunction(loader_func):
raise TypeError("loader_func has to be a function take binary as input "
"and return an object!")
prt_console("\nLoad from '%s' ..." % abspath, verbose)
if not os.path.exists(abspath):
raise ValueError("'%s' doesn't exist." % abspath)
st = time.clock()
with open(abspath, "rb") as f:
b = f.read()
if decompress:
b = zlib.decompress(b)
    if serializer_type == "str":
obj = loader_func(b.decode("utf-8"), **kwargs)
else:
obj = loader_func(b, **kwargs)
elapsed = time.clock() - st
prt_console(" Complete! Elapse %.6f sec." % elapsed, verbose)
return obj
def dump_func(serializer_type):
"""A decorator for ``_dump(dumper_func=dumper_func, **kwargs)``
"""
def outer_wrapper(dumper_func):
def wrapper(*args, **kwargs):
return _dump(
*args,
dumper_func=dumper_func, serializer_type=serializer_type,
**kwargs
)
return wrapper
return outer_wrapper
def load_func(serializer_type):
"""A decorator for ``_load(loader_func=loader_func, **kwargs)``
"""
def outer_wrapper(loader_func):
def wrapper(*args, **kwargs):
return _load(
*args,
loader_func=loader_func, serializer_type=serializer_type,
**kwargs
)
return wrapper
return outer_wrapper
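# Illustrative usage sketch (not part of the original file), following the module docstring:
# wrap pickle with the decorators to obtain a dump/load pair. The file name is only an example.
if __name__ == "__main__":
    import pickle
    @dump_func("binary")
    def dump(obj):
        return pickle.dumps(obj)
    @load_func("binary")
    def load(binary):
        return pickle.loads(binary)
    data = {"a": 1, "b": [1, 2, 3]}
    dump(data, "obj_file_io_example.pickle", overwrite=True, verbose=True)
    assert load("obj_file_io_example.pickle", verbose=True) == data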
|
MacHu-GWU/single_file_module-project
|
sfm/obj_file_io.py
|
Python
|
mit
| 6,059
|
# encoding: utf-8
'''Template filters
'''
def j(s):
"""Escape for JavaScript or encode as JSON"""
pass
try:
from cjson import encode as _json
except ImportError:
try:
from minjson import write as _json
except ImportError:
import re
_RE = re.compile(r'(["\'\\])')
def _json(s):
return repr(_RE.sub(r'\\\1', s)).replace('\\\\','\\')
j = _json
|
rsms/smisk
|
lib/smisk/mvc/template/filters.py
|
Python
|
mit
| 375
|
#!/usr/bin/python3
#JMP:xkozub03
import sys
import re
import Config
from Config import exit_err
macro_list = {};
def init_list(redef_opt):
def_macro = Macro("@def");
set_macro = Macro("@set");
let_macro = Macro("@let");
null_macro = Macro("@null");
_def_macro = Macro("@__def__");
_set_macro = Macro("@__set__");
_let_macro = Macro("@__let__");
def_macro.is_def = True;
set_macro.is_set = True;
let_macro.is_let = True;
null_macro.is_null = True;
_def_macro.is_def = True;
_set_macro.is_set = True;
_let_macro.is_let = True;
def_macro.set_redef(redef_opt);
_def_macro.set_redef(redef_opt);
def_macro.add_arg("$frst");
def_macro.add_arg("$scnd");
def_macro.add_arg("$thrd");
set_macro.add_arg("$frst");
let_macro.add_arg("$frst");
let_macro.add_arg("$scnd");
_def_macro.add_arg("$frst");
_def_macro.add_arg("$scnd");
_def_macro.add_arg("$thrd");
_set_macro.add_arg("$frst");
_let_macro.add_arg("$frst");
_let_macro.add_arg("$scnd");
macro_list["@def"] = def_macro;
macro_list["@set"] = set_macro;
macro_list["@let"] = let_macro;
macro_list["@null"] = null_macro;
macro_list["@__def__"] = _def_macro;
macro_list["@__set__"] = _set_macro;
macro_list["@__let__"] = _let_macro;
class Macro:
name = "";
body = "";
args = {};
args_ord_name = [];
args_cnt = 0;
args_order = 0;
is_def = False;
is_set = False;
is_let = False;
is_null = False;
redef = True;
def __init__(self, name):
self.name = name;
self.body = "";
self.args = {};
self.args_ord_name = [];
self.args_cnt = 0;
self.args_order = 0;
self.is_def = False;
self.is_set = False;
self.is_let = False;
self.is_null = False;
self.redef = True;
return;
def set_redef(self, redef):
self.redef = redef;
def get_name(self):
return self.name;
def get_num_of_args(self):
return self.args_cnt;
def add_arg(self, name):
if name in self.args.keys():
            # argument redefinition
exit_err("Semantic error (argument redefinition - '" + name + "')", 56);
self.args[name] = '';
self.args_ord_name.append(name);
self.args_cnt += 1;
def set_next_arg(self, value):
if self.args_order == self.args_cnt:
            # too many parameters
sys.stderr.write("Syntax error\n");
sys.exit(56);
if self.is_def and self.args_order == 0 and value[0] != '@':
exit_err("Macro name expected ('" + value + "' given)", 57);
self.args[self.args_ord_name[self.args_order]] = value;
self.args_order += 1;
def set_body(self, body):
self.body = body;
def expand(self):
return _expand(self);
def expand_def(self):
return _expand_def(self);
def expand_set(self):
return _expand_set(self);
def expand_let(self):
return _expand_let(self);
def expand_null(self):
return _expand_null(self);
def _expand(self):
if self.args_order != self.args_cnt:
sys.stderr.write("Syntax error\n");
        sys.exit(56);
self.args_order = 0;
if self.is_def:
return self.expand_def();
if self.is_set:
return self.expand_set();
if self.is_null:
return self.expand_null();
if self.is_let:
return self.expand_let();
exp_body = self.body;
m = re.findall("((^|[^\$]*?)(\$[a-zA-Z_][a-zA-Z_0-9]*)(\s|\$|$|[^a-zA-Z_0-9]))", exp_body);
for rex in m:
if rex[2] in self.args.keys():
exp_body = exp_body.replace(rex[0], rex[1] + self.args[rex[2]] + rex[3]);
return exp_body;
def _expand_def(self):
name = self.args[self.args_ord_name[0]];
arg_list = self.args[self.args_ord_name[1]];
def_body = self.args[self.args_ord_name[2]];
new_macro = Macro(name);
if name == "@__def__" or name == "@__let__" or name == "@__set__":
exit_err("Redef __macro__ error", 57);
if name == "@null":
return "";
if self.redef and name in macro_list:
exit_err("Redef -r macro error", 57);
m = re.findall("\$[a-zA-Z_][a-zA-Z_0-9]*", arg_list);
for rex in m:
new_macro.add_arg(rex);
new_macro.set_body(def_body);
macro_list[name] = new_macro;
return "";
def _expand_set(self):
self.body = "";
set = self.args[self.args_ord_name[0]];
global ignore_white;
if set == "-INPUT_SPACES":
Config.ignore_white = True;
elif set == "+INPUT_SPACES":
Config.ignore_white = False;
else:
sys.stderr.write("Set error!\n");
sys.exit(56);
return self.body;
def _expand_let(self):
self.body = "";
first = self.args[self.args_ord_name[0]];
second = self.args[self.args_ord_name[1]];
if first[0] != '@' or second[0] != '@':
exit_err("let macro requires macro names as both arguments", 57);
if first == "@null":
return self.body;
if first == "@__def__" or first == "@__let__" or first == "@__set__":
exit_err("Redef __macro__ error", 57);
if second == "@null":
if first in macro_list:
del macro_list[first];
return self.body;
macro_list[first] = macro_list[second];
return self.body;
def _expand_null(self):
return "";
|
mikeek/FIT
|
IPP/proj_2/Macro.py
|
Python
|
mit
| 4,768
|
from staffjoy.resource import Resource
from staffjoy.resources.worker import Worker
from staffjoy.resources.schedule import Schedule
from staffjoy.resources.shift import Shift
from staffjoy.resources.shift_query import ShiftQuery
from staffjoy.resources.recurring_shift import RecurringShift
class Role(Resource):
PATH = "organizations/{organization_id}/locations/{location_id}/roles/{role_id}"
ID_NAME = "role_id"
def get_workers(self, **kwargs):
return Worker.get_all(parent=self, **kwargs)
    def get_worker(self, id):
return Worker.get(parent=self, id=id)
def create_worker(self, **kwargs):
return Worker.create(parent=self, **kwargs)
def get_schedules(self, **kwargs):
return Schedule.get_all(parent=self, **kwargs)
def get_schedule(self, id):
return Schedule.get(parent=self, id=id)
def get_shifts(self, **kwargs):
return Shift.get_all(parent=self, **kwargs)
def get_shift(self, id):
return Shift.get(parent=self, id=id)
def create_shift(self, **kwargs):
return Shift.create(parent=self, **kwargs)
def get_shift_query(self, **kwargs):
return ShiftQuery.get_all(parent=self, **kwargs)
def get_recurring_shifts(self, **kwargs):
return RecurringShift.get_all(parent=self, **kwargs)
def get_recurring_shift(self, id):
return RecurringShift.get(parent=self, id=id)
def create_recurring_shift(self, **kwargs):
return RecurringShift.create(parent=self, **kwargs)
|
Staffjoy/client_python
|
staffjoy/resources/role.py
|
Python
|
mit
| 1,528
|
"""
WSGI config for eyrie project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eyrie.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
ZeroCater/Eyrie
|
eyrie/wsgi.py
|
Python
|
mit
| 478
|
import os
import dec
print 'Building HOG feature extractor...'
os.system('python setup_features.py build')
os.system('python setup_features.py install')
print 'Preparing stl data. This could take a while...'
dec.make_stl_data()
|
piiswrong/dec
|
dec/make_stl_data.py
|
Python
|
mit
| 229
|
import pygame
import rospy
import time
from std_msgs.msg import Float64
from std_msgs.msg import Float64MultiArray
#pygame setup
pygame.init()
pygame.display.set_mode([100,100])
delay = 100
interval = 50
pygame.key.set_repeat(delay, interval)
#really this should be passed in or something but for now if you want to change the name just do it here
robot_namespace = "qubo/"
effort = 50
num_thrusters = 8
rospy.init_node('keyboard_node', anonymous=False)
#rospy spins all these up in their own thread, no need to call spin()
roll_pub = rospy.Publisher(robot_namespace + "roll_cmd" , Float64, queue_size = 10 )
pitch_pub = rospy.Publisher(robot_namespace + "pitch_cmd" , Float64, queue_size = 10 )
yaw_pub = rospy.Publisher(robot_namespace + "yaw_cmd" , Float64, queue_size = 10 )
depth_pub = rospy.Publisher(robot_namespace + "depth_cmd" , Float64, queue_size = 10 )
surge_pub = rospy.Publisher(robot_namespace + "surge_cmd" , Float64, queue_size = 10 )
sway_pub = rospy.Publisher(robot_namespace + "sway_cmd" , Float64, queue_size = 10 )
thruster_pub = rospy.Publisher(robot_namespace + "thruster_cmds" , Float64MultiArray, queue_size = 10)
thruster_msg = Float64MultiArray()
pygame.key.set_repeat(10,10)
while(True):
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
print event.key
keys_pressed = pygame.key.get_pressed()
sway = surge = yaw = depth = 0
thruster_msg.data = [0]*num_thrusters
if keys_pressed[pygame.K_a]:
sway_pub.publish(effort)
elif keys_pressed[pygame.K_d]:
sway_pub.publish(-effort)
if keys_pressed[pygame.K_w]:
surge_pub.publish(effort)
print "asdasd"
elif keys_pressed[pygame.K_s]:
surge_pub.publish(-effort)
if keys_pressed[pygame.K_q]:
yaw_pub.publish(effort)
elif keys_pressed[pygame.K_e]:
yaw_pub.publish(-effort)
if keys_pressed[pygame.K_r]:
depth_pub.publish(effort)
elif keys_pressed[pygame.K_f]:
depth_pub.publish(-effort)
if keys_pressed[pygame.K_MINUS]:
sign = -1
else:
sign = 1
# this only works because each pygame.K_x constant is a number and K_0 - K_9 are contiguous (ASCII digit codes)
for i in range(0, 8):
if keys_pressed[pygame.K_0 + i]:
thruster_msg.data[i] = (effort*sign)
print thruster_msg.data
thruster_pub.publish(thruster_msg)
time.sleep(.05)
|
robotics-at-maryland/qubo
|
src/teleop/src/keyboard_controller.py
|
Python
|
mit
| 2,473
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Cd',
new_name='Release',
),
]
|
ThreeDRadio/playlists
|
backend/catalogue/migrations/0002_auto_20160628_1024.py
|
Python
|
mit
| 342
|
#!/usr/bin/env python3
# _*_ coding:utf8 _*_
# Power by zuosc 2016-10-23
'subclass demo: inheritance and polymorphism'
class Animal(object):
def run(self):
print('Animal is running......')
class Dog(Animal):
def run(self):
print('Dog is running.....')
def eat(self):
print('dog is eating......')
class Cat(Animal):
pass
dog = Dog()
dog.run()
|
zuosc/PythonCode
|
OOP/subclass.py
|
Python
|
mit
| 378
|
#!/usr/bin/env python3
"""Project Euler - Problem 27 Module"""
def problem27(ab_limit):
"""Problem 27 - Quadratic primes"""
# upper limit
nr_primes = 2 * ab_limit * ab_limit + ab_limit
primes = [1] * (nr_primes - 2)
result = 0
for x in range(2, nr_primes):
if primes[x - 2] == 1:
# x is Prime, eliminate x*y for y > 1
y = (x - 2) + x
while y < nr_primes - 2:
primes[y] = 0
y += x
# Largest seq
l_seq = 0
for a in range(-ab_limit + 1, ab_limit):
for b in range(2, ab_limit):
if primes[b - 2] == 0:
continue # no prime
# check formula
seq = 1  # n = 0 gives b itself, which is already known to be prime
x = 1
while True:
v = (x**2) + (a * x) + b
if v > 1 and primes[v - 2] == 1:
seq += 1
else:
break
x += 1
if seq > l_seq:
l_seq = seq
result = a * b
return result
def run():
"""Default Run Method"""
return problem27(1000)
if __name__ == '__main__':
print("Result: ", run())
|
rado0x54/project-euler
|
python/problem0027.py
|
Python
|
mit
| 1,182
|
from myhdl import *
from UK101AddressDecode import UK101AddressDecode
def bench():
AL = Signal(intbv(0)[16:])
MonitorRom = Signal(bool(0))
ACIA = Signal(bool(0))
KeyBoardPort = Signal(bool(0))
VideoMem = Signal(bool(0))
BasicRom = Signal(bool(0))
Ram = Signal(bool(0))
dut = UK101AddressDecode(
AL,
MonitorRom,
ACIA,
KeyBoardPort,
VideoMem,
BasicRom,
Ram)
@instance
def stimulus():
for i in range(0, 2**16):
AL.next = i
yield delay(10)
raise StopSimulation()
return dut, stimulus
sim = Simulation(traceSignals(bench))
sim.run()
|
jandecaluwe/myhdl-examples
|
crusty_UK101/UK101AddressDecode/bench.py
|
Python
|
mit
| 691
|
import copy
import json
from psycopg2.extensions import AsIs
from psycopg2.extras import RealDictCursor
from pg4nosql import DEFAULT_JSON_COLUMN_NAME, DEFAULT_ROW_IDENTIFIER
from pg4nosql.PostgresNoSQLQueryStructure import PostgresNoSQLQueryStructure
from pg4nosql.PostgresNoSQLResultItem import PostgresNoSQLResultItem
from pg4nosql.PostgresNoSQLUtil import to_nullable_string
class PostgresNoSQLTable(PostgresNoSQLQueryStructure):
__SQL_INSERT_JSON = "INSERT INTO %s(" + DEFAULT_JSON_COLUMN_NAME + " %s) VALUES(%s %s) RETURNING " + DEFAULT_ROW_IDENTIFIER
__SQL_GET_JSON = 'SELECT * FROM %s WHERE ' + DEFAULT_ROW_IDENTIFIER + '=%s'
__SQL_GET_COLUMNS = 'select column_name from information_schema.columns where table_name = %s'
__SQL_DELETE_JSON = 'DELETE FROM %s WHERE ' + DEFAULT_ROW_IDENTIFIER + '=%s'
__SQL_UPDATE_JSON = 'UPDATE %s SET ' + DEFAULT_JSON_COLUMN_NAME + '=%s %s WHERE ' + DEFAULT_ROW_IDENTIFIER + '=%s;'
__SQL_INSERT = "INSERT INTO %s(%s) VALUES(%s) RETURNING " + DEFAULT_ROW_IDENTIFIER
__SQL_UPDATE = 'UPDATE %s SET %s WHERE ' + DEFAULT_ROW_IDENTIFIER + '=%s;'
__SQL_QUERY_WITH_JOIN = 'SELECT %s FROM %s AS a JOIN %s AS b ON %s WHERE %s'
def __init__(self, name, connection):
super(PostgresNoSQLTable, self).__init__(name, connection)
self.super = super(PostgresNoSQLTable, self)
def commit(self):
"""
Use commit only if auto_commit in put or save are disabled!
:return: None
"""
self.connection.commit()
def insert(self, auto_commit=True, **data):
relational_data = data
relational_data_columns = ''
relational_data_values = ''
if relational_data:
relational_data_columns = ",".join(relational_data.keys())
data_list = map(str, map(to_nullable_string, relational_data.values()))
relational_data_values = ",".join(data_list)
self.cursor.execute(self.__SQL_INSERT, (AsIs(self.name),
AsIs(relational_data_columns),
AsIs(relational_data_values)))
if auto_commit:
self.commit()
return self.cursor.fetchone()[DEFAULT_ROW_IDENTIFIER]
def update(self, object_id, auto_commit=True, **relational_data):
relational_data_sql = ','.join(
"%s=%s" % (key, str(to_nullable_string(val))) for (key, val) in relational_data.items())
self.cursor.execute(self.__SQL_UPDATE, (AsIs(self.name),
AsIs(relational_data_sql), object_id))
if auto_commit:
self.commit()
def put(self, json_data, auto_commit=True, **relational_data):
relational_data.update({DEFAULT_JSON_COLUMN_NAME: json_data})
return self.insert(auto_commit=auto_commit, **relational_data)
def save(self, record, auto_commit=True):
data = copy.deepcopy(record.get_record())
object_id = data.pop(DEFAULT_ROW_IDENTIFIER)
self.update(object_id, auto_commit=auto_commit, **data)
def get(self, object_id):
self.cursor.execute(self.__SQL_GET_JSON, (AsIs(self.name), object_id))
record = self.cursor.fetchone()
if record is None:
return record
return PostgresNoSQLResultItem(record, self)
def query_join(self, table_name, on_statement, query='True', columns='*'):
self.cursor.execute(self.__SQL_QUERY_WITH_JOIN, (AsIs(columns),
AsIs(self.name),
AsIs(table_name),
AsIs(on_statement),
AsIs(query)))
rows = [item for item in self.cursor.fetchall()]
items = map(lambda r: PostgresNoSQLResultItem(r, self), rows)
return items
def query_one(self, query='True', columns='*'):
result = self.query(query, columns)
if not result:
return None
return result[0]
def get_columns(self):
self.cursor.execute(self.__SQL_GET_COLUMNS, (self.name,))
columns = map(lambda m: m['column_name'], self.cursor.fetchall())
return columns
def delete(self, object_id, auto_commit=True):
self.cursor.execute(self.__SQL_DELETE_JSON, (AsIs(self.name), object_id))
if auto_commit:
self.commit()
def execute(self, sql_query):
self.cursor.execute(sql_query)
return self.cursor.fetchall()
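# Usage sketch (illustrative only; how a connected PostgresNoSQLTable instance is
# obtained depends on the rest of the pg4nosql package, so `table` below is assumed
# to already exist, and `age` is assumed to be an existing relational column):
#
#   row_id = table.put({'name': 'alice', 'age': 30})   # store a JSON document
#   item = table.get(row_id)                           # PostgresNoSQLResultItem or None
#   table.update(row_id, auto_commit=False, age=31)    # update relational columns only
#   table.commit()                                     # required because auto_commit=False
#   table.delete(row_id)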
|
cansik/pg4nosql
|
pg4nosql/PostgresNoSQLTable.py
|
Python
|
mit
| 4,608
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
from flexget.manager import Session
try:
from flexget.plugins.api_trakt import ApiTrakt
lookup_series = ApiTrakt.lookup_series
lookup_episode = ApiTrakt.lookup_episode
except ImportError:
raise plugin.DependencyError(issued_by='trakt_lookup', missing='api_trakt',
message='trakt_lookup requires the `api_trakt` plugin')
log = logging.getLogger('trakt_lookup')
class PluginTraktLookup(object):
"""Retrieves trakt information for entries. Uses series_name,
series_season, series_episode from series plugin.
Example:
trakt_lookup: yes
Primarily used for passing trakt information to other plugins.
Among these is the IMDB url for the series.
This information is provided (via entry):
series info:
trakt_series_name
trakt_series_runtime
trakt_series_first_aired_epoch
trakt_series_first_aired_iso
trakt_series_air_time
trakt_series_content_rating
trakt_series_genres
trakt_series_banner_url
trakt_series_fanart_url
trakt_series_imdb_url
trakt_series_trakt_url
trakt_series_imdb_id
trakt_series_tvdb_id
trakt_series_actors
trakt_series_country
trakt_series_year
trakt_series_tvrage_id
trakt_series_status
trakt_series_overview
trakt_ep_name
trakt_ep_season
trakt_ep_number
trakt_ep_overview
trakt_ep_first_aired_epoch
trakt_ep_first_aired_iso
trakt_ep_image_url
trakt_ep_id
trakt_ep_tvdb_id
"""
# Series info
series_map = {
'trakt_series_name': 'title',
'trakt_series_runtime': 'runtime',
'trakt_series_first_aired_epoch': 'first_aired',
'trakt_series_first_aired_iso': 'first_aired_iso',
'trakt_series_air_time': 'air_time',
'trakt_series_content_rating': 'certification',
'trakt_series_genres': lambda series: [genre.name for genre in series.genre],
'trakt_series_network': 'network',
'trakt_series_banner_url': 'banner',
'trakt_series_fanart_url': 'fanart',
'trakt_series_poster_url': 'poster',
'imdb_url': lambda series: series.imdb_id and 'http://www.imdb.com/title/%s' % series.imdb_id,
'trakt_series_url': 'url',
'trakt_series_imdb_id': 'imdb_id',
'trakt_series_tvdb_id': 'tvdb_id',
'trakt_series_actors': lambda series: [actors.name for actors in series.actors],
'trakt_series_country': 'country',
'trakt_series_year': 'year',
'trakt_series_tvrage_id': 'tvrage_id',
'trakt_series_status': 'status',
'trakt_series_overview': 'overview'}
# Episode info
episode_map = {
'trakt_ep_name': 'episode_name',
'trakt_ep_first_aired_epoch': 'first_aired',
'trakt_ep_first_aired_iso': 'first_aired_iso',
'trakt_ep_image_url': 'screen',
'trakt_ep_overview': 'overview',
'trakt_season': 'season',
'trakt_episode': 'number',
'trakt_ep_id': lambda ep: 'S%02dE%02d' % (ep.season, ep.number),
'trakt_ep_tvdb_id': 'tvdb_id'}
schema = {'type': 'boolean'}
def lazy_series_lookup(self, entry):
"""Does the lookup for this entry and populates the entry fields."""
with Session(expire_on_commit=False) as session:
try:
series = lookup_series(entry.get('series_name', eval_lazy=False),
tvdb_id=entry.get('tvdb_id', eval_lazy=False), session=session)
except LookupError as e:
log.debug(e.message)
else:
entry.update_using_map(self.series_map, series)
def lazy_episode_lookup(self, entry):
with Session(expire_on_commit=False) as session:
lookupargs = {'title': entry.get('series_name', eval_lazy=False),
'tvdb_id': entry.get('tvdb_id', eval_lazy=False),
'seasonnum': entry['series_season'],
'episodenum': entry['series_episode'],
'session': session}
try:
episode = lookup_episode(**lookupargs)
except LookupError as e:
log.debug('Error looking up trakt episode information for %s: %s' % (entry['title'], e.args[0]))
else:
entry.update_using_map(self.episode_map, episode)
# Run after series and metainfo series
@plugin.priority(110)
def on_task_metainfo(self, task, config):
if not config:
return
for entry in task.entries:
if entry.get('series_name') or entry.get('tvdb_id', eval_lazy=False):
entry.register_lazy_func(self.lazy_series_lookup, self.series_map)
if 'series_season' in entry and 'series_episode' in entry:
entry.register_lazy_func(self.lazy_episode_lookup, self.episode_map)
@event('plugin.register')
def register_plugin():
plugin.register(PluginTraktLookup, 'trakt_lookup', api_ver=2)
|
patsissons/Flexget
|
flexget/plugins/metainfo/trakt_lookup.py
|
Python
|
mit
| 5,155
|
import re
def snake_to_camel(snake, upper_first=False):
# title-cased words
words = [word.title() for word in snake.split('_')]
if words and not upper_first:
words[0] = words[0].lower()
return ''.join(words)
def camel_to_snake(camel):
# first upper-cased camel
camel = camel[0].upper() + camel[1:]
return '_'.join(re.findall(r'[A-Z][^A-Z]*', camel)).lower()
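# Example conversions:
#   snake_to_camel('foo_bar')                   -> 'fooBar'
#   snake_to_camel('foo_bar', upper_first=True) -> 'FooBar'
#   camel_to_snake('fooBar')                    -> 'foo_bar'
#   camel_to_snake('FooBarBaz')                 -> 'foo_bar_baz'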
|
jfcherng/sublime-TypeShort
|
functions.py
|
Python
|
mit
| 401
|
class Optimizer:
def __init__(self, model, params=None):
self.model = model
if params:
self.model.set_params(**params)
self.params = self.model.get_params()
self.__chain = list()
def step(self, name, values, skipped=False):
if not skipped:
self.__chain.append({
'pname': name,
'pvalues': values
})
return self
def solve(self, evaluator):
score = -1
for param in self.__chain:
self.model.set_params(**self.params) # set previous best param
results = [(evaluator(self.model.set_params(**{param['pname']: value})), value)
for value in param['pvalues']]
results = sorted(results, lambda a, b: -1 if a[0] < b[0] else 1)
print param['pname']
for result in results:
print result[1], ' : ', result[0]
# update best params
self.params[param['pname']] = results[0][1]
score = results[0][0]
return score
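# Usage sketch (illustrative; `model` is any estimator exposing get_params/set_params,
# e.g. a scikit-learn model, and `evaluate` is a user-supplied scoring callable where
# lower is better, since solve() keeps the value that sorts first):
#
#   opt = Optimizer(model)
#   score = (opt.step('n_estimators', [50, 100, 200])
#               .step('max_depth', [3, 5, 7])
#               .solve(evaluate))
#   print opt.params   # best value found for each stepped parameter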
|
danielwpz/soybean
|
src/util/optimizer.py
|
Python
|
mit
| 1,090
|
#!/usr/bin/env python
import sys
import os
import re
try:
path = sys.argv[1]
length = int(sys.argv[2])
except:
print >>sys.stderr, "Usage: $0 <path> <length>"
sys.exit(1)
path = re.sub(os.getenv('HOME'), '~', path)
while len(path) > length:
dirs = path.split("/");
# Find the longest directory in the path.
max_index = -1
max_length = 3
for i in range(len(dirs) - 1):
if len(dirs[i]) > max_length:
max_index = i
max_length = len(dirs[i])
# Shorten it by one character.
if max_index >= 0:
dirs[max_index] = dirs[max_index][:max_length-3] + ".."
path = "/".join(dirs)
# Didn't find anything to shorten. This is as good as it gets.
else:
break
print(path)
|
werebus/dotfiles
|
bin/shorten_path.py
|
Python
|
mit
| 772
|
from rapt.treebrd.attributes import AttributeList
from ...treebrd.node import Operator
from ..base_translator import BaseTranslator
class SQLQuery:
"""
Structure defining the building blocks of a SQL query.
"""
def __init__(self, select_block, from_block, where_block=''):
self.prefix = ''
self.select_block = select_block
self.from_block = from_block
self.where_block = where_block
@property
def _basic_query(self):
if self.select_block:
return '{prefix}' \
'SELECT {select} FROM {relation}'
else:
return '{prefix}{relation}'
@property
def _sql_query_skeleton(self):
sql = self._basic_query
if self.where_block:
sql += ' WHERE {conditions}'
return sql
def to_sql(self):
"""
Construct a SQL query based on the stored blocks.
:return: a SQL query
"""
return self._sql_query_skeleton.format(
prefix=self.prefix, select=self.select_block,
relation=self.from_block, conditions=self.where_block)
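# For instance, with illustrative values:
#   SQLQuery('name, age', 'people', 'age > 18').to_sql()
#     -> "SELECT name, age FROM people WHERE age > 18"
#   SQLQuery('', 'people').to_sql()
#     -> "people"   (no select block, so only the relation is emitted)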
class SQLSetQuery(SQLQuery):
"""
Structure defining the building blocks of a SQL query with set semantics.
"""
@property
def _basic_query(self):
return '{prefix}' \
'SELECT DISTINCT {select} FROM {relation}'
class Translator(BaseTranslator):
"""
A Translator defining the operations for translating a relational algebra
statement into a SQL statement using bag semantics.
"""
query = SQLQuery
@classmethod
def _get_temp_name(cls, node):
return node.name or '_{}'.format(id(node))
@classmethod
def _get_sql_operator(cls, node):
operators = {
Operator.union: 'UNION',
Operator.difference: 'EXCEPT',
Operator.intersect: 'INTERSECT',
Operator.cross_join: 'CROSS JOIN',
Operator.theta_join: 'JOIN',
Operator.natural_join: 'NATURAL JOIN',
}
return operators[node.operator]
def relation(self, node):
"""
Translate a relation node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
return self.query(select_block=str(node.attributes),
from_block=node.name)
def select(self, node):
"""
Translate a select node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
child_object = self.translate(node.child)
where_block = node.conditions
if child_object.where_block:
where_block = '({0}) AND ({1})'\
.format(child_object.where_block, node.conditions)
child_object.where_block = where_block
if not child_object.select_block:
child_object.select_block = str(node.attributes)
return child_object
def project(self, node):
"""
Translate a project node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
child_object = self.translate(node.child)
child_object.select_block = str(node.attributes)
return child_object
def rename(self, node):
"""
Translate a rename node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
child_object = self.translate(node.child)
from_block = '({child}) AS {name}({attributes})'.format(
child=child_object.to_sql(), name=node.name,
attributes=', '.join(node.attributes.names))
return self.query(str(node.attributes), from_block=from_block)
def assign(self, node):
"""
Translate an assign node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
child_object = self.translate(node.child)
child_object.prefix = 'CREATE TEMPORARY TABLE {name}({attributes}) AS '\
.format(name=node.name, attributes=', '.join(node.attributes.names))
return child_object
def natural_join(self, node):
"""
Translate a natural join node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
return self._join(node)
def theta_join(self, node):
"""
Translate a theta join node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
return self._join(node)
def cross_join(self, node):
"""
Translate a cross join node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
return self._join(node)
def union(self, node):
"""
Translate a union node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
return self._set_op(node)
def intersect(self, node):
"""
Translate an intersection node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
return self._set_op(node)
def difference(self, node):
"""
Translate a difference node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
return self._set_op(node)
def _join_helper(self, node):
sobject = self.translate(node)
if node.operator in {
Operator.cross_join, Operator.natural_join, Operator.theta_join
}:
return sobject.from_block
else:
return '({subquery}) AS {name}'.format(
subquery=sobject.to_sql(), name=self._get_temp_name(node))
def _join(self, node):
"""
Translate a join node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
select_block = str(node.attributes)
from_block = '{left} {operator} {right}'.format(
left=self._join_helper(node.left),
right=self._join_helper(node.right),
operator=self._get_sql_operator(node))
if node.operator == Operator.theta_join:
from_block = '{from_block} ON {conditions}'.format(
from_block=from_block,
conditions=node.conditions)
return self.query(select_block, from_block, '')
def _set_op(self, node):
"""
Translate a set operator node into SQLQuery.
:param node: a treebrd node
:return: a SQLQuery object for the tree rooted at node
"""
select_block = str(node.attributes)
from_block = '({left} {operator} ALL {right}) AS {name}'.format(
left=self.translate(node.left).to_sql(),
right=self.translate(node.right).to_sql(),
operator=self._get_sql_operator(node), name=self._get_temp_name(node))
return self.query(select_block=select_block, from_block=from_block)
class SetTranslator(Translator):
"""
A Translator defining the operations for translating a relational algebra
statement into a SQL statement using set semantics.
"""
query = SQLSetQuery
def _set_op(self, node):
"""
Translate a set operator node into SQLQuery, using set semantics.
:param node: a treebrd node
:return: a SQLSetQuery object for the tree rooted at node
"""
select_block = str(node.attributes)
from_block = '({left} {operator} {right}) AS {name}'.format(
left=self.translate(node.left).to_sql(),
right=self.translate(node.right).to_sql(),
operator=self._get_sql_operator(node), name=self._get_temp_name(node))
return self.query(select_block=select_block, from_block=from_block)
def translate(root_list, use_bag_semantics=False):
"""
Translate a list of relational algebra trees into SQL statements.
:param root_list: a list of tree roots
:param use_bag_semantics: flag for using relational algebra bag semantics
:return: a list of SQL statements
"""
translator = (Translator() if use_bag_semantics else SetTranslator())
return [translator.translate(root).to_sql() for root in root_list]
|
pyrapt/rapt
|
rapt/transformers/sql/sql_translator.py
|
Python
|
mit
| 8,668
|
from .fft_tools import zoom
import numpy as np
import matplotlib.pyplot as pl
def iterative_zoom(image, mindiff=1., zoomshape=[10,10],
return_zoomed=False, zoomstep=2, verbose=False,
minmax=np.min, ploteach=False, return_center=True):
"""
Iteratively zoom in on the *minimum* position in an image until the
delta-peak value is below `mindiff`
Parameters
----------
image : np.ndarray
Two-dimensional image with a *minimum* to zoom in on (or maximum, if
specified using `minmax`)
mindiff : float
Minimum difference that must be present in image before zooming is done
zoomshape : [int,int]
Shape of the "mini" image to create. Smaller is faster, but a bit less
accurate. [10,10] seems to work well in preliminary tests (though unit
tests have not been written)
return_zoomed : bool
Return the zoomed image in addition to the measured offset?
zoomstep : int
Amount to increase the zoom factor by on each iteration. Probably best to
stick with small integers (2-5ish).
verbose : bool
Print out information about zoom factor, offset at each iteration
minmax : np.min or np.max
Can zoom in on the minimum or maximum of the image
ploteach : bool
Primarily a debug tool, and to be used with extreme caution! Will open
a new figure at each iteration showing the next zoom level.
return_center : bool
Return the center position in original image coordinates? If False,
will return the *offset from center* instead (but beware the
conventions associated with the concept of 'center' for even images).
Returns
-------
The y,x offsets (following numpy convention) of the center position of the
original image. If `return_zoomed`, returns (zoomed_image, zoom_factor,
offsets) because you can't interpret the zoomed image without the zoom
factor.
"""
image_zoom = image
argminmax = np.argmin if "min" in minmax.__name__ else np.argmax
zf = 1. # "zoom factor" initialized to 1 for the base shift measurement
offset = np.array([0]*image.ndim,dtype='float') # center offset
delta_image = (image_zoom - minmax(image_zoom))
xaxzoom = np.indices(image.shape)
if ploteach:
ii = 1
pl.figure(ii)
pl.clf()
pl.pcolor(np.arange(image.shape[0]+1)-0.5,np.arange(image.shape[1]+1)-0.5, image)
minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
pl.plot(minpos[1],minpos[0],'wx')
# check to make sure the smallest *nonzero* difference > mindiff
while np.abs(delta_image[np.abs(delta_image)>0]).min() > mindiff:
minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
center = xaxzoom[0][minpos],xaxzoom[1][minpos]
offset = xaxzoom[0][minpos]-(image.shape[0]-1)/2,xaxzoom[1][minpos]-(image.shape[1]-1)/2
zf *= zoomstep
xaxzoom, image_zoom = zoom.zoom_on_pixel(image, center, usfac=zf,
outshape=zoomshape, return_xouts=True)
delta_image = image_zoom-minmax(image_zoom)
# base case: in case you can't do any better...
# (at this point, you're all the way zoomed)
if np.all(delta_image == 0):
if verbose:
print("Can't zoom any further. zf=%i" % zf)
break
if verbose:
print(("Zoom factor %6i, center = %30s, offset=%30s, minpos=%30s, min|diff|=%15g" %
(zf, ",".join(["%15g" % c for c in center]),
",".join(["%15g" % c for c in offset]),
",".join(["%5i" % c for c in minpos]),
np.abs(delta_image[np.abs(delta_image)>0]).min()
)))
if ploteach:
ii += 1
pl.figure(ii)
pl.clf()
pl.pcolor(centers_to_edges(xaxzoom[1][0,:]),centers_to_edges(xaxzoom[0][:,0]),image_zoom)
pl.contour(xaxzoom[1],xaxzoom[0],image_zoom-image_zoom.min(),levels=[1,5,15],cmap=pl.cm.gray)
pl.plot(center[1],center[0],'wx')
minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
pl.plot(xaxzoom[1][minpos],
xaxzoom[0][minpos],
'w+')
pl.arrow(center[1],center[0],xaxzoom[1][minpos]-center[1],xaxzoom[0][minpos]-center[0],color='w',
head_width=0.1/zf, linewidth=1./zf, length_includes_head=True)
pl.figure(1)
#pl.contour(xaxzoom[1],xaxzoom[0],image_zoom-image_zoom.min(),levels=[1,5,15],cmap=pl.cm.gray)
pl.arrow(center[1],center[0],xaxzoom[1][minpos]-center[1],xaxzoom[0][minpos]-center[0],color='w',
head_width=0.1/zf, linewidth=1./zf, length_includes_head=True)
if return_center:
result = center
else:
result = offset
if return_zoomed:
return image_zoom,zf,result
else:
return result
def centers_to_edges(arr):
dx = arr[1]-arr[0]
newarr = np.linspace(arr.min()-dx/2,arr.max()+dx/2,arr.size+1)
return newarr
def iterative_zoom_1d(data, mindiff=1., zoomshape=(10,),
return_zoomed=False, zoomstep=2, verbose=False,
minmax=np.min, return_center=True):
"""
Iteratively zoom in on the *minimum* position in a spectrum or timestream
until the delta-peak value is below `mindiff`
Parameters
----------
data : np.ndarray
One-dimensional array with a *minimum* (or maximum, as specified by
minmax) to zoom in on
mindiff : float
Minimum difference that must be present in image before zooming is done
zoomshape : int
Shape of the "mini" image to create. Smaller is faster, but a bit less
accurate. 10 seems to work well in preliminary tests (though unit
tests have not been written)
return_zoomed : bool
Return the zoomed image in addition to the measured offset?
zoomstep : int
Amount to increase the zoom factor by on each iteration. Probably best to
stick with small integers (2-5ish).
verbose : bool
Print out information about zoom factor, offset at each iteration
minmax : np.min or np.max
Can zoom in on the minimum or maximum of the image
return_center : bool
Return the center position in original image coordinates? If False,
will return the *offset from center* instead (but beware the
conventions associated with the concept of 'center' for even images).
Returns
-------
The x offsets of the center position of the original spectrum. If
`return_zoomed`, returns (zoomed_image, zoom_factor, offsets) because you
can't interpret the zoomed spectrum without the zoom factor.
"""
data_zoom = data
argminmax = np.argmin if "min" in minmax.__name__ else np.argmax
zf = 1. # "zoom factor" initialized to 1 for the base shift measurement
offset = 0.
delta_data = (data_zoom - minmax(data_zoom))
xaxzoom = np.arange(data.size)
# check to make sure the smallest *nonzero* difference > mindiff
while np.abs(delta_data[np.abs(delta_data)>0]).min() > mindiff:
minpos = argminmax(data_zoom)
center = xaxzoom.squeeze()[minpos],
offset = xaxzoom.squeeze()[minpos]-(data.size-1)/2,
zf *= zoomstep
xaxzoom, data_zoom = zoom.zoom_on_pixel(data, center, usfac=zf,
outshape=zoomshape, return_xouts=True)
delta_data = data_zoom-minmax(data_zoom)
# base case: in case you can't do any better...
# (at this point, you're all the way zoomed)
if np.all(delta_data == 0):
if verbose:
print("Can't zoom any further. zf=%i" % zf)
break
if verbose:
print(("Zoom factor %6i, center = %30s, offset=%30s, minpos=%30s, mindiff=%30s" %
(zf, "%15g" % center,
"%15g" % offset,
"%15g" % minpos,
"%15g" % np.abs(delta_data[np.abs(delta_data)>0]).min(),
)))
if return_center:
result = center
else:
result = offset
if return_zoomed:
return data_zoom,zf,result
else:
return result
def centers_to_edges(arr):
dx = arr[1]-arr[0]
newarr = np.linspace(arr.min()-dx/2,arr.max()+dx/2,arr.size+1)
return newarr
|
keflavich/image_registration
|
image_registration/iterative_zoom.py
|
Python
|
mit
| 8,555
|
"""Tests for IPython.lib.display.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from tempfile import NamedTemporaryFile, mkdtemp
from os.path import split, join as pjoin, dirname
import sys
try:
import pathlib
except ImportError:
pass
from unittest import TestCase, mock
import struct
import wave
from io import BytesIO
# Third-party imports
import nose.tools as nt
import numpy
# Our own imports
from IPython.lib import display
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
#--------------------------
# FileLink tests
#--------------------------
def test_instantiation_FileLink():
"""FileLink: Test class can be instantiated"""
fl = display.FileLink('example.txt')
# TODO: remove if when only Python >= 3.6 is supported
if sys.version_info >= (3, 6):
fl = display.FileLink(pathlib.PurePath('example.txt'))
def test_warning_on_non_existant_path_FileLink():
"""FileLink: Calling _repr_html_ on non-existant files returns a warning
"""
fl = display.FileLink('example.txt')
nt.assert_true(fl._repr_html_().startswith('Path (<tt>example.txt</tt>)'))
def test_existing_path_FileLink():
"""FileLink: Calling _repr_html_ functions as expected on existing filepath
"""
tf = NamedTemporaryFile()
fl = display.FileLink(tf.name)
actual = fl._repr_html_()
expected = "<a href='%s' target='_blank'>%s</a><br>" % (tf.name,tf.name)
nt.assert_equal(actual,expected)
def test_existing_path_FileLink_repr():
"""FileLink: Calling repr() functions as expected on existing filepath
"""
tf = NamedTemporaryFile()
fl = display.FileLink(tf.name)
actual = repr(fl)
expected = tf.name
nt.assert_equal(actual,expected)
def test_error_on_directory_to_FileLink():
"""FileLink: Raises error when passed directory
"""
td = mkdtemp()
nt.assert_raises(ValueError,display.FileLink,td)
#--------------------------
# FileLinks tests
#--------------------------
def test_instantiation_FileLinks():
"""FileLinks: Test class can be instantiated
"""
fls = display.FileLinks('example')
def test_warning_on_non_existant_path_FileLinks():
"""FileLinks: Calling _repr_html_ on non-existant files returns a warning
"""
fls = display.FileLinks('example')
nt.assert_true(fls._repr_html_().startswith('Path (<tt>example</tt>)'))
def test_existing_path_FileLinks():
"""FileLinks: Calling _repr_html_ functions as expected on existing dir
"""
td = mkdtemp()
tf1 = NamedTemporaryFile(dir=td)
tf2 = NamedTemporaryFile(dir=td)
fl = display.FileLinks(td)
actual = fl._repr_html_()
actual = actual.split('\n')
actual.sort()
# the links should always have forward slashes, even on windows, so replace
# backslashes with forward slashes here
expected = ["%s/<br>" % td,
" <a href='%s' target='_blank'>%s</a><br>" %\
(tf2.name.replace("\\","/"),split(tf2.name)[1]),
" <a href='%s' target='_blank'>%s</a><br>" %\
(tf1.name.replace("\\","/"),split(tf1.name)[1])]
expected.sort()
# We compare the sorted list of links here as that's more reliable
nt.assert_equal(actual,expected)
def test_existing_path_FileLinks_alt_formatter():
"""FileLinks: Calling _repr_html_ functions as expected w/ an alt formatter
"""
td = mkdtemp()
tf1 = NamedTemporaryFile(dir=td)
tf2 = NamedTemporaryFile(dir=td)
def fake_formatter(dirname,fnames,included_suffixes):
return ["hello","world"]
fl = display.FileLinks(td,notebook_display_formatter=fake_formatter)
actual = fl._repr_html_()
actual = actual.split('\n')
actual.sort()
expected = ["hello","world"]
expected.sort()
# We compare the sorted list of links here as that's more reliable
nt.assert_equal(actual,expected)
def test_existing_path_FileLinks_repr():
"""FileLinks: Calling repr() functions as expected on existing directory """
td = mkdtemp()
tf1 = NamedTemporaryFile(dir=td)
tf2 = NamedTemporaryFile(dir=td)
fl = display.FileLinks(td)
actual = repr(fl)
actual = actual.split('\n')
actual.sort()
expected = ['%s/' % td, ' %s' % split(tf1.name)[1],' %s' % split(tf2.name)[1]]
expected.sort()
# We compare the sorted list of links here as that's more reliable
nt.assert_equal(actual,expected)
def test_existing_path_FileLinks_repr_alt_formatter():
"""FileLinks: Calling repr() functions as expected w/ alt formatter
"""
td = mkdtemp()
tf1 = NamedTemporaryFile(dir=td)
tf2 = NamedTemporaryFile(dir=td)
def fake_formatter(dirname,fnames,included_suffixes):
return ["hello","world"]
fl = display.FileLinks(td,terminal_display_formatter=fake_formatter)
actual = repr(fl)
actual = actual.split('\n')
actual.sort()
expected = ["hello","world"]
expected.sort()
# We compare the sorted list of links here as that's more reliable
nt.assert_equal(actual,expected)
def test_error_on_file_to_FileLinks():
"""FileLinks: Raises error when passed file
"""
td = mkdtemp()
tf1 = NamedTemporaryFile(dir=td)
nt.assert_raises(ValueError,display.FileLinks,tf1.name)
def test_recursive_FileLinks():
"""FileLinks: Does not recurse when recursive=False
"""
td = mkdtemp()
tf = NamedTemporaryFile(dir=td)
subtd = mkdtemp(dir=td)
subtf = NamedTemporaryFile(dir=subtd)
fl = display.FileLinks(td)
actual = str(fl)
actual = actual.split('\n')
nt.assert_equal(len(actual), 4, actual)
fl = display.FileLinks(td, recursive=False)
actual = str(fl)
actual = actual.split('\n')
nt.assert_equal(len(actual), 2, actual)
def test_audio_from_file():
path = pjoin(dirname(__file__), 'test.wav')
display.Audio(filename=path)
class TestAudioDataWithNumpy(TestCase):
def test_audio_from_numpy_array(self):
test_tone = get_test_tone()
audio = display.Audio(test_tone, rate=44100)
nt.assert_equal(len(read_wav(audio.data)), len(test_tone))
def test_audio_from_list(self):
test_tone = get_test_tone()
audio = display.Audio(list(test_tone), rate=44100)
nt.assert_equal(len(read_wav(audio.data)), len(test_tone))
def test_audio_from_numpy_array_without_rate_raises(self):
nt.assert_raises(ValueError, display.Audio, get_test_tone())
def test_audio_data_normalization(self):
expected_max_value = numpy.iinfo(numpy.int16).max
for scale in [1, 0.5, 2]:
audio = display.Audio(get_test_tone(scale), rate=44100)
actual_max_value = numpy.max(numpy.abs(read_wav(audio.data)))
nt.assert_equal(actual_max_value, expected_max_value)
def test_audio_data_without_normalization(self):
max_int16 = numpy.iinfo(numpy.int16).max
for scale in [1, 0.5, 0.2]:
test_tone = get_test_tone(scale)
test_tone_max_abs = numpy.max(numpy.abs(test_tone))
expected_max_value = int(max_int16 * test_tone_max_abs)
audio = display.Audio(test_tone, rate=44100, normalize=False)
actual_max_value = numpy.max(numpy.abs(read_wav(audio.data)))
nt.assert_equal(actual_max_value, expected_max_value)
def test_audio_data_without_normalization_raises_for_invalid_data(self):
nt.assert_raises(
ValueError,
lambda: display.Audio([1.001], rate=44100, normalize=False))
nt.assert_raises(
ValueError,
lambda: display.Audio([-1.001], rate=44100, normalize=False))
def simulate_numpy_not_installed():
return mock.patch('numpy.array', mock.MagicMock(side_effect=ImportError))
@simulate_numpy_not_installed()
class TestAudioDataWithoutNumpy(TestAudioDataWithNumpy):
# All tests from `TestAudioDataWithNumpy` are inherited.
def test_audio_raises_for_nested_list(self):
stereo_signal = [list(get_test_tone())] * 2
nt.assert_raises(
TypeError,
lambda: display.Audio(stereo_signal, rate=44100))
def get_test_tone(scale=1):
return numpy.sin(2 * numpy.pi * 440 * numpy.linspace(0, 1, 44100)) * scale
def read_wav(data):
with wave.open(BytesIO(data)) as wave_file:
wave_data = wave_file.readframes(wave_file.getnframes())
num_samples = wave_file.getnframes() * wave_file.getnchannels()
return struct.unpack('<%sh' % num_samples, wave_data)
def test_code_from_file():
c = display.Code(filename=__file__)
assert c._repr_html_().startswith('<style>')
|
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/IPython/lib/tests/test_display.py
|
Python
|
mit
| 9,210
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import organizations
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
author="Ben Lopatin",
author_email="ben@wellfire.co",
name='django-organizations',
version=organizations.__version__,
description='Group accounts for Django',
long_description=readme + '\n\n' + history,
url='https://github.com/bennylope/django-organizations/',
license='BSD License',
platforms=['OS Independent'],
packages=[
'organizations',
'organizations.backends',
'organizations.south_migrations',
'organizations.templatetags',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
],
install_requires=[
'Django>=1.4.2',
],
test_suite='tests',
include_package_data=True,
zip_safe=False,
)
|
bennylope/django-site-broadcasts
|
test.py
|
Python
|
mit
| 1,601
|
"""A set of AST classes with types and aliases filled in."""
import collections
import type_context
class Select(collections.namedtuple(
'Select', ['select_fields', 'table', 'where_expr', 'group_set',
'limit', 'type_ctx'])):
"""A compiled query.
Fields:
select_fields: A list of SelectField, one for each item being selected.
table: The table expression to select from.
where_expr: A filter to apply on the selected table expression. Note
that this filter should always be valid; if the user didn't specify
a WHERE clause, this is the literal true.
group_set: Either None, indicating that no grouping should be done, or
a GroupSet object. If there were groups explicitly specified by
GROUP BY, then the GroupSet always exists and is nonempty. If there
was no GROUP BY but the select is an aggregate select, the GroupSet
exists and is empty (since grouping by nothing puts everything into
the same group).
limit: Either a number with the number of rows to limit the results to,
or None if there is no limit.
type_ctx: A type context describing the names and types of the fields
returned from this select clause.
"""
def with_type_ctx(self, type_ctx):
return Select(self.select_fields, self.table, self.where_expr,
self.group_set, self.limit, type_ctx)
class SelectField(collections.namedtuple('SelectField', ['expr', 'alias'])):
pass
class GroupSet(collections.namedtuple(
'GroupSet', ['alias_groups', 'field_groups'])):
"""Information about the groups to use for a query.
Fields:
alias_groups: A set of string names of aliases for select fields that
we should group by. These are special because they need to be
compiled and evaluated differently from normal select fields.
field_groups: A list of ColumnRefs referencing columns in the table
expression of the SELECT statement.
"""
# This special GroupSet means "group by nothing". In other words, everything
# should end up in the same group (which happens when an aggregate function is
# used, but no GROUP BY groups are specified explicitly). It's almost enough to
# just omit all alias and field groups, but we also need to make sure that we
# include the group even if there are no rows in the table being selected.
TRIVIAL_GROUP_SET = GroupSet(set(), [])
class TableExpression(object):
"""Abstract class for all table expression ASTs."""
def __init__(self, *_):
assert hasattr(self, 'type_ctx')
class NoTable(collections.namedtuple('NoTable', []), TableExpression):
@property
def type_ctx(self):
return type_context.TypeContext.from_full_columns(
collections.OrderedDict())
class Table(collections.namedtuple('Table', ['name', 'type_ctx']),
TableExpression):
def with_type_ctx(self, type_ctx):
return Table(self.name, type_ctx)
class TableUnion(collections.namedtuple('TableUnion', ['tables', 'type_ctx']),
TableExpression):
pass
class Join(collections.namedtuple('Join', ['table1', 'table2',
'conditions', 'is_left_outer',
'type_ctx']),
TableExpression):
"""Table expression for a join operation.
Fields:
table1: A table expression on the left side of the join.
table2: A table expression on the right side of the join.
conditions: A list of JoinFields objects, each of which specifies a
field from table1 joined on a field from table2.
is_left_outer: A boolean for whether or not this is a left outer join.
type_ctx: The resulting type context.
"""
class JoinFields(collections.namedtuple('JoinFields', ['column1', 'column2'])):
"""A single pair of fields to join on.
Fields:
column1: A ColumnRef referencing table1.
column2: A ColumnRef referencing table2.
"""
class Expression(object):
"""Abstract interface for all expression ASTs."""
def __init__(self, *args):
assert hasattr(self, 'type')
class FunctionCall(collections.namedtuple(
'FunctionCall', ['func', 'args', 'type']), Expression):
"""Expression representing a call to a built-in function.
Fields:
func: A runtime.Function for the function to call.
args: A list of expressions to pass in as the function's arguments.
type: The result type of the expression.
"""
class AggregateFunctionCall(collections.namedtuple(
'AggregateFunctionCall', ['func', 'args', 'type']), Expression):
"""Expression representing a call to a built-in aggregate function.
Aggregate functions are called differently from regular functions, so we
need to have a special case for them in the AST format.
Fields:
func: A runtime.Function for the function to call.
args: A list of expressions to pass in as the function's arguments.
type: The result type of the expression.
"""
class Literal(collections.namedtuple(
'Literal', ['value', 'type']), Expression):
pass
class ColumnRef(collections.namedtuple(
'ColumnRef', ['table', 'column', 'type']), Expression):
"""References a column from the current context."""
|
burnhamup/tinyquery
|
tinyquery/typed_ast.py
|
Python
|
mit
| 5,461
|
"""
Init
"""
from __future__ import unicode_literals
import datetime
import os
import subprocess
VERSION = (2, 2, 13, 'final', 0)
def get_version(version=None):
"""
Returns a PEP 386-compliant version number from VERSION.
"""
if not version:
version = VERSION
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(main + sub)
def get_git_changeset():
"""
Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir,
universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
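# Example outputs of get_version() for a few VERSION tuples (derived from the rules
# above; the .devN suffix depends on the git checkout, so it is shown schematically):
#   (2, 2, 13, 'final', 0) -> '2.2.13'
#   (1, 0, 0, 'rc', 2)     -> '1.0c2'
#   (1, 1, 0, 'alpha', 0)  -> '1.1.devYYYYMMDDHHMMSS' (or '1.1' outside a git checkout)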
|
RyanNoelk/ClanLadder
|
django_ajax/__init__.py
|
Python
|
mit
| 1,900
|
# encoding: utf-8
import os
def emulator_rom_launch_command(emulator, rom):
"""Generates a command string that will launch `rom` with `emulator` (using
the format provided by the user). The return value of this function should
be suitable to use as the `Exe` field of a Steam shortcut"""
# Normalizing the strings is just removing any leading/trailing quotes.
# The beautiful thing is that strip does nothing if it doesnt contain quotes,
# so normalizing it then adding quotes should do what I want 100% of the time
normalize = lambda s: s.strip("\"")
add_quotes = lambda s: "\"%s\"" % s
# We don't know if the user put quotes around the emulator location. If
# so, we dont want to add another pair and screw things up.
#
# The user didnt give us the ROM information, but screw it, I already
# have some code to add quotes to a string, might as well use it.
quoted_location = add_quotes(normalize(emulator.location))
quoted_rom = add_quotes(normalize(rom.path))
# The format string contains a bunch of specifies that users can use to
# substitute values in at runtime. Right now the only supported values are:
# %l - The location of the emulator (to avoid sync bugs)
# %r - The location of the ROM (so the emulator knows what to launch)
# %fn - The ROM filename without its extension (for emulators that utilize separete configuration files)
#
# More may be added in the future, but for now this is what we support
return (
emulator.format
.replace("%l", quoted_location)
.replace("%r", quoted_rom)
.replace("%fn", os.path.splitext(os.path.basename(rom.path))[0])
)
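# Illustrative example (the emulator/rom objects are stand-ins; any objects exposing
# .format/.location and .path attributes behave the same way):
#   emulator.format   = '%l -fullscreen %r'
#   emulator.location = '/usr/bin/snes9x'
#   rom.path          = '/roms/Super Mario World.smc'
#   emulator_rom_launch_command(emulator, rom)
#     -> '"/usr/bin/snes9x" -fullscreen "/roms/Super Mario World.smc"'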
def emulator_startdir(emulator):
"""Returns the directory which stores the emulator. The return value of this
function should be suitable to use as the 'StartDir' field of a Steam
shortcut"""
return os.path.dirname(emulator.location)
|
scottrice/Ice
|
ice/emulators.py
|
Python
|
mit
| 1,887
|
#!/usr/bin/env python
#
# This example shows the different aspects of user/team management.
#
import sys
from sdcclient import SdcClient
#
# Parse arguments
#
if len(sys.argv) != 4:
print(('usage: %s <sysdig-token> team-name user-name' % sys.argv[0]))
print('You can find your token at https://app.sysdigcloud.com/#/settings/user')
sys.exit(1)
sdc_token = sys.argv[1]
#
# Instantiate the SDC client
#
sdclient = SdcClient(sdc_token, sdc_url='https://app.sysdigcloud.com')
team_name = sys.argv[2]
user_name = sys.argv[3]
print(('Trying to invite a user:', user_name))
ok, res = sdclient.create_user_invite(user_name)
if not ok:
if res == 'user ' + user_name + ' already exists':
print(('User creation failed because', user_name, 'already exists. Continuing.'))
else:
print(('User creation failed:', res, '. Exiting.'))
sys.exit(1)
else:
print('User creation succeeded')
# Possible failures on Team creation might include having reached the
# max limit on Teams for this customer account or if the Team by that
# name already exists. Since a previous successful run of this test
# would have deleted the Team by the same name, and we need to be able
# to configure Teams for this test to pass, we'll treat both types of
# error as a genuine fail of the test.
print(('Now trying to create a team with name:', team_name))
ok, res = sdclient.create_team(team_name)
if not ok:
print(('Team creation failed:', res, '. Exiting.'))
sys.exit(1)
else:
print(('Team creation succeeded.', res))
print(('Now trying to find team with name:', team_name))
ok, res = sdclient.get_team(team_name)
if not ok:
print(('Could not get team info:', res, '. Exiting.'))
sys.exit(1)
else:
print('Team fetch succeeded')
print(('Now trying to edit team:', team_name))
memberships = {
'admin@draios.com': 'ROLE_TEAM_MANAGER',
'john-doe@sysdig.com': 'ROLE_TEAM_READ'
}
ok, res = sdclient.edit_team(team_name, description='Nextgen2', memberships=memberships)
if not ok:
print(('Could not edit team:', res, '. Exiting.'))
sys.exit(1)
else:
print('Edited team to change description and add users')
print(('Now trying to edit user:', user_name))
ok, res = sdclient.edit_user(user_name, firstName='Just', lastName='Edited3', systemRole='ROLE_CUSTOMER')
if not ok:
print(('Could not edit user:', res, '. Exiting.'))
sys.exit(1)
else:
print('Edit user succeeded')
print(('Now trying to delete the team:', team_name))
ok, res = sdclient.delete_team(team_name)
if not ok:
print(('Could not delete team:', res, '. Exiting.'))
sys.exit(1)
else:
print('Delete team succeeded')
sys.exit(0)
|
draios/python-sdc-client
|
examples/user_team_mgmt.py
|
Python
|
mit
| 2,675
|
class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
row = len(word1) + 1
col = len(word2) + 1
dp = [[0] * col for _ in range(row)]
for i in range(col):
dp[0][i] = i
for i in range(row):
dp[i][0] = i
for i in range(1, row):
for j in range(1, col):
if word1[i - 1] == word2[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = dp[i - 1][j - 1] + 1
dp[i][j] = min(dp[i][j], dp[i - 1][j] + 1, dp[i][j - 1] + 1)
return dp[row - 1][col - 1]
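# dp[i][j] holds the edit distance between word1[:i] and word2[:j].
# Example (classic edit-distance case): Solution().minDistance("horse", "ros") == 3,
# via horse -> rorse (replace 'h') -> rose (delete 'r') -> ros (delete 'e').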
|
ChuanleiGuo/AlgorithmsPlayground
|
LeetCodeSolutions/python/72_Edit_Distance.py
|
Python
|
mit
| 727
|
from linkedlist import SinglyLinkedListNode
def reverseList(head):
tail=None
last=None
tempNode = head
while tempNode is not None:
currentNode, tempNode = tempNode, tempNode.next
currentNode.next = tail
tail = currentNode
return tail
def reverseListKNode(head,k):
tempHead= None
tempTail= None
while head is not None:
tempNode = head
last = None
tk=k
while tempNode is not None and tk > 0:
currentNode,nextNode = tempNode,tempNode.next
currentNode.next = last
last=currentNode
tempNode = nextNode
tk-=1
if tempHead is not None:
tempTail.next = last
head.next = nextNode
else:
tempHead = last
head.next= nextNode
tempTail = head
head=nextNode
return tempHead
def printLinkedList(head):
while head is not None:
print head.data,
head=head.next
print ''
def createList(list):
lastNode=None
head=None
for i in list:
node= SinglyLinkedListNode(i)
if lastNode == None:
lastNode = node
head = node
else:
lastNode.next = node
lastNode=node
return head
a=(i for i in xrange(1,11))
list = createList(a)
printLinkedList(list)
newList=reverseListKNode(list,2)
printLinkedList(newList)
|
pankajanand18/python-tests
|
linkedlists/reverserlist.py
|
Python
|
mit
| 1,457
|
# vim:ts=4 sw=4 expandtab softtabstop=4
import unittest
import warnings
from collections import OrderedDict
import jsonmerge
import jsonmerge.strategies
from jsonmerge.exceptions import (
HeadInstanceError,
BaseInstanceError,
SchemaError
)
from jsonmerge.jsonvalue import JSONValue
import jsonschema
try:
Draft6Validator = jsonschema.validators.Draft6Validator
except AttributeError:
Draft6Validator = None
warnings.simplefilter("always")
class TestMerge(unittest.TestCase):
def test_default(self):
schema = {}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, "b")
def test_overwrite(self):
schema = {'mergeStrategy': 'overwrite'}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, "b")
def test_version(self):
schema = {'mergeStrategy': 'version'}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': "a"}, {'value': "b"}])
def test_version_does_not_duplicate(self):
# Don't record change if it didn't change
schema = {'mergeStrategy': 'version'}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': "a"}, {'value': "b"}])
def test_version_meta(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a", merge_options={
'version': {'metadata': {'uri': 'http://example.com/a'}}})
base = merger.merge(base, "b", merge_options={
'version': {'metadata': {'uri': 'http://example.com/b'}}})
self.assertEqual(base, [
{'value': "a",
'uri': 'http://example.com/a'},
{'value': "b",
'uri': 'http://example.com/b'}])
def test_version_meta_not_obj(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
with self.assertRaises(SchemaError) as cm:
merger.merge(None, "a", merge_options={'version': {'metadata': 'foo'}})
def test_version_meta_deprecated(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
with warnings.catch_warnings(record=True) as w:
base = merger.merge(None, 'a', meta={'foo': 'bar'})
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
def test_version_ignoredups_false(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'ignoreDups': False}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a")
base = merger.merge(base, "a")
self.assertEqual(base, [{'value': "a"}, {'value': "a"}])
def test_version_unique_false(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'unique': False}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a")
base = merger.merge(base, "a")
self.assertEqual(base, [{'value': "a"}, {'value': "a"}])
def test_version_ignoredups_true(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a")
base = merger.merge(base, "a")
self.assertEqual(base, [{'value': "a"}])
def test_version_last(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'limit': 1}}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': "b"}])
def test_version_base_not_a_list(self):
schema = {'mergeStrategy': 'version'}
base = "a"
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, "b", schema)
def test_version_base_not_a_list_of_objects(self):
schema = {'mergeStrategy': 'version'}
base = ["a"]
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, "b", schema)
def test_version_base_no_value_in_object(self):
schema = {'mergeStrategy': 'version'}
base = [{}]
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, "b", schema)
def test_version_base_empty_list(self):
schema = {'mergeStrategy': 'version'}
base = []
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': 'b'}])
def test_append(self):
schema = {'mergeStrategy': 'append'}
base = None
base = jsonmerge.merge(base, ["a"], schema)
base = jsonmerge.merge(base, ["b"], schema)
self.assertEqual(base, ["a", "b"])
def test_append_type_error(self):
schema = {'mergeStrategy': 'append'}
base = None
with self.assertRaises(HeadInstanceError) as cm:
jsonmerge.merge(base, "a", schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_append_type_error_base(self):
schema = {'mergeStrategy': 'append'}
base = "ab"
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, ["a"], schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_merge_default(self):
schema = {}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'b': "b"}, schema)
self.assertEqual(base, {'a': "a", 'b': "b"})
def test_merge_empty_schema(self):
schema = {}
base = None
base = jsonmerge.merge(base, {'a': {'b': 'c'}}, schema)
self.assertEqual(base, {'a': {'b': 'c'}})
def test_merge_trivial(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'b': "b"}, schema)
self.assertTrue(isinstance(base, dict))
self.assertEqual(base, {'a': "a", 'b': "b"})
def test_merge_null(self):
schema = {'mergeStrategy': 'objectMerge'}
base = {'a': 'a'}
head = {'a': None}
r = jsonmerge.merge(base, head, schema)
self.assertEqual(head, r)
def test_merge_type_error(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
with self.assertRaises(HeadInstanceError) as cm:
jsonmerge.merge(base, "a", schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_merge_type_error_base(self):
schema = {'mergeStrategy': 'objectMerge'}
base = "ab"
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, {'foo': 1}, schema)
self.assertEqual(cm.exception.value.ref, "#")
def test_merge_overwrite(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'a': "b"}, schema)
self.assertEqual(base, {'a': "b"})
def test_merge_objclass(self):
schema = {'mergeStrategy': 'objectMerge', 'mergeOptions': { 'objClass': 'OrderedDict'}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, OrderedDict([('c', "a"), ('a', "a")]))
self.assertIsInstance(base, OrderedDict)
self.assertEqual([k for k in base], ['c', 'a'])
base = merger.merge(base, {'a': "b"})
self.assertIsInstance(base, OrderedDict)
self.assertEqual([k for k in base], ['c', 'a'])
self.assertEqual(base, {'a': "b", 'c': "a"})
def test_merge_objclass2(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'a': {'mergeStrategy': 'objectMerge',
'mergeOptions': { 'objClass': 'OrderedDict'}}}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, {'a': {'b': 'c'}, 'd': {'e': 'f'}})
self.assertIsInstance(base, dict)
self.assertIsInstance(base['a'], OrderedDict)
self.assertIsInstance(base['d'], dict)
def test_merge_objclass_bad_cls(self):
schema = {'mergeStrategy': 'objectMerge', 'mergeOptions': { 'objClass': 'foo'}}
merger = jsonmerge.Merger(schema)
base = None
with self.assertRaises(SchemaError) as cm:
merger.merge(base, OrderedDict([('c', "a"), ('a', "a")]))
self.assertEqual(cm.exception.value.ref, '#')
def test_merge_objclass_menu(self):
schema = {'mergeStrategy': 'objectMerge', 'mergeOptions': { 'objClass': 'foo'}}
class MyDict(dict):
pass
objclass_menu = {'foo': MyDict}
merger = jsonmerge.Merger(schema, objclass_menu=objclass_menu)
base = None
base = merger.merge(base, {'c': "a", 'a': "a"})
self.assertTrue(isinstance(base, MyDict))
def test_merge_objclass_def(self):
schema = {'mergeStrategy': 'objectMerge'}
merger = jsonmerge.Merger(schema, objclass_def='OrderedDict')
base = None
base = merger.merge(base, OrderedDict([('c', "a"), ('a', "a")]))
self.assertIsInstance(base, OrderedDict)
self.assertEqual([k for k in base], ['c', 'a'])
base = merger.merge(base, {'a': "b"})
self.assertIsInstance(base, OrderedDict)
self.assertEqual([k for k in base], ['c', 'a'])
self.assertEqual(base, {'a': "b", 'c': "a"})
def test_merge_append(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'a': {'mergeStrategy': 'append'}
}}
base = None
base = jsonmerge.merge(base, {'a': ["a"]}, schema)
base = jsonmerge.merge(base, {'a': ["b"], 'b': 'c'}, schema)
self.assertEqual(base, {'a': ["a", "b"], 'b': 'c'})
def test_merge_append_pattern(self):
schema = {'mergeStrategy': 'objectMerge',
'patternProperties': {
'a': {'mergeStrategy': 'append'}
}}
base = None
base = jsonmerge.merge(base, {'a': ["a"]}, schema)
base = jsonmerge.merge(base, {'a': ["b"], 'b': 'c'}, schema)
self.assertEqual(base, {'a': ["a", "b"], 'b': 'c'})
def test_merge_append_additional(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'b': {'mergeStrategy': 'overwrite'}
},
'additionalProperties': {
'mergeStrategy': 'append'
}}
base = None
base = jsonmerge.merge(base, {'a': ["a"]}, schema)
base = jsonmerge.merge(base, {'a': ["b"], 'b': 'c'}, schema)
self.assertEqual(base, {'a': ["a", "b"], 'b': 'c'})
def test_merge_additional_bool(self):
schema = {'additionalProperties': True}
base = {}
head = {'a': 'a'}
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, {'a': 'a'})
def test_example(self):
head1 = {
'buyer': {
'id': {
'name': "Test old",
},
'uri': 'Test uri old',
}
}
head2 = {
'buyer': {
'id': {
'name': "Test new"
},
'uri': 'Test uri new',
},
'award': "Award"
}
base_expect = {
'buyer': {
'id': {
'name': [
{'value': "Test old"},
{'value': "Test new"},
]
},
'uri': 'Test uri new',
},
'award': "Award"
}
schema = {
'mergeStrategy': 'objectMerge',
'properties': {
'buyer': {
'properties': {
'id': {
'properties': {
'name': {
'mergeStrategy': 'version',
}
}
},
'uri': {
'mergeStrategy': 'overwrite',
}
},
},
'award': {
'mergeStrategy': 'overwrite',
}
},
}
base = None
base = jsonmerge.merge(base, head1, schema)
base = jsonmerge.merge(base, head2, schema)
self.assertEqual(base, base_expect)
def test_internal_refs(self):
schema = {
'id': 'http://example.com/schema_1.json',
'properties': {
'a': {'$ref': "#/definitions/a"},
},
'definitions': {
"a": {
"properties": {
"b": {'mergeStrategy': 'version'},
}
},
}
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, {"a": {"b": "c"}})
base = merger.merge(base, {"a": {"b": "d"}})
self.assertEqual(base, {"a": {"b": [{"value": "c"}, {"value": "d"}]}})
def test_external_refs(self):
schema_1 = {
'id': 'http://example.com/schema_1.json',
'properties': {
'a': {'$ref': "schema_2.json#/definitions/a"},
},
}
schema_2 = {
'id': 'http://example.com/schema_2.json',
'definitions': {
"a": {
"properties": {
"b": {'mergeStrategy': 'version'},
}
},
}
}
merger = jsonmerge.Merger(schema_1)
# merge() would otherwise make an HTTP request
merger.cache_schema(schema_2)
base = None
base = merger.merge(base, {"a": {"b": "c"}})
base = merger.merge(base, {"a": {"b": "d"}})
self.assertEqual(base, {"a": {"b": [{"value": "c"}, {"value": "d"}]}})
@unittest.skipIf(Draft6Validator is None, 'jsonschema too old')
def test_external_refs_draft6(self):
schema_1 = {
'$id': 'http://example.com/schema_1.json',
'properties': {
'a': {'$ref': "schema_2.json#/definitions/a"},
},
}
schema_2 = {
'$id': 'http://example.com/schema_2.json',
'definitions': {
"a": {
"properties": {
"b": {'mergeStrategy': 'version'},
}
},
}
}
merger = jsonmerge.Merger(schema_1, validatorclass=Draft6Validator)
# merge() would otherwise make an HTTP request
merger.cache_schema(schema_2)
base = None
base = merger.merge(base, {"a": {"b": "c"}})
base = merger.merge(base, {"a": {"b": "d"}})
self.assertEqual(base, {"a": {"b": [{"value": "c"}, {"value": "d"}]}})
def test_oneof(self):
schema = {
'oneOf': [
{
'type': 'array',
'mergeStrategy': 'append'
},
{
'type': 'object'
}
]
}
merger = jsonmerge.Merger(schema)
base = [1]
base = merger.merge(base, [2])
self.assertEqual(base, [1,2])
base = {'a': 1}
base = merger.merge(base, {'b': 2})
self.assertEqual(base, {'a': 1, 'b': 2})
base = [1]
with self.assertRaises(HeadInstanceError) as cm:
merger.merge(base, {'b': 2})
self.assertEqual(cm.exception.value.ref, '#')
def test_oneof_recursive(self):
# Schema to merge all arrays with "append" strategy and all objects
# with the default "objectMerge" strategy.
schema = {
"oneOf": [
{
"type": "array",
"mergeStrategy": "append"
},
{
"type": "object",
"additionalProperties": {
"$ref": "#"
}
},
{
"type": "string"
},
]
}
base = {"a": ["1"], "b": "3", "c": {"d": ["4"], "e": "f"}}
head = {"a": ["2"], "b": "4", "g": "7", "c": {"d": ["3"]}}
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, {"a": ["1", "2"], "b": "4", "g": "7", "c": {"d": ["4", "3"], "e": "f"}})
def test_oneof_overwrite_toplevel(self):
schema = {
'mergeStrategy': 'overwrite',
'oneOf': [
{
'type': 'array'
},
{
'type': 'string'
},
]
}
merger = jsonmerge.Merger(schema)
self.assertEqual(merger.merge([2, 3, 4], 'a'), 'a')
self.assertEqual(merger.merge('a', [2, 3, 4]), [2, 3, 4])
def test_oneof_multiple_validate(self):
schema = {
'oneOf': [
{
'type': 'array',
'maxItems': 3,
'mergeStrategy': 'append'
},
{
'type': 'array',
'minItems': 2,
'mergeStrategy': 'overwrite'
}
]
}
merger = jsonmerge.Merger(schema)
base = [1]
base = merger.merge(base, [2])
self.assertEqual(base, [1, 2])
base = [1, 2]
with self.assertRaises(HeadInstanceError) as cm:
base = merger.merge(base, [3, 4])
def test_anyof(self):
schema = {
'anyOf': [
{
'type': 'array'
},
{
'type': 'string'
},
]
}
merger = jsonmerge.Merger(schema)
with self.assertRaises(SchemaError) as cm:
merger.merge([2, 3, 4], 'a')
self.assertEqual(cm.exception.value.ref, '#')
def test_anyof_overwrite_toplevel(self):
schema = {
'mergeStrategy': 'overwrite',
'anyOf': [
{
'type': 'array'
},
{
'type': 'string'
},
]
}
merger = jsonmerge.Merger(schema)
self.assertEqual(merger.merge([2, 3, 4], 'a'), 'a')
self.assertEqual(merger.merge('a', [2, 3, 4]), [2, 3, 4])
def test_custom_strategy(self):
schema = {'mergeStrategy': 'myStrategy'}
class MyStrategy(jsonmerge.strategies.Strategy):
def merge(self, walk, base, head, schema, meta, **kwargs):
if base is None:
ref = ""
else:
ref = base.ref
return JSONValue("foo", ref)
merger = jsonmerge.Merger(schema=schema,
strategies={'myStrategy': MyStrategy()})
base = None
base = merger.merge(base, {'a': 1})
self.assertEqual(base, "foo")
def test_merge_by_id(self):
schema = {
"properties": {
"awards": {
"type": "array",
"mergeStrategy": "arrayMergeById",
"items": {
"properties": {
"id": {"type": "string"},
"field": {"type": "number"},
}
}
}
}
}
a = {
"awards": [
{"id": "A", "field": 1},
{"id": "B", "field": 2}
]
}
b = {
"awards": [
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
expected = {
"awards": [
{"id": "A", "field": 1},
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_when_key_is_empty_should_do_nothing(self):
schema = {
"properties": {
"awards": {
"type": "array",
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"ignoreId": ""},
"items": {
"properties": {
"id": {"type": "string"},
"field": {"type": "number"},
}
}
}
}
}
a = {
"awards": [
{"id": "A", "field": 1},
{"id": "", "field": ""}
]
}
b = {
"awards": [
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
expected = {
"awards": [
{"id": "A", "field": 1},
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_no_items(self):
schema = {
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"idRef": "id"},
}
a = [
{"id": "A", "field": 1},
]
b = [
{"id": "A", "field": 2},
]
# by default, it should fall back to "replace" strategy for integers.
expected = [
{"id": "A", "field": 2},
]
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_simple_ref(self):
schema = {
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"idRef": "key"}
}
a = [
{"key": "A", "field": 1},
]
b = [
{"key": "A", "field": 2},
]
expected = [
{"key": "A", "field": 2},
]
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_no_key(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
a = [
{"id": "A", "field": 1},
]
b = [
{'field': 2}
]
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
# it should ignore array elements that do not have the id
self.assertEqual(base, a)
def test_merge_by_id_complex_ref(self):
schema = {
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"idRef": "/foo/bar"},
}
a = [
{'foo': {'bar': 1}, 'baz': 1}
]
b = [
{'foo': {'bar': 2}}
]
c = [
{'foo': {'bar': 1}, 'baz': 2}
]
# by default, it should fall back to "replace" strategy for integers.
expected = [
{'foo': {'bar': 1}, 'baz': 2},
{'foo': {'bar': 2}}
]
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
base = merger.merge(base, c)
self.assertEqual(base, expected)
def test_merge_by_id_complex_id(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
a = [
{"id": ["A", {"B": "C"} ], "field": 1},
{"id": ["A", {"B": "D"} ], "field": 2},
{"id": ["A", {"B": "E"} ], "field": 3},
]
b = [
{"id": ["A", {"B": "D"} ], "field": 4},
{"id": ["E", {"B": "C"} ], "field": 5},
]
merger = jsonmerge.Merger(schema)
c = merger.merge(a, b)
expected = [
{"id": ["A", {"B": "C"} ], "field": 1},
{"id": ["A", {"B": "D"} ], "field": 4},
{"id": ["A", {"B": "E"} ], "field": 3},
{"id": ["E", {"B": "C"} ], "field": 5},
]
self.assertEqual(expected, c)
def test_merge_by_id_with_complex_array(self):
schema = {
"properties": {
"awards": {
"type": "array",
"mergeStrategy": "arrayMergeById",
"items": {
"properties": {
"id": {"type": "string"},
"field": {
"type": "array",
"items": {
"properties": {
"xx": {
"type": "string"
}
}
}
}
}
}
}
}
}
a = {
"awards": [
{"id": "A", "field": [{"xx": "testA1"}, {"xx": "testA2"}]},
{"id": "B", "field": [{"xx": "testA3"}, {"xx": "testA4"}]}
]
}
b = {
"awards": [
{"id": "B", "field": [{"xx": "testA3"}, {"xx": "testA6"}]},
{"id": "C", "field": [{"xx": "testA7"}, {"xx": "testA8"}]}
]
}
expected = {
"awards": [
{"id": "A", "field": [{"xx": "testA1"}, {"xx": "testA2"}]},
{"id": "B", "field": [{"xx": "testA3"}, {"xx": "testA6"}]},
{"id": "C", "field": [{"xx": "testA7"}, {"xx": "testA8"}]}
]
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_with_subschema(self):
schema = {
"properties": {
"awards": {
"type": "array",
"mergeStrategy": "arrayMergeById",
"items": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"field": {
"type": "number",
"mergeStrategy": "version"
}
}
}
}
}
}
a = {
"awards": [
{"id": "A", "field": 1},
{"id": "B", "field": 2}
]
}
b = {
"awards": [
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
expected = {
"awards": [
{"id": "A", "field": [{"value": 1}]},
{"id": "B", "field": [{"value": 2}, {"value": 3}]},
{"id": "C", "field": [{"value": 4}]}
]
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected)
def test_merge_by_id_items_array(self):
schema = {
"mergeStrategy": "arrayMergeById",
"items": [
{},
{},
]
}
head = [
{'id': 'A'},
{'id': 'B'}
]
merger = jsonmerge.Merger(schema)
base = None
with self.assertRaises(SchemaError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#/items')
def test_merge_by_id_only_integers(self):
# arrayMergeById strategy can be used to treat simple arrays of
# integers as Python sets by setting idRef to root (i.e. pointing to
# the array element itself)
#
# https://github.com/avian2/jsonmerge/issues/24
schema = {
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"idRef": "/"},
}
base = [ 1, 2 ]
head = [ 2, 3 ]
expected = [ 1, 2, 3]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_merge_by_id_bad_head_type(self):
schema = {
'mergeStrategy': 'arrayMergeById'
}
head = {'foo': 'bar'}
base = []
merger = jsonmerge.Merger(schema)
with self.assertRaises(HeadInstanceError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#')
def test_merge_by_id_bad_base_type(self):
schema = {
'mergeStrategy': 'arrayMergeById'
}
head = []
base = {'foo': 'bar'}
merger = jsonmerge.Merger(schema)
with self.assertRaises(BaseInstanceError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#')
def test_merge_by_id_no_base_id(self):
schema = {
'mergeStrategy': 'arrayMergeById'
}
head = [ {'id': 'a'} ]
base = [ {} ]
merger = jsonmerge.Merger(schema)
r = merger.merge(base, head)
self.assertEqual(r, [ {}, {'id': 'a'} ])
def test_merge_by_id_non_unique_base(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
base = [
{'id': 'a'},
{'id': 'a'}
]
head = [
{'id': 'a',
'foo': 1}
]
merger = jsonmerge.Merger(schema)
with self.assertRaises(BaseInstanceError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#/1')
def test_merge_by_id_non_unique_head(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
base = [
{'id': 'a',
'foo': 1},
]
head = [
{'id': 'a',
'foo': 2},
{'id': 'a',
'foo': 3}
]
merger = jsonmerge.Merger(schema)
with self.assertRaises(HeadInstanceError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#/1')
def test_merge_by_id_order_issue_31_1(self):
# There was an issue with arrayMergeById where head value would be
# merged with the last item in the base list, not the matching item.
# The result was then assigned to the matching item.
#
# If the last item in the base list was just created in the same
# arrayMergeById (i.e. by another item in the head list), then merge
# would fail with "Unresolvable JSON pointer".
#
# https://github.com/avian2/jsonmerge/pull/31
schema = {
"mergeStrategy": "arrayMergeById",
}
base = [
{'id': 'a', 'val': {'a': 1}},
{'id': 'b', 'val': {'b': 2}},
]
head = [
{'id': 'a', 'val': {'c': 3}}
]
expected = [
# bug would produce {'b': 2, 'c': 3} here
{'id': 'a', 'val': {'a': 1, 'c': 3}},
{'id': 'b', 'val': {'b': 2}},
]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_merge_by_id_order_issue_31_2(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
base = [
{'id': 'a', 'val': {'a': 1}},
{'id': 'b', 'val': {'b': 2}},
]
head = [
# this caused "Unresolvable JSON pointer"
{'id': 'c', 'val': {'c': 3}},
{'id': 'a', 'val': {'c': 3}}
]
expected = [
{'id': 'a', 'val': {'a': 1, 'c': 3}},
{'id': 'b', 'val': {'b': 2}},
{'id': 'c', 'val': {'c': 3}}
]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_merge_by_id_subclass_get_key(self):
class MyArrayMergeById(jsonmerge.strategies.ArrayMergeById):
def get_key(self, walk, item, idRef):
return item.val[-1]
schema = {'mergeStrategy': 'myArrayMergeById'}
merger = jsonmerge.Merger(schema=schema,
strategies={'myArrayMergeById': MyArrayMergeById()})
base = [
[ 'a', 'b', 'id1' ],
[ 'c', 'id2' ],
]
head = [
[ 'e', 'f', 'g', 'id3' ],
[ 'd', 'id1' ],
]
expected = [
[ 'd', 'id1' ],
[ 'c', 'id2' ],
[ 'e', 'f', 'g', 'id3' ],
]
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_merge_by_id_multiple_ids(self):
schema = {
'mergeStrategy': 'arrayMergeById',
'mergeOptions': { 'idRef': ['/a', '/b'] }
}
base = [
{
'a': 1,
'b': 2
},
{
'a': 1,
'b': 1,
}
]
head = [
{
'a': 1,
'b': 1,
'c': 2,
},
{
# incomplete key, ignored
'b': 1,
},
{
'a': 2,
'b': 2,
'c': 3,
}
]
expected = [
{
'a': 1,
'b': 2
},
{
'a': 1,
'b': 1,
'c': 2,
},
{
'a': 2,
'b': 2,
'c': 3,
}
]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_merge_by_id_multiple_ids_ignore(self):
schema = {
'mergeStrategy': 'arrayMergeById',
'mergeOptions': {
'idRef': ['/a', '/b'],
'ignoreId': [1, 2],
}
}
base = [
{
'a': 1,
'b': 1,
}
]
head = [
{
# ignoreId matches
'a': 1,
'b': 2,
'c': 2,
},
{
'a': 2,
'b': 2,
'c': 3,
}
]
expected = [
{
'a': 1,
'b': 1
},
{
'a': 2,
'b': 2,
'c': 3,
}
]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected)
def test_append_with_maxitems(self):
schema = {
"mergeStrategy": "append",
"maxItems": 2,
}
merger = jsonmerge.Merger(schema)
head = ["a"]
base = None
base = merger.merge(base, head)
base = merger.merge(base, head)
base = merger.merge(base, head)
schema2 = merger.get_schema()
jsonschema.validate(head, schema2)
jsonschema.validate(base, schema2)
def test_append_with_unique(self):
schema = {
"mergeStrategy": "append",
"uniqueItems": True,
}
merger = jsonmerge.Merger(schema)
head = ["a"]
base = None
base = merger.merge(base, head)
base = merger.merge(base, head)
schema2 = merger.get_schema()
jsonschema.validate(head, schema2)
jsonschema.validate(base, schema2)
def test_slash_in_property_name(self):
base = {'a': 0}
head = {'b': {'c/d': 1}}
base = jsonmerge.merge(base, head)
self.assertEqual(base, {'a': 0, 'b': {'c/d': 1}})
def test_tilde_in_property_name(self):
base = {'a': 0}
head = {'~1': 1}
base = jsonmerge.merge(base, head)
self.assertEqual(base, {'a': 0, '~1': 1})
def test_discard(self):
schema = {'mergeStrategy': 'discard'}
base = "a"
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, "a")
def test_discard_objectmerge_null(self):
schema = {
'properties': {
'a': {
'mergeStrategy': 'discard'
}
} }
base = {}
head = {'a': 1}
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, {})
def test_discard_arraymergebyid(self):
schema = {
'mergeStrategy': 'arrayMergeById',
'items': {
'mergeStrategy': 'discard'
} }
base = [ {'id': 1, 'val': 1} ]
head = [ {'id': 1, 'val': 2} ]
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, [{'id': 1, 'val': 1}])
def test_discard_arraymergebyid_null(self):
schema = {
'mergeStrategy': 'arrayMergeById',
'items': {
'mergeStrategy': 'discard'
} }
base = [ ]
head = [ {'id': 1, 'val': 1} ]
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, [])
def test_discard_null_keep(self):
schema = {
'properties': {
'a': {
'mergeStrategy': 'discard',
'mergeOptions': {
'keepIfUndef': True
}
}
} }
base = {}
head = {'a': 1}
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, {'a': 1})
head = {'a': 2}
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, {'a': 1})
def test_bad_strategy(self):
schema = {
'properties': {
'a': {
'mergeStrategy': 'invalidStrategy'
} } }
base = {'a': 1 }
head = {'a': 2 }
with self.assertRaises(SchemaError) as cm:
jsonmerge.merge(base, head, schema)
self.assertEqual(cm.exception.value.ref, '#/properties/a')
def test_nan(self):
# float('nan') == float('nan') evaluates to false.
#
# https://github.com/avian2/jsonmerge/issues/39
base = {
"foo": 1,
"bar": float('nan')
}
head = {
"foo": 1,
"bar": float('nan')
}
base = jsonmerge.merge(base, head)
def test_merge_by_index(self):
schema = {
'mergeStrategy': 'arrayMergeByIndex'
}
base = [ {'a': 0 }, {'b': 1} ]
head = [ {'c': 2 }, {'d': 3} ]
result = jsonmerge.merge(base, head, schema)
self.assertEqual(result, [ {'a': 0, 'c': 2}, {'b': 1, 'd': 3} ])
def test_merge_by_index_empty(self):
schema = {
'mergeStrategy': 'arrayMergeByIndex'
}
base = [ ]
head = [ {'c': 2 }, {'d': 3} ]
result = jsonmerge.merge(base, head, schema)
self.assertEqual(result, [ {'c': 2}, {'d': 3} ])
class TestGetSchema(unittest.TestCase):
def test_default_overwrite(self):
schema = {'description': 'test'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {'description': 'test'})
def test_default_object_merge_trivial(self):
schema = {'type': 'object'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {'type': 'object'})
def test_default_object_merge(self):
schema = {
'properties': {
'foo': {
'mergeStrategy': 'version',
}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'properties': {
'foo': {
'type': 'array',
'items': {
'properties': {
'value': {},
}
}
}
}
})
def test_overwrite(self):
schema = {'mergeStrategy': 'overwrite'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {})
def test_append(self):
schema = {'type': 'array',
'mergeStrategy': 'append'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {'type': 'array'})
def test_version(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'type': 'array',
'items': {
'properties': {
'value': {}
}
}
})
def test_version_ref_twice(self):
schema = {
'properties': {
'a': {
'$ref': '#/definitions/item'
},
'b': {
'$ref': '#/definitions/item'
},
},
'definitions': {
'item': {
'type': 'object',
'mergeStrategy': 'version'
}
}
}
expected = {
'properties': {
'a': {
'$ref': '#/definitions/item'
},
'b': {
'$ref': '#/definitions/item'
},
},
'definitions': {
'item': {
'type': 'array',
'items': {
'properties': {
'value': {
'type': 'object',
}
}
}
}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(expected, schema2)
def test_version_meta(self):
schema = {'type': 'object',
'mergeStrategy': 'version'}
meta = {
'properties': {
'date': {},
'version': {}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema(merge_options={
'version': {'metadataSchema': meta}})
self.assertEqual(schema2,
{
'type': 'array',
'items': {
'properties': {
'value': {'type': 'object'},
'date': {},
'version': {}
}
}
})
def test_version_meta_deprecated(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
with warnings.catch_warnings(record=True) as w:
merger.get_schema(meta={'foo': 'bar'})
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
def test_version_meta_in_schema(self):
schema = {
'type': 'object',
'mergeStrategy': 'version',
'mergeOptions': {
'metadataSchema': {
'properties': {
'date': {},
'version': {},
},
},
},
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'type': 'array',
'items': {
'properties': {
'value': {'type': 'object'},
'date': {},
'version': {}
}
}
})
def test_version_limit(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'limit': 5}}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'type': 'array',
'items': {
'properties': {
'value': {}
}
},
'maxItems': 5
})
def test_object_merge_simple(self):
schema = {'mergeStrategy': 'objectMerge'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {})
def test_object_merge_nested(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'foo': {'mergeStrategy': 'version'}
}}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'properties': {
'foo': {
'type': 'array',
'items': {
'properties': {
'value': {}
}
}
}
}
})
def test_anyof_descend(self):
# We don't support descending through 'anyOf', since each branch could
# have its own rules for merging. How could we then decide which rule
# to follow?
schema = {
'anyOf': [
{'properties': {'a': {}}},
{'properties': {'b': {}}}
]
}
merger = jsonmerge.Merger(schema)
with self.assertRaises(SchemaError) as cm:
merger.get_schema()
self.assertEqual(cm.exception.value.ref, '#')
def test_anyof_dont_descend(self):
# However, 'anyOf' should be fine if we don't descend through it (e.g.
# if it comes after an 'overwrite' strategy).
schema = {
'properties': {
'a': {
'mergeStrategy': 'overwrite',
'properties': {
'b': {
'anyOf': [
{'properties': {'c': {}}},
{'properties': {'d': {}}},
]
}
}
}
}
}
expected = {
'properties': {
'a': {
'properties': {
'b': {
'anyOf': [
{'properties': {'c': {}}},
{'properties': {'d': {}}},
]
}
}
}
}
}
merger = jsonmerge.Merger(schema)
mschema = merger.get_schema()
self.assertEqual(expected, mschema)
def test_external_refs(self):
schema_1 = {
'id': 'http://example.com/schema_1.json',
'$ref': 'schema_2.json#/definitions/foo'
}
# get_schema() shouldn't do external HTTP requests for schemas.
merger = jsonmerge.Merger(schema_1)
mschema = merger.get_schema()
d = {
'id': 'http://example.com/schema_1.json',
'$ref': 'schema_2.json#/definitions/foo'
}
self.assertEqual(d, mschema)
def test_internal_refs(self):
schema = {
'id': 'http://example.com/schema_1.json',
'mergeStrategy': 'overwrite',
'properties': {
'foo': {
'$ref': '#/definitions/bar'
}
},
'definitions': {
'bar': {
'properties': {
'baz': {}
}
}
}
}
expected = {
'id': 'http://example.com/schema_1.json',
'properties': {
'foo': {
'$ref': '#/definitions/bar'
}
},
'definitions': {
'bar': {
'properties': {
'baz': {}
}
}
}
}
merger = jsonmerge.Merger(schema)
mschema = merger.get_schema()
self.assertEqual(expected, mschema)
def test_ref_to_non_object_is_an_error(self):
schema = {
'properties': {
'foo': {
'$ref': '#/definitions/bar'
}
},
'definitions': {
'bar': []
}
}
merger = jsonmerge.Merger(schema)
with self.assertRaises(SchemaError) as cm:
merger.get_schema()
self.assertEqual(cm.exception.value.ref, '#/properties/foo')
def test_reference_in_meta(self):
schema = {'mergeStrategy': 'version'}
meta_schema = {
'id': 'http://example.com/schema_1.json',
'$ref': 'schema_2.json#/definitions/meta'
}
schema_2 = {
'id': 'http://example.com/schema_2.json',
'definitions': {
'meta': {
'properties': {
'foo': {
'type': 'string'
},
'bar': {
'enum': [ 'a', 'b' ]
}
}
}
}
}
merger = jsonmerge.Merger(schema)
merger.cache_schema(schema_2)
mschema = merger.get_schema(merge_options={
'version': {'metadataSchema': meta_schema}})
self.assertEqual(mschema,
{
'type': 'array',
'items': {
'properties': {
'value': {},
'foo': {'type': 'string'},
'bar': {'enum': ['a', 'b'] },
}
}
})
def test_local_reference_in_meta(self):
schema = {
'properties': {
'content': {
'mergeStrategy': 'version',
'mergeOptions': {
'metadataSchema': {
'$ref': '#/definitions/metadata',
},
},
},
},
'definitions': {
'metadata': {
'properties': {
'revision': {
'type': 'number',
},
},
},
},
}
merger = jsonmerge.Merger(schema)
mschema = merger.get_schema()
self.assertEqual(mschema, {
'properties': {
'content': {
'type': 'array',
'items': {
'properties': {
'value': {},
'revision': {
'type': 'number',
},
},
},
},
},
'definitions': {
'metadata': {
'properties': {
'revision': {
'type': 'number',
},
},
},
},
})
def test_array_in_schema(self):
schema = {
'mergeStrategy': 'overwrite',
'enum': [
"foo",
"bar",
]
}
expected = {
'enum': [
"foo",
"bar",
]
}
merger = jsonmerge.Merger(schema)
mschema = merger.get_schema()
self.assertEqual(expected, mschema)
def test_version_adds_array_type(self):
schema = {
"type": "object",
"properties": {
"buyer": {
"properties": {
"id": {
"type": "object",
"properties": {
"name": {
"type": "string",
"mergeStrategy": "version"
}
}
}
}
}
}
}
expected = {
"type": "object",
"properties": {
"buyer": {
"properties": {
"id": {
"type": "object",
"properties": {
"name": {
"type": "array",
"items": {
"properties": {
"value": {
"type": "string"
}
}
}
}
}
}
}
}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected)
def test_merge_by_id(self):
schema = {
"mergeStrategy": "arrayMergeById",
"items": {
'type': 'object'
}
}
expected = {
"items": {
'type': 'object'
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected)
def test_merge_by_id_with_depth(self):
schema = {
"properties": {
"test": {
"mergeStrategy": "arrayMergeById",
"type": "array",
"items": {
"$ref": "#/definitions/refitem"
}
}
},
"definitions": {
"refitem": {
"type": "object",
"properties": {
"field1": {
"type": "string",
"mergeStrategy": "version"
}
}
}
}
}
expected = {
"properties": {
"test": {
"type": "array",
"items": {
"$ref": "#/definitions/refitem"
}
}
},
"definitions": {
"refitem": {
"type": "object",
"properties": {
"field1": {
"type": "array",
"items": {
"properties": {
"value": {
"type": "string"
}
}
}
}
}
}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected)
def test_merge_by_id_with_depth_twice(self):
# Here we have a $ref that get_schema() should descend into twice.
schema = {
"properties": {
"test": {
"mergeStrategy": "arrayMergeById",
"type": "array",
"items": {
"$ref": "#/definitions/refitem"
}
},
"test2": {
"mergeStrategy": "arrayMergeById",
"type": "array",
"items": {
"$ref": "#/definitions/refitem"
}
}
},
"definitions": {
"refitem": {
"type": "object",
"properties": {
"field1": {
"type": "string",
"mergeStrategy": "version"
}
}
}
}
}
expected = {
"properties": {
"test": {
"type": "array",
"items": {
"$ref": "#/definitions/refitem"
}
},
"test2": {
"type": "array",
"items": {
"$ref": "#/definitions/refitem"
}
}
},
"definitions": {
"refitem": {
"type": "object",
"properties": {
"field1": {
"type": "array",
"items": {
"properties": {
"value": {
"type": "string"
}
}
}
}
}
}
}
}
self.maxDiff = None
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected)
def test_merge_by_id_with_depth_no_ref(self):
schema = {
"properties": {
"test": {
"mergeStrategy": "arrayMergeById",
"type": "array",
"items": {
"type": "object",
"properties": {
"field1": {
"type": "string",
"mergeStrategy": "version"
}
}
}
}
}
}
expected = {
"properties": {
"test": {
"type": "array",
"items": {
"type": "object",
"properties": {
"field1": {
"type": "array",
"items": {
"properties": {
"value": {
"type": "string"
}
}
}
}
}
}
}
},
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected)
def test_merge_append_additional(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'b': {'mergeStrategy': 'overwrite'}
},
'additionalProperties': {
'mergeStrategy': 'append'
}}
expected = {'properties': {
'b': {},
},
'additionalProperties': {}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected)
def test_merge_additional_bool(self):
schema = {'additionalProperties': True}
base = {}
head = {'a': 'a'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, schema)
def test_oneof(self):
schema = {
'oneOf': [
{
'type': 'array',
'mergeStrategy': 'append'
},
{
'type': 'object'
}
]
}
expected = {
'oneOf': [
{
'type': 'array',
},
{
'type': 'object'
}
]
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected)
def test_oneof_recursive(self):
# Schema to merge all arrays with "append" strategy and all objects
# with the default "objectMerge" strategy.
schema = {
"oneOf": [
{
"type": "array",
"mergeStrategy": "append"
},
{
"type": "object",
"additionalProperties": {
"$ref": "#"
}
},
{
"type": "string"
},
]
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, schema)
def test_oneof_toplevel(self):
schema = {
"mergeStrategy": "version",
"oneOf": [
{"type": "string", "pattern": "^!?(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?:\\/[0-9]{1,2})?$"},
{"type": "string", "format": "hostname"}
]
}
expected = {
"type": "array",
"items": {
"properties": {
"value": {
"oneOf": [
{"type": "string", "pattern": "^!?(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?:\\/[0-9]{1,2})?$"},
{"type": "string", "format": "hostname"}
]
}
}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected)
def test_anyof_toplevel(self):
schema = {
"mergeStrategy": "version",
"anyOf": [
{"type": "string", "pattern": "^!?(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?:\\/[0-9]{1,2})?$"},
{"type": "string", "format": "hostname"}
]
}
expected = {
"type": "array",
"items": {
"properties": {
"value": {
"anyOf": [
{"type": "string", "pattern": "^!?(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?:\\/[0-9]{1,2})?$"},
{"type": "string", "format": "hostname"}
]
}
}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected)
def test_discard(self):
schema = { 'type': 'string',
'mergeStrategy': 'discard' }
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
expected = { 'type': 'string' }
self.assertEqual(schema2, expected)
def test_bad_strategy(self):
schema = {
'properties': {
'a': {
'mergeStrategy': 'invalidStrategy'
} } }
merger = jsonmerge.Merger(schema)
with self.assertRaises(SchemaError) as cm:
merger.get_schema()
self.assertEqual(cm.exception.value.ref, '#/properties/a')
def test_merge_by_index(self):
schema = {
'type': 'array',
'mergeStrategy': 'arrayMergeByIndex'
}
merger = jsonmerge.Merger(schema)
result = merger.get_schema()
self.assertEqual(result, {'type': 'array'})
def test_merge_by_index_name_in_exception(self):
schema = {
'properties': {
'a': {
'mergeStrategy': 'arrayMergeByIndex'
}
}
}
head = {'a': {}}
base = {'a': []}
merger = jsonmerge.Merger(schema)
with self.assertRaises(HeadInstanceError) as cm:
merger.merge(base, head)
self.assertIn('arrayMergeByIndex', str(cm.exception))
class TestExceptions(unittest.TestCase):
def test_str_with_ref(self):
e = SchemaError("Test error", JSONValue({}, '#'))
self.assertEqual(str(e), 'Test error: #')
def test_str(self):
e = SchemaError("Test error")
self.assertEqual(str(e), 'Test error')
def test_str_with_name(self):
e = SchemaError("Test error", JSONValue({}, '#'), 'test')
self.assertEqual(str(e), "'test' merge strategy: Test error: #")
if __name__ == '__main__':
unittest.main()
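# Hedged note (not part of the upstream suite): thanks to the unittest.main()
# hook above, these tests can be run directly or through discovery, e.g.:
#
#   python tests/test_jsonmerge.py
#   python -m unittest discover tests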
|
avian2/jsonmerge
|
tests/test_jsonmerge.py
|
Python
|
mit
| 69,662
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from annotations.models import Corpus
from annotations.exports import export_fragments
from core.utils import CSV, XLSX
class Command(BaseCommand):
help = 'Exports existing Fragments for the given Corpus and Languages'
def add_arguments(self, parser):
parser.add_argument('corpus', type=str)
parser.add_argument('languages', nargs='+', type=str)
parser.add_argument('--add_lemmata', action='store_true', dest='add_lemmata', default=False)
parser.add_argument('--add_indices', action='store_true', dest='add_indices', default=False)
parser.add_argument('--xlsx', action='store_true', dest='format_xlsx', default=False)
parser.add_argument('--doc', dest='document')
parser.add_argument('--formal_structure')
def handle(self, *args, **options):
# Retrieve the Corpus from the database
try:
corpus = Corpus.objects.get(title=options['corpus'])
except Corpus.DoesNotExist:
raise CommandError('Corpus with title {} does not exist'.format(options['corpus']))
format_ = XLSX if options['format_xlsx'] else CSV
for language in options['languages']:
if not corpus.languages.filter(iso=language):
raise CommandError('Language {} does not exist'.format(language))
filename = 'fragments_{lang}.{ext}'.format(lang=language, ext=format_)
export_fragments(filename, format_, corpus, language,
document=options['document'],
add_lemmata=options['add_lemmata'],
add_indices=options['add_indices'],
formal_structure=options['formal_structure'])
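# Hedged usage sketch (not part of the original command): illustrative
# invocations only; the corpus title, language ISO codes and document name
# below are hypothetical and must already exist in the database.
#
#   python manage.py export_fragments "MyCorpus" en nl --add_lemmata --xlsx
#   python manage.py export_fragments "MyCorpus" en --doc document1 --add_indices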
|
UUDigitalHumanitieslab/timealign
|
annotations/management/commands/export_fragments.py
|
Python
|
mit
| 1,823
|
from pymongo import MongoClient
from pymongo.collection import Collection
from pymongo.errors import AutoReconnect
from django.conf import settings
from types import FunctionType
import functools
import time
__all__ = ("connection", "connections", "db", "get_db")
"""
Goals:
* To provide a clean universal handler for Mongo, similar to how Django does it
for other db connections, but Mongo is unique and simple enough to just live on
its own.
* To wrap the pymongo Collection methods automatically with a reconnect decorator
in case a server is temporarily down, or a replica set is in the middle of failing
over to a secondary server.
"""
"""
In settings.py:
MONGODB = {
'default': {
'NAME': 'db1',  # Default database to connect to
'authSource': 'admin',  # Auth database; required by MongoHandler below (example value)
'LOCATION': [ # An array of host strings, similar to the CACHES setting.
'localhost:27017',
]
}
}
Usage:
from mongodb import connections, connection, db
connections['default'].db1.messages.find({'key': 'value'}) # manually select the 'default' connection
connection.db1.messages.find({'key': 'value'}) # manually specify the database to use, overriding "NAME"
db.messages.find({'key': 'value'}) # Just let the library use all of the defaults
"""
def with_reconnect(func):
"""
Handle when AutoReconnect is raised from pymongo. This is the standard error
raised for everything from "host disconnected" to "couldn't connect to host"
and more.
The sleep handles the edge case when the state of a replica set changes, and
the cursor raises AutoReconnect because the master may have changed. It can
take some time for the replica set to stop raising this exception, and the
small sleep and iteration count give us a couple of seconds before we fail
completely. See also http://jira.mongodb.org/browse/PYTHON-216
"""
@functools.wraps(func)
def _reconnector(*args, **kwargs):
for x in xrange(20):
try:
return func(*args, **kwargs)
except AutoReconnect:
time.sleep(0.250)
pass
raise
return _reconnector
class ConnectionDoesNotExist(Exception):
pass
class CollectionWrapper(object):
def __init__(self, collection):
self._collection = collection
def __getattr__(self, func):
old = getattr(self._collection, func)
if type(old) is FunctionType:
return with_reconnect(old)
return old
def __repr__(self):
return "<CollectionWrapper %s>" % self._collection.__repr__()
def __str__(self):
return "<CollectionWrapper %s>" % self._collection.__str__()
class DatabaseWrapper(object):
def __init__(self, database):
self._database = database
def __getattr__(self, func):
old = getattr(self._database, func)
if type(old) is FunctionType:
return with_reconnect(old)
elif isinstance(old, Collection):
return CollectionWrapper(old)
return old
def __getitem__(self, func):
old = getattr(self._database, func)
if isinstance(old, Collection):
return CollectionWrapper(old)
return old
def __repr__(self):
return "<DatabaseWrapper %s>" % self._database.__repr__()
def __str__(self):
return "<DatabaseWrapper %s>" % self._database.__str__()
class ConnectionWrapper(object):
def __init__(self, connection, default=None):
self._connection = connection
self._databases = {}
self._default = default
def __getattr__(self, alias):
if self._default is not None and alias == "default":
alias = self._default
if alias in self._databases:
return self._databases[alias]
database = DatabaseWrapper(self._connection[alias])
self._databases[alias] = database
return database
def __repr__(self):
return "<ConnectionWrapper %s>" % self._connection.__repr__()
def __str__(self):
return "<ConnectionWrapper %s>" % self._connection.__str__()
class MongoHandler(object):
def __init__(self, databases):
self.databases = databases
self._connections = {}
def __getitem__(self, alias):
if alias in self._connections:
return self._connections[alias]
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn = MongoClient(
[node for node in self.databases[alias]["LOCATION"]], authSource=self.databases[alias]["authSource"]
)
self._connections[alias] = ConnectionWrapper(conn, self.databases[alias]["NAME"])
return self._connections[alias]
# Module-level connection objects, so that the names listed in __all__ (and in
# the usage docstring above) can actually be imported.
connections = MongoHandler(settings.MONGODB)
connection = connections["default"]
def get_db():
    return connection.default
db = get_db()
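# Hedged usage sketch (not part of the original module): with_reconnect can
# also wrap ad-hoc callables that should survive a brief replica-set
# failover. The "messages" collection name below is illustrative only.
@with_reconnect
def latest_message():
    # Uses the module-level default database, mirroring the docstring's
    # db.messages.find({'key': 'value'}) example.
    return db.messages.find_one(sort=[('_id', -1)])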
|
lang-uk/lang.org.ua
|
languk/corpus/mongodb.py
|
Python
|
mit
| 4,932
|
# coding:utf-8
import json
from django.http import HttpResponse
from django.shortcuts import render
from aircraft_config import AC_WQAR_CONFIG
from list2string_and_echarts_function import Echarts_option
from list2string_and_echarts_function import LIST_to_STR
from influxdb_function import influxDB_interface
from main_web.models import Stencil
from arrow_time import today_date_for_influxd_sql
from arrow_time import ten_day_ago_for_influxd_sql
from arrow_time import three_day_ago_for_influxd_sql
def all_childtable_index_list(request):
if request.method == 'POST':
post_data = request.POST
date_range = post_data["date_range"]
date_start = date_range.split(' to ')[0]
date_end = date_range.split(' to ')[1]
else:
date_start = today_date_for_influxd_sql()
date_end = today_date_for_influxd_sql()
where_str = " WHERE time > " + "'" + date_start + "'" + " - 8h" + " AND time < " + "'" + date_end + "'" + " + 16h"
infdb_if = influxDB_interface()
sector_index = infdb_if.inf_query("DB_sector_index", "*", "index", where_str)
if sector_index != {}:
df = sector_index['index']
result_json = df.to_json(orient="records")
return render(request, 'all_childtable_index_list.html', {'result_json': result_json,
'date_start': date_start,
'date_end': date_end})
else:
return render(request, 'all_childtable_index_list.html', {'date_start': date_start,
'date_end': date_end + " no data"})
# This page is currently unused
def query_index(request):
if request.method == 'POST':
post_data = request.POST
if post_data["date_start"]=='' or post_data["date_end"] == '':
date_start = ten_day_ago_for_influxd_sql()
date_end = today_date_for_influxd_sql()
else:
date_start = post_data["date_start"]
date_end = post_data["date_end"]
AC_id = post_data["AC_id"]
where_str = " WHERE time > " + "'" + date_start + "'" + " AND time < " + "'" + date_end + "'" + " + 1d" + " AND AC=" + "'" + AC_id + "'"
infdb_if = influxDB_interface()
sector_index = infdb_if.inf_query("DB_sector_index", "*", "index", where_str)
if sector_index != {}:
df = sector_index['index']
result_json = df.to_json(orient="records")
return render(request, 'single_plane_query.html', {'result_json': result_json,
'date_start': date_start,
'date_end': date_end,
'AC_id': AC_id})
else:
return render(request, 'single_plane_query.html', {'date_start': date_start,
'date_end': date_end + " no data",
'AC_id': AC_id})
else:
return render(request, 'single_plane_query.html')
def runup_list(request):
if request.method == 'POST':
post_data = request.POST
date_range = post_data["date_range"]
date_start = date_range.split(' to ')[0]
date_end = date_range.split(' to ')[1]
else:
date_start = ten_day_ago_for_influxd_sql()
date_end = today_date_for_influxd_sql()
where_str = " WHERE time > " + "'" + date_start + "'" + " - 8h" + " AND time < " + "'" + date_end + "'" + " + 16h"
where_str = where_str + " AND FLT_status='GROUND'"
infdb_if = influxDB_interface()
sector_index = infdb_if.inf_query("DB_sector_index", "*", "index", where_str)
df = sector_index['index']
result_json = df.to_json(orient="records")
return render(request, 'all_childtable_index_list.html',{'result_json': result_json})
def tendency_total(request):
if request.method == 'POST':
post_data = request.POST
date_range = post_data["date_range"]
date_start = date_range.split(' to ')[0]
date_end = date_range.split(' to ')[1]
tendency_type = post_data["tendency_type"]
where_str = " WHERE time > " + "'" + date_start + "'" + " AND time < " + "'" + date_end + "'" + " + 1d"
infdb_if = influxDB_interface()
sector_index = infdb_if.inf_query("tendency", "*", tendency_type, where_str)
if sector_index != {}:
df = sector_index[tendency_type]
result_json = df.to_json(orient="records")
return render(request, 'tendency_total.html', {'result_json': result_json,
'date_start': date_start,
'date_end': date_end,
})
else:
return render(request, 'tendency_total.html', {'date_start': date_start,
'date_end': date_end + " no data",
})
else:
date_start = ten_day_ago_for_influxd_sql()
date_end = today_date_for_influxd_sql()
where_str = " WHERE time > " + "'" + date_start + "'" + " AND time < " + "'" + date_end + "'" + " + 1d"
infdb_if = influxDB_interface()
sector_index = infdb_if.inf_query("tendency", "*", "tendency_total", where_str)
df = sector_index['tendency_total']
result_json = df.to_json(orient="records")
return render(request, 'tendency_total.html', {'result_json': result_json,
'date_start': date_start,
'date_end': date_end})
def tendency_single(request):
all_aircraft_list = json.dumps(AC_WQAR_CONFIG().all_aircraft())
if request.method == 'POST':
post_data = request.POST
date_range = post_data["date_range"]
date_start = date_range.split(' to ')[0]
date_end = date_range.split(' to ')[1]
para_name = post_data["para_name"]
para_name_sed = "\"" + para_name + "\""
exclude_list = ["AC","AC_sector","FLT_number","FLT_status","update_date","update_time"]
exclude_list = map(lambda x:"\"" + x + "\"", exclude_list)
exclude_list.append(para_name_sed)
query_para = ",".join(exclude_list)
print query_para
AC_id = post_data["AC_id"]
AC_id_sed = "\'" + AC_id + "\'"
qar_conf = AC_WQAR_CONFIG().juge_config(AC_id)
if qar_conf == "737_7":
tendency_type = "tendency_737_7"
elif qar_conf == "737_3C":
tendency_type = "tendency_737_3C"
where_str = " WHERE AC = " + AC_id_sed + " AND " + "time > " + "'" + date_start + "'" + " AND time < " + "'" + date_end + "'" + " + 1d"
infdb_if = influxDB_interface()
sector_index = infdb_if.inf_query("tendency", query_para, tendency_type, where_str)
if sector_index != {}:
df = sector_index[tendency_type]
result_json = df.to_json(orient="records")
return render(request, 'tendency_single.html', {'all_ac':all_aircraft_list,
'result_json': result_json,
'date_start': date_start,
'date_end': date_end,
})
else:
return render(request, 'tendency_single.html', {'all_ac':all_aircraft_list,
'date_start': date_start,
'date_end': date_end + " no data",
})
else:
return render(request, 'tendency_single.html', {'all_ac':all_aircraft_list,
'result_json': {}})
def tendency_single_para_list(request):
AC_id = request.GET.get('AC_id', None)
qar_conf = AC_WQAR_CONFIG().juge_config(AC_id)
if qar_conf == "737_7":
mes = "tendency_737_7"
elif qar_conf == "737_3C":
mes = "tendency_737_3C"
para_object = influxDB_interface().show_fields("tendency", mes)
para_list = []
exclude_list = ["AC","AC_sector","FLT_number","FLT_status","update_date","update_time"]
for item in para_object:
para_name = item['fieldKey']
if para_name not in exclude_list:
para_list.append(para_name)
para_json = json.dumps(para_list)
return HttpResponse(para_json)
def childtable(request, sector_id):
result_list = []
query_stencil = Stencil.objects.all()
for item in query_stencil:
dic_index = {
'NAME':item.NAME
}
result_list.append(dic_index)
return render(request, 'childtable.html', {'sector_id': sector_id,
'stencil_option': result_list})
def ajax_some_para(request):
list_str = LIST_to_STR()
post_NAME = request.GET.get('value_conf', None)
print post_NAME
post_flight_id = request.GET.get('flight_id', None)
print post_flight_id
aircraft_id = post_flight_id[0:6]
# Get the parameter list defined in the template
stencil_object = Stencil.objects.get(NAME = post_NAME)
list_3C, list_7 = LIST_to_STR().make_para_id_list()
list_units_3C, list_units_7 = LIST_to_STR().make_para_units_list()
list_WQAR256 = list_str.str_to_int(stencil_object.WQAR_737_3C)
list_WQAR512 = list_str.str_to_int(stencil_object.WQAR_737_7)
ac_wqar_config = AC_WQAR_CONFIG()
echarts_option_256 = stencil_object.echarts_737_3C
echarts_option_512 = stencil_object.echarts_737_7
dic_units = {}
list_para_name = []
if aircraft_id in ac_wqar_config.WQAR_7_SERISE_list:
model = list_WQAR512
list_units = list_units_7
ac_conf = '737_7'
for item in model:
list_para_name.append(list_7[int(item)])
dic_units[list_7[int(item)] ]= list_units[int(item)]
str_echarts_option = echarts_option_512
elif aircraft_id in ac_wqar_config.WQAR_3C_SERISE_list:
model = list_WQAR256
list_units = list_units_3C
ac_conf = '737_3C'
for item in model:
list_para_name.append(list_3C[int(item)])
dic_units[list_3C[int(item)] ]= list_units[int(item)]
str_echarts_option = echarts_option_256
else:
return HttpResponse("No such aircraft registration")
print list_para_name
query_result = influxDB_interface().list_query(
"CKG_QAR",
list_para_name,
ac_conf,
post_flight_id)
query_result.index = range(1,(len(query_result.index)+1),1)
new_df = query_result.fillna('-')
list_c1_c2 = new_df.to_dict(orient="records")
para_name_dic = {}
for key in list_c1_c2[0]:
para_name_dic[key] = key
list_c1_c2.insert(0, para_name_dic)
list_c1_c2.append(dic_units) # units are not wired up yet; left as a TODO
# Pass along the echarts configuration options
ec_op = Echarts_option()
echarts_config_option = ec_op.str_to_obj(str_echarts_option)
# Derive the list of boolean (logic-valued) parameters from echarts_option
list_index_of_logic_echarts = ec_op.judge_logic_echart(echarts_config_option)
result_json = json.dumps([list_c1_c2, echarts_config_option, list_index_of_logic_echarts])
return HttpResponse(result_json)
def eFlow_total(request):
date_start = three_day_ago_for_influxd_sql()
date_end = today_date_for_influxd_sql()
where_str = " WHERE time > " + "'" + date_start + "'" + " AND time < " + "'" + date_end + "'" + " + 1d"
infdb_if = influxDB_interface()
sector_index = infdb_if.inf_query("tendency", "*", "tendency_737_7", where_str)
df = sector_index['tendency_737_7']
eflow_ac_list = ["B-7181",
"B-7892",
"B-7595",
"B-7596",
"B-7597",
"B-7598",
"B-7890",
"B-7893",
"B-7895",
"B-7896",
"B-7891",
"B-7897",
"B-1417",
"B-1416"]
df_eflow = df[df['AC'].isin(eflow_ac_list)]
result_json = df_eflow.to_json(orient="records")
return render(request, 'eFlow_total.html', {'result_json': result_json,
'date_start': date_start,
'date_end': date_end})
|
waterwoodwind/influxDB_web
|
main_web/views_query.py
|
Python
|
mit
| 12,937
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "DASHD" not in os.environ:
os.environ["DASHD"] = buildDir + '/src/dashd' + EXEEXT
if "DASHCLI" not in os.environ:
os.environ["DASHCLI"] = buildDir + '/src/dash-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print "Win tests currently disabled by default. Use -win option to enable"
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
sys.exit(0)
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError as e:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
"to run zmq tests, see dependency info in /qa/README.md.")
raise e
#Tests
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'wallet-hd.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'p2p-fullblocktest.py', # NOTE: needs dash_hash to pass
'blockchain.py',
'disablewallet.py',
'sendheaders.py', # NOTE: needs dash_hash to pass
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs dash_hash to pass
'invalidtxrequest.py', # NOTE: needs dash_hash to pass
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs dash_hash to pass
'bip68-sequence.py',
'bipdersig-p2p.py', # NOTE: needs dash_hash to pass
'bipdersig.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
# 'pruning.py', # Prune mode is incompatible with -txindex.
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs dash_hash to pass
'mempool_packages.py',
'maxuploadtarget.py',
# 'replace-by-fee.py', # RBF is disabled in Dash Core
]
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `qa/rpc-tests/test_framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
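# File format sketch (inferred from the reads above, not documented in this
# script): `rpc_interface.txt` and each `coverage.*` file hold one RPC command
# per line, e.g.
#     getblockcount
#     getbestblockhash
#     sendtoaddress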
if __name__ == '__main__':
runtests()
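# Usage sketch (not part of the original script); invocation patterns implied by
# the option handling above:
#     python qa/pull-tester/rpc-tests.py                # run the default testScripts list
#     python qa/pull-tester/rpc-tests.py wallet         # run one test, by script name
#     python qa/pull-tester/rpc-tests.py -extended      # also run testScriptsExt
#     python qa/pull-tester/rpc-tests.py --coverage     # collect RPC coverage via RPCCoverage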
|
ediston/energi
|
qa/pull-tester/rpc-tests.py
|
Python
|
mit
| 8,724
|
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
import jsonfield
from .hooks import hookset
from .utils import load_path_attr
class UserState(models.Model):
"""
this stores the overall state of a particular user.
"""
user = models.OneToOneField(User, null=True, on_delete=models.SET_NULL)
data = jsonfield.JSONField(default=dict, blank=True)
@classmethod
def for_user(cls, user):
assert user.is_authenticated(), "user must be authenticated"
user_state, _ = cls.objects.get_or_create(user=user)
return user_state
def get(self, key):
return self.data.get(key)
def set(self, key, value):
self.data[key] = value
self.save()
class ActivityState(models.Model):
"""
this stores the overall state of a particular user doing a particular
activity across all sessions of that activity.
"""
user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
activity_key = models.CharField(max_length=300)
activity_class_path = models.CharField(max_length=300)
# how many sessions have been completed by this user
completed_count = models.IntegerField(default=0)
data = jsonfield.JSONField(default=dict, blank=True)
class Meta:
unique_together = [("user", "activity_key")]
@property
def activity_class(self):
return load_path_attr(self.activity_class_path)
@property
def in_progress(self):
return next(iter(self.sessions.filter(completed=None)), None)
@property
def latest(self):
session, _ = self.sessions.get_or_create(completed=None)
return session
@property
def last_completed(self):
return self.sessions.filter(completed__isnull=False).order_by("-started").first()
@property
def all_sessions(self):
return self.sessions.order_by("started")
@classmethod
def state_for_user(cls, user, activity_key):
assert user.is_authenticated(), "user must be authenticated"
return cls.objects.filter(user=user, activity_key=activity_key).first()
@property
def progression(self):
if self.in_progress:
return "continue"
elif self.activity_class.repeatable:
return "repeat"
else:
return "completed"
class ActivitySessionState(models.Model):
"""
this stores the state of a particular session of a particular user
doing a particular activity.
"""
activity_state = models.ForeignKey(ActivityState, related_name="sessions", on_delete=models.CASCADE)
started = models.DateTimeField(default=timezone.now)
completed = models.DateTimeField(null=True) # NULL means in progress
data = jsonfield.JSONField(default=dict, blank=True)
class Meta:
unique_together = [("activity_state", "started")]
def mark_completed(self):
self.completed = timezone.now()
self.save()
self.activity_state.completed_count = models.F("completed_count") + 1
self.activity_state.save()
def activities_for_user(user):
activities = {
"available": [],
"inprogress": [],
"completed": [],
"repeatable": []
}
for key, activity_class_path in hookset.all_activities():
activity = load_path_attr(activity_class_path)
state = ActivityState.state_for_user(user, key)
        user_num_completions = ActivitySessionState.objects.filter(
            # ActivitySessionState has no direct user/activity_key fields; traverse the FK
            activity_state__user=user,
            activity_state__activity_key=key,
            completed__isnull=False
        ).count()
activity_entry = {
"activity_key": key,
"title": activity.title,
"description": activity.description,
"state": state,
"user_num_completions": user_num_completions,
"repeatable": activity.repeatable,
}
if state:
if state.in_progress:
activities["inprogress"].append(activity_entry)
elif activity.repeatable:
activities["repeatable"].append(activity_entry)
else:
activities["completed"].append(activity_entry)
else:
activities["available"].append(activity_entry)
return activities
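# Usage sketch (illustrative, not part of this module): a view could bucket the
# current user's activities for display.
#     buckets = activities_for_user(request.user)
#     buckets["available"]    # activities never started
#     buckets["inprogress"]   # activities with an open session
#     buckets["repeatable"]   # completed, but the activity class allows repeats
#     buckets["completed"]    # completed, non-repeatable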
|
pinax/pinax-lms-activities
|
pinax/lms/activities/models.py
|
Python
|
mit
| 4,304
|
import logging
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
import requests
from companies.models import Company
logger = logging.getLogger('jobs.management.commands')
class Command(BaseCommand):
help = 'Update currently listed companies'
def handle(self, *args, **options):
logger.info('Started updating currently listed companies')
companies = Company.objects.filter(is_index=False)
r = requests.get(settings.COMPANY_LIST_URL)
records = r.json()['records']
for record in records:
symbol = record['securitySymbol']
name = record['securityName']
listing_date = record['listingDate'].split()[0]
status = record['securityStatus']
try:
company = companies.get(symbol=symbol)
companies = companies.exclude(id=company.id)
except Company.DoesNotExist:
company = Company(symbol=symbol)
company.name = name
company.is_currently_listed = True
            company.is_suspended = (status == 'S')
company.listing_date = datetime.strptime(listing_date, '%Y-%m-%d').date()
company.save()
companies.update(is_currently_listed=False, is_suspended=False)
logger.info('Finished updating currently listed companies')
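# Record shape sketch (illustrative; field names inferred from the access above,
# values are placeholders):
#     {"securitySymbol": "ABC", "securityName": "ACME Corp.",
#      "listingDate": "1999-01-04 00:00:00", "securityStatus": "S"}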
|
rodxavier/open-pse-initiative
|
django_project/jobs/management/commands/update_listed_companies.py
|
Python
|
mit
| 1,437
|
# -*- coding: utf-8 -*-
# Copyright © 2013-2015 Damián Avila, Chris Warrick and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Implementation of compile_html based on nbconvert."""
from __future__ import unicode_literals, print_function
import io
import os
import sys
try:
import IPython
from IPython.nbconvert.exporters import HTMLExporter
if IPython.version_info[0] >= 3: # API changed with 3.0.0
from IPython import nbformat
current_nbformat = nbformat.current_nbformat
from IPython.kernel import kernelspec
else:
import IPython.nbformat.current as nbformat
current_nbformat = 'json'
kernelspec = None
from IPython.config import Config
flag = True
except ImportError:
flag = None
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, get_logger, STDERR_HANDLER
class CompileIPynb(PageCompiler):
"""Compile IPynb into HTML."""
name = "ipynb"
friendly_name = "Jupyter/IPython Notebook"
demote_headers = True
default_kernel = 'python2' if sys.version_info[0] == 2 else 'python3'
def set_site(self, site):
"""Set Nikola site."""
self.logger = get_logger('compile_ipynb', STDERR_HANDLER)
super(CompileIPynb, self).set_site(site)
def compile_html_string(self, source, is_two_file=True):
"""Export notebooks as HTML strings."""
if flag is None:
req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
HTMLExporter.default_template = 'basic'
c = Config(self.site.config['IPYNB_CONFIG'])
exportHtml = HTMLExporter(config=c)
with io.open(source, "r", encoding="utf8") as in_file:
nb_json = nbformat.read(in_file, current_nbformat)
(body, resources) = exportHtml.from_notebook_node(nb_json)
return body
def compile_html(self, source, dest, is_two_file=True):
"""Compile source file into HTML and save as dest."""
makedirs(os.path.dirname(dest))
with io.open(dest, "w+", encoding="utf8") as out_file:
out_file.write(self.compile_html_string(source, is_two_file))
def read_metadata(self, post, file_metadata_regexp=None, unslugify_titles=False, lang=None):
"""Read metadata directly from ipynb file.
        As ipynb files support arbitrary metadata as JSON, the metadata used by Nikola
        is assumed to be in the 'nikola' subfield.
"""
if flag is None:
req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
source = post.source_path
with io.open(source, "r", encoding="utf8") as in_file:
nb_json = nbformat.read(in_file, current_nbformat)
# Metadata might not exist in two-file posts or in hand-crafted
# .ipynb files.
return nb_json.get('metadata', {}).get('nikola', {})
def create_post(self, path, **kw):
"""Create a new post."""
if flag is None:
req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
content = kw.pop('content', None)
onefile = kw.pop('onefile', False)
kernel = kw.pop('ipython_kernel', None)
# is_page is not needed to create the file
kw.pop('is_page', False)
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if content.startswith("{"):
# imported .ipynb file, guaranteed to start with "{" because it’s JSON.
nb = nbformat.reads(content, current_nbformat)
else:
if IPython.version_info[0] >= 3:
nb = nbformat.v4.new_notebook()
nb["cells"] = [nbformat.v4.new_markdown_cell(content)]
else:
nb = nbformat.new_notebook()
nb["worksheets"] = [nbformat.new_worksheet(cells=[nbformat.new_text_cell('markdown', [content])])]
if kernelspec is not None:
if kernel is None:
kernel = self.default_kernel
self.logger.notice('No kernel specified, assuming "{0}".'.format(kernel))
IPYNB_KERNELS = {}
ksm = kernelspec.KernelSpecManager()
for k in ksm.find_kernel_specs():
IPYNB_KERNELS[k] = ksm.get_kernel_spec(k).to_dict()
IPYNB_KERNELS[k]['name'] = k
del IPYNB_KERNELS[k]['argv']
if kernel not in IPYNB_KERNELS:
                self.logger.error('Unknown kernel "{0}". Maybe you misspelled it?'.format(kernel))
self.logger.info("Available kernels: {0}".format(", ".join(sorted(IPYNB_KERNELS))))
raise Exception('Unknown kernel "{0}"'.format(kernel))
nb["metadata"]["kernelspec"] = IPYNB_KERNELS[kernel]
else:
# Older IPython versions don’t need kernelspecs.
pass
if onefile:
nb["metadata"]["nikola"] = metadata
with io.open(path, "w+", encoding="utf8") as fd:
if IPython.version_info[0] >= 3:
nbformat.write(nb, fd, 4)
else:
nbformat.write(nb, fd, 'ipynb')
|
masayuko/nikola
|
nikola/plugins/compile/ipynb.py
|
Python
|
mit
| 6,349
|
SUCCESS = 0
FAILURE = 1 # NOTE: click.abort() uses this
# for when tests are already running
ALREADY_RUNNING = 2
|
naphatkrit/easyci
|
easyci/exit_codes.py
|
Python
|
mit
| 115
|
"""Support for Waterfurnaces."""
from datetime import timedelta
import logging
import threading
import time
import voluptuous as vol
from waterfurnace.waterfurnace import WaterFurnace, WFCredentialError, WFException
from homeassistant.components import persistent_notification
from homeassistant.const import (
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
DOMAIN = "waterfurnace"
UPDATE_TOPIC = f"{DOMAIN}_update"
SCAN_INTERVAL = timedelta(seconds=10)
ERROR_INTERVAL = timedelta(seconds=300)
MAX_FAILS = 10
NOTIFICATION_ID = "waterfurnace_website_notification"
NOTIFICATION_TITLE = "WaterFurnace website status"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass: HomeAssistant, base_config: ConfigType) -> bool:
"""Set up waterfurnace platform."""
config = base_config[DOMAIN]
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
wfconn = WaterFurnace(username, password)
# NOTE(sdague): login will throw an exception if this doesn't
# work, which will abort the setup.
try:
wfconn.login()
except WFCredentialError:
_LOGGER.error("Invalid credentials for waterfurnace login")
return False
hass.data[DOMAIN] = WaterFurnaceData(hass, wfconn)
hass.data[DOMAIN].start()
discovery.load_platform(hass, Platform.SENSOR, DOMAIN, {}, config)
return True
class WaterFurnaceData(threading.Thread):
"""WaterFurnace Data collector.
This is implemented as a dedicated thread polling a websocket in a
    tight loop. The websocket will shut itself down from the server side if
a packet is not sent at least every 30 seconds. The reading is
cheap, the login is less cheap, so keeping this open and polling
on a very regular cadence is actually the least io intensive thing
to do.
"""
def __init__(self, hass, client):
"""Initialize the data object."""
super().__init__()
self.hass = hass
self.client = client
self.unit = self.client.gwid
self.data = None
self._shutdown = False
self._fails = 0
def _reconnect(self):
"""Reconnect on a failure."""
self._fails += 1
if self._fails > MAX_FAILS:
_LOGGER.error("Failed to refresh login credentials. Thread stopped")
persistent_notification.create(
self.hass,
"Error:<br/>Connection to waterfurnace website failed "
"the maximum number of times. Thread has stopped",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
self._shutdown = True
return
# sleep first before the reconnect attempt
_LOGGER.debug("Sleeping for fail # %s", self._fails)
time.sleep(self._fails * ERROR_INTERVAL.total_seconds())
try:
self.client.login()
self.data = self.client.read()
except WFException:
_LOGGER.exception("Failed to reconnect attempt %s", self._fails)
else:
_LOGGER.debug("Reconnected to furnace")
self._fails = 0
def run(self):
"""Thread run loop."""
@callback
def register():
"""Connect to hass for shutdown."""
def shutdown(event):
"""Shutdown the thread."""
_LOGGER.debug("Signaled to shutdown")
self._shutdown = True
self.join()
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
self.hass.add_job(register)
# This does a tight loop in sending read calls to the
# websocket. That's a blocking call, which returns pretty
# quickly (1 second). It's important that we do this
# frequently though, because if we don't call the websocket at
# least every 30 seconds the server side closes the
# connection.
while True:
if self._shutdown:
_LOGGER.debug("Graceful shutdown")
return
try:
self.data = self.client.read()
except WFException:
# WFExceptions are things the WF library understands
# that pretty much can all be solved by logging in and
# back out again.
_LOGGER.exception("Failed to read data, attempting to recover")
self._reconnect()
else:
self.hass.helpers.dispatcher.dispatcher_send(UPDATE_TOPIC)
time.sleep(SCAN_INTERVAL.total_seconds())
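# Configuration sketch (derived from CONFIG_SCHEMA above; values are
# placeholders, not taken from the component docs):
#     waterfurnace:
#       username: someone@example.com
#       password: !secret waterfurnace_password
# setup() logs in, starts the WaterFurnaceData polling thread, and loads the
# sensor platform via discovery.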
|
rohitranjan1991/home-assistant
|
homeassistant/components/waterfurnace/__init__.py
|
Python
|
mit
| 5,047
|
LONG_HORN = 750
LONG_HORN_SPACE = LONG_HORN/2
SHORT_HORN = 400
SHORT_HORN_SPACE = SHORT_HORN/2
sequences = {
'CreekFleet': {
# Three long horns immediately (three minutes to start)
0: 'LLL',
# Two long horns a minute later (two minutes to start)
60000: 'LL',
# One short, three long horns 30s later (one minute 30s to start)
90000: 'LSSS',
# One long horn 30s later (one minute to start)
120000: 'L',
# Three short horns 30s later (30s to start)
150000: 'SSS',
160000: 'SS', # Two short horns 10s later (20s to start)
170000: 'S', # One short horn 10s later (10s to start)
175000: 'S', # One short horn 5s later (5s to start)
176000: 'S', # One short horn 1s later (4s to start)
177000: 'S', # One short horn 1s later (3s to start)
178000: 'S', # One short horn 1s later (2s to start)
179000: 'S', # One short horn 1s later (1s to start)
180000: 'L' # One long horn 1s later (START!)
},
'ISAF': {
# One short horn immediately (five minutes to start)
0: 'S',
# One short horn a minute later (four minutes to start)
60000: 'S',
# One long horn 3m later (one minute to start)
240000: 'L',
# One short horn 1m later (START!)
300000: 'S'
}
}
with open('firmware\\CreekFleet_Timer\\src\\horn.h', 'w') as f:
for option, sequence in sequences.items():
print(option)
HORN_TIMES = []
HORN_COMMANDS = []
for horn, blasts in sequence.items():
stepTime = horn
for blast in blasts:
HORN_TIMES.append(stepTime)
HORN_COMMANDS.append(True)
if blast == 'L':
stepTime += LONG_HORN
HORN_TIMES.append(stepTime)
HORN_COMMANDS.append(False)
stepTime += LONG_HORN_SPACE
if blast == 'S':
stepTime += SHORT_HORN
HORN_TIMES.append(stepTime)
HORN_COMMANDS.append(False)
stepTime += SHORT_HORN_SPACE
f.write(
f'uint8_t {option.upper()}_NUM_HORNS = {len(HORN_TIMES)};\n')
f.write(f'uint64_t {option.upper()}_HORN_TIMES[] = {{\n')
f.write(
',\n'.join([f'\t{time:.0f}' for time in HORN_TIMES])
+ '\n')
f.write(f'}};\nbool {option.upper()}_HORN_COMMANDS[] = {{\n')
f.write(
',\n'.join(
[f'\t{("false","true")[command]}' for command in HORN_COMMANDS])
+ '\n')
f.write('};\n')
print(list(zip(HORN_TIMES, HORN_COMMANDS)))
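# Output sketch (assuming the ISAF sequence and the horn lengths above): each
# blast contributes an on/off pair, so horn.h would contain something like
#     uint8_t ISAF_NUM_HORNS = 8;
#     uint64_t ISAF_HORN_TIMES[] = { 0, 400, 60000, 60400, 240000, 240750, 300000, 300400 };
#     bool ISAF_HORN_COMMANDS[] = { true, false, true, false, true, false, true, false };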
|
agmlego/Creekfleet_Timer
|
firmware/horn_sequence.py
|
Python
|
mit
| 2,928
|
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from ...utils import update_sentry_404s
class Command(BaseCommand):
def handle(self, *args, **kwargs):
update_sentry_404s()
|
DjangoAdminHackers/django-link-report
|
link_report/management/commands/update_sentry_404s.py
|
Python
|
mit
| 232
|
import ReviewHelper
import pandas as pd
df = ReviewHelper.get_pandas_data_frame_created_from_bibtex_file()
# find problematic ones
df[df.metaDatasetsUsed.isnull()]
list1 = df.metaDatasetsUsed.str.split(",").tolist()
df1 = pd.DataFrame(list1)
for i in range(df1.columns.size):
df1[i] = df1[i].str.strip()
stacked = df1.stack()
stacked_value_counts = stacked.value_counts()
greater_than = stacked_value_counts[stacked_value_counts > 3]
table_content_inside = ""
list_ids_dataset_names = ["KDD99", "NSL-KDD", "DARPA", "Kyoto", "ISCX"]
for dataset_name in greater_than.index:
    dataset_count = greater_than[dataset_name]
    dataset_name_in_table = dataset_name
if(dataset_name in list_ids_dataset_names):
dataset_name_in_table = "\\rowcolor{Gray}\n" + dataset_name + "* "
line = "{dataset_name} & {dataset_count} \\\\ \n".format(
dataset_name = dataset_name_in_table
,dataset_count = dataset_count
)
table_content_inside = table_content_inside + line
table_content_start = """
\\begin{table}[!ht]
\\centering
\\caption{ \\textbf{Most used Datasets}. * denotes IDS datasets. Datasets that are used fewer than three times are not included.}
\\label{table-metaDatasetsUsed}
\\begin{tabular}{ll}
\\toprule
\\textbf{Dataset Name } & \\textbf{Article Count} \\\\
\\midrule
"""
table_content_end = """
\\bottomrule
\\end{tabular}
\\end{table}
"""
table_content_full = table_content_start + table_content_inside + table_content_end
#print table_content_full
filename = "../latex/table-metaDatasetsUsed.tex"
target = open(filename, 'w')
target.write(table_content_full)
target.close()
|
ati-ozgur/KDD99ReviewArticle
|
HelperCodes/create_table_metaDatasetsUsed.py
|
Python
|
mit
| 1,724
|
import abjad
from abjad.tools import abctools
class TimespanSpecifier(abctools.AbjadValueObject):
### CLASS VARIABLES ###
__slots__ = (
'_forbid_fusing',
'_forbid_splitting',
'_minimum_duration',
)
### INITIALIZER ###
def __init__(
self,
forbid_fusing=None,
forbid_splitting=None,
minimum_duration=None,
):
if forbid_fusing is not None:
forbid_fusing = bool(forbid_fusing)
self._forbid_fusing = forbid_fusing
if forbid_splitting is not None:
forbid_splitting = bool(forbid_splitting)
self._forbid_splitting = forbid_splitting
if minimum_duration is not None:
minimum_duration = abjad.Duration(minimum_duration)
self._minimum_duration = minimum_duration
### PUBLIC PROPERTIES ###
@property
def forbid_fusing(self):
return self._forbid_fusing
@property
def forbid_splitting(self):
return self._forbid_splitting
@property
def minimum_duration(self):
return self._minimum_duration
|
josiah-wolf-oberholtzer/consort
|
consort/tools/TimespanSpecifier.py
|
Python
|
mit
| 1,111
|
import os  # used by PaxTree_6.loadData and PaxRatLabels.curate below
import re
from collections import defaultdict, Counter
from rdflib import Graph  # used by PaxLabels._makeIriLookup below (assumed import path)
from ttlser import natsort
from pyontutils.utils import getSourceLine  # used in PaxLabels.fixes_prov (assumed import path)
from pyontutils.core import LabelsBase, Collector, Source, resSource, ParcOnt
from pyontutils.core import makePrefixes
from pyontutils.config import auth
from pyontutils.namespaces import nsExact
from pyontutils.namespaces import NIFRID, ilx, ilxtr, TEMP
from pyontutils.namespaces import NCBITaxon, UBERON
from pyontutils.namespaces import PAXMUS, PAXRAT, paxmusver, paxratver
from pyontutils.namespaces import rdf, rdfs, owl
from pyontutils.combinators import annotations
from nifstd_tools.parcellation import log
from nifstd_tools.parcellation import Atlas, Label, LabelRoot, LocalSource, parcCore
from nifstd_tools.parcellation import RegionRoot, RegionsBase
log = log.getChild('pax')
class DupeRecord:
def __init__(self, alt_abbrevs=tuple(), structures=tuple(), figures=None, artiris=tuple()):
self.alt_abbrevs = alt_abbrevs
self.structures = structures
self.artiris = artiris
class Artifacts(Collector):
collects = Atlas
class PaxMouseAt(Atlas):
""" Any atlas artifact with Paxinos as an author for the adult rat. """
iri = ilx['paxinos/uris/mouse'] # ilxtr.paxinosMouseAtlas
class_label = 'Paxinos Mouse Atlas'
PaxMouseAtlas = Atlas(iri=PaxMouseAt.iri,
species=NCBITaxon['10090'],
devstage=UBERON['0000113'], # TODO this is 'Mature' which may not match... RnorDv:0000015 >10 weeks...
region=UBERON['0000955'],
)
PaxMouse2 = PaxMouseAt(iri=paxmusver['2'], # ilxtr.paxm2,
label='The Mouse Brain in Stereotaxic Coordinates 2nd Edition',
synonyms=('Paxinos Mouse 2nd',),
abbrevs=tuple(),
shortname='PAXMOUSE2', # TODO upper for atlas lower for label?
copyrighted='2001',
version='2nd Edition', # FIXME ??? delux edition??? what is this
citation='???????',)
PaxMouse3 = PaxMouseAt(iri=paxmusver['3'], # ilxtr.paxm3,
label='The Mouse Brain in Stereotaxic Coordinates 3rd Edition',
synonyms=('Paxinos Mouse 3rd',),
abbrevs=tuple(),
shortname='PAXMOUSE3', # TODO upper for atlas lower for label?
copyrighted='2008',
version='3rd Edition',
citation='???????',)
PaxMouse4 = PaxMouseAt(iri=paxmusver['4'], # ilxtr.paxm4,
label='The Mouse Brain in Stereotaxic Coordinates 4th Edition',
synonyms=('Paxinos Mouse 4th',),
abbrevs=tuple(),
shortname='PAXMOUSE4', # TODO upper for atlas lower for label?
copyrighted='2012',
version='4th Edition',
citation='???????',)
class PaxRatAt(Atlas):
""" Any atlas artifact with Paxinos as an author for the adult rat. """
iri = ilx['paxinos/uris/rat'] # ilxtr.paxinosRatAtlas
class_label = 'Paxinos Rat Atlas'
PaxRatAtlas = Atlas(iri=PaxRatAt.iri,
species=NCBITaxon['10116'],
devstage=UBERON['0000113'], # TODO this is 'Mature' which may not match... RnorDv:0000015 >10 weeks...
region=UBERON['0000955'],
citation=('Paxinos, George, Charles RR Watson, and Piers C. Emson. '
'"AChE-stained horizontal sections of the rat brain '
'in stereotaxic coordinates." Journal of neuroscience '
'methods 3, no. 2 (1980): 129-149.'),)
PaxRat4 = PaxRatAt(iri=ilx['paxinos/uris/rat/versions/4'], # ilxtr.paxr4,
label='The Rat Brain in Stereotaxic Coordinates 4th Edition',
synonyms=('Paxinos Rat 4th',),
abbrevs=tuple(),
shortname='PAXRAT4', # TODO upper for atlas lower for label?
copyrighted='1998',
version='4th Edition',)
PaxRat6 = PaxRatAt(iri=ilx['paxinos/uris/rat/versions/6'], # ilxtr.paxr6,
label='The Rat Brain in Stereotaxic Coordinates 6th Edition',
synonyms=('Paxinos Rat 6th',),
abbrevs=tuple(),
shortname='PAXRAT6', # TODO upper for atlas lower for label?
copyrighted='2007',
version='6th Edition',)
PaxRat7 = PaxRatAt(iri=ilx['paxinos/uris/rat/versions/7'], # ilxtr.paxr7,
label='The Rat Brain in Stereotaxic Coordinates 7th Edition',
synonyms=('Paxinos Rat 7th',
'Paxinos and Watson\'s The Rat Brain in Stereotaxic Coordinates 7th Edition', # branding >_<
),
abbrevs=tuple(),
shortname='PAXRAT7', # TODO upper for atlas lower for label?
copyrighted='2014',
version='7th Edition',)
class PaxSr_6(resSource):
sourceFile = auth.get_path('resources') / 'paxinos09names.txt'
artifact = Artifacts.PaxRat6
@classmethod
def loadData(cls):
with open(cls.source, 'rt') as f:
lines = [l.rsplit('#')[0].strip() for l in f.readlines() if not l.startswith('#')]
return [l.rsplit(' ', 1) for l in lines]
@classmethod
def processData(cls):
structRecs = []
out = {}
for structure, abrv in cls.raw:
structRecs.append((abrv, structure))
if abrv in out:
out[abrv][0].append(structure)
else:
out[abrv] = ([structure], ())
return structRecs, out
@classmethod
def validate(cls, structRecs, out):
print(Counter(_[0] for _ in structRecs).most_common()[:5])
print(Counter(_[1] for _ in structRecs).most_common()[:5])
assert len(structRecs) == len([s for sl, _ in out.values() for s in sl]), 'There are non-unique abbreviations'
errata = {}
return out, errata
class PaxSrAr(resSource):
artifact = None
@classmethod
def parseData(cls):
a, b = cls.raw.split('List of Structures')
if not a:
los, loa = b.split('List of Abbreviations')
else:
los = b
_, loa = a.split('List of Abbreviations')
sr = []
for l in los.split('\n'):
if l and not l[0] == ';':
if ';' in l:
l, *comment = l.split(';')
l = l.strip()
print(l, comment)
#asdf = l.rsplit(' ', 1)
#print(asdf)
struct, abbrev = l.rsplit(' ', 1)
sr.append((abbrev, struct))
ar = []
for l in loa.split('\n'):
if l and not l[0] == ';':
if ';' in l:
l, *comment = l.split(';')
l = l.strip()
print(l, comment)
#asdf = l.rsplit(' ', 1)
#print(asdf)
abbrev, rest = l.split(' ', 1)
parts = rest.split(' ')
#print(parts)
for i, pr in enumerate(parts[::-1]):
#print(i, pr)
z = pr[0].isdigit()
if not z or i > 0 and z and pr[-1] != ',':
break
struct = ' '.join(parts[:-i])
figs = tuple(tuple(int(_) for _ in p.split('-'))
if '-' in p
else (tuple(f'{nl[:-1]}{l}'
for nl, *ls in p.split(',')
for l in (nl[-1], *ls))
if ',' in p or p[-1].isalpha()
else int(p))
for p in (_.rstrip(',') for _ in parts[-i:]))
figs = tuple(f for f in figs if f) # zero marks abbrevs in index that are not in figures
#print(struct)
ar.append((abbrev, struct, figs))
return sr, ar
@classmethod
def processData(cls):
sr, ar = cls.parseData()
out = {}
achild = {}
for a, s, f in ar:
if ', layer 1' in s or s.endswith(' layer 1'): # DTT1 ends in ' layer 1' without a comma
achild[a[:-1]] = a
continue # remove the precomposed, we will deal with them systematically
if a not in out:
out[a] = ([s], f)
else:
if s not in out[a][0]:
print(f'Found new label from ar for {a}:\n{s}\n{out[a][0]}')
out[a][0].append(s)
schild = {}
for a, s in sr:
if ', layer 1' in s or s.endswith(' layer 1'):
schild[a[:-1]] = a
continue # remove the precomposed, we will deal with them systematically
if a not in out:
out[a] = ([s], tuple())
else:
if s not in out[a][0]:
print(f'Found new label from sr for {a}:\n{s}\n{out[a][0]}')
out[a][0].append(s)
#raise TypeError(f'Mismatched labels on {a}: {s} {out[a][0]}')
return sr, ar, out, achild, schild
@classmethod
def validate(cls, sr, ar, out, achild, schild):
def missing(a, b):
am = a - b
bm = b - a
return am, bm
sabs = set(_[0] for _ in sr)
aabs = set(_[0] for _ in ar)
ssts = set(_[1] for _ in sr)
asts = set(_[1] for _ in ar)
ar2 = set(_[:2] for _ in ar)
aam, sam = missing(aabs, sabs)
asm, ssm = missing(asts, ssts)
ar2m, sr2m = missing(ar2, set(sr))
print('OK to skip')
print(sorted(aam))
print('Need to be created')
print(sorted(sam))
print()
print(sorted(asm))
print()
print(sorted(ssm))
print()
#print(sorted(ar2m))
#print()
#print(sorted(sr2m))
#print()
assert all(s in achild for s in schild), f'somehow the kids dont match {achild} {schild}\n' + str(sorted(set(a) - set(s) | set(s) - set(a)
for a, s in ((tuple(sorted(achild.items())),
tuple(sorted(schild.items()))),)))
for k, (structs, figs) in out.items():
for struct in structs:
assert not re.match('\d+-\d+', struct) and not re.match('\d+$', struct), f'bad struct {struct} in {k}'
errata = {'nodes with layers':achild}
return out, errata
class PaxSrAr_4(PaxSrAr):
sourceFile = auth.get_path('resources') / 'pax-4th-ed-indexes.txt'
artifact = Artifacts.PaxRat4
class PaxSrAr_6(PaxSrAr):
sourceFile = auth.get_path('resources') / 'pax-6th-ed-indexes.txt'
artifact = Artifacts.PaxRat6
class PaxMSrAr_2(PaxSrAr):
sourceFile = auth.get_path('resources') / 'paxm-2nd-ed-indexes.txt'
artifact = Artifacts.PaxMouse2
class PaxMSrAr_3(PaxSrAr):
sourceFile = auth.get_path('resources') / 'paxm-3rd-ed-indexes.txt'
artifact = Artifacts.PaxMouse3
class PaxTree_6(Source):
source = '~/ni/dev/nifstd/paxinos/tree.txt'
artifact = Artifacts.PaxRat6
@classmethod
def loadData(cls):
with open(os.path.expanduser(cls.source), 'rt') as f:
return [l for l in f.read().split('\n') if l]
@classmethod
def processData(cls):
out = {}
recs = []
parent_stack = [None]
old_depth = 0
layers = {}
for l in cls.raw:
depth, abbrev, _, name = l.split(' ', 3)
depth = len(depth)
if old_depth < depth: # don't change
parent = parent_stack[-1]
parent_stack.append(abbrev)
old_depth = depth
elif old_depth == depth:
if len(parent_stack) - 1 > depth:
parent_stack.pop()
parent = parent_stack[-1]
parent_stack.append(abbrev)
elif old_depth > depth: # bump back
for _ in range(old_depth - depth + 1):
parent_stack.pop()
parent = parent_stack[-1]
parent_stack.append(abbrev)
old_depth = depth
struct = None if name == '-------' else name
o = (depth, abbrev, struct, parent)
if '-' in abbrev:
# remove the precomposed, we will deal with them systematically
maybe_parent, rest = abbrev.split('-', 1)
if rest.isdigit() or rest == '1a' or rest == '1b': # Pir1a Pir1b
if parent == 'Unknown': # XXX special cases
if maybe_parent == 'Pi': # i think this was probably caused by an ocr error from Pir3 -> Pi3
continue
assert maybe_parent == parent, f'you fall into a trap {maybe_parent} {parent}'
if parent not in layers:
layers[parent] = []
layers[parent].append((layer, o)) # FIXME where does layer come from here?
# I think this comes from the previous iteration of the loop?!
elif struct is not None and ', layer 1' in struct:
# remove the precomposed, we will deal with them systematically
parent_, layer = abbrev[:-1], abbrev[-1]
if parent_ == 'CxA' and parent == 'Amy': # XXX special cases
parent = 'CxA'
elif parent == 'Unknown':
if parent_ == 'LOT':
parent = 'LOT'
elif parent_ == 'Tu':
parent = 'Tu'
assert parent_ == parent, f'wrong turn friend {parent_} {parent}'
if parent not in layers:
layers[parent] = []
layers[parent].append((layer, o))
else:
recs.append(o)
out[abbrev] = ([struct], (), parent)
errata = {'nodes with layers':layers}
return recs, out, errata
@classmethod
def validate(cls, trecs, tr, errata):
print(Counter(_[1] for _ in trecs).most_common()[:5])
        # expected most-common duplicates: ('CxA1', 2), ('Tu1', 2), ('LOT1', 2), ('ECIC3', 2)
assert len(tr) == len(trecs), 'Abbreviations in tr are not unique!'
return tr, errata
class PaxFix4(LocalSource):
artifact = Artifacts.PaxRat4
_data = ({
# 1-6b are listed in fig 19 of 4e, no 3/4, 5a, or 5b
'1':(['layer 1 of cortex'], tuple()),
'1a':(['layer 1a of cortex'], tuple()),
'1b':(['layer 1b of cortex'], tuple()),
'2':(['layer 2 of cortex'], tuple()),
'3':(['layer 3 of cortex'], tuple()),
'3/4':(['layer 3/4 of cortex'], tuple()),
'4':(['layer 4 of cortex'], tuple()),
'5':(['layer 5 of cortex'], tuple()),
'5a':(['layer 5a of cortex'], tuple()),
'5b':(['layer 5b of cortex'], tuple()),
'6':(['layer 6 of cortex'], tuple()),
'6a':(['layer 6a of cortex'], tuple()),
'6b':(['layer 6b of cortex'], tuple()),
}, {})
class PaxFix6(LocalSource):
artifact = Artifacts.PaxRat6
_data = ({
'1':(['layer 1 of cortex'], tuple()),
'1a':(['layer 1a of cortex'], (8,)),
'1b':(['layer 1b of cortex'], (8,)),
'2':(['layer 2 of cortex'], tuple()),
'3':(['layer 3 of cortex'], tuple()),
'3/4':(['layer 3/4 of cortex'], (94,)),
'4':(['layer 4 of cortex'], tuple()),
'5':(['layer 5 of cortex'], tuple()),
'5a':(['layer 5a of cortex'], (52, 94)),
'5b':(['layer 5b of cortex'], tuple()),
'6':(['layer 6 of cortex'], tuple()),
'6a':(['layer 6a of cortex'], tuple()),
'6b':(['layer 6b of cortex'], tuple()),
}, {})
class PaxFix(LocalSource):
_data = ({
'1':(['layer 1'], tuple()),
'1a':(['layer 1a'], (8,)),
'1b':(['layer 1b'], (8,)),
'2':(['layer 2'], tuple()),
'3':(['layer 3'], tuple()),
'3/4':(['layer 3/4'], (94,)),
'4':(['layer 4'], tuple()),
'5':(['layer 5'], tuple()),
'5a':(['layer 5a'], (52, 94)),
'5b':(['layer 5b'], tuple()),
'6':(['layer 6'], tuple()),
'6a':(['layer 6a'], tuple()),
'6b':(['layer 6b'], tuple()),
}, {})
class PaxMFix(LocalSource):
_data = ({}, {})
class PaxLabels(LabelsBase):
""" Base class for processing paxinos indexes. """
__pythonOnly = True
path = 'ttl/generated/parcellation/'
imports = parcCore,
_fixes = []
_dupes = {}
_merge = {}
@property
def fixes_abbrevs(self):
fixes_abbrevs = set()
for f in self._fixes:
fixes_abbrevs.add(f[0])
for dupe in self._dupes.values():
fixes_abbrevs.add(dupe.alt_abbrevs[0])
return fixes_abbrevs
@property
def fixes_prov(self):
_fixes_prov = {}
for f in self._fixes:
for l in f[1][0]:
_fixes_prov[l] = [ParcOnt.wasGeneratedBy.format(line=getSourceLine(self.__class__))] # FIXME per file
return _fixes_prov
@property
def dupes_structs(self):
ds = {'cerebellar lobules', 'cerebellar lobule'}
for dupe in self._dupes.values():
for struct in dupe.structures:
ds.add(struct)
return ds
@property
def fixes(self):
_, _, collisions, _ = self.records()
for a, (ss, f, arts) in self._fixes:
if (a, ss[0]) in collisions:
f.update(collisions[a, ss[1]]) # have to use 1 since we want "layer n" as the pref
yield a, ([], ss, f, arts)
def _prov(self, iri, abrv, struct, struct_prov, extras, alt_abbrevs, abbrev_prov):
# TODO asssert that any triple for as ap at is actually in the graph...
annotation_predicate = ilxtr.literalUsedBy
definition_predicate = ilxtr.isDefinedBy # TODO more like 'symbolization used in'
for abbrev in [abrv] + alt_abbrevs: # FIXME multiple annotations per triple...
t = iri, Label.propertyMapping['abbrevs'], abbrev
if t not in self._prov_dict:
self._prov_dict[t] = []
for s in [struct] + extras:
if (abbrev, s) in abbrev_prov:
for artifact in abbrev_prov[abbrev, s]:
if 'github' in artifact:
continue
else:
predicate = annotation_predicate
self._prov_dict[t].append((predicate, artifact))
if struct in struct_prov:
t = iri, Label.propertyMapping['label'], struct
if t not in self._prov_dict:
self._prov_dict[t] = []
for artifact in struct_prov[struct]:
if 'github' in artifact:
predicate = definition_predicate
else:
predicate = annotation_predicate
self._prov_dict[t].append((predicate, artifact))
for extra in extras:
t = iri, Label.propertyMapping['synonyms'], extra
if t not in self._prov_dict:
self._prov_dict[t] = []
for artifact in struct_prov[extra]:
if 'github' in artifact:
predicate = definition_predicate
else:
predicate = annotation_predicate
self._prov_dict[t].append((predicate, artifact))
def _makeIriLookup(self):
# FIXME need to validate that we didn't write the graph first...
g = Graph().parse(self._graph.filename, format='turtle')
ids = [s for s in g.subjects(rdf.type, owl.Class) if self.namespace in s]
index0 = Label.propertyMapping['abbrevs'],
index1 = Label.propertyMapping['label'], Label.propertyMapping['synonyms']
out = {}
for i in ids:
for p0 in index0:
for o0 in g.objects(i, p0):
for p1 in index1:
for o1 in g.objects(i, p1):
key = o0, o1
value = i
if key in out:
raise KeyError(f'Key {key} already in output!')
out[key] = value
return out
def _triples(self):
self._prov_dict = {}
combined_record, struct_prov, _, abbrev_prov = self.records()
for k, v in self.fixes_prov.items():
if k in struct_prov:
struct_prov[k].extend(v)
else:
struct_prov[k] = v
for i, (abrv, (alts, (structure, *extras), figures, artifacts)) in enumerate(
sorted(list(combined_record.items()) + list(self.fixes),
key=lambda d:natsort(d[1][1][0] if d[1][1][0] is not None else 'zzzzzzzzzzzzzzzzzzzz'))): # sort by structure not abrev
iri = self.namespace[str(i + 1)] # TODO load from existing
struct = structure if structure else 'zzzzzz'
self._prov(iri, abrv, struct, struct_prov, extras, alts, abbrev_prov)
yield from Label(labelRoot=self.root,
#ifail='i fail!', # this indeed does fail
label=struct,
altLabel=None,
synonyms=extras,
abbrevs=(abrv, *alts), # FIXME make sure to check that it is not a string
iri=iri, # FIXME error reporint if you try to put in abrv is vbad
#extra_triples = str(processed_figures), # TODO
)
processed_figures = figures # TODO these are handled in regions pass to PaxRegions
if figures:
for artifact in artifacts:
PaxRegion.addthing(iri, figures) # artifact is baked into figures
for t, pairs in self._prov_dict.items():
if pairs:
yield from annotations(pairs, *t)
def validate(self):
# check for duplicate labels
labels = list(self.graph.objects(None, rdfs.label))
assert len(labels) == len(set(labels)), f'There are classes with duplicate labels! {Counter(labels).most_common()[:5]}'
# check for unexpected duplicate abbreviations
abrevs = list(self.graph.objects(None, NIFRID.abbrev))
# remove expected numeric/layer/lobule duplicates
filt = [a for a in abrevs if not a.isdigit() and a.value not in ('6a', '6b')]
assert len(filt) == len(set(filt)), f'DUPES! {Counter(filt).most_common()[:5]}'
# check for abbreviations without corresponding structure ie 'zzzzzz'
syns = list(self.graph.objects(None, NIFRID.synonym))
for thing in labels + syns:
trips = [(s, o) for s in self.graph.subjects(None, thing) for p, o in self.graph.predicate_objects(s)]
assert 'zzzzzz' not in thing, f'{trips} has bad label/syn suggesting a problem with the source file'
return self
def records(self):
combined_record = {}
struct_prov = {}
collisions = {}
abbrev_prov = {}
merge = {**self._merge, **{v:k for k, v in self._merge.items()}}
fa = self.fixes_abbrevs
ds = self.dupes_structs
def do_struct_prov(structure, source=None, artiri=None):
if artiri is None:
artiri = source.artifact.iri
if structure not in struct_prov:
struct_prov[structure] = [artiri]
elif artiri not in struct_prov[structure]:
struct_prov[structure].append(artiri)
def do_abbrev_prov(abbrev, primary_struct, source=None, artiri=None, overwrite=False):
if artiri is None:
artiri = source.artifact.iri
if overwrite:
abbrev_prov[abbrev, primary_struct] = artiri if isinstance(artiri, list) else [artiri]
else:
if (abbrev, primary_struct) not in abbrev_prov:
abbrev_prov[abbrev, primary_struct] = [artiri]
elif artiri not in abbrev_prov[abbrev, primary_struct]:
abbrev_prov[abbrev, primary_struct].append(artiri) # include all the prov we can
for se in self.sources:
source, errata = se
for t in se.isVersionOf:
self.addTrip(*t)
for a, (ss, f, *_) in source.items(): # *_ eat the tree for now
# TODO deal with overlapping layer names here
if a in fa: # XXX this is now just for dupes...
if ss[0] in ds:
print('TODO', a, ss, f)
collisions[a, ss[0]] = {se.artifact.iri:f}
continue # skip the entries that we create manually TODO
do_abbrev_prov(a, ss[0], se)
for s in ss:
do_struct_prov(s, se)
if a in combined_record:
_, structures, figures, artifacts = combined_record[a]
if f:
assert (se.artifact.iri not in figures or
figures[se.artifact.iri] == f), f'>1 figures {a} {figures} {bool(f)}'
figures[se.artifact.iri] = f
for s in ss:
if s is not None and s not in structures:
structures.append(s)
if se.artifact.iri not in artifacts:
artifacts.append(se.artifact.iri)
elif a in merge and merge[a] in combined_record:
alt_abbrevs, structures, figures, artifacts = combined_record[merge[a]]
for struct in structures: # allow merge of terms with non exact matching but warn
if struct not in ss:
if ss: log.warning(f'adding structure {struct} in merge of {a}')
ss.append(struct)
for aa in alt_abbrevs:
do_abbrev_prov(aa, ss[0], se)
alt_abbrevs.append(a)
figures[se.artifact.iri] = f
if se.artifact.iri not in artifacts:
artifacts.append(se.artifact.iri)
else:
ss = [s for s in ss if s is not None]
alt_abbrevs = self._dupes[a].alt_abbrevs if a in self._dupes else []
for aa in alt_abbrevs:
for artiri in self._dupes[a].artiris: # TODO check if matches current source art iri?
do_abbrev_prov(aa, ss[0], artiri=artiri)
if ss: # skip terms without structures
combined_record[a] = alt_abbrevs, ss, {se.artifact.iri:f}, [se.artifact.iri]
if alt_abbrevs: # TODO will need this for some abbrevs too...
artiris = self._dupes[a].artiris
for s in self._dupes[a].structures:
if s not in ss:
ss.append(s)
for artiri in artiris:
artifacts = combined_record[a][-1]
if artiri not in artifacts:
artifacts.append(artiri)
do_struct_prov(s, artiri=artiri)
#abbrev_prov[a, ss[0]] = [se.artifact.iri] # FIXME overwritten?
do_abbrev_prov(a, ss[0], se)
for alt in alt_abbrevs:
if alt not in abbrev_prov:
for artiri in artiris:
do_abbrev_prov(alt, ss[0], artiri=artiri)
# TODO elif...
return combined_record, struct_prov, collisions, abbrev_prov
class PaxMouseLabels(PaxLabels):
""" Compilation of all labels used to name mouse brain regions
in atlases created using Paxinos and Franklin\'s methodology."""
# TODO FIXME align indexes where possible to paxrat???
filename = 'paxinos-mus-labels'
name = 'Paxinos & Franklin Mouse Parcellation Labels'
shortname = 'paxmus'
namespace = PAXMUS
prefixes = {**makePrefixes('NIFRID', 'ilxtr', 'prov', 'dcterms'),
'PAXMUS':str(PAXMUS),
'paxmusver':str(paxmusver),
}
sources = PaxMFix, PaxMSrAr_2, PaxMSrAr_3
root = LabelRoot(iri=nsExact(namespace), # PAXMUS['0'],
label='Paxinos mouse parcellation label root',
shortname=shortname,
definingArtifactsS=(Artifacts.PaxMouseAt.iri,),
)
_merge = {
'4/5Cb':'4&5Cb',
'5N':'Mo5',
'12N':'12',
'AngT':'Ang',
'ANS':'Acc',
'ASt':'AStr',
'hif':'hf',
'MnM':'MMn',
'MoDG':'Mol',
'och':'ox',
'PHA':'PH', # FIXME PH is reused in 3rd
'ST':'BST',
'STIA':'BSTIA',
'STLD':'BSTLD',
'STLI':'BSTLI',
'STLJ':'BSTLJ',
'STLP':'BSTLP',
'STLV':'BSTLV',
'STMA':'BSTMA',
'STMP':'BSTMP',
'STMPI':'BSTMPI',
'STMPL':'BSTMPL',
'STMPM':'BSTMPM',
'STMV':'BSTMV',
'STS':'BSTS',
}
class PaxRatLabels(PaxLabels):
""" Compilation of all labels used to name rat brain regions
in atlases created using Paxinos and Watson\'s methodology."""
filename = 'paxinos-rat-labels'
name = 'Paxinos & Watson Rat Parcellation Labels'
shortname = 'paxrat'
namespace = PAXRAT
prefixes = {**makePrefixes('NIFRID', 'ilxtr', 'prov', 'dcterms'),
'PAXRAT':str(PAXRAT),
'paxratver':str(paxratver),
}
# sources need to go in the order with which we want the labels to take precedence (ie in this case 6e > 4e)
sources = PaxFix, PaxSrAr_6, PaxSr_6, PaxSrAr_4, PaxFix6, PaxFix4 #, PaxTree_6() # tree has been successfully used for crossreferencing, additional terms need to be left out at the moment (see in_tree_not_in_six)
root = LabelRoot(iri=nsExact(namespace), # PAXRAT['0'],
label='Paxinos rat parcellation label root',
shortname=shortname,
#definingArtifactsS=None,#Artifacts.PaxRatAt.iri,
definingArtifactsS=(Artifacts.PaxRatAt.iri,),
)
_fixes = []
_dupes = {
# for 4e the numbers in the index are to the cranial nerve nuclei entries
'3N':DupeRecord(alt_abbrevs=['3'], structures=['oculomotor nucleus'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'4N':DupeRecord(alt_abbrevs=['4'], structures=['trochlear nucleus'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'6N':DupeRecord(alt_abbrevs=['6'], structures=['abducens nucleus'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'7N':DupeRecord(alt_abbrevs=['7'], structures=['facial nucleus'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'10N':DupeRecord(alt_abbrevs=['10'], structures=['dorsal motor nucleus of vagus'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
# FIXME need comments about the index entries
'1Cb':DupeRecord(alt_abbrevs=['1'], structures=['cerebellar lobule 1'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'2Cb':DupeRecord(alt_abbrevs=['2'], structures=['cerebellar lobule 2'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'2/3Cb':DupeRecord(alt_abbrevs=['2&3'], structures=['cerebellar lobules 2&3'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'3Cb':DupeRecord(alt_abbrevs=['3'], structures=['cerebellar lobule 3'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'4Cb':DupeRecord(alt_abbrevs=['4'], structures=['cerebellar lobule 4'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'4/5Cb':DupeRecord(alt_abbrevs=['4&5'], structures=['cerebellar lobules 4&5'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'5Cb':DupeRecord(alt_abbrevs=['5'], structures=['cerebellar lobule 5'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'6Cb':DupeRecord(alt_abbrevs=['6'], structures=['cerebellar lobule 6'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'6aCb':DupeRecord(alt_abbrevs=['6a'], structures=['cerebellar lobule 6a'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'6bCb':DupeRecord(alt_abbrevs=['6b'], structures=['cerebellar lobule 6b'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'6cCb':DupeRecord(alt_abbrevs=['6c'], structures=['cerebellar lobule 6c'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'7Cb':DupeRecord(alt_abbrevs=['7'], structures=['cerebellar lobule 7'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'8Cb':DupeRecord(alt_abbrevs=['8'], structures=['cerebellar lobule 8'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'9Cb':DupeRecord(alt_abbrevs=['9'], structures=['cerebellar lobule 9'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
'10Cb':DupeRecord(alt_abbrevs=['10'], structures=['cerebellar lobule 10'], figures={}, artiris=[Artifacts.PaxRat4.iri]),
}
_merge = { # abbrevs that have identical structure names
'5N':'Mo5',
'12N':'12',
'ANS':'Acc',
'ASt':'AStr',
'AngT':'Ang',
'MnM':'MMn',
'MoDG':'Mol',
'PDPO':'PDP',
'PTg':'PPTg',
'STIA':'BSTIA',
'STL':'BSTL',
'STLD':'BSTLD',
'STLI':'BSTLI',
'STLJ':'BSTLJ',
'STLP':'BSTLP',
'STLV':'BSTLV',
'STM':'BSTM',
'STMA':'BSTMA',
'STMP':'BSTMP',
'STMPI':'BSTMPI',
'STMPL':'BSTMPL',
'STMPM':'BSTMPM',
'STMV':'BSTMV',
'hif':'hf',
'och':'ox',
}
def curate(self):
fr, err4 = PaxSrAr_4()
sx, err6 = PaxSrAr_6()
sx2, _ = PaxSr_6()
tr, err6t = PaxTree_6()
sfr = set(fr)
ssx = set(sx)
ssx2 = set(sx2)
str_ = set(tr)
in_four_not_in_six = sfr - ssx
in_six_not_in_four = ssx - sfr
in_tree_not_in_six = str_ - ssx
in_six_not_in_tree = ssx - str_
in_six2_not_in_six = ssx2 - ssx
in_six_not_in_six2 = ssx - ssx2
print(len(in_four_not_in_six), len(in_six_not_in_four),
len(in_tree_not_in_six), len(in_six_not_in_tree),
len(in_six2_not_in_six), len(in_six_not_in_six2),
)
tr_struct_abrv = {}
for abrv, ((struct, *extra), _, parent) in tr.items():
tr_struct_abrv[struct] = abrv
if abrv in sx:
#print(abrv, struct, parent)
if struct and struct not in sx[abrv][0]:
print(f'Found new label from tr for {abrv}:\n{struct}\n{sx[abrv][0]}\n')
# can't run these for tr yet
#reduced = set(tr_struct_abrv.values())
#print(sorted(_ for _ in tr if _ not in reduced))
#assert len(tr_struct_abrv) == len(tr), 'mapping between abrvs and structs is not 1:1 for tr'
sx2_struct_abrv = {}
for abrv, ((struct, *extra), _) in sx2.items():
sx2_struct_abrv[struct] = abrv
if abrv in sx:
if struct and struct not in sx[abrv][0]:
print(f'Found new label from sx2 for {abrv}:\n{struct}\n{sx[abrv][0]}\n')
reduced = set(sx2_struct_abrv.values())
print(sorted(_ for _ in reduced if _ not in sx2)) # ah inconsistent scoping rules in class defs...
assert len(sx2_struct_abrv) == len(sx2), 'there is a duplicate struct'
sx_struct_abrv = {}
for abrv, ((struct, *extra), _) in sx.items():
sx_struct_abrv[struct] = abrv
reduced = set(sx_struct_abrv.values())
print(sorted(_ for _ in reduced if _ not in sx))
assert len(sx_struct_abrv) == len(sx), 'there is a duplicate struct'
# TODO test whether any of the tree members that were are going to exclude have children that we are going to include
names_match_not_abbervs = {}
tree_no_name = {_:tr[_] for _ in sorted(in_tree_not_in_six) if not tr[_][0][0]}
tree_with_name = {_:tr[_] for _ in sorted(in_tree_not_in_six) if tr[_][0][0]}
not_in_tree_with_figures = {_:sx[_] for _ in sorted(in_six_not_in_tree) if sx[_][-1]}
a = f'{"abv":<25} | {"structure name":<60} | parent abv\n' + '\n'.join(f'{k:<25} | {v[0][0]:<60} | {v[-1]}' for k, v in tree_with_name.items())
b = f'{"abv":<25} | {"structure name":<15} | parent abv\n' + '\n'.join(f'{k:<25} | {"":<15} | {v[-1]}' for k, v in tree_no_name.items())
c = f'abv | {"structure name":<60} | figures (figure ranges are tuples)\n' + '\n'.join(f'{k:<6} | {v[0][0]:<60} | {v[-1]}' for k, v in not_in_tree_with_figures.items())
with open(os.path.expanduser('~/ni/dev/nifstd/paxinos/tree-with-name.txt'), 'wt') as f: f.write(a)
with open(os.path.expanduser('~/ni/dev/nifstd/paxinos/tree-no-name.txt'), 'wt') as f: f.write(b)
with open(os.path.expanduser('~/ni/dev/nifstd/paxinos/not-in-tree-with-figures.txt'), 'wt') as f: f.write(c)
#match_name_not_abrev = set(v[0][0] for v in tree_with_name.values()) & set(v[0][0] for v in sx.values())
_match_name_not_abrev = {}
for a, (alts, (s, *extra), f, *_) in PaxRatLabels().records()[0].items():
if s not in _match_name_not_abrev:
_match_name_not_abrev[s] = [a]
elif a not in _match_name_not_abrev[s]:
_match_name_not_abrev[s].append(a)
match_name_not_abrev = {k:v for k, v in _match_name_not_abrev.items() if len(v) > 1}
abrv_match_not_name = {k:v[0] for k, v in PaxRatLabels().records()[0].items() if len(v[0]) > 1}
_ = [print(k, *v[0]) for k, v in PaxRatLabels().records()[0].items() if len(v[0]) > 1]
breakpoint()
#self.in_tree_not_in_six = in_tree_not_in_six # need for skipping things that were not actually named by paxinos
class PaxRecord:
# TODO collisions
def __init__(self, source, abbreviation, structure, artifacts,
figures=tuple(),
synonyms=tuple(),
altAbbrevs=tuple()):
self.source = source
self.abbreviation = abbreviation
self.structure = structure
self.artifacts = artifacts
def __iter__(self):
pass
def __hash__(self):
return hash(self.abbreviation)
class PaxRegion(RegionsBase):
__pythonOnly = True # TODO
path = 'ttl/generated/parcellation/'
filename = 'paxinos-rat-regions'
name = 'Paxinos & Watson Rat Parcellation Regions'
shortname = 'paxratr'
comment = ('Intersection between labels and atlases for all regions '
'delineated using Paxinos and Watson\'s methodology.')
prefixes = {**makePrefixes('NIFRID', 'ilxtr', 'prov', 'ILXREPLACE')}
# sources need to go in the order with which we want the labels to take precedence (ie in this case 6e > 4e)
#sources = PaxSrAr_6(), PaxSr_6(), PaxSrAr_4(), PaxTree_6() # tree has been successfully used for crossreferencing, additional terms need to be left out at the moment (see in_tree_not_in_six)
root = RegionRoot(iri=TEMP['FIXME'], # FIXME these should probably be EquivalentTo Parcellation Region HasLabel some label HasAtlas some atlas...
label='Paxinos rat parcellation region root',
shortname=shortname,
)
# atlas version
# label identifier
# figures
things = {}
@classmethod
def addthing(cls, thing, value):
cls.things[thing] = value
|
tgbugs/pyontutils
|
nifstd/nifstd_tools/parcellation/paxinos.py
|
Python
|
mit
| 40,653
|
INF = 2 ** 31 - 1  # assumed "empty room" sentinel; bfs/naivedfs below compare against INF
class WallsGate(object):
def dfs(self, rooms):
queue = [(i, j, 0) for i, rows in enumerate(rooms) for j, v in enumerate(rows) if not v]
while queue:
i, j, step = queue.pop()
if rooms[i][j] > step:
rooms[i][j] = step
for newi, newj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
if 0 <= newi < len(rooms) and 0 <= newj < len(rooms[0]) and step < rooms[newi][newj]:
queue.append((newi, newj, step + 1))
def bfs(self, rooms):
row=len(rooms)
col=len(rooms[0])
queue=[]
for i in xrange(row):
for j in xrange(col):
if rooms[i][j]==0:
queue.append(i*col+j)
while queue:
x=queue.pop(0)
i,j=x/col,x%col
for newi,newj in (i+1,j),(i-1,j),(i,j+1),(i,j-1):
if 0 <= newi < len(rooms) and 0 <= newj < len(rooms[0]) and rooms[newi][newj]==INF:
rooms[newi][newj]=rooms[i][j]+1
queue.append(newi*col+newj)
def naivedfs(self, rooms):
for i in xrange(len(rooms)):
for j in xrange(len(rooms[0])):
if rooms[i][j]==0:
self._dfsrev(rooms,i,j)
def _dfsrev(self,rooms,i,j):
for newi,newj in (i+1,j),(i-1,j),(i,j+1),(i,j-1):
if 0 <= newi < len(rooms) and 0 <= newj < len(rooms[0]) and rooms[newi][newj]<rooms[i][j]:
rooms[newi][newj]=rooms[i][j]+1
                self._dfsrev(rooms,newi,newj)
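# Usage sketch (illustrative, not from the original repo): INF marks an empty
# room, 0 a gate, -1 a wall; dfs() fills each empty room with its distance to
# the nearest gate.
if __name__ == '__main__':
    rooms = [
        [INF, -1, 0, INF],
        [INF, INF, INF, -1],
        [INF, -1, INF, -1],
        [0, -1, INF, INF],
    ]
    WallsGate().dfs(rooms)
    print(rooms)  # [[3, -1, 0, 1], [2, 2, 1, -1], [1, -1, 2, -1], [0, -1, 3, 4]]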
|
Tanych/CodeTracking
|
distance.py
|
Python
|
mit
| 1,572
|
import logging
from django.db.models import DateTimeField, Model, Manager
from django.db.models.query import QuerySet
from django.db.models.fields.related import \
OneToOneField, ManyToManyField, ManyToManyRel
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.core.exceptions import ObjectDoesNotExist
LOGGER = logging.getLogger(__name__)
def _unset_related_one_to_one(obj, field):
old_value = getattr(obj, field.column)
if old_value is not None:
LOGGER.debug(
'Setting %s.%s to None on object %s (old value: %s)',
obj._meta.model.__name__, field.column, obj.pk, old_value)
# Unset the fk field (e.g. Foo.baz_id)
setattr(obj, field.column, None)
# Unset the related object field (e.g. Foo.baz)
setattr(obj, field.name, None)
def _unset_related_many_to_many(obj, field):
manager = getattr(obj, field.name)
old_values = manager.values_list('pk', flat=True)
LOGGER.debug(
'Removing all objects from %s.%s on object %s (old values: %s)',
obj._meta.model.__name__, field.name, obj.pk,
', '.join(str(pk) for pk in old_values))
manager.remove(*manager.all())
def _unset_related_objects_relations(obj):
LOGGER.debug('Soft-deleting object %s %s',
obj._meta.model.__name__, obj.pk)
for field in obj._meta.get_fields():
field_type = type(field)
if field_type is OneToOneField:
_unset_related_one_to_one(obj, field)
elif field_type in (ManyToManyRel, ManyToManyField):
_unset_related_many_to_many(obj, field)
for related in obj._meta.get_all_related_objects():
# Unset related objects' relation
rel_name = related.get_accessor_name()
if related.one_to_one:
# Handle one-to-one relations.
try:
related_object = getattr(obj, rel_name)
except ObjectDoesNotExist:
pass
else:
_unset_related_one_to_one(related_object, related.field)
related_object.save()
else:
# Handle one-to-many and many-to-many relations.
related_objects = getattr(obj, rel_name)
if related_objects.count():
affected_objects_id = ', '.join(
str(pk) for pk in related_objects.values_list(
'pk', flat=True))
old_values = ', '.join(
str(val) for val in related_objects.values_list(
related.field.name, flat=True))
LOGGER.debug(
'Setting %s.%s to None on objects %s (old values: %s)',
related_objects.model.__name__, related.field.name,
affected_objects_id, old_values)
related_objects.update(**{related.field.name: None})
class SoftDeleteQuerySet(QuerySet):
"""This QuerySet subclass implements soft deletion of objects.
"""
def delete(self):
"""Soft delete all objects included in this queryset.
"""
for obj in self:
_unset_related_objects_relations(obj)
self.update(deleted=now())
def undelete(self):
"""Soft undelete all objects included in this queryset.
"""
objects = self.filter(deleted__isnull=False)
if objects.count():
LOGGER.debug(
'Soft undeleting %s objects: %s', self.model.__name__,
', '.join(str(pk)
for pk in objects.values_list('pk', flat=True)))
objects.update(deleted=None)
class SoftDeleteManager(Manager.from_queryset(SoftDeleteQuerySet)):
"""This Manager hides soft deleted objects by default,
and exposes methods to access them.
"""
def _get_base_queryset(self):
return super(SoftDeleteManager, self).get_queryset()
def get_queryset(self):
"""Return NOT DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=True)
def deleted(self):
"""Return DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=False)
def with_deleted(self):
"""Return ALL objects.
"""
return self._get_base_queryset()
class SoftDeleteModel(Model):
"""Simply inherit this class to enable soft deletion on a model.
"""
class Meta:
abstract = True
objects = SoftDeleteManager()
deleted = DateTimeField(verbose_name=_('deleted'), null=True, blank=True)
def delete(self):
"""Soft delete this object.
"""
_unset_related_objects_relations(self)
self.deleted = now()
self.save()
return self
def undelete(self):
"""Undelete this soft-deleted object.
"""
if self.deleted is not None:
LOGGER.debug('Soft-undeleting object %s %s',
self._meta.model.__name__, self.pk)
self.deleted = None
self.save()
return self
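# Illustrative usage sketch (not part of this module). It assumes a configured
# Django project with an app that defines a hypothetical model on top of
# SoftDeleteModel:
#
#     class Article(SoftDeleteModel):
#         title = models.CharField(max_length=100)
#
#     article = Article.objects.create(title='Hello')
#     article.delete()                # sets `deleted` and unsets relations
#     Article.objects.all()           # default manager hides soft-deleted rows
#     Article.objects.deleted()       # only soft-deleted rows
#     Article.objects.with_deleted()  # everything
#     article.undelete()              # clears the `deleted` timestamp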
|
pmuller/django-softdeletion
|
django_softdeletion/models.py
|
Python
|
mit
| 5,119
|
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
class NonAnonChat(LineReceiver):
def __init__(self, protocols):
self.users = {'john':'john', 'adastra':'adastra'}
self.userName = None
self.userLogged = False
self.protocols = protocols
def connectionMade(self):
self.sendLine('Your Username: ')
def connectionLost(self, reason):
for protocol in self.protocols:
if protocol != self:
protocol.sendLine('Connection Lost: %s '%(reason))
def lineReceived(self, line):
if self.userName == None:
if self.users.has_key(line):
self.userName = line
self.sendLine('Password: ')
else:
self.sendLine('Wrong Username')
elif self.userLogged == False:
if self.users[self.userName] == line:
self.userLogged = True
self.protocols.append(self)
else:
self.sendLine('Wrong Password')
elif self.userLogged == True:
for protocol in self.protocols:
if protocol != self:
protocol.sendLine('%s Said: %s ' %(self.userName, line))
class NonAnonFactory(Factory):
def __init__(self):
self.protocols = []
def buildProtocol(self, addr):
return NonAnonChat(self.protocols)
reactor.listenTCP(8000, NonAnonFactory())
reactor.run()
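# Illustrative note (not part of the original script): with the server running,
# one way to try it out is to open two terminals and connect with, e.g.,
# `telnet localhost 8000`, log in as john/john in one and adastra/adastra in the
# other; once both are authenticated, lines typed in one session are relayed to
# the other.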
|
coolhacks/python-hacks
|
examples/pyHacks/TwistedChat.py
|
Python
|
mit
| 1,269
|
# test_sampleParser.py
import os
from ..sampleParser import SampleParser
class TestSampleParser:
def setup(self):
self.folderName = os.path.join('.', 'tests', 'Export')
self.parser = SampleParser(self.folderName)
def test_getDirectoryFiles(self):
files = self._obtainDirectory()
assert len(files) > 0
def test_storeFileNamesByPatternInDictionary(self):
files = self._obtainDirectory()
assert len(files) > 0
for _file in files:
self.parser.storeFileNamesByPatternInDictionary(_file)
sampleDictionary = self.parser.getSampleDictionary()
assert len(sampleDictionary) == 4
print ("SampleParser: ", sampleDictionary)
# each item in the dictionary should have two samples for each sample type
        for samples in sampleDictionary.values():
            assert len(samples) == 2
def test_readFileIntoArray(self):
files = self._obtainDirectory()
assert len(files) > 0
assert len(self.parser.readFileIntoArray(files[0])) > 0
def _obtainDirectory(self):
return self.parser.getDirectoryFiles()
|
jmeline/wifi_signal_analysis
|
src/tests/test_sampleParser.py
|
Python
|
mit
| 1,146
|
# PyAutoGUI: Cross-platform GUI automation for human beings.
# BSD license
# Al Sweigart al@inventwithpython.com (Send me feedback & suggestions!)
"""
IMPORTANT NOTE!
To use this module on Mac OS X, you need the PyObjC module installed.
For Python 3, run:
sudo pip3 install pyobjc-core
sudo pip3 install pyobjc
For Python 2, run:
sudo pip install pyobjc-core
sudo pip install pyobjc
(There's some bug with their installer, so install pyobjc-core first or else
the install takes forever.)
To use this module on Linux, you need the Xlib module installed.
For Python 3, run:
sudo pip3 install python3-Xlib
For Python 2, run:
sudo pip install Xlib
To use this module on Windows, you do not need anything else.
You will need PIL/Pillow to use the screenshot features.
"""
from __future__ import absolute_import, division, print_function
__version__ = '0.9.33'
import collections
import sys
import time
KEY_NAMES = ['\t', '\n', '\r', ' ', '!', '"', '#', '$', '%', '&', "'", '(',
')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`',
'a', 'b', 'c', 'd', 'e','f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~',
'accept', 'add', 'alt', 'altleft', 'altright', 'apps', 'back', 'backspace',
'browserback', 'browserfavorites', 'browserforward', 'browserhome',
'browserrefresh', 'browsersearch', 'browserstop', 'capslock', 'clear',
'convert', 'ctrl', 'ctrlleft', 'ctrlright', 'decimal', 'del', 'delete',
'divide', 'down', 'end', 'enter', 'esc', 'escape', 'execute', 'f1', 'f10',
'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f2', 'f20',
'f21', 'f22', 'f23', 'f24', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',
'final', 'fn', 'hanguel', 'hangul', 'hanja', 'help', 'home', 'insert', 'junja',
'kana', 'kanji', 'launchapp1', 'launchapp2', 'launchmail',
'launchmediaselect', 'left', 'modechange', 'multiply', 'nexttrack',
'nonconvert', 'num0', 'num1', 'num2', 'num3', 'num4', 'num5', 'num6',
'num7', 'num8', 'num9', 'numlock', 'pagedown', 'pageup', 'pause', 'pgdn',
'pgup', 'playpause', 'prevtrack', 'print', 'printscreen', 'prntscrn',
'prtsc', 'prtscr', 'return', 'right', 'scrolllock', 'select', 'separator',
'shift', 'shiftleft', 'shiftright', 'sleep', 'stop', 'subtract', 'tab',
'up', 'volumedown', 'volumemute', 'volumeup', 'win', 'winleft', 'winright', 'yen',
'command', 'option', 'optionleft', 'optionright']
KEYBOARD_KEYS = KEY_NAMES # keeping old KEYBOARD_KEYS for backwards compatibility
def isShiftCharacter(character):
"""Returns True if the key character is uppercase or shifted."""
return character.isupper() or character in '~!@#$%^&*()_+{}|:"<>?'
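# For example (illustrative): isShiftCharacter('A') and isShiftCharacter('?')
# return True, while isShiftCharacter('a') and isShiftCharacter('5') return False.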
# The platformModule is where we reference the platform-specific functions.
if sys.platform.startswith('java'):
#from . import _pyautogui_java as platformModule
raise NotImplementedError('Jython is not yet supported by PyAutoGUI.')
elif sys.platform == 'darwin':
from . import _pyautogui_osx as platformModule
elif sys.platform == 'win32':
from . import _pyautogui_win as platformModule
else:
from . import _pyautogui_x11 as platformModule
# TODO: Having module-wide user-writable global variables is bad. It makes
# restructuring the code very difficult. For instance, what if we decide to
# move the mouse-related functions to a separate file (a submodule)? How would
# that file access this module's variables? It would probably lead to a circular
# import.
# In seconds. Any duration less than this is rounded to 0.0 to instantly move
# the mouse.
MINIMUM_DURATION = 0.1
# If sleep_amount is too short, time.sleep() will be a no-op and the mouse
# cursor moves there instantly.
# TODO: This value should vary with the platform. http://stackoverflow.com/q/1133857
MINIMUM_SLEEP = 0.05
PAUSE = 0.1 # The number of seconds to pause after EVERY public function call. Useful for debugging.
FAILSAFE = True
# General Functions
# =================
def getPointOnLine(x1, y1, x2, y2, n):
"""Returns the (x, y) tuple of the point that has progressed a proportion
n along the line defined by the two x, y coordinates.
Copied from pytweening module.
"""
x = ((x2 - x1) * n) + x1
y = ((y2 - y1) * n) + y1
return (x, y)
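# For example (illustrative): getPointOnLine(0, 0, 10, 20, 0.5) returns
# (5.0, 10.0), the midpoint of the line from (0, 0) to (10, 20).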
def linear(n):
"""Trivial linear tweening function.
Copied from pytweening module.
"""
if not 0.0 <= n <= 1.0:
raise ValueError('Argument must be between 0.0 and 1.0.')
return n
def _autoPause(pause, _pause):
if _pause:
if pause is not None:
time.sleep(pause)
elif PAUSE != 0:
time.sleep(PAUSE)
def _unpackXY(x, y):
"""If x is a sequence and y is None, returns x[0], y[0]. Else, returns x, y.
On functions that receive a pair of x,y coordinates, they can be passed as
separate arguments, or as a single two-element sequence.
"""
if isinstance(x, collections.Sequence):
if len(x) == 2:
if y is None:
x, y = x
else:
raise ValueError('When passing a sequence at the x argument, the y argument must not be passed (received {0}).'.format(repr(y)))
else:
raise ValueError('The supplied sequence must have exactly 2 elements ({0} were received).'.format(len(x)))
else:
pass
return x, y
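# For example (illustrative): _unpackXY(10, 20) returns (10, 20), and
# _unpackXY((10, 20), None) also returns (10, 20), so callers can pass a single
# two-element sequence instead of separate x and y arguments.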
def position(x=None, y=None):
"""Returns the current xy coordinates of the mouse cursor as a two-integer
tuple.
Args:
x (int, None, optional) - If not None, this argument overrides the x in
the return value.
y (int, None, optional) - If not None, this argument overrides the y in
the return value.
Returns:
(x, y) tuple of the current xy coordinates of the mouse cursor.
"""
posx, posy = platformModule._position()
posx = int(posx)
posy = int(posy)
if x is not None:
posx = int(x)
if y is not None:
posy = int(y)
return posx, posy
def size():
"""Returns the width and height of the screen as a two-integer tuple.
Returns:
(width, height) tuple of the screen size, in pixels.
"""
return platformModule._size()
def onScreen(x, y=None):
"""Returns whether the given xy coordinates are on the screen or not.
Args:
Either the arguments are two separate values, first arg for x and second
for y, or there is a single argument of a sequence with two values, the
first x and the second y.
Example: onScreen(x, y) or onScreen([x, y])
Returns:
bool: True if the xy coordinates are on the screen at its current
resolution, otherwise False.
"""
x, y = _unpackXY(x, y)
x = int(x)
y = int(y)
width, height = platformModule._size()
return 0 <= x < width and 0 <= y < height
# Mouse Functions
# ===============
def mouseDown(x=None, y=None, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs pressing a mouse button down (but not up).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
mouse down happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
mouse down happens. None by default.
button (str, int, optional): The mouse button pressed down. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3
"""
if button not in ('left', 'middle', 'right', 1, 2, 3):
raise ValueError("button argument must be one of ('left', 'middle', 'right', 1, 2, 3), not %s" % button)
_failSafeCheck()
x, y = _unpackXY(x, y)
_mouseMoveDrag('move', x, y, 0, 0, duration=0, tween=None)
x, y = platformModule._position() # TODO - this isn't right. We need to check the params.
if button == 1 or str(button).lower() == 'left':
platformModule._mouseDown(x, y, 'left')
elif button == 2 or str(button).lower() == 'middle':
platformModule._mouseDown(x, y, 'middle')
elif button == 3 or str(button).lower() == 'right':
platformModule._mouseDown(x, y, 'right')
_autoPause(pause, _pause)
def mouseUp(x=None, y=None, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs releasing a mouse button up (but not down beforehand).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
mouse up happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
mouse up happens. None by default.
button (str, int, optional): The mouse button released. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3
"""
if button not in ('left', 'middle', 'right', 1, 2, 3):
raise ValueError("button argument must be one of ('left', 'middle', 'right', 1, 2, 3), not %s" % button)
_failSafeCheck()
x, y = _unpackXY(x, y)
_mouseMoveDrag('move', x, y, 0, 0, duration=0, tween=None)
x, y = platformModule._position()
if button == 1 or str(button).lower() == 'left':
platformModule._mouseUp(x, y, 'left')
elif button == 2 or str(button).lower() == 'middle':
platformModule._mouseUp(x, y, 'middle')
elif button == 3 or str(button).lower() == 'right':
platformModule._mouseUp(x, y, 'right')
_autoPause(pause, _pause)
def click(x=None, y=None, clicks=1, interval=0.0, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs pressing a mouse button down and then immediately releasing it.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where
the click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
clicks (int, optional): The number of clicks to perform. 1 by default.
For example, passing 2 would do a doubleclick.
interval (float, optional): The number of seconds in between each click,
if the number of clicks is greater than 1. 0.0 by default, for no
pause in between clicks.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, 3
"""
if button not in ('left', 'middle', 'right', 1, 2, 3):
raise ValueError("button argument must be one of ('left', 'middle', 'right', 1, 2, 3)")
_failSafeCheck()
x, y = _unpackXY(x, y)
_mouseMoveDrag('move', x, y, 0, 0, duration=0, tween=None)
x, y = platformModule._position()
for i in range(clicks):
_failSafeCheck()
if button == 1 or str(button).lower() == 'left':
platformModule._click(x, y, 'left')
elif button == 2 or str(button).lower() == 'middle':
platformModule._click(x, y, 'middle')
elif button == 3 or str(button).lower() == 'right':
platformModule._click(x, y, 'right')
else:
# These mouse buttons for hor. and vert. scrolling only apply to x11:
platformModule._click(x, y, button)
time.sleep(interval)
_autoPause(pause, _pause)
def rightClick(x=None, y=None, duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs a right mouse button click.
This is a wrapper function for click('right', x, y).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None
"""
_failSafeCheck()
click(x, y, 1, 0.0, 'right', _pause=False)
_autoPause(pause, _pause)
def middleClick(x=None, y=None, duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs a middle mouse button click.
    This is a wrapper function for click('middle', x, y).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None
"""
_failSafeCheck()
click(x, y, 1, 0.0, 'middle', _pause=False)
_autoPause(pause, _pause)
def doubleClick(x=None, y=None, interval=0.0, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs a double click.
This is a wrapper function for click('left', x, y, 2, interval).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
interval (float, optional): The number of seconds in between each click,
if the number of clicks is greater than 1. 0.0 by default, for no
pause in between clicks.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, 3, 4,
5, 6, or 7
"""
_failSafeCheck()
click(x, y, 2, interval, button, _pause=False)
_autoPause(pause, _pause)
def tripleClick(x=None, y=None, interval=0.0, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs a triple click..
This is a wrapper function for click('left', x, y, 3, interval).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
interval (float, optional): The number of seconds in between each click,
if the number of clicks is greater than 1. 0.0 by default, for no
pause in between clicks.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, 3, 4,
5, 6, or 7
"""
_failSafeCheck()
click(x, y, 3, interval, button, _pause=False)
_autoPause(pause, _pause)
def scroll(clicks, x=None, y=None, pause=None, _pause=True):
"""Performs a scroll of the mouse scroll wheel.
Whether this is a vertical or horizontal scroll depends on the underlying
operating system.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
clicks (int, float): The amount of scrolling to perform.
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None
"""
_failSafeCheck()
if type(x) in (tuple, list):
x, y = x[0], x[1]
x, y = position(x, y)
platformModule._scroll(clicks, x, y)
_autoPause(pause, _pause)
def hscroll(clicks, x=None, y=None, pause=None, _pause=True):
"""Performs an explicitly horizontal scroll of the mouse scroll wheel,
if this is supported by the operating system. (Currently just Linux.)
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
clicks (int, float): The amount of scrolling to perform.
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None
"""
_failSafeCheck()
if type(x) in (tuple, list):
x, y = x[0], x[1]
x, y = position(x, y)
platformModule._hscroll(clicks, x, y)
_autoPause(pause, _pause)
def vscroll(clicks, x=None, y=None, pause=None, _pause=True):
"""Performs an explicitly vertical scroll of the mouse scroll wheel,
if this is supported by the operating system. (Currently just Linux.)
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
clicks (int, float): The amount of scrolling to perform.
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None
"""
_failSafeCheck()
if type(x) in (tuple, list):
x, y = x[0], x[1]
x, y = position(x, y)
platformModule._vscroll(clicks, x, y)
_autoPause(pause, _pause)
def moveTo(x=None, y=None, duration=0.0, tween=linear, pause=None, _pause=True):
"""Moves the mouse cursor to a point on the screen.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
Returns:
None
"""
x, y = _unpackXY(x, y)
_failSafeCheck()
_mouseMoveDrag('move', x, y, 0, 0, duration, tween)
_autoPause(pause, _pause)
def moveRel(xOffset=None, yOffset=None, duration=0.0, tween=linear, pause=None, _pause=True):
"""Moves the mouse cursor to a point on the screen, relative to its current
position.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
      xOffset (int, float, None, tuple, optional): How far left (for negative values) or
        right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset.
      yOffset (int, float, None, optional): How far up (for negative values) or
        down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
Returns:
None
"""
xOffset, yOffset = _unpackXY(xOffset, yOffset)
_failSafeCheck()
_mouseMoveDrag('move', None, None, xOffset, yOffset, duration, tween)
_autoPause(pause, _pause)
def dragTo(x=None, y=None, duration=0.0, tween=linear, button='left', pause=None, _pause=True):
"""Performs a mouse drag (mouse movement while a button is held down) to a
point on the screen.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
      x (int, float, None, tuple, optional): The x position on the screen where
        the drag ends. None by default. If tuple, this is used for x and y.
      y (int, float, None, optional): The y position on the screen where the
        drag ends. None by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
"""
_failSafeCheck()
if type(x) in (tuple, list):
x, y = x[0], x[1]
mouseDown(button=button, _pause=False)
_mouseMoveDrag('drag', x, y, 0, 0, duration, tween, button)
mouseUp(button=button, _pause=False)
_autoPause(pause, _pause)
def dragRel(xOffset=0, yOffset=0, duration=0.0, tween=linear, button='left', pause=None, _pause=True):
"""Performs a mouse drag (mouse movement while a button is held down) to a
point on the screen, relative to its current position.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
      xOffset (int, float, None, tuple, optional): How far left (for negative values) or
        right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset.
      yOffset (int, float, None, optional): How far up (for negative values) or
        down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
"""
if xOffset is None:
xOffset = 0
if yOffset is None:
yOffset = 0
if type(xOffset) in (tuple, list):
xOffset, yOffset = xOffset[0], xOffset[1]
if xOffset == 0 and yOffset == 0:
return # no-op case
_failSafeCheck()
mousex, mousey = platformModule._position()
mouseDown(button=button, _pause=False)
_mouseMoveDrag('drag', mousex, mousey, xOffset, yOffset, duration, tween, button)
mouseUp(button=button, _pause=False)
_autoPause(pause, _pause)
def _mouseMoveDrag(moveOrDrag, x, y, xOffset, yOffset, duration, tween, button=None):
"""Handles the actual move or drag event, since different platforms
implement them differently.
On Windows & Linux, a drag is a normal mouse move while a mouse button is
held down. On OS X, a distinct "drag" event must be used instead.
The code for moving and dragging the mouse is similar, so this function
handles both. Users should call the moveTo() or dragTo() functions instead
of calling _mouseMoveDrag().
Args:
moveOrDrag (str): Either 'move' or 'drag', for the type of action this is.
      x (int, float, None, optional): The x position on the screen where the
        move or drag ends. If None, the current x position is used.
      y (int, float, None, optional): The y position on the screen where the
        move or drag ends. If None, the current y position is used.
xOffset (int, float, None, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default.
yOffset (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
"""
# The move and drag code is similar, but OS X requires a special drag event instead of just a move event when dragging.
# See https://stackoverflow.com/a/2696107/1893164
assert moveOrDrag in ('move', 'drag'), "moveOrDrag must be in ('move', 'drag'), not %s" % (moveOrDrag)
if sys.platform != 'darwin':
moveOrDrag = 'move' # Only OS X needs the drag event specifically.
xOffset = int(xOffset) if xOffset is not None else 0
yOffset = int(yOffset) if yOffset is not None else 0
if x is None and y is None and xOffset == 0 and yOffset == 0:
return # Special case for no mouse movement at all.
startx, starty = position()
x = int(x) if x is not None else startx
y = int(y) if y is not None else starty
# x, y, xOffset, yOffset are now int.
x += xOffset
y += yOffset
width, height = size()
# Make sure x and y are within the screen bounds.
x = max(0, min(x, width - 1))
y = max(0, min(y, height - 1))
# If the duration is small enough, just move the cursor there instantly.
steps = [(x, y)]
if duration > MINIMUM_DURATION:
# Non-instant moving/dragging involves tweening:
num_steps = max(width, height)
sleep_amount = duration / num_steps
if sleep_amount < MINIMUM_SLEEP:
num_steps = int(duration / MINIMUM_SLEEP)
sleep_amount = duration / num_steps
steps = [
getPointOnLine(startx, starty, x, y, tween(n / num_steps))
for n in range(num_steps)
]
# Making sure the last position is the actual destination.
steps.append((x, y))
for tweenX, tweenY in steps:
if len(steps) > 1:
# A single step does not require tweening.
time.sleep(sleep_amount)
_failSafeCheck()
tweenX = int(round(tweenX))
tweenY = int(round(tweenY))
if moveOrDrag == 'move':
platformModule._moveTo(tweenX, tweenY)
elif moveOrDrag == 'drag':
platformModule._dragTo(tweenX, tweenY, button)
else:
raise NotImplementedError('Unknown value of moveOrDrag: {0}'.format(moveOrDrag))
_failSafeCheck()
# Keyboard Functions
# ==================
def isValidKey(key):
"""Returns a Boolean value if the given key is a valid value to pass to
PyAutoGUI's keyboard-related functions for the current platform.
This function is here because passing an invalid value to the PyAutoGUI
keyboard functions currently is a no-op that does not raise an exception.
Some keys are only valid on some platforms. For example, while 'esc' is
valid for the Escape key on all platforms, 'browserback' is only used on
Windows operating systems.
Args:
key (str): The key value.
Returns:
bool: True if key is a valid value, False if not.
"""
    return platformModule.keyboardMapping.get(key, None) is not None
def keyDown(key, pause=None, _pause=True):
"""Performs a keyboard key press without the release. This will put that
key in a held down state.
    NOTE: For some reason, this does not seem to cause key repeats the way
    holding a key down in a text field would.
Args:
key (str): The key to be pressed down. The valid names are listed in
KEYBOARD_KEYS.
Returns:
None
"""
if len(key) > 1:
key = key.lower()
_failSafeCheck()
platformModule._keyDown(key)
_autoPause(pause, _pause)
def keyUp(key, pause=None, _pause=True):
"""Performs a keyboard key release (without the press down beforehand).
Args:
key (str): The key to be released up. The valid names are listed in
KEYBOARD_KEYS.
Returns:
None
"""
if len(key) > 1:
key = key.lower()
_failSafeCheck()
platformModule._keyUp(key)
_autoPause(pause, _pause)
def press(keys, presses=1, interval=0.0, pause=None, _pause=True):
"""Performs a keyboard key press down, followed by a release.
Args:
key (str, list): The key to be pressed. The valid names are listed in
KEYBOARD_KEYS. Can also be a list of such strings.
      presses (integer, optional): The number of times to press the key.
        1 by default, for a single press.
      interval (float, optional): How many seconds between each press.
        0.0 by default, for no pause between presses.
      pause (float, optional): How many seconds to pause after the call completes.
        None by default, which falls back to the module-level PAUSE setting.
Returns:
None
"""
if type(keys) == str:
keys = [keys] # put string in a list
    else:
        lowerKeys = []
        for s in keys:
            if len(s) > 1:
                lowerKeys.append(s.lower())
            else:
                lowerKeys.append(s)
        keys = lowerKeys  # use the lowercased key names from here on
interval = float(interval)
for i in range(presses):
for k in keys:
_failSafeCheck()
platformModule._keyDown(k)
platformModule._keyUp(k)
time.sleep(interval)
_autoPause(pause, _pause)
def typewrite(message, interval=0.0, pause=None, _pause=True):
"""Performs a keyboard key press down, followed by a release, for each of
the characters in message.
The message argument can also be list of strings, in which case any valid
keyboard name can be used.
Since this performs a sequence of keyboard presses and does not hold down
keys, it cannot be used to perform keyboard shortcuts. Use the hotkey()
function for that.
Args:
message (str, list): If a string, then the characters to be pressed. If a
list, then the key names of the keys to press in order. The valid names
are listed in KEYBOARD_KEYS.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None
"""
interval = float(interval)
_failSafeCheck()
for c in message:
if len(c) > 1:
c = c.lower()
press(c, _pause=False)
time.sleep(interval)
_failSafeCheck()
_autoPause(pause, _pause)
def hotkey(*args, **kwargs):
"""Performs key down presses on the arguments passed in order, then performs
key releases in reverse order.
The effect is that calling hotkey('ctrl', 'shift', 'c') would perform a
"Ctrl-Shift-C" hotkey/keyboard shortcut press.
Args:
key(s) (str): The series of keys to press, in order. This can also be a
list of key strings to press.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None
"""
interval = float(kwargs.get('interval', 0.0))
_failSafeCheck()
for c in args:
if len(c) > 1:
c = c.lower()
platformModule._keyDown(c)
time.sleep(interval)
for c in reversed(args):
if len(c) > 1:
c = c.lower()
platformModule._keyUp(c)
time.sleep(interval)
_autoPause(kwargs.get('pause', None), kwargs.get('_pause', True))
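# Illustrative usage (assuming the package is imported as pyautogui):
#   pyautogui.hotkey('ctrl', 'shift', 'esc')     # presses ctrl, shift, esc; releases esc, shift, ctrl
#   pyautogui.hotkey('ctrl', 'v', interval=0.1)  # 0.1 s between each press and release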
class FailSafeException(Exception):
pass
def _failSafeCheck():
if FAILSAFE and position() == (0, 0):
raise FailSafeException('PyAutoGUI fail-safe triggered from mouse moving to upper-left corner. To disable this fail-safe, set pyautogui.FAILSAFE to False.')
def displayMousePosition(xOffset=0, yOffset=0):
"""This function is meant to be run from the command line. It will
automatically display the location and RGB of the mouse cursor."""
print('Press Ctrl-C to quit.')
if xOffset != 0 or yOffset != 0:
print('xOffset: %s yOffset: %s' % (xOffset, yOffset))
resolution = size()
try:
while True:
# Get and print the mouse coordinates.
x, y = position()
positionStr = 'X: ' + str(x - xOffset).rjust(4) + ' Y: ' + str(y - yOffset).rjust(4)
if (x - xOffset) < 0 or (y - yOffset) < 0 or (x - xOffset) >= resolution[0] or (y - yOffset) >= resolution[1]:
pixelColor = ('NaN', 'NaN', 'NaN')
else:
pixelColor = pyscreeze.screenshot().getpixel((x, y))
positionStr += ' RGB: (' + str(pixelColor[0]).rjust(3)
positionStr += ', ' + str(pixelColor[1]).rjust(3)
positionStr += ', ' + str(pixelColor[2]).rjust(3) + ')'
sys.stdout.write(positionStr)
sys.stdout.write('\b' * len(positionStr))
sys.stdout.flush()
except KeyboardInterrupt:
sys.stdout.write('\n')
sys.stdout.flush()
|
osspeak/osspeak
|
osspeak/pyautogui/__init__.py
|
Python
|
mit
| 36,647
|
# -*- coding: utf-8 -*-
'''
watdo.tests
~~~~~~~~~~~
This module contains tests for watdo.
This particular file contains tools for testing.
:copyright: (c) 2013 Markus Unterwaditzer
:license: MIT, see LICENSE for more details.
'''
|
untitaker/watdo
|
tests/__init__.py
|
Python
|
mit
| 256
|
from django.conf.urls import include, url
from ginger.views import utils
__all__ = ('include', 'url', 'scan', 'scan_to_include')
def scan(module, predicate=None):
view_classes = utils.find_views(module, predicate=predicate)
urls = []
for view in view_classes:
if hasattr(view, 'as_urls'):
urls.extend(view.as_urls())
else:
urls.append(view.as_url())
pattern = urls
return pattern
def scan_to_include(module, predicate=None, app_name=None, namespace=None):
return scan(module, predicate), app_name, namespace
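# Illustrative usage sketch (not part of this module): in a project's urls.py one
# might write something like the following, where `myapp.views` is a hypothetical
# module containing ginger view classes.
#
#     from ginger.conf.urls import include, url, scan, scan_to_include
#     from myapp import views
#
#     urlpatterns = scan(views)
#     # or, mounted under a prefix with a namespace:
#     # urlpatterns = [url(r'^myapp/', include(scan_to_include(views, namespace='myapp')))]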
|
vivsh/django-ginger
|
ginger/conf/urls.py
|
Python
|
mit
| 580
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('agency', '0013_auto_20150726_0001'),
]
operations = [
migrations.AlterField(
model_name='feedinfo',
name='feed_publisher_name',
field=models.CharField(max_length=50, verbose_name='Name', choices=[(b'EPTTC', 'EPTTC')]),
),
]
|
renanalencar/hermes
|
agency/migrations/0014_auto_20150726_1411.py
|
Python
|
mit
| 468
|
'''
Problem 2
@author: Kevin Ji
'''
def sum_even_fibonacci( max_value ):
# Initial two elements
prev_term = 1
cur_term = 2
temp_sum = 2
while cur_term < max_value:
next_term = prev_term + cur_term
prev_term = cur_term
cur_term = next_term
if cur_term % 2 == 0:
temp_sum += cur_term
return temp_sum
print( sum_even_fibonacci( 4000000 ) )
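# Sanity check (illustrative): the even Fibonacci numbers not exceeding 100 are
# 2, 8 and 34, so sum_even_fibonacci(100) returns 44.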
|
mc10/project-euler
|
problem_2.py
|
Python
|
mit
| 430
|
import urllib
import ast
import xchat
__module_name__ = "Define"
__module_author__ = "TingPing"
__module_version__ = "2"
__module_description__ = "Show word definitions"
# based on google dictionary script by Sridarshan Shetty - http://sridarshan.co.cc
def define(word, word_eol, userdata):
if len(word) >= 2:
_word = xchat.strip(word[1])
_number = 1
if len(word) >= 3:
_number = int(xchat.strip(word[2]))
else:
xchat.prnt('Define Usage: /define word [number]')
xchat.prnt(' number being alternate definition')
return xchat.EAT_ALL
url="http://www.google.com/dictionary/json?callback=s&q=" + _word + "&sl=en&tl=en&restrict=pr,de&client=te"
obj=urllib.urlopen(url);
content=obj.read()
obj.close()
content=content[2:-10]
dic=ast.literal_eval(content)
if dic.has_key("webDefinitions"):
webdef=dic["webDefinitions"]
webdef=webdef[0]
webdef=webdef["entries"]
index=1
for i in webdef:
if index == _number:
if i["type"]=="meaning":
ans=i["terms"]
op=ans[0]['text']
split=op.split(';')
xchat.prnt(_word + ': ' + split[0].strip())
index+=1
return xchat.EAT_ALL
else:
xchat.prnt('Description unavailable for ' + _word)
return xchat.EAT_ALL
xchat.hook_command("define", define)
xchat.prnt(__module_name__ + ' version ' + __module_version__ + ' loaded.')
|
TingPing/plugins
|
XChat/define.py
|
Python
|
mit
| 1,320
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerNetworkInterfacesOperations:
"""LoadBalancerNetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets associated load balancer network interfaces.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_12_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/networkInterfaces'} # type: ignore
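# Illustrative usage sketch (not part of the generated code): it assumes an
# authenticated aio NetworkManagementClient from azure-mgmt-network and existing
# Azure resources; the resource names below are placeholders.
#
#     async with NetworkManagementClient(credential, subscription_id) as client:
#         async for nic in client.load_balancer_network_interfaces.list(
#                 "my-resource-group", "my-load-balancer"):
#             print(nic.name)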
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/aio/operations/_load_balancer_network_interfaces_operations.py
|
Python
|
mit
| 5,642
|
#
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import os
import json
import unittest
import six
import gruvi
from gruvi import jsonrpc
from gruvi.jsonrpc import JsonRpcError, JsonRpcVersion
from gruvi.jsonrpc import JsonRpcProtocol, JsonRpcClient, JsonRpcServer
from gruvi.jsonrpc_ffi import ffi as _ffi, lib as _lib
from gruvi.transports import TransportError
from support import UnitTest, MockTransport
_keepalive = None
def set_buffer(ctx, buf):
global _keepalive # See note in JsonRpcProtocol
_keepalive = ctx.buf = _ffi.from_buffer(buf)
ctx.buflen = len(buf)
ctx.offset = 0
def split_string(s):
ctx = _ffi.new('struct split_context *')
set_buffer(ctx, s)
_lib.json_split(ctx)
return ctx
JsonRpcProtocol.default_version = '1.0'
class TestJsonSplitter(UnitTest):
def test_simple(self):
r = b'{ "foo": "bar" }'
ctx = split_string(r)
self.assertEqual(ctx.error, 0)
self.assertEqual(ctx.offset, len(r))
def test_leading_whitespace(self):
r = b' { "foo": "bar" }'
ctx = split_string(r)
self.assertEqual(ctx.error, 0)
self.assertEqual(ctx.offset, len(r))
r = b' \t\n{ "foo": "bar" }'
ctx = split_string(r)
self.assertEqual(ctx.error, 0)
self.assertEqual(ctx.offset, len(r))
def test_trailing_whitespace(self):
r = b'{ "foo": "bar" } '
ctx = split_string(r)
self.assertEqual(ctx.error, 0)
self.assertEqual(ctx.offset, len(r)-1)
error = _lib.json_split(ctx)
        self.assertEqual(error, ctx.error)
        self.assertEqual(error, _lib.INCOMPLETE)
self.assertEqual(ctx.offset, len(r))
def test_brace_in_string(self):
r = b'{ "foo": "b{r" }'
ctx = split_string(r)
self.assertEqual(ctx.error, 0)
self.assertEqual(ctx.offset, len(r))
r = b'{ "foo": "b}r" }'
ctx = split_string(r)
self.assertEqual(ctx.error, 0)
self.assertEqual(ctx.offset, len(r))
def test_string_escape(self):
r = b'{ "foo": "b\\"}" }'
ctx = split_string(r)
self.assertEqual(ctx.error, 0)
self.assertEqual(ctx.offset, len(r))
def test_error(self):
r = b' x { "foo": "bar" }'
ctx = split_string(r)
self.assertEqual(ctx.error, _lib.ERROR)
self.assertEqual(ctx.offset, 1)
r = b'[ { "foo": "bar" } ]'
ctx = split_string(r)
self.assertEqual(ctx.error, _lib.ERROR)
self.assertEqual(ctx.offset, 0)
def test_multiple(self):
r = b'{ "foo": "bar" } { "baz": "qux" }'
ctx = split_string(r)
self.assertEqual(ctx.error, 0)
self.assertEqual(ctx.offset, 16)
error = _lib.json_split(ctx)
        self.assertEqual(error, ctx.error)
        self.assertEqual(error, 0)
self.assertEqual(ctx.offset, len(r))
def test_incremental(self):
r = b'{ "foo": "bar" }'
ctx = _ffi.new('struct split_context *')
for i in range(len(r)-1):
set_buffer(ctx, r[i:i+1])
error = _lib.json_split(ctx)
            self.assertEqual(error, ctx.error)
            self.assertEqual(error, _lib.INCOMPLETE)
self.assertEqual(ctx.offset, 1)
set_buffer(ctx, r[-1:])
error = _lib.json_split(ctx)
        self.assertEqual(error, ctx.error)
        self.assertEqual(error, 0)
self.assertEqual(ctx.offset, 1)
class TestJsonRpcV1(UnitTest):
def setUp(self):
super(TestJsonRpcV1, self).setUp()
self.version = JsonRpcVersion.create('1.0')
def test_check_request(self):
v = self.version
msg = {'id': 1, 'method': 'foo', 'params': []}
self.assertEqual(v.check_message(msg), jsonrpc.REQUEST)
msg = {'id': None, 'method': 'foo', 'params': []}
self.assertEqual(v.check_message(msg), jsonrpc.REQUEST)
def test_check_request_missing_id(self):
v = self.version
msg = {'method': 'foo', 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_request_missing_method(self):
v = self.version
msg = {'id': 1, 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_request_illegal_method(self):
v = self.version
msg = {'id': 1, 'method': None, 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'id': 1, 'method': 1, 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'id': 1, 'method': {}, 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'id': 1, 'method': [], 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'id': 1, 'method': [1], 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_request_missing_params(self):
v = self.version
msg = {'id': 1, 'method': 'foo'}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_request_illegal_params(self):
v = self.version
msg = {'id': 1, 'method': 'foo', 'params': None}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'id': 1, 'method': 'foo', 'params': 1}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'id': 1, 'method': 'foo', 'params': 'foo'}
self.assertRaises(ValueError, v.check_message, msg)
        msg = {'id': 1, 'method': 'foo', 'params': {}}
        self.assertRaises(ValueError, v.check_message, msg)
def test_check_request_extraneous_fields(self):
v = self.version
msg = {'id': 1, 'method': 'foo', 'params': [], 'bar': 'baz'}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response(self):
v = self.version
msg = {'id': 1, 'result': 'foo', 'error': None}
self.assertEqual(v.check_message(msg), jsonrpc.RESPONSE)
def test_check_response_null_result(self):
v = self.version
msg = {'id': 1, 'result': None, 'error': None}
self.assertEqual(v.check_message(msg), jsonrpc.RESPONSE)
def test_check_response_error(self):
v = self.version
msg = {'id': 1, 'result': None, 'error': {'code': 1}}
self.assertEqual(v.check_message(msg), jsonrpc.RESPONSE)
def test_check_response_missing_id(self):
v = self.version
msg = {'result': 'foo', 'error': None}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response_missing_result(self):
v = self.version
msg = {'id': 1, 'error': None}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response_missing_error(self):
v = self.version
msg = {'id': 1, 'result': None}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response_illegal_error(self):
v = self.version
msg = {'id': 1, 'result': None, 'error': 1}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'id': 1, 'result': None, 'error': 'foo'}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'id': 1, 'result': None, 'error': []}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response_result_error_both_set(self):
v = self.version
msg = {'id': 1, 'result': 1, 'error': 0}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response_extraneous_fields(self):
v = self.version
msg = {'id': 1, 'result': 1, 'error': None, 'bar': 'baz'}
self.assertRaises(ValueError, v.check_message, msg)
def test_create_request(self):
v = self.version
msg = v.create_request('foo', [])
self.assertIsInstance(msg['id'], six.string_types)
self.assertEqual(msg['method'], 'foo')
self.assertEqual(msg['params'], [])
self.assertEqual(len(msg), 3)
def test_create_request_notification(self):
v = self.version
msg = v.create_request('foo', [], notification=True)
self.assertIsNone(msg['id'])
self.assertEqual(msg['method'], 'foo')
self.assertEqual(msg['params'], [])
self.assertEqual(len(msg), 3)
def test_create_response(self):
v = self.version
req = {'id': 'gruvi.0'}
msg = v.create_response(req, 1)
self.assertEqual(msg['id'], req['id'])
self.assertEqual(msg['result'], 1)
self.assertIsNone(msg['error'])
self.assertEqual(len(msg), 3)
def test_create_response_null_result(self):
v = self.version
req = {'id': 'gruvi.0'}
msg = v.create_response(req, None)
self.assertEqual(msg['id'], req['id'])
self.assertIsNone(msg['result'])
self.assertIsNone(msg['error'])
self.assertEqual(len(msg), 3)
def test_create_response_error(self):
v = self.version
req = {'id': 'gruvi.0'}
msg = v.create_response(req, error={'code': 1})
self.assertEqual(msg['id'], req['id'])
self.assertIsNone(msg['result'])
self.assertEqual(msg['error'], {'code': 1})
self.assertEqual(len(msg), 3)
class TestJsonRpcV2(UnitTest):
def setUp(self):
super(TestJsonRpcV2, self).setUp()
self.version = JsonRpcVersion.create('2.0')
def test_check_request(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'method': 'foo', 'params': []}
self.assertEqual(v.check_message(msg), jsonrpc.REQUEST)
msg = {'jsonrpc': '2.0', 'id': 1, 'method': 'foo', 'params': {}}
self.assertEqual(v.check_message(msg), jsonrpc.REQUEST)
def test_check_request_notification(self):
v = self.version
msg = {'jsonrpc': '2.0', 'method': 'foo', 'params': []}
self.assertEqual(v.check_message(msg), jsonrpc.REQUEST)
msg = {'jsonrpc': '2.0', 'method': 'foo', 'params': {}}
self.assertEqual(v.check_message(msg), jsonrpc.REQUEST)
def test_check_request_missing_version(self):
v = self.version
msg = {'id': 1, 'method': 'foo', 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_request_missing_method(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_request_illegal_method(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'method': None, 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'jsonrpc': '2.0', 'id': 1, 'method': 1, 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'jsonrpc': '2.0', 'id': 1, 'method': {}, 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'jsonrpc': '2.0', 'id': 1, 'method': [], 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'jsonrpc': '2.0', 'id': 1, 'method': [1], 'params': []}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_request_missing_params(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'method': 'foo'}
self.assertEqual(v.check_message(msg), jsonrpc.REQUEST)
def test_check_request_illegal_params(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'method': 'foo', 'params': None}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'jsonrpc': '2.0', 'id': 1, 'method': 'foo', 'params': 1}
self.assertRaises(ValueError, v.check_message, msg)
        msg = {'jsonrpc': '2.0', 'id': 1, 'method': 'foo', 'params': 'foo'}
        self.assertRaises(ValueError, v.check_message, msg)
def test_check_request_extraneous_fields(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'method': 'foo', 'params': [], 'bar': 'baz'}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'result': 'foo'}
self.assertEqual(v.check_message(msg), jsonrpc.RESPONSE)
def test_check_response_null_result(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'result': None}
self.assertEqual(v.check_message(msg), jsonrpc.RESPONSE)
def test_check_response_error(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'error': {'code': 1}}
self.assertEqual(v.check_message(msg), jsonrpc.RESPONSE)
def test_check_response_missing_id(self):
v = self.version
msg = {'jsonrpc': '2.0', 'result': 'foo'}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response_null_id(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': None, 'result': 'foo'}
self.assertEqual(v.check_message(msg), jsonrpc.RESPONSE)
def test_check_response_error_missing_id(self):
v = self.version
msg = {'jsonrpc': '2.0', 'error': {'code': 10}}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response_error_null_id(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': None, 'error': {'code': 1}}
self.assertEqual(v.check_message(msg), jsonrpc.RESPONSE)
def test_check_response_missing_result_and_error(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response_illegal_error(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'error': 1}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'jsonrpc': '2.0', 'id': 1, 'error': 'foo'}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'jsonrpc': '2.0', 'id': 1, 'error': []}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response_result_error_both_present(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'result': None, 'error': None}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'jsonrpc': '2.0', 'id': 1, 'result': 1, 'error': None}
self.assertRaises(ValueError, v.check_message, msg)
msg = {'jsonrpc': '2.0', 'id': 1, 'result': None, 'error': {'code': 10}}
self.assertRaises(ValueError, v.check_message, msg)
def test_check_response_extraneous_fields(self):
v = self.version
msg = {'jsonrpc': '2.0', 'id': 1, 'result': 1, 'error': None, 'bar': 'baz'}
self.assertRaises(ValueError, v.check_message, msg)
def test_create_request(self):
v = self.version
msg = v.create_request('foo', [])
self.assertEqual(msg['jsonrpc'], '2.0')
self.assertIsInstance(msg['id'], six.string_types)
self.assertEqual(msg['method'], 'foo')
self.assertEqual(msg['params'], [])
self.assertEqual(len(msg), 4)
def test_create_request_notification(self):
v = self.version
msg = v.create_request('foo', [], notification=True)
self.assertEqual(msg['jsonrpc'], '2.0')
self.assertNotIn('id', msg)
self.assertEqual(msg['method'], 'foo')
self.assertEqual(msg['params'], [])
self.assertEqual(len(msg), 3)
def test_create_response(self):
v = self.version
req = {'id': 'gruvi.0'}
msg = v.create_response(req, 1)
self.assertEqual(msg['jsonrpc'], '2.0')
self.assertEqual(msg['id'], req['id'])
self.assertEqual(msg['result'], 1)
self.assertNotIn('error', msg)
self.assertEqual(len(msg), 3)
def test_create_response_null_result(self):
v = self.version
req = {'id': 'gruvi.0'}
msg = v.create_response(req, None)
self.assertEqual(msg['jsonrpc'], '2.0')
self.assertEqual(msg['id'], req['id'])
self.assertIsNone(msg['result'])
self.assertNotIn('error', msg)
self.assertEqual(len(msg), 3)
def test_create_response_error(self):
v = self.version
req = {'id': 'gruvi.0'}
msg = v.create_response(req, error={'code': 1})
self.assertEqual(msg['jsonrpc'], '2.0')
self.assertEqual(msg['id'], req['id'])
self.assertNotIn('result', msg)
self.assertEqual(msg['error'], {'code': 1})
self.assertEqual(len(msg), 3)
class TestJsonRpcProtocol(UnitTest):
def setUp(self):
super(TestJsonRpcProtocol, self).setUp()
self.transport = MockTransport()
self.protocol = JsonRpcProtocol(self.message_handler)
self.transport.start(self.protocol)
self.messages = []
self.protocols = []
def message_handler(self, message, transport, protocol):
self.messages.append(message)
self.protocols.append(protocol)
def get_messages(self):
# run dispatcher thread so that it calls our message handler
gruvi.sleep(0)
return self.messages
def test_simple(self):
m = b'{ "id": "1", "method": "foo", "params": [] }'
proto = self.protocol
proto.data_received(m)
mm = self.get_messages()
self.assertEqual(len(mm), 1)
self.assertIsInstance(mm[0], dict)
self.assertEqual(mm[0], {'id': '1', 'method': 'foo', 'params': []})
pp = self.protocols
self.assertEqual(len(pp), 1)
self.assertIs(pp[0], proto)
def test_multiple(self):
m = b'{ "id": "1", "method": "foo", "params": [] }' \
b'{ "id": "2", "method": "bar", "params": [] }'
proto = self.protocol
proto.data_received(m)
mm = self.get_messages()
self.assertEqual(len(mm), 2)
self.assertEqual(mm[0], {'id': '1', 'method': 'foo', 'params': []})
self.assertEqual(mm[1], {'id': '2', 'method': 'bar', 'params': []})
pp = self.protocols
self.assertEqual(len(pp), 2)
self.assertIs(pp[0], proto)
self.assertIs(pp[1], proto)
def test_whitespace(self):
m = b' { "id": "1", "method": "foo", "params": [] }' \
b' { "id": "2", "method": "bar", "params": [] }'
proto = self.protocol
proto.data_received(m)
mm = self.get_messages()
self.assertEqual(len(mm), 2)
self.assertEqual(mm[0], {'id': '1', 'method': 'foo', 'params': []})
self.assertEqual(mm[1], {'id': '2', 'method': 'bar', 'params': []})
def test_incremental(self):
m = b'{ "id": "1", "method": "foo", "params": [] }'
proto = self.protocol
for i in range(len(m)-1):
proto.data_received(m[i:i+1])
self.assertEqual(self.get_messages(), [])
proto.data_received(m[-1:])
mm = self.get_messages()
self.assertEqual(len(mm), 1)
        self.assertEqual(mm[0], {'id': '1', 'method': 'foo', 'params': []})
def test_framing_error(self):
m = b'xxx'
proto = self.protocol
proto.data_received(m)
self.assertEqual(self.get_messages(), [])
self.assertIsInstance(proto._error, JsonRpcError)
def test_encoding_error(self):
m = b'{ xxx\xff }'
proto = self.protocol
proto.data_received(m)
self.assertEqual(self.get_messages(), [])
self.assertIsInstance(proto._error, JsonRpcError)
def test_illegal_json(self):
m = b'{ "xxxx" }'
proto = self.protocol
proto.data_received(m)
self.assertEqual(self.get_messages(), [])
self.assertIsInstance(proto._error, JsonRpcError)
def test_illegal_jsonrpc(self):
m = b'{ "xxxx": "yyyy" }'
proto = self.protocol
proto.data_received(m)
self.assertEqual(self.get_messages(), [])
self.assertIsInstance(proto._error, JsonRpcError)
def test_maximum_message_size_exceeded(self):
proto = self.protocol
proto.max_message_size = 100
message = {'id': 1, 'method': 'foo', 'params': ['x'*100]}
message = json.dumps(message).encode('utf8')
self.assertGreater(len(message), proto.max_message_size)
proto.data_received(message)
self.assertEqual(self.get_messages(), [])
self.assertIsInstance(proto._error, JsonRpcError)
def test_flow_control(self):
# Write more messages than the protocol is willing to pipeline. Flow
# control should kick in and alternate scheduling of the producer and
# the consumer.
proto, trans = self.protocol, self.transport
self.assertTrue(trans._reading)
proto.max_pipeline_size = 10
message = b'{ "id": 1, "method": "foo", "params": [] }'
interrupted = 0
for i in range(1000):
proto.data_received(message)
if not trans._reading:
interrupted += 1
gruvi.sleep(0) # run dispatcher
self.assertTrue(trans._reading)
mm = self.get_messages()
self.assertEqual(len(mm), 1000)
self.assertEqual(interrupted, 100)
message = json.loads(message.decode('utf8'))
for m in mm:
self.assertEqual(m, message)
def echo_app(message, transport, protocol):
if message.get('method') != 'echo':
protocol.send_response(message, error={'code': jsonrpc.METHOD_NOT_FOUND})
else:
protocol.send_response(message, message['params'])
def reflect_app(message, transport, protocol):
if message.get('method') != 'echo':
return
value = protocol.call_method('echo', *message['params'])
protocol.send_response(message, value)
def notification_app():
notifications = []
def application(message, transport, protocol):
if message.get('id') is None:
notifications.append((message['method'], message['params']))
elif message['method'] == 'get_notifications':
protocol.send_response(message, notifications)
return application
class TestJsonRpc(UnitTest):
def test_errno(self):
code = jsonrpc.SERVER_ERROR
self.assertIsInstance(code, int)
name = jsonrpc.errorcode[code]
self.assertIsInstance(name, str)
self.assertEqual(getattr(jsonrpc, name), code)
desc = jsonrpc.strerror(code)
self.assertIsInstance(desc, str)
def test_call_method_tcp(self):
server = JsonRpcServer(echo_app)
server.listen(('localhost', 0))
addr = server.addresses[0]
client = JsonRpcClient()
client.connect(addr)
result = client.call_method('echo', 'foo')
self.assertEqual(result, ['foo'])
server.close()
client.close()
def test_call_method_pipe(self):
server = JsonRpcServer(echo_app)
server.listen(self.pipename(abstract=True))
addr = server.addresses[0]
client = JsonRpcClient()
client.connect(addr)
result = client.call_method('echo', 'foo')
self.assertEqual(result, ['foo'])
server.close()
client.close()
def test_call_method_ssl(self):
server = JsonRpcServer(echo_app)
server.listen(('localhost', 0), **self.ssl_s_args)
addr = server.addresses[0]
client = JsonRpcClient()
client.connect(addr, **self.ssl_c_args)
result = client.call_method('echo', 'foo')
self.assertEqual(result, ['foo'])
server.close()
client.close()
def test_call_method_no_args(self):
server = JsonRpcServer(echo_app)
server.listen(('127.0.0.1', 0))
addr = server.addresses[0]
client = JsonRpcClient()
client.connect(addr)
result = client.call_method('echo')
self.assertEqual(result, [])
server.close()
client.close()
def test_call_method_multiple_args(self):
server = JsonRpcServer(echo_app)
server.listen(('127.0.0.1', 0))
addr = server.addresses[0]
client = JsonRpcClient()
client.connect(addr)
result = client.call_method('echo', 'foo', 'bar')
self.assertEqual(result, ['foo', 'bar'])
server.close()
client.close()
def test_call_method_error(self):
server = JsonRpcServer(echo_app)
server.listen(('127.0.0.1', 0))
addr = server.addresses[0]
client = JsonRpcClient()
client.connect(addr)
exc = self.assertRaises(JsonRpcError, client.call_method, 'echo2')
self.assertIsInstance(exc, JsonRpcError)
self.assertIsInstance(exc.error, dict)
self.assertEqual(exc.error['code'], jsonrpc.METHOD_NOT_FOUND)
server.close()
client.close()
def test_send_notification(self):
server = JsonRpcServer(notification_app())
server.listen(('127.0.0.1', 0))
addr = server.addresses[0]
client = JsonRpcClient()
client.connect(addr)
client.send_notification('notify_foo', 'foo')
notifications = client.call_method('get_notifications')
self.assertEqual(notifications, [['notify_foo', ['foo']]])
server.close()
client.close()
def test_call_method_ping_pong(self):
server = JsonRpcServer(reflect_app)
server.listen(('127.0.0.1', 0))
addr = server.addresses[0]
client = JsonRpcClient(echo_app)
client.connect(addr)
result = client.call_method('echo', 'foo')
self.assertEqual(result, ['foo'])
server.close()
client.close()
def test_send_evil(self):
server = JsonRpcServer(echo_app)
server.listen(('127.0.0.1', 0))
addr = server.addresses[0]
client = JsonRpcClient()
client.connect(addr)
exc = None
try:
chunk = b'{' * 1024
while True:
client.transport.write(chunk)
gruvi.sleep(0)
except Exception as e:
exc = e
self.assertIsInstance(exc, TransportError)
server.close()
client.close()
def test_send_whitespace(self):
server = JsonRpcServer(echo_app)
server.listen(('127.0.0.1', 0))
addr = server.addresses[0]
client = JsonRpcClient()
client.connect(addr)
exc = None
try:
chunk = b' ' * 1024
while True:
client.transport.write(chunk)
gruvi.sleep(0)
except Exception as e:
exc = e
self.assertIsInstance(exc, TransportError)
server.close()
client.close()
def test_send_random(self):
server = JsonRpcServer(echo_app)
server.listen(('127.0.0.1', 0))
addr = server.addresses[0]
client = JsonRpcClient()
client.connect(addr)
exc = None
try:
while True:
chunk = os.urandom(1024)
client.transport.write(chunk)
gruvi.sleep(0)
except Exception as e:
exc = e
self.assertIsInstance(exc, TransportError)
server.close()
client.close()
def test_connection_limit(self):
server = JsonRpcServer(echo_app)
server.listen(('127.0.0.1', 0))
addr = server.addresses[0]
server.max_connections = 2
clients = []
exc = None
try:
for i in range(3):
client = JsonRpcClient(timeout=2)
client.connect(addr)
client.call_method('echo')
clients.append(client)
except Exception as e:
exc = e
self.assertIsInstance(exc, TransportError)
self.assertEqual(len(server.connections), server.max_connections)
for client in clients:
client.close()
server.close()
if __name__ == '__main__':
unittest.main()
| geertj/gruvi | tests/test_jsonrpc.py | Python | mit | 27,989 |
'''
Simple pull of account info
'''
import requests
import datetime
import pickle
import json
import time
import sys
account_url = 'https://api.toodledo.com/3/account/get.php?access_token='
tasks_get_url = 'https://api.toodledo.com/3/tasks/get.php?access_token='
'''
Fields you can use to filter when you get tasks:
https://api.toodledo.com/3/tasks/index.php under "Task Datatypes"
'''
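# A hedged sketch (not in the original script) of how that fields filter is used with
# query_tasks() below; the extra field names are illustrative, taken from the API docs:
#   query_tasks('auth_token.pkl', 7, completion_state='1', fields='tag,context,goal,duedate')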
def load_token(token):
    with open(token, 'rb') as f:
        token = pickle.load(f)
    return token
def sync(token):
token = load_token(token)
get_account = requests.get('{}{}'.format(account_url, token['access_token']))
#cur_task = int(get_account.text['lastedit_task'])
return get_account.text
def query_tasks(token, days, completion_state='1', fields='tag,context,goal'):
token = load_token(token)
    # Get tasks from Monday (i.e. 4 days ago, since the cron job runs on Friday)
start_date = datetime.date.today() - datetime.timedelta(days=days)
# Make it Epoch Time
start_date = int(time.mktime(start_date.timetuple()))
start_date = str(start_date)
    # Get ALL tasks from start_date
# Comp codes -- 1 == completed, 0 == incomplete, -1 == both
get_tasks = requests.get('{}{}&after={}&comp={}&fields={}'.format(tasks_get_url, token['access_token'], start_date, completion_state, fields))
pickle.dump(get_tasks.text, open('tasks_queried.pkl', 'wb'))
return get_tasks.text
def parse_to_json(response):
data = pickle.load(open(response, 'rb'))
return json.loads(data)
def arrange_date(epoch_time):
completion = time.strftime('%A, %b %d, %Y', time.gmtime(epoch_time))
return completion
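# Worked example: epoch 0 is midnight UTC on January 1st, 1970, so
#   arrange_date(0) -> 'Thursday, Jan 01, 1970'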
def display_tasks(task_dump, context_pickle, days=4):
task_dump = parse_to_json(task_dump)
contexts = make_context_hash(context_pickle)
start_date = datetime.date.today() - datetime.timedelta(days=days)
start_date = datetime.date.strftime(start_date, '%A, %b %d, %Y')
end_date = datetime.date.today()
end_date = datetime.date.strftime(end_date, '%A, %b %d, %Y')
print 'Tasks Created between {} and {}.'.format(start_date, end_date)
print 'Total Tasks: ', task_dump[0]['total']
for i in range(len(task_dump)):
#print task_dump[i]
# print contexts
if 'completed' in task_dump[i]:
if task_dump[i]['completed'] == 0:
print 'Incomplete Task: {}'.format(task_dump[i]['title'])
elif contexts[task_dump[i]['context']] != 'Standing Meeting':
comp_date = arrange_date(task_dump[i]['completed'])
print 'Completed Task : {}, Completed {}'.format(task_dump[i]['title'], comp_date)
else:
pass
#test = display_tasks('tasks_queried.pkl', 4)
def format_task(task):
'''
Take a dictionary formatted task from display tasks and print it
out to something human readable.
'''
comp_date = arrange_date(task['completed'])
print 'Completed Task : {}, Completed {}'.format(task['title'], comp_date)
def get_completed_tasks():
query = query_tasks('auth_token.pkl', 4, '1')
return query
def get_incomplete_tasks():
query = query_tasks('auth_token.pkl', 4, '0')
return query
def get_all_tasks():
query = query_tasks('auth_token.pkl', 4, '-1')
return query
def get_defined_list_ids(token, defined_list):
valid_lists = ['goals', 'contexts']
if defined_list.lower() not in valid_lists:
print 'Not a valid user defined list, exiting...'
sys.exit(2)
token = load_token(token)
query = requests.get('http://api.toodledo.com/3/{}/get.php?access_token={}'.format(defined_list, token['access_token']))
pickle.dump(query.text, open('{}_queried.pkl'.format(defined_list), 'wb'))
return query.text
def make_context_hash(defined_list_pickle):
    with open(defined_list_pickle, 'rb') as f:
        contexts = pickle.load(f)
    contexts = json.loads(contexts)
out = {}
for i in range(len(contexts)):
out[contexts[i]['id']] = contexts[i]['name']
return out
#tasks = get_completed_tasks()
#print tasks
if __name__ == '__main__':
tdump = display_tasks('tasks_queried.pkl', 4)
| gadeleon/toodledo_cli_client | toodle_sync.py | Python | mit | 3,843 |
import sys
def genfib():
first, second = 0, 1
while True:
yield first
first, second = second, first + second
def fib(number):
fibs = genfib()
for i in xrange(number + 1):
retval = fibs.next()
return retval
if __name__ == '__main__':
inputfile = sys.argv[1]
with open(inputfile, 'r') as f:
for line in f:
if line:
print '{}'.format(fib(int(line.strip())))
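# Worked example: genfib() yields 0, 1, 1, 2, 3, 5, 8, ... so fib(0) == 0, fib(1) == 1
# and fib(10) == 55; the input file is expected to hold one index per line.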
| MikeDelaney/CodeEval | easy/fibonacci/fib.py | Python | mit | 449 |
import time
import logging
def time_zone(t):
if t.tm_isdst == 1 and time.daylight == 1:
tz_sec = time.altzone
tz_name = time.tzname[1]
else:
tz_sec = time.timezone
tz_name = time.tzname[0]
if tz_sec > 0:
tz_sign = '-'
else:
tz_sign = '+'
tz_offset = '%s%02d%02d' % (tz_sign, abs(tz_sec)//3600, abs(tz_sec//60)%60)
return (tz_offset, tz_name)
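# Worked example (assuming a machine configured for US Eastern time with DST inactive,
# where time.timezone == 18000 and time.tzname[0] == 'EST'):
#   time_zone(time.localtime()) -> ('-0500', 'EST')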
class JsubFormatter(logging.Formatter):
# Add this method in order to display time zone offset correctly under python 2.x
def formatTime(self, record, datefmt=None):
ct = time.localtime(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime('%Y-%m-%d %H:%M:%S', ct)
ms = '%03d' % record.msecs
tz_offset, tz_name = time_zone(ct)
            s = '%s.%s %s %s' % (t, ms, tz_offset, tz_name)
return s
_FORMATTER = JsubFormatter('[%(asctime)s][%(name)s|%(levelname)s]: %(message)s')
#_FORMATTER = logging.Formatter('[%(asctime)s](%(name)s:%(levelname)s) %(message)s', '%Y-%m-%d %H:%M:%S')
def add_stream_logger(level):
logger = logging.getLogger('JSUB')
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setFormatter(_FORMATTER)
logger.addHandler(ch)
| jsubpy/jsub | jsub/log.py | Python | mit | 1,326 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# {{pkglts pysetup,
from os import walk
from os.path import abspath, normpath
from os.path import join as pj
from setuptools import setup, find_packages
short_descr = "Set of data structures used in openalea such as: graph, grid, topomesh"
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
def parse_requirements(fname):
with open(fname, 'r') as f:
txt = f.read()
reqs = []
for line in txt.splitlines():
line = line.strip()
if len(line) > 0 and not line.startswith("#"):
reqs.append(line)
return reqs
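# Sketch of the expected behaviour: blank lines and "#" comments are skipped, so a
# requirements file containing "numpy", a "# pinned" comment and "six>=1.10" yields
# ['numpy', 'six>=1.10'].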
# find version number in /src/$pkg_pth/version.py
version = {}
with open("src/openalea/container/version.py") as fp:
exec(fp.read(), version)
setup(
name='openalea.container',
version=version["__version__"],
description=short_descr,
long_description=readme + '\n\n' + history,
author="revesansparole",
author_email='revesansparole@gmail.com',
url='',
license="mit",
zip_safe=False,
packages=find_packages('src'),
package_dir={'': 'src'},
install_requires=parse_requirements("requirements.txt"),
tests_require=parse_requirements("dvlpt_requirements.txt"),
entry_points={
# 'console_scripts': [
# 'fake_script = openalea.fakepackage.amodule:console_script', ],
# 'gui_scripts': [
# 'fake_gui = openalea.fakepackage.amodule:gui_script',],
# 'wralea': wralea_entry_points
},
keywords='',
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2 :: Only',
'Programming Language :: Python :: 2.7'
],
test_suite='nose.collector',
)
# }}
| revesansparole/oacontainer | setup.py | Python | mit | 1,872 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
amtrak
Parse a trip itinerary of amtrak services copied into a text file.
Running the file will take a trip.txt file and output a .json with one record
for each amtrak service in the trip. You can also use the main method of the
module. Both cases, the first parameter would be the input and the second one
would be the output.
Example:
$ python amtrak.py
$ python amtrak.py trip.txt
$ python amtrak.py trip.txt /json/amtrak-trip.json
import amtrak
amtrak.main()
amtrak.main("trip.txt")
amtrak.main("trip.txt", "/json/amtrak-trip.json")
See the "trip.txt" file in this directory for an example of the type of amtrak
itinerary e-mail that is supported for the parsers in this repo.
"""
from __future__ import unicode_literals
import copy
import json
import arrow
import sys
from modules import parsers
class AmtrakServiceParser(object):
"""Parse Amtrak service information from confirmation email lines.
Attributes:
name (str): Name of the service.
departure_station (str): Station where the service starts.
departure_state (str): State where the service starts.
departure_city (str): City where the service starts.
departure_date (str): Date and time when the service starts.
arrival_station (str): Station where the service ends.
arrival_state (str): State where the service ends.
arrival_city (str): City where the service ends.
arrival_date (str): Date and time when the service ends.
accommodation (str): Type of accommodation.
"""
def __init__(self):
self.name = None
self.departure_station = None
self.departure_state = None
self.departure_city = None
self.departure_date = None
self.arrival_station = None
self.arrival_state = None
self.arrival_city = None
self.arrival_date = None
self.accommodation = None
def parse(self, line):
"""Parse one line of the amtrak itinerary.
It will add information to the parser until last item has been parsed,
then it will return a new record and clean the parser from any data.
Args:
line (str): A line of an amtrak itinerary.
Returns:
dict: All the information parsed of a single service.
Example:
{"name": "49 Lake Shore Ltd.",
"departure_station": "New York (Penn Station), New York",
"departure_state": "New York",
"departure_city": "New York",
"departure_date": '2015-05-18T15:40:00+00:00',
"arrival_station": "Chicago (Chicago Union Station), Illinois",
"arrival_state": "Illinois",
"arrival_city": "Chicago",
"arrival_date": '2015-05-19T09:45:00+00:00',
"accommodation": "1 Reserved Coach Seat"}
"""
for parser in parsers.get_parsers():
if parser.accepts(line):
key, value = parser.parse(line)
if not key == "date":
self.__dict__[key] = value
# date could be departure or arrival, departure is always first
else:
if not self.departure_date:
self.departure_date = value.isoformat()
else:
self.arrival_date = value.isoformat()
if self._service_info_complete():
RV = copy.copy(self.__dict__)
self.__init__()
else:
RV = None
return RV
def _service_info_complete(self):
for value in self.__dict__.values():
if not value:
return False
return True
def parse_services(filename='trip.txt'):
"""Parse all services from an amtrak itinerary.
Args:
filename (str): Path to a text file with an amtrak itinerary.
Yields:
dict: New record with data about a service.
"""
parser = AmtrakServiceParser()
with open(filename, 'rb') as f:
for line in f.readlines():
new_record = parser.parse(line)
if new_record:
yield new_record
def add_calc_fields(service):
"""Write the duration of a service into an new field."""
service["duration"] = _calc_duration(service)
return service
def _calc_duration(service):
"""Calculates the duration of a service."""
duration = arrow.get(service["arrival_date"]) - \
arrow.get(service["departure_date"])
return round(duration.total_seconds() / 60 / 60, 1)
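# Worked example, reusing the dates from the parse() docstring above: departing
# 2015-05-18T15:40:00+00:00 and arriving 2015-05-19T09:45:00+00:00 is 18 hours and
# 5 minutes, so _calc_duration returns round(18.083..., 1) == 18.1.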
def write_services_to_json(services, file_name="./json/amtrak-trip.json"):
"""Write parsed services to a json file.
Args:
services (dict): Parsed services.
file_name (str): Path of the json file to write in.
"""
with open(file_name, "w") as f:
f.write(json.dumps(services, indent=4))
def main(filename='trip.txt', file_name="./json/amtrak-trip.json"):
services = [add_calc_fields(service) for service
in parse_services(filename)]
write_services_to_json(services, file_name)
if __name__ == '__main__':
if len(sys.argv) == 2:
main(sys.argv[1])
elif len(sys.argv) == 3:
main(sys.argv[1], sys.argv[2])
else:
main()
| abenassi/amtrak-trip | amtrak.py | Python | mit | 5,392 |
#!/bin/python3
import bisect
def is_palindrome(n):
return str(n) == str(n)[::-1]
def generate_palindromes():
return [i * j
for i in range(100, 1000)
for j in range(100, 1000)
if is_palindrome(i * j)]
def find_lt(a, x):
'Find rightmost value less than x'
i = bisect.bisect_left(a, x)
if i:
return a[i - 1]
raise ValueError
palindromes = sorted(generate_palindromes())
test_cases = int(input().strip())
for _ in range(test_cases):
n = int(input().strip())
print(find_lt(palindromes, n))
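# find_lt is a plain binary search, e.g. find_lt([1, 3, 5], 4) == 3, so each query
# prints the largest product-of-two-3-digit-numbers palindrome strictly below n.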
| rootulp/hackerrank | python/euler004.py | Python | mit | 570 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitPeeringsOperations:
"""ExpressRouteCircuitPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified peering from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitPeering":
"""Gets the specified peering for the express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCircuitPeering",
**kwargs: Any
) -> "_models.ExpressRouteCircuitPeering":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCircuitPeering",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitPeering"]:
"""Creates or updates a peering in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update express route circuit
peering operation.
:type peering_parameters: ~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCircuitPeeringListResult"]:
"""Gets all peerings in a specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitPeeringListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings'} # type: ignore
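# Usage sketch (not part of the generated module). This operation group is normally
# reached through the async NetworkManagementClient rather than constructed directly;
# the resource names below are hypothetical:
#   client = NetworkManagementClient(credential, subscription_id)
#   peering = await client.express_route_circuit_peerings.get(
#       'my-rg', 'my-circuit', 'AzurePrivatePeering')
#   poller = await client.express_route_circuit_peerings.begin_delete(
#       'my-rg', 'my-circuit', 'AzurePrivatePeering')
#   await poller.result()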
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/aio/operations/_express_route_circuit_peerings_operations.py | Python | mit | 21,960 |