# class DjangoStudent():
# def __init__(self, name, laptop):
# self.name = name
# self.computer = laptop
# mystudent = DjangoStudent("Ejiro", "Macbook")
# print(mystudent.name)
# print(mystudent.computer)
# class car():
# def __init__(self, brand, price):
# self.brand = brand
# self.price = price
# mystudent = car("Lexus", "$100000")
# print(mystudent.brand)
# print(mystudent.price)
# class BankApp():
# def __init__(self, name, balance):
# if not isinstance(balance, (int, float)):
# raise TypeError(f'Expected int or float but got {type(balance)}')
# self.name = name
# self.balance = balance
# def deposit(self, amount):
# self.balance += amount
# return self.balance
# def name_tolower(self):
# self.name = self.name.lower()
# return self.name
# customer1 = BankApp('Tunde', 105.44)
# print(customer1.name_tolower())
# print(customer1.deposit(1000))
# class vehicle():
# def __init__(self, max_speed, mileage):
# self.max_speed = max_speed
# self.mileage = mileage
# def acceleration(self, time):
# a = self.mileage*2/time**2
# return a
# car1 = vehicle(15, 250)
# a = car1.acceleration(10)
# print(a)
# print(car1.max_speed)
# print(car1.mileage)
class Employee():
def __init__(self, name, salary, designation):
self.name = name
self.salary = salary
self.designation = designation
@property
def bonus(self):
return (self.salary * 0.1) + self.salary
def report(self):
return f"Hi {self.name}. Your take home salary is {self.bonus}"
a = Employee('Tosin', 10, 'Q/A')
print(a.report())
class Supervisors(Employee):
def __init__(self, name, salary, designation, branch):
self.branch = branch
super().__init__(name, salary, designation)
# def bonus(self):
# return self.salary + (self.salary * 0.17)
b = Supervisors('Tunde', 10000, 'Accountant', 'Sabo')
print(b.bonus)
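# Note (a sketch, not part of the original file): if Supervisors should override
# the bonus calculation, the override must also be declared as a @property;
# otherwise `b.bonus` would evaluate to a bound method rather than a number.
# class Supervisors(Employee):
#     @property
#     def bonus(self):
#         return self.salary + (self.salary * 0.17)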
|
class plugin:
handle = "plugin"
method = "string"
do_init = True
def init( self ):
print( "Plugin initialised" )
    def run( self, server, nick, channel, message ):
if channel[0] == "#":
reply_to = channel
else:
reply_to = nick
server.send_message( reply_to, "You posted \"%s\" to %s." % ( message[:-1], channel ))
|
import os
import sys
import argparse
# Default values inferred from the help strings below (assumption: the original
# module defined DEFAULTS elsewhere).
DEFAULTS = {'port': 5000, 'host': 'localhost', 'config_file': 'conf.py'}
def parse_arguments(argv):
parser = argparse.ArgumentParser(
description='Start Aurora server on http://<host>:<port>'
)
parser.add_argument(dest='path', nargs='?',
help='Path where to find the Aurora content',
default='.')
parser.add_argument('-p', '--port', dest='port',
help='Port to serve HTTP files at (default: 5000)',
default=DEFAULTS['port'])
parser.add_argument('-H', '--host', dest='host',
help='IP to serve HTTP files at (default: localhost)',
default=DEFAULTS['host'])
parser.add_argument('-c', '--config', dest='config',
help='Config file for the application (default: conf.py)',
default=DEFAULTS['config_file'])
args = parser.parse_args(argv)
return args
def main(argv=None):
    # TODO: need to add which directory to poll
    # I think we might actually just want to point to a config.py or something
    if argv is None:
        argv = sys.argv[1:]
    # The original built a bare ArgumentParser here, so args.port/args.host did
    # not exist; route through parse_arguments instead.
    args = parse_arguments(argv)
port = args.port if args.port else DEFAULTS['port']
host = args.host if args.host else DEFAULTS['host']
print(f'Running Aurora on http://{host}:{port}')
|
from django.apps import AppConfig
class VidhubConfig(AppConfig):
name = 'vidhub'
|
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
sys.path.append(path.dirname(path.dirname(path.abspath(path.dirname(__file__)))))
from hust_sc_gantry import beamline_phase_ellipse_multi_delta
from work.optim.A04geatpy_problem import *
from work.optim.A04run import *
from cctpy import *
if __name__ == '__main__':
BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main()
bl = create_gantry_beamline([
# -5.0,-10.0,-98.2,-96.5,103.0,102.8,81.3,71.0,60.1,85.9,-7267.0,10599.9,25.0,19.0
# 3.6, 7.8 , 30.4, -70.8, 83.1 , 88.4, 85.1 , 90.8, 76.5 , 82.2, - 14216.6, 10375.6, 17.0, 16.0
# -1.6 , 8.3 , 57.7 , -29.9 , 89.9 ,76.2 , 90.3 , 92.1 , 82.1 , 90.3 , -9486.8 , 10766.7 , 23.0 , 22.0
# 3.6, 7.8 , 30.4 , -70.8 , 83.1 , 88.4 , 85.1 ,90.8 , 76.5 ,82.2 ,-14216.6 , 10375.6 , 17.0 , 16.0
# -1.0 , 6.8 , 51.2 , -42.8 , 82.4 , 92.7 , 88.6 , 97.2 , 74.6 , 99.5 ,-9577.0 ,12469.0 , 20.0 ,19.0 , 1.6 ,0.5 , 0.4 , 0.3 , 0.2
# -2.5 , 9.3 , -68.7 , 39.6 ,81.6 , 74.0, 97.0, 99.6 , 96.4 , 98.9 , -9662.6 ,12395.0 ,25.0 , 24.0 ,1.6 ,0.5 , 0.5 , 0.3, 0.2 ,
-2.6 , 9.2 , -64.5 , 60.5 , 81.5 , 74.0 , 96.1, 100.0 , 96.3 , 99.6 , -9673.1 , 12390.0, 25.0 , 24.0 ,1.6 , 0.5 , 0.5 , 0.3, 0.2
])
beamline_phase_ellipse_multi_delta(
bl, 8, [-0.05, 0, 0.05]
)
|
def pillow(arr):
s1, s2 = arr
return bool({i for i, a in enumerate(s1) if a == 'n'} &
{i for i, b in enumerate(s2) if b == 'B'})
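# Example (illustrative): pillow(("snap", "aBcd")) is True because 'n' appears at
# index 1 of "snap" and 'B' appears at index 1 of "aBcd", so the intersection of
# the two index sets is non-empty.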
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Huh, the ffs and clz symbols appear not to be there with nm, but ctypes finds it::
simon:opticks blyth$ nm /usr/lib/libc++.1.dylib | grep ffs
simon:opticks blyth$ nm /usr/lib/libc++.1.dylib | c++filt | grep ffs
simon:opticks blyth$ nm /usr/lib/libc.dylib | grep ffs
simon:opticks blyth$
From one of the many libs of libSystem::
simon:opticks blyth$ otool -L /usr/lib/libSystem.B.dylib
/usr/lib/libSystem.B.dylib:
/usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1197.1.1)
/usr/lib/system/libcache.dylib (compatibility version 1.0.0, current version 62.0.0)
/usr/lib/system/libcommonCrypto.dylib (compatibility version 1.0.0, current version 60049.0.0)
/usr/lib/system/libcompiler_rt.dylib (compatibility version 1.0.0, current version 35.0.0)
/usr/lib/system/libcopyfile.dylib (compatibility version 1.0.0, current version 103.92.1)
/usr/lib/system/libcorecrypto.dylib (compatibility version 1.0.0, current version 1.0.0)
/usr/lib/system/libdispatch.dylib (compatibility version 1.0.0, current version 339.92.1)
...
::
In [77]: map(cpp.fls, [0x1 << n for n in range(22)])
Out[77]: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
::
In [97]: map(cpp.fls,[0x1,0x1f,0x1ff,0x1fff,0x1ffff,0x1fffff,0x1ffffff,0x1fffffff,0x1ffffffff,0x1fffffffff,0x1fffffffffffffff])
Out[97]: [1, 5, 9, 13, 17, 21, 25, 29, 32, 32, 32]
::
simon:opticks blyth$ nm /usr/lib/system/libcompiler_rt.dylib | grep clz
0000000000005ded S $ld$hide$os10.4$___clzdi2
0000000000005def S $ld$hide$os10.4$___clzsi2
0000000000005df1 S $ld$hide$os10.4$___clzti2
0000000000005dee S $ld$hide$os10.5$___clzdi2
0000000000005df0 S $ld$hide$os10.5$___clzsi2
0000000000005df2 S $ld$hide$os10.5$___clzti2
0000000000001fc5 T ___clzdi2
0000000000001fe2 T ___clzsi2
000000000000205c T ___clzti2
simon:opticks blyth$
simon:opticks blyth$
simon:opticks blyth$ nm /usr/lib/system/libcompiler_rt.dylib | grep ffs
0000000000005e0b S $ld$hide$os10.4$___ffsdi2
0000000000005e0d S $ld$hide$os10.4$___ffsti2
0000000000005e0c S $ld$hide$os10.5$___ffsdi2
0000000000005e0e S $ld$hide$os10.5$___ffsti2
0000000000002d77 T ___ffsdi2
0000000000002d94 T ___ffsti2
simon:opticks blyth$
"""
import ctypes
try:
cpp = ctypes.cdll.LoadLibrary('libc++.1.dylib')
except OSError:
# /usr/lib64/libstdc++.so.6
cpp = ctypes.cdll.LoadLibrary('libstdc++.so.6')
pass
try:
rt = ctypes.cdll.LoadLibrary('/usr/lib/system/libcompiler_rt.dylib')
except OSError:
rt = ctypes.cdll.LoadLibrary('librt.so.1')
cppffs_ = lambda _:cpp.ffs(_)
cppfls_ = lambda _:cpp.fls(_)
ffs_ = lambda x:(x&-x).bit_length()
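# A pure-Python fls analogue for cross-checking cpp.fls (a sketch: bit_length
# saturated at the 32-bit word size, matching the libc++ transcripts above).
fls32_ = lambda x: min(x.bit_length(), 32)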
def clz_(x):
"""
https://en.wikipedia.org/wiki/Find_first_set
"""
    n = 0
if x == 0: return 32
while (x & 0x80000000) == 0:
n += 1
x <<= 1
pass
return n
def test_clz():
print(" %10s : %6s %6s %6s " % ("test_clz", "clz", "32-clz", "fls"))
for i in [0x0,0xf,0xff,0xfff,0xffff,0xfffff,0xffffff,0xfffffff,0xffffffff]:
c = clz_(i)
f = cppfls_(i)
print(" %10x : %6u %6u %6u " % (i, c, 32-c, f))
def test_ffs():
for i in range(-1,17):
n = 0x1 << i if i > -1 else 0
print(" i %2d n:%8d n:0x%5x cpp.ffs_: %2d ffs_: %2d " % (i, n, n, cppffs_(n), ffs_(n) ))
if __name__ == '__main__':
test_ffs()
#test_clz()
|
from typing import Dict, Sequence
import math
import os
import logging
from itertools import chain
import imageio
import numpy as np
from osmo_camera import tiff, raw, rgb
from osmo_camera.file_structure import (
create_output_directory,
get_files_with_extension,
datetime_from_filename,
)
def generate_summary_images(
raw_image_paths: Sequence[str],
ROI_definitions: Dict[str, tuple],
raw_images_dir: str,
) -> str:
""" Pick some representative images and draw ROIs on them for reference
Args:
raw_image_paths: A list of paths to raw image files
ROI_definitions: Definitions of Regions of Interest (ROIs) to summarize. A map of {ROI_name: ROI_definition}
Where ROI_definition is a 4-tuple in the format provided by cv2.selectROI: (start_col, start_row, cols, rows)
raw_images_dir: The directory of images to process
Returns:
The name of the directory where the summary images are saved
"""
summary_images_dir = create_output_directory(raw_images_dir, "summary images")
# Pick a representative sample of images (assumes images are prefixed with iso-ish datetimes)
raw_image_paths = sorted(raw_image_paths)
sample_image_paths = [
raw_image_paths[0], # First
raw_image_paths[math.floor(len(raw_image_paths) / 2)], # Middle
raw_image_paths[-1], # Last
]
# Draw ROIs on them and save
for image_path in sample_image_paths:
rgb_image = raw.open.as_rgb(image_path)
rgb_image_with_ROIs = rgb.annotate.draw_ROIs_on_image(
rgb_image, ROI_definitions
)
        # Save in new directory, with the same name but as a .tiff.
filename_root, extension = os.path.splitext(os.path.basename(image_path))
summary_image_path = os.path.join(summary_images_dir, f"{filename_root}.tiff")
tiff.save.as_tiff(rgb_image_with_ROIs, summary_image_path)
print(f"Summary images saved in: {summary_images_dir}\n")
return summary_images_dir
def get_experiment_image_filepaths(
local_sync_directory_path, experiment_directories=None
):
""" Get a list of all .jpeg files in a list of experiment directories.
Args:
local_sync_directory_path: The path to the local directory where images are synced.
experiment_directories: Optional. A list of experiment directory names to search for images. Defaults to None.
If experiment_directories is None, search for images in local_sync_directory_path.
Return:
A list of paths to all .jpeg images in the provided experiment directories.
"""
if experiment_directories is None:
experiment_directories = [""]
all_filepaths = [
get_files_with_extension(
os.path.join(local_sync_directory_path, experiment_directory), ".jpeg"
)
for experiment_directory in experiment_directories
]
return list(chain(*all_filepaths))
def scale_image(PIL_image, image_scale_factor):
""" Scale a PIL image, multiplying dimensions by a given scale factor.
Args:
PIL_image: A PIL image to be scaled.
image_scale_factor: The multiplier used to scale the image width and height.
"""
width, height = PIL_image.size
return PIL_image.resize(
(round(width * image_scale_factor), round(height * image_scale_factor))
)
def _annotate_image(rgb_image, ROI_definitions, show_timestamp, filepath):
image_with_ROIs = rgb.annotate.draw_ROIs_on_image(rgb_image, ROI_definitions)
if show_timestamp:
timestamp = datetime_from_filename(os.path.basename(filepath))
return rgb.annotate.draw_text_on_image(image_with_ROIs, str(timestamp))
return image_with_ROIs
def _open_filter_annotate_and_scale_image(
filepath, ROI_definitions, image_scale_factor, color_channels, show_timestamp
):
rgb_image = raw.open.as_rgb(filepath)
filtered_image = rgb.filter.select_channels(rgb_image, color_channels)
annotated_image = _annotate_image(
filtered_image, ROI_definitions, show_timestamp, filepath
)
PIL_image = rgb.convert.to_PIL(annotated_image)
scaled_image = scale_image(PIL_image, image_scale_factor)
return np.array(scaled_image)
def generate_summary_gif(
filepaths,
ROI_definitions,
name="summary",
image_scale_factor=0.25,
color_channels="rgb",
show_timestamp=True,
):
""" Compile a list of images into a summary GIF with ROI definitions overlayed.
Saves GIF to the current working directory.
Args:
filepaths: List of image file names to be compiled into the GIF.
ROI_definitions: A map of {ROI_name: ROI_definition}
Where ROI_definition is a 4-tuple in the format provided by cv2.selectROI:
(start_col, start_row, cols, rows)
name: Optional. String name of the file to be saved. Defaults to 'summary'
image_scale_factor: Optional. Number multiplier used to scale images to adjust file size. Defaults to 1/4.
color_channels: Optional. Lowercase string of rgb channels to show in the output image. Defaults to 'rgb'.
show_timestamp: Optional. Boolean indicating whether to write image timestamps in output GIF
Defaults to True.
Returns:
The name of the GIF file that was saved.
"""
output_filename = f"{name}.gif"
images = [
_open_filter_annotate_and_scale_image(
filepath,
ROI_definitions,
image_scale_factor,
color_channels,
show_timestamp,
)
for filepath in filepaths
]
imageio.mimsave(output_filename, images)
return output_filename
def generate_summary_video(
filepaths,
ROI_definitions,
name="summary",
image_scale_factor=1,
color_channels="rgb",
show_timestamp=True,
fps=1,
):
""" Compile a list of images into a summary video with ROI definitions overlayed.
Saves video to the current working directory.
Args:
filepaths: List of image file names to be compiled into the video.
ROI_definitions: A map of {ROI_name: ROI_definition}
Where ROI_definition is a 4-tuple in the format provided by cv2.selectROI:
(start_col, start_row, cols, rows)
name: Optional. String name of the file to be saved. Defaults to 'summary'
image_scale_factor: Optional. Number multiplier used to scale images to adjust file size. Defaults to 1.
color_channels: Optional. Lowercase string of rgb channels to show in the output image. Defaults to 'rgb'.
show_timestamp: Optional. Boolean indicating whether to write image timestamps in output video.
Defaults to True.
fps: Optional. Integer video frames-per-second. Defaults to 1.
Returns:
The name of the summary video file that was saved
"""
output_filename = f"{name}.mp4"
writer = imageio.get_writer(output_filename, fps=fps)
# Suppress a warning message about shoehorning image dimensions into mpeg block sizes
logger = logging.getLogger("imageio_ffmpeg")
logger.setLevel(logging.ERROR)
for filepath in filepaths:
prepared_image = _open_filter_annotate_and_scale_image(
filepath,
ROI_definitions,
image_scale_factor,
color_channels,
show_timestamp,
)
writer.append_data(prepared_image)
writer.close()
logger.setLevel(logging.WARNING)
return output_filename
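# Illustrative usage (hypothetical paths and ROI values, not from the original module):
#   ROI_definitions = {"sensor": (10, 10, 100, 100)}  # (start_col, start_row, cols, rows)
#   filepaths = get_experiment_image_filepaths("/data/synced", ["experiment_1"])
#   generate_summary_gif(filepaths, ROI_definitions, name="experiment_1_summary")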
|
import client
import datetime
def sync_saved(sc, all_playlists):
"""Sync with changes made to saved tracks."""
print('=== Syncing Saved Tracks ===')
saved_tracks = set(sc.get_all_saved_tracks())
playlist_all_uri = all_playlists['all']
playlist_all_tracks, _ = sc.get_all_songs_in_playlist(playlist_all_uri)
playlist_all_tracks = set(playlist_all_tracks)
# Sync newly added tracks.
new_tracks = list(saved_tracks - playlist_all_tracks)
print(f'{len(new_tracks)} new tracks\nNew tracks: {new_tracks}')
if len(new_tracks) > 0:
sc.add_tracks_to_playlist(playlist_all_uri, new_tracks)
sc.add_tracks_to_playlist(all_playlists['current'], new_tracks)
# Sync removed tracks.
removed_tracks = list(playlist_all_tracks - saved_tracks)
print(f'{len(removed_tracks)} removed tracks\nRemoved tracks: {removed_tracks}')
if len(removed_tracks) > 0:
playlist_uris = [uris for _, uris in all_playlists.items()]
sc.remove_tracks_from_all_playlists(playlist_uris, removed_tracks)
sc.add_tracks_to_playlist(all_playlists['retention'], removed_tracks)
def sync_current(sc, all_playlists):
"""Sync the 'current' playlist."""
print('=== Syncing Playlist Current ===')
playlist_current_uri = all_playlists['current']
tracks, dates = sc.get_all_songs_in_playlist(playlist_current_uri)
today = datetime.date.today()
expired_songs = [
track for track, date in zip(tracks, dates)
        if (today - datetime.datetime.strptime(date, '%Y-%m-%d').date()) > datetime.timedelta(weeks=4)
]
print(f'{len(expired_songs)} expired songs\nExpired songs: {expired_songs}')
sc.remove_tracks_from_playlist(playlist_current_uri, expired_songs)
def main():
dt = datetime.datetime.now()
print(f'[{str(dt)}] Starting sync')
sc = client.get_spotify_client()
all_playlists = sc.get_all_owned_playlists()
sync_saved(sc, all_playlists)
    print('Finished sync')
if __name__ == '__main__':
main()
|
def digitsum(n):
return sum([int(i) for i in str(n)])
multiplier = list(range(1,100))
nums = [1 for i in multiplier]
maxsum = 0
for i in multiplier:
for j, val in enumerate(nums):
temp = val * multiplier[j]
nums[j] = temp
digits = digitsum(temp)
if digits > maxsum:
maxsum = digits
# print(nums)
print(maxsum)
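# Cross-check (a sketch): after the i-th outer pass, nums[j] == multiplier[j]**i,
# so maxsum is just the maximum digit sum of a**b for 1 <= a, b <= 99.
assert maxsum == max(digitsum(a ** b) for a in range(1, 100) for b in range(1, 100))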
|
# How many circular primes below 1,000,000? Circular if all rotations of digits are prime.
# ====================================================================================
# Want to grasp this one:
import eulerlib
def compute():
isprime = eulerlib.list_primality(999999) # List of True, False,.... for whether each number is prime.
def is_circular_prime(n):
s = str(n)
# Returns true if all elements pass a test:
return all(isprime[int(s[i : ] + s[ : i])] for i in range(len(s))) # Cycling
ans = sum(1 # Ahhh instead of i for i in .... we're just adding 1 each time
for i in range(len(isprime))
if is_circular_prime(i))
return str(ans)
if __name__ == "__main__":
print(compute())
# n = 125
# s = str(n)
# print(int(s[2: ] + s[ : 2]))
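# Self-contained alternative (a sketch, assuming a plain sieve of Eratosthenes
# reproduces eulerlib.list_primality):
# def list_primality(n):
#     result = [True] * (n + 1)
#     result[0] = result[1] = False
#     for i in range(2, int(n ** 0.5) + 1):
#         if result[i]:
#             for j in range(i * i, n + 1, i):
#                 result[j] = False
#     return result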
|
#!/usr/bin/env python
"""
OTFMaker.py: Module for creating basic on-the-fly primitives. Provides a basic class hierarchy
for constructing geometry from streams of vertex coordinates and triangle faces.
"""
__author__ = "John McDermott"
__email__ = "JFMcDermott428@gmail.com"
__version__ = "1.0.0"
__status__ = "Development"
import numpy as np
import viz
EPSILON = 0.000001 # splitting constant for floating-point stability
def get_box(x,y,z):
"""
This function returns an on-the-fly box made with viz.TRIANGLE primitives. The box is
centered about 0,0,0 with side lengths of x,y,z
"""
# get box vertex coordinates
box_coords = get_box_coords(x,y,z)
# create OTF box geometry
viz.startLayer(viz.TRIANGLES)
for idx in [0,5,4,5,0,1,3,7,6,3,6,2,7,4,6,6,4,5,2,1,3,3,1,0,3,0,7,7,0,4,6,5,2,2,5,1]:
viz.vertex(box_coords[idx])
return viz.endLayer()
def get_box_coords(x,y,z):
"""
This function returns vertex coordinates of a box centered about 0,0,0 with side
lengths of x,y,z
"""
# vertex coordinates of the box
return ((-0.5*x, -0.5*y, -0.5*z ), ( 0.5*x, -0.5*y, -0.5*z ), ( 0.5*x, 0.5*y, -0.5*z ),
(-0.5*x, 0.5*y, -0.5*z ), (-0.5*x, -0.5*y, 0.5*z ), ( 0.5*x, -0.5*y, 0.5*z ),
( 0.5*x, 0.5*y, 0.5*z ), (-0.5*x, 0.5*y, 0.5*z ))
def get_screen_quad(h,w,z_offs=0.0):
"""
This function returns a rectangular on-the-fly quad object parented to the screen
"""
viz.startLayer(viz.QUADS)
viz.vertex(0,0,z_offs)
viz.vertex(0,h,z_offs)
viz.vertex(w,h,z_offs)
viz.vertex(w,0,z_offs)
return viz.endLayer(parent=viz.SCREEN)
class OTF:
"""
Base class for loading on the fly primitives from sequences of vertices and triangle-faces
"""
def __init__(self, verts, faces):
viz.startLayer(viz.TRIANGLE_STRIP)
for f in faces:
viz.vertex(verts[f[0]].tolist())
viz.vertex(verts[f[1]].tolist())
viz.vertex(verts[f[2]].tolist())
self.otf = viz.endLayer()
def drawOrder(self, order, node='', bin=viz.BIN_OPAQUE, op=viz.OP_DEFAULT):
self.otf.drawOrder(order, node, bin, op)
def color(self, c):
self.otf.color(c)
def disable(self, VIZ_FLAG):
self.otf.disable(VIZ_FLAG)
def dynamic(self):
self.otf.dynamic()
def parent(self, p):
self.otf.parent(p)
def blendFunc(self, src, dst):
self.otf.blendFunc(src, dst)
def apply(self, shader):
self.otf.apply(shader)
def texture(self, tex, unit=0):
self.otf.texture(tex, unit=unit)
def getNormal(self, idx):
return self.otf.getNormal(idx)
def setNormal(self, idx, nx, ny, nz):
self.otf.setNormal(idx,nx,ny,nz)
def getVisible(self):
return self.otf.getVisible()
def visible(self, VIZ_FLAG):
self.otf.visible(VIZ_FLAG)
def remove(self):
self.otf.remove()
def getVertex(self, n):
return self.otf.getVertex(n)
def getVertexCount(self):
return self.otf.getVertexCount()
def setVertex(self, n, v):
return self.otf.setVertex(n,v)
class SlicePlane(OTF):
"""
    A dynamic on-the-fly primitive used by the ViewAlignedPlanes class. Given a list of
viz.TRIANGLE vertex coordinates, vertices will be added or deleted from the geometry
as necessary.
"""
def __init__(self, vCoordList=None):
viz.startLayer(viz.TRIANGLES)
for i in range(6):
viz.vertex(0,0,0)
self.otf = viz.endLayer()
if vCoordList is not None:
self.setVertexCoords(vCoordList)
def setVertexCoords(self, vCoordList):
if self.getVertexCount() > len(vCoordList):
self.otf.clearVertices()
for n, vCoord in enumerate(vCoordList):
if n > self.getVertexCount() - 1:
self.otf.addVertex(vCoord[0], vCoord[1], vCoord[2])
else:
self.otf.setVertex(n, vCoord[0], vCoord[1], vCoord[2])
class ViewAlignedPlanes:
"""
A set of view-aligned slice polygons for sampling and rendering scalar data volumes.
Updating the ViewAlignedPlanes realigns all planes to be orthogonal to the given view vector.
"""
def __init__(self, xs=512, ys=512, zs=512, n_slices=256):
# number of slices currently used
self.n_slices = n_slices
# 3D image scale dimensions
self.xs, self.ys, self.zs = float(xs), float(ys), float(zs)
        # max dimension (the other two are normalized with respect to this dimension
        # to give a max bounding box side length of 1)
self.ms = max(self.xs, self.ys, self.zs )
# vertex order for openGL triangle fan OTF object
self.otf_indices = [0, 1, 2, 0, 2, 3, 0, 3, 4, 0, 4, 5]
# list to store triangle slice coordinates
self.sliceCoordList = []
        # This list holds the 8 vertex coordinates of the bounding box, which is
        # centered at the origin; the side lengths of the box are normalized with
        # respect to the longest side, which is scaled to 1.
# self.vertexList = np.array(get_box_coords(xs/self.ms, ys/self.ms, zs/self.ms))
self.vertexList = np.array(get_box_coords(1.0,1.0,1.0))
# This list contains vertex index pairs for all edges. For example, the first
# element in the list is [0,1] which means that edge 0 connects bounding box
        # vertices v0 and v1.
self.edges = np.array(
[[0,1],[1,2],[2,3],[3,0],[0,4],[1,5],[2,6],[3,7],[4,5],[5,6],[6,7],[7,4]]
)
# From the nearest vertex v on the bounding box, there are three unique paths to
# the farthest vertex from the camera. edgeList holds all possible edge paths for
# all 8 front-facing vertex conditions.
self.edgeList = np.array([
[0, 1, 5, 6, 4, 8, 11, 9, 3, 7, 2, 10], # edge paths when v = v0
[0, 4, 3, 11, 1, 2, 6, 7, 5, 9, 8, 10], # edge paths when v = v1
[1, 5, 0, 8, 2, 3, 7, 4, 6, 10, 9, 11], # edge paths when v = v2
[7, 11, 10, 8, 2, 6, 1, 9, 3, 0, 4, 5], # edge paths when v = v3
[8, 5, 9, 1, 11, 10, 7, 6, 4, 3, 0, 2], # edge paths when v = v4
[9, 6, 10, 2, 8, 11, 4, 7, 5, 0, 1, 3], # edge paths when v = v5
[9, 8, 5, 4, 6, 1, 2, 0, 10, 7, 11, 3], # edge paths when v = v6
[10, 9, 6, 5, 7, 2, 3, 1, 11, 4, 8, 0] # edge paths when v = v7
])
# compute vertex intersection positions and create slice plane geometry list
self.update_vertices(viz.MainView)
self.slicePlaneList = [SlicePlane(v) for v in self.sliceCoordList]
def __iter__(self):
""" the ViewAlignedPlanes object can be iterated over to get its SlicePlanes """
for slicePlane in self.slicePlaneList:
yield slicePlane
def align(self, view):
# compute vertex intersection positions
self.update_vertices(view)
# update texture slice geometry
for n, vCoordList in enumerate(self.sliceCoordList):
self.slicePlaneList[n].setVertexCoords(vCoordList)
def apply(self, shader):
""" apply shader to geometery """
for slicePlane in self:
slicePlane.apply(shader)
def texture(self, tex, unit=0):
""" apply texture to geometery """
for slicePlane in self:
slicePlane.texture(tex, unit)
def drawOrder(self, order, node='', bin=viz.BIN_OPAQUE, op=viz.OP_DEFAULT):
for slicePlane in self:
slicePlane.drawOrder(order, node, bin, op)
def disable(self, VIZ_FLAG):
for slicePlane in self:
slicePlane.disable(VIZ_FLAG)
def dynamic(self):
for slicePlane in self:
slicePlane.dynamic()
def blendFunc(self, src, dst):
for slicePlane in self:
slicePlane.blendFunc(src, dst)
def update_vertices(self, view):
"""
This function updates the slice plane vertex coordinates contained by the vertexBuffer
        of the ViewAlignedPlanes such that all planes are aligned orthogonally to the viewing
vector. Intersection tests are performed for each plane with the bounding box sampling
region and the geometry is clipped accordingly by further relocating up to 3 vertices
to degenerate positions.
"""
        # get the min and max distances between bounding box vertices and the view vector
viewMat = view.getMatrix()
viewVector = np.array([viewMat.val(2,0), viewMat.val(2,1), viewMat.val(2,2)])
viewVector = (viewVector /np.linalg.norm(viewVector))
self.viewDir = viewVector
max_dist = np.dot(viewVector, self.vertexList[0])
min_dist = max_dist
max_index = 0
for i in range(1,8):
dist = np.dot(viewVector, self.vertexList[i])
if dist > max_dist:
max_dist = dist
max_index = i
if dist < min_dist:
min_dist = dist
# stabilize flops with machine epsilon expansion
min_dist -= EPSILON
max_dist += EPSILON
# initialize direction vector matrices and lambda intersection value vectors
L = np.zeros(12)
iL = np.zeros(12)
vecDir = np.zeros((12,3))
vecSta = np.zeros((12,3))
# set the minimum distance as the plane_dist
plane_dist = min_dist
# get the plane increment
plane_dist_inc = (max_dist-min_dist)/(float(self.n_slices))
# populate with direction vectors and intersection values for all edges
for i in range(12):
# get the start position vertex and view vectors via LUT
vecSta[i] = self.vertexList[self.edges[self.edgeList[max_index][i]][0]]
vecDir[i] = self.vertexList[self.edges[self.edgeList[max_index][i]][1]]-vecSta[i]
# dot the view vector with the direction vector
denom = np.dot(vecDir[i], viewVector)
# get plane intersection values
if denom != 0.0:
L [i] = ((plane_dist - np.dot(vecSta[i], viewVector)) / denom)
iL[i] = (plane_dist_inc / denom)
else:
L [i] = -1.0
iL[i] = 0.0
intersection = np.zeros((6,3))
dL = np.zeros(12)
self.sliceCoordList = []
for i in reversed(range(self.n_slices)):
self.sliceCoordList.append([])
# determine the lambda value for all edges
for e in range(12):
dL[e] = L[e] + (i*iL[e])
# perform intersection checking for all 12 edges
if((dL[0] >= 0.0) and (dL[0] < 1.0)):
intersection[0] = vecSta[0] + dL[0] * vecDir[0]
elif((dL[1] >= 0.0) and (dL[1] < 1.0)):
intersection[0] = vecSta[1] + dL[1] * vecDir[1]
elif((dL[3] >= 0.0) and (dL[3] < 1.0)):
intersection[0] = vecSta[3] + dL[3] * vecDir[3]
else:
continue
if((dL[2] >= 0.0) and (dL[2] < 1.0)):
intersection[1] = vecSta[2] + dL[2] * vecDir[2]
elif((dL[0] >= 0.0) and (dL[0] < 1.0)):
intersection[1] = vecSta[0] + dL[0] * vecDir[0]
elif((dL[1] >= 0.0) and (dL[1] < 1.0)):
intersection[1] = vecSta[1] + dL[1] * vecDir[1]
else:
intersection[1] = vecSta[3] + dL[3] * vecDir[3]
if ((dL[4] >= 0.0) and (dL[4] < 1.0)):
intersection[2] = vecSta[4] + dL[4] * vecDir[4]
elif((dL[5] >= 0.0) and (dL[5] < 1.0)):
intersection[2] = vecSta[5] + dL[5] * vecDir[5]
else:
intersection[2] = vecSta[7] + dL[7] * vecDir[7]
if((dL[6] >= 0.0) and (dL[6] < 1.0)):
intersection[3] = vecSta[6] + dL[6] * vecDir[6]
elif((dL[4] >= 0.0) and (dL[4] < 1.0)):
intersection[3] = vecSta[4] + dL[4] * vecDir[4]
elif((dL[5] >= 0.0) and (dL[5] < 1.0)):
intersection[3] = vecSta[5] + dL[5] * vecDir[5]
else:
intersection[3] = vecSta[7] + dL[7] * vecDir[7]
if((dL[8] >= 0.0) and (dL[8] < 1.0)):
intersection[4] = vecSta[8] + dL[8] * vecDir[8]
elif((dL[9] >= 0.0) and (dL[9] < 1.0)):
intersection[4] = vecSta[9] + dL[9] * vecDir[9]
else:
intersection[4] = vecSta[11] + dL[11] * vecDir[11]
if((dL[10]>= 0.0) and (dL[10]< 1.0)):
intersection[5] = vecSta[10] + dL[10] * vecDir[10]
elif((dL[8] >= 0.0) and (dL[8] < 1.0)):
intersection[5] = vecSta[8] + dL[8] * vecDir[8]
elif((dL[9] >= 0.0) and (dL[9] < 1.0)):
intersection[5] = vecSta[9] + dL[9] * vecDir[9]
else:
intersection[5] = vecSta[11] + dL[11] * vecDir[11]
# add vertex positions to main slice vertex buffer
for tri_index in self.otf_indices:
self.sliceCoordList[-1].append([v for v in intersection[tri_index]])
#------------------------------------------------------------------------------------------
# Unit Test
#------------------------------------------------------------------------------------------
if __name__ == '__main__':
    import vizcam
    import vizact
viz.go()
# draggable pivot view camera
pivotNav = vizcam.PivotNavigate(center=[0,0,0])
sb = ViewAlignedPlanes(n_slices=32)
vizact.onkeyup('f', sb.align, viz.MainView)
|
import requests
import sys
from argparse import Action
from argparse import ArgumentParser
class SanitizeInput(Action):
def __call__(self, parser, namespace, values, option_string=None):
if 'http' in values or values.startswith('/'):
setattr(namespace, self.dest, values)
elif 'file' in values:
setattr(namespace, self.dest, values.split('file://')[1])
else:
raise ValueError('Input file must be http://, https:// or file://.')
def build_parser():
parser = ArgumentParser()
parser.add_argument('-i', '--input', action=SanitizeInput, default=None, help='Input URL or local file path.')
parser.add_argument('-f', '--find', action='store', default='', help='String to find in log.')
parser.add_argument('-o', '--output', action='store', default=None, help='Output file name.')
parser.add_argument('-s', '--split', action='store', default='', help='String to split matching log entries on.')
return parser
def download_log_from_url(url):
response = requests.get(url)
if response.status_code == 200:
return response.text.rstrip().split('\r\n')
return None
def open_log_from_local_file(path):
    with open(path, 'r') as local_logs:
        logs = local_logs.read().rstrip().split('\n')
    return logs
def main(args):
parser = build_parser()
    parsed_args, remainder = parser.parse_known_args(args)
    if parsed_args.input and 'http' in parsed_args.input:
        logs = download_log_from_url(parsed_args.input) or []
    elif parsed_args.input:
        # SanitizeInput has already stripped any file:// prefix, leaving a local path
        logs = open_log_from_local_file(parsed_args.input)
else:
logs = []
results = []
search_term = parsed_args.find
for entry in logs:
if search_term in entry:
results.append(entry)
output_file_name = parsed_args.output
split_on = parsed_args.split or ''
if output_file_name is not None:
with open(output_file_name, 'w'):
pass
if results:
for result in results:
if len(split_on) > 1:
sanitized_result = result.split(split_on)[1]
elif len(split_on) == 1:
sanitized_result = result[result.find(split_on):]
else:
sanitized_result = result
if output_file_name is not None:
with open(output_file_name, 'a') as results_file:
results_file.write(''.join([sanitized_result, '\n']))
else:
print(sanitized_result)
if __name__ == '__main__':
# parser = build_parser()
# parsed_args, remainder = parser.parse_known_args()
# print(parsed_args)
main(sys.argv[1:])
|
#%%
from PyQt5.QtCore import Qt, QSize
from PyQt5.QtGui import QIcon, QFont, QColor
from PyQt5 import QtWidgets
from utils import utils
from config import ConfigConstants
from config.Language import language as LG
from globalObjects import GlobalData
import numpy as np
import pickle
class ParameterDialog:
def __init__(self):
super(ParameterDialog, self).__init__()
self.parameter_dialog = QtWidgets.QDialog()
self.parameter_dialog.setObjectName("parameter_dialog")
self.parameter_dialog.resize(610, 500)
self.parameter_dialog.setWindowTitle(LG("camera parameters"))
self.parameter_dialog.setWindowIcon(QIcon(utils.get_path(["iconImages", "PyroVision.png"])))
self.mainLayout = QtWidgets.QVBoxLayout(self.parameter_dialog)
self.mainLayout.setObjectName("mainLayout")
########Full text label: rolling shutter########
self.textExplain1 = QtWidgets.QTextEdit(self.parameter_dialog)
self.textExplain1.setObjectName("textExplain1")
self.textExplain1.setText(LG("camera text"))
self.textExplain1.setMinimumSize(QSize(605, 80))
self.textExplain1.setMaximumSize(QSize(605, 80))
self.textExplain1.setReadOnly(True)
self.textExplain1.setStyleSheet("QTextEdit {background-color: rgb(240, 240, 240);}")
self.mainLayout.addWidget(self.textExplain1)
########selected calibration########
# self.calibrationObject = subject_found[0]
# self.calibrationMethod = QtWidgets.QLabel(self.calibration_dialog)
# self.calibrationMethod.setObjectName("calibrationMethod")
# self.calibrationMethod.setText("calibration method: " + self.calibrationObject.name + " 2D calibration")
# self.calibrationMethod.setFont(QFont('MS Shell Dlg 2', 10))
# self.calibrationMethod.setAlignment(Qt.AlignCenter)
# self.mainLayout.addWidget(self.calibrationMethod)
########cmo rolling shutter parameters########
########Scan Direction###########
self.rollingType = QtWidgets.QHBoxLayout()
self.rollingType.setObjectName("rollingType")
#Description for combo box
self.typeLabel = QtWidgets.QLabel(self.parameter_dialog)
self.typeLabel.setObjectName("typeLabel")
self.typeLabel.setText(LG("choose scan direction"))
self.typeLabel.setMinimumSize(QSize(480, 32))
self.typeLabel.setMaximumSize(QSize(480, 32))
self.rollingType.addWidget(self.typeLabel)
# #spacer
# spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
# self.rollingType.addItem(spacerItem1)
#combo box
self.typeComboBox=QtWidgets.QComboBox(self.parameter_dialog)
self.typeComboBox.setMinimumSize(QSize(125, 26))
self.typeComboBox.setMaximumSize(QSize(125, 26))
self.typeComboBox.setObjectName("typeComboBox")
self.typeComboBox.addItems(["top to bottom", "left to right", "bottom to top", "right to left"])
#self.typeComboBox.activated[str].connect(self.ComboMode)
self.rollingType.addWidget(self.typeComboBox)
self.mainLayout.addLayout(self.rollingType)
###########Time to scan##########
self.scanTime = QtWidgets.QHBoxLayout()
self.scanTime.setObjectName("scanTime")
#Description for text edit 1
self.editLabel1 = QtWidgets.QLabel(self.parameter_dialog)
self.editLabel1.setObjectName("editLabel1")
self.editLabel1.setText(LG("scanning duration"))
self.editLabel1.setMinimumSize(QSize(480, 32))
self.editLabel1.setMaximumSize(QSize(480, 32))
self.scanTime.addWidget(self.editLabel1)
# #spacer
# spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
# self.scanTime.addItem(spacerItem2)
#Text edit for scan time
self.scanTimeText = QtWidgets.QPlainTextEdit(self.parameter_dialog)
self.scanTimeText.setMinimumSize(QSize(80, 28))
self.scanTimeText.setMaximumSize(QSize(80, 28))
self.scanTimeText.setObjectName("scanTimeText")
self.scanTimeText.appendPlainText(str(round(GlobalData.cmoParameter["scanDuration"]*1000, 3)))
self.scanTime.addWidget(self.scanTimeText)
#self.stickLength.setEnabled(False)
#Unit label for text edit 1
self.editUnit1 = QtWidgets.QLabel(self.parameter_dialog)
self.editUnit1.setObjectName("editUnit1")
self.editUnit1.setText("ms")
self.editUnit1.setMinimumSize(QSize(40, 32))
self.editUnit1.setMaximumSize(QSize(40, 32))
self.scanTime.addWidget(self.editUnit1)
self.mainLayout.addLayout(self.scanTime)
########Time between scan########
self.gapTime = QtWidgets.QHBoxLayout()
self.gapTime.setObjectName("gapTime")
#Description for time between scan
self.editLabel2 = QtWidgets.QLabel(self.parameter_dialog)
self.editLabel2.setMinimumSize(QSize(480, 32))
self.editLabel2.setMaximumSize(QSize(480, 32))
self.editLabel2.setObjectName("editLabel2")
self.gapTime.addWidget(self.editLabel2)
self.editLabel2.setText(LG("scanning gap"))
# #spacer
# spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
# self.gapTime.addItem(spacerItem3)
#Text edit for gap time
self.gapTimeEdit = QtWidgets.QPlainTextEdit(self.parameter_dialog)
self.gapTimeEdit.setMinimumSize(QSize(80, 28))
self.gapTimeEdit.setMaximumSize(QSize(80, 28))
self.gapTimeEdit.setObjectName("gapTimeEdit")
self.gapTimeEdit.appendPlainText(str(round(GlobalData.cmoParameter["scanGap"]*1000, 3)))
self.gapTime.addWidget(self.gapTimeEdit)
#self.blCoordinateX.setEnabled(False)
#Unit label for gap time
self.editUnit2 = QtWidgets.QLabel(self.parameter_dialog)
self.editUnit2.setObjectName("editUnit2")
self.editUnit2.setText("ms")
self.editUnit2.setMinimumSize(QSize(40, 32))
self.editUnit2.setMaximumSize(QSize(40, 32))
self.gapTime.addWidget(self.editUnit2)
self.mainLayout.addLayout(self.gapTime)
########spacing#########
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.mainLayout.addItem(spacerItem)
########save calibration or quit dialog#######
self.buttonsLayout = QtWidgets.QHBoxLayout()
self.buttonsLayout.setObjectName("buttonsLayout")
self.saveBtn = QtWidgets.QPushButton(self.parameter_dialog)
self.saveBtn.setMinimumSize(QSize(300, 28))
self.saveBtn.setMaximumSize(QSize(300, 28))
self.saveBtn.setObjectName("saveBtn")
self.saveBtn.clicked.connect(self.saveSettings)
self.saveBtn.setText(LG("save"))
self.buttonsLayout.addWidget(self.saveBtn)
self.cancelBtn = QtWidgets.QPushButton(self.parameter_dialog)
self.cancelBtn.setMinimumSize(QSize(300, 28))
self.cancelBtn.setMaximumSize(QSize(300, 28))
self.cancelBtn.setObjectName("cancelBtn")
self.cancelBtn.clicked.connect(self.cancel_Btn)
self.cancelBtn.setText(LG("cancel"))
self.buttonsLayout.addWidget(self.cancelBtn)
self.mainLayout.addLayout(self.buttonsLayout)
# def ComboMode(self, mode):
# GlobalData.cmoParameter["type"] = mode
def saveSettings(self, event):
GlobalData.cmoParameter["type"] = str(self.typeComboBox.currentText())
GlobalData.cmoParameter["scanDuration"] = float(self.scanTimeText.toPlainText())/1000
GlobalData.cmoParameter["scanGap"] = float(self.gapTimeEdit.toPlainText())/1000
self.parameter_dialog.close()
def cancel_Btn(self):
self.parameter_dialog.close()
|
# coding:utf-8
import time
import socket
import struct
def disp_binary(data: bytes, split: str = r'\x', order: str = '>', sign: str = 'B'):
"""
显示bytes字符串
:param data: 源bytes
:param split: 字符串分隔符,默认为\\x
:param order: data struct.unpack解码顺序
:param sign: data struct.unpack解码符号如B,H等
:return: bytes对应的二进制字符串
"""
f = '{}{}'.format(order, sign * len(data))
return ''.join("{}{:02x}".format(split, b) for b in struct.unpack(f, data))
def fomate_bytes(data, reg_num: int = 0, encoding: str = 'utf-16be', reg_size: int = 2,
fill_info: bytes = b'\x00'):
"""
将data编码成指定长度的bytes
:param data: 要编码的对象,如果不是bytes就encode
:param reg_num: 最长寄存器数目,默认为0,为0则不限制长度
:param encoding: 编码方式,默认utf-16be
:param reg_size: 寄存器大小,默认2个字节,为0则不限制大小
:param fill_info: 长度不足的填充信息,默认填充\x00
:return: 返回编码后的bytes,如果输入为bytes不会修改编码
"""
result: bytes = b''
expect_len = reg_num * reg_size
    # Use the data directly if it is already bytes, otherwise encode it
if not isinstance(data, bytes):
result = data.encode(encoding)
else:
result = data
fill_len = expect_len - len(result)
if fill_len > 0:
result += (fill_info * fill_len)
if expect_len and len(result) > expect_len:
return result[:expect_len]
else:
return result
def get_time_str():
"""
获取当前时间的字符串
:return: 当前时间字符串
"""
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
class DeviceInfoType:
"""
设备信息封装类
"""
def __init__(self):
self.dev_id = ''
self.dev_model = ''
self.sub_dev_type = 3022
self.soft_version = ''
self.region_name = ''
self.region_count = 0
self.region_id_name_bytes = b''
self.region_status_bytes = b''
        # At most 64 zones
self.max_region_count = 64
        # Each zone's ID and name occupy 30 registers apiece
self.region_reg_num = 30
def get_region_info_bytes(self):
"""
获取分区表的bytes存放在self.region_bytes中
:return: None
"""
if not self.region_id_name_bytes:
tmp_br = bytearray()
if self.dev_id and self.region_name:
if self.region_count > self.max_region_count:
self.region_count = self.max_region_count
for c in range(self.region_count):
new_dev_id = str(self.sub_dev_type) + self.dev_id[8:20] + "{:04d}".format(c + 1)[:4]
tmp_br.extend(fomate_bytes(new_dev_id, self.region_reg_num))
new_region_name = self.region_name + "{:04d}".format(c + 1)[:4]
tmp_br.extend(fomate_bytes(new_region_name, self.region_reg_num))
# left_count = self.max_region_count - self.region_count
# if left_count:
# tmp_br.extend(fomate_bytes('', left_count * 30 * 2))
self.region_id_name_bytes = bytes(tmp_br)
def get_region_status_bytes(self):
"""
获取分区状态bytes
:return:分区状态的bytes
"""
        if not self.region_status_bytes:
            # Left unfinished in the original: the status table is never populated,
            # so the reply for this query pads the registers with zeros instead.
            tmp_br = bytearray()
            self.region_status_bytes = bytes(tmp_br)
class ModbusType:
def __init__(self, data: bytes = b''):
self.recv_valid = False
self.reg_size = 2
self.dev_info = DeviceInfoType()
self.dev_info.dev_id = '10212019201988800001'
self.dev_info.dev_model = 'ITC-7800A'
self.dev_info.soft_version = '5.2'
self.dev_info.region_name = '分区'
self.dev_info.region_count = 2
        # Transaction identifier
self.seq = b''
        # Protocol identifier; fixed \x00\x00 for Modbus
self.pro_tag = b''
        # Message length: the 2-byte length field in the Modbus header
self.recv_msg_len = b''
self.send_msg_len = b''
        # Unit identifier, usually fixed \xff
self.unit_tag = b''
        # Command type; only two are supported: \x04 reads registers, \x01 writes registers
self.recv_cmd_type = b''
self.send_cmd_type = b''
        # Start address of the registers to read/write
self.reg_addr = b''
        # Number of registers to read/write
self.reg_num = 0
        # Received binary data
self.recv_data: bytes = data
self.send_data_bytes: bytes = b''
self.init_basic_info()
def init_basic_info(self):
data = self.recv_data
print("[{}]recv:{}".format(get_time_str(), disp_binary(data)))
if data and len(data) >= 12:
self.recv_valid = True
            # Transaction identifier
self.seq = data[0:2]
            # Protocol identifier; fixed \x00\x00 for Modbus
self.pro_tag = data[2:4]
            # Message length
self.recv_msg_len = data[4:6]
            # Unit identifier, usually fixed \xff
self.unit_tag = data[6:7]
            # Command type: \x04 reads registers, \x01 writes registers
self.recv_cmd_type = data[7:8]
            # Start address of the registers to read/write
self.reg_addr = data[8:10]
            # Number of registers to read/write
self.reg_num = struct.unpack('>H', data[10:12])[0]
print("seq:{} cmd_type:{} reg_addr:{} reg_num:{}".format(self.seq, self.recv_cmd_type,
struct.unpack(">H", self.reg_addr)[0],
self.reg_num))
else:
self.recv_valid = False
print("Invalid recive, ignore!")
def get_reply_msg(self):
if self.reg_addr == b'\x10\x69':
self.build_1069_4201_reply()
elif self.reg_addr == b'\x10\x7d':
self.build_107D_4221_reply()
elif self.reg_addr == b'\x52\x08':
# \x00\x03\x00\x00\x00\x06\xff\x04\x52\x08\x0f\x00
            # Query zone names and IDs
self.build_5208_21000_reply()
elif self.reg_addr == b'\x27\x10':
# \x00\x04\x00\x00\x00\x06\xff\x04\x27\x10\x03\x20
            # Query all session states
self.build_2710_10000_reply()
elif self.reg_addr == b'\x00\x01':
# \x00\x08\x00\x00\x00\x06\xff\x04\x00\x01\x00\x80
            # Query zone status
self.build_0001_1_reply()
else:
print("Not support reg_addr:", disp_binary(self.reg_addr))
def build_read_reg_reply(self, data_str, send_msg_len: bytes = None, send_data_len: bytes = None, fill=False):
bytes_len = self.reg_num if fill else 0
data_bytes = fomate_bytes(data_str, bytes_len)
send_msg_len_int = len(data_bytes) + 3
if not send_msg_len:
send_msg_len = struct.pack('>H', send_msg_len_int)
if not send_data_len:
if send_msg_len_int - 3 > 0xFF:
send_data_len = b'\xFF'
else:
send_data_len = struct.pack('>B', send_msg_len_int - 3)
self.send_data_bytes = (self.seq + self.pro_tag) + send_msg_len + (
self.unit_tag + self.recv_cmd_type) + send_data_len + data_bytes
def build_1069_4201_reply(self):
        '''Query the device ID'''
self.build_read_reg_reply(self.dev_info.dev_id + self.dev_info.dev_model)
def build_107D_4221_reply(self):
        '''Query the software version'''
self.build_read_reg_reply(self.dev_info.soft_version)
def build_5208_21000_reply(self):
        '''Query zone names and IDs'''
self.dev_info.get_region_info_bytes()
self.build_read_reg_reply(self.dev_info.region_id_name_bytes)
def build_2710_10000_reply(self):
        '''Query all session states'''
        self.build_read_reg_reply('', fill=True)
    def build_0001_1_reply(self):
        '''Query zone status'''
        self.dev_info.get_region_status_bytes()
        # fill=True pads the reply to the requested register count, matching the
        # legacy inline handler below which answered with zero-filled registers
        self.build_read_reg_reply(self.dev_info.region_status_bytes, fill=True)
if __name__ == '__main__':
b1 = 12
print(disp_binary(bytes([b1,])))
exit(666)
print("[{}] start...".format(get_time_str()))
address = ('172.26.92.152', 502)
resv_buff = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # s = socket.socket()
s.bind(address)
s.listen(5)
ss, addr = s.accept()
    print('[{}] got connected from {}'.format(get_time_str(), addr))
while True:
ra = ss.recv(resv_buff)
        if len(ra) <= 0:
            # recv() returning empty bytes means the peer closed the connection
            break
resv_info = ModbusType(ra)
if not resv_info.recv_valid:
continue
resv_info.get_reply_msg()
if resv_info.send_data_bytes:
print("[{}]send:{}".format(get_time_str(), disp_binary(resv_info.send_data_bytes)))
ss.send(resv_info.send_data_bytes)
# continue
# send_bytes: bytes = b''
        # # Transaction identifier
# seq = ra[0:2]
        # # Protocol identifier; fixed \x00\x00 for Modbus
# pro_tag = ra[2:4]
        # # Message length
# msg_len = ra[4:6]
        # # Unit identifier, usually fixed \xff
# unit_tag = ra[6:7]
        # # Command type: \x04 reads registers, \x01 writes registers
# cmd_type = ra[7:8]
        # # Start address of the registers to read/write
# reg_addr = ra[8:10]
        # # Number of registers to read/write
# reg_num = ra[10:12]
# # print("seq:{} cmd_type:{} reg_addr:{} reg_num:{}".format(seq, cmd_type, struct.unpack(">H", reg_addr)[0],
# # struct.unpack(">H", reg_num)[0]))
# if reg_addr == b'\x10\x69':
# # \x00\x01\x00\x00\x00\x06\xff\x04\x10\x69\x00\x1e
        # # Query the device ID
# # dev_id = b'\x00\x01\x00\x00\x00\x3f\xff\x04\x3c' + fomate_bytes(b'10202019201988800001') + (b'\x00' * 20)
# send_msg_len = b'\x00\x3f'
# data_len = b'\x3c'
# send_bytes = seq + pro_tag + send_msg_len + unit_tag + cmd_type + data_len + fomate_bytes(
# '10202019201988800002', 30)
# elif reg_addr == b'\x10\x7d':
# # \x00\x02\x00\x00\x00\x06\xff\x04\x10\x7d\x00\x20
        # # Query the software version
# # send_bytes = b'\x00\x02\x00\x00\x00\x43\xff\x04\x40' + fomate_bytes(b'BoschCallStation_3.0.0.1023') + (
# # b'\x00' * 10)
# send_msg_len = b'\x00\x43'
# data_len = b'\x40'
# send_bytes = seq + pro_tag + send_msg_len + unit_tag + cmd_type + data_len + fomate_bytes(
# 'BoschCallStation_3.0.0.1023', 32)
# elif reg_addr == b'\x52\x08':
# # \x00\x03\x00\x00\x00\x06\xff\x04\x52\x08\x0f\x00
        # # Query zone names and IDs
# send_msg_len = b'\x1E\x03'
# data_len = b'\x00'
# send_bytes = seq + pro_tag + send_msg_len + unit_tag + cmd_type + data_len + fomate_bytes(
# '30222019888000020001', 30) + fomate_bytes('分区0001', 30) + fomate_bytes('', 3780)
# elif reg_addr == b'\x27\x10':
# # \x00\x04\x00\x00\x00\x06\xff\x04\x27\x10\x03\x20
        # # Query all session states
# send_msg_len = b'\x06\x43'
# data_len = b'\x40'
# send_bytes = seq + pro_tag + send_msg_len + unit_tag + cmd_type + data_len + fomate_bytes('', 800)
# elif reg_addr == b'\x00\x01':
# # \x00\x08\x00\x00\x00\x06\xff\x04\x00\x01\x00\x80
        # # Query zone status
# send_msg_len = b'\x01\x03'
# data_len = b'\x00'
# send_bytes = seq + pro_tag + send_msg_len + unit_tag + cmd_type + data_len + fomate_bytes('', 128)
# if send_bytes:
# print("[{}]send:{}".format(get_time_str(), disp_binary(send_bytes)))
# ss.send(send_bytes)
|
import newt
import time
import tweepy
class StreamListener(tweepy.StreamListener):
    def on_status(self, status):
        try:
            print(status.text, str(self.count))
            print('\n %s %s via %s\n' % (status.author.screen_name, status.created_at, status.source))
            self.count = self.count - 1
            if self.count < 0:
                streamer.disconnect()
        except Exception:
            # Catch any unicode errors while printing to console
            # and just ignore them to avoid breaking application.
            pass
api = newt.getTwitterAPI()
authl = newt.getTwitterAuth()
l = StreamListener()
streamer = tweepy.Stream(auth=authl, listener=l, timeout=300.0)
setTerms = ['this', 'that']
print("getting ready")
# on_status reads self.count, so the counter must live on the listener,
# not on the Stream object
l.count = 3
print(l.count)
streamer.filter(None, setTerms)
|
import numpy as np
from _neworder_core import MonteCarlo # type: ignore[import]
def as_np(mc: MonteCarlo) -> np.random.Generator:
"""
Returns an adapter enabling the MonteCarlo object to be used with numpy random functionality
"""
class _NpAdapter(np.random.BitGenerator):
def __init__(self, rng: MonteCarlo):
super().__init__(0)
self.rng = rng
self.rng.init_bitgen(self.capsule) # type: ignore
return np.random.Generator(_NpAdapter(mc)) # type: ignore
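# Illustrative usage (hypothetical: assumes `mc` is a MonteCarlo instance obtained
# from the neworder runtime):
#   gen = as_np(mc)
#   samples = gen.normal(size=10)  # standard numpy Generator API, backed by mc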
|
"""Test the PackageTask class"""
import os
import tempfile
import shutil
import time
from nose.tools import assert_equals, assert_raises, assert_not_equals
from nose.tools import assert_not_in, assert_in
from ckanpackager.tasks.package_task import PackageTask
from ckanpackager.lib.utils import BadRequestError
from ckanpackager.lib.statistics import CkanPackagerStatistics
from ckanpackager.tests import smtpretty
class DummyPackageTask(PackageTask):
"""The PackageTask class has some abstract methods, this is a minimal
implementation.
"""
def __init__(self, *args, **kargs):
super(DummyPackageTask, self).__init__(*args, **kargs)
self._create_zip_invoked = False
def schema(self):
return {
'carrot': (True, None),
'cake': (False, self._process_cake)
}
def create_zip(self, resource):
if self.request_params['carrot'] == 'break':
raise Exception('this is broken')
if self.request_params['carrot'] == 'create-zip':
w = resource.get_writer()
w.write('-')
resource.create_zip('cp {input} {output}')
else:
resource.set_zip_file_name('the-zip-file.zip')
def host(self):
return 'example.com'
def _process_cake(self, cake):
return 'nice '+ str(cake)
class TestPackageTask(object):
"""Test the DummyPackageTask task."""
def setUp(self):
"""Setup up test config&folders"""
self._temp_db_folder = tempfile.mkdtemp()
self._config = {
'STORE_DIRECTORY': tempfile.mkdtemp(),
'TEMP_DIRECTORY': tempfile.mkdtemp(),
'STATS_DB': 'sqlite:///' + os.path.join(self._temp_db_folder, 'db'),
'ANONYMIZE_EMAILS': False,
'CACHE_TIME': 60*60*24,
'EMAIL_FROM': '{resource_id}-{zip_file_name}-{ckan_host}-from',
'EMAIL_BODY': '{resource_id};{zip_file_name};{ckan_host} body',
'EMAIL_SUBJECT': '{resource_id};{zip_file_name};{ckan_host} subject',
'SMTP_HOST': '127.0.0.1',
'SMTP_PORT': 2525
}
def tearDown(self):
"""Remove temp folders"""
shutil.rmtree(self._temp_db_folder)
shutil.rmtree(self._config['STORE_DIRECTORY'])
shutil.rmtree(self._config['TEMP_DIRECTORY'])
def test_config_is_available_to_subclasses(self):
"""Test that the config is available to sub-classes"""
p = DummyPackageTask(
{'carrot': 'a', 'email': 'b', 'resource_id':'c'},
{'hello': 'world'}
)
assert_equals(p.config['hello'], 'world')
def test_missing_required_parameters_raises(self):
"""Test an exception is raised if a required parameters is missing.
"""
with assert_raises(BadRequestError) as context:
p = DummyPackageTask(
{'email': 'a', 'resource_id': 'a', 'cake': 'a'},
{}
)
def test_schema_parameters_accepted(self):
"""Ensure that parameters defined in the schema are accepted and added
to the request parameter
"""
p = DummyPackageTask(
{'carrot': 'a', 'email': 'b', 'resource_id':'c'},
{'hello': 'world'}
)
assert_equals(p.request_params['carrot'], 'a')
def test_unknown_parameters_ignored(self):
"""Ensure that parameters not defined in the schema are ignored"""
p = DummyPackageTask(
{'carrot': 'a', 'email': 'b', 'resource_id':'c', 'john': 'doe'},
{'hello': 'world'}
)
assert_not_in('john', p.request_params)
def test_parameter_process_function_invoked(self):
"""Check that the defined process functions are called"""
p = DummyPackageTask(
{'carrot': 'a', 'email': 'b', 'resource_id':'c',
'cake': 'and sweet'},
{'hello': 'world'}
)
assert_equals(p.request_params['cake'], 'nice and sweet')
def test_email_and_resource_id_added_to_schema(self):
"""Test that email and resource id are added to schema, by ensuring
they are required parameters and that parameters are passed through
"""
with assert_raises(BadRequestError) as context:
p = DummyPackageTask({'resource_id': 'a', 'carrot': 'a'}, {})
with assert_raises(BadRequestError) as context:
p = DummyPackageTask({'email': 'a', 'carrot': 'a'}, {})
p = DummyPackageTask({'email': 'a', 'resource_id': 'b', 'carrot': 'c'}, {})
assert_equals(p.request_params['email'], 'a')
assert_equals(p.request_params['resource_id'], 'b')
def test_same_task_has_same_str_rep(self):
"""Checks that the same task always returns the same str"""
p = DummyPackageTask(
{'carrot': 'a', 'email': 'b', 'resource_id':'c'},
{'hello': 'world'}
)
assert_equals(str(p), str(p))
def test_different_tasks_have_different_str_rep(self):
"""Checks that two different tasks have two different str"""
p1 = DummyPackageTask(
{'carrot': 'a', 'email': 'b', 'resource_id':'c'},
{'hello': 'world'}
)
p2 = DummyPackageTask(
{'carrot': 'a1', 'email': 'b1', 'resource_id':'c1'},
{'hello': 'world1'}
)
assert_not_equals(str(p1), str(p2))
def test_same_task_at_different_time_have_different_str_rep(self):
"""Checks that two different tasks have two different str"""
p1 = DummyPackageTask(
{'carrot': 'a', 'email': 'b', 'resource_id':'c'},
{'hello': 'world'}
)
time.sleep(1)
p2 = DummyPackageTask(
{'carrot': 'a', 'email': 'b', 'resource_id':'c'},
{'hello': 'world'}
)
assert_not_equals(str(p1), str(p2))
@smtpretty.activate(2525)
def test_email_sent(self):
"""Test that the email is sent as expected"""
t = DummyPackageTask({
'resource_id': 'the-resource-id',
'email': 'recipient@example.com',
'carrot': 'cake'
}, self._config)
t.run()
assert_equals(len(smtpretty.messages), 1)
assert_equals(smtpretty.last_message.recipients, ['recipient@example.com'])
@smtpretty.activate(2525)
def test_email_placeholders(self):
"""Test that the email placeholders are inserted"""
t = DummyPackageTask({
'resource_id': 'the-resource-id',
'email': 'recipient@example.com',
'carrot': 'cake'
}, self._config)
t.run()
assert_equals(len(smtpretty.messages), 1)
assert_equals(
smtpretty.last_message.mail_from,
'the-resource-id-the-zip-file.zip-example.com-from'
)
assert_equals(
smtpretty.last_message.headers['subject'],
"the-resource-id;the-zip-file.zip;example.com subject",
)
assert_equals(
"the-resource-id;the-zip-file.zip;example.com body",
smtpretty.last_message.body,
)
@smtpretty.activate(2525)
def test_request_is_logged(self):
t = DummyPackageTask({
'resource_id': 'the-resource-id',
'email': 'recipient@example.com',
'carrot': 'cake'
}, self._config)
t.run()
stats = CkanPackagerStatistics(self._config['STATS_DB'], self._config['ANONYMIZE_EMAILS'])
requests = stats.get_requests()
assert_equals(1, len(requests))
assert_equals('the-resource-id', requests[0]['resource_id'])
assert_equals('recipient@example.com', requests[0]['email'])
@smtpretty.activate(2525)
def test_error_is_logged(self):
t = DummyPackageTask({
'resource_id': 'the-resource-id',
'email': 'recipient@example.com',
'carrot': 'break'
}, self._config)
with assert_raises(Exception) as context:
t.run()
stats = CkanPackagerStatistics(self._config['STATS_DB'], self._config['ANONYMIZE_EMAILS'])
errors = stats.get_errors()
assert_equals(1, len(errors))
assert_equals('the-resource-id', errors[0]['resource_id'])
assert_equals('recipient@example.com', errors[0]['email'])
assert_in('this is broken', errors[0]['message'])
def test_speed_is_slow_when_resource_not_cached(self):
t = DummyPackageTask({
'resource_id': 'the-resource-id',
'email': 'recipient@example.com',
'carrot': 'cake'
}, self._config)
assert_equals('slow', t.speed())
@smtpretty.activate(2525)
def test_speed_is_fast_when_resource_is_cached(self):
t = DummyPackageTask({
'resource_id': 'the-resource-id',
'email': 'recipient@example.com',
'carrot': 'create-zip'
}, self._config)
t.run()
assert_equals('fast', t.speed())
|
from flask import Flask, jsonify
application = Flask(__name__)
@application.route("/")
def hello():
return "Hello World!"
@application.route("/api/v2/test")
def test():
    return jsonify({"hello": "World!"})
if __name__ == "__main__":
application.run()
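# Quick check using Flask's built-in test client (illustrative):
#   with application.test_client() as client:
#       print(client.get("/api/v2/test").get_json())  # -> {'hello': 'World!'}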
|
import xml.dom.minidom
import os
class GetXml(object):
def __init__(self, filepath):
self.filepath = filepath
        self.file = xml.dom.minidom.parse(self.filepath) # parse the XML file
        self.DOMTree = self.file.documentElement # get the document element
def get_xml_value(self, nodename=None, n=0):
        itemlist = self.DOMTree.getElementsByTagName(nodename) # get all elements with this tag name; returns a list
item = itemlist[n]
return item.firstChild.data
def get_xml_attr(self, parentname=None, childname=None, n=0):
itemlist = self.DOMTree.getElementsByTagName(parentname)
item = itemlist[n]
        return item.getAttribute(childname) # get the value of the named attribute
if __name__ == "__main__":
file_path = os.path.join(os.path.dirname(__file__), 'movies.xml')
print(file_path)
gx = GetXml(file_path)
print(gx.get_xml_value("type"))
print(gx.get_xml_attr("type", "nick"))
|
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.conf import settings
from django.contrib.auth.models import User
from .models import Profil
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
Profil.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
instance.profil.save()
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 28 13:46:18 2021
@author: ad
"""
page_list = []  # page request list: [page_num, resident_time]
page_queue = []  # waiting queue
MM = []  # memory frames
t = 0  # virtual current time
Fault = 0
# read the input file
file = input("Enter the file to read: ")
f = open(f"./example_page/{file}.txt", 'r')
lines = f.readlines()
f.close()
# extract the pages
for line in lines:
    line = line.replace('\n', '')
    line = int(line)
    line = [line, 0]
    page_list.append(line)
# extract the number of frames
frame_num = page_list.pop(0)
frame_num = int(frame_num[0])
# extract the number of pages
page_num = page_list.pop(0)
page_num = int(page_num[0])
def in_MM(page):
    # return the frame index holding this page, or -1 if absent
    for i in range(len(MM)):
        if page[0] == MM[i][0]:
            return i
    return -1
def in_queue(page):
    # return the queue index holding this page, or -1 if absent
    for i in range(len(page_queue)):
        if page[0] == page_queue[i][0]:
            return i
    return -1
for _ in range(page_num):
    # the page requested now
    page = page_list.pop(0)  # [page_num, resident_time]
    #print(page)
    if len(MM) < frame_num:  # there is still a free frame
        # check whether the page is already in memory
        page_in_MM = in_MM(page)
        if page_in_MM != -1:
            print(f'[{t}] Page request {page[0]} --> Hit')
        else:
            print(f'[{t}] Page request {page[0]} --> Initial filling')
            MM.append(page)
    else:  # all frames are occupied
        # check whether the page is already in memory
        page_in_MM = in_MM(page)
        if page_in_MM != -1:
            print(f'[{t}] Page request {page[0]} --> Hit')
        else:
            # check whether the page is in the waiting queue
            page_in_queue = in_queue(page)
            old_page_index = 0  # frame whose page has been resident the longest
            for i in range(1, frame_num):
                if MM[old_page_index][1] < MM[i][1]:
                    old_page_index = i
            Fault += 1
            print(f'[{t}] Page request {page[0]} --> Fault ({Fault})')
            old_page = MM.pop(old_page_index)
            old_page[1] = 0  # reset its resident time
            page_queue.append(old_page)  # swap the longest-resident page out to the back of the queue
            # bring the requested page back in from the waiting queue
            if page_in_queue != -1:
                MM.append(page_queue.pop(page_in_queue))
            # the page is in neither memory nor the queue (a brand-new page)
            else:
                MM.append(page)
    # advance the virtual time
    t = t + 1
    # increase the resident time of every page in memory
    for page_frame in MM:
        page_frame[1] += 1
    #print(MM)
    #print(page_queue)
    #print()
print()
Hit_ratio = (page_num-Fault)/page_num
print(f'Hit ratio = {round(Hit_ratio,2)}({page_num-Fault}/{page_num})')
|
class Solution(object):
    def heapsort(self, nums):
        def heapify(nums, size, a):
            # sift the element at index a down so the subtree rooted at a is a max-heap
            largest = a
            left = 2 * a + 1
            right = 2 * a + 2
            if left < size and nums[largest] < nums[left]:
                largest = left
            if right < size and nums[largest] < nums[right]:
                largest = right
            if largest != a:
                # swap the larger child up into the parent's slot
                nums[a], nums[largest] = nums[largest], nums[a]
                # largest now indexes the displaced parent; keep sifting it down
                heapify(nums, size, largest)
        l_len = len(nums)
        # build the max-heap bottom-up, starting from the last parent node
        # (e.g. with 7 items, heapify starts at index 2)
        for a in range((l_len // 2) - 1, -1, -1):
            heapify(nums, l_len, a)
        # repeatedly swap the max to the end, then restore the heap on the rest;
        # stops once only one element remains
        for a in range(l_len - 1, -1, -1):
            nums[0], nums[a] = nums[a], nums[0]
            heapify(nums, a, 0)
        return nums
#Reference: https://exceptionnotfound.net/heap-sort-csharp-the-sorting-algorithm-family-reunion/
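# A quick usage sketch of the in-place heapsort above:
if __name__ == "__main__":
    print(Solution().heapsort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]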
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-12 16:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0006_auto_20160611_1745'),
]
operations = [
migrations.CreateModel(
name='Attribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='AttributeCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Resource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='ResourceAttributeValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=300)),
('attribute', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Attribute')),
('resource', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Resource')),
],
),
migrations.AddField(
model_name='attribute',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.AttributeCategory'),
),
]
|
import click
from src.input.DefaultInput import DefaultInput
class CliInput(DefaultInput):
def start(self):
while True:
click.echo("Waiting for pressing h or l or q to abort...")
c = click.getchar()
click.echo()
if c == 'h':
self.down()
elif c == 'l':
self.up()
else:
click.echo("Abort!")
break
|
l1 = ['abcd', 786, 2.23, 'john', 70.2]
l2 = [123, 'apples']
print(l1)
print(l1[0])
print(l1[1:3])
print(l1[2:])
print(l2 * 2)
print(l1 + l2)
print(l1[5:])  # slicing past the end yields an empty list
|
from flask import jsonify
from flask import request
from flask import Blueprint
from ..controllers.projects import get_users_by_project
from ..controllers.projects import get_project_by_id
from ..controllers.projects import register_project
from ..controllers.submissions import get_project_submissions
from flask_jwt import jwt_required
from flask_jwt import current_identity
def init(app):
bp = Blueprint('projects', __name__)
@bp.route('/project', methods=['GET', 'POST'])
@jwt_required()
def register():
if request.method == 'POST':
title = request.json['title']
subtitle = request.json['subtitle']
category = request.json['category']
knowledge_area = request.json['knowledge_area']
students = request.json['students']
tutors = request.json['tutors']
create_by = request.json['create_by']
return register_project(title, subtitle, category, knowledge_area, students, tutors, create_by)
else:
project_id = request.args.get('id')
return get_project_by_id(project_id)
@bp.route('/project/<int:id_project>/users', methods=['GET'])
@jwt_required()
def get_users_from_project(id_project):
return get_users_by_project(id_project)
@bp.route('/project/<int:id_project>/submissions', methods=['GET'])
@jwt_required()
def get_submissions_from_project(id_project):
return get_project_submissions(id_project)
@bp.route('/project/<int:id_project>/revisions', methods=['GET'])
@jwt_required()
def get_revisions_from_project(id_project):
pass
app.register_blueprint(bp, url_prefix="/api/v1")
|
import math
class Pagination:
def __init__(self, pagination_query, items_per_page = 50, range_size = 7):
self.items_per_page = items_per_page
self.range_size = range_size
# self.alchemy_service = alchemy_service
self.pagination_query = pagination_query
self.item_count = pagination_query.query_count()
self.total_pages = math.ceil(self.item_count / self.items_per_page)
def has_prev_page(self, page_number):
return (page_number > 1)
def has_next_page(self, page_number):
return (page_number < self.total_pages)
def get_page(self, page_number, page_action = None):
page = None
if page_action:
if "first" == page_action:
page = self.get_page_number(1)
elif "prev" == page_action:
page = self.get_prev_page(page_number)
elif "next" == page_action:
page = self.get_next_page(page_number)
elif "last" == page_action:
page = self.get_page_number(self.total_pages)
        if page is None:
page = self.get_page_number(page_number)
return page
def get_prev_page(self, page_number):
page = None
if self.has_prev_page(page_number):
page = self.get_page_number(page_number - 1)
return page
def get_next_page(self, page_number):
page = None
if self.has_next_page(page_number):
page = self.get_page_number(page_number + 1)
return page
def get_page_number(self, page_number):
page = None
if self.item_count <= 0 or page_number <= 0 or page_number > self.total_pages:
return page
p = page_number - 1
offset = p * self.items_per_page
limit = self.items_per_page
item_list = self.pagination_query.query_items(offset, limit)
page = Page(page_number, item_list)
page.has_prev = self.has_prev_page(page_number)
if page.has_prev:
page.page_number_prev = page_number - 1
page.has_next = self.has_next_page(page_number)
if page.has_next:
page.page_number_next = page_number + 1
p = page_number
n = page_number
page.range_list.append(page_number)
# determine range size
range_size = self.range_size - 1
if self.total_pages < range_size:
range_size = self.total_pages
# build range
while len(page.range_list) < range_size:
if self.has_prev_page(p):
p = p - 1
page.range_list.insert(0, p)
if self.has_next_page(n):
n = n + 1
page.range_list.append(n)
half_range = math.floor(self.range_size * 0.5)
page.show_first = (page_number - half_range) > 1
page.show_last = (page_number + half_range) < self.total_pages
page.total_pages = self.total_pages
return page
class Page:
def __init__(self, page_number, item_list):
self.page_number = page_number
self.item_list = item_list
self.range_list = []
def __repr__(self):
item_count = len(self.item_list)
return "Item count: {item_count} - Page number: {page_number}".format(item_count = item_count, page_number = self.page_number)
|
import sys
try:
import phi
except ImportError:
print("phiflow is not installed. Visit https://tum-pbs.github.io/PhiFlow/Installation_Instructions.html for more information."
"\nrun 'pip install phiflow' to install the latest stable version or add the phiflow source directory to your Python PATH.", file=sys.stderr)
exit(1)
from phi._troubleshoot import assert_minimal_config, troubleshoot
try:
assert_minimal_config()
except AssertionError as fail_err:
print("\n".join(fail_err.args), file=sys.stderr)
exit(1)
print(f"\nInstallation verified. {troubleshoot()}")
|
from girder.models.setting import Setting
from girder.plugins.imagespace.settings import ImageSpaceSetting
class FlannSetting(ImageSpaceSetting):
requiredSettings = ('IMAGE_SPACE_FLANN_INDEX',)
def validateImageSpaceFlannIndex(self, doc):
return doc.rstrip('/')
|
n1 = int(input('Enter a number: '))
n2 = int(input('Enter another number: '))
soma = n1 + n2
print("The sum of the values is {}".format(soma))
|
# Generated by Django 2.1.7 on 2019-03-11 20:37
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Part',
fields=[
('id',
models.AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')),
('title', models.CharField(max_length=255)),
('slug', models.SlugField()),
('input', models.TextField(blank=True, max_length=10000)),
('solution', models.TextField(blank=True, max_length=10000)),
],
),
migrations.CreateModel(
name='Problem',
fields=[
('id',
models.AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')),
('title', models.CharField(max_length=255)),
('slug', models.SlugField(unique=True)),
('description', models.TextField(max_length=10000)),
],
),
migrations.CreateModel(
name='Submission',
fields=[
('id',
models.AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')),
('submission', models.TextField(max_length=10000)),
('part', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
to='contest.Part')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING,
to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='part',
name='problem',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
to='contest.Problem'),
),
]
|
import requests
import json
import csv
from time import sleep
url = "https://www.mcdonalds.com.cn/ajaxs/search_by_point"
headers = {
'Connection': 'Keep-Alive',
'Accept': '*/*',
'Accept-Language': 'zh-CN,zh;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Accept-Encoding': 'gzip, deflate,br'
}
latlon = []
name = []
address = []
csv_file = csv.reader(open('mdl-latlonlist.csv', 'r', encoding='utf-8'))
for stu in csv_file:
latlon.append(stu)
f = open('McDonalds-s171127-1.csv', "w", encoding="utf-8")
f1 = open('McDonalds-s-chongfu.text', "a", encoding="utf-8")
f2 = open('McDonalds-s-zong.text', "a", encoding="utf-8")
try:
for lg in range(len(latlon)):
print(latlon[lg])
sleep(5)
response = requests.post(url, data={'point': latlon[lg]}, verify=False)
sleep(5)
print(response.text)
data = json.loads(response.text)
print(data)
s = data['datas']
f2.write(str(s))
print(s)
print(type(s))
print(len(s))
if len(s) != 0:
for i in s:
f.write('sellatlon')
f.write(',')
for k, v in i.items():
f.write(k)
f.write(',')
f.write('\n')
break
for i in s:
if (i['_name'] not in name) or (i['_address'] not in address):
                    sellatlon = str(latlon[lg]).replace(',', ',')  # swap ASCII commas for full-width ones so the CSV columns stay intact
f.write(sellatlon)
f.write(',')
name.append(i['_name'])
address.append(i['_address'])
for k, v in i.items():
v = str(v).replace(',', ',')
f.write(v)
f.write(',')
f.write('\n')
else:
chongfu = str(i['_name'])+"$"+str([i['_address']])
f1.write(chongfu)
f.close()
f2.close()
f1.close()
except Exception as e:
    # report the failure instead of silently swallowing it
    print('error:', e)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 10:20:31 2020
@author: anusk
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
def EBMA(targetFrame, anchorFrame, blocksize):
    accuracy = 1
    p = 16  # half-size of the search window, in pixels
    frameH, frameW = anchorFrame.shape
    print(anchorFrame.shape)
    predictFrame = np.zeros(anchorFrame.shape)
    k = 0
    dx = []
    dy = []
    ox = []
    oy = []
    rangestart = [0, 0]
    rangeEnd = [0, 0]
for m in range(0, frameW, blocksize):
        rangestart[1] = m * accuracy - p * accuracy
        rangeEnd[1] = m * accuracy + blocksize * accuracy + p * accuracy
        if rangestart[1] < 0:
            rangestart[1] = 0
        if rangeEnd[1] > frameW * accuracy:
            rangeEnd[1] = frameW * accuracy
        for n in range(0, frameH, blocksize):
            rangestart[0] = n * accuracy - p * accuracy
            rangeEnd[0] = n * accuracy + blocksize * accuracy + p * accuracy
            if rangestart[0] < 0:
                rangestart[0] = 0
            if rangeEnd[0] > frameH * accuracy:
                rangeEnd[0] = frameH * accuracy
"""
EBMA ALGORITHM
"""
anchorblock = anchorFrame[n:n+blocksize, m:m+blocksize]
mv_x = 0
mv_y = 0
dx.append(0)
dy.append(0)
error = 255*blocksize*blocksize*100
for x in range(rangestart[1], rangeEnd[1]-blocksize):
for y in range(rangestart[0], rangeEnd[0]-blocksize):
targetblock = targetFrame[y:y+blocksize, x:x+blocksize]
anchorblock = np.float64(anchorblock)
targetblock = np.float64(targetblock)
temp_error = np.sum(np.uint8(np.absolute(anchorblock -targetblock)))
if temp_error < error:
error = temp_error
mv_x = y/accuracy-n
mv_y = x/accuracy-m
predictFrame[n:n+blocksize, m:m+blocksize] = targetblock
            while len(dx) <= k:
                dx.append(0)
                dy.append(0)
            dx[k] = mv_x
            dy[k] = mv_y
ox.append(n)
oy.append(m)
k = k + 1
mv_d = [np.array(dx), np.array(dy)]
mv_o = [np.array(ox), np.array(oy)]
return np.uint8(predictFrame), mv_o, mv_d
if __name__ == "__main__":
anchorframe = cv2.imread('foremanY69.png',0)
targetframe = cv2.imread('foremanY72.png',0)
newFrame, origin, direction= EBMA(targetframe, anchorframe, 16)
cv2.imshow('new frame', newFrame)
cv2.waitKey(0)
cv2.destroyWindow('new frame')
|
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = staticfiles_urlpatterns()
urlpatterns += patterns('',
# REST framework
url(r'^$', 'api_docs.views.overview', name='overview'),
url(r'^(?P<topic_name>[\w\.-]+)/$', 'api_docs.views.topic_view', name='topic'),
url(r'^(?P<topic_name>[\w\.-]+)/(?P<language_name>[\w\.-]+)/$', 'api_docs.views.language_view', name='language'),
url(r'^(?P<topic_name>[\w\.-]+)/(?P<language_name>[\w\.-]+)/(?P<release_version>[\w\.-]+)/$', 'api_docs.views.version_view', name='version'),
url(r'^(?P<topic_name>[\w\.-]+)/(?P<language_name>[\w\.-]+)/\+release/$', 'api_docs.views.release_version', name='release_version'),
url(r'^(?P<topic_name>[\w\.-]+)/(?P<language_name>[\w\.-]+)/(?P<release_version>[\w\.-]+)/(?P<element_fullname>[\w\.\-\:]+)/$', 'api_docs.views.element_view', name='element'),
)
|
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from copy import deepcopy, copy
import timeit
import time
import multiprocessing
from sklearn import model_selection
from tqdm import tqdm
from sklearn.model_selection import ParameterGrid
from .BaseCrossVal import BaseCrossVal
from ..utils import binary_metrics, multiclass_metrics, dict_perc, dict_median
class KFold(BaseCrossVal):
""" Exhaustitive search over param_dict calculating binary metrics.
Parameters
----------
model : object
This object is assumed to store bootlist attributes in .model (e.g. modelPLS.model.x_scores_).
X : array-like, shape = [n_samples, n_features]
Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.
Y : array-like, shape = [n_samples, 1]
Response variables, where n_samples is the number of samples.
param_dict : dict
List of attributes to calculate and return bootstrap confidence intervals.
    folds : a positive integer, (default 5)
The number of folds used in the computation.
    n_boot : a positive integer, (default 0)
        The number of bootstrap samples used in the computation for the plot.
Methods
-------
Run: Runs all necessary methods prior to plot.
Plot: Creates a R2/Q2 plot.
"""
def __init__(self, model, X, Y, param_dict, folds=5, n_mc=1, n_boot=0, n_cores=-1, ci=95, stratify=True):
super().__init__(model=model, X=X, Y=Y, param_dict=param_dict, folds=folds, n_mc=n_mc, n_boot=n_boot, n_cores=n_cores, ci=ci, stratify=stratify)
if stratify == True:
self.crossval_idx = model_selection.StratifiedKFold(n_splits=folds, shuffle=True)
else:
self.crossval_idx = model_selection.KFold(n_splits=folds, shuffle=True)
def calc_ypred(self):
"""Calculates ypred full and ypred cv."""
time.sleep(0.5) # Sleep for 0.5 secs to finish printing
# Start Timer
start = timeit.default_timer()
# FULL
try:
full = Parallel(n_jobs=self.n_cores)(delayed(self._calc_full_loop)(i) for i in tqdm(range(len(self.param_list)), desc="1/2"))
        except Exception:
print("TerminatedWorkerError was raised due to excessive memory usage. n_cores was reduced to 1.")
full = Parallel(n_jobs=1)(delayed(self._calc_full_loop)(i) for i in tqdm(range(len(self.param_list)), desc="1/2"))
self.ypred_full = []
self.x_scores_full = []
self.y_loadings_ = []
self.pctvar_ = []
self.w1 = []
self.w2 = []
for i in range(len(self.param_list)):
self.ypred_full.append(full[i][0])
self.x_scores_full.append(full[i][1])
self.y_loadings_.append(full[i][2])
self.pctvar_.append(full[i][3])
self.w1.append(full[i][4])
self.w2.append(full[i][5])
self.loop_w1 = self.w1 * self.n_mc
self.loop_w2 = self.w2 * self.n_mc
# Actual loop CV including Monte-Carlo reps
self.loop_mc = self.param_list * self.n_mc
try:
ypred = Parallel(n_jobs=self.n_cores)(delayed(self._calc_cv_loop)(i) for i in tqdm(range(len(self.loop_mc)), desc="2/2"))
        except Exception:
print("TerminatedWorkerError was raised due to excessive memory usage. n_cores was reduced to 1.")
ypred = Parallel(n_jobs=1)(delayed(self._calc_cv_loop)(i) for i in tqdm(range(len(self.loop_mc)), desc="2/2"))
# Split ypred into full / cv and put in final format
# Format :::> self.ypred_full -> parameter_type -> monte-carlo -> y_true / y_pred
self.ypred_cv = [[] for i in range(len(self.param_list))]
self.x_scores_cv = [[] for i in range(len(self.param_list))]
self.loop_mc_numbers = list(range(len(self.param_list))) * self.n_mc
for i in range(len(self.loop_mc)):
j = self.loop_mc_numbers[i] # Location to append to
self.ypred_cv[j].append(ypred[i][0])
self.x_scores_cv[j].append(ypred[i][1])
# Stop timer
stop = timeit.default_timer()
self.parallel_time = (stop - start) / 60
print("Time taken: {:0.2f} minutes with {} cores".format(self.parallel_time, self.n_cores))
def _calc_full_loop(self, i):
parami = self.param_list[i]
model_i = self.model(**parami)
# model_i.set_params(parami)
# Full
        if model_i.__name__ == "cimcb.model.NN_SigmoidSigmoid" or model_i.__name__ == "cimcb.model.NN_LinearSigmoid":
model_i.compiled = False
model_i.train(self.X, self.Y)
ypred_full_i = model_i.test(self.X)
ypred_full = ypred_full_i
x_scores_full = model_i.model.x_scores_
y_loadings_ = model_i.model.y_loadings_
pctvar_ = model_i.model.pctvar_
if model_i.__name__ == "cimcb.model.NN_SigmoidSigmoid" or model_i.__name__ == "cimcb.model.NN_LinearSigmoid":
w1 = model_i.model.w1
w2 = model_i.model.w2
else:
w1 = 0
w2 = 0
return [ypred_full, x_scores_full, y_loadings_, pctvar_, w1, w2]
def _calc_cv_loop(self, i):
"""Core component of calc_ypred."""
# Set hyper - parameters
params_i = self.loop_mc[i]
model_i = self.model()
model_i.set_params(params_i)
# Full
if model_i.__name__ == "cimcb.model.NN_SigmoidSigmoid" or model_i.__name__ == "cimcb.model.NN_LinearSigmoid":
model_i.train(self.X, self.Y, w1=self.loop_w1[i], w2=self.loop_w2[i])
else:
model_i.train(self.X, self.Y)
model_i.compiled = True
# CV (for each fold)
ypred_cv_i, x_scores_cv = self._calc_cv_ypred(model_i, self.X, self.Y, w1=self.loop_w1[i], w2=self.loop_w2[i])
ypred_cv = ypred_cv_i
return [ypred_cv_i, x_scores_cv]
def calc_stats(self):
"""Calculates binary statistics from ypred full and ypred cv."""
# Calculate for each parameter and append
stats_list = []
std_list = []
self.full_loop = []
self.cv_loop = []
for i in range(len(self.param_list)):
full_loop = []
cv_loop = []
# Get all monte-carlo
for k in range(len(self.ypred_cv[i])):
cv_mc = binary_metrics(self.Y, self.ypred_cv[i][k], parametric=self.model.parametric)
cv_loop.append(cv_mc)
# Average binary metrics
stats_full_i = binary_metrics(self.Y, self.ypred_full[i], parametric=self.model.parametric)
stats_cv_i = dict_median(cv_loop)
# Rename columns
stats_full_i = {k + "full": v for k, v in stats_full_i.items()}
stats_cv_i = {k + "cv": v for k, v in stats_cv_i.items()}
stats_cv_i["R²"] = stats_full_i.pop("R²full")
stats_cv_i["Q²"] = stats_cv_i.pop("R²cv")
# Combine and append
stats_combined = {**stats_full_i, **stats_cv_i}
stats_list.append(stats_combined)
# Save loop -> full_loop is a placeholder
self.full_loop.append(cv_loop)
self.cv_loop.append(cv_loop)
# Keep std if n_mc > 1
if self.n_mc > 1:
std_full_i = dict_perc(cv_loop, ci=self.ci)
std_cv_i = dict_perc(cv_loop, ci=self.ci)
std_full_i = {k + "full": v for k, v in std_full_i.items()}
std_cv_i = {k + "cv": v for k, v in std_cv_i.items()}
std_cv_i["R²"] = std_full_i.pop("R²full")
std_cv_i["Q²"] = std_cv_i.pop("R²cv")
std_combined = {**std_full_i, **std_cv_i}
std_list.append(std_combined)
self.table = self._format_table(stats_list) # Transpose, Add headers
self.table = self.table.reindex(index=np.sort(self.table.index))
if self.n_mc > 1:
self.table_std = self._format_table(std_list) # Transpose, Add headers
self.table_std = self.table_std.reindex(index=np.sort(self.table_std.index))
return self.table
def _calc_cv_ypred(self, model_i, X, Y, w1, w2):
"""Method used to calculate ypred cv."""
ypred_cv_i = [None] * len(Y)
x_scores_cv_i = [None] * len(Y)
np.random.seed(seed=None)
for train, test in self.crossval_idx.split(Y,Y):
try:
X_train = X[train, :]
Y_train = Y[train]
X_test = X[test, :]
except TypeError:
X_train = []
Y_train = Y[train]
X_test =[]
for j in self.X:
X_train.append(j[train, :])
X_test.append(j[test, :])
if model_i.__name__ == "cimcb.model.NN_SigmoidSigmoid" or model_i.__name__ == "cimcb.model.NN_LinearSigmoid":
model_i.compiled = True
model_i.train(X_train, Y_train, w1=w1, w2=w2)
else:
model_i.train(X_train, Y_train)
ypred_cv_i_j = model_i.test(X_test)
            # Place each predicted value at its original index in ypred_cv_i (TODO: find a cleaner way)
for (idx, val) in zip(test, ypred_cv_i_j):
ypred_cv_i[idx] = val.tolist()
            # Calc x_scores_cv if applicable
if "model.x_scores_" in model_i.bootlist:
x_scores_cv_i_j = model_i.model.x_scores_
for (idx, val) in zip(test, x_scores_cv_i_j):
x_scores_cv_i[idx] = val.tolist()
return ypred_cv_i, x_scores_cv_i
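# A minimal usage sketch (hypothetical names: `SomeModel` stands in for any
# cimcb model class, and X, Y for numpy arrays; none are defined in this module):
#
#   param_dict = {"n_components": [1, 2, 3, 4]}
#   cv = KFold(model=SomeModel, X=X, Y=Y, param_dict=param_dict, folds=5, n_mc=10)
#   cv.run()         # per the class docstring, runs everything needed before plotting
#   print(cv.table)  # median full/cv metrics (R², Q²) per parameter combination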
|
import datetime
from typing import Any, Dict, List, Optional, Sequence, Tuple
import numpy as np
import pandas as pd
from wfdb.io import _signal
from wfdb.io import util
from wfdb.io.header import HeaderSyntaxError, rx_record, rx_segment, rx_signal
"""
Notes
-----
In the original WFDB package, certain fields have default values, but
not all of them. Some attributes need to be present for core
functionality, i.e. baseline, whereas others are not essential, yet have
defaults, i.e. base_time.
This inconsistency has likely resulted in the generation of incorrect
files, and general confusion. This library aims to make explicit,
whether certain fields are present in the file, by setting their values
to None if they are not written in, unless the fields are essential, in
which case an actual default value will be set.
The read vs write default values are different for 2 reasons:
1. We want to force the user to be explicit with certain important
fields when writing WFDB records fields, without affecting
existing WFDB headers when reading.
2. Certain unimportant fields may be dependencies of other
important fields. When writing, we want to fill in defaults
so that the user doesn't need to. But when reading, it should
be clear that the fields are missing.
"""
int_types = (int, np.int64, np.int32, np.int16, np.int8)
float_types = (float, np.float64, np.float32) + int_types
_SPECIFICATION_COLUMNS = [
"allowed_types",
"delimiter",
"dependency",
"write_required",
"read_default",
"write_default",
]
RECORD_SPECS = pd.DataFrame(
index=[
"record_name",
"n_seg",
"n_sig",
"fs",
"counter_freq",
"base_counter",
"sig_len",
"base_time",
"base_date",
],
columns=_SPECIFICATION_COLUMNS,
dtype="object",
data=[
[(str,), "", None, True, None, None], # record_name
[int_types, "/", "record_name", True, None, None], # n_seg
[int_types, " ", "record_name", True, None, None], # n_sig
[float_types, " ", "n_sig", True, 250, None], # fs
[float_types, "/", "fs", False, None, None], # counter_freq
[float_types, "(", "counter_freq", False, None, None], # base_counter
[int_types, " ", "fs", True, None, None], # sig_len
[
(datetime.time,),
" ",
"sig_len",
False,
None,
"00:00:00",
], # base_time
[(datetime.date,), " ", "base_time", False, None, None], # base_date
],
)
SIGNAL_SPECS = pd.DataFrame(
index=[
"file_name",
"fmt",
"samps_per_frame",
"skew",
"byte_offset",
"adc_gain",
"baseline",
"units",
"adc_res",
"adc_zero",
"init_value",
"checksum",
"block_size",
"sig_name",
],
columns=_SPECIFICATION_COLUMNS,
dtype="object",
data=[
[(str,), "", None, True, None, None], # file_name
[(str,), " ", "file_name", True, None, None], # fmt
[int_types, "x", "fmt", False, 1, None], # samps_per_frame
[int_types, ":", "fmt", False, None, None], # skew
[int_types, "+", "fmt", False, None, None], # byte_offset
[float_types, " ", "fmt", True, 200.0, None], # adc_gain
[int_types, "(", "adc_gain", True, 0, None], # baseline
[(str,), "/", "adc_gain", True, "mV", None], # units
[int_types, " ", "adc_gain", False, None, 0], # adc_res
[int_types, " ", "adc_res", False, None, 0], # adc_zero
[int_types, " ", "adc_zero", False, None, None], # init_value
[int_types, " ", "init_value", False, None, None], # checksum
[int_types, " ", "checksum", False, None, 0], # block_size
[(str,), " ", "block_size", False, None, None], # sig_name
],
)
SEGMENT_SPECS = pd.DataFrame(
index=["seg_name", "seg_len"],
columns=_SPECIFICATION_COLUMNS,
dtype="object",
data=[
[(str), "", None, True, None, None], # seg_name
[int_types, " ", "seg_name", True, None, None], # seg_len
],
)
# Specifications of all WFDB header fields, except for comments
FIELD_SPECS = pd.concat((RECORD_SPECS, SIGNAL_SPECS, SEGMENT_SPECS))
class BaseHeaderMixin(object):
"""
Mixin class with multi-segment header methods. Inherited by Record and
MultiRecord classes.
Attributes
----------
N/A
"""
def get_write_subset(self, spec_type):
"""
Get a set of fields used to write the header; either 'record'
or 'signal' specification fields. Helper function for
`get_write_fields`. Gets the default required fields, the user
defined fields, and their dependencies.
Parameters
----------
spec_type : str
The set of specification fields desired. Either 'record' or
'signal'.
Returns
-------
write_fields : list or dict
For record fields, returns a list of all fields needed. For
signal fields, it returns a dictionary of all fields needed,
with keys = field and value = list of channels that must be
present for the field.
"""
if spec_type == "record":
write_fields = []
record_specs = RECORD_SPECS.copy()
# Remove the n_seg requirement for single segment items
if not hasattr(self, "n_seg"):
record_specs.drop("n_seg", inplace=True)
for field in record_specs.index[-1::-1]:
# Continue if the field has already been included
if field in write_fields:
continue
# If the field is required by default or has been
# defined by the user
if (
record_specs.loc[field, "write_required"]
or getattr(self, field) is not None
):
req_field = field
# Add the field and its recursive dependencies
while req_field is not None:
write_fields.append(req_field)
req_field = record_specs.loc[req_field, "dependency"]
# Add comments if any
if getattr(self, "comments") is not None:
write_fields.append("comments")
# signal spec field. Need to return a potentially different list for each channel.
elif spec_type == "signal":
# List of lists for each channel
write_fields = []
signal_specs = SIGNAL_SPECS.copy()
for ch in range(self.n_sig):
# The fields needed for this channel
write_fields_ch = []
for field in signal_specs.index[-1::-1]:
if field in write_fields_ch:
continue
item = getattr(self, field)
# If the field is required by default or has been defined by the user
if signal_specs.loc[field, "write_required"] or (
item is not None and item[ch] is not None
):
req_field = field
# Add the field and its recursive dependencies
while req_field is not None:
write_fields_ch.append(req_field)
req_field = signal_specs.loc[
req_field, "dependency"
]
write_fields.append(write_fields_ch)
# Convert the list of lists to a single dictionary.
# keys = field and value = list of channels in which the
# field is required.
dict_write_fields = {}
# For fields present in any channel:
for field in set(
[i for write_fields_ch in write_fields for i in write_fields_ch]
):
dict_write_fields[field] = []
for ch in range(self.n_sig):
if field in write_fields[ch]:
dict_write_fields[field].append(ch)
write_fields = dict_write_fields
return write_fields
class HeaderMixin(BaseHeaderMixin):
"""
Mixin class with single-segment header methods. Inherited by Record class.
Attributes
----------
N/A
"""
def set_defaults(self):
"""
Set defaults for fields needed to write the header if they have
defaults.
Parameters
----------
N/A
Returns
-------
N/A
Notes
-----
- This is NOT called by `rdheader`. It is only automatically
called by the gateway `wrsamp` for convenience.
- This is also not called by `wrheader` since it is supposed to
be an explicit function.
- This is not responsible for initializing the attributes. That
is done by the constructor.
See also `set_p_features` and `set_d_features`.
"""
rfields, sfields = self.get_write_fields()
for f in rfields:
self.set_default(f)
for f in sfields:
self.set_default(f)
def wrheader(self, write_dir="", expanded=True):
"""
Write a WFDB header file. The signals are not used. Before
writing:
- Get the fields used to write the header for this instance.
- Check each required field.
- Check that the fields are cohesive with one another.
Parameters
----------
write_dir : str, optional
The output directory in which the header is written.
expanded : bool, optional
Whether the header file should include `samps_per_frame` (this
should only be true if the signal files are written using
`expanded=True`).
Returns
-------
N/A
Notes
-----
This function does NOT call `set_defaults`. Essential fields
must be set beforehand.
"""
# Get all the fields used to write the header
# sig_write_fields is a dictionary of
# {field_name:required_channels}
rec_write_fields, sig_write_fields = self.get_write_fields()
if not expanded:
sig_write_fields.pop("samps_per_frame", None)
# Check the validity of individual fields used to write the header
# Record specification fields (and comments)
for field in rec_write_fields:
self.check_field(field)
# Signal specification fields.
for field in sig_write_fields:
self.check_field(field, required_channels=sig_write_fields[field])
# Check the cohesion of fields used to write the header
self.check_field_cohesion(rec_write_fields, list(sig_write_fields))
# Write the header file using the specified fields
self.wr_header_file(rec_write_fields, sig_write_fields, write_dir)
def get_write_fields(self):
"""
Get the list of fields used to write the header, separating
record and signal specification fields. Returns the default
required fields, the user defined fields, and their dependencies.
Does NOT include `d_signal` or `e_d_signal`.
Parameters
----------
N/A
Returns
-------
rec_write_fields : list
Record specification fields to be written. Includes
'comment' if present.
sig_write_fields : dict
Dictionary of signal specification fields to be written,
with values equal to the channels that need to be present
for each field.
"""
# Record specification fields
rec_write_fields = self.get_write_subset("record")
# Add comments if any
        if self.comments is not None:
rec_write_fields.append("comments")
# Get required signal fields if signals are present.
self.check_field("n_sig")
if self.n_sig > 0:
sig_write_fields = self.get_write_subset("signal")
else:
sig_write_fields = None
return rec_write_fields, sig_write_fields
def _auto_signal_file_names(self):
fmt = self.fmt or [None] * self.n_sig
spf = self.samps_per_frame or [None] * self.n_sig
num_groups = 0
group_number = []
prev_fmt = prev_spf = None
channels_in_group = 0
for ch_fmt, ch_spf in zip(fmt, spf):
if ch_fmt != prev_fmt:
num_groups += 1
channels_in_group = 0
elif ch_fmt in ("508", "516", "524"):
if channels_in_group >= 8 or ch_spf != prev_spf:
num_groups += 1
channels_in_group = 0
group_number.append(num_groups)
channels_in_group += 1
prev_fmt = ch_fmt
prev_spf = ch_spf
if num_groups < 2:
return [self.record_name + ".dat"] * self.n_sig
else:
digits = len(str(group_number[-1]))
return [
self.record_name + "_" + str(g).rjust(digits, "0") + ".dat"
for g in group_number
]
def set_default(self, field):
"""
Set the object's attribute to its default value if it is missing
and there is a default. Not responsible for initializing the
attribute. That is done by the constructor.
Parameters
----------
field : str
The desired attribute of the object.
Returns
-------
N/A
"""
# Record specification fields
if field in RECORD_SPECS.index:
# Return if no default to set, or if the field is already
# present.
if (
RECORD_SPECS.loc[field, "write_default"] is None
or getattr(self, field) is not None
):
return
setattr(self, field, RECORD_SPECS.loc[field, "write_default"])
# Signal specification fields
# Setting entire list default, not filling in blanks in lists.
elif field in SIGNAL_SPECS.index:
# Specific dynamic case
if field == "file_name" and self.file_name is None:
self.file_name = self._auto_signal_file_names()
return
item = getattr(self, field)
# Return if no default to set, or if the field is already
# present.
if (
SIGNAL_SPECS.loc[field, "write_default"] is None
or item is not None
):
return
# Set more specific defaults if possible
if field == "adc_res" and self.fmt is not None:
self.adc_res = _signal._fmt_res(self.fmt)
return
setattr(
self,
field,
[SIGNAL_SPECS.loc[field, "write_default"]] * self.n_sig,
)
def check_field_cohesion(self, rec_write_fields, sig_write_fields):
"""
Check the cohesion of fields used to write the header.
Parameters
----------
rec_write_fields : list
List of record specification fields to write.
sig_write_fields : dict
Dictionary of signal specification fields to write, values
being equal to a list of channels to write for each field.
Returns
-------
N/A
"""
# If there are no signal specification fields, there is nothing to check.
if self.n_sig > 0:
# The length of all signal specification fields must match n_sig
# even if some of its elements are None.
for f in sig_write_fields:
if len(getattr(self, f)) != self.n_sig:
raise ValueError(
"The length of field: " + f + " must match field n_sig."
)
# Each file_name must correspond to only one fmt, (and only one byte offset if defined).
datfmts = {}
for ch in range(self.n_sig):
if self.file_name[ch] not in datfmts:
datfmts[self.file_name[ch]] = self.fmt[ch]
else:
if datfmts[self.file_name[ch]] != self.fmt[ch]:
raise ValueError(
"Each file_name (dat file) specified must have the same fmt"
)
datoffsets = {}
if self.byte_offset is not None:
# At least one byte offset value exists
for ch in range(self.n_sig):
if self.byte_offset[ch] is None:
continue
if self.file_name[ch] not in datoffsets:
datoffsets[self.file_name[ch]] = self.byte_offset[ch]
else:
if (
datoffsets[self.file_name[ch]]
!= self.byte_offset[ch]
):
raise ValueError(
"Each file_name (dat file) specified must have the same byte offset"
)
def wr_header_file(self, rec_write_fields, sig_write_fields, write_dir):
"""
Write a header file using the specified fields. Converts Record
attributes into appropriate WFDB format strings.
Parameters
----------
rec_write_fields : list
List of record specification fields to write.
sig_write_fields : dict
Dictionary of signal specification fields to write, values
being equal to a list of channels to write for each field.
write_dir : str
The directory in which to write the header file.
Returns
-------
N/A
"""
# Create record specification line
record_line = ""
# Traverse the ordered dictionary
for field in RECORD_SPECS.index:
# If the field is being used, add it with its delimiter
if field in rec_write_fields:
string_field = str(getattr(self, field))
# Certain fields need extra processing
if field == "fs" and isinstance(self.fs, float):
if round(self.fs, 8) == float(int(self.fs)):
string_field = str(int(self.fs))
elif field == "base_time" and "." in string_field:
string_field = string_field.rstrip("0")
elif field == "base_date":
string_field = "/".join(
(string_field[8:], string_field[5:7], string_field[:4])
)
record_line += (
RECORD_SPECS.loc[field, "delimiter"] + string_field
)
# The 'base_counter' field needs to be closed with ')'
if field == "base_counter":
record_line += ")"
header_lines = [record_line]
# Create signal specification lines (if any) one channel at a time
if self.n_sig > 0:
signal_lines = self.n_sig * [""]
for ch in range(self.n_sig):
# Traverse the signal fields
for field in SIGNAL_SPECS.index:
# If the field is being used, add each of its
# elements with the delimiter to the appropriate
# line
if (
field in sig_write_fields
and ch in sig_write_fields[field]
):
signal_lines[ch] += SIGNAL_SPECS.loc[
field, "delimiter"
] + str(getattr(self, field)[ch])
# The 'baseline' field needs to be closed with ')'
if field == "baseline":
signal_lines[ch] += ")"
header_lines += signal_lines
# Create comment lines (if any)
if "comments" in rec_write_fields:
comment_lines = ["# " + comment for comment in self.comments]
header_lines += comment_lines
util.lines_to_file(self.record_name + ".hea", write_dir, header_lines)
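# A minimal usage sketch for the single-segment mixin above (assuming the
# public wfdb.Record class, which inherits HeaderMixin; the field values are
# illustrative only):
#
#   import wfdb
#   record = wfdb.Record(record_name="rec1", n_sig=1, fs=250, sig_len=1000,
#                        file_name=["rec1.dat"], fmt=["16"], adc_gain=[200.0],
#                        baseline=[0], units=["mV"])
#   record.set_defaults()  # fill in defaulted fields such as adc_res
#   record.wrheader()      # validates fields, then writes rec1.hea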
class MultiHeaderMixin(BaseHeaderMixin):
"""
Mixin class with multi-segment header methods. Inherited by
MultiRecord class.
Attributes
----------
N/A
"""
n_seg: int
seg_len: Sequence[int]
segments: Optional[Sequence]
def set_defaults(self):
"""
Set defaults for fields needed to write the header if they have
defaults. This is NOT called by rdheader. It is only called by the
gateway wrsamp for convenience. It is also not called by wrheader since
it is supposed to be an explicit function. Not responsible for
initializing the attributes. That is done by the constructor.
Parameters
----------
N/A
Returns
-------
N/A
"""
for field in self.get_write_fields():
self.set_default(field)
def wrheader(self, write_dir=""):
"""
Write a multi-segment WFDB header file. The signals or segments are
not used. Before writing:
- Get the fields used to write the header for this instance.
- Check each required field.
- Check that the fields are cohesive with one another.
Parameters
----------
write_dir : str, optional
The output directory in which the header is written.
Returns
-------
N/A
Notes
-----
This function does NOT call `set_defaults`. Essential fields
must be set beforehand.
"""
# Get all the fields used to write the header
write_fields = self.get_write_fields()
# Check the validity of individual fields used to write the header
for field in write_fields:
self.check_field(field)
# Check the cohesion of fields used to write the header
self.check_field_cohesion()
# Write the header file using the specified fields
self.wr_header_file(write_fields, write_dir)
def get_write_fields(self):
"""
Get the list of fields used to write the multi-segment header.
Parameters
----------
N/A
Returns
-------
write_fields : list
All the default required fields, the user defined fields,
and their dependencies.
"""
# Record specification fields
write_fields = self.get_write_subset("record")
# Segment specification fields are all mandatory
write_fields = write_fields + ["seg_name", "seg_len"]
# Comments
        if self.comments is not None:
write_fields.append("comments")
return write_fields
def set_default(self, field):
"""
Set a field to its default value if there is a default.
Parameters
----------
field : str
The desired attribute of the object.
Returns
-------
N/A
"""
        # Record specification fields
        if field in RECORD_SPECS.index:
            # Return if no default to set, or if the field is already present.
            if (
                RECORD_SPECS.loc[field, "write_default"] is None
                or getattr(self, field) is not None
            ):
                return
            setattr(self, field, RECORD_SPECS.loc[field, "write_default"])
def check_field_cohesion(self):
"""
Check the cohesion of fields used to write the header.
Parameters
----------
N/A
Returns
-------
N/A
"""
# The length of seg_name and seg_len must match n_seg
for f in ["seg_name", "seg_len"]:
if len(getattr(self, f)) != self.n_seg:
raise ValueError(
"The length of field: " + f + " does not match field n_seg."
)
# Check the sum of the 'seg_len' fields against 'sig_len'
if np.sum(self.seg_len) != self.sig_len:
raise ValueError(
"The sum of the 'seg_len' fields do not match the 'sig_len' field"
)
def wr_header_file(self, write_fields, write_dir):
"""
Write a header file using the specified fields.
Parameters
----------
write_fields : list
All the default required fields, the user defined fields,
and their dependencies.
write_dir : str
The output directory in which the header is written.
Returns
-------
N/A
"""
# Create record specification line
record_line = ""
# Traverse the ordered dictionary
for field in RECORD_SPECS.index:
# If the field is being used, add it with its delimiter
if field in write_fields:
record_line += RECORD_SPECS.loc[field, "delimiter"] + str(
getattr(self, field)
)
header_lines = [record_line]
# Create segment specification lines
segment_lines = self.n_seg * [""]
# For both fields, add each of its elements with the delimiter
# to the appropriate line
for field in SEGMENT_SPECS.index:
for seg_num in range(self.n_seg):
segment_lines[seg_num] += SEGMENT_SPECS.loc[
field, "delimiter"
] + str(getattr(self, field)[seg_num])
header_lines = header_lines + segment_lines
# Create comment lines (if any)
if "comments" in write_fields:
comment_lines = ["# " + comment for comment in self.comments]
header_lines += comment_lines
        util.lines_to_file(self.record_name + ".hea", write_dir, header_lines)
def get_sig_segments(self, sig_name=None):
"""
Get a list of the segment numbers that contain a particular signal
(or a dictionary of segment numbers for a list of signals).
Only works if information about the segments has been read in.
Parameters
----------
sig_name : str, list
The name of the signals to be segmented.
Returns
-------
sig_dict : dict
Segments for each desired signal.
sig_segs : list
Segments for the desired signal.
"""
if self.segments is None:
raise Exception(
"The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rsegment_fieldsments=True"
)
# Default value = all signal names.
if sig_name is None:
sig_name = self.get_sig_name()
if isinstance(sig_name, list):
sig_dict = {}
for sig in sig_name:
sig_dict[sig] = self.get_sig_segments(sig)
return sig_dict
elif isinstance(sig_name, str):
sig_segs = []
for i in range(self.n_seg):
if (
self.seg_name[i] != "~"
and sig_name in self.segments[i].sig_name
):
sig_segs.append(i)
return sig_segs
else:
raise TypeError("sig_name must be a string or a list of strings")
def get_sig_name(self):
"""
Get the signal names for the entire record.
Parameters
----------
N/A
Returns
-------
sig_name : str, list
The name of the signals to be segmented.
"""
if self.segments is None:
raise Exception(
"The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rd_segments=True"
)
if self.layout == "fixed":
for i in range(self.n_seg):
if self.seg_name[i] != "~":
sig_name = self.segments[i].sig_name
break
else:
sig_name = self.segments[0].sig_name
return sig_name
def contained_ranges(self, sig_name: str) -> List[Tuple[int, int]]:
"""
Given a signal name, return the sample ranges that contain signal values,
relative to the start of the full record. Does not account for NaNs/missing
values.
This function is mainly useful for variable layout records, but can also be
used for fixed-layout records. Only works if the headers from the individual
segment records have already been read in.
Parameters
----------
sig_name : str
The name of the signal to query.
Returns
-------
ranges : List[Tuple[int, int]]
            Tuple pairs which specify the sample ranges in which the signal is contained.
The second value of each tuple pair will be one beyond the signal index.
eg. A length 1000 signal would generate a tuple of: (0, 1000), allowing
selection using signal[0:1000].
"""
if self.segments is None:
raise Exception(
"The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rd_segments=True"
)
ranges = []
seg_start = 0
range_start = None
# TODO: Add shortcut for fixed-layout records
# Cannot process segments only because missing segments are None
# and do not contain length information.
for seg_num in range(self.n_seg):
seg_len = self.seg_len[seg_num]
segment = self.segments[seg_num]
if seg_len == 0:
continue
# Open signal range
if (
range_start is None
and segment is not None
and sig_name in segment.sig_name
):
range_start = seg_start
# Close signal range
elif range_start is not None and (
segment is None or sig_name not in segment.sig_name
):
ranges.append((range_start, seg_start))
range_start = None
seg_start += seg_len
# Account for final segment
if range_start is not None:
ranges.append((range_start, seg_start))
return ranges
def contained_combined_ranges(
self,
sig_names: Sequence[str],
) -> List[Tuple[int, int]]:
"""
        Given a collection of signal names, return the sample ranges that
contain all of the specified signals, relative to the start of the
full record. Does not account for NaNs/missing values.
This function is mainly useful for variable layout records, but can also be
used for fixed-layout records. Only works if the headers from the individual
segment records have already been read in.
Parameters
----------
sig_names : List[str]
The names of the signals to query.
Returns
-------
ranges : List[Tuple[int, int]]
            Tuple pairs which specify the sample ranges in which the signal is contained.
The second value of each tuple pair will be one beyond the signal index.
eg. A length 1000 signal would generate a tuple of: (0, 1000), allowing
selection using signal[0:1000].
"""
# TODO: Add shortcut for fixed-layout records
if len(sig_names) == 0:
return []
combined_ranges = self.contained_ranges(sig_names[0])
if len(sig_names) > 1:
for name in sig_names[1:]:
combined_ranges = util.overlapping_ranges(
combined_ranges, self.contained_ranges(name)
)
return combined_ranges
def wfdb_strptime(time_string: str) -> datetime.time:
"""
Given a time string in an acceptable WFDB format, return
a datetime.time object.
Valid formats: SS, MM:SS, HH:MM:SS, all with and without microsec.
Parameters
----------
time_string : str
The time to be converted to a datetime.time object.
Returns
-------
datetime.time object
The time converted from str format.
"""
n_colons = time_string.count(":")
if n_colons == 0:
time_fmt = "%S"
elif n_colons == 1:
time_fmt = "%M:%S"
elif n_colons == 2:
time_fmt = "%H:%M:%S"
if "." in time_string:
time_fmt += ".%f"
return datetime.datetime.strptime(time_string, time_fmt).time()
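# Examples of the accepted formats:
#   wfdb_strptime("13")          -> datetime.time(0, 0, 13)
#   wfdb_strptime("05:13")       -> datetime.time(0, 5, 13)
#   wfdb_strptime("12:05:13.75") -> datetime.time(12, 5, 13, 750000)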
def _parse_record_line(record_line: str) -> dict:
"""
Extract fields from a record line string into a dictionary.
Parameters
----------
record_line : str
The record line contained in the header file
Returns
-------
record_fields : dict
The fields for the given record line.
"""
# Dictionary for record fields
record_fields: Dict[str, Any] = {}
# Read string fields from record line
match = rx_record.match(record_line)
if match is None:
raise HeaderSyntaxError("invalid syntax in record line")
(
record_fields["record_name"],
record_fields["n_seg"],
record_fields["n_sig"],
record_fields["fs"],
record_fields["counter_freq"],
record_fields["base_counter"],
record_fields["sig_len"],
record_fields["base_time"],
record_fields["base_date"],
) = match.groups()
for field in RECORD_SPECS.index:
# Replace empty strings with their read defaults (which are
# mostly None)
if record_fields[field] == "":
record_fields[field] = RECORD_SPECS.loc[field, "read_default"]
# Typecast non-empty strings for non-string (numerical/datetime)
# fields
else:
if RECORD_SPECS.loc[field, "allowed_types"] == int_types:
record_fields[field] = int(record_fields[field])
elif RECORD_SPECS.loc[field, "allowed_types"] == float_types:
record_fields[field] = float(record_fields[field])
# cast fs to an int if it is close
if field == "fs":
fs = float(record_fields["fs"])
if round(fs, 8) == float(int(fs)):
fs = int(fs)
record_fields["fs"] = fs
elif field == "base_time":
record_fields["base_time"] = wfdb_strptime(
record_fields["base_time"]
)
elif field == "base_date":
record_fields["base_date"] = datetime.datetime.strptime(
record_fields["base_date"], "%d/%m/%Y"
).date()
# This is not a standard WFDB field, but is useful to set.
if record_fields["base_date"] and record_fields["base_time"]:
record_fields["base_datetime"] = datetime.datetime.combine(
record_fields["base_date"], record_fields["base_time"]
)
return record_fields
def _parse_signal_lines(signal_lines):
"""
Extract fields from a list of signal line strings into a dictionary.
Parameters
----------
    signal_lines : list
        The signal line strings from which fields will be extracted.
Returns
-------
signal_fields : dict
The fields for the given signal line.
"""
n_sig = len(signal_lines)
# Dictionary for signal fields
signal_fields = {}
# Each dictionary field is a list
for field in SIGNAL_SPECS.index:
signal_fields[field] = n_sig * [None]
# Read string fields from signal line
for ch in range(n_sig):
match = rx_signal.match(signal_lines[ch])
if match is None:
raise HeaderSyntaxError("invalid syntax in signal line")
(
signal_fields["file_name"][ch],
signal_fields["fmt"][ch],
signal_fields["samps_per_frame"][ch],
signal_fields["skew"][ch],
signal_fields["byte_offset"][ch],
signal_fields["adc_gain"][ch],
signal_fields["baseline"][ch],
signal_fields["units"][ch],
signal_fields["adc_res"][ch],
signal_fields["adc_zero"][ch],
signal_fields["init_value"][ch],
signal_fields["checksum"][ch],
signal_fields["block_size"][ch],
signal_fields["sig_name"][ch],
) = match.groups()
for field in SIGNAL_SPECS.index:
# Replace empty strings with their read defaults (which are mostly None)
# Note: Never set a field to None. [None]* n_sig is accurate, indicating
# that different channels can be present or missing.
if signal_fields[field][ch] == "":
signal_fields[field][ch] = SIGNAL_SPECS.loc[
field, "read_default"
]
                    # Special case: a missing baseline defaults to adc_zero if present
if field == "baseline" and signal_fields["adc_zero"][ch] != "":
signal_fields["baseline"][ch] = int(
signal_fields["adc_zero"][ch]
)
# Typecast non-empty strings for numerical fields
else:
if SIGNAL_SPECS.loc[field, "allowed_types"] is int_types:
signal_fields[field][ch] = int(signal_fields[field][ch])
elif SIGNAL_SPECS.loc[field, "allowed_types"] is float_types:
signal_fields[field][ch] = float(signal_fields[field][ch])
# Special case: adc_gain of 0 means 200
if (
field == "adc_gain"
and signal_fields["adc_gain"][ch] == 0
):
signal_fields["adc_gain"][ch] = 200.0
return signal_fields
def _read_segment_lines(segment_lines):
"""
Extract fields from segment line strings into a dictionary.
Parameters
----------
    segment_lines : list
        The segment line strings from which fields will be extracted.
Returns
-------
segment_fields : dict
The fields for the given segment line.
"""
# Dictionary for segment fields
segment_fields = {}
# Each dictionary field is a list
for field in SEGMENT_SPECS.index:
segment_fields[field] = [None] * len(segment_lines)
# Read string fields from signal line
for i in range(len(segment_lines)):
match = rx_segment.match(segment_lines[i])
if match is None:
raise HeaderSyntaxError("invalid syntax in segment line")
(
segment_fields["seg_name"][i],
segment_fields["seg_len"][i],
) = match.groups()
        # Typecast the string for the numerical field
        segment_fields["seg_len"][i] = int(segment_fields["seg_len"][i])
return segment_fields
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Found(models.Model):
    found_name = models.CharField(u'project name', max_length=256)
    manager = models.CharField(u'principal investigator', max_length=20)
    money = models.FloatField(u'amount', blank=True, null=True)
    grant_no = models.CharField(u'grant number', max_length=20, blank=True, null=True)
    company = models.CharField(u'host institution', max_length=64, blank=True, null=True)
    category = models.CharField(u'funding category', max_length=64, blank=True, null=True)
    grant_year = models.IntegerField(u'approval year', blank=True, null=True)
class ProvinceFound(models.Model):
    found_name = models.CharField(u'project name', max_length=256)
    manager = models.CharField(u'principal investigator', max_length=20)
    province = models.CharField(u'province', max_length=20)
    money = models.FloatField(u'amount', blank=True, null=True)
    grant_no = models.CharField(u'grant number', max_length=20, blank=True, null=True)
    company = models.CharField(u'applicant institution', max_length=64, blank=True, null=True)
    category = models.CharField(u'funding category', max_length=64, blank=True, null=True)
    grant_year = models.IntegerField(u'approval year', blank=True, null=True)
|
import random
import string
import subprocess
import itertools
import types
import prettytable
import re
def randstr(n=4, fixed=True, charset=None):
if not n:
return ''
if not fixed:
n = random.randint(1, n)
if not charset:
charset = string.letters + string.digits
return ''.join(random.choice(charset) for x in range(n))
def sxor(s1, s2):
return ''.join(chr(ord(a) ^ ord(b))
for a, b in zip(s1, itertools.cycle(s2)))
def divide(data, min_size, max_size, split_size):
    it = iter(data)
    size = len(data)
for i in range(split_size - 1, 0, -1):
s = random.randint(min_size, size - max_size * i)
yield ''.join(itertools.islice(it, 0, s))
size -= s
yield ''.join(it)
def stringify(data, table_border = True):
# TODO: Check that is prettytable-0.7.2 that supports the
# dynamic table columns number setting. Version 0.5 does not.
output = ''
    # Empty outputs. A falsy value is probably best rendered as an empty string
    if not data:
        output = ''
else:
table = prettytable.PrettyTable()
# List outputs.
if isinstance(data, (types.ListType, types.TupleType)):
if len(data) > 0:
columns_num = 1
if isinstance(data[0], (types.ListType, types.TupleType)):
columns_num = len(data[0])
for row in data:
if isinstance(row, (types.ListType, types.TupleType)):
table.add_row(row)
else:
table.add_row([row])
# Dict outputs are display as tables
elif isinstance(data, types.DictType) and data:
# Populate the rows
randomitem = next(data.itervalues())
if isinstance(randomitem, (types.ListType, types.TupleType)):
for field in data:
table.add_row([field] + data[field])
else:
for field in data:
table.add_row([field, str(data[field])])
# Else, try to stringify
else:
output = str(data)
if not output:
table.header = False
table.align = 'l'
table.border = table_border
output = table.get_string()
return output
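# A few illustrative calls (shapes only; exact borders come from prettytable):
#   stringify(['a', 'b'])            -> a one-column table, one row per item
#   stringify([['a', 1], ['b', 2]])  -> a table with one row per inner list
#   stringify({'user': 'www-data'})  -> key/value rows
#   stringify(42)                    -> '42'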
def getstatusoutput(cmd):
"""
Return (status, output) of executing cmd in a shell.
This new implementation should work on all platforms.
"""
pipe = subprocess.Popen(cmd, shell=True, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = str.join("", pipe.stdout.readlines())
sts = pipe.wait()
if sts is None:
sts = 0
return sts, output
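# Example: on a POSIX shell, getstatusoutput('echo hi') returns (0, 'hi\n');
# a failing command returns its non-zero exit status along with any output.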
def shorten_string(body, keep_header = 0, keep_trailer = 0):
"""
Smartly shorten a given string.
"""
    # Keep only the header
    if (keep_header
        and not keep_trailer
        and len(body) > keep_header):
        return '%s..' % body[:keep_header]
    # Keep only the trailer
    if (keep_trailer
        and not keep_header
        and len(body) > keep_trailer):
        return '..%s' % body[-keep_trailer:]
if (keep_header
and keep_trailer
and len(body) > keep_header + keep_trailer):
return '%s .. %s' % (body[:keep_header], body[-keep_trailer:])
return body
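# Illustrative behaviour of the helpers above (a sketch; exact table layout
# depends on the installed prettytable version):
# stringify({'uid': 33, 'user': 'www-data'})  -> two-column table, one row per key
# stringify([['a', 1], ['b', 2]])             -> one table row per inner list
# shorten_string('abcdefghij', keep_header=3, keep_trailer=3)  -> 'abc .. hij'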
|
from PIL import ImageGrab as IG
import pyautogui as pa
import sys
import os
import time
import re
pa.FAILSAFE = True
sec_between_keys = 0.25
sec_between_term = 3
sec_sleep = 0.5
# Screenshot helper
def screenGrab():
    box = None  # None captures the full screen
    im = IG.grab(box)
    im.save(os.getcwd() + '\\img\\full_snap__' + str(int(time.time())) + '.png', 'PNG')
def waitWindow(p_x, p_y, p_r, p_g, p_b):
    i = 1
    time.sleep(2)
    while True:
        if pa.pixelMatchesColor(p_x, p_y, (p_r, p_g, p_b)):  # target pixel coordinates and color code (Sudeokwon reservation icon)
            break
        print('Attempt %s...' % (i))
        i += 1
        time.sleep(0.25)
# From bringing up the NEIS screen until just before entering the password
# Open the NEIS site via an AutoText shortcut
pa.typewrite(',skdl', interval=0.15, pause=0.25)  # AutoText entry that opens NEIS
waitWindow(1143, 307, 251, 15, 12)
# NEIS login - enter the user ID
pa.click(x=1174, y=386, interval=0.02, pause=0.25)
pa.typewrite('driver1', interval=0.15)  # type the user ID
pa.hotkey('tab', interval=0.02)  # press the Tab key
pa.hotkey('enter', interval=0.02)  # press the Enter key
# Wait until the certificate screen appears
waitWindow(1038, 706, 14, 110, 166)
pa.click(x=874, y=339, interval=0.02, pause=0.5)  # click the removable disk icon
pa.click(x=912, y=398, interval=0.02)  # select drive D
pa.click(x=1000, y=622, interval=0.02)  # select the password input field
pa.click(x=1000, y=622, interval=0.02)  # select the password input field
pa.click(x=1000, y=622, interval=0.02)  # select the password input field
|
from django.shortcuts import render, get_object_or_404
from .models import Blog
def all_blogs(request):
    blogs = Blog.objects.all()
    return render(request, "one.html", {'returned': blogs})
def details(request, id):
    blog = get_object_or_404(Blog, pk=id)
    return render(request, "details.html", {"blog_id": blog})
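# Matching urls.py entries would look like this (illustrative, names assumed):
# path('', views.all_blogs, name='all_blogs'),
# path('<int:id>/', views.details, name='details'),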
|
"""@package docstring
Provides the base request handler.
"""
import wsgiref.handlers
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import login_required
import openradar.db
import datetime
import os
import simplejson
class RequestHandler(webapp.RequestHandler):
def respondWithDictionaryAsJSON(self, d):
self.response.out.write(simplejson.dumps(d) + "\n")
def respondWithText(self, text):
self.response.out.write(unicode(text))
self.response.out.write("\n")
"""Supplies a common template generation function.
When you call generate(), we augment the template variables supplied with
the current user in the 'user' variable and the current webapp request
in the 'request' variable.
"""
def respondWithTemplate(self, template_name, template_values={}):
values = {
'request': self.request,
'debug': self.request.get('debug'),
'application_name': 'Open Radar',
'user': users.GetCurrentUser(),
'login_url': users.CreateLoginURL(self.request.uri),
'logout_url': users.CreateLogoutURL('http://' + self.request.host + '/'),
}
values.update(template_values)
directory = os.path.dirname(__file__)
path = os.path.join(directory, os.path.join('../templates', template_name))
self.response.out.write(unicode(template.render(path, values)))
def GetCurrentUser(self):
if 'Authorization' in self.request.headers:
auth = self.request.headers['Authorization']
if auth:
apikey = openradar.db.APIKey().fetchByAPIKey(auth)
if apikey:
return apikey.user
return users.GetCurrentUser()
|
#!/usr/bin/env python
import numpy as np
from scipy.signal import hilbert
from scipy import integrate
def namodel_py(effadse, wdos, delta, ergy):
fermi = np.argmax(ergy >= 0)
htwdos = np.imag(hilbert(wdos, axis=0))
lorentzian = (1/np.pi) * (delta)/((ergy - effadse)**2 + delta**2)
dos_ads = wdos/((ergy - effadse - htwdos)**2 + wdos**2)/np.pi
chem = wdos/(ergy - effadse - htwdos)
integrand = [x - np.pi if x > 0 else x for x in np.arctan(chem)]
integ = integrate.cumtrapz(integrand, ergy, axis=0)[fermi - 1]/np.pi #fermi-1 is the index to calculate until fermi level
na = integrate.cumtrapz(lorentzian, ergy, axis =0)[fermi - 1]
# need to correct for when beta = 0
# wdos = delta
wdos_ = np.array([delta for i in range(0, len(ergy))])
chem_ = wdos_/(ergy - effadse) # htwdos is just 0
integrand_ = [x - np.pi if x > 0 else x for x in np.arctan(chem_)]
integ_ = integrate.cumtrapz(integrand_, ergy, axis=0)[fermi - 1]/np.pi #fermi-1 is the index to calculate until fermi level
energy_NA = 2*(integ - integ_)
return energy_NA, dos_ads, na
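# Minimal usage sketch (synthetic inputs, not from the original script; relies on
# scipy.integrate.cumtrapz, i.e. an older SciPy): evaluate the model on a toy
# energy grid with a Gaussian metal band centred below the Fermi level.
if __name__ == '__main__':
    ergy = np.linspace(-10.0, 10.0, 2001)   # energy grid (units assumed: eV)
    wdos = np.exp(-(ergy + 2.0)**2)         # toy weighted metal DOS
    e_na, dos_ads, na = namodel_py(effadse=-1.005, wdos=wdos, delta=0.05, ergy=ergy)
    print(e_na, na)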
|
# Reverse a singly linked list.
#
# Example:
#
# Input: 1->2->3->4->5->NULL
# Output: 5->4->3->2->1->NULL
#
# Follow up:
# You can reverse the list either iteratively or recursively. Could you solve it both ways?
# Related Topics: Linked List
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
        # Brute-force array approach
# if not head:
# return head
# res = []
# while head:
# res.append(head.val)
# head = head.next
# res = res[::-1]
# reversed_head = ListNode(None)
# head = reversed_head
# for _, val in enumerate(res):
# head.next = ListNode(val)
# head = head.next
# return reversed_head.next
        # Classic linked-list in-place pointer reversal!!
reversed_head = None
current = head
while current:
reversed_head, reversed_head.next, current = current, reversed_head, current.next
return reversed_head
# leetcode submit region end(Prohibit modification and deletion)
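# Quick sanity check outside LeetCode (assumes the ListNode stub above is
# uncommented first):
# nodes = [ListNode(i) for i in range(1, 6)]
# for a, b in zip(nodes, nodes[1:]):
#     a.next = b
# head = Solution().reverseList(nodes[0])  # traversal now yields 5, 4, 3, 2, 1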
|
"""
Este archivo ejemplifica la creacion de una topologia de mininet
En este caso estamos creando una topologia muy simple con la siguiente forma
host --- switch --- switch --- host
"""
import os
from mininet.topo import Topo
class Example(Topo):
def __init__(self, half_ports = 2, **opts):
Topo.__init__(self, **opts)
niveles = int(os.environ['HEIGHT'])
cant_de_sw = 0
for i in range(1, niveles + 1):
cant_de_sw += 2 ** (i - 1)
print("Cantidad de switches " + str(cant_de_sw))
switches = [None] * cant_de_sw
        # First, create the 3 hosts that connect to the root
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
sw_count = 1
host_count = 4
for nivel in range(1, niveles + 1):
pos = 2 ** (nivel - 1) - 1
for i in range(0, pos + 1):
switches[pos + i] = self.addSwitch('sw' + str(sw_count))
sw_count += 1
        # Add the links
sw1 = switches[0]
self.addLink(sw1, h1)
self.addLink(sw1, h2)
self.addLink(sw1, h3)
for nivel in range(1, niveles + 1):
pos = 2 ** (nivel - 1) - 1
            if nivel == niveles:
                # We are at the last level, so we have to attach a host
                # to each switch at this level
for i in range(0, pos + 1):
sw_actual = switches[pos + i]
host_it = self.addHost('h' + str(host_count))
host_count += 1
self.addLink(sw_actual, host_it)
break
for i in range(0, pos + 1):
print("actual " + str(pos) + " + " + str(i))
sw_actual = switches[pos + i]
pos_sig_nivel = 2 ** nivel - 1
for j in range(0, pos_sig_nivel + 1):
print("sig " + str(pos_sig_nivel) + " + " + str(j))
sw_it = switches[pos_sig_nivel + j]
self.addLink(sw_actual, sw_it)
topos = {'example': Example}
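# A possible invocation (assumed file name; requires mininet, root privileges,
# and the HEIGHT variable to survive sudo):
#   HEIGHT=2 sudo -E mn --custom example_topo.py --topo example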
|
from psycopg2.extras import RealDictCursor
import database_common
import bcrypt
import os
def random_api_key():
    """
    :return: 100 random bytes to use as a secret key/salt
    """
    return os.urandom(100)
# Query function: checks whether the username already exists in the DB
@database_common.connection_handler
def username_exists(cursor: RealDictCursor, username: str):
query = """
SELECT *
FROM users
WHERE username = %(username)s;
"""
args = {'username': username}
cursor.execute(query, args)
return cursor.fetchone()
# Query function: inserts the new user into the DB
@database_common.connection_handler
def register_user(cursor: RealDictCursor, username: str, text_password: str, submission_time: int):
"""
Checks for valid username.
If username is valid, inserts the new user into the database
"""
if username_exists(username):
return False
query = """
INSERT INTO users (username,password,submission_time)
VALUES (%(username)s,%(password)s,%(submission_time)s)
"""
args = {"username": username, "password": encrypt_password(
text_password), "submission_time": submission_time}
return cursor.execute(query, args)
# Converts a plain-text password into a salted hash
def encrypt_password(password):
# By using bcrypt, the salt is saved into the hash itself
hashed_pass = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
return hashed_pass.decode('utf-8')
def verify_password(text_password, hashed_pass):
return bcrypt.checkpw(text_password.encode('utf-8'), hashed_pass.encode('utf-8'))
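# Round-trip sketch for the two helpers above (illustrative):
# hashed = encrypt_password('s3cret')
# assert verify_password('s3cret', hashed)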
@database_common.connection_handler
def users_data(cursor: RealDictCursor) -> list:
query = """
SELECT *
FROM users
"""
cursor.execute(query)
return cursor.fetchall()
@database_common.connection_handler
def check_user(cursor, username):
query = """
SELECT id, password
FROM users
WHERE username ILIKE %(username)s;
"""
args = {
"username": username
}
cursor.execute(query, args)
return cursor.fetchone()
@database_common.connection_handler
def votes(cursor: RealDictCursor) -> list:
query = """
SELECT *
FROM planet_votes
"""
cursor.execute(query)
return cursor.fetchall()
@database_common.connection_handler
def vote_planet(cursor: RealDictCursor, planet_id: int, planet_name: str, user_id: int, submission_time: int) -> list:
query = """
INSERT INTO planet_votes (planet_id, planet_name, user_id, submission_time)
VALUES (%(planet_id)s, %(planet_name)s, %(user_id)s, %(submission_time)s)
"""
args = {'planet_id': planet_id, 'planet_name': planet_name, 'user_id': user_id, 'submission_time': submission_time}
cursor.execute(query, args)
@database_common.connection_handler
def planets_votes(cursor: RealDictCursor, user_id: int) -> list:
query = """
SELECT pv.planet_name planet, COUNT(pv.planet_name) AS count, u.id, pv.user_id
FROM planet_votes AS pv
LEFT JOIN users u ON pv.user_id = u.id
WHERE u.id = %(user_id)s
GROUP BY planet, u.id, pv.user_id
"""
args = {'user_id': user_id}
cursor.execute(query, args)
return cursor.fetchall()
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from django.contrib.auth import get_user_model
User = get_user_model()
class SignUpForm(UserCreationForm):
"""Prepares help texts, class and placeholder attributes.
Define methods to increase and decrese token_count amount,
betting and check if bet is possible.
"""
# error_messages = {
# 'invalid_code': _(
# "invalid code.The code doent exist"
# ),
# }
username = forms.CharField(
max_length=50,
required=True,
label="",
# help_text='E.g 07200200200 or 01200200200',
widget=forms.TextInput(
attrs={
"class": "form-control",
"placeholder": "Phone Number. ie 071001000",
}
),
)
# first_name = forms.CharField(max_length=30, required=False,
# label='', help_text='Optional',
# widget=forms.TextInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'First name...'
# }))
# last_name = forms.CharField(max_length=30, required=False,
# label='', help_text='Optional',
# widget=forms.TextInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Last name...'
# }))
# phone_number = forms.CharField(max_length=150, required=True,
# label='',
# help_text='E.g 07200200200 or 01200200200',
# widget=forms.TextInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Phone Number...'
# }))
email = forms.EmailField(
max_length=254,
required=True,
label="",
        # help_text='Required. Enter a valid email. Required if you forgot your password.',
widget=forms.EmailInput(
attrs={"class": "form-control", "placeholder": "Email..."}
),
)
referer_code = forms.CharField(
max_length=150,
# required=True,
label="",
# help_text='Dont have ? Enter ADMIN',
widget=forms.TextInput(
attrs={"class": "form-control", "placeholder": "Referer Code"}
),
)
password1 = forms.CharField(
required=True,
label="",
widget=forms.PasswordInput(
attrs={"class": "form-control", "placeholder": "Password..."}
),
)
password2 = forms.CharField(
required=True,
label="",
widget=forms.PasswordInput(
attrs={"class": "form-control", "placeholder": "Confirm password..."}
),
)
class Meta:
model = User
fields = ("username", "email", "referer_code", "password1", "password2")
# error_messages = {
# 'referer_code': {'required': "Daru code required.Dont have ? Enter ADMIN"}
# }
# def cleaned_daru_code(self):
# user =User.objects.get(username=self.username)
# if self.daru_code not in User.codes():
# raise ValidationError(
# self.error_messages['invalid_code'],
# code='invalid_code',
# )
|
# Assuming the package exposes a callable with the same name (a bare module
# object itself is not callable):
from cython_mpi4py import cython_mpi4py
cython_mpi4py()
|
import torch
import torchvision
from PIL import Image
import torchvision.transforms as transforms
def get_detection_model():
model = torchvision.models.detection.__dict__['maskrcnn_resnet50_fpn'](num_classes=91,
pretrained=True)
model.to('cpu')
checkpoint = torch.load('./water_meter_detection.pth', map_location='cpu')
model.load_state_dict(checkpoint['model'])
model.eval()
return model
def transform_image(image_bytes):
my_transforms = transforms.Compose([transforms.ToTensor()])
# image = Image.open(io.BytesIO(image_bytes))
image = Image.open(image_bytes)
return my_transforms(image).unsqueeze(0)
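# Illustrative inference sketch ('sample.jpg' is a placeholder; the checkpoint
# path above must exist):
# model = get_detection_model()
# with open('sample.jpg', 'rb') as f:
#     batch = transform_image(f)
# with torch.no_grad():
#     predictions = model(batch)  # list of dicts with 'boxes', 'labels', 'scores', 'masks'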
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import output
from esphome.const import CONF_ID
empty_binary_output_ns = cg.esphome_ns.namespace('empty_binary_output')
EmptyBinaryOutput = empty_binary_output_ns.class_('EmptyBinaryOutput', output.BinaryOutput,
cg.Component)
CONFIG_SCHEMA = output.BINARY_OUTPUT_SCHEMA.extend({
cv.Required(CONF_ID): cv.declare_id(EmptyBinaryOutput),
}).extend(cv.COMPONENT_SCHEMA)
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
yield output.register_output(var, config)
yield cg.register_component(var, config)
|
import lasagne
from utct.common.functor import Functor
class MnistModel(Functor):
def __init__(self):
super(MnistModel, self).__init__()
#self.param_bounds = {
# #'mdl_conv1a_nf': (6, 128),
# #'mdl_conv1b_nf': (6, 128),
# #'mdl_conv2a_nf': (6, 128),
# #'mdl_conv2b_nf': (6, 128),
# #'mdl_fc1_nh': (10, 500),
# 'mdl_drop2a_p': (0.0, 0.25),
# 'mdl_drop2b_p': (0.0, 0.25),
# 'mdl_drop3_p': (0.0, 0.50)}
def __call__(self,
input_var=None,
num_classes=10,
nl_type=lasagne.nonlinearities.rectify,
mdl_conv1a_nf=40,
mdl_conv1b_nf=60,
mdl_conv2a_nf=50,
mdl_conv2b_nf=75,
mdl_fc1_nh=75,
mdl_drop2a_p=0.033,
mdl_drop2b_p=0.097,
mdl_drop3_p=0.412,
**kwargs):
data = lasagne.layers.InputLayer(shape=(None, 1, 28, 28), input_var=input_var, name='data')
conv1a = lasagne.layers.Conv2DLayer(incoming=data, num_filters=int(mdl_conv1a_nf), filter_size=(3, 3),
pad=(1, 1), nonlinearity=nl_type, name='conv1a')
conv1b = lasagne.layers.Conv2DLayer(incoming=conv1a, num_filters=int(mdl_conv1b_nf), filter_size=(3, 3),
pad=(1, 1), nonlinearity=nl_type, name='conv1b')
pool1 = lasagne.layers.MaxPool2DLayer(incoming=conv1b, pool_size=(2, 2), name='pool1')
conv2a = lasagne.layers.Conv2DLayer(incoming=pool1, num_filters=int(mdl_conv2a_nf), filter_size=(3, 3),
pad=(1, 1), nonlinearity=nl_type, name='conv2a')
drop2a = lasagne.layers.DropoutLayer(incoming=conv2a, p=mdl_drop2a_p, name="drop2a")
conv2b = lasagne.layers.Conv2DLayer(incoming=drop2a, num_filters=int(mdl_conv2b_nf), filter_size=(3, 3),
pad=(1, 1), nonlinearity=nl_type, name='conv2b')
drop2b = lasagne.layers.DropoutLayer(incoming=conv2b, p=mdl_drop2b_p, name="drop2b")
pool2 = lasagne.layers.MaxPool2DLayer(incoming=drop2b, pool_size=(2, 2), name='pool2')
fc1 = lasagne.layers.DenseLayer(incoming=pool2, num_units=int(mdl_fc1_nh), nonlinearity=nl_type, name='fc1')
drop3 = lasagne.layers.DropoutLayer(incoming=fc1, p=mdl_drop3_p, name="drop3")
fc2 = lasagne.layers.DenseLayer(incoming=drop3, num_units=num_classes,
nonlinearity=lasagne.nonlinearities.softmax, name='fc2')
return fc2
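# Illustrative use (a sketch; assumes Theano and Lasagne are installed):
# import theano.tensor as T
# network = MnistModel()(input_var=T.tensor4('inputs'), num_classes=10)
# params = lasagne.layers.get_all_params(network, trainable=True)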
|
"""
Examen Parcial 3
Carrillo Medina Alexis Adrian (CMAA)
Nombre del programa: Parcial3.py
"""
#----- Seccion de bibliotecas
import numpy as np
import matplotlib.pyplot as plt
#----- Codigo
# La validacion se encuentra en el metodo main
#---------- Metodos auxiliares -----------
def sustDelante(A,b):
    # Auxiliary vector for the solution
    sol=np.zeros([b.shape[0],b.shape[1]])
    # Forward substitution
    for i in range(b.shape[1]):
        # sol=bi
        sol[0,i]=b[0,i]
        for j in range(0,i):
            # bi - sum_0^i lij*yj = x
            sol[0,i]-=A[i,j]*sol[0,j]
        # x/lii
        sol[0,i]/=A[i,i]
    return sol
def sustAtras(A,b):
    # Auxiliary vector for the solution
    sol=np.zeros((b.shape[0],b.shape[1]))
    # Backward substitution
    for i in range(b.shape[1]-1,-1,-1):
        # sol=bi
        sol[0,i]=b[0,i]
        for j in range(i+1,b.shape[1]):
            # yi - sum_i+1^n-1 = x
            sol[0,i]-=A[i,j]*sol[0,j]
        # x/uii
        sol[0,i]/=A[i,i]
    return sol
#---------- 1.1 --------------------------
# Helper function to transpose a vector
def TransposeVector(A):
    New=[]
    for i in range(A.shape[0]):
        New.append(A[i,0])
    New=np.array(New)
    return New
def factQR(A):
    # Dimensions of A
    ren,col=A.shape
    # Auxiliary zero matrices
    Q=np.matrix(np.zeros((ren,col)))
    R=np.matrix(np.zeros((col,col)))
    for i in range(col):
        # i-th column
        Q[:,i]=A[:,i]
        for j in range(i):
            # Subtract from the vector Ai its component along the direction Qj
            R[j,i]=TransposeVector(Q[:,j]) * A[:,i]
            Q[:,i]-=R[j,i] * Q[:,j]
        # Normalize
        R[i,i]=np.linalg.norm(Q[:,i],2)
        Q[:,i]=Q[:,i]/R[i,i]
    return Q,R
#---------- 1.2 --------------------------
def cholesky(A):
    # Dimensions of A
    ren,col=A.shape
    # Auxiliary zero matrix
    L=np.matrix(np.zeros((ren,col)))
    for k in range(ren):
        for i in range(k+1):
            # Auxiliary sum 1
            sum1=0
            if(k==i):
                # Auxiliary sum 2
                sum2=0
                for j in range(k):
                    sum2+=L[k,j]**2
                # First formula, for diagonal entries
                L[k,k]=np.sqrt(A[k,k]-sum2)
            else:
                for j in range(i):
                    sum1+=L[i,j]*L[k,j]
                # Second formula, for entries below the diagonal
                L[k,i]=(A[k,i]-sum1)/L[i,i]
    # Auxiliary step to store the transpose of L inside L
    for k in range(1,ren):
        for i in range(col):
            if(i!=k):
                if (L[k,i]!=0):
                    L[i,k]=L[k,i]
    return L
#---------- 2.1 --------------------------
def minimos (A,b):
    # Set up the normal equations A^T A x = A^T b
    NewA=np.matmul(A.T,A)
    Newb=np.matmul(A.T,b)
    # Cholesky factorization of the normal matrix
    L=cholesky(NewA)
    # First triangular solve
    y=sustDelante(L,Newb)
    # Second triangular solve
    x=sustAtras(L,y)
    return x
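# Illustrative check (same data as the validation in main below): the call
# minimos(np.matrix([[1,1],[1,2],[1,3],[1,4]]), np.array([20.73,20.77,19.90,18.73]))
# returns the least-squares coefficients [alfa, beta] of the line y = alfa + beta*x.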
#---------- 2.2 --------------------------
def ecuacion(alfa,beta,x):
    # Return the value of the line at x
    return alfa + beta*x
#---------- 2.3 --------------------------
def grafMinimos(A,b):
    # Solve the least-squares system
    sol=minimos(A,b)
    alfa=sol[0][0]
    beta=sol[0][1]
    # Extract the x values from the second column of A
    vals=[]
    for i in range(A.shape[0]):
        vals.append(A[:,1][i,0])
    vals=np.array(vals)
    # Auxiliary vector
    x=np.linspace(0,7,20)
    # Values along the fitted line
    y5=ecuacion(alfa,beta,5)
    y6=ecuacion(alfa,beta,6)
    y=ecuacion(alfa,beta,x)
    # Plot
    plt.plot(x,y,label="Least-squares fit")
    plt.plot(5,y5,"o",label="x = 5")
    plt.plot(6,y6,"o",label="x = 6")
    plt.plot(vals,b,"o",label="Data points")
    plt.legend(shadow=True)
    plt.show()
#---------- Main method ------------------
def main():
    # Validation 1.1 ----------
    A=np.matrix([[int(np.random.uniform(1,80)) for i in range(3)],
        [int(np.random.uniform(1,80)) for i in range(3)],
        [int(np.random.uniform(1,80)) for i in range(3)]])
    Q,R=factQR(A)
    Result=np.dot(Q,R)
    # Compare every entry of QR against A
    for i in range(A.shape[0]):
        for j in range(A.shape[1]):
            assert np.round(Result[i,j],0)==A[i,j]
    # Tests
    print("\n Tests: QR decomposition")
    print("\n Q")
    print(Q)
    print("\n R")
    print(R)
    print("\n Original A")
    print(A)
    print("\n A from QR")
    print(Result)
    # Validation 1.2 ----------
    A=np.matrix([[6,15,55],[15,55,225],[55,225,979]])
    L=cholesky(A)
    Ltest = np.linalg.cholesky(A)
    LTranspose=Ltest.T
    Ltrue=Ltest+LTranspose
    for i in range(Ltrue.shape[0]):
        for j in range(Ltrue.shape[1]):
            if(i==j):
                Ltrue[i,j]/=2
    for i in range(Ltrue.shape[0]):
        for j in range(Ltrue.shape[1]):
            assert L[i,j]==Ltrue[i,j]
    LNew=np.matrix(np.zeros((L.shape[0],L.shape[1])))
    LTransposeNew=np.matrix(np.zeros((L.shape[0],L.shape[1])))
    for i in range(L.shape[0]):
        for j in range(L.shape[1]):
            if(i==j):
                LTransposeNew[i,j]=L[i,j]
                LNew[i,j]=L[i,j]
            else:
                if(j > i):
                    LTransposeNew[i,j]=L[i,j]
                else:
                    LNew[i,j]=L[i,j]
    newA = np.matmul(LNew,LTransposeNew)
    for i in range(L.shape[0]):
        for j in range(L.shape[1]):
            assert np.round(newA[i,j],0)==A[i,j]
    # Tests
    print("\n Tests: Cholesky decomposition")
    print("\n Matrix A")
    print(A)
    # The reference matrix carries L.T
    # as part of its entries
    print("\n Cholesky matrix L")
    print(LNew)
    # Validation 2.1 ----------
    A=np.matrix([[1,1],[1,2],[1,3],[1,4]])
    b=np.array([20.73,20.77,19.90,18.73])
    sol=minimos(A,b)
    solTest=np.linalg.lstsq(A,b,rcond=None)
    for i in range(sol.shape[1]):
        assert np.round(sol[0,i],6)==np.round(solTest[0][i],6)
    # Tests
    print("\n Matrix A")
    print(A)
    print("\n Vector b")
    print(b)
    print("\nLine coefficients from least squares")
    print(sol)
    # Validation 2.2 ----------
    y=ecuacion(1,1,1)
    assert y==2
    # Tests
    print("\n Value of y")
    print(y)
    # Validation 2.3 ----------
    grafMinimos(A,b)
if __name__=='__main__':
main()
|
"""Common functionality for testing the v1 API."""
from ..base import TestCase as BaseTestCase
from ..base import APITestCase as BaseAPITestCase
class NamespaceMixin(object):
"""Designate the namespace for tests."""
namespace = 'v1'
    __test__ = True  # Run these tests even if they are disabled in the base class
class TestCase(BaseTestCase, NamespaceMixin):
"""Useful methods for testing."""
class APITestCase(BaseAPITestCase, NamespaceMixin):
"""Useful methods for testing API endpoints."""
|
"""
script to scrape a website for doctor addresses
"""
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
with webdriver.Chrome() as driver:
# init driver
wait = WebDriverWait(driver, 10)
# load main page
driver.get('https://healthy.kaiserpermanente.org/northern-california/doctors-locations#/search-result')
# set start and end values
num_docs = 0
target_docs = 50
# loop until we have at least 50 docs
while num_docs < target_docs:
# wait until pagination link is available (indicates loading of details is done)
element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, 'kpPagination__link')))
# get list of doctors in page
doctors = driver.find_elements_by_class_name('detail-data')
# get info for each doctor and print it out
for doctor in doctors:
print('Physician Name:', doctor.find_element_by_class_name('doctorTitle').get_attribute('text'))
print('Physician Specialty:', doctor.find_element_by_class_name('specialtyMargin').get_attribute('textContent'))
print('Practicing Address:', str(doctor.find_element_by_class_name('doctorOffice').get_attribute('textContent')).strip())
print(str(doctor.find_element_by_class_name('doctorAddress').get_attribute('textContent')).strip())
            try:
                print('Phone:', str(doctor.find_element_by_class_name('doctorPhone').get_attribute('textContent'))[5:])
            except Exception:  # the phone element is missing for some listings
                print('Phone: n/a')
print('---')
num_docs += 1
# just wait a bit to make sure the browser is ready etc
sleep(5)
# click the Next button
driver.execute_script("x=document.getElementsByClassName('kpPagination__link');x[x.length-1].click();")
|
def num_divisible_room(x, y, n, m):
# x, y are the dividing factors
# n is total number of floors in the building
# m_k is number of rooms on floor k
room_nums = []
for floor in range(0, n):
for room in range(0, int(m[floor])):
room_nums.append((floor + 1) * 100 + (room + 1))
counter = 0
for room_num in room_nums:
if room_num % x == 0 or room_num % y == 0:
counter += 1
return counter
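# Worked example: n=2 floors with m=['2', '3'] rooms yields room numbers
# 101, 102, 201, 202, 203; with x=2, y=101 the rooms 101, 102 and 202 qualify,
# so num_divisible_room(2, 101, 2, ['2', '3']) == 3.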
import sys
if len(sys.argv) == 2:
try:
f = open(sys.argv[1], 'r')
except IOError:
print("usage: python divisible_room_num.py [input_file_name]")
sys.exit(1)
c = int(f.readline())
outputs = []
for i in range(0, c):
        n = f.readline()
while n == '\n':
n = f.readline()
n = int(n)
        m = f.readline().split()
        temp = f.readline().split()
x = int(temp[0])
y = int(temp[1])
outputs.append("Case #" + str(i + 1) + ": " + str(num_divisible_room(x, y, n, m)))
f.readline()
for output in outputs:
print(output)
f.close()
elif len(sys.argv) == 1:
    c = int(input())
print('')
outputs = []
for i in range(0, c):
        n = int(input())
        m = input().split()
        temp = input().split()
x = int(temp[0])
y = int(temp[1])
outputs.append("Case #" + str(i + 1) + ": " + str(num_divisible_room(x, y, n, m)))
for output in outputs:
print(output)
else:
print("usage: python divisible_room_num.py [input_file_name]")
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main_window.ui',
# licensing of 'main_window.ui' applies.
#
# Created: Thu Jan 2 17:55:43 2020
# by: pyside2-uic running on PySide2 5.9.0~a1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(803, 616)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.splitter = QtWidgets.QSplitter(self.centralwidget)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.vlayout_image = QtWidgets.QVBoxLayout(self.layoutWidget)
self.vlayout_image.setContentsMargins(0, 0, 0, 0)
self.vlayout_image.setObjectName("vlayout_image")
self.wingview_layout = QtWidgets.QVBoxLayout()
self.wingview_layout.setObjectName("wingview_layout")
self.vlayout_image.addLayout(self.wingview_layout)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.label_feature_size = QtWidgets.QLabel(self.layoutWidget)
self.label_feature_size.setObjectName("label_feature_size")
self.verticalLayout_5.addWidget(self.label_feature_size)
self.slider_feature_size = QtWidgets.QSlider(self.layoutWidget)
self.slider_feature_size.setSingleStep(1)
self.slider_feature_size.setSliderPosition(20)
self.slider_feature_size.setOrientation(QtCore.Qt.Horizontal)
self.slider_feature_size.setObjectName("slider_feature_size")
self.verticalLayout_5.addWidget(self.slider_feature_size)
self.vlayout_image.addLayout(self.verticalLayout_5)
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_image_size = QtWidgets.QLabel(self.layoutWidget)
self.label_image_size.setObjectName("label_image_size")
self.verticalLayout_4.addWidget(self.label_image_size)
self.slider_image_size = QtWidgets.QSlider(self.layoutWidget)
self.slider_image_size.setMaximum(1024)
self.slider_image_size.setOrientation(QtCore.Qt.Horizontal)
self.slider_image_size.setObjectName("slider_image_size")
self.verticalLayout_4.addWidget(self.slider_image_size)
self.vlayout_image.addLayout(self.verticalLayout_4)
self.tableWidget = QtWidgets.QTableWidget(self.splitter)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(3)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
self.verticalLayout_3.addWidget(self.splitter)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.btn_label_wings = QtWidgets.QPushButton(self.centralwidget)
self.btn_label_wings.setObjectName("btn_label_wings")
self.horizontalLayout.addWidget(self.btn_label_wings)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName("progressBar")
self.verticalLayout_2.addWidget(self.progressBar)
self.verticalLayout_3.addLayout(self.verticalLayout_2)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 803, 25))
self.menuBar.setObjectName("menuBar")
self.menuFile = QtWidgets.QMenu(self.menuBar)
self.menuFile.setObjectName("menuFile")
self.menuTools = QtWidgets.QMenu(self.menuBar)
self.menuTools.setObjectName("menuTools")
MainWindow.setMenuBar(self.menuBar)
self.actionAdd_Wings = QtWidgets.QAction(MainWindow)
self.actionAdd_Wings.setObjectName("actionAdd_Wings")
self.actionOpen_Existing_Project = QtWidgets.QAction(MainWindow)
self.actionOpen_Existing_Project.setObjectName("actionOpen_Existing_Project")
self.actionSave_Project = QtWidgets.QAction(MainWindow)
self.actionSave_Project.setObjectName("actionSave_Project")
self.actionExport_CSV = QtWidgets.QAction(MainWindow)
self.actionExport_CSV.setObjectName("actionExport_CSV")
self.actionSet_Scale = QtWidgets.QAction(MainWindow)
self.actionSet_Scale.setObjectName("actionSet_Scale")
self.actionTest = QtWidgets.QAction(MainWindow)
self.actionTest.setObjectName("actionTest")
self.actionLoad_Model = QtWidgets.QAction(MainWindow)
self.actionLoad_Model.setObjectName("actionLoad_Model")
self.menuFile.addAction(self.actionAdd_Wings)
self.menuFile.addAction(self.actionOpen_Existing_Project)
self.menuFile.addAction(self.actionSave_Project)
self.menuFile.addAction(self.actionExport_CSV)
self.menuFile.addAction(self.actionLoad_Model)
self.menuTools.addAction(self.actionSet_Scale)
self.menuBar.addAction(self.menuFile.menuAction())
self.menuBar.addAction(self.menuTools.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtWidgets.QApplication.translate("MainWindow", "MainWindow", None, -1))
self.label_feature_size.setText(QtWidgets.QApplication.translate("MainWindow", "Feature Size", None, -1))
self.label_image_size.setText(QtWidgets.QApplication.translate("MainWindow", "Image Size", None, -1))
self.tableWidget.horizontalHeaderItem(0).setText(QtWidgets.QApplication.translate("MainWindow", "Image Path", None, -1))
self.tableWidget.horizontalHeaderItem(1).setText(QtWidgets.QApplication.translate("MainWindow", "Wing Area", None, -1))
self.tableWidget.horizontalHeaderItem(2).setText(QtWidgets.QApplication.translate("MainWindow", "Scale [mm/pixel]", None, -1))
self.btn_label_wings.setText(QtWidgets.QApplication.translate("MainWindow", "Compute Keypoints", None, -1))
self.menuFile.setTitle(QtWidgets.QApplication.translate("MainWindow", "File", None, -1))
self.menuTools.setTitle(QtWidgets.QApplication.translate("MainWindow", "Tools", None, -1))
self.actionAdd_Wings.setText(QtWidgets.QApplication.translate("MainWindow", "Add Wings", None, -1))
self.actionOpen_Existing_Project.setText(QtWidgets.QApplication.translate("MainWindow", "Open Existing Project", None, -1))
self.actionSave_Project.setText(QtWidgets.QApplication.translate("MainWindow", "Save Project", None, -1))
self.actionExport_CSV.setText(QtWidgets.QApplication.translate("MainWindow", "Export CSV", None, -1))
self.actionSet_Scale.setText(QtWidgets.QApplication.translate("MainWindow", "Set Scale", None, -1))
self.actionTest.setText(QtWidgets.QApplication.translate("MainWindow", "Test", None, -1))
self.actionLoad_Model.setText(QtWidgets.QApplication.translate("MainWindow", "Load Model", None, -1))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
from api.utils import *
def fill_match_info(page_url, match_info, start_time=0, end_time=90):
all_matches = get_all_matches(page_url, match_info['league'])
home_team_matches, guest_team_matches = all_matches['home_team_matches'], all_matches['guest_team_matches']
home_team_actions_list = []
guest_team_actions_list = []
TotalHome = len(home_team_matches)
TotalAway = len(guest_team_matches)
HomePositive = 0
HomeNegative = 0
AwayPositive = 0
AwayNegative = 0
Home_HT_1 = 0
Away_HT_1 = 0
    for action in home_team_matches:
        actions_list = get_match_info_from_link(action)
        home_team_actions_list.append(actions_list)
        if matching_matches(actions_list, start_time, end_time):
            HomePositive += 1
        else:
            HomeNegative += 1
        if matching_matches(actions_list, 0, 45):
            Home_HT_1 += 1  # first-half (0-45) hit for the home team
    for action in guest_team_matches:
        actions_list = get_match_info_from_link(action)
        guest_team_actions_list.append(actions_list)
        if matching_matches(actions_list, start_time, end_time):
            AwayPositive += 1
        else:
            AwayNegative += 1
        if matching_matches(actions_list, 0, 45):
            Away_HT_1 += 1  # first-half (0-45) hit for the guest team
    # Goals per 5-minute bin, for and against, for each side. The first bin
    # starts at minute 0 but keeps the original '1_5' label.
    bins = [(0, 5)] + [(start, start + 4) for start in range(6, 90, 5)]
    goal_stats = {}
    for prefix, actions_list, team_key in (
            ('ForHome', home_team_actions_list, 'home_team'),
            ('AgainstHome', home_team_actions_list, 'guest_team'),
            ('ForAway', guest_team_actions_list, 'home_team'),
            ('AgainstAway', guest_team_actions_list, 'guest_team')):
        for start, end in bins:
            key = '%s%d_%d' % (prefix, max(start, 1), end)
            goal_stats[key] = number_of_goals_team(actions_list, team_key, start, end)
    result = {'Country': match_info['country'], 'Start': match_info['time'], 'League': match_info['league'],
              'Home': match_info['home_team'], 'Away': match_info['guest_team'],
              'TotalHome': TotalHome, 'TotalAway': TotalAway,
              'HomePositive': HomePositive, 'HomeNegative': HomeNegative,
              'AwayPositive': AwayPositive, 'AwayNegative': AwayNegative,
              'Home_HT_1': Home_HT_1, 'Away_HT_1': Away_HT_1}
    result.update(goal_stats)
    return result
|
#!/usr/bin/env python3
import boto3
import botocore
import json
import re
import argparse
def boto3_client(resource):
"""Create Boto3 client."""
return boto3.client(resource)
def s3_list_buckets():
"""Gets all S3 buckets and adds to a set"""
bucket_list = set()
s3_bucket = boto3_client('s3').list_buckets()
for buckets in s3_bucket['Buckets']:
bucket_list.add(buckets['Name'])
return bucket_list
def s3_add_tags(tags,buckets):
"""Puts S3 Bucket tagging"""
boto3_client('s3').put_bucket_tagging(Bucket=buckets,Tagging=tags)
def s3_filter_prod():
"""Filters bucket list by environment"""
bucket_list = []
prod = re.compile("prod-|-prod$")
yield list(filter(prod.search, s3_list_buckets()))
def s3_add_tags_prod(file, env):
"""Adds tags to bucket based on file and environment"""
tags_file = open(file)
tags = json.load(tags_file)
for bucket_filter in s3_filter_prod():
for buckets in bucket_filter:
if env == 'prod':
s3_add_tags(tags,buckets)
def s3_filter_preview():
"""Filters bucket list by environment"""
preview = re.compile("preview-|-preview$")
yield list(filter(preview.search, s3_list_buckets()))
def s3_add_tags_preview(file, env):
"""Filters bucket list by environment"""
tags_file = open(file)
tags = json.load(tags_file)
for bucket_filter in s3_filter_preview():
for buckets in bucket_filter:
if env == 'preview':
s3_add_tags(tags,buckets)
def s3_filter_dev():
"""Filters bucket list by environment"""
dev = re.compile("dev-|-dev$")
yield list(filter(dev.search, s3_list_buckets()))
def s3_add_tags_dev(file, env):
"""Filters bucket list by environment"""
tags_file = open(file)
tags = json.load(tags_file)
for bucket_filter in s3_filter_dev():
for buckets in bucket_filter:
if env == 'dev':
s3_add_tags(tags,buckets)
def s3_filter_qa():
"""Filters bucket list by environment"""
qa = re.compile("qa-|-qa$")
yield list(filter(qa.search, s3_list_buckets()))
def s3_add_tags_qa(file, env):
"""Filters bucket list by environment"""
tags_file = open(file)
tags = json.load(tags_file)
for bucket_filter in s3_filter_qa():
for buckets in bucket_filter:
if env == 'qa':
s3_add_tags(tags,buckets)
def main():
parser = argparse.ArgumentParser(prog='json-to-tags', description='Puts tags to S3 buckets from a JSON file. WARNING: This will remove any tags that already exist that are not specified.')
parser.add_argument('-i', '--in', required=True, action='store', dest='input_file', type=str, help='path to where the input file is located.')
parser.add_argument('-e', '--environment', required=True, metavar='environment', type=str, help='enter the environment: prod, dev, qa, preview')
args = parser.parse_args()
env = args.environment
file = args.input_file
s3_add_tags_prod(file, env)
s3_add_tags_preview(file, env)
s3_add_tags_dev(file, env)
s3_add_tags_qa(file, env)
if __name__ == '__main__':
main()
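# Example input file for --in (the shape boto3's put_bucket_tagging expects):
# {"TagSet": [{"Key": "environment", "Value": "prod"}]}
# Example run (file names hypothetical): ./json_to_tags.py -i tags.json -e prod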
|
#!/usr/bin/python
import math
primes = []
def factors(num):
total = 0
r = num
for p in primes:
if p ** 2 > num:
return total + 1
f = False
while r % p == 0:
f = True
r //= p
if f:
total += 1
if r == 1:
return total
return total
n = 100000
arr = [True] * (n + 1)
count = 0
for i in range(2, int(math.sqrt(n)) + 1):
if arr[i]:
j = i * i
while j <= n:
arr[j] = False
j += i
for i in range(2, n + 1):
if arr[i]:
primes.append(i)
i = 650
count = 0
while True:
if factors(i) == 4:
count += 1
else:
count = 0
if count == 4:
print(i - 3)
break
i += 1
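# Finds the first of four consecutive integers that each have exactly four
# distinct prime factors (cf. Project Euler problem 47).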
|
protocol_template = ['Protocol: ',
                     'Prefix: ',
                     'AD/Metric: ',
                     'Next-Hop: ',
                     'Last update: ',
                     'Outbound Interface: ']
# Assumed input format of ospf.txt (one route per line), e.g.:
# O    10.0.24.0/24 [110/41] via 10.0.13.3, 3d18h, FastEthernet0/0
with open('ospf.txt', 'r') as f:
    for line in f:
        temp = line.replace('O', 'OSPF', 1)
        temp = temp.rstrip().split()
        temp.remove('via')  # drop the literal 'via' so the fields line up with the template
        temp[2] = temp[2].strip('[]')
        temp[3] = temp[3].rstrip(',')
        temp[4] = temp[4].rstrip(',')
        for i in range(6):
            print('{:<20}{:<20}'.format(protocol_template[i], temp[i]))
        print('\n')
|
# Catherine Maloney - CS 110 HW6
# I pledge my honor that I have abided by the Stevens honor system.
# Problem Two: Write a program that accepts a date in the form of month/day/year
# and outputs whether or not the date is valid. For example, 7/6/1956 is valid
# but 9/31/2000 is not. (Do you know why??). To simplify the program, you can
# ignore the impact of "leap years." (You can ignore February 29th and leap
# years).
def main():
date = input("Enter any date (in month/day/year format): ")
valid_date = True
long_months = [1, 3, 5, 7, 8, 10, 12]
list_date = date.split('/')
if len(list_date) != 3:
valid_date = False
else:
month, day, year = list_date
try:
month = int(month)
day = int(day)
year = int(year)
            if month > 12 or month < 1 or day > 31 or day < 1 or year < 1:
                valid_date = False
            elif month not in long_months and day == 31:
                valid_date = False
            elif month == 2 and day > 29:  # February never has more than 29 days
                valid_date = False
        except ValueError:
            valid_date = False
if valid_date:
print("This date is valid.")
else:
print("This date is not valid.")
main()
|
#!/usr/bin/env python
"""
This module provides a python wrapper for volume rendering in GLSL using texture lookup tables
and viewport-aligned slice plane geometry. The module also provides a convenient interface object
for initializing sampling geometry and easily configuring the shader uniform values on the fly.
"""
#------------------------------------------------------------------------------------------
# Meta
#------------------------------------------------------------------------------------------
__author__ = "John McDermott"
__email__ = "JFMcDermott428@gmail.com"
__version__ = "1.0.0"
__status__ = "Development"
#------------------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------------------
# vizard modules
import viz
# local modules
import FileParser
import OTFMaker
from echo import CallbackProperty
#------------------------------------------------------------------------------------------
# Vertex Shader
#------------------------------------------------------------------------------------------
_VS = """
#version 330 compatibility
//
// Pipelined output to Fragment Processor
//
out vec3 TexCoord;
void main( void ) {
//gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
gl_Position = gl_ModelViewProjectionMatrix * vec4(gl_Vertex.xyz, 1);
gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;
TexCoord = gl_Vertex.xyz + vec3(0.5);
gl_FrontColor = vec4(1,1,1,1);
gl_BackColor = vec4(1,1,1,1);
}
"""
#------------------------------------------------------------------------------------------
# Fragment Shader
#------------------------------------------------------------------------------------------
_FS = """
#version 330 compatibility
//
// Pipelined input from Vertex Processor
//
// raster-interpolated uV coords
smooth in vec3 TexCoord;
//
// Uniforms
//
// 3D texture of volume data
uniform sampler3D volume;
// min threshold intensity
uniform float minIntensity = 0.1;
// max threshold intensity
uniform float maxIntensity = 1.0;
//
// Function Main
//
void main() {
float sample = texture3D(volume, TexCoord).r;
if((sample >= minIntensity) && (sample <= maxIntensity)) {
gl_FragColor = vec4(vec3(sample), 0.25);
} else {
discard;
}
}
"""
#------------------------------------------------------------------------------------------
# Classes
#------------------------------------------------------------------------------------------
class TexMapShader(object):
    def __init__(self):
        """ Wrapper interface object for the GLSL TexMap shader """
# create uniforms
self._u_volume = viz.addUniformInt('volume', 0)
self._u_minIntensity = viz.addUniformFloat('minIntensity', 0.1)
self._u_maxIntensity = viz.addUniformFloat('maxIntensity', 1.0)
# create shader
self._shader = viz.addShader(vert=_VS, frag=_FS)
# attach uniforms
self._shader.attach(self._u_volume)
self._shader.attach(self._u_minIntensity)
self._shader.attach(self._u_maxIntensity)
#=========================#
# Property accessors #
#=========================#
@CallbackProperty
def u_minIntensity(self):
return self._u_minIntensity.get()
@CallbackProperty
def u_maxIntensity(self):
return self._u_maxIntensity.get()
#=========================#
# Property mutators #
#=========================#
@u_minIntensity.setter
def u_minIntensity(self, val):
self._u_minIntensity.set(val)
@u_maxIntensity.setter
def u_maxIntensity(self, val):
self._u_maxIntensity.set(val)
#=========================#
# Methods #
#=========================#
def apply(self, Node):
Node.apply(self._shader)
def unapply(self):
self._shader.unapply()
class TexMapper(viz.EventClass):
"""
Encapsulates the rendering geometry, textures, and shader interface for slice plane
volume texture mapping
"""
def __init__(self, volTex3D, n_slices):
viz.EventClass.__init__(self)
# configure 3D volume texture
self.volTex3D = volTex3D
self.volTex3D.filter(viz.MIN_FILTER, viz.LINEAR_MIPMAP_LINEAR)
self.volTex3D.filter(viz.MAG_FILTER, viz.LINEAR)
self.volTex3D.wrap(viz.WRAP_S, viz.CLAMP)
self.volTex3D.wrap(viz.WRAP_T, viz.CLAMP)
self.volTex3D.wrap(viz.WRAP_R, viz.CLAMP)
# create GLSL shader interface object
self.shader = TexMapShader()
# create view-aligned texture slice geometry
x,y,z = self.volTex3D.getSize()
self.slicePlanes = OTFMaker.ViewAlignedPlanes(xs=x,ys=y,zs=z,n_slices=n_slices)
#self.slicePlanes.disable(viz.DEPTH_TEST)
self.slicePlanes.drawOrder(0, bin=viz.BIN_TRANSPARENT)
#self.slicePlanes.disable(viz.CULL_FACE)
self.slicePlanes.blendFunc(viz.GL_SRC_ALPHA,viz.GL_ONE_MINUS_SRC_ALPHA)
self.slicePlanes.dynamic()
# set textures
self.slicePlanes.texture(self.volTex3D, unit=0)
# apply shader
self.shader.apply(self.slicePlanes)
        # register update callback to internal timer
self.callback(viz.TIMER_EVENT, self.update)
# start update callback timer
self.starttimer(1, 0.001, viz.FOREVER)
# update for view aligned planes
def update(self, _):
self.slicePlanes.align(viz.MainView)
#------------------------------------------------------------------------------------------
# Unit Main
#------------------------------------------------------------------------------------------
def main():
pass
if __name__ == '__main__':
main()
|
import os
import unittest
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
#
# CreateSamples
#
class CreateSamples(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "CreateSamples" # TODO make this more human readable by adding spaces
self.parent.categories = ["Examples"]
self.parent.dependencies = []
self.parent.contributors = ["John Doe (AnyWare Corp.)"] # replace with "Firstname Lastname (Organization)"
self.parent.helpText = """
This is an example of scripted loadable module bundled in an extension.
It performs a simple thresholding on the input volume and optionally captures a screenshot.
"""
self.parent.acknowledgementText = """
This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc.
and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
""" # replace with organization, grant and thanks.
#
# CreateSamplesWidget
#
class CreateSamplesWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# Instantiate and connect widgets ...
generalParametersCollapsibleButton = ctk.ctkCollapsibleButton()
generalParametersCollapsibleButton.text = "General parameters"
self.layout.addWidget(generalParametersCollapsibleButton)
# Layout within the dummy collapsible button
hlayout = qt.QHBoxLayout(generalParametersCollapsibleButton)
self.label=qt.QLabel("Volume Name:")
hlayout.addWidget(self.label)
self.volumeNameLine=qt.QLineEdit()
hlayout.addWidget(self.volumeNameLine)
self.volumeNameLine.connect('textChanged(QString)', self.onLabelChanged)
#
# Parameters Area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Sample From Nothing"
self.layout.addWidget(parametersCollapsibleButton)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
#
# Sample Label map Button
#
self.labelButton = qt.QPushButton("Create Sample Label Map")
self.labelButton.toolTip = "Create sample label map."
self.labelButton.enabled = True
parametersFormLayout.addRow(self.labelButton)
#
# Sample Volume Button
#
self.volumeButton = qt.QPushButton("Create Sample Volume")
self.volumeButton.toolTip = "Create sample volume."
self.volumeButton.enabled = True
parametersFormLayout.addRow(self.volumeButton)
#
# Sample model Button
#
self.modelButton = qt.QPushButton("Create Sample Model")
self.modelButton.toolTip = "Create sample Model."
self.modelButton.enabled = True
parametersFormLayout.addRow(self.modelButton)
# connections
self.labelButton.connect('clicked(bool)', self.onLabelButton)
self.volumeButton.connect('clicked(bool)', self.onVolumeButton)
self.modelButton.connect('clicked(bool)', self.onModelButton)
parametersCollapsibleButton2 = ctk.ctkCollapsibleButton()
parametersCollapsibleButton2.text = "Sample From example"
self.layout.addWidget(parametersCollapsibleButton2)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton2)
#
# input volume selector
#
self.inputSelector = slicer.qMRMLNodeComboBox()
self.inputSelector.nodeTypes = ( ("vtkMRMLScalarVolumeNode"), "" )
# Keep the following line as an example
#self.inputSelector.addAttribute( "vtkMRMLScalarVolumeNode", "LabelMap", 0 )
self.inputSelector.selectNodeUponCreation = True
self.inputSelector.addEnabled = False
self.inputSelector.removeEnabled = False
self.inputSelector.noneEnabled = True
self.inputSelector.showHidden = False
self.inputSelector.showChildNodeTypes = False
self.inputSelector.setMRMLScene( slicer.mrmlScene )
self.inputSelector.setToolTip( "reference image." )
parametersFormLayout.addRow("Reference Volume: ", self.inputSelector)
self.inputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSampleFromReferenceSelect)
#
# Sample From reference Button
#
self.referenceButton = qt.QPushButton("Create Sample Model from a reference")
self.referenceButton.toolTip = "Create sample Model from a reference."
parametersFormLayout.addRow(self.referenceButton)
self.referenceButton.connect('clicked(bool)', self.onReferenceButton)
# Add vertical spacer
self.layout.addStretch(1)
# Refresh Apply button state
self.onLabelChanged(self.volumeNameLine.text)
def ButtonsClickable(self, value):
self.labelButton.setEnabled(value)
self.volumeButton.setEnabled(value)
self.modelButton.setEnabled(value)
self.onSampleFromReferenceSelect()
def cleanup(self):
pass
def onLabelChanged(self,myString):
if not myString=='':
self.ButtonsClickable(True)
else:
self.ButtonsClickable(False)
def onSampleFromReferenceSelect(self):
self.referenceButton.enabled = self.inputSelector.currentNode() and self.volumeNameLine.text != ''
def onLabelButton(self):
logic = CreateSamplesLogic()
logic.createVolume(self.volumeNameLine.text, labelmap=True)
def onVolumeButton(self):
logic = CreateSamplesLogic()
logic.createVolume(self.volumeNameLine.text)
def onModelButton(self):
logic = CreateSamplesLogic()
logic.createModel()
def onReferenceButton(self):
logic = CreateSamplesLogic()
logic.createVolume(self.volumeNameLine.text, labelmap=True, reference=self.inputSelector.currentNode())
#
# CreateSamplesLogic
#
class CreateSamplesLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setVolumeAsBackgroundImage(self, node):
count = slicer.mrmlScene.GetNumberOfNodesByClass('vtkMRMLSliceCompositeNode')
for n in xrange(count):
compNode = slicer.mrmlScene.GetNthNodeByClass(n, 'vtkMRMLSliceCompositeNode')
compNode.SetBackgroundVolumeID(node.GetID())
return True
# Create sample labelmap with same geometry as input volume
def createVolume(self , volumeName, labelmap=False, reference=None):
if volumeName == '':
raise Exception('The name of the output volume cannot be empty')
value = 1
sampleVolumeNode = slicer.vtkMRMLScalarVolumeNode()
sampleVolumeNode = slicer.mrmlScene.AddNode(sampleVolumeNode)
imageData = vtk.vtkImageData()
    if reference is None:
mySpacing = (0.5,0.6,0.5)
myOrigin = (20,50,50)
# Do NOT set the spacing and the origin of imageData (vtkImageData)
# The spacing and the origin should only be set in the vtkMRMLScalarVolumeNode!!!!!!
imageData.SetDimensions(30,5,15)
imageData.AllocateScalars(vtk.VTK_DOUBLE, 1)
sampleVolumeNode.SetSpacing(mySpacing[0],mySpacing[1],mySpacing[2])
sampleVolumeNode.SetOrigin(myOrigin[0],myOrigin[1],myOrigin[2])
else:
sampleVolumeNode.Copy(reference)
imageData.DeepCopy(reference.GetImageData())
sampleVolumeNode.SetName(volumeName)
sampleVolumeNode.SetAndObserveImageData(imageData)
extent = imageData.GetExtent()
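    # Fill the middle half of the volume along each axis with `value`, leaving
    # a zero border, so the generated sample shows up as a visible box.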
for x in xrange(extent[0], extent[1]+1):
for y in xrange(extent[2], extent[3]+1):
for z in xrange(extent[4], extent[5]+1):
          if ((extent[1]/4) <= x <= (extent[1]/4) * 3 and
              (extent[3]/4) <= y <= (extent[3]/4) * 3 and
              (extent[5]/4) <= z <= (extent[5]/4) * 3):
imageData.SetScalarComponentFromDouble(x,y,z,0,value)
else:
imageData.SetScalarComponentFromDouble(x,y,z,0,0)
# Display labelmap
if labelmap:
sampleVolumeNode.SetLabelMap(1)
labelmapVolumeDisplayNode = slicer.vtkMRMLLabelMapVolumeDisplayNode()
slicer.mrmlScene.AddNode(labelmapVolumeDisplayNode)
colorNode = slicer.util.getNode('GenericAnatomyColors')
labelmapVolumeDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
labelmapVolumeDisplayNode.VisibilityOn()
sampleVolumeNode.SetAndObserveDisplayNodeID(labelmapVolumeDisplayNode.GetID())
else:
volumeDisplayNode = slicer.vtkMRMLScalarVolumeDisplayNode()
slicer.mrmlScene.AddNode(volumeDisplayNode)
colorNode = slicer.util.getNode('Grey')
volumeDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
volumeDisplayNode.VisibilityOn()
sampleVolumeNode.SetAndObserveDisplayNodeID(volumeDisplayNode.GetID())
self.setVolumeAsBackgroundImage(sampleVolumeNode)
return True
def createModel(self):
print "model"
class CreateSamplesTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
|
from neuron import *
from functions import *
from connection import *
class Layer(object):
"""docstring for Layer"""
def __init__(self):
self.neurons = []
def activate(self, INPUTS, HIDDL = False):
i = 0
out = []
if not HIDDL:
for N in self.neurons:
N.activate(INPUTS[i])
i += 1
else:
for N in self.neurons:
N.out = sigmoidal_function(INPUTS[i])
# print "Neuron "+ str(N.name) + ": "+ str(N.out)
i += 1
class InputLayer(Layer):
"""docstring for InputLayer"""
def __init__(self, NN):
super(InputLayer, self).__init__()
for i in range(NN):
self.neurons.append(InputNeuron(sigmoidal_function))
class HiddenLayer(Layer):
"""docstring for HiddenLayer"""
def __init__(self, BL, NN, BIAS):
super(HiddenLayer, self).__init__()
for i in range(NN):
connections = []
for N in BL.neurons:
connections.append(Connection(N))
connections.append(Connection(BIAS))
self.neurons.append(HiddenNeuron(input_function, sigmoidal_function, connections))
def activate(self, INPUTS = None):
        if INPUTS is None:
for N in self.neurons:
N.activate()
else:
super(HiddenLayer, self).activate(INPUTS, True)
def activateAsInputLayer(self):
pass
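# Usage sketch (not part of the original module): a 2-neuron input layer
# feeding a 3-neuron hidden layer. `bias` is a hypothetical neuron-like object
# with an `out` attribute, since Connection appears to wrap a source neuron.
#   inputs = InputLayer(2)
#   hidden = HiddenLayer(inputs, 3, bias)
#   inputs.activate([0.2, 0.8])
#   hidden.activate()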
|
"""Output samples of various Colorgorical settings."""
import itertools as it
import json
import numpy as np
import os
from os import listdir
from os.path import isfile, join
import datetime
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib import gridspec
import matplotlib.tri as tri
from model import model
from model.util import convert
import model.numpyColorgorical as npc
class MakeSamples():
def __init__(self):
self.colorgorical = model.Model()
self.repeats = 10
self.sizes = [3, 5, 8]
weights = [0, 0.5, 1]
weights = np.array(list(it.product(weights, weights, weights)))
    # Remove settings that duplicate others up to scaling (e.g. (0.5,0,0) ~ (1,0,0))
weights = weights[np.sum(weights, axis=1) != 0.5, :]
def isOk(row):
values, counts = [list(r) for r in np.unique(row, return_counts=True)]
if values == [0.5]: # all 0.5
return False
if 0 in values and 0.5 in values and\
counts[values.index(0)] == 1 and counts[values.index(0.5)] == 2:
return False
else:
return True
self.weights = np.array([w for w in weights if isOk(w)])
thisFilePath = os.path.dirname(os.path.realpath(__file__))
projectDir = os.path.abspath(os.path.join(thisFilePath, os.pardir))
outputPath = os.path.join(projectDir, 'examplePalettes')
self.outputPath = outputPath
self.paletteFile = os.path.join(outputPath, 'examplePalettes.json')
self.samplePalettes = None
def loadPalettes(self):
try:
with open(self.paletteFile, 'rb') as f:
self.samplePalettes = json.load(f)
except Exception as e:
print "Could not open examplePalettes/examplePalettes.json"
print e
def make(self):
def makeSwatch(weights):
def rep(size):
print "Making:"+str(weights)+" "+str(size)+" "+str(datetime.datetime.now())
# Thanks to http://stackoverflow.com/a/16973510/239924
def getUniqueIdx(pals):
palIdx = np.array([ sorted([npc.colorIndex(c).astype(int)[0] for c in p]) for p in pals ])
b = np.ascontiguousarray(palIdx).view(np.dtype((np.void, palIdx.dtype.itemsize * palIdx.shape[1])))
_, uniquePalIdx = np.unique(b, return_index=True)
return uniquePalIdx
pals = np.array([
self.colorgorical.makePreferablePalette(s, 10, weights=weights)
for r in xrange(self.repeats)
])
pals = pals[getUniqueIdx(pals)]
while pals.shape[0] < self.repeats:
newPals = np.array([
self.colorgorical.makePreferablePalette(s, 10, weights=weights)
for r in xrange(self.repeats - pals.shape[0])
])
pals = np.vstack((pals, newPals))
pals = pals[getUniqueIdx(pals)]
pals = pals[:self.repeats]
return [[list(color) for color in p] for p in pals]
weights = {
"ciede2000": weights[0], "nameDifference": weights[1],
"nameUniqueness": 0.0, "pairPreference": weights[2]
}
palettes = [ rep(s) for s in self.sizes ]
for p in palettes:
print p, '====='
print '\n\n'
return {'weights': weights, 'palettes': palettes,
'repeats': self.repeats, 'sizes': self.sizes}
self.samplePalettes = [makeSwatch(ws) for ws in self.weights]
with open(self.paletteFile, 'w') as f:
json.dump(self.samplePalettes, f)
def savedResultsExist(self):
return os.path.isfile(self.paletteFile)
def savePlots(self):
def saveWeight(weightSamples):
ws = weightSamples["weights"]
palettes = weightSamples["palettes"]
repeats = weightSamples["repeats"]
print ws, repeats, len(palettes[0])
sortedWeights = [ str(int(10*ws[key])) for key in sorted(ws.keys())]
shorthand = ["PD", "ND", "NU", "PP"]
name = "__".join(['-'.join(d) for d in zip(sortedWeights, shorthand)])
imgType = "eps"
fname = os.path.join(self.outputPath, name+"."+imgType)
rgbPalettes = [
np.vstack([
np.array([
[ np.array(convert.convertLabToRGB(color))/255.0 for color in repeat ],
[ np.array([1,1,1]) for color in repeat ]
])
for repeat in sizes
])
for sizes in palettes
]
def makeName(palette):
return '; '.join(['[' + ','.join([str(int(i)) for i in c]) + ']' for c in palette])
labNames = [[makeName(repeat)] for sizes in palettes for repeat in sizes]
fig = plt.figure(figsize=(24, 10), dpi=300)
sortedWeights = [ str(ws[key]) for key in sorted(ws.keys())]
figName = " ".join([':'.join(d) for d in zip(shorthand, sortedWeights)])
fig.suptitle("Slider settings:: "+figName, fontsize=30, x=0, fontweight="bold", color="#010101")
# http://matplotlib.org/users/gridspec.html
gs0 = gridspec.GridSpec(1, 2, width_ratios=[2,1.1])
gs0.update(left=0)
gs1 = gridspec.GridSpecFromSubplotSpec(1, 3, subplot_spec=gs0[0], width_ratios=[3,5,8])
# gs1 = gridspec.GridSpec(1, 4, width_ratios=[5,3,5,8])
# gs1.update(left=0.23, right=0.68, wspace=0)
# gs = gridspec.GridSpec(2, 3, width_ratios=[3,5,8])
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
ax3 = fig.add_subplot(gs1[2])
gs2 = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs0[1])
# gs2 = gridspec.GridSpec(1, 1)
# gs2.update(left=0.7, right=1, hspace=0.05)
ax4 = fig.add_subplot(gs2[:,:])
allButLast = repeats*2-1
ax1.imshow(rgbPalettes[0][:allButLast], interpolation="nearest")
ax2.imshow(rgbPalettes[1][:allButLast], interpolation="nearest")
ax3.imshow(rgbPalettes[2][:allButLast], interpolation="nearest")
table = ax4.table(cellText=labNames,loc='center')
table.auto_set_font_size(False)
table.set_fontsize(10)
for key, cell in table.get_celld().items():
cell.set_linewidth(0)
cell.set_height(0.03)
cell._text.set_color('#333333')
ax1.set_axis_off()
ax2.set_axis_off()
ax3.set_axis_off()
ax4.axis('tight')
ax4.set_axis_off()
fig.savefig(fname, dpi=300, bbox_inches='tight')
for weightSamples in self.samplePalettes:
saveWeight(weightSamples)
def writeTex(self):
img = [f for f in listdir(self.outputPath) if isfile(join(self.outputPath, f)) and '.eps' in f]
txt = []
for i in img:
txt.append("\\begin{figure*}")
txt.append(" \includegraphics[width=\\textwidth]{"+i+"}")
txt.append("\\end{figure*}")
with open(join(self.outputPath, "img.tex"), 'w') as f:
f.write('\n'.join(txt))
|
from tkinter import * # import tkinter
root = Tk() # creates the tkinter window and assigns it to the root variable
def printhi(): # Defines a function (a function is a block of code that can be called later)
    print("Hi") # The body of the function; in this case it prints Hi
title_label = Label(root,text="This is a test") # creates a Label for the root window and sets its text
title_label.pack() # displays the label with the pack function
hi_button = Button(root,text="Click Me!",command=printhi)# Creates a button on the root window; the text arg sets the button text and the command arg sets the function to call when pressed.
hi_button.pack()# Displays the button
|
from django.shortcuts import render, redirect,reverse
import MySQLdb
import pymysql
import mysql.connector
from django.http import HttpResponse
from Django_shop.settings import *
# Create your views here.
from .myclass import signin, salesInfo, shop_cashier
# ===== Instantiate the authentication object =====
login_obj = signin.UserSignin()
# ===== Instantiate the cashier-module object =====
current_customer = shop_cashier.Customer()
# ===== Instantiate the sales-detail object =====
current_serial = salesInfo.SalesDetail()
def index(request):
return redirect(reverse('login'))
# ==== User login (recommended approach) ====
def login(request):
    if request.method == 'GET':
        # On GET, render the login page
        return render(request, 'login.html')
    elif request.method == "POST":
        # Read the submitted username and password into instance attributes
        login_obj.loginId = request.POST.get('login')
        login_obj.loginPwd = request.POST.get('password')
        # Authenticate
        login_obj.signin()
        # Branch on the result
        if login_obj.signin_result:  # authentication succeeded
            # Route by position
            if "收银" in login_obj.position_name:
                return redirect(reverse('cashier') + "?username=" + login_obj.current_username)
            elif "管理员" in login_obj.position_name:
                return redirect(reverse('main') + "?username=" + login_obj.current_username)
else:
return render(request, 'login.html', context={'loginId': login_obj.loginId,
'loginPwd': login_obj.loginPwd,
'info': login_obj.error_info})
# Legacy approach: login page
"""
def login(request):
    error_info = ""  # variable to hold the error message
    logins = []
    if request.method == 'GET':  # On GET, render the login page
        return render(request, 'login.html')
    elif request.method == "POST":  # On POST, validate the submitted username and password
        # 1. Read the username and password the user entered on the page
        login_id = request.POST.get('login')
        login_pwd = request.POST.get('password')
        print("Login id:", login_id)
        print("Password:", login_pwd)
        # 2. Fetch the data from the database
        mysql_db = pymysql.connect(DB_HOST,DB_USER,DB_PASSWORD,DB_NAME)
        cursor = mysql_db.cursor()  # create a cursor
        sql = " Select LoginId,LoginPwd,UserName,IsEnable,PositionName " \
              " from Login As T1 INNER JOIN Position As T2 on T1.PositionId = T2.PositionId "
        try:
            # Execute the SQL and fetch the result
            cursor.execute(sql)
            logins = cursor.fetchall()  # the result is a tuple of tuples ((), (), ())
        except Exception as e:
            error_info = "Exception while connecting to the database: " + str(e)
        finally:
            mysql_db.close()
        # 3. Check the submitted account against the database (compare with the table in Navicat)
        # Iterate over the login table; logins[index] is one row, and
        # logins[index][x] is column x of that row, e.g. logins[index][3] is IsEnable.
        for index in range(len(logins)):
            # Match the login name: compare each row's first column (LoginId) to login_id
            if logins[index][0] == login_id:
                # Check whether the account is disabled (IsEnable: 1 = active, 0 = disabled)
                if logins[index][3] == 0:
                    error_info = "Account disabled! Please contact an administrator"
                    break
                # If the password matches:
                if logins[index][1] == login_pwd:
                    # Route by position (for now this would return the cashier page)
                    # return redirect(reverse('admin'))
                    print("Login successful!")
                else:
                    print("Password mismatch!")
                    error_info = "Wrong password!"
                    break
            # Check whether the username exists at all
            if index == len(logins) - 1:
                error_info = "Login account does not exist!"
        # Many cases are still unhandled here: unknown login name, wrong password,
        # disabled account, per-position redirect!
        return render(request, 'login.html', context={'loginId': login_id,
                                                      'loginPwd': login_pwd,
                                                      'info': error_info})
"""
def cashier(request):
"""
    Cashier page.
:param request:
:return:
"""
return render(request, 'cashier.html')
def main(request):
"""
    Admin main page.
:param request:
:return:
"""
return render(request, 'main.html')
def sales_query(request):
"""
    Sales query page.
:param request:
:return:
"""
return render(request, 'sales_query.html')
# def index(request):
#     # Create a database connection
#     mysql_db = pymysql.connect(DB_HOST,DB_USER,DB_PASSWORD,DB_NAME)
#
#     cursor = mysql_db.cursor()
#     # Prepare the SQL statement
#     sql = "select * from AdminModules"
#
#     try:
#         cursor.execute(sql)
#         # Fetch the results
#         results = cursor.fetchall()
#         # Return the results
#         return HttpResponse(str(results))
#     except Exception as e:
#         mysql_db.rollback()
#         return HttpResponse("Exception while fetching data: " + str(e))
|
inputFile = open("/Users/samuelcordano/Documents/adventOfCode/Day5_BinaryBoarding/inputFile.txt","r")
Lines = inputFile.readlines()
def problem2():
"""
What is the ID of your seat?
"""
listofIDs = []
counter =0
#Get list of IDs
for line in Lines:
counter +=1
currentInput = line.strip()
rowMin = 0
rowMax = 127
seatMin = 0
seatMax = 7
print(currentInput)
for element in currentInput:
if element =="F":
rowMax = rowMax - ((rowMax+1-rowMin)/2)
elif element == "B":
rowMin = rowMin + ((rowMax+1-rowMin)/2)
elif element == "R":
seatMin = seatMin + ((seatMax+1-seatMin)/2)
elif element =="L":
seatMax = seatMax - ((seatMax+1-seatMin)/2)
id = rowMin *8 + seatMin
listofIDs.append(id)
#Find missing ID
listofIDs.sort()
    for n in range(len(listofIDs) - 1):
idN = int(listofIDs[n])
idNplus1 = int(listofIDs[n+1])
if idNplus1-idN != 1:
print("Here is my ID: " )
print(idN+1)
return idN +1
problem2()
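# More compact decoding (a sketch of an equivalent approach): each boarding
# pass is a 10-bit binary number with B/R as 1 and F/L as 0, so the seat ID is
#   int(line.strip().translate(str.maketrans("FBLR", "0101")), 2)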
|
import os
__version__ = '2016.0'
here = os.path.dirname(__file__)
def get_theme_dir():
return here
|
# Generated by Django 3.2.3 on 2021-05-31 12:32
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
("images", "0003_alter_image_created_at"),
]
operations = [
migrations.AlterField(
model_name="image",
name="created_at",
field=models.DateTimeField(
default=datetime.datetime(2021, 5, 31, 12, 32, 33, 290012, tzinfo=utc)
),
),
migrations.CreateModel(
name="Thumbnail",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("thumbnail", models.URLField()),
("size", models.CharField(max_length=125)),
(
"org_image",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="images.image"
),
),
],
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
"""
try:
from NaoCreator.nao_scenario_creator import search_face, i_meet_you
from NaoCreator.setting import Setting
from time import sleep
from PlayerManager.player_manager import Player
import NaoCreator.Tool.speech_move as sm
except Exception as e:
print e
def i_know_you(face_name, current_player):
"""
Modifie le joueur actuel par celui qui a été reconnu par Nao.
:param face_name: Le nom du joueur reconnu
:param current_player: Le joueur actuel
"""
if face_name == current_player.player_name:
return
current_player.save()
Setting.CURRENT_PLAYER = Player(face_name)
def wait_for(current_player):
"""
Attend un joueur. Celui-ci devra se montrer à Nao ou appuyer sur un bumper pour continuer l'exécution.
:param current_player: Le joueur actuel
"""
print "[wait_for] Waiting for player..."
if Setting.BYPASS_WAIT_FOR:
return False
    # Have Nao speak the wait_for text
if hasattr(current_player.current_objective, "wait_for_text"):
sm.speech_and_move(current_player.current_objective.wait_for_text)
elif hasattr(current_player.current_quest, "wait_for_text"):
sm.speech_and_move(current_player.current_quest.wait_for_text)
elif hasattr(current_player.current_scenario, "wait_for_text"):
sm.speech_and_move(current_player.current_scenario.wait_for_text)
    nb_unknown_face = 0
    nb_known_face = 0
    pass_to_i_know_you = False
    Setting.naoLed.off("AllLeds")
    Setting.naoLed.on("AllLedsBlue")
    Setting.naoLed.on("AllLedsRed")
    i = 1
    old_player = current_player.player_name
    while Setting.naoMemoryProxy.getData("MiddleTactilTouched") != 1 and not pass_to_i_know_you:
        stop = Setting.naoMemoryProxy.getData("RightBumperPressed") == 1
        if stop:
            return False
        nb_unknown_face, nb_known_face, current_color, finished, pass_to_i_know_you = search_face(i_meet_you, i_know_you,
                                                                        nb_unknown_face, nb_known_face, "none",
                                                                        [current_player])
        sleep(.5)
        print i
        i += 1
    print old_player, Setting.CURRENT_PLAYER.player_name
    return old_player != Setting.CURRENT_PLAYER.player_name
|
# Generated by Django 2.0.2 on 2018-07-14 03:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bot', '0002_ordertext_userorder'),
]
operations = [
migrations.AlterField(
model_name='userorder',
name='mail',
field=models.EmailField(default='mail', max_length=64, unique=True, verbose_name='почта'),
),
migrations.AlterField(
model_name='userorder',
name='phone',
field=models.CharField(default='phone', max_length=64, unique=True, verbose_name='телефон'),
),
]
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
from django.conf import settings
from django.core.management.base import BaseCommand
from data_aggregator.management.commands._mixins import RunJobMixin
from data_aggregator.models import AnalyticTypes, Job, JobType, TaskTypes, \
Term
from data_aggregator.utilities import datestring_to_datetime, get_relative_week
from data_aggregator.dao import JobDAO
from data_aggregator.threads import ThreadPool
from restclients_core.exceptions import DataFailureException
class RunJobCommand(BaseCommand, RunJobMixin):
def add_arguments(self, parser):
parser.add_argument("job_name",
type=str,
choices=[
AnalyticTypes.assignment,
AnalyticTypes.participation,
TaskTypes.create_terms,
TaskTypes.create_or_update_users,
TaskTypes.create_or_update_courses,
TaskTypes.reload_advisers,
TaskTypes.create_assignment_db_view,
TaskTypes.create_participation_db_view,
TaskTypes.create_rad_db_view,
TaskTypes.create_rad_data_file,
TaskTypes.create_student_categories_data_file,
TaskTypes.build_subaccount_activity_report],
help=("Name of job to run."))
parser.add_argument("--job_batch_size",
type=int,
help=("Number of jobs to process. Default is all "
"jobs."),
default=None,
required=False)
parser.add_argument("--num_parallel_jobs",
type=int,
help=("Size of job thread pool"),
default=20,
required=False)
def handle(self, *args, **options):
"""
Queries the Job model to check for unstarted jobs (jobs
where pid=None and start=None). For a batch of unstarted jobs,
calls the run_job method for each job.
"""
job_name = options["job_name"] # required
num_parallel_jobs = options["num_parallel_jobs"]
job_batch_size = options["job_batch_size"] # defaults to all jobs
jobs = Job.objects.claim_batch_of_jobs(
job_name,
batchsize=job_batch_size
)
try:
if jobs:
if settings.DATA_AGGREGATOR_THREADING_ENABLED:
with ThreadPool(processes=num_parallel_jobs) as pool:
pool.map(self.run_job, jobs)
else:
if num_parallel_jobs > 1:
logging.warning(
"Running single threaded. Multithreading is "
"disabled in Django settings.")
for job in jobs:
self.run_job(job)
else:
logging.debug(f"No active {job_name} jobs.")
except Exception as err:
for job in jobs:
if not job.message:
# save error message if one occurs but don't overwrite
# an existing error
tb = traceback.format_exc()
if tb:
job.message = tb
logging.error(tb)
else:
# Just in case the trace back is empty
msg = f"Unknown exception occured: {err}"
job.message = msg
logging.error(msg)
job.save()
class CreateJobCommand(BaseCommand):
def __init__(self, *args, **kwargs):
super().__init__()
def _add_subparser(self, subparsers, command_name,
command_help_message=None,
include_term=True, include_week=False,
include_course=False, include_account=False,
include_force=False,
default_sis_term_id=None,
default_week=None):
subparser = subparsers.add_parser(
command_name,
help=(command_help_message if command_help_message else
f"Run {command_name} command")
)
# we add the job name as a hidden argument so that it can be read
# when processing the command
if include_term:
subparser.add_argument(
"--sis_term_id",
type=str,
help=("Term to run job for."),
default=default_sis_term_id)
if include_week:
subparser.add_argument(
"--week",
type=int,
help=("Week to run job for."),
default=default_week)
if include_course:
subparser.add_argument(
"--canvas_course_id",
type=int,
help=("Canvas course id to create a job for."),
default=None,
required=False)
subparser.add_argument(
"--sis_course_id",
type=str,
help=("The sis-course-id to create a job for."),
default=None,
required=False)
if include_account:
subparser.add_argument(
'--subaccount_id',
type=str,
default='uwcourse',
help='The subaccount to create a job for.')
if include_force:
subparser.add_argument(
'--force',
action='store_true',
help='Force action.')
subparser.add_argument("--target_start_time",
type=str,
help=("iso8601 UTC start time for which the "
"job is active. YYYY-mm-ddTHH:MM:SS.ss"),
default=None,
required=False)
subparser.add_argument("--target_end_time",
type=str,
help=("iso8601 UTC end time for which the job "
"is active. YYYY-mm-ddTHH:MM:SS.ss"),
default=None,
required=False)
return subparsers
def add_arguments(self, parser):
subparsers = parser.add_subparsers(title="job_name",
dest="job_name")
subparsers.required = True
try:
term, _ = Term.objects.get_or_create_term_from_sis_term_id()
# use current term and week as default
curr_sis_term_id = term.sis_term_id
curr_week = get_relative_week(term.first_day_quarter,
tz_name="US/Pacific")
except DataFailureException as e:
logging.warning(f"Unable to set default term and week values. {e}")
curr_sis_term_id = None
curr_week = None
subparsers = self._add_subparser(
subparsers,
TaskTypes.create_terms,
command_help_message=(
"Creates current term and all future terms."
))
subparsers = self._add_subparser(
subparsers,
TaskTypes.create_or_update_courses,
command_help_message=(
"Loads or updates list of courses for the current term."
))
subparsers = self._add_subparser(
subparsers,
TaskTypes.create_or_update_users,
command_help_message=(
"Loads or updates list of students for the current term."
),
default_sis_term_id=curr_sis_term_id)
subparsers = self._add_subparser(
subparsers,
TaskTypes.reload_advisers,
include_term=False,
command_help_message=(
"Loads or updates list of advisers for all students in the db."
))
subparsers = self._add_subparser(
subparsers,
TaskTypes.create_participation_db_view,
include_week=True,
command_help_message=(
"Creates participation database view for given term and week."
),
default_sis_term_id=curr_sis_term_id,
default_week=curr_week)
subparsers = self._add_subparser(
subparsers,
TaskTypes.create_assignment_db_view,
include_week=True,
command_help_message=(
"Creates assignment database view for given term and week."
),
default_sis_term_id=curr_sis_term_id,
default_week=curr_week)
subparsers = self._add_subparser(
subparsers,
TaskTypes.create_rad_db_view,
include_week=True,
command_help_message=(
"Creates RAD database view for given term and week."
),
default_sis_term_id=curr_sis_term_id,
default_week=curr_week)
subparsers = self._add_subparser(
subparsers,
TaskTypes.create_rad_data_file,
include_week=True,
include_force=True,
command_help_message=(
"Creates RAD data file in GCS bucket."
),
default_sis_term_id=curr_sis_term_id,
default_week=curr_week)
subparsers = self._add_subparser(
subparsers,
TaskTypes.create_student_categories_data_file,
command_help_message=(
"Creates Student Categories metadata data file in GCS bucket."
),
default_sis_term_id=curr_sis_term_id)
subparsers = self._add_subparser(
subparsers,
TaskTypes.build_subaccount_activity_report,
include_week=True,
include_account=True,
command_help_message=(
"Creates participation database view for given term and week."
),
default_sis_term_id=curr_sis_term_id,
default_week=curr_week)
subparsers = self._add_subparser(
subparsers,
AnalyticTypes.assignment,
include_week=True,
include_course=True,
command_help_message=(
"Run active assignment jobs."
),
default_sis_term_id=curr_sis_term_id,
default_week=curr_week)
subparsers = self._add_subparser(
subparsers,
AnalyticTypes.participation,
include_week=True,
include_course=True,
command_help_message=(
"Run active participation jobs."
),
default_sis_term_id=curr_sis_term_id,
default_week=curr_week)
def get_job_context(self, options):
context = {}
for key, value in options.items():
if value is not None:
context[key] = value
# don't include the job_name in the context since it is implied through
# the job type
context.pop("job_name")
# remove django options from context
context.pop("verbosity", None)
context.pop("settings", None)
context.pop("pythonpath", None)
context.pop("traceback", None)
context.pop("no_color", None)
context.pop("force_color", None)
context.pop("skip_checks", None)
return context
def create(self, options):
job_name = options['job_name']
target_start_time = options.get("target_start_time")
target_end_time = options.get("target_end_time")
if target_start_time is None:
target_date_start = Job.get_default_target_start()
else:
target_date_start = datestring_to_datetime(target_start_time)
if target_end_time is None:
target_date_end = Job.get_default_target_end()
else:
target_date_end = datestring_to_datetime(target_end_time)
context = self.get_job_context(options)
jobs = []
job_type, _ = JobType.objects.get_or_create(type=job_name)
if job_type.type == AnalyticTypes.assignment or \
job_type.type == AnalyticTypes.participation:
jobs = JobDAO().create_analytic_jobs(
job_type, target_date_start, target_date_end, context=context)
else:
            # creates a single job of the given job type, target dates, and
            # context
job = JobDAO().create_job(job_type, target_date_start,
target_date_end, context=context)
jobs.append(job)
return jobs
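# Example invocations (a sketch; assumes these classes are wired up as the
# `create_job` and `run_job` management commands):
#   python manage.py create_job assignment --sis_term_id 2021-autumn --week 5
#   python manage.py run_job assignment --num_parallel_jobs 10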
|
import sys
sys.path.append('../src')
from grafo import Grafo
from dijkstra import Dijkstra
g = Grafo()
g.agregar_vertice(0)
g.agregar_vertice(1)
g.agregar_vertice(2)
g.agregar_vertice(3)
g.agregar_vertice(4)
g.agregar_vertice(5)
g.agregar_arista_no_dirigida(0, 1, 7)
g.agregar_arista_no_dirigida(0, 2, 9)
g.agregar_arista_no_dirigida(0, 5, 14)
g.agregar_arista_no_dirigida(1, 2, 10)
g.agregar_arista_no_dirigida(1, 3, 15)
g.agregar_arista_no_dirigida(2, 3, 11)
g.agregar_arista_no_dirigida(2, 5, 2)
g.agregar_arista_no_dirigida(3, 4, 6)
g.agregar_arista_no_dirigida(4, 5, 9)
d = Dijkstra(g)
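# This is the classic six-node Dijkstra example graph; from node 0 the expected
# shortest distances are {0: 0, 1: 7, 2: 9, 3: 20, 4: 20, 5: 11}.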
print "Nodo 0\n", d.dijkstra(0)
print "Nodo 1\n", d.dijkstra(1)
print "Nodo 2\n", d.dijkstra(2)
print "Nodo 3\n", d.dijkstra(3)
print "Nodo 4\n", d.dijkstra(4)
print "Nodo 5\n", d.dijkstra(5)
|
# -*- coding: utf-8 -*-
import os
version_info = (0, 0, 7)
__version__ = ".".join(map(str, version_info))
__path = os.path.dirname(__file__)
class Mixer(object):
    '''Takes data chunks (requests) from sources, mixes them in a given
    proportion, and yields chunks.
    Attributes:
        num_req: int, total number of requests needed. Not used if one of the
            generators stops earlier.
    '''
def __init__(self, num_req=10 ** 7):
self.num_req = num_req
self.gens = {}
def add_generator(self, name, frequency, miner):
        '''Add one more data source to mix in.
        Args:
            name: str, name of the data source (helps with debugging).
            frequency: float, fraction of this source in the result stream.
            miner: generator obj, yields requests.
        '''
assert isinstance(name, basestring)
assert isinstance(frequency, float)
self.gens[name] = {
'frq': frequency,
'miner': miner,
}
def __iter__(self):
        '''Iterate over the generators until one of them ends (raises StopIteration).
        Yields:
            requests from the original generators in the given proportion.
        '''
if len(self.gens) < 1:
raise ValueError('Have no generators to mix.')
state = {}
cntr = 0
max_freq = max(g['frq'] for g in self.gens.values())
for g_name, g_val in self.gens.iteritems():
state[g_name] = {
'proficit': 0,
'ratio': g_val['frq'] / max_freq,
}
while True:
for g_name, ctx in state.iteritems():
if cntr >= self.num_req:
raise StopIteration('{} achieved'.format(self.num_req))
ctx['proficit'] += ctx['ratio']
if ctx['proficit'] >= 1:
for i in xrange(int(ctx['proficit'])):
ctx['proficit'] -= 1
cntr += 1
yield self.gens[g_name]['miner'].next()
for each in self.__dict__.keys():
yield self.__getattribute__(each)
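# Usage sketch (Python 2, matching the API above; the miners here are stand-in
# generators, not part of this module): mix two streams roughly 3:1.
#   mixer = Mixer(num_req=8)
#   mixer.add_generator('big', 0.75, ('b%d' % i for i in xrange(100)))
#   mixer.add_generator('small', 0.25, ('s%d' % i for i in xrange(100)))
#   print list(mixer)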
|
#!/usr/bin/env python3
import gzip
import sys
# Write a program that computes typical stats for sequence files
# See below for command line and output
"""
python3 fasta_stats.py transcripts.fasta.gz
Count: 232
Total: 278793
Min: 603
Max: 1991
Mean: 1201.7
NTs: 0.291 0.218 0.210 0.281
"""
|
import unittest
import attachment
import email_parser
import pop3
import threading
import socket
import ssl
def create_server():
def create_sock():
# server = ssl.SSLSocket(socket.socket()) # socket.AF_INET, socket.SOCK_STREAM
# server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server = socket.socket()
        # server = ssl.SSLSocket(server)
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # must be set before bind()
        server.bind(('', 995))
        server.listen(1)
return server
server = create_sock()
conn, addr = server.accept()
# print('Connected with ' + addr[0] + ':' + str(addr[1]))
# conn = ssl.SSLSocket(conn)
def recv_data():
data = b''
block = 1024
while True:
conn.settimeout(0.5)
try:
current = conn.recv(block)
except socket.timeout:
current = b''
data += current
if current == b'':
break
return data
    while True:
# data = conn.recv(1024)
data = recv_data()
print('server:', data)
if not data or data == b'QUIT':
break
conn.send(data)
conn.close()
server.close()
# class Tests(unittest.TestCase):
# def setUp(self):
# threading.Thread(target=create_server).start()
try:
threading.Thread(target=create_server).start()
sock = socket.socket()
sock.connect(('localhost', 995))
def recv_data():
data = b''
block = 1024
while True:
sock.settimeout(0.5)
try:
current = sock.recv(block)
except socket.timeout:
current = b''
# except ConnectionAbortedError:
# pass
data += current
if current == b'':
break
return data.decode()
def client():
try:
for i in range(6):
                print('client sent')
sock.send(b'QUIT1')
import time
# time.sleep(1)
print('client', recv_data())
except ConnectionAbortedError:
pass
finally:
sock.close()
threading.Thread(target=client).start()
# create_server()
except ConnectionAbortedError:
print("Closed")
|
#!/usr/bin/env python3
import sys
import subprocess
filename=sys.argv[1]
with open(filename) as file:
lines=file.readlines()
for line in lines:
oldfile=line.strip()
newfile=line.strip().replace("jane", "jdoe")
result = subprocess.run(["mv", oldfile, newfile], capture_output=True)
print("Oldfile: {} Newfile: {}".format(oldfile, newfile))
|
##
## Copyright (c) 2014 Rodolphe Breard
##
## Permission to use, copy, modify, and/or distribute this software for any
## purpose with or without fee is hereby granted, provided that the above
## copyright notice and this permission notice appear in all copies.
##
## THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
## WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
## MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
## ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
## WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
## ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
## OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
##
import re
common_tags = (
# Armors
('armor', re.compile('^ic[mtzfocb]a')),
('boots', re.compile('^ic[mtzfocb]a[lmh]b')),
('gloves', re.compile('^ic[mtzfocb]a[lmh]g')),
('pants', re.compile('^ic[mtzfocb]a[lmhc]p')),
('sleeves', re.compile('^ic[mtzfocb]a[lmh]s')),
('vest', re.compile('^ic[mtzfocb]a[lmh]v')),
('helmet', re.compile('^ic[mtzfocb]a[lmh]h')),
('light_armor', re.compile('^ic[mtzfocb]a[lc]')),
('medium_armor', re.compile('^ic[mtzfocb]am')),
('heavy_armor', re.compile('^ic[mtzfocb]ah')),
# Weapons
('weapon', re.compile('^ic[mtzfocb][mr]')),
# Melee weapons
('melee_weapon', re.compile('^ic[mtzfocb](kar|kam)?m')),
('one_handed', re.compile('^ic[mtzfocb](kar|kam)?m1')),
('two_handed', re.compile('^ic[mtzfocb](kar|kam)?m2')),
('sword', re.compile('^ic[mtzfocb](kar|kam)?m[12]ss[elbw]?')),
('mace', re.compile('^ic[mtzfocb](kar|kam)?m[12]bm[elbw]?')),
('axe', re.compile('^ic[mtzfocb](kar|kam)?m[12]sa[elbw]?')),
('spear', re.compile('^ic[mtzfocb](kar|kam)?m[12]ps[elbw]?')),
('dagger', re.compile('^ic[mtzfocb](kar|kam)?m[12]pd[elbw]?')),
('magic_amplifier', re.compile('^ic[mtzfocb](kar|kam)?m[12]ms[elbw]?')),
('pike', re.compile('^ic[mtzfocb](kar|kam)?m[12]pp[elbw]?')),
('staff', re.compile('^ic[mtzfocb](kar|kam)?m[12]bs[elbw]?')),
# Range weapon
('range_weapon', re.compile('^ic[mtzfocb](kar|kam)?r')),
('autolauncher', re.compile('^ic[mtzfocb](kar|kam)?r2a[elbw]?')),
('launcher', re.compile('^ic[mtzfocb](kar|kam)?r2l[elbw]?')),
('pistol', re.compile('^ic[mtzfocb](kar|kam)?r1p[elbw]?')),
('bowpistol', re.compile('^ic[mtzfocb](kar|kam)?r1b[elbw]?')),
('rifle', re.compile('^ic[mtzfocb](kar|kam)?r2r[elbw]?')),
('bowrifle', re.compile('^ic[mtzfocb](kar|kam)?r2b[elbw]?')),
# Ammo
('ammo', re.compile('ic[mtzfocb]p')),
('smashing_ammo', re.compile('ic[mtzfocb]p[12][abplr]b')),
('piercing_ammo', re.compile('ic[mtzfocb]p[12][abplr]p')),
('slashing_ammo', re.compile('ic[mtzfocb]p[12][abplr]s')),
('autolauncher_ammo', re.compile('ic[mtzfocb]p2a[bps]')),
('launcher_ammo', re.compile('ic[mtzfocb]p2l[bps]')),
('pistol_ammo', re.compile('ic[mtzfocb]p1p[bps]')),
('bowpistol_ammo', re.compile('ic[mtzfocb]p2b[bps]')),
('rifle_ammo', re.compile('ic[mtzfocb]p2r[bps]')),
('bowrifle_ammo', re.compile('ic[mtzfocb]p2b[bps]')),
# Jewels
('jewel', re.compile('^ic[mtzfocb]j[abdepr]')),
('anklet', re.compile('^ic[mtzfocb]ja')),
('bracelet', re.compile('^ic[mtzfocb]jb')),
('diadem', re.compile('^ic[mtzfocb]jd')),
('earring', re.compile('^ic[mtzfocb]je')),
('pendant', re.compile('^ic[mtzfocb]jp')),
('ring', re.compile('^ic[mtzfocb]jr')),
# Shields
('shield', re.compile('^ic[mtzfocb](kar|kam)?s')),
('large_shield', re.compile('^ic[mtzfocb](kar|kam)?ss')),
('buckler', re.compile('^ic[mtzfocb](kar|kam)?sb')),
# Tools
('tool', re.compile('^it')),
('tool', re.compile('^ic[mtzfocb](kar|kam)t')),
('armor_tool', re.compile('^itarmor')),
('armor_tool', re.compile('^ic[mtzfocb](kar|kam)tarmor')),
('ammo_tool', re.compile('^itammo')),
('ammo_tool', re.compile('^ic[mtzfocb](kar|kam)tammo')),
('melee_weapon_tool', re.compile('^itmwea')),
('melee_weapon_tool', re.compile('^ic[mtzfocb](kar|kam)tmwea')),
('range_weapon_tool', re.compile('^itrwea')),
('range_weapon_tool', re.compile('^ic[mtzfocb](kar|kam)trwea')),
('jewel_tool', re.compile('^itjewel')),
('jewel_tool', re.compile('^ic[mtzfocb](kar|kam)tjewel')),
('tool_tool', re.compile('^ittool')),
('tool_tool', re.compile('^ic[mtzfocb](kar|kam)ttool')),
('pick', re.compile('^itforage')),
('pick', re.compile('^ic[mtzfocb](kar|kam)tforage')),
# Teleporters
('teleporter', re.compile('^tp_')),
('karavan', re.compile('^tp_karavan')),
('kami', re.compile('^tp_kami')),
('fyros', re.compile('^tp_(karavan|kami)_(dyron|frahartowers|oflovaksoasis|outlawcanyon|pyr|sawdustmines|thescorchedcorridor|thesos)')),
('matis', re.compile('^tp_(karavan|kami)_(avalae|davae|fleetinggarden|groveofconfusion|hereticshovel|hiddensource|knollofdissent|natae|upperbog|yrkanis)')),
('tryker', re.compile('^tp_(karavan|kami)_(avendale|bountybeaches|crystabell|dewdrops|enchantedisle|fairhaven|lagoonsofloria|restingwater|thefount|windermeer|windsofmuse)')),
('zorai', re.compile('^tp_(karavan|kami)_(groveofumbra|havenofpurity|hoi_cho|jen_lai|knotofdementia|maidengrove|min_cho|thevoid|zora)')),
('prime_root', re.compile('^tp_(karavan|kami)_(almati|forbidden_depths|gate_of_obscurity|nexus_terre|the_abyss_of_ichor_matis|the_elusive_forest|the_land_of_continuity|the_sunken_city|the_trench_of_trials_zorai|the_under_spring_fyros|the_windy_gate)')),
# Materials
    ('material', re.compile(r'^m\d')),
    ('forest', re.compile(r'^m.*f\w\d{2}$')),
    ('lac', re.compile(r'^m.*l\w\d{2}$')),
    ('jungle', re.compile(r'^m.*j\w\d{2}$')),
    ('desert', re.compile(r'^m.*d\w\d{2}$')),
    ('prime_root', re.compile(r'^m.*p\w\d{2}$')),
    ('common', re.compile(r'^m.*c\w\d{2}$')),
# Enchantments
('crystalized_spell', re.compile('^crystalized_spell$')),
('sap_recharge', re.compile('^.*sap_recharge$')),
# Pets
('pet', re.compile('^ia[ps]')),
('packer', re.compile('^iap')),
('mount', re.compile('^ias')),
# marauder crystal
('marauder_crystal', re.compile('^marauder_teleport_crystal$')),
# Jobs items
('job_item', re.compile('^rpjobitem')),
# Consumables items
('fireworks', re.compile('^.*fireworks')),
('consumable', re.compile('^ip')),
('consumable', re.compile('^conso_')),
# Misc
('skin1', re.compile('^ic[mtzfocb].*_1$')),
('skin2', re.compile('^ic[mtzfocb].*_2$')),
('skin3', re.compile('^ic[mtzfocb].*_3$')),
('matis', re.compile('^icm')),
('tryker', re.compile('^ict')),
('zorai', re.compile('^icz')),
('fyros', re.compile('^icf')),
('catalyser', re.compile('^ixpca01$')),
('piece_of_kitin', re.compile('^slaughter_week_token$')),
)
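# Helper sketch (not part of the original table): collect every common tag
# whose pattern matches a given item sheet id; search() is used because a few
# of the ammo patterns above are not anchored with '^'.
def common_tags_for(item_id):
    return [tag for tag, pattern in common_tags if pattern.search(item_id)]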
material_type_re = re.compile(r'm(\d+)\D')
material_specific_tags = {
1: ['abhaya', 'wood', 'barrel', 'armor_shell'],
6: ['anete', 'fiber', 'grip', 'clothes'],
9: ['arma', 'spine', 'trigger', 'jewel_setting'],
14: ['beckers', 'bark', 'shaft', 'ammo_bullet'],
15: ['beng', 'amber', 'jewel', 'magic_focus'],
16: ['big', 'shell', 'blade', 'point'],
18: ['bodoc', 'horn', 'shaft', 'ammo_bullet'],
19: ['bodoc', 'skin', 'grip', 'clothes'],
20: ['bodoc', 'nail', 'trigger', 'jewel_setting'],
21: ['buo', 'fiber', 'grip', 'clothes'],
23: ['caprice', 'seed', 'trigger', 'jewel_setting'],
25: ['capryni', 'hoof', 'hammer', 'counterweight'],
31: ['cuty', 'shell', 'blade', 'point'],
37: ['dzao', 'fiber', 'grip', 'clothes'],
40: ['eyota', 'wood', 'barrel', 'armor_shell'],
43: ['gingo', 'claw', 'blade', 'point'],
44: ['gingo', 'leather', 'barrel', 'armor_shell'],
46: ['glue', 'resin', 'ammo_jacket', 'lining'],
48: ['goari', 'shell', 'barrel', 'armor_shell'],
49: ['gulatch', 'oil', 'explosive', 'stuffing'],
50: ['hash', 'amber', 'jewel', 'magic_focus'],
53: ['horny', 'shell', 'blade', 'point'],
64: ['kachine', 'wood', 'barrel', 'armor_shell'],
66: ['kincher', 'shell', 'barrel', 'armor_shell'],
67: ['kincher', 'sting', 'blade', 'point'],
68: ['kiban', 'shell', 'barrel', 'armor_shell'],
69: ['kipesta', 'shell', 'barrel', 'armor_shell'],
72: ['kipee', 'shell', 'barrel', 'armor_shell'],
73: ['kipucka', 'shell', 'barrel', 'armor_shell'],
74: ['kipucka', 'rostrum', 'firing_pin', 'armor_clip'],
76: ['kirosta', 'sting', 'blade', 'point'],
77: ['kipucker', 'secretion'],
78: ['kizoar', 'tail', 'firing_pin', 'armor_clip'],
81: ['lumper', 'skin', 'grip', 'clothes'],
82: ['lumper', 'spine', 'trigger', 'jewel_setting'],
83: ['lumper', 'whiskers', 'ammo_jacket', 'lining'],
86: ['mektoub', 'skin', 'grip', 'clothes'],
87: ['mektoub', 'trunk', 'ammo_jacket', 'lining'],
93: ['motega', 'wood', 'barrel', 'armor_shell'],
100: ['patee', 'node', 'hammer', 'counterweight'],
101: ['perfling', 'bark', 'shaft', 'ammo_bullet'],
102: ['pha', 'amber', 'jewel', 'magic_focus'],
103: ['pilan', 'oil', 'explosive', 'stuffing'],
106: ['ragus', 'claw', 'blade', 'point'],
107: ['ragus', 'leather', 'barrel', 'armor_shell'],
109: ['redhot', 'sap', 'firing_pin', 'armor_clip'],
113: ['sarina', 'seed', 'trigger', 'jewel_setting'],
115: ['saurona', 'seed', 'trigger', 'jewel_setting'],
116: ['jubla'],
117: ['sha', 'amber', 'jewel', 'magic_focus'],
118: ['shu', 'fiber', 'grip', 'clothes'],
119: ['silverweed', 'sap', 'firing_pin', 'armor_clip'],
123: ['smart', 'shell', 'blade', 'point'],
124: ['soo', 'amber', 'jewel', 'magic_focus'],
125: ['splinter', 'shell', 'blade', 'point'],
128: ['tama', 'wood', 'barrel', 'armor_shell'],
133: ['timari', 'skin', 'grip', 'clothes'],
134: ['torbak', 'claw', 'blade', 'point'],
135: ['torbak', 'fang', 'explosive', 'stuffing'],
136: ['torbak', 'horn', 'shaft', 'ammo_bullet'],
137: ['torbak', 'leather', 'barrel', 'armor_shell'],
140: ['varinx', 'fang', 'explosive', 'stuffing'],
141: ['varinx', 'leather', 'barrel', 'armor_shell'],
142: ['visc', 'sap', 'firing_pin', 'armor_clip'],
145: ['yber', 'leather', 'barrel', 'armor_shell'],
147: ['yelk', 'moss', 'ammo_jacket', 'lining'],
148: ['yelk', 'mushroom', 'jewel', 'magic_focus'],
149: ['yelk', 'nail', 'trigger', 'jewel_setting'],
152: ['yubo', 'skin', 'grip', 'clothes'],
153: ['zerx', 'bone', 'shaft', 'ammo_bullet'],
154: ['zerx', 'claw', 'blade', 'point'],
155: ['zun', 'amber', 'jewel', 'magic_focus'],
162: ['cratcha'],
163: ['cratcha'],
164: ['stinga'],
165: ['stinga'],
166: ['jubla'],
167: ['jubla'],
168: ['psykopla'],
169: ['psykopla'],
170: ['slaveni'],
171: ['slaveni'],
172: ['shooki'],
173: ['shooki'],
264: ['ragus', 'meat'],
266: ['capryni', 'meat'],
268: ['cray'],
269: ['igara', 'meat'],
270: ['izam', 'meat'],
273: ['bodoc', 'meat'],
281: ['kipee'],
282: ['kizoar'],
284: ['ocyx', 'bone', 'shaft', 'ammo_bullet'],
288: ['gingo', 'blood'],
289: ['ragus', 'blood'],
291: ['kipee', 'blood'],
294: ['cray', 'blood'],
296: ['capryni'],
298: ['gingo'],
299: ['torbak'],
300: ['ragus'],
303: ['zerx'],
304: ['bodoc'],
312: ['kitin_larva', 'generic'],
314: ['bodoc', 'skull'],
315: ['capryni', 'skull'],
316: ['gingo', 'skull'],
319: ['torbak', 'skull'],
321: ['igara', 'skull'],
322: ['izam', 'skull'],
324: ['ragus', 'skull'],
325: ['najab', 'skull'],
329: ['varinx'],
331: ['zerx', 'skull'],
335: ['javing', 'wing', 'explosive', 'stuffing'],
336: ['clopper', 'shell', 'barrel', 'armor_shell'],
338: ['varinx', 'bone', 'shaft', 'ammo_bullet'],
339: ['gingo', 'bone', 'shaft', 'ammo_bullet'],
341: ['cuttler', 'bone', 'shaft', 'ammo_bullet'],
343: ['ragus', 'bone', 'shaft', 'ammo_bullet'],
345: ['timari', 'tooth', 'ammo_jacket', 'lining'],
346: ['ragus', 'fang', 'explosive', 'stuffing'],
347: ['gingo', 'fang', 'explosive', 'stuffing'],
348: ['cuttler', 'fang', 'explosive', 'stuffing'],
349: ['yetin', 'fang', 'explosive', 'stuffing'],
350: ['messab', 'tooth', 'ammo_jacket', 'lining'],
356: ['zerx', 'fang', 'explosive', 'stuffing'],
359: ['messab', 'nail', 'trigger', 'jewel_setting'],
363: ['wombai', 'skin', 'grip', 'clothes'],
364: ['bolobi', 'skin', 'grip', 'clothes'],
365: ['messab', 'skin', 'grip', 'clothes'],
366: ['yber', 'wing', 'explosive', 'stuffing'],
367: ['bawaab', 'skin', 'grip', 'clothes'],
368: ['horncher', 'shell', 'barrel', 'armor_shell'],
369: ['najab', 'leather', 'barrel', 'armor_shell'],
371: ['izam', 'leather', 'barrel', 'armor_shell'],
372: ['igara', 'leather', 'barrel', 'armor_shell'],
374: ['bawaab', 'nail', 'trigger', 'jewel_setting'],
376: ['cuttler', 'leather', 'barrel', 'armor_shell'],
378: ['messab', 'hoof', 'hammer', 'counterweight'],
380: ['frippo', 'skin', 'grip', 'clothes'],
383: ['gubani', 'tooth', 'ammo_jacket', 'lining'],
384: ['ocyx', 'bone', 'shaft', 'ammo_bullet'],
385: ['jugula', 'fang', 'explosive', 'stuffing'],
386: ['tyrancha', 'claw', 'blade', 'point'],
387: ['kidinak', 'claw', 'blade', 'point'],
390: ['vorax', 'claw', 'blade', 'point'],
395: ['bawaab', 'meat'],
398: ['gnoof', 'meat'],
400: ['shalah', 'meat'],
401: ['ploderos', 'meat'],
404: ['arana', 'meat'],
406: ['javing', 'meat'],
407: ['cuttler', 'meat'],
415: ['bawaab', 'skull'],
420: ['shalah', 'skull'],
421: ['ploderos', 'skull'],
427: ['cuttler', 'skull'],
435: ['bawaab', 'blood'],
438: ['gnoof', 'blood'],
440: ['shalah', 'blood'],
441: ['ploderos', 'blood'],
444: ['arana', 'blood'],
446: ['javing', 'blood'],
447: ['cuttler', 'blood'],
453: ['gnoof'],
455: ['shalah'],
459: ['arana', 'moss'],
462: ['yber', 'bone', 'shaft', 'ammo_bullet'],
463: ['vorax', 'leather', 'barrel', 'armor_shell'],
464: ['vorax', 'bone', 'shaft', 'ammo_bullet'],
465: ['vorax', 'fang', 'explosive', 'stuffing'],
467: ['ocyx', 'claw', 'blade', 'point'],
468: ['najab', 'claw', 'blade', 'point'],
469: ['arana', 'wood', 'barrel', 'armor_shell'],
470: ['cray', 'shell', 'barrel', 'armor_shell'],
471: ['madakam', 'skin', 'grip', 'clothes'],
472: ['jubla', 'bud', 'jewel', 'magic_focus'],
473: ['stinga', 'bud', 'jewel', 'magic_focus'],
474: ['psykopla', 'bud', 'jewel', 'magic_focus'],
475: ['slaveni', 'bud', 'jewel', 'magic_focus'],
476: ['cratcha', 'bud', 'jewel', 'magic_focus'],
477: ['shooki', 'bud', 'jewel', 'magic_focus'],
479: ['kinrey', 'shell', 'barrel', 'armor_shell'],
480: ['kinrey', 'sting', 'blade', 'point'],
481: ['kinrey', 'mandible', 'shaft', 'ammo_bullet'],
485: ['kidinak', 'shell', 'barrel', 'armor_shell'],
487: ['kidinak', 'mandible', 'shaft', 'ammo_bullet'],
488: ['kidinak', 'tail', 'firing_pin', 'armor_clip'],
490: ['kizarak', 'shell', 'barrel', 'armor_shell'],
491: ['kizarak', 'sting', 'blade', 'point'],
492: ['kizarak', 'mandible', 'shaft', 'ammo_bullet'],
496: ['kipee', 'sting', 'blade', 'point'],
497: ['adriel', 'bark', 'shaft', 'ammo_bullet'],
498: ['arana', 'eye', 'jewel', 'magic_focus'],
499: ['arana', 'nail', 'trigger', 'jewel_setting'],
500: ['arana', 'pelvis', 'hammer', 'counterweight'],
501: ['arana', 'tooth', 'ammo_jacket', 'lining'],
502: ['arma', 'eye', 'jewel', 'magic_focus'],
503: ['arma', 'skin', 'grip', 'clothes'],
504: ['arma', 'pelvis', 'hammer', 'counterweight'],
505: ['arma', 'tooth', 'ammo_jacket', 'lining'],
506: ['bawaab', 'eye', 'jewel', 'magic_focus'],
507: ['bawaab', 'pelvis', 'hammer', 'counterweight'],
508: ['bawaab', 'tooth', 'ammo_jacket', 'lining'],
509: ['bodoc', 'eye', 'jewel', 'magic_focus'],
510: ['bodoc', 'pelvis', 'hammer', 'counterweight'],
511: ['bodoc', 'tooth', 'ammo_jacket', 'lining'],
512: ['bolobi', 'eye', 'jewel', 'magic_focus'],
514: ['bolobi', 'nail', 'trigger', 'jewel_setting'],
515: ['bolobi', 'pelvis', 'hammer', 'counterweight'],
516: ['bolobi', 'tooth', 'ammo_jacket', 'lining'],
517: ['capryni', 'eye', 'jewel', 'magic_focus'],
518: ['capryni', 'skin', 'grip', 'clothes'],
519: ['capryni', 'nail', 'trigger', 'jewel_setting'],
520: ['capryni', 'tooth', 'ammo_jacket', 'lining'],
521: ['clopper', 'mandible', 'shaft', 'ammo_bullet'],
522: ['clopper', 'secretion', 'explosive', 'stuffing'],
523: ['clopper', 'sting', 'blade', 'point'],
524: ['clopper', 'tail', 'firing_pin', 'armor_clip'],
525: ['cratcha', 'moss', 'ammo_jacket', 'lining'],
526: ['cray', 'claw', 'blade', 'point'],
527: ['cray', 'mandible', 'shaft', 'ammo_bullet'],
528: ['cray', 'secretion', 'explosive', 'stuffing'],
529: ['cray', 'tail', 'firing_pin', 'armor_clip'],
530: ['cuttler', 'claw', 'blade', 'point'],
531: ['cuttler', 'ligament', 'firing_pin', 'armor_clip'],
533: ['dante', 'sap', 'firing_pin', 'armor_clip'],
534: ['dung', 'resin', 'ammo_jacket', 'lining'],
535: ['enola', 'sap', 'firing_pin', 'armor_clip'],
536: ['frippo', 'eye', 'jewel', 'magic_focus'],
538: ['frippo', 'nail', 'trigger', 'jewel_setting'],
539: ['frippo', 'pelvis', 'hammer', 'counterweight'],
540: ['frippo', 'tooth', 'ammo_jacket', 'lining'],
541: ['fung', 'resin', 'ammo_jacket', 'lining'],
542: ['gingo', 'ligament', 'firing_pin', 'armor_clip'],
543: ['gnoof', 'eye', 'jewel', 'magic_focus'],
544: ['gnoof', 'skin', 'grip', 'clothes'],
545: ['gnoof', 'nail', 'trigger', 'jewel_setting'],
546: ['gnoof', 'pelvis', 'hammer', 'counterweight'],
547: ['gnoof', 'trunk', 'ammo_jacket', 'lining'],
548: ['goari', 'mandible', 'shaft', 'ammo_bullet'],
549: ['goari', 'secretion', 'explosive', 'stuffing'],
550: ['goari', 'sting', 'blade', 'point'],
551: ['goari', 'tail', 'firing_pin', 'armor_clip'],
552: ['gubani', 'eye', 'jewel', 'magic_focus'],
553: ['gubani', 'skin', 'grip', 'clothes'],
554: ['gubani', 'nail', 'trigger', 'jewel_setting'],
555: ['gubani', 'pelvis', 'hammer', 'counterweight'],
556: ['horncher', 'mandible', 'shaft', 'ammo_bullet'],
557: ['horncher', 'secretion', 'explosive', 'stuffing'],
558: ['horncher', 'sting', 'blade', 'point'],
559: ['horncher', 'tail', 'firing_pin', 'armor_clip'],
560: ['igara', 'beak', 'blade', 'point'],
561: ['igara', 'bone', 'shaft', 'ammo_bullet'],
562: ['igara', 'ligament', 'firing_pin', 'armor_clip'],
564: ['igara', 'wing', 'explosive', 'stuffing'],
565: ['irin', 'oil', 'explosive', 'stuffing'],
566: ['izam', 'beak', 'blade', 'point'],
567: ['izam', 'bone', 'shaft', 'ammo_bullet'],
568: ['izam', 'ligament', 'firing_pin', 'armor_clip'],
570: ['izam', 'wing', 'explosive', 'stuffing'],
571: ['javing', 'beak', 'blade', 'point'],
572: ['javing', 'bone', 'shaft', 'ammo_bullet'],
573: ['javing', 'ligament', 'firing_pin', 'armor_clip'],
574: ['javing', 'leather', 'barrel', 'armor_shell'],
575: ['jubla', 'moss', 'ammo_jacket', 'lining'],
576: ['jugula', 'bone', 'shaft', 'ammo_bullet'],
577: ['jugula', 'claw', 'blade', 'point'],
578: ['jugula', 'ligament', 'firing_pin', 'armor_clip'],
579: ['jugula', 'leather', 'barrel', 'armor_shell'],
580: ['kiban', 'mandible', 'shaft', 'ammo_bullet'],
581: ['kiban', 'secretion', 'explosive', 'stuffing'],
582: ['kiban', 'sting', 'blade', 'point'],
583: ['kiban', 'tail', 'firing_pin', 'armor_clip'],
584: ['kidinak', 'secretion', 'explosive', 'stuffing'],
585: ['kincher', 'mandible', 'shaft', 'ammo_bullet'],
586: ['kincher', 'secretion', 'explosive', 'stuffing'],
587: ['kincher', 'tail', 'firing_pin', 'armor_clip'],
588: ['kinrey', 'secretion', 'explosive', 'stuffing'],
589: ['kinrey', 'tail', 'firing_pin', 'armor_clip'],
590: ['kipee', 'mandible', 'shaft', 'ammo_bullet'],
591: ['kipee', 'secretion', 'explosive', 'stuffing'],
592: ['kipee', 'tail', 'firing_pin', 'armor_clip'],
593: ['kipesta', 'mandible', 'shaft', 'ammo_bullet'],
594: ['kipesta', 'sting', 'blade', 'point'],
595: ['kipesta', 'tail', 'firing_pin', 'armor_clip'],
596: ['kipesta', 'wing', 'explosive', 'stuffing'],
597: ['kipucka', 'claw', 'blade', 'point'],
598: ['kipucka', 'mandible', 'shaft', 'ammo_bullet'],
599: ['kipucka', 'secretion', 'explosive', 'stuffing'],
600: ['kirosta', 'mandible', 'shaft', 'ammo_bullet'],
601: ['kirosta', 'secretion', 'explosive', 'stuffing'],
602: ['kirosta', 'shell', 'barrel', 'armor_shell'],
603: ['kirosta', 'tail', 'firing_pin', 'armor_clip'],
604: ['kizarak', 'secretion', 'explosive', 'stuffing'],
605: ['kizarak', 'tail', 'firing_pin', 'armor_clip'],
606: ['kizoar', 'mandible', 'shaft', 'ammo_bullet'],
607: ['kizoar', 'shell', 'barrel', 'armor_shell'],
608: ['kizoar', 'sting', 'blade', 'point'],
609: ['kizoar', 'wing', 'explosive', 'stuffing'],
610: ['koorin', 'oil', 'explosive', 'stuffing'],
611: ['lumper', 'eye', 'jewel', 'magic_focus'],
612: ['lumper', 'pelvis', 'hammer', 'counterweight'],
613: ['madakam', 'eye', 'jewel', 'magic_focus'],
615: ['madakam', 'nail', 'trigger', 'jewel_setting'],
616: ['madakam', 'pelvis', 'hammer', 'counterweight'],
617: ['madakam', 'tooth', 'ammo_jacket', 'lining'],
618: ['mektoub', 'eye', 'jewel', 'magic_focus'],
619: ['mektoub', 'nail', 'trigger', 'jewel_setting'],
620: ['mektoub', 'pelvis', 'hammer', 'counterweight'],
621: ['messab', 'eye', 'jewel', 'magic_focus'],
623: ['mitexi', 'bark', 'shaft', 'ammo_bullet'],
624: ['moon', 'resin', 'ammo_jacket', 'lining'],
625: ['najab', 'bone', 'shaft', 'ammo_bullet'],
626: ['najab', 'fang', 'explosive', 'stuffing'],
627: ['najab', 'ligament', 'firing_pin', 'armor_clip'],
629: ['nita', 'node', 'hammer', 'counterweight'],
630: ['oath', 'bark', 'shaft', 'ammo_bullet'],
632: ['ocyx', 'fang', 'explosive', 'stuffing'],
633: ['ocyx', 'ligament', 'firing_pin', 'armor_clip'],
634: ['ocyx', 'shell', 'barrel', 'armor_shell'],
635: ['ploderos', 'eye', 'jewel', 'magic_focus'],
636: ['ploderos', 'skin', 'grip', 'clothes'],
637: ['ploderos', 'nail', 'trigger', 'jewel_setting'],
638: ['ploderos', 'pelvis', 'hammer', 'counterweight'],
639: ['ploderos', 'tooth', 'ammo_jacket', 'lining'],
640: ['psykopla', 'moss', 'ammo_jacket', 'lining'],
641: ['ragus', 'ligament', 'firing_pin', 'armor_clip'],
642: ['raspal', 'eye', 'jewel', 'magic_focus'],
643: ['raspal', 'skin', 'grip', 'clothes'],
644: ['raspal', 'nail', 'trigger', 'jewel_setting'],
645: ['raspal', 'pelvis', 'hammer', 'counterweight'],
646: ['raspal', 'tooth', 'ammo_jacket', 'lining'],
647: ['rendor', 'eye', 'jewel', 'magic_focus'],
648: ['rendor', 'skin', 'grip', 'clothes'],
649: ['rendor', 'nail', 'trigger', 'jewel_setting'],
650: ['rendor', 'pelvis', 'hammer', 'counterweight'],
651: ['rendor', 'tooth', 'ammo_jacket', 'lining'],
653: ['shalah', 'eye', 'jewel', 'magic_focus'],
654: ['shalah', 'skin', 'grip', 'clothes'],
655: ['shalah', 'nail', 'trigger', 'jewel_setting'],
656: ['shalah', 'pelvis', 'hammer', 'counterweight'],
657: ['shalah', 'tooth', 'ammo_jacket', 'lining'],
659: ['silvio', 'seed', 'trigger', 'jewel_setting'],
652: ['scratch', 'node', 'hammer', 'counterweight'],
658: ['shooki', 'moss', 'ammo_jacket', 'lining'],
660: ['slaveni', 'moss', 'ammo_jacket', 'lining'],
661: ['stinga', 'moss', 'ammo_jacket', 'lining'],
662: ['tansy', 'node', 'hammer', 'counterweight'],
663: ['timari', 'eye', 'jewel', 'magic_focus'],
664: ['timari', 'nail', 'trigger', 'jewel_setting'],
665: ['timari', 'pelvis', 'hammer', 'counterweight'],
666: ['torbak', 'ligament', 'firing_pin', 'armor_clip'],
667: ['tyrancha', 'bone', 'shaft', 'ammo_bullet'],
668: ['tyrancha', 'fang', 'explosive', 'stuffing'],
669: ['tyrancha', 'ligament', 'firing_pin', 'armor_clip'],
670: ['tyrancha', 'leather', 'barrel', 'armor_shell'],
671: ['varinx', 'claw', 'blade', 'point'],
672: ['varinx', 'ligament', 'firing_pin', 'armor_clip'],
673: ['vorax', 'ligament', 'firing_pin', 'armor_clip'],
675: ['wombai', 'eye', 'jewel', 'magic_focus'],
676: ['wombai', 'pelvis', 'hammer', 'counterweight'],
677: ['wombai', 'spine', 'trigger', 'jewel_setting'],
678: ['wombai', 'trunk', 'ammo_jacket', 'lining'],
679: ['yana', 'node', 'hammer', 'counterweight'],
680: ['yber', 'beak', 'blade', 'point'],
681: ['yber', 'ligament', 'firing_pin', 'armor_clip'],
682: ['yelk', 'skin', 'grip', 'clothes'],
683: ['yelk', 'pelvis', 'hammer', 'counterweight'],
684: ['yetin', 'bone', 'shaft', 'ammo_bullet'],
685: ['yetin', 'claw', 'blade', 'point'],
686: ['yetin', 'ligament', 'firing_pin', 'armor_clip'],
687: ['yetin', 'leather', 'barrel', 'armor_shell'],
688: ['yubo', 'eye', 'jewel', 'magic_focus'],
690: ['yubo', 'nail', 'trigger', 'jewel_setting'],
691: ['yubo', 'pelvis', 'hammer', 'counterweight'],
692: ['yubo', 'tooth', 'ammo_jacket', 'lining'],
693: ['zerx', 'ligament', 'firing_pin', 'armor_clip'],
694: ['zerx', 'leather', 'barrel', 'armor_shell'],
695: ['kipucker', 'claw'],
#696: ['pendant'],
710: ['goo'],
741: ['tekorn', 'op_mat', 'modified'],
742: ['maga', 'op_mat', 'modified'],
743: ['armilo', 'op_mat', 'modified'],
744: ['greslin', 'op_mat', 'modified'],
745: ['tekorn', 'op_mat', 'purified'],
746: ['maga', 'op_mat', 'purified'],
747: ['armilo', 'op_mat', 'purified'],
748: ['greslin', 'op_mat', 'purified'],
749: ['vedice', 'op_mat', 'modified'],
750: ['cheng', 'op_mat', 'modified'],
751: ['rubbarn', 'op_mat', 'modified'],
752: ['egiros', 'op_mat', 'modified'],
753: ['vedice', 'op_mat', 'purified'],
754: ['cheng', 'op_mat', 'purified'],
755: ['rubbarn', 'op_mat', 'purified'],
756: ['egiros', 'op_mat', 'purified'],
758: ['gingo', 'fur', 'stuffing'],
759: ['igara', 'skin', 'stuffing'],
#: ['', ],
}
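# Companion sketch: extract the numeric material type with material_type_re and
# look up its specific tags; returns an empty list for non-material ids.
def material_tags_for(item_id):
    match = material_type_re.search(item_id)
    return material_specific_tags.get(int(match.group(1)), []) if match else []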
|
from smbus import SMBus
import time
addr = 0x8 # I2C address of the target device
bus = SMBus(1) # indicates /dev/i2c-1
while True:
bus.write_byte(addr, 0x41) # switch it on
time.sleep(0.01)
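# Hedged companion sketch (left as comments, since the loop above never
# exits): smbus can also read a byte back from the device, assuming the
# peripheral at 0x8 answers read requests -- a detail not confirmed by the
# original script:
#     value = bus.read_byte(addr)
#     print(hex(value))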
|
import logging
import sys, os
from abc import abstractmethod
import eons
from .IOFormatFunctor import IOFormatFunctor
class InputFormatFunctor(IOFormatFunctor):
    def __init__(self, name=eons.INVALID_NAME()):
        super().__init__(name)
        # self.data will be returned, so we shouldn't be asking for it.
        self.requiredKWArgs.remove("data")

    # Input Functors are expected to populate self.data with the contents of
    # self.file. The data member is then returned by UserFunction.
    # This is done to help enforce consistency.
    @abstractmethod
    def ParseInput(self):
        raise NotImplementedError

    def UserFunction(self, **kwargs):
        self.file = open(kwargs.get("file"), "r")
        self.ParseInput()  # populate self.data
        self.file.close()  # release the file handle once parsing is done
        return self.data

    # Override DataFunctor's PreCall to ensure we don't try to read the unneeded data kwarg.
    def PreCall(self, **kwargs):
        self.Clear()
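# A hedged sketch of a concrete subclass (hypothetical; not part of this
# package): a JSON input format only needs to implement ParseInput and fill
# self.data from self.file, exactly as the comments above describe.
import json
class JsonInputFormat(InputFormatFunctor):
    def ParseInput(self):
        self.data = json.load(self.file)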
|
#plot_ts_hydrographs.py
#python script to read PET data files and create PET comparison plots
#Author: Ryan Spies
#rspies@lynker.com
print("Start Script")
import os
import matplotlib.pyplot as plt
from pylab import *
from matplotlib.ticker import AutoMinorLocator
import pandas as pd
maindir = os.getcwd()
############################### User Input ####################################
maindir = os.path.abspath(r'D:\projects\2021_twdb_wgrfc_calb\processed_data\PET')
rfc = 'WGRFC'
version = 'final' # choices: 'initial', 'draft', 'final'
#csv_read_NWS = open(maindir + os.sep + 'plot_input' + os.sep + rfc + '_Plot_PET_Initial_NWS_MAPEAdj.csv', 'r')
csv_read_FAO = open(maindir + os.sep + 'plot_input' + os.sep + rfc + '_Plot_PET_Initial_FAOPM.csv', 'r')
csv_read_apri = open(maindir + os.sep + 'plot_input' + os.sep + rfc + '_Plot_PET_apriori_ETD.csv', 'r')
csv_read_MAPE = open(maindir + os.sep + 'plot_input' + os.sep + rfc + '_Plot_PET_MAPE_ETD.csv', 'r')
csv_read_station = open(maindir + os.sep + 'plot_input' + os.sep + rfc + '_Plot_PET_station_climofill_ETD.csv', 'r')
csv_read_calb = maindir + os.sep + 'plot_input' + os.sep + rfc + '_Plot_PET_'+ version + '_calb.csv'
############################ End User Input ###################################
print('Be sure to check all units!!!!!!!!!')
#data_NWS = pd.read_csv(csv_read_NWS, delimiter=',', skip_footer=0, header=0)
data_FAO = pd.read_csv(csv_read_FAO, delimiter=',', header=0)
data_apri = pd.read_csv(csv_read_apri, delimiter=',', header=0)
data_MAPE = pd.read_csv(csv_read_MAPE, delimiter=',', header=0)
data_station_fill = pd.read_csv(csv_read_station, delimiter=',', header=0)
if version == 'draft' or version == 'final':
data_calb = pd.read_csv(open(csv_read_calb,'r'), delimiter=',', header=0)
#csv_read_calb.close()
# close files
#csv_read_NWS.close()
csv_read_FAO.close(); csv_read_MAPE.close(); csv_read_apri.close(); csv_read_station.close()
all_basins = list(data_FAO.columns.values)
x = range(1,13,1) # month range
count = 1
plot_num = 1
#fig = plt.figure(figsize=(6.0,7.5))
#all_basins = ['NGET2']
for basin in all_basins:
if basin != 'month' and basin != 'Month':
fig, ax1 = plt.subplots(figsize=(6.0,6.0))
#fig = plt.figure(figsize=(6.0,8.5))
fig.suptitle(basin + ': Mid-month ET-Demand Comparison'+'\n(Units = mm/day)',fontsize=15)
#y1 = list(data_NWS[basin])[:12]
y2 = list(data_FAO[basin])[:12]
y3 = list(data_apri[basin])[:12]
y4 = list(data_MAPE[basin])[:12]
y5 = list(data_station_fill[basin])[:12]
#ax1 = plt.subplot(5,20,plot_num)
#ax1.plot(x,y1, color='black', markersize=4, linestyle='-', linewidth=1, zorder=5, label = 'NWS initial (MAPE * PEadj)')
ax1.plot(x,y2, color='orange', markersize=4, linestyle='-', linewidth=2.5, zorder=5, label = 'FAO-PM')
ax1.plot(x,y3, color='purple', markersize=4, linestyle='-', linewidth=2.5, zorder=5, label = 'Apriori ETD')
ax1.plot(x,y4, color='green', markersize=4, linestyle='-', linewidth=2.5, zorder=5, label = 'MAPE ETD')
ax1.plot(x,y5, color='brown', markersize=4, linestyle='-', linewidth=2.5, zorder=5, label = 'Station Climo ETD')
if version == 'draft' or version == 'final':
y6 = list(data_calb[basin])[:12]
            ax1.plot(x,y6, color='aqua', markersize=5, linestyle='-', linewidth=2.5, zorder=5, label = 'MAPE * PEadj (' + version + ' calb)')
ax1.minorticks_on()
ax1.grid(which='major', axis='both', color='black', linestyle='-', zorder=3, alpha=0.3)
ax1.grid(which='minor', axis='both', color='grey', linestyle='-', zorder=3, alpha=0.3)
#ax1.set_xlabel('Month', fontsize='4')
#ax1.set_ylabel('PET [mm/day]', fontsize='8')
#labels = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
labels = ['J', 'F', 'M', 'A', 'M', 'J', 'J', 'A', 'S', 'O', 'N', 'D']
ax1.set_xlabel('Month')
ax1.set_ylabel('PET mm/day')
plt.xticks(x, labels, fontsize=12)
ax1.tick_params(labelsize=12)
ax1.set_xlim([1,12])
ax1.set_ylim([0,13])
#ax1.legend(loc='upper left', prop={'size':4})
ax1.xaxis.set_minor_locator(AutoMinorLocator(1))
ax1.yaxis.set_minor_locator(AutoMinorLocator(1))
#ax1.xaxis.set_minor_locator(AutoMinorLocator(5))
plt.ioff()
#plt.title(basin, fontsize='13')
count +=2; plot_num += 1
#### add a single legend outside the subplots
ax1.legend(loc="center left",fontsize=12,ncol=2,bbox_to_anchor=(0.05,-0.3))
print(basin + ' saving figure...')
figname = maindir + os.sep + 'plots' + os.sep + version + os.sep + basin + '_PET_' + version + '_analysis_MAPE.png'
#plt.savefig(figname, bbox_inches='tight', dpi=300)
plt.tight_layout()
#subplots_adjust(left=None, bottom=None, right=None, top=0.9, wspace=0.25, hspace=0.5) #adjust white space (hspace is space on top of each subplot)
plt.savefig(figname, bbox_inches='tight', dpi=350)
plt.close()
print("End Script")
|
#!/usr/bin/env python
from Tkinter import *
import tkMessageBox
import tkSimpleDialog
import logic
import math
def error(message):
tkMessageBox.showerror(message=message)
class Heading(Label):
def __init__(self, parent, **kwargs):
Label.__init__(self, parent, font=('Sans-serif', 16, 'bold'), **kwargs)
class OperationChar(Frame):
def __init__(self, parent, character, operation, **kwargs):
Frame.__init__(self, parent, pady=2, **kwargs)
operation = ' is for ' + operation
Label(self, text=character, bg='gray', width=3).pack(side=LEFT)
Label(self, text=operation).pack(side=LEFT)
class HelpWindow(Toplevel):
def __init__(self, **kwargs):
Toplevel.__init__(self, **kwargs)
self.minsize(420, 460)
self.title('PyLogic Help')
Label(self, text='PyLogic is a utility for propositional logic.').pack(fill=X)
Heading(self, text='Characters for connectives').pack(fill=X, pady=(20, 5))
operations = Frame(self)
OperationChar(operations, '~', 'NOT').pack(fill=X)
OperationChar(operations, '^', 'AND').pack(fill=X)
OperationChar(operations, 'v', 'OR').pack(fill=X)
OperationChar(operations, 'xor', 'XOR').pack(fill=X)
OperationChar(operations, '|', 'NAND').pack(fill=X)
OperationChar(operations, 'nor', 'NOR').pack(fill=X)
OperationChar(operations, '->', 'the material conditional').pack(fill=X)
OperationChar(operations, '<->', 'the logical biconditional').pack(fill=X)
operations.pack()
Heading(self, text='Example expressions').pack(fill=X, pady=(20, 5))
Label(self, text='~p').pack(fill=X)
Label(self, text='p -> q').pack(fill=X)
Label(self, text='p ^ ~q').pack(fill=X)
Label(self, text='p v (p ^ q)').pack(fill=X)
TRUTH_TABLE, CIRCUIT_BUILDER = range(2)
class ButtonRowFrame(Frame):
def __init__(self, app, **kwargs):
Frame.__init__(self, app, **kwargs)
self.app = app
self.mode = TRUTH_TABLE
self.mode_swap_text = StringVar()
self.update_mode_swap_text()
Button(self, text='Exit', command=app.exit).pack(side=RIGHT)
Button(self, width=12, textvariable=self.mode_swap_text,
command=self.toggle_mode).pack(side=RIGHT)
def update_mode_swap_text(self):
if self.mode == TRUTH_TABLE:
self.mode_swap_text.set('Circuit Builder')
else:
self.mode_swap_text.set('Truth Tabler')
def toggle_mode(self):
self.app.main_frame.pack_forget()
if self.mode == TRUTH_TABLE:
self.app.main_frame = CircuitBuilder(self.app)
self.mode = CIRCUIT_BUILDER
else:
self.app.main_frame = TruthTableFrame(self.app)
self.mode = TRUTH_TABLE
self.app.main_frame.pack(fill=BOTH, expand=True, padx=10, pady=10)
self.update_mode_swap_text()
VARIABLE_DIMENSIONS = (120, 80)
OPERATION_DIMENSIONS = (60, 40)
def distance(p1, p2):
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x2-x1)**2 + (y2-y1)**2)
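# e.g. distance((0, 0), (3, 4)) == 5.0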
class CircuitItem(object):
def __init__(self, x, y, item_id, label_id):
self.x = x
self.y = y
self.on = True
self.item_id = item_id
self.label_id = label_id
self.selected = True
def move(self, dx, dy):
self.x += dx
self.y += dy
def get_fill(self):
if self.on:
fill = 'springgreen3' if self.selected else 'springgreen2'
else:
fill = 'firebrick3' if self.selected else 'firebrick2'
return fill
def get_outline(self):
return 'black' if self.selected else 'gray'
def get_colors(self):
return (self.get_fill(), self.get_outline())
class CircuitOperation(CircuitItem):
def __init__(self, x, y, item_id, label_id, op_class, *variables):
super(CircuitOperation, self).__init__(x, y, item_id, label_id)
self.op_class = op_class
self.variables = variables
self.reevaluate()
def reevaluate(self):
terms = map(lambda i: (logic.F, logic.T)[i.on], self.variables)
self.on = self.op_class(*terms).evaluate({})
class CircuitVariable(CircuitItem):
def __init__(self, x, y, item_id, label_id, name):
super(CircuitVariable, self).__init__(x, y, item_id, label_id)
self.name = name
def toggle(self):
self.on = not self.on
class CircuitCanvasFrame(Frame):
def __init__(self, parent, app, **kwargs):
Frame.__init__(self, parent, **kwargs)
self.canvas = Canvas(self, width=1, height=1)
self.canvas.bind('<Button-1>', self.button_1)
self.canvas.bind('<ButtonRelease-1>', self.button_1_release)
self.canvas.bind('<B1-Motion>', self.button_1_motion)
self.canvas.pack(fill=BOTH, expand=True)
self.operations = []
self.items = []
self.down = False
self.item_id = None
self.line_ids = []
def move(self, item_id, dx, dy):
item = self.get_item(item_id)
item.move(dx, dy)
self.canvas.move(item_id, dx, dy)
self.canvas.move(item.label_id, dx, dy)
def button_1(self, event):
self.down = True
self.startx = event.x
self.starty = event.y
item_ids = self.canvas.find_overlapping(event.x, event.y, event.x, event.y)
for item_id in sorted(item_ids):
if self.get_item(item_id):
self.item_id = item_id
def button_1_motion(self, event):
self.down = False
dx = event.x - self.startx
dy = event.y - self.starty
self.startx = event.x
self.starty = event.y
if self.item_id:
self.move(self.item_id, dx, dy)
else:
for item in self.items:
self.move(item.item_id, dx, dy)
self.draw_lines()
def button_1_release(self, event):
if self.down:
# toggle selection on item
if self.item_id:
if self.get_item(self.item_id).selected:
self.deselect(self.item_id)
else:
self.select(self.item_id)
# deselect all items
else:
for item in self.items:
item.selected = False
self.update_item(item.item_id)
self.item_id = None
def get_item(self, item_id=None):
        # return the CircuitItem object corresponding to the item with ID `item_id`
if item_id is None:
item_id = self.item_id
for item in self.items:
if item.item_id == item_id:
return item
return None
def update_item(self, item):
if isinstance(item, int):
return self.update_item(self.get_item(item))
fill, outline = item.get_colors()
self.canvas.itemconfig(item.item_id, fill=fill, outline=outline)
def select(self, item):
if isinstance(item, int):
return self.select(self.get_item(item))
item.selected = True
self.update_item(item)
def deselect(self, item):
if isinstance(item, int):
return self.deselect(self.get_item(item))
item.selected = False
self.update_item(item)
def add_item(self, label, dimensions):
# place items in the center of the canvas
x, y = self.canvas.winfo_width()/2, self.canvas.winfo_height()/2
width, height = dimensions
x1 = x - width/2
x2 = x + width/2
y1 = y - height/2
y2 = y + height/2
item_id = self.canvas.create_rectangle(x1, y1, x2, y2, width=2)
label_id = self.canvas.create_text(x, y, anchor=CENTER, text=label)
return (x, y, item_id, label_id)
def add_variable(self, name):
x, y, item_id, label_id = self.add_item(name, VARIABLE_DIMENSIONS)
self.items.append(CircuitVariable(x, y, item_id, label_id, name))
self.update_item(item_id)
def add_operation(self, op_class, name=None):
name = name if name else op_class.__name__
variables = []
for item in self.items:
if item.selected:
variables.append(item)
# validate operation
if op_class is logic.Not and len(variables) != 1:
return error('Not expressions can only have 1 term')
if issubclass(op_class, logic.BinaryOperation):
if len(variables) < 2:
return error('Logical connectives require at least 2 terms')
if op_class.two_args and len(variables) > 2:
return error('%s expressions can only have 2 terms' % name)
for item in variables:
self.deselect(item)
x, y, item_id, label_id = self.add_item(name.upper(), OPERATION_DIMENSIONS)
operation = CircuitOperation(x, y, item_id, label_id, op_class, *variables)
self.items.append(operation)
self.operations.append((operation, variables))
self.update_item(operation)
self.draw_lines()
def draw_lines(self):
# remove all lines
while self.line_ids:
self.canvas.delete(self.line_ids.pop())
# draw shortest line between each of the variables in each operation
for operation, variables in self.operations:
for variable in variables:
vw, vh = VARIABLE_DIMENSIONS if isinstance(variable, CircuitVariable) else OPERATION_DIMENSIONS
vw_half, vh_half = vw/2, vh/2
ow, oh = OPERATION_DIMENSIONS
ow_half, oh_half = ow/2, oh/2
vx, vy = variable.x, variable.y
ox, oy = operation.x, operation.y
v_points = ((vx, vy+vh_half),
(vx, vy-vh_half),
(vx+vw_half, vy),
(vx-vw_half, vy))
o_points = ((ox, oy+oh_half),
(ox, oy-oh_half),
(ox+ow_half, oy),
(ox-ow_half, oy))
dist = distance(v_points[0], o_points[0])
line = (v_points[0], o_points[0])
for v_point in v_points:
for o_point in o_points:
if distance(v_point, o_point) < dist:
dist = distance(v_point, o_point)
line = (v_point, o_point)
line_id = self.canvas.create_line(line[0][0], line[0][1],
line[1][0], line[1][1],
arrow=LAST)
self.line_ids.append(line_id)
def reevaluate(self):
# reevaluate each operation
for op, _ in self.operations:
op.reevaluate()
self.update_item(op)
def toggle(self):
names = []
for item in self.items:
# don't toggle operations
if isinstance(item, CircuitOperation):
if item.selected:
self.deselect(item)
continue
# toggle selected items
if item.selected:
if item.name not in names:
names.append(item.name)
for item in self.items:
if isinstance(item, CircuitVariable) and item.name in names:
item.toggle()
self.update_item(item)
# reevaluate everything (this is lazy)
self.reevaluate()
def remove(self):
for item in self.items:
# only removing items we've selected
if not item.selected:
continue
for op, variables in self.operations:
if item in variables and len(variables) <= 2:
if isinstance(item, CircuitVariable):
item_noun = "The variable '%s'" % item.name
else:
item_noun = 'That operation'
error('%s is part of an expression that cannot be reduced'
% item_noun)
break
# haven't encountered any conflicts
else:
self.items.remove(item)
for i, (op, variables) in enumerate(self.operations):
if item in variables:
variables.remove(item)
if op is item:
self.operations.pop(i)
# clear the rectangle and text from the canvas
self.canvas.delete(item.item_id)
self.canvas.delete(item.label_id)
# redraw the lines (lazy again, just removing now-obsolete lines)
self.draw_lines()
class CircuitButtons(Frame):
def __init__(self, parent, app, **kwargs):
Frame.__init__(self, parent, **kwargs)
self.app = app
self.parent = parent
def command(op_class, name=None):
def command_():
parent.canvas_frame.add_operation(op_class, name)
return command_
Button(self, text='Add Variable', command=self.add_variable).pack(side=LEFT)
Button(self, text='On/Off', command=parent.canvas_frame.toggle).pack(side=LEFT)
Button(self, text='Remove', command=parent.canvas_frame.remove).pack(side=LEFT)
Button(self, text='XNOR', command=command(logic.Biconditional, 'Xnor')).pack(side=RIGHT)
Button(self, text='NOR', command=command(logic.Nor)).pack(side=RIGHT)
Button(self, text='NAND', command=command(logic.Nand)).pack(side=RIGHT)
Button(self, text='XOR', command=command(logic.Xor)).pack(side=RIGHT)
Button(self, text='OR', command=command(logic.Or)).pack(side=RIGHT)
Button(self, text='AND', command=command(logic.And)).pack(side=RIGHT)
Button(self, text='NOT', command=command(logic.Not)).pack(side=RIGHT)
def add_variable(self):
name = tkSimpleDialog.askstring('Add variable', 'Name of new variable:')
if name is None:
return
if logic.isvar(name):
self.parent.canvas_frame.add_variable(name)
else:
error('Invalid variable name')
class CircuitBuilder(Frame):
def __init__(self, app, **kwargs):
Frame.__init__(self, app, **kwargs)
self.app = app
self.canvas_frame = CircuitCanvasFrame(self, app, relief=SUNKEN, bd=2)
self.canvas_frame.pack(fill=BOTH, expand=True)
CircuitButtons(self, app).pack(fill=X, pady=(10, 0))
class ControlFrame(Frame):
def __init__(self, parent, app, **kwargs):
Frame.__init__(self, parent, **kwargs)
self.app = app
self.parent = parent
self.expr = StringVar()
self.output = StringVar()
self.expr_entry = Entry(self, textvariable=self.expr)
self.expr_entry.bind('<Return>', self.evaluate)
self.expr_entry.focus_set()
self.expr_entry.pack(side=LEFT)
Button(self, text='Evaluate', command=self.evaluate).pack(side=LEFT)
def evaluate(self, event=None):
try:
tt = str(logic.truth_table(self.expr.get()))
self.parent.output(tt)
except logic.TooManyVariablesError as e:
error('Cannot generate truth table: ' + str(e))
except SyntaxError as e:
error('Syntax error: ' + str(e))
self.expr_entry.select_range(0, END)
class OutputFrame(Frame):
def __init__(self, parent, app, **kwargs):
Frame.__init__(self, parent, **kwargs)
self.app = app
self.output = StringVar()
Label(self, textvariable=self.output, font=('Courier', 16)).pack()
class TruthTableFrame(Frame):
def __init__(self, app, **kwargs):
Frame.__init__(self, app, **kwargs)
ControlFrame(self, app).pack(pady=20)
self.output_frame = OutputFrame(self, app)
self.output_frame.pack()
def output(self, output):
self.output_frame.output.set(output)
class App(Frame):
def __init__(self, root):
Frame.__init__(self, root)
self.root = root
root.title('PyLogic')
root.minsize(720, 480)
menu = Menu(root)
file_menu = Menu(menu, tearoff=0)
file_menu.add_command(label='Exit', command=self.exit)
menu.add_cascade(label='File', menu=file_menu)
help_menu = Menu(menu, tearoff=0)
help_menu.add_command(label='PyLogic Help', command=self.help)
menu.add_cascade(label='Help', menu=help_menu)
root.config(menu=menu)
ButtonRowFrame(self).pack(fill=X)
self.main_frame = TruthTableFrame(self)
self.main_frame.pack()
def exit(self):
self.root.destroy()
def help(self):
HelpWindow(padx=20, pady=20)
def main():
root = Tk()
App(root).pack(fill=BOTH, expand=True)
root.mainloop()
if __name__ == '__main__':
main()
|
from itertools import combinations
lst = [2, 5, 9, 4]
for subset in combinations(lst, 2):
print(subset)
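# Expected output -- each 2-element combination in input order:
# (2, 5)
# (2, 9)
# (2, 4)
# (5, 9)
# (5, 4)
# (9, 4)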
|
Python 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 19:29:22) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> # alistirma13 (Turkish: "exercise 13")
>>>
>>> for a in range(1,10):
for b in range(0,10):
for c in range(1,10):
for d in range(0,10):
if 1000*a+100*b+10*c+d==11*(10*a+b+10*c+d):
sayi1=10*a+b
sayi2=10*c+d
>>> sayi1
10
>>> sayi2
89
>>>
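>>> # the loops leave sayi1 = 10 and sayi2 = 89: 1089 == 11 * (10 + 89) == 11 * 99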
|
import warnings
from typing import Callable, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from ..utils import _log_api_usage_once, _make_ntuple
interpolate = torch.nn.functional.interpolate
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed
Args:
num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)``
eps (float): a value added to the denominator for numerical stability. Default: 1e-5
"""
def __init__(
self,
num_features: int,
eps: float = 1e-5,
):
super().__init__()
_log_api_usage_once(self)
self.eps = eps
self.register_buffer("weight", torch.ones(num_features))
self.register_buffer("bias", torch.zeros(num_features))
self.register_buffer("running_mean", torch.zeros(num_features))
self.register_buffer("running_var", torch.ones(num_features))
def _load_from_state_dict(
self,
state_dict: dict,
prefix: str,
local_metadata: dict,
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x: Tensor) -> Tensor:
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
scale = w * (rv + self.eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})"
class ConvNormActivation(torch.nn.Sequential):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, ...]] = 3,
stride: Union[int, Tuple[int, ...]] = 1,
padding: Optional[Union[int, Tuple[int, ...], str]] = None,
groups: int = 1,
norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
dilation: Union[int, Tuple[int, ...]] = 1,
inplace: Optional[bool] = True,
bias: Optional[bool] = None,
conv_layer: Callable[..., torch.nn.Module] = torch.nn.Conv2d,
) -> None:
if padding is None:
if isinstance(kernel_size, int) and isinstance(dilation, int):
padding = (kernel_size - 1) // 2 * dilation
else:
_conv_dim = len(kernel_size) if isinstance(kernel_size, Sequence) else len(dilation)
kernel_size = _make_ntuple(kernel_size, _conv_dim)
dilation = _make_ntuple(dilation, _conv_dim)
padding = tuple((kernel_size[i] - 1) // 2 * dilation[i] for i in range(_conv_dim))
if bias is None:
bias = norm_layer is None
layers = [
conv_layer(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=dilation,
groups=groups,
bias=bias,
)
]
if norm_layer is not None:
layers.append(norm_layer(out_channels))
if activation_layer is not None:
params = {} if inplace is None else {"inplace": inplace}
layers.append(activation_layer(**params))
super().__init__(*layers)
_log_api_usage_once(self)
self.out_channels = out_channels
if self.__class__ == ConvNormActivation:
warnings.warn(
"Don't use ConvNormActivation directly, please use Conv2dNormActivation and Conv3dNormActivation instead."
)
class Conv2dNormActivation(ConvNormActivation):
"""
Configurable block used for Convolution2d-Normalization-Activation blocks.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
kernel_size: (int, optional): Size of the convolving kernel. Default: 3
stride (int, optional): Stride of the convolution. Default: 1
padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm2d``
activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
dilation (int): Spacing between kernel elements. Default: 1
inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]] = 3,
stride: Union[int, Tuple[int, int]] = 1,
padding: Optional[Union[int, Tuple[int, int], str]] = None,
groups: int = 1,
norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
dilation: Union[int, Tuple[int, int]] = 1,
inplace: Optional[bool] = True,
bias: Optional[bool] = None,
) -> None:
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups,
norm_layer,
activation_layer,
dilation,
inplace,
bias,
torch.nn.Conv2d,
)
class Conv3dNormActivation(ConvNormActivation):
"""
Configurable block used for Convolution3d-Normalization-Activation blocks.
Args:
in_channels (int): Number of channels in the input video.
out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
kernel_size: (int, optional): Size of the convolving kernel. Default: 3
stride (int, optional): Stride of the convolution. Default: 1
padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm3d``
activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
dilation (int): Spacing between kernel elements. Default: 1
inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int, int]] = 3,
stride: Union[int, Tuple[int, int, int]] = 1,
padding: Optional[Union[int, Tuple[int, int, int], str]] = None,
groups: int = 1,
norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm3d,
activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
dilation: Union[int, Tuple[int, int, int]] = 1,
inplace: Optional[bool] = True,
bias: Optional[bool] = None,
) -> None:
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups,
norm_layer,
activation_layer,
dilation,
inplace,
bias,
torch.nn.Conv3d,
)
class SqueezeExcitation(torch.nn.Module):
"""
This block implements the Squeeze-and-Excitation block from https://arxiv.org/abs/1709.01507 (see Fig. 1).
Parameters ``activation``, and ``scale_activation`` correspond to ``delta`` and ``sigma`` in eq. 3.
Args:
input_channels (int): Number of channels in the input image
squeeze_channels (int): Number of squeeze channels
activation (Callable[..., torch.nn.Module], optional): ``delta`` activation. Default: ``torch.nn.ReLU``
scale_activation (Callable[..., torch.nn.Module]): ``sigma`` activation. Default: ``torch.nn.Sigmoid``
"""
def __init__(
self,
input_channels: int,
squeeze_channels: int,
activation: Callable[..., torch.nn.Module] = torch.nn.ReLU,
scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
) -> None:
super().__init__()
_log_api_usage_once(self)
self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)
self.activation = activation()
self.scale_activation = scale_activation()
def _scale(self, input: Tensor) -> Tensor:
scale = self.avgpool(input)
scale = self.fc1(scale)
scale = self.activation(scale)
scale = self.fc2(scale)
return self.scale_activation(scale)
def forward(self, input: Tensor) -> Tensor:
scale = self._scale(input)
return scale * input
class MLP(torch.nn.Sequential):
"""This block implements the multi-layer perceptron (MLP) module.
Args:
in_channels (int): Number of channels of the input
hidden_channels (List[int]): List of the hidden channel dimensions
norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the linear layer. If ``None`` this layer won't be used. Default: ``None``
activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the linear layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
inplace (bool, optional): Parameter for the activation layer, which can optionally do the operation in-place.
Default is ``None``, which uses the respective default values of the ``activation_layer`` and Dropout layer.
bias (bool): Whether to use bias in the linear layer. Default ``True``
dropout (float): The probability for the dropout layer. Default: 0.0
"""
def __init__(
self,
in_channels: int,
hidden_channels: List[int],
norm_layer: Optional[Callable[..., torch.nn.Module]] = None,
activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
inplace: Optional[bool] = None,
bias: bool = True,
dropout: float = 0.0,
):
# The addition of `norm_layer` is inspired from the implementation of TorchMultimodal:
# https://github.com/facebookresearch/multimodal/blob/5dec8a/torchmultimodal/modules/layers/mlp.py
params = {} if inplace is None else {"inplace": inplace}
layers = []
in_dim = in_channels
for hidden_dim in hidden_channels[:-1]:
layers.append(torch.nn.Linear(in_dim, hidden_dim, bias=bias))
if norm_layer is not None:
layers.append(norm_layer(hidden_dim))
layers.append(activation_layer(**params))
layers.append(torch.nn.Dropout(dropout, **params))
in_dim = hidden_dim
layers.append(torch.nn.Linear(in_dim, hidden_channels[-1], bias=bias))
layers.append(torch.nn.Dropout(dropout, **params))
super().__init__(*layers)
_log_api_usage_once(self)
class Permute(torch.nn.Module):
"""This module returns a view of the tensor input with its dimensions permuted.
Args:
dims (List[int]): The desired ordering of dimensions
"""
def __init__(self, dims: List[int]):
super().__init__()
self.dims = dims
def forward(self, x: Tensor) -> Tensor:
return torch.permute(x, self.dims)
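# Hedged usage sketch (illustrative; not part of the torchvision source above,
# and it assumes this file's relative imports resolve when run in-package):
# stacking the blocks defined in this module on a dummy batch.
if __name__ == "__main__":
    x = torch.randn(2, 3, 32, 32)
    conv = Conv2dNormActivation(3, 16, kernel_size=3, stride=2)  # conv + BN + ReLU, halves H and W
    se = SqueezeExcitation(16, squeeze_channels=4)  # channel-wise reweighting, shape-preserving
    y = se(conv(x))  # -> (2, 16, 16, 16)
    mlp = MLP(16, [32, 10], dropout=0.1)
    z = mlp(y.mean(dim=(2, 3)))  # global average pool, then MLP -> (2, 10)
    print(y.shape, z.shape)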
|
#!/usr/bin/env python
'''
script.py: part of singularity command line tool
Runtime executable, "shub"
'''
from singularity.package import package, docker2singularity
from singularity.runscript import get_runscript_template
from singularity.utils import check_install
from singularity.app import make_tree
from glob import glob
import argparse
import sys
import os
def main():
parser = argparse.ArgumentParser(
description="package Singularity containers for singularity hub.")
parser.add_argument("--image", dest='image', help="full path to singularity image (for use with --package)", type=str, default=None)
parser.add_argument("--docker2singularity", dest='docker', help="name of Docker image to export to Singularity (does not include runscript cmd)", type=str, default=None)
parser.add_argument("--outfolder", dest='outfolder', help="full path to folder for output, if not specified, will go to pwd", type=str, default=None)
parser.add_argument("--runscript", dest='runscript', help="specify extension to generate a runscript template in the PWD, or include --outfolder to change output directory. Currently supported types are py (python).", type=str, default=None)
parser.add_argument('--package', help="package a singularity container for singularity hub", dest='package', default=False, action='store_true')
parser.add_argument('--tree', help="view the guts of an image or package.", dest='tree', default=False, action='store_true')
try:
args = parser.parse_args()
    except SystemExit:
parser.print_help()
sys.exit(0)
# Output folder will be pwd if not specified
    if args.outfolder is None:
output_folder = os.getcwd()
else:
output_folder = args.outfolder
# If the user wants a runscript template.
    if args.runscript is not None:
get_runscript_template(output_folder=output_folder,
script_name="singularity",
language=args.runscript)
# If the user wants to export docker2singularity!
    if args.docker is not None:
docker2singularity(output_folder=output_folder,
docker_image=args.docker)
# We can only continue if singularity is installed
    if check_install():
# We also must have an image to work with
        if args.image is not None:
image = os.path.abspath(args.image)
# Make sure the image exists!
if os.path.exists(image):
# the user wants to make a tree for an image
                if args.tree:
make_tree(image)
# The user wants to package the image
                elif args.package:
package(image_path=image,
output_folder=output_folder,
runscript=True,
software=True)
else:
print("Please specify a singularity image with --image.")
if __name__ == '__main__':
main()
|
import re
# 1. Amazon.com
str = "1-16 of 402 results for"
match = re.findall(r'([0-9\.\,]+) results', str)
print(int(match[0].replace(".", "").replace(",", "")))
# 2. Amazon.fr
str = "1-16 sur 28 résultats pour"
match = re.findall(r'([0-9\.\,]+) résultats', str)
print(int(match[0].replace(".", "").replace(",", "")))
# 3. Amazon.co.uk
str = "1-16 of 28 results for"
match = re.findall(r'([0-9\.\,]+) results', str)
print(int(match[0].replace(".", "").replace(",", "")))
# 4. Amazon.es
str = "1-16 de más de 4.000 resultados para"
match = re.findall(r'([0-9\.\,]+) resultados', str)
print(int(match[0].replace(".", "").replace(",", "")))
# 5. Amazon.it
str = "1-16 dei più di 4.000 risultati in"
match = re.findall(r'([0-9\.\,]+) risultati', str)
print(int(match[0].replace(".", "").replace(",", "")))
# 6. Amazon.de
str = "1-16 von 28 Ergebnissen oder Vorschlägen für "
match = re.findall(r'([0-9\.\,]+) Ergebnissen', str)
print(int(match[0].replace(".", "").replace(",", "")))
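# Hedged consolidation sketch (hypothetical helper, not in the original): the
# six locale-specific blocks above differ only in the keyword that follows
# the count, so a keyword list covers them all.
RESULT_KEYWORDS = ['results', 'résultats', 'resultados', 'risultati', 'Ergebnissen']
def parse_result_count(text):
    for keyword in RESULT_KEYWORDS:
        match = re.findall(r'([0-9\.\,]+) ' + keyword, text)
        if match:
            # strip both '.' and ',' thousands separators before parsing
            return int(match[0].replace(".", "").replace(",", ""))
    return None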
|
def MAPE(pred, test):
    pred, test = np.array(pred), np.array(test)
    total = 0
    for i in range(7):  # assumes a 7-day forecast window
        if test[i] != 0:
            total += abs((test[i] - pred[i]) / test[i])
    return (total / 7) * 100
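# Hedged worked example: for pred = [1]*7 and test = [2]*7 every term is
# |2 - 1| / 2 = 0.5, so MAPE returns (0.5 * 7 / 7) * 100 = 50.0.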
def X_generator(X):
X_1, X_2, X_3, X_4, X_5, X_6, X_7, X_8, X_9, X_10, X_sqrt = X, np.power(X, 2), np.power(X, 3), np.power(X, 4), np.power(X, 5), np.power(X, 6), np.power(X, 7), np.power(X, 8), np.power(X, 9), np.power(X, 10), np.sqrt(X)
X_set = np.column_stack((X_1, X_2, X_3, X_4, X_5, X_6, X_7, X_8, X_sqrt))
return X_set
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression, Lasso, Ridge #import linear, norm1, norm2 method
Data = pd.read_excel("COVID-19-10-08.xlsx")
Country_List = Data['countriesAndTerritories'].unique() #build country list
Output = pd.DataFrame(columns = Country_List) # declare the output dataframe
for country in Country_List:
df = Data.loc[Data['countriesAndTerritories'] == country]
    data = df['cases'].values # choose "cases" as the data series
data = data[::-1]
date = np.arange(1, len(data) + 8)
X, Y = date[:len(date) - 7], data
X = X.reshape(-1, 1)
Y = Y.reshape(-1, 1)
X_test = X_generator(date[len(date) - 7:])
Y_test = Y[len(Y) - 7:]
Y_selftest = Y[len(Y) - 7:]
    minmape = 999999 # best MAPE found so far
    days = 50 # default training-window length in days
    for i in range(4, 21): # try several window lengths and keep the one with the lowest self-test MAPE
if len(data) > i * 10 + 1:
length = i * 10 + 1
else:
length = len(data)
X_selftrain = X_generator(X[len(X) - length:len(X) - 7])
Y_selftrain = Y[len(Y) - length:len(Y) - 7]
X_selftest = X_generator(X[len(X) - 7:])
# Linear
linearModel = LinearRegression(normalize=True)
linearModel.fit(X_selftrain, Y_selftrain)
#Lasso
LassoModel = Lasso(normalize=True)
LassoModel.fit(X_selftrain, Y_selftrain)
#Ridge
RidgeModel = Ridge(normalize=True)
RidgeModel.fit(X_selftrain, Y_selftrain)
pred = RidgeModel.predict(X_selftest) #Predict
        for x in np.nditer(pred, op_flags=['readwrite']): # truncate to integers and zero out negative predictions
x[...] = int(x)
if x < 0:
x[...] = 0
mape = MAPE(pred, Y_selftest) #calculates mape
if mape < minmape:
minmape = mape
days = length
X_train = X_generator(X[len(X) - days + 7:])
Y_train = Y[len(Y) - days + 7:]
# Linear
linearModel = LinearRegression(normalize=True)
linearModel.fit(X_train, Y_train)
#Lasso
LassoModel = Lasso(normalize=True)
LassoModel.fit(X_train, Y_train)
#Ridge
RidgeModel = Ridge(normalize=True)
RidgeModel.fit(X_train, Y_train)
pred = RidgeModel.predict(X_test)
    for x in np.nditer(pred, op_flags=['readwrite']): # truncate to integers and zero out negative predictions
x[...] = int(x)
if x < 0:
x[...] = 0
pred = np.reshape(pred, 7)
Output[country] = pred
Output.to_csv('COVID-19_Predict_Final.csv', index=True) #output prediction
|
import psutil
def count(iterable): # used for counting instances in an iterable
    return sum(1 for _ in iterable)
def list_processes(procs):
for proc in procs:
try:
print("Name: {} PID: {}".format(proc.name(), proc.pid))
# print(len(proc.open_files()))
# print(proc.open_files())
        except psutil.Error:
            print("Error reading process {}".format(proc.pid))
def get_handles(procs):
for proc in procs:
try:
files = proc.open_files()
print("\n{} has {} handles\n".format(proc.name(), len(files)))
            for f in files: # loop over the open files and print only the path
                print(f.path)
        except psutil.Error:
            print("Error reading process {}".format(proc.pid))
print('gathering processes')
procs = list(psutil.process_iter()) # convert to list since we need to reuse data
print('gathering finished')
print('{} processes found in total'.format(len(procs)))
print('Listing processes')
list_processes(procs)
print('Done listing processes')
print('Getting handles')
get_handles(procs)
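# Hedged usage sketch for the otherwise-unused count() helper (hypothetical):
# python_procs = count(p for p in procs if 'python' in p.name().lower())
# print('{} python processes running'.format(python_procs))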
|
import graphene
import CookBook.schema
class Query(CookBook.schema.Query, graphene.ObjectType):
pass
schema = graphene.Schema(query=Query)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-08-24 08:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CctvWorldInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('news_id', models.CharField(default=0, max_length=50, verbose_name='新闻ID')),  # "news ID"
                ('url', models.CharField(default='', max_length=500, verbose_name='地址链接')),  # "address link"
                ('title', models.CharField(default='', max_length=200, verbose_name='标题')),  # "title"
                ('summary', models.TextField(default='', verbose_name='简介')),  # "summary"
                ('label', models.CharField(default='', max_length=50, verbose_name='标签')),  # "label"
                ('from_news', models.CharField(default='CCTV', max_length=50, verbose_name='来源')),  # "source"
                ('content', models.TextField(default='', verbose_name='内容')),  # "content"
                ('release_time', models.CharField(default='0000-00-00 00:00:00', max_length=200)),
                ('create_date', models.DateTimeField(auto_now=True, verbose_name='创建时间')),  # "creation time"
],
),
]
|
# Pass along HARM's own diagnostics for comparison
# TODO implement
#diag = io.load_log(path)
#out_full['t_d'] = diag['t']
#out_full['Mdot_d'] = diag['mdot']
#out_full['Phi_d'] = diag['Phi']
#out_full['Ldot_d'] = diag['ldot']
#out_full['Edot_d'] = diag['edot']
#out_full['Lum_d'] = diag['lum_eht']
#out_full['divbmax_d'] = diag['divbmax']
|
from . import operaciones
from . import tipoOperacion
from . import tipoTipoOperacion
|
import define
from binance_f import RequestClient
from binance_f.constant.test import *
from binance_f.base.printobject import *
from binance_f.model.constant import *
import time
import cancelorders
import balance
def closetrade(symbol , data , position , intrade , balancemoney , file ,signal , symbolintrade):
if signal == "BUY" and position[define.intrade] == 1:
position[define.intrade] = 0
position[define.intrade] = 0
position[define.closeprice] = data[define.price]
position[define.lasttraderesult] = (position[define.closeprice] - position[define.openprice])/position[define.openprice]
file.write("ofline sell close on")
file.write(str(symbol))
file.write('\n')
print("ofline sell close on")
if position[define.sleep] == 0 and intrade == 1:
symbolintrade = -1
print (1)
intrade = 0
cancelorders.cancelorders(symbol , file)
request_client = RequestClient(api_key=define.api_key, secret_key=define.secret_key)
i = 0
while i == 0:
try:
print (2)
result = request_client.post_order(symbol=symbol, side=OrderSide.BUY, ordertype=OrderType.MARKET,positionSide="BOTH", reduceOnly='true',quantity=position[define.quantity])
print (3)
file.write("closetrade answer = ")
file.write(str(result))
file.write("\n")
i = 1
except Exception as e:
print('connection error')
print(e)
i = 0
file.write("online sell close on")
file.write(str(symbol))
file.write('\n')
i=0
while i == 0:
try:
balancemoney = balance.balance()
file.write("Balance = ")
file.write(str(balancemoney))
file.write("\n")
i = 1
except Exception as e:
print('connection error')
print(e)
i = 0
if signal == "SELL" and position[define.intrade] == 1:
file.write("ofline buy close on")
file.write(str(symbol))
file.write('\n')
print("ofline buy close on")
position[define.intrade] = 0
position[define.intrade] = 0
position[define.closeprice] = data[define.price]
position[define.lasttraderesult] = -1 *(position[define.closeprice] - position[define.openprice])/position[define.openprice]
if position[define.sleep] == 0 and intrade == 1:
symbolintrade = -1
intrade = 0
cancelorders.cancelorders(symbol , file)
request_client = RequestClient(api_key=define.api_key, secret_key=define.secret_key)
print (1)
i = 0
while i == 0:
try:
print(2)
result = request_client.post_order(symbol=symbol, side=OrderSide.SELL, ordertype=OrderType.MARKET, positionSide="BOTH", reduceOnly='true',quantity=position[define.quantity])
print (3)
file.write("closetrade answer = ")
file.write(str(result))
file.write("\n")
i = 1
except Exception as e:
print('connection error')
print(e)
i = 0
file.write("online buy close on")
file.write(str(symbol))
file.write('\n')
i=0
while i == 0:
try:
balancemoney = balance.balance()
file.write("Balance = ")
file.write(str(balancemoney))
file.write("\n")
i = 1
except Exception as e:
print('connection error')
print(e)
i = 0
return [ position , intrade , balancemoney , symbolintrade]
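# Hedged refactoring sketch (hypothetical; not part of the original module):
# the four identical while/try loops above retry an API call until it
# succeeds, and could be factored into a single helper.
def retry_forever(fn, *args, **kwargs):
    while True:
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            print('connection error')
            print(e)
# e.g. balancemoney = retry_forever(balance.balance)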
|
from configparser import ConfigParser
config = ConfigParser()
config.read('config.ini')
if not config.has_section('main'):  # add_section raises DuplicateSectionError if config.ini already has [main]
    config.add_section('main')
config.set('main', 'number_of_ISPs', '4')
config.set('main', 'number_of_SIs', '10')
config.set('main', 'number_of_consumers', '400')
config.set('main', 'number_of_steps', '20')
config.set('main', 'market_share', '2, 4, 3, 1')
config.set('main', 'investment_URLLC', '80')
config.set('main', 'investment_mMTC', '20')
config.set('main', 'URLLC_spectrum_capacity', '10')
config.set('main', 'mMTC_spectrum_capacity', '20')
config.set('main', 'URLLC_infrastructure_cost', '80')
config.set('main', 'mMTC_infrastructure_cost', '20')
config.set('main', 'agent_year_update', '50, 75, 150, 75, 50')
# market_share=[2, 4, 3, 1]
# investment_URLLC=80, investment_mMTC=20, URLLC_spectrum_capacity=10,
# mMTC_spectrum_capacity=20, URLLC_infrastructure_cost=80, mMTC_infrastructure_cost=20
with open('config.ini', 'w') as f:
config.write(f)
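# Hedged read-back sketch (illustrative; not in the original script): options
# come back as strings, so numeric and list values need explicit parsing.
reader = ConfigParser()
reader.read('config.ini')
n_isps = reader.getint('main', 'number_of_ISPs')
market_share = [int(s) for s in reader.get('main', 'market_share').split(',')]
print(n_isps, market_share)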
|
from datetime import date
from unittest import TestCase
import pytest
from mock import patch
from .helpers import example_file
from popolo_data.importer import Popolo
from approx_dates.models import ApproxDate
EXAMPLE_SINGLE_MEMBERSHIP = b'''
{
"persons": [
{
"id": "SP-937-215",
"name": "Jean-Luc Picard"
}
],
"organizations": [
{
"id": "starfleet",
"name": "Starfleet"
}
],
"memberships": [
{
"person_id": "SP-937-215",
"organization_id": "starfleet",
"role": "student",
"start_date": "2327-12-01"
}
]
}
'''
EXAMPLE_MEMBERSHIP_ALL_FIELDS = b'''
{
"areas": [
{
"id": "dunny-on-the-wold",
"name": "Dunny-on-the-Wold"
}
],
"events": [
{
"classification": "legislative period",
"id": "pitt",
"name": "Parliamentary Period",
"start_date": "1783-12-19",
"end_date": "1801-01-01"
}
],
"persons": [
{
"id": "1234",
"name": "Edmund Blackadder"
}
],
"posts": [
{
"id": "dunny-on-the-wold-seat",
"label": "Member of Parliament for Dunny-on-the-Wold"
}
],
"organizations": [
{
"id": "commons",
"name": "House of Commons"
},
{
"id": "adder",
"name": "Adder Party",
"classification": "party"
}
],
"memberships": [
{
"area_id": "dunny-on-the-wold",
"end_date": "1784-05-23",
"legislative_period_id": "pitt",
"on_behalf_of_id": "adder",
"organization_id": "commons",
"person_id": "1234",
"post_id": "dunny-on-the-wold-seat",
"role": "candidates",
"start_date": "1784-03-01"
}
]
}
'''
EXAMPLE_MEMBERSHIP_ALL_FIELDS_NO_DATES = b'''
{
"areas": [
{
"id": "dunny-on-the-wold",
"name": "Dunny-on-the-Wold"
}
],
"events": [
{
"classification": "legislative period",
"id": "pitt",
"name": "Parliamentary Period",
"start_date": "1783-12-19",
"end_date": "1801-01-01"
}
],
"persons": [
{
"id": "1234",
"name": "Edmund Blackadder"
}
],
"posts": [
{
"id": "dunny-on-the-wold-seat",
"label": "Member of Parliament for Dunny-on-the-Wold"
}
],
"organizations": [
{
"id": "commons",
"name": "House of Commons"
},
{
"id": "adder",
"name": "Adder Party",
"classification": "party"
}
],
"memberships": [
{
"area_id": "dunny-on-the-wold",
"legislative_period_id": "pitt",
"on_behalf_of_id": "adder",
"organization_id": "commons",
"person_id": "1234",
"post_id": "dunny-on-the-wold-seat",
"role": "candidates"
}
]
}
'''
class TestMemberships(TestCase):
def test_empty_file_gives_no_memberships(self):
with example_file(b'{}') as filename:
popolo = Popolo.from_filename(filename)
assert len(popolo.memberships) == 0
def test_membership_should_not_have_name(self):
with example_file(EXAMPLE_SINGLE_MEMBERSHIP) as fname:
popolo = Popolo.from_filename(fname)
assert len(popolo.memberships) == 1
m = popolo.memberships[0]
with pytest.raises(AttributeError):
m.name
def test_membership_has_person_id_and_organisation_id(self):
with example_file(EXAMPLE_SINGLE_MEMBERSHIP) as fname:
popolo = Popolo.from_filename(fname)
assert len(popolo.memberships) == 1
m = popolo.memberships[0]
assert m.person_id == 'SP-937-215'
assert m.organization_id == 'starfleet'
def test_membership_returns_legislative_period_start_and_end(self):
        # effective dates use the membership's own start and end dates when present
with example_file(EXAMPLE_MEMBERSHIP_ALL_FIELDS) as fname:
popolo = Popolo.from_filename(fname)
m = popolo.memberships[0]
print (m.effective_start_date)
assert m.effective_start_date == ApproxDate.from_iso8601('1784-03-01')
assert m.effective_end_date == ApproxDate.from_iso8601('1784-05-23')
        # falls back to the legislative period's start/end dates when the membership has none of its own
with example_file(EXAMPLE_MEMBERSHIP_ALL_FIELDS_NO_DATES) as fname:
popolo = Popolo.from_filename(fname)
m = popolo.memberships[0]
assert m.effective_start_date == ApproxDate.from_iso8601('1783-12-19')
assert m.effective_end_date == ApproxDate.from_iso8601('1801-01-01')
def test_membership_has_role(self):
with example_file(EXAMPLE_SINGLE_MEMBERSHIP) as fname:
popolo = Popolo.from_filename(fname)
assert len(popolo.memberships) == 1
m = popolo.memberships[0]
assert m.role == 'student'
def test_membership_foreign_keys(self):
with example_file(EXAMPLE_MEMBERSHIP_ALL_FIELDS) as fname:
popolo = Popolo.from_filename(fname)
assert len(popolo.memberships) == 1
m = popolo.memberships[0]
assert m.area_id == 'dunny-on-the-wold'
assert m.on_behalf_of_id == 'adder'
assert m.legislative_period_id == 'pitt'
assert m.post_id == 'dunny-on-the-wold-seat'
    def test_get_start_date_from_membership(self):
with example_file(EXAMPLE_SINGLE_MEMBERSHIP) as fname:
popolo = Popolo.from_filename(fname)
assert len(popolo.memberships) == 1
m = popolo.memberships[0]
assert m.start_date == date(2327, 12, 1)
def test_get_sentinel_end_date_from_membership(self):
with example_file(EXAMPLE_SINGLE_MEMBERSHIP) as fname:
popolo = Popolo.from_filename(fname)
m = popolo.memberships[0]
assert m.end_date.future
def test_organization_repr(self):
with example_file(EXAMPLE_SINGLE_MEMBERSHIP) as fname:
popolo = Popolo.from_filename(fname)
assert len(popolo.memberships) == 1
m = popolo.memberships[0]
assert repr(m) == "<Membership: 'SP-937-215' at 'starfleet'>"
def test_equality_of_memberships(self):
with example_file(EXAMPLE_SINGLE_MEMBERSHIP) as fname:
# Create the same membership via two Popolo objects - they
# should still be equal.
popolo_a = Popolo.from_filename(fname)
assert len(popolo_a.memberships) == 1
m_a = popolo_a.memberships[0]
popolo_b = Popolo.from_filename(fname)
assert len(popolo_b.memberships) == 1
m_b = popolo_b.memberships[0]
assert m_a == m_b
assert not (m_a != m_b)
EXAMPLE_MULTIPLE_MEMBERSHIPS = b'''
{
"persons": [
{
"id": "SP-937-215",
"name": "Jean-Luc Picard"
},
{
"id": "SC-231-427",
"name": "William Riker"
}
],
"organizations": [
{
"id": "starfleet",
"name": "Starfleet"
},
{
"id": "gardening-club",
"name": "Boothby's Gardening Club"
}
],
"memberships": [
{
"person_id": "SP-937-215",
"organization_id": "starfleet",
"start_date": "2322",
"end_date": "2322"
},
{
"person_id": "SP-937-215",
"organization_id": "starfleet",
"start_date": "2323-12-01",
"end_date": "2327-12-01"
},
{
"person_id": "SP-937-215",
"organization_id": "gardening-club",
"start_date": "2323-01-01",
"end_date": "2327-11-31"
},
{
"person_id": "SC-231-427",
"organization_id": "starfleet",
"start_date": "2357-03-08"
}
]
}
'''
class TestPersonMemberships(TestCase):
def test_person_memberships_method(self):
with example_file(EXAMPLE_MULTIPLE_MEMBERSHIPS) as fname:
popolo = Popolo.from_filename(fname)
person = popolo.persons.first
person_memberships = person.memberships
assert len(person_memberships) == 3
assert popolo.memberships[0] == person_memberships[0]
assert popolo.memberships[1] == person_memberships[1]
assert popolo.memberships[2] == person_memberships[2]
def test_membership_person_method(self):
with example_file(EXAMPLE_MULTIPLE_MEMBERSHIPS) as fname:
popolo = Popolo.from_filename(fname)
person_picard = popolo.persons[0]
m = popolo.memberships[0]
assert m.person == person_picard
def test_membership_organization_method(self):
with example_file(EXAMPLE_MULTIPLE_MEMBERSHIPS) as fname:
popolo = Popolo.from_filename(fname)
org_starfleet = popolo.organizations.first
m = popolo.memberships[0]
assert m.organization == org_starfleet
def test_membership_on_behalf_of_method(self):
with example_file(EXAMPLE_MEMBERSHIP_ALL_FIELDS) as fname:
popolo = Popolo.from_filename(fname)
org_adder_party = popolo.organizations[1]
m = popolo.memberships[0]
assert m.on_behalf_of == org_adder_party
def test_membership_area_method(self):
with example_file(EXAMPLE_MEMBERSHIP_ALL_FIELDS) as fname:
popolo = Popolo.from_filename(fname)
area_dunny = popolo.areas[0]
m = popolo.memberships[0]
assert m.area == area_dunny
def test_membership_post_method(self):
with example_file(EXAMPLE_MEMBERSHIP_ALL_FIELDS) as fname:
popolo = Popolo.from_filename(fname)
post_dunny = popolo.posts[0]
m = popolo.memberships[0]
assert m.post == post_dunny
def test_membership_legislative_period(self):
with example_file(EXAMPLE_MEMBERSHIP_ALL_FIELDS) as fname:
popolo = Popolo.from_filename(fname)
event_pitt = popolo.events[0]
m = popolo.memberships[0]
assert m.legislative_period == event_pitt
def test_membership_current_at_true(self):
with example_file(EXAMPLE_MEMBERSHIP_ALL_FIELDS) as fname:
popolo = Popolo.from_filename(fname)
m = popolo.memberships[0]
assert m.current_at(date(1784, 4, 30))
def test_membership_current_at_false_before(self):
with example_file(EXAMPLE_MEMBERSHIP_ALL_FIELDS) as fname:
popolo = Popolo.from_filename(fname)
m = popolo.memberships[0]
assert not m.current_at(date(1600, 1, 1))
def test_membership_current_at_false_after(self):
with example_file(EXAMPLE_MEMBERSHIP_ALL_FIELDS) as fname:
popolo = Popolo.from_filename(fname)
m = popolo.memberships[0]
assert not m.current_at(date(1800, 1, 1))
@patch('popolo_data.base.date')
def test_membership_current_true(self, mock_date):
mock_date.today.return_value = date(1784, 4, 30)
mock_date.side_effect = lambda *args, **kw: date(*args, **kw)
with example_file(EXAMPLE_MEMBERSHIP_ALL_FIELDS) as fname:
popolo = Popolo.from_filename(fname)
m = popolo.memberships[0]
assert m.current
def test_hash_magic_method(self):
with example_file(EXAMPLE_MULTIPLE_MEMBERSHIPS) as fname:
popolo_a = Popolo.from_filename(fname)
popolo_b = Popolo.from_filename(fname)
set_of_memberships = set()
for m in popolo_a.memberships:
set_of_memberships.add(m)
for m in popolo_b.memberships:
set_of_memberships.add(m)
assert len(set_of_memberships) == 4
def test_equality_and_inequality_not_implemented(self):
with example_file(EXAMPLE_MULTIPLE_MEMBERSHIPS) as fname:
popolo = Popolo.from_filename(fname)
m = popolo.memberships.first
assert not (m == "a string, not a person")
assert (m != "a string not a person")
def test_person_membership_filtering(self):
with example_file(EXAMPLE_MULTIPLE_MEMBERSHIPS) as fname:
popolo = Popolo.from_filename(fname)
person = popolo.persons.first
person_memberships = person.memberships
starfleet_memberships = \
person_memberships.filter(organization_id="starfleet")
assert len(starfleet_memberships) == 2
def test_person_membership_multiple_filtering(self):
with example_file(EXAMPLE_MULTIPLE_MEMBERSHIPS) as fname:
popolo = Popolo.from_filename(fname)
person = popolo.persons.first
person_memberships = person.memberships
starfleet_memberships = \
person_memberships.filter(organization_id="starfleet")
latest_starfleet_membership = \
starfleet_memberships.filter(start_date=date(2323, 12, 1))
assert len(latest_starfleet_membership) == 1
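# Hedged sketch of what the example_file helper could look like (hypothetical;
# the real implementation lives in .helpers, so this stays commented out): a
# context manager that writes the JSON bytes to a temp file and yields its path.
#
# import contextlib, os, tempfile
#
# @contextlib.contextmanager
# def example_file(content_bytes):
#     with tempfile.NamedTemporaryFile(delete=False) as f:
#         f.write(content_bytes)
#     try:
#         yield f.name
#     finally:
#         os.remove(f.name)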
|
from flask import Flask, request, redirect, render_template, session, flash
from mysqlconnection import MySQLConnector
import re
email_regex = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
app = Flask(__name__)
app.secret_key = "secret"
mysql = MySQLConnector(app,'emails_assignment')
def success():
session['color_test'] = True
flash("SUCCESS! " + request.form['email'] + " IS VALID, AND WAS ADDED TO OUR DATABASE.")
query = "SELECT * FROM emails"
emails = mysql.query_db(query)
return emails
def d_success():
session['color_test'] = True
flash("SUCCESS! " + request.form['d_email'] + " IS VALID, AND WAS DELETED FROM OUR DATABASE.")
query = "SELECT * FROM emails"
emails = mysql.query_db(query)
return emails
@app.route('/')
def index():
session['color_test'] = False
session['test'] = False
return render_template('index.html')
@app.route('/process', methods=['POST'])
def process():
if len(request.form['email']) < 1:
flash("Email must not be blank!")
return redirect('/')
elif not email_regex.match(request.form['email']):
flash("Invalid email address!")
return redirect('/')
else:
session['test'] = True
query = "INSERT INTO emails (email, created_at) VALUES (:email, NOW())"
data = {
'email': request.form['email']
}
mysql.query_db(query, data)
emails = success()
return render_template("index.html", all_emails=emails)
@app.route('/delete', methods=['POST'])
def delete():
if len(request.form['d_email']) < 1:
flash("Email must not be blank!")
return redirect('/')
elif not email_regex.match(request.form['d_email']):
flash("Invalid email address!")
return redirect('/')
else:
session['test'] = True
query = "DELETE FROM emails WHERE email = :email"
data = {
'email': request.form['d_email']
}
mysql.query_db(query, data)
emails = d_success()
return render_template("index.html", all_emails=emails)
app.run(debug=True)
|
from django.urls import path, include
from rest_framework import routers
from api import views
router = routers.DefaultRouter()
router.register(r'jogo', views.JogoViewSet, 'jogo')
router.register(r'simular', views.ExecutarJogoViewSet, 'simular')
urlpatterns = [
path('', include(router.urls))
]
|
from .base_processing_node import BaseProcessingNode, ProcessingArtifact
class DerivedPreviewProcessingNode(BaseProcessingNode):
def __init__(self, available_artifacts, outputs):
super(DerivedPreviewProcessingNode, self).__init__(available_artifacts, outputs)
self.fmt = 'json'
def get_artifacts(self):
for artifact in self.available_artifacts:
if artifact.datahub_type == 'derived/csv':
datahub_type = 'derived/preview'
resource_name = artifact.resource_name + '_preview'
output = ProcessingArtifact(
datahub_type, resource_name,
[artifact], [],
[('assembler.update_resource',
{
'name': artifact.resource_name,
'update': {
'name': resource_name,
'format': self.fmt,
'path': 'data/{}.{}'.format(resource_name, self.fmt),
'datahub': {
'type': "derived/preview",
'derivedFrom': [
artifact.resource_name.replace('_csv', '')
]
},
"forView": [
'datahub-preview-{}'.format(resource_name)
]
}
}),
('assembler.load_preview', {'limit': 2000}),
('assembler.load_views', {'limit': 2000})],
True, 'Generating views', 2000, content_type='application/json'
)
yield output
|