hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
65a034d206cac79a6773ec4d4862bb22de93076b | 13,918 | py | Python | servertools/video.py | barretobrock/server-tools | 2f2b899994df90817686b2232d3d65e53defd8c2 | [
"MIT"
] | 1 | 2021-03-16T19:57:49.000Z | 2021-03-16T19:57:49.000Z | servertools/video.py | barretobrock/server-tools | 2f2b899994df90817686b2232d3d65e53defd8c2 | [
"MIT"
] | 3 | 2021-06-08T21:37:55.000Z | 2021-06-13T01:24:59.000Z | servertools/video.py | barretobrock/server-tools | 2f2b899994df90817686b2232d3d65e53defd8c2 | [
"MIT"
] | null | null | null | import re
import os
import tempfile
from datetime import datetime as dt
from typing import (
Optional,
List,
Tuple
)
import numpy as np
import cv2
import imutils
from moviepy.editor import (
VideoFileClip,
concatenate_videoclips,
ImageSequenceClip,
CompositeAudioClip,
VideoClip
)
class VidTools:
    """General-purpose video editing helpers built on moviepy/OpenCV."""
    # Defaults shared by all instances
    temp_dir = tempfile.gettempdir()
    FPS = 20
    RESIZE_PCT = 0.5
    SPEEDX = 6

    def __init__(self, vid_w: float = 640, vid_h: float = 360, fps: float = FPS, resize_perc: float = RESIZE_PCT,
                 speed_x: float = SPEEDX):
        """Store output dimensions plus framerate/resize/speed settings."""
        # Target output dimensions
        self.vid_w = vid_w
        self.vid_h = vid_h
        # Rendering parameters
        self.fps = fps
        self.resize_perc = resize_perc
        self.speed_x = speed_x
@staticmethod
def _get_trim_range_from_filename(fpath: str, start: dt, end: dt) -> Tuple[int, int]:
"""Looks at the filename, returns a start and end time to trim the clip with
based on the required start and end dates
"""
# 1. get seconds from clip start to motion start
# 2. get seconds from clip end to motion end
# 3. add as subclip((secs_from_start: float), (secs_from_end: float))
clip_ymd = re.search(r'\d{4}-\d{2}-\d{2}', fpath).group()
clip_st, clip_end = [dt.strptime(f'{clip_ymd} {x[0]}', '%Y-%m-%d %H:%M:%S')
for x in re.findall(r'((\d+:){2}\d{2})', fpath)]
# Determine if we need to crop the clip at all
secs_from_start = (start - clip_st).seconds if start > clip_st else 0
secs_from_end = -1 * (clip_end - end).seconds if clip_end > end else None
return secs_from_start, secs_from_end
    def make_clip_from_filenames(self, start_dt: dt, end_dt: dt, file_list: List[str],
                                 trim_files: bool = True, prefix: str = 'motion') -> str:
        """Takes in a list of file paths, determines the cropping necessary
        based on the timerange in the path and downloads the video clip to a temp filepath

        Args:
            start_dt: start of the motion window to keep
            end_dt: end of the motion window to keep
            file_list: paths of the source clips, concatenated in list order
            trim_files: if True, trim each clip to the [start_dt, end_dt] window
                using timestamps parsed from its filename
            prefix: filename prefix for the resulting temp file
        """
        clips = []
        for dl_file in file_list:
            clip = VideoFileClip(dl_file)
            if trim_files:
                # Crop the clip to the portion overlapping the motion window
                trim_st, trim_end = self._get_trim_range_from_filename(dl_file, start_dt, end_dt)
                clip = clip.subclip(trim_st, trim_end)
            # Downscale and speed up the clip per the instance settings
            clip = (clip.resize(self.resize_perc).speedx(self.speed_x))
            # Append to our clips
            clips.append(clip)
        final = concatenate_videoclips(clips, method='compose')
        # %T formats the datetimes as HH:MM:SS
        fpath = os.path.join(self.temp_dir, f'{prefix}_{start_dt:%T}_to_{end_dt:%T}.mp4')
        final.write_videofile(fpath)
        return fpath
def concat_files(self, filepath_list: List[str]) -> str:
"""Concatenates a list of mp4 filepaths into one & saves it"""
clips = []
for filepath in filepath_list:
clip = VideoFileClip(filepath)
clips.append(clip)
final = concatenate_videoclips(clips, method='compose')
final_fpath = os.path.join(self.temp_dir, 'motion_concatenated_file.mp4')
final.write_videofile(final_fpath)
return final_fpath
def draw_on_motion(self, fpath: str, frames: List[np.ndarray] = None, min_area: int = 500,
min_frames: int = 10, threshold: int = 25, ref_frame_turnover: float = 20,
buffer_s: float = 1, motion_frames_only: bool = True) -> \
Tuple[bool, Optional[str], Optional[float]]:
"""Draws rectangles around motion items and re-saves the file
If True is returned, the file has some motion highlighted in it, otherwise it doesn't have any
Args:
fpath: the path to the mp4 file. can be None if frames is not None
frames: a list of frames to process instead of reading in from file.
min_area: the minimum contour area (pixels)
min_frames: the threshold of frames the final file must have. Fewer than this will return False
threshold: min threshold (out of 255). used when calculating img differences
ref_frame_turnover: the number of consecutive frames to use a single reference frame on
before resetting the reference
buffer_s: the seconds of buffer to include in video output before and after motion events
motion_frames_only: if True, will keep only frames with detectable motion on them
Returns:
tuple(
- bool, whether motion was detected in any of the frames
- filepath of the changed file (if any)
- the duration of the file
)
NB! threshold probably shouldn't exceed 254
"""
if frames is None:
clip = VideoFileClip(fpath)
frames = [x for x in clip.iter_frames()]
# Set the reference frame
ref_frame = clip.get_frame(0)
elif frames is not None:
clip = ImageSequenceClip(frames, fps=self.fps)
ref_frame = frames[0]
else:
raise ValueError('Arguments \'fpath\' and \'frames\' were both None. '
'One of these must not be empty in order for the script to function.')
keep_frames = [] # For determining which frames have motion
for i, frame in enumerate(frames):
rects, contours, drawn_frame = self._detect_contours(
ref_frame, frame, min_area, threshold, unique_only=False, color_correct_frame=True)
if motion_frames_only:
if rects > 0:
# We've drawn some rectangles on this
keep_frames.append(i)
else:
# Keeping all frames
keep_frames.append(i)
# Replace frame with drawn
frames[i] = drawn_frame
if i % ref_frame_turnover == 0:
print(f'Frame {i} reached.')
if i > 0:
# Reset the reference frame
ref_frame = frame
if len(keep_frames) == 0:
# Exit method... Nothing was determined to keep
return False, None, None
# Now loop through the frames we've marked and process them into clips
# Determine the amount of buffer frames from the seconds of buffer
buffer_frame = int(round(clip.fps * buffer_s, 0))
# Begin calculating the sequences
sequences = [] # For holding lists of sequences
sequence_frames = [keep_frames[0]]
for f in keep_frames[1:]:
last_seq = sequence_frames[-1]
if f == last_seq + 1:
sequence_frames.append(f)
elif f < last_seq + buffer_frame:
# Though our sequence isn't consecutive, it falls inside of the buffer. Add it
sequence_frames.append(f)
else:
# Frame is definitely outside the buffer. Make a new sequence
sequences.append((sequence_frames[0], sequence_frames[-1]))
sequence_frames = [f]
if len(sequence_frames) > 0:
sequences.append((sequence_frames[0], sequence_frames[-1]))
processed_clips = []
for start, end in sequences:
if end - start >= min_frames:
processed_clips.append(
self.develop_drawn_clip(org_clip=clip, sq_frames=[start, end], all_frames=frames,
buffer_s=buffer_s)
)
if len(processed_clips) > 0:
final_clip = concatenate_videoclips(processed_clips)
final_clip.write_videofile(fpath)
return True, fpath, final_clip.duration
return False, None, None
    @staticmethod
    def develop_drawn_clip(org_clip: VideoFileClip, sq_frames: List[float], all_frames: List[np.ndarray],
                           buffer_s: float = 1) -> VideoClip:
        """Calculates subclip start and end time, creates a subclip to reference.
        Combines the drawn frames (with buffer) before transforming into a video clip
        Adds original clip's audio to the video containing drawn frames.

        Args:
            org_clip: the original clip to leverage audio, duration data from
            sq_frames: the sequence of frames that have motion annotations draw in them
            all_frames: the full list of frames that we'll be slicing
            buffer_s: the seconds of buffer to add before and after the motion area
        """
        duration = org_clip.duration
        tot_frames = len(all_frames)
        # Calculate number of frames to buffer before and after motion areas
        buffer_fr = int(org_clip.fps * buffer_s)
        # Calculate the start and end frames with the buffers
        # NOTE(review): only the tail receives the buffer here; the head uses
        #   the raw first motion frame. Confirm whether a leading buffer
        #   (sq_frames[0] - buffer_fr) was intended.
        st_with_buffer = sq_frames[0]
        end_with_buffer = sq_frames[-1] + buffer_fr
        # Clamp frame positions to the valid index range
        start_frame_pos = st_with_buffer if st_with_buffer > 0 else 0
        end_frame_pos = end_with_buffer if end_with_buffer < tot_frames else tot_frames - 1
        # Calculate the start and end times (seconds) for the start and end frames
        start_t = ((start_frame_pos / tot_frames) * duration)
        end_t = ((end_frame_pos / tot_frames) * duration)
        # Cut the original clip to fit the buffer
        cut_clip = org_clip.subclip(start_t, end_t)
        # Generate the sequence of drawn clips
        drawn_clip = ImageSequenceClip(all_frames[start_frame_pos:end_frame_pos], fps=org_clip.fps)
        if drawn_clip.duration != cut_clip.duration:
            # Cut the tail off the drawn clip to match the cut_clip.
            drawn_clip = drawn_clip.subclip(0, end_t - start_t)
        # Make the drawn clip a VideoClip by concatenating it with only itself. Add original clip's audio
        drawn_clip = concatenate_videoclips([drawn_clip])
        drawn_clip.audio = cut_clip.audio
        return drawn_clip
def write_frames(self, frames: List[np.ndarray], filepath: str, audio: CompositeAudioClip = None) -> str:
"""Writes the frames to a given .mp4 filepath (h264 codec)"""
vclip = ImageSequenceClip(frames, fps=self.fps)
if audio is not None:
audio.set_duration(vclip.duration)
vclip.audio = audio
vclip.write_videofile(filepath, codec='libx264', fps=self.fps)
return filepath
@staticmethod
def _grayscale_frame(frame: np.ndarray, blur_lvl: int = 21) -> np.ndarray:
"""Converts a frame to grayscale"""
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (blur_lvl, blur_lvl), 0)
return gray
    def _detect_contours(self, reference_frame: np.ndarray, cur_frame: np.ndarray,
                         min_area: int = 500, threshold: int = 25, contour_lim: int = 10,
                         prev_contours: List[np.ndarray] = None, unique_only: bool = False,
                         color_correct_frame: bool = False) -> Tuple[int, List[np.ndarray], np.ndarray]:
        """Methodology used to detect contours in image differences

        Draws a bounding rectangle onto ``cur_frame`` (in place) for every
        sufficiently-large change relative to ``reference_frame``.

        Args:
            reference_frame: the frame to use as base comparison
            cur_frame: the frame to compare for changes
            min_area: the minimum (pixel?) area of changes to be flagged as a significant change
            threshold: seems like the gradient of the change (in grayscale?) to identify changes?
            contour_lim: integer-wise means of detecting changes in contours (larger => more different)
            prev_contours: List of previous contours (used for detecting unique contours
            unique_only: if True, will perform unique contour analysis
            color_correct_frame: if True, will try to apply a color correction to the frame before return

        Returns:
            (number of rectangles drawn, collected unique contours, the frame)
        """
        # Compute absolute difference between current frame and first frame
        ref_gray = self._grayscale_frame(reference_frame)
        gray = self._grayscale_frame(cur_frame)
        fdelta = cv2.absdiff(ref_gray, gray)
        # Binarize: pixels differing by more than `threshold` become white
        thresh = cv2.threshold(fdelta, threshold, 255, cv2.THRESH_BINARY)[1]
        # Dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        # Capture unique contours
        unique_cnts = prev_contours.copy() if prev_contours is not None else []
        # Loop over contours
        rects = 0
        for cnt in cnts:
            # Ignore contour if it's too small
            if cv2.contourArea(cnt) < min_area:
                continue
            if unique_only:
                # Check for unique contours
                # NOTE(review): `any(... > contour_lim ...)` is False when
                #   unique_cnts is empty, so the first contour is never drawn;
                #   and "differs from ANY known contour" may have been meant
                #   as "differs from ALL known contours" — confirm intent.
                if any([cv2.matchShapes(cnt, ucnt, 1, 0.0) > contour_lim for ucnt in unique_cnts]):
                    # Unique contour - add to group
                    # Otherwise compute the bounding box for the contour & draw it on the frame
                    self._draw_rectangle_over_contour(cnt, cur_frame, line_width=2, rgb=(0, 255, 0))
                    unique_cnts.append(cnt)
                    rects += 1
            else:
                # Just pick up any contours
                self._draw_rectangle_over_contour(cnt, cur_frame, line_width=2, rgb=(0, 255, 0))
                rects += 1
        if color_correct_frame:
            # Convert BGR -> RGB before handing the frame back
            cur_frame = cv2.cvtColor(cur_frame, cv2.COLOR_BGR2RGB)
        return rects, unique_cnts, cur_frame
@staticmethod
def _draw_rectangle_over_contour(contour, frame: np.ndarray, line_width: float = 2,
rgb: Tuple[float, float, float] = (0, 255, 0)):
"""Draws a rectangle over the given contour's dimensions"""
(x, y, w, h) = cv2.boundingRect(contour)
cv2.rectangle(frame, (x, y), (x + w, y + h), rgb, thickness=line_width)
| 47.993103 | 113 | 0.617402 |
988b6efacb690944496d7d44faec4d8583471ddf | 3,093 | py | Python | training/utils/create_gif.py | Bartzi/handwriting-determination | e8b1d84cd854f8cfd6af59fd46b120df910322d0 | [
"MIT"
] | 1 | 2021-06-14T05:49:21.000Z | 2021-06-14T05:49:21.000Z | training/utils/create_gif.py | Bartzi/handwriting-determination | e8b1d84cd854f8cfd6af59fd46b120df910322d0 | [
"MIT"
] | null | null | null | training/utils/create_gif.py | Bartzi/handwriting-determination | e8b1d84cd854f8cfd6af59fd46b120df910322d0 | [
"MIT"
] | null | null | null | import argparse
import os
import re
from collections import namedtuple
from PIL import Image
from PIL import ImageChops
from PIL.GifImagePlugin import getheader, getdata
SUPPORTED_IMAGETYPES = [".png", ".jpg", ".jpeg"]
ImageData = namedtuple("ImageData", ["file_name", "image"])
def intToBin(i):
    """Integer to two bytes: a 2-char string, little endian."""
    low = i % 256          # least-significant byte
    high = int(i / 256)    # most-significant byte
    return chr(low) + chr(high)
def create_loop_header(loops=0):
    """Build the GIF NETSCAPE2.0 application-extension block that sets looping.

    Args:
        loops: number of animation loops; 0 or infinity means "loop forever"
            (encoded as the 16-bit maximum, 65535).

    Returns:
        A single-element list containing the extension block as ``bytes``.

    The block is constructed directly as bytes. The previous implementation
    built it as a ``str`` of chr() codes and called ``.encode('utf-8')``,
    which corrupts every byte >= 0x80 (e.g. the mandatory 0xFF extension
    introducer became the two bytes 0xC3 0xBF), producing an invalid GIF
    header.
    """
    if loops == 0 or loops == float('inf'):
        loops = 2 ** 16 - 1
    header = (
        b"\x21\xFF\x0B"   # extension introducer, application-extension label, block size
        b"NETSCAPE2.0"    # application identifier + auth code
        b"\x03\x01"       # sub-block size, loop sub-block id
        + int(loops).to_bytes(2, "little")  # loop count, little endian
        + b"\x00"         # block terminator
    )
    return [header]
def makedelta(fp, sequence):
    """Convert list of image frames to a GIF animation file

    Writes the GIF header for the first frame, then only the changed
    bounding-box region ("delta") for each subsequent frame.

    Args:
        fp: a binary file object opened for writing
        sequence: iterable of PIL images (palette mode expected by getdata)

    Returns:
        The number of frames written.
    """
    frames = 0
    previous = None
    for im in sequence:
        # To specify duration, add the time in milliseconds to getdata(),
        # e.g. getdata(im, duration=1000)
        if not previous:
            # global header (first frame): loop forever
            loops = 2 ** 16 - 1
            for s in getheader(im, info={"loop": loops})[0] + getdata(im, duration=10, loop=2 ** 16 - 1):
                fp.write(s)
        else:
            # delta frame: encode only the region that changed
            delta = ImageChops.subtract_modulo(im, previous)
            bbox = delta.getbbox()
            if bbox:
                # compress difference
                for s in getdata(im.crop(bbox), offset=bbox[:2], duration=10):
                    fp.write(s)
            else:
                # FIXME: what should we do in this case?
                # (identical frame — currently skipped entirely, which also
                # drops its display duration)
                pass
        previous = im.copy()
        frames += 1
    # GIF trailer byte
    fp.write(b";")
    return frames
def make_gif(image_dir, dest_file, pattern="(\d+)"):
    """Build an animated GIF from all supported images in a directory.

    Args:
        image_dir: directory containing the source images
        dest_file: output path of the gif
        pattern: regex whose group(1) yields an integer sort key from each
            filename. NOTE(review): a filename without a match raises
            AttributeError on `.group(1)` — confirm inputs always match.
    """
    sort_pattern = re.compile(pattern)
    image_files = filter(lambda x: os.path.splitext(x)[-1] in SUPPORTED_IMAGETYPES, os.listdir(image_dir))
    images = []
    try:
        print("loading images")
        for file_name in image_files:
            path = os.path.join(image_dir, file_name)
            # convert('P'): palette mode, as required by the GIF writer
            images.append(ImageData._make((file_name, Image.open(path).convert('P'))))
        print("sorting images")
        images_sorted = sorted(images, key=lambda x: int(re.search(sort_pattern, x.file_name).group(1)))
        print("writing gif")
        with open(dest_file, "wb") as out_file:
            makedelta(out_file, [image.image for image in images_sorted])
    finally:
        # Always release the PIL file handles, even on failure
        for image in images:
            image.image.close()
if __name__ == "__main__":
    # CLI entry point: build a gif from the images in a directory.
    parser = argparse.ArgumentParser(description='Tool that creates a gif out of a number of given input images')
    parser.add_argument("image_dir", help="path to directory that contains all images that shall be converted to a gif")
    parser.add_argument("dest_file", help="path to destination gif file")
    # Default pattern captures the first run of digits in each filename
    parser.add_argument("--pattern", default="(\d+)", help="naming pattern to extract the ordering of the images")
    args = parser.parse_args()
make_gif(args.image_dir, args.dest_file, args.pattern) | 27.371681 | 120 | 0.606531 |
97c95be85ca60f572782532cc153d184be157af7 | 932 | py | Python | homeassistant/components/screenlogic/switch.py | learn-home-automation/core | c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7 | [
"Apache-2.0"
] | 22,481 | 2020-03-02T13:09:59.000Z | 2022-03-31T23:34:28.000Z | homeassistant/components/screenlogic/switch.py | learn-home-automation/core | c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7 | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/screenlogic/switch.py | learn-home-automation/core | c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7 | [
"Apache-2.0"
] | 11,411 | 2020-03-02T14:19:20.000Z | 2022-03-31T22:46:07.000Z | """Support for a ScreenLogic 'circuit' switch."""
import logging
from screenlogicpy.const import DATA as SL_DATA, GENERIC_CIRCUIT_NAMES
from homeassistant.components.switch import SwitchEntity
from . import ScreenLogicCircuitEntity
from .const import DOMAIN, LIGHT_CIRCUIT_FUNCTIONS
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up entry.

    Creates a ScreenLogicSwitch for every circuit whose function is not a
    light function (lights are handled by another platform). Circuits with
    generic default names are created disabled-by-default via the third
    constructor argument.
    """
    coordinator = hass.data[DOMAIN][config_entry.entry_id]
    async_add_entities(
        [
            ScreenLogicSwitch(
                coordinator, circuit_num, circuit["name"] not in GENERIC_CIRCUIT_NAMES
            )
            for circuit_num, circuit in coordinator.data[SL_DATA.KEY_CIRCUITS].items()
            if circuit["function"] not in LIGHT_CIRCUIT_FUNCTIONS
        ]
    )
class ScreenLogicSwitch(ScreenLogicCircuitEntity, SwitchEntity):
    """Class to represent a ScreenLogic Switch."""
    # No body beyond the docstring: all circuit on/off behavior comes from
    # ScreenLogicCircuitEntity; this subclass only registers the circuit as
    # a Home Assistant switch entity.
| 31.066667 | 86 | 0.726395 |
22ca3873306790bbf17e6fb2c9d9c09d7eaf91bc | 12,208 | py | Python | ChordDFS/ReadLog.py | alexscarlatos/ChordDFS | 78465e254647e6c7311067ef83d7d18f902aa4e0 | [
"MIT"
] | null | null | null | ChordDFS/ReadLog.py | alexscarlatos/ChordDFS | 78465e254647e6c7311067ef83d7d18f902aa4e0 | [
"MIT"
] | null | null | null | ChordDFS/ReadLog.py | alexscarlatos/ChordDFS | 78465e254647e6c7311067ef83d7d18f902aa4e0 | [
"MIT"
] | null | null | null | import os
import json
import sys
import re
from ChordMessage import ChordMessage as c_msg
from datetime import datetime
class MyLogger():
    """Simple file+stdout logger for Chord nodes and clients."""

    def __init__(self, ip, chord_id, log_file_path, client=False):
        # Node identity used to prefix every log line
        self.ip = ip
        self.chord_id = chord_id
        self.log_file_path = log_file_path
        # Clients are prefixed "<ip_c>" instead of "<ip, chord_id>"
        self.client = client

    # Print that will show up in mininet output and get added to log file
    def mnPrint(self, msg, debug=True):
        # log only certain message types
        try:
            # filter msg types: drop chatty protocol-maintenance messages
            messages = [c_msg.FIND_SUCCESSOR, c_msg.RETURN_SUCCESSOR, c_msg.GET_PREDECESSOR, c_msg.RETURN_PREDECESSOR, c_msg.NOTIFY_PREDECESSOR, c_msg.CHECK_ALIVE, c_msg.AM_ALIVE, c_msg.SOMEONE_DIED,\
                c_msg.LEAVING]
            for msg_type in messages:
                # NOTE(review): `> 0` misses a match at index 0 — confirm the
                #   type string can never start the message (".find() >= 0"?)
                if msg.find(msg_type) > 0:
                    return
        except:
            # NOTE(review): bare except silently ignores any filter failure
            # (e.g. non-string msg) and logs the message anyway
            pass
        if self.client:
            # Format msg
            msg = "<{0}_c>: {1}".format(self.ip, msg)
        else:
            # Format msg
            msg = "<{0}, {1}>: {2}".format(self.ip, self.chord_id, msg)
        # Print msg to stdout
        if debug:
            print(msg)
            sys.stdout.flush()  # need to flush output, else never show up
        # Write msg to log file, timestamped with spaces replaced so the
        # timestamp stays a single whitespace-delimited token
        with open(self.log_file_path, "a") as logFile:
            logFile.write("{0} {1}\n".format(str(datetime.now()).replace(" ", "_"), msg))

    def pretty_msg(self, msg):
        '''Only print key,value pairs where value is not None'''
        pretty = "{"
        for key, value in msg.items():
            if value is not None:
                pretty += "{0}:{1},".format(key, value)
        # Drop the trailing comma (or the "{" itself when msg has no
        # non-None values) before closing the brace
        pretty = pretty[:-1] + "}"
        return pretty
# functions for main application
def help():
    """Print the command reference for the interactive prompt.

    The previous implementation built `help_str` but never printed or
    returned it, so the interactive `help` command produced no output. It
    also omitted several commands the main loop supports (report, inserts,
    gets, keys).
    """
    help_str = '''Chord Log Application v1.0
    ring             print chord ring
    stabilize        get stabilization time
    start            application start time
    end              application end time
    servers          number of server nodes
    clients          number of client nodes
    inserts          insert counts and average hops
    gets             get counts and average hops
    keys             key distribution per node
    report           full summary report
    exit             exit application
    help             print help screen
    '''
    print(help_str)
def ring():
    """Render the chord ring as 'a->b->...->a' from node ids found in the log."""
    global log_str
    # Lines like "chord_id is 47" announce each server's position on the ring
    id_lines = re.compile(r"chord_id is [0-9]+").findall(log_str)
    node_ids = sorted(map(int, re.compile(r'[0-9]+').findall("".join(id_lines))))
    # Close the ring by wrapping back around to the smallest id
    return "".join("{0}->".format(node) for node in node_ids) + str(node_ids[0])
def start():
    """Timestamp of the earliest entry across all logs."""
    global sorted_entries
    earliest = sorted_entries[0]
    return earliest["time"]
def end():
    """Timestamp of the latest entry across all logs."""
    global sorted_entries
    latest = sorted_entries[-1]
    return latest["time"]
def report():
    """Assemble the full summary report by calling the other analyzers.

    Whitespace inside the template literal is significant — it shapes the
    printed report.
    """
    global log_str
    # report of log summaries etc
    inserts_str = inserts()          # (sent, rcvd, avg_hops, loss_rate)
    gets_str = gets()                # (sent, rcvd, avg_hops, loss_rate)
    keys_tup = keys()                # (pretty string, key_map dict)
    key_summary_str = key_summary(keys_tup[1])
    report_str = \
'''
    Start: {0}\n\
    End: {1}\n\
    # Servers: {2}\n\
    # Clients: {3}\n\
    Ring: {4}\n\
    Stabilization Time: {5}\n\
    Inserts Sent: {6}\n\
    Inserts Rcvd: {7}\n\
    Inserts Avg Hops: {8}\n\
    Insert Loss Rate: {9}\n\
    Gets Sent: {10}\n\
    Gets Rcvd: {11}\n\
    Gets Avg Hops: {12}\n\
    Gets Loss Rate: {13}
    Keys Dist:\n{14}\n\
    Key Summary: {15}\n\
    '''.format(start(),end(),servers(),clients(),ring(),stabilize(),\
        inserts_str[0],inserts_str[1],inserts_str[2],inserts_str[3],\
        gets_str[0],gets_str[1],gets_str[2],gets_str[3],\
        keys_tup[0],key_summary_str)
    return report_str
def stabilize():
    """Elapsed seconds between the first and last stabilization log lines."""
    global log_str
    # example 2018-05-09_10:40:34.210800 <172.1.1.3, 11>: Successor updated by stabilize: key: 172.1.1.4, chord id: 47
    stab_lines = re.findall(
        r"[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{6} "
        r"<[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+, [0-9]+>: Successor updated by stabilize",
        log_str)
    # Pull the HH:MM:SS.ffffff stamps back out of the matched lines
    stamps = re.findall(r"[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{6}", "".join(stab_lines))
    first = datetime.strptime(stamps[0], "%H:%M:%S.%f")
    last = datetime.strptime(stamps[-1], "%H:%M:%S.%f")
    return "{0} sec".format((last - first).total_seconds())
def servers():
    """Count server nodes by their 'chord_id is N' startup lines."""
    global log_str
    startup_lines = re.compile(r"chord_id is [0-9]+").findall(log_str)
    return len(startup_lines)
def clients():
    """Count client nodes by their startup announcement lines."""
    global log_str
    # ex: I'm a chord client, my IP is 172.1.1.2
    announce_re = re.compile(r"chord client, my IP is [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
    ip_re = re.compile(r"[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
    client_ips = ip_re.findall("".join(announce_re.findall(log_str)))
    return len(client_ips)
def inserts():
    """Summarize INSERT traffic from the merged log.

    Returns:
        (num_sent, num_rcvd, avg_hops, loss_rate) where num_rcvd is divided
        by num_replicates since each insert is acknowledged once per replica.
    """
    global log_str, num_replicates
    # example <172.1.1.2_c>: msg type:INSERT sent to 172.1.1.1:
    '''2018-05-09_10:40:36.868994 <172.1.1.2_c>: msg type:INSERT rcvd from 172.1.1.1: msg:{client_ip:172.1.1.2,target:172.1.1.1,msg_type:INSERT,hops:7,filename:temp.txt,content:testingggg,suc_ip:172.1.1.1,key:5}'''
    insert_sent_re = re.compile(r"<[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+_c>: msg type:INSERT sent to 172.1.1.1")
    insert_sent = insert_sent_re.findall(log_str)
    num_inserts_sent = len(insert_sent)
    insert_re = re.compile(r"<[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+_c>: msg type:INSERT rcvd .*hops:[0-9]+")
    inserts_arr = insert_re.findall(log_str)
    # Each insert produces one acknowledgement per replica
    num_inserts = int(len(inserts_arr)/num_replicates)
    # Extract hop counts from the matched lines to compute the average
    hops_re = re.compile(r"hops:[0-9]+")
    hops = hops_re.findall("".join(inserts_arr))
    num_re = re.compile(r'[0-9]+')
    hops_nums = list(map(int,num_re.findall("".join(hops))))
    if len(hops_nums) == 0:
        avg_hops = 0
    else:
        avg_hops = sum(hops_nums)/len(hops_nums)
    loss_rate = 0
    if num_inserts_sent != 0:
        loss_rate = 1 - (num_inserts/num_inserts_sent)
    inserts_str = (num_inserts_sent,num_inserts,avg_hops,loss_rate)
    return inserts_str
def gets():
    """Summarize GET (SEND_FILE) traffic from the merged log.

    Returns:
        (num_sent, num_rcvd, avg_hops, loss_rate); received count is divided
        by num_replicates, mirroring inserts().
    """
    global log_str
    '''2018-05-09_10:41:42.752116 <172.1.1.2_c>: msg type:SEND_FILE rcvd from 172.1.1.1: msg:{client_ip:172.1.1.2,target:172.1.1.1,msg_type:SEND_FILE,hops:7,filename:temp.txt,content:testingggg,suc_ip:172.1.1.1,key:5}'''
    get_sent_re = re.compile(r"<[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+_c>: msg type:SEND_FILE sent")
    get_re = re.compile(r"<[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+_c>: msg type:SEND_FILE rcvd .*hops:[0-9]+")
    get_sent = get_sent_re.findall(log_str)
    num_get_sent = len(get_sent)
    gets_arr = get_re.findall(log_str)
    # Each get is answered once per replica
    num_gets = int(len(gets_arr)/num_replicates)
    # Extract hop counts to compute the average
    hops_re = re.compile(r"hops:[0-9]+")
    hops = hops_re.findall("".join(gets_arr))
    num_re = re.compile(r'[0-9]+')
    hops_nums = list(map(int,num_re.findall("".join(hops))))
    if len(hops_nums) == 0:
        avg_hops = 0
    else:
        avg_hops = sum(hops_nums)/len(hops_nums)
    loss_rate = 0
    if num_get_sent != 0:
        loss_rate = 1 - num_gets/num_get_sent
    gets_str = (num_get_sent,num_gets,avg_hops,loss_rate)
    return gets_str
def keys():
    '''Build a per-node key map from the most recent "entries:" log lines.

    Returns (pretty_string, key_map) where key_map maps "ip, chord_id" to
    {"timestamp": datetime, "entries": [raw "{...}" string]}. Returns
    ("None", {}) when no entries lines exist.

    Example lines:
    2018-05-09_15:35:05.715598 <172.1.1.3, 11>: entries: {}
    2018-05-09_15:35:05.717302 <172.1.1.5, 35>: entries: {text.text:[33],newfile:[34]}
    '''
    entries_re = re.compile(r"[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{6} <[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+, [0-9]+>: entries: {.*}")
    entries = entries_re.findall(log_str)
    dictionary_re = re.compile(r"{.*}")
    key_map = {}
    if len(entries) == 0:
        return "None", {}
    for entry in entries:
        ipid_re = re.compile(r"[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+, [0-9]+")
        time_re = re.compile(r"[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{6}")
        ip = ipid_re.findall(entry)[0]
        ts = datetime.strptime(time_re.findall(entry)[0],"%H:%M:%S.%f")
        n_entries = dictionary_re.findall(entry)
        # only use most up to date info
        if ip in key_map:
            if key_map[ip]["timestamp"] < ts:
                key_map[ip]["timestamp"] = ts
                key_map[ip]["entries"] = n_entries
        else:
            key_map[ip] = {"timestamp":ts,"entries":n_entries}
    return print_key_map(key_map), key_map
def print_key_map(key_map):
    """Render each node's file entries on its own tab-indented line."""
    pieces = ["\t"]
    for node, info in key_map.items():
        pieces.append("{0}:{1}\n\t".format(node, print_list(info["entries"])))
    return "".join(pieces)
def print_list(some_list):
    """Join values into a single string, each followed by one space."""
    return "".join("{0} ".format(value) for value in some_list)
def key_summary(key_map):
    """Summarize distinct filenames and per-node key counts from a key map.

    Also annotates each node's dict with a "num_entries" count (in place).
    """
    seen_files = set()
    for info in key_map.values():
        # Entries look like "{file:[ids];file2:[ids]}" — strip braces, split on ';'
        raw = info["entries"][0].replace("}", "").replace("{", "")
        parts = raw.split(";")
        blanks = 0
        for part in parts:
            name = part.split(":")[0]
            if name != "":
                seen_files.add(name)
            else:
                blanks += 1
        info["num_entries"] = len(parts) - blanks
    body = "".join(
        "{0}-> # keys:{1}\n\t".format(node, key_map[node]["num_entries"])
        for node in key_map.keys())
    head = "\n\tTotal Files: {0}\n\t".format(len(seen_files))
    return head + body
if __name__ == "__main__":
    # Get every file in logs folder
    logFileNames = []
    for root, dirs, files in os.walk("nodes", topdown=False):
        for f in files:
            if f.endswith(".log"):
                logFileNames.append(os.path.join(root, f))
    # Get all entries from log files
    entries = []
    for logFileName in logFileNames:
        logFile = open(logFileName)
        for line in logFile:
            timestamp = line.strip().split(" ", 1)[0]
            # skip non timestamps
            try:
                timestamp = datetime.strptime(timestamp, "%Y-%m-%d_%H:%M:%S.%f")
            except:
                continue
            entry = dict()
            entry['time'] = timestamp
            entry['log'] = line
            entries.append(entry)
        logFile.close()
    # Merge all entries, chronologically, into the module-global log_str
    # that the analysis functions (ring, inserts, gets, ...) parse.
    log_str = ""
    sorted_entries = sorted(entries, key=lambda e: e['time'])
    # Print all entries in order
    update = 0
    # NOTE(review): `iter` shadows the builtin; it is a progress counter
    iter = 0
    for entry in sorted_entries:
        update += 1
        if update == 1000:
            # Progress marker every 1000 merged entries
            iter += 1
            print("iter: {0}x1000".format(iter))
            update = 0
        log_str += entry['log']
    # same compiled logs into 1
    try:
        with open("master.log", "w") as f_out:
            f_out.write(log_str)
    except:
        # Best-effort: failure to write master.log is non-fatal
        pass
    input_str = ""
    try:
        # Open config file
        configFile = open("chordDFS.config")
        config = json.loads(configFile.read())
        configFile.close()
        # Load parameters from config file
        # NOTE(review): if this fails, num_replicates stays undefined and
        # inserts()/gets() will raise NameError — confirm the config is
        # always present.
        num_replicates = config['num_replicates']
    except IOError as e:
        print(e)
    # just run the report (non-interactive mode when any CLI arg is given)
    if len(sys.argv) > 1:
        print(report())
        sys.stdout.flush()
        sys.exit(1)
    # Interactive command loop
    while True:
        input_str = input("Enter a command: ")
        sys.stdout.flush()
        if input_str == "help":
            help()
        if input_str == "exit":
            break
        if input_str == "ring":
            print(ring())
        if input_str == "start":
            print(start())
        if input_str == "end":
            print(end())
        if input_str == "stabilize":
            print(stabilize())
        if input_str == "servers":
            print(servers())
        if input_str == "clients":
            print(clients())
        if input_str == "report":
            print(report())
        if input_str == "inserts":
            inserts_str = inserts()
            print("Inserts Sent: {0}\nInserts Rcvd: {1}\nAvg Hops: {2}".format(inserts_str[0],inserts_str[1],inserts_str[2]))
        if input_str == "gets":
            gets_str = gets()
            print("Gets: {0}\nAvg Hops: {1}".format(gets_str[0],gets_str[1]))
        if input_str == "keys":
            print(keys()[0])
        sys.stdout.flush()
| 35.591837 | 224 | 0.559879 |
2571a6a74d0adfc879bf9fabdfa4ad5fa8e91ccd | 66,321 | py | Python | apps/devhub/views.py | acidburn0zzz/olympia | 7f766a5fdff255b827333d4fb01aa77546ed8c70 | [
"BSD-3-Clause"
] | 1 | 2015-12-01T03:53:51.000Z | 2015-12-01T03:53:51.000Z | apps/devhub/views.py | Acidburn0zzz/olympia | 7f766a5fdff255b827333d4fb01aa77546ed8c70 | [
"BSD-3-Clause"
] | 5 | 2021-02-02T23:09:35.000Z | 2021-09-08T02:47:20.000Z | apps/devhub/views.py | Acidburn0zzz/olympia | 7f766a5fdff255b827333d4fb01aa77546ed8c70 | [
"BSD-3-Clause"
] | null | null | null | import collections
import functools
import json
import os
import time
import uuid
from django import forms as django_forms
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.files.storage import default_storage as storage
from django.db import transaction
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render
from django.template import Context, loader
from django.utils.http import urlquote
from django.utils.timezone import now
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
import commonware.log
import waffle
from PIL import Image
from tower import ugettext as _
from tower import ugettext_lazy as _lazy
from waffle.decorators import waffle_switch
import amo
import amo.utils
from access import acl
from addons import forms as addon_forms
from addons.decorators import addon_view
from addons.models import Addon, AddonUser
from addons.tasks import unindex_addons
from addons.views import BaseFilter
from amo import messages
from amo.decorators import json_view, login_required, post_required
from amo.helpers import absolutify, urlparams
from amo.urlresolvers import reverse
from amo.utils import escape_all, MenuItem, send_mail_jinja
from api.models import APIKey
from applications.models import AppVersion
from devhub import perf
from devhub.decorators import dev_required
from devhub.forms import CheckCompatibilityForm
from devhub.models import ActivityLog, BlogPost, RssKey, SubmitStep
from devhub.utils import (ValidationAnnotator, ValidationComparator,
process_validation)
from editors.decorators import addons_reviewer_required
from editors.helpers import get_position, ReviewHelper
from files.models import File, FileUpload, FileValidation, ValidationAnnotation
from files.utils import is_beta, parse_addon
from lib.crypto.packaged import sign_file
from search.views import BaseAjaxSearch
from translations.models import delete_translation
from users.models import UserProfile
from versions.models import Version
from zadmin.models import ValidationResult
from . import forms, tasks, feeds, signals
log = commonware.log.getLogger('z.devhub')
paypal_log = commonware.log.getLogger('z.paypal')
# We use a session cookie to make sure people see the dev agreement.
MDN_BASE = 'https://developer.mozilla.org/Add-ons'
class AddonFilter(BaseFilter):
    """Sort options for the developer dashboard's add-on listing."""
    opts = (('name', _lazy(u'Name')),
            ('updated', _lazy(u'Updated')),
            ('created', _lazy(u'Created')),
            ('popular', _lazy(u'Downloads')),
            ('rating', _lazy(u'Rating')))
class ThemeFilter(BaseFilter):
    """Sort options for the developer dashboard's theme listing."""
    opts = (('name', _lazy(u'Name')),
            ('created', _lazy(u'Created')),
            ('popular', _lazy(u'Downloads')),
            ('rating', _lazy(u'Rating')))
def addon_listing(request, default='name', theme=False):
    """Set up the queryset and filtering for addon listing for Dashboard.

    Returns a (filtered queryset, filter instance) pair. With theme=True the
    user's Personas are listed; otherwise all their (listed and unlisted)
    non-Persona add-ons.
    """
    if theme:
        qs = request.user.addons.filter(type=amo.ADDON_PERSONA)
    else:
        qs = Addon.with_unlisted.filter(authors=request.user).exclude(
            type=amo.ADDON_PERSONA)
    # Filter/sort is driven by the 'sort' GET parameter
    filter_cls = ThemeFilter if theme else AddonFilter
    filter_ = filter_cls(request, qs, 'sort', default)
    return filter_.qs, filter_
def index(request):
    """Developer Hub landing page: blog posts plus the user's newest add-ons."""
    ctx = {'blog_posts': _get_posts()}
    if request.user.is_authenticated():
        mine = Addon.with_unlisted.filter(authors=request.user)
        # The three most recently modified add-ons, with their review-queue
        # position attached for display.
        ctx['recent_addons'] = [
            {'addon': recent, 'position': get_position(recent)}
            for recent in mine.order_by('-modified')[:3]]
    return render(request, 'devhub/index.html', ctx)
@login_required
def dashboard(request, theme=False):
    """Developer dashboard: paginated listing of the user's add-ons or themes.

    ``theme`` switches between the add-on tab and the theme tab.
    """
    # A short slice of recent activity-log entries across all the user's
    # add-ons, shown alongside the listing.
    addon_items = _get_items(
        None, Addon.with_unlisted.filter(authors=request.user))[:4]
    data = dict(rss=_get_rss_feed(request), blog_posts=_get_posts(),
                timestamp=int(time.time()), addon_tab=not theme,
                theme=theme, addon_items=addon_items)
    if data['addon_tab']:
        addons, data['filter'] = addon_listing(request)
        data['addons'] = amo.utils.paginate(request, addons, per_page=10)
    if theme:
        themes, data['filter'] = addon_listing(request, theme=True)
        data['themes'] = amo.utils.paginate(request, themes, per_page=10)
    if 'filter' in data:
        data['sorting'] = data['filter'].field
        data['sort_opts'] = data['filter'].opts
    return render(request, 'devhub/addons/dashboard.html', data)
@dev_required
def ajax_compat_status(request, addon_id, addon):
    """Render the compatibility-status snippet for an add-on."""
    # Only meaningful for add-ons that track app compatibility and that
    # actually have a current version.
    if addon.accepts_compatible_apps() and addon.current_version:
        return render(request, 'devhub/addons/ajax_compat_status.html',
                      {'addon': addon})
    raise http.Http404()
@dev_required
def ajax_compat_error(request, addon_id, addon):
    """Render the compatibility-error snippet for an add-on."""
    # Same preconditions as ajax_compat_status: compat tracking enabled
    # and a current version present.
    if addon.accepts_compatible_apps() and addon.current_version:
        return render(request, 'devhub/addons/ajax_compat_error.html',
                      {'addon': addon})
    raise http.Http404()
@dev_required
def ajax_compat_update(request, addon_id, addon, version_id):
    """AJAX endpoint updating per-app compatibility ranges for one version."""
    if not addon.accepts_compatible_apps():
        raise http.Http404()
    version = get_object_or_404(Version, pk=version_id, addon=addon)
    compat_form = forms.CompatFormSet(request.POST or None,
                                      queryset=version.apps.all())
    if request.method == 'POST' and compat_form.is_valid():
        for compat in compat_form.save(commit=False):
            compat.version = version
            compat.save()
        # Any change to a "max" app version gets its own activity-log entry.
        for form in compat_form.forms:
            if (isinstance(form, forms.CompatForm) and
                    'max' in form.changed_data):
                _log_max_version_change(addon, version, form.instance)
    return render(request, 'devhub/addons/ajax_compat_update.html',
                  dict(addon=addon, version=version, compat_form=compat_form))
def _get_addons(request, addons, addon_id, action):
    """Create a list of ``MenuItem``s for the activity feed.

    The first item is always "All My Add-ons"; the entry matching
    ``addon_id`` (if any) is marked selected.  ``action`` is carried
    through as a query parameter so the chosen feed filter sticks.
    """
    items = []
    a = MenuItem()
    a.selected = (not addon_id)
    (a.text, a.url) = (_('All My Add-ons'), reverse('devhub.feed_all'))
    if action:
        a.url += '?action=' + action
    items.append(a)
    for addon in addons:
        item = MenuItem()
        try:
            item.selected = (addon_id and addon.id == int(addon_id))
        except ValueError:
            pass  # We won't get here... EVER
        url = reverse('devhub.feed', args=[addon.slug])
        if action:
            url += '?action=' + action
        item.text, item.url = addon.name, url
        items.append(item)
    return items
def _get_posts(limit=5):
    """Return the ``limit`` most recent blog posts, newest first."""
    recent = BlogPost.objects.order_by('-date_posted')
    return recent[:limit]
def _get_activities(request, action):
    """Build the menu of activity-type filters shown on the feed page."""
    url = request.get_full_path()
    # (query value, display label) pairs; None means "no filter".
    labels = [
        (None, _('All Activity')),
        ('updates', _('Add-on Updates')),
        ('status', _('Add-on Status')),
        ('collections', _('User Collections')),
        ('reviews', _('User Reviews')),
    ]
    items = []
    for choice, label in labels:
        entry = MenuItem()
        entry.text = label
        entry.url = urlparams(url, page=None, action=choice)
        entry.selected = (action == choice)
        items.append(entry)
    return items
def _get_items(action, addons):
    """Activity-log entries for ``addons``, optionally narrowed by ``action``.

    ``action`` maps to a tuple of log types below; None or an unknown
    action returns everything except developer-hidden entries.
    """
    filters = dict(updates=(amo.LOG.ADD_VERSION, amo.LOG.ADD_FILE_TO_VERSION),
                   status=(amo.LOG.USER_DISABLE, amo.LOG.USER_ENABLE,
                           amo.LOG.CHANGE_STATUS, amo.LOG.APPROVE_VERSION,),
                   collections=(amo.LOG.ADD_TO_COLLECTION,
                                amo.LOG.REMOVE_FROM_COLLECTION,),
                   reviews=(amo.LOG.ADD_REVIEW,))
    filter_ = filters.get(action)
    items = (ActivityLog.objects.for_addons(addons)
             .exclude(action__in=amo.LOG_HIDE_DEVELOPER))
    if filter_:
        items = items.filter(action__in=[i.id for i in filter_])
    return items
def _get_rss_feed(request):
    """Return the user's private RSS URL for the "all add-ons" feed."""
    # get_or_create so each user has exactly one key, minted on demand.
    key, _created = RssKey.objects.get_or_create(user=request.user)
    return urlparams(reverse('devhub.feed_all'), privaterss=key.key)
def feed(request, addon_id=None):
    """Activity feed, either for all the user's add-ons or a single one.

    A ``privaterss`` query parameter serves the RSS rendition instead of
    the HTML page (no login required; the key is the credential).
    """
    if request.GET.get('privaterss'):
        return feeds.ActivityFeedRSS()(request)
    addon_selected = None
    if not request.user.is_authenticated():
        # Anonymous users are bounced to login with a return-to URL.
        url = reverse('users.login')
        p = urlquote(request.get_full_path())
        return http.HttpResponseRedirect('%s?to=%s' % (url, p))
    else:
        addons_all = Addon.with_unlisted.filter(authors=request.user)
        if addon_id:
            addon = get_object_or_404(Addon.with_unlisted.id_or_slug(addon_id))
            addons = addon  # common query set
            # Each add-on gets its own private RSS key, minted lazily.
            try:
                key = RssKey.objects.get(addon=addons)
            except RssKey.DoesNotExist:
                key = RssKey.objects.create(addon=addons)
            addon_selected = addon.id
            rssurl = urlparams(reverse('devhub.feed', args=[addon_id]),
                               privaterss=key.key)
            if not acl.check_addon_ownership(request, addons, viewer=True,
                                             ignore_disabled=True):
                raise PermissionDenied
        else:
            rssurl = _get_rss_feed(request)
            addon = None
            addons = addons_all
    action = request.GET.get('action')
    items = _get_items(action, addons)
    activities = _get_activities(request, action)
    addon_items = _get_addons(request, addons_all, addon_selected, action)
    pager = amo.utils.paginate(request, items, 20)
    data = dict(addons=addon_items, pager=pager, activities=activities,
                rss=rssurl, addon=addon)
    return render(request, 'devhub/addons/activity.html', data)
@dev_required
def edit(request, addon_id, addon):
    """Render the main add-on edit page."""
    context = {
        'page': 'edit',
        'addon': addon,
        'url_prefix': 'addons',
        'valid_slug': addon.slug,
        'tags': addon.tags.not_blacklisted().values_list('tag_text',
                                                         flat=True),
        'previews': addon.previews.all(),
    }
    # Admins additionally get the admin-only configuration form.
    if acl.action_allowed(request, 'Addons', 'Configure'):
        context['admin_form'] = forms.AdminForm(instance=addon)
    return render(request, 'devhub/addons/edit.html', context)
@dev_required(theme=True)
def edit_theme(request, addon_id, addon, theme=False):
    """Edit page for themes: theme details plus a separate ownership form."""
    form = addon_forms.EditThemeForm(data=request.POST or None,
                                     request=request, instance=addon)
    owner_form = addon_forms.EditThemeOwnerForm(data=request.POST or None,
                                                instance=addon)
    if request.method == 'POST':
        # 'owner_submit' distinguishes the ownership form's submit button
        # from the main details form.
        if 'owner_submit' in request.POST:
            if owner_form.is_valid():
                owner_form.save()
                messages.success(request, _('Changes successfully saved.'))
                return redirect('devhub.themes.edit', addon.slug)
        elif form.is_valid():
            form.save()
            messages.success(request, _('Changes successfully saved.'))
            # reload(): saving may have changed the slug used in the URL.
            return redirect('devhub.themes.edit', addon.reload().slug)
        else:
            messages.error(request, _('Please check the form for errors.'))
    return render(request, 'devhub/personas/edit.html', {
        'addon': addon, 'persona': addon.persona, 'form': form,
        'owner_form': owner_form})
@dev_required(owner_for_post=True, theme=True)
@post_required
def delete(request, addon_id, addon, theme=False):
    """Delete an add-on or theme after password confirmation."""
    # Database deletes only allowed for free or incomplete addons.
    if not addon.can_be_deleted():
        msg = _('Add-on cannot be deleted. Disable this add-on instead.')
        messages.error(request, msg)
        return redirect(addon.get_dev_url('versions'))
    # DeleteForm validates the user's password before anything is removed.
    form = forms.DeleteForm(request)
    if form.is_valid():
        reason = form.cleaned_data.get('reason', '')
        addon.delete(msg='Removed via devhub', reason=reason)
        messages.success(
            request,
            _('Theme deleted.') if theme else _('Add-on deleted.'))
        return redirect('devhub.%s' % ('themes' if theme else 'addons'))
    else:
        if theme:
            messages.error(
                request,
                _('Password was incorrect. Theme was not deleted.'))
            return redirect(addon.get_dev_url())
        else:
            messages.error(
                request,
                _('Password was incorrect. Add-on was not deleted.'))
            return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
def enable(request, addon_id, addon):
    """Re-enable an add-on the developer had previously disabled."""
    addon.update(disabled_by_user=False)
    amo.log(amo.LOG.USER_ENABLE, addon)
    versions_url = addon.get_dev_url('versions')
    return redirect(versions_url)
@dev_required(owner_for_post=True)
@post_required
def cancel(request, addon_id, addon):
    """Cancel an in-progress review request for an add-on."""
    if addon.status in amo.UNDER_REVIEW_STATUSES:
        # LITE_AND_NOMINATED drops back to LITE; any other in-review
        # status resets to NULL.
        if addon.status == amo.STATUS_LITE_AND_NOMINATED:
            addon.update(status=amo.STATUS_LITE)
        else:
            addon.update(status=amo.STATUS_NULL)
        amo.log(amo.LOG.CHANGE_STATUS, addon.get_status_display(), addon)
    return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
def disable(request, addon_id, addon):
    """Let the developer hide their own add-on from the site."""
    addon.update(disabled_by_user=True)
    amo.log(amo.LOG.USER_DISABLE, addon)
    versions_url = addon.get_dev_url('versions')
    return redirect(versions_url)
@dev_required
@post_required
def unlist(request, addon_id, addon):
    """Take an add-on off the public listing and drop it from the index."""
    # Unlisting also clears any developer-disable flag.
    addon.update(is_listed=False, disabled_by_user=False)
    amo.log(amo.LOG.ADDON_UNLISTED, addon)
    unindex_addons.delay([addon.id])
    versions_url = addon.get_dev_url('versions')
    return redirect(versions_url)
@dev_required(owner_for_post=True)
def ownership(request, addon_id, addon):
    """Manage add-on authors, license and privacy policy.

    On POST, every form in ``fs`` must validate before anything is
    saved; author additions/changes/removals are activity-logged and the
    affected users are notified by email.
    """
    fs, ctx = [], {}
    # Authors.
    qs = AddonUser.objects.filter(addon=addon).order_by('position')
    user_form = forms.AuthorFormSet(request.POST or None, queryset=qs)
    fs.append(user_form)
    # Versions.
    license_form = forms.LicenseForm(request.POST or None, addon=addon)
    ctx.update(license_form.get_context())
    if ctx['license_form']:  # if addon has a version
        fs.append(ctx['license_form'])
    # Policy.
    policy_form = forms.PolicyForm(request.POST or None, addon=addon)
    ctx.update(policy_form=policy_form)
    fs.append(policy_form)

    def mail_user_changes(author, title, template_part, recipients):
        # Render the per-event email template and notify all authors.
        from amo.utils import send_mail
        t = loader.get_template(
            'users/email/{part}.ltxt'.format(part=template_part))
        send_mail(title,
                  t.render(Context({'author': author, 'addon': addon,
                                    'site_url': settings.SITE_URL})),
                  None, recipients, use_blacklist=False, real_email=True)

    if request.method == 'POST' and all([form.is_valid() for form in fs]):
        # Authors.
        authors = user_form.save(commit=False)
        # Notify both current authors and any incoming ones.
        addon_authors_emails = list(
            addon.authors.values_list('email', flat=True))
        authors_emails = set(addon_authors_emails +
                             [author.user.email for author in authors])
        for author in authors:
            action = None
            # New row, or an existing row repointed at a different user,
            # both count as an addition.
            if not author.id or author.user_id != author._original_user_id:
                action = amo.LOG.ADD_USER_WITH_ROLE
                author.addon = addon
                mail_user_changes(
                    author=author,
                    title=_('An author has been added to your add-on'),
                    template_part='author_added',
                    recipients=authors_emails)
            elif author.role != author._original_role:
                action = amo.LOG.CHANGE_USER_WITH_ROLE
                mail_user_changes(
                    author=author,
                    title=_('An author has a role changed on your add-on'),
                    template_part='author_changed',
                    recipients=authors_emails)
            author.save()
            if action:
                amo.log(action, author.user, author.get_role_display(), addon)
            # A repointed row also logs the removal of the previous user.
            if (author._original_user_id and
                    author.user_id != author._original_user_id):
                amo.log(amo.LOG.REMOVE_USER_WITH_ROLE,
                        (UserProfile, author._original_user_id),
                        author.get_role_display(), addon)
        for author in user_form.deleted_objects:
            amo.log(amo.LOG.REMOVE_USER_WITH_ROLE, author.user,
                    author.get_role_display(), addon)
            # Removed authors still get told they were removed.
            authors_emails.add(author.user.email)
            mail_user_changes(
                author=author,
                title=_('An author has been removed from your add-on'),
                template_part='author_removed',
                recipients=authors_emails)
        if license_form in fs:
            license_form.save()
        if policy_form in fs:
            policy_form.save()
        messages.success(request, _('Changes successfully saved.'))
        return redirect(addon.get_dev_url('owner'))
    ctx.update(addon=addon, user_form=user_form)
    return render(request, 'devhub/addons/owner.html', ctx)
@dev_required(owner_for_post=True)
def payments(request, addon_id, addon):
    """Edit contribution settings: recipient, charity and developer profile."""
    # The Mozilla Foundation sentinel is treated as "no custom charity"
    # for form purposes.
    charity = None if addon.charity_id == amo.FOUNDATION_ORG else addon.charity
    charity_form = forms.CharityForm(request.POST or None, instance=charity,
                                     prefix='charity')
    contrib_form = forms.ContribForm(request.POST or None, instance=addon,
                                     initial=forms.ContribForm.initial(addon))
    profile_form = forms.ProfileForm(request.POST or None, instance=addon,
                                     required=True)
    if request.method == 'POST':
        if contrib_form.is_valid():
            addon = contrib_form.save(commit=False)
            addon.wants_contributions = True
            valid = _save_charity(addon, contrib_form, charity_form)
            # Without a complete developer profile the profile form must
            # also validate before anything is persisted.
            if not addon.has_full_profile():
                valid &= profile_form.is_valid()
                if valid:
                    profile_form.save()
            if valid:
                addon.save()
                messages.success(request, _('Changes successfully saved.'))
                amo.log(amo.LOG.EDIT_CONTRIBUTIONS, addon)
                return redirect(addon.get_dev_url('payments'))
    errors = charity_form.errors or contrib_form.errors or profile_form.errors
    if errors:
        messages.error(request, _('There were errors in your submission.'))
    return render(request, 'devhub/payments/payments.html',
                  dict(addon=addon, errors=errors, charity_form=charity_form,
                       contrib_form=contrib_form, profile_form=profile_form))
def _save_charity(addon, contrib_form, charity_form):
    """Apply the chosen contribution recipient to ``addon``.

    Returns False only when the 'org' recipient was picked but the
    charity form failed validation; True in every other case.
    """
    recipient = contrib_form.cleaned_data['recipient']
    if recipient == 'org':
        if not charity_form.is_valid():
            return False
        addon.charity = charity_form.save()
    elif recipient == 'moz':
        addon.charity_id = amo.FOUNDATION_ORG
    elif recipient == 'dev':
        addon.charity = None
    return True
@dev_required
@post_required
def disable_payments(request, addon_id, addon):
    """Turn off the contributions prompt for an add-on."""
    addon.update(wants_contributions=False)
    return redirect(addon.get_dev_url('payments'))
@dev_required
@post_required
def remove_profile(request, addon_id, addon):
    """Delete the developer-profile texts and stop asking for contributions."""
    delete_translation(addon, 'the_reason')
    delete_translation(addon, 'the_future')
    # Contributions require a profile, so removing it disables them too.
    if addon.wants_contributions:
        addon.update(wants_contributions=False)
    return redirect(addon.get_dev_url('profile'))
@dev_required
def profile(request, addon_id, addon):
    """Edit the add-on's developer profile."""
    profile_form = forms.ProfileForm(request.POST or None, instance=addon)
    if request.method == 'POST' and profile_form.is_valid():
        profile_form.save()
        amo.log(amo.LOG.EDIT_PROPERTIES, addon)
        messages.success(request, _('Changes successfully saved.'))
        return redirect(addon.get_dev_url('profile'))
    return render(request, 'devhub/addons/profile.html',
                  dict(addon=addon, profile_form=profile_form))
@login_required
@post_required
@json_view
def compat_application_versions(request):
    """Return the AppVersion choices for the POSTed application id as JSON."""
    app_id = request.POST['application']
    f = CheckCompatibilityForm()
    return {'choices': f.version_choices_for_app_id(app_id)}
@login_required
def validate_addon(request):
    """Standalone "validate an add-on" upload page."""
    return render(request, 'devhub/validate_addon.html',
                  {'title': _('Validate Add-on'),
                   # Hack: we just need the "is_unlisted" field from this form.
                   'new_addon_form': forms.NewAddonForm(
                       None, None, request=request)})
@login_required
def check_addon_compatibility(request):
    """Standalone compatibility-check page (same template as validation)."""
    form = CheckCompatibilityForm()
    return render(request, 'devhub/validate_addon.html',
                  {'appversion_form': form,
                   'title': _('Check Add-on Compatibility'),
                   # Hack: we just need the "is_unlisted" field from this form.
                   'new_addon_form': forms.NewAddonForm(
                       None, None, request=request)})
@dev_required
@json_view
def file_perf_tests_start(request, addon_id, addon, file_id):
    """Queue performance-test tasks for a file on every supported app.

    Gated behind the 'perf-tests' waffle flag.  Returns a JSON dict with
    a ``success`` flag; files on unsupported platforms are rejected.
    """
    if not waffle.flag_is_active(request, 'perf-tests'):
        raise PermissionDenied
    file_ = get_object_or_404(File, pk=file_id)
    plats = perf.PLATFORM_MAP.get(file_.platform)
    if plats is None:
        # Lazy %-style args: the string is only built if the record is
        # actually emitted.
        log.info('Unsupported performance platform %s for file %s',
                 file_.get_platform_display(), file_)
        # TODO(Kumar) provide a message about this
        return {'success': False}
    for app in perf.ALL_APPS:
        for plat in plats:
            tasks.start_perf_test_for_file.delay(file_.id, plat, app)
    return {'success': True}
def handle_upload(filedata, user, app_id=None, version_id=None, addon=None,
                  is_standalone=False, is_listed=True, automated=False,
                  submit=False):
    """Create a FileUpload from raw POST data and queue work on it.

    Depending on the arguments this either runs a compatibility check
    (``app_id`` + ``version_id``), validates and submits
    (``submit=True``), or just validates.  Returns the FileUpload.
    """
    if addon:
        # TODO: Handle betas.
        # Uploads for an existing add-on inherit its signing/listing flags.
        automated = addon.automated_signing
        is_listed = addon.is_listed
    fu = FileUpload.from_post(filedata, filedata.name, filedata.size)
    fu.update(automated_signing=automated, addon=addon)
    log.info('FileUpload created: %s' % fu.pk)
    if user.is_authenticated():
        fu.user = user
        fu.save()
    if app_id and version_id:
        # Compatibility check against one specific application version.
        app = amo.APPS_ALL.get(int(app_id))
        if not app:
            raise http.Http404()
        ver = get_object_or_404(AppVersion, pk=version_id)
        tasks.compatibility_check.delay(fu.pk, app.guid, ver.version)
    elif submit:
        tasks.validate_and_submit(addon, fu, listed=is_listed)
    else:
        tasks.validate(fu, listed=is_listed)
    return fu
@login_required
@post_required
def upload(request, addon=None, is_standalone=False, is_listed=True,
           automated=False):
    """POST endpoint receiving an add-on file; redirects to its status page."""
    filedata = request.FILES['upload']
    app_id = request.POST.get('app_id')
    version_id = request.POST.get('version_id')
    fu = handle_upload(
        filedata=filedata, user=request.user, app_id=app_id,
        version_id=version_id, addon=addon, is_standalone=is_standalone,
        is_listed=is_listed, automated=automated)
    # The redirect target depends on the context the upload arrived in.
    if addon:
        return redirect('devhub.upload_detail_for_addon', addon.slug, fu.pk)
    elif is_standalone:
        return redirect('devhub.standalone_upload_detail', fu.pk)
    else:
        return redirect('devhub.upload_detail', fu.pk, 'json')
@post_required
@dev_required
def upload_for_addon(request, addon_id, addon):
    """Upload a new file for an existing add-on (thin wrapper over upload)."""
    return upload(request, addon=addon)
@login_required
@post_required
@json_view
def upload_manifest(request):
    """Create a FileUpload from a remote manifest URL, fetched async."""
    form = forms.NewManifestForm(request.POST)
    if form.is_valid():
        upload = FileUpload.objects.create()
        tasks.fetch_manifest.delay(form.cleaned_data['manifest'], upload.pk)
        return redirect('devhub.upload_detail', upload.pk, 'json')
    else:
        error_text = _('There was an error with the submission.')
        if 'manifest' in form.errors:
            error_text = ' '.join(form.errors['manifest'])
        # Shape the form error like a validator message so the UI can
        # render it through the normal validation display.
        error_message = {'type': 'error',
                         'message': escape_all(error_text),
                         'tier': 1}
        v = {'errors': 1, 'success': False, 'messages': [error_message]}
        return {'validation': v, 'error': error_text}
@login_required
@json_view
def standalone_upload_detail(request, uuid):
    """JSON validation status for a standalone (addon-less) upload."""
    upload = get_object_or_404(FileUpload, uuid=uuid)
    detail_url = reverse('devhub.standalone_upload_detail', args=[uuid])
    return upload_validation_context(request, upload, url=detail_url)
@dev_required
@json_view
def upload_detail_for_addon(request, addon_id, addon, uuid):
    """JSON validation status for an upload tied to an existing add-on."""
    file_upload = get_object_or_404(FileUpload, uuid=uuid)
    return json_upload_detail(request, file_upload, addon_slug=addon.slug)
@dev_required(allow_editors=True)
def file_validation(request, addon_id, addon, file_id):
    """Display the validation-results page for a single file."""
    file_ = get_object_or_404(File, id=file_id)
    validate_url = reverse('devhub.json_file_validation',
                           args=[addon.slug, file_.id])
    # Link to a diff against the previously validated file when one exists.
    prev_file = ValidationAnnotator(file_).prev_file
    if prev_file:
        file_url = reverse('files.compare', args=[file_.id, prev_file.id,
                                                  'file', ''])
    else:
        file_url = reverse('files.list', args=[file_.id, 'file', ''])
    context = {'validate_url': validate_url, 'file_url': file_url,
               'file': file_, 'filename': file_.filename,
               'timestamp': file_.created, 'addon': addon,
               'automated_signing': file_.automated_signing}
    # Reviewers additionally get the annotation endpoint.
    if acl.check_addons_reviewer(request):
        context['annotate_url'] = reverse('devhub.annotate_file_validation',
                                          args=[addon.slug, file_id])
    if file_.has_been_validated:
        context['validation_data'] = file_.validation.processed_validation
    return render(request, 'devhub/validation.html', context)
@post_required
@addons_reviewer_required
@json_view
def annotate_file_validation(request, addon_id, file_id):
    """Store a reviewer annotation for one validation message of a file."""
    file_ = get_object_or_404(File, pk=file_id)
    form = forms.AnnotateFileForm(request.POST)
    if not form.is_valid():
        return {'status': 'fail',
                'errors': dict(form.errors.items())}
    message_key = ValidationComparator.message_key(
        form.cleaned_data['message'])
    updates = {'ignore_duplicates': form.cleaned_data['ignore_duplicates']}
    # One annotation per (file hash, message key) pair; update in place
    # if it already exists.
    annotation, created = ValidationAnnotation.objects.get_or_create(
        file_hash=file_.original_hash, message_key=json.dumps(message_key),
        defaults=updates)
    if not created:
        annotation.update(**updates)
    return {'status': 'ok'}
@dev_required(allow_editors=True)
def bulk_compat_result(request, addon_id, addon, result_id):
    """Show the validation page for one completed bulk-compatibility result."""
    # Only completed results are addressable.
    qs = ValidationResult.objects.exclude(completed=None)
    result = get_object_or_404(qs, pk=result_id)
    job = result.validation_job
    revalidate_url = reverse('devhub.json_bulk_compat_result',
                             args=[addon.slug, result.id])
    return _compat_result(request, revalidate_url,
                          job.application, job.target_version,
                          for_addon=result.file.version.addon,
                          validated_filename=result.file.filename,
                          validated_ts=result.completed)
def _compat_result(request, revalidate_url, target_app, target_version,
                   validated_filename=None, validated_ts=None,
                   for_addon=None):
    """Render the shared validation template for compatibility results."""
    app_trans = dict((g, unicode(a.pretty)) for g, a in amo.APP_GUIDS.items())
    # Firefox versions from 4.0 upward (version_int encoding), used to
    # build "what changed in Firefox N" links.
    ff_versions = (AppVersion.objects.filter(application=amo.FIREFOX.id,
                                             version_int__gte=4000000000000)
                   .values_list('application', 'version')
                   .order_by('version_int'))
    tpl = 'https://developer.mozilla.org/en/Firefox_%s_for_developers'
    change_links = dict()
    for app, ver in ff_versions:
        major = ver.split('.')[0]  # 4.0b3 -> 4
        change_links['%s %s' % (amo.APP_IDS[app].guid, ver)] = tpl % major
    return render(request, 'devhub/validation.html',
                  dict(validate_url=revalidate_url,
                       filename=validated_filename, timestamp=validated_ts,
                       target_app=target_app, target_version=target_version,
                       addon=for_addon, result_type='compat',
                       app_trans=app_trans, version_change_links=change_links))
@json_view
@csrf_exempt
@dev_required(allow_editors=True)
def json_file_validation(request, addon_id, addon, file_id):
    """Return a file's validation as JSON, generating it on demand via POST."""
    file = get_object_or_404(File, id=file_id)
    try:
        v_result = file.validation
    except FileValidation.DoesNotExist:
        # No stored validation yet: only a POST may trigger one.
        if request.method != 'POST':
            return http.HttpResponseNotAllowed(['POST'])
        # This API is, unfortunately, synchronous, so wait for the
        # task to complete and return the result directly.
        v_result = tasks.validate(file).get()
    return {'validation': v_result.processed_validation, 'error': None}
@json_view
@csrf_exempt
@post_required
@dev_required(allow_editors=True)
def json_bulk_compat_result(request, addon_id, addon, result_id):
    """Return a completed bulk-compatibility result as processed JSON."""
    result = get_object_or_404(ValidationResult, pk=result_id,
                               completed__isnull=False)
    validation = json.loads(result.validation)
    return {'validation': process_validation(validation), 'error': None}
@json_view
def json_upload_detail(request, upload, addon_slug=None):
    """JSON status of an upload, enriched with parse results.

    On a successful parse this adds ``beta`` and
    ``platforms_to_exclude``; a parse/permission failure is injected
    into the validation messages so the UI shows it like any other
    validation error.
    """
    addon = None
    if addon_slug:
        addon = get_object_or_404(Addon, slug=addon_slug)
    result = upload_validation_context(request, upload, addon=addon)
    plat_exclude = []
    if result['validation']:
        try:
            pkg = parse_addon(upload, addon=addon)
            if not acl.submission_allowed(request.user, pkg):
                raise django_forms.ValidationError(
                    _(u'You cannot submit this type of add-on'))
        except django_forms.ValidationError, exc:
            errors_before = result['validation'].get('errors', 0)
            # FIXME: This doesn't guard against client-side
            # tinkering.
            for i, msg in enumerate(exc.messages):
                # Simulate a validation error so the UI displays
                # it as such
                result['validation']['messages'].insert(
                    i, {'type': 'error',
                        'message': escape_all(msg), 'tier': 1,
                        'fatal': True})
                if result['validation']['ending_tier'] < 1:
                    result['validation']['ending_tier'] = 1
                result['validation']['errors'] += 1
            # Only short-circuit when the validator itself had reported
            # no errors; otherwise fall through and return everything.
            if not errors_before:
                return json_view.error(result)
        else:
            app_ids = set([a.id for a in pkg.get('apps', [])])
            supported_platforms = []
            # Mobile/Android targets map onto the Android platform.
            for app in (amo.MOBILE, amo.ANDROID):
                if app.id in app_ids:
                    supported_platforms.extend((amo.PLATFORM_ANDROID.id,))
                    app_ids.remove(app.id)
            if len(app_ids):
                # Targets any other non-mobile app:
                supported_platforms.extend(amo.DESKTOP_PLATFORMS.keys())
            s = amo.SUPPORTED_PLATFORMS.keys()
            plat_exclude = set(s) - set(supported_platforms)
            plat_exclude = [str(p) for p in plat_exclude]
            # Does the version number look like it's beta?
            result['beta'] = is_beta(pkg.get('version', ''))
    result['platforms_to_exclude'] = plat_exclude
    return result
def upload_validation_context(request, upload, addon_slug=None, addon=None,
                              url=None):
    """Assemble the common JSON payload describing an upload's validation."""
    if addon_slug and not addon:
        addon = get_object_or_404(Addon, slug=addon_slug)
    # Default the detail URL to the addon-scoped or standalone endpoint.
    if not url:
        if addon:
            url = reverse('devhub.upload_detail_for_addon',
                          args=[addon.slug, upload.uuid])
        else:
            url = reverse('devhub.upload_detail',
                          args=[upload.uuid, 'json'])
    return {
        'upload': upload.uuid,
        'validation': upload.processed_validation or '',
        'error': None,
        'url': url,
        'full_report_url': reverse('devhub.upload_detail',
                                   args=[upload.uuid]),
    }
@login_required
def upload_detail(request, uuid, format='html'):
    """Validation status of an upload, as JSON (ajax) or a full HTML page."""
    upload = get_object_or_404(FileUpload, uuid=uuid)
    if format == 'json' or request.is_ajax():
        return json_upload_detail(request, upload)
    validate_url = reverse('devhub.standalone_upload_detail',
                           args=[upload.uuid])
    # Compatibility-check uploads render through the compat template.
    if upload.compat_with_app:
        return _compat_result(request, validate_url,
                              upload.compat_with_app,
                              upload.compat_with_appver)
    context = {'validate_url': validate_url, 'filename': upload.name,
               'automated_signing': upload.automated_signing,
               'timestamp': upload.created}
    if upload.validation:
        context['validation_data'] = upload.processed_validation
    return render(request, 'devhub/validation.html', context)
class AddonDependencySearch(BaseAjaxSearch):
    """AJAX search limited to add-on types usable as dependencies."""
    # No personas.
    types = [amo.ADDON_ANY, amo.ADDON_EXTENSION, amo.ADDON_THEME,
             amo.ADDON_DICT, amo.ADDON_SEARCH, amo.ADDON_LPAPP]
@dev_required
@json_view
def ajax_dependencies(request, addon_id, addon):
    """Search add-ons usable as dependencies, excluding the add-on itself."""
    search = AddonDependencySearch(request, excluded_ids=[addon_id])
    return search.items
@dev_required
def addons_section(request, addon_id, addon, section, editable=False):
    """Display or save one section of the add-on edit page.

    ``section`` selects the form class; ``editable`` switches between
    the read-only rendering and the inline edit/save mode.
    """
    basic = addon_forms.AddonFormBasic
    models = {'basic': basic,
              'media': addon_forms.AddonFormMedia,
              'details': addon_forms.AddonFormDetails,
              'support': addon_forms.AddonFormSupport,
              'technical': addon_forms.AddonFormTechnical,
              'admin': forms.AdminForm}
    if section not in models:
        raise http.Http404()
    tags, previews, restricted_tags = [], [], []
    cat_form = dependency_form = None
    # Some sections carry extra formsets beyond the main form.
    if section == 'basic':
        tags = addon.tags.not_blacklisted().values_list('tag_text', flat=True)
        cat_form = addon_forms.CategoryFormSet(request.POST or None,
                                               addon=addon, request=request)
        restricted_tags = addon.tags.filter(restricted=True)
    elif section == 'media':
        previews = forms.PreviewFormSet(
            request.POST or None,
            prefix='files', queryset=addon.previews.all())
    elif section == 'technical':
        dependency_form = forms.DependencyFormSet(
            request.POST or None,
            queryset=addon.addons_dependencies.all(), addon=addon,
            prefix='dependencies')
    # Get the slug before the form alters it to the form data.
    valid_slug = addon.slug
    if editable:
        if request.method == 'POST':
            if section == 'license':
                form = models[section](request.POST)
            else:
                form = models[section](request.POST, request.FILES,
                                       instance=addon, request=request)
            if form.is_valid() and (not previews or previews.is_valid()):
                addon = form.save(addon)
                if previews:
                    for preview in previews.forms:
                        preview.save(addon)
                # A successful save flips the section back to read-only.
                editable = False
                if section == 'media':
                    amo.log(amo.LOG.CHANGE_ICON, addon)
                else:
                    amo.log(amo.LOG.EDIT_PROPERTIES, addon)
                valid_slug = addon.slug
            # Extra formsets are saved independently; a failure keeps the
            # section in edit mode so errors are shown.
            if cat_form:
                if cat_form.is_valid():
                    cat_form.save()
                    addon.save()
                else:
                    editable = True
            if dependency_form:
                if dependency_form.is_valid():
                    dependency_form.save()
                else:
                    editable = True
        else:
            if section == 'license':
                form = models[section]()
            else:
                form = models[section](instance=addon, request=request)
    else:
        form = False
    url_prefix = 'addons'
    data = {'addon': addon,
            'url_prefix': url_prefix,
            'form': form,
            'editable': editable,
            'tags': tags,
            'restricted_tags': restricted_tags,
            'cat_form': cat_form,
            'preview_form': previews,
            'dependency_form': dependency_form,
            'valid_slug': valid_slug}
    return render(request, 'devhub/addons/edit/%s.html' % section, data)
@never_cache
@dev_required(theme=True)
@json_view
def image_status(request, addon_id, addon, theme=False):
    """Report whether the add-on's icon and preview thumbnails exist yet."""
    # Default icon needs no checking.
    if not addon.icon_type or addon.icon_type.split('/')[0] == 'icon':
        icons = True
    # Persona icon is handled differently.
    elif addon.type == amo.ADDON_PERSONA:
        icons = True
    else:
        icons = storage.exists(os.path.join(addon.get_icon_dir(),
                                            '%s-32.png' % addon.id))
    previews = all(storage.exists(p.thumbnail_path)
                   for p in addon.previews.all())
    return {'overall': icons and previews,
            'icons': icons,
            'previews': previews}
@json_view
def ajax_upload_image(request, upload_type, addon_id=None):
    """Receive an icon/preview/persona image upload into a temp location.

    The file is written under TMP_PATH keyed by a fresh hash, then
    checked for format, animation, size and (for personas) exact pixel
    dimensions.  Returns ``{'upload_hash': ..., 'errors': [...]}``; the
    hash is blanked whenever any error was recorded.
    """
    errors = []
    upload_hash = ''
    if 'upload_image' in request.FILES:
        upload_preview = request.FILES['upload_image']
        upload_preview.seek(0)
        upload_hash = uuid.uuid4().hex
        loc = os.path.join(settings.TMP_PATH, upload_type, upload_hash)
        with storage.open(loc, 'wb') as fd:
            for chunk in upload_preview:
                fd.write(chunk)
        is_icon = upload_type == 'icon'
        is_persona = upload_type.startswith('persona_')
        check = amo.utils.ImageCheck(upload_preview)
        if (not check.is_image() or
                upload_preview.content_type not in amo.IMG_TYPES):
            if is_icon:
                errors.append(_('Icons must be either PNG or JPG.'))
            else:
                errors.append(_('Images must be either PNG or JPG.'))
        if check.is_animated():
            if is_icon:
                errors.append(_('Icons cannot be animated.'))
            else:
                errors.append(_('Images cannot be animated.'))
        # Size limits differ per upload type.
        max_size = None
        if is_icon:
            max_size = settings.MAX_ICON_UPLOAD_SIZE
        if is_persona:
            max_size = settings.MAX_PERSONA_UPLOAD_SIZE
        if max_size and upload_preview.size > max_size:
            if is_icon:
                errors.append(_('Please use images smaller than %dMB.') % (
                    max_size / 1024 / 1024 - 1))
            if is_persona:
                errors.append(_('Images cannot be larger than %dKB.') % (
                    max_size / 1024))
        if check.is_image() and is_persona:
            persona, img_type = upload_type.split('_')  # 'header' or 'footer'
            expected_size = amo.PERSONA_IMAGE_SIZES.get(img_type)[1]
            with storage.open(loc, 'rb') as fp:
                actual_size = Image.open(fp).size
            if actual_size != expected_size:
                # L10n: {0} is an image width (in pixels), {1} is a height.
                errors.append(_('Image must be exactly {0} pixels wide '
                                'and {1} pixels tall.')
                              .format(expected_size[0], expected_size[1]))
        if errors and upload_type == 'preview' and os.path.exists(loc):
            # Delete the temporary preview file in case of error.
            os.unlink(loc)
    else:
        errors.append(_('There was an error uploading your preview.'))
    if errors:
        upload_hash = ''
    return {'upload_hash': upload_hash, 'errors': errors}
@dev_required
def upload_image(request, addon_id, addon, upload_type):
    """Addon-scoped wrapper around ajax_upload_image."""
    return ajax_upload_image(request, upload_type)
@dev_required
def version_edit(request, addon_id, addon, version_id):
    """Edit one version: its metadata, its files, and app compatibility."""
    version = get_object_or_404(Version, pk=version_id, addon=addon)
    version_form = forms.VersionForm(
        request.POST or None,
        request.FILES or None,
        instance=version
    )
    new_file_form = forms.NewFileForm(request.POST or None,
                                      addon=addon, version=version,
                                      request=request)
    file_form = forms.FileFormSet(request.POST or None, prefix='files',
                                  queryset=version.files.all())
    file_history = _get_file_history(version)
    data = {'version_form': version_form, 'file_form': file_form}
    is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View')
    if addon.accepts_compatible_apps():
        # We should be in no-caching land but this one stays cached for some
        # reason.
        qs = version.apps.all().no_cache()
        compat_form = forms.CompatFormSet(request.POST or None, queryset=qs)
        data['compat_form'] = compat_form
    # Every form in `data` must validate before anything is saved.
    if (request.method == 'POST' and
            all([form.is_valid() for form in data.values()])):
        data['version_form'].save()
        data['file_form'].save()
        for deleted in data['file_form'].deleted_forms:
            file = deleted.cleaned_data['id']
            amo.log(amo.LOG.DELETE_FILE_FROM_VERSION,
                    file.filename, file.version, addon)
        if 'compat_form' in data:
            for compat in data['compat_form'].save(commit=False):
                compat.version = version
                compat.save()
            # Log any change to a "max" app version for review purposes.
            for form in data['compat_form'].forms:
                if (isinstance(form, forms.CompatForm) and
                        'max' in form.changed_data):
                    _log_max_version_change(addon, version, form.instance)
        messages.success(request, _('Changes successfully saved.'))
        return redirect('devhub.versions.edit', addon.slug, version_id)
    data.update(addon=addon, version=version, new_file_form=new_file_form,
                file_history=file_history, is_admin=is_admin)
    return render(request, 'devhub/versions/edit.html', data)
def _log_max_version_change(addon, version, appversion):
    """Record that a version's max supported app version was changed."""
    amo.log(amo.LOG.MAX_APPVERSION_UPDATED, addon, version,
            details={'version': version.version,
                     'target': appversion.version.version,
                     'application': appversion.application})
def _get_file_history(version):
    """Map each file id of ``version`` to its review-queue activity entries.

    Returns ``{file_id: [ActivityLog, ...]}``; files with no matching
    activity map to an empty list.
    """
    file_ids = [f.id for f in version.all_files]
    file_history = (ActivityLog.objects.for_addons(version.addon)
                    .filter(action__in=amo.LOG_REVIEW_QUEUE))
    files = {fid: [] for fid in file_ids}
    # NOTE: the loop variable is deliberately not named ``log`` -- that
    # would shadow the module-level logger.
    for entry in file_history:
        # Entries may or may not carry a 'files' list in their details.
        for fid in entry.details.get('files', []):
            if fid in files:  # dict-key membership: O(1) vs list scan
                files[fid].append(entry)
    return files
@dev_required
@post_required
@transaction.commit_on_success
def version_delete(request, addon_id, addon):
    """Delete a version outright, or just disable its files.

    POSTing with 'disable_version' keeps the version but marks every
    file disabled; otherwise the version is deleted entirely.
    """
    version_id = request.POST.get('version_id')
    version = get_object_or_404(Version, pk=version_id, addon=addon)
    if 'disable_version' in request.POST:
        messages.success(request, _('Version %s disabled.') % version.version)
        version.files.update(status=amo.STATUS_DISABLED)
        # Disabling files may change the add-on's overall status.
        version.addon.update_status()
    else:
        messages.success(request, _('Version %s deleted.') % version.version)
        version.delete()
    return redirect(addon.get_dev_url('versions'))
def check_validation_override(request, form, addon, version):
    """Escalate to a super-review when an admin overrides failed validation."""
    if not version or not form.cleaned_data.get('admin_override_validation'):
        return
    override_comments = _(u'This upload has failed validation, and may '
                          u'lack complete validation results. Please '
                          u'take due care when reviewing it.')
    helper = ReviewHelper(request=request, addon=addon, version=version)
    helper.set_data(dict(operating_systems='', applications='',
                         comments=override_comments))
    helper.actions['super']['method']()
def auto_sign_file(file_, is_beta=False):
    """If the file should be automatically reviewed and signed, do it.

    Three paths can sign without a human review:
      * experiment add-ons (always signed, logged for audit),
      * add-ons with automated signing whose auto validation passed,
      * beta files (always signed; pass/fail is logged for later review).
    """
    addon = file_.version.addon
    validation = file_.validation
    if file_.is_experiment:  # See bug 1220097.
        amo.log(amo.LOG.EXPERIMENT_SIGNED, file_)
        sign_file(file_, settings.PRELIMINARY_SIGNING_SERVER)
    elif addon.automated_signing and validation.passed_auto_validation:
        # Passed validation: sign automatically without manual review.
        helper = ReviewHelper(request=None, addon=addon,
                              version=file_.version)
        # Provide the file to review/sign to the helper.
        helper.set_data({'addon_files': [file_],
                         'comments': 'automatic validation'})
        helper.handler.process_preliminary(auto_validation=True)
    elif is_beta:
        # Beta won't be reviewed. They will always get signed, and logged, for
        # further review if needed.
        if validation.passed_auto_validation:
            amo.log(amo.LOG.BETA_SIGNED_VALIDATION_PASSED, file_)
        else:
            amo.log(amo.LOG.BETA_SIGNED_VALIDATION_FAILED, file_)
        # Beta files always get signed with prelim cert.
        sign_file(file_, settings.PRELIMINARY_SIGNING_SERVER)
def auto_sign_version(version, **kwargs):
    """Run auto_sign_file over every file of `version` (one per platform)."""
    for version_file in version.files.all():
        auto_sign_file(version_file, **kwargs)
@json_view
@dev_required
@post_required
def version_add(request, addon_id, addon):
    """Create a new version of an add-on from an uploaded file (JSON API).

    Validates the NewVersionForm, builds the Version from the upload, reuses
    release/approval notes from a previously rejected version with the same
    version number, auto-signs eligible files, and returns the edit-page URL.
    """
    form = forms.NewVersionForm(
        request.POST,
        request.FILES,
        addon=addon,
        request=request
    )
    if not form.is_valid():
        return json_view.error(form.errors)
    # Beta only applies to listed add-ons.
    is_beta = form.cleaned_data['beta'] and addon.is_listed
    pl = form.cleaned_data.get('supported_platforms', [])
    version = Version.from_upload(
        upload=form.cleaned_data['upload'],
        addon=addon,
        platforms=pl,
        source=form.cleaned_data['source'],
        is_beta=is_beta
    )
    rejected_versions = addon.versions.filter(
        version=version.version, files__status=amo.STATUS_DISABLED)[:1]
    if not version.releasenotes and rejected_versions:
        # Let's reuse the release and approval notes from the previous
        # rejected version.
        last_rejected = rejected_versions[0]
        version.releasenotes = amo.utils.translations_for_field(
            last_rejected.releasenotes)
        version.approvalnotes = last_rejected.approvalnotes
        version.save()
    log.info('Version created: %s for: %s' %
             (version.pk, form.cleaned_data['upload']))
    check_validation_override(request, form, addon, version)
    if (addon.status == amo.STATUS_NULL and
            form.cleaned_data['nomination_type']):
        addon.update(status=form.cleaned_data['nomination_type'])
    url = reverse('devhub.versions.edit',
                  args=[addon.slug, str(version.id)])
    # Sign all the files submitted, one for each platform.
    auto_sign_version(version, is_beta=is_beta)
    return dict(url=url)
@json_view
@dev_required
@post_required
def version_add_file(request, addon_id, addon, version_id):
    """Attach an uploaded file (e.g. for another platform) to a version.

    Returns the rendered file row fragment on success, or the form errors
    as JSON.
    """
    version = get_object_or_404(Version, pk=version_id, addon=addon)
    new_file_form = forms.NewFileForm(request.POST, request.FILES, addon=addon,
                                      version=version, request=request)
    if not new_file_form.is_valid():
        return json_view.error(new_file_form.errors)
    upload = new_file_form.cleaned_data['upload']
    # Beta only applies to listed add-ons.
    is_beta = new_file_form.cleaned_data['beta'] and addon.is_listed
    new_file = File.from_upload(upload, version,
                                new_file_form.cleaned_data['platform'],
                                is_beta, parse_addon(upload, addon))
    source = new_file_form.cleaned_data['source']
    if source:
        version.update(source=source)
    # The upload's temporary file is no longer needed once the File exists.
    storage.delete(upload.path)
    check_validation_override(request, new_file_form, addon, new_file.version)
    file_form = forms.FileFormSet(prefix='files', queryset=version.files.all())
    # Pick the formset form bound to the file we just created.
    form = [f for f in file_form.forms if f.instance == new_file]
    auto_sign_file(new_file, is_beta=is_beta)
    return render(request, 'devhub/includes/version_file.html',
                  {'form': form[0], 'addon': addon})
@dev_required
def version_list(request, addon_id, addon):
    """Paginated listing of an add-on's versions, newest first."""
    qs = addon.versions.order_by('-created').transform(Version.transformer)
    versions = amo.utils.paginate(request, qs)
    new_file_form = forms.NewVersionForm(None, addon=addon, request=request)
    is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View')
    data = {'addon': addon,
            'versions': versions,
            'new_file_form': new_file_form,
            # Review-queue position shown to the developer.
            'position': get_position(addon),
            'timestamp': int(time.time()),
            'is_admin': is_admin}
    return render(request, 'devhub/versions/list.html', data)
@dev_required
def version_bounce(request, addon_id, addon, version):
    """Redirect a version-number URL to the edit page of the newest match."""
    # Filter rather than get(): duplicate version numbers can exist, so we
    # take the most recently created one.
    matches = (Version.objects.filter(version=version, addon=addon)
               .order_by('-created'))
    if not matches:
        raise http.Http404()
    return redirect('devhub.versions.edit', addon.slug, matches[0].id)
@json_view
@dev_required
def version_stats(request, addon_id, addon):
    """Return per-version counts of reviews and files as JSON.

    Response shape: {version_id: {'id': ..., 'version': ..., 'reviews': n,
    'files': n}}.
    """
    qs = Version.objects.filter(addon=addon)
    reviews = (qs.annotate(reviews=Count('reviews'))
               .values('id', 'version', 'reviews'))
    stats = {row['id']: row for row in reviews}
    # Separate query for file counts; renamed loop variables so we no longer
    # shadow the builtin `id` or rebind the iterable while looping over it.
    file_counts = qs.annotate(files=Count('files')).values_list('id', 'files')
    for version_id, file_count in file_counts:
        stats[version_id]['files'] = file_count
    return stats
# Submission-flow position: 'current' is this page's step, 'max' is the
# furthest step recorded for the add-on (defaults to 7, the final step).
Step = collections.namedtuple('Step', 'current max')
def submit_step(outer_step):
    """Wraps the function with a decorator that bounces to the right step.

    The submission flow has 7 steps; an add-on's saved progress lives in
    SubmitStep. A request for a step beyond the saved one is redirected to
    the saved step; if no SubmitStep row exists the flow is considered done.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(request, *args, **kw):
            step = outer_step
            max_step = 7
            # We only bounce on pages with an addon id.
            if 'addon' in kw:
                addon = kw['addon']
                on_step = SubmitStep.objects.filter(addon=addon)
                if on_step:
                    max_step = on_step[0].step
                    if max_step < step:
                        # The step was too high, so bounce to the saved step.
                        return redirect(_step_url(max_step), addon.slug)
                elif step != max_step:
                    # We couldn't find a step, so we must be done.
                    return redirect(_step_url(7), addon.slug)
            kw['step'] = Step(step, max_step)
            return f(request, *args, **kw)
        # Tell @dev_required that this is a function in the submit flow so it
        # doesn't try to redirect into the submit flow.
        wrapper.submitting = True
        return wrapper
    return decorator
def _step_url(step):
url_base = 'devhub.submit'
return '%s.%s' % (url_base, step)
@login_required
@submit_step(1)
def submit(request, step):
    """Step 1: show the developer agreement; accepting it moves to step 2."""
    return render_agreement(request, 'devhub/addons/submit/start.html',
                            _step_url(2), step)
@login_required
@submit_step(2)
def submit_addon(request, step):
    """Step 2: upload the add-on package and create the Addon record."""
    if request.user.read_dev_agreement is None:
        # Must accept the dev agreement (step 1) before uploading.
        return redirect(_step_url(1))
    form = forms.NewAddonForm(
        request.POST or None,
        request.FILES or None,
        request=request
    )
    if request.method == 'POST':
        if form.is_valid():
            data = form.cleaned_data
            p = data.get('supported_platforms', [])
            is_listed = not data['is_unlisted']
            addon = Addon.from_upload(data['upload'], p, source=data['source'],
                                      is_listed=is_listed)
            AddonUser(addon=addon, user=request.user).save()
            check_validation_override(request, form, addon,
                                      addon.current_version)
            if not addon.is_listed:  # Not listed? Automatically choose queue.
                if data.get('is_sideload'):  # Full review needed.
                    addon.update(status=amo.STATUS_NOMINATED)
                else:  # Otherwise, simply do a prelim review.
                    addon.update(status=amo.STATUS_UNREVIEWED)
                # Sign all the files submitted, one for each platform.
                auto_sign_version(addon.versions.get())
            SubmitStep.objects.create(addon=addon, step=3)
            return redirect(_step_url(3), addon.slug)
    is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View')
    return render(request, 'devhub/addons/submit/upload.html',
                  {'step': step, 'new_addon_form': form, 'is_admin': is_admin})
@dev_required
@submit_step(3)
def submit_describe(request, addon_id, addon, step):
    """Step 3: name/description/categories; final step for unlisted add-ons."""
    form_cls = forms.Step3Form
    form = form_cls(request.POST or None, instance=addon, request=request)
    cat_form = addon_forms.CategoryFormSet(request.POST or None, addon=addon,
                                           request=request)
    # Categories only apply to listed add-ons, so only validate them then.
    if request.method == 'POST' and form.is_valid() and (
            not addon.is_listed or cat_form.is_valid()):
        addon = form.save(addon)
        submit_step = SubmitStep.objects.filter(addon=addon)
        if addon.is_listed:
            cat_form.save()
            submit_step.update(step=4)
            return redirect(_step_url(4), addon.slug)
        else:  # Finished for unlisted addons.
            submit_step.delete()
            signals.submission_done.send(sender=addon)
            return redirect('devhub.submit.7', addon.slug)
    return render(request, 'devhub/addons/submit/describe.html',
                  {'form': form, 'cat_form': cat_form, 'addon': addon,
                   'step': step})
@dev_required
@submit_step(4)
def submit_media(request, addon_id, addon, step):
    """Step 4: upload the add-on icon and screenshot previews."""
    form_icon = addon_forms.AddonFormMedia(
        request.POST or None,
        request.FILES or None, instance=addon, request=request)
    form_previews = forms.PreviewFormSet(
        request.POST or None,
        prefix='files', queryset=addon.previews.all())
    if (request.method == 'POST' and
            form_icon.is_valid() and form_previews.is_valid()):
        addon = form_icon.save(addon)
        for preview in form_previews.forms:
            preview.save(addon)
        SubmitStep.objects.filter(addon=addon).update(step=5)
        return redirect(_step_url(5), addon.slug)
    return render(request, 'devhub/addons/submit/media.html',
                  {'form': form_icon, 'addon': addon, 'step': step,
                   'preview_form': form_previews})
@dev_required
@submit_step(5)
def submit_license(request, addon_id, addon, step):
    """Step 5: choose a license and privacy policy."""
    fs, ctx = [], {}
    # Versions.
    license_form = forms.LicenseForm(request.POST or None, addon=addon)
    ctx.update(license_form.get_context())
    fs.append(ctx['license_form'])
    # Policy.
    policy_form = forms.PolicyForm(request.POST or None, addon=addon)
    fs.append(policy_form)
    if request.method == 'POST' and all([form.is_valid() for form in fs]):
        # NOTE(review): fs holds ctx['license_form'], which may not be the
        # same object as license_form -- confirm get_context() returns it.
        if license_form in fs:
            license_form.save(log=False)
        policy_form.save()
        SubmitStep.objects.filter(addon=addon).update(step=6)
        return redirect('devhub.submit.6', addon.slug)
    ctx.update(addon=addon, policy_form=policy_form, step=step)
    return render(request, 'devhub/addons/submit/license.html', ctx)
@dev_required
@submit_step(6)
def submit_select_review(request, addon_id, addon, step):
    """Step 6: pick the review track (full vs preliminary) and finish."""
    review_type_form = forms.ReviewTypeForm(request.POST or None)
    updated_status = None
    if request.method == 'POST' and review_type_form.is_valid():
        updated_status = review_type_form.cleaned_data['review_type']
        if updated_status:
            addon.update(status=updated_status)
            # Submission is complete: drop the saved step and notify.
            SubmitStep.objects.filter(addon=addon).delete()
            signals.submission_done.send(sender=addon)
            return redirect('devhub.submit.7', addon.slug)
    return render(request, 'devhub/addons/submit/select-review.html',
                  {'addon': addon, 'review_type_form': review_type_form,
                   'step': step})
@dev_required
@submit_step(7)
def submit_done(request, addon_id, addon, step):
    """Step 7: confirmation page; sends a welcome email for a first add-on."""
    # Bounce to the versions page if they don't have any versions.
    if not addon.versions.exists():
        return redirect(addon.get_dev_url('versions'))
    sp = addon.current_version.supported_platforms
    is_platform_specific = sp != [amo.PLATFORM_ALL]
    try:
        author = addon.authors.all()[0]
    except IndexError:
        # This should never happen.
        author = None
    if author:
        submitted_addons = (author.addons
                            .exclude(status=amo.STATUS_NULL).count())
        if submitted_addons == 1:
            # Only the author's very first add-on triggers the welcome email.
            # We can use locale-prefixed URLs because the submitter probably
            # speaks the same language by the time he/she reads the email.
            context = {
                'app': unicode(request.APP.pretty),
                'detail_url': absolutify(addon.get_url_path()),
                'version_url': absolutify(addon.get_dev_url('versions')),
                'edit_url': absolutify(addon.get_dev_url('edit')),
                'full_review': addon.status == amo.STATUS_NOMINATED
            }
            tasks.send_welcome_email.delay(addon.id, [author.email], context)
    return render(request, 'devhub/addons/submit/done.html',
                  {'addon': addon, 'step': step,
                   'is_platform_specific': is_platform_specific})
@dev_required
def submit_resume(request, addon_id, addon):
    """Jump back into the submission flow at the add-on's saved step."""
    saved_steps = SubmitStep.objects.filter(addon=addon)
    return _resume(addon, saved_steps)
def _resume(addon, step):
    """Redirect to the saved submission step, or the versions page if done."""
    if not step:
        return redirect(addon.get_dev_url('versions'))
    return redirect(_step_url(step[0].step), addon.slug)
@login_required
@dev_required
def submit_bump(request, addon_id, addon):
    """Admin tool: manually set an add-on's saved submission step."""
    if not acl.action_allowed(request, 'Admin', 'EditSubmitStep'):
        raise PermissionDenied
    step = SubmitStep.objects.filter(addon=addon)
    step = step[0] if step else None
    if request.method == 'POST' and request.POST.get('step'):
        new_step = request.POST['step']
        if step:
            step.step = new_step
        else:
            # No saved step yet: create one for this add-on.
            step = SubmitStep(addon=addon, step=new_step)
        step.save()
        return redirect(_step_url('bump'), addon.slug)
    return render(request, 'devhub/addons/submit/bump.html',
                  dict(addon=addon, step=step))
@login_required
def submit_theme(request):
    """Submit a new theme (persona).

    An invalid POST loses the uploaded image data, so the form's
    'unsaved_data' payload is stashed in the session on the first failure
    and restored on a subsequent one.
    """
    data = {}
    if request.method == 'POST':
        data = request.POST.dict()
        # Use .get(): a malformed POST without 'unsaved_data' must not 500.
        if ('unsaved_data' in request.session and
                data.get('unsaved_data') == '{}'):
            # Restore unsaved data on second invalid POST..
            data['unsaved_data'] = request.session['unsaved_data']
    form = addon_forms.ThemeForm(data=data or None,
                                 files=request.FILES or None,
                                 request=request)
    if request.method == 'POST':
        if form.is_valid():
            addon = form.save()
            return redirect('devhub.themes.submit.done', addon.slug)
        else:
            # Stored unsaved data in request.session since it gets lost on
            # second invalid POST.
            messages.error(request, _('Please check the form for errors.'))
            request.session['unsaved_data'] = data.get('unsaved_data')
    return render(request, 'devhub/personas/submit.html', dict(form=form))
@dev_required(theme=True)
def submit_theme_done(request, addon_id, addon, theme):
    """Show the theme-submitted page, or the public page once approved."""
    if addon.is_public():
        return redirect(addon.get_url_path())
    context = dict(addon=addon)
    return render(request, 'devhub/personas/submit_done.html', context)
@dev_required(theme=True)
@post_required
def remove_locale(request, addon_id, addon, theme):
    """Delete one translation locale; the default locale cannot be removed."""
    locale = request.POST.get('locale')
    if locale is None or locale == addon.default_locale:
        return http.HttpResponseBadRequest()
    addon.remove_locale(locale)
    return http.HttpResponse()
# You can only request one of the new review tracks.
# (Full review -> STATUS_PUBLIC, preliminary review -> STATUS_LITE.)
REQUEST_REVIEW = (amo.STATUS_PUBLIC, amo.STATUS_LITE)
@dev_required
@post_required
def request_review(request, addon_id, addon, status):
    """Move the add-on into the requested review queue (full or preliminary).

    `status` arrives as a string status id; it must be one the add-on is
    currently allowed to request.
    """
    status_req = int(status)
    if status_req not in addon.can_request_review():
        return http.HttpResponseBadRequest()
    elif status_req == amo.STATUS_PUBLIC:
        if addon.status == amo.STATUS_LITE:
            new_status = amo.STATUS_LITE_AND_NOMINATED
        else:
            new_status = amo.STATUS_NOMINATED
    elif status_req == amo.STATUS_LITE:
        if addon.status in (amo.STATUS_PUBLIC, amo.STATUS_LITE_AND_NOMINATED):
            new_status = amo.STATUS_LITE
        else:
            new_status = amo.STATUS_UNREVIEWED
    # NOTE(review): assumes can_request_review() only ever returns PUBLIC or
    # LITE; any other value would leave new_status unbound -- confirm.
    addon.update(status=new_status)
    msg = {amo.STATUS_LITE: _('Preliminary Review Requested.'),
           amo.STATUS_PUBLIC: _('Full Review Requested.')}
    messages.success(request, msg[status_req])
    amo.log(amo.LOG.CHANGE_STATUS, addon.get_status_display(), addon)
    return redirect(addon.get_dev_url('versions'))
@post_required
@addon_view
def admin(request, addon):
    """Admin-only configuration form for an add-on."""
    allowed = acl.action_allowed(request, 'Addons', 'Configure')
    if not allowed:
        raise PermissionDenied
    admin_form = forms.AdminForm(request, request.POST or None, instance=addon)
    if admin_form.is_valid():
        admin_form.save()
    context = {'addon': addon, 'admin_form': admin_form}
    return render(request, 'devhub/addons/edit/admin.html', context)
def docs(request, doc_name=None):
    """Redirect legacy doc URLs to MDN, or render local policy templates.

    Most developer docs now live on MDN; policy/agreement pages are only
    redirected when the matching waffle switch is active, otherwise they are
    served from local templates via `all_docs`.
    """
    mdn_docs = {
        None: '',
        'getting-started': '',
        'reference': '',
        'how-to': '',
        'how-to/getting-started': '',
        'how-to/extension-development': '#Extensions',
        'how-to/other-addons': '#Other_types_of_add-ons',
        'how-to/thunderbird-mobile': '#Application-specific',
        'how-to/theme-development': '#Themes',
        'themes': '/Themes/Background',
        'themes/faq': '/Themes/Background/FAQ',
    }
    if waffle.switch_is_active('mdn-policy-docs'):
        mdn_docs.update({
            'policies': '/AMO/Policy',
            'policies/submission': '/AMO/Policy/Submission',
            'policies/reviews': '/AMO/Policy/Reviews',
            'policies/maintenance': '/AMO/Policy/Maintenance',
            'policies/recommended': '/AMO/Policy/Featured',
            'policies/contact': '/AMO/Policy/Contact',
        })
    if waffle.switch_is_active('mdn-agreement-docs'):
        # This will most likely depend on MDN being able to protect
        # pages.
        mdn_docs.update({
            'policies/agreement': '/AMO/Policy/Agreement',
        })
    # Locally-rendered fallbacks used when the waffle switches are off.
    all_docs = ('policies',
                'policies/submission',
                'policies/reviews',
                'policies/maintenance',
                'policies/recommended',
                'policies/agreement',
                'policies/contact')
    if doc_name in mdn_docs:
        return redirect(MDN_BASE + mdn_docs[doc_name],
                        permanent=True)
    if doc_name in all_docs:
        filename = '%s.html' % doc_name.replace('/', '-')
        return render(request, 'devhub/docs/%s' % filename)
    raise http.Http404()
def search(request):
    """Render the devhub search page for the 'q' query parameter."""
    context = {'query': request.GET.get('q', '')}
    return render(request, 'devhub/devhub_search.html', context)
@login_required
@waffle_switch('signing-api')
def api_key_agreement(request):
    """Show the dev-agreement gate before the API key management page."""
    next_step = reverse('devhub.api_key')
    return render_agreement(request, 'devhub/api/agreement.html', next_step)
def render_agreement(request, template, next_step, step=None):
    """Show the dev agreement, or record acceptance and move on.

    POST marks the agreement as read and redirects to `next_step`; a GET
    only shows the agreement when the user has never accepted it.
    """
    if request.method == 'POST':
        # Stamp the acceptance time before moving the user along.
        request.user.update(read_dev_agreement=now())
    elif request.user.read_dev_agreement is None:
        return render(request, template, {'step': step})
    return redirect(next_step)
@login_required
@waffle_switch('signing-api')
@transaction.commit_on_success
def api_key(request):
    """Manage the user's JWT API credentials.

    GET shows the current key (if any); POST revokes the existing key,
    issues a fresh one, and emails the user about the change.
    """
    if request.user.read_dev_agreement is None:
        return redirect(reverse('devhub.api_key_agreement'))
    try:
        credentials = APIKey.get_jwt_key(user=request.user)
    except APIKey.DoesNotExist:
        credentials = None
    if request.method == 'POST':
        if credentials:
            log.info('JWT key was made inactive: {}'.format(credentials))
            # Old keys are deactivated rather than deleted.
            credentials.update(is_active=False)
            msg = _(
                'Your old credentials were revoked and are no longer valid. '
                'Be sure to update all API clients with the new credentials.')
            messages.success(request, msg)
        new_credentials = APIKey.new_jwt_credentials(request.user)
        log.info('new JWT key created: {}'.format(new_credentials))
        send_key_change_email(request.user.email, new_credentials.key)
        return redirect(reverse('devhub.api_key'))
    return render(request, 'devhub/api/key.html',
                  {'title': _('Manage API Keys'),
                   'credentials': credentials})
def send_key_change_email(to_email, key):
    """Email the user that a new API key now applies to their account."""
    send_mail_jinja(
        subject=_('New API key created'),
        template='devhub/email/new-key-email.ltxt',
        context={'key': key},
        from_email=settings.DEFAULT_FROM_EMAIL,
        recipient_list=[to_email],
    )
| 36.560639 | 79 | 0.631369 |
f30c0ab57361a8a5849e69a4283e5dd0f8ddf309 | 52,700 | py | Python | tracdap-runtime/python/test/tracdap_test/rt/impl/test_data.py | martin-traverse/tracdap | 2df2f08bee352f4f5188953efe5a33aa1ae51f2d | [
"Apache-2.0"
] | 1 | 2022-03-25T15:26:46.000Z | 2022-03-25T15:26:46.000Z | tracdap-runtime/python/test/tracdap_test/rt/impl/test_data.py | martin-traverse/tracdap | 2df2f08bee352f4f5188953efe5a33aa1ae51f2d | [
"Apache-2.0"
] | 1 | 2022-03-25T15:29:22.000Z | 2022-03-28T14:02:39.000Z | tracdap-runtime/python/test/tracdap_test/rt/impl/test_data.py | martin-traverse/tracdap | 2df2f08bee352f4f5188953efe5a33aa1ae51f2d | [
"Apache-2.0"
] | 1 | 2022-03-08T13:32:45.000Z | 2022-03-08T13:32:45.000Z | # Copyright 2022 Accenture Global Solutions Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime as dt
import decimal
import unittest
import sys
import math
import pandas as pd
import pyarrow as pa
import tracdap.rt.metadata as _meta
import tracdap.rt.exceptions as _ex
import tracdap.rt.impl.data as _data
import tracdap.rt.impl.util as _util
class DataMappingTest(unittest.TestCase):
    """Round-trip and edge-case tests for Arrow <-> Pandas data mapping."""
    @classmethod
    def setUpClass(cls):
        # Configure TRAC runtime logging once for the whole test class.
        _util.configure_logging()
    @staticmethod
    def sample_data():
        """One sample column (four rows) per TRAC basic type."""
        return {
            "boolean_field": [True, False, True, False],
            "integer_field": [1, 2, 3, 4],
            "float_field": [1.0, 2.0, 3.0, 4.0],
            "decimal_field": [decimal.Decimal(1.0), decimal.Decimal(2.0), decimal.Decimal(3.0), decimal.Decimal(4.0)],
            "string_field": ["hello", "world", "what's", "up"],
            "date_field": [dt.date(2000, 1, 1), dt.date(2000, 1, 2), dt.date(2000, 1, 3), dt.date(2000, 1, 4)],
            "datetime_field": [
                dt.datetime(2000, 1, 1, 0, 0, 0), dt.datetime(2000, 1, 2, 1, 1, 1),
                dt.datetime(2000, 1, 3, 2, 2, 2), dt.datetime(2000, 1, 4, 3, 3, 3)]
        }
    @staticmethod
    def sample_schema():
        """Arrow schema matching sample_data(), built from a TRAC table schema."""
        trac_schema = _meta.SchemaDefinition(
            _meta.SchemaType.TABLE,
            _meta.PartType.PART_ROOT,
            _meta.TableSchema(fields=[
                _meta.FieldSchema("boolean_field", fieldType=_meta.BasicType.BOOLEAN),
                _meta.FieldSchema("integer_field", fieldType=_meta.BasicType.INTEGER),
                _meta.FieldSchema("float_field", fieldType=_meta.BasicType.FLOAT),
                _meta.FieldSchema("decimal_field", fieldType=_meta.BasicType.DECIMAL),
                _meta.FieldSchema("string_field", fieldType=_meta.BasicType.STRING),
                _meta.FieldSchema("date_field", fieldType=_meta.BasicType.DATE),
                _meta.FieldSchema("datetime_field", fieldType=_meta.BasicType.DATETIME),
            ]))
        return _data.DataMapping.trac_to_arrow_schema(trac_schema)
    @staticmethod
    def one_field_schema(field_type: _meta.BasicType):
        """Arrow schema with a single '<type>_field' column of the given type."""
        field_name = f"{field_type.name.lower()}_field"
        trac_schema = _meta.SchemaDefinition(
            _meta.SchemaType.TABLE,
            _meta.PartType.PART_ROOT,
            _meta.TableSchema(fields=[
                _meta.FieldSchema(field_name, fieldType=field_type)]))
        return _data.DataMapping.trac_to_arrow_schema(trac_schema)
    def test_round_trip_basic(self):
        """Arrow -> Pandas -> Arrow preserves schema and data for all types."""
        sample_schema = self.sample_schema()
        sample_data = self.sample_data()
        table = pa.Table.from_pydict(sample_data, sample_schema)  # noqa
        df = _data.DataMapping.arrow_to_pandas(table)
        rt = _data.DataMapping.pandas_to_arrow(df, self.sample_schema())
        self.assertEqual(sample_schema, rt.schema)
        self.assertEqual(table, rt)
    def test_round_trip_nulls(self):
        """Round trip preserves a null in the first row of every column."""
        sample_schema = self.sample_schema()
        sample_data = self.sample_data()
        for col, values in sample_data.items():
            values[0] = None
        table = pa.Table.from_pydict(sample_data, sample_schema)  # noqa
        df = _data.DataMapping.arrow_to_pandas(table)
        rt = _data.DataMapping.pandas_to_arrow(df, self.sample_schema())
        self.assertEqual(sample_schema, rt.schema)
        self.assertEqual(table, rt)
    def test_pandas_dtypes(self):
        """Arrow -> Pandas conversion produces the expected Pandas dtypes."""
        sample_schema = self.sample_schema()
        sample_data = self.sample_data()
        table = pa.Table.from_pydict(sample_data, sample_schema)  # noqa
        df = _data.DataMapping.arrow_to_pandas(table)
        expect_dtypes = [
            pd.BooleanDtype(),
            pd.Int64Dtype(),
            # Pandas float dtype is only available from Pandas 1.2 onward, fallback is original NumPy float dtype
            pd.Float64Dtype() if "Float64Dtype" in pd.__dict__ else pd.api.types.pandas_dtype(float),
            # No special Dtype for decimals, these will just show up as objects
            pd.api.types.pandas_dtype(object),
            # Strings have a dedicated dtype!
            pd.StringDtype(),
            # Date/time types being converted as NumPy native (datetime64[ns])
            pd.to_datetime([dt.date(1970, 1, 1)]).dtype,
            pd.to_datetime([dt.datetime(1970, 1, 1)]).dtype]
        self.assertListEqual(expect_dtypes, df.dtypes.to_list())
    def test_edge_cases_integer(self):
        """Round trip at zero and the platform's signed-integer extremes."""
        schema = self.one_field_schema(_meta.BasicType.INTEGER)
        table = pa.Table.from_pydict({"integer_field": [  # noqa
            0,
            sys.maxsize,
            -sys.maxsize - 1
        ]}, schema)
        df = _data.DataMapping.arrow_to_pandas(table)
        rt = _data.DataMapping.pandas_to_arrow(df, schema)
        self.assertEqual(schema, rt.schema)
        self.assertEqual(table, rt)
    def test_edge_cases_float(self):
        """Round trip at float limits, epsilons and +/- infinity."""
        # It may be helpful to check for / prohibit inf and -inf in some places, e.g. model outputs
        # But still the data mapping should handle these values correctly if they are present
        schema = self.one_field_schema(_meta.BasicType.FLOAT)
        table = pa.Table.from_pydict({"float_field": [  # noqa
            0.0,
            sys.float_info.min,
            sys.float_info.max,
            sys.float_info.epsilon,
            -sys.float_info.epsilon,
            math.inf,
            -math.inf
        ]}, schema)
        df = _data.DataMapping.arrow_to_pandas(table)
        rt = _data.DataMapping.pandas_to_arrow(df, schema)
        self.assertEqual(schema, rt.schema)
        self.assertEqual(table, rt)
    def test_edge_cases_float_nan(self):
        """NaN survives the round trip and stays distinct from None."""
        # For NaN, a special test that checks math.isnan on the round-trip result
        # Because math.nan != math.nan
        # Also, make sure to keep the distinction between NaN and None
        schema = self.one_field_schema(_meta.BasicType.FLOAT)
        table = pa.Table.from_pydict({"float_field": [math.nan]}, schema)  # noqa
        df = _data.DataMapping.arrow_to_pandas(table)
        rt = _data.DataMapping.pandas_to_arrow(df, schema)
        self.assertEqual(schema, rt.schema)
        nan_value = rt.column(0)[0].as_py()
        self.assertIsNotNone(nan_value)
        self.assertTrue(math.isnan(nan_value))
    def test_edge_cases_decimal(self):
        """Round trip at the extremes of TRAC's decimal precision/scale."""
        # TRAC basic decimal has precision 38, scale 12
        # Should allow for 26 places before the decimal place and 12 after
        schema = self.one_field_schema(_meta.BasicType.DECIMAL)
        table = pa.Table.from_pydict({"decimal_field": [  # noqa
            decimal.Decimal(0.0),
            decimal.Decimal(1.0) * decimal.Decimal(1.0).shift(25),
            decimal.Decimal(1.0) / decimal.Decimal(1.0).shift(12),
            decimal.Decimal(-1.0) * decimal.Decimal(1.0).shift(25),
            decimal.Decimal(-1.0) / decimal.Decimal(1.0).shift(12)
        ]}, schema)
        df = _data.DataMapping.arrow_to_pandas(table)
        rt = _data.DataMapping.pandas_to_arrow(df, schema)
        self.assertEqual(schema, rt.schema)
        self.assertEqual(table, rt)
    def test_edge_cases_string(self):
        """Round trip for whitespace, punctuation and non-Latin scripts."""
        schema = self.one_field_schema(_meta.BasicType.STRING)
        table = pa.Table.from_pydict({"string_field": [  # noqa
            "", " ", "  ", "\t", "\r\n", "  \r\n  ",
            "a, b\",", "'@@'", "[\"\"%^&", "£££", "#@",
            "Olá Mundo", "你好,世界", "Привет, мир", "नमस्ते दुनिया",
            "𝜌 = ∑ 𝑃𝜓 | 𝜓 ⟩ ⟨ 𝜓 |"
        ]}, schema)
        df = _data.DataMapping.arrow_to_pandas(table)
        rt = _data.DataMapping.pandas_to_arrow(df, schema)
        self.assertEqual(schema, rt.schema)
        self.assertEqual(table, rt)
    def test_edge_cases_date(self):
        """Round trip at epoch, Y2038 and the Pandas datetime64 date limits."""
        max_pandas_date = dt.date(1970, 1, 1) + dt.timedelta(microseconds=(1 << 63) / 1000)
        min_pandas_date = dt.date(1970, 1, 1) - dt.timedelta(microseconds=(1 << 63) / 1000)
        schema = self.one_field_schema(_meta.BasicType.DATE)
        table = pa.Table.from_pydict({"date_field": [  # noqa
            dt.date(1970, 1, 1),
            dt.date(2000, 1, 1),
            dt.date(2038, 1, 20),
            max_pandas_date,
            min_pandas_date
        ]}, schema)
        df = _data.DataMapping.arrow_to_pandas(table)
        rt = _data.DataMapping.pandas_to_arrow(df, schema)
        self.assertEqual(schema, rt.schema)
        self.assertEqual(table, rt)
    def test_edge_cases_datetime(self):
        """Round trip at epoch, Y2038, fractional seconds and Pandas limits."""
        # The extra second is subtracted to prevent sub-second part overflowing the max value
        max_pandas_datetime = dt.datetime(1970, 1, 1) + dt.timedelta(microseconds=(1 << 63) / 1000 - 1000000)
        min_pandas_datetime = dt.datetime(1970, 1, 1) - dt.timedelta(microseconds=(1 << 63) / 1000 - 1000000)
        schema = self.one_field_schema(_meta.BasicType.DATETIME)
        table = pa.Table.from_pydict({"datetime_field": [  # noqa
            dt.datetime(1970, 1, 1, 0, 0, 0),
            dt.datetime(2000, 1, 1, 0, 0, 0),
            dt.datetime(2038, 1, 19, 3, 14, 8),
            # Fractional seconds before and after the epoch
            # Test fractions for both positive and negative encoded values
            dt.datetime(1972, 1, 1, 0, 0, 0, 500000),
            dt.datetime(1968, 1, 1, 23, 59, 59, 500000),
            max_pandas_datetime,
            min_pandas_datetime
        ]}, schema)
        df = _data.DataMapping.arrow_to_pandas(table)
        rt = _data.DataMapping.pandas_to_arrow(df, schema)
        self.assertEqual(schema, rt.schema)
        self.assertEqual(table, rt)
    def test_pandas_date(self):
        """A pd.Timestamp column conforms to the TRAC DATE schema."""
        df = pd.DataFrame({"date_field": [pd.Timestamp(1970, 1, 1)]})
        schema = self.one_field_schema(_meta.BasicType.DATE)
        table = _data.DataMapping.pandas_to_arrow(df, schema)
        self.assertEqual(schema, table.schema)
    def test_pandas_datetime(self):
        """A pd.Timestamp column conforms to the TRAC DATETIME schema."""
        df = pd.DataFrame({"datetime_field": [pd.Timestamp(1970, 1, 1, 12, 30, 0)]})
        schema = self.one_field_schema(_meta.BasicType.DATETIME)
        table = _data.DataMapping.pandas_to_arrow(df, schema)
        self.assertEqual(schema, table.schema)
    def test_python_date(self):
        """A native dt.date column conforms to the TRAC DATE schema."""
        df = pd.DataFrame({"date_field": [dt.date(1970, 1, 1)]})
        schema = self.one_field_schema(_meta.BasicType.DATE)
        table = _data.DataMapping.pandas_to_arrow(df, schema)
        self.assertEqual(schema, table.schema)
    def test_python_datetime(self):
        """A native dt.datetime column conforms to the TRAC DATETIME schema."""
        df = pd.DataFrame({"datetime_field": [dt.datetime(1970, 1, 1, 12, 30, 0)]})
        schema = self.one_field_schema(_meta.BasicType.DATETIME)
        table = _data.DataMapping.pandas_to_arrow(df, schema)
        self.assertEqual(schema, table.schema)
    def test_time_zone_mapping(self):
        """Time zones round-trip exactly; no implicit zone conversion occurs."""
        epoch = dt.datetime(1970, 1, 1)
        epoch_utc = dt.datetime(1970, 1, 1, tzinfo=dt.timezone.utc)
        epoch_europe_london = dt.datetime(1970, 1, 1, tzinfo=dt.timezone(dt.timedelta(hours=+1), name="Europe/London"))
        ts_no_zone = ("f", pa.timestamp("s"))
        ts_utc = ("f", pa.timestamp("s", tz="UTC"))
        ts_europe_london = ("f", pa.timestamp("s", tz="Europe/London"))
        ts_ns_europe_london = ("f", pa.timestamp("ns", tz="Europe/London"))
        def do_test(sample_val, sample_type):
            # Round-trip a one-row table and check schema/data equality.
            schema = pa.schema([sample_type])
            table = pa.Table.from_pydict({"f": [sample_val]}, schema)  # noqa
            df = _data.DataMapping.arrow_to_pandas(table)
            rt = _data.DataMapping.pandas_to_arrow(df, schema)
            self.assertEqual(schema, rt.schema)
            self.assertEqual(table, rt)
            # Also check TZ shows up correctly in the Pandas dtype
            if sample_type[1].tz:
                expected_dtype = pd.DatetimeTZDtype(tz=sample_type[1].tz)
            else:
                expected_dtype = pd.to_datetime([dt.datetime(1970, 1, 1)]).dtype
            self.assertListEqual([expected_dtype], df.dtypes.to_list())
        # Fail all time zone conversions for now
        # I.e., insist time zones match exactly and any conversion is defined in model code
        # It is easier to add auto-conversion later than remove it
        # (Or maybe just never add it, there is great potential for ambiguity)!
        do_test(epoch, ts_no_zone)
        do_test(epoch_utc, ts_utc)
        do_test(epoch_europe_london, ts_europe_london)
        do_test(epoch_europe_london, ts_ns_europe_london)
class DataConformanceTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Configure TRAC runtime logging once for the conformance tests."""
        _util.configure_logging()
def test_fields_exact_match(self):
    """A table that already matches the target schema passes conformance unchanged."""
    target = DataMappingTest.sample_schema()
    src = pa.Table.from_pydict(DataMappingTest.sample_data(), DataMappingTest.sample_schema())  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, src.schema)
    self.assertEqual(target, result.schema)

def test_fields_missing(self):
    """Conformance fails when the data is missing fields required by the schema."""
    target = DataMappingTest.sample_schema()
    src = pa.Table.from_pydict(  # noqa
        {"boolean_field": [True, False, True, False]},
        pa.schema([("boolean_field", pa.bool_())]))
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)

def test_fields_missing_near_match(self):
    """A near-miss field name (trailing underscore) does not satisfy a required field."""
    target = DataMappingTest.one_field_schema(_meta.BasicType.BOOLEAN)
    src = pa.Table.from_pydict(  # noqa
        {"boolean_field_": [True, False, True, False]},
        pa.schema([("boolean_field_", pa.bool_())]))
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)

def test_fields_extra_dropped(self):
    """Fields present in the data but not in the schema are dropped during conformance."""
    src_schema = DataMappingTest.sample_schema()
    src_schema = pa.schema([
        ("integer_field_2", pa.int64()),
        *zip(src_schema.names, src_schema.types)])
    src_data = DataMappingTest.sample_data()
    src_data["integer_field_2"] = src_data["integer_field"]
    target = DataMappingTest.sample_schema()
    src = pa.Table.from_pydict(src_data, src_schema)  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertNotEqual(target, src.schema)
    self.assertEqual(target, result.schema)

def test_fields_case_insensitive_match(self):
    """Field names are matched against the schema case-insensitively."""
    target = DataMappingTest.one_field_schema(_meta.BasicType.BOOLEAN)
    src = pa.Table.from_pydict(  # noqa
        {"BOOLeaN_fIEld": [True, False, True, False]},
        pa.schema([("BOOLeaN_fIEld", pa.bool_())]))
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertNotEqual(target, src.schema)
    self.assertEqual(target, result.schema)

def test_fields_ordering(self):
    """Fields supplied in a different order are reordered to match the schema."""
    src_schema = DataMappingTest.sample_schema()
    src_schema = pa.schema(reversed(list(zip(src_schema.names, src_schema.types))))
    target = DataMappingTest.sample_schema()
    src = pa.Table.from_pydict(DataMappingTest.sample_data(), src_schema)  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertNotEqual(target, src.schema)
    self.assertEqual(target, result.schema)

def test_fields_duplicate_in_schema(self):
    """A duplicate field name in the target schema is an error."""
    target = DataMappingTest.sample_schema()
    target = pa.schema([
        *zip(target.names, target.types),
        ("integer_field", pa.int64())])
    src = pa.Table.from_pydict(DataMappingTest.sample_data(), DataMappingTest.sample_schema())  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)

def test_fields_duplicate_in_schema_case_insensitive(self):
    """Schema field names that differ only by case still count as duplicates."""
    target = DataMappingTest.sample_schema()
    target = pa.schema([
        *zip(target.names, target.types),
        ("inTEGer_fiEld", pa.int64())])
    src = pa.Table.from_pydict(DataMappingTest.sample_data(), DataMappingTest.sample_schema())  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)

def test_fields_duplicate_in_data(self):
    """A duplicate field name in the source data is an error."""
    src_schema = DataMappingTest.sample_schema()
    src_schema = pa.schema([
        *zip(src_schema.names, src_schema.types),
        ("integer_field", pa.int64())])
    target = DataMappingTest.sample_schema()
    src = pa.Table.from_pydict(DataMappingTest.sample_data(), src_schema)  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)

def test_fields_duplicate_in_data_case_insensitive(self):
    """Data field names that differ only by case still count as duplicates."""
    src_schema = DataMappingTest.sample_schema()
    src_schema = pa.schema([
        *zip(src_schema.names, src_schema.types),
        ("iNTEgeR_FieLd", pa.int64())])
    src_data = DataMappingTest.sample_data()
    src_data["iNTEgeR_FieLd"] = src_data["integer_field"]
    target = DataMappingTest.sample_schema()
    src = pa.Table.from_pydict(src_data, src_schema)  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)
def test_fields_combined_logic(self):
    """Reordering, case-insensitive matching and type coercion all apply together."""

    # Source schema differs from the target in field order, name case,
    # and several field types (uint32 integer, ms timestamp)
    src_schema = pa.schema([
        ("decimal_field", _data.DataMapping.trac_arrow_decimal_type()),
        ("string_field", pa.utf8()),
        ("FLOAT_FIELD", pa.float64()),
        ("FLOAT_FIELD_2", pa.float64()),
        ("FLOAT_FIELD_3", pa.float64()),
        ("booLEAn_field", pa.bool_()),
        ("integer_field", pa.uint32()),
        ("DATE_FIELD", pa.date32()),
        ("datetime_field", pa.timestamp("ms", tz=None)),
    ])

    src_data = {
        "booLEAn_field": [True, False, True, False],
        "integer_field": [1, 2, 3, 4],
        "FLOAT_FIELD": [1.0, 2.0, 3.0, 4.0],
        "FLOAT_FIELD_2": [1.0, 2.0, 3.0, 4.0],
        "FLOAT_FIELD_3": [1.0, 2.0, 3.0, 4.0],
        "decimal_field": [decimal.Decimal(1.0), decimal.Decimal(2.0), decimal.Decimal(3.0), decimal.Decimal(4.0)],
        "string_field": ["hello", "world", "what's", "up"],
        "DATE_FIELD": [dt.date(2000, 1, 1), dt.date(2000, 1, 2), dt.date(2000, 1, 3), dt.date(2000, 1, 4)],
        "datetime_field": [
            dt.datetime(2000, 1, 1, 0, 0, 0), dt.datetime(2000, 1, 2, 1, 1, 1),
            dt.datetime(2000, 1, 3, 2, 2, 2), dt.datetime(2000, 1, 4, 3, 3, 3)]
    }

    target = DataMappingTest.sample_schema()
    src = pa.Table.from_pydict(src_data, src_schema)  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertNotEqual(target, src.schema)
    self.assertEqual(target, result.schema)
def test_boolean_same_type(self):
    """Boolean data in a boolean field passes through unchanged."""
    target = DataMappingTest.one_field_schema(_meta.BasicType.BOOLEAN)
    src = pa.Table.from_pydict(  # noqa
        {"boolean_field": [True, False, True, False]},
        pa.schema([("boolean_field", pa.bool_())]))
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, result.schema)
    self.assertEqual(src.column(0), result.column(0))

def test_boolean_wrong_type(self):
    """Floats and strings are not coerced to boolean."""
    target = DataMappingTest.one_field_schema(_meta.BasicType.BOOLEAN)

    src = pa.Table.from_pydict({"boolean_field": [1.0, 2.0, 3.0, 4.0]})  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)

    # Coercion does not include parsing string values
    src = pa.Table.from_pydict({"boolean_field": ["True", "False", "True", "False"]})  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)
def test_integer_same_type(self):
    """Integer data in an integer field passes through unchanged."""
    target = DataMappingTest.one_field_schema(_meta.BasicType.INTEGER)
    src = pa.Table.from_pydict({"integer_field": [1, 2, 3, 4]})  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, result.schema)
    self.assertEqual(src.column(0), result.column(0))

def test_integer_width(self):
    """Widening integer conversions succeed; narrowing fails on out-of-range values."""

    s16 = ("f", pa.int16())
    s32 = ("f", pa.int32())
    s64 = ("f", pa.int64())
    u16 = ("f", pa.uint16())
    u32 = ("f", pa.uint32())
    u64 = ("f", pa.uint64())

    def ok(value, src_type, tgt_type):
        target = pa.schema([tgt_type])
        src = pa.Table.from_pydict({"f": [value]}, pa.schema([src_type]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)

    def fail(value, src_type, tgt_type):
        target = pa.schema([tgt_type])
        src = pa.Table.from_pydict({"f": [value]}, pa.schema([src_type]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

    # 64-bit target: narrower sources always widen cleanly
    ok(2 ** 31 - 1, s32, s64)
    ok(2 ** 15 - 1, s16, s64)

    # 32-bit target: values must fit in 32 bits
    fail(2 ** 31, s64, s32)
    ok(2 ** 31 - 1, s64, s32)
    ok(2 ** 15 - 1, s16, s32)

    # 16-bit target: values must fit in 16 bits
    fail(2 ** 15, s64, s16)
    fail(2 ** 31 - 1, s32, s16)
    ok(2 ** 15 - 1, s64, s16)
    ok(2 ** 15 - 1, s32, s16)

    # Unsigned 32-bit target: same rule for unsigned widths
    fail(2 ** 32, u64, u32)
    ok(2 ** 32 - 1, u64, u32)
    ok(2 ** 16 - 1, u16, u32)

def test_integer_signedness(self):
    """Sign conversion succeeds only when every value is in range for the target."""

    s32 = ("f", pa.int32())
    s64 = ("f", pa.int64())
    u32 = ("f", pa.uint32())
    u64 = ("f", pa.uint64())

    # Unsigned -> signed is allowed while values fit the signed range
    target = pa.schema([s64])

    src = pa.Table.from_pydict({"f": [2 ** 32 - 1]}, pa.schema([u32]))  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, result.schema)

    src = pa.Table.from_pydict({"f": [2 ** 63 - 1]}, pa.schema([u64]))  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, result.schema)

    src = pa.Table.from_pydict({"f": [2 ** 63 + 1]}, pa.schema([u64]))  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)

    # Signed -> unsigned fails for negative values
    target = pa.schema([u64])
    for src_type in (s32, s64):
        src = pa.Table.from_pydict({"f": [-1]}, pa.schema([src_type]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

def test_integer_wrong_type(self):
    """Floats and strings are not coerced to integer types."""

    s64 = ("f", pa.int64())
    u32 = ("f", pa.uint32())
    f64 = ("f", pa.float64())
    utf8 = ("f", pa.utf8())

    for tgt_type in (s64, u32):
        target = pa.schema([tgt_type])

        src = pa.Table.from_pydict({"f": [1.0, 2.0, 3.0, 4.9]}, pa.schema([f64]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

        src = pa.Table.from_pydict({"f": ["1", "2", "3", "4"]}, pa.schema([utf8]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)
def test_float_same_type(self):
    """Float data passes through unchanged when the width already matches."""
    for field in (("f", pa.float32()), ("f", pa.float64())):
        target = pa.schema([field])
        src = pa.Table.from_pydict({"f": [1.0, 2.0, 3.0, 4.0]}, pa.schema([field]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)
        self.assertEqual(src.column(0), result.column(0))

def test_float_width(self):
    """float32 -> float64 widening is allowed; narrowing fails on out-of-range values."""
    f32 = ("f", pa.float32())
    f64 = ("f", pa.float64())

    target = pa.schema([f64])
    src = pa.Table.from_pydict({"f": [1.0, 2.0, 3.0, 4.0]}, pa.schema([f32]))  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, result.schema)

    # 2 ** 100 is out of range for a 32-bit float
    target = pa.schema([f32])
    src = pa.Table.from_pydict({"f": [2.0 ** 100]}, pa.schema([f64]))  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)

def test_float_from_integer_types(self):
    """Integer values convert to float when the target type can represent them."""
    f32 = ("f", pa.float32())
    f64 = ("f", pa.float64())
    s64 = ("f", pa.int64())
    u64 = ("f", pa.uint64())
    s8 = ("f", pa.int8())
    u8 = ("f", pa.uint8())

    target = pa.schema([f64])

    src = pa.Table.from_pydict({"f": [2 ** 53]}, pa.schema([s64]))  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, result.schema)

    src = pa.Table.from_pydict({"f": [255]}, pa.schema([u8]))  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, result.schema)

    target = pa.schema([f32])

    src = pa.Table.from_pydict({"f": [2 ** 24]}, pa.schema([u64]))  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, result.schema)

    src = pa.Table.from_pydict({"f": [-128]}, pa.schema([s8]))  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, result.schema)

def test_float_wrong_type(self):
    """Decimals and strings are not coerced to float."""
    dec = ("f", pa.decimal128(6, 3))
    utf8 = ("f", pa.utf8())

    for tgt_type in (("f", pa.float32()), ("f", pa.float64())):
        target = pa.schema([tgt_type])

        src = pa.Table.from_pydict({"f": [decimal.Decimal(1.0)]}, pa.schema([dec]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

        src = pa.Table.from_pydict({"f": ["1.0"]}, pa.schema([utf8]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)
def test_decimal_same_type(self):
    """Decimal data passes through unchanged when precision, scale and width match."""
    decimal_types = [
        ("f", pa.decimal128(6, 3)),
        ("f", pa.decimal128(38, 12)),
        ("f", pa.decimal256(50, 15))]

    for field in decimal_types:
        target = pa.schema([field])
        src = pa.Table.from_pydict({"f": [decimal.Decimal(1.0)]}, pa.schema([field]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)
        self.assertEqual(src.column(0), result.column(0))

def test_decimal_precision_and_scale_128(self):
    """Precision / scale conversion rules for 128-bit decimals."""
    dec1 = ("f", pa.decimal128(8, 2))
    dec2 = ("f", pa.decimal128(10, 4))
    dec3 = ("f", pa.decimal128(8, 4))
    self._test_decimal_precision_and_scale(dec1, dec2, dec3)

def test_decimal_precision_and_scale_256(self):
    """Precision / scale conversion rules for 256-bit decimals."""
    dec1 = ("f", pa.decimal256(8, 2))
    dec2 = ("f", pa.decimal256(10, 4))
    dec3 = ("f", pa.decimal256(8, 4))
    self._test_decimal_precision_and_scale(dec1, dec2, dec3)

def _test_decimal_precision_and_scale(self, dec1, dec2, dec3):
    # Shared checks: dec1 = (8, 2), dec2 = (10, 4), dec3 = (8, 4)

    def ok(value, src_type, tgt_type):
        target = pa.schema([tgt_type])
        src = pa.Table.from_pydict({"f": [decimal.Decimal(value)]}, pa.schema([src_type]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)

    def fail(value, src_type, tgt_type):
        target = pa.schema([tgt_type])
        src = pa.Table.from_pydict({"f": [decimal.Decimal(value)]}, pa.schema([src_type]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

    # Target (8, 2): conversion succeeds from both source shapes
    ok("100000.0001", dec2, dec1)
    ok("1000.0001", dec3, dec1)

    # Target (10, 4): conversion succeeds from both source shapes
    ok("100000.01", dec1, dec2)
    ok("1000.0001", dec3, dec2)

    # Target (8, 4): values whose magnitude exceeds the target range fail
    fail("100000.01", dec1, dec3)
    fail("100000.0001", dec2, dec3)
def test_decimal_128_256(self):
    """Conversion between 128- and 256-bit decimals depends on range, not bit width."""

    d128_1 = ("f", pa.decimal128(8, 2))
    d128_2 = ("f", pa.decimal128(8, 4))
    d256_1 = ("f", pa.decimal256(8, 2))
    d256_2 = ("f", pa.decimal256(8, 4))

    def ok(value, src_type, tgt_type):
        target = pa.schema([tgt_type])
        src = pa.Table.from_pydict({"f": [decimal.Decimal(value)]}, pa.schema([src_type]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)

    def fail(value, src_type, tgt_type):
        target = pa.schema([tgt_type])
        src = pa.Table.from_pydict({"f": [decimal.Decimal(value)]}, pa.schema([src_type]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

    # Allow conversion 128 <-> 256 if precision and scale are the same
    ok("100000.01", d256_1, d128_1)
    ok("100000.01", d128_1, d256_1)

    # Allow conversion 128 <-> 256 if the target can hold larger numbers
    # (max_exp = precision - scale - 1)
    ok("1000.01", d256_2, d128_1)
    ok("1000.01", d128_2, d256_1)

    # Do not allow conversion in either direction if the source can hold larger numbers
    fail("100000.01", d256_1, d128_2)
    fail("100000.01", d128_1, d256_2)
def test_decimal_from_numeric_types(self):
    """Floats and integers convert to decimal while values fit the target type."""

    dec1 = ("f", pa.decimal128(38, 12))
    dec2 = ("f", pa.decimal256(50, 15))
    dec3 = ("f", pa.decimal128(6, 2))
    dec4 = ("f", pa.decimal256(6, 2))

    f64 = ("f", pa.float64())
    s64 = ("f", pa.int64())

    # Wide decimal targets accept large / high-precision numeric values
    for tgt_type in (dec1, dec2):
        target = pa.schema([tgt_type])

        src = pa.Table.from_pydict({"f": [2.0 ** 32, 1.0 + 1 * 2.0 ** -15]}, pa.schema([f64]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)

        src = pa.Table.from_pydict({"f": [2 ** 32, -1 * 2 ** 32]}, pa.schema([s64]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)

    # Narrow (6, 2) decimal targets reject values of this magnitude
    for tgt_type in (dec3, dec4):
        target = pa.schema([tgt_type])

        src = pa.Table.from_pydict({"f": [2.0 ** 32, -1 * 2.0 ** 32]}, pa.schema([f64]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

        src = pa.Table.from_pydict({"f": [2 ** 32, -1 * 2 ** 32]}, pa.schema([s64]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

def test_decimal_wrong_type(self):
    """Strings and dates are not coerced to decimal."""

    for tgt_type in (("f", pa.decimal128(38, 12)), ("f", pa.decimal256(50, 15))):
        target = pa.schema([tgt_type])

        src = pa.Table.from_pydict({"f": ["1.0", "2.0"]}, pa.schema([("f", pa.utf8())]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

        src = pa.Table.from_pydict({"f": [dt.date(2000, 1, 1)]}, pa.schema([("f", pa.date32())]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)
def test_string_same_type(self):
    """String data passes through unchanged when the storage type matches."""
    for field in (("f", pa.utf8()), ("f", pa.large_utf8())):
        target = pa.schema([field])
        src = pa.Table.from_pydict({"f": ["hello", "world"]}, pa.schema([field]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)
        self.assertEqual(src.column(0), result.column(0))

def test_string_small_to_large(self):
    """utf8 -> large_utf8 is allowed; the reverse direction is not."""
    str_small = ("f", pa.utf8())
    str_large = ("f", pa.large_utf8())

    # utf8 -> large utf8 is allowed
    target = pa.schema([str_large])
    src = pa.Table.from_pydict({"f": ["hello", "world"]}, pa.schema([str_small]))  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, result.schema)

    # large utf8 -> utf8 is not allowed
    target = pa.schema([str_small])
    src = pa.Table.from_pydict({"f": ["hello", "world"]}, pa.schema([str_large]))  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)

def test_string_wrong_type(self):
    """Integers and dates are not coerced to string."""
    for tgt_type in (("f", pa.utf8()), ("f", pa.large_utf8())):
        target = pa.schema([tgt_type])

        src = pa.Table.from_pydict({"f": [1, 2, 3, 4]}, pa.schema([("f", pa.int64())]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

        src = pa.Table.from_pydict({"f": [dt.date(2000, 1, 1)]}, pa.schema([("f", pa.date32())]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)
def test_date_same_type(self):
    """Date data passes through unchanged when the width already matches."""
    for field in (("f", pa.date32()), ("f", pa.date64())):
        target = pa.schema([field])
        src = pa.Table.from_pydict({"f": [dt.date(2000, 1, 1)]}, pa.schema([field]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)
        self.assertEqual(src.column(0), result.column(0))

def test_date_32_64(self):
    """date32 -> date64 up-cast is allowed; the down-cast is not."""
    d32 = ("f", pa.date32())
    d64 = ("f", pa.date64())

    # Up-cast 32-bit day to 64-bit date (millisecond) is allowed, no loss of data or precision
    target = pa.schema([d64])
    src = pa.Table.from_pydict({"f": [dt.date(2000, 1, 1)]}, pa.schema([d32]))  # noqa
    result = _data.DataConformance.conform_to_schema(src, target)
    self.assertEqual(target, result.schema)

    # Down-cast is not allowed, could lose data and/or precision
    target = pa.schema([d32])
    src = pa.Table.from_pydict({"f": [dt.date(2000, 1, 1)]}, pa.schema([d64]))  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)

def test_date_from_pandas(self):
    """Pandas date data conforms to both date widths via DataMapping."""
    df = pd.DataFrame({"f": pd.to_datetime([dt.date(2001, 1, 1)])})

    # Conformance is applied inside DataMapping, allowing for conversion
    # of NumPy-native dates held as datetime64[ns]
    for field in (("f", pa.date32()), ("f", pa.date64())):
        target = pa.schema([field])
        result = _data.DataMapping.pandas_to_arrow(df, target)
        self.assertEqual(target, result.schema)

def test_date_wrong_type(self):
    """Strings, integers and timestamps are not coerced to date."""
    d32 = ("f", pa.date32())
    d64 = ("f", pa.date64())

    target = pa.schema([d32])
    src = pa.Table.from_pydict({"f": ["2000-01-01"]}, pa.schema([("f", pa.utf8())]))  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)
    src = pa.Table.from_pydict({"f": [1, 2, 3, 4]}, pa.schema([("f", pa.int32())]))  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)
    # By default, do not down-cast timestamps as dates (only allowed as a special case for Pandas data)
    src = pa.Table.from_pydict({"f": [dt.datetime(2001, 1, 1)]}, pa.schema([("f", pa.timestamp("ns"))]))  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)

    target = pa.schema([d64])
    src = pa.Table.from_pydict({"f": ["2000-01-01"]}, pa.schema([("f", pa.utf8())]))  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)
    src = pa.Table.from_pydict({"f": [1, 2, 3, 4]}, pa.schema([("f", pa.int64())]))  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)
    # By default, do not down-cast timestamps as dates (only allowed as a special case for Pandas data)
    src = pa.Table.from_pydict({"f": [dt.datetime(2001, 1, 1)]}, pa.schema([("f", pa.timestamp("ns"))]))  # noqa
    with self.assertRaises(_ex.EDataConformance):
        _data.DataConformance.conform_to_schema(src, target)
def test_timestamp_same_type(self):
    """Timestamp data passes through unchanged when the unit already matches."""

    def check_passthrough(field):
        target = pa.schema([field])
        src = pa.Table.from_pydict({"f": [dt.datetime(2000, 1, 1, 0, 0, 0)]}, pa.schema([field]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)
        self.assertEqual(src.column(0), result.column(0))

    for unit in ("s", "ms", "us", "ns"):
        check_passthrough(("f", pa.timestamp(unit)))

def test_timestamp_units(self):
    """Check the precision / range trade-off when converting between timestamp units."""

    int64_max = (1 << 63) - 1
    epoch = dt.datetime(1970, 1, 1)

    # Extreme values representable as Python datetimes, per source unit
    s_max = dt.datetime.max - dt.timedelta(seconds=1)
    s_min = dt.datetime.min + dt.timedelta(seconds=1)
    ms_max = dt.datetime.max - dt.timedelta(milliseconds=1)
    ms_min = dt.datetime.min + dt.timedelta(milliseconds=1)
    us_max = dt.datetime.max - dt.timedelta(microseconds=1)
    us_min = dt.datetime.min + dt.timedelta(microseconds=1)
    ns_max = epoch + dt.timedelta(microseconds=(int64_max / 1000) / 2)
    ns_min = epoch - dt.timedelta(microseconds=(int64_max / 1000) / 2)

    ts_s = ("f", pa.timestamp("s"))
    ts_ms = ("f", pa.timestamp("ms"))
    ts_us = ("f", pa.timestamp("us"))
    ts_ns = ("f", pa.timestamp("ns"))

    def convert_ok(min_val, max_val, src_type, tgt_type):
        target = pa.schema([tgt_type])
        src = pa.Table.from_pydict({"f": [min_val, max_val]}, pa.schema([src_type]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)
        # Quick sanity check to make sure values are not being corrupted.
        # Exact equality is lost to rounding, but the calendar date must survive -
        # this guards against wrap-around, zero outputs and other severe failures
        converted = result.column(0)[0].as_py()
        self.assertEqual(min_val.year, converted.year)
        self.assertEqual(min_val.month, converted.month)
        self.assertEqual(min_val.day, converted.day)

    def convert_fail(min_val, max_val, src_type, tgt_type):
        target = pa.schema([tgt_type])
        src = pa.Table.from_pydict({"f": [min_val, max_val]}, pa.schema([src_type]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

    # Casting to a less precise timestamp type should always be ok
    # Less precision -> more range
    convert_ok(ns_min, ns_max, ts_ns, ts_us)
    convert_ok(ns_min, ns_max, ts_ns, ts_ms)
    convert_ok(ns_min, ns_max, ts_ns, ts_s)
    convert_ok(us_min, us_max, ts_us, ts_ms)
    convert_ok(us_min, us_max, ts_us, ts_s)
    convert_ok(ms_min, ms_max, ts_ms, ts_s)

    # Casting to a more precise type will fail if the value is out of range for the target type
    # More precision -> less range
    # However, since the Python date-time type is limited to the years 1 - 9999,
    # it is only possible to create out-of-range test values for nanosecond target types
    convert_fail(us_min, us_max, ts_us, ts_ns)
    convert_fail(ms_min, ms_max, ts_ms, ts_ns)
    convert_fail(s_min, s_max, ts_s, ts_ns)

    # However, coercion should succeed if all values are inside the range of the target type
    convert_ok(epoch, epoch, ts_us, ts_ns)
    convert_ok(epoch, epoch, ts_ms, ts_ns)
    convert_ok(epoch, epoch, ts_s, ts_ns)

    # To test these out-of-range conversions we would need Arrow source vectors
    # with timestamps outside the range of the Python datetime object
    # convert_fail(ms_min, ms_max, ts_ms, ts_us)
    # convert_fail(s_min, s_max, ts_s, ts_us)
    # convert_fail(s_min, s_max, ts_s, ts_ms)
def test_timestamp_zones(self):
    """Time zone conversion is never attempted - zones must match exactly."""

    epoch = dt.datetime(1970, 1, 1)
    epoch_utc = dt.datetime(1970, 1, 1, tzinfo=dt.timezone.utc)
    epoch_plus_zero = dt.datetime(1970, 1, 1, tzinfo=dt.timezone(dt.timedelta(hours=0)))
    epoch_plus_one = dt.datetime(1970, 1, 1, tzinfo=dt.timezone(dt.timedelta(hours=+1)))

    ts_no_zone = ("f", pa.timestamp("s"))
    ts_utc = ("f", pa.timestamp("s", tz="UTC"))
    ts_plus_one = ("f", pa.timestamp("s", tz="+01:00"))
    ts_europe_london = ("f", pa.timestamp("s", tz="Europe/London"))
    ts_ns_europe_london = ("f", pa.timestamp("ns", tz="Europe/London"))

    def convert_ok(src_val, src_type, tgt_type):
        target = pa.schema([tgt_type])
        src = pa.Table.from_pydict({"f": [src_val]}, pa.schema([src_type]))  # noqa
        result = _data.DataConformance.conform_to_schema(src, target)
        self.assertEqual(target, result.schema)

    def convert_fail(src_val, src_type, tgt_type):
        target = pa.schema([tgt_type])
        src = pa.Table.from_pydict({"f": [src_val]}, pa.schema([src_type]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

    # Fail all time zone conversions for now:
    # insist that zones match exactly and leave any conversion to model code.
    # It is easier to add auto-conversion later than to remove it
    # (or maybe just never add it, there is great potential for ambiguity)!
    convert_fail(epoch, ts_no_zone, ts_utc)
    convert_fail(epoch, ts_no_zone, ts_plus_one)
    convert_fail(epoch, ts_no_zone, ts_europe_london)
    convert_fail(epoch_utc, ts_utc, ts_no_zone)
    convert_fail(epoch_plus_one, ts_plus_one, ts_no_zone)
    convert_fail(epoch_plus_zero, ts_europe_london, ts_no_zone)
    convert_fail(epoch_utc, ts_utc, ts_plus_one)
    convert_fail(epoch_utc, ts_utc, ts_europe_london)
    convert_fail(epoch_plus_one, ts_plus_one, ts_utc)
    convert_fail(epoch_plus_one, ts_plus_one, ts_europe_london)
    convert_fail(epoch_plus_zero, ts_europe_london, ts_utc)
    convert_fail(epoch_plus_zero, ts_europe_london, ts_plus_one)

    # Make sure that precision conversion still works when a zone is set
    convert_ok(epoch, ts_ns_europe_london, ts_europe_london)

def test_timestamp_from_pandas(self):
    """Pandas datetime64[ns] data conforms to every timestamp unit via DataMapping."""
    df = pd.DataFrame({"f": pd.to_datetime([dt.datetime(2001, 1, 1, 3, 15, 23, 500000)])})

    # Conformance is applied inside DataMapping, allowing for conversion
    # of NumPy-native dates held as datetime64[ns]
    for unit in ("s", "ms", "us", "ns"):
        target = pa.schema([("f", pa.timestamp(unit))])
        result = _data.DataMapping.pandas_to_arrow(df, target)
        self.assertEqual(target, result.schema)

def test_timestamp_wrong_type(self):
    """Strings, integers and dates are not coerced to any timestamp unit."""

    for unit in ("s", "ms", "us", "ns"):
        target = pa.schema([("f", pa.timestamp(unit))])

        src = pa.Table.from_pydict({"f": ["2000-01-01T00:00:00"]}, pa.schema([("f", pa.utf8())]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

        src = pa.Table.from_pydict({"f": [1, 2, 3, 4]}, pa.schema([("f", pa.int32())]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)

        src = pa.Table.from_pydict({"f": [dt.date(2001, 1, 1)]}, pa.schema([("f", pa.date32())]))  # noqa
        with self.assertRaises(_ex.EDataConformance):
            _data.DataConformance.conform_to_schema(src, target)
# Allow this test module to be run directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| 42.775974 | 135 | 0.634611 |
866b4a097697d68fc98f3175fb0e72c801ba6e29 | 7,058 | py | Python | vision/ssd/ssd.py | SoonminHwang/pytorch-ssd | 1d6b9427a4b649bc2ce85a82511b9dd299f9d3e8 | [
"MIT"
] | null | null | null | vision/ssd/ssd.py | SoonminHwang/pytorch-ssd | 1d6b9427a4b649bc2ce85a82511b9dd299f9d3e8 | [
"MIT"
] | null | null | null | vision/ssd/ssd.py | SoonminHwang/pytorch-ssd | 1d6b9427a4b649bc2ce85a82511b9dd299f9d3e8 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch
import numpy as np
from typing import List, Tuple
import torch.nn.functional as F
from ..utils import box_utils
# from ..utils import box_utils_numpy as box_utils
from collections import namedtuple
# s0: index into base_net; name: attribute on that layer holding a sub-module
# sequence; s1: split index within that sequence (see SSD.forward).
GraphPath = namedtuple("GraphPath", ['s0', 'name', 's1'])
class SSD(nn.Module):
    def __init__(self, num_classes: int, base_net: nn.ModuleList, source_layer_indexes: List[int],
                 extras: nn.ModuleList, classification_headers: nn.ModuleList,
                 regression_headers: nn.ModuleList, is_test=False, config=None, device=None):
        """Compose a SSD model using the given components.

        Args:
            num_classes: number of output classes per anchor.
            base_net: backbone feature extractor as a flat ModuleList.
            source_layer_indexes: points in ``base_net`` where feature maps are
                tapped for prediction heads.  Each entry may be a plain int, an
                ``(index, layer)`` tuple (the extra layer transforms the tapped
                features), or a ``GraphPath`` selecting a split inside a layer.
            extras: layers appended after the backbone; each one also feeds a head.
            classification_headers: one classification head per tapped scale.
            regression_headers: one box-regression head per tapped scale.
            is_test: stored but unused at runtime; inference behaviour is driven
                by ``self.training`` in ``forward`` (see commented-out check there).
            config: must provide ``priors``, ``center_variance`` and
                ``size_variance``.  NOTE(review): despite the ``config=None``
                default, ``config.priors`` is accessed unconditionally below, so
                ``None`` would raise — confirm callers always pass a config.
            device: target device; defaults to CUDA when available.
        """
        super(SSD, self).__init__()

        self.num_classes = num_classes
        self.base_net = base_net
        self.source_layer_indexes = source_layer_indexes
        self.extras = extras
        self.classification_headers = classification_headers
        self.regression_headers = regression_headers
        self.is_test = is_test
        self.config = config

        # register layers in source_layer_indexes by adding them to a module list
        # (only the plain-tuple entries carry an extra layer; GraphPath entries do not)
        self.source_layer_add_ons = nn.ModuleList([t[1] for t in source_layer_indexes
                                                   if isinstance(t, tuple) and not isinstance(t, GraphPath)])
        # self.source_layer_add_ons = source_layer_indexes[0][1]
        if device:
            self.device = device
        else:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # if is_test:
        self.priors = config.priors.to(self.device)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # NOTE(review): in eval mode this actually returns a 3-tuple
        # (confidences, locations, boxes); the annotation reflects train mode.
        confidences = []
        locations = []
        start_layer_index = 0
        header_index = 0
        # NOTE(review): the post-loop use of end_layer_index assumes
        # source_layer_indexes is non-empty.
        for ii, end_layer_index in enumerate(self.source_layer_indexes):
            # check & prepare path: the three entry kinds (GraphPath / tuple / int)
            # decide whether an extra layer or an intra-layer split applies.
            if isinstance(end_layer_index, GraphPath):
                path = end_layer_index
                end_layer_index = end_layer_index.s0
                added_layer = None
            elif isinstance(end_layer_index, tuple):
                # added_layer = end_layer_index[1]
                added_layer = self.source_layer_add_ons[ii]
                end_layer_index = end_layer_index[0]
                path = None
            else:
                added_layer = None
                path = None
            # forward until source_layer_add_on
            for layer in self.base_net[start_layer_index: end_layer_index]:
                x = layer(x)
            # forward added_layer, if exists.  `y` is what the prediction head sees.
            if added_layer:
                y = added_layer(x)
            else:
                y = x
            if path:
                # Split layer `end_layer_index` at path.s1: the head consumes the
                # features after the first half; the trunk continues through the rest.
                sub = getattr(self.base_net[end_layer_index], path.name)
                for layer in sub[:path.s1]:
                    x = layer(x)
                y = x
                for layer in sub[path.s1:]:
                    x = layer(x)
                end_layer_index += 1
            start_layer_index = end_layer_index
            confidence, location = self.compute_header(header_index, y)
            header_index += 1
            confidences.append(confidence)
            locations.append(location)
        # Finish the remaining backbone layers, then run the extras; every extra
        # layer's output feeds one more prediction head.
        for layer in self.base_net[end_layer_index:]:
            x = layer(x)
        for layer in self.extras:
            x = layer(x)
            confidence, location = self.compute_header(header_index, x)
            header_index += 1
            confidences.append(confidence)
            locations.append(location)
        # Concatenate over the anchor dimension across all scales.
        confidences = torch.cat(confidences, 1)
        locations = torch.cat(locations, 1)
        # if self.is_test:
        if not self.training:
            # Eval mode: decode raw offsets into corner-form boxes as well.
            confidences = F.softmax(confidences, dim=2)
            boxes = box_utils.convert_locations_to_boxes(
                locations, self.priors, self.config.center_variance, self.config.size_variance
            )
            boxes = box_utils.center_form_to_corner_form(boxes)
            return confidences, locations, boxes
        else:
            return confidences, locations

    def compute_header(self, i, x):
        """Apply the i-th heads to feature map x, reshaping to (N, anchors, C) / (N, anchors, 4)."""
        confidence = self.classification_headers[i](x)
        confidence = confidence.permute(0, 2, 3, 1).contiguous()
        confidence = confidence.view(confidence.size(0), -1, self.num_classes)

        location = self.regression_headers[i](x)
        location = location.permute(0, 2, 3, 1).contiguous()
        location = location.view(location.size(0), -1, 4)

        return confidence, location

    def init_from_base_net(self, model):
        """Load backbone weights from disk; re-initialize extras and heads."""
        self.base_net.load_state_dict(torch.load(model, map_location=lambda storage, loc: storage), strict=True)
        # self.source_layer_add_ons.apply(_xavier_init_)
        self.extras.apply(_xavier_init_)
        self.classification_headers.apply(_xavier_init_)
        self.regression_headers.apply(_xavier_init_)

    def init_from_pretrained_ssd(self, model):
        """Load a full pretrained SSD, dropping its heads so they can be re-initialized."""
        state_dict = torch.load(model, map_location=lambda storage, loc: storage)
        # Keep everything except the prediction heads (class count may differ).
        state_dict = {k: v for k, v in state_dict.items() if not (k.startswith("classification_headers") or k.startswith("regression_headers"))}
        model_dict = self.state_dict()
        model_dict.update(state_dict)
        self.load_state_dict(model_dict)
        self.classification_headers.apply(_xavier_init_)
        self.regression_headers.apply(_xavier_init_)

    def init(self):
        """Xavier-initialize every component from scratch."""
        self.base_net.apply(_xavier_init_)
        # self.source_layer_add_ons.apply(_xavier_init_)
        self.extras.apply(_xavier_init_)
        self.classification_headers.apply(_xavier_init_)
        self.regression_headers.apply(_xavier_init_)

    def load(self, model):
        """Load a full state dict from `model` (path), mapped to CPU first."""
        self.load_state_dict(torch.load(model, map_location=lambda storage, loc: storage))

    def save(self, model_path):
        """Persist the full state dict to `model_path`."""
        torch.save(self.state_dict(), model_path)
class MatchPrior(object):
    """Matches ground-truth boxes to prior boxes and encodes them as SSD regression targets."""

    def __init__(self, center_form_priors, center_variance, size_variance, iou_threshold):
        # Keep the priors in both representations: corner form for IoU matching,
        # center form for encoding regression offsets.
        self.center_form_priors = center_form_priors
        self.corner_form_priors = box_utils.center_form_to_corner_form(center_form_priors)
        self.center_variance = center_variance
        self.size_variance = size_variance
        self.iou_threshold = iou_threshold

    def __call__(self, gt_boxes, gt_labels):
        """Return (locations, labels): encoded offsets and matched class labels per prior."""
        # Accept NumPy inputs.  The exact `type(...) is` check mirrors the
        # original behaviour (ndarray subclasses are not converted).
        if type(gt_boxes) is np.ndarray:
            gt_boxes = torch.from_numpy(gt_boxes).float()
        if type(gt_labels) is np.ndarray:
            gt_labels = torch.from_numpy(gt_labels)

        matched_boxes, matched_labels = box_utils.assign_priors(
            gt_boxes, gt_labels, self.corner_form_priors, self.iou_threshold)
        center_boxes = box_utils.corner_form_to_center_form(matched_boxes)
        locations = box_utils.convert_boxes_to_locations(
            center_boxes, self.center_form_priors, self.center_variance, self.size_variance)
        return locations, matched_labels
def _xavier_init_(m: nn.Module):
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
| 39.651685 | 144 | 0.634174 |
1a83fe649ba4b6f87d554e5b4e5202d6501cb612 | 1,000 | py | Python | dashboard/models/UserProfiles.py | PyFlux/PyFlux | 8abae10261e276bf4942aed8d54ef3b5498754ca | [
"Apache-2.0"
] | null | null | null | dashboard/models/UserProfiles.py | PyFlux/PyFlux | 8abae10261e276bf4942aed8d54ef3b5498754ca | [
"Apache-2.0"
] | 10 | 2020-03-24T17:09:56.000Z | 2021-12-13T20:00:15.000Z | dashboard/models/UserProfiles.py | PyFlux/PyFlux-Django-Html | 8abae10261e276bf4942aed8d54ef3b5498754ca | [
"Apache-2.0"
] | null | null | null | from django.db import models
class UserProfiles(models.Model):
    """Extended profile record for a user, stored in ``dashboard_user_profiles``."""

    class Meta:
        # Explicit quoted table name instead of Django's default app_model name.
        db_table = '"dashboard_user_profiles"'

    # Reference to the owning user kept as a plain integer (no ForeignKey /
    # DB-level constraint is declared here).
    user_id = models.BigIntegerField(null=True)
    first_name = models.CharField(max_length=128)
    middle_name = models.CharField(max_length=128, null=True)
    last_name = models.CharField(max_length=128, null=True)
    # Address fields are all optional free text.
    address = models.CharField(max_length=255, null=True)
    street = models.CharField(max_length=128, null=True)
    city = models.CharField(max_length=128, null=True)
    state = models.CharField(max_length=128, null=True)
    zip = models.CharField(max_length=128, null=True)
    # NOTE(review): default 'test' looks like a placeholder path/identifier — confirm.
    media = models.CharField(max_length=255, null=True, default='test')
    # NOTE(review): status semantics (meaning of 0) are not defined here — confirm.
    status = models.IntegerField(null=True, default=0)
    # Audit columns; maintained by application code (no auto_now/auto_now_add).
    created_by = models.IntegerField(null=True)
    updated_by = models.IntegerField(null=True)
    created_at = models.DateTimeField(null=True)
    updated_at = models.DateTimeField(null=True)
    deleted_at = models.DateTimeField(null=True)
| 40 | 71 | 0.738 |
6065bbdbb7a4d2ee1f5c5c3511265eb5241197e8 | 14,247 | py | Python | include/scons/src/engine/SCons/Scanner/Fortran.py | SWEN-712/screen-reader-brandonp728 | e30c25ad2d10ce632fac0548696a61a872328f59 | [
"bzip2-1.0.6"
] | null | null | null | include/scons/src/engine/SCons/Scanner/Fortran.py | SWEN-712/screen-reader-brandonp728 | e30c25ad2d10ce632fac0548696a61a872328f59 | [
"bzip2-1.0.6"
] | 4 | 2019-04-11T16:27:45.000Z | 2019-04-11T23:56:30.000Z | include/scons/src/engine/SCons/Scanner/Fortran.py | SWEN-712/screen-reader-brandonp728 | e30c25ad2d10ce632fac0548696a61a872328f59 | [
"bzip2-1.0.6"
] | 1 | 2019-01-17T13:47:56.000Z | 2019-01-17T13:47:56.000Z | """SCons.Scanner.Fortran
This module implements the dependency scanner for Fortran code.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import re
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.Warnings
class F90Scanner(SCons.Scanner.Classic):
    """
    A Classic Scanner subclass for Fortran source files which takes
    into account both USE and INCLUDE statements.  This scanner will
    work for both F77 and F90 (and beyond) compilers.

    Currently, this scanner assumes that the include files do not contain
    USE statements.  To enable the ability to deal with USE statements
    in include files, add logic right after the module names are found
    to loop over each include file, search for and locate each USE
    statement, and append each module name to the list of dependencies.
    Caching the search results in a common dictionary somewhere so that
    the same include file is not searched multiple times would be a
    smart thing to do.
    """

    def __init__(self, name, suffixes, path_variable,
                 use_regex, incl_regex, def_regex, *args, **kw):

        # Pre-compile the three dependency patterns in multi-line mode.
        self.cre_use = re.compile(use_regex, re.M)
        self.cre_incl = re.compile(incl_regex, re.M)
        self.cre_def = re.compile(def_regex, re.M)

        def _scan(node, env, path, self=self):
            # `self=self` binds the scanner instance at definition time so this
            # closure can be handed to SCons as a plain scan function.
            node = node.rfile()
            if not node.exists():
                return []
            return self.scan(node, env, path)

        kw['function'] = _scan
        kw['path_function'] = SCons.Scanner.FindPathDirs(path_variable)
        kw['recursive'] = 1
        kw['skeys'] = suffixes
        kw['name'] = name

        # Call Scanner.Current (not Classic) directly: Classic.__init__ would
        # install its own regex/scan function over the ones configured above.
        SCons.Scanner.Current.__init__(self, *args, **kw)

    def scan(self, node, env, path=()):
        """Return the dependency nodes (includes + .mod files) for `node`."""

        # cache the includes list in node so we only scan it once:
        if node.includes is not None:
            mods_and_includes = node.includes
        else:
            # retrieve all included filenames
            includes = self.cre_incl.findall(node.get_text_contents())
            # retrieve all USE'd module names
            modules = self.cre_use.findall(node.get_text_contents())
            # retrieve all defined module names
            defmodules = self.cre_def.findall(node.get_text_contents())

            # Remove all USE'd module names that are defined in the same file
            # (case-insensitively) -- those are satisfied locally.
            d = {}
            for m in defmodules:
                d[m.lower()] = 1
            modules = [m for m in modules if m.lower() not in d]

            # Convert module name to a .mod filename
            suffix = env.subst('$FORTRANMODSUFFIX')
            modules = [x.lower() + suffix for x in modules]

            # Remove duplicate items from the combined list.
            mods_and_includes = SCons.Util.unique(includes+modules)
            node.includes = mods_and_includes

        # This is a hand-coded DSU (decorate-sort-undecorate, or
        # Schwartzian transform) pattern.  The sort key is the raw name
        # of the file as specified on the USE or INCLUDE line, which lets
        # us keep the sort order constant regardless of whether the file
        # is actually found in a Repository or locally.
        nodes = []
        source_dir = node.get_dir()
        if callable(path):
            path = path()
        for dep in mods_and_includes:
            n, i = self.find_include(dep, source_dir, path)

            if n is None:
                # Missing dependency: warn but keep scanning the rest.
                SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
                                    "No dependency generated for file: %s (referenced by: %s) -- file not found" % (i, node))
            else:
                sortkey = self.sort_key(dep)
                nodes.append((sortkey, n))

        return [pair[1] for pair in sorted(nodes)]
def FortranScan(path_variable="FORTRANPATH"):
    """Return a prototype Scanner instance for scanning source files
    for Fortran USE & INCLUDE statements.

    All three patterns below are raw strings: they are full of regex escapes
    such as ``\\s`` and ``\\w``, which in a non-raw string literal are invalid
    Python escape sequences and raise Deprecation/SyntaxWarnings on modern
    CPython.  Raw strings pass the backslashes to `re` verbatim; the matching
    behaviour is unchanged.
    """

    # The USE statement regex matches the following:
    #
    #     USE module_name
    #     USE :: module_name
    #     USE, INTRINSIC :: module_name
    #     USE, NON_INTRINSIC :: module_name
    #
    # Limitations
    #
    # -- While the regex can handle multiple USE statements on one line,
    #    it cannot properly handle them if they are commented out.
    #    In either of the following cases:
    #
    #       !  USE mod_a ; USE mod_b          [entire line is commented out]
    #          USE mod_a ! ; USE mod_b        [in-line comment of second USE statement]
    #
    #    the second module name (mod_b) will be picked up as a dependency
    #    even though it should be ignored.  The only way I can see
    #    to rectify this would be to modify the scanner to eliminate
    #    the call to re.findall, read in the contents of the file,
    #    treating the comment character as an end-of-line character
    #    in addition to the normal linefeed, loop over each line,
    #    weeding out the comments, and looking for the USE statements.
    #    One advantage to this is that the regex passed to the scanner
    #    would no longer need to match a semicolon.
    #
    # -- I question whether or not we need to detect dependencies to
    #    INTRINSIC modules because these are built-in to the compiler.
    #    If we consider them a dependency, will SCons look for them, not
    #    find them, and kill the build?  Or will there be standard
    #    compiler-specific directories we will need to point to so the
    #    compiler and SCons can locate the proper object and mod files?

    # Here is a breakdown of the regex:
    #
    #   (?i)          : regex is case insensitive
    #   (?:^|;)       : matches either the start of the line or a semicolon
    #                   (unsaved group, to allow multiple USEs per line)
    #   \s*USE        : any whitespace, then the string USE
    #   (?:\s+|...)   : either one or more whitespace, OR the optional
    #                   ", INTRINSIC" / ", NON_INTRINSIC" attribute followed
    #                   by a mandatory double colon "::"
    #   \s*(\w+)      : any whitespace, then the USE'd module name (group 1)
    use_regex = r"(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"

    # The INCLUDE statement regex matches the following:
    #
    #     INCLUDE 'some_Text'
    #     INCLUDE "some_Text"
    #     INCLUDE "some_Text" ; INCLUDE "some_Text"
    #     INCLUDE kind_"some_Text"
    #     INCLUDE kind_'some_Text"
    #
    # where some_Text can include any alphanumeric and/or special character
    # as defined by the Fortran 2003 standard.
    #
    # Limitations:
    #
    # -- The Fortran standard dictates that a " or ' in the INCLUDE'd
    #    string must be represented as a "" or '', if the quotes that wrap
    #    the entire string are either a ' or ", respectively.  While the
    #    regular expression below can detect the ' or " characters just fine,
    #    the scanning logic, presently is unable to detect them and reduce
    #    them to a single instance.  This probably isn't an issue since,
    #    in practice, ' or " are not generally used in filenames.
    #
    # -- This regex will not properly deal with multiple INCLUDE statements
    #    when the entire line has been commented out, ala
    #
    #       ! INCLUDE 'some_file' ; INCLUDE 'some_file'
    #
    #    In such cases, it will properly ignore the first INCLUDE file,
    #    but will actually still pick up the second.  Interestingly enough,
    #    the regex will properly deal with these cases:
    #
    #       INCLUDE 'some_file'
    #       INCLUDE 'some_file' !; INCLUDE 'some_file'
    #
    #    To get around the above limitation, the FORTRAN programmer could
    #    simply comment each INCLUDE statement separately, like this
    #
    #       ! INCLUDE 'some_file' !; INCLUDE 'some_file'
    #
    #    The way I see it, the only way to get around this limitation would
    #    be to modify the scanning logic to replace the calls to re.findall
    #    with a custom loop that processes each line separately, throwing
    #    away fully commented out lines before attempting to match against
    #    the INCLUDE syntax.
    #
    # Here is a breakdown of the regex:
    #
    #   (?i)           : regex is case insensitive
    #   (?:^|['">]\s*;): either the start of the line, or a semicolon that
    #                    follows a quote/greater-than (closing a previous
    #                    INCLUDE on the same line); paired with the lookahead
    #                    below, this allows multiple INCLUDEs per line while
    #                    ignoring ones inside an in-line comment, ala
    #                    " INCLUDE 'someFile' ! ; INCLUDE 'someFile2' "
    #   \s*INCLUDE\s+  : the INCLUDE keyword with surrounding whitespace
    #   (?:\w+_)?      : optional "kind-param _" prefix allowed by the standard
    #   [<"']          : the opening include delimiter
    #   (.+?)          : the included path/file name (group 1); non-greedy so
    #                    it stops at the next delimiter.  The dot also matches
    #                    control codes, which in practice never appear in names.
    #   (?=["'>])      : positive lookahead for the closing delimiter, so it is
    #                    not consumed by the match; this is what lets the
    #                    leading alternation above anchor on it for the next
    #                    semicolon-separated INCLUDE (as allowed by F2003)
    include_regex = r"""(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""

    # The MODULE statement regex finds module definitions by matching
    # the following:
    #
    #     MODULE module_name
    #
    # but *not* the following:
    #
    #     MODULE PROCEDURE procedure_name
    #
    # Here is a breakdown of the regex:
    #
    #   (?i)          : regex is case insensitive
    #   ^\s*          : start of line plus any amount of white space
    #   MODULE\s+     : the MODULE keyword and one or more whitespace
    #   (?!PROCEDURE) : negative lookahead -- don't match "MODULE PROCEDURE"
    #   (\w+)         : the defined module name (group 1)
    def_regex = r"""(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""

    scanner = F90Scanner("FortranScan",
                         "$FORTRANSUFFIXES",
                         path_variable,
                         use_regex,
                         include_regex,
                         def_regex)
    return scanner
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 44.943218 | 125 | 0.605952 |
2024e69f4f701f600f8b99faec207c321c382b30 | 524 | py | Python | code/test/hash/old/genkey.py | psorus/git-pay | 4bd22125339991434ebea207d9e19d63ed5bafdb | [
"MIT"
] | null | null | null | code/test/hash/old/genkey.py | psorus/git-pay | 4bd22125339991434ebea207d9e19d63ed5bafdb | [
"MIT"
] | null | null | null | code/test/hash/old/genkey.py | psorus/git-pay | 4bd22125339991434ebea207d9e19d63ed5bafdb | [
"MIT"
] | 1 | 2021-03-10T20:25:44.000Z | 2021-03-10T20:25:44.000Z | from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from time import time
import random
random.seed(12)
t0=time()
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = private_key.public_key()
pubyte=public_key.public_numbers()
print(pubyte)
exit()
t1=time()
print(private_key)
print(public_key)
print(t1-t0)
| 13.435897 | 57 | 0.784351 |
42e316be0277230303c1320e30656f489f431ac2 | 286 | py | Python | users/models.py | jayesh-prajapati/django_react | dcef550ca2b45921f1874fa713f706d8010686de | [
"MIT"
] | 1 | 2019-12-17T15:02:36.000Z | 2019-12-17T15:02:36.000Z | users/models.py | jayesh-prajapati/django_react | dcef550ca2b45921f1874fa713f706d8010686de | [
"MIT"
] | null | null | null | users/models.py | jayesh-prajapati/django_react | dcef550ca2b45921f1874fa713f706d8010686de | [
"MIT"
] | 1 | 2020-01-11T18:57:56.000Z | 2020-01-11T18:57:56.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUser(AbstractUser):
name = models.CharField(blank=True, max_length=255)
def __str__(self):
return self.email
| 22 | 55 | 0.741259 |
fa00177b26096cf6791aeb7eb887a6dcf23947dc | 404 | py | Python | abi_variables.py | Abirami33/python-75-hackathon | c15505615d92cf304c27eabd3136406b08c59078 | [
"MIT"
] | null | null | null | abi_variables.py | Abirami33/python-75-hackathon | c15505615d92cf304c27eabd3136406b08c59078 | [
"MIT"
] | null | null | null | abi_variables.py | Abirami33/python-75-hackathon | c15505615d92cf304c27eabd3136406b08c59078 | [
"MIT"
] | null | null | null | #variable creation
a=5
print(a)
#multiple assignments
a=b=9
print(a,b)
#even both int and a string
a=b=1,"hi"
print(a)
print(b)
#global variable
global x
x=10
def foo():
x=5
print("hi")
print(x) #prints a as 10 since global alone
#local variable usage
def fib():
y=9
return y
y=fib()
print(y)
'''OUTPUT:
stud@HP-246-Notebook-PC:~$ python abi_variables.py
5
9 9
(1, 'hi')
(1, 'hi')
10
9
'''
| 9.619048 | 50 | 0.653465 |
a8adb21e3f2eb3aa2e4a0791f97f9fa937bf5e67 | 1,709 | py | Python | ISIC2019/validate_k_script.py | datduong/pytorch-image-models | 05c9b52ca65b01e57f8cea2b6447882488aba4f6 | [
"Apache-2.0"
] | null | null | null | ISIC2019/validate_k_script.py | datduong/pytorch-image-models | 05c9b52ca65b01e57f8cea2b6447882488aba4f6 | [
"Apache-2.0"
] | null | null | null | ISIC2019/validate_k_script.py | datduong/pytorch-image-models | 05c9b52ca65b01e57f8cea2b6447882488aba4f6 | [
"Apache-2.0"
] | null | null | null |
import os,sys,re,pickle,time
from datetime import datetime
# ! valid
# sinteractive --partition=gpu --gres=gpu:p100:1 --mem=4g -c4
# Shell-script template that runs validate_no_label.py against one trained
# checkpoint.  TRAIN_NAME and MODEL_NAME are placeholders substituted per
# model in the loop below.
base = """#!/bin/bash
source /data/$USER/conda/etc/profile.d/conda.sh
conda activate py37
data_path=/data/duongdb/ISIC2019-SkinCancer8Labels/TrainDevTestRandState1/test
base_path=/data/duongdb/ISIC2019-SkinCancer8Labels/TrainDevTestRandState1/our-setting/
train_name=TRAIN_NAME
output=$base_path/$train_name/result_test.csv # path/name.csv
checkpoint=$base_path/$train_name/model_best.pth.tar # model_best.pth.tar averaged.pth
batchsize=64
cd /data/duongdb/pytorch-image-models
python3 validate_no_label.py $data_path --model MODEL_NAME -b $batchsize -j 2 --config $base_path/$train_name/args.yaml --num-classes 9 --results-file $output --checkpoint $checkpoint --amp --use-ema --no-test-pool --has_eval_label --crop-pct 0.922
"""

# Generated scripts are written into the experiment directory.
os.chdir('/data/duongdb/ISIC2019-SkinCancer8Labels/TrainDevTestRandState1/our-setting/')

# Candidate training runs, keyed by an arbitrary index; values are the
# timestamped training directory names (model variant is the 3rd '-' field).
case = {1: '20200909-155811-efficientnet_b0-450',
        2: '20200909-155813-efficientnet_b1-450',
        3: '20200909-155809-efficientnet_b2-450',
        4: '20200909-155808-efficientnet_b3-450',
        5: '20200910-175136-tf_efficientnet_b4_ns-450'  # --crop-pct 0.922
        }

for k, val in case.items() :
    # Only entry 5 (the b4 run) is currently processed; all others are skipped.
    if k not in [5]:
        continue
    # Fill in the template: TRAIN_NAME gets a '2/train/' prefix
    # (NOTE(review): presumably relative to $base_path — confirm layout).
    base2 = re.sub('TRAIN_NAME', '2/train/'+val, base) # train/
    # MODEL_NAME is the architecture token parsed from the directory name.
    base2 = re.sub('MODEL_NAME', val.split('-')[2], base2) # get name
    foutname = str(k)+'.'+val+'.sh'
    fout = open(foutname, 'w')
    fout.write(base2 + "\n\n")
    fout.close()
    #
    # Submission/execution is left disabled; uncomment one of these to run.
    # time.sleep(5)
    # os.system ( 'sbatch --partition=gpu --time=1-12:00:00 --gres=gpu:p100:1 --mem=6g -c8 ' + foutname ) # k80
    # os.system ( 'bash ' + foutname )
| 34.877551 | 248 | 0.709187 |
3ce850df1b48ced6db215cc4e10f1191f8ed7995 | 1,170 | py | Python | tests/http/test_auth.py | KingBain/virtool | 48e89e2c45090f517607ca5f3b4f56796623b2fd | [
"MIT"
] | 39 | 2016-10-31T23:28:59.000Z | 2022-01-15T00:00:42.000Z | tests/http/test_auth.py | KingBain/virtool | 48e89e2c45090f517607ca5f3b4f56796623b2fd | [
"MIT"
] | 1,690 | 2017-02-07T23:39:48.000Z | 2022-03-31T22:30:44.000Z | tests/http/test_auth.py | KingBain/virtool | 48e89e2c45090f517607ca5f3b4f56796623b2fd | [
"MIT"
] | 25 | 2017-02-08T18:25:31.000Z | 2021-09-20T22:55:25.000Z | from aiohttp import BasicAuth
from virtool.utils import hash_key
class TestJobAuthentication:
async def test_root_succeeds(self, spawn_job_client):
"""
Check that a request against the job accessible root URL (GET /) succeeds.
"""
client = await spawn_job_client(authorize=True)
resp = await client.get("/")
assert resp.status == 200
async def test_unauthenticated_root_fails(self, spawn_job_client):
"""
Check that an request against the root API URL
"""
client = await spawn_job_client(authorize=False)
resp = await client.get("/")
assert resp.status == 401
async def test_protected_fails(self, dbi, spawn_client):
"""
Check that a request against GET /samples using job authentication fails. This URI is
not accessible to jobs.
"""
key = "bar"
client = await spawn_client(auth=BasicAuth("job-foo", key))
client.settings.enable_api = True
await dbi.jobs.insert_one({"_id": "foo", "key": hash_key(key)})
resp = await client.get("/samples")
assert resp.status == 401
| 26.590909 | 93 | 0.631624 |
f1ff603eb3dda2f46c35ea66f38b74eb383282bb | 2,347 | py | Python | metadata-ingestion/src/datahub/ingestion/sink/datahub_rest.py | hmjahle/datahub_upstream | 62d5306a28e0df6b6b67a5f46c01dd508caada60 | [
"Apache-2.0"
] | null | null | null | metadata-ingestion/src/datahub/ingestion/sink/datahub_rest.py | hmjahle/datahub_upstream | 62d5306a28e0df6b6b67a5f46c01dd508caada60 | [
"Apache-2.0"
] | null | null | null | metadata-ingestion/src/datahub/ingestion/sink/datahub_rest.py | hmjahle/datahub_upstream | 62d5306a28e0df6b6b67a5f46c01dd508caada60 | [
"Apache-2.0"
] | null | null | null | import logging
from dataclasses import dataclass
from typing import Optional, Union
from datahub.configuration.common import ConfigModel, OperationalError
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.ingestion.api.common import PipelineContext, RecordEnvelope, WorkUnit
from datahub.ingestion.api.sink import Sink, SinkReport, WriteCallback
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub.metadata.com.linkedin.pegasus2avro.usage import UsageAggregation
logger = logging.getLogger(__name__)
class DatahubRestSinkConfig(ConfigModel):
    """Configuration class for holding connectivity to datahub gms"""

    # Base URL of the DataHub GMS REST endpoint.
    server: str = "http://localhost:8080"
    # Optional access token passed to the emitter for authenticated deployments.
    token: Optional[str]
@dataclass
class DatahubRestSink(Sink):
    """Sink that emits metadata records to a DataHub GMS instance over REST."""

    config: DatahubRestSinkConfig
    emitter: DatahubRestEmitter
    report: SinkReport

    def __init__(self, ctx: PipelineContext, config: DatahubRestSinkConfig):
        super().__init__(ctx)
        self.config = config
        self.report = SinkReport()
        self.emitter = DatahubRestEmitter(self.config.server, self.config.token)
        # Fail fast on bad connectivity/credentials before any record is written.
        self.emitter.test_connection()

    @classmethod
    def create(cls, config_dict: dict, ctx: PipelineContext) -> "DatahubRestSink":
        """Alternate constructor: build the sink from a raw config dictionary."""
        config = DatahubRestSinkConfig.parse_obj(config_dict)
        return cls(ctx, config)

    def handle_work_unit_start(self, workunit: WorkUnit) -> None:
        # No per-workunit setup is needed for the REST sink.
        pass

    def handle_work_unit_end(self, workunit: WorkUnit) -> None:
        # No per-workunit teardown is needed for the REST sink.
        pass

    def write_record_async(
        self,
        record_envelope: RecordEnvelope[Union[MetadataChangeEvent, UsageAggregation]],
        write_callback: WriteCallback,
    ) -> None:
        """Emit one record and report the outcome via `write_callback`.

        NOTE(review): the emit call here appears to block; "async" presumably
        refers to the callback-style interface of the Sink contract — confirm.
        """
        record = record_envelope.record
        try:
            self.emitter.emit(record)
            self.report.report_record_written(record_envelope)
            write_callback.on_success(record_envelope, {})
        except OperationalError as e:
            # Known operational failures carry a structured message and info dict.
            self.report.report_failure({"error": e.message, "info": e.info})
            write_callback.on_failure(record_envelope, e, e.info)
        except Exception as e:
            # Any other failure is reported as-is without extra context.
            self.report.report_failure({"e": e})
            write_callback.on_failure(record_envelope, e, {})

    def get_report(self) -> SinkReport:
        return self.report

    def close(self):
        # No resources to release.
        pass
| 34.014493 | 86 | 0.715807 |
bd145a30360a621b01fd301af0698826eed3e2ab | 1,383 | py | Python | testing/test_database.py | thomasehuang/Ithemal-Extension | 821a875962a261de003c6da6e2d3e9b49918d68a | [
"MIT"
] | 105 | 2019-08-05T21:27:33.000Z | 2022-02-16T03:35:10.000Z | testing/test_database.py | thomasehuang/Ithemal-Extension | 821a875962a261de003c6da6e2d3e9b49918d68a | [
"MIT"
] | 16 | 2019-08-06T21:12:11.000Z | 2021-03-22T14:09:21.000Z | testing/test_database.py | thomasehuang/Ithemal-Extension | 821a875962a261de003c6da6e2d3e9b49918d68a | [
"MIT"
] | 25 | 2019-08-11T22:41:57.000Z | 2021-11-10T08:02:50.000Z | import pytest
import os
import subprocess
import glob
from conftest import *
import common_libs.utilities as ut
import mysql.connector
@ithemal
class TestDatabase:
    """Integration checks for the Ithemal MySQL setup (requires a live database)."""

    def test_connectivity(self,db_config):
        # The db_config fixture must provide credentials and a port.
        assert 'password' in db_config.keys()
        assert 'user' in db_config.keys()
        assert 'port' in db_config.keys()

        # Connect without selecting a specific database.
        cnx = ut.create_connection(user=db_config['user'],password=db_config['password'],port=db_config['port'],database=None)
        assert cnx != None

    def test_connectivity_from_config(self):
        # Same connection, but driven entirely from the config file on disk.
        cnx = ut.create_connection_from_config('test_data/db_config.cfg')
        assert cnx != None

    def test_create_database(self,db_config):
        # Create and populate the 'testIthemal' schema via the export shell
        # script shipped under $ITHEMAL_HOME.
        create_script = os.environ['ITHEMAL_HOME'] + '/data_export/scripts/create_and_populate_db.sh'
        schema = os.environ['ITHEMAL_HOME'] + '/data_export/schemas/mysql_schema.sql'
        proc = subprocess.call(['bash',create_script,'test_data/db_config.cfg','testIthemal',schema,'test_data'])
        #_ = proc.communicate()

        cnx = ut.create_connection(user=db_config['user'],password=db_config['password'],port=db_config['port'],database='testIthemal')
        assert cnx != None

        sql = 'select count(*) from code'
        rows = ut.execute_query(cnx, sql, True)
        assert len(rows) == 1
        assert len(rows[0]) == 1
        # 3287 is presumably the row count of the bundled test_data set — confirm
        # when updating the fixtures.
        assert rows[0][0] == 3287
d99448495691bc70addcfcf765c89047524ca764 | 3,861 | py | Python | pelion_systest_lib/fixtures/general_fixtures.py | AnotherButler/e2e-edge-test-suite | 05d01922bc74d9ea4564a7561342ea428977ebff | [
"Apache-2.0"
] | null | null | null | pelion_systest_lib/fixtures/general_fixtures.py | AnotherButler/e2e-edge-test-suite | 05d01922bc74d9ea4564a7561342ea428977ebff | [
"Apache-2.0"
] | 1 | 2021-07-30T20:43:56.000Z | 2021-08-06T19:40:24.000Z | pelion_systest_lib/fixtures/general_fixtures.py | AnotherButler/e2e-edge-test-suite | 05d01922bc74d9ea4564a7561342ea428977ebff | [
"Apache-2.0"
] | 2 | 2021-07-29T15:47:25.000Z | 2022-03-07T08:38:20.000Z | # ----------------------------------------------------------------------------
# Copyright (c) 2020-2021, Pelion and affiliates.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# pylint: disable=redefined-outer-name
"""
General pytest fixtures
"""
import logging
import os
import pytest
import pelion_systest_lib.tools as utils
from pelion_systest_lib.cloud.cloud import PelionCloud
from pelion_systest_lib.cloud.libraries.rest_api.rest_api import RestAPI
log = logging.getLogger(__name__)
pytest.global_test_env_config = {}
@pytest.fixture(scope='session')
def tc_config_data(request):
"""
Fixture for accessing test case config json given with '--config_path' argument
:param request: Request object
:return: Config data object
"""
if request.config.getoption('config_path'):
log.debug('Getting test configs from json')
return utils.load_config(request.config.getoption('config_path'))
if pytest.global_test_env_config != {}:
log.debug('Getting test configs from global variable')
return pytest.global_test_env_config
raise AssertionError('Test configuration is not defined. Use --config_path=<path to define config file>')
@pytest.fixture(scope='session')
def cloud_api(request):
"""
Fixture for cloud API
Initializes the rest api with the api key given in test case config
:param request: Request object
:return: Cloud API object
"""
log.debug('Initializing Cloud API fixture')
tc_conf = {}
# If user doesn't give json config path, let's initialize cloud with given root login
# or api key and url defined in env variables
if not request.config.getoption('config_path'):
# Setting the api key logging to config
if request.config.getoption('show_api_key'):
tc_conf['rest_api_key_logging'] = (request.config.getoption('show_api_key') == 'true')
else:
error_msg = 'Connfiguration error in config json'
log.error(error_msg)
assert False, error_msg
# Initialize cloud either with temp account info or info from env variables
pytest.global_test_env_config = tc_conf
pelion_cloud = PelionCloud(tc_conf)
else:
log.info('Using account and api key defined in config json {}'.format(request.config.getoption('config_path')))
tc_conf = utils.load_config(request.config.getoption('config_path'))
pytest.global_test_env_config = tc_conf
pelion_cloud = PelionCloud(tc_conf)
log.info('Cloud API object initialized for {} and account id {}'.format(tc_conf.get('api_gw', ''),
tc_conf.get('account_id', '')))
yield pelion_cloud
@pytest.fixture(scope='function')
def rest_api():
    """Build a bare REST API request layer from environment variables.

    Reads REST_API_URL, REST_API_TOKEN and (optionally) REST_API_USER_AGENT,
    so it is usable for plain API testing without the full cloud fixture.

    :return: Rest API class
    """
    env = os.environ
    settings = {
        'api_gw': env.get('REST_API_URL'),
        'api_key': env.get('REST_API_TOKEN'),
        'rest_user_agent': env.get('REST_API_USER_AGENT', 'SystemTesting'),
    }
    return RestAPI(settings)
ec8d2bfe76af4f32d89ded9b4690afb418d3df68 | 2,725 | py | Python | discorduser/migrations/0001_initial.py | edinburghhacklab/hackdb | 3ec7d66039705aa511dd6559196fa51a53b3a110 | [
"MIT"
] | null | null | null | discorduser/migrations/0001_initial.py | edinburghhacklab/hackdb | 3ec7d66039705aa511dd6559196fa51a53b3a110 | [
"MIT"
] | null | null | null | discorduser/migrations/0001_initial.py | edinburghhacklab/hackdb | 3ec7d66039705aa511dd6559196fa51a53b3a110 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-27 20:18
import discorduser.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the discorduser app: creates the
    # DiscordVerificationToken and DiscordUser models.

    initial = True

    dependencies = [
        # Depends on whatever model the project configures as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="DiscordVerificationToken",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    # Unique random value produced by discorduser.models.random_token.
                    "token",
                    models.CharField(
                        default=discorduser.models.random_token,
                        max_length=255,
                        unique=True,
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("discord_id", models.BigIntegerField()),
                ("discord_username", models.CharField(max_length=255)),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="DiscordUser",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # unique=True: one Discord account can be linked at most once.
                ("discord_id", models.BigIntegerField(unique=True)),
                (
                    "discord_username",
                    models.CharField(blank=True, max_length=255, null=True),
                ),
                (
                    # One-to-one: each site user has at most one Discord link.
                    "user",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                # App-specific permissions declared alongside Django's defaults.
                "permissions": [
                    ("get_discord_users", "Can retrieve Discord user data"),
                    (
                        "generate_discord_confirmation_token",
                        "Can generate Discord confirmation token",
                    ),
                ],
            },
        ),
    ]
| 31.686047 | 76 | 0.410642 |
6b57f8cf1db027639e090fd7551042e52cdd1009 | 1,414 | py | Python | csv2json/__init__.py | oplatek/csv2json | f2f95db71ba2ce683fd6d0d3e2f13c9d0a77ceb6 | [
"Apache-2.0"
] | 30 | 2016-07-29T21:22:19.000Z | 2021-12-28T12:52:28.000Z | csv2json/__init__.py | oplatek/csv2json | f2f95db71ba2ce683fd6d0d3e2f13c9d0a77ceb6 | [
"Apache-2.0"
] | 4 | 2017-01-28T13:48:48.000Z | 2020-03-26T14:47:45.000Z | csv2json/__init__.py | oplatek/csv2json | f2f95db71ba2ce683fd6d0d3e2f13c9d0a77ceb6 | [
"Apache-2.0"
] | 16 | 2017-01-13T06:06:13.000Z | 2021-08-06T11:45:43.000Z | import csv
import json
import sys
def load_csv(fp_in, delimiter=',', quotechar='"', remove_empty=False,
             custom_headers=None, **kwargs):
    """Read a CSV stream into a list of per-row dicts.

    fp_in: open file-like object to read from.
    delimiter / quotechar: forwarded to csv.DictReader.
    remove_empty: when True, drop keys whose value is falsy (e.g. '').
    custom_headers: optional explicit field names; when given, the first
        CSV line is treated as data rather than as the header row.
    Extra keyword arguments are accepted (and ignored) so callers may pass
    one shared options dict around.
    """
    reader = csv.DictReader(fp_in, delimiter=delimiter, quotechar=quotechar,
                            fieldnames=custom_headers)
    rows = list(reader)
    if not remove_empty:
        return rows
    return [{key: value for key, value in row.items() if value} for row in rows]
def save_json(data, fp_out, pretty_spaces=4, sort_keys=False, **kwargs):
    """Serialize *data* as JSON to the open file object *fp_out*.

    pretty_spaces is passed through as the indent width (None disables
    pretty-printing); sort_keys orders object keys alphabetically. Extra
    keyword arguments are ignored so a shared options dict can be
    forwarded here unchanged.
    """
    fp_out.write(json.dumps(data, indent=pretty_spaces, sort_keys=sort_keys))
def convert(csv, json, **kwargs):
    """Convert csv to json.

    csv: filename or file-like object
    json: filename or file-like object

    Passing '-' or None selects stdin for input / stdout for output.
    Files opened here (when a filename string is given) are always
    closed again, even if the conversion itself fails.
    """
    opened_in = None
    opened_out = None
    try:
        if csv == '-' or csv is None:
            source = sys.stdin
        elif isinstance(csv, str):
            opened_in = source = open(csv, 'r')
        else:
            source = csv
        if json == '-' or json is None:
            sink = sys.stdout
        elif isinstance(json, str):
            opened_out = sink = open(json, 'w')
        else:
            sink = json
        save_json(load_csv(source, **kwargs), sink, **kwargs)
    finally:
        # Close only the handles this function opened, input before output.
        for handle in (opened_in, opened_out):
            if handle is not None:
                handle.close()
| 27.192308 | 85 | 0.596888 |
00b24930839a69e58000b290a095cfaaa51e9dc4 | 15,780 | py | Python | nailgun/nailgun/test/unit/test_node_nic_handler.py | Axam/nsx-web | 4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5 | [
"Apache-2.0"
] | 1 | 2021-04-06T16:13:35.000Z | 2021-04-06T16:13:35.000Z | nailgun/nailgun/test/unit/test_node_nic_handler.py | Axam/nsx-web | 4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5 | [
"Apache-2.0"
] | null | null | null | nailgun/nailgun/test/unit/test_node_nic_handler.py | Axam/nsx-web | 4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.openstack.common import jsonutils
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import reverse
class TestHandlers(BaseIntegrationTest):
    """Integration tests for the node NIC (network interface) REST handlers.

    NOTE(review): leading whitespace was stripped from this copy of the
    file, so the indentation below is reconstructed — verify loop/branch
    boundaries against upstream. ``filter(...)[0]``, subscripting ``map``
    results and ``assertItemsEqual`` are Python 2 era idioms, left as-is.
    """

    def test_get_handler_with_wrong_nodeid(self):
        # Requesting NICs for a nonexistent node must 404.
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': 1}),
            expect_errors=True,
            headers=self.default_headers)
        self.assertEqual(resp.status_code, 404)

    def test_get_handler_with_invalid_data(self):
        # Agent updates carrying malformed 'interfaces' metadata are rejected
        # with 400 and must leave the node's (empty) NIC list untouched.
        meta = self.env.default_metadata()
        meta["interfaces"] = []
        node = self.env.create_node(api=True, meta=meta)
        meta_list = [
            {'interfaces': None},
            {'interfaces': {}}
        ]
        for nic_meta in meta_list:
            meta = self.env.default_metadata()
            meta.update(nic_meta)
            node_data = {'mac': node['mac'], 'meta': meta}
            resp = self.app.put(
                reverse('NodeAgentHandler'),
                jsonutils.dumps(node_data),
                expect_errors=True,
                headers=self.default_headers
            )
            self.assertEqual(resp.status_code, 400)
            resp = self.app.get(
                reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
                headers=self.default_headers
            )
            self.assertEqual(resp.status_code, 200)
            response = jsonutils.loads(resp.body)
            self.assertEqual(response, [])

    def test_get_handler_with_incompleted_iface_data(self):
        # Interfaces missing a name or MAC are accepted (200) but dropped.
        meta = self.env.default_metadata()
        meta["interfaces"] = []
        node = self.env.create_node(api=True, meta=meta)
        meta_clean_list = [
            {'interfaces': [{'name': '', 'mac': '00:00:00'}]},
            {'interfaces': [{'name': 'eth0', 'mac': ''}]},
            {'interfaces': [{'mac': '00:00:00'}]},
            {'interfaces': [{'name': 'eth0'}]}
        ]
        for nic_meta in meta_clean_list:
            meta = self.env.default_metadata()
            meta.update(nic_meta)
            node_data = {'mac': node['mac'], 'meta': meta}
            resp = self.app.put(
                reverse('NodeAgentHandler'),
                jsonutils.dumps(node_data),
                expect_errors=True,
                headers=self.default_headers
            )
            self.assertEqual(resp.status_code, 200)
            resp = self.app.get(
                reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
                headers=self.default_headers
            )
            ifaces = jsonutils.loads(resp.body)
            self.assertEqual(ifaces, [])

    def test_get_handler_with_invalid_speed_data(self):
        # Negative, non-integer or string speed values are normalized to None.
        meta = self.env.default_metadata()
        meta["interfaces"] = []
        node = self.env.create_node(api=True, meta=meta)
        meta_clean_list = [
            {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                             'max_speed': -100}]},
            {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                             'current_speed': -100}]},
            {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                             'current_speed': '100'}]},
            {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                             'max_speed': 10.0}]},
            {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                             'max_speed': '100'}]},
            {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                             'current_speed': 10.0}]}
        ]
        for nic_meta in meta_clean_list:
            meta = self.env.default_metadata()
            meta.update(nic_meta)
            node_data = {'mac': node['mac'], 'meta': meta}
            resp = self.app.put(
                reverse('NodeAgentHandler'),
                jsonutils.dumps(node_data),
                expect_errors=True,
                headers=self.default_headers
            )
            self.assertEqual(resp.status_code, 200)
            resp = self.app.get(
                reverse('NodeHandler', kwargs={'obj_id': node['id']}),
                headers=self.default_headers
            )
            ifaces = jsonutils.loads(resp.body)['meta']['interfaces']
            self.assertEqual(
                ifaces,
                [
                    {'name': 'eth0', 'mac': '00:00:00',
                     'max_speed': None, 'current_speed': None}
                ]
            )

    def test_get_handler_without_NICs(self):
        # A node registered with no interfaces yields an empty NIC list.
        meta = self.env.default_metadata()
        meta["interfaces"] = []
        node = self.env.create_node(api=True, meta=meta)
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
            headers=self.default_headers)
        self.assertEqual(resp.status_code, 200)
        response = jsonutils.loads(resp.body)
        self.assertEqual(response, [])

    def test_get_handler_with_NICs(self):
        # Registered NICs are returned with their ids, MACs and speeds intact,
        # and with no networks assigned yet.
        meta = self.env.default_metadata()
        self.env.set_interfaces_in_meta(meta, [
            {'name': 'eth0', 'mac': self.env.generate_random_mac(),
             'current_speed': 1, 'max_speed': 1},
            {'name': 'eth1', 'mac': self.env.generate_random_mac(),
             'current_speed': 1, 'max_speed': 1}])
        self.env.create_node(api=True, meta=meta)
        node_db = self.env.nodes[0]
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node_db.id}),
            headers=self.default_headers)
        self.assertEqual(resp.status_code, 200)
        response = jsonutils.loads(resp.body)
        self.assertItemsEqual(
            map(lambda i: i['id'], response),
            map(lambda i: i.id, node_db.interfaces)
        )
        for nic in meta['interfaces']:
            filtered_nics = filter(
                lambda i: i['mac'] == nic['mac'],
                response
            )
            resp_nic = filtered_nics[0]
            self.assertEqual(resp_nic['mac'], nic['mac'])
            self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
            self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
            for conn in ('assigned_networks', ):
                self.assertEqual(resp_nic[conn], [])

    def test_NIC_updates_by_agent(self):
        # An agent update with the same MAC refreshes the stored NIC attributes.
        meta = self.env.default_metadata()
        self.env.set_interfaces_in_meta(meta, [
            {'name': 'eth0', 'mac': '12345', 'current_speed': 1,
             'state': 'up'}])
        node = self.env.create_node(api=True, meta=meta)
        new_meta = self.env.default_metadata()
        self.env.set_interfaces_in_meta(new_meta, [
            {'name': 'new_nic', 'mac': '12345', 'current_speed': 10,
             'max_speed': 10, 'state': 'down'}])
        node_data = {'mac': node['mac'], 'meta': new_meta}
        resp = self.app.put(
            reverse('NodeAgentHandler'),
            jsonutils.dumps(node_data),
            headers=self.default_headers)
        self.assertEqual(resp.status_code, 200)
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
            headers=self.default_headers)
        self.assertEqual(resp.status_code, 200)
        response = jsonutils.loads(resp.body)
        self.assertEqual(len(response), 1)
        resp_nic = response[0]
        nic = new_meta['interfaces'][0]
        self.assertEqual(resp_nic['mac'], nic['mac'])
        self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
        self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
        self.assertEqual(resp_nic['state'], nic['state'])
        for conn in ('assigned_networks', ):
            self.assertEqual(resp_nic[conn], [])

    def test_NIC_adds_by_agent(self):
        # An agent update carrying a new MAC adds a NIC next to the existing one.
        meta = self.env.default_metadata()
        self.env.set_interfaces_in_meta(meta, [
            {'name': 'eth0', 'mac': '12345', 'current_speed': 1,
             'state': 'up'}])
        node = self.env.create_node(api=True, meta=meta)
        meta['interfaces'].append({'name': 'new_nic', 'mac': '643'})
        node_data = {'mac': node['mac'], 'meta': meta}
        resp = self.app.put(
            reverse('NodeAgentHandler'),
            jsonutils.dumps(node_data),
            headers=self.default_headers)
        self.assertEqual(resp.status_code, 200)
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
            headers=self.default_headers)
        self.assertEqual(resp.status_code, 200)
        response = jsonutils.loads(resp.body)
        self.assertEqual(len(response), len(meta['interfaces']))
        for nic in meta['interfaces']:
            filtered_nics = filter(
                lambda i: i['mac'] == nic['mac'],
                response
            )
            resp_nic = filtered_nics[0]
            self.assertEqual(resp_nic['mac'], nic['mac'])
            self.assertEqual(resp_nic['current_speed'],
                             nic.get('current_speed'))
            self.assertEqual(resp_nic['max_speed'], nic.get('max_speed'))
            self.assertEqual(resp_nic['state'], nic.get('state'))
            for conn in ('assigned_networks', ):
                self.assertEqual(resp_nic[conn], [])

    def test_ignore_NIC_id_in_meta(self):
        # An 'id' supplied in agent metadata must not leak into the DB id.
        fake_id = 'some_data'
        meta = self.env.default_metadata()
        self.env.set_interfaces_in_meta(meta, [
            {'id': fake_id, 'name': 'eth0', 'mac': '12345'}])
        node = self.env.create_node(api=True, meta=meta)
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
            headers=self.default_headers)
        self.assertEqual(resp.status_code, 200)
        response = jsonutils.loads(resp.body)
        self.assertNotEquals(response[0]['id'], fake_id)

    def test_mac_address_should_be_in_lower_case(self):
        # An upper-case MAC is not stored verbatim.
        meta = self.env.default_metadata()
        new_mac = 'AA:BB:CC:DD:11:22'
        self.env.set_interfaces_in_meta(meta, [
            {'name': 'eth0', 'mac': new_mac}])
        node = self.env.create_node(api=True, meta=meta)
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
            headers=self.default_headers)
        self.assertEqual(resp.status_code, 200)
        response = jsonutils.loads(resp.body)
        self.assertNotEquals(response[0]['mac'], new_mac.lower())

    def test_remove_assigned_interface(self):
        # Removing every non-admin interface through each handler must keep the
        # node visible; the interfaces are then restored for the next round.
        def get_nodes():
            resp = self.app.get(
                reverse('NodeCollectionHandler',
                        kwargs={'cluster_id': self.env.clusters[0].id}),
                headers=self.default_headers,
            )
            return jsonutils.loads(resp.body)

        self.env.create(nodes_kwargs=[{'api': True}])

        # check all possible handlers
        for handler in ('NodeAgentHandler',
                        'NodeHandler',
                        'NodeCollectionHandler'):

            # create node and check it availability
            nodes_data = get_nodes()
            self.assertEqual(len(nodes_data), 1)

            # remove all interfaces except admin one
            adm_eth = self.env.network_manager._get_interface_by_network_name(
                nodes_data[0]['id'], 'fuelweb_admin')
            ifaces = list(nodes_data[0]['meta']['interfaces'])
            nodes_data[0]['meta']['interfaces'] = \
                [i for i in ifaces if i['name'] == adm_eth.name]

            # prepare put request
            data = {
                'id': nodes_data[0]['id'],
                'meta': nodes_data[0]['meta'],
            }
            if handler in ('NodeCollectionHandler', ):
                data = [data]
            if handler in ('NodeHandler', ):
                endpoint = reverse(handler, kwargs={'obj_id': data['id']})
            else:
                endpoint = reverse(handler)
            self.app.put(
                endpoint,
                jsonutils.dumps(data),
                headers=self.default_headers,
            )

            # check the node is visible for api
            nodes_data = get_nodes()
            self.assertEqual(len(nodes_data), 1)
            self.assertEqual(len(nodes_data[0]['meta']['interfaces']), 1)

            # restore removed interfaces
            nodes_data[0]['meta']['interfaces'] = ifaces
            self.app.put(
                reverse(
                    'NodeAgentHandler',
                ),
                jsonutils.dumps({
                    'id': nodes_data[0]['id'],
                    'meta': nodes_data[0]['meta'],
                }),
                headers=self.default_headers,
            )

            # check node availability
            nodes_data = get_nodes()
            self.assertEqual(len(nodes_data), 1)
            self.assertItemsEqual(nodes_data[0]['meta']['interfaces'], ifaces)

    def test_change_mac_of_assigned_nics(self):
        # Changing MACs of non-admin NICs through each handler must keep the
        # node visible to the API.
        def get_nodes():
            resp = self.app.get(
                reverse('NodeCollectionHandler',
                        kwargs={'cluster_id': self.env.clusters[0].id}),
                headers=self.default_headers,
            )
            return jsonutils.loads(resp.body)

        meta = self.env.default_metadata()
        meta["interfaces"] = [
            {'name': 'eth0', 'mac': self.env.generate_random_mac()},
            {'name': 'eth1', 'mac': self.env.generate_random_mac()},
            {'name': 'eth2', 'mac': self.env.generate_random_mac()},
            {'name': 'eth3', 'mac': self.env.generate_random_mac()},
            {'name': 'eth4', 'mac': self.env.generate_random_mac()},
        ]
        self.env.create(nodes_kwargs=[{'api': True, 'meta': meta}])

        # check all possible handlers
        for handler in ('NodeAgentHandler',
                        'NodeHandler',
                        'NodeCollectionHandler'):

            # create node and check it availability
            nodes_data = get_nodes()
            self.assertEqual(len(nodes_data), 1)

            # change mac address of interfaces except admin one
            adm_eth = self.env.network_manager._get_interface_by_network_name(
                nodes_data[0]['id'], 'fuelweb_admin')
            for iface in nodes_data[0]['meta']['interfaces']:
                if iface['name'] != adm_eth.name:
                    iface['mac'] = self.env.generate_random_mac()

            # prepare put request
            data = {
                'id': nodes_data[0]['id'],
                'meta': nodes_data[0]['meta'],
            }
            if handler in ('NodeCollectionHandler', ):
                data = [data]
            if handler in ('NodeHandler', ):
                endpoint = reverse(handler, kwargs={'obj_id': data['id']})
            else:
                endpoint = reverse(handler)
            self.app.put(
                endpoint,
                jsonutils.dumps(data),
                headers=self.default_headers,
            )

            # check the node is visible for api
            nodes_data = get_nodes()
            self.assertEqual(len(nodes_data), 1)
662c3aa5bd9752916d5a75ada2363a9941833004 | 18,194 | py | Python | coinbaseprotracker.py | Dwaynekj/coinbaseprotracker | efbcd1b4c1013e5a7b068af3471af800f265ebbf | [
"Apache-2.0"
] | null | null | null | coinbaseprotracker.py | Dwaynekj/coinbaseprotracker | efbcd1b4c1013e5a7b068af3471af800f265ebbf | [
"Apache-2.0"
] | null | null | null | coinbaseprotracker.py | Dwaynekj/coinbaseprotracker | efbcd1b4c1013e5a7b068af3471af800f265ebbf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# encoding: utf-8
import json, sys
import numpy as np
import pandas as pd
from models.exchange.coinbase_pro import AuthAPI as CBAuthAPI, PublicAPI as CBPublicAPI
def printHelp():
    """Print usage help: the expected layout of the portfolio config.json.

    NOTE(review): whitespace inside the literals below appears collapsed in
    this copy of the file (single spaces where indentation is expected).
    """
    print("Create a config.json:")
    print("* Add 1 or more portfolios", "\n")
    print("{")
    print(' "<portfolio_name>" : {')
    print(' "api_key" : "<coinbase_pro_api_key>",')
    print(' "api_secret" : "<coinbase_pro_api_secret>",')
    print(' "api_pass" : "<coinbase_pro_api_passphrase>",')
    print(' "config" : {')
    print(' "base_currency" : "<base_symbol>",')
    print(' "quote_currency" : "<quote_symbol>"')
    print(' "}')
    print(" },")
    print(' "<portfolio_name>" : {')
    print(' "api_key" : "<coinbase_pro_api_key>",')
    print(' "api_secret" : "<coinbase_pro_api_secret>",')
    print(' "api_pass" : "<coinbase_pro_api_passphrase>",')
    print(' "config" : {')
    print(' "base_currency" : "<base_symbol>",')
    print(' "quote_currency" : "<quote_symbol>"')
    print(' "}')
    print(" }")
    print("}", "\n")
    print('<portfolio_name> - Coinbase Pro portfolio name E.g. "Default portfolio"')
    print("<coinbase_pro_api_key> - Coinbase Pro API key for the portfolio")
    print("<coinbase_pro_api_secret> - Coinbase Pro API secret for the portfolio")
    print(
        "<coinbase_pro_api_passphrase> - Coinbase Pro API passphrase for the portfolio"
    )
    print("<base_symbol> - Base currency E.g. BTC")
    # Bug fix: this line previously described <quote_symbol> as "Base currency".
    print("<quote_symbol> - Quote currency E.g. GBP")
    print("\n")
# NOTE(review): leading whitespace was stripped from this copy of the file;
# the nesting below is reconstructed from the control flow and should be
# verified against upstream before relying on it.
try:
    # Portfolio definitions live outside the repo, next to the API secrets.
    with open("../.secrets/coinbaseprotracker/coinbaseprotracker-config.json") as config_file:
        json_config = json.load(config_file)

    if not isinstance(json_config, dict):
        raise TypeError("config.json is invalid.")

    if len(list(json_config)) < 1:
        printHelp()
        sys.exit()

    # Accumulates one row per completed buy/sell pair across all portfolios.
    df_tracker = pd.DataFrame()

    for portfolio in list(json_config):
        base_currency = ""
        quote_currency = ""
        market = ""

        portfolio_config = json_config[portfolio]

        # Each portfolio entry must carry credentials and a market config.
        if (
            "api_key" in portfolio_config
            and "api_secret" in portfolio_config
            and "api_pass" in portfolio_config
            and "config" in portfolio_config
        ):
            print(
                "=== ",
                portfolio,
                " =======================================================\n",
            )

            api_key = portfolio_config["api_key"]
            api_secret = portfolio_config["api_secret"]
            api_pass = portfolio_config["api_pass"]
            config = portfolio_config["config"]

            # Either the crypto/fiat market keys or base/quote currency keys
            # must be present.
            if ("cryptoMarket" not in config and "base_currency" not in config) and (
                "fiatMarket" not in config and "quote_currency" not in config
            ):
                printHelp()
                sys.exit()

            if "cryptoMarket" in config:
                base_currency = config["cryptoMarket"]
            elif "base_currency" in config:
                base_currency = config["base_currency"]

            # NOTE(review): the elif below tests 'base_currency' but reads
            # 'quote_currency' — looks like a copy/paste slip; confirm.
            if "fiatMarket" in config:
                quote_currency = config["fiatMarket"]
            elif "base_currency" in config:
                quote_currency = config["quote_currency"]

            market = base_currency + "-" + quote_currency

            # Authenticated API for this portfolio's order history.
            api = CBAuthAPI(api_key, api_secret, api_pass)
            orders = api.getOrders(market)

            last_action = ""

            if len(orders) > 0:
                for market in orders["market"].sort_values().unique():
                    df_market = orders[orders["market"] == market]
            else:
                df_market = pd.DataFrame()

            df_buy = pd.DataFrame()
            df_sell = pd.DataFrame()

            # pair == 1 while a buy is waiting for its matching sell.
            pair = 0
            # pylint: disable=unused-variable
            for index, row in df_market.iterrows():
                if row["action"] == "buy":
                    pair = 1

                if pair == 1 and (row["action"] != last_action):
                    if row["action"] == "buy":
                        df_buy = row
                    elif row["action"] == "sell":
                        df_sell = row

                    # A sell following a recorded buy closes the pair.
                    if row["action"] == "sell" and len(df_buy) != 0:
                        df_pair = pd.DataFrame(
                            [
                                [
                                    df_sell["status"],
                                    df_buy["market"],
                                    df_buy["created_at"],
                                    df_buy["type"],
                                    df_buy["size"],
                                    df_buy["filled"],
                                    df_buy["fees"],
                                    df_buy["price"],
                                    df_sell["created_at"],
                                    df_sell["type"],
                                    df_sell["size"],
                                    df_sell["filled"],
                                    df_sell["fees"],
                                    df_sell["price"],
                                ]
                            ],
                            columns=[
                                "status",
                                "market",
                                "buy_at",
                                "buy_type",
                                "buy_size",
                                "buy_filled",
                                "buy_fees",
                                "buy_price",
                                "sell_at",
                                "sell_type",
                                "sell_size",
                                "sell_filled",
                                "sell_fees",
                                "sell_price",
                            ],
                        )
                        # NOTE(review): DataFrame.append was removed in
                        # pandas 2.0 — this script requires pandas < 2.
                        df_tracker = df_tracker.append(df_pair, ignore_index=True)
                        pair = 0

                last_action = row["action"]

            fees = api.authAPI("GET", "fees")
            maker_fee_rate = float(
                fees["maker_fee_rate"].to_string(index=False).strip()
            )
            taker_fee_rate = float(
                fees["taker_fee_rate"].to_string(index=False).strip()
            )

            if len(orders) > 0:
                last_order = orders.iloc[-1:]
                last_buy_order = last_order[last_order.action == "buy"]
                last_buy_order = last_buy_order.reset_index(drop=True)

                # Open position: the most recent order is an unfinished buy.
                if len(last_buy_order) > 0:
                    print(last_buy_order.to_string(index=False))
                    api = CBPublicAPI()
                    ticker = api.getTicker(market)
                    current_price = ticker[1]
                    market = last_buy_order["market"].to_string(index=False).strip()
                    buy_type = last_buy_order["type"].to_string(index=False).strip()
                    buy_size = round(
                        float(last_buy_order["size"].to_string(index=False).strip()), 8
                    )
                    buy_filled = round(
                        float(last_buy_order["filled"].to_string(index=False).strip()),
                        8,
                    )
                    buy_fees = round(
                        float(last_buy_order["fees"].to_string(index=False).strip()), 8
                    )
                    buy_price = round(
                        float(last_buy_order["price"].to_string(index=False).strip()), 8
                    )
                    sell_fees = (buy_filled * current_price) * maker_fee_rate
                    current_size = buy_filled * current_price - (
                        (buy_filled * current_price) * maker_fee_rate
                    )
                    maker_net_profit = round(current_size - buy_size, 2)
                    maker_margin = (maker_net_profit / buy_size) * 100
                    taker_net_profit = round(current_size - buy_size, 2)
                    taker_margin = (taker_net_profit / buy_size) * 100
                    if isinstance(current_price, float):
                        print("\n", " Current Price :", current_price)
                        print("\n", " Purchase Value :", "{:.2f}".format(buy_size))
                        print(" Current Value :", "{:.2f}".format(current_size))
                        if buy_type == "market":
                            print(
                                "\n",
                                " Buy Fee :",
                                "{:.6f}".format(buy_fees),
                                "(",
                                str(maker_fee_rate),
                                ")",
                            )
                            print(
                                " Sell Fee :",
                                "{:.6f}".format(sell_fees),
                                "(",
                                str(maker_fee_rate),
                                ")",
                            )
                        elif buy_type == "limit":
                            print(
                                "\n",
                                " Buy Fee :",
                                "{:.6f}".format(buy_fees),
                                "(",
                                str(taker_fee_rate),
                                ")",
                            )
                            print(
                                " Sell Fee :",
                                "{:.6f}".format(sell_fees),
                                "(",
                                str(taker_fee_rate),
                                ")",
                            )
                        print(
                            "\n",
                            " Maker Profit :",
                            "{:.2f}".format(maker_net_profit),
                        )
                        print(
                            " Maker Margin :",
                            str("{:.2f}".format(maker_margin)) + "%",
                        )
                        print(
                            "\n",
                            " Taker Profit :",
                            "{:.2f}".format(taker_net_profit),
                        )
                        print(
                            " Taker Margin :",
                            str("{:.2f}".format(taker_margin)) + "%",
                        )
                else:
                    # The last order is a sell: check for a pending limit sell
                    # belonging to the second-to-last (buy) order.
                    if len(orders) > 0:
                        second_last_order = orders.iloc[-2:-1]
                        last_buy_order = second_last_order[
                            second_last_order.action == "buy"
                        ]
                        last_buy_order = last_buy_order.reset_index(drop=True)
                        if len(last_buy_order) > 0:
                            orders = api.getOrders(status="open")
                            if len(orders) == 1:
                                last_open_order = orders[orders.action == "sell"]
                                last_open_order = last_open_order.reset_index(drop=True)
                                print(last_buy_order.to_string(index=False))
                                print("\n", last_open_order.to_string(index=False))
                                api = CBPublicAPI()
                                ticker = api.getTicker(market)
                                current_price = ticker[1]
                                future_price = float(last_open_order["price"].values[0])
                                market = last_buy_order["market"].to_string(index=False).strip()
                                buy_type = last_buy_order["type"].to_string(index=False).strip()
                                buy_size = round(
                                    float(last_buy_order["size"].to_string(index=False).strip()), 8
                                )
                                buy_filled = round(
                                    float(last_buy_order["filled"].to_string(index=False).strip()),
                                    8,
                                )
                                buy_fees = round(
                                    float(last_buy_order["fees"].to_string(index=False).strip()), 8
                                )
                                buy_price = round(
                                    float(last_buy_order["price"].to_string(index=False).strip()), 8
                                )
                                sell_fees = (buy_filled * current_price) * maker_fee_rate
                                current_size = buy_filled * current_price - (
                                    (buy_filled * current_price) * maker_fee_rate
                                )
                                taker_net_profit = round(current_size - buy_size, 2)
                                taker_margin = (taker_net_profit / buy_size) * 100
                                sell_size = round(
                                    float(last_open_order["value"].to_string(index=False).strip()), 8
                                )
                                sell_filled = round(
                                    float(last_open_order["size"].to_string(index=False).strip()),
                                    8,
                                )
                                future_size = sell_filled * future_price - (
                                    (sell_filled * future_price) * maker_fee_rate
                                )
                                maker_net_profit = round(sell_size - buy_filled, 2)
                                maker_margin = (maker_net_profit / sell_size) * 100
                                if isinstance(current_price, float):
                                    print("\n", " Current Price :", current_price)
                                    print("\n", " Purchase Value :", "{:.2f}".format(buy_size))
                                    print(" Current Value :", "{:.2f}".format(current_size))
                                    print(" Target Value :", "{:.2f}".format(sell_size))
                                    if buy_type == "market":
                                        print(
                                            "\n",
                                            " Buy Fee :",
                                            "{:.6f}".format(buy_fees),
                                            "(",
                                            str(maker_fee_rate),
                                            ")",
                                        )
                                        print(
                                            " Sell Fee :",
                                            "{:.6f}".format(sell_fees),
                                            "(",
                                            str(maker_fee_rate),
                                            ")",
                                        )
                                    elif buy_type == "limit":
                                        print(
                                            "\n",
                                            " Buy Fee :",
                                            "{:.6f}".format(buy_fees),
                                            "(",
                                            str(taker_fee_rate),
                                            ")",
                                        )
                                        print(
                                            " Sell Fee :",
                                            "{:.6f}".format(sell_fees),
                                            "(",
                                            str(taker_fee_rate),
                                            ")",
                                        )
                                    print(
                                        "\n",
                                        " Taker Profit :",
                                        "{:.2f}".format(taker_net_profit), "(now)"
                                    )
                                    print(
                                        " Taker Margin :",
                                        str("{:.2f}".format(taker_margin)) + "% (now)",
                                    )
                                    print(
                                        "\n",
                                        " Maker Profit :",
                                        "{:.2f}".format(maker_net_profit), "(target)"
                                    )
                                    print(
                                        " Maker Margin :",
                                        str("{:.2f}".format(maker_margin)) + "% (target)",
                                    )
                            else:
                                print("*** no active position open ***")
                        else:
                            print("*** no active position open ***")
                    else:
                        print("*** no active position open ***")
            print("\n")
        else:
            printHelp()
            sys.exit()
        #break

    # Keep only completed pairs and derive per-pair profit/margin columns.
    df_tracker = df_tracker[df_tracker['status'] == 'done']
    df_tracker['profit'] = df_tracker['sell_filled'] - df_tracker['buy_size']
    df_tracker['margin'] = (df_tracker['profit'] / df_tracker['buy_size']) * 100
    df_sincebot = df_tracker[df_tracker['buy_at'] > '2021-02-1']

    save_file = '../.secrets/coinbaseprotracker/tracker.csv'
    try:
        df_sincebot.to_csv(save_file, index=False)
    except OSError:
        raise SystemExit('Unable to save: ', save_file)

except IOError as err:
    print(err)
except Exception as err:
    print(err)
| 42.311628 | 101 | 0.361987 |
39f20917d425e59b09f5d0ffbf2e2b5c25f765e1 | 1,911 | py | Python | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetPageCompressConfigRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetPageCompressConfigRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetPageCompressConfigRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class SetPageCompressConfigRequest(RpcRequest):
    """RPC request for the CDN ``SetPageCompressConfig`` action.

    Wraps the 2014-11-11 API version of the action, exposing accessors
    for the ``DomainName``, ``OwnerId``, ``SecurityToken`` and ``Enable``
    query parameters.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'SetPageCompressConfig')

        # Endpoint resolution data is only attached when the installed
        # core SDK version exposes these attributes.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_DomainName(self):
        """Return the accelerated domain name query parameter."""
        return self.get_query_params().get('DomainName')

    def set_DomainName(self, DomainName):
        """Set the accelerated domain name query parameter."""
        self.add_query_param('DomainName', DomainName)

    def get_OwnerId(self):
        """Return the resource owner ID query parameter."""
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        """Set the resource owner ID query parameter."""
        self.add_query_param('OwnerId', OwnerId)

    def get_SecurityToken(self):
        """Return the STS security token query parameter."""
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self, SecurityToken):
        """Set the STS security token query parameter."""
        self.add_query_param('SecurityToken', SecurityToken)

    def get_Enable(self):
        """Return the compression enable flag query parameter."""
        return self.get_query_params().get('Enable')

    def set_Enable(self, Enable):
        """Set the compression enable flag query parameter."""
        self.add_query_param('Enable', Enable)
c43cf0b940e9a1f310975984a4b333ab4ece6732 | 72,007 | py | Python | reviewboard/reviews/tests/test_entries.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | [
"MIT"
] | null | null | null | reviewboard/reviews/tests/test_entries.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | [
"MIT"
] | null | null | null | reviewboard/reviews/tests/test_entries.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | [
"MIT"
] | null | null | null | """Unit tests for review request page entries."""
import logging
from datetime import datetime, timedelta
from django.contrib.auth.models import AnonymousUser, User
from django.template import RequestContext
from django.test.client import RequestFactory
from django.utils import timezone
from django.utils.timezone import utc
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.reviews.detail import (BaseReviewRequestPageEntry,
ChangeEntry,
InitialStatusUpdatesEntry,
ReviewEntry,
ReviewRequestPageData,
StatusUpdatesEntryMixin)
from reviewboard.reviews.models import (BaseComment, GeneralComment,
StatusUpdate)
from reviewboard.testing import TestCase
class BaseReviewRequestPageEntryTests(SpyAgency, TestCase):
    """Unit tests for BaseReviewRequestPageEntry."""

    # User fixtures required by create_review_request().
    fixtures = ['test_users']

    def setUp(self):
        super(BaseReviewRequestPageEntryTests, self).setUp()

        self.review_request = self.create_review_request()

        # Unless a test overrides it, entries are rendered against an
        # anonymous HTTP request.
        self.request = RequestFactory().request()
        self.request.user = AnonymousUser()

        self.data = ReviewRequestPageData(review_request=self.review_request,
                                          request=self.request)

    def test_init_with_no_updated_timestamp(self):
        """Testing BaseReviewRequestPageEntry.__init__ without an
        updated_timestamp specified
        """
        # The updated timestamp should fall back to the added timestamp.
        entry = BaseReviewRequestPageEntry(
            data=self.data,
            entry_id='test',
            added_timestamp=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))

        self.assertEqual(entry.updated_timestamp,
                         datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))

    def test_render_to_string(self):
        """Testing BaseReviewRequestPageEntry.render_to_string"""
        entry = BaseReviewRequestPageEntry(data=self.data,
                                           entry_id='test',
                                           added_timestamp=None)
        entry.template_name = 'reviews/entries/base.html'

        html = entry.render_to_string(
            self.request,
            RequestContext(self.request, {
                'last_visited': timezone.now(),
            }))

        self.assertNotEqual(html, '')

    def test_render_to_string_with_entry_pos_main(self):
        """Testing BaseReviewRequestPageEntry.render_to_string with
        entry_pos=ENTRY_POS_MAIN
        """
        entry = BaseReviewRequestPageEntry(data=self.data,
                                           entry_id='test',
                                           added_timestamp=None)
        entry.template_name = 'reviews/entries/base.html'
        entry.entry_pos = BaseReviewRequestPageEntry.ENTRY_POS_MAIN

        html = entry.render_to_string(
            self.request,
            RequestContext(self.request, {
                'last_visited': timezone.now(),
            }))

        # Main-position entries include the statuses box in their output.
        self.assertIn('<div class="box-statuses">', html)

    def test_render_to_string_with_entry_pos_initial(self):
        """Testing BaseReviewRequestPageEntry.render_to_string with
        entry_pos=ENTRY_POS_INITIAL
        """
        entry = BaseReviewRequestPageEntry(data=self.data,
                                           entry_id='test',
                                           added_timestamp=None)
        entry.template_name = 'reviews/entries/base.html'
        entry.entry_pos = BaseReviewRequestPageEntry.ENTRY_POS_INITIAL

        html = entry.render_to_string(
            self.request,
            RequestContext(self.request, {
                'last_visited': timezone.now(),
            }))

        # Initial-position entries do not render the statuses box.
        self.assertNotIn('<div class="box-statuses">', html)

    def test_render_to_string_with_new_entry(self):
        """Testing BaseReviewRequestPageEntry.render_to_string with
        entry_is_new=True
        """
        entry = BaseReviewRequestPageEntry(
            data=self.data,
            entry_id='test',
            added_timestamp=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
        entry.template_name = 'reviews/entries/base.html'

        # "New" highlighting only applies to authenticated users.
        self.request.user = User.objects.create_user(username='test-user',
                                                     email='user@example.com')

        html = entry.render_to_string(
            self.request,
            RequestContext(self.request, {
                'last_visited': datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc),
            }))

        self.assertIn(
            'class="review-request-page-entry new-review-request-page-entry',
            html)

    def test_render_to_string_without_new_entry(self):
        """Testing BaseReviewRequestPageEntry.render_to_string with
        entry_is_new=False
        """
        entry = BaseReviewRequestPageEntry(
            data=self.data,
            entry_id='test',
            added_timestamp=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
        entry.template_name = 'reviews/entries/base.html'

        self.request.user = User.objects.create_user(username='test-user',
                                                     email='user@example.com')

        # last_visited is after the added timestamp, so the entry is not new.
        html = entry.render_to_string(
            self.request,
            RequestContext(self.request, {
                'last_visited': datetime(2017, 9, 7, 18, 0, 0, tzinfo=utc),
            }))

        self.assertNotEqual(html, '')
        self.assertNotIn(
            'class="review-request-page-entry new-review-request-page-entry"',
            html)

    def test_render_to_string_with_no_template(self):
        """Testing BaseReviewRequestPageEntry.render_to_string with
        template_name=None
        """
        entry = BaseReviewRequestPageEntry(data=self.data,
                                           entry_id='test',
                                           added_timestamp=None)

        html = entry.render_to_string(
            self.request,
            RequestContext(self.request, {
                'last_visited': timezone.now(),
            }))

        # Without a template there is nothing to render.
        self.assertEqual(html, '')

    def test_render_to_string_with_has_content_false(self):
        """Testing BaseReviewRequestPageEntry.render_to_string with
        has_content=False
        """
        entry = BaseReviewRequestPageEntry(data=self.data,
                                           entry_id='test',
                                           added_timestamp=None)
        entry.template_name = 'reviews/entries/base.html'
        entry.has_content = False

        html = entry.render_to_string(
            self.request,
            RequestContext(self.request, {
                'last_visited': timezone.now(),
            }))

        self.assertEqual(html, '')

    def test_render_to_string_with_exception(self):
        """Testing BaseReviewRequestPageEntry.render_to_string with
        exception
        """
        entry = BaseReviewRequestPageEntry(data=self.data,
                                           entry_id='test',
                                           added_timestamp=None)
        # A nonexistent template forces a rendering failure, which must be
        # swallowed, logged, and result in empty output rather than raising.
        entry.template_name = 'reviews/entries/NOT_FOUND.html'

        from reviewboard.reviews.detail import logger

        self.spy_on(logger.exception)

        html = entry.render_to_string(
            self.request,
            RequestContext(self.request, {
                'last_visited': timezone.now(),
            }))

        self.assertEqual(html, '')
        self.assertTrue(logger.exception.spy.called)
        self.assertEqual(logger.exception.spy.calls[0].args[0],
                         'Error rendering template for %s (ID=%s): %s')

    def test_is_entry_new_with_timestamp(self):
        """Testing BaseReviewRequestPageEntry.is_entry_new with timestamp"""
        entry = BaseReviewRequestPageEntry(
            data=self.data,
            entry_id='test',
            added_timestamp=datetime(2017, 9, 7, 15, 36, 0, tzinfo=utc))

        user = User.objects.create_user(username='test-user',
                                        email='user@example.com')

        # Newer than the last visit -> new; older or equal -> not new.
        self.assertTrue(entry.is_entry_new(
            last_visited=datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc),
            user=user))
        self.assertFalse(entry.is_entry_new(
            last_visited=datetime(2017, 9, 7, 16, 0, 0, tzinfo=utc),
            user=user))
        self.assertFalse(entry.is_entry_new(
            last_visited=datetime(2017, 9, 7, 15, 36, 0, tzinfo=utc),
            user=user))

    def test_is_entry_new_without_timestamp(self):
        """Testing BaseReviewRequestPageEntry.is_entry_new without timestamp
        """
        entry = BaseReviewRequestPageEntry(data=self.data,
                                           entry_id='test',
                                           added_timestamp=None)

        self.assertFalse(entry.is_entry_new(
            last_visited=datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc),
            user=User.objects.create_user(username='test-user',
                                          email='user@example.com')))

    def test_collapsed_with_older_than_last_visited(self):
        """Testing BaseReviewRequestPageEntry.collapsed with entry older than
        last visited
        """
        self.data.latest_changedesc_timestamp = \
            self.review_request.time_added + timedelta(days=5)
        self.data.last_visited = datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc)

        entry = BaseReviewRequestPageEntry(
            data=self.data,
            entry_id='test',
            added_timestamp=self.data.last_visited - timedelta(days=2),
            updated_timestamp=self.data.last_visited - timedelta(days=1))

        self.assertTrue(entry.collapsed)

    def test_collapsed_with_newer_than_last_visited(self):
        """Testing BaseReviewRequestPageEntry.collapsed with entry newer than
        last visited
        """
        self.data.last_visited = datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc)

        entry = BaseReviewRequestPageEntry(
            data=self.data,
            entry_id='test',
            added_timestamp=self.data.last_visited,
            updated_timestamp=self.data.last_visited + timedelta(days=1))

        self.assertFalse(entry.collapsed)

    def test_collapsed_without_last_visited(self):
        """Testing BaseReviewRequestPageEntry.collapsed without last visited
        timestamp
        """
        entry = BaseReviewRequestPageEntry(
            data=self.data,
            entry_id='test',
            added_timestamp=datetime(2017, 9, 6, 10, 0, 0, tzinfo=utc),
            updated_timestamp=datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc))

        self.assertFalse(entry.collapsed)

    def test_collapsed_with_older_than_changedesc(self):
        """Testing BaseReviewRequestPageEntry.collapsed with older than latest
        Change Description
        """
        self.data.latest_changedesc_timestamp = \
            self.review_request.time_added + timedelta(days=5)
        self.data.last_visited = \
            self.review_request.time_added + timedelta(days=10)

        entry = BaseReviewRequestPageEntry(
            data=self.data,
            entry_id='test',
            added_timestamp=(self.data.latest_changedesc_timestamp -
                             timedelta(days=2)),
            updated_timestamp=(self.data.latest_changedesc_timestamp -
                               timedelta(days=1)))

        self.assertTrue(entry.collapsed)

    def test_collapsed_with_newer_than_changedesc(self):
        """Testing BaseReviewRequestPageEntry.collapsed with newer than latest
        Change Description
        """
        self.data.latest_changedesc_timestamp = self.review_request.time_added
        self.data.last_visited = \
            self.review_request.time_added + timedelta(days=10)

        entry = BaseReviewRequestPageEntry(
            data=self.data,
            entry_id='test',
            added_timestamp=self.data.latest_changedesc_timestamp,
            updated_timestamp=(self.data.latest_changedesc_timestamp +
                               timedelta(days=1)))

        self.assertFalse(entry.collapsed)
class StatusUpdatesEntryMixinTests(TestCase):
    """Unit tests for StatusUpdatesEntryMixin."""

    def test_add_update_with_done_failure(self):
        """Testing StatusUpdatesEntryMixin.add_update with DONE_FAILURE"""
        status_update = StatusUpdate(state=StatusUpdate.DONE_FAILURE)

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        self.assertEqual(entry.status_updates, [status_update])
        self.assertEqual(status_update.header_class,
                         'status-update-state-failure')

    def test_add_update_with_error(self):
        """Testing StatusUpdatesEntryMixin.add_update with ERROR"""
        status_update = StatusUpdate(state=StatusUpdate.ERROR)

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        self.assertEqual(entry.status_updates, [status_update])
        self.assertEqual(status_update.header_class,
                         'status-update-state-failure')

    def test_add_update_with_timeout(self):
        """Testing StatusUpdatesEntryMixin.add_update with TIMEOUT"""
        status_update = StatusUpdate(state=StatusUpdate.TIMEOUT)

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        self.assertEqual(entry.status_updates, [status_update])
        self.assertEqual(status_update.header_class,
                         'status-update-state-failure')

    def test_add_update_with_pending(self):
        """Testing StatusUpdatesEntryMixin.add_update with PENDING"""
        status_update = StatusUpdate(state=StatusUpdate.PENDING)

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        self.assertEqual(entry.status_updates, [status_update])
        self.assertEqual(status_update.header_class,
                         'status-update-state-pending')

    def test_add_update_with_not_yet_run(self):
        """Testing StatusUpdatesEntryMixin.add_update with NOT_YET_RUN"""
        status_update = StatusUpdate(state=StatusUpdate.NOT_YET_RUN)

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        self.assertEqual(entry.status_updates, [status_update])
        self.assertEqual(status_update.header_class,
                         'status-update-state-not-yet-run')

    def test_add_update_with_done_success(self):
        """Testing StatusUpdatesEntryMixin.add_update with DONE_SUCCESS"""
        status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS)

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        self.assertEqual(entry.status_updates, [status_update])
        self.assertEqual(status_update.header_class,
                         'status-update-state-success')

    def test_add_update_html_rendering(self):
        """Testing StatusUpdatesEntryMixin.add_update HTML rendering"""
        status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS,
                                     description='My description.',
                                     summary='My summary.')

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        self.assertHTMLEqual(
            status_update.summary_html,
            ('<div class="status-update-summary-entry'
             ' status-update-state-success">\n'
             ' <span class="summary">My summary.</span>\n'
             ' My description.\n'
             '</div>'))

    def test_add_update_html_rendering_with_url(self):
        """Testing StatusUpdatesEntryMixin.add_update HTML rendering with URL
        """
        status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS,
                                     description='My description.',
                                     summary='My summary.',
                                     url='https://example.com/')

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        # Without url_text, the URL itself is used as the link text.
        self.assertHTMLEqual(
            status_update.summary_html,
            ('<div class="status-update-summary-entry'
             ' status-update-state-success">\n'
             ' <span class="summary">My summary.</span>\n'
             ' My description.\n'
             ' <a href="https://example.com/">https://example.com/</a>'
             '</div>'))

    def test_add_update_html_rendering_with_url_and_text(self):
        """Testing StatusUpdatesEntryMixin.add_update HTML rendering with URL
        and URL text
        """
        status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS,
                                     description='My description.',
                                     summary='My summary.',
                                     url='https://example.com/',
                                     url_text='My URL')

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        self.assertHTMLEqual(
            status_update.summary_html,
            ('<div class="status-update-summary-entry'
             ' status-update-state-success">\n'
             ' <span class="summary">My summary.</span>\n'
             ' My description.\n'
             ' <a href="https://example.com/">My URL</a>'
             '</div>'))

    def test_add_update_html_rendering_with_timeout(self):
        """Testing StatusUpdatesEntryMixin.add_update HTML rendering with
        timeout
        """
        status_update = StatusUpdate(state=StatusUpdate.TIMEOUT,
                                     description='My description.',
                                     summary='My summary.')

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        # A timeout replaces the description with a fixed "timed out."
        # message.
        self.assertHTMLEqual(
            status_update.summary_html,
            ('<div class="status-update-summary-entry'
             ' status-update-state-failure">\n'
             ' <span class="summary">My summary.</span>\n'
             ' timed out.\n'
             '</div>'))

    @add_fixtures(['test_users'])
    def test_add_update_html_rendering_with_timeout_can_retry(self):
        """Testing StatusUpdatesEntryMixin.add_update HTML rendering with
        timeout and retry
        """
        review_request = self.create_review_request()
        status_update = StatusUpdate(state=StatusUpdate.TIMEOUT,
                                     description='My description.',
                                     summary='My summary.',
                                     review_request=review_request)
        status_update.extra_data['can_retry'] = True
        status_update.save()

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        # can_retry adds a "Retry" button wired to this status update's ID.
        self.assertHTMLEqual(
            status_update.summary_html,
            ('<div class="status-update-summary-entry'
             ' status-update-state-failure">\n'
             ' <span class="summary">My summary.</span>\n'
             ' timed out.\n'
             ' <input class="status-update-request-run"'
             '        data-status-update-id="1"'
             '        type="button" value="Retry" />'
             '</div>'))

    @add_fixtures(['test_users'])
    def test_add_update_html_rendering_with_not_yet_run(self):
        """Testing StatusUpdatesEntryMixin.add_update HTML rendering with not
        yet run
        """
        review_request = self.create_review_request()
        status_update = StatusUpdate(state=StatusUpdate.NOT_YET_RUN,
                                     description='My description.',
                                     summary='My summary.',
                                     review_request=review_request)
        status_update.save()

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)

        self.assertHTMLEqual(
            status_update.summary_html,
            ('<div class="status-update-summary-entry'
             ' status-update-state-not-yet-run">\n'
             ' <span class="summary">My summary.</span>\n'
             ' not yet run.\n'
             ' <input class="status-update-request-run"'
             '        data-status-update-id="1"'
             '        type="button" value="Run" />'
             '</div>'))

    @add_fixtures(['test_users'])
    def test_add_comment(self):
        """Testing StatusUpdatesEntryMixin.add_comment"""
        review_request = self.create_review_request()
        review = self.create_review(review_request)
        comment = self.create_general_comment(review)

        # This is needed by the entry's add_comment(). It's normally built when
        # creating the entries and their data.
        comment.review_obj = review

        status_update = self.create_status_update(
            review_request=review_request,
            review=review)

        entry = StatusUpdatesEntryMixin()
        entry.add_update(status_update)
        entry.add_comment('general_comments', comment)

        self.assertEqual(status_update.comments['general_comments'], [comment])

    def test_finalize_with_all_states(self):
        """Testing StatusUpdatesEntryMixin.finalize with all states"""
        entry = StatusUpdatesEntryMixin()
        entry.add_update(StatusUpdate(state=StatusUpdate.DONE_FAILURE))

        # The loop index is never used; `_` makes that explicit (the old
        # `for i in ...` left an unused variable).
        for _ in range(2):
            entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))

        for _ in range(3):
            entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))

        for _ in range(4):
            entry.add_update(StatusUpdate(state=StatusUpdate.NOT_YET_RUN))

        for _ in range(5):
            entry.add_update(StatusUpdate(state=StatusUpdate.ERROR))

        for _ in range(6):
            entry.add_update(StatusUpdate(state=StatusUpdate.TIMEOUT))

        entry.finalize()

        self.assertEqual(
            entry.state_summary,
            '1 failed, 2 succeeded, 3 pending, 4 not yet run, '
            '5 failed with error, 6 timed out')

    def test_finalize_with_done_failure(self):
        """Testing StatusUpdatesEntryMixin.finalize with DONE_FAILURE"""
        entry = StatusUpdatesEntryMixin()
        entry.add_update(StatusUpdate(state=StatusUpdate.DONE_FAILURE))
        entry.finalize()

        self.assertEqual(entry.state_summary, '1 failed')
        self.assertEqual(entry.state_summary_class,
                         'status-update-state-failure')

    def test_finalize_with_error(self):
        """Testing StatusUpdatesEntryMixin.finalize with ERROR"""
        entry = StatusUpdatesEntryMixin()
        entry.add_update(StatusUpdate(state=StatusUpdate.ERROR))
        entry.finalize()

        self.assertEqual(entry.state_summary, '1 failed with error')
        self.assertEqual(entry.state_summary_class,
                         'status-update-state-failure')

    def test_finalize_with_timeout(self):
        """Testing StatusUpdatesEntryMixin.finalize with TIMEOUT"""
        entry = StatusUpdatesEntryMixin()
        entry.add_update(StatusUpdate(state=StatusUpdate.TIMEOUT))
        entry.finalize()

        self.assertEqual(entry.state_summary, '1 timed out')
        self.assertEqual(entry.state_summary_class,
                         'status-update-state-failure')

    def test_finalize_with_pending(self):
        """Testing StatusUpdatesEntryMixin.finalize with PENDING"""
        entry = StatusUpdatesEntryMixin()
        entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))
        entry.finalize()

        self.assertEqual(entry.state_summary, '1 pending')
        self.assertEqual(entry.state_summary_class,
                         'status-update-state-pending')

    def test_finalize_with_not_yet_run(self):
        """Testing StatusUpdatesEntryMixin.finalize with NOT_YET_RUN"""
        entry = StatusUpdatesEntryMixin()
        entry.add_update(StatusUpdate(state=StatusUpdate.NOT_YET_RUN))
        entry.finalize()

        self.assertEqual(entry.state_summary, '1 not yet run')
        self.assertEqual(entry.state_summary_class,
                         'status-update-state-pending')

    def test_finalize_with_done_success(self):
        """Testing StatusUpdatesEntryMixin.finalize with DONE_SUCCESS"""
        entry = StatusUpdatesEntryMixin()
        entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))
        entry.finalize()

        self.assertEqual(entry.state_summary, '1 succeeded')
        self.assertEqual(entry.state_summary_class,
                         'status-update-state-success')

    def test_finalize_with_failures_take_precedence(self):
        """Testing StatusUpdatesEntryMixin.finalize with failures taking
        precedence over PENDING and DONE_SUCCESS
        """
        entry = StatusUpdatesEntryMixin()
        entry.add_update(StatusUpdate(state=StatusUpdate.DONE_FAILURE))
        entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))
        entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))
        entry.add_update(StatusUpdate(state=StatusUpdate.NOT_YET_RUN))
        entry.finalize()

        self.assertEqual(entry.state_summary,
                         '1 failed, 1 succeeded, 1 pending, 1 not yet run')
        self.assertEqual(entry.state_summary_class,
                         'status-update-state-failure')

    def test_finalize_with_pending_take_precedence(self):
        """Testing StatusUpdatesEntryMixin.finalize with PENDING taking
        precedence SUCCESS
        """
        entry = StatusUpdatesEntryMixin()
        entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))
        entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))
        entry.finalize()

        self.assertEqual(entry.state_summary, '1 succeeded, 1 pending')
        self.assertEqual(entry.state_summary_class,
                         'status-update-state-pending')

    @add_fixtures(['test_users'])
    def test_populate_status_updates(self):
        """Testing StatusUpdatesEntryMixin.populate_status_updates"""
        review_request = self.create_review_request()
        review = self.create_review(review_request, public=True)
        comment = self.create_general_comment(review)

        # This state is normally set in ReviewRequestPageData.
        comment._type = 'general_comments'
        comment.review_obj = review

        status_updates = [
            StatusUpdate(state=StatusUpdate.PENDING),
            StatusUpdate(state=StatusUpdate.DONE_FAILURE,
                         review=review)
        ]

        request = RequestFactory().get('/r/1/')
        request.user = AnonymousUser()

        data = ReviewRequestPageData(review_request=review_request,
                                     request=request)
        data.review_comments[review.pk] = [comment]

        entry = StatusUpdatesEntryMixin()
        entry.collapsed = True
        entry.data = data
        entry.populate_status_updates(status_updates)

        # Population must not affect the collapsed state.
        self.assertTrue(entry.collapsed)
        self.assertEqual(entry.status_updates, status_updates)

        status_update = entry.status_updates[0]
        self.assertIsNone(status_update.review)
        self.assertEqual(
            status_update.comments,
            {
                'diff_comments': [],
                'screenshot_comments': [],
                'file_attachment_comments': [],
                'general_comments': [],
            })

        status_update = entry.status_updates[1]
        self.assertEqual(status_update.review, review)
        self.assertEqual(
            status_update.comments,
            {
                'diff_comments': [],
                'screenshot_comments': [],
                'file_attachment_comments': [],
                'general_comments': [comment],
            })

    @add_fixtures(['test_users'])
    def test_populate_status_updates_with_draft_replies(self):
        """Testing StatusUpdatesEntryMixin.populate_status_updates with
        draft replies
        """
        review_request = self.create_review_request()
        review = self.create_review(review_request, public=True)
        comment = self.create_general_comment(review)

        reply = self.create_reply(review)
        reply_comment = self.create_general_comment(reply, reply_to=comment)

        # This state is normally set in ReviewRequestPageData.
        comment._type = 'general_comments'
        comment.review_obj = review

        status_updates = [
            StatusUpdate(state=StatusUpdate.PENDING),
            StatusUpdate(state=StatusUpdate.DONE_FAILURE,
                         review=review)
        ]

        request = RequestFactory().get('/r/1/')
        request.user = AnonymousUser()

        data = ReviewRequestPageData(review_request=review_request,
                                     request=request)
        data.review_comments[review.pk] = [comment]
        data.draft_reply_comments[review.pk] = [reply_comment]

        entry = StatusUpdatesEntryMixin()
        entry.data = data
        entry.populate_status_updates(status_updates)

        self.assertEqual(entry.status_updates, status_updates)

        status_update = entry.status_updates[0]
        self.assertIsNone(status_update.review)
        self.assertEqual(
            status_update.comments,
            {
                'diff_comments': [],
                'screenshot_comments': [],
                'file_attachment_comments': [],
                'general_comments': [],
            })

        status_update = entry.status_updates[1]
        self.assertEqual(status_update.review, review)
        self.assertEqual(
            status_update.comments,
            {
                'diff_comments': [],
                'screenshot_comments': [],
                'file_attachment_comments': [],
                'general_comments': [comment],
            })
class InitialStatusUpdatesEntryTests(TestCase):
"""Unit tests for InitialStatusUpdatesEntry."""
fixtures = ['test_users']
def setUp(self):
super(InitialStatusUpdatesEntryTests, self).setUp()
self.request = RequestFactory().get('/r/1/')
self.request.user = AnonymousUser()
self.review_request = self.create_review_request(
time_added=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
self.review = self.create_review(
self.review_request,
public=True,
timestamp=datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))
self.general_comment = self.create_general_comment(self.review,
issue_opened=False)
self.status_update = self.create_status_update(
self.review_request,
review=self.review,
timestamp=datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc),
state=StatusUpdate.DONE_FAILURE)
self.data = ReviewRequestPageData(
review_request=self.review_request,
request=self.request,
last_visited=self.review_request.time_added + timedelta(days=10))
def test_added_timestamp(self):
"""Testing InitialStatusUpdatesEntry.added_timestamp"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertEqual(entry.added_timestamp,
datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
def test_updated_timestamp(self):
"""Testing InitialStatusUpdatesEntry.updated_timestamp"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertEqual(entry.updated_timestamp,
datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))
def test_build_entries(self):
"""Testing InitialStatusUpdatesEntry.build_entries"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entries = list(InitialStatusUpdatesEntry.build_entries(self.data))
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertEqual(entry.added_timestamp,
datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
self.assertEqual(entry.updated_timestamp,
datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))
self.assertEqual(entry.status_updates, [self.status_update])
self.assertEqual(
entry.status_updates_by_review,
{
self.review.pk: self.status_update,
})
self.assertEqual(
entry.status_updates[0].comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [self.general_comment],
})
def test_build_entries_with_changedesc(self):
"""Testing InitialStatusUpdatesEntry.build_entries with
ChangeDescription following this entry
"""
self.review_request.changedescs.create(public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entries = list(InitialStatusUpdatesEntry.build_entries(self.data))
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertEqual(entry.status_updates, [self.status_update])
self.assertEqual(
entry.status_updates_by_review,
{
self.review.pk: self.status_update,
})
status_update = entry.status_updates[0]
self.assertEqual(status_update.review, self.review)
self.assertIsNone(status_update.change_description)
self.assertEqual(
status_update.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [self.general_comment],
})
def test_is_entry_new_with_timestamp(self):
"""Testing InitialStatusUpdatesEntry.is_entry_new"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
user = User.objects.create_user(username='test-user',
email='user@example.com')
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.is_entry_new(
last_visited=self.review_request.last_updated - timedelta(days=1),
user=user))
def test_collapsed_with_no_changedescs_and_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with no Change
Descriptions and page previously visited
"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertTrue(len(self.data.changedescs) == 0)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertTrue(entry.collapsed)
def test_collapsed_with_no_changedescs_and_not_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with no Change
Descriptions and page not previously visited
"""
self.data.last_visited = None
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertTrue(len(self.data.changedescs) == 0)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
def test_collapsed_with_changedescs_and_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with Change Descriptions
and page previously visited
"""
self.review_request.changedescs.create(public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertTrue(len(self.data.changedescs) > 0)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertTrue(entry.collapsed)
def test_collapsed_with_changedescs_and_no_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with Change Descriptions
and page not previously visited
"""
self.data.last_visited = None
self.review_request.changedescs.create(public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertTrue(len(self.data.changedescs) > 0)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
def test_collapsed_with_pending_status_updates(self):
"""Testing InitialStatusUpdatesEntry.collapsed with pending status
updates
"""
self.status_update.state = StatusUpdate.PENDING
self.status_update.review = None
self.status_update.save(update_fields=('state', 'review'))
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
def test_collapsed_with_not_yet_run_status_updates(self):
"""Testing InitialStatusUpdatesEntry.collapsed with not yet run status
updates
"""
self.status_update.state = StatusUpdate.NOT_YET_RUN
self.status_update.review = None
self.status_update.save(update_fields=('state', 'review'))
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
def test_collapsed_with_status_update_timestamp_gt_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with status update
timestamp newer than last visited
"""
# To update the status update's timestamp, we need to perform an
# update() call on the queryset and reload.
StatusUpdate.objects.filter(pk=self.status_update.pk).update(
timestamp=self.data.last_visited + timedelta(days=1))
self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)
self.assertTrue(self.status_update.timestamp > self.data.last_visited)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
def test_collapsed_with_status_update_timestamp_lt_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with status update
timestamp newer than last visited
"""
# To update the status update's timestamp, we need to perform an
# update() call on the queryset and reload.
StatusUpdate.objects.filter(pk=self.status_update.pk).update(
timestamp=self.data.last_visited - timedelta(days=1))
self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)
self.assertTrue(self.status_update.timestamp < self.data.last_visited)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertTrue(entry.collapsed)
def test_collapsed_with_status_updates_and_no_reviews(self):
"""Testing InitialStatusUpdatesEntry.collapsed with status updates
and no reviews
"""
self.status_update.state = StatusUpdate.DONE_SUCCESS
self.status_update.review = None
self.status_update.save(update_fields=('state', 'review'))
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertTrue(entry.collapsed)
    def test_collapsed_with_status_updates_and_draft_comment_replies(self):
        """Testing InitialStatusUpdatesEntry.collapsed with status updates
        containing draft comment replies
        """
        # View the page as the owner, so their drafts are taken into account.
        self.request.user = self.review_request.submitter
        self.assertEqual(self.status_update.state, StatusUpdate.DONE_FAILURE)

        # Create an unpublished reply to an existing comment.
        reply = self.create_reply(self.review, user=self.request.user)
        self.create_general_comment(reply, reply_to=self.general_comment)

        self.review_request.changedescs.create(public=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()
        self.assertIn(self.review.pk, self.data.draft_reply_comments)

        entry = InitialStatusUpdatesEntry(data=self.data)
        self.assertFalse(entry.collapsed)
def test_collapsed_with_status_updates_and_draft_body_top_replies(self):
"""Testing InitialStatusUpdatesEntry.collapsed with status updates
containing draft replies to body_top
"""
self.request.user = self.review_request.submitter
self.assertEqual(self.status_update.state, StatusUpdate.DONE_FAILURE)
self.create_reply(self.review,
user=self.request.user,
body_top_reply_to=self.review)
self.review_request.changedescs.create(public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertIn(self.review.pk, self.data.draft_body_top_replies)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
    def test_collapsed_with_status_updates_and_draft_body_bottom_replies(self):
        """Testing InitialStatusUpdatesEntry.collapsed with status updates
        containing draft replies to body_bottom
        """
        # View the page as the owner, so their drafts are taken into account.
        self.request.user = self.review_request.submitter
        self.assertEqual(self.status_update.state, StatusUpdate.DONE_FAILURE)

        # An unpublished reply to the review's body_bottom, by the viewer.
        self.create_reply(self.review,
                          user=self.request.user,
                          body_bottom_reply_to=self.review)

        self.review_request.changedescs.create(public=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()
        self.assertIn(self.review.pk, self.data.draft_body_bottom_replies)

        entry = InitialStatusUpdatesEntry(data=self.data)
        self.assertFalse(entry.collapsed)
class ReviewEntryTests(TestCase):
    """Unit tests for ReviewEntry."""

    fixtures = ['test_users']

    def setUp(self):
        super(ReviewEntryTests, self).setUp()

        # An anonymous viewer by default; individual tests switch to the
        # review request's submitter when drafts need to be visible.
        self.request = RequestFactory().get('/r/1/')
        self.request.user = AnonymousUser()

        self.review_request = self.create_review_request()

        # A single public review with a fixed timestamp, so timestamp
        # expectations below can be exact.
        self.review = self.create_review(
            self.review_request,
            id=123,
            public=True,
            timestamp=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))

        self.changedesc = self.review_request.changedescs.create(
            timestamp=self.review.timestamp + timedelta(days=10),
            public=True)

        self.data = ReviewRequestPageData(
            review_request=self.review_request,
            request=self.request,
            last_visited=self.changedesc.timestamp)

    def test_added_timestamp(self):
        """Testing ReviewEntry.added_timestamp"""
        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertEqual(entry.added_timestamp,
                         datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))

    def test_updated_timestamp(self):
        """Testing ReviewEntry.updated_timestamp"""
        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertEqual(entry.updated_timestamp,
                         datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))

    def test_updated_timestamp_with_replies(self):
        """Testing ReviewEntry.updated_timestamp with replies"""
        # A newer published reply should push the updated timestamp forward.
        self.create_reply(self.review,
                          timestamp=datetime(2017, 9, 14, 15, 40, 0,
                                             tzinfo=utc),
                          publish=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertEqual(entry.updated_timestamp,
                         datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))

    def test_get_dom_element_id(self):
        """Testing ReviewEntry.get_dom_element_id"""
        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertEqual(entry.get_dom_element_id(), 'review123')

    def test_collapsed_with_open_issues(self):
        """Testing ReviewEntry.collapsed with open issues"""
        self.create_general_comment(self.review,
                                    issue_opened=True,
                                    issue_status=BaseComment.OPEN)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_open_issues_verifying_resolved(self):
        """Testing ReviewEntry.collapsed with open issues marked Verifying
        Resolved
        """
        self.create_general_comment(
            self.review,
            issue_opened=True,
            issue_status=BaseComment.VERIFYING_RESOLVED)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_open_issues_verifying_dropped(self):
        """Testing ReviewEntry.collapsed with open issues marked Verifying
        Dropped
        """
        self.create_general_comment(self.review,
                                    issue_opened=True,
                                    issue_status=BaseComment.VERIFYING_DROPPED)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_dropped_issues(self):
        """Testing ReviewEntry.collapsed with dropped issues"""
        self.create_general_comment(self.review,
                                    issue_opened=True,
                                    issue_status=BaseComment.DROPPED)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertTrue(entry.collapsed)

    def test_collapsed_with_resolved_issues(self):
        """Testing ReviewEntry.collapsed with resolved issues"""
        self.create_general_comment(self.review,
                                    issue_opened=True,
                                    issue_status=BaseComment.RESOLVED)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertTrue(entry.collapsed)

    def test_collapsed_with_draft_reply_comments(self):
        """Testing ReviewEntry.collapsed with draft reply comments"""
        # View the page as the owner, so their drafts are taken into account.
        self.request.user = self.review_request.submitter

        comment = self.create_general_comment(self.review)
        reply = self.create_reply(self.review, user=self.request.user)
        self.create_general_comment(reply, reply_to=comment)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()
        self.assertIn(self.review.pk, self.data.draft_reply_comments)

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_draft_body_top_replies(self):
        """Testing ReviewEntry.collapsed with draft replies to body_top"""
        self.request.user = self.review_request.submitter
        self.create_reply(self.review,
                          user=self.request.user,
                          body_top_reply_to=self.review)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()
        self.assertIn(self.review.pk, self.data.draft_body_top_replies)

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_draft_body_bottom_replies(self):
        """Testing ReviewEntry.collapsed with draft replies to body_bottom"""
        self.request.user = self.review_request.submitter
        self.create_reply(self.review,
                          user=self.request.user,
                          body_bottom_reply_to=self.review)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()
        self.assertIn(self.review.pk, self.data.draft_body_bottom_replies)

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_reply_older_than_last_visited(self):
        """Testing ReviewEntry.collapsed with reply older than last visited"""
        reply = self.create_reply(
            self.review,
            publish=True,
            timestamp=self.review.timestamp + timedelta(days=2))

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        # Simulate the user having last visited after the reply was posted.
        self.data.last_visited = reply.timestamp + timedelta(days=1)

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertTrue(entry.collapsed)

    def test_collapsed_with_reply_newer_than_last_visited(self):
        """Testing ReviewEntry.collapsed with reply newer than last visited"""
        reply = self.create_reply(
            self.review,
            publish=True,
            timestamp=self.review.timestamp + timedelta(days=2))

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        # Simulate the user having last visited before the reply was posted.
        self.data.last_visited = reply.timestamp - timedelta(days=1)

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertFalse(entry.collapsed)

    def test_get_js_model_data(self):
        """Testing ReviewEntry.get_js_model_data"""
        self.review.ship_it = True
        self.review.publish()

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertEqual(entry.get_js_model_data(), {
            'reviewData': {
                'id': self.review.pk,
                'bodyTop': 'Test Body Top',
                'bodyBottom': 'Test Body Bottom',
                'public': True,
                'shipIt': True,
            },
        })

    @add_fixtures(['test_scmtools'])
    def test_get_js_model_data_with_diff_comments(self):
        """Testing ReviewEntry.get_js_model_data with diff comments"""
        self.review_request.repository = self.create_repository()
        diffset = self.create_diffset(self.review_request)
        filediff = self.create_filediff(diffset)

        comment1 = self.create_diff_comment(self.review, filediff)
        comment2 = self.create_diff_comment(self.review, filediff)
        self.review.publish()

        # This is needed by the entry's add_comment(). It's normally built when
        # creating the entries and their data.
        comment1.review_obj = self.review
        comment2.review_obj = self.review

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        entry.add_comment('diff_comments', comment1)
        entry.add_comment('diff_comments', comment2)

        self.assertEqual(entry.get_js_model_data(), {
            'reviewData': {
                'id': self.review.pk,
                'bodyTop': 'Test Body Top',
                'bodyBottom': 'Test Body Bottom',
                'public': True,
                'shipIt': False,
            },
            'diffCommentsData': [
                (str(comment1.pk), str(filediff.pk)),
                (str(comment2.pk), str(filediff.pk)),
            ],
        })

    def test_add_comment_with_no_open_issues(self):
        """Testing ReviewEntry.add_comment with comment not opening an issue"""
        self.request.user = self.review_request.submitter

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertFalse(entry.has_issues)
        self.assertEqual(entry.issue_open_count, 0)

        entry.add_comment('general_comments', GeneralComment())
        self.assertFalse(entry.has_issues)
        self.assertEqual(entry.issue_open_count, 0)

    def test_add_comment_with_open_issues(self):
        """Testing ReviewEntry.add_comment with comment opening an issue"""
        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertFalse(entry.has_issues)
        self.assertEqual(entry.issue_open_count, 0)

        entry.add_comment('general_comments',
                          GeneralComment(issue_opened=True,
                                         issue_status=GeneralComment.OPEN))
        self.assertTrue(entry.has_issues)
        self.assertEqual(entry.issue_open_count, 1)

    def test_add_comment_with_open_issues_and_viewer_is_owner(self):
        """Testing ReviewEntry.add_comment with comment opening an issue and
        the review request owner is viewing the page
        """
        self.request.user = self.review_request.submitter

        entry = ReviewEntry(data=self.data,
                            review=self.review)
        self.assertFalse(entry.has_issues)
        self.assertEqual(entry.issue_open_count, 0)

        entry.add_comment('general_comments',
                          GeneralComment(issue_opened=True,
                                         issue_status=GeneralComment.OPEN))
        self.assertTrue(entry.has_issues)
        self.assertEqual(entry.issue_open_count, 1)

    def test_build_entries(self):
        """Testing ReviewEntry.build_entries"""
        review1 = self.create_review(
            self.review_request,
            timestamp=self.review.timestamp - timedelta(days=2),
            public=True)
        review2 = self.review

        comment = self.create_general_comment(review1)

        # These shouldn't show up in the results.
        self.create_review(
            self.review_request,
            timestamp=self.review.timestamp - timedelta(days=1),
            public=False)
        self.create_reply(review1)

        # Reviews owned by a status update belong to that entry type, not to
        # plain ReviewEntry results.
        status_update_review = self.create_review(self.review_request,
                                                  public=True)
        self.create_general_comment(status_update_review)
        self.create_status_update(self.review_request,
                                  review=status_update_review,
                                  state=StatusUpdate.DONE_FAILURE)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entries = list(ReviewEntry.build_entries(self.data))
        self.assertEqual(len(entries), 2)

        # These will actually be in database query order (newest to oldest),
        # not the order shown on the page.
        entry = entries[0]
        self.assertEqual(entry.review, review2)
        self.assertEqual(
            entry.comments,
            {
                'diff_comments': [],
                'screenshot_comments': [],
                'file_attachment_comments': [],
                'general_comments': [],
            })

        entry = entries[1]
        self.assertEqual(entry.review, review1)
        self.assertEqual(
            entry.comments,
            {
                'diff_comments': [],
                'screenshot_comments': [],
                'file_attachment_comments': [],
                'general_comments': [comment],
            })
class ChangeEntryTests(TestCase):
    """Unit tests for ChangeEntry."""

    fixtures = ['test_users']

    def setUp(self):
        super(ChangeEntryTests, self).setUp()

        # An anonymous viewer by default; individual tests switch to the
        # review request's submitter when drafts need to be visible.
        self.request = RequestFactory().get('/r/1/')
        self.request.user = AnonymousUser()

        self.review_request = self.create_review_request()

        # A single public Change Description with a fixed timestamp, so
        # timestamp expectations below can be exact.
        self.changedesc = ChangeDescription.objects.create(
            id=123,
            public=True,
            timestamp=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
        self.review_request.changedescs.add(self.changedesc)

        self.data = ReviewRequestPageData(review_request=self.review_request,
                                          request=self.request)

    def test_added_timestamp(self):
        """Testing ChangeEntry.added_timestamp"""
        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertEqual(entry.added_timestamp,
                         datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))

    def test_updated_timestamp(self):
        """Testing ChangeEntry.updated_timestamp"""
        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertEqual(entry.updated_timestamp,
                         datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))

    def test_updated_timestamp_with_status_update(self):
        """Testing ChangeEntry.updated_timestamp with status updates"""
        # A newer status update should push the updated timestamp forward.
        self.create_status_update(
            self.review_request,
            change_description=self.changedesc,
            timestamp=datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertEqual(entry.updated_timestamp,
                         datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))

    def test_get_dom_element_id(self):
        """Testing ChangeEntry.get_dom_element_id"""
        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertEqual(entry.get_dom_element_id(), 'changedesc123')

    def test_collapsed_with_older_than_latest_changedesc(self):
        """Testing ChangeEntry.collapsed with older than latest Change
        Description
        """
        # Adding a newer Change Description makes self.changedesc old news.
        self.review_request.changedescs.create(
            timestamp=self.changedesc.timestamp + timedelta(days=1),
            public=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertTrue(entry.collapsed)

    def test_collapsed_with_latest_changedesc(self):
        """Testing ChangeEntry.collapsed with the latest Change Description"""
        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()
        self.assertEqual(self.changedesc.timestamp,
                         self.data.latest_changedesc_timestamp)

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_status_updates_and_no_reviews(self):
        """Testing ChangeEntry.collapsed with status updates and no reviews"""
        self.create_status_update(self.review_request,
                                  change_description=self.changedesc,
                                  state=StatusUpdate.DONE_SUCCESS)

        # Create a newer Change Description so this entry isn't the latest.
        self.review_request.changedescs.create(
            timestamp=self.changedesc.timestamp + timedelta(days=1),
            public=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertTrue(entry.collapsed)

    def test_collapsed_with_status_updates_and_draft_comment_replies(self):
        """Testing ChangeEntry.collapsed with status updates containing draft
        comment replies
        """
        # View the page as the owner, so their drafts are taken into account.
        self.request.user = self.review_request.submitter

        review = self.create_review(self.review_request, publish=True)
        comment = self.create_general_comment(review)
        self.create_status_update(self.review_request,
                                  review=review,
                                  change_description=self.changedesc,
                                  state=StatusUpdate.DONE_FAILURE)

        # An unpublished reply to an existing comment, by the viewer.
        reply = self.create_reply(review, user=self.request.user)
        self.create_general_comment(reply, reply_to=comment)

        self.review_request.changedescs.create(
            timestamp=self.changedesc.timestamp + timedelta(days=1),
            public=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()
        self.assertIn(review.pk, self.data.draft_reply_comments)

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_pending_status_updates(self):
        """Testing ChangeEntry.collapsed with pending status updates"""
        self.request.user = self.review_request.submitter
        self.create_status_update(self.review_request,
                                  change_description=self.changedesc,
                                  state=StatusUpdate.PENDING)

        self.review_request.changedescs.create(
            timestamp=self.changedesc.timestamp + timedelta(days=1),
            public=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_not_yet_run_status_updates(self):
        """Testing ChangeEntry.collapsed with not yet run status updates"""
        self.request.user = self.review_request.submitter
        self.create_status_update(self.review_request,
                                  change_description=self.changedesc,
                                  state=StatusUpdate.NOT_YET_RUN)

        self.review_request.changedescs.create(
            timestamp=self.changedesc.timestamp + timedelta(days=1),
            public=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_status_update_timestamp_gt_last_visited(self):
        """Testing ChangeEntry.collapsed with status update timestamp newer
        than last visited
        """
        self.request.user = self.review_request.submitter
        self.data.last_visited = self.changedesc.timestamp + timedelta(days=1)

        status_update = self.create_status_update(
            self.review_request,
            change_description=self.changedesc,
            state=StatusUpdate.DONE_SUCCESS,
            timestamp=self.data.last_visited + timedelta(days=1))
        self.assertTrue(status_update.timestamp > self.data.last_visited)

        self.review_request.changedescs.create(
            timestamp=self.changedesc.timestamp + timedelta(days=1),
            public=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_status_update_timestamp_lt_last_visited(self):
        """Testing ChangeEntry.collapsed with status update timestamp older
        than last visited
        """
        self.request.user = self.review_request.submitter
        self.data.last_visited = self.changedesc.timestamp + timedelta(days=1)

        status_update = self.create_status_update(
            self.review_request,
            change_description=self.changedesc,
            state=StatusUpdate.DONE_SUCCESS,
            timestamp=self.data.last_visited - timedelta(days=1))
        self.assertTrue(status_update.timestamp < self.data.last_visited)

        self.review_request.changedescs.create(
            timestamp=self.changedesc.timestamp + timedelta(days=1),
            public=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertTrue(entry.collapsed)

    def test_collapsed_with_status_updates_and_draft_body_top_replies(self):
        """Testing ChangeEntry.collapsed with status updates containing draft
        comment replies to body_top
        """
        self.request.user = self.review_request.submitter

        review = self.create_review(self.review_request, publish=True)
        self.create_status_update(self.review_request,
                                  review=review,
                                  change_description=self.changedesc,
                                  state=StatusUpdate.DONE_FAILURE)

        # An unpublished reply to the review's body_top, by the viewer.
        self.create_reply(review,
                          user=self.request.user,
                          body_top_reply_to=review)

        self.review_request.changedescs.create(
            timestamp=self.changedesc.timestamp + timedelta(days=1),
            public=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()
        self.assertIn(review.pk, self.data.draft_body_top_replies)

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertFalse(entry.collapsed)

    def test_collapsed_with_status_updates_and_draft_body_bottom_replies(self):
        """Testing ChangeEntry.collapsed with status updates containing draft
        comment replies to body_bottom
        """
        self.request.user = self.review_request.submitter

        review = self.create_review(self.review_request, publish=True)
        self.create_status_update(self.review_request,
                                  review=review,
                                  change_description=self.changedesc,
                                  state=StatusUpdate.DONE_FAILURE)

        # An unpublished reply to the review's body_bottom, by the viewer.
        self.create_reply(review,
                          user=self.request.user,
                          body_bottom_reply_to=review)

        self.review_request.changedescs.create(
            timestamp=self.changedesc.timestamp + timedelta(days=1),
            public=True)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()
        self.assertIn(review.pk, self.data.draft_body_bottom_replies)

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertFalse(entry.collapsed)

    def test_get_js_model_data(self):
        """Testing ChangeEntry.get_js_model_data for standard ChangeDescription
        """
        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        self.assertEqual(entry.get_js_model_data(), {
            'pendingStatusUpdates': False,
        })

    @add_fixtures(['test_scmtools'])
    def test_get_js_model_data_with_status_updates(self):
        """Testing ChangeEntry.get_js_model_data for ChangeDescription with
        status updates
        """
        self.review_request.repository = self.create_repository()
        diffset = self.create_diffset(self.review_request)
        filediff = self.create_filediff(diffset)

        review = self.create_review(self.review_request,
                                    body_top='Body top',
                                    body_bottom='Body bottom',
                                    ship_it=True)

        comment1 = self.create_diff_comment(review, filediff)
        comment2 = self.create_diff_comment(review, filediff)
        review.publish()

        # This is needed by the entry's add_comment(). It's normally built when
        # creating the entries and their data.
        comment1.review_obj = review
        comment2.review_obj = review

        status_update = self.create_status_update(
            self.review_request,
            review=review,
            change_description=self.changedesc)

        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)
        entry.add_update(status_update)
        entry.add_comment('diff_comments', comment1)
        entry.add_comment('diff_comments', comment2)

        self.assertEqual(entry.get_js_model_data(), {
            'reviewsData': [
                {
                    'id': review.pk,
                    'bodyTop': 'Body top',
                    'bodyBottom': 'Body bottom',
                    'public': True,
                    'shipIt': True,
                },
            ],
            'diffCommentsData': [
                (str(comment1.pk), str(filediff.pk)),
                (str(comment2.pk), str(filediff.pk)),
            ],
            'pendingStatusUpdates': False,
        })

    def test_build_entries(self):
        """Testing ChangeEntry.build_entries"""
        changedesc1 = self.changedesc
        changedesc2 = self.review_request.changedescs.create(
            timestamp=changedesc1.timestamp + timedelta(days=1),
            public=True)

        review = self.create_review(self.review_request, public=True)
        comment = self.create_general_comment(review)
        status_update = self.create_status_update(
            self.review_request,
            review=review,
            change_description=changedesc2)

        self.data.query_data_pre_etag()
        self.data.query_data_post_etag()

        entries = list(ChangeEntry.build_entries(self.data))

        # These will actually be in database query order (newest to oldest),
        # not the order shown on the page.
        entry = entries[0]
        self.assertEqual(entry.changedesc, changedesc2)
        self.assertFalse(entry.collapsed)
        self.assertEqual(entry.status_updates, [status_update])
        self.assertEqual(
            entry.status_updates_by_review,
            {
                review.pk: status_update,
            })
        self.assertEqual(
            entry.status_updates[0].comments,
            {
                'diff_comments': [],
                'screenshot_comments': [],
                'file_attachment_comments': [],
                'general_comments': [comment],
            })

        entry = entries[1]
        self.assertEqual(entry.changedesc, changedesc1)
        self.assertTrue(entry.collapsed)
        self.assertEqual(entry.status_updates, [])

    def test_is_entry_new_with_timestamp(self):
        """Testing ChangeEntry.is_entry_new with timestamp"""
        entry = ChangeEntry(data=self.data,
                            changedesc=self.changedesc)

        user = User.objects.create_user(username='test-user',
                                        email='user@example.com')

        # New strictly means "last visited before the Change Description".
        self.assertTrue(entry.is_entry_new(
            last_visited=self.changedesc.timestamp - timedelta(days=1),
            user=user))
        self.assertFalse(entry.is_entry_new(
            last_visited=self.changedesc.timestamp,
            user=user))
        self.assertFalse(entry.is_entry_new(
            last_visited=self.changedesc.timestamp + timedelta(days=1),
            user=user))
| 38.568291 | 79 | 0.620287 |
0bf6166a97654a7bc80c1fae4ea612757fee89f9 | 6,503 | py | Python | tests/test_rotate.py | nbraud/ppb-vector | 9998a8f2c468b1b1335a799b77f1a5450e05f2a8 | [
"Artistic-2.0"
] | null | null | null | tests/test_rotate.py | nbraud/ppb-vector | 9998a8f2c468b1b1335a799b77f1a5450e05f2a8 | [
"Artistic-2.0"
] | null | null | null | tests/test_rotate.py | nbraud/ppb-vector | 9998a8f2c468b1b1335a799b77f1a5450e05f2a8 | [
"Artistic-2.0"
] | null | null | null | import math
from math import cos, fabs, radians, sin, sqrt
import hypothesis.strategies as st
import pytest # type: ignore
from hypothesis import assume, example, given, note
from ppb_vector import Vector
from utils import angle_isclose, angles, floats, isclose, vectors
data_exact = [
(Vector(1, 1), -90, Vector(1, -1)),
(Vector(1, 1), 0, Vector(1, 1)),
(Vector(1, 1), 90, Vector(-1, 1)),
(Vector(1, 1), 180, Vector(-1, -1)),
]
@pytest.mark.parametrize("input, angle, expected", data_exact,
ids=[str(angle) for _, angle, _ in data_exact])
def test_exact_rotations(input, angle, expected):
assert input.rotate(angle) == expected
assert input.angle(expected) == angle
# angle (in degrees) -> (sin, cos)
# values from 0 to 45°
# lifted from https://en.wikibooks.org/wiki/Trigonometry/Selected_Angles_Reference
remarkable_angles = {
15: ((sqrt(6) + sqrt(2)) / 4, (sqrt(6) - sqrt(2)) / 4),
22.5: (sqrt(2 + sqrt(2)) / 2, sqrt(2 - sqrt(2)) / 2),
30: (sqrt(3) / 2, 0.5),
45: (sqrt(2) / 2, sqrt(2) / 2),
}
# extend up to 90°
remarkable_angles.update({
90 - angle: (sin_t, cos_t)
for angle, (cos_t, sin_t) in remarkable_angles.items()
})
# extend up to 180°
remarkable_angles.update({
angle + 90: (-sin_t, cos_t)
for angle, (cos_t, sin_t) in remarkable_angles.items()
})
# extend up to 360°
remarkable_angles.update({
angle + 180: (-cos_t, -sin_t)
for angle, (cos_t, sin_t) in remarkable_angles.items()
})
# extend to negative angles
remarkable_angles.update({
-angle: (cos_t, -sin_t)
for angle, (cos_t, sin_t) in remarkable_angles.items()
})
@pytest.mark.parametrize("angle, trig", remarkable_angles.items(),
ids=[str(x) for x in remarkable_angles])
def test_remarkable_angles(angle, trig):
"""Test that our table of remarkable angles agrees with Vector._trig.
This is useful both as a consistency test of the table,
and as a test of Vector._trig (which Vector.rotate uses).
"""
cos_t, sin_t = trig
cos_m, sin_m = Vector._trig(angle)
assert isclose(sin_t, sin_m, abs_tol=0, rel_tol=1e-14)
assert isclose(cos_t, cos_m, abs_tol=0, rel_tol=1e-14)
data_close = [
(Vector(1, 0), angle, Vector(cos_t, sin_t))
for (angle, (cos_t, sin_t)) in remarkable_angles.items()
] + [
(Vector(1, 1), angle, Vector(cos_t - sin_t, cos_t + sin_t))
for (angle, (cos_t, sin_t)) in remarkable_angles.items()
]
@pytest.mark.parametrize("input, angle, expected", data_close,
ids=[f"({v.x},{v.y}).rotate({angle})" for v, angle, _ in data_close])
def test_close_rotations(input, angle, expected):
assert input.rotate(angle).isclose(expected)
assert angle_isclose(input.angle(expected), angle)
@given(angle=angles())
def test_trig_stability(angle):
"""cos² + sin² == 1
We are testing that this equation holds, as otherwise rotations
would (slightly) change the length of vectors they are applied to.
Moreover, Vector._trig should get closer to fulfilling it than
math.{cos,sin}.
"""
r_cos, r_sin = Vector._trig(angle)
r_len = r_cos * r_cos + r_sin * r_sin
# Don't use exponents here. Multiplication is generally more stable.
assert math.isclose(r_len, 1, rel_tol=1e-18)
t_cos, t_sin = cos(radians(angle)), sin(radians(angle))
t_len = t_cos * t_cos + t_sin * t_sin
assert fabs(1 - r_len) <= fabs(1 - t_len)
@given(angle=angles(), n=st.integers(min_value=0, max_value=1e5))
def test_trig_invariance(angle: float, n: int):
"""Test that cos(θ), sin(θ) ≃ cos(θ + n*360°), sin(θ + n*360°)"""
r_cos, r_sin = Vector._trig(angle)
n_cos, n_sin = Vector._trig(angle + 360 * n)
note(f"δcos: {r_cos - n_cos}")
assert isclose(r_cos, n_cos, rel_to=[n / 1e9])
note(f"δsin: {r_sin - n_sin}")
assert isclose(r_sin, n_sin, rel_to=[n / 1e9])
@given(v=vectors(), angle=angles(), n=st.integers(min_value=0, max_value=1e5))
def test_rotation_invariance(v: Vector, angle: float, n: int):
"""Check that rotating by angle and angle + n×360° have the same result."""
rot_once = v.rotate(angle)
rot_many = v.rotate(angle + 360 * n)
note(f"δ: {(rot_once - rot_many).length}")
assert rot_once.isclose(rot_many, rel_tol=n / 1e9)
@given(initial=vectors(), angle=angles())
def test_rotation_angle(initial, angle):
"""initial.angle( initial.rotate(angle) ) == angle"""
assume(initial.length > 1e-5)
assert angle_isclose(initial.angle(initial.rotate(angle)), angle)
@given(angle=angles(), loops=st.integers(min_value=0, max_value=500))
def test_rotation_stability(angle, loops):
"""Rotating loops times by angle is equivalent to rotating by loops*angle."""
initial = Vector(1, 0)
fellswoop = initial.rotate(angle * loops)
note(f"One Fell Swoop: {fellswoop}")
stepwise = initial
for _ in range(loops):
stepwise = stepwise.rotate(angle)
note(f"Step-wise: {stepwise}")
assert fellswoop.isclose(stepwise, rel_tol=1e-8)
assert math.isclose(fellswoop.length, initial.length, rel_tol=1e-15)
@given(initial=vectors(), angles=st.lists(angles()))
def test_rotation_stability2(initial, angles):
"""Rotating by a sequence of angles is equivalent to rotating by the total."""
total_angle = sum(angles)
fellswoop = initial.rotate(total_angle)
note(f"One Fell Swoop: {fellswoop}")
stepwise = initial
for angle in angles:
stepwise = stepwise.rotate(angle)
note(f"Step-wise: {stepwise}")
# Increase the tolerance on this comparison,
# as stepwise rotations induce rounding errors
assert fellswoop.isclose(stepwise, rel_tol=1e-6)
assert math.isclose(fellswoop.length, initial.length, rel_tol=1e-15)
@given(x=vectors(), y=vectors(), scalar=floats(), angle=angles())
# In this example:
# * x * l == -y
# * Rotation must not be an multiple of 90deg
# * Must be sufficiently large
@example(x=Vector(1e10, 1e10), y=Vector(1e19, 1e19), scalar=-1e9, angle=45)
def test_rotation_linearity(x, y, scalar, angle):
"""(l*x + y).rotate is equivalent to l*x.rotate + y.rotate"""
inner = (scalar * x + y).rotate(angle)
outer = scalar * x.rotate(angle) + y.rotate(angle)
note(f"scalar * x + y: {scalar * x + y}")
note(f"scalar * x.rotate(): {scalar * x.rotate(angle)}")
note(f"y.rotate(): {y.rotate(angle)}")
note(f"Inner: {inner}")
note(f"Outer: {outer}")
assert inner.isclose(outer, rel_to=[x, scalar * x, y])
| 33.694301 | 94 | 0.662925 |
62fabe554ff31c85059e570ed7a67fd8a63745a0 | 24,284 | py | Python | docker/zap-baseline.py | null-karbon/zaproxy | 5fb560c22159e18fd64814f49d2ff9e6ce097eb7 | [
"Apache-2.0"
] | 2 | 2021-10-03T00:22:01.000Z | 2021-11-08T12:53:34.000Z | docker/zap-baseline.py | TheTexasGamer/zaproxy | 895cc7e5090d5b7bfde455e7bec539df2c4bb180 | [
"Apache-2.0"
] | null | null | null | docker/zap-baseline.py | TheTexasGamer/zaproxy | 895cc7e5090d5b7bfde455e7bec539df2c4bb180 | [
"Apache-2.0"
] | 1 | 2021-12-02T07:30:36.000Z | 2021-12-02T07:30:36.000Z | #!/usr/bin/env python
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2016 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs a baseline scan against a target URL using ZAP
#
# It can either be run 'standalone', in which case depends on
# https://pypi.python.org/pypi/python-owasp-zap-v2.4 and Docker, or it can be run
# inside one of the ZAP docker containers. It automatically detects if it is
# running in docker so the parameters are the same.
#
# By default it will spider the target URL for one minute, but you can change
# that via the -m parameter.
# It will then wait for the passive scanning to finish - how long that takes
# depends on the number of pages found.
# It will exit with codes of:
# 0: Success
# 1: At least 1 FAIL
# 2: At least one WARN and no FAILs
# 3: Any other failure
# By default all alerts found by ZAP will be treated as WARNings.
# You can use the -c or -u parameters to specify a configuration file to override
# this.
# You can generate a template configuration file using the -g parameter. You will
# then need to change 'WARN' to 'FAIL', 'INFO' or 'IGNORE' for the rules you want
# to be handled differently.
# You can also add your own messages for the rules by appending them after a tab
# at the end of each line.
import getopt
import json
import logging
import os
import os.path
import sys
import time
import yaml
from datetime import datetime
from pathlib import Path
from shutil import copyfile
from zapv2 import ZAPv2
from zap_common import *
# Per-rule action overrides ('FAIL'/'WARN'/'INFO'/'IGNORE') keyed by scan-rule id;
# populated by load_config() from the -c/-u configuration file.
config_dict = {}
# Optional custom message per rule id, also populated by load_config().
config_msg = {}
# Rule id -> list of compiled URL regexes considered out of scope for that rule
# ('*' applies to all rules); used both for alert filtering and AF env generation.
out_of_scope_dict = {}
# Minimum zap_conf_lvls index to print; raised via the -l option.
min_level = 0

# Pscan rules that aren't really relevant, e.g. the examples rules in the alpha set
ignore_scan_rules = ['-1', '50003', '60000', '60001']

# Pscan rules that are being addressed (loaded from the -p progress file;
# keyed by rule id, value is the full issue record)
in_progress_issues = {}

logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# Hide "Starting new HTTP connection" messages
logging.getLogger("requests").setLevel(logging.WARNING)
def usage():
    """Print the command-line help for the baseline scan to stdout."""
    help_lines = (
        'Usage: zap-baseline.py -t <target> [options]',
        ' -t target target URL including the protocol, e.g. https://www.example.com',
        'Options:',
        ' -h print this help message',
        ' -c config_file config file to use to INFO, IGNORE or FAIL warnings',
        ' -u config_url URL of config file to use to INFO, IGNORE or FAIL warnings',
        ' -g gen_file generate default config file (all rules set to WARN)',
        ' -m mins the number of minutes to spider for (default 1)',
        ' -r report_html file to write the full ZAP HTML report',
        ' -w report_md file to write the full ZAP Wiki (Markdown) report',
        ' -x report_xml file to write the full ZAP XML report',
        ' -J report_json file to write the full ZAP JSON document',
        ' -a include the alpha passive scan rules as well',
        ' -d show debug messages',
        ' -P specify listen port',
        ' -D secs delay in seconds to wait for passive scanning ',
        ' -i default rules not in the config file to INFO',
        ' -I do not return failure on warning',
        ' -j use the Ajax spider in addition to the traditional one',
        ' -l level minimum level to show: PASS, IGNORE, INFO, WARN or FAIL, use with -s to hide example URLs',
        ' -n context_file context file which will be loaded prior to spidering the target',
        ' -p progress_file progress file which specifies issues that are being addressed',
        ' -s short output format - dont show PASSes or example URLs',
        ' -T mins max time in minutes to wait for ZAP to start and the passive scan to run',
        ' -U user username to use for authenticated scans - must be defined in the given context file',
        ' -z zap_options ZAP command line options e.g. -z "-config aaa=bbb -config ccc=ddd"',
        ' --hook path to python file that define your custom hooks',
        ' --auto use the automation framework if supported for the given parameters (this is now the default)',
        ' --autooff do not use the automation framework even if supported for the given parameters',
        '',
        'For more details see https://www.zaproxy.org/docs/docker/baseline-scan/',
    )
    for help_line in help_lines:
        print(help_line)
'''
This script is in the process of being converted to use the Automation Framework.
If you map a directory to /zap/wrk then the zap.yaml file generated will be copied to that directory.
The following parameters are currently supported:
-c config_file
-u config_url
-m mins
-r report_html
-w report_md
-x report_xml
-J report_json
-a
-d
-P
-I
-j
-s
-T
-z zap_options
The following parameters are partially supported.
If you specify the '--auto' flag _before_ using them then the Automation Framework will be used:
Currently none.
If any of the next set of parameters are used then the existing code will be used instead:
-D secs need new delay/sleep job
-i need to support config files
-l level ditto
-n context file will need full context support in the AF
-p progress_file need to support config files
-U user will need full context support in the AF
--hook will need scripting support in the AF
-g gen_file may never support
--autooff will never support, may remove at some point
'''
def main(argv):
    """Run the ZAP baseline scan.

    Parses the command line, starts ZAP (inline via the Automation Framework,
    directly in this container, or in a fresh docker container), spiders and
    passively scans the target, prints per-rule results and exits with:
      0 - success, 1 - at least one FAIL, 2 - at least one WARN and no FAILs,
      3 - any other failure.

    :param argv: command-line arguments, excluding the program name.
    """
    global min_level
    global in_progress_issues

    cid = ''
    context_file = ''
    progress_file = ''
    config_file = ''
    config_url = ''
    generate = ''
    mins = 1
    port = 0
    detailed_output = True
    report_html = ''
    report_md = ''
    report_xml = ''
    report_json = ''
    target = ''
    zap_alpha = False
    info_unspecified = False
    ajax = False
    base_dir = ''
    zap_ip = 'localhost'
    zap_options = ''
    delay = 0
    timeout = 0
    ignore_warn = False
    hook_file = None
    user = ''
    use_af = True
    af_supported = True
    af_override = False

    pass_count = 0
    warn_count = 0
    fail_count = 0
    info_count = 0
    ignore_count = 0
    warn_inprog_count = 0
    fail_inprog_count = 0
    exception_raised = False
    debug = False

    try:
        opts, args = getopt.getopt(argv, "t:c:u:g:m:n:r:J:w:x:l:hdaijp:sz:P:D:T:IU:", ["hook=", "auto", "autooff"])
    except getopt.GetoptError as exc:
        logging.warning('Invalid option ' + exc.opt + ' : ' + exc.msg)
        usage()
        sys.exit(3)

    # Options that the Automation Framework cannot yet replicate clear
    # af_supported so that the legacy code path is used instead.
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit(0)
        elif opt == '-t':
            target = arg
            logging.debug('Target: ' + target)
        elif opt == '-c':
            config_file = arg
        elif opt == '-u':
            config_url = arg
        elif opt == '-g':
            generate = arg
            af_supported = False
        elif opt == '-d':
            logging.getLogger().setLevel(logging.DEBUG)
            debug = True
        elif opt == '-m':
            mins = int(arg)
        elif opt == '-P':
            port = int(arg)
        elif opt == '-D':
            delay = int(arg)
            af_supported = False
        elif opt == '-n':
            context_file = arg
            af_supported = False
        elif opt == '-p':
            progress_file = arg
            af_supported = False
        elif opt == '-r':
            report_html = arg
        elif opt == '-J':
            report_json = arg
        elif opt == '-w':
            report_md = arg
        elif opt == '-x':
            report_xml = arg
        elif opt == '-a':
            zap_alpha = True
        elif opt == '-i':
            info_unspecified = True
            af_supported = False
        elif opt == '-I':
            ignore_warn = True
        elif opt == '-j':
            ajax = True
        elif opt == '-l':
            try:
                min_level = zap_conf_lvls.index(arg)
            except ValueError:
                logging.warning('Level must be one of ' + str(zap_conf_lvls))
                usage()
                sys.exit(3)
            af_supported = False
        elif opt == '-z':
            zap_options = arg
        elif opt == '-s':
            detailed_output = False
        elif opt == '-T':
            timeout = int(arg)
        elif opt == '-U':
            user = arg
            af_supported = False
        elif opt == '--hook':
            hook_file = arg
            af_supported = False
        elif opt == '--auto':
            use_af = True
            af_override = True
        elif opt == '--autooff':
            use_af = False

    check_zap_client_version()

    load_custom_hooks(hook_file)
    trigger_hook('cli_opts', opts)

    # Check target supplied and ok
    if len(target) == 0:
        usage()
        sys.exit(3)

    if not (target.startswith('http://') or target.startswith('https://')):
        logging.warning('Target must start with \'http://\' or \'https://\'')
        usage()
        sys.exit(3)

    if running_in_docker():
        base_dir = '/zap/wrk/'
        if config_file or generate or report_html or report_xml or report_json or report_md or progress_file or context_file:
            # Check directory has been mounted
            if not os.path.exists(base_dir):
                logging.warning('A file based option has been specified but the directory \'/zap/wrk\' is not mounted ')
                usage()
                sys.exit(3)

    if user and not context_file:
        logging.warning('A context file must be specified (and include the user) if the user option is selected')
        usage()
        sys.exit(3)

    # Choose a random 'ephemeral' port and check its available if it wasn't specified with -P option
    if port == 0:
        port = get_free_port()

    logging.debug('Using port: ' + str(port))

    if config_file:
        # load config file from filestore
        with open(base_dir + config_file) as f:
            try:
                load_config(f, config_dict, config_msg, out_of_scope_dict)
            except ValueError as e:
                logging.warning("Failed to load config file " + base_dir + config_file + " " + str(e))
                sys.exit(3)
    elif config_url:
        # load config file from url
        try:
            config_data = urlopen(config_url).read().decode('UTF-8').splitlines()
            load_config(config_data, config_dict, config_msg, out_of_scope_dict)
        except ValueError as e:
            logging.warning("Failed to read configs from " + config_url + " " + str(e))
            sys.exit(3)
        # A bare 'except:' here would also trap KeyboardInterrupt/SystemExit;
        # only catch real errors (network failures, bad encodings, ...).
        except Exception:
            logging.warning('Failed to read configs from ' + config_url)
            sys.exit(3)

    if progress_file:
        # load progress file from filestore
        with open(base_dir + progress_file) as f:
            progress = json.load(f)
            # parse into something more useful...
            # in_prog_issues = map of vulnid -> {object with everything in}
            for issue in progress["issues"]:
                if issue["state"] == "inprogress":
                    in_progress_issues[issue["id"]] = issue

    if running_in_docker():
        if use_af and af_supported:
            print('Using the Automation Framework')

            # Generate the yaml file
            home_dir = str(Path.home())
            yaml_file = home_dir + '/zap.yaml'
            summary_file = home_dir + '/zap_out.json'

            with open(yaml_file, 'w') as yf:

                # Add the top level to the scope for backwards compatibility
                top_levels = [ target ]
                if target.count('/') > 2:
                    # The url can include a valid path, but always reset to spider the host (backwards compatibility)
                    t2 = target[0:target.index('/', 8)+1]
                    if not t2 == target:
                        target = t2
                        top_levels.append(target)

                yaml.dump(get_af_env(top_levels, out_of_scope_dict, debug), yf)

                alertFilters = []

                # Handle id specific alertFilters - rules that apply to all IDs are excluded from the env
                for rule_id in out_of_scope_dict:
                    if rule_id != '*':
                        for regex in out_of_scope_dict[rule_id]:
                            alertFilters.append({'ruleId': rule_id, 'newRisk': 'False Positive', 'url': regex.pattern, 'urlRegex': True})

                addons = ['pscanrulesBeta']
                if zap_alpha:
                    addons.append('pscanrulesAlpha')

                jobs = [
                    get_af_addons(addons, []),
                    get_af_pscan_config()]

                if len(alertFilters) > 0:
                    jobs.append(get_af_alertFilter(alertFilters))

                jobs.append(get_af_spider(target, mins))

                if ajax:
                    jobs.append(get_af_spiderAjax(target, mins))

                jobs.append(get_af_pscan_wait(timeout))
                # bool indexes the tuple: False -> 'Short', True -> 'Long'
                jobs.append(get_af_output_summary(('Short', 'Long')[detailed_output], summary_file, config_dict, config_msg))

                if report_html:
                    jobs.append(get_af_report('traditional-html', base_dir, report_html, 'ZAP Scanning Report', ''))

                if report_md:
                    jobs.append(get_af_report('traditional-md', base_dir, report_md, 'ZAP Scanning Report', ''))

                if report_xml:
                    jobs.append(get_af_report('traditional-xml', base_dir, report_xml, 'ZAP Scanning Report', ''))

                if report_json:
                    jobs.append(get_af_report('traditional-json', base_dir, report_json, 'ZAP Scanning Report', ''))

                yaml.dump({'jobs': jobs}, yf)

            if os.path.exists('/zap/wrk'):
                # Write the yaml file to the mapped directory, if there is one
                copyfile(yaml_file, '/zap/wrk/zap.yaml')

            # Run ZAP inline with the yaml file
            try:
                params = ['-autorun', yaml_file]
                add_zap_options(params, zap_options)
                out = run_zap_inline(port, params)

                # Suppress routine startup noise from the inline ZAP output.
                ignore_strs = ["Found Java version", "Available memory", "Using JVM args", "Add-on already installed", "[main] INFO",
                               "Automation plan succeeded"]

                for line in out.splitlines():
                    if any(x in line for x in ignore_strs):
                        continue
                    print(line)

            except OSError:
                logging.warning('Failed to start ZAP :(')
                sys.exit(3)

            # Read the status file to find out what code we should exit with
            if not os.path.isfile(summary_file):
                logging.warning('Failed to access summary file ' + summary_file)
                sys.exit(3)

            try:
                with open(summary_file) as f:
                    summary_data = json.load(f)
                    if summary_data['fail'] > 0:
                        sys.exit(1)
                    elif (not ignore_warn) and summary_data['warn'] > 0:
                        sys.exit(2)
                    elif summary_data['pass'] > 0:
                        sys.exit(0)
                    else:
                        sys.exit(3)
            except IOError:
                logging.warning('Failed to read summary file ' + summary_file)
                sys.exit(3)
        else:
            # Legacy path: start ZAP as a daemon inside this container.
            try:
                params = [
                    '-config', 'spider.maxDuration=' + str(mins),
                    '-addonupdate',
                    '-addoninstall', 'pscanrulesBeta']  # In case we're running in the stable container

                if zap_alpha:
                    params.append('-addoninstall')
                    params.append('pscanrulesAlpha')

                add_zap_options(params, zap_options)

                start_zap(port, params)
            except OSError:
                logging.warning('Failed to start ZAP :(')
                sys.exit(3)

    else:
        # Not running in docker, so start one
        mount_dir = ''
        if context_file:
            mount_dir = os.path.dirname(os.path.abspath(context_file))
        params = [
            '-config', 'spider.maxDuration=' + str(mins),
            '-addonupdate']
        if (zap_alpha):
            params.extend(['-addoninstall', 'pscanrulesAlpha'])

        add_zap_options(params, zap_options)

        try:
            cid = start_docker_zap('owasp/zap2docker-weekly', port, params, mount_dir)
            zap_ip = ipaddress_for_cid(cid)
            logging.debug('Docker ZAP IP Addr: ' + zap_ip)
        except OSError:
            logging.warning('Failed to start ZAP in docker :(')
            sys.exit(3)

    try:
        zap = ZAPv2(proxies={'http': 'http://' + zap_ip + ':' + str(port), 'https': 'http://' + zap_ip + ':' + str(port)})

        wait_for_zap_start(zap, timeout * 60)
        trigger_hook('zap_started', zap, target)

        # Make suitable performance tweaks for running in this environment
        zap_tune(zap)
        trigger_hook('zap_tuned', zap)

        if context_file:
            # handle the context file, cant use base_dir as it might not have been set up
            zap_import_context(zap, '/zap/wrk/' + os.path.basename(context_file))
            if (user):
                zap_set_scan_user(zap, user)

        zap_access_target(zap, target)

        if target.count('/') > 2:
            # The url can include a valid path, but always reset to spider the host
            target = target[0:target.index('/', 8)+1]

        time.sleep(2)

        # Spider target
        zap_spider(zap, target)

        if (ajax):
            zap_ajax_spider(zap, target, mins)

        if (delay):
            # Optional extra wait (-D) before checking passive scan status.
            start_scan = datetime.now()
            while ((datetime.now() - start_scan).seconds < delay):
                time.sleep(5)
                logging.debug('Delay passive scan check ' + str(delay - (datetime.now() - start_scan).seconds) + ' seconds')

        zap_wait_for_passive_scan(zap, timeout * 60)

        # Print out a count of the number of urls
        num_urls = len(zap.core.urls())
        if num_urls == 0:
            logging.warning('No URLs found - is the target URL accessible? Local services may not be accessible from the Docker container')
        else:
            if detailed_output:
                print('Total of ' + str(num_urls) + ' URLs')

            alert_dict = zap_get_alerts(zap, target, ignore_scan_rules, out_of_scope_dict)

            # Build id -> name for every active passive scan rule.
            all_rules = zap.pscan.scanners
            all_dict = {}
            for rule in all_rules:
                plugin_id = rule.get('id')
                if plugin_id in ignore_scan_rules:
                    continue
                all_dict[plugin_id] = rule.get('name')

            if generate:
                # Create the config file
                with open(base_dir + generate, 'w') as f:
                    f.write('# zap-baseline rule configuration file\n')
                    f.write('# Change WARN to IGNORE to ignore rule or FAIL to fail if rule matches\n')
                    f.write('# Only the rule identifiers are used - the names are just for info\n')
                    f.write('# You can add your own messages to each rule by appending them after a tab on each line.\n')
                    for key, rule in sorted(all_dict.items()):
                        f.write(key + '\tWARN\t(' + rule + ')\n')

            # print out the passing rules
            pass_dict = {}
            for rule in all_rules:
                plugin_id = rule.get('id')
                if plugin_id in ignore_scan_rules:
                    continue
                if (plugin_id not in alert_dict):
                    pass_dict[plugin_id] = rule.get('name')

            if min_level == zap_conf_lvls.index("PASS") and detailed_output:
                for key, rule in sorted(pass_dict.items()):
                    print('PASS: ' + rule + ' [' + key + ']')

            pass_count = len(pass_dict)

            # print out the ignored rules
            ignore_count, not_used = print_rules(zap, alert_dict, 'IGNORE', config_dict, config_msg, min_level,
                                                 inc_ignore_rules, True, detailed_output, {})

            # print out the info rules
            info_count, not_used = print_rules(zap, alert_dict, 'INFO', config_dict, config_msg, min_level,
                                               inc_info_rules, info_unspecified, detailed_output, in_progress_issues)

            # print out the warning rules
            warn_count, warn_inprog_count = print_rules(zap, alert_dict, 'WARN', config_dict, config_msg, min_level,
                                                        inc_warn_rules, not info_unspecified, detailed_output, in_progress_issues)

            # print out the failing rules
            fail_count, fail_inprog_count = print_rules(zap, alert_dict, 'FAIL', config_dict, config_msg, min_level,
                                                        inc_fail_rules, True, detailed_output, in_progress_issues)

            if report_html:
                # Save the report
                write_report(base_dir + report_html, zap.core.htmlreport())

            if report_json:
                # Save the report
                write_report(base_dir + report_json, zap.core.jsonreport())

            if report_md:
                # Save the report
                write_report(base_dir + report_md, zap.core.mdreport())

            if report_xml:
                # Save the report
                write_report(base_dir + report_xml, zap.core.xmlreport())

            print('FAIL-NEW: ' + str(fail_count) + '\tFAIL-INPROG: ' + str(fail_inprog_count) +
                  '\tWARN-NEW: ' + str(warn_count) + '\tWARN-INPROG: ' + str(warn_inprog_count) +
                  '\tINFO: ' + str(info_count) + '\tIGNORE: ' + str(ignore_count) + '\tPASS: ' + str(pass_count))

        trigger_hook('zap_pre_shutdown', zap)
        # Stop ZAP
        zap.core.shutdown()

    except UserInputException as e:
        exception_raised = True
        print("ERROR %s" % e)

    except ScanNotStartedException:
        exception_raised = True
        dump_log_file(cid)

    except IOError as e:
        exception_raised = True
        print("ERROR %s" % e)
        logging.warning('I/O error: ' + str(e))
        dump_log_file(cid)

    # Was a bare 'except:', which would also swallow KeyboardInterrupt and
    # SystemExit; only genuine errors should be converted to exit code 3.
    except Exception:
        exception_raised = True
        print("ERROR " + str(sys.exc_info()[0]))
        logging.warning('Unexpected error: ' + str(sys.exc_info()[0]))
        dump_log_file(cid)

    if not running_in_docker():
        stop_docker(cid)

    trigger_hook('pre_exit', fail_count, warn_count, pass_count)

    # Exit code contract: 1 = FAILs, 2 = WARNs (unless -I), 0 = clean pass,
    # 3 = nothing scanned or an internal error occurred.
    if exception_raised:
        sys.exit(3)
    elif fail_count > 0:
        sys.exit(1)
    elif (not ignore_warn) and warn_count > 0:
        sys.exit(2)
    elif pass_count > 0:
        sys.exit(0)
    else:
        sys.exit(3)
if __name__ == "__main__":
main(sys.argv[1:])
| 37.417565 | 139 | 0.563293 |
c27ee552f61a7e9a77a36062140033dd409dbeaa | 2,770 | py | Python | ambari-agent/src/main/python/ambari_agent/HostStatusReporter.py | tqrg-bot/ambari | 05cd35982b30f424cec0b5b9d93bc4709880a3bc | [
"Apache-2.0"
] | null | null | null | ambari-agent/src/main/python/ambari_agent/HostStatusReporter.py | tqrg-bot/ambari | 05cd35982b30f424cec0b5b9d93bc4709880a3bc | [
"Apache-2.0"
] | null | null | null | ambari-agent/src/main/python/ambari_agent/HostStatusReporter.py | tqrg-bot/ambari | 05cd35982b30f424cec0b5b9d93bc4709880a3bc | [
"Apache-2.0"
] | null | null | null | '''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import threading
from ambari_agent import Constants
from ambari_agent.HostInfo import HostInfo
from ambari_agent.Utils import Utils
from ambari_agent.Hardware import Hardware
from ambari_stomp.adapter.websocket import ConnectionIsAlreadyClosed
logger = logging.getLogger(__name__)
class HostStatusReporter(threading.Thread):
    """
    The thread reports host status to server if it changed from previous report every 'host_status_report_interval' seconds.
    """

    def __init__(self, initializer_module):
        # Shared agent bootstrap object: supplies config, the registration
        # flag, the server connection and the global stop event.
        self.initializer_module = initializer_module
        # Seconds to sleep between report attempts.
        self.report_interval = initializer_module.config.host_status_report_interval
        self.stop_event = initializer_module.stop_event
        self.config = initializer_module.config
        # Collector used to populate host environment details for each report.
        self.host_info = HostInfo(initializer_module.config)
        # Last report successfully sent; compared against to avoid resending
        # identical data.
        self.last_report = {}

        threading.Thread.__init__(self)

    def run(self):
        """Loop until the stop event fires, sending the host status whenever it changes."""
        while not self.stop_event.is_set():
            try:
                if self.initializer_module.is_registered:
                    report = self.get_report()
                    # Re-check registration immediately before sending, and skip the
                    # send entirely if only the reporting timestamp changed.
                    if self.initializer_module.is_registered and not Utils.are_dicts_equal(report, self.last_report, keys_to_skip=["agentTimeStampAtReporting"]):
                        self.initializer_module.connection.send(message=report, destination=Constants.HOST_STATUS_REPORTS_ENDPOINT)
                        self.last_report = report

                # don't use else to avoid race condition
                if not self.initializer_module.is_registered:
                    self.last_report = {}
            except ConnectionIsAlreadyClosed:  # server and agent disconnected during sending data. Not an issue
                pass
            except:
                # Catch-all so one bad iteration never kills the reporter thread.
                logger.exception("Exception in HostStatusReporter. Re-running it")

            self.stop_event.wait(self.report_interval)

        logger.info("HostStatusReporter has successfully finished")

    def get_report(self):
        """Build the host status payload: environment details plus mounted disks."""
        host_info_dict = {}
        self.host_info.register(host_info_dict)

        report = {
            'agentEnv': host_info_dict,
            'mounts': Hardware.osdisks(self.config),
        }

        return report
return report | 37.432432 | 151 | 0.761733 |
ddd6a3f7853a133c79136174478588f98e4a2225 | 5,668 | py | Python | visualization/visualize_predictions.py | jskhu/probdet-1 | b8bda3bd7cdd573aa9f70a62453d147664211af6 | [
"Apache-2.0"
] | 50 | 2021-01-14T03:44:03.000Z | 2022-03-28T12:27:22.000Z | visualization/visualize_predictions.py | jskhu/probdet-1 | b8bda3bd7cdd573aa9f70a62453d147664211af6 | [
"Apache-2.0"
] | 3 | 2021-01-15T22:39:03.000Z | 2021-09-22T15:52:03.000Z | visualization/visualize_predictions.py | jskhu/probdet-1 | b8bda3bd7cdd573aa9f70a62453d147664211af6 | [
"Apache-2.0"
] | 8 | 2021-02-03T02:55:50.000Z | 2022-02-16T14:30:31.000Z | import cv2
import numpy as np
import os
import ujson as json
from scipy.stats import entropy
from matplotlib import cm
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.engine import launch
# Project imports
from core.setup import setup_config, setup_arg_parser
from core.evaluation_tools import evaluation_utils
from core.visualization_tools.probabilistic_visualizer import ProbabilisticVisualizer
from probabilistic_inference.inference_utils import get_inference_output_dir
# noinspection PyTypeChecker
# noinspection PyTypeChecker
def main(
        args,
        cfg=None,
        min_allowed_score=None):
    """Visualize predicted vs. ground-truth boxes (with covariances) per image.

    For every image in the test dataset, draws ground-truth boxes in light
    green and predicted boxes colored by classification entropy (via the
    matplotlib 'autumn' colormap), then shows the result in an OpenCV window.

    :param args: parsed command-line args (test_dataset, inference_config, ...)
    :param cfg: optional pre-built detectron2 config; built from args if None.
    :param min_allowed_score: optional score cutoff; read from the previously
        computed mAP results file if None, falling back to 0.0.
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)

    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset

    # Build path to gt instances and inference output
    inference_output_dir = get_inference_output_dir(
        cfg['OUTPUT_DIR'],
        args.test_dataset,
        args.inference_config,
        args.image_corruption_level)

    # Get thresholds to perform evaluation on
    if min_allowed_score is None:
        # Check if F-1 Score has been previously computed.
        try:
            with open(os.path.join(inference_output_dir, "mAP_res.txt"), "r") as f:
                # The file ends with a bracketed list; the last element is the
                # optimal score threshold.
                min_allowed_score = f.read().strip('][\n').split(', ')[-1]
                min_allowed_score = round(float(min_allowed_score), 4)
        except FileNotFoundError:
            # If not, process all detections. Not recommended as the results might be influenced by very low scoring
            # detections that would normally be removed in robotics/vision
            # applications.
            min_allowed_score = 0.0

    # get preprocessed instances
    preprocessed_predicted_instances, preprocessed_gt_instances = evaluation_utils.get_per_frame_preprocessed_instances(
        cfg, inference_output_dir, min_allowed_score)

    # get metacatalog and image infos
    meta_catalog = MetadataCatalog.get(args.test_dataset)
    images_info = json.load(open(meta_catalog.json_file, 'r'))['images']

    # Loop over all images and visualize errors
    for image_info in images_info:
        image_id = image_info['id']

        image = cv2.imread(
            os.path.join(
                meta_catalog.image_root,
                image_info['file_name']))
        # OpenCV loads BGR; the visualizer works in RGB.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        v = ProbabilisticVisualizer(
            image,
            meta_catalog,
            scale=1.5)
        class_list = v.metadata.as_dict()['thing_classes']

        # Per-image tensors from the preprocessed results, moved to numpy.
        predicted_box_means = preprocessed_predicted_instances['predicted_boxes'][image_id].cpu(
        ).numpy()
        gt_box_means = preprocessed_gt_instances['gt_boxes'][image_id].cpu(
        ).numpy()
        predicted_box_covariances = preprocessed_predicted_instances[
            'predicted_covar_mats'][image_id].cpu(
        ).numpy()

        predicted_cls_probs = preprocessed_predicted_instances['predicted_cls_probs'][image_id]
        if predicted_cls_probs.shape[0] > 0:
            if cfg.MODEL.META_ARCHITECTURE == "ProbabilisticGeneralizedRCNN" or cfg.MODEL.META_ARCHITECTURE == "ProbabilisticDetr":
                # Last probability column is excluded from the max — presumably
                # the background class for these architectures; confirm.
                predicted_scores, predicted_classes = predicted_cls_probs[:, :-1].max(
                    1)
                predicted_entropies = entropy(
                    predicted_cls_probs.cpu().numpy(), base=2)
            else:
                # Other heads score a single foreground probability; compute a
                # binary entropy from (p, 1-p).
                predicted_scores, predicted_classes = predicted_cls_probs.max(
                    1)
                predicted_entropies = entropy(
                    np.stack(
                        (predicted_scores.cpu().numpy(),
                         1 - predicted_scores.cpu().numpy())),
                    base=2)
            predicted_classes = predicted_classes.cpu(
            ).numpy()
            predicted_classes = [class_list[p_class]
                                 for p_class in predicted_classes]
            # Map entropy (uncertainty) to a color per detection.
            assigned_colors = cm.autumn(predicted_entropies)
            # NOTE(review): predicted_scores is converted but never used below.
            predicted_scores = predicted_scores.cpu().numpy()
        else:
            predicted_scores = np.array([])
            predicted_classes = np.array([])
            assigned_colors = []

        gt_cat_idxs = preprocessed_gt_instances['gt_cat_idxs'][image_id].cpu(
        ).numpy()
        thing_dataset_id_to_contiguous_id = meta_catalog.thing_dataset_id_to_contiguous_id
        if gt_cat_idxs.shape[0] > 0:
            # Map dataset category ids to contiguous ids, then to class names.
            gt_labels = [class_list[thing_dataset_id_to_contiguous_id[gt_class]]
                         for gt_class in gt_cat_idxs[:, 0]]
        else:
            gt_labels = []

        # Draw ground truth first (light green), then overlay predictions with
        # their covariance ellipses, entropy-colored.
        # noinspection PyTypeChecker
        _ = v.overlay_covariance_instances(
            boxes=gt_box_means,
            assigned_colors=[
                'lightgreen' for _ in gt_box_means],
            labels=gt_labels,
            alpha=1.0)
        plotted_detections = v.overlay_covariance_instances(
            boxes=predicted_box_means,
            covariance_matrices=predicted_box_covariances,
            assigned_colors=assigned_colors,
            alpha=1.0,
            labels=predicted_classes)

        # Back to BGR for display; blocks until a key is pressed.
        cv2.imshow(
            'Detected Instances.',
            cv2.cvtColor(
                plotted_detections.get_image(),
                cv2.COLOR_RGB2BGR))
        cv2.waitKey()
if __name__ == "__main__":
# Create arg parser
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| 35.873418 | 131 | 0.637791 |
fd03768b0acd687ae1925b18add41e4ea1ef34f4 | 160 | py | Python | gemini/settings.py | quantroom-pro/cryptocurrency.backtester | 5713aefd60fef52cb5ff09a729541573a7099587 | [
"MIT"
] | 37 | 2018-04-15T05:43:24.000Z | 2022-03-04T00:13:15.000Z | gemini/settings.py | friendly-pig/Gemini | 5713aefd60fef52cb5ff09a729541573a7099587 | [
"MIT"
] | 2 | 2018-02-08T16:51:10.000Z | 2018-02-18T16:23:46.000Z | gemini/settings.py | friendly-pig/Gemini | 5713aefd60fef52cb5ff09a729541573a7099587 | [
"MIT"
] | 16 | 2018-05-12T07:53:59.000Z | 2022-01-30T07:35:57.000Z | """
Main settings file for Gemini.Backtester
"""
# precision for pandas and rounding
PRECISION = 8
# default fees
FEES = {
'Long': 0.,
'Short': 0.,
}
| 12.307692 | 40 | 0.625 |
4997e6c4d3f369bcc3a01af5ded4b8455ca44bbe | 1,145 | py | Python | discii/state.py | CaedenPH/discii | 0d5a61701c8e2f1c39f2616e5e0e7c1f8701b6b6 | [
"MIT"
] | 1 | 2022-03-16T18:13:45.000Z | 2022-03-16T18:13:45.000Z | discii/state.py | CaedenPH/disci | 0d5a61701c8e2f1c39f2616e5e0e7c1f8701b6b6 | [
"MIT"
] | null | null | null | discii/state.py | CaedenPH/disci | 0d5a61701c8e2f1c39f2616e5e0e7c1f8701b6b6 | [
"MIT"
] | 1 | 2022-03-15T21:17:07.000Z | 2022-03-15T21:17:07.000Z | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .cache import Cache
from .client import Client
from .http import HTTPClient
from .gateway import DiscordWebSocket
# Public API of this module: only the state container is exported.
# fmt: off
__all__ = (
    'ClientState',
)
# fmt: on
class ClientState:
    """
    A lightweight container bundling every core runtime component of a
    running bot so they can be handed around as a single object.

    Parameters
    ----------
    client: :class:`Client`
        The bot/client instance that owns this state.
    http: :class:`HTTPClient`
        The HTTP client through which all REST requests are sent; its
        event loop is re-exposed here as ``loop``.
    ws: :class:`DiscordWebSocket`
        The active websocket connection to the gateway.
    cache: :class:`Cache`
        Cache holding the data sent and received from the gateway.
    """

    def __init__(
        self,
        client: "Client",
        *,
        http: "HTTPClient",
        ws: "DiscordWebSocket",
        cache: "Cache"
    ) -> None:
        # Keep direct references to every component.
        self.client = client
        self.http = http
        self.ws = ws
        self.cache = cache
        # Convenience alias for the loop that drives all tasks and events.
        self.loop = http.loop
| 22.019231 | 47 | 0.595633 |
8a6f604b1413643901e65e78efc72e03efa79958 | 6,278 | py | Python | encoding/models/sseg/fcfpn.py | whwu95/PyTorch-Encoding | 0fa7adcc08d48cb1d8cd22d9abf44983ba73805e | [
"MIT"
] | null | null | null | encoding/models/sseg/fcfpn.py | whwu95/PyTorch-Encoding | 0fa7adcc08d48cb1d8cd22d9abf44983ba73805e | [
"MIT"
] | null | null | null | encoding/models/sseg/fcfpn.py | whwu95/PyTorch-Encoding | 0fa7adcc08d48cb1d8cd22d9abf44983ba73805e | [
"MIT"
] | 1 | 2020-12-18T12:46:40.000Z | 2020-12-18T12:46:40.000Z | ###########################################################################
# Created by: Hang Zhang
# Email: zhang.hang@rutgers.edu
# Copyright (c) 2017
###########################################################################
from __future__ import division
import os
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import upsample
from .base import BaseNet
# First three characters of the torch version string (e.g. "1.9").
# NOTE(review): not referenced in this file — presumably used by sibling
# modules or kept for legacy version checks; confirm before removing.
torch_ver = torch.__version__[:3]

# Public API of this module.
__all__ = ['FCFPN', 'get_fcfpn', 'get_fcfpn_50_ade']
class FCFPN(BaseNet):
    r"""Fully Convolutional Networks for Semantic Segmentation

    Parameters
    ----------
    nclass : int
        Number of categories for the training dataset.
    backbone : string
        Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
        'resnet101' or 'resnet152').
    aux : bool
        Must be False; FCFPN does not support an auxiliary loss head.
    norm_layer : object
        Normalization layer used in backbone network
        (default: :class:`torch.nn.BatchNorm2d`).


    Reference:

        Long, Jonathan, Evan Shelhamer, and Trevor Darrell. "Fully convolutional networks
        for semantic segmentation." *CVPR*, 2015

    Examples
    --------
    >>> model = FCFPN(nclass=21, backbone='resnet50')
    >>> print(model)
    """
    # BUG FIX: the default was aux=True, which contradicted the
    # `assert not aux` below and made the class unconstructible with
    # default arguments (and broke get_fcfpn / get_fcfpn_50_ade).
    def __init__(self, nclass, backbone, aux=False, se_loss=False, norm_layer=nn.BatchNorm2d, **kwargs):
        super(FCFPN, self).__init__(nclass, backbone, aux, se_loss, dilated=False, norm_layer=norm_layer)
        self.head = FCFPNHead(nclass, norm_layer, up_kwargs=self._up_kwargs)
        assert not aux, "FCFPN does not support aux loss"

    def forward(self, x):
        """Run the backbone + FPN head and upsample the logits to input size.

        Returns a tuple whose first element is the (N, nclass, H, W) score map.
        """
        imsize = x.size()[2:]
        features = self.base_forward(x)

        x = list(self.head(*features))
        # Resize the primary output back to the spatial size of the input.
        x[0] = upsample(x[0], imsize, **self._up_kwargs)
        return tuple(x)
class FCFPNHead(nn.Module):
    """FPN-style segmentation head.

    Laterally projects each backbone stage to `fpn_dim` channels, merges them
    top-down with bilinear upsampling, concatenates all pyramid levels at the
    c1 resolution, and predicts `out_channels` class scores.
    """
    # NOTE(review): mutable default for fpn_inchannels is safe here because it
    # is only read (sliced / len), never mutated.
    def __init__(self, out_channels, norm_layer=None, fpn_inchannels=[256, 512, 1024, 2048],
                 fpn_dim=256, up_kwargs=None):
        super(FCFPNHead, self).__init__()
        # bilinear upsample options
        assert up_kwargs is not None
        self._up_kwargs = up_kwargs
        # 1x1 lateral convs for every stage except the deepest (c4).
        fpn_lateral = []
        for fpn_inchannel in fpn_inchannels[:-1]:
            fpn_lateral.append(nn.Sequential(
                nn.Conv2d(fpn_inchannel, fpn_dim, kernel_size=1, bias=False),
                norm_layer(fpn_dim),
                nn.ReLU(inplace=True),
            ))
        self.fpn_lateral = nn.ModuleList(fpn_lateral)
        # 3x3 smoothing convs applied after each top-down merge.
        fpn_out = []
        for _ in range(len(fpn_inchannels) - 1):
            fpn_out.append(nn.Sequential(
                nn.Conv2d(fpn_dim, fpn_dim, kernel_size=3, padding=1, bias=False),
                norm_layer(fpn_dim),
                nn.ReLU(inplace=True),
            ))
        self.fpn_out = nn.ModuleList(fpn_out)
        # Projects the deepest stage (c4) into the FPN dimension.
        self.c4conv = nn.Sequential(nn.Conv2d(fpn_inchannels[-1], fpn_dim, 3, padding=1, bias=False),
                                    norm_layer(fpn_dim),
                                    nn.ReLU())
        # Final classifier over the concatenation of all pyramid levels.
        inter_channels = len(fpn_inchannels) * fpn_dim
        self.conv5 = nn.Sequential(nn.Conv2d(inter_channels, 512, 3, padding=1, bias=False),
                                   norm_layer(512),
                                   nn.ReLU(),
                                   nn.Dropout2d(0.1, False),
                                   nn.Conv2d(512, out_channels, 1))

    def forward(self, *inputs):
        """Fuse backbone stages (c1..c4) top-down and return a 1-tuple of scores."""
        c4 = inputs[-1]
        if hasattr(self, 'extramodule'):
            # Optional plug-in module applied to the deepest features.
            c4 = self.extramodule(c4)
        feat = self.c4conv(c4)
        c1_size = inputs[0].size()[2:]
        feat_up = upsample(feat, c1_size, **self._up_kwargs)
        fpn_features = [feat_up]
        # Walk the pyramid from deep to shallow, merging laterals into `feat`.
        for i in reversed(range(len(inputs) - 1)):
            feat_i = self.fpn_lateral[i](inputs[i])
            feat = upsample(feat, feat_i.size()[2:], **self._up_kwargs)
            feat = feat + feat_i
            # upsample to the same size with c1
            feat_up = upsample(self.fpn_out[i](feat), c1_size, **self._up_kwargs)
            fpn_features.append(feat_up)
        fpn_features = torch.cat(fpn_features, 1)

        return (self.conv5(fpn_features), )
def get_fcfpn(dataset='pascal_voc', backbone='resnet50', pretrained=False,
              root='~/.encoding/models', **kwargs):
    r"""FCFPN model from the paper `"Fully Convolutional Network for semantic segmentation"
    <https://people.eecs.berkeley.edu/~jonlong/long_shelhamer_fcfpn.pdf>`_
    Parameters
    ----------
    dataset : str, default pascal_voc
        The dataset that model pretrained on. (pascal_voc, ade20k)
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.encoding/models'
        Location for keeping the model parameters.


    Examples
    --------
    >>> model = get_fcfpn(dataset='pascal_voc', backbone='resnet50s', pretrained=False)
    >>> print(model)
    """
    # Maps dataset name to the short code used in pretrained weight filenames.
    acronyms = {
        'pascal_voc': 'voc',
        'pascal_aug': 'voc',
        'ade20k': 'ade',
    }
    # infer number of classes from the dataset registry
    # (previously also imported VOCSegmentation/VOCAugSegmentation/
    # ADE20KSegmentation, none of which were used)
    from ...datasets import datasets
    model = FCFPN(datasets[dataset.lower()].NUM_CLASS, backbone=backbone, **kwargs)
    if pretrained:
        from ..model_store import get_model_file
        model.load_state_dict(torch.load(
            get_model_file('fcfpn_%s_%s'%(backbone, acronyms[dataset]), root=root)))
    return model
def get_fcfpn_50_ade(pretrained=False, root='~/.encoding/models', **kwargs):
    r"""FCFPN model with a ResNet-50 backbone, configured for ADE20K.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.encoding/models'
        Location for keeping the model parameters.
    **kwargs
        Extra keyword arguments forwarded to :func:`get_fcfpn`.

    Examples
    --------
    >>> model = get_fcfpn_50_ade(pretrained=True)
    >>> print(model)
    """
    # Bug fix: `root` and `**kwargs` were previously accepted but silently
    # dropped instead of being forwarded to get_fcfpn.
    return get_fcfpn('ade20k', 'resnet50s', pretrained, root=root, **kwargs)
| 37.369048 | 105 | 0.594138 |
ce4e6793a54afbcb30cb69a83fa0781777fdd111 | 13,578 | py | Python | custom_components/smartir/climate.py | nurikk/SmartIR | 73be6a6d467dcd230feb2a0ab62a52f16680afac | [
"MIT"
] | null | null | null | custom_components/smartir/climate.py | nurikk/SmartIR | 73be6a6d467dcd230feb2a0ab62a52f16680afac | [
"MIT"
] | null | null | null | custom_components/smartir/climate.py | nurikk/SmartIR | 73be6a6d467dcd230feb2a0ab62a52f16680afac | [
"MIT"
] | null | null | null | import asyncio
import json
import logging
import os.path
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity, PLATFORM_SCHEMA
from homeassistant.components.climate.const import (
HVAC_MODE_OFF, HVAC_MODE_HEAT, HVAC_MODE_COOL,
HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_AUTO,
SUPPORT_TARGET_TEMPERATURE, SUPPORT_FAN_MODE,
HVAC_MODES, ATTR_HVAC_MODE)
from homeassistant.const import (
CONF_NAME, STATE_ON, STATE_OFF, STATE_UNKNOWN, ATTR_TEMPERATURE,
PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE)
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_state_change
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from . import COMPONENT_ABS_DIR, Helper
from .controller import Controller
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "SmartIR Climate"
CONF_UNIQUE_ID = 'unique_id'
CONF_DEVICE_CODE = 'device_code'
CONF_CONTROLLER_DATA = "controller_data"
CONF_TEMPERATURE_SENSOR = 'temperature_sensor'
CONF_HUMIDITY_SENSOR = 'humidity_sensor'
CONF_POWER_SENSOR = 'power_sensor'
SUPPORT_FLAGS = (
SUPPORT_TARGET_TEMPERATURE |
SUPPORT_FAN_MODE
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_DEVICE_CODE): cv.positive_int,
vol.Required(CONF_CONTROLLER_DATA): cv.string,
vol.Optional(CONF_TEMPERATURE_SENSOR): cv.entity_id,
vol.Optional(CONF_HUMIDITY_SENSOR): cv.entity_id,
vol.Optional(CONF_POWER_SENSOR): cv.entity_id
})
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the IR Climate platform.

    Resolves the device-code JSON file (downloading it from the SmartIR
    GitHub repository when it is missing locally), parses it, and registers
    one ``SmartIRClimate`` entity built from its contents. Setup aborts
    quietly (with an error log) on download or parse failure.
    """
    device_code = config.get(CONF_DEVICE_CODE)
    device_files_subdir = os.path.join('codes', 'climate')
    device_files_absdir = os.path.join(COMPONENT_ABS_DIR, device_files_subdir)

    if not os.path.isdir(device_files_absdir):
        os.makedirs(device_files_absdir)

    device_json_filename = str(device_code) + '.json'
    device_json_path = os.path.join(device_files_absdir, device_json_filename)

    if not os.path.exists(device_json_path):
        _LOGGER.warning("Couldn't find the device Json file. The component will " \
                        "try to download it from the GitHub repo.")
        try:
            codes_source = ("https://raw.githubusercontent.com/"
                            "smartHomeHub/SmartIR/master/"
                            "codes/climate/{}.json")
            await Helper.downloader(codes_source.format(device_code), device_json_path)
        except Exception:
            # Deliberately broad: the download is best-effort, and any failure
            # here should abort setup without crashing Home Assistant startup.
            _LOGGER.error("There was an error while downloading the device Json file. " \
                          "Please check your internet connection or if the device code " \
                          "exists on GitHub. If the problem still exists please " \
                          "place the file manually in the proper directory.")
            return

    # Device files in the repo are UTF-8; be explicit so parsing does not
    # depend on the host's locale default encoding (e.g. cp1252 on Windows).
    with open(device_json_path, encoding='utf-8') as j:
        try:
            device_data = json.load(j)
        except ValueError:
            # json.JSONDecodeError and UnicodeDecodeError both subclass
            # ValueError; anything else is unexpected and should surface.
            _LOGGER.error("The device Json file is invalid")
            return

    async_add_entities([SmartIRClimate(
        hass, config, device_data
    )])
class SmartIRClimate(ClimateEntity, RestoreEntity):
def __init__(self, hass, config, device_data):
    """Initialize the climate entity from YAML config and the device JSON.

    ``config`` supplies user-facing options (name, controller data, helper
    sensor entity ids); ``device_data`` is the parsed device-code JSON that
    describes the device's capabilities and IR/RF command tables.
    """
    self.hass = hass
    # User configuration (from configuration.yaml).
    self._unique_id = config.get(CONF_UNIQUE_ID)
    self._name = config.get(CONF_NAME)
    self._device_code = config.get(CONF_DEVICE_CODE)
    self._controller_data = config.get(CONF_CONTROLLER_DATA)
    self._temperature_sensor = config.get(CONF_TEMPERATURE_SENSOR)
    self._humidity_sensor = config.get(CONF_HUMIDITY_SENSOR)
    self._power_sensor = config.get(CONF_POWER_SENSOR)
    # Device capabilities (from the device-code JSON file).
    self._manufacturer = device_data['manufacturer']
    self._supported_models = device_data['supportedModels']
    self._supported_controller = device_data['supportedController']
    self._commands_encoding = device_data['commandsEncoding']
    self._min_temperature = device_data['minTemperature']
    self._max_temperature = device_data['maxTemperature']
    self._precision = device_data['precision']
    # Keep only modes Home Assistant recognizes, and always offer "off".
    valid_hvac_modes = [x for x in device_data['operationModes'] if x in HVAC_MODES]
    self._operation_modes = [HVAC_MODE_OFF] + valid_hvac_modes
    self._fan_modes = device_data['fanModes']
    # Nested command table: commands[mode][fan][temperature] -> raw command.
    self._commands = device_data['commands']
    # Runtime state (restored in async_added_to_hass when available).
    self._target_temperature = self._min_temperature
    self._hvac_mode = HVAC_MODE_OFF
    self._current_fan_mode = self._fan_modes[0]
    self._last_on_operation = None
    self._current_temperature = None
    self._current_humidity = None
    self._unit = hass.config.units.temperature_unit
    self._support_flags = SUPPORT_FLAGS
    # Serializes command transmission in send_command().
    self._temp_lock = asyncio.Lock()
    #Init the IR/RF controller
    self._controller = Controller(
        self.hass,
        self._supported_controller,
        self._commands_encoding,
        self._controller_data)
async def async_added_to_hass(self):
    """Run when entity about to be added.

    Restores the previous HVAC state from the recorder and subscribes to
    the optional temperature / humidity / power helper sensors, seeding
    the current readings from their present states.
    """
    await super().async_added_to_hass()

    # Restore mode, fan speed and target temperature from the last session.
    last_state = await self.async_get_last_state()
    if last_state is not None:
        self._hvac_mode = last_state.state
        self._current_fan_mode = last_state.attributes['fan_mode']
        self._target_temperature = last_state.attributes['temperature']
        if 'last_on_operation' in last_state.attributes:
            self._last_on_operation = last_state.attributes['last_on_operation']

    if self._temperature_sensor:
        async_track_state_change(self.hass, self._temperature_sensor,
                                 self._async_temp_sensor_changed)
        # Seed the reading immediately instead of waiting for the next change.
        temp_sensor_state = self.hass.states.get(self._temperature_sensor)
        if temp_sensor_state and temp_sensor_state.state != STATE_UNKNOWN:
            self._async_update_temp(temp_sensor_state)

    if self._humidity_sensor:
        async_track_state_change(self.hass, self._humidity_sensor,
                                 self._async_humidity_sensor_changed)
        humidity_sensor_state = self.hass.states.get(self._humidity_sensor)
        if humidity_sensor_state and humidity_sensor_state.state != STATE_UNKNOWN:
            self._async_update_humidity(humidity_sensor_state)

    if self._power_sensor:
        async_track_state_change(self.hass, self._power_sensor,
                                 self._async_power_sensor_changed)
@property
def unique_id(self):
    """Return a unique ID."""
    return self._unique_id

@property
def name(self):
    """Return the name of the climate device."""
    return self._name

@property
def state(self):
    """Return the current state."""
    return self.hvac_mode

@property
def temperature_unit(self):
    """Return the unit of measurement."""
    return self._unit

@property
def min_temp(self):
    """Return the minimum selectable target temperature."""
    return self._min_temperature

@property
def max_temp(self):
    """Return the maximum selectable target temperature."""
    return self._max_temperature

@property
def target_temperature(self):
    """Return the temperature we try to reach."""
    return self._target_temperature

@property
def target_temperature_step(self):
    """Return the supported step of target temperature."""
    return self._precision

@property
def hvac_modes(self):
    """Return the list of available operation modes."""
    return self._operation_modes

@property
def hvac_mode(self):
    """Return hvac mode ie. heat, cool."""
    return self._hvac_mode

@property
def last_on_operation(self):
    """Return the last non-idle operation ie. heat, cool."""
    return self._last_on_operation

@property
def fan_modes(self):
    """Return the list of available fan modes."""
    return self._fan_modes

@property
def fan_mode(self):
    """Return the fan setting."""
    return self._current_fan_mode

@property
def current_temperature(self):
    """Return the current temperature."""
    return self._current_temperature

@property
def current_humidity(self):
    """Return the current humidity."""
    return self._current_humidity

@property
def supported_features(self):
    """Return the list of supported features."""
    return self._support_flags

@property
def device_state_attributes(self) -> dict:
    """Platform specific attributes."""
    return {
        'last_on_operation': self._last_on_operation,
        'device_code': self._device_code,
        'manufacturer': self._manufacturer,
        'supported_models': self._supported_models,
        'supported_controller': self._supported_controller,
        'commands_encoding': self._commands_encoding,
    }
async def async_set_temperature(self, **kwargs):
    """Set new target temperature (and, optionally, a new HVAC mode).

    The requested value is validated against the device's min/max range
    and snapped to its precision before the IR command is transmitted.
    """
    hvac_mode = kwargs.get(ATTR_HVAC_MODE)
    temperature = kwargs.get(ATTR_TEMPERATURE)

    if temperature is None:
        return

    if temperature < self._min_temperature or temperature > self._max_temperature:
        _LOGGER.warning('The temperature value is out of min/max range')
        return

    # Snap to the device precision. Bug fix: PRECISION_HALVES previously fell
    # through to round(x, 1), producing values such as 21.3 that have no
    # matching entry in the command table.
    if self._precision == PRECISION_WHOLE:
        self._target_temperature = round(temperature)
    elif self._precision == PRECISION_HALVES:
        self._target_temperature = round(temperature * 2) / 2
    else:
        self._target_temperature = round(temperature, 1)

    # A combined temperature+mode request is delegated entirely to
    # async_set_hvac_mode, which also sends the command.
    if hvac_mode:
        await self.async_set_hvac_mode(hvac_mode)
        return

    if not self._hvac_mode.lower() == HVAC_MODE_OFF:
        await self.send_command()
    await self.async_update_ha_state()
async def async_set_hvac_mode(self, hvac_mode):
    """Switch the device to the given HVAC operation mode."""
    self._hvac_mode = hvac_mode
    # Remember the last active (non-off) mode so async_turn_on can restore it.
    if hvac_mode != HVAC_MODE_OFF:
        self._last_on_operation = hvac_mode
    await self.send_command()
    await self.async_update_ha_state()
async def async_set_fan_mode(self, fan_mode):
    """Select a fan speed; retransmit only while the unit is running."""
    self._current_fan_mode = fan_mode
    # While off, just store the speed — it is picked up by the next "on"
    # command instead of being sent immediately.
    if self._hvac_mode.lower() != HVAC_MODE_OFF:
        await self.send_command()
    await self.async_update_ha_state()
async def async_turn_off(self):
    """Turn off."""
    await self.async_set_hvac_mode(HVAC_MODE_OFF)

async def async_turn_on(self):
    """Turn on.

    Restores the last non-off mode when one is known; otherwise falls back
    to the first real mode (index 0 is always HVAC_MODE_OFF).
    """
    if self._last_on_operation is not None:
        await self.async_set_hvac_mode(self._last_on_operation)
    else:
        await self.async_set_hvac_mode(self._operation_modes[1])
async def send_command(self):
    """Transmit the IR/RF command matching the current mode/fan/temperature.

    Serialized with a lock so concurrent service calls cannot interleave
    transmissions. Send failures are logged rather than raised.
    """
    async with self._temp_lock:
        operation_mode = self._hvac_mode
        fan_mode = self._current_fan_mode
        # '{0:g}' drops a trailing '.0' (e.g. 21.0 -> '21') to match the
        # temperature keys used in the device-code JSON command table.
        target_temperature = '{0:g}'.format(self._target_temperature)

        if operation_mode.lower() == HVAC_MODE_OFF:
            command = self._commands['off']
        else:
            # May raise KeyError for a mode/fan/temperature combination the
            # device file does not define; caught and logged below.
            command = self._commands[operation_mode][fan_mode][target_temperature]

        try:
            await self._controller.send(command)
        except Exception as e:
            # Broad on purpose: a failed transmission should never crash the
            # event loop — surface it in the log instead.
            _LOGGER.exception(e)
async def _async_temp_sensor_changed(self, entity_id, old_state, new_state):
    """Handle temperature sensor changes.

    ``new_state`` is None when the sensor entity is being removed.
    """
    if new_state is None:
        return

    self._async_update_temp(new_state)
    await self.async_update_ha_state()

async def _async_humidity_sensor_changed(self, entity_id, old_state, new_state):
    """Handle humidity sensor changes.

    ``new_state`` is None when the sensor entity is being removed.
    """
    if new_state is None:
        return

    self._async_update_humidity(new_state)
    await self.async_update_ha_state()
async def _async_power_sensor_changed(self, entity_id, old_state, new_state):
    """Handle power sensor changes.

    Mirrors externally observed power state into the entity's HVAC mode
    (e.g. the unit was switched with its own remote) without transmitting
    any command ourselves.
    """
    if new_state is None:
        return

    # Power came on while we believed the unit was off: assume it resumed
    # in its last known mode (or the first real mode as a fallback).
    if new_state.state == STATE_ON and self._hvac_mode == HVAC_MODE_OFF:
        if self._last_on_operation is not None:
            self._hvac_mode = self._last_on_operation
        else:
            self._hvac_mode = self._operation_modes[1]
        await self.async_update_ha_state()

    if new_state.state == STATE_OFF:
        self._hvac_mode = HVAC_MODE_OFF
        await self.async_update_ha_state()
@callback
def _async_update_temp(self, state):
    """Update thermostat with latest state from temperature sensor.

    Non-numeric states (e.g. 'unavailable') are logged and ignored.
    """
    try:
        if state.state != STATE_UNKNOWN:
            self._current_temperature = float(state.state)
    except ValueError as ex:
        _LOGGER.error("Unable to update from temperature sensor: %s", ex)
@callback
def _async_update_humidity(self, state):
"""Update thermostat with latest state from humidity sensor."""
try:
if state.state != STATE_UNKNOWN:
self._current_humidity = float(state.state)
except ValueError as ex:
_LOGGER.error("Unable to update from humidity sensor: %s", ex) | 35.731579 | 90 | 0.657313 |
4f74b2cd9d55b172b6c4c378d1830553b915c29c | 20,156 | py | Python | guides/preprocessing_layers.py | jkhales/keras-io | 0da4523f083ca68d4d1970efb523b0c83fac29ab | [
"Apache-2.0"
] | null | null | null | guides/preprocessing_layers.py | jkhales/keras-io | 0da4523f083ca68d4d1970efb523b0c83fac29ab | [
"Apache-2.0"
] | null | null | null | guides/preprocessing_layers.py | jkhales/keras-io | 0da4523f083ca68d4d1970efb523b0c83fac29ab | [
"Apache-2.0"
] | null | null | null | """
Title: Working with preprocessing layers
Authors: Francois Chollet, Mark Omernick
Date created: 2020/07/25
Last modified: 2021/04/23
Description: Overview of how to leverage preprocessing layers to create end-to-end models.
"""
"""
## Keras preprocessing
The Keras preprocessing layers API allows developers to build Keras-native input
processing pipelines. These input processing pipelines can be used as independent
preprocessing code in non-Keras workflows, combined directly with Keras models, and
exported as part of a Keras SavedModel.
With Keras preprocessing layers, you can build and export models that are truly
end-to-end: models that accept raw images or raw structured data as input; models that
handle feature normalization or feature value indexing on their own.
"""
"""
## Available preprocessing
### Text preprocessing
- `TextVectorization` layer: turns raw strings into an encoded representation that can be
read by an `Embedding` layer or `Dense` layer.
### Numerical features preprocessing
- `Normalization` layer: performs feature-wise normalization of input features.
- `Discretization` layer: turns continuous numerical features into integer categorical
features.
### Categorical features preprocessing
- `CategoryEncoding` layer: turns integer categorical features into one-hot, multi-hot,
or count dense representations.
- `Hashing` layer: performs categorical feature hashing, also known as the "hashing
trick".
- `StringLookup` layer: turns string categorical values into an encoded representation that can be
read by an `Embedding` layer or `Dense` layer.
- `IntegerLookup` layer: turns integer categorical values into an encoded representation that can be
read by an `Embedding` layer or `Dense` layer.
### Image preprocessing
These layers are for standardizing the inputs of an image model.
- `Resizing` layer: resizes a batch of images to a target size.
- `Rescaling` layer: rescales and offsets the values of a batch of image (e.g. go from
inputs in the `[0, 255]` range to inputs in the `[0, 1]` range.
- `CenterCrop` layer: returns a center crop of a batch of images.
### Image data augmentation
These layers apply random augmentation transforms to a batch of images. They
are only active during training.
- `RandomCrop` layer
- `RandomFlip` layer
- `RandomTranslation` layer
- `RandomRotation` layer
- `RandomZoom` layer
- `RandomHeight` layer
- `RandomWidth` layer
"""
"""
## The `adapt()` method
Some preprocessing layers have an internal state that must be computed based on
a sample of the training data. The list of stateful preprocessing layers is:
- `TextVectorization`: holds a mapping between string tokens and integer indices
- `StringLookup` and `IntegerLookup`: hold a mapping between input values and integer
indices.
- `Normalization`: holds the mean and standard deviation of the features.
- `Discretization`: holds information about value bucket boundaries.
Crucially, these layers are **non-trainable**. Their state is not set during training; it
must be set **before training**, a step called "adaptation".
You set the state of a preprocessing layer by exposing it to training data, via the
`adapt()` method:
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing

# Fit the Normalization layer's mean/variance on sample data via adapt(),
# then check the transformed features are standardized (mean 0, std 1).
data = np.array([[0.1, 0.2, 0.3], [0.8, 0.9, 1.0], [1.5, 1.6, 1.7],])
layer = preprocessing.Normalization()
layer.adapt(data)
normalized_data = layer(data)

print("Features mean: %.2f" % (normalized_data.numpy().mean()))
print("Features std: %.2f" % (normalized_data.numpy().std()))
"""
The `adapt()` method takes either a Numpy array or a `tf.data.Dataset` object. In the
case of `StringLookup` and `TextVectorization`, you can also pass a list of strings:
"""
# Sample corpus (Greek hexameter lines) used to build the TextVectorization
# vocabulary via adapt(); the layer maps each token to an integer index.
data = [
    "ξεῖν᾽, ἦ τοι μὲν ὄνειροι ἀμήχανοι ἀκριτόμυθοι",
    "γίγνοντ᾽, οὐδέ τι πάντα τελείεται ἀνθρώποισι.",
    "δοιαὶ γάρ τε πύλαι ἀμενηνῶν εἰσὶν ὀνείρων:",
    "αἱ μὲν γὰρ κεράεσσι τετεύχαται, αἱ δ᾽ ἐλέφαντι:",
    "τῶν οἳ μέν κ᾽ ἔλθωσι διὰ πριστοῦ ἐλέφαντος,",
    "οἵ ῥ᾽ ἐλεφαίρονται, ἔπε᾽ ἀκράαντα φέροντες:",
    "οἱ δὲ διὰ ξεστῶν κεράων ἔλθωσι θύραζε,",
    "οἵ ῥ᾽ ἔτυμα κραίνουσι, βροτῶν ὅτε κέν τις ἴδηται.",
]
layer = preprocessing.TextVectorization()
layer.adapt(data)
vectorized_text = layer(data)
print(vectorized_text)
"""
In addition, adaptable layers always expose an option to directly set state via
constructor arguments or weight assignment. If the intended state values are known at
layer construction time, or are calculated outside of the `adapt()` call, they can be set
without relying on the layer's internal computation. For instance, if external vocabulary
files for the `TextVectorization`, `StringLookup`, or `IntegerLookup` layers already
exist, those can be loaded directly into the lookup tables by passing a path to the
vocabulary file in the layer's constructor arguments.
Here's an example where we instantiate a `StringLookup` layer with precomputed vocabulary:
"""
# Supply a precomputed vocabulary at construction time instead of calling
# adapt(); out-of-vocabulary values (e.g. "z") map to the OOV index.
vocab = ["a", "b", "c", "d"]
data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
layer = preprocessing.StringLookup(vocabulary=vocab)
vectorized_data = layer(data)
print(vectorized_data)
"""
## Preprocessing data before the model or inside the model
There are two ways you could be using preprocessing layers:
**Option 1:** Make them part of the model, like this:
```python
inputs = keras.Input(shape=input_shape)
x = preprocessing_layer(inputs)
outputs = rest_of_the_model(x)
model = keras.Model(inputs, outputs)
```
With this option, preprocessing will happen on device, synchronously with the rest of the
model execution, meaning that it will benefit from GPU acceleration.
If you're training on GPU, this is the best option for the `Normalization` layer, and for
all image preprocessing and data augmentation layers.
**Option 2:** apply it to your `tf.data.Dataset`, so as to obtain a dataset that yields
batches of preprocessed data, like this:
```python
dataset = dataset.map(
lambda x, y: (preprocessing_layer(x), y))
```
With this option, your preprocessing will happen on CPU, asynchronously, and will be
buffered before going into the model.
This is the best option for `TextVectorization`, and all structured data preprocessing
layers. It can also be a good option if you're training on CPU
and you use image preprocessing layers.
**When running on TPU, you should always place preprocessing layers in the `tf.data` pipeline**
(with the exception of `Normalization` and `Rescaling`, which run fine on TPU and are commonly
used as the first layer is an image model).
"""
"""
## Benefits of doing preprocessing inside the model at inference time
Even if you go with option 2, you may later want to export an inference-only end-to-end
model that will include the preprocessing layers. The key benefit to doing this is that
**it makes your model portable** and it **helps reduce the
[training/serving skew](https://developers.google.com/machine-learning/guides/rules-of-ml#training-serving_skew)**.
When all data preprocessing is part of the model, other people can load and use your
model without having to be aware of how each feature is expected to be encoded &
normalized. Your inference model will be able to process raw images or raw structured
data, and will not require users of the model to be aware of the details of e.g. the
tokenization scheme used for text, the indexing scheme used for categorical features,
whether image pixel values are normalized to `[-1, +1]` or to `[0, 1]`, etc. This is
especially powerful if you're exporting
your model to another runtime, such as TensorFlow.js: you won't have to
reimplement your preprocessing pipeline in JavaScript.
If you initially put your preprocessing layers in your `tf.data` pipeline,
you can export an inference model that packages the preprocessing.
Simply instantiate a new model that chains
your preprocessing layers and your training model:
```python
inputs = keras.Input(shape=input_shape)
x = preprocessing_layer(inputs)
outputs = training_model(x)
inference_model = keras.Model(inputs, outputs)
```
"""
"""
## Quick recipes
### Image data augmentation
Note that image data augmentation layers are only active during training (similarly to
the `Dropout` layer).
"""
from tensorflow import keras
from tensorflow.keras import layers

# Create a data augmentation stage with horizontal flipping, rotations, zooms.
# These layers are only active during training (inactive at inference).
data_augmentation = keras.Sequential(
    [
        preprocessing.RandomFlip("horizontal"),
        preprocessing.RandomRotation(0.1),
        preprocessing.RandomZoom(0.1),
    ]
)

# Load some data
(x_train, y_train), _ = keras.datasets.cifar10.load_data()
input_shape = x_train.shape[1:]
classes = 10

# Create a tf.data pipeline of augmented images (and their labels);
# augmentation runs asynchronously on CPU inside the input pipeline.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.batch(16).map(lambda x, y: (data_augmentation(x), y))

# Create a model and train it on the augmented image data
inputs = keras.Input(shape=input_shape)
x = preprocessing.Rescaling(1.0 / 255)(inputs)  # Rescale inputs
outputs = keras.applications.ResNet50(  # Add the rest of the model
    weights=None, input_shape=input_shape, classes=classes
)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy")
model.fit(train_dataset, steps_per_epoch=5)
"""
You can see a similar setup in action in the example
[image classification from scratch](https://keras.io/examples/vision/image_classification_from_scratch/).
"""
"""
### Normalizing numerical features
"""
# Load some data (flatten images into feature vectors)
(x_train, y_train), _ = keras.datasets.cifar10.load_data()
x_train = x_train.reshape((len(x_train), -1))
input_shape = x_train.shape[1:]
classes = 10

# Create a Normalization layer and set its internal state using the training data
normalizer = preprocessing.Normalization()
normalizer.adapt(x_train)

# Create a model that includes the normalization layer
inputs = keras.Input(shape=input_shape)
x = normalizer(inputs)
outputs = layers.Dense(classes, activation="softmax")(x)
model = keras.Model(inputs, outputs)

# Train the model
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
model.fit(x_train, y_train)
"""
### Encoding string categorical features via one-hot encoding
"""
# Define some toy data
data = tf.constant([["a"], ["b"], ["c"], ["b"], ["c"], ["a"]])

# Use StringLookup to build an index of the feature values and encode output.
lookup = preprocessing.StringLookup(output_mode="binary")
lookup.adapt(data)

# Convert new test data (which includes unknown feature values;
# "" maps to the mask index, unseen values like "d"/"e" to the OOV index)
test_data = tf.constant([["a"], ["b"], ["c"], ["d"], ["e"], [""]])
encoded_data = lookup(test_data)
print(encoded_data)
"""
Note that index 0 is reserved for missing values (which you should specify as the empty
string `""`), and index 1 is reserved for out-of-vocabulary values (values that were not
seen during `adapt()`). You can configure this by using the `mask_token` and `oov_token`
constructor arguments of `StringLookup`.
You can see the `StringLookup` in action in the
[Structured data classification from scratch](https://keras.io/examples/structured_data/structured_data_classification_from_scratch/)
example.
"""
"""
### Encoding integer categorical features via one-hot encoding
"""
# Define some toy data
data = tf.constant([[10], [20], [20], [10], [30], [0]])

# Use IntegerLookup to build an index of the feature values and encode output.
lookup = preprocessing.IntegerLookup(output_mode="multi_hot")
lookup.adapt(data)

# Convert new test data (which includes unknown feature values;
# 0 maps to the mask index, unseen values like 50/60 to the OOV index)
test_data = tf.constant([[10], [10], [20], [50], [60], [0]])
encoded_data = lookup(test_data)
print(encoded_data)
"""
Note that index 0 is reserved for missing values (which you should specify as the value
0), and index 1 is reserved for out-of-vocabulary values (values that were not seen
during `adapt()`). You can configure this by using the `mask_token` and `oov_token`
constructor arguments of `IntegerLookup`.
You can see the `IntegerLookup` in action in the example
[structured data classification from scratch](https://keras.io/examples/structured_data/structured_data_classification_from_scratch/).
"""
"""
### Applying the hashing trick to an integer categorical feature
If you have a categorical feature that can take many different values (on the order of
10e3 or higher), where each value only appears a few times in the data,
it becomes impractical and ineffective to index and one-hot encode the feature values.
Instead, it can be a good idea to apply the "hashing trick": hash the values to a vector
of fixed size. This keeps the size of the feature space manageable, and removes the need
for explicit indexing.
"""
# Sample data: 10,000 random integers with values between 0 and 100,000
data = np.random.randint(0, 100000, size=(10000, 1))

# Use the Hashing layer to hash the values to the range [0, 64)
# (64 bins, indices 0..63; the salt makes the hash deterministic per pipeline)
hasher = preprocessing.Hashing(num_bins=64, salt=1337)

# Use the CategoryEncoding layer to one-hot encode the hashed values
encoder = preprocessing.CategoryEncoding(num_tokens=64, output_mode="multi_hot")
encoded_data = encoder(hasher(data))
print(encoded_data.shape)
"""
### Encoding text as a sequence of token indices
This is how you should preprocess text to be passed to an `Embedding` layer.
"""
# Define some text data to adapt the layer
adapt_data = tf.constant(
    [
        "The Brain is wider than the Sky",
        "For put them side by side",
        "The one the other will contain",
        "With ease and You beside",
    ]
)

# Create a TextVectorization layer ("int" mode: one integer index per token)
text_vectorizer = preprocessing.TextVectorization(output_mode="int")
# Index the vocabulary via `adapt()`
text_vectorizer.adapt(adapt_data)

# Try out the layer
print(
    "Encoded text:\n", text_vectorizer(["The Brain is deeper than the sea"]).numpy(),
)

# Create a simple model
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(input_dim=text_vectorizer.vocabulary_size(), output_dim=16)(inputs)
x = layers.GRU(8)(x)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)

# Create a labeled dataset (which includes unknown tokens)
train_dataset = tf.data.Dataset.from_tensor_slices(
    (["\nThe Brain is deeper than the sea"], [1])
)

# Preprocess the string inputs, turning them into int sequences
# (vectorization runs in the tf.data pipeline for best training performance)
train_dataset = train_dataset.batch(1).map(lambda x, y: (text_vectorizer(x), y))
# Train the model on the int sequences
print("\nTraining model...")
model.compile(optimizer="rmsprop", loss="mse")
model.fit(train_dataset)

# For inference, you can export a model that accepts strings as input
inputs = keras.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
outputs = model(x)
end_to_end_model = keras.Model(inputs, outputs)

# Call the end-to-end model on test data (which includes unknown tokens)
print("\nCalling end-to-end model on test string...")
test_data = tf.constant(["The one the other will absorb"])
test_output = end_to_end_model(test_data)
print("Model output:", test_output)
"""
You can see the `TextVectorization` layer in action, combined with an `Embedding` layer,
in the example
[text classification from scratch](https://keras.io/examples/nlp/text_classification_from_scratch/).
Note that when training such a model, for best performance, you should always
use the `TextVectorization` layer as part of the input pipeline.
"""
"""
### Encoding text as a dense matrix of ngrams with multi-hot encoding
This is how you should preprocess text to be passed to a `Dense` layer.
"""
# Define some text data to adapt the layer
adapt_data = tf.constant(
    [
        "The Brain is wider than the Sky",
        "For put them side by side",
        "The one the other will contain",
        "With ease and You beside",
    ]
)

# Instantiate TextVectorization with "binary" output_mode (multi-hot)
# and ngrams=2 (index all bigrams)
text_vectorizer = preprocessing.TextVectorization(output_mode="multi_hot", ngrams=2)
# Index the bigrams via `adapt()`
text_vectorizer.adapt(adapt_data)

# Try out the layer
print(
    "Encoded text:\n", text_vectorizer(["The Brain is deeper than the sea"]).numpy(),
)

# Create a simple model; the input is a fixed-size multi-hot vector,
# so a Dense layer (not an Embedding) consumes it directly.
inputs = keras.Input(shape=(text_vectorizer.vocabulary_size(),))
outputs = layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)

# Create a labeled dataset (which includes unknown tokens)
train_dataset = tf.data.Dataset.from_tensor_slices(
    (["\nThe Brain is deeper than the sea"], [1])
)

# Preprocess the string inputs, turning them into int sequences
train_dataset = train_dataset.batch(1).map(lambda x, y: (text_vectorizer(x), y))
# Train the model on the int sequences
print("\nTraining model...")
model.compile(optimizer="rmsprop", loss="mse")
model.fit(train_dataset)

# For inference, you can export a model that accepts strings as input
inputs = keras.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
outputs = model(x)
end_to_end_model = keras.Model(inputs, outputs)

# Call the end-to-end model on test data (which includes unknown tokens)
print("\nCalling end-to-end model on test string...")
test_data = tf.constant(["The one the other will absorb"])
test_output = end_to_end_model(test_data)
print("Model output:", test_output)
"""
### Encoding text as a dense matrix of ngrams with TF-IDF weighting
This is an alternative way of preprocessing text before passing it to a `Dense` layer.
"""
# Define some text data to adapt the layer
adapt_data = tf.constant(
    [
        "The Brain is wider than the Sky",
        "For put them side by side",
        "The one the other will contain",
        "With ease and You beside",
    ]
)

# Instantiate TextVectorization with "tf-idf" output_mode
# (multi-hot with TF-IDF weighting) and ngrams=2 (index all bigrams)
text_vectorizer = preprocessing.TextVectorization(output_mode="tf-idf", ngrams=2)
# Index the bigrams and learn the TF-IDF weights via `adapt()`
text_vectorizer.adapt(adapt_data)

# Try out the layer
print(
    "Encoded text:\n", text_vectorizer(["The Brain is deeper than the sea"]).numpy(),
)

# Create a simple model over the fixed-size weighted vector.
inputs = keras.Input(shape=(text_vectorizer.vocabulary_size(),))
outputs = layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)

# Create a labeled dataset (which includes unknown tokens)
train_dataset = tf.data.Dataset.from_tensor_slices(
    (["\nThe Brain is deeper than the sea"], [1])
)

# Preprocess the string inputs, turning them into int sequences
train_dataset = train_dataset.batch(1).map(lambda x, y: (text_vectorizer(x), y))
# Train the model on the int sequences
print("\nTraining model...")
model.compile(optimizer="rmsprop", loss="mse")
model.fit(train_dataset)

# For inference, you can export a model that accepts strings as input
inputs = keras.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
outputs = model(x)
end_to_end_model = keras.Model(inputs, outputs)

# Call the end-to-end model on test data (which includes unknown tokens)
print("\nCalling end-to-end model on test string...")
test_data = tf.constant(["The one the other will absorb"])
test_output = end_to_end_model(test_data)
print("Model output:", test_output)
"""
## Important gotchas
### Working with lookup layers with very large vocabularies
You may find yourself working with a very large vocabulary in a `TextVectorization`, a `StringLookup` layer,
or an `IntegerLookup` layer. Typically, a vocabulary larger than 500MB would be considered "very large".
In such case, for best performance, you should avoid using `adapt()`.
Instead, pre-compute your vocabulary in advance
(you could use Apache Beam or TF Transform for this)
and store it in a file. Then load the vocabulary into the layer at construction
time by passing the filepath as the `vocabulary` argument.
### Using lookup layers on a TPU pod or with `ParameterServerStrategy`.
There is an outstanding issue that causes performance to degrade when using
a `TextVectorization`, `StringLookup`, or `IntegerLookup` layer while
training on a TPU pod or on multiple machines via `ParameterServerStrategy`.
This is slated to be fixed in TensorFlow 2.7.
"""
| 36.251799 | 134 | 0.753076 |
d3776fa4cf93ea2e311291f280ef7673e1abc39f | 7,867 | py | Python | jina/parsers/__init__.py | HarshCasper/jina | 81ab098b140b74ad1cfdfde9218cec7a40923749 | [
"Apache-2.0"
] | 1 | 2021-02-25T19:28:50.000Z | 2021-02-25T19:28:50.000Z | jina/parsers/__init__.py | HarshCasper/jina | 81ab098b140b74ad1cfdfde9218cec7a40923749 | [
"Apache-2.0"
] | 1 | 2021-02-27T05:56:45.000Z | 2021-02-27T05:57:03.000Z | jina/parsers/__init__.py | deepampatel/jina | 97f9e97a4a678a28bdeacbc7346eaf7bbd2aeb89 | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina.parsers.peapods.runtimes.distributed import mixin_distributed_feature_parser
def set_pea_parser(parser=None):
    """Populate (or create) an argparse parser with all Pea arguments.

    :param parser: an existing parser to extend; when falsy a fresh base
        parser is created first.
    :return: the parser with Pea runtime, container, remote, distributed
        and pea-level argument groups mixed in.
    """
    if not parser:
        from .base import set_base_parser
        parser = set_base_parser()
    from .peapods.base import mixin_base_ppr_parser
    from .peapods.runtimes.zmq import mixin_zmq_runtime_parser
    from .peapods.runtimes.zed import mixin_zed_runtime_parser
    from .peapods.runtimes.container import mixin_container_runtime_parser
    from .peapods.runtimes.remote import mixin_remote_parser
    from .peapods.pea import mixin_pea_parser
    # Each mixin registers one related group of CLI arguments on the parser;
    # the call order determines the group order in --help output.
    mixin_base_ppr_parser(parser)
    mixin_zmq_runtime_parser(parser)
    mixin_zed_runtime_parser(parser)
    mixin_container_runtime_parser(parser)
    mixin_remote_parser(parser)
    mixin_distributed_feature_parser(parser)
    mixin_pea_parser(parser)
    return parser
def set_pod_parser(parser=None):
    """Populate (or create) an argparse parser with all Pod arguments.

    A Pod accepts everything a Pea accepts, plus the Pod-specific options.
    """
    from .peapods.pod import mixin_base_pod_parser
    if not parser:
        from .base import set_base_parser
        parser = set_base_parser()
    set_pea_parser(parser)
    mixin_base_pod_parser(parser)
    return parser
def set_gateway_parser(parser=None):
    """Populate (or create) an argparse parser with Gateway arguments.

    The gateway is a special Pea: after mixing in the shared argument
    groups, defaults are overridden for the gateway role (gRPC runtime,
    gateway socket wiring, read-only, IPC control channel).
    """
    if not parser:
        from .base import set_base_parser
        parser = set_base_parser()
    from .peapods.base import mixin_base_ppr_parser
    from .peapods.runtimes.zmq import mixin_zmq_runtime_parser
    from .peapods.runtimes.zed import mixin_zed_runtime_parser
    from .peapods.runtimes.container import mixin_container_runtime_parser
    from .peapods.runtimes.remote import mixin_remote_parser
    from .peapods.runtimes.remote import mixin_grpc_parser
    from .peapods.pea import mixin_pea_parser
    mixin_base_ppr_parser(parser)
    mixin_zmq_runtime_parser(parser)
    mixin_zed_runtime_parser(parser)
    mixin_grpc_parser(parser)
    mixin_remote_parser(parser)
    mixin_pea_parser(parser)
    from ..enums import SocketType, PodRoleType
    parser.set_defaults(name='gateway',
                        socket_in=SocketType.PULL_CONNECT,  # otherwise there can be only one client at a time
                        socket_out=SocketType.PUSH_CONNECT,
                        ctrl_with_ipc=True,  # otherwise ctrl port would be conflicted
                        read_only=True,
                        runtime_cls='GRPCRuntime',
                        pod_role=PodRoleType.GATEWAY)
    return parser
def set_client_cli_parser(parser=None):
    """Populate (or create) an argparse parser with client CLI arguments."""
    from .client import mixin_client_cli_parser
    from .peapods.runtimes.remote import mixin_grpc_parser, mixin_remote_parser
    if not parser:
        from .base import set_base_parser
        parser = set_base_parser()
    mixin_client_cli_parser(parser)
    mixin_grpc_parser(parser)
    mixin_remote_parser(parser)
    return parser
def get_main_parser():
    """Build the top-level jina CLI parser with every sub-command attached.

    :return: the fully configured top-level argparse parser. Sub-commands
        guarded by ``_SHOW_ALL_ARGS`` are registered but hidden from the
        normal --help output.
    """
    from .base import set_base_parser
    from .helloworld import set_hw_parser, set_hw_chatbot_parser
    from .helper import _chf, _SHOW_ALL_ARGS
    from .check import set_check_parser
    from .export_api import set_export_api_parser
    from .flow import set_flow_parser
    from .hub import set_hub_parser
    from .logger import set_logger_parser
    from .ping import set_ping_parser
    from .optimizer import set_optimizer_parser

    # create the top-level parser
    parser = set_base_parser()

    sp = parser.add_subparsers(dest='cli',
                               description='use `%(prog)-8s [sub-command] --help` '
                               'to get detailed information about each sub-command', required=True)

    # User-facing sub-commands.
    set_hw_parser(sp.add_parser('hello-world',
                                help='👋 Hello World! Hello Jina!',
                                description='Start the hello-world demo, a simple end2end image index and search demo '
                                'without any extra dependencies.',
                                formatter_class=_chf))

    set_pod_parser(sp.add_parser('pod',
                                 help='Start a Pod',
                                 description='Start a Jina Pod',
                                 formatter_class=_chf))

    set_flow_parser(sp.add_parser('flow',
                                  description='Start a Flow that orchestrates multiple pods',
                                  help='Start a Flow',
                                  formatter_class=_chf))

    set_optimizer_parser(sp.add_parser('optimizer',
                                       description='Start a FlowOptimizer from a YAML configuration file',
                                       help='Start an FlowOptimizer from a YAML file', formatter_class=_chf))

    set_gateway_parser(sp.add_parser('gateway',
                                     description='Start a Gateway that receives client Requests via gRPC/REST interface',
                                     help='Start a Gateway',
                                     formatter_class=_chf))

    set_ping_parser(sp.add_parser('ping',
                                  help='Ping a pod and check its connectivity',
                                  description='Ping a remote pod and check the network connectivity',
                                  formatter_class=_chf))

    set_check_parser(sp.add_parser('check',
                                   help='Check the import of all Executors and Drivers',
                                   description='Check the import status of all executors and drivers',
                                   formatter_class=_chf))

    set_hub_parser(sp.add_parser('hub', help='Build, push, pull Jina Hub images',
                                 description='Build, push, pull Jina Hub images',
                                 formatter_class=_chf))

    # Below are low-level / internal / experimental CLIs, hidden from users by default
    set_pea_parser(sp.add_parser('pea',
                                 description='Start a Jina pea. '
                                 'You should rarely use this directly unless you '
                                 'are doing low-level orchestration',
                                 formatter_class=_chf, **(dict(help='start a pea')) if _SHOW_ALL_ARGS else {}))

    set_logger_parser(sp.add_parser('log',
                                    description='Receive piped log output and beautify the log. '
                                    'Depreciated, use Jina Dashboard instead',
                                    formatter_class=_chf,
                                    **(dict(help='beautify the log')) if _SHOW_ALL_ARGS else {}))

    set_client_cli_parser(sp.add_parser('client',
                                        description='Start a Python client that connects to a remote Jina gateway',
                                        formatter_class=_chf,
                                        **(dict(help='start a client')) if _SHOW_ALL_ARGS else {}))

    set_export_api_parser(sp.add_parser('export-api',
                                        description='Export Jina API to JSON/YAML file for 3rd party applications',
                                        formatter_class=_chf,
                                        **(dict(help='export Jina API to file')) if _SHOW_ALL_ARGS else {}))

    set_hw_chatbot_parser(sp.add_parser('hello-world-chatbot',
                                        **(dict(help='Covid-19 chatbot based on DistilBERT')) if _SHOW_ALL_ARGS else {},
                                        description='Start a hello-world demo: a simple Covid-19 chatbot. '
                                        'Pytorch and transformers are required to run this demo',
                                        formatter_class=_chf))
    return parser
| 43.949721 | 121 | 0.597687 |
2d8ae9aeb27b9e20d197b2da8b9561d7cc6731de | 6,795 | py | Python | src/Modules/Lighting/test/test_lighting.py | bopopescu/PyHouse_1 | 6444ed0b4c38ab59b9e419e4d54d65d598e6a54e | [
"MIT"
] | 1 | 2016-09-21T19:30:21.000Z | 2016-09-21T19:30:21.000Z | src/Modules/Lighting/test/test_lighting.py | bopopescu/PyHouse_1 | 6444ed0b4c38ab59b9e419e4d54d65d598e6a54e | [
"MIT"
] | null | null | null | src/Modules/Lighting/test/test_lighting.py | bopopescu/PyHouse_1 | 6444ed0b4c38ab59b9e419e4d54d65d598e6a54e | [
"MIT"
] | 1 | 2020-07-23T11:13:36.000Z | 2020-07-23T11:13:36.000Z | """
@name: PyHouse/src/Modules/Lighting/test/test_lighting.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2016 by D. Brian Kimmel
@note: Created on Apr 9, 2013
@license: MIT License
@summary: Test the home lighting system automation.
Passed all 12 tests. DBK 2016-06-24
"""
# Import system type stuff
from twisted.trial import unittest
import xml.etree.ElementTree as ET
# Import PyMh files and modules.
from Modules.Core.data_objects import LightData
from Modules.Families.family import API as familyAPI
from Modules.Lighting.lighting import API as lightingAPI
from test.xml_data import XML_LONG
from test.testing_mixin import SetupPyHouseObj
from Modules.Lighting.test.xml_controllers import \
TESTING_CONTROLLER_NAME_0, \
TESTING_CONTROLLER_NAME_1
from Modules.Core.test.xml_device import \
TESTING_DEVICE_FAMILY_INSTEON
from Modules.Utilities.debug_tools import PrettyFormatAny
from Modules.Lighting.test.xml_lights import TESTING_LIGHT_NAME_0, TESTING_LIGHT_NAME_1
from Modules.Lighting.test.xml_buttons import TESTING_LIGHTING_BUTTON_NAME_0, TESTING_LIGHTING_BUTTON_NAME_1
class SetupMixin(object):
    """Shared fixture setup used by every lighting test case below.

    Builds the PyHouse object tree and XML fixtures from a parsed XML root,
    plus the lighting API under test and the testing family data.
    """

    def setUp(self, p_root):
        self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj(p_root)
        self.m_xml = SetupPyHouseObj().BuildXml(p_root)
        self.m_light_obj = LightData()
        self.m_api = lightingAPI(self.m_pyhouse_obj)
        self.m_family = familyAPI(self.m_pyhouse_obj).LoadFamilyTesting()
        self.m_pyhouse_obj.House.FamilyData = self.m_family
        # NOTE(review): m_version appears unused by the tests in this file.
        self.m_version = '1.4.0'
class A1_Setup(SetupMixin, unittest.TestCase):
    """ This section tests the master setup above this.
    """

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))

    def test_1_SetupLighting(self):
        """Verify that we can find items we need in the test XML
        """
        l_xml = self.m_api._setup_lighting(self.m_pyhouse_obj)
        self.assertEqual(l_xml.find('ButtonSection').tag, 'ButtonSection')
        self.assertEqual(l_xml.find('ControllerSection').tag, 'ControllerSection')
        self.assertEqual(l_xml.find('LightSection').tag, 'LightSection')

    def test_2_PyHouse(self):
        # The PyHouse object built by the mixin must expose the Xml subtree.
        self.assertIsNotNone(self.m_pyhouse_obj.Xml)

    def test_3_XML(self):
        # The parsed fixture must contain the HouseDivision element.
        self.assertIsNotNone(self.m_xml.house_div)

    def test_4_Light(self):
        # A freshly constructed LightData carries the base-object default name.
        self.assertEqual(self.m_light_obj.Name, 'undefined baseobject')

    def test_5_Api(self):
        # The lighting API itself must have been constructed.
        self.assertIsNotNone(self.m_api)
class A2_XML(SetupMixin, unittest.TestCase):
    """ This section tests the reading and writing of XML used by Lights.
    """

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))

    def test_01_Version(self):
        # NOTE(review): this is a lexicographic string comparison of version
        # numbers ('1.10.0' < '1.4.0'); confirm versions stay single-digit.
        self.assertGreater(self.m_pyhouse_obj.Xml.XmlVersion, '1.4.0')

    def test_02_XmlTags(self):
        """ Be sure that the XML contains the right stuff.
        """
        self.assertEqual(self.m_xml.root.tag, 'PyHouse')
        self.assertEqual(self.m_xml.house_div.tag, 'HouseDivision')
        self.assertEqual(self.m_xml.lighting_sect.tag, 'LightingSection')
        self.assertEqual(self.m_xml.light_sect.tag, 'LightSection')
        self.assertEqual(self.m_xml.light.tag, 'Light')
class B1_Read(SetupMixin, unittest.TestCase):
    """ This section tests reading the lighting XML (buttons, controllers,
    lights) through the lighting API's private read helpers.
    """

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))

    def test_1_Button(self):
        """Utility.
        """
        l_xml = self.m_api._setup_lighting(self.m_pyhouse_obj)
        l_buttons = self.m_api._read_buttons(self.m_pyhouse_obj, l_xml)
        self.assertEqual(len(l_buttons), 2)
        self.assertEqual(l_buttons[0].Name, TESTING_LIGHTING_BUTTON_NAME_0)
        self.assertEqual(l_buttons[0].DeviceFamily, TESTING_DEVICE_FAMILY_INSTEON)
        self.assertEqual(l_buttons[1].Name, TESTING_LIGHTING_BUTTON_NAME_1)

    def test_2_Controller(self):
        """Utility.
        """
        l_xml = self.m_api._setup_lighting(self.m_pyhouse_obj)
        l_dict = self.m_api._read_controllers(self.m_pyhouse_obj, l_xml)
        self.assertEqual(len(l_dict), 2)
        self.assertEqual(l_dict[0].Name, TESTING_CONTROLLER_NAME_0)
        self.assertEqual(l_dict[0].DeviceFamily, TESTING_DEVICE_FAMILY_INSTEON)
        self.assertEqual(l_dict[1].Name, TESTING_CONTROLLER_NAME_1)

    def test_3_Light(self):
        """Utility.
        """
        l_xml = self.m_api._setup_lighting(self.m_pyhouse_obj)
        l_lights = self.m_api._read_lights(self.m_pyhouse_obj, l_xml)
        self.assertEqual(len(l_lights), 2)
        self.assertEqual(l_lights[0].Name, TESTING_LIGHT_NAME_0)
        self.assertEqual(l_lights[0].DeviceFamily, TESTING_DEVICE_FAMILY_INSTEON)
        self.assertEqual(l_lights[1].Name, TESTING_LIGHT_NAME_1)

    def test_4_Lighting(self):
        """Read all the lighting info (Buttons, Controllers, Lights)
        """
        l_obj = self.m_api._read_lighting_xml(self.m_pyhouse_obj)
        self.assertEqual(len(l_obj.Buttons), 2)
        self.assertEqual(len(l_obj.Controllers), 2)
        self.assertEqual(len(l_obj.Lights), 2)
        self.assertEqual(l_obj.Buttons[0].Name, TESTING_LIGHTING_BUTTON_NAME_0)
        self.assertEqual(l_obj.Buttons[1].Name, TESTING_LIGHTING_BUTTON_NAME_1)
        self.assertEqual(l_obj.Controllers[0].Name, TESTING_CONTROLLER_NAME_0)
        self.assertEqual(l_obj.Controllers[1].Name, TESTING_CONTROLLER_NAME_1)
        self.assertEqual(l_obj.Lights[0].Name, TESTING_LIGHT_NAME_0)
        self.assertEqual(l_obj.Lights[1].Name, TESTING_LIGHT_NAME_1)
class B2_Write(SetupMixin, unittest.TestCase):
    """ This section tests writing the lighting XML back out.

    Verifies that the LightingSection round-trips: it must contain the
    LightSection, ButtonSection and ControllerSection, each with 2 entries.
    """

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))
        self.m_pyhouse_obj.House.Lighting = self.m_api._read_lighting_xml(self.m_pyhouse_obj)

    def test_1_lighting(self):
        """Write out the 'LightingSection' which contains the 'LightSection',
        'ButtonSection' and 'ControllerSection'.
        """
        self.m_api._read_lighting_xml(self.m_pyhouse_obj)
        l_xml = ET.Element('HouseDivision')
        l_xml = self.m_api._write_lighting_xml(self.m_pyhouse_obj, l_xml)
        # (removed stray debug print of PrettyFormatAny output left over
        # from development; test output should stay quiet on success)
        self.assertEqual(len(l_xml), 3)
        self.assertEqual(len(l_xml[0]), 2)
        self.assertEqual(len(l_xml[1]), 2)
        self.assertEqual(len(l_xml[2]), 2)
        self.assertEqual(l_xml.find('LightSection').tag, 'LightSection')
        self.assertEqual(l_xml.find('ButtonSection').tag, 'ButtonSection')
        self.assertEqual(l_xml.find('ControllerSection').tag, 'ControllerSection')
        self.assertEqual(l_xml.find('ControllerSection/Controller').tag, 'Controller')
# ## END DBK
| 39.505814 | 108 | 0.710522 |
d6edcc8bcfc1b08410fe799a6e5beb492fa869d9 | 4,919 | py | Python | setup.py | imranashraf/travis-libqasm | fc4d9e53c9b9e7edc2e19a57b5e7b184a1eb9c13 | [
"Apache-2.0"
] | null | null | null | setup.py | imranashraf/travis-libqasm | fc4d9e53c9b9e7edc2e19a57b5e7b184a1eb9c13 | [
"Apache-2.0"
] | null | null | null | setup.py | imranashraf/travis-libqasm | fc4d9e53c9b9e7edc2e19a57b5e7b184a1eb9c13 | [
"Apache-2.0"
] | null | null | null | import os
import subprocess
from shutil import copyfile
from sys import platform
from typing import Dict
from setuptools import setup
# Directory layout: this setup.py lives at the repository root; the C++
# sources live under src/, are built in src/cbuild, and the assembled
# python package ends up in src/libQasm.
root_dir = os.path.dirname(os.path.realpath(__file__))
src_dir = os.path.join(root_dir, 'src')
build_dir = os.path.join(src_dir, "cbuild")
libqasm_dir = os.path.join(src_dir, "libQasm")

# Per-platform-family build settings: which make executable to invoke,
# extra cmake options, and the names of the produced shared libraries.
platforms = {
    'unix': {
        'make_command': 'make',
        'cmake_options': '',
        'clib_name': '_libQasm.so',
        'liblexgram': 'liblexgram.so'
    },
    'darwin': {
        'make_command': 'make',
        'cmake_options': '',
        'clib_name': '_libQasm.so',
        'liblexgram': 'liblexgram.dylib'
    },
    'win32': {
        'make_command': 'mingw32-make',
        'cmake_options': '-G "MinGW Makefiles"',
        'clib_name': '_libQasm.pyd',
        'liblexgram': 'liblexgram.dll'
    }
}
def determine_platform() -> Dict[str, str]:
    """Determine the family of the current platform.

    Based on ``sys.platform``, return the build settings for the UNIX,
    darwin, or win32 family. Other platforms are currently not supported
    and raise an OSError.
    """
    # Linux reports 'linux' on python3 but 'linux2'/'linux3' on python2,
    # so match by prefix instead of enumerating the variants.
    if platform.startswith('linux'):
        return platforms['unix']
    if platform in ('darwin', 'win32'):
        return platforms[platform]
    raise OSError('Platform not recognised!')
def create_directory(directory: str) -> None:
    """Create a directory (and any missing parents) if it does not exist.

    Args:
        directory: the path for the directory that needs to be created.
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() sequence.
    os.makedirs(directory, exist_ok=True)
def build_libqasm_library(make_command: str, cmake_options: str) -> None:
    """Call cmake and make to build the c++ libraries.

    Runs from inside the build directory and restores the repository root
    as the working directory when done.

    Args:
        make_command: the make command to use, varies between windows and unix.
        cmake_options: additional build options to pass to cmake.
    """
    os.chdir(build_dir)
    # Fetch the C++ sources' submodules before configuring.
    cmd = 'git submodule update --init --recursive'
    execute_process(cmd)
    # Configure against the ../library source tree.
    cmd = 'cmake {} {}'.format(cmake_options, os.path.join("..", "library"))
    execute_process(cmd)
    cmd = make_command
    execute_process(cmd)
    # Listing both directories aids CI debugging of the produced artifacts.
    cmd = 'ls {}'.format(libqasm_dir)
    execute_process(cmd)
    cmd = 'ls {}'.format(build_dir)
    execute_process(cmd)
    # Run the library's test target before packaging.
    cmd = '{} test'.format(make_command)
    execute_process(cmd)
    os.chdir(root_dir)
def execute_process(command: str) -> None:
    """Run a shell command and block until it completes.

    Args:
        command: the shell command to execute.
    """
    # Equivalent to Popen(...).communicate(): spawn via the shell, wait
    # for completion, and ignore the exit status.
    subprocess.run(command, shell=True, check=False)
def create_init_file() -> None:
    """Create init file for the libQasm directory.

    Writes a __init__.py that makes the libQasm directory a python
    package, pre-populated with a relative import of libQasm.
    """
    contents = 'from .libQasm import libQasm'
    target = os.path.join(libqasm_dir, '__init__.py')
    with open(target, 'w') as init_fp:
        init_fp.write(contents)
def copy_file(src_dir: str, dest_dir: str, file_name: str) -> None:
    """Copy one named file from the source directory to the destination.

    Args:
        src_dir: source folder from which to copy the specified file.
        dest_dir: destination folder to which to copy the specified file.
        file_name: the file name of the file to copy.
    """
    source_path = os.path.join(src_dir, file_name)
    destination_path = os.path.join(dest_dir, file_name)
    copyfile(source_path, destination_path)
def build_libqasm():
    """Wrapper that calls the different components to build libQasm and place the necessary binaries.

    Returns:
        Tuple of (path to the SWIG C extension, path to the lexgram
        shared library) inside the python package directory.
    """
    sys_platform = determine_platform()
    for directory in [libqasm_dir, build_dir]:
        create_directory(directory)
    build_libqasm_library(sys_platform['make_command'], sys_platform['cmake_options'])

    clibname = sys_platform['clib_name']
    create_init_file()
    # Copy the built artifacts into the package directory for packaging.
    copy_file(build_dir, libqasm_dir, clibname)
    copy_file(build_dir, libqasm_dir, "libQasm.py")
    copy_file(build_dir, libqasm_dir, sys_platform['liblexgram'])
    return os.path.join(libqasm_dir, clibname), os.path.join(libqasm_dir, sys_platform['liblexgram'])
# Build the native libraries first; setup() then packages the produced
# binaries as package data alongside the generated python wrapper.
clib, liblexgram = build_libqasm()

setup(name='libQasm',
      description='libQasm Python Package',
      author='Kelvin Loh',
      author_email='kel85uk@gmail.com',
      url="https://www.github.com/QE-Lab/libqasm/",
      version='0.0.1',
      python_requires='>=3.5',
      packages=['libQasm'],
      package_dir={'': 'src'},
      package_data={'libQasm': [clib, liblexgram]},
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7'],
      license='Other/Proprietary License',
      zip_safe=False)
| 31.735484 | 117 | 0.66111 |
0f81be047dd86151e72b66fac05b45f47f7d446b | 51,378 | py | Python | dev/buildtool/inspection_commands.py | ruchit1705/spinnaker | e7ccbe8718573d16e8ce5f3d2bb7a7b972168aef | [
"Apache-2.0"
] | null | null | null | dev/buildtool/inspection_commands.py | ruchit1705/spinnaker | e7ccbe8718573d16e8ce5f3d2bb7a7b972168aef | [
"Apache-2.0"
] | null | null | null | dev/buildtool/inspection_commands.py | ruchit1705/spinnaker | e7ccbe8718573d16e8ce5f3d2bb7a7b972168aef | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements inspection commands for buildtool.
1) buildtool.sh collect_bom_versions
2) buildtool.sh collect_artifact_versions
3) buildtool.sh audit_artifact_versions
This will produce files of things to prune.
These files should be reviewed; remove from them any entries that you wish to keep.
Then to remove each of the artifacts:
for url in $(cat prune_jars.txt); do
curl -s -u$BINTRAY_USER:$BINTRAY_KEY -X DELETE $url &
done
wait
for url in $(cat prune_debians.txt); do
curl -s -u$BINTRAY_USER:$BINTRAY_KEY -X DELETE $url &
done
wait
for url in $(cat prune_containers.txt); do
gcloud -q container images delete $url --force-delete-tags &
done
wait
for image_name in $(cat prune_images.txt); do
gcloud -q compute images --project $PROJECT delete $image_name &
done
for url in $(cat prune_boms.txt); do
gsutil rm $url
done
"""
from threading import current_thread
from multiprocessing.pool import ThreadPool
import base64
import json
import logging
import os
import re
import sys
import yaml
try:
from urllib2 import urlopen, HTTPError, Request
except ImportError:
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from buildtool import (
CommandFactory,
CommandProcessor,
SemanticVersion,
check_options_set,
check_path_exists,
check_subprocess,
exception_to_message,
maybe_log_exception,
raise_and_log_error,
write_to_path,
ConfigError,
UnexpectedError,
ResponseError)
def my_unicode_representer(self, data):
  """Represent a unicode value as a utf-8 encoded str node (python2 yaml)."""
  encoded = data.encode('utf-8')
  return self.represent_str(encoded)
# On python2, teach yaml to emit unicode values as utf-8 str nodes;
# python3 strings need no special handling.
if sys.version_info[0] == 2:
  yaml.representer.Representer.add_representer(unicode, my_unicode_representer)
# Disable yaml anchors/aliases so repeated objects are written out in full.
yaml.Dumper.ignore_aliases = lambda *args: True
class CollectBomVersions(CommandProcessor):
  """Determine which artifact versions are in use by which boms.

  Ultimately this produces an inverse map of boms. Whereas a bom
  maps to a collection of services and their build information, this
  produces a map of service build information and which boms they apepar in.
  Historically boms were unique builds, however this wasnt supposed to be
  the case, and is no longer the case.

  The map is further partitioned into two files where one contains
  released boms and the other unreleased boms. Unreleased boms are not
  necessarily obsolete.

  Emits files:
    bom_list.txt: A list of all the boms, released and unreleased
    bad_boms.txt: A list of malformed boms with what makes it malformed.
    all_bom_sevice_map.yml: The inverse service version mapping of all the boms
    released_bom_service_map.yml: The subset of all_bom_service_map for boms
        that were released.
    unreleased_bom_service_map.yml: The subset of all_bom_service_map for
        service versions that only appear in unreleased boms.
    nonstandard_boms.txt: A list of boms whose artifactSources do not match
        the values specified via options. Unspecified options match anything.
    config.yml: The configuration values used to determine standard compliance.
  """

  # Matches release versions like "1.2.3" (exactly three numeric parts).
  RELEASED_VERSION_MATCHER = re.compile(r'^\d+(?:\.\d+){2}$')

  @staticmethod
  def url_to_bom_name(url):
    """Given a url to a bom, return the name of the bom."""
    name = url
    dash = name.rfind('/')
    if dash >= 0:
      name = name[dash + 1:]
    return os.path.splitext(name)[0]

  def __init__(self, factory, options, **kwargs):
    # BUGFIX: the original check `a is None != b is None` comparison-chains
    # into `(a is None) and (None != b) and (b is None)`, which can never be
    # True, so the "both or neither" validation never fired. Parenthesizing
    # restores the intended exclusive-or check.
    if (options.bintray_org is None) != (options.bintray_debian_repository is None):
      raise_and_log_error(
          ConfigError('Either neither or both "bintray_org"'
                      ' and "bintray_debian_repository" should be specified'))

    # Maps bom name -> error message for boms that could not be processed.
    self.__bad_files = {}
    # Maps bom version -> its non-standard artifactSources values.
    self.__non_standard_boms = {}

    # We're going to have a bunch of threads each writing into different keys
    # in order to deconflict with one another lockless. Then we'll aggregate
    # it all together when we're done processing for a single aggregate result.
    self.__per_thread_result_map = {}

    self.__expect_docker_registry = options.docker_registry
    self.__expect_debian_repository = (
        'https://dl.bintray.com/%s/%s' % (options.bintray_org,
                                          options.bintray_debian_repository)
        if options.bintray_org
        else None)
    super(CollectBomVersions, self).__init__(
        factory, options, **kwargs)

  def load_bom_from_url(self, url):
    """Returns the bom specification dict from a gcs url, or None on error."""
    logging.debug('Loading %s', url)
    try:
      text = check_subprocess('gsutil cat ' + url)
      return yaml.safe_load(text)
    except Exception as ex:
      # Record the failure so it lands in bad_boms.txt rather than aborting.
      self.__bad_files[self.url_to_bom_name(url)] = exception_to_message(ex)
      maybe_log_exception('load_from_from_url', ex,
                          action_msg='Skipping %s' % url)
      return None

  def extract_bom_info(self, bom):
    """Return a minimal dict identifying this BOM.

    This also includes non-standard config specified by this BOM.
    """
    info = {
        'bom_version': bom['version'],
        'bom_timestamp': bom.get('timestamp', 'NotRecorded')
    }

    artifact_sources = bom.get('artifactSources')
    if artifact_sources is None:
      logging.warning('%s does not have artifactSources', bom['version'])
      return info

    def add_if_nonstandard(name, expect):
      # Only values deviating from the expected standard are recorded.
      if artifact_sources[name] != expect:
        logging.warning('%s has nonstandard %s = %s',
                        bom['version'], name, artifact_sources[name])
        info[name] = artifact_sources[name]

    add_if_nonstandard('dockerRegistry', self.__expect_docker_registry)
    add_if_nonstandard('debianRepository', self.__expect_debian_repository)

    if len(info) > 2:
      # More than the two base keys means something was nonstandard;
      # record just the nonstandard values keyed by bom version.
      problems = dict(info)
      del problems['bom_version']
      del problems['bom_timestamp']
      self.__non_standard_boms[bom['version']] = problems
    return info

  def analyze_bom(self, bom):
    """Analyzes one bom and breaks it down into this threads result_map.

    Boms are processed within a single thread, but multiple boms can be
    processed in different threads.
    """
    tid = current_thread().name
    thread_service_map = self.__per_thread_result_map.get(tid, {})
    self.__per_thread_result_map[tid] = thread_service_map

    bom_info = self.extract_bom_info(bom)
    for name, entry in bom['services'].items():
      if name == 'defaultArtifact':
        continue
      build_version = bom_version = entry['version'] if False else entry['version']
      # Service versions look like "<version>-<buildnum>"; tolerate
      # entries that never recorded a build number.
      parts = build_version.split('-', 1)
      if len(parts) == 1:
        version = parts[0]
        buildnum = 'NotRecorded'
      else:
        version, buildnum = parts
      commit = entry.get('commit', 'NotRecorded')

      # Build the nested service -> version -> commit -> buildnum -> [bom]
      # structure, creating each level lazily.
      service_record = thread_service_map.get(name)
      if service_record is None:
        service_record = {}
        thread_service_map[name] = service_record

      version_map = service_record.get(version)
      if version_map is None:
        version_map = {}
        service_record[version] = version_map

      commit_map = version_map.get(commit)
      if commit_map is None:
        commit_map = {}
        version_map[commit] = commit_map

      build_list = commit_map.get(buildnum)
      if build_list is None:
        build_list = []
        commit_map[buildnum] = build_list
      build_list.append(bom_info)

  def ingest_bom(self, line):
    """Function to ingest a single bom into the result map."""
    bom = self.load_bom_from_url(line)
    if not bom:
      return
    try:
      # Sanity check: the bom's declared version must match its filename.
      if bom['version'] + '.yml' != line[line.rfind('/') + 1:]:
        message = 'BOM version "%s" != filename "%s"' % (bom['version'], line)
        self.__bad_files[self.url_to_bom_name(line.strip())] = message
        logging.warning(message)
        raise_and_log_error(UnexpectedError(message))

      self.analyze_bom(bom)
    except Exception as ex:
      self.__bad_files[self.url_to_bom_name(line.strip())] = (
          exception_to_message(ex))
      maybe_log_exception('analyze_bom', ex,
                          action_msg='Skipping %s' % line)

  def join_result_maps(self):
    """Join the individual thread result maps into a single one.

    This assumes a single threaded environment.
    """
    def join_buildnums(commit_buildnums, result_buildnums):
      for buildnum, info_list in commit_buildnums.items():
        result_info_list = result_buildnums.get(buildnum)
        if result_info_list is None:
          result_info_list = []
          result_buildnums[buildnum] = result_info_list
        result_info_list.extend(info_list)
        # Keep bom references in chronological order.
        result_info_list.sort(key=lambda info: info['bom_timestamp'])

    def join_commits(commit_map, result_commits):
      for commit, commit_buildnums in commit_map.items():
        result_buildnums = result_commits.get(commit)
        if result_buildnums is None:
          result_buildnums = {}
          result_commits[commit] = result_buildnums
        join_buildnums(commit_buildnums, result_buildnums)

    def join_versions(version_map, result_versions):
      for version, commit_map in version_map.items():
        result_commits = result_versions.get(version)
        if result_commits is None:
          result_commits = {}
          result_versions[version] = result_commits
        join_commits(commit_map, result_commits)

    def join_results(thread_results, result_map):
      for name, version_map in thread_results.items():
        result_versions = result_map.get(name)
        if result_versions is None:
          result_versions = {}
          result_map[name] = result_versions
        join_versions(version_map, result_versions)

    result_map = {}
    for thread_results in self.__per_thread_result_map.values():
      join_results(thread_results, result_map)
    return result_map

  def ingest_bom_list(self, bom_list):
    """Ingest each of the boms, concurrently, then merge the results."""
    max_threads = 1 if self.options.one_at_a_time else 64
    pool = ThreadPool(min(max_threads, len(bom_list)))
    pool.map(self.ingest_bom, bom_list)
    pool.close()
    pool.join()
    return self.join_result_maps()

  def list_bom_urls(self, gcs_dir_url_prefix):
    """Get a list of all the bom versions that exist."""
    result = check_subprocess('gsutil ls ' + gcs_dir_url_prefix)
    return [line for line in result.split('\n')
            if line.startswith(gcs_dir_url_prefix) and line.endswith('.yml')]

  def _do_command(self):
    """Reads the list of boms, then concurrently processes them.

    Ultimately it will write out the analysis into bom_service_map.yml
    """
    options = self.options
    url_prefix = 'gs://%s/bom/' % options.halyard_bom_bucket
    if options.version_name_prefix:
      url_prefix += options.version_name_prefix
    logging.debug('Listing BOM urls')
    results = self.list_bom_urls(url_prefix)
    write_to_path('\n'.join(sorted(results)),
                  os.path.join(self.get_output_dir(), 'bom_list.txt'))
    result_map = self.ingest_bom_list(results)

    path = os.path.join(self.get_output_dir(), 'all_bom_service_map.yml')
    logging.info('Writing bom analysis to %s', path)
    write_to_path(yaml.safe_dump(result_map, default_flow_style=False), path)

    partition_names = ['released', 'unreleased']
    partitions = self.partition_service_map(result_map)
    for index, data in enumerate(partitions):
      path = os.path.join(self.get_output_dir(),
                          partition_names[index] + '_bom_service_map.yml')
      logging.info('Writing bom analysis to %s', path)
      write_to_path(yaml.safe_dump(data, default_flow_style=False), path)

    if self.__bad_files:
      path = os.path.join(self.get_output_dir(), 'bad_boms.txt')
      logging.warning('Writing %d bad URLs to %s', len(self.__bad_files), path)
      write_to_path(
          yaml.safe_dump(self.__bad_files, default_flow_style=False),
          path)

    if self.__non_standard_boms:
      path = os.path.join(self.get_output_dir(), 'nonstandard_boms.txt')
      logging.warning('Writing %d nonstandard boms to %s',
                      len(self.__non_standard_boms), path)
      write_to_path(
          yaml.safe_dump(self.__non_standard_boms, default_flow_style=False),
          path)

    # Record the configuration used so the audit is reproducible.
    config = {
        'halyard_bom_bucket': options.halyard_bom_bucket
    }
    path = os.path.join(self.get_output_dir(), 'config.yml')
    logging.info('Writing to %s', path)
    write_to_path(yaml.safe_dump(config, default_flow_style=False), path)

  def partition_service_map(self, result_map):
    """Split the service map into (released, unreleased) partitions.

    A build referenced by at least one released bom is considered
    released; its unreleased references are then dropped.
    """
    def partition_info_list(info_list):
      released = []
      unreleased = []
      for info in info_list:
        if self.RELEASED_VERSION_MATCHER.match(info['bom_version']):
          released.append(info)
        else:
          unreleased.append(info)
      if released:
        # If we released this somewhere, then it isnt unreleased.
        unreleased = []
      return released, unreleased

    def partition_buildnum_map(buildnum_map):
      released = {}
      unreleased = {}
      for buildnum, info_list in buildnum_map.items():
        results = partition_info_list(info_list)
        if results[0]:
          released[buildnum] = results[0]
        if results[1]:
          unreleased[buildnum] = results[1]
      return released, unreleased

    def partition_commit_map(commit_map):
      released = {}
      unreleased = {}
      for commit, buildnum_map in commit_map.items():
        results = partition_buildnum_map(buildnum_map)
        if results[0]:
          released[commit] = results[0]
        if results[1]:
          unreleased[commit] = results[1]
      return released, unreleased

    def partition_version_map(version_map):
      released = {}
      unreleased = {}
      for version, commit_map in version_map.items():
        results = partition_commit_map(commit_map)
        if results[0]:
          released[version] = results[0]
        if results[1]:
          unreleased[version] = results[1]
      if not released:
        released = None
      if not unreleased:
        unreleased = None
      return released, unreleased

    released = {}
    unreleased = {}
    for name, version_map in result_map.items():
      released[name], unreleased[name] = partition_version_map(version_map)
    return released, unreleased
class CollectBomVersionsFactory(CommandFactory):
  """Factory registering the 'collect_bom_versions' command and its flags."""

  def __init__(self, **kwargs):
    super(CollectBomVersionsFactory, self).__init__(
        'collect_bom_versions', CollectBomVersions,
        'Find information about bom versions.', **kwargs)

  def init_argparser(self, parser, defaults):
    """Register the command-line options used when collecting bom versions."""
    super(CollectBomVersionsFactory, self).init_argparser(parser, defaults)
    self.add_argument(parser, 'version_name_prefix', defaults, None,
                      help='Prefix for bom version to collect.')
    self.add_argument(
        parser, 'halyard_bom_bucket', defaults, 'halconfig',
        help='The bucket managing halyard BOMs and config profiles.')
    # The following "expected" values are used to flag nonstandard boms.
    self.add_argument(
        parser, 'docker_registry', defaults, None,
        help='The expected docker registry in boms.')
    self.add_argument(
        parser, 'bintray_org', defaults, None,
        help='The expected bintray organization in boms.')
    self.add_argument(
        parser, 'bintray_debian_repository', defaults, None,
        help='The expected bintray debian repository in boms.')
class CollectArtifactVersions(CommandProcessor):
  """Locate all the existing spinnaker build artifacts.

  Ultimately this produces files mapping all the existing artifact
  builds for each service of a given type. It also looks for consistency
  between the bintray jar and debian builds.

  Emits files:
     <debian_repository>__versions.yml: All the debian build versions
     <jar_repository>__versions.yml: All the jar build versions
     <docker_registry>__versions.yml: All the container build versions
     missing_jars.yml: Bintray debian versions without a corresponding jar
     missing_debians.yml: Bintray jar versions without a corresponding debian
     config.yml: The configuration values used to collect the artifacts
  """

  def __init__(self, factory, options, **kwargs):
    """Validate required options and precompute bintray authorization.

    Bintray credentials are read from the BINTRAY_USER / BINTRAY_KEY
    environment variables; when both are present an HTTP Basic
    Authorization header value is precomputed for later API requests,
    otherwise requests go out unauthenticated.
    """
    super(CollectArtifactVersions, self).__init__(
        factory, options, **kwargs)
    check_options_set(options,
                      ['docker_registry', 'bintray_org',
                       'bintray_jar_repository', 'bintray_debian_repository'])

    user = os.environ.get('BINTRAY_USER')
    password = os.environ.get('BINTRAY_KEY')
    if user and password:
      # NOTE(review): base64.encodestring is the Python 2 API (removed in
      # Python 3.9); [:-1] strips the trailing newline it appends.
      encoded_auth = base64.encodestring('{user}:{password}'.format(
          user=user, password=password))[:-1]  # strip eoln
      self.__basic_auth = 'Basic ' + encoded_auth
    else:
      self.__basic_auth = None
def fetch_bintray_url(self, bintray_url):
request = Request(bintray_url)
if self.__basic_auth:
request.add_header('Authorization', self.__basic_auth)
try:
response = urlopen(request)
headers = response.info()
payload = response.read()
content = json.JSONDecoder(encoding='utf-8').decode(payload)
except HTTPError as ex:
raise_and_log_error(
ResponseError('Bintray failure: {}'.format(ex),
server='bintray.api'),
'Failed on url=%s: %s' % (bintray_url, exception_to_message(ex)))
except Exception as ex:
raise
return headers, content
def list_bintray_packages(self, subject_repo):
path = 'repos/%s/packages' % subject_repo
base_url = 'https://api.bintray.com/' + path
result = []
while True:
url = base_url + '?start_pos=%d' % len(result)
headers, content = self.fetch_bintray_url(url)
# logging.debug('Bintray responded with headers\n%s', headers)
total = headers.get('X-RangeLimit-Total', 0)
result.extend(['%s/%s' % (subject_repo, entry['name'])
for entry in content])
if len(result) >= total:
break
return result
def query_bintray_package_versions(self, package_path):
path = 'packages/' + package_path
url = 'https://api.bintray.com/' + path
_, content = self.fetch_bintray_url(url)
# logging.debug('Bintray responded with headers\n%s', headers)
package_name = package_path[package_path.rfind('/') + 1:]
return (package_name, content['versions'])
def difference(self, versions, target):
missing = []
for version in versions:
if not version in target:
missing.append(version)
return missing
  def find_missing_jar_versions(self, jar_map, debian_map):
    """Return {jar_name: [versions]} of debian builds lacking a jar build.

    Debian packages are typically named 'spinnaker-<service>' while jars
    may use the bare service name, so the prefix is stripped first and the
    full name used as a fallback. The monitoring daemon debian maps onto
    the 'spinnaker-monitoring' jar; monitoring-third-party has no jar.
    """
    missing_jars = {}
    prefix = 'spinnaker-'
    for package, versions in debian_map.items():
      if package.startswith(prefix):
        key = package[len(prefix):]
        if not key in jar_map:
          key = package
      if key == 'spinnaker-monitoring-daemon':
        key = 'spinnaker-monitoring'
      if not key in jar_map:
        # NOTE(review): both branches below just skip the package, so the
        # special case is redundant -- presumably debians without any jar
        # counterpart are intentionally ignored rather than reported.
        if key == 'spinnaker-monitoring-third-party':
          continue
        continue

      missing = self.difference(versions, jar_map.get(key))
      if missing:
        missing_jars[key] = missing
    return missing_jars
  def find_missing_debian_versions(self, jar_map, debian_map):
    """Return {debian_name: [versions]} of jar builds lacking a debian.

    Jar packages map onto 'spinnaker-<name>' debian packages with the
    bare name as a fallback; the monitoring jar ships as the
    spinnaker-monitoring-daemon debian.
    """
    missing_debians = {}
    for package, versions in jar_map.items():
      key = 'spinnaker-' + package
      if not key in debian_map:
        key = package
      if not key in debian_map:
        if key == 'spinnaker-monitoring':
          key = 'spinnaker-monitoring-daemon'
        else:
          # NOTE(review): wording is odd -- we are iterating jar packages;
          # this fires when no matching debian package exists at all.
          raise ValueError('Unknown DEBIAN "%s"' % package)

      missing = self.difference(versions, debian_map.get(key))
      if missing:
        missing_debians[key] = missing
    return missing_debians
  def collect_bintray_versions(self, pool):
    """Collect jar and debian versions from bintray in parallel.

    Writes one '<repo>__<type>_versions.yml' file per repository and
    returns (jar_map, debian_map), each mapping package name to the list
    of its published versions.
    """
    options = self.options
    repos = [('jar', options.bintray_jar_repository),
             ('debian', options.bintray_debian_repository)]
    results = []
    for repo_type, bintray_repo in repos:
      subject_repo = '%s/%s' % (options.bintray_org, bintray_repo)
      packages = self.list_bintray_packages(subject_repo)
      # Fan the per-package version queries out over the thread pool.
      package_versions = pool.map(self.query_bintray_package_versions,
                                  packages)

      package_map = {}
      for name, versions in package_versions:
        package_map[name] = versions
      results.append(package_map)

      path = os.path.join(
          self.get_output_dir(),
          '%s__%s_versions.yml' % (bintray_repo, repo_type))
      logging.info('Writing %s versions to %s', bintray_repo, path)
      write_to_path(yaml.safe_dump(package_map,
                                   allow_unicode=True,
                                   default_flow_style=False), path)
    # results[0] is the jar map, results[1] the debian map (repos order).
    return results[0], results[1]
def query_gcr_image_versions(self, image):
options = self.options
command_parts = ['gcloud',
'--format=json',
'container images list-tags',
image, '--limit 10000']
if options.gcb_service_account:
command_parts.extend(['--account', options.gcb_service_account])
response = check_subprocess(' '.join(command_parts))
result = []
for version in json.JSONDecoder(encoding='utf-8').decode(response):
result.extend(version['tags'])
return (image[image.rfind('/') + 1:], result)
  def collect_gcb_versions(self, pool):
    """Collect container tags from the docker registry via gcloud.

    Writes '<registry>__gcb_versions.yml' and returns a map of image name
    to its list of tags.
    """
    options = self.options
    logging.debug('Collecting GCB versions from %s', options.docker_registry)
    command_parts = ['gcloud',
                     '--format=json',
                     'container images list',
                     '--repository', options.docker_registry]
    if options.gcb_service_account:
      logging.debug('Using account %s', options.gcb_service_account)
      command_parts.extend(['--account', options.gcb_service_account])

    response = check_subprocess(' '.join(command_parts))
    images = [entry['name']
              for entry in json.JSONDecoder(encoding='utf-8').decode(response)]
    # Query tags for each image in parallel on the shared pool.
    image_versions = pool.map(self.query_gcr_image_versions, images)

    image_map = {}
    for name, versions in image_versions:
      image_map[name] = versions

    path = os.path.join(
        self.get_output_dir(),
        options.docker_registry.replace('/', '__') + '__gcb_versions.yml')
    logging.info('Writing %s versions to %s', options.docker_registry, path)
    write_to_path(yaml.safe_dump(image_map,
                                 allow_unicode=True,
                                 default_flow_style=False), path)
    return image_map
  def collect_gce_image_versions(self):
    """Collect spinnaker GCE image versions; return {module: [versions]}.

    Image names are expected to look like
    'spinnaker-<module>-<maj>-<min>-<patch>-<buildnum>'; the dashes in the
    version triple are converted back into dots.
    """
    options = self.options
    project = options.publish_gce_image_project
    logging.debug('Collecting GCE image versions from %s', project)
    command_parts = ['gcloud', '--format=json',
                     'compute images list', '--project', project,
                     '--filter spinnaker-']
    if options.build_gce_service_account:
      logging.debug('Using account %s', options.build_gce_service_account)
      command_parts.extend(['--account', options.build_gce_service_account])
    response = check_subprocess(' '.join(command_parts))
    images = [entry['name']
              for entry in json.JSONDecoder(encoding='utf-8').decode(response)]

    image_map = {}
    for name in images:
      # Split off the 'spinnaker' prefix and the module name.
      parts = name.split('-', 2)
      if len(parts) != 3:
        logging.warning('Skipping malformed %s', name)
        continue
      _, module, build_version = parts
      # The remaining build version must be exactly four dash-separated
      # components: major-minor-patch-buildnum.
      parts = build_version.split('-')
      if len(parts) != 4:
        logging.warning('Skipping malformed %s', name)
        continue
      version_list = image_map.get(module, [])
      version_list.append('{}.{}.{}-{}'.format(*parts))
      image_map[module] = version_list

    path = os.path.join(
        self.get_output_dir(), project + '__gce_image_versions.yml')
    logging.info('Writing gce image versions to %s', path)
    write_to_path(yaml.safe_dump(image_map,
                                 allow_unicode=True,
                                 default_flow_style=False), path)
    return image_map
  def _do_command(self):
    """Collect all artifact versions, then cross-check jars vs debians."""
    pool = ThreadPool(16)
    bintray_jars, bintray_debians = self.collect_bintray_versions(pool)
    self.collect_gcb_versions(pool)
    self.collect_gce_image_versions()
    pool.close()
    pool.join()

    missing_jars = self.find_missing_jar_versions(
        bintray_jars, bintray_debians)
    missing_debians = self.find_missing_debian_versions(
        bintray_jars, bintray_debians)

    # Write a missing_<repo>.yml for each repository with discrepancies.
    options = self.options
    for which in [(options.bintray_jar_repository, missing_jars),
                  (options.bintray_debian_repository, missing_debians)]:
      if not which[1]:
        logging.info('%s is all accounted for.', which[0])
        continue
      path = os.path.join(self.get_output_dir(), 'missing_%s.yml' % which[0])
      logging.info('Writing to %s', path)
      write_to_path(
          yaml.safe_dump(which[1], allow_unicode=True,
                         default_flow_style=False),
          path)

    # Record the configuration used so downstream commands can consume it.
    config = {
        'bintray_org': options.bintray_org,
        'bintray_jar_repository': options.bintray_jar_repository,
        'bintray_debian_repository': options.bintray_debian_repository,
        'docker_registry': options.docker_registry,
        'googleImageProject': options.publish_gce_image_project
    }
    path = os.path.join(self.get_output_dir(), 'config.yml')
    logging.info('Writing to %s', path)
    write_to_path(yaml.safe_dump(config, default_flow_style=False), path)
class CollectArtifactVersionsFactory(CommandFactory):
  """Command factory wiring up the collect_artifact_versions command."""

  def __init__(self, **kwargs):
    super(CollectArtifactVersionsFactory, self).__init__(
        'collect_artifact_versions', CollectArtifactVersions,
        'Find information about artifact jar/debian versions.', **kwargs)

  def init_argparser(self, parser, defaults):
    super(CollectArtifactVersionsFactory, self).init_argparser(
        parser, defaults)
    self.add_argument(
        parser, 'bintray_org', defaults, None,
        help='bintray organization for the jar and debian repositories.')
    self.add_argument(
        parser, 'bintray_jar_repository', defaults, None,
        help='bintray repository in the bintray_org containing published jars.')
    self.add_argument(
        parser, 'bintray_debian_repository', defaults, None,
        help='bintray repository in the bintray_org containing debians.')
    self.add_argument(
        parser, 'version_name_prefix', defaults, None,
        help='Prefix for bintray versions to collect.')
    self.add_argument(
        parser, 'gcb_service_account', defaults, None,
        help='The service account to use when checking gcr images.')
    self.add_argument(
        parser, 'docker_registry', defaults, None,
        # Fixed: the previous help text was garbled
        # ('The GCB service account query image versions from.').
        help='The docker registry to query image versions from.')
    self.add_argument(
        parser, 'build_gce_service_account', defaults, None,
        help='The service account to use with the gce project.')
    self.add_argument(
        parser, 'publish_gce_image_project', defaults, None,
        help='The GCE project to collect images from.')  # fixed 'ot' typo
class AuditArtifactVersions(CommandProcessor):
  """Given the collected BOMs and artifacts, separate good from bad.

  Ultimately this determines which existing artifacts are in use and which are
  not referenced by a bom. It also verifies the integrity of the boms with
  regard to the existence of the artifacts they specify. It will emit files
  that suggest which specific boms and artifacts can be deleted. The artifacts
  in use by the boms suggested for pruning are not included in the prune list.
  They will be nominated in the next round.

  Emits files:
    audit_confirmed_boms.yml: All the boms that have been verified intact.
    audit_found_<type>.yml: All the artifacts of <type> that were referenced
        by a bom.
    audit_missing_<type>.yml: All the artifacts of <type> that were not
        referenced by a bom.
    audit_unused_<type>.yml: All the artifacts of <type> that were referenced
        by a bom but not found to actually exist. These are for documentation.
        The audit_invalid_boms.yml file is more useful.
    audit_invalid_boms.yml: All the boms whose integrity is suspect along with
        the explanation as to why. Usually they are missing artifacts, but
        there could be other reasons.
    prune_<type>.txt The list of URLs that should be safe to delete for the
        given <type> from a strict referential integrity standpoint. There
        could be unanticipated uses of these artifacts.
  """

  def __init_bintray_versions_helper(self, base_path):
    """Load the version maps written by collect_artifact_versions.

    Expects exactly one *_versions.yml file per artifact kind under
    <base_path>/collect_artifact_versions, and loads each into the
    corresponding private map (container/jar/debian/gce image).
    """
    artifact_data_dir = os.path.join(base_path, 'collect_artifact_versions')
    debian_paths = []
    jar_paths = []
    gcr_paths = []
    image_paths = []
    for filename in os.listdir(artifact_data_dir):
      path = os.path.join(artifact_data_dir, filename)
      if filename.endswith('__gcb_versions.yml'):
        gcr_paths.append(path)
      elif filename.endswith('__jar_versions.yml'):
        jar_paths.append(path)
      elif filename.endswith('__debian_versions.yml'):
        debian_paths.append(path)
      elif filename.endswith('__gce_image_versions.yml'):
        image_paths.append(path)

    # There must be exactly one collected file per artifact kind.
    for name, found in [('jar', jar_paths), ('debian', debian_paths),
                        ('gce image', image_paths), ('gcr image', gcr_paths)]:
      if len(found) != 1:
        raise_and_log_error(
            ConfigError(
                'Expected 1 %s version files in "%s": %s' % (
                    name, artifact_data_dir, found)))

    logging.debug('Loading container image versions from "%s"', gcr_paths[0])
    with open(gcr_paths[0], 'r') as stream:
      self.__container_versions = yaml.safe_load(stream.read())
    with open(jar_paths[0], 'r') as stream:
      self.__jar_versions = yaml.safe_load(stream.read())
    with open(debian_paths[0], 'r') as stream:
      self.__debian_versions = yaml.safe_load(stream.read())
    with open(image_paths[0], 'r') as stream:
      self.__gce_image_versions = yaml.safe_load(stream.read())
def __extract_all_bom_versions(self, bom_map):
result = set([])
for versions in bom_map.values():
if not versions:
continue
for commits in versions.values():
for buildnum in commits.values():
for info_list in buildnum.values():
for info in info_list:
result.add(info['bom_version'])
return result
  def __remove_old_bom_versions(self, min_semver, version_to_commit_boms):
    """Remove references to older boms in collected bom info.

    Args:
      min_semver: [SemanticVersion] minimally acceptable semantic version
      version_to_commit_boms: [dict of {commit_id, build_info}]
         where build_info is a dictionary mapping buildnum to list of
         bom_metadata dictionaries.

    Returns:
      copy of versions but without build_info referencing older bom_versions.
    """
    def list_of_current_bom_meta(min_semver, all_bom_meta):
      # Keep only bom metadata whose bom_version is >= the cutoff.
      good_bom_meta = []
      for bom_meta in all_bom_meta:
        semver = SemanticVersion.make('ignored-' + bom_meta['bom_version'])
        if SemanticVersion.compare(semver, min_semver) >= 0:
          good_bom_meta.append(bom_meta)
      return good_bom_meta

    def commit_to_current_bom_meta(min_semver, build_map):
      # Filter each buildnum's metadata list, dropping emptied buildnums.
      build_info = {}
      for buildnum, all_bom_meta in build_map.items():
        good_bom_meta = list_of_current_bom_meta(min_semver, all_bom_meta)
        if good_bom_meta:
          build_info[buildnum] = good_bom_meta
      return build_info

    result = {}
    for version, commit_build_map in version_to_commit_boms.items():
      commit_map = {}
      for commit_id, orig_build_map in commit_build_map.items():
        build_map = commit_to_current_bom_meta(min_semver, orig_build_map)
        if build_map:
          commit_map[commit_id] = build_map
      if commit_map:
        result[version] = commit_map
      else:
        # NOTE(review): grammar -- presumably "because its bom versions".
        logging.info(
            'Dropping version=%s because it bom versions are all too old.',
            version)
    return result
  def __init__(self, factory, options, **kwargs):
    """Load all collected bom and artifact data and init audit state."""
    if options.prune_min_buildnum_prefix is not None:
      # Typically numeric so is interpreted as number from yaml
      options.prune_min_buildnum_prefix = str(options.prune_min_buildnum_prefix)

    super(AuditArtifactVersions, self).__init__(factory, options, **kwargs)
    base_path = os.path.dirname(self.get_output_dir())
    self.__init_bintray_versions_helper(base_path)

    # Normalize the minimum bom version into a full maj.min.patch triple.
    min_version = options.min_audit_bom_version or '0.0.0'
    min_parts = min_version.split('.')
    if len(min_parts) < 3:
      min_version += '.0' * (3 - len(min_parts))
    self.__min_semver = SemanticVersion.make('ignored-' + min_version)

    bom_data_dir = os.path.join(base_path, 'collect_bom_versions')
    path = os.path.join(bom_data_dir, 'released_bom_service_map.yml')
    check_path_exists(path, 'released bom analysis')
    with open(path, 'r') as stream:
      self.__all_released_boms = {}      # forever
      self.__current_released_boms = {}  # since min_version to audit
      for service, versions in yaml.safe_load(stream.read()).items():
        if not versions:
          # e.g. this service has not yet been released.
          logging.info('No versions for service=%s', service)
          continue
        self.__all_released_boms[service] = versions
        self.__current_released_boms[service] = versions
        stripped_versions = self.__remove_old_bom_versions(
            self.__min_semver, versions)
        if stripped_versions:
          self.__current_released_boms[service] = stripped_versions
        # NOTE(review): when every version is older than the cutoff,
        # stripped_versions is empty and the FULL version list remains in
        # current_released_boms -- confirm that is intended.

    path = os.path.join(bom_data_dir, 'unreleased_bom_service_map.yml')
    check_path_exists(path, 'unreleased bom analysis')
    with open(path, 'r') as stream:
      self.__unreleased_boms = yaml.safe_load(stream.read())

    self.__only_bad_and_invalid_boms = False
    # Every bom version referenced anywhere (released or unreleased).
    self.__all_bom_versions = self.__extract_all_bom_versions(
        self.__all_released_boms)
    self.__all_bom_versions.update(
        self.__extract_all_bom_versions(self.__unreleased_boms))
    # Accumulators populated by the audit_* methods.
    self.__missing_debians = {}
    self.__missing_jars = {}
    self.__missing_containers = {}
    self.__missing_images = {}
    self.__found_debians = {}
    self.__found_jars = {}
    self.__found_containers = {}
    self.__found_images = {}
    self.__unused_jars = {}
    self.__unused_debians = {}
    self.__unused_containers = {}
    self.__unused_gce_images = {}
    self.__invalid_boms = {}
    self.__confirmed_boms = set([])
    # Pruning suggestions computed by determine_prunings().
    self.__prune_boms = []
    self.__prune_jars = {}
    self.__prune_debians = {}
    self.__prune_containers = {}
    self.__prune_gce_images = {}
    self.__invalid_versions = {}
  def audit_artifacts(self):
    """Audit boms against artifacts and vice-versa, writing audit_*.yml logs."""
    self.audit_bom_services(self.__all_released_boms, 'released')
    self.audit_bom_services(self.__unreleased_boms, 'unreleased')
    self.audit_package(
        'jar', self.__jar_versions, self.__unused_jars)
    self.audit_package(
        'debian', self.__debian_versions, self.__unused_debians)
    self.audit_package(
        'container', self.__container_versions, self.__unused_containers)
    self.audit_package(
        'image',
        self.__gce_image_versions, self.__unused_gce_images)

    def maybe_write_log(what, data):
      # Write an audit_<what>.yml file, skipping empty data sets.
      if not data:
        return
      path = os.path.join(self.get_output_dir(), 'audit_' + what + '.yml')
      logging.info('Writing %s', path)
      write_to_path(
          yaml.safe_dump(data, allow_unicode=True, default_flow_style=False),
          path)

    confirmed_boms = self.__all_bom_versions - set(self.__invalid_boms.keys())
    # Released boms older than the audit cutoff are reported but unchecked.
    unchecked_releases = [
        key
        for key in self.__all_bom_versions
        if (CollectBomVersions.RELEASED_VERSION_MATCHER.match(key)
            and SemanticVersion.compare(SemanticVersion.make('ignored-' + key),
                                        self.__min_semver) < 0)]
    invalid_releases = {
        key: bom
        for key, bom in self.__invalid_boms.items()
        if (CollectBomVersions.RELEASED_VERSION_MATCHER.match(key)
            and SemanticVersion.compare(SemanticVersion.make('ignored-' + key),
                                        self.__min_semver) >= 0)}
    confirmed_releases = [
        key
        for key in confirmed_boms
        if (CollectBomVersions.RELEASED_VERSION_MATCHER.match(key)
            and SemanticVersion.compare(SemanticVersion.make('ignored-' + key),
                                        self.__min_semver) >= 0)]

    maybe_write_log('missing_debians', self.__missing_debians)
    maybe_write_log('missing_jars', self.__missing_jars)
    maybe_write_log('missing_containers', self.__missing_containers)
    maybe_write_log('missing_images', self.__missing_images)
    maybe_write_log('found_debians', self.__found_debians)
    maybe_write_log('found_jars', self.__found_jars)
    maybe_write_log('found_containers', self.__found_containers)
    maybe_write_log('found_images', self.__found_images)
    maybe_write_log('unused_debians', self.__unused_debians)
    maybe_write_log('unused_jars', self.__unused_jars)
    maybe_write_log('unused_containers', self.__unused_containers)
    maybe_write_log('unused_images', self.__unused_gce_images)
    maybe_write_log('invalid_boms', self.__invalid_boms)
    maybe_write_log('confirmed_boms', sorted(list(confirmed_boms)))
    maybe_write_log('confirmed_releases', sorted(list(confirmed_releases)))
    maybe_write_log('invalid_versions', self.__invalid_versions)
    maybe_write_log('invalid_releases', invalid_releases)
    maybe_write_log('unchecked_releases', unchecked_releases)
def most_recent_version(self, name, versions):
"""Find the most recent version built."""
if not versions:
return None
raw_versions = set([version.split('-')[0] for version in versions])
sem_vers = []
for text in raw_versions:
try:
sem_vers.append(SemanticVersion.make('version-' + text))
except Exception as ex:
bad_list = self.__invalid_versions.get(name, [])
bad_list.append(text)
self.__invalid_versions[name] = bad_list
logging.error('Ignoring invalid %s version "%s": %s', name, text, ex)
return sorted(sem_vers, cmp=SemanticVersion.compare)[-1].to_version()
def test_buildnum(self, buildver):
dash = buildver.rfind('-')
if dash < 0:
return True
buildnum = buildver[dash + 1:]
return buildnum < self.options.prune_min_buildnum_prefix
def determine_bom_candidates(self):
path = os.path.join(os.path.dirname(self.get_output_dir()),
'collect_bom_versions', 'bom_list.txt')
candidates = []
with open(path, 'r') as stream:
for line in stream.read().split('\n'):
if line.endswith('-latest-unvalidated.yml'):
continue
bom = CollectBomVersions.url_to_bom_name(line)
if not CollectBomVersions.RELEASED_VERSION_MATCHER.match(bom):
candidates.append(line)
return candidates
  def determine_prunings(self):
    """Compute which boms and unused artifacts are candidates for deletion."""
    def filter_from_candidates(newest_version, candidate_version_list):
      # Optionally keep the newest version and/or anything at or above the
      # minimum build number cutoff; everything else remains a candidate.
      if self.options.prune_keep_latest_version:
        prune_version = lambda ver: not ver.startswith(newest_version)
      else:
        prune_version = lambda ver: True
      if self.options.prune_min_buildnum_prefix:
        prune_buildnum = self.test_buildnum
      else:
        prune_buildnum = lambda ver: True
      return [candidate for candidate in candidate_version_list
              if prune_version(candidate) and prune_buildnum(candidate)]

    self.__prune_boms = [name for name in self.determine_bom_candidates()
                         if self.test_buildnum(name)]

    # Every service we saw at least one debian or container for.
    service_list = set(self.__found_debians.keys())
    service_list.update(set(self.__found_containers.keys()))
    for name in service_list:
      skip_versions = self.__invalid_versions.get(name, [])
      for unused_map, prune_map in [
          (self.__unused_jars, self.__prune_jars),
          (self.__unused_debians, self.__prune_debians),
          (self.__unused_gce_images, self.__prune_gce_images),
          (self.__unused_containers, self.__prune_containers)]:
        unused_list = unused_map.get(name, None)
        if unused_list is None:
          # Debian-style maps key on the 'spinnaker-' prefixed name.
          unused_list = unused_map.get('spinnaker-' + name, [])
        newest_version = self.most_recent_version(name, unused_list)
        candidates = filter_from_candidates(newest_version, unused_list)

        # We're going to keep malformed versions. These are rare so
        # we'll leave it to manual cleanup.
        pruned = [version
                  for version in candidates if not version in skip_versions]
        if pruned:
          prune_map[name] = sorted(pruned)
  def suggest_prunings(self):
    """Write prune_*.txt files listing the deletable artifact URLs."""
    path = os.path.join(os.path.dirname(self.get_output_dir()),
                        'collect_bom_versions', 'config.yml')
    with open(path, 'r') as stream:
      # NOTE(review): bom_config is loaded but never used below.
      bom_config = yaml.safe_load(stream.read())
    path = os.path.join(os.path.dirname(self.get_output_dir()),
                        'collect_artifact_versions', 'config.yml')
    with open(path, 'r') as stream:
      art_config = yaml.safe_load(stream.read())

    if self.__prune_boms:
      path = os.path.join(self.get_output_dir(), 'prune_boms.txt')
      logging.info('Writing to %s', path)
      write_to_path('\n'.join(sorted(self.__prune_boms)), path)

    jar_repo_path = 'packages/%s/%s' % (
        art_config['bintray_org'], art_config['bintray_jar_repository'])
    debian_repo_path = 'packages/%s/%s' % (
        art_config['bintray_org'], art_config['bintray_debian_repository'])
    # Per artifact type: a function producing the URL prefix for a service.
    artifact_prefix_func = {
        'jar': lambda name: 'https://api.bintray.com/%s/%s/versions/' % (
            jar_repo_path, name),
        'debian': lambda name: 'https://api.bintray.com/%s/%s/versions/' % (
            debian_repo_path,
            name if name == 'spinnaker' else 'spinnaker-' + name),
        'container': lambda name: '%s/%s:' % (
            art_config['docker_registry'], name),
        'image': lambda name: 'spinnaker-%s-' % name
    }
    # Per artifact type: a function converting a version to its URL form.
    artifact_version_func = {
        'jar': lambda version: version,
        'debian': lambda version: version,
        'container': lambda version: version,
        'image': lambda version: version.replace('.', '-')
    }

    for art_type, art_map in [('jar', self.__prune_jars),
                              ('debian', self.__prune_debians),
                              ('container', self.__prune_containers),
                              ('image', self.__prune_gce_images)]:
      urls = []
      for service, art_list in art_map.items():
        prefix = artifact_prefix_func[art_type](service)
        version_func = artifact_version_func[art_type]
        urls.extend([prefix + version_func(version) for version in art_list])
      if urls:
        path = os.path.join(self.get_output_dir(), 'prune_%ss.txt' % art_type)
        logging.info('Writing to %s', path)
        write_to_path('\n'.join(sorted(urls)), path)
def _do_command(self):
self.audit_artifacts()
self.determine_prunings()
self.suggest_prunings()
def audit_container(self, service, build_version, entries):
if service in ['spinnaker', 'monitoring-third-party']:
return True # not applicable
if service in self.__container_versions:
versions = self.__container_versions[service]
elif service in ['monitoring-daemon']:
versions = self.__container_versions.get('monitoring-daemon', [])
else:
versions = []
if build_version in versions:
holder = self.__found_containers.get(service, {})
holder[build_version] = entries
self.__found_containers[service] = holder
return True
holder = self.__missing_containers.get(service, {})
holder[build_version] = entries
self.__missing_containers[service] = holder
logging.warning('Missing %s container %s', service, build_version)
return False
def audit_image(self, service, build_version, entries):
if service in ['spinnaker',
'monitoring-third-party', 'monitoring-daemon']:
return True # not applicable
versions = self.__gce_image_versions.get(service, [])
if build_version in versions:
holder = self.__found_images.get(service, {})
holder[build_version] = entries
self.__found_images[service] = holder
return True
holder = self.__missing_images.get(service, {})
holder[build_version] = entries
self.__missing_images[service] = holder
logging.warning('Missing %s gce image %s', service, build_version)
return False
def audit_jar(self, service, build_version, entries):
if service in self.__jar_versions:
versions = self.__jar_versions[service]
elif service in ['monitoring-daemon', 'monitoring-third-party']:
versions = self.__jar_versions.get('spinnaker-monitoring', [])
else:
versions = []
if build_version in versions:
holder = self.__found_jars.get(service, {})
holder[build_version] = entries
self.__found_jars[service] = holder
return True
holder = self.__missing_jars.get(service, {})
holder[build_version] = entries
self.__missing_jars[service] = holder
logging.warning('Missing %s jar %s', service, build_version)
return False
def audit_debian(self, service, build_version, info_list):
versions = []
if service in self.__debian_versions:
key = service
versions = self.__debian_versions[service]
else:
key = 'spinnaker-' + service
if key in self.__debian_versions:
versions = self.__debian_versions[key]
if build_version in versions:
holder = self.__found_debians.get(service, {})
holder[build_version] = info_list
self.__found_debians[service] = holder
return True
holder = self.__missing_debians.get(key, {})
holder[build_version] = info_list
self.__missing_debians[key] = holder
logging.warning('Missing %s debian %s', key, build_version)
return False
def package_in_bom_map(self, service, version, buildnum, service_map):
version_map = service_map.get(service)
if version_map is None:
return False
commit_map = version_map.get(version)
if commit_map is None:
return False
for _, buildnums in commit_map.items():
if buildnum in buildnums:
return True
return False
  def audit_package_helper(self, package, version, buildnum, which):
    """Check one artifact build against the boms; record unused builds.

    Returns True if some released or unreleased bom references the
    (version, buildnum) build of this package; otherwise appends the
    build version string to which[package] and returns False.
    """
    if package in self.__all_released_boms or package in self.__unreleased_boms:
      name = package
    elif package.startswith('spinnaker-'):
      # bom service maps use bare names; strip the debian-style prefix.
      name = package[package.find('-') + 1:]
    else:
      return False

    is_released = self.package_in_bom_map(
        name, version, buildnum, self.__all_released_boms)
    is_unreleased = self.package_in_bom_map(
        name, version, buildnum, self.__unreleased_boms)
    if is_released or is_unreleased:
      return True

    data_list = which.get(package, [])
    if buildnum:
      data_list.append('%s-%s' % (version, buildnum))
    else:
      data_list.append(version)
    which[package] = data_list
    return False
def audit_package(self, kind, packages, which):
logging.info('Auditing %s packages', kind)
for package, versions in packages.items():
if package == 'halyard':
logging.warning('Skipping halyard.')
continue
for build_version in versions:
parts = build_version.split('-', 1)
if len(parts) == 1:
logging.warning('Unexpected %s version %s', package, build_version)
continue
version, buildnum = parts
self.audit_package_helper(package, version, buildnum, which)
  def audit_bom_services(self, bom_services, title):
    """Audit every service build referenced by the given bom service map."""
    def add_invalid_boms(jar_ok, deb_ok, container_ok, image_ok,
                         service, version_buildnum, info_list, invalid_boms):
      # Record, per referencing bom, which artifact kinds were missing.
      if jar_ok and deb_ok and container_ok and image_ok:
        return
      kind_checks = [(jar_ok, 'jars'), (deb_ok, 'debs'),
                     (container_ok, 'containers'), (image_ok, 'images')]
      for info in info_list:
        bom_version = info['bom_version']
        bom_record = invalid_boms.get(bom_version, {})
        for is_ok, kind in kind_checks:
          if not is_ok:
            problems = bom_record.get(kind, {})
            problems[service] = version_buildnum
            bom_record[kind] = problems
        invalid_boms[bom_version] = bom_record

    def audit_service(service, versions):
      # Check every (version, buildnum) build across all artifact kinds.
      for version, commits in versions.items():
        for _, buildnums in commits.items():
          for buildnum, info_list in buildnums.items():
            version_buildnum = '%s-%s' % (version, buildnum)
            jar_ok = self.audit_jar(service, version_buildnum, info_list)
            deb_ok = self.audit_debian(service, version_buildnum, info_list)
            gcr_ok = self.audit_container(service, version_buildnum, info_list)
            image_ok = self.audit_image(service, version_buildnum, info_list)
            add_invalid_boms(jar_ok, deb_ok, gcr_ok, image_ok,
                             service, version_buildnum,
                             info_list, self.__invalid_boms)

    logging.debug('Auditing %s BOMs', title)
    for service, versions in bom_services.items():
      if not versions:
        logging.debug('No versions for %s', service)
        continue
      audit_service(service, versions)
class AuditArtifactVersionsFactory(CommandFactory):
  """Command factory wiring up the audit_artifact_versions command."""

  def __init__(self, **kwargs):
    super(AuditArtifactVersionsFactory, self).__init__(
        'audit_artifact_versions', AuditArtifactVersions,
        'Audit artifact versions in BOMs and vice-versa', **kwargs)

  def init_argparser(self, parser, defaults):
    super(AuditArtifactVersionsFactory, self).init_argparser(parser, defaults)
    # (name, default, extra add_argument kwargs), registered in the
    # original order to keep --help output stable.
    for arg_name, default_value, extra in [
        ('min_audit_bom_version', None,
         dict(help='Minimum released bom version to audit.')),
        ('prune_min_buildnum_prefix', None,
         dict(help='Only suggest pruning artifacts with a smaller build'
                   ' number. This is actually just a string, not a number'
                   ' so is a string compare.')),
        ('prune_keep_latest_version', False,
         dict(type=bool,
              help='If true, suggest only artifacts whose version is not'
                   ' the most recent version among the boms surveyed.'))]:
      self.add_argument(parser, arg_name, defaults, default_value, **extra)
def register_commands(registry, subparsers, defaults):
  """Register all collect/audit command factories with the registry."""
  for factory_class in (CollectBomVersionsFactory,
                        CollectArtifactVersionsFactory,
                        AuditArtifactVersionsFactory):
    factory_class().register(registry, subparsers, defaults)
| 39.160061 | 80 | 0.673712 |
11dad7a49215f395b5a76f2a4c572b754d7df93f | 835 | py | Python | userbot/plugins/plane.py | thecyberbyte-tech/Secktor-Userbot | 5ede9c98e4480ec48ad5dd114a5bf2da3df6dc3f | [
"MIT"
] | null | null | null | userbot/plugins/plane.py | thecyberbyte-tech/Secktor-Userbot | 5ede9c98e4480ec48ad5dd114a5bf2da3df6dc3f | [
"MIT"
] | null | null | null | userbot/plugins/plane.py | thecyberbyte-tech/Secktor-Userbot | 5ede9c98e4480ec48ad5dd114a5bf2da3df6dc3f | [
"MIT"
] | null | null | null | """By STARKTM1
cmd : .plane"""
from telethon import events
import asyncio
import os
import sys
@borg.on(events.NewMessage(pattern=r"\.plane", outgoing=True))
async def _(event):
    """Animate a plane flying across the message, then delete it."""
    if event.fwd_from:
        return
    track_width = 14
    # Slide the plane one cell at a time across a fixed-width dash track,
    # editing the message once per frame (same 14 frames as before).
    for position in range(track_width):
        frame = "-" * position + "✈" + "-" * (track_width - 1 - position)
        await event.edit(frame)
    await asyncio.sleep(3)
    await event.delete()
| 26.935484 | 62 | 0.45988 |
083a83dafa7542838a539de20d8ae3194134ae9f | 7,052 | py | Python | flask_gdrive/flask_gdrive.py | Shubby98/Flask-GDrive | b6876fa224b24f4d00ff9382a46a7bc573331198 | [
"MIT"
] | 8 | 2019-10-09T19:42:56.000Z | 2020-02-22T02:14:09.000Z | flask_gdrive/flask_gdrive.py | Shubby98/Flask-GDrive | b6876fa224b24f4d00ff9382a46a7bc573331198 | [
"MIT"
] | 9 | 2019-10-22T16:32:23.000Z | 2019-12-26T17:57:32.000Z | flask_gdrive/flask_gdrive.py | Shubby98/Flask-GDrive | b6876fa224b24f4d00ff9382a46a7bc573331198 | [
"MIT"
] | 7 | 2019-10-22T20:29:54.000Z | 2021-01-27T19:42:00.000Z | from __future__ import print_function
import pickle
import os.path
import threading
import time
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from flask import current_app, _app_ctx_stack, Response, url_for
class GDriveMain:
    """Base class for using Google Drive api"""

    def __init__(self, app, creds, token, *args):
        """Store the Flask app and seed its config with credential paths.

        Args:
            app: the Flask application (may be None for deferred init).
            creds: path to the OAuth client secrets file.
            token: path where the pickled user token is cached.
            *args: forwarded to init_app (subclass-specific).
        """
        self.app = app
        # Drive id of the remote folder; resolved later by subclasses.
        self.folder_id = ""
        if app is not None:
            app.config.setdefault('GDRIVE_CREDENTIALS_URI', creds)
            app.config.setdefault('GDRIVE_TOKEN_URI', token)
            self.init_app(app, *args)

    def init_app(self, app, *args):
        # Hook for subclasses; intentionally a no-op here.
        pass

    def connect(self):
        """Return valid Google OAuth2 credentials, caching them on disk."""
        SCOPES = [  # Google API scopes this integration requests.
            'https://www.googleapis.com/auth/drive.readonly',
            'https://www.googleapis.com/auth/spreadsheets'
        ]
        creds = None
        """
        GDRIVE_TOKEN_URI stores token.pickle file and it is use to stores the user's access and refresh tokens,
        and is created automatically when the authorization flow completes for the first
        time.
        """
        if os.path.exists(current_app.config['GDRIVE_TOKEN_URI']):
            with open(current_app.config['GDRIVE_TOKEN_URI'], 'rb') as token:
                creds = pickle.load(token)
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                # Silent refresh using the stored refresh token.
                creds.refresh(Request())
            else:
                # Interactive browser-based OAuth consent flow.
                flow = InstalledAppFlow.from_client_secrets_file(
                    current_app.config['GDRIVE_CREDENTIALS_URI'], SCOPES)
                creds = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open(current_app.config['GDRIVE_TOKEN_URI'], 'wb') as token:
                pickle.dump(creds, token)
        return creds
class GDriveStatic(GDriveMain):
    """Serve static assets stored in a Google Drive folder.

    Files are looked up inside the Drive folder named by the
    ``GDRIVE_STATIC_FOLDER`` config key and streamed back to the client.
    """
    def teardown(self, exception):
        """Drop the cached Drive service when the app context ends."""
        ctx = _app_ctx_stack.top
        if hasattr(ctx, 'gdrive_service'):
            ctx.gdrive_service = None
    def init_app(self, app, *args):
        """Register the remote folder name (args[0]) and the teardown hook."""
        remote_folder = args[0]
        app.config.setdefault('GDRIVE_STATIC_FOLDER', remote_folder)
        app.teardown_appcontext(self.teardown)
    @property
    def gdrive_service(self):
        """Lazily build and cache a Drive v3 service on the app context.

        Also resolves and caches the id of the configured static folder.
        Raises IOError if the folder cannot be found on Drive.  Returns
        None when called outside an application context.
        """
        ctx = _app_ctx_stack.top
        if ctx is not None:
            if not hasattr(ctx, 'gdrive_service'):
                creds = self.connect()
                service = build('drive', 'v3', credentials=creds)
                result = service.files().list(
                    pageSize=1, fields='nextPageToken, files(id, mimeType)', q="name='{}'".format(current_app.config['GDRIVE_STATIC_FOLDER'])).execute()
                items = result.get('files', [])
                if not items:
                    raise IOError("Folder not found in Google Drive")
                self.folder_id = items[0]['id']
                ctx.gdrive_service = service
                print(self.folder_id)
            return ctx.gdrive_service
    def fileHandler(self, fpath):
        """Fetch *fpath* from the static folder and return a Flask response.

        Google-Docs style files (.html/.txt) are exported; other files are
        downloaded as-is and wrapped in a Response with the proper mimetype
        when known.  Raises IOError when the file is missing.
        """
        _, ext = os.path.splitext(fpath)
        # Google Docs files must be *exported* to a concrete format.
        doc_mimetypes = {
            '.html': 'text/html',
            '.txt': 'text/plain',
        }
        other_mimetypes = {
            '.pdf': 'application/pdf',
            '.jpeg': 'image/jpeg',
            '.jpg': 'image/jpeg',
            '.png': 'image/png',
            # BUGFIX: the registered MIME type for SVG is image/svg+xml,
            # not image/svg; browsers will not render the latter inline.
            '.svg': 'image/svg+xml'
        }
        results = self.gdrive_service.files().list(
            pageSize=1, fields="nextPageToken, files(id, mimeType)", q=f"name='{fpath}' and '{self.folder_id}' in parents").execute()
        items = results.get('files', [])
        if not items:
            raise IOError('File Not Found')
        file_id = items[0]['id']
        if ext in doc_mimetypes:
            res = self.gdrive_service.files().export_media(
                fileId=file_id, mimeType=doc_mimetypes[ext]).execute()
            return res, 200
        res = self.gdrive_service.files().get_media(fileId=file_id).execute()
        # BUGFIX: only fall back to a bare (body, status) tuple when the
        # extension has no known mimetype; the previous bare ``except:``
        # hid every unrelated error raised by Response().
        try:
            return Response(res, mimetype=other_mimetypes[ext])
        except KeyError:
            return res, 200
    def g_url_for(self, fpath):
        """Return the routed URL for *fpath* via the ``fileHandler`` endpoint."""
        return url_for('fileHandler', fpath=fpath)
class GDriveDB(GDriveMain):
    """Use Google Sheets as a simple read/write data store.

    Sheet contents are fetched once per application context and cached in
    ``self.global_values`` (one entry per configured sheet name).
    """
    def teardown(self, exception):
        """Drop the cached sheet data when the app context ends."""
        ctx = _app_ctx_stack.top
        if hasattr(ctx, 'gdrive_db'):
            # BUGFIX: the original reset ``ctx.gdrive_service`` here, which
            # left the stale ``gdrive_db`` cache alive on the context.
            ctx.gdrive_db = None
    def init_app(self, app, *args):
        """Store the sheet-name -> spreadsheet-id mapping and the teardown hook.

        args[0]: dict mapping a logical sheet name to its spreadsheet id.
        args[1]: optional cache refresh interval in seconds (default 0,
                 currently unused - see TODO below).
        """
        self.remote_sheets = args[0]
        self.RANGE = 'A1:Z'
        if len(args) > 1:
            cache_update = args[1]
        else:
            cache_update = 0
        self.cache_update_time = cache_update
        app.teardown_appcontext(self.teardown)
    @property
    def gdrive_db(self):
        """Lazily fetch all configured sheets and cache them on the context.

        Raises IOError when a configured sheet returns no values.  Returns
        None when called outside an application context.
        """
        ctx = _app_ctx_stack.top
        if ctx is not None:
            if not hasattr(ctx, 'gdrive_db'):
                creds = self.connect()
                service = build('sheets', 'v4', credentials=creds)
                sheet = service.spreadsheets()
                self.global_values = dict()
                for s in self.remote_sheets:
                    result = sheet.values().get(
                        spreadsheetId=self.remote_sheets[s], range=self.RANGE).execute()
                    values = result.get('values', [])
                    if not values:
                        raise IOError("Sheet not found")
                    self.global_values[s] = values
                ctx.gdrive_db = self.global_values
                self.sheet = sheet
            return ctx.gdrive_db
    def update(self, sheet_name):
        """Push the locally cached values of *sheet_name* back to Sheets."""
        result = self.sheet.values().update(
            spreadsheetId=self.remote_sheets[sheet_name], range=self.RANGE,
            body={'values': self.global_values[sheet_name]}, valueInputOption="RAW").execute()
    # TODO: a periodic cache refresh driven by ``self.cache_update_time``
    # was sketched with a background thread but never finished.
| 35.437186 | 153 | 0.549348 |
cf3438619bf06112eb658238054ba49f0278fc22 | 5,122 | py | Python | akshare/stock_fundamental/stock_finance_hk.py | J-Z-Z/akshare | 0a9ca71b381a272e2f56211e455ff2493dfed17a | [
"MIT"
] | 1 | 2022-01-23T13:55:24.000Z | 2022-01-23T13:55:24.000Z | akshare/stock_fundamental/stock_finance_hk.py | J-Z-Z/akshare | 0a9ca71b381a272e2f56211e455ff2493dfed17a | [
"MIT"
] | 102 | 2021-09-30T20:54:38.000Z | 2021-12-28T13:24:28.000Z | akshare/stock_fundamental/stock_finance_hk.py | J-Z-Z/akshare | 0a9ca71b381a272e2f56211e455ff2493dfed17a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/1/6 15:15
Desc: 港股-基本面数据
新浪财经-财务分析-财务指标
http://stock.finance.sina.com.cn/hkstock/finance/00700.html#a1
"""
import pandas as pd
import requests
def stock_financial_hk_report_em(
    stock: str = "00700", symbol: str = "现金流量表", indicator: str = "年度"
) -> pd.DataFrame:
    """
    东方财富-港股-财务报表-三大报表
    https://emweb.securities.eastmoney.com/PC_HKF10/FinancialAnalysis/index?type=web&code=00700
    :param stock: 股票代码
    :type stock: str
    :param symbol: choice of {"资产负债表", "利润表", "现金流量表"}
    :type symbol: str
    :param indicator: choice of {"年度", "报告期"}
    :type indicator: str
    :return: 东方财富-港股-财务报表-三大报表
    :rtype: pandas.DataFrame
    """
    # Map the reporting period onto the API's ``rtype`` parameter.
    if indicator == "年度":
        rtype = 6
    elif indicator == "报告期":
        rtype = 0
    else:
        raise Exception("请输入正确的 indicator !", indicator)
    base_url = "https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis"
    if symbol == "资产负债表":
        url = f"{base_url}/GetZCFZB?code={stock}&startdate=&ctype=4&rtype={rtype}"  # 资产负债表
    elif symbol == "利润表":
        url = f"{base_url}/GetLRB?code={stock}&startdate=&ctype=4&rtype={rtype}"  # 利润表
    elif symbol == "现金流量表":
        url = f"{base_url}/GetXJLLB?code={stock}&startdate=&rtype={rtype}"  # 现金流量表
    else:
        # BUGFIX: an unknown ``symbol`` previously fell through and crashed
        # later with UnboundLocalError on ``url``.
        raise Exception("请输入正确的 symbol !", symbol)
    r = requests.get(url)
    # BUGFIX/security: parse the JSON body instead of calling eval() on the
    # raw response text of an external service.
    data_json = r.json()
    temp_df = pd.DataFrame(data_json["data"])
    # The first row carries the column headers.
    temp_df.columns = temp_df.loc[0]
    temp_df = temp_df.drop(0, axis=0)
    temp_df['截止日期'] = pd.to_datetime(temp_df["截止日期"], format="%y-%m-%d").dt.date
    temp_df.reset_index(drop=True, inplace=True)
    temp_df.columns.name = None
    return temp_df
def stock_financial_hk_analysis_indicator_em(
    symbol: str = "00700", indicator: str = "年度"
) -> pd.DataFrame:
    """
    东方财富-港股-财务分析-主要指标
    https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/index?type=web&code=00700
    :param symbol: 股票代码
    :type symbol: str
    :param indicator: choice of {"年度", "报告期"}
    :type indicator: str
    :return: 东方财富-港股-财务分析-主要指标
    :rtype: pandas.DataFrame
    """
    # Select the JSON key for the requested reporting period.
    if indicator == "年度":
        key = "zyzb_an"
    elif indicator == "报告期":
        key = "zyzb_abgq"
    else:
        raise Exception("非法的关键字!", indicator)
    url = f"http://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetZYZB?code={symbol}"
    r = requests.get(url)
    # BUGFIX/security: parse the JSON body instead of calling eval() on the
    # raw response text of an external service.
    temp_df = pd.DataFrame.from_records(r.json()["data"][key])
    # The first row carries the column headers.
    temp_df.columns = temp_df.loc[0]
    temp_df = temp_df.drop(0, axis=0)
    temp_df["周期"] = pd.to_datetime(temp_df["每股指标"], format="%y-%m-%d").dt.date
    temp_df = temp_df.drop("每股指标", axis=1)
    # Fix the column order; section header columns (成长能力指标 etc.) are kept
    # to preserve the upstream table layout.
    temp_df = temp_df[
        [
            "周期",
            "基本每股收益(元)",
            "稀释每股收益(元)",
            "TTM每股收益(元)",
            "每股净资产(元)",
            "每股经营现金流(元)",
            "每股营业收入(元)",
            "成长能力指标",
            "营业总收入(元)",
            "毛利润",
            "归母净利润",
            "营业总收入同比增长(%)",
            "毛利润同比增长(%)",
            "归母净利润同比增长(%)",
            "营业总收入滚动环比增长(%)",
            "毛利润滚动环比增长(%)",
            "归母净利润滚动环比增长(%)",
            "盈利能力指标",
            "平均净资产收益率(%)",
            "年化净资产收益率(%)",
            "总资产净利率(%)",
            "毛利率(%)",
            "净利率(%)",
            "年化投资回报率(%)",
            "盈利质量指标",
            "所得税/利润总额(%)",
            "经营现金流/营业收入(%)",
            "财务风险指标",
            "资产负债率(%)",
            "流动负债/总负债(%)",
            "流动比率",
        ]
    ]
    temp_df.reset_index(drop=True, inplace=True)
    temp_df.columns.name = None
    temp_df['周期'] = pd.to_datetime(temp_df['周期']).dt.date
    return temp_df
if __name__ == "__main__":
    # Smoke-test every indicator/statement combination for Tencent (00700),
    # printing each resulting DataFrame, in the same order as before.
    for period in ("年度", "报告期"):
        stock_financial_hk_analysis_indicator_em_df = (
            stock_financial_hk_analysis_indicator_em(symbol="00700", indicator=period)
        )
        print(stock_financial_hk_analysis_indicator_em_df)
    for statement in ("资产负债表", "利润表", "现金流量表"):
        for period in ("年度", "报告期"):
            stock_financial_hk_report_em_df = stock_financial_hk_report_em(
                stock="00700", symbol=statement, indicator=period
            )
            print(stock_financial_hk_report_em_df)
| 32.624204 | 149 | 0.621046 |
55cc144a33f9c56d3320b3f888cd361d9b0e6cc7 | 1,287 | py | Python | ews/urls.py | mrustl/plattform | 7c9fce2a697b7c9d3de0bd08382571ed89469281 | [
"MIT"
] | null | null | null | ews/urls.py | mrustl/plattform | 7c9fce2a697b7c9d3de0bd08382571ed89469281 | [
"MIT"
] | 3 | 2021-06-07T10:30:55.000Z | 2021-06-07T14:00:32.000Z | ews/urls.py | mrustl/plattform | 7c9fce2a697b7c9d3de0bd08382571ed89469281 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = "ews"
urlpatterns=[
# List Views
path("bathingspots", views.bathingspots, name="bathing_spots"),
path("sites", views.sites, name="sites"),
path("", views.mlmodels, name="mlmodels"),
#authorization
#path("login", views.login_view, name="login"),
#path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
# create views
path("spot_create", views.spot_create, name="spot_create"),
path("model_config", views.model_config, name="model_config"),
path("model_delete/<int:model_id>", views.delete_model, name="delete_model"),
path("model_edit/<int:model_id>", views.model_edit, name="model_edit"),
path("add_site", views.add_site, name="add_site"),
path("delete_site/<int:site_id>", views.delete_site, name="delete_site"),
path("site_detail/<int:site_id>", views.site_detail, name="site_detail"),
path("selectarea_create", views.selectarea_create, name="selectarea_create"),
# ??
path("detail/<int:spot_id>", views.detail_view, name = "detail"),
path("file_upload/<int:site_id>", views.file_upload, name="file_upload"),
path('model_fit/<int:model_id>', views.model_fit, name ='model_fit'),
path('prediction_switch/<int:model_id>', views.prediction_switch, name ='prediction_switch')
] | 39 | 92 | 0.74359 |
6c3ed6418aeccd226e1d9b82b4f3d7cbef8f4498 | 4,889 | py | Python | utility/feature_extractor/feature_extractor_mother.py | BA-HanseML/NF_Prj_MIMII_Dataset | c9dd130a48c5ee28491a3f9369ace8f7217753d6 | [
"MIT"
] | 10 | 2020-08-25T21:12:32.000Z | 2021-11-04T22:14:37.000Z | utility/feature_extractor/feature_extractor_mother.py | BA-HanseML/NF_Prj_MIMII_Dataset | c9dd130a48c5ee28491a3f9369ace8f7217753d6 | [
"MIT"
] | 44 | 2020-05-04T11:37:55.000Z | 2021-09-26T04:12:23.000Z | utility/feature_extractor/feature_extractor_mother.py | ArneSch/NF_Prj_MIMII_Dataset | c9dd130a48c5ee28491a3f9369ace8f7217753d6 | [
"MIT"
] | 4 | 2020-11-24T02:14:13.000Z | 2021-07-01T08:52:59.000Z | print('load feature_extractor_mother')
# Enumeration off the IDs of feature extraction implemented as feature extractor classes
from enum import Enum
class feature_extractor_type(Enum):
    """Identifies the concrete extractor subclass stored in a pickle.

    NOTE: these numeric values are serialized into saved extractor files
    (see ``feature_extractor.get_dict``), so existing values and member
    names must never be changed.
    """
    BASE = 0
    PSD = 1
    MEL_SPECTRUM = 2
    WELECHPSD = 3  # note: "Welch" is misspelled; kept for pickle compatibility
    ICA2 = 11
    preNNFILTER = 101
# Autoloader function based on a dictionary that would be stored in a feature extractor pickle file
def feature_extractor_from_dict(d, base_folder):
    """Re-create the matching feature extractor object from its dict form.

    ``d`` is the dictionary produced by ``feature_extractor.get_dict()``;
    its ``para_dict['type']`` selects the concrete subclass.
    Raises ValueError for an unknown extractor type (previously an unknown
    type fell through and crashed with UnboundLocalError on ``fe``).
    """
    fe_type = d['para_dict']['type']
    if fe_type == feature_extractor_type.MEL_SPECTRUM:
        fe = feature_extractor_mel(base_folder)
    elif fe_type == feature_extractor_type.WELECHPSD:
        fe = feature_extractor_welchPSD(base_folder)
    elif fe_type == feature_extractor_type.preNNFILTER:
        fe = feature_extractor_pre_nnFilterDenoise(base_folder)
    elif fe_type == feature_extractor_type.ICA2:
        fe = feature_extractor_ICA2(base_folder)
    else:
        raise ValueError('unknown feature extractor type: {}'.format(fe_type))
    fe.read_from_dict(d)
    return fe
# Autoloader function that can read from a pickle file
def feature_extractor_from_file(filepath, base_folder):
    """Load a pickled extractor dict from *filepath* and rebuild the object."""
    # Use a context manager so the file handle is closed even on error
    # (the original ``pickle.load(open(...))`` left the handle dangling).
    with open(filepath, "rb") as fh:
        d = pickle.load(fh)
    return feature_extractor_from_dict(d, base_folder)
# Main class definition
# This mother class is a API definition
class feature_extractor():
    """API/base class for all feature extractors.

    State lives in three places:
      * ``para_dict``    - metadata and hyperparameters (pickled as-is)
      * ``feature_data`` - the computed feature itself
      * ``base_folder``  - prefix under which relative wave paths resolve
    """
    def __init__(self,base_folder, name='base_feature', xlabel='x', ylabel='y',zlabel='z'):
        # Three main memory components off a feature extractor
        self.para_dict = \
        {'name': name,
        'xlabel': xlabel,
        'ylabel': ylabel,
        'zlabel': zlabel,
        'type_name': 'BASE',
        'data_channel_use_str': '',
        'type': feature_extractor_type.BASE,
        'file_name_mainhyperparastr': '',
        'wave_filepath': '',
        'wave_srate': 0,
        'wave_length': 0,
        'wave_channel': [0],
        'hyperpara':{}}
        self.base_folder= base_folder
        self.feature_data = None
    @property
    def name(self):
        # Display name of this extractor instance.
        return self.para_dict['name']
    def __str__(self):
        return '<'+str(self.para_dict['type']) + '>[' + \
               str(self.para_dict['hyperpara']) + ']' + \
               'wav=' +str(self._full_wave_path())
    def set_hyperparamter(self):
        # To be implemented by subclasses.  NOTE: "parameter" is misspelled
        # in this public name; kept for backwards compatibility.
        pass
    def set_hyperparamter_from_fe(self,fe):
        # Copy the hyperparameters from another extractor instance.
        self.para_dict['hyperpara'] = fe.para_dict['hyperpara']
        pass
    def _full_wave_path(self,filepath=None):
        # Resolve a (relative) wave path against base_folder; without an
        # argument, resolve the path stored in para_dict.
        if filepath:
            return os.path.abspath(self.base_folder+filepath)
        else:
            return os.path.abspath(self.base_folder+self.para_dict['wave_filepath'])
    def _read_wav(self, filepath):
        # Load audio either from a path string or from an object that already
        # carries audio (expects .filepath/.channel/.srate attributes).
        if type(filepath) is str:
            # Store the path relative to base_folder so pickles stay portable.
            filepath = filepath.replace(os.path.abspath(self.base_folder),'')
            self.para_dict['wave_filepath'] = filepath
            af, sr = librosa.load(self._full_wave_path(filepath), sr=None, mono=False)
            self.para_dict['wave_srate'] = sr
            self.para_dict['wave_length'] = len(af[0])
        else: # TODO make this more robust
            self.para_dict['wave_filepath'] = filepath.filepath.replace(os.path.abspath(self.base_folder),'')
            af = filepath.channel
            self.para_dict['wave_srate'] = filepath.srate
            self.para_dict['wave_length'] = len(af)
        return af
    def create_from_wav(self, filepath, channel):
        # Subclasses compute their feature from the given wave file/channel.
        pass
    def read_from_dict(self, d):
        # Restore state from a dict produced by get_dict().
        self.para_dict = d['para_dict']
        self.feature_data = d['feature_data']
        pass
    def get_dict(self):
        # Serializable snapshot of the extractor state.
        return {'para_dict': self.para_dict,
                'feature_data': self.feature_data}
    def save_to_file(self, filepath):
        # Pickle the state snapshot to disk.
        pickle.dump(self.get_dict(),
                    open( filepath, "wb" ) )
        # TODO catch errors and ahndling
        pass
    def plot(self):
        print('nothing to plot')
        pass
    def plot_data(self):
        pass
    def flat_feature(self):
        # Presumably returns the feature flattened to a vector -
        # implemented by subclasses (no implementation visible here).
        pass
    @property
    def file_name_mainhyperparastr(self): # returns a string for filename that descripes the most importand hyper parmater like window length etc.
        return self.para_dict['file_name_mainhyperparastr']
    @property
    def type_str(self): # returns a str for filename type of extractor MEL PSD etc.
        return self.para_dict['type_name']
@property
def data_channel_use_str(self): # returns a sting for filename use of channels like how many and concat or stacked
return self.para_dict['data_channel_use_str'] | 35.172662 | 147 | 0.605032 |
9fb6bb2c27f0ded7bcc75df1d6f5fe2d3e86451c | 4,590 | py | Python | test_miplib2017.py | pandat8/ML4LocalBranch_extend | 001839ace3506c8410a30d1f4d3188a3cd95e2dd | [
"MIT"
] | 4 | 2021-10-17T00:26:12.000Z | 2021-12-06T08:41:02.000Z | test_miplib2017.py | pandat8/ML4LocalBranch | 2fb38b12556ea5e62a0313f617e98cd163eaaf7f | [
"MIT"
] | null | null | null | test_miplib2017.py | pandat8/ML4LocalBranch | 2fb38b12556ea5e62a0313f617e98cd163eaaf7f | [
"MIT"
] | null | null | null | from pyscipopt import Model
import pyscipopt
import pathlib
from geco.mips.loading.miplib import Loader
file_directory = './result/miplib2017/miplib2017_purebinary_solved.txt'
with open(file_directory) as fp:
Lines = fp.readlines()
i = 0
for line in Lines:
i += 1
instance_str = line.strip()
# MIP_model = Loader().load_instance(instance_str)
# print(MIP_model.getProbName())
print(i)
# sample_files = [str(path) for path in pathlib.Path(instance_directory).glob(filename)]
# print(sample_files)
# i = 0
# for instance in sample_files:
# print(instance)
# MIP_model = Model()
# MIP_model.readProblem(instance)
# print(MIP_model.getProbName())
# print('Number of variables', MIP_model.getNVars())
# print('Number of binary variables', MIP_model.getNBinVars())
#
# print("Solving first solution ...")
# MIP_model.setParam('presolving/maxrounds', 0)
# MIP_model.setParam('presolving/maxrestarts', 0)
# MIP_model.setParam("display/verblevel", 0)
# MIP_model.setParam("limits/solutions", 1)
# MIP_model.optimize()
#
# status = MIP_model.getStatus()
# stage = MIP_model.getStage()
# print("* Solve status: %s" % status)
# print("* Solve stage: %s" % stage)
# n_sols = MIP_model.getNSols()
# print('* number of solutions : ', n_sols)
# obj = MIP_model.getObjVal()
# print('* first sol obj : ', obj)
# print("first solution solving time: ", MIP_model.getSolvingTime())
#
# MIP_model_copy, MIP_copy_vars, success = MIP_model.createCopy(problemName='Copy',
# origcopy=True)
# print("Solving root node ...")
# MIP_model_copy.resetParams()
# MIP_model_copy.setParam('presolving/maxrounds', 0)
# MIP_model_copy.setParam('presolving/maxrestarts', 0)
# MIP_model_copy.setParam("display/verblevel", 0)
# MIP_model_copy.setParam("limits/nodes", 1)
# MIP_model_copy.optimize()
#
# status = MIP_model_copy.getStatus()
# stage = MIP_model_copy.getStage()
# print("* Solve status: %s" % status)
# print("* Solve stage: %s" % stage)
# n_sols = MIP_model_copy.getNSols()
# print('* number of solutions : ', n_sols)
# obj_root = MIP_model_copy.getObjVal()
# print('* root node obj : ', obj_root)
# print("root node solving time: ", MIP_model_copy.getSolvingTime())
# t_firstlp = MIP_model_copy.getFirstLpTime()
# print("first LP time : ", t_firstlp)
#
# lp_status = MIP_model_copy.getLPSolstat()
# print("* LP status: %s" % lp_status) # 1:optimal
# if lp_status:
# print('LP of root node is solved!')
# lp_obj = MIP_model_copy.getLPObjVal()
# print("LP objective: ", lp_obj)
#
# incumbent_solution_first = MIP_model.getBestSol()
# incumbent_solution_root = MIP_model_copy.getBestSol()
# first_sol_check = MIP_model.checkSol(solution=incumbent_solution_first)
#
# if first_sol_check:
# print('first solution is valid')
# else:
# print('Warning: first solution is not valid!')
# root_sol_check = MIP_model.checkSol(solution=incumbent_solution_root)
# if root_sol_check:
# print('root node solution is valid')
# else:
# print('Warning: root node solution is not valid!')
#
# if (not status == 'optimal') and first_sol_check and root_sol_check:
#
# if i > -1:
#
# MIP_model_copy, MIP_copy_vars, success = MIP_model.createCopy(problemName='Copy2',
# origcopy=True)
# print("Solving to optimal ...")
# MIP_model_copy.resetParams()
# MIP_model_copy.setParam('presolving/maxrounds', 0)
# MIP_model_copy.setParam('presolving/maxrestarts', 0)
# MIP_model_copy.setParam("display/verblevel", 0)
# MIP_model_copy.setParam('limits/time', 600)
# MIP_model_copy.optimize()
# status = MIP_model_copy.getStatus()
# if status == 'optimal':
# print('instance is solved to optimal!')
# # objs.append(MIP_model_copy.getObjVal())
# # times.append(MIP_model_copy.getSolvingTime())
# print("instance:", MIP_model_copy.getProbName(),
# "status:", MIP_model_copy.getStatus(),
# "best obj: ", MIP_model_copy.getObjVal(),
# "solving time: ", MIP_model_copy.getSolvingTime())
# i += 1
# else:
# "no solution"
#
# print("\n")
| 40.619469 | 96 | 0.618519 |
3bd5f275f7e00cdcd45cbcc5fbac917ef8b46fef | 2,408 | py | Python | naslib/predictors/lce_m/lce_m.py | NUDTNASLab/NASLib | 451cdb4738a7c1501ac62f78727c6244039dc657 | [
"Apache-2.0"
] | 1 | 2022-03-28T09:35:33.000Z | 2022-03-28T09:35:33.000Z | naslib/predictors/lce_m/lce_m.py | NUDTNASLab/NASLib | 451cdb4738a7c1501ac62f78727c6244039dc657 | [
"Apache-2.0"
] | null | null | null | naslib/predictors/lce_m/lce_m.py | NUDTNASLab/NASLib | 451cdb4738a7c1501ac62f78727c6244039dc657 | [
"Apache-2.0"
] | null | null | null | # This code is mostly from https://github.com/automl/pybnn
# pybnn authors: Aaron Klein, Moritz Freidank
import numpy as np
from naslib.predictors.predictor import Predictor
from naslib.predictors.lce_m.learning_curves import MCMCCurveModelCombination
class LCEMPredictor(Predictor):
    """Learning-curve extrapolation predictor (MCMC model combination).

    Fits an MCMC ensemble of parametric learning-curve models to the
    partial learning curve of each architecture and predicts the
    validation accuracy at the search space's final epoch.
    """

    def __init__(self, metric=None):
        self.metric = metric

    def query(self, xtest, info):
        """Predict final accuracies (percent) for the architectures in ``xtest``.

        ``info`` is a list of dicts, one per architecture, each carrying a
        partial learning curve in percent under the ``"lc"`` key.
        """
        # Curves are given in percent; the curve models work on [0, 1].
        learning_curves = np.array([np.array(inf["lc"]) / 100 for inf in info])
        trained_epochs = len(info[0]["lc"])
        t_idx = np.arange(1, trained_epochs + 1)

        # Final epoch and fallback guess per search space.
        # NOTE(review): ``self.ss_type`` is set elsewhere (base Predictor /
        # pipeline) - not visible in this file.
        if self.ss_type == "nasbench201":
            final_epoch = 200
            default_guess = 85.0
        elif self.ss_type == "darts":
            final_epoch = 98
            default_guess = 93.0
        elif self.ss_type == "nlp":
            final_epoch = 50
            default_guess = 94.83
        else:
            raise NotImplementedError()

        model = MCMCCurveModelCombination(
            final_epoch + 1,
            nwalkers=50,
            nsamples=800,
            burn_in=500,
            recency_weighting=False,
            soft_monotonicity_constraint=False,
            monotonicity_constraint=True,
            initial_model_weight_ml_estimate=True,
        )
        predictions = []
        for i in range(len(xtest)):
            model.fit(t_idx, learning_curves[i])
            try:
                p = model.predictive_distribution(final_epoch)
                prediction = np.mean(p) * 100
            except AssertionError:
                # catch AssertionError in _split_theta method
                print("caught AssertionError running model")
                prediction = np.nan
            # np.isfinite is False for nan and +/-inf, so one check replaces
            # the redundant ``isnan(x) or not isfinite(x)`` pair.
            if not np.isfinite(prediction):
                print("nan or finite")
                prediction = default_guess + np.random.rand()
            predictions.append(prediction)
        return np.array(predictions)

    def get_data_reqs(self):
        """
        Returns a dictionary with info about whether the predictor needs
        extra info to train/query.
        """
        return {
            "requires_partial_lc": True,
            "metric": self.metric,
            "requires_hyperparameters": False,
            "hyperparams": None,
            "unlabeled": False,
            "unlabeled_factor": 0,
        }
| 31.272727 | 79 | 0.58015 |
1c6268a2a82f94fff035040f6d9a2e25dc497c16 | 422 | py | Python | background_job/migrations/0002_auto_20210224_2353.py | drunkpig/django-background-job | d143db1199fbb453814b254e5f1ec890e43a9709 | [
"MIT"
] | 1 | 2021-07-02T06:22:43.000Z | 2021-07-02T06:22:43.000Z | background_job/migrations/0002_auto_20210224_2353.py | drunkpig/django-background-job | d143db1199fbb453814b254e5f1ec890e43a9709 | [
"MIT"
] | null | null | null | background_job/migrations/0002_auto_20210224_2353.py | drunkpig/django-background-job | d143db1199fbb453814b254e5f1ec890e43a9709 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.13 on 2021-02-24 23:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('background_job', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='jobexechistory',
name='result',
field=models.TextField(blank=True, null=True, verbose_name='执行返回结果'),
),
]
| 22.210526 | 81 | 0.613744 |
54a30c4e60a35ae3cdbd01839e20a9b927ac669d | 3,567 | py | Python | bbb-mon/views.py | BrutalBirdie/bigbluebutton-monitoring | dd979741b15e3356ad888f3a259ad8f9a929b37a | [
"MIT"
] | 29 | 2020-03-24T12:34:15.000Z | 2022-03-04T19:29:52.000Z | bbb-mon/views.py | BrutalBirdie/bigbluebutton-monitoring | dd979741b15e3356ad888f3a259ad8f9a929b37a | [
"MIT"
] | 11 | 2020-03-25T20:42:22.000Z | 2021-12-16T08:55:01.000Z | bbb-mon/views.py | BrutalBirdie/bigbluebutton-monitoring | dd979741b15e3356ad888f3a259ad8f9a929b37a | [
"MIT"
] | 17 | 2020-03-15T00:47:21.000Z | 2021-10-31T11:35:44.000Z | import logging
from collections import OrderedDict
from datetime import datetime
from urllib.parse import urlparse
import xmltodict
import api_lib
import settings
def get_meetings():
    """Return a list of simplified meeting dicts from the BBB API.

    Each entry holds name, id, creation time, participant count, moderator
    display names, and origin-server metadata.  Returns an empty list when
    the API is unreachable or reports no meetings.
    """
    data = api_lib.getMeetings(settings.API_CLIENT)
    if data is None:
        return []
    if data['response']['meetings'] is None:
        return []

    # The XML-to-dict conversion yields a single mapping for one meeting
    # and a list for several; normalize to a list.
    meetings = []
    try:
        meeting_field = data['response']['meetings']['meeting']
        if isinstance(meeting_field, list):
            meetings = meeting_field
        else:
            meetings.append(meeting_field)
    except KeyError:
        logging.warning("Failed to parse meetings")
    except TypeError:
        return []

    response = []
    for meeting in meetings:
        # Skip anything that is not a parsed XML mapping.  isinstance(dict)
        # also accepts OrderedDict (the original ``type(...) == OrderedDict``
        # broke with xmltodict versions returning plain dicts).
        if not isinstance(meeting, dict):
            continue

        moderators = []
        attendees = meeting['attendees']
        if isinstance(attendees, dict):
            attendee_field = attendees['attendee']
            # Same single-vs-list normalization as for meetings.
            attendee_list = attendee_field if isinstance(attendee_field, list) else [attendee_field]
            for attendee in attendee_list:
                if attendee['role'].lower() == "moderator":
                    moderators.append(attendee['fullName'])

        try:
            origin_server = meeting['metadata']['bbb-origin-server-name']
        except (KeyError, TypeError):
            logging.debug("BBB origin server name does not exist for meeting")
            logging.debug("Setting BBB origin as the server itself")
            origin_server = urlparse(settings.API_BASE_URL).netloc

        m = {
            "name": meeting['meetingName'],
            "id": meeting['meetingID'],
            "creation": meeting['createTime'],
            "noUsers": meeting['participantCount'],
            "moderators": moderators,
            "metadata": {
                "origin-server": origin_server,
            }
        }

        # bbb-context is optional in bbb response
        try:
            m['metadata']['origin-context'] = _bbb_context_convert_moodle(meeting['metadata']['bbb-context'])
        except (KeyError, TypeError):
            pass

        response.append(m)
    return response
def _bbb_context_convert_moodle(context_html):
    """
    Returns the first inner node string from the context html string (useful for the context string returned
    by the BigBlueButton Moodle plugin).  Falls back to "" on any parse error.
    """
    # xmltodict needs exactly one root element, which the Moodle snippet lacks.
    context_html = "<root>{}</root>".format(context_html)
    return_str = ""
    try:
        root = xmltodict.parse(context_html)
        if isinstance(root['root'], str):
            # No XML contents, just plain old string
            return root['root']
        for element in root['root']:
            el = root['root'][element]
            if isinstance(el, list) and len(el) > 0:
                return_str = el[0]['#text']
                break
    except Exception as e:
        # Deliberately broad: a malformed context string must never break
        # the meeting listing; log and fall back to "".
        logging.error("Failed to parse BBB context string from Moodle, error: " + str(e))
    return return_str
def get_server():
    """Return static service metadata plus the current timestamp."""
    api_url = settings.API_BASE_URL
    info = {
        "service": "bigbluebutton-monitoring",
        "server": urlparse(api_url).netloc,
        "api": api_url,
        "version": settings.VERSION,
        "datetime": datetime.now().isoformat(),
        "source": "https://github.com/greenstatic/bigbluebutton-monitoring",
    }
    return info
edef01f207a5115b9fd404e2cac6be567774b753 | 42,044 | py | Python | tests/test_modeling_big_bird.py | reichang182/Transformer | 301536b15f1e757c51411800c25876617e9f1191 | [
"Apache-2.0"
] | 2 | 2021-04-22T21:47:19.000Z | 2021-04-30T22:22:16.000Z | tests/test_modeling_big_bird.py | slavetothebiologicalforce/transformers | 6f90c29eaaba898919b7689ab7e2cfce1604cdb8 | [
"Apache-2.0"
] | null | null | null | tests/test_modeling_big_bird.py | slavetothebiologicalforce/transformers | 6f90c29eaaba898919b7689ab7e2cfce1604cdb8 | [
"Apache-2.0"
] | 1 | 2021-04-19T20:49:55.000Z | 2021-04-19T20:49:55.000Z | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch BigBird model. """
import unittest
from tests.test_modeling_common import floats_tensor
from transformers import is_torch_available
from transformers.models.auto import get_values
from transformers.models.big_bird.tokenization_big_bird import BigBirdTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
BigBirdConfig,
BigBirdForCausalLM,
BigBirdForMaskedLM,
BigBirdForMultipleChoice,
BigBirdForPreTraining,
BigBirdForQuestionAnswering,
BigBirdForSequenceClassification,
BigBirdForTokenClassification,
BigBirdModel,
)
from transformers.models.big_bird.modeling_big_bird import BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST
class BigBirdModelTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        seq_length=128,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=256,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=16,
        num_rand_blocks=3,
        position_embedding_type="absolute",
        scope=None,
    ):
        """Store every test hyperparameter verbatim.

        ``parent`` is the unittest TestCase used for assertions; the
        remaining arguments mirror ``BigBirdConfig`` fields (see
        ``prepare_config_and_inputs``) plus test-only tensor sizes.
        """
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.rescale_embeddings = rescale_embeddings
        self.block_size = block_size
        self.num_rand_blocks = num_rand_blocks
        self.position_embedding_type = position_embedding_type
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = BigBirdConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_encoder_decoder=False,
initializer_range=self.initializer_range,
attention_type=self.attention_type,
use_bias=self.use_bias,
rescale_embeddings=self.rescale_embeddings,
block_size=self.block_size,
num_random_blocks=self.num_rand_blocks,
position_embedding_type=self.position_embedding_type,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
next_sentence_label=sequence_labels,
)
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Run BigBirdModel as a decoder with cross-attention enabled and
        check the hidden-state shape across optional-input combinations."""
        config.add_cross_attention = True
        model = BigBirdModel(config)
        model.to(torch_device)
        model.eval()
        # with both encoder hidden states and encoder attention mask
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        # without an explicit encoder attention mask
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        # without any encoder inputs; only this last result is shape-checked
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = BigBirdForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check that decoding with cached ``past_key_values`` produces the
        same hidden states as a full forward pass over the whole sequence."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = BigBirdForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        # full pass over the concatenated sequence, no cache
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        # incremental pass over only the new tokens, reusing the cache
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = BigBirdForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = BigBirdForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = BigBirdForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def create_and_check_for_auto_padding(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
model = BigBirdModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_change_to_full_attn(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
model = BigBirdModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
# the config should not be changed
self.parent.assertTrue(model.config.attention_type == "block_sparse")
@require_torch
class BigBirdModelTest(ModelTesterMixin, unittest.TestCase):
    """Shared model-contract tests for the BigBird family.

    Input/config construction is delegated to BigBirdModelTester; the common
    checks come from ModelTesterMixin.
    """

    # head masking & pruning is currently not supported for big bird
    test_head_masking = False
    test_pruning = False

    # torchscript should be possible, but takes prohibitively long to test.
    # Also torchscript is not an important feature to have in the beginning.
    test_torchscript = False

    all_model_classes = (
        (
            BigBirdModel,
            BigBirdForPreTraining,
            BigBirdForMaskedLM,
            BigBirdForCausalLM,
            BigBirdForMultipleChoice,
            BigBirdForQuestionAnswering,
            BigBirdForSequenceClassification,
            BigBirdForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BigBirdForCausalLM,) if is_torch_available() else ()

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                # pretraining additionally needs MLM labels and a next-sentence
                # label; all-zero labels are enough to exercise the loss path
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = BigBirdModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BigBirdConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_retain_grad_hidden_states_attentions(self):
        # bigbird cannot keep gradients in attentions when `attention_type=block_sparse`
        if self.model_tester.attention_type == "original_full":
            super().test_retain_grad_hidden_states_attentions()

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BigBirdForPreTraining.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_various_attn_type(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        # `attn_type` rather than `type`, which would shadow the builtin
        for attn_type in ["original_full", "block_sparse"]:
            config_and_inputs[0].attention_type = attn_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_fast_integration(self):
        # fmt: off
        input_ids = torch.tensor(
            [[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73],[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 12, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 28, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 18, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], # noqa: E231
            dtype=torch.long,
            device=torch_device,
        )
        # fmt: on
        input_ids = input_ids % self.model_tester.vocab_size
        input_ids[1] = input_ids[1] - 1
        attention_mask = torch.ones(input_ids.shape, device=torch_device)
        attention_mask[:, :-10] = 0
        config, _, _, _, _, _, _ = self.model_tester.prepare_config_and_inputs()
        # seeded so the randomly initialized weights are reproducible
        torch.manual_seed(0)
        model = BigBirdModel(config).eval().to(torch_device)
        with torch.no_grad():
            hidden_states = model(input_ids, attention_mask=attention_mask).last_hidden_state
        self.assertTrue(
            torch.allclose(
                hidden_states[0, 0, :5],
                torch.tensor([1.4943, 0.0928, 0.8254, -0.2816, -0.9788], device=torch_device),
                atol=1e-3,
            )
        )

    def test_auto_padding(self):
        # 241 is deliberately not a multiple of the block size
        self.model_tester.seq_length = 241
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_auto_padding(*config_and_inputs)

    def test_for_change_to_full_attn(self):
        # very short sequence; exercises the block-sparse -> full-attention fallback
        self.model_tester.seq_length = 9
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_change_to_full_attn(*config_and_inputs)
@require_torch
@slow
class BigBirdModelIntegrationTest(unittest.TestCase):
    """Slow tests that run pretrained BigBird checkpoints and compare small
    output slices against hard-coded reference values."""
    # we can have this true once block_sparse attn_probs works accurately
    test_attention_probs = False
    def _get_dummy_input_ids(self):
        """Return a fixed (1, 128) batch of token ids shared by several tests."""
        # fmt: off
        ids = torch.tensor(
            [[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], # noqa: E231
            dtype=torch.long,
            device=torch_device,
        )
        # fmt: on
        return ids
    def test_inference_block_sparse_pretraining(self):
        """Pretraining heads with block-sparse attention on a 4096-token input."""
        model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base", attention_type="block_sparse")
        model.to(torch_device)
        input_ids = torch.tensor([[20920, 232, 328, 1437] * 1024], dtype=torch.long, device=torch_device)
        outputs = model(input_ids)
        prediction_logits = outputs.prediction_logits
        seq_relationship_logits = outputs.seq_relationship_logits
        self.assertEqual(prediction_logits.shape, torch.Size((1, 4096, 50358)))
        self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2)))
        expected_prediction_logits_slice = torch.tensor(
            [
                [-0.2420, -0.6048, -0.0614, 7.8422],
                [-0.0596, -0.0104, -1.8408, 9.3352],
                [1.0588, 0.7999, 5.0770, 8.7555],
                [-0.1385, -1.7199, -1.7613, 6.1094],
            ],
            device=torch_device,
        )
        self.assertTrue(
            torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4)
        )
        expected_seq_relationship_logits = torch.tensor([[58.8196, 56.3629]], device=torch_device)
        self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4))
    def test_inference_full_pretraining(self):
        """Pretraining heads with full (original) attention on a 2048-token input."""
        model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base", attention_type="original_full")
        model.to(torch_device)
        input_ids = torch.tensor([[20920, 232, 328, 1437] * 512], dtype=torch.long, device=torch_device)
        outputs = model(input_ids)
        prediction_logits = outputs.prediction_logits
        seq_relationship_logits = outputs.seq_relationship_logits
        self.assertEqual(prediction_logits.shape, torch.Size((1, 512 * 4, 50358)))
        self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2)))
        expected_prediction_logits_slice = torch.tensor(
            [
                [0.1499, -1.1217, 0.1990, 8.4499],
                [-2.7757, -3.0687, -4.8577, 7.5156],
                [1.5446, 0.1982, 4.3016, 10.4281],
                [-1.3705, -4.0130, -3.9629, 5.1526],
            ],
            device=torch_device,
        )
        self.assertTrue(
            torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4)
        )
        expected_seq_relationship_logits = torch.tensor([[41.4503, 41.2406]], device=torch_device)
        self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4))
    def test_block_sparse_attention_probs(self):
        """
        Asserting if outputted attention matrix is similar to hard coded attention matrix
        """
        # disabled while block_sparse attention probabilities are approximate;
        # see the test_attention_probs class flag above
        if not self.test_attention_probs:
            return
        model = BigBirdModel.from_pretrained(
            "google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
        )
        model.to(torch_device)
        model.eval()
        config = model.config
        input_ids = self._get_dummy_input_ids()
        hidden_states = model.embeddings(input_ids)
        batch_size, seqlen, _ = hidden_states.size()
        attn_mask = torch.ones(batch_size, seqlen, device=torch_device, dtype=torch.float)
        to_seq_length = from_seq_length = seqlen
        from_block_size = to_block_size = config.block_size
        blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn(
            attn_mask, config.block_size
        )
        from_blocked_mask = to_blocked_mask = blocked_mask
        # drive each layer's self-attention manually so the returned
        # attention_probs can be checked against the context layer
        for i in range(config.num_hidden_layers):
            pointer = model.encoder.layer[i].attention.self
            query_layer = pointer.transpose_for_scores(pointer.query(hidden_states))
            key_layer = pointer.transpose_for_scores(pointer.key(hidden_states))
            value_layer = pointer.transpose_for_scores(pointer.value(hidden_states))
            context_layer, attention_probs = pointer.bigbird_block_sparse_attention(
                query_layer,
                key_layer,
                value_layer,
                band_mask,
                from_mask,
                to_mask,
                from_blocked_mask,
                to_blocked_mask,
                pointer.num_attention_heads,
                pointer.num_random_blocks,
                pointer.attention_head_size,
                from_block_size,
                to_block_size,
                batch_size,
                from_seq_length,
                to_seq_length,
                seed=pointer.seed,
                plan_from_length=None,
                plan_num_rand_blocks=None,
                output_attentions=True,
            )
            context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)
            # re-derive the context layer from the probabilities; the two must agree
            cl = torch.einsum("bhqk,bhkd->bhqd", attention_probs, value_layer)
            cl = cl.view(context_layer.size())
            self.assertTrue(torch.allclose(context_layer, cl, atol=0.001))
    def test_block_sparse_context_layer(self):
        """Compare a slice of layer-0 block-sparse attention output against
        hard-coded reference values."""
        model = BigBirdModel.from_pretrained(
            "google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
        )
        model.to(torch_device)
        model.eval()
        config = model.config
        input_ids = self._get_dummy_input_ids()
        dummy_hidden_states = model.embeddings(input_ids)
        attn_mask = torch.ones_like(input_ids, device=torch_device)
        blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn(
            attn_mask, config.block_size
        )
        targeted_cl = torch.tensor(
            [
                [0.1874, 1.5260, 0.2335, -0.0473, -0.0961, 1.8384, -0.0141, 0.1250, 0.0085, -0.0048],
                [-0.0554, 0.0728, 0.1683, -0.1332, 0.1741, 0.1337, -0.2380, -0.1849, -0.0390, -0.0259],
                [-0.0419, 0.0767, 0.1591, -0.1399, 0.1789, 0.1257, -0.2406, -0.1772, -0.0261, -0.0079],
                [0.1860, 1.5172, 0.2326, -0.0473, -0.0953, 1.8291, -0.0147, 0.1245, 0.0082, -0.0046],
                [0.1879, 1.5296, 0.2335, -0.0471, -0.0975, 1.8433, -0.0136, 0.1260, 0.0086, -0.0054],
                [0.1854, 1.5147, 0.2334, -0.0480, -0.0956, 1.8250, -0.0149, 0.1222, 0.0082, -0.0060],
                [0.1859, 1.5184, 0.2334, -0.0474, -0.0955, 1.8297, -0.0143, 0.1234, 0.0079, -0.0054],
                [0.1885, 1.5336, 0.2335, -0.0467, -0.0979, 1.8481, -0.0130, 0.1269, 0.0085, -0.0049],
                [0.1881, 1.5305, 0.2335, -0.0471, -0.0976, 1.8445, -0.0135, 0.1262, 0.0086, -0.0053],
                [0.1852, 1.5148, 0.2333, -0.0480, -0.0949, 1.8254, -0.0151, 0.1225, 0.0079, -0.0055],
                [0.1877, 1.5292, 0.2335, -0.0470, -0.0972, 1.8431, -0.0135, 0.1259, 0.0084, -0.0052],
                [0.1874, 1.5261, 0.2334, -0.0472, -0.0968, 1.8393, -0.0140, 0.1251, 0.0084, -0.0052],
                [0.1853, 1.5151, 0.2331, -0.0478, -0.0948, 1.8256, -0.0154, 0.1228, 0.0086, -0.0052],
                [0.1867, 1.5233, 0.2334, -0.0475, -0.0965, 1.8361, -0.0139, 0.1247, 0.0084, -0.0054],
            ],
            device=torch_device,
        )
        context_layer = model.encoder.layer[0].attention.self(
            dummy_hidden_states,
            band_mask=band_mask,
            from_mask=from_mask,
            to_mask=to_mask,
            from_blocked_mask=blocked_mask,
            to_blocked_mask=blocked_mask,
        )
        context_layer = context_layer[0]
        self.assertEqual(context_layer.shape, torch.Size((1, 128, 768)))
        self.assertTrue(torch.allclose(context_layer[0, 64:78, 300:310], targeted_cl, atol=0.0001))
    def test_tokenizer_inference(self):
        """End-to-end tokenize-then-forward check against reference hidden states."""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        model = BigBirdModel.from_pretrained(
            "google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
        )
        model.to(torch_device)
        text = [
            "Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer’s attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA."
        ]
        inputs = tokenizer(text)
        for k in inputs:
            inputs[k] = torch.tensor(inputs[k], device=torch_device, dtype=torch.long)
        prediction = model(**inputs)
        prediction = prediction[0]
        self.assertEqual(prediction.shape, torch.Size((1, 199, 768)))
        expected_prediction = torch.tensor(
            [
                [-0.0213, -0.2213, -0.0061, 0.0687],
                [0.0977, 0.1858, 0.2374, 0.0483],
                [0.2112, -0.2524, 0.5793, 0.0967],
                [0.2473, -0.5070, -0.0630, 0.2174],
                [0.2885, 0.1139, 0.6071, 0.2991],
                [0.2328, -0.2373, 0.3648, 0.1058],
                [0.2517, -0.0689, 0.0555, 0.0880],
                [0.1021, -0.1495, -0.0635, 0.1891],
                [0.0591, -0.0722, 0.2243, 0.2432],
                [-0.2059, -0.2679, 0.3225, 0.6183],
                [0.2280, -0.2618, 0.1693, 0.0103],
                [0.0183, -0.1375, 0.2284, -0.1707],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(prediction[0, 52:64, 320:324], expected_prediction, atol=1e-4))
    def test_inference_question_answering(self):
        """QA checkpoint: check logits slices and the decoded answers."""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-base-trivia-itc")
        model = BigBirdForQuestionAnswering.from_pretrained(
            "google/bigbird-base-trivia-itc", attention_type="block_sparse", block_size=16, num_random_blocks=3
        )
        model.to(torch_device)
        context = "The BigBird model was proposed in Big Bird: Transformers for Longer Sequences by Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird, is a sparse-attention based transformer which extends Transformer based models, such as BERT to much longer sequences. In addition to sparse attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and random attention approximates full attention, while being computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context, BigBird has shown improved performance on various long document NLP tasks, such as question answering and summarization, compared to BERT or RoBERTa."
        question = [
            "Which is better for longer sequences- BigBird or BERT?",
            "What is the benefit of using BigBird over BERT?",
        ]
        inputs = tokenizer(
            question,
            [context, context],
            padding=True,
            return_tensors="pt",
            add_special_tokens=True,
            max_length=256,
            truncation=True,
        )
        inputs = {k: v.to(torch_device) for k, v in inputs.items()}
        start_logits, end_logits = model(**inputs).to_tuple()
        # fmt: off
        target_start_logits = torch.tensor(
            [[-8.9304, -10.3849, -14.4997, -9.6497, -13.9469, -7.8134, -8.9687, -13.3585, -9.7987, -13.8869, -9.2632, -8.9294, -13.6721, -7.3198, -9.5434, -11.2641, -14.3245, -9.5705, -12.7367, -8.6168, -11.083, -13.7573, -8.1151, -14.5329, -7.6876, -15.706, -12.8558, -9.1135, 8.0909, -3.1925, -11.5812, -9.4822], [-11.5595, -14.5591, -10.2978, -14.8445, -10.2092, -11.1899, -13.8356, -10.5644, -14.7706, -9.9841, -11.0052, -14.1862, -8.8173, -11.1098, -12.4686, -15.0531, -11.0196, -13.6614, -10.0236, -11.8151, -14.8744, -9.5123, -15.1605, -8.6472, -15.4184, -8.898, -9.6328, -7.0258, -11.3365, -14.4065, -10.2587, -8.9103]], # noqa: E231
            device=torch_device,
        )
        target_end_logits = torch.tensor(
            [[-12.4131, -8.5959, -15.7163, -11.1524, -15.9913, -12.2038, -7.8902, -16.0296, -12.164, -16.5017, -13.3332, -6.9488, -15.7756, -13.8506, -11.0779, -9.2893, -15.0426, -10.1963, -17.3292, -12.2945, -11.5337, -16.4514, -9.1564, -17.5001, -9.1562, -16.2971, -13.3199, -7.5724, -5.1175, 7.2168, -10.3804, -11.9873], [-10.8654, -14.9967, -11.4144, -16.9189, -14.2673, -9.7068, -15.0182, -12.8846, -16.8716, -13.665, -10.3113, -15.1436, -14.9069, -13.3364, -11.2339, -16.0118, -11.8331, -17.0613, -13.8852, -12.4163, -16.8978, -10.7772, -17.2324, -10.6979, -16.9811, -10.3427, -9.497, -13.7104, -11.1107, -13.2936, -13.855, -14.1264]], # noqa: E231
            device=torch_device,
        )
        # fmt: on
        self.assertTrue(torch.allclose(start_logits[:, 64:96], target_start_logits, atol=1e-4))
        self.assertTrue(torch.allclose(end_logits[:, 64:96], target_end_logits, atol=1e-4))
        input_ids = inputs["input_ids"].tolist()
        # decode the argmax start..end span for each example
        answer = [
            input_ids[i][torch.argmax(start_logits, dim=-1)[i] : torch.argmax(end_logits, dim=-1)[i] + 1]
            for i in range(len(input_ids))
        ]
        answer = tokenizer.batch_decode(answer)
        self.assertTrue(answer == ["BigBird", "global attention"])
    def test_fill_mask(self):
        """Masked-LM checkpoint predicts the expected token for [MASK]."""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        model = BigBirdForMaskedLM.from_pretrained("google/bigbird-roberta-base")
        model.to(torch_device)
        input_ids = tokenizer("The goal of life is [MASK] .", return_tensors="pt").input_ids.to(torch_device)
        logits = model(input_ids).logits
        # [MASK] is token at 6th position
        pred_token = tokenizer.decode(torch.argmax(logits[0, 6:7], axis=-1))
        self.assertEqual(pred_token, "happiness")
    def test_auto_padding(self):
        """A 241-token input (not a block multiple) is padded internally and
        the output slice matches hard-coded reference values."""
        model = BigBirdModel.from_pretrained(
            "google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
        )
        model.to(torch_device)
        model.eval()
        input_ids = torch.tensor([200 * [10] + 40 * [2] + [1]], device=torch_device, dtype=torch.long)
        output = model(input_ids).to_tuple()[0]
        # fmt: off
        target = torch.tensor(
            [[-0.045136, -0.068013, 0.12246, -0.01356, 0.018386, 0.025333, -0.0044439, -0.0030996, -0.064031, 0.0006439], [-0.045018, -0.067638, 0.12317, -0.013998, 0.019216, 0.025695, -0.0043705, -0.0031895, -0.063153, 0.00088899], [-0.045042, -0.067305, 0.1234, -0.014512, 0.020057, 0.026084, -0.004615, -0.0031728, -0.062442, 0.0010263], [-0.044589, -0.067655, 0.12416, -0.014287, 0.019416, 0.026065, -0.0050958, -0.002702, -0.063158, 0.0004827], [-0.044627, -0.067535, 0.1239, -0.014319, 0.019491, 0.026213, -0.0059482, -0.0025906, -0.063116, 0.00014669], [-0.044899, -0.067704, 0.12337, -0.014231, 0.019256, 0.026345, -0.0065565, -0.0022938, -0.063433, -0.00011409], [-0.045599, -0.067764, 0.12235, -0.014151, 0.019206, 0.026417, -0.0068965, -0.0024494, -0.063313, -4.4499e-06], [-0.045557, -0.068372, 0.12199, -0.013747, 0.017962, 0.026103, -0.0070607, -0.0023552, -0.06447, -0.00048756], [-0.045334, -0.068913, 0.1217, -0.013566, 0.01693, 0.025745, -0.006311, -0.0024903, -0.065575, -0.0006719], [-0.045171, -0.068726, 0.12164, -0.013688, 0.017139, 0.025629, -0.005213, -0.0029412, -0.065237, -0.00020669], [-0.044411, -0.069267, 0.12206, -0.013645, 0.016212, 0.025589, -0.0044121, -0.002972, -0.066277, -0.00067963], [-0.043487, -0.069792, 0.1232, -0.013663, 0.015303, 0.02613, -0.0036294, -0.0030616, -0.067483, -0.0012642], [-0.042622, -0.069287, 0.12469, -0.013936, 0.016204, 0.026474, -0.0040534, -0.0027365, -0.066994, -0.0014148], [-0.041879, -0.070031, 0.12593, -0.014047, 0.015082, 0.027751, -0.0040683, -0.0027189, -0.068985, -0.0027146]], # noqa: E231
            device=torch_device,
        )
        # fmt: on
        self.assertEqual(output.shape, torch.Size((1, 241, 768)))
        self.assertTrue(torch.allclose(output[0, 64:78, 300:310], target, atol=0.0001))
| 46.50885 | 1,570 | 0.652507 |
bbeb887ae39571e84fece1f4e6e092acef8e4d0a | 2,524 | py | Python | api/src/rate_limit/rate_limit.py | ttbud/ttbud | 2162c4f3a49d706f38b0791d88db20f6b2098ed6 | [
"MIT"
] | 5 | 2020-05-21T17:44:08.000Z | 2021-09-27T03:57:45.000Z | api/src/rate_limit/rate_limit.py | ttbud/ttbud | 2162c4f3a49d706f38b0791d88db20f6b2098ed6 | [
"MIT"
] | 220 | 2020-05-05T01:11:22.000Z | 2022-02-26T20:14:26.000Z | api/src/rate_limit/rate_limit.py | ttbud/ttbud | 2162c4f3a49d706f38b0791d88db20f6b2098ed6 | [
"MIT"
] | 1 | 2020-05-21T17:45:21.000Z | 2020-05-21T17:45:21.000Z | from __future__ import annotations
from typing import Protocol, AsyncContextManager, Iterator
MAX_ROOMS_PER_TEN_MINUTES = 50
MAX_CONNECTIONS_PER_USER = 10
MAX_CONNECTIONS_PER_ROOM = 20
SERVER_LIVENESS_EXPIRATION_SECONDS = 60 * 10
class RoomFullException(Exception):
pass
class TooManyConnectionsException(Exception):
pass
class TooManyRoomsCreatedException(Exception):
pass
class RateLimiter(Protocol):
async def acquire_connection(self, user_id: str, room_id: str) -> None:
"""
Reserve a connection for user identified by user_id.
:param user_id: A string uniquely identifying the user, should be the
same across servers like the IP address of the user
:param room_id: The unique room id the user is connecting to
:raises TooManyConnectionsException if the user already has
MAX_CONNECTIONS_PER_USER active connections
"""
...
async def release_connection(self, user_id: str, room_id: str) -> None:
"""
Release a connection for the given user id
:param room_id: The unique room id the user is disconnecting from
:param user_id: A string uniquely identifying the user, should be the
same across servers like the IP address of the user
"""
...
def rate_limited_connection(
self, user_id: str, room_id: str
) -> AsyncContextManager:
"""
Reserve a connection for the given user_id for the duration of the
context
:param room_id: The unique room id the user is connecting to
:param user_id: A string uniquely identifying the user, should be the
same across servers like the IP address of the user
:raises TooManyConnectionsException if the user already has
MAX_CONNECTIONS_PER_USER active connections
"""
...
async def refresh_server_liveness(self, user_ids: Iterator[str]) -> None:
"""
This function should be called every
SERVER_LIVENESS_EXPIRATION_SECONDS/3 while the server is operating
"""
...
async def acquire_new_room(self, user_id: str) -> None:
"""
Increment the number of rooms this user has created in the last ten minutes
:param user_id: A string uniquely identifying the user, should be the
same across servers like the IP address of the user
:raises TooManyRoomsCreatedException if the user has already created too
many rooms recently
"""
...
| 33.653333 | 83 | 0.684231 |
e55a3d0e5b74aa1b02dad9163bf5c120675f5d51 | 687 | py | Python | scripts/check_doc_requirements.py | giswqs/jupyter-book | 5e12cb324afca1192f2354689d9d5755329e1c9d | [
"BSD-3-Clause"
] | 561 | 2019-01-11T09:45:29.000Z | 2020-04-23T16:37:37.000Z | scripts/check_doc_requirements.py | QDaria/jupyter-book | e372c718e4cf1515eda8157138857d5d885d0486 | [
"BSD-3-Clause"
] | 317 | 2019-01-14T17:15:42.000Z | 2020-04-23T16:43:27.000Z | scripts/check_doc_requirements.py | QDaria/jupyter-book | e372c718e4cf1515eda8157138857d5d885d0486 | [
"BSD-3-Clause"
] | 135 | 2019-01-11T01:37:05.000Z | 2020-04-22T11:01:59.000Z | #!/usr/bin/env python3
import sys
from pathlib import Path
import tomli
def check_reqs(pyproject_path="pyproject.toml", req_path=".binder/requirements.txt"):
with open(pyproject_path, "rb") as f:
toml_dict = tomli.load(f)
optional_deps = toml_dict["project"]["optional-dependencies"]
sphinx_content = "\n".join(optional_deps["sphinx"]).strip()
sphinx_content = "# Copied from 'sphinx' extra of pyproject.toml\n" + sphinx_content
req_content = Path(req_path).read_text()
if sphinx_content != req_content.strip():
Path(req_path).write_text(sphinx_content + "\n")
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
check_reqs()
| 28.625 | 88 | 0.689956 |
aaf5a46095d0c0b2b77fe7e2c2fa0e7de481f659 | 44,668 | py | Python | rangeSlider.py | halsafar/rangeslider | 3d134545aa3e8c6ac4c629fdf85caff7c62fd4d2 | [
"Beerware"
] | 2 | 2018-03-05T08:42:45.000Z | 2020-05-30T10:54:53.000Z | rangeSlider.py | halsafar/rangeslider | 3d134545aa3e8c6ac4c629fdf85caff7c62fd4d2 | [
"Beerware"
] | 1 | 2019-06-07T19:42:41.000Z | 2019-06-07T19:42:41.000Z | rangeSlider.py | halsafar/rangeslider | 3d134545aa3e8c6ac4c629fdf85caff7c62fd4d2 | [
"Beerware"
] | 4 | 2018-03-16T23:52:46.000Z | 2020-06-03T19:24:16.000Z | '''
cmpt481
assignment 1 - RangleSlider in Python/Tk
Stephen Damm
sad503
10251739
February 2010
'''
import logging
from Tkinter import *
from Tkinter import Canvas
'''
logging.INFO
logging.ERROR
logging.WARN
'''
# NOTE(review): LOGGING_LEVEL is defined but never applied to a logger or
# handler anywhere in the visible code -- confirm it is consumed elsewhere.
LOGGING_LEVEL = logging.INFO
'''
logging global
'''
# Module-wide logger shared by the view and controller classes below.
log = logging.getLogger("default")
'''
The view (V in MVC)
A Tkinter Canvas subclass that renders a two-caret range slider (bar,
slider band, left/right carets, optional tick marks) and wires canvas
events to a RangeSliderController.  All geometry is recomputed from the
current canvas size on every redraw.
'''
class RangeSlider(Canvas):
    '''
    Fields
    - meaningful names
    '''
    # NOTE(review): these are class attributes shared by all instances until
    # first assignment through self; the mutable list defaults in particular
    # would be shared across multiple RangeSlider instances.
    __canvasWidth = 0
    __canvasHeight = 0
    __canvasCenterX = 0
    __canvasCenterY = 0
    __majorTickSpacing = 10
    __minorTickSpacing = 5
    __paintTicks = False
    __leftCaretId = 0
    __rightCaretId = 0
    __barId = 0
    __sliderId = 0
    __majorTicks = []
    __minorTicks = []
    # NOTE(review): apparently a typo for __tickText; never read anywhere --
    # __tickText itself is (re)created in __draw.
    __textText = []
    # defaults for widget look`n`feel
    __sliderColor = "gray60"
    __sliderNoFocusColor = "gray80"
    __sliderHighlightedColor = "gray40"
    __sliderOutlineColor = "black"
    __sliderNoFocusOutlineColor = "gray80"
    __barColor = "gray85"
    __barNoFocusColor = "gray95"
    __barOutlineColor = "black"
    __barNoFocusOutlineColor = "gray80"
    __barWidthPercent = 0.90
    __barHeightPercent = 0.05
    __barBevelWidthPercent = 0.01
    __caretColor = "gray70"
    __caretNoFocusColor = "gray80"
    __caretHighlightedColor = "gray40"
    __caretOutlineColor = "black"
    __caretNoFocusOutlineColor = "gray80"
    __caretWidthPercent = 0.035
    __caretHeightPercent = 1.50
    __tickOutlineColor = "black"
    __tickNoFocusOutlineColor = "gray80"
    __tickWidthPercent = 0.001
    __majorTickHeightPercent = 0.10
    __minorTickHeightPercent = 0.05
    # store values for easy look up of current dimensions
    __barX = 0
    __barY = 0
    __barWidth = 0
    __barHeight = 0
    __caretWidth = 0
    __caretHeight = 0
    __inFocus = False
    __highlightedId = 0

    '''
    Constructor
    '''
    def __init__(self, master, **cnf):
        Canvas.__init__(self, master, highlightthickness=0)
        # model/controller are created here; the controller re-positions the
        # canvas items whenever the model notifies a change
        self.__model = RangeSliderModel()
        self.__controller = RangeSliderController(self.__model, self)
        self.configure(**cnf)
        self.__model.subscribe(self.__controller.update)
        self.bind("<Configure>", self.__resize)
        self.bind("<Key>", self.__controller.rangeSlider_onKeyPress)
        # critical to the focus subsystem!
        self.master.bind("<Button>", self.__focusCheck, add="+")
        self.master.bind("<Key>", self.__focusCheck, add="+")

    '''
    Configure
    - pops all the RangeSlider specific variables off the cnf dict
    - uses sane default values if no value is found
    '''
    # NOTE(review): leftover keys in cnf are silently dropped -- they are not
    # forwarded to Canvas.configure, so plain Tk options passed here have no
    # effect.  The bare except clauses also swallow any error raised by the
    # model setters, not just a missing key.
    def configure(self, **cnf):
        try:
            lowerBound = cnf.pop('lowerBound')
            self.__model.setLowerBound(lowerBound)
        except:
            self.__model.setLowerBound(0)
        try:
            upperBound = cnf.pop('upperBound')
            self.__model.setUpperBound(upperBound)
        except:
            self.__model.setUpperBound(100)
        try:
            initialLowerBound = cnf.pop('initialLowerBound')
            self.__model.setLower(initialLowerBound)
        except:
            self.__model.setLower(0)
        try:
            initialUpperBound = cnf.pop('initialUpperBound')
            self.__model.setUpper(initialUpperBound)
        except:
            self.__model.setUpper(100)
        try:
            sliderColor = cnf.pop('sliderColor')
            self.__sliderColor = sliderColor
        except:
            None
        try:
            sliderHighlightedColor = cnf.pop('sliderHighlightedColor')
            self.__sliderHighlightedColor = sliderHighlightedColor
        except:
            None
        try:
            sliderNoFocusColor = cnf.pop('sliderNoFocusColor')
            self.__sliderNoFocusColor = sliderNoFocusColor
        except:
            None
        try:
            sliderOutlineColor = cnf.pop('sliderOutlineColor')
            self.__sliderOutlineColor = sliderOutlineColor
        except:
            None
        try:
            sliderNoFocusOutlineColor = cnf.pop('sliderNoFocusOutlineColor')
            self.__sliderNoFocusOutlineColor = sliderNoFocusOutlineColor
        except:
            None
        try:
            barColor = cnf.pop('barColor')
            self.__barColor = barColor
        except:
            None
        # NOTE(review): __barHighlightedColor has no class-level default and
        # is never read elsewhere in this class.
        try:
            barHighlightedColor = cnf.pop('barHighlightedColor')
            self.__barHighlightedColor = barHighlightedColor
        except:
            None
        try:
            barNoFocusColor = cnf.pop('barNoFocusColor')
            self.__barNoFocusColor = barNoFocusColor
        except:
            None
        try:
            barOutlineColor = cnf.pop('barOutlineColor')
            self.__barOutlineColor = barOutlineColor
        except:
            None
        try:
            barNoFocusOutlineColor = cnf.pop('barNoFocusOutlineColor')
            self.__barNoFocusOutlineColor = barNoFocusOutlineColor
        except:
            None
        try:
            caretColor = cnf.pop('caretColor')
            self.__caretColor = caretColor
        except:
            None
        try:
            caretHighlightedColor = cnf.pop('caretHighlightedColor')
            self.__caretHighlightedColor = caretHighlightedColor
        except:
            None
        try:
            caretNoFocusColor = cnf.pop('caretNoFocusColor')
            self.__caretNoFocusColor = caretNoFocusColor
        except:
            None
        try:
            caretOutlineColor = cnf.pop('caretOutlineColor')
            self.__caretOutlineColor = caretOutlineColor
        except:
            None
        try:
            caretNoFocusOutlineColor = cnf.pop('caretNoFocusOutlineColor')
            self.__caretNoFocusOutlineColor = caretNoFocusOutlineColor
        except:
            None
        try:
            barWidthPercent = cnf.pop('barWidthPercent')
            self.__barWidthPercent = barWidthPercent
        except:
            None
        try:
            barHeightPercent = cnf.pop('barHeightPercent')
            self.__barHeightPercent = barHeightPercent
        except:
            None
        try:
            caretWidthPercent = cnf.pop('caretWidthPercent')
            self.__caretWidthPercent = caretWidthPercent
        except:
            None
        try:
            caretHeightPercent = cnf.pop('caretHeightPercent')
            self.__caretHeightPercent = caretHeightPercent
        except:
            None

    '''
    Subscribe
    - pass along subscribers to the model changer
    '''
    def subscribe(self, func):
        self.__model.subscribe(func)

    '''
    Accessors/Mutators
    - value accessors delegate to the model; mutators that change visual
      parameters trigger a full redraw
    '''
    def getUpper(self):
        return self.__model.getUpper()

    def setUpper(self, u):
        self.__model.setUpper(u)

    def getLower(self):
        return self.__model.getLower()

    def setLower(self, l):
        self.__model.setLower(l)

    def getUpperBound(self):
        return self.__model.getUpperBound()

    def setUpperBound(self, ub):
        self.__model.setUpperBound(ub)
        self.redraw()

    def getLowerBound(self):
        return self.__model.getLowerBound()

    def setLowerBound(self, lb):
        self.__model.setLowerBound(lb)
        self.redraw()

    def getBoundsRange(self):
        return self.__model.getBoundsRange()

    def getRange(self):
        return self.__model.getRange()

    def getMajorTickSpacing(self):
        return self.__majorTickSpacing

    def setMajorTickSpacing(self, majorTS):
        self.__majorTickSpacing = majorTS
        self.redraw()

    def getMinorTickSpacing(self):
        return self.__minorTickSpacing

    def setMinorTickSpacing(self, minorTS):
        self.__minorTickSpacing = minorTS
        self.redraw()

    def getPaintTicks(self):
        return self.__paintTicks

    def setPaintTicks(self, b):
        self.__paintTicks = b
        self.redraw()

    def getSnapToTicks(self):
        return self.__controller.getSnapToTicks()

    def setSnapToTicks(self, b):
        self.__controller.setSnapToTicks(b)

    def getLeftCaretId(self):
        return self.__leftCaretId

    def getRightCaretId(self):
        return self.__rightCaretId

    def getBarId(self):
        return self.__barId

    def getSliderId(self):
        return self.__sliderId

    def getCanvasCenterY(self):
        return self.__canvasCenterY

    def getBarX(self):
        return self.__barX

    def getBarY(self):
        return self.__barY

    def getBarWidth(self):
        return self.__barWidth

    def getBarHeight(self):
        return self.__barHeight

    def getCaretHeight(self):
        return self.__caretHeight

    def getCaretWidth(self):
        return self.__caretWidth

    def getLeftCaretX(self):
        # canvas-space x of the left caret's first polygon point
        return self.coords(self.__leftCaretId)[0]

    def getLeftCaretY(self):
        return self.coords(self.__leftCaretId)[1]

    def getRightCaretX(self):
        return self.coords(self.__rightCaretId)[0]

    def getRightCaretY(self):
        return self.coords(self.__rightCaretId)[1]

    def getTickWidth(self):
        return self.__canvasWidth * self.__tickWidthPercent

    def getHighlightedId(self):
        return self.__highlightedId

    '''
    Resize Function
    - captures the new canvas dimensions
    - causes a redraw
    '''
    def __resize(self, e):
        log.debug("Resize, New Size -- " +
                  str(e.width) + " : " + str(e.height))
        self.__canvasWidth = e.width
        self.__canvasHeight = e.height
        self.__canvasCenterX = e.width / 2.0
        self.__canvasCenterY = e.height / 2.0
        self.redraw()

    '''
    FullDraw Function
    - rebuilds every canvas item from scratch (bar, ticks, slider, carets)
    '''
    def __draw(self,e):
        # only redraw if the canvas is visible still
        if (self.__canvasWidth >= 0 and self.__canvasHeight >= 0):
            log.debug("Doing a fulldraw")
            self.__tickText = []
            self.__createBar()
            # caret size is proportional to the freshly computed bar size
            self.__caretWidth = self.__barWidth*self.__caretWidthPercent
            self.__caretHeight = self.__barHeight*self.__caretHeightPercent
            if (self.__paintTicks):
                self.__createMajorTicks()
                self.__createMinorTicks()
            self.__createSlider()
            self.__createLeftCaret()
            self.__createRightCaret()
            if (self.__inFocus == True):
                self.setFocus()
            self.__changeHighlighted(self.__highlightedId)

    '''
    Draw the bar
    - the drawn rectangle is widened by one tick-width on each side of the
      logical bar so edge ticks line up inside it
    '''
    def __createBar(self):
        self.__barWidth = self.__canvasWidth * self.__barWidthPercent
        newbarWidth = self.__barWidth + (self.__canvasWidth * self.__tickWidthPercent * 2.0)
        self.__barHeight = self.__canvasHeight * self.__barHeightPercent
        self.__barX = self.__canvasCenterX - (self.__barWidth / 2)
        newbarX = self.__barX - (self.__canvasWidth * self.__tickWidthPercent)
        self.__barY = self.__canvasCenterY - (self.__barHeight + (self.__canvasHeight * self.__majorTickHeightPercent)) / 2.0
        self.__barId = self.create_rectangle(newbarX, self.__barY,
                                             newbarX + newbarWidth,
                                             self.__barY + self.__barHeight,
                                             outline=self.__barNoFocusOutlineColor,
                                             fill=self.__barNoFocusColor)

    '''
    Draw left caret
    '''
    def __createLeftCaret(self):
        curId = self.__leftCaretId
        self.__leftCaretId = self.__createCaret(
            self.__caret_onMouseEnter,
            self.__leftCaret_onMouseLeave,
            self.__leftCaret_onMouseClick,
            self.__controller.leftCaret_onMouseMotion)
        # preserve highlight across a full redraw (item id changes)
        if (curId == self.__highlightedId and self.__highlightedId != 0):
            self.__highlightedId = self.__leftCaretId

    '''
    Draw right caret
    '''
    def __createRightCaret(self):
        curId = self.__rightCaretId
        self.__rightCaretId = self.__createCaret(
            self.__caret_onMouseEnter,
            self.__rightCaret_onMouseLeave,
            self.__rightCaret_onMouseClick,
            self.__controller.rightCaret_onMouseMotion)
        if (curId == self.__highlightedId and self.__highlightedId != 0):
            self.__highlightedId = self.__rightCaretId

    '''
    Generic draw caret function Function
    - builds the caret polygon (a rectangle with a triangular point at the
      bottom) at the canvas origin; the controller moves it into place later
    '''
    def __createCaret(self, enterCallback, leaveCallback, clickCallback, motionCallback):
        hw = self.__caretWidth / 2.0
        hh = self.__caretHeight / 2.0
        cx = 0 + hw
        cy = 0 + hh
        p1x = cx - hw
        p1y = 0
        p2x = cx+hw
        p2y = 0
        p3x = cx+hw
        p3y = 0 + hh
        p4x = cx
        p4y = cy + hh
        p5x = cx - hw
        p5y = 0+hh
        p6x = cx - hw
        p6y = 0
        newCaret = self.create_polygon(p1x, p1y,
                                       p2x, p2y,
                                       p3x, p3y,
                                       p4x, p4y,
                                       p5x, p5y,
                                       p6x, p6y,
                                       outline=self.__caretNoFocusOutlineColor,
                                       fill=self.__caretNoFocusColor)
        # controller handles the drag logic; view handles hover feedback
        self.tag_bind(newCaret, "<Button-1>",
                      self.__controller.caret_onMouseClick, add="+")
        self.tag_bind(newCaret, "<Button-3>",
                      self.__controller.caret_onMouseClick, add="+")
        self.tag_bind(newCaret, "<B1-Motion>", motionCallback)
        self.tag_bind(newCaret, "<ButtonRelease-1>",
                      self.__controller.caret_onMouseRelease)
        self.tag_bind(newCaret, "<B1-Motion>",
                      self.__caret_onMouseEnter, add="+")
        self.tag_bind(newCaret, "<Enter>", enterCallback)
        self.tag_bind(newCaret, "<Leave>", leaveCallback)
        self.tag_bind(newCaret,"<Button-1>", clickCallback, add="+")
        self.tag_bind(newCaret,"<Button-3>", clickCallback, add="+")
        return newCaret

    '''
    Draw slider
    - the band between the carets; created at the origin and positioned by
      the controller's update
    '''
    def __createSlider(self):
        curId = self.__sliderId
        self.__sliderId = self.create_rectangle(0, 0,
                                                self.__barWidth, self.__barHeight,
                                                fill=self.__sliderNoFocusColor,
                                                outline=self.__sliderNoFocusOutlineColor)
        if (curId == self.__highlightedId and self.__highlightedId != 0):
            self.__highlightedId = self.__sliderId
        self.tag_bind(self.__sliderId,"<Button-1>",
                      self.__controller.slider_onMouseClick, add="+");
        self.tag_bind(self.__sliderId,"<B1-Motion>",
                      self.__controller.slider_onMouseMotion);
        self.tag_bind(self.__sliderId,"<B1-Motion>",
                      self.__slider_onMouseEnter,
                      add = "+")
        self.tag_bind(self.__sliderId,"<Enter>", self.__slider_onMouseEnter)
        self.tag_bind(self.__sliderId,"<Leave>", self.__slider_onMouseLeave)
        self.tag_bind(self.__sliderId,"<Button-1>", self.__slider_onMouseClick, add="+")
        self.tag_bind(self.__sliderId,"<Button-3>", self.__slider_onMouseClick, add="+")

    '''
    Draw the major ticks
    '''
    def __createMajorTicks(self):
        self.__majorTicks = []
        self.__createTicks(self.__majorTicks,
                           self.__canvasWidth * self.__tickWidthPercent,
                           self.__canvasHeight * self.__majorTickHeightPercent,
                           self.getMajorTickSpacing(),
                           True)

    '''
    Draw the minor ticks
    '''
    def __createMinorTicks(self):
        self.__minorTicks = []
        self.__createTicks(self.__minorTicks,
                           self.__canvasWidth * self.__tickWidthPercent,
                           self.__canvasHeight * self.__minorTickHeightPercent,
                           self.getMinorTickSpacing(),
                           False)

    '''
    Generic draw ticks Function
    - evenly distributes tickCount+1 ticks under the bar; createText adds a
      numeric label under each tick (major ticks only)
    '''
    def __createTicks(self, tickArray, width, height, tickSpacing, createText):
        tickCount = self.__model.getBoundsRange() / tickSpacing
        # zero vision
        if (tickCount == 0):
            return
        tickXStart = self.__barX
        tickYStart = self.__barY + (self.__barHeight)
        tickInterval = (self.__barWidth) / float(tickCount)
        for i in range(0, int(tickCount)+1):
            tickX = tickXStart + (tickInterval * i) - width / 2.0
            newMajorTick = self.create_rectangle(tickX,
                                                 tickYStart,
                                                 tickX + width,
                                                 tickYStart + height,
                                                 outline=self.__tickNoFocusOutlineColor)
            if (createText):
                strVal = self.__model.getLowerBound() + tickSpacing*float(i)
                self.__tickText.append(self.create_text(tickX,
                                                        tickYStart + height + 5,
                                                        text=str("%.2f"%(strVal)),
                                                        fill=self.__tickNoFocusOutlineColor,
                                                        font=("Default", "8", "")))
            self.tag_bind(newMajorTick, "<1>", self.__controller.majorTick_onClick)
            self.tag_bind(newMajorTick, "<Enter>", self.__tick_onMouseEnter)
            self.tag_bind(newMajorTick, "<Leave>", self.__tick_onMouseLeave)
            tickArray.append(newMajorTick)

    '''
    Full redraw logic
    Exposed
    '''
    def redraw(self):
        self.delete(ALL)
        self.__draw(None)
        self.__controller.update(None)

    '''
    Helper - Change Highlighted
    - sorts out the logic when user is clicking around the different
    - functional parts of the program.
    '''
    def __changeHighlighted(self, id):
        oldId = self.__highlightedId
        self.__highlightedId = id
        # unhighlight if necessary
        if (oldId != id):
            if (oldId == self.__sliderId):
                self.__slider_onMouseLeave(None)
                self.itemconfig(self.__sliderId, fill=self.__sliderColor)
            elif (oldId == self.__leftCaretId):
                self.__leftCaret_onMouseLeave(None)
                self.itemconfig(self.__leftCaretId, fill=self.__caretColor)
            elif (oldId == self.__rightCaretId):
                self.__rightCaret_onMouseLeave(None)
                self.itemconfig(self.__rightCaretId, fill=self.__caretColor)
        # highlight new one if necessary
        if (id == self.__sliderId):
            self.__slider_onMouseEnter(None)
            self.itemconfig(self.__sliderId, fill=self.__sliderHighlightedColor)
        elif (id == self.__leftCaretId):
            self.__caret_onMouseEnter(None)
            self.itemconfig(self.__leftCaretId, fill=self.__caretHighlightedColor)
        elif (id == self.__rightCaretId):
            self.__caret_onMouseEnter(None)
            self.itemconfig(self.__rightCaretId, fill=self.__caretHighlightedColor)

    '''
    Event - caretMouseClick
    - cause selection of the caret clicked
    '''
    def __leftCaret_onMouseClick(self, e):
        self.__changeHighlighted(self.__leftCaretId)

    def __rightCaret_onMouseClick(self, e):
        self.__changeHighlighted(self.__rightCaretId)

    '''
    Event - sliderMouseClick
    - cause selection of the slider band
    '''
    def __slider_onMouseClick(self, e):
        self.__changeHighlighted(self.__sliderId)

    '''
    Event - caretMouseEnter
    '''
    def __caret_onMouseEnter(self, e):
        self.master.configure(cursor="hand2")
        if (self.__inFocus):
            self.itemconfig(CURRENT, fill=self.__caretHighlightedColor)

    '''
    Event - caretMouseLeave
    - only un-highlight if the caret is not the currently selected item
    '''
    def __leftCaret_onMouseLeave(self, e):
        self.master.configure(cursor="")
        if (self.__inFocus):
            if not (self.__highlightedId == self.__leftCaretId):
                self.itemconfig(CURRENT, fill=self.__caretColor)

    def __rightCaret_onMouseLeave(self, e):
        self.master.configure(cursor="")
        if (self.__inFocus):
            if not (self.__highlightedId == self.__rightCaretId):
                self.itemconfig(CURRENT, fill=self.__caretColor)

    '''
    Event - sliderMouseEnter
    '''
    def __slider_onMouseEnter(self, e):
        self.master.configure(cursor="hand2")
        if (self.__inFocus):
            self.itemconfig(CURRENT, fill=self.__sliderHighlightedColor)

    '''
    Event - sliderMouseLeave
    '''
    def __slider_onMouseLeave(self, e):
        self.master.configure(cursor="")
        if (self.__inFocus):
            if not (self.__highlightedId == self.__sliderId):
                self.itemconfig(CURRENT, fill=self.__sliderColor)

    '''
    Event - tickMouseEnter
    '''
    def __tick_onMouseEnter(self, e):
        self.master.configure(cursor="center_ptr")

    '''
    Event - tickMouseLeave
    '''
    def __tick_onMouseLeave(self, e):
        self.master.configure(cursor="")

    '''
    Event - handle focus!
    - bound to the toplevel master: any click/keypress outside this widget
      clears focus, any on it gains focus
    '''
    def __focusCheck(self, e):
        log.debug("FocusCheck...")
        if (e.widget == self and not self.__inFocus):
            self.setFocus()
        elif (e.widget != self and self.__inFocus):
            self.clearFocus()

    '''
    Helper - Clear Focus
    - sets the items to be in their non focus state
    '''
    def clearFocus(self):
        self.__inFocus = False
        self.itemconfig(self.__barId, fill=self.__barNoFocusColor, outline=self.__barNoFocusOutlineColor)
        self.itemconfig(self.__sliderId, fill=self.__sliderNoFocusColor, outline=self.__sliderNoFocusOutlineColor)
        self.itemconfig(self.__leftCaretId, fill=self.__caretNoFocusColor, outline=self.__caretNoFocusOutlineColor)
        self.itemconfig(self.__rightCaretId, fill=self.__caretNoFocusColor, outline=self.__caretNoFocusOutlineColor)
        for id in self.__majorTicks:
            self.itemconfig(id, outline=self.__tickNoFocusOutlineColor);
        for id in self.__minorTicks:
            self.itemconfig(id, outline=self.__tickNoFocusOutlineColor);
        for id in self.__tickText:
            self.itemconfig(id, fill=self.__tickNoFocusOutlineColor);

    '''
    Helper - Set Focus
    - sets the focus colors and grabs Tk keyboard focus
    '''
    def setFocus(self):
        self.__inFocus = True
        self.focus_set()
        self.itemconfig(self.__barId, fill=self.__barColor, outline=self.__barOutlineColor)
        self.itemconfig(self.__sliderId, fill=self.__sliderColor, outline=self.__sliderOutlineColor)
        self.itemconfig(self.__leftCaretId, fill=self.__caretColor, outline=self.__caretOutlineColor)
        self.itemconfig(self.__rightCaretId, fill=self.__caretColor, outline=self.__caretOutlineColor)
        for id in self.__majorTicks:
            self.itemconfig(id, outline=self.__tickOutlineColor);
        for id in self.__minorTicks:
            self.itemconfig(id, outline=self.__tickOutlineColor);
        for id in self.__tickText:
            self.itemconfig(id, fill=self.__tickOutlineColor);
'''
the controller (C in MVC)
Translates canvas mouse/keyboard events into model updates and, on model
change notifications, re-positions the view's canvas items.  All public
coordinate math converts between canvas pixels ("canvas x") and slider
values ("bar x").
'''
class RangeSliderController():
    '''
    Fields
    '''
    __model = None
    __view = None
    __lastMouseX = 0
    __snapToTicks = False

    '''
    Constructor
    '''
    def __init__(self, rs_model, rs_view):
        self.__model = rs_model
        self.__view = rs_view

    '''
    Accessors/Mutators
    '''
    def getSnapToTicks(self):
        return self.__snapToTicks

    def setSnapToTicks(self, b):
        self.__snapToTicks = b

    '''
    Event - on Key press
    - handles Left/Right arrow presses directed at the canvas; moves the
      currently highlighted item (caret or whole slider) by one pixel-worth
      of slider value, or to the next tick when snap-to-ticks is enabled
    '''
    def rangeSlider_onKeyPress(self, e):
        # unit step
        lowerstep = 0
        upperstep = 0
        # get direction
        direction = 0
        if (e.keysym == "Left"):
            direction = -1
        elif (e.keysym == "Right"):
            direction = 1
        else:
            return
        bRoundUp = False
        if (direction == 1):
            bRoundUp = True
        # one canvas pixel expressed in slider value units
        step = self.__model.getBoundsRange() / self.__view.getBarWidth()
        step *= direction
        if (self.__view.getHighlightedId() == self.__view.getLeftCaretId()):
            if (direction == 1 and self.__view.getLower() >= self.__view.getUpper()):
                self.__view.setLower(self.__view.getUpper())
                return
            if (self.__snapToTicks):
                step += self.__model.getLower()
                step = self.barRoundValue(step, self.__view.getMinorTickSpacing(), bRoundUp)
                step -= self.__model.getLower()
            self.__model.setLower(self.__model.getLower() + step)
            return
        elif (self.__view.getHighlightedId() == self.__view.getRightCaretId()):
            if (self.__snapToTicks):
                step += self.__model.getUpper()
                step = self.barRoundValue(step, self.__view.getMinorTickSpacing(), bRoundUp)
                step -= self.__model.getUpper()
            self.__model.setUpper(self.__model.getUpper() + step)
            return
        elif (self.__view.getHighlightedId() == self.__view.getSliderId()):
            # prevent 0 division
            if (self.__model.getBoundsRange() <= 0):
                return
            if not (self.__snapToTicks):
                step = self.__model.getBoundsRange() / self.__view.getBarWidth()
                step *= direction
                lowerstep = step
                upperstep = step
            else:
                step = self.__model.getBoundsRange() / self.__view.getBarWidth()
                step *= direction
                lowerstep = (self.__model.getLower()+step)
                upperstep = (self.__model.getUpper()+step)
                lowerstep = self.barRoundValue(lowerstep, self.__view.getMinorTickSpacing(), bRoundUp)
                lowerstep -= self.__model.getLower()
                upperstep = self.barRoundValue(upperstep, self.__view.getMinorTickSpacing(), bRoundUp)
                upperstep -= self.__model.getUpper()
            # move the range 1 unit to the direction pressed
            self.__model.setLower(self.__model.getLower() + lowerstep)
            self.__model.setUpper(self.__model.getUpper() + upperstep)

    '''
    Helper - Round to snap
    - rounds value to the nearest multiple of roundToNearest, biased up or
      down according to bRoundUp
    '''
    def barRoundValue(self, value, roundToNearest, bRoundUp):
        tmpVal = (value / roundToNearest) + (-0.5 + int(bRoundUp))
        tmp = int(tmpVal)
        tmpVal = round((tmpVal - tmp) * pow(10, 0))
        nValue = tmp + tmpVal / pow(10, 0)
        roundedValue = nValue * roundToNearest
        return roundedValue

    '''
    Event - Caret On Mouse Click event
    - remember where the drag started
    '''
    def caret_onMouseClick(self, e):
        log.debug("Button " + str(e.num) + " @ " +
                  str(e.x) + " : " + str(e.y))
        self.__lastMouseX = e.x

    '''
    Event - Caret on mouse release
    '''
    def caret_onMouseRelease(self, e):
        return

    '''
    Event - Caret OnMouseMotion
    - moves the caret that fired the event
    - the amount moved is a delta between cur caret bar pos
    - and the amount moved in canvas coords
    '''
    def leftCaret_onMouseMotion(self, e):
        log.debug(str(self.__view.canvasx(e.x)) + " : " +
                  str(self.__view.canvasy(e.y)))
        # clamp to the bar's left edge / the right caret
        rightCaretX = self.__view.getRightCaretX() + (self.__view.getCaretWidth())
        if (self.__view.canvasx(e.x) < self.__view.getBarX()):
            self.__model.setLower(self.__model.getLowerBound())
        # edge case, inside right caret
        elif (self.__view.canvasx(e.x) > rightCaretX):
            self.__model.setLower(self.__model.getUpper())
            self.__lastMouseX = e.x
        else:
            if (self.__snapToTicks):
                newLower = self.__snapCanvasXToSliderValue(e.x)
            else:
                # determine how much to move
                barDistance = self.__mouseMotionToBarDistance(self.__lastMouseX, e.x)
                newLower = self.__model.getLower() + barDistance
            self.__model.setLower(newLower)
            self.__lastMouseX = e.x
        # raise the caret to the top
        if (self.__model.getUpper() == self.__model.getLowerBound()):
            self.__view.tag_raise(self.__view.getRightCaretId())
        else:
            self.__view.tag_raise(self.__view.getLeftCaretId())

    def rightCaret_onMouseMotion(self, e):
        log.debug(str(self.__view.canvasx(e.x)) + " : " +
                  str(self.__view.canvasy(e.y)))
        # clamp to the bar's right edge / the left caret
        leftCaretX = self.__view.getLeftCaretX() + (self.__view.getCaretWidth())
        if (self.__view.canvasx(e.x) > (self.__view.getBarX() + self.__view.getBarWidth())):
            self.__model.setUpper(self.__model.getUpperBound())
        # edge case, inside left caret
        elif (self.__view.canvasx(e.x) < leftCaretX):
            if (self.__model.getLower() <= self.__model.getUpperBound()):
                self.__model.setUpper(self.__model.getLower())
            self.__lastMouseX = e.x
        else:
            if (self.__snapToTicks):
                newUpper = self.__snapCanvasXToSliderValue(e.x)
            else:
                # determine how much to move
                barDistance = self.__mouseMotionToBarDistance(self.__lastMouseX, e.x)
                newUpper = self.__model.getUpper() + barDistance
            self.__model.setUpper(newUpper)
            self.__lastMouseX = e.x
        # raise the caret to the top
        if (self.__model.getLower() >= self.__model.getUpperBound()):
            self.__view.tag_raise(self.__view.getLeftCaretId())
        else:
            self.__view.tag_raise(self.__view.getRightCaretId())

    '''
    Event - Major Tick onClick
    - Snaps the appropriate caret to the position represented by the tick
    - Note: the tick canvas x is converted to bar x
    '''
    def majorTick_onClick(self, e):
        w = CURRENT
        tickCoords = self.__view.coords(w)
        tickWidth = tickCoords[2] - tickCoords[0]
        barX = self.__canvasXToBarX(tickCoords[0] + tickWidth / 2.0)
        if (barX < self.__model.getLower()):
            self.__model.setLower(barX)
        elif (barX > self.__model.getUpper()):
            self.__model.setUpper(barX)

    '''
    Event - Slider onClick
    - save the slider position
    '''
    def slider_onMouseClick(self, e):
        self.__lastMouseX = e.x

    '''
    Event - Slider onMouseMotion
    - allows user to slide the slider by clicking the range inbetween
    '''
    def slider_onMouseMotion(self, e):
        log.debug(str(self.__view.canvasx(e.x)) + " : " +
                  str(self.__view.canvasy(e.y)))
        # leave early if the mouse is not aligned with the slider anymore
        if (self.__view.canvasx(e.x) < self.__view.getBarX()):
            return
        elif (self.__view.canvasx(e.x) > self.__view.getBarX() + self.__view.getBarWidth()):
            return
        # determine how much to move
        barDistance = 0
        lowerAdjust = 0
        upperAdjust = 0
        if (self.__snapToTicks):
            step = self.__mouseMotionToBarDistance(self.__view.canvasx(self.__lastMouseX), self.__view.canvasx(e.x)) / 2.0
            tmp = self.__view.getMinorTickSpacing() / 3.0
            # only commit a move once the drag crosses a third of a tick
            if (abs(step) >= tmp):
                lowerstep = (self.__model.getLower()+step)
                upperstep = (self.__model.getUpper()+step)
                bRoundUp = False
                if (step >= 0):
                    bRoundUp = True
                lowerAdjust = self.barRoundValue(lowerstep, self.__view.getMinorTickSpacing(), bRoundUp)
                lowerAdjust -= self.__model.getLower()
                upperAdjust = self.barRoundValue(upperstep, self.__view.getMinorTickSpacing(), bRoundUp)
                upperAdjust -= self.__model.getUpper()
                # bounds check
                if (lowerAdjust+self.__model.getLower() < self.__model.getLowerBound()):
                    upperAdjust = 0
                if (upperAdjust+self.__model.getUpper() > self.__model.getUpperBound()):
                    lowerAdjust = 0
                if (upperAdjust != 0 or lowerAdjust != 0):
                    self.__lastMouseX = e.x
        else:
            barDistance = self.__mouseMotionToBarDistance(self.__view.canvasx(self.__lastMouseX), self.__view.canvasx(e.x))
            # detect bounds collision
            if self.__model.getLower()+barDistance <= self.__model.getLowerBound():
                barDistance = self.__model.getLowerBound() - self.__model.getLower()
            elif (self.__model.getUpper()+barDistance >= self.__model.getUpperBound()):
                barDistance = self.__model.getUpperBound() - self.__model.getUpper()
            lowerAdjust = barDistance
            upperAdjust = barDistance
            if (barDistance != 0.0):
                self.__lastMouseX = e.x
        # adjust slider
        self.__model.setLower(self.__model.getLower() + lowerAdjust)
        self.__model.setUpper(self.__model.getUpper() + upperAdjust)

    '''
    Update Logic
    - allows for a full update of all positions
    - left exposed for manual calling (it is also the model subscriber)
    '''
    def update(self, e):
        log.debug("Performing RePositioning...")
        # position left caret
        lower = self.__model.getLower()
        if (lower > self.__model.getUpper()):
            lower = self.__model.getUpper()
        if (self.__view.getLeftCaretId() > 0):
            self.__positionCaret(self.__view.getLeftCaretId(),
                                 lower)
        # position right caret
        if (self.__view.getRightCaretId() > 0):
            self.__positionCaret(self.__view.getRightCaretId(),
                                 self.__model.getUpper())
        # position slider
        if (self.__view.getSliderId() > 0):
            self.__updateSlider()

    '''
    PositionCaret helper function
    - performs the simple task of positioning a caret at a point on the bar
    - sliderValue is intended to be in bar coords
    '''
    def __positionCaret(self, caretId, sliderValue):
        caretCoords = self.__view.coords(caretId)
        barY = self.__view.getBarY()
        barYCenter = barY + (self.__view.getBarHeight() / 2.0)
        caretY = self.__view.getCaretHeight() / 2.0
        caretHalfWidth = self.__view.getCaretWidth() / 2.0
        canvasX = self.__barXToCanvasX(sliderValue)
        if (canvasX > self.__view.getBarX() + self.__view.getBarWidth()):
            canvasX = self.__view.getBarX() + self.__view.getBarWidth()
        canvasX = canvasX - caretHalfWidth
        canvasY = barYCenter - caretCoords[1] - caretY
        log.debug("Moving caret to: (slider=%f) %d,%d",
                  sliderValue,
                  canvasX, canvasY)
        # NOTE(review): the second show/hide check below can overwrite the
        # first one's "hidden" state when the bounds are inverted -- confirm
        # whether a caret should ever show while upperBound < lowerBound.
        if (self.__model.getUpperBound() < self.__model.getLowerBound()):
            self.__view.itemconfig(caretId, state="hidden")
        else:
            self.__view.itemconfig(caretId, state="normal")
        if (canvasX < self.__view.getBarX()-caretHalfWidth):
            self.__view.itemconfig(caretId, state="hidden")
        else:
            self.__view.itemconfig(caretId, state="normal")
        self.__view.move(caretId,
                         canvasX - caretCoords[0],
                         canvasY)

    '''
    updateSlider helper function
    - positions the slider at its correct point
    - we convert the distance on the bar into canvas coords
    '''
    def __updateSlider(self):
        sliderId = self.__view.getSliderId()
        sliderCoords = self.__view.coords(self.__view.getSliderId())
        sliderX = self.__view.getLeftCaretX() + (self.__view.getCaretWidth() / 2.0)
        sliderY = self.__view.getBarY()
        sliderHeight = sliderCoords[3] - sliderCoords[1]
        sliderWidth = 0
        if (self.__model.getRange() > 0):
            sliderWidth = self.__barRangeToCanvasDistance(self.__model.getRange())
        if (self.__model.getBoundsRange() < 0):
            # collapse the slider entirely when bounds are inverted
            sliderX = 0
            sliderWidth = 0
            # FIX: was 'sliderHeiht = 0' -- a dead assignment to a misspelled
            # local, which left the slider at its previous height here.
            sliderHeight = 0
            sliderY = 0
        else:
            if (sliderWidth <= 0):
                sliderWidth = 0
            elif (sliderWidth + sliderX > self.__view.getRightCaretX() + (self.__view.getCaretWidth() / 2.0)):
                sliderWidth = (self.__view.getRightCaretX() + (self.__view.getCaretWidth() / 2.0)) - sliderX
        # bounds check
        self.__view.coords(sliderId,
                           sliderX, sliderY,
                           sliderX + sliderWidth, sliderY + sliderHeight)

    '''
    Helper - BarPointX To CanvasPointX
    - converts a value from the model into a point on the bar
    - then converts that bar point to an x value on the canvas
    '''
    def __barXToCanvasX(self, point):
        barLength = self.__view.getBarWidth()
        canvasX = 0
        # prevent trying to draw carets at invalid range
        # this is user problem
        if (self.__model.getBoundsRange() > 0):
            unitPoint = barLength / (self.__model.getBoundsRange())
            translatedBarPoint = (point - self.__model.getLowerBound())
            canvasX = self.__view.getBarX() + (unitPoint * translatedBarPoint)
        return canvasX

    '''
    Helper - BarRange to CanvasDistance
    - this will convert the range on our slider bar to a distance
    - in canvas coords.
    - Note: if the bar length is 0 or the range is 0 this returns 0
    '''
    def __barRangeToCanvasDistance(self, range):
        barLength = self.__view.getBarWidth()
        if (self.__model.getBoundsRange() > 0):
            return barLength * (float(range) / self.__model.getBoundsRange())
        else:
            return 0

    '''
    Helper - CanvasX to BarX
    - Converts an X coordinate in canvas coords to bar coords
    '''
    def __canvasXToBarX(self, canvasX):
        barLength = float(self.__view.getBarWidth())
        unitStep = float(self.__model.getBoundsRange()) / barLength
        # check bounds
        return self.__model.getLowerBound() + (unitStep *
                                               (canvasX - self.__view.getBarX()))

    '''
    Helper - MouseMotionToBarDistance
    - calculate the distance the mouse has moved in canvas coords
    - converts it to bar distance
    '''
    def __mouseMotionToBarDistance(self, lastX, newX):
        barLastMouseX = self.__canvasXToBarX(lastX)
        barNewMouseX = self.__canvasXToBarX(newX)
        return barNewMouseX - barLastMouseX

    '''
    Helper - Snap CanvasX Slider Value
    '''
    def __snapCanvasXToSliderValue(self, canvasX):
        # find nearest tick to mouse pos
        barPointX = self.__canvasXToBarX(canvasX)
        clickPointRounded = round(barPointX / self.__view.getMinorTickSpacing())
        snapX = clickPointRounded * self.__view.getMinorTickSpacing()
        return snapX
'''
The model (M in MVC)
'''
class RangeSliderModel():
    """The model (M in MVC) for the range slider.

    Holds the selected [lower, upper] values, the allowed
    [lowerBound, upperBound] bounds, and a list of observer callbacks that
    are invoked after every mutation.
    """

    def __init__(self, init_color = "black"):
        # BUG FIX: __callbacks used to be a *class* attribute, so every
        # RangeSliderModel instance shared one subscriber list (mutating one
        # slider notified every slider's observers).  All state is now
        # initialized per-instance.
        self.__callbacks = []
        self.__lower = 0
        self.__lowerBound = 0
        self.__upper = 0
        self.__upperBound = 0
        # Kept for interface compatibility; not read anywhere in this class.
        self.__stroke_color = init_color

    def subscribe(self, callback):
        """Register *callback*; it is invoked with a state dict on change."""
        self.__callbacks.append(callback)

    def __notify(self, **state):
        """Notify all subscribers of a change."""
        for call in self.__callbacks:
            call(state)

    def getLower(self):
        return self.__lower

    def setLower(self, l):
        """Set the lower value, clamped to the lower bound; notifies."""
        if (l < self.__lowerBound):
            l = self.__lowerBound
        self.__lower = l
        self.__notify()

    def getUpper(self):
        return self.__upper

    def setUpper(self, u):
        """Set the upper value, clamped into [lower, upperBound]; notifies."""
        if (u > self.__upperBound):
            u = self.__upperBound
        elif (u < self.__lower):
            u = self.__lower
        self.__upper = u
        self.__notify()

    def getLowerBound(self):
        return self.__lowerBound

    def setLowerBound(self, lb):
        self.__lowerBound = lb
        self.__notify()

    def getUpperBound(self):
        return self.__upperBound

    def setUpperBound(self, ub):
        self.__upperBound = ub
        self.__notify()

    def getRange(self):
        """Span of the current selection (upper - lower)."""
        return self.__upper - self.__lower

    def getBoundsRange(self):
        """Span of the allowed bounds (upperBound - lowerBound)."""
        return self.__upperBound - self.__lowerBound
'''
END MEGAWIDGETS
'''
'''
RangeSlider Demo class
- Creates the window and grid bags the widgets
'''
class RangeSliderDemo(Frame):
    """RangeSlider demo window.

    Creates the window, builds two RangeSlider widgets plus entry,
    check-button and button controls that mirror and drive the first
    slider's state, and grid-bags everything together.
    """
    __width = 800
    __height = 600
    __minValueLabel = None
    __maxValueLabel = None
    __rs = None

    def __init__(self):
        """Create all widgets, lay them out and wire up the event handlers."""
        Frame.__init__(self)
        self.master.title("Range Slider Demo")
        # StringVars backing the six entry widgets; traced below so edits
        # are pushed straight into the slider.
        self.__lowerEntryString = StringVar()
        self.__lowerBoundEntryString = StringVar()
        self.__upperEntryString = StringVar()
        self.__upperBoundEntryString = StringVar()
        self.__majorTickEntryString = StringVar()
        self.__minorTickEntryString = StringVar()
        # Center the window on the screen (geometry = "WxH+x+y").
        ws = self.master.winfo_screenwidth()
        hs = self.master.winfo_screenheight()
        x = (ws/2) - (self.__width / 2)
        y = (hs/2) - (self.__height / 2)
        self.master.geometry('%dx%d+%d+%d' %
                             (self.__width, self.__height, x, y))
        # Create the range slider widget to spec, then exercise its mutators.
        self.__rs = RangeSlider(self.master,
                                lowerBound = 0, upperBound = 100,
                                initialLowerBound = 25, initialUpperBound = 75)
        self.__rs.setUpperBound(1000)
        self.__rs.setLowerBound(500)
        self.__rs.setLower(650)
        self.__rs.setUpper(750)
        self.__rs.setMajorTickSpacing(100)
        self.__rs.setMinorTickSpacing(20)
        self.__rs.setPaintTicks(True)
        self.__rs.setSnapToTicks(False)
        self.__rs.setFocus()
        # Label/entry pairs mirroring the slider's values, bounds and ticks.
        self.__minValueLabel = Label(self.master, text = "Lower")
        self.__minValueEntry = Entry(self.master, textvariable=self.__lowerEntryString)
        self.__lowerEntryString.trace("w", self.__lowerEntry_onChange)
        self.__maxValueLabel = Label(self.master, text = "Upper")
        self.__maxValueEntry = Entry(self.master, textvariable=self.__upperEntryString)
        self.__upperEntryString.trace("w", self.__upperEntry_onChange)
        self.__lowerBoundLabel = Label(self.master, text = "LowerBound")
        self.__lowerBoundEntry = Entry(self.master, textvariable=self.__lowerBoundEntryString)
        self.__lowerBoundEntryString.trace("w", self.__lowerBoundEntry_onChange)
        self.__upperBoundLabel = Label(self.master, text = "UpperBound")
        self.__upperBoundEntry = Entry(self.master, textvariable=self.__upperBoundEntryString)
        self.__upperBoundEntryString.trace("w", self.__upperBoundEntry_onChange)
        self.__majorTickSpacingLabel = Label(self.master, text = "Major Tick Spacing")
        self.__majorTickSpacingEntry = Entry(self.master, textvariable=self.__majorTickEntryString)
        self.__majorTickEntryString.trace("w", self.__majorTickEntry_onChange)
        self.__minorTickSpacingLabel = Label(self.master, text = "Minor Tick Spacing")
        self.__minorTickSpacingEntry = Entry(self.master, textvariable=self.__minorTickEntryString)
        self.__minorTickEntryString.trace("w", self.__minorTickEntry_onChange)
        # Check buttons toggling tick painting and tick snapping.
        self.__paintTicksCheckVar = IntVar()
        self.__paintTicksCheckVar.set(int(self.__rs.getPaintTicks()))
        self.__paintTicksCheck = Checkbutton(self.master,
                                             text="Paint Ticks",
                                             command=self.__paintTicksCheck_onClick,
                                             variable=self.__paintTicksCheckVar,
                                             onvalue="1",
                                             offvalue="0")
        self.__snapToTicksCheckVar = IntVar()
        self.__snapToTicksCheckVar.set(int(self.__rs.getSnapToTicks()))
        self.__snapToTicks = Checkbutton(self.master,
                                         text="Snap To Ticks",
                                         command=self.__snapToTicksCheck_onClick,
                                         variable=self.__snapToTicksCheckVar,
                                         onvalue="1",
                                         offvalue="0")
        # Reset-slider and quit buttons.
        resetButton = Button(self.master, text = "Reset")
        resetButton.bind("<1>", self.resetButton_onClick)
        quitButton = Button(self.master, text = "Quit")
        quitButton.bind("<1>", self.quitButton_onClick)
        # A second, independently styled slider showing the color options.
        secondRs = RangeSlider(self.master,
                               sliderColor="yellow", sliderHighlightedColor="green",
                               barColor="lightblue",
                               caretColor="red", caretHighlightedColor="green",
                               barWidthPercent=0.85, barHeightPercent=0.10)
        secondRs.setPaintTicks(True)
        # THE GRID: position all the GUI components.
        self.__minValueLabel.grid(column=1, row=0, sticky=(W, E))
        self.__minValueEntry.grid(column=2, row=0, sticky=(W, E))
        self.__maxValueLabel.grid(column=1, row=1, sticky=(W, E))
        self.__maxValueEntry.grid(column=2, row=1, sticky=(W, E))
        self.__lowerBoundLabel.grid(column=1, row=2, sticky=(W, E))
        self.__lowerBoundEntry.grid(column=2, row=2, sticky=(W, E))
        self.__upperBoundLabel.grid(column=1, row=3, sticky=(W, E))
        self.__upperBoundEntry.grid(column=2, row=3, sticky=(W, E))
        self.__majorTickSpacingLabel.grid(column=1, row=4, sticky=(W, E))
        self.__majorTickSpacingEntry.grid(column=2, row=4, sticky=(W, E))
        self.__minorTickSpacingLabel.grid(column=1, row=5, sticky=(W, E))
        self.__minorTickSpacingEntry.grid(column=2, row=5, sticky=(W, E))
        # TYPO FIX: these two calls used `stick=`; the grid option is `sticky`.
        self.__paintTicksCheck.grid(column=1, row = 6, columnspan=2, sticky=(W,E))
        self.__snapToTicks.grid(column=1, row = 7, columnspan=2, sticky=(W,E))
        resetButton.grid(column=1, row = 8, columnspan=2, sticky=(W,E))
        quitButton.grid(column=1, row = 9, columnspan=2, sticky=(W,E))
        self.__rs.grid(column=0, row=0, rowspan=10, sticky=(N, S, E, W))
        secondRs.grid(column=0, row=10, columnspan=3, sticky=(W,E))
        # Grid weights: every row stretches; only the slider column does.
        for row in range(11):
            self.master.rowconfigure(row, weight=1)
        self.master.columnconfigure(0, weight=1)
        self.master.columnconfigure(1, weight=0)
        self.master.columnconfigure(2, weight=0)
        # Bind our slider state change event and seed the entry widgets.
        self.__rs.subscribe(self.slider_changeState)
        self.slider_changeState(None)
        # Convenience key binding.
        self.master.bind("<Escape>", self.keyPress_Escape)

    def shutdown(self):
        """Perform any last-minute shutdown tasks and destroy the window."""
        log.info("Quitting...")
        self.master.destroy()

    def keyPress_Escape(self, e):
        """Shut down whenever Escape is hit (developer convenience)."""
        self.shutdown()

    def slider_changeState(self, e):
        """Mirror the slider state into the entry widgets.

        Bound to the notify event of the slider controller; called whenever
        the slider changes.  Entries that currently have keyboard focus are
        left alone so typing is not clobbered.
        """
        if (self.focus_displayof() != self.__minValueEntry):
            self.__minValueEntry.delete(0, END)
            self.__minValueEntry.insert(0, self.__rs.getLower())
        if (self.focus_displayof() != self.__maxValueEntry):
            self.__maxValueEntry.delete(0, END)
            self.__maxValueEntry.insert(0, self.__rs.getUpper())
        if (self.focus_displayof() != self.__lowerBoundEntry):
            self.__lowerBoundEntry.delete(0, END)
            self.__lowerBoundEntry.insert(0, self.__rs.getLowerBound())
        if (self.focus_displayof() != self.__upperBoundEntry):
            self.__upperBoundEntry.delete(0, END)
            self.__upperBoundEntry.insert(0, self.__rs.getUpperBound())
        if (self.focus_displayof() != self.__majorTickSpacingEntry):
            self.__majorTickSpacingEntry.delete(0, END)
            self.__majorTickSpacingEntry.insert(0, self.__rs.getMajorTickSpacing())
        if (self.focus_displayof() != self.__minorTickSpacingEntry):
            self.__minorTickSpacingEntry.delete(0, END)
            self.__minorTickSpacingEntry.insert(0, self.__rs.getMinorTickSpacing())

    def __paintTicksCheck_onClick(self):
        """Toggle tick painting on the slider."""
        b = self.__paintTicksCheckVar.get()
        self.__rs.setPaintTicks(b)

    def __snapToTicksCheck_onClick(self):
        """Toggle snap-to-ticks behaviour on the slider."""
        b = self.__snapToTicksCheckVar.get()
        self.__rs.setSnapToTicks(b)

    def __lowerEntry_onChange(self, e, a, mode):
        """Push the lower-entry text into the slider (ignores bad floats)."""
        try:
            f = float(self.__lowerEntryString.get())
            if (f != self.__rs.getLower()):
                self.__rs.setLower(f)
        except Exception:
            # Ignore unparsable/intermediate text while the user is typing.
            # (Was a bare `except: None`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass

    def __lowerBoundEntry_onChange(self, e, a, mode):
        """Push the lower-bound entry text into the slider."""
        try:
            f = float(self.__lowerBoundEntryString.get())
            if (f != self.__rs.getLowerBound()):
                self.__rs.setLowerBound(f)
        except Exception:
            pass

    def __upperEntry_onChange(self, e, a, mode):
        """Push the upper-entry text into the slider."""
        try:
            f = float(self.__upperEntryString.get())
            if (f != self.__rs.getUpper()):
                self.__rs.setUpper(f)
        except Exception:
            pass

    def __upperBoundEntry_onChange(self, e, a, mode):
        """Push the upper-bound entry text into the slider."""
        try:
            f = float(self.__upperBoundEntryString.get())
            if (f != self.__rs.getUpperBound()):
                self.__rs.setUpperBound(f)
        except Exception:
            pass

    def __minorTickEntry_onChange(self, e, a, mode):
        """Push the minor-tick-spacing entry text into the slider."""
        try:
            f = float(self.__minorTickEntryString.get())
            if (f != self.__rs.getMinorTickSpacing()):
                self.__rs.setMinorTickSpacing(f)
        except Exception:
            pass

    def __majorTickEntry_onChange(self, e, a, mode):
        """Push the major-tick-spacing entry text into the slider."""
        try:
            f = float(self.__majorTickEntryString.get())
            if (f != self.__rs.getMajorTickSpacing()):
                self.__rs.setMajorTickSpacing(f)
        except Exception:
            pass

    def resetButton_onClick(self, e):
        """Reset the slider to its starting values."""
        log.debug("Reseting Slider...")
        self.__rs.setLower(25)
        self.__rs.setUpper(75)

    def quitButton_onClick(self, e):
        """Perform shutdown on quit."""
        self.shutdown()
'''
Main
'''
def main():
    """Configure the module logger and launch the demo main loop."""
    global log
    log.setLevel(LOGGING_LEVEL)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter("%(levelname)s " +
                                                  "%(asctime)s " +
                                                  "%(filename)s:%(funcName)s(line:%(lineno)d)" +
                                                  "\n\t%(message)s"))
    log.addHandler(stream_handler)
    # Initialize the demo and block until the window is closed.
    RangeSliderDemo().mainloop()
'''
Entry Point
'''
# Entry point: run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 26.827628 | 119 | 0.704957 |
16b8aa61856cb4edc45597a47ee202f7cdb9c457 | 196 | py | Python | micmon/audio/__init__.py | jenssss/micmon | 007456d5adea2594a3d95aef378454cd5e63672e | [
"MIT"
] | 35 | 2020-10-31T12:43:09.000Z | 2022-03-24T11:15:32.000Z | micmon/audio/__init__.py | jenssss/micmon | 007456d5adea2594a3d95aef378454cd5e63672e | [
"MIT"
] | 3 | 2021-01-29T05:07:23.000Z | 2021-03-20T07:31:19.000Z | micmon/audio/__init__.py | jenssss/micmon | 007456d5adea2594a3d95aef378454cd5e63672e | [
"MIT"
] | 7 | 2020-10-31T13:19:10.000Z | 2021-05-01T07:41:28.000Z | from .directory import AudioDirectory
from .segment import AudioSegment
from .player import AudioPlayer
from .source import AudioSource
from .file import AudioFile
from .device import AudioDevice
| 28 | 37 | 0.846939 |
1de321234bfd6f307a10d603ca229641a6847b9d | 4,133 | py | Python | build/clobber.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | build/clobber.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 250 | 2018-02-02T23:16:57.000Z | 2022-03-21T06:09:53.000Z | build/clobber.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script provides methods for clobbering build directories."""
import argparse
import os
import shutil
import subprocess
import sys
def extract_gn_build_commands(build_ninja_file):
  """Extracts from a build.ninja the commands to run GN.

  The commands to run GN are the gn rule and build.ninja build step at the
  top of the build.ninja file. We want to keep these when deleting GN builds
  since we want to preserve the command-line flags to GN.

  On error, returns the empty string."""
  collected = []
  with open(build_ninja_file, 'r') as f:
    # GN writes "ninja_required_version = x.y.z", then the "rule gn" section,
    # then the "build build.ninja" section, separated by blank lines — so we
    # keep everything up to and including the third blank line.
    blank_count = 0
    while blank_count < 3:
      line = f.readline()
      if not line:
        return ''  # Unexpected EOF.
      collected.append(line)
      if line.startswith('\n'):
        blank_count += 1
  return ''.join(collected)
def delete_dir(build_dir):
  """Recursively remove *build_dir*; symlinks are left untouched."""
  if os.path.islink(build_dir):
    return
  # For unknown reasons (anti-virus?) rmtree of Chromium build directories
  # often fails on Windows, so shell out to rmdir there instead.
  if not sys.platform.startswith('win'):
    shutil.rmtree(build_dir)
  else:
    subprocess.check_call(['rmdir', '/s', '/q', build_dir], shell=True)
def delete_build_dir(build_dir):
  """Clobber a single build directory, preserving the GN regeneration files.

  For plain directories this is a simple recursive delete.  For GN builds
  (detected by the presence of build.ninja.d) we keep args.gn and write a
  stub build.ninja/build.ninja.d so the next Ninja run reruns GN with the
  original flags.
  """
  # GN writes a build.ninja.d file. Note that not all GN builds have args.gn.
  build_ninja_d_file = os.path.join(build_dir, 'build.ninja.d')
  if not os.path.exists(build_ninja_d_file):
    delete_dir(build_dir)
    return

  # GN builds aren't automatically regenerated when you sync. To avoid
  # messing with the GN workflow, erase everything but the args file, and
  # write a dummy build.ninja file that will automatically rerun GN the next
  # time Ninja is run.
  build_ninja_file = os.path.join(build_dir, 'build.ninja')
  build_commands = extract_gn_build_commands(build_ninja_file)

  try:
    gn_args_file = os.path.join(build_dir, 'args.gn')
    with open(gn_args_file, 'r') as f:
      args_contents = f.read()
  except IOError:
    args_contents = ''

  # delete_dir and os.mkdir() may fail, such as when chrome.exe is running,
  # and we still want to restore args.gn/build.ninja/build.ninja.d, so catch
  # the exception and rethrow it later.
  #
  # BUG FIX: the original wrote `except Exception as e: pass` and later
  # `if e: raise e`.  Per PEP 3110, `except ... as e` deletes the name `e`
  # when the clause exits, so the re-raise hit a NameError exactly when an
  # error had actually been caught.  Stash the exception in a separate name.
  pending_error = None
  try:
    delete_dir(build_dir)
    os.mkdir(build_dir)
  except Exception as exc:
    pending_error = exc

  # Put back the args file (if any).
  if args_contents != '':
    with open(gn_args_file, 'w') as f:
      f.write(args_contents)

  # Write the build.ninja file sufficiently to regenerate itself.
  with open(os.path.join(build_dir, 'build.ninja'), 'w') as f:
    if build_commands != '':
      f.write(build_commands)
    else:
      # Couldn't parse the build.ninja file, write a default thing.
      f.write('''ninja_required_version = 1.7.2
rule gn
  command = gn -q gen //out/%s/
  description = Regenerating ninja files
build build.ninja: gn
  generator = 1
  depfile = build.ninja.d
''' % (os.path.split(build_dir)[1]))

  # Write a .d file for the build which references a nonexistant file. This
  # will make Ninja always mark the build as dirty.
  with open(build_ninja_d_file, 'w') as f:
    f.write('build.ninja: nonexistant_file.gn\n')

  if pending_error:
    # Rethrow the exception we caught earlier.
    raise pending_error
def clobber(out_dir):
  """Clobber contents of build directory.

  Don't delete the directory itself: some checkouts have the build directory
  mounted."""
  for entry in os.listdir(out_dir):
    entry_path = os.path.join(out_dir, entry)
    if os.path.isdir(entry_path):
      delete_build_dir(entry_path)
    elif os.path.isfile(entry_path):
      os.unlink(entry_path)
def main():
  """Parse the single output-directory argument and clobber it."""
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument('out_dir', help='The output directory to clobber')
  opts = arg_parser.parse_args()
  clobber(opts.out_dir)
  return 0
# Script entry point: exit with main()'s status code.
if __name__ == '__main__':
  sys.exit(main())
| 30.614815 | 78 | 0.69804 |
56b3ae08ec9d01b67e4940148440d7cbdfc1cd0e | 10,183 | py | Python | silentsploit/utils.py | TheDeathWing/SilentSploit | 531658f091309576fbaf22a7b83c9bb4e58a5179 | [
"BSD-2-Clause"
] | null | null | null | silentsploit/utils.py | TheDeathWing/SilentSploit | 531658f091309576fbaf22a7b83c9bb4e58a5179 | [
"BSD-2-Clause"
] | null | null | null | silentsploit/utils.py | TheDeathWing/SilentSploit | 531658f091309576fbaf22a7b83c9bb4e58a5179 | [
"BSD-2-Clause"
] | null | null | null | import os
import re
import glob
import importlib
import random, string
import app as app_path
import silentsploit as silentsploit_path
import silentsploit.modules as silentsploit_src
import silentsploit.modules as silentsploit_config
from functools import wraps
from silentsploit.core.exceptions import *
from silentsploit.core.colors import *
# Absolute path of the bundled silentsploit.modules package; used as the
# default search root by index_modules() below.
MODULES_DIR = silentsploit_src.__path__[0]
"""
Warning: This code snippet is from Threat9
Credits: Routersploit
"""
def random_text(length: int, alph: str = string.ascii_letters + string.digits) -> str:
    """ Generates random string text

    :param int length: length of text to generate
    :param str alph: string of all possible characters to choose from
    :return str: generated random string of specified size
    """
    chosen = [random.choice(alph) for _ in range(length)]
    return "".join(chosen)
def is_ipv4(address):
    """ Checks if given address is valid IPv4 address

    :param str address: IP address to check
    :return bool: True if address is valid IPv4 address, False otherwise
    """
    regexp = "^(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
    return re.match(regexp, address) is not None
def is_ipv6(address):
    """ Checks if given address is valid IPv6 address

    NOTE(review): the pattern below ends with `%.*$`, i.e. it only matches
    addresses carrying a zone suffix (e.g. "fe80::1%eth0") — confirm this
    is the intended contract with the callers.

    :param str address: IP address to check
    :return bool: True if address is valid IPv6 address, False otherwise
    """
    regexp = "^(?:(?:[0-9A-Fa-f]{1,4}:){6}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|::(?:[0-9A-Fa-f]{1,4}:){5}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){4}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){3}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,2}[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){2}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,3}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}:(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,4}[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,5}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}|(?:(?:[0-9A-Fa-f]{1,4}:){,6}[0-9A-Fa-f]{1,4})?::)%.*$"
    return re.match(regexp, address) is not None
def convert_ip(address):
    """ Converts IP to bytes

    :param str address: dotted-quad IPv4 address to convert to bytes
    :return bytes: IP converted to 4 packed bytes (network order)
    """
    # BUG FIX: this was `res = b`, a NameError — it must be an empty
    # bytes literal.
    res = b""
    for octet in address.split("."):
        res += bytes([int(octet)])
    return res
def convert_port(port):
    """ Converts Port to bytes

    :param int port: port that should be converted to bytes
    :return bytes: port converted to 2 packed bytes (big endian)
    """
    hex_repr = "%.4x" % int(port)
    return bytes.fromhex(hex_repr)
def index_modules(modules_directory: str = MODULES_DIR):
    """ Returns list of all exploits modules

    Walks *modules_directory* and produces dotted module paths relative to
    the "silentsploit/modules/" marker, skipping dunder files and anything
    that is not a .py file.

    :param str modules_directory: path to modules directory
    :return list: list of found modules
    """
    marker = "silentsploit/modules/".replace("/", os.sep)
    found = []
    for root, dirs, files in os.walk(modules_directory):
        _, _, subpath = root.rpartition(marker)
        dotted_root = subpath.replace(os.sep, ".")
        for filename in files:
            if filename.startswith("__") or not filename.endswith(".py"):
                continue
            found.append(".".join((dotted_root, os.path.splitext(filename)[0])))
    return found
def restart_CLI():
    """Replace the current process with a fresh interpreter running the
    same script and arguments (never returns).
    """
    # Removed the unused local `python = sys.executable` — os.execl already
    # receives sys.executable directly.
    os.execl(sys.executable, sys.executable, *sys.argv)
# Import-time snapshot of the working directory.  Unused in this module;
# presumably read elsewhere as `utils.curdir` — TODO confirm with callers.
curdir = os.getcwd()
def pythonize_path(path):
    """ Replaces argument to valid python dotted notation.

    ex. foo/bar/baz -> foo.bar.baz

    :param str path: path to pythonize
    :return str: pythonized path
    """
    return ".".join(path.split("/"))
def humanize_path(path):
    """ Replace python dotted path to directory-like one.

    ex. foo.bar.baz -> foo/bar/baz

    :param str path: path to humanize
    :return str: humanized path
    """
    return "/".join(path.split("."))
def config_required(fn):
    """ Checks if module is loaded.

    Decorator that checks if any module is activated
    before executing command specific to modules (ex. 'run').
    Also records itself on the wrapper's `__decorators__` list.
    """
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        if not self.current_config:
            print_error("You must activate a module with the 'use' command.")
            return
        return fn(self, *args, **kwargs)

    name = "config_required"
    try:
        # Append if a decorator list was carried over from `fn` by wraps().
        wrapper.__decorators__.append(name)
    except AttributeError:
        # First decorator on this function: start a new list.
        wrapper.__decorators__ = [name]
    return wrapper
def import_config(path):
    """ Imports exploit module

    :param str path: absolute path to exploit e.g. routersploit.modules.exploits.asus_auth_bypass
    :return: exploit module or error
    """
    try:
        module = importlib.import_module(path)
        # The module must expose exactly one of these well-known classes;
        # the first match wins (checked in this fixed order).
        for attr in ("Payload", "Handler", "Encoder", "Exploit"):
            if hasattr(module, attr):
                return getattr(module, attr)
        raise ImportError("No module named '{}'".format(path))
    except (ImportError, AttributeError, KeyError) as err:
        raise SilentSploitException(
            "Error during loading '{}'\n\n"
            "Error: {}\n\n"
            "It should be valid path to the module. "
            "Use <tab> key multiple times for completion.".format(humanize_path(path), err)
        )
def print_error(msg="", *args, **kwargs):
    """Print *msg* with a red '[-]' error prefix (extra args are ignored)."""
    line = "[{RED}-{END}] %s".format(**colors) % (msg)
    print(line)
def print_status(msg="", *args, **kwargs):
    """Print *msg* with a blue '[*]' status prefix (extra args are ignored)."""
    line = "[{BLUE}*{END}] %s".format(**colors) % (msg)
    print(line)
def print_info(msg="", *args, **kwargs):
    """
    Print info message

    NOTE(review): only *msg* is printed — any extra positional/keyword
    arguments are silently ignored, so callers must pre-format the string.
    """
    print(msg)
def print_success(msg="", *args, **kwargs):
    """Print *msg* with a green '[+]' success prefix (extra args are ignored)."""
    line = "[{GREEN}+{END}] %s".format(**colors) % (msg)
    print(line)
def print_warning(msg="", *args, **kwargs):
    """Print *msg* with a yellow '[~]' warning prefix (extra args are ignored)."""
    line = "[{YELLOW}~{END}] %s".format(**colors) % (msg)
    print(line)
def print_table(headers, *args, **kwargs):
    """ Print table.

    example:

    Name     Current setting     Description
    ----     ---------------     -----------
    option_name     value     description
    foo     bar     baz
    foo     bar     baz

    :param headers: Headers names ex.('Name, 'Current setting', 'Description')
    :param args: table values, each element representing one line ex. ('option_name', 'value', 'description), ...
    :param kwargs: 'extra_fill' space between columns, 'header_separator' character to separate headers from content
    :return:
    """
    extra_fill = kwargs.get("extra_fill", 5)
    header_separator = kwargs.get("header_separator", "-")
    # Every row must supply exactly one cell per header.
    if not all(map(lambda x: len(x) == len(headers), args)):
        print_error("Headers and table rows tuples should be the same length.")
        return
    # len() that tolerates unsized cells (ints, None, ...) as zero-width.
    def custom_len(x):
        try:
            return len(x)
        except TypeError:
            return 0
    fill = []
    headers_line = ' '
    headers_separator_line = ' '
    # Column width = widest cell in the column (or the header) + extra_fill.
    for idx, header in enumerate(headers):
        column = [custom_len(arg[idx]) for arg in args]
        column.append(len(header))
        current_line_fill = max(column) + extra_fill
        fill.append(current_line_fill)
        headers_line = "".join((headers_line, "{header:<{fill}}".format(header=header, fill=current_line_fill)))
        headers_separator_line = "".join((
            headers_separator_line,
            "{:<{}}".format(header_separator * len(header), current_line_fill)
        ))
    print_info()
    print_info(headers_line)
    print_info(headers_separator_line)
    # Emit each row left-aligned to the computed column widths.
    for arg in args:
        content_line = " "
        for idx, element in enumerate(arg):
            content_line = "".join((
                content_line,
                "{:<{}}".format(element, fill[idx])
            ))
        print_info(content_line)
    print_info()
def pprint_dict_in_order(dictionary, order=None, Space=True):
    """ Pretty dict print.

    Pretty printing dictionary in specific order. (as in 'show info' command)
    Keys not mentioned in *order* parameter will be printed in random order.

    ex. pprint_dict_in_order({'name': John, 'sex': 'male', "hobby": ["rugby", "golf"]}, ('sex', 'name'))

    Sex:
    male

    Name:
    John

    Hobby:
    - rugby
    - golf
    """
    order = order or ()

    def prettyprint(title, body):
        # Optionally put a blank line before each section header.
        if Space == True:
            print_info("\n{}:".format(title.capitalize()))
        else:
            print_info("{}:".format(title.capitalize()))
        if not isinstance(body, str):
            for value_element in body:
                # BUG FIX: print_info() ignores extra positional arguments,
                # so the original `print_info("- ", value_element)` printed
                # a bare "- " and dropped the value.  Format the full line
                # (matches the "- rugby" example in the docstring).
                print_info("- {}".format(value_element))
        else:
            print_info(body)

    keys = list(dictionary.keys())
    # Print the explicitly ordered keys first, consuming them from `keys`.
    for element in order:
        try:
            key = keys.pop(keys.index(element))
            value = dictionary[key]
        except (KeyError, ValueError):
            pass
        else:
            prettyprint(element, value)
    # Then print whatever keys remain, in dict order.
    for rest_keys in keys:
        prettyprint(rest_keys, dictionary[rest_keys])
6e1fbd6bcdd441c689e34161840d8a9b8c42db7b | 4,935 | py | Python | examples/mnist_vae.py | danielsuo/jax | a473e5b6bb3be5aa032d30ed023abb6b121af0e0 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-04-03T03:11:55.000Z | 2020-05-21T18:01:51.000Z | examples/mnist_vae.py | danielsuo/jax | a473e5b6bb3be5aa032d30ed023abb6b121af0e0 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-08-15T18:22:52.000Z | 2019-08-20T18:19:42.000Z | examples/mnist_vae.py | danielsuo/jax | a473e5b6bb3be5aa032d30ed023abb6b121af0e0 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-11-22T17:50:23.000Z | 2020-11-22T17:50:23.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic variational autoencoder (VAE) on binarized MNIST using Numpy and JAX.
This file uses the stax network definition library and the optimizers
optimization library.
"""
import os
import time
import matplotlib.pyplot as plt
import jax.numpy as np
from jax.config import config
from jax import jit, grad, lax, random
from jax.experimental import optimizers
from jax.experimental import stax
from jax.experimental.stax import Dense, FanOut, Relu, Softplus
from examples import datasets
def gaussian_kl(mu, sigmasq):
  """KL divergence from a diagonal Gaussian N(mu, diag(sigmasq)) to N(0, I)."""
  per_dim = 1. + np.log(sigmasq) - mu**2. - sigmasq
  return -0.5 * np.sum(per_dim)
def gaussian_sample(rng, mu, sigmasq):
  """Sample N(mu, diag(sigmasq)) via the reparameterization trick."""
  noise = random.normal(rng, mu.shape)
  return mu + np.sqrt(sigmasq) * noise
def bernoulli_logpdf(logits, x):
  """Bernoulli log pdf of data x given logits."""
  signs = np.where(x, -1., 1.)
  return -np.sum(np.logaddexp(0., signs * logits))
def elbo(rng, params, images):
  """Monte Carlo estimate of the negative evidence lower bound (one sample)."""
  enc_params, dec_params = params
  mu_z, sigmasq_z = encode(enc_params, images)
  latents = gaussian_sample(rng, mu_z, sigmasq_z)
  logits_x = decode(dec_params, latents)
  return bernoulli_logpdf(logits_x, images) - gaussian_kl(mu_z, sigmasq_z)
def image_sample(rng, params, nrow, ncol):
  """Sample an nrow-by-ncol grid of images from the generative model."""
  _, dec_params = params
  code_rng, img_rng = random.split(rng)
  latent_codes = random.normal(code_rng, (nrow * ncol, 10))
  logits = decode(dec_params, latent_codes)
  sampled_images = random.bernoulli(img_rng, np.logaddexp(0., logits))
  return image_grid(nrow, ncol, sampled_images, (28, 28))
def image_grid(nrow, ncol, imagevecs, imshape):
  """Reshape a stack of image vectors into an image grid for plotting."""
  tiles = iter(imagevecs.reshape((-1,) + imshape))
  rows = []
  for _ in range(nrow):
    row_tiles = [next(tiles).T for _ in range(ncol)]
    rows.append(np.hstack(row_tiles[::-1]))
  return np.vstack(rows).T
# Encoder: image vector -> (mu, sigma^2) of a 10-dim diagonal Gaussian.
# FanOut(2) duplicates the features into two heads; the second head goes
# through Softplus so the variances stay positive.
encoder_init, encode = stax.serial(
    Dense(512), Relu,
    Dense(512), Relu,
    FanOut(2),
    stax.parallel(Dense(10), stax.serial(Dense(10), Softplus)),
)

# Decoder: 10-dim latent code -> 28*28 per-pixel Bernoulli logits.
decoder_init, decode = stax.serial(
    Dense(512), Relu,
    Dense(512), Relu,
    Dense(28 * 28),
)
if __name__ == "__main__":
  # Training hyperparameters.
  step_size = 0.001
  num_epochs = 100
  batch_size = 32
  nrow, ncol = 10, 10  # sampled image grid size
  test_rng = random.PRNGKey(1)  # fixed prng key for evaluation
  # One output image per epoch, written under $TMPDIR (or /tmp).
  imfile = os.path.join(os.getenv("TMPDIR", "/tmp/"), "mnist_vae_{:03d}.png")
  # Load MNIST; a trailing partial batch still counts as a batch.
  train_images, _, test_images, _ = datasets.mnist(permute_train=True)
  num_complete_batches, leftover = divmod(train_images.shape[0], batch_size)
  num_batches = num_complete_batches + bool(leftover)
  # Initialize encoder/decoder parameters with independent PRNG streams.
  enc_init_rng, dec_init_rng = random.split(random.PRNGKey(2))
  _, init_encoder_params = encoder_init(enc_init_rng, (batch_size, 28 * 28))
  _, init_decoder_params = decoder_init(dec_init_rng, (batch_size, 10))
  init_params = init_encoder_params, init_decoder_params
  opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=0.9)
  # Slice out batch i (wrapping around) and binarize it stochastically.
  def binarize_batch(rng, i, images):
    i = i % num_batches
    batch = lax.dynamic_slice_in_dim(images, i * batch_size, batch_size)
    return random.bernoulli(rng, batch)
  # One full pass over the training set, entirely inside jit/fori_loop.
  @jit
  def run_epoch(rng, opt_state):
    def body_fun(i, opt_state):
      # Per-step PRNG keys derived deterministically from the epoch key.
      elbo_rng, data_rng = random.split(random.fold_in(rng, i))
      batch = binarize_batch(data_rng, i, train_images)
      # Minimize the negative ELBO, averaged over the batch.
      loss = lambda params: -elbo(elbo_rng, params, batch) / batch_size
      g = grad(loss)(get_params(opt_state))
      return opt_update(i, g, opt_state)
    return lax.fori_loop(0, num_batches, body_fun, opt_state)
  # Compute the test-set ELBO and sample a grid of images for inspection.
  @jit
  def evaluate(opt_state, images):
    params = get_params(opt_state)
    elbo_rng, data_rng, image_rng = random.split(test_rng, 3)
    binarized_test = random.bernoulli(data_rng, images)
    test_elbo = elbo(elbo_rng, params, binarized_test) / images.shape[0]
    sampled_images = image_sample(image_rng, params, nrow, ncol)
    return test_elbo, sampled_images
  # Main training loop: one epoch per iteration, logging ELBO and timing.
  opt_state = opt_init(init_params)
  for epoch in range(num_epochs):
    tic = time.time()
    opt_state = run_epoch(random.PRNGKey(epoch), opt_state)
    test_elbo, sampled_images = evaluate(opt_state, test_images)
    print("{: 3d} {} ({:.3f} sec)".format(epoch, test_elbo, time.time() - tic))
    plt.imsave(imfile.format(epoch), sampled_images, cmap=plt.cm.gray)
| 36.555556 | 80 | 0.721175 |
fdd9908d660903615a35fd4fc08519948f21574e | 250 | py | Python | serif/model/java_base_model.py | BBN-E/ZS4IE | 357965f3068cfe5098422d8cb0ca4b0f99c99fd4 | [
"Apache-2.0"
] | 7 | 2022-03-24T11:04:08.000Z | 2022-03-31T17:12:46.000Z | serif/model/java_base_model.py | BBN-E/ZS4IE | 357965f3068cfe5098422d8cb0ca4b0f99c99fd4 | [
"Apache-2.0"
] | null | null | null | serif/model/java_base_model.py | BBN-E/ZS4IE | 357965f3068cfe5098422d8cb0ca4b0f99c99fd4 | [
"Apache-2.0"
] | null | null | null | import io, typing
from serif.model.document_model import DocumentModel
from serif.theory.document import Document
class JavaDocumentModel(DocumentModel):
    """DocumentModel subclass; currently only forwards construction kwargs.

    NOTE(review): the name suggests a Java-backed model, but no Java bridging
    is visible here — confirm against subclasses before documenting further.
    """
    def __init__(self, **kwargs):
        super(JavaDocumentModel, self).__init__(**kwargs)
| 22.727273 | 57 | 0.776 |
2458b2792ef1f3034f856e70bba12e0ced4d3cca | 2,276 | py | Python | PyOpenGL-3.1.0/tests/test_glx_pygame.py | proTest0116/Bodies | 5bebf6c1c2738b747548393b7f7dae3e26188af5 | [
"Unlicense"
] | null | null | null | PyOpenGL-3.1.0/tests/test_glx_pygame.py | proTest0116/Bodies | 5bebf6c1c2738b747548393b7f7dae3e26188af5 | [
"Unlicense"
] | null | null | null | PyOpenGL-3.1.0/tests/test_glx_pygame.py | proTest0116/Bodies | 5bebf6c1c2738b747548393b7f7dae3e26188af5 | [
"Unlicense"
] | null | null | null | import OpenGL
#OpenGL.USE_ACCELERATE=False
from OpenGL.GL import *
from OpenGL.GLX import *
from OpenGL.GLX.EXT.texture_from_pixmap import *
from pygamegltest import pygametest
import os
# GLXFBConfig selection attributes: (attribute, value) pairs terminated by GL_NONE.
attributes = [
    # Texture-from-pixmap attributes kept for reference but currently disabled:
    #    GLX_BIND_TO_TEXTURE_RGBA_EXT, 1,
    #    GLX_DRAWABLE_TYPE, GLX_PIXMAP_BIT,
    #    GLX_BIND_TO_TEXTURE_TARGETS_EXT, GLX_TEXTURE_2D_BIT_EXT,
    GLX_DOUBLEBUFFER, 1,
    #    GLX_Y_INVERTED_EXT, GLX_DONT_CARE,
    GL_NONE
]
from OpenGL import platform
import ctypes
from OpenGL.platform import ctypesloader
# Hand-rolled ctypes bindings for the two Xlib entry points used below;
# ctypes has no default prototypes, so argtypes/restype are declared explicitly.
X11 = ctypesloader.loadLibrary( ctypes.cdll, 'X11' )
XDefaultScreen = X11.XDefaultScreen
XDefaultScreen.argtypes = [ctypes.POINTER(Display)]
XOpenDisplay = X11.XOpenDisplay
XOpenDisplay.restype = ctypes.POINTER(Display)
@pygametest()
def main():
    """Probe the running X server's GLX support and print what it offers.

    Opens the display named by $DISPLAY, reports the glX version, and — when
    glX >= 1.3 — enumerates framebuffer configs matching ``attributes`` and
    dumps a handful of per-config attribute values.
    """
    dsp = XOpenDisplay( os.environ.get( 'DISPLAY' ))
    screen = XDefaultScreen( dsp )
    print('X Display %s Screen %s'%( dsp, screen ))

    # glXQueryVersion fills the two GLint out-parameters in place.
    major,minor = GLint(),GLint()
    glXQueryVersion(dsp, major, minor)
    version = (major.value,minor.value)
    print('glX Version: %s.%s'%version)
    if version >= (1,1):
        print(glXQueryExtensionsString(dsp,screen))
    if version >= (1,2):
        d = glXGetCurrentDisplay()[0]
        print('Current display', d)
    else:
        d = dsp
    if version >= (1,3):
        # elements receives the number of matching configs.
        elements = GLint(0)
        configs = glXChooseFBConfig(
            dsp,
            screen,
            (GLint * len(attributes))( * attributes ),
            elements
        )
        print('%s configs found'%( elements.value ))
        for config in range( elements.value ):
            print('Config: %s %s'%(config,configs[config][0]))
            samples = ctypes.c_int()
            # Look up each GLX_* constant by name in module globals and query it.
            for attribute in (
                'GLX_FBCONFIG_ID','GLX_BUFFER_SIZE',
                'GLX_LEVEL','GLX_DOUBLEBUFFER',
                'GLX_STEREO',
                'GLX_SAMPLES','GLX_SAMPLE_BUFFERS',
                'GLX_DRAWABLE_TYPE',
            ):
                glXGetFBConfigAttrib( dsp, configs[config], globals()[attribute], samples )
                print('%s -> %s'%( attribute, samples.value ))
            print()
from OpenGL.raw.GLX import _types
print('Extension List', _types.GLXQuerier.getExtensions())
if __name__ == "__main__":
main()
| 32.056338 | 91 | 0.621705 |
4ea206282feddba3829a58bae8e98bff7c57bfbe | 8,793 | py | Python | source2/blocks/mrph_block.py | tltneon/SourceIO | 418224918c2b062a4c78a41d4d65329ba2decb22 | [
"MIT"
] | null | null | null | source2/blocks/mrph_block.py | tltneon/SourceIO | 418224918c2b062a4c78a41d4d65329ba2decb22 | [
"MIT"
] | null | null | null | source2/blocks/mrph_block.py | tltneon/SourceIO | 418224918c2b062a4c78a41d4d65329ba2decb22 | [
"MIT"
] | null | null | null | import struct
from math import floor
from .data_block import DATA
import numpy as np
from ...source1.mdl.v49.flex_expressions import *
from ...source_shared.content_manager import ContentManager
class MRPH(DATA):
    """Source 2 morph (MRPH) resource block.

    Decodes per-flex vertex deltas out of the morph texture atlas
    (``read_morphs``) and rebuilds human-readable flex-rule expressions from
    the stack-machine opcodes stored in the resource (``rebuild_flex_expressions``).
    """

    def __init__(self, valve_file, info_block):
        super().__init__(valve_file, info_block)
        # flex name -> float32 array of decoded deltas; filled by read_morphs().
        self.flex_data = {}

    def read_morphs(self):
        """Decode flex deltas from the morph texture atlas into ``self.flex_data``.

        Returns False when the atlas resource cannot be located, True on success.
        Only LOOKUP_TYPE_VERTEX_ID + ENCODING_TYPE_OBJECT_SPACE atlases are supported.
        """
        from ..resouce_types.valve_texture import ValveCompiledTexture
        if self.data['m_pTextureAtlas'] not in self._valve_file.available_resources:
            return False
        vmorf_actual_path = self._valve_file.available_resources.get(self.data['m_pTextureAtlas'], None)
        if not vmorf_actual_path:
            return False
        vmorf_path = ContentManager().find_file(vmorf_actual_path)
        if not vmorf_path:
            return False
        morph_atlas = ValveCompiledTexture(vmorf_path)
        morph_atlas.read_block_info()
        morph_atlas_data = morph_atlas.get_data_block(block_name="DATA")[0]
        morph_atlas_data.read_image(False)
        raw_flex_data = np.frombuffer(morph_atlas_data.image_data, dtype=np.uint8)
        width = self.data['m_nWidth']
        height = self.data['m_nHeight']
        encoding_type = self.data['m_nEncodingType']
        lookup_type = self.data['m_nLookupType']
        # Newer resource versions store the enum as a (name, value) tuple.
        if isinstance(encoding_type, tuple):
            encoding_type = encoding_type[0].split('::')[-1]
            lookup_type = lookup_type[0].split('::')[-1]
        assert lookup_type == 'LOOKUP_TYPE_VERTEX_ID', "Unknown lookup type"
        assert encoding_type == 'ENCODING_TYPE_OBJECT_SPACE', "Unknown encoding type"
        bundle_types = self.data['m_bundleTypes']
        # NOTE(review): reshape uses (width, height, 4) rather than the usual
        # (height, width, 4) row-major order — confirm against the atlas layout.
        raw_flex_data = raw_flex_data.reshape((morph_atlas_data.width, morph_atlas_data.height, 4))
        for morph_datas in self.data['m_morphDatas']:
            # One (bundle, height, width, RGBA) delta volume per morph target.
            self.flex_data[morph_datas['m_name']] = np.zeros((len(bundle_types),
                                                              height, width,
                                                              4),
                                                             dtype=np.float32)
            for n, rect in enumerate(morph_datas['m_morphRectDatas']):
                rect_width = floor(rect['m_flUWidthSrc'] * morph_atlas_data.width)
                rect_height = floor(rect['m_flVHeightSrc'] * morph_atlas_data.height)
                dst_x = rect['m_nXLeftDst']
                dst_y = rect['m_nYTopDst']
                for c, bundle in enumerate(rect['m_bundleDatas']):
                    rect_u = floor(bundle['m_flULeftSrc'] * morph_atlas_data.width)
                    rect_v = floor(bundle['m_flVTopSrc'] * morph_atlas_data.height)
                    morph_data_rect = raw_flex_data[rect_v:rect_v + rect_height, rect_u:rect_u + rect_width, :]
                    vec_offset = bundle['m_offsets']
                    vec_range = bundle['m_ranges']
                    # Dequantise: byte [0..255] -> [0..1], then scale/offset into object space.
                    transformed_data = np.divide(morph_data_rect, 255)
                    transformed_data = np.multiply(transformed_data, vec_range)
                    transformed_data = np.add(transformed_data, vec_offset)
                    transformed_data = transformed_data  # no-op assignment, kept as-is
                    self.flex_data[morph_datas['m_name']][c, dst_y: dst_y + rect_height, dst_x: dst_x + rect_width,
                                                          :] = transformed_data
        for k, v in self.flex_data.items():
            # Flatten each per-morph image into (bundle, vertex_id, 4).
            self.flex_data[k] = v.reshape((len(bundle_types), width * height, 4))
        return True

    def rebuild_flex_expressions(self):
        """Rebuild flex-rule expression trees from the stored FLEX_OP stack programs.

        Returns a dict mapping flex descriptor name -> expression object
        (types from source1.mdl.v49.flex_expressions). Rules whose stack does
        not reduce to exactly one expression are reported and skipped.
        """
        flex_rules = {}

        def get_flex_desc(index):
            # FACS name of the flex descriptor at *index*.
            return self.data['m_FlexDesc'][index]['m_szFacs']

        def get_flex_cnt(index):
            # Name of the flex controller at *index*.
            return self.data['m_FlexControllers'][index]['m_szName']

        for rule in self.data['m_FlexRules']:
            stack = []
            # try:
            for op in rule['m_FlexOps']:
                flex_op = op['m_OpCode']
                index = op['m_Data']
                # m_Data is stored as an int; reinterpret the same bits as a float
                # for opcodes that carry a constant.
                value = struct.unpack('f', struct.pack('i', index))[0]
                if flex_op == "FLEX_OP_ADD":
                    right = stack.pop(-1)
                    left = stack.pop(-1)
                    stack.append(Add(left, right))
                elif flex_op == "FLEX_OP_COMBO":
                    count = index
                    values = [stack.pop(-1) for _ in range(count)]
                    combo = Combo(*values)
                    stack.append(combo)
                elif flex_op == "FLEX_OP_CONST":
                    stack.append(Value(value))
                elif flex_op == "FLEX_OP_DIV":
                    right = stack.pop(-1)
                    left = stack.pop(-1)
                    stack.append(Div(left, right))
                elif flex_op in [
                    "FLEX_OP_DME_UPPER_EYELID",
                    "FLEX_OP_DME_LOWER_EYELID",
                ]:
                    # Eyelid ops are not implemented: drop their three operands
                    # and substitute a constant 1.0.
                    stack.pop(-1)
                    stack.pop(-1)
                    stack.pop(-1)
                    stack.append(Value(1.0))
                elif flex_op == "FLEX_OP_DOMINATE":
                    count = index + 1
                    values = [stack.pop(-1) for _ in range(count)]
                    dom = Dominator(*values)
                    stack.append(dom)
                elif flex_op == "FLEX_OP_FETCH1":
                    stack.append(FetchController(get_flex_cnt(index)))
                elif flex_op == "FLEX_OP_FETCH2":
                    stack.append(FetchFlex(get_flex_desc(index)))
                elif flex_op == "FLEX_OP_MAX":
                    right = stack.pop(-1)
                    left = stack.pop(-1)
                    stack.append(Max(left, right))
                elif flex_op == "FLEX_OP_MIN":
                    right = stack.pop(-1)
                    left = stack.pop(-1)
                    stack.append(Min(left, right))
                elif flex_op == "FLEX_OP_MUL":
                    right = stack.pop(-1)
                    left = stack.pop(-1)
                    stack.append(Mul(left, right))
                elif flex_op == "FLEX_OP_NEG":
                    stack.append(Neg(stack.pop(-1)))
                elif flex_op == "FLEX_OP_NWAY":
                    # N-way blend: pops a controller index plus four thresholds
                    # (x, y, z, w) and builds a piecewise ramp expression.
                    flex_cnt_value = int(stack.pop(-1).value)
                    flex_cnt = FetchController(get_flex_cnt(flex_cnt_value))
                    f_w = stack.pop(-1)
                    f_z = stack.pop(-1)
                    f_y = stack.pop(-1)
                    f_x = stack.pop(-1)
                    gtx = Min(Value(1.0), Neg(Min(Value(0.0), Sub(f_x, flex_cnt))))
                    lty = Min(Value(1.0), Neg(Min(Value(0.0), Sub(flex_cnt, f_y))))
                    remap_x = Min(Max(Div(Sub(flex_cnt, f_x), (Sub(f_y, f_x))), Value(0.0)), Value(1.0))
                    gtey = Neg(Sub(Min(Value(1.0), Neg(Min(Value(0.0), Sub(flex_cnt, f_y)))), Value(1.0)))
                    ltez = Neg(Sub(Min(Value(1.0), Neg(Min(Value(0.0), Sub(f_z, flex_cnt)))), Value(1.0)))
                    gtz = Min(Value(1.0), Neg(Min(Value(0.0), Sub(f_z, flex_cnt))))
                    ltw = Min(Value(1.0), Neg(Min(Value(0.0), Sub(flex_cnt, f_w))))
                    remap_z = Sub(Value(1.0),
                                  Min(Max(Div(Sub(flex_cnt, f_z), (Sub(f_w, f_z))), Value(0.0)), Value(1.0)))
                    final_expr = Add(Add(Mul(Mul(gtx, lty), remap_x), Mul(gtey, ltez)), Mul(Mul(gtz, ltw), remap_z))
                    final_expr = Mul(final_expr, FetchController(get_flex_cnt(index)))
                    stack.append(final_expr)
                elif flex_op == "FLEX_OP_SUB":
                    right = stack.pop(-1)
                    left = stack.pop(-1)
                    stack.append(Sub(left, right))
                elif flex_op == "FLEX_OP_TWO_WAY_0":
                    mx = Max(Add(FetchController(get_flex_cnt(index)), Value(1.0)), Value(0.0))
                    mn = Min(mx, Value(1.0))
                    # NOTE(review): plain int 1 here where other branches use
                    # Value(1.0) — confirm Sub accepts raw numbers.
                    res = Sub(1, mn)
                    stack.append(res)
                elif flex_op == "FLEX_OP_TWO_WAY_1":
                    mx = Max(FetchController(get_flex_cnt(index)), Value(0.0))
                    mn = Min(mx, Value(1.0))
                    stack.append(mn)
                else:
                    print("Unknown OP", op)
            if len(stack) > 1 or not stack:
                print(f"failed to parse ({get_flex_desc(rule['m_nFlex'])}) flex rule")
                print(stack)
                continue
            final_expr = stack.pop(-1)
            # name = self.get_value('stereo_flexes').get(rule.flex_index, self.flex_names[rule.flex_index])
            name = get_flex_desc(rule['m_nFlex'])
            flex_rules[name] = final_expr
            # except:
            #     pass

        return flex_rules
| 48.85 | 116 | 0.519732 |
f9f65a7a06acc23b7af5863a95f11ebdd56e8a60 | 675 | py | Python | lab/refactoring/decompose_conditional.py | diyarkudrat/SPD-2.3-Testing-And-Architecture | dde26e3a6aa02deeb7c0941971841b4f9e16f3fa | [
"MIT"
] | null | null | null | lab/refactoring/decompose_conditional.py | diyarkudrat/SPD-2.3-Testing-And-Architecture | dde26e3a6aa02deeb7c0941971841b4f9e16f3fa | [
"MIT"
] | null | null | null | lab/refactoring/decompose_conditional.py | diyarkudrat/SPD-2.3-Testing-And-Architecture | dde26e3a6aa02deeb7c0941971841b4f9e16f3fa | [
"MIT"
] | null | null | null | # By Kami Bigdely
# Decompose conditional: You have a complicated conditional(if-then-else) statement. Extract
# methods from the condition, then part, and else part(s).
def make_alert_sound():
    """Signal (via print) that a toxin was found in the ingredients."""
    print('made alert sound.')


def make_accept_sound():
    """Signal (via print) that the food passed the toxin check."""
    print('made acceptance sound')


# BUG FIX: the original used set('sodium nitrate', 'sodium benzoate',
# 'sodium oxide'), which raises TypeError — set() accepts at most ONE
# (iterable) argument, and even set('sodium nitrate') would build a set of
# characters. A set literal builds the intended set of toxin names.
toxins = {'sodium nitrate', 'sodium benzoate', 'sodium oxide'}
ingredients = ['sodium benzoate']

for ingredient in ingredients:
    if ingredient in toxins:
        print('!!!')
        print('there is a toxin in the food!')
        print('!!!')
        make_alert_sound()
    else:
        print('***')
        print('Toxin Free')
        print('***')
        make_accept_sound()
34f9249087d91558a478c498a4def7e0aaea72c5 | 39,997 | py | Python | venv/lib/python3.8/site-packages/ansible/modules/file.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/ansible/modules/file.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/ansible/modules/file.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: file
version_added: historical
short_description: Manage files and file properties
extends_documentation_fragment: [files, action_common_attributes]
description:
- Set attributes of files, symlinks or directories.
- Alternatively, remove files, symlinks or directories.
- Many other modules support the same options as the C(file) module - including M(ansible.builtin.copy),
M(ansible.builtin.template), and M(ansible.builtin.assemble).
- For Windows targets, use the M(ansible.windows.win_file) module instead.
options:
path:
description:
- Path to the file being managed.
type: path
required: yes
aliases: [ dest, name ]
state:
description:
- If C(absent), directories will be recursively deleted, and files or symlinks will
be unlinked. In the case of a directory, if C(diff) is declared, you will see the files and folders deleted listed
under C(path_contents). Note that C(absent) will not cause C(file) to fail if the C(path) does
not exist as the state did not change.
- If C(directory), all intermediate subdirectories will be created if they
do not exist. Since Ansible 1.7 they will be created with the supplied permissions.
- If C(file), with no other options, returns the current state of C(path).
- If C(file), even with other options (such as C(mode)), the file will be modified if it exists but will NOT be created if it does not exist.
Set to C(touch) or use the M(ansible.builtin.copy) or M(ansible.builtin.template) module if you want to create the file if it does not exist.
- If C(hard), the hard link will be created or changed.
- If C(link), the symbolic link will be created or changed.
- If C(touch) (new in 1.4), an empty file will be created if the file does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
type: str
default: file
choices: [ absent, directory, file, hard, link, touch ]
src:
description:
- Path of the file to link to.
- This applies only to C(state=link) and C(state=hard).
- For C(state=link), this will also accept a non-existing path.
- Relative paths are relative to the file being created (C(path)) which is how
the Unix command C(ln -s SRC DEST) treats relative paths.
type: path
recurse:
description:
- Recursively set the specified file attributes on directory contents.
- This applies only when C(state) is set to C(directory).
type: bool
default: no
version_added: '1.1'
force:
description:
- >
Force the creation of the symlinks in two cases: the source file does
not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
C(path) file and create symlink to the C(src) file in place of it).
type: bool
default: no
follow:
description:
- This flag indicates that filesystem links, if they exist, should be followed.
- Previous to Ansible 2.5, this was C(no) by default.
type: bool
default: yes
version_added: '1.8'
modification_time:
description:
- This parameter indicates the time the file's modification time should be set to.
- Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
- Default is None meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
type: str
version_added: "2.7"
modification_time_format:
description:
- When used with C(modification_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
access_time:
description:
- This parameter indicates the time the file's access time should be set to.
- Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
- Default is C(None) meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
type: str
version_added: '2.7'
access_time_format:
description:
- When used with C(access_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
seealso:
- module: ansible.builtin.assemble
- module: ansible.builtin.copy
- module: ansible.builtin.stat
- module: ansible.builtin.template
- module: ansible.windows.win_file
attributes:
check_mode:
support: full
diff_mode:
details: permissions and ownership will be shown but file contents on absent/touch will not.
support: partial
platform:
platforms: posix
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Change file ownership, group and permissions
ansible.builtin.file:
path: /etc/foo.conf
owner: foo
group: foo
mode: '0644'
- name: Give insecure permissions to an existing file
ansible.builtin.file:
path: /work
owner: root
group: root
mode: '1777'
- name: Create a symbolic link
ansible.builtin.file:
src: /file/to/link/to
dest: /path/to/symlink
owner: foo
group: foo
state: link
- name: Create two hard links
ansible.builtin.file:
src: '/tmp/{{ item.src }}'
dest: '{{ item.dest }}'
state: hard
loop:
- { src: x, dest: y }
- { src: z, dest: k }
- name: Touch a file, using symbolic modes to set the permissions (equivalent to 0644)
ansible.builtin.file:
path: /etc/foo.conf
state: touch
mode: u=rw,g=r,o=r
- name: Touch the same file, but add/remove some permissions
ansible.builtin.file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
- name: Touch again the same file, but do not change times this makes the task idempotent
ansible.builtin.file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
modification_time: preserve
access_time: preserve
- name: Create a directory if it does not exist
ansible.builtin.file:
path: /etc/some_directory
state: directory
mode: '0755'
- name: Update modification and access time of given file
ansible.builtin.file:
path: /etc/some_file
state: file
modification_time: now
access_time: now
- name: Set access time based on seconds from epoch value
ansible.builtin.file:
path: /etc/another_file
state: file
access_time: '{{ "%Y%m%d%H%M.%S" | strftime(stat_var.stat.atime) }}'
- name: Recursively change ownership of a directory
ansible.builtin.file:
path: /etc/foo
state: directory
recurse: yes
owner: foo
group: foo
- name: Remove file (delete file)
ansible.builtin.file:
path: /etc/foo.txt
state: absent
- name: Recursively remove directory
ansible.builtin.file:
path: /etc/foo
state: absent
'''
RETURN = r'''
dest:
description: Destination file/path, equal to the value passed to I(path).
returned: state=touch, state=hard, state=link
type: str
sample: /path/to/file.txt
path:
description: Destination file/path, equal to the value passed to I(path).
returned: state=absent, state=directory, state=file
type: str
sample: /path/to/file.txt
'''
import errno
import os
import shutil
import sys
import time
from pwd import getpwnam, getpwuid
from grp import getgrnam, getgrgid
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
# There will only be a single AnsibleModule object per module
module = None
class AnsibleModuleError(Exception):
    """Exception carrying a ``results`` dict destined for ``module.fail_json``."""

    def __init__(self, results):
        # Kept verbatim as the payload handed to fail_json by the excepthook.
        self.results = results

    def __repr__(self):
        return 'AnsibleModuleError(results=%s)' % (self.results,)
class ParameterError(AnsibleModuleError):
    """Raised for invalid or conflicting module parameter combinations."""
    pass
class Sentinel(object):
    """Singleton marker distinct from ``None``.

    ``__new__`` returns the class object itself, so ``Sentinel`` and
    ``Sentinel()`` are the same object and can be compared with ``is``.
    """
    def __new__(cls, *args, **kwargs):
        return cls
def _ansible_excepthook(exc_type, exc_value, tb):
    """sys.excepthook replacement: turn AnsibleModuleError into fail_json output."""
    # Using an exception allows us to catch it if the calling code knows it can recover
    if issubclass(exc_type, AnsibleModuleError):
        module.fail_json(**exc_value.results)
    else:
        # Anything else falls through to the interpreter's default traceback.
        sys.__excepthook__(exc_type, exc_value, tb)
def additional_parameter_handling(params):
    """Additional parameter validation and reformatting.

    Mutates *params* in place: may rewrite ``path`` to point inside an
    existing directory, defaults ``state`` from the path's current state,
    and raises ParameterError for invalid recurse/src combinations.
    """
    # When path is a directory, rewrite the pathname to be the file inside of the directory
    # TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch?
    # I think this is where we want to be in the future:
    # when isdir(path):
    #     if state == absent: Remove the directory
    #     if state == touch: Touch the directory
    #     if state == directory: Assert the directory is the same as the one specified
    #     if state == file: place inside of the directory (use _original_basename)
    #     if state == link: place inside of the directory (use _original_basename. Fallback to src?)
    #     if state == hard: place inside of the directory (use _original_basename. Fallback to src?)
    if (params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict'))):
        basename = None

        # Prefer the original basename (set by action plugins) over src's basename.
        if params['_original_basename']:
            basename = params['_original_basename']
        elif params['src']:
            basename = os.path.basename(params['src'])

        if basename:
            params['path'] = os.path.join(params['path'], basename)

    # state should default to file, but since that creates many conflicts,
    # default state to 'current' when it exists.
    prev_state = get_state(to_bytes(params['path'], errors='surrogate_or_strict'))

    if params['state'] is None:
        if prev_state != 'absent':
            params['state'] = prev_state
        elif params['recurse']:
            params['state'] = 'directory'
        else:
            params['state'] = 'file'

    # make sure the target path is a directory when we're doing a recursive operation
    if params['recurse'] and params['state'] != 'directory':
        raise ParameterError(results={"msg": "recurse option requires state to be 'directory'",
                                      "path": params["path"]})

    # Fail if 'src' but no 'state' is specified
    if params['src'] and params['state'] not in ('link', 'hard'):
        raise ParameterError(results={'msg': "src option requires state to be 'link' or 'hard'",
                                      'path': params['path']})
def get_state(path):
    """Return the filesystem state of *path*.

    One of: 'absent', 'link', 'directory', 'hard' (regular file with more
    than one hard link) or 'file'.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    try:
        if not os.path.lexists(b_path):
            return 'absent'
        if os.path.islink(b_path):
            return 'link'
        if os.path.isdir(b_path):
            return 'directory'
        if os.stat(b_path).st_nlink > 1:
            return 'hard'
        # Could be many other things, but default to a regular file.
        return 'file'
    except OSError as e:
        # The path may have been removed out from under us.
        if e.errno == errno.ENOENT:
            return 'absent'
        raise
# This should be moved into the common file utilities
def recursive_set_attributes(b_path, follow, file_args, mtime, atime):
    """Apply file_args attributes and timestamps to everything under *b_path*.

    Walks the tree, updating each entry; when *follow* is true, symlink
    targets (and directory targets' contents, recursively) are updated too.
    Returns True if anything changed.
    """
    changed = False

    try:
        for b_root, b_dirs, b_files in os.walk(b_path):
            for b_fsobj in b_dirs + b_files:
                b_fsname = os.path.join(b_root, b_fsobj)
                if not os.path.islink(b_fsname):
                    tmp_file_args = file_args.copy()
                    tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
                    changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
                    changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)

                else:
                    # Change perms on the link
                    tmp_file_args = file_args.copy()
                    tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
                    changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
                    changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)

                    if follow:
                        b_fsname = os.path.join(b_root, os.readlink(b_fsname))
                        # The link target could be nonexistent
                        if os.path.exists(b_fsname):
                            if os.path.isdir(b_fsname):
                                # Link is a directory so change perms on the directory's contents
                                changed |= recursive_set_attributes(b_fsname, follow, file_args, mtime, atime)

                            # Change perms on the file pointed to by the link
                            tmp_file_args = file_args.copy()
                            tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
                            changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
                            changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
    except RuntimeError as e:
        # on Python3 "RecursionError" is raised which is derived from "RuntimeError"
        # TODO once this function is moved into the common file utilities, this should probably raise more general exception
        raise AnsibleModuleError(
            results={'msg': "Could not recursively set attributes on %s. Original error was: '%s'" % (to_native(b_path), to_native(e))}
        )

    return changed
def initial_diff(path, state, prev_state):
    """Build the before/after diff skeleton for a state transition on *path*.

    When a directory is about to be removed, the diff also records the full
    listing of directories and files that will disappear.
    """
    diff = {
        'before': {'path': path},
        'after': {'path': path},
    }

    if prev_state != state:
        diff['before']['state'] = prev_state
        diff['after']['state'] = state

    if state == 'absent' and prev_state == 'directory':
        directories = []
        files = []
        b_path = to_bytes(path, errors='surrogate_or_strict')
        for base_path, sub_folders, walked_files in os.walk(b_path):
            directories.extend(os.path.join(base_path, folder) for folder in sub_folders)
            files.extend(os.path.join(base_path, filename) for filename in walked_files)
        diff['before']['path_content'] = {
            'directories': directories,
            'files': files,
        }

    return diff
#
# States
#
def get_timestamp_for_time(formatted_time, time_format):
    """Translate a user-supplied time string into a timestamp.

    'preserve' -> None (leave the file's time alone), 'now' -> Sentinel
    (use the current time at application), anything else is parsed with
    *time_format* into seconds-since-epoch. Raises AnsibleModuleError on
    unparseable input.
    """
    if formatted_time == 'preserve':
        return None
    if formatted_time == 'now':
        return Sentinel
    try:
        parsed = time.strptime(formatted_time, time_format)
        return time.mktime(parsed)
    except (ValueError, OverflowError) as e:
        raise AnsibleModuleError(results={'msg': 'Error while obtaining timestamp for time %s using format %s: %s'
                                                 % (formatted_time, time_format, to_native(e, nonstring='simplerepr'))})
def update_timestamp_for_file(path, mtime, atime, diff=None):
    """Set mtime/atime on *path*, recording old/new values in *diff* if given.

    ``mtime``/``atime`` may be None (preserve), Sentinel (now) or a numeric
    timestamp. Returns True if the file was touched, False if nothing needed
    to change. Raises AnsibleModuleError on OS errors.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')

    try:
        # When mtime and atime are set to 'now', rely on utime(path, None) which does not require ownership of the file
        # https://github.com/ansible/ansible/issues/50943
        if mtime is Sentinel and atime is Sentinel:
            # It's not exact but we can't rely on os.stat(path).st_mtime after setting os.utime(path, None) as it may
            # not be updated. Just use the current time for the diff values
            mtime = atime = time.time()

            previous_mtime = os.stat(b_path).st_mtime
            previous_atime = os.stat(b_path).st_atime

            set_time = None
        else:
            # If both parameters are None 'preserve', nothing to do
            if mtime is None and atime is None:
                return False

            previous_mtime = os.stat(b_path).st_mtime
            previous_atime = os.stat(b_path).st_atime

            if mtime is None:
                mtime = previous_mtime
            elif mtime is Sentinel:
                mtime = time.time()

            if atime is None:
                atime = previous_atime
            elif atime is Sentinel:
                atime = time.time()

            # If both timestamps are already ok, nothing to do
            if mtime == previous_mtime and atime == previous_atime:
                return False

            set_time = (atime, mtime)

        os.utime(b_path, set_time)

        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            if 'after' not in diff:
                diff['after'] = {}
            if mtime != previous_mtime:
                diff['before']['mtime'] = previous_mtime
                diff['after']['mtime'] = mtime
            if atime != previous_atime:
                diff['before']['atime'] = previous_atime
                diff['after']['atime'] = atime
    except OSError as e:
        raise AnsibleModuleError(results={'msg': 'Error while updating modification or access time: %s'
                                                 % to_native(e, nonstring='simplerepr'), 'path': path})
    return True
def keep_backward_compatibility_on_timestamps(parameter, state):
    """Supply the historical default timestamp behaviour when unset.

    Pre-2.7, file states preserved times and touch used the current time;
    an explicitly provided *parameter* always wins.
    """
    if parameter is not None:
        return parameter
    if state in ('file', 'hard', 'directory', 'link'):
        return 'preserve'
    if state == 'touch':
        return 'now'
    return parameter
def execute_diff_peek(path):
    """Take a guess as to whether a file is a binary file.

    Reads up to 8 KiB and treats the presence of a NUL byte as binary.
    Unreadable files are assumed to be text.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    try:
        with open(b_path, 'rb') as peek:
            head = peek.read(8192)
    except Exception:
        # If we can't read the file, we're okay assuming it's text
        return False
    return b"\x00" in head
def ensure_absent(path):
    """Remove *path* (recursively for directories); no-op if already absent.

    Honours check mode. Returns the module result dict including the diff.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    result = {}

    if prev_state != 'absent':
        diff = initial_diff(path, 'absent', prev_state)

        if not module.check_mode:
            if prev_state == 'directory':
                try:
                    shutil.rmtree(b_path, ignore_errors=False)
                except Exception as e:
                    raise AnsibleModuleError(results={'msg': "rmtree failed: %s" % to_native(e)})
            else:
                try:
                    os.unlink(b_path)
                except OSError as e:
                    if e.errno != errno.ENOENT:  # It may already have been removed
                        raise AnsibleModuleError(results={'msg': "unlinking failed: %s " % to_native(e),
                                                          'path': path})

        result.update({'path': path, 'changed': True, 'diff': diff, 'state': 'absent'})
    else:
        result.update({'path': path, 'changed': False, 'state': 'absent'})

    return result
def execute_touch(path, follow, timestamps):
    """Touch *path*: create it empty if absent, then update attributes/times.

    Mirrors the command-line ``touch``. Honours check mode; if attribute
    setting fails right after creating a new file, the file is removed again.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    changed = False
    result = {'dest': path}
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])

    if not module.check_mode:
        if prev_state == 'absent':
            # Create an empty file if the filename did not already exist
            try:
                open(b_path, 'wb').close()
                changed = True
            except (OSError, IOError) as e:
                raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})

        # Update the attributes on the file
        diff = initial_diff(path, 'touch', prev_state)
        file_args = module.load_file_common_arguments(module.params)
        try:
            changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
            changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
        except SystemExit as e:
            if e.code:  # this is the exit code passed to sys.exit, not a constant -- pylint: disable=using-constant-test
                # We take this to mean that fail_json() was called from
                # somewhere in basic.py
                if prev_state == 'absent':
                    # If we just created the file we can safely remove it
                    os.remove(b_path)
            raise

        result['changed'] = changed
        result['diff'] = diff

    return result
def ensure_file_attributes(path, follow, timestamps):
    """Apply attributes/timestamps to an existing regular file (state=file).

    Never creates the file; follows symlinks to their target when *follow*
    is true and fails if the path is not a regular file or hard link.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    file_args = module.load_file_common_arguments(module.params)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])

    if prev_state != 'file':
        if follow and prev_state == 'link':
            # follow symlink and operate on original
            b_path = os.path.realpath(b_path)
            path = to_native(b_path, errors='strict')
            prev_state = get_state(b_path)
            file_args['path'] = path

        if prev_state not in ('file', 'hard'):
            # file is not absent and any other state is a conflict
            raise AnsibleModuleError(results={'msg': 'file (%s) is %s, cannot continue' % (path, prev_state),
                                              'path': path, 'state': prev_state})

    diff = initial_diff(path, 'file', prev_state)
    changed = module.set_fs_attributes_if_different(file_args, False, diff, expand=False)
    changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
    return {'path': path, 'changed': changed, 'diff': diff}
def ensure_directory(path, follow, recurse, timestamps):
    """Create *path* as a directory (mkdir -p style) or update an existing one.

    Applies attributes/timestamps to every newly created intermediate
    directory; with *recurse*, also applies them to all existing contents.
    Honours check mode. Fails if the path exists as a non-directory.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    file_args = module.load_file_common_arguments(module.params)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])

    # For followed symlinks, we need to operate on the target of the link
    if follow and prev_state == 'link':
        b_path = os.path.realpath(b_path)
        path = to_native(b_path, errors='strict')
        file_args['path'] = path
        prev_state = get_state(b_path)

    changed = False
    diff = initial_diff(path, 'directory', prev_state)

    if prev_state == 'absent':
        # Create directory and assign permissions to it
        if module.check_mode:
            return {'path': path, 'changed': True, 'diff': diff}
        curpath = ''

        try:
            # Split the path so we can apply filesystem attributes recursively
            # from the root (/) directory for absolute paths or the base path
            # of a relative path.  We can then walk the appropriate directory
            # path to apply attributes.
            # Something like mkdir -p with mode applied to all of the newly created directories
            for dirname in path.strip('/').split('/'):
                curpath = '/'.join([curpath, dirname])
                # Remove leading slash if we're creating a relative path
                if not os.path.isabs(path):
                    curpath = curpath.lstrip('/')
                b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
                if not os.path.exists(b_curpath):
                    try:
                        os.mkdir(b_curpath)
                        changed = True
                    except OSError as ex:
                        # Possibly something else created the dir since the os.path.exists
                        # check above. As long as it's a dir, we don't need to error out.
                        if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
                            raise
                tmp_file_args = file_args.copy()
                tmp_file_args['path'] = curpath
                changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False)
                changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
        except Exception as e:
            raise AnsibleModuleError(results={'msg': 'There was an issue creating %s as requested:'
                                                     ' %s' % (curpath, to_native(e)),
                                              'path': path})
        return {'path': path, 'changed': changed, 'diff': diff}

    elif prev_state != 'directory':
        # We already know prev_state is not 'absent', therefore it exists in some form.
        raise AnsibleModuleError(results={'msg': '%s already exists as a %s' % (path, prev_state),
                                          'path': path})

    #
    # previous state == directory
    #

    changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
    changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
    if recurse:
        changed |= recursive_set_attributes(b_path, follow, file_args, mtime, atime)

    return {'path': path, 'changed': changed, 'diff': diff}
def ensure_symlink(path, src, follow, force, timestamps):
    """Ensure ``path`` is a symlink pointing at ``src``.

    Creates the link, or (with ``force``) replaces an existing file/hardlink/
    empty directory atomically via a temporary link + rename.  Applies file
    attributes and timestamps afterwards and returns the module result dict
    with 'dest', 'src', 'changed' and 'diff' keys.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    b_src = to_bytes(src, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
    # source is both the source of a symlink or an informational passing of the src for a template module
    # or copy module, even if this module never uses it, it is needed to key off some things
    if src is None:
        if follow:
            # use the current target of the link as the source
            src = to_native(os.readlink(b_path), errors='strict')
            b_src = to_bytes(src, errors='surrogate_or_strict')
    # Resolve the link target relative to the link's own directory (or to the
    # directory itself when path is an existing non-link directory) so the
    # existence check below works for relative targets.
    if not os.path.islink(b_path) and os.path.isdir(b_path):
        relpath = path
    else:
        b_relpath = os.path.dirname(b_path)
        relpath = to_native(b_relpath, errors='strict')
    absrc = os.path.join(relpath, src)
    b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
    if not force and not os.path.exists(b_absrc):
        raise AnsibleModuleError(results={'msg': 'src file does not exist, use "force=yes" if you'
                                                 ' really want to create the link: %s' % absrc,
                                          'path': path, 'src': src})
    if prev_state == 'directory':
        if not force:
            raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
                                                     % (prev_state, path),
                                              'path': path})
        elif os.listdir(b_path):
            # refuse to replace a directory that has files in it
            raise AnsibleModuleError(results={'msg': 'the directory %s is not empty, refusing to'
                                                     ' convert it' % path,
                                              'path': path})
    elif prev_state in ('file', 'hard') and not force:
        raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
                                                 % (prev_state, path),
                                          'path': path})
    diff = initial_diff(path, 'link', prev_state)
    changed = False
    if prev_state in ('hard', 'file', 'directory', 'absent'):
        # Anything that is not already a link must be (re)created.
        changed = True
    elif prev_state == 'link':
        # Already a link: only changed if it points somewhere else.
        b_old_src = os.readlink(b_path)
        if b_old_src != b_src:
            diff['before']['src'] = to_native(b_old_src, errors='strict')
            diff['after']['src'] = src
            changed = True
    else:
        raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
    if changed and not module.check_mode:
        if prev_state != 'absent':
            # try to replace atomically
            b_tmppath = to_bytes(os.path.sep).join(
                [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
            )
            try:
                if prev_state == 'directory':
                    os.rmdir(b_path)
                os.symlink(b_src, b_tmppath)
                os.rename(b_tmppath, b_path)
            except OSError as e:
                # Best-effort cleanup of the temporary link before failing.
                if os.path.exists(b_tmppath):
                    os.unlink(b_tmppath)
                raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})
        else:
            try:
                os.symlink(b_src, b_path)
            except OSError as e:
                raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})
    if module.check_mode and not os.path.exists(b_path):
        return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
    # Now that we might have created the symlink, get the arguments.
    # We need to do it now so we can properly follow the symlink if needed
    # because load_file_common_arguments sets 'path' according
    # the value of follow and the symlink existence.
    file_args = module.load_file_common_arguments(module.params)
    # Whenever we create a link to a nonexistent target we know that the nonexistent target
    # cannot have any permissions set on it. Skip setting those and emit a warning (the user
    # can set follow=False to remove the warning)
    if follow and os.path.islink(b_path) and not os.path.exists(file_args['path']):
        module.warn('Cannot set fs attributes on a non-existent symlink target. follow should be'
                    ' set to False to avoid this.')
    else:
        changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
        changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
    return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
def ensure_hardlink(path, src, follow, force, timestamps):
    """Ensure ``path`` is a hard link to ``src``.

    ``src`` is mandatory and must exist.  An existing different file/link at
    ``path`` is only replaced with ``force``; replacement goes through a
    temporary link + rename for atomicity.  Returns the module result dict
    ('dest', 'src', 'changed', 'diff').
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    b_src = to_bytes(src, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    file_args = module.load_file_common_arguments(module.params)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
    # src is the source of a hardlink. We require it if we are creating a new hardlink.
    # We require path in the argument_spec so we know it is present at this point.
    if src is None:
        raise AnsibleModuleError(results={'msg': 'src is required for creating new hardlinks'})
    if not os.path.exists(b_src):
        raise AnsibleModuleError(results={'msg': 'src does not exist', 'dest': path, 'src': src})
    diff = initial_diff(path, 'hard', prev_state)
    changed = False
    if prev_state == 'absent':
        changed = True
    elif prev_state == 'link':
        # A symlink at the destination: changed if it points elsewhere.
        b_old_src = os.readlink(b_path)
        if b_old_src != b_src:
            diff['before']['src'] = to_native(b_old_src, errors='strict')
            diff['after']['src'] = src
            changed = True
    elif prev_state == 'hard':
        # Same inode means it is already the requested hard link.
        if not os.stat(b_path).st_ino == os.stat(b_src).st_ino:
            changed = True
            if not force:
                raise AnsibleModuleError(results={'msg': 'Cannot link, different hard link exists at destination',
                                                  'dest': path, 'src': src})
    elif prev_state == 'file':
        changed = True
        if not force:
            raise AnsibleModuleError(results={'msg': 'Cannot link, %s exists at destination' % prev_state,
                                              'dest': path, 'src': src})
    elif prev_state == 'directory':
        changed = True
        # NOTE(review): re-checking existence/inode of a *directory* against
        # src looks inconsistent with the 'hard' branch above — presumably kept
        # for historical behavior; confirm before restructuring.
        if os.path.exists(b_path):
            if os.stat(b_path).st_ino == os.stat(b_src).st_ino:
                return {'path': path, 'changed': False}
            elif not force:
                raise AnsibleModuleError(results={'msg': 'Cannot link: different hard link exists at destination',
                                                  'dest': path, 'src': src})
    else:
        raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
    if changed and not module.check_mode:
        if prev_state != 'absent':
            # try to replace atomically
            b_tmppath = to_bytes(os.path.sep).join(
                [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
            )
            try:
                if prev_state == 'directory':
                    if os.path.exists(b_path):
                        try:
                            os.unlink(b_path)
                        except OSError as e:
                            if e.errno != errno.ENOENT:  # It may already have been removed
                                raise
                os.link(b_src, b_tmppath)
                os.rename(b_tmppath, b_path)
            except OSError as e:
                # Best-effort cleanup of the temporary link before failing.
                if os.path.exists(b_tmppath):
                    os.unlink(b_tmppath)
                raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})
        else:
            try:
                os.link(b_src, b_path)
            except OSError as e:
                raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})
    if module.check_mode and not os.path.exists(b_path):
        return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
    changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
    changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
    return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
def check_owner_exists(module, owner):
    """Warn (check mode) when the requested owner cannot be looked up.

    ``owner`` may be a numeric uid or a user name.  A failed lookup is only a
    warning, not an error, because the user may legitimately be created by an
    earlier task when the play runs for real.
    """
    try:
        uid = int(owner)
    except ValueError:
        # Not numeric: treat it as a user name.
        try:
            getpwnam(owner).pw_uid
        except KeyError:
            module.warn('failed to look up user %s. Create user up to this point in real play' % owner)
    else:
        # Keep the try body minimal: only int() should feed the ValueError
        # fallback above, never an unexpected error from the uid lookup.
        try:
            getpwuid(uid).pw_name
        except KeyError:
            module.warn('failed to look up user with uid %s. Create user up to this point in real play' % uid)
def check_group_exists(module, group):
    """Warn (check mode) when the requested group cannot be looked up.

    ``group`` may be a numeric gid or a group name; a failed lookup is only a
    warning because the group may be created by an earlier task in a real run.
    """
    try:
        gid = int(group)
    except ValueError:
        # Not numeric: treat it as a group name.
        try:
            getgrnam(group).gr_gid
        except KeyError:
            module.warn('failed to look up group %s. Create group up to this point in real play' % group)
    else:
        # Keep the try body minimal so only int() feeds the ValueError fallback.
        try:
            getgrgid(gid).gr_name
        except KeyError:
            module.warn('failed to look up group with gid %s. Create group up to this point in real play' % gid)
def main():
    """Entry point of the Ansible ``file`` module.

    Parses the module arguments, performs check-mode owner/group existence
    warnings, normalises timestamp options, and dispatches to the handler for
    the requested ``state``.
    """
    global module
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['absent', 'directory', 'file', 'hard', 'link', 'touch']),
            path=dict(type='path', required=True, aliases=['dest', 'name']),
            _original_basename=dict(type='str'),  # Internal use only, for recursive ops
            recurse=dict(type='bool', default=False),
            force=dict(type='bool', default=False),  # Note: Should not be in file_common_args in future
            follow=dict(type='bool', default=True),  # Note: Different default than file_common_args
            _diff_peek=dict(type='bool'),  # Internal use only, for internal checks in the action plugins
            src=dict(type='path'),  # Note: Should not be in file_common_args in future
            modification_time=dict(type='str'),
            modification_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
            access_time=dict(type='str'),
            access_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )
    # When we rewrite basic.py, we will do something similar to this on instantiating an AnsibleModule
    sys.excepthook = _ansible_excepthook
    additional_parameter_handling(module.params)
    params = module.params
    state = params['state']
    recurse = params['recurse']
    force = params['force']
    follow = params['follow']
    path = params['path']
    src = params['src']
    # In check mode only warn about owner/group that cannot be resolved yet —
    # they might be created by earlier tasks during a real run.
    if module.check_mode and state != 'absent':
        file_args = module.load_file_common_arguments(module.params)
        if file_args['owner']:
            check_owner_exists(module, file_args['owner'])
        if file_args['group']:
            check_group_exists(module, file_args['group'])
    timestamps = {}
    timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
    timestamps['modification_time_format'] = params['modification_time_format']
    timestamps['access_time'] = keep_backward_compatibility_on_timestamps(params['access_time'], state)
    timestamps['access_time_format'] = params['access_time_format']
    # short-circuit for diff_peek
    if params['_diff_peek'] is not None:
        appears_binary = execute_diff_peek(to_bytes(path, errors='surrogate_or_strict'))
        module.exit_json(path=path, changed=False, appears_binary=appears_binary)
    # Dispatch on the requested state; each handler returns the result dict.
    if state == 'file':
        result = ensure_file_attributes(path, follow, timestamps)
    elif state == 'directory':
        result = ensure_directory(path, follow, recurse, timestamps)
    elif state == 'link':
        result = ensure_symlink(path, src, follow, force, timestamps)
    elif state == 'hard':
        result = ensure_hardlink(path, src, follow, force, timestamps)
    elif state == 'touch':
        result = execute_touch(path, follow, timestamps)
    elif state == 'absent':
        result = ensure_absent(path)
    module.exit_json(**result)
# Standard module entry point: run only when executed directly by Ansible.
if __name__ == '__main__':
    main()
| 41.106886 | 147 | 0.610396 |
87c322ba9510876f577e3451843d2cf8905e1dcd | 2,111 | py | Python | rc/ycm_conf_for_uboot.py | fedorov7/vinux | ba616b706af4dde7372e038e93f2f3548fdbdc40 | [
"MIT"
] | 210 | 2018-01-04T11:37:04.000Z | 2022-02-22T18:25:02.000Z | rc/ycm_conf_for_uboot.py | Gerry-Lee/vinux | c798354a5cc4e19d04c47fdb1e334e98709a9a2f | [
"MIT"
] | 27 | 2018-01-25T15:52:29.000Z | 2022-02-11T03:03:38.000Z | rc/ycm_conf_for_uboot.py | Gerry-Lee/vinux | c798354a5cc4e19d04c47fdb1e334e98709a9a2f | [
"MIT"
] | 39 | 2018-01-21T15:54:33.000Z | 2021-06-14T07:01:02.000Z | # .ycm_extra_conf.py for kernel
import os
# Attention:
# File path not starting with / or = will be expanded.
flags_c = [
'-Wall',
'-Wundef',
'-Wstrict-prototypes',
'-Wno-trigraphs',
'-fno-strict-aliasing',
'-fno-common',
'-Werror-implicit-function-declaration',
'-Wno-format-security',
'-D__KERNEL__',
'-DMODULE',
'-x', 'c',
'-std=gnu89',
'-nostdinc',
# Not sure if sysroot works in clang
# Will be path mangled
'-I', 'include',
'-include', 'include/config.h', # IMPORTANT
'-include', 'include/common.h', # IMPORTANT
'-include', 'include/generated/version_autogenerated.h', # IMPORTANT
'-include', 'include/generated/timestamp_autogenerated.h', # IMPORTANT
'-include', 'include/u-boot/u-boot.lds.h', # IMPORTANT
]
def DirectoryOfThisScript():
    """Return the absolute directory containing this configuration script."""
    script_location = os.path.abspath(__file__)
    directory, _ = os.path.split(script_location)
    return directory
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    """Return a copy of *flags* with relative include-style paths anchored
    at *working_directory*.

    Handles both the split form ('-I', 'path') and the fused form
    ('-Ipath', '--sysroot=path').  Arguments starting with '/' or '=' are
    left untouched.  When *working_directory* is empty, *flags* is returned
    unchanged.
    """
    if not working_directory:
        return flags
    path_prefixes = ['-isystem', '-I', '-iquote', '--sysroot=', '-include']
    absolute_flags = []
    expect_path = False
    for flag in flags:
        current = flag
        if expect_path:
            expect_path = False
            if not current.startswith('/') and not current.startswith('='):
                current = os.path.join(working_directory, current)
        for prefix in path_prefixes:
            if flag == prefix:
                # The path comes as the *next* argument.
                expect_path = True
                break
            if flag.startswith(prefix):
                # Fused option+path, e.g. '-Iinclude'.
                current = prefix + os.path.join(working_directory, flag[len(prefix):])
                break
        if current:
            absolute_flags.append(current)
    return absolute_flags
def FlagsForFile(filename):
    """YCM hook: compute the compile flags for *filename*.

    C++ sources are unsupported by this configuration (assertion fires);
    everything else receives the kernel C flags with relative include paths
    resolved against the current working directory.
    """
    extension = os.path.splitext(filename)[1]
    if extension == '.cpp':
        assert False
    working_directory = os.getcwd()
    resolved = MakeRelativePathsInFlagsAbsolute(flags_c, working_directory)
    return {
        'flags': resolved,
        'do_cache': True
    }
| 27.415584 | 76 | 0.60919 |
e91830886f1c8568ba767d56063d9d870fe737c2 | 1,995 | py | Python | tests/test_tempfile.py | yanikou19/monty | 822ae841f7d29bd7464287fd99b51da6e5960088 | [
"MIT"
] | null | null | null | tests/test_tempfile.py | yanikou19/monty | 822ae841f7d29bd7464287fd99b51da6e5960088 | [
"MIT"
] | null | null | null | tests/test_tempfile.py | yanikou19/monty | 822ae841f7d29bd7464287fd99b51da6e5960088 | [
"MIT"
] | null | null | null | __author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2014, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '1/24/14'
import unittest
import shutil
import os
from io import open
from monty.tempfile import ScratchDir
test_dir = os.path.join(os.path.dirname(__file__), 'test_files')
class ScratchDirTest(unittest.TestCase):
    """Exercise monty's ScratchDir context manager.

    setUp chdirs into the test-files directory and creates a scratch root;
    tearDown restores the cwd and removes the scratch root.  The ordering of
    chdir/mkdir/rmtree is significant for the assertions below.
    """
    def setUp(self):
        # Remember the original cwd so tearDown can restore it.
        self.cwd = os.getcwd()
        os.chdir(test_dir)
        self.scratch_root = os.path.join(test_dir, "..", "..", "tempscratch")
        os.mkdir(self.scratch_root)
    def test_with_copy(self):
        # Files are copied into the scratch dir on enter and back on exit.
        with ScratchDir(self.scratch_root, copy_from_current_on_enter=True,
                        copy_to_current_on_exit=True) as d:
            with open("scratch_text", "w") as f:
                f.write(u"write")
            files = os.listdir(d)
            self.assertIn("scratch_text", files)
            self.assertIn("empty_file.txt", files)
        # Make sure the temporary directory is deleted on exit.
        self.assertFalse(os.path.exists(d))
        files = os.listdir(".")
        self.assertIn("scratch_text", files)
        os.remove("scratch_text")
    def test_no_copy(self):
        # With copying disabled, nothing crosses the scratch boundary.
        with ScratchDir(self.scratch_root, copy_from_current_on_enter=False,
                        copy_to_current_on_exit=False) as d:
            with open("scratch_text", "w") as f:
                f.write(u"write")
            files = os.listdir(d)
            self.assertIn("scratch_text", files)
            self.assertNotIn("empty_file.txt", files)
        # Make sure the temporary directory is deleted on exit.
        self.assertFalse(os.path.exists(d))
        files = os.listdir(".")
        self.assertNotIn("scratch_text", files)
    def test_bad_root(self):
        # A nonexistent root falls back to the current directory (test_dir).
        with ScratchDir("bad_groot") as d:
            self.assertEqual(d, test_dir)
    def tearDown(self):
        os.chdir(self.cwd)
        shutil.rmtree(self.scratch_root)
# Allow running this test module directly without a test runner.
if __name__ == "__main__":
    unittest.main()
| 28.913043 | 77 | 0.622055 |
0702b9baab5151f34e0bfcd9b27721942d0e5a31 | 2,240 | py | Python | tests/test_visitors/test_ast/test_conditions/test_implicit_in.py | cdhiraj40/wemake-python-styleguide | 7cef9be081d594c30045b7a98cae77a9be46e1aa | [
"MIT"
] | 1,931 | 2018-03-17T13:52:45.000Z | 2022-03-27T09:39:17.000Z | tests/test_visitors/test_ast/test_conditions/test_implicit_in.py | cdhiraj40/wemake-python-styleguide | 7cef9be081d594c30045b7a98cae77a9be46e1aa | [
"MIT"
] | 2,231 | 2018-03-09T21:19:05.000Z | 2022-03-31T08:35:37.000Z | tests/test_visitors/test_ast/test_conditions/test_implicit_in.py | cdhiraj40/wemake-python-styleguide | 7cef9be081d594c30045b7a98cae77a9be46e1aa | [
"MIT"
] | 492 | 2018-05-18T21:20:28.000Z | 2022-03-20T14:11:50.000Z | import pytest
from wemake_python_styleguide.violations.refactoring import (
ImplicitInConditionViolation,
)
from wemake_python_styleguide.visitors.ast.conditions import (
ImplicitBoolPatternsVisitor,
)
# Correct:
eq_and = '{0} == some1 and {1} == some2'
noteq_or = '{0} != some1 or {1} != some2'
# Wrong:
eq_or = '{0} == some1 or {1} == some2'
noteq_and = '{0} != some1 and {1} != some2'
# Comparisons over *different* operands (e.g. `x == a and y == b`) are
# legitimate, so the visitor must raise no violation for any pattern here.
@pytest.mark.parametrize('code', [
    eq_and,
    noteq_or,
    eq_or,
    noteq_and,
])
@pytest.mark.parametrize(('first', 'second'), [
    ('first', 'second'),
    ('one.attr', 'one'),
    ('first', 'first()'),
    ('value.method()', 'value.method'),
    ('value.method(1)', 'value.method(2)'),
])
def test_different_in_values(
    code,
    first,
    second,
    assert_errors,
    parse_ast_tree,
    default_options,
):
    """Testing regular conditions."""
    tree = parse_ast_tree(code.format(first, second))
    visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
    visitor.run()
    assert_errors(visitor, [])
# Same operand on both sides is fine with `== ... and` / `!= ... or`
# (the safe combinations), so no violation is expected.
@pytest.mark.parametrize('code', [
    eq_and,
    noteq_or,
])
@pytest.mark.parametrize(('first', 'second'), [
    ('first', 'first'),
    ('one.attr', 'one.attr'),
    ('first()', 'first()'),
    ('value.method(1)', 'value.method(2)'),
])
def test_safe_patterns_in_values(
    code,
    first,
    second,
    assert_errors,
    parse_ast_tree,
    default_options,
):
    """Testing safe in patterns."""
    tree = parse_ast_tree(code.format(first, second))
    visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
    visitor.run()
    assert_errors(visitor, [])
# `x == a or x == b` / `x != a and x != b` over the *same* operand should be
# written with `in` / `not in`, so the violation must fire.
@pytest.mark.parametrize('code', [
    eq_or,
    noteq_and,
])
@pytest.mark.parametrize(('first', 'second'), [
    ('first', 'first'),
    ('one.attr', 'one.attr'),
    ('first()', 'first()'),
    ('value.method(1)', 'value.method(1)'),
])
def test_wrong_patterns_in_values(
    code,
    first,
    second,
    assert_errors,
    parse_ast_tree,
    default_options,
):
    """Testing safe in patterns."""
    tree = parse_ast_tree(code.format(first, second))
    visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
    visitor.run()
    assert_errors(visitor, [ImplicitInConditionViolation])
| 21.747573 | 69 | 0.6375 |
5988c0bd442517bbf08d503a774230ae046bae08 | 1,033 | py | Python | snake_tail/download.py | countercept/snake-tail | 572468e6c8a39fbf0f9edad3ab82d6d05bb946a4 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T08:46:10.000Z | 2020-07-23T08:46:10.000Z | snake_tail/download.py | countercept/snake-tail | 572468e6c8a39fbf0f9edad3ab82d6d05bb946a4 | [
"BSD-3-Clause"
] | 3 | 2020-01-15T13:29:52.000Z | 2020-08-16T16:40:49.000Z | snake_tail/download.py | countercept/snake-tail | 572468e6c8a39fbf0f9edad3ab82d6d05bb946a4 | [
"BSD-3-Clause"
] | 3 | 2018-10-24T19:44:23.000Z | 2021-09-06T11:51:48.000Z | from os import path
from clint.textui import progress
import requests
from snake_tail import SNAKE_URL
def download(sha256_digest, output_dir=None, json=False, verify=True):
    """Stream a sample from the snake server and save it to disk.

    On success the file name comes from the Content-Disposition header and a
    progress bar is shown while writing 1 KiB chunks.  On failure the server's
    JSON status/message is printed (raw JSON if ``json`` is truthy — note the
    parameter shadows the ``json`` module name).
    """
    resp = requests.get(SNAKE_URL + "/download/" + sha256_digest, stream=True, verify=verify)
    if resp.ok:
        # NOTE(review): file name is taken verbatim from the server header; a
        # hostile server could inject path separators (path traversal).
        # Consider sanitising with os.path.basename — confirm threat model.
        file_path = resp.headers['Content-Disposition'].split('filename=')[1].split('"')[1]
        if output_dir:
            file_path = path.join(path.abspath(path.expanduser(output_dir)), file_path)
        with open(file_path, "wb") as f:
            file_size = int(resp.headers['content-length'])
            for chunk in progress.bar(resp.iter_content(chunk_size=1024), expected_size=(file_size / 1024) + 1):
                if chunk:
                    f.write(chunk)
                    f.flush()
    else:
        resp_json = resp.json()
        if json:
            print(resp_json)
        else:
            print("Status: {}".format(resp_json['status'].capitalize()))
            print("Message: {}".format(resp_json['message']))
6eebd3bde6ac0ecd7af905ceac70c240a0bb5f0c | 1,454 | py | Python | userbot/plugins/autopic.py | SHER321/DedSec-BOT | f95add7e989fbff016eb1ff9d385a23aa1a9f0d7 | [
"MIT"
] | null | null | null | userbot/plugins/autopic.py | SHER321/DedSec-BOT | f95add7e989fbff016eb1ff9d385a23aa1a9f0d7 | [
"MIT"
] | null | null | null | userbot/plugins/autopic.py | SHER321/DedSec-BOT | f95add7e989fbff016eb1ff9d385a23aa1a9f0d7 | [
"MIT"
] | null | null | null | import os
from datetime import datetime
from PIL import Image, ImageDraw, ImageFont
from pySmartDL import SmartDL
from telethon.tl import functions
import asyncio
import shutil
from userbot.utils import admin_cmd
FONT_FILE_TO_USE = "/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf"
#@command(pattern="^.autopic", outgoing=True)
@borg.on(admin_cmd(pattern=r"autopic"))
async def autopic(event):
    """Userbot command: periodically stamp the downloaded clock image with the
    current time/date and upload it as the Telegram profile photo."""
    downloaded_file_name = "userbot/original_pic.png"
    downloader = SmartDL(Var.DOWNLOAD_PFP_URL_CLOCK, downloaded_file_name, progress_bar=False)
    downloader.start(blocking=False)
    photo = "userbot/photo_pfp.png"
    # NOTE(review): this busy-waits until the download finishes and then falls
    # into an endless update loop on the first iteration; the outer `while`
    # condition is never re-evaluated once the inner loop starts.
    while not downloader.isFinished():
        place_holder = None
        counter = -30
        while True:
            shutil.copy(downloaded_file_name, photo)
            # NOTE(review): `im` is opened but never used — presumably leftover.
            im = Image.open(photo)
            current_time = datetime.now().strftime("Time: %H:%M \n Date: %d.%m.%y \n")
            img = Image.open(photo)
            drawn_text = ImageDraw.Draw(img)
            fnt = ImageFont.truetype(FONT_FILE_TO_USE, 35)
            drawn_text.text((300, 500), current_time, font=fnt, fill=(255, 255, 255))
            img.save(photo)
            file = await bot.upload_file(photo)  # pylint:disable=E0602
            try:
                await bot(functions.photos.UploadProfilePhotoRequest(  # pylint:disable=E0602
                    file
                ))
                os.remove(photo)
                counter -= 30
                await asyncio.sleep(60)
            # NOTE(review): bare except silently ends the loop on any error.
            except:
                return
1e635dd1b647a6717d0bea17a763ac368ea9286b | 1,157 | py | Python | python/ray/serve/tests/conftest.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | 3 | 2021-07-22T17:03:33.000Z | 2021-09-20T15:46:25.000Z | python/ray/serve/tests/conftest.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | 77 | 2021-06-05T07:04:56.000Z | 2022-03-26T07:04:33.000Z | python/ray/serve/tests/conftest.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | 2 | 2021-12-12T14:51:29.000Z | 2022-01-23T00:14:00.000Z | import os
import pytest
import ray
from ray import serve
# Optionally make the Serve controller crash randomly after checkpoints to
# exercise recovery paths.  Environment variables are strings, so compare
# against "1" — the previous `== 1` (int) comparison could never be true.
if os.environ.get("RAY_SERVE_INTENTIONALLY_CRASH", "0") == "1":
    serve.controller._CRASH_AFTER_CHECKPOINT_PROBABILITY = 0.5
# Session-scoped Ray + Serve instance shared by every test in the session;
# starting Ray once keeps the suite fast.
@pytest.fixture(scope="session")
def _shared_serve_instance():
    # Note(simon):
    # This line should be not turned on on master because it leads to very
    # spammy and not useful log in case of a failure in CI.
    # To run locally, please use this instead.
    # SERVE_LOG_DEBUG=1 pytest -v -s test_api.py
    # os.environ["SERVE_LOG_DEBUG"] = "1" <- Do not uncomment this.
    # Overriding task_retry_delay_ms to relaunch actors more quickly
    ray.init(
        num_cpus=36,
        namespace="default_test_namespace",
        _metrics_export_port=9999,
        _system_config={
            "metrics_report_interval_ms": 1000,
            "task_retry_delay_ms": 50
        })
    yield serve.start(detached=True)
# Per-test wrapper around the shared instance: yields it, then deletes all
# deployments so state and names do not leak between tests.
@pytest.fixture
def serve_instance(_shared_serve_instance):
    yield _shared_serve_instance
    # Clear all state between tests to avoid naming collisions.
    for deployment in serve.list_deployments().values():
        deployment.delete()
6022a6fc653f97bc13027d1ebb969206c490f198 | 14,066 | py | Python | pyabsa/core/apc/prediction/sentiment_classifier.py | lpfy/PyABSA | 01facb92012f42ec7a43cc9f5de7fa0eddd1c9b6 | [
"MIT"
] | null | null | null | pyabsa/core/apc/prediction/sentiment_classifier.py | lpfy/PyABSA | 01facb92012f42ec7a43cc9f5de7fa0eddd1c9b6 | [
"MIT"
] | null | null | null | pyabsa/core/apc/prediction/sentiment_classifier.py | lpfy/PyABSA | 01facb92012f42ec7a43cc9f5de7fa0eddd1c9b6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# file: sentiment_classifier.py
# author: yangheng <yangheng@m.scnu.edu.cn>
# Copyright (C) 2020. All Rights Reserved.
import json
import os
import pickle
import random
import numpy
import torch
from findfile import find_file
from termcolor import colored
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, AutoModel
from pyabsa.core.apc.classic.__glove__.dataset_utils.data_utils_for_training import build_embedding_matrix, build_tokenizer
from pyabsa.core.apc.models.ensembler import APCEnsembler
from pyabsa.utils.pyabsa_utils import print_args, TransformerConnectionError
from pyabsa.functional.dataset import detect_infer_dataset
from pyabsa.core.apc.models import (APCModelList,
GloVeAPCModelList,
BERTBaselineAPCModelList
)
from pyabsa.core.apc.classic.__bert__.dataset_utils.data_utils_for_inferring import BERTBaselineABSADataset
from pyabsa.core.apc.classic.__glove__.dataset_utils.data_utils_for_inferring import GloVeABSADataset
from pyabsa.core.apc.dataset_utils.apc_utils import LABEL_PADDING
from pyabsa.core.apc.dataset_utils.data_utils_for_inferring import ABSADataset
class SentimentClassifier:
    def __init__(self, model_arg=None, sentiment_map=None, eval_batch_size=128):
        '''
        from_train_model: load inferring_tutorials model from trained model
        '''
        # ``model_arg`` is either the (model, opt, tokenizer) triple handed
        # over directly after training, or a path to a saved checkpoint dir.
        self.initializers = {
            'xavier_uniform_': torch.nn.init.xavier_uniform_,
            'xavier_normal_': torch.nn.init.xavier_normal,
            'orthogonal_': torch.nn.init.orthogonal_
        }
        # load from a training
        if not isinstance(model_arg, str):
            print('Load sentiment classifier from training')
            self.model = model_arg[0]
            self.opt = model_arg[1]
            self.tokenizer = model_arg[2]
        else:
            # load from a model path
            try:
                if 'fine-tuned' in model_arg:
                    raise ValueError('Do not support to directly load a fine-tuned model, please load a .state_dict or .model instead!')
                print('Load sentiment classifier from', model_arg)
                # Locate the checkpoint artefacts inside the given directory.
                state_dict_path = find_file(model_arg, '.state_dict', exclude_key=['__MACOSX'])
                model_path = find_file(model_arg, '.model', exclude_key=['__MACOSX'])
                tokenizer_path = find_file(model_arg, '.tokenizer', exclude_key=['__MACOSX'])
                config_path = find_file(model_arg, '.config', exclude_key=['__MACOSX'])
                print('config: {}'.format(config_path))
                print('state_dict: {}'.format(state_dict_path))
                print('model: {}'.format(model_path))
                print('tokenizer: {}'.format(tokenizer_path))
                self.opt = pickle.load(open(config_path, mode='rb'))
                self.opt.eval_batch_size = eval_batch_size
                if state_dict_path:
                    # Transformer-based models: rebuild the ensemble and load
                    # weights; GloVe models go through the else branch below.
                    if not hasattr(GloVeAPCModelList, self.opt.model.__name__.upper()):
                        if state_dict_path:
                            self.model = APCEnsembler(self.opt, load_dataset=False)
                            self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))
                        if model_path:
                            self.model = torch.load(model_path, map_location='cpu')
                        try:
                            self.tokenizer = AutoTokenizer.from_pretrained(self.opt.pretrained_bert, do_lower_case='uncased' in self.opt.pretrained_bert)
                        except ValueError:
                            if tokenizer_path:
                                self.tokenizer = pickle.load(open(tokenizer_path, mode='rb'))
                            else:
                                raise TransformerConnectionError()
                    else:
                        # GloVe path: rebuild tokenizer/embedding from dataset files.
                        tokenizer = build_tokenizer(
                            dataset_list=self.opt.dataset_file,
                            max_seq_len=self.opt.max_seq_len,
                            dat_fname='{0}_tokenizer.dat'.format(os.path.basename(self.opt.dataset_name)),
                            opt=self.opt
                        )
                        if model_path:
                            self.model = torch.load(model_path, map_location='cpu')
                        else:
                            embedding_matrix = build_embedding_matrix(
                                word2idx=tokenizer.word2idx,
                                embed_dim=self.opt.embed_dim,
                                dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(self.opt.embed_dim), os.path.basename(self.opt.dataset_name)),
                                opt=self.opt
                            )
                            self.model = self.opt.model(embedding_matrix, self.opt).to(self.opt.device)
                            self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))
                        self.tokenizer = tokenizer
                print('Config used in Training:')
                print_args(self.opt, mode=1)
            except Exception as e:
                raise RuntimeError('Exception: {} Fail to load the model from {}! '.format(e, model_arg))
        # Pick the dataset wrapper matching the model family (opt.model can be
        # a single class or a list of classes for ensembles).
        if isinstance(self.opt.model, list):
            if hasattr(APCModelList, self.opt.model[0].__name__):
                self.dataset = ABSADataset(tokenizer=self.tokenizer, opt=self.opt)
            elif hasattr(BERTBaselineAPCModelList, self.opt.model[0].__name__):
                self.dataset = BERTBaselineABSADataset(tokenizer=self.tokenizer, opt=self.opt)
            elif hasattr(GloVeAPCModelList, self.opt.model[0].__name__):
                self.dataset = GloVeABSADataset(tokenizer=self.tokenizer, opt=self.opt)
            else:
                raise KeyError('The ref_checkpoint you are loading is not from APC model.')
        else:
            if hasattr(APCModelList, self.opt.model.__name__):
                self.dataset = ABSADataset(tokenizer=self.tokenizer, opt=self.opt)
            elif hasattr(BERTBaselineAPCModelList, self.opt.model.__name__):
                self.dataset = BERTBaselineABSADataset(tokenizer=self.tokenizer, opt=self.opt)
            elif hasattr(GloVeAPCModelList, self.opt.model.__name__):
                self.dataset = GloVeABSADataset(tokenizer=self.tokenizer, opt=self.opt)
            else:
                raise KeyError('The ref_checkpoint you are loading is not from APC model.')
        self.infer_dataloader = None
        # Seed every RNG for reproducible inference when a seed is configured.
        if self.opt.seed is not None:
            random.seed(self.opt.seed)
            numpy.random.seed(self.opt.seed)
            torch.manual_seed(self.opt.seed)
            torch.cuda.manual_seed(self.opt.seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
        self.opt.initializer = self.opt.initializer
        self.sentiment_map = None
        self.set_sentiment_map(sentiment_map)
    def set_sentiment_map(self, sentiment_map):
        # Deprecated: polarity labels should be declared in the dataset itself.
        # When a map is supplied, register the padding label as the empty
        # string so padded positions render as nothing.
        if sentiment_map:
            print(colored('Warning: set_sentiment_map() is deprecated, please directly set labels within dataset.', 'red'))
            sentiment_map[LABEL_PADDING] = ''
        self.sentiment_map = sentiment_map
    def to(self, device=None):
        # Move the model to an arbitrary torch device and record it in the config.
        self.opt.device = device
        self.model.to(device)
    def cpu(self):
        # Convenience wrapper: move the model to the CPU.
        self.opt.device = 'cpu'
        self.model.to('cpu')
    def cuda(self, device='cuda:0'):
        # Convenience wrapper: move the model to a CUDA device (default cuda:0).
        self.opt.device = device
        self.model.to(device)
    def batch_infer(self,
                    target_file=None,
                    print_result=True,
                    save_result=False,
                    clear_input_samples=True,
                    ignore_error=True):
        # Run inference over every sample of an inference dataset file.
        # With `save_result`, the merged results are written to
        # ./apc_inference.result.json.  Returns the merged result list.
        if clear_input_samples:
            self.clear_input_samples()
        save_path = os.path.join(os.getcwd(), 'apc_inference.result.json')
        target_file = detect_infer_dataset(target_file, task='apc')
        if not target_file:
            raise FileNotFoundError('Can not find inference datasets!')
        self.dataset.prepare_infer_dataset(target_file, ignore_error=ignore_error)
        self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, pin_memory=True, shuffle=False)
        return self._infer(save_path=save_path if save_result else None, print_result=print_result)
    def infer(self, text: str = None,
              print_result=True,
              clear_input_samples=True):
        # Run inference on a single annotated text sample (required).
        if clear_input_samples:
            self.clear_input_samples()
        if text:
            self.dataset.prepare_infer_sample(text)
        else:
            raise RuntimeError('Please specify your datasets path!')
        self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, shuffle=False)
        return self._infer(print_result=print_result)
def merge_results(self, results):
""" merge APC results have the same input text
"""
final_res = []
for result in results:
if final_res and "".join(final_res[-1]['text'].split()) == "".join(result['text'].split()):
final_res[-1]['aspect'].append(result['aspect'])
final_res[-1]['sentiment'].append(result['sentiment'])
final_res[-1]['ref_sentiment'].append(result['ref_sentiment'])
final_res[-1]['ref_check'].append(result['ref_check'])
else:
final_res.append(
{
'text': result['text'].replace(' ', ' '),
'aspect': [result['aspect']],
'sentiment': [result['sentiment']],
'ref_sentiment': [result['ref_sentiment']],
'ref_check': [result['ref_check']]
}
)
return final_res
    def _infer(self, save_path=None, print_result=True):
        # Core inference loop shared by infer()/batch_infer(): runs the model
        # over self.infer_dataloader, collects per-aspect predictions, merges
        # them per input text, optionally prints/saves, and returns the list.
        _params = filter(lambda p: p.requires_grad, self.model.parameters())
        correct = {True: 'Correct', False: 'Wrong'}
        results = []
        with torch.no_grad():
            self.model.eval()
            n_correct = 0
            n_labeled = 0
            n_total = 0
            for _, sample in enumerate(self.infer_dataloader):
                inputs = {col: sample[col].to(self.opt.device) for col in self.opt.inputs_cols if col != 'polarity'}
                self.model.eval()
                outputs = self.model(inputs)
                sen_logits = outputs['logits']
                t_probs = torch.softmax(sen_logits, dim=-1).cpu().numpy()
                for i, i_probs in enumerate(t_probs):
                    # Map the argmax index to a label name when the config
                    # carries an index_to_label table (>= 1.2.0 checkpoints).
                    if 'index_to_label' in self.opt.args and int(i_probs.argmax(axis=-1)) in self.opt.index_to_label:
                        sent = self.opt.index_to_label[int(i_probs.argmax(axis=-1))]
                        real_sent = sample['polarity'][i] if isinstance(sample['polarity'][i], str) else self.opt.index_to_label.get(int(sample['polarity'][i]), 'N.A.')
                        # -999 / '-999' is the "no reference label" marker.
                        if real_sent != -999 and real_sent != '-999':
                            n_labeled += 1
                            if sent == real_sent:
                                n_correct += 1
                    else:  # for the former versions before 1.2.0
                        sent = int(i_probs.argmax(axis=-1))
                        real_sent = int(sample['polarity'][i])
                    aspect = sample['aspect'][i]
                    text_raw = sample['text_raw'][i]
                    results.append({
                        'text': text_raw,
                        'aspect': aspect,
                        'sentiment': sent,
                        'ref_sentiment': real_sent,
                        'ref_check': correct[sent == real_sent] if real_sent != '-999' else '',
                    })
                    n_total += 1
        results = self.merge_results(results)
        try:
            if print_result:
                for result in results:
                    text_printing = result['text']
                    for i in range(len(result['aspect'])):
                        # Colour the aspect green/red by reference agreement.
                        if result['ref_sentiment'][i] != -999:
                            if result['sentiment'][i] == result['ref_sentiment'][i]:
                                aspect_info = colored('{} -> {}(ref:{})'.format(result['aspect'][i], result['sentiment'][i], result['ref_sentiment'][i]), 'green')
                            else:
                                aspect_info = colored('{} -> {}(ref:{})'.format(result['aspect'][i], result['sentiment'][i], result['ref_sentiment'][i]), 'red')
                        else:
                            aspect_info = '{} -> {}'.format(result['aspect'][i], result['sentiment'][i])
                        text_printing = text_printing.replace(result['aspect'][i], aspect_info)
                    print(text_printing)
            if save_path:
                # NOTE(review): file handle is never closed, and the except
                # below references `text_raw`, which is unbound when the
                # dataloader was empty — pre-existing issues, flagged only.
                fout = open(save_path, 'w', encoding='utf8')
                json.dump(json.JSONEncoder().encode({'results': results}), fout, ensure_ascii=False)
                # fout.write('Total samples:{}\n'.format(n_total))
                # fout.write('Labeled samples:{}\n'.format(n_labeled))
                # fout.write('Prediction Accuracy:{}%\n'.format(100 * n_correct / n_labeled)) if n_labeled else 'N.A.'
                print('inference result saved in: {}'.format(save_path))
        except Exception as e:
            print('Can not save result: {}, Exception: {}'.format(text_raw, e))
        if len(self.infer_dataloader) > 1:
            print('Total samples:{}'.format(n_total))
            print('Labeled samples:{}'.format(n_labeled))
            print('Prediction Accuracy:{}%'.format(100 * n_correct / n_labeled if n_labeled else 'N.A.'))
        return results
    def clear_input_samples(self):
        """Discard all samples currently loaded for inference.

        Rebinds ``self.dataset.all_data`` to a fresh empty list so the next
        inference run starts from an empty dataset.
        """
        self.dataset.all_data = []
| 47.201342 | 168 | 0.567681 |
06fe49fdcf0f34597f66aa36902d48dd28983cb3 | 745 | py | Python | knowledge_repo/app/deploy/__init__.py | My-Coursera-Presentation/1.knowledge | 19c6507eef6477a99a21f8037feafc5d61cd33f5 | [
"Apache-2.0"
] | 12 | 2018-03-10T19:27:36.000Z | 2021-03-24T10:21:29.000Z | knowledge_repo/app/deploy/__init__.py | My-Coursera-Presentation/1.knowledge | 19c6507eef6477a99a21f8037feafc5d61cd33f5 | [
"Apache-2.0"
] | null | null | null | knowledge_repo/app/deploy/__init__.py | My-Coursera-Presentation/1.knowledge | 19c6507eef6477a99a21f8037feafc5d61cd33f5 | [
"Apache-2.0"
] | 9 | 2018-03-17T11:52:11.000Z | 2021-03-24T10:21:29.000Z | import logging
from .common import KnowledgeDeployer, get_app_builder
# The following subclasses of KnowledgeDeployer must be imported in order to be registered as a deployer and hence
# made accessible using `KnowledgeDeployer.using(..)`.
from .flask import FlaskDeployer
from .uwsgi import uWSGIDeployer
# Wrap the gunicorn deployer in a try/except block, as it has a hard dependency on gunicorn which does not work on
# non-POSIX systems, or if it is not installed.
try:
from .gunicorn import GunicornDeployer
except:
logging.warn("Gunicorn deployer is not available. It only works on POSIX platforms (e.g. Linux, Mac OS X, etc). "
"If you are using a POSIX platform, please ensure that `gunicorn` is installed.") | 46.5625 | 117 | 0.761074 |
8700bdfaa4ab3394c81d002245da9ec053388f05 | 9,420 | py | Python | hicprediction/configurations.py | abajorat/MasterProjekt | 22b870f6e6e2d2ffa7e5ae2d9fe9da41b7cfefaf | [
"MIT"
] | null | null | null | hicprediction/configurations.py | abajorat/MasterProjekt | 22b870f6e6e2d2ffa7e5ae2d9fe9da41b7cfefaf | [
"MIT"
] | null | null | null | hicprediction/configurations.py | abajorat/MasterProjekt | 22b870f6e6e2d2ffa7e5ae2d9fe9da41b7cfefaf | [
"MIT"
] | 1 | 2019-11-22T13:03:25.000Z | 2019-11-22T13:03:25.000Z | #!/usr/bin/env python3
from pkg_resources import resource_filename, Requirement
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import auc
from hicmatrix import HiCMatrix as hm
from hicexplorer import hicPlotMatrix as hicPlot
import h5py
import joblib
import sys
import bisect
import argparse
import glob
import math
import time
from itertools import product
import datetime
import itertools
import shutil
import operator
import subprocess
import click
import pickle
import os
import numpy as np
import logging as log
import pandas as pd
from copy import copy, deepcopy
from io import StringIO
from csv import writer
from tqdm import tqdm
import logging
import cooler
import pybedtools
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm
from scipy import sparse
from scipy import signal
from scipy import misc
from scipy.stats.stats import pearsonr
from scipy.sparse import coo_matrix
# Module-wide runtime configuration: verbose logging plus compact numeric output.
log.basicConfig(level=log.DEBUG)
# Render pandas floats with three decimal places.
pd.set_option('display.float_format', lambda x: '%.3f' % x)
# Print numpy arrays in full (no "..." truncation), rounded to three decimals.
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(precision=3, suppress=True)
class Mutex(click.Option):
    """A click option that is mutually exclusive with the options named in
    ``not_required_if``.

    If any of the listed options is present on the command line together with
    this one, parsing aborts with a UsageError; if a listed option is present
    and this one is not, this option's interactive prompt is suppressed.
    """
    def __init__(self, *args, **kwargs):
        # Names of options this one must not be combined with (required kwarg).
        self.not_required_if:list = kwargs.pop("not_required_if")
        assert self.not_required_if, "'not_required_if' parameter required"
        # Document the exclusivity constraint in the option's help text.
        kwargs["help"] = (kwargs.get("help", "") + "Option is mutually exclusive with " + ", ".join(self.not_required_if) + ".").strip()
        super(Mutex, self).__init__(*args, **kwargs)
    def handle_parse_result(self, ctx, opts, args):
        """Enforce mutual exclusivity after click has collected the options.

        :raises click.UsageError: if this option and any of the mutually
            exclusive options appear together.
        """
        current_opt:bool = self.name in opts
        for mutex_opt in self.not_required_if:
            if mutex_opt in opts:
                if current_opt:
                    raise click.UsageError("Illegal usage: '" + str(self.name)\
                        +"' is mutually exclusive with " + str(mutex_opt) + ".")
                else:
                    # The other option was given instead: do not prompt for this one.
                    self.prompt = None
        return super(Mutex, self).handle_parse_result(ctx, opts, args)
# Options shared by the protein-storage and set-creation commands.
_setAndProtein_options = [
    click.option('--baseFile','-bf', required=True,type=click.Path(writable=True),
                help='Base file where to store proteins and chromosomes for later use.'),
    click.option('--chromosomes', '-chs', default=None, show_default=True,help=\
                "If set, sets are only calculated for these chromosomes instead of all"),
]
# Options common to all prediction commands.
_predict_base_options = [
    click.option('--predictionOutputDirectory', '-pod',default=None,\
                type=click.Path(exists=True),help='Output directory for'\
                +' prediction files'),
    click.option('--resultsFilePath', '-rfp', default=None,show_default=True,\
                    help='File where to store evaluation metrics. If not set'\
                +' no evaluation is executed'),
    click.option('--baseFile','-bf', required=True,type=click.Path(writable=True),
                help='Base file where to store proteins and chromosomes for later use.'),
]
# Options for predicting from a single model / data set pair.
_predict_options = [
    click.option('--modelFilePath', '-mfp', required=True,\
                help='Choose model on which to predict'),
    click.option('--predictionSetPath','-psp', required=True,type=click.Path(writable=True),
                help='Data set that is to be predicted.'),
]
# Options for batch prediction over whole directories.
_allpredict_options = [
    click.option('--modelDirectory', '-md', required=True,\
                help='Choose model directory'),
    click.option('--testSetDirectory','-tsd', required=True,type=click.Path(writable=True),
                help='Data set directory'),
    click.option('--chromosomes', '-chs', default=None, show_default=True,help=\
                "If set, sets are only calculated for these chromosomes instead of all"),
]
# Options describing the protein / Hi-C input data.
_protein_options = [
    click.option('--resolution' ,'-r', required=True,\
                help = "Store resolution for analys and documentation"),
    click.option('--cellType' ,'-ct', required=True, \
                help="Store cell type for analysis and documentation"),
    click.option('--matrixFile', '-mf',required=True,type=click.Path(exists=True),\
                help='Input file with the whole HiC-matrix ')
]
# Options common to all set-creation commands.
_set_base_options = [
    click.option('--windowSize', '-ws', default=200, show_default=True,\
                help='Maximum distance between two basepairs'),
    click.option('--centromeresFile', '-cmf',show_default=True,
                default=None,\
                type=click.Path(exists=True)),
    click.option('--datasetOutputDirectory', '-dod',required=True,type=click.Path(exists=True),\
                help='Output directory for training set files'),
]
# Options for creating all sets from a parameter file.
_allset_options = [
    click.option('--baseFile','-bf', required=True,type=click.Path(writable=True),
                help='Base file where to store proteins and chromosomes for later use.'),
    click.option('--setParamsFile', '-spf', required=True,\
                type=click.Path(exists=True)),
]
# Options controlling how a single set is built (binning, normalization, ...).
_set_options = [
    click.option('--peakColumn' ,'-pc', default=6,hidden=True),
    click.option('--mergeOperation','-mo',default='avg',\
                type=click.Choice(['avg', 'max']),show_default=True,\
                help='This parameter defines how the proteins are binned'),
    click.option('--normalize', default=False,\
                show_default=True,\
                help='Should the proteins be normalized to a 0-1 range'),
    click.option('--ignoreCentromeres', default=True,\
                show_default=True,help='Cut out the centroid arms for training'),
    click.option('--windowOperation', '-wo', default='avg',\
                type=click.Choice(['avg', 'max', 'sum']), show_default=True,\
                help='How should the proteins in between two base pairs be summed up'),
]
# Options naming the data set to train a single model on.
_train_options = [
    click.option('--trainDatasetFile', '-tdf',\
                required=True,\
                help='File from which training is loaded'\
                ,type=click.Path(writable=True)),
]
# Options for training over a whole directory of sets.
_alltrain_options = [
    click.option('--setDirectory', '-sd',type=click.Path(exists=True),\
                help='Input directory for training files', required=True,),
]
# Options common to all training commands.
_train_base_options = [
    click.option('--modelOutputDirectory', '-mod',type=click.Path(exists=True),\
                help='Output directory for model files', required=True,),
    click.option('--conversion', '-co', default='none',\
                type=click.Choice(['standardLog', 'none']), show_default=True,\
                help='Define a conversion function for the read values')
]
def protein_options(func):
    """Decorator stacking the protein and shared set/protein click options onto *func*."""
    # Applying reversed(A) then reversed(B) is one pass over reversed(B + A).
    for opt in reversed(_setAndProtein_options + _protein_options):
        func = opt(func)
    return func
def set_options(func):
    """Decorator stacking all set-creation click options onto *func*."""
    # Same application order as three separate reversed loops over
    # _set_options, _setAndProtein_options and _set_base_options.
    stacked = _set_base_options + _setAndProtein_options + _set_options
    for opt in reversed(stacked):
        func = opt(func)
    return func
def predict_options(func):
    """Decorator stacking the single-model prediction click options onto *func*."""
    for opt in reversed(_predict_options + _predict_base_options):
        func = opt(func)
    return func
def allpredict_options(func):
    """Decorator stacking the batch-prediction click options onto *func*."""
    for opt in reversed(_allpredict_options + _predict_base_options):
        func = opt(func)
    return func
def train_options(func):
    """Decorator stacking the single-set training click options onto *func*."""
    for opt in reversed(_train_base_options + _train_options):
        func = opt(func)
    return func
def alltrain_options(func):
    """Decorator stacking the directory-wide training click options onto *func*."""
    for opt in reversed(_train_base_options + _alltrain_options):
        func = opt(func)
    return func
def allset_options(func):
    """Decorator stacking all batch set-creation click options onto *func*."""
    stacked = _allset_options + _setAndProtein_options + _set_base_options
    for opt in reversed(stacked):
        func = opt(func)
    return func
def getBaseCombinations():
    """Yield every combination of the baseline protein-processing parameters.

    Yields:
        dict: one mapping of parameter name -> value per combination in the
        Cartesian product of the hard-coded candidate lists.
    """
    params = {
        'mergeOperation': ["avg", "max"],
        'normalize': [True, False],
        'peakColumn': [4, 6],
    }
    combinations = list(product(*params.values()))
    for combo in tqdm(combinations, desc='Iterate parameter combinations'):
        yield dict(zip(params, combo))
def checkExtension(fileName, extension, option=None):
    """Abort the program unless *fileName* carries an accepted extension.

    Args:
        fileName: path whose suffix (text after the last '.') is checked.
        extension: required suffix, given without the leading dot.
        option: optional alternative suffix that is also accepted.
    """
    suffix = fileName.split(".")[-1]
    if suffix == extension:
        return
    if option and suffix == option:
        return
    msg = 'The file {} has the wrong extension. Ensure to '\
            +'pass a file with .{} extension'
    print(msg.format(str(fileName), extension))
    sys.exit()
def getCombinations(paramsfile):
    """Yield every parameter combination described in a JSON file.

    Args:
        paramsfile: path to a JSON object mapping parameter names to lists
            of candidate values.

    Yields:
        dict: one mapping of parameter name -> value per combination in the
        Cartesian product over all value lists.
    """
    # Bug fix: `json` was never imported at module level, so calling this
    # function raised NameError.  A local import keeps the fix self-contained.
    import json
    with open(paramsfile) as f:
        params = json.load(f)
    paramDict = product(*params.values())
    for val in tqdm(list(paramDict), desc= 'Iterate parameter combinations' ):
        yield dict(zip(params, val))
| 37.380952 | 136 | 0.665817 |
0f28ee4b960ee757e1b41f90cb00ede397ebd6db | 4,617 | py | Python | setup.py | Priyamriu1/jupyter_contrib_nbextensions | 4e841d92b2eda4caf2536865a42c1f732ab518a3 | [
"BSD-3-Clause-Clear"
] | 6 | 2019-07-08T15:08:22.000Z | 2021-12-04T01:38:57.000Z | setup.py | Priyamriu1/jupyter_contrib_nbextensions | 4e841d92b2eda4caf2536865a42c1f732ab518a3 | [
"BSD-3-Clause-Clear"
] | 2 | 2019-07-24T20:42:53.000Z | 2019-08-16T17:11:02.000Z | setup.py | Priyamriu1/jupyter_contrib_nbextensions | 4e841d92b2eda4caf2536865a42c1f732ab518a3 | [
"BSD-3-Clause-Clear"
] | 2 | 2021-06-14T15:53:25.000Z | 2021-09-08T12:35:17.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup script for jupyter_contrib_nbextensions."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import print_function
import os
from glob import glob
from setuptools import find_packages, setup
# -----------------------------------------------------------------------------
# main setup call
# -----------------------------------------------------------------------------
def main():
    """Configure and run setuptools for the jupyter_contrib_nbextensions package."""
    setup(
        name='jupyter_contrib_nbextensions',
        description="A collection of Jupyter nbextensions.",
        long_description="""
Contains a collection of extensions that add functionality to the Jupyter
notebook. These extensions are mostly written in Javascript, and are loaded
locally in the browser.
Read
`the documentation <https://jupyter-contrib-nbextensions.readthedocs.io>`_
for more information.
The
`jupyter-contrib repository <https://github.com/ipython-contrib/jupyter_contrib_nbextensions>`_
is maintained independently by a group of users and developers, and is not
officially related to the Jupyter development team.
The maturity of the provided extensions varies, so please check
`the repository issues page <https://github.com/ipython-contrib/jupyter_contrib_nbextensions/issues>`_
if you encounter any problems, and create a new issue if needed!
""",  # noqa: E501
        version='0.5.1',
        author='ipython-contrib and jupyter-contrib developers',
        author_email='jupytercontrib@gmail.com',
        url=('https://github.com/'
             'ipython-contrib/jupyter_contrib_nbextensions.git'),
        download_url=('https://github.com/'
                      'ipython-contrib/jupyter_contrib_nbextensions'
                      '/tarball/0.5.1'),
        keywords=['IPython', 'Jupyter', 'notebook'],
        license='BSD',
        platforms=['Any'],
        # Source layout: packages and standalone modules live under src/.
        packages=find_packages('src'),
        package_dir={'': 'src'},
        include_package_data=True,
        py_modules=[
            os.path.splitext(os.path.basename(path))[0]
            for path in glob('src/*.py')
        ],
        install_requires=[
            'ipython_genutils',
            'jupyter_contrib_core >=0.3.3',
            'jupyter_core',
            'jupyter_highlight_selected_word >=0.1.1',
            'jupyter_latex_envs >=1.3.8',
            'jupyter_nbextensions_configurator >=0.4.0',
            'nbconvert >=4.2',
            'notebook >=4.0',
            'pyyaml',
            'tornado',
            'traitlets >=4.1',
            'lxml'
        ],
        extras_require={
            'test': [
                'nbformat',
                'nose',
                'pip',
                'requests',
            ],
            'test:python_version == "2.7"': [
                'mock',
            ],
        },
        # we can't be zip safe as we require templates etc to be accessible to
        # jupyter server
        zip_safe=False,
        entry_points={
            'console_scripts': [
                'jupyter-contrib-nbextension = jupyter_contrib_nbextensions.application:main',  # noqa: E501
            ],
            'jupyter_contrib_core.app.subcommands': [
                'nbextension = jupyter_contrib_nbextensions.application:jupyter_contrib_core_app_subcommands',  # noqa: E501
            ],
            # Custom nbconvert exporters shipped with the package.
            'nbconvert.exporters': [
                'html_toc = jupyter_contrib_nbextensions.nbconvert_support.toc2:TocExporter',  # noqa: E501
                'selectLanguage = jupyter_contrib_nbextensions.nbconvert_support.nbTranslate:NotebookLangExporter',  # noqa: E501
                'html_embed = jupyter_contrib_nbextensions.nbconvert_support.embedhtml:EmbedHTMLExporter',  # noqa: E501
                'html_ch = jupyter_contrib_nbextensions.nbconvert_support.collapsible_headings:ExporterCollapsibleHeadings',  # noqa: E501
            ],
        },
        scripts=[os.path.join('scripts', p) for p in [
            'jupyter-contrib-nbextension',
        ]],
        classifiers=[
            'Development Status :: 1 - Planning',
            'Intended Audience :: End Users/Desktop',
            'Intended Audience :: Science/Research',
            'License :: OSI Approved :: BSD License',
            'Natural Language :: English',
            'Operating System :: OS Independent',
            'Programming Language :: JavaScript',
            'Programming Language :: Python',
            'Topic :: Utilities',
        ],
    )
if __name__ == '__main__':
    main()
| 37.233871 | 138 | 0.563786 |
64d12cd0ea569b685f404daaa20cc1e00c004b85 | 666 | py | Python | djangocms_bootstrap4/contrib/bootstrap4_card/constants.py | jpVm5jYYRE1VIKL/djangocms-bootstrap4 | d36a369af54850eddaa0299e5ae33ee5e78cf2b1 | [
"BSD-3-Clause"
] | 59 | 2017-09-28T17:13:38.000Z | 2020-09-22T02:55:47.000Z | djangocms_bootstrap4/contrib/bootstrap4_card/constants.py | jpVm5jYYRE1VIKL/djangocms-bootstrap4 | d36a369af54850eddaa0299e5ae33ee5e78cf2b1 | [
"BSD-3-Clause"
] | 102 | 2017-10-20T09:37:52.000Z | 2020-09-23T06:37:47.000Z | djangocms_bootstrap4/contrib/bootstrap4_card/constants.py | jpVm5jYYRE1VIKL/djangocms-bootstrap4 | d36a369af54850eddaa0299e5ae33ee5e78cf2b1 | [
"BSD-3-Clause"
] | 40 | 2017-12-29T20:05:40.000Z | 2020-09-21T08:33:48.000Z | from django.utils.translation import gettext_lazy as _
CARD_TYPE_CHOICES = (
('card', _('Card')),
('card-group', _('Card group')),
('card-deck', _('Card deck')),
('card-columns', _('Card columns')),
)
CARD_ALIGNMENT_CHOICES = (
('text-left', _('Left')),
('text-center', _('Center')),
('text-right', _('Right')),
)
CARD_INNER_TYPE_CHOICES = (
('card-body', _('Body')),
('card-header', _('Header')),
('card-footer', _('Footer')),
)
CARD_TAG_CHOICES = (
('div', 'DIV'),
('h1', 'H1'),
('h2', 'H2'),
('h3', 'H3'),
('h4', 'H4'),
('h5', 'H5'),
('h6', 'H6'),
('p', 'P'),
('small', 'SMALL'),
)
| 19.588235 | 54 | 0.490991 |
d3683e49bafec4402da2a67c8ee13aef7c21e948 | 3,391 | py | Python | Array_ADT/array_structure.py | jaeheeLee17/DS_and_Algorithms_summary | 917500dd768eae8cfbb02cf2838d494cb720f1c0 | [
"MIT"
] | null | null | null | Array_ADT/array_structure.py | jaeheeLee17/DS_and_Algorithms_summary | 917500dd768eae8cfbb02cf2838d494cb720f1c0 | [
"MIT"
] | null | null | null | Array_ADT/array_structure.py | jaeheeLee17/DS_and_Algorithms_summary | 917500dd768eae8cfbb02cf2838d494cb720f1c0 | [
"MIT"
] | null | null | null | # Implements the Array ADT using array capabilities of the ctypes module.
import ctypes
class Array:
    """Fixed-size Array ADT backed by a ctypes object array."""

    def __init__(self, size):
        """Allocate storage for `size` elements, all initialized to None."""
        assert size > 0, "Array size must be > 0"
        self._size = size
        # ctypes builds a fixed-length array type via type multiplication.
        storage_type = ctypes.py_object * size
        self._elements = storage_type()
        self.clear(None)

    def __len__(self):
        """Number of slots in the array."""
        return self._size

    def __getitem__(self, index):
        """Fetch the element stored at position `index`."""
        assert index >= 0 and index < len(self), "Array subscript out of range"
        return self._elements[index]

    def __setitem__(self, index, value):
        """Store `value` at position `index`."""
        assert index >= 0 and index < len(self), "Array subscript out of range"
        self._elements[index] = value

    def clear(self, value):
        """Overwrite every slot with `value`."""
        for slot in range(self._size):
            self._elements[slot] = value

    def __iter__(self):
        """Return an iterator over the backing storage."""
        return _ArrayIterator(self._elements)
# Implementation of the Array2D ADT using an array of arrays.
class Array2D:
    """Two-dimensional array ADT backed by an Array of row Arrays."""

    def __init__(self, numRows, numCols):
        """Creates a 2-D array of size numRows x numCols."""
        # A 1-D array holding one Array reference per row.
        self._theRows = Array(numRows)
        for i in range(numRows):
            self._theRows[i] = Array(numCols)

    def numRows(self):
        """Returns the number of rows in the 2-D array."""
        return len(self._theRows)

    def numCols(self):
        """Returns the number of columns in the 2-D array."""
        return len(self._theRows[0])

    def clear(self, value):
        """Clears the array by setting every element to the given value.

        Bug fix: the original loop (`for row in ...: row = value`) only
        rebound the loop variable and never modified the array contents.
        Each row Array is now cleared explicitly.
        """
        for row in range(self.numRows()):
            self._theRows[row].clear(value)

    def __getitem__(self, ndxTuple):
        """Gets the contents of the element at position [i, j]."""
        assert len(ndxTuple) == 2, "Invalid number of array subscripts."
        row = ndxTuple[0]
        col = ndxTuple[1]
        assert row >= 0 and row < self.numRows() \
            and col >= 0 and col < self.numCols(), \
            "Array subscript out of range."
        the1dArray = self._theRows[row]
        return the1dArray[col]

    def __setitem__(self, ndxTuple, value):
        """Sets the contents of the element at position [i, j] to value."""
        assert len(ndxTuple) == 2, "Invalid number of array subscripts."
        row = ndxTuple[0]
        col = ndxTuple[1]
        assert row >= 0 and row < self.numRows() \
            and col >= 0 and col < self.numCols(), \
            "Array subscript out of range."
        the1dArray = self._theRows[row]
        the1dArray[col] = value
# An iterator for the Array ADT.
class _ArrayIterator:
def __init__(self, theArray):
self._arrayRef = theArray
self._curidx = 0
def __iter__(self):
return self
def __next__(self):
if self._curidx < len(self._arrayRef):
entry = self._arrayRef[self._curidx]
self._curidx += 1
return entry
else:
raise StopIteration
| 33.91 | 79 | 0.62371 |
0c7d844e2e27364d5b0968f1001f0cf6a90f75d2 | 1,163 | py | Python | tests/test_umlparser.py | candyabc/mw-uml-generator | ccfc635fae93617000adb9e93b2821a3aa82324b | [
"MIT"
] | null | null | null | tests/test_umlparser.py | candyabc/mw-uml-generator | ccfc635fae93617000adb9e93b2821a3aa82324b | [
"MIT"
] | null | null | null | tests/test_umlparser.py | candyabc/mw-uml-generator | ccfc635fae93617000adb9e93b2821a3aa82324b | [
"MIT"
] | null | null | null | from __future__ import print_function, absolute_import, division
import pytest
import sys
import os
# A metaclass creates classes, so it must derive from `type`.
gg = {}


class Singleton(type):
    """Metaclass that registers every class it creates in the module-level
    `gg` mapping and reserves an `_instance` slot on it."""

    def __new__(cls, name, bases, attrs):
        print("__new__")
        print(bases)
        # Reserve a slot for a future singleton instance on the new class.
        attrs["_instance"] = None
        created = super(Singleton, cls).__new__(cls, name, bases, attrs)
        gg[name] = created
        return created
class FA:
    """Trivial base class carrying a single instance attribute."""

    def __init__(self):
        # NOTE: value looks like a typo for "hello"; preserved as-is.
        self.a = 'heool'
# Defining this class runs Singleton.__new__, which registers Foo in the
# module-level `gg` mapping at import time.
class Foo(FA, metaclass=Singleton):
    pass
class TestClass:
    """Exploratory pytest class checking the Singleton metaclass registry."""

    def test_mya(self):
        """Instantiate Foo through the `gg` registry and dump its attributes."""
        # foo1 = Foo()
        # foo2 = Foo()
        # print(Foo.__dict__ )
        # print(['a',foo1.a] )
        print(['gg',gg])
        # Look up the class object registered by the metaclass and call it.
        f1 =gg['Foo']()
        print(f1.__dict__)
        # assert len(gg.keys())>0
    # def test_factory(self):
    #     print(_umlParseFactory._classMapping)
    #     assert len(_umlParseFactory._classMapping.keys())>0
92d1b590f0b7d0ad65ca4bd2690493b3302d7583 | 11,713 | py | Python | preprocessing/dmop_analysis.py | fornaxco/Mars-Express-Challenge | 4e0dff9909df0d10e507083af59326b3342d67fe | [
"BSD-3-Clause"
] | 7 | 2016-08-14T02:40:47.000Z | 2016-09-28T05:55:23.000Z | preprocessing/dmop_analysis.py | fornaxco/Mars-Express-Challenge | 4e0dff9909df0d10e507083af59326b3342d67fe | [
"BSD-3-Clause"
] | null | null | null | preprocessing/dmop_analysis.py | fornaxco/Mars-Express-Challenge | 4e0dff9909df0d10e507083af59326b3342d67fe | [
"BSD-3-Clause"
] | 5 | 2016-08-14T02:45:41.000Z | 2017-05-27T08:59:10.000Z | # -*- coding: utf-8 -*-
"""
@author: fornax
"""
import numpy as np
import pandas as pd
def get_npwd2881_features(df):
    """
    Extracts AOOO commands from pandas file which are correlated with the
    NPWD2881 line. Those commands are then used as training features for the
    final predictions.

    :param df: a data frame containing all features with added merged DMOP columns.
    :return: a data frame containing selected features used for training of
        the NPWD2881 power line.
    """
    # Each entry: (column name, forward offset in samples, accumulate flag).
    # With accumulate=True the signal is summed with its zero-padded forward
    # shifts up to `offset`; otherwise it is only shifted forward by `offset`.
    aooo_list = [
        ['AOOO_current_F03A1', 3, True],
        ['AOOO_current_F04A0', 0, True],
        ['AOOO_current_F05A0', 3, False],
        ['AOOO_current_F06A0', 0, False],
        ['AOOO_current_F100A', 0, False],
        ['AOOO_current_F100C', 0, False],
        ['AOOO_current_F20A1', 2, True],
        ['AOOO_current_F20D1', 2, True],
        ['AOOO_current_F22A0', 2, True],
        ['AOOO_current_F32A0', 2, True],
        ['AOOO_current_F62A0', 3, False],
        ['AOOO_current_F63A0', 3, False],
        ['AOOO_current_F64A0', 3, False],
        ['AOOO_current_F65A0', 3, False],
        ['AOOO_current_F66A0', 3, False],
        ['AOOO_current_F67A0', 3, False],
        ['AOOO_current_F68A0', 3, False],
        ['AOOO_current_F77A0', 3, False],
        ['AOOO_current_F02A1', 3, False],
        ['AOOO_current_F01D0', 3, True],
        ['AOOO_current_F01D1', 3, True],
        ['AOOO_current_F02A0', 3, False],
        ['AOOO_current_F03A0', 3, False],
        ['AOOO_current_F32R0', 3, False],
        ['AOOO_current_F33A0', 3, False],
        ['AOOO_current_F34A0', 4, False],
        ['AOOO_current_F15A0', 3, True],
        ['AOOO_current_F100B', 1, True],
        ['AOOO_current_F15B0', 1, True],
        ['AOOO_current_F22A1', 1, True],
        ['AOOO_current_F22R1', 0, False],
        ['AOOO_current_F23A0', 0, True],
        ['AOOO_current_F24A0', 1, False],
        ['AOOO_current_F100D', 0, False],
        ['ATTT_current_F321D_F321R', 1, False],
    ]
    n_cols = len(aooo_list)
    aooo_data = np.zeros([df.mission_time.size, n_cols])
    aooo_cols = []
    for idx, (col, offset, accumulate) in enumerate(aooo_list):
        v = df[col].values
        if accumulate:
            # Sum the signal with zero-padded forward shifts of itself.
            for k in range(1, offset + 1):
                v = v + np.append(df[col].values[k:], [0] * k)
        else:
            # Shift the signal forward by `offset`, padding the tail with zeros.
            v = np.append(df[col].values[offset:], [0] * offset)
        aooo_cols.append(col)
        aooo_data[:, idx] = v
    return pd.DataFrame(aooo_data, columns=aooo_cols)
def correct_dmop(df):
    """
    Function removes unwanted subsystem/command combinations, and merges commands
    within a single subsystem as indicated for merging by manual inspection (see README).
    :param df: data frame
    :return: input data frame with features
    """
    trash, merge, atmb_vals = correction_list(df)
    # Flatten the per-subsystem trash lists into one list of column names.
    to_delete = [i for i in trash.values() for i in i]
    # merging
    # Keep only subsystems with more than one merge group, then flatten one
    # level so each entry is a group (list) of column names to sum together.
    to_merge = [i for i in merge.values() if len(i)>1]
    to_merge = [i for i in to_merge for i in i]
    for cols in to_merge:
        # Merged column name: common prefix + all command suffixes joined.
        name = '_'.join(cols[0].split('_')[:-1] + [i.split('_')[-1] for i in cols])
        df[name] = 0
        for col in cols:
            df[name] += df[col]
            # Source columns are dropped after being folded into the merge.
            to_delete.append(col)
    # deleting
    for col in to_delete:
        if col in df.columns:
            subsys = col.split('_')[0]
            command = col.split('_')[-1]
            # NOTE(review): a `filter` object (lazy iterator) is passed
            # straight to df.drop here — confirm the installed pandas version
            # accepts non-list iterables of labels.
            col_related = filter(lambda x: subsys in x and command in x and len(x.split('_')) == 3, df.columns)
            df.drop(col_related, axis=1, inplace=True)
    # ATMB
    # Attach the temperature-like signal derived from the ATMB subsystem.
    df['ATMB_temp'] = atmb_vals
    return df
def correction_list(pd_data):
    """
    Function returns dictionaries of subsystem/command combinations that should
    be removed from the data, or tuples of commands to merge together into
    a single feature. Also, a temperature-based feature is extracted from
    the ATMB subsystem.
    :param pd_data: data frame with ATMB subsystem
    :return: dictionaries
    """
    # Keyed by subsystem name: `trash` lists commands to drop outright;
    # `merge` lists groups of commands whose counts are summed into one feature.
    trash = {}
    merge = {}
    # --------------------------------------------------
    trash['ATTT'] = ['ATTT_current_260A',
                'ATTT_current_F301A',
                'ATTT_current_F301B',
                'ATTT_current_F301E',
                'ATTT_current_F301F',
                'ATTT_current_F301I',
                'ATTT_current_F301J',
                'ATTT_current_F310A',
                'ATTT_current_F310B',
                'ATTT_current_F410B',
                'ATTT_current_F420B']
    merge['ATTT'] = [['ATTT_current_305C','ATTT_current_305O','ATTT_current_305P','ATTT_current_306C','ATTT_current_306P'],
                ['ATTT_current_309A','ATTT_current_309B','ATTT_current_309P','ATTT_current_309Q'],
                ['ATTT_current_F321A','ATTT_current_F321P'],
                ['ATTT_current_F321D','ATTT_current_F321R']
                ]
    # --------------------------------------------------
    trash['ASXX'] = []
    merge['ASXX'] = [['ASXX_current_303A','ASXX_current_304A'],
                ['ASXX_current_307A','ASXX_current_308A'],
                ['ASXX_current_382C','ASXX_current_383C','ASXX_current_382S','ASXX_current_383S','ASXX_current_382R']]
    # --------------------------------------------------
    trash['AVVV'] = ['AVVV_current_01A0',
                'AVVV_current_02A0',
                'AVVV_current_03A0',
                'AVVV_current_03B0',
                'AVVV_current_05A0',
                'AVVV_current_06A0',
                'AVVV_current_07A0']
    merge['AVVV'] = [[]]
    # --------------------------------------------------
    trash['AHHH'] = ['AHHH_current_C05A1',
                'AHHH_current_C25A1',
                'AHHH_current_C532E',
                'AHHH_current_F04P3',
                'AHHH_current_F095B',
                'AHHH_current_F095C',
                'AHHH_current_F11A2',
                'AHHH_current_F20A1',
                'AHHH_current_F23P1',
                'AHHH_current_F50A2'
                ]
    merge['AHHH'] = [['AHHH_current_F01A2','AHHH_current_F01P1','AHHH_current_F01R1'],
                ['AHHH_current_F01S0'],
                ['AHHH_current_F02A1','AHHH_current_F02P1'],
                ['AHHH_current_F03A2'],
                ['AHHH_current_F04A3'],
                ['AHHH_current_F05A2'],
                ['AHHH_current_F06A1','AHHH_current_F06P1','AHHH_current_F06R1'],
                ['AHHH_current_F06S0'],
                ['AHHH_current_F11A1'],
                ['AHHH_current_F13A1'],
                ['AHHH_current_F17A1','AHHH_current_F17B1','AHHH_current_F17C2'],
                ['AHHH_current_F19A1']]
    # --------------------------------------------------
    trash['AOOO'] = []
    merge['AOOO'] = [[]]
    # --------------------------------------------------
    trash['AMMM'] = [
                'AMMM_current_F01A0',
                'AMMM_current_F01B0',
                'AMMM_current_F01R0',
                'AMMM_current_F71A0',
                'AMMM_current_F71AF',
                'AMMM_current_F73A0',
                'AMMM_current_F21A0',
                'AMMM_current_F22A0',
                'AMMM_current_F06B0',
                'AMMM_current_F06R0',
                'AMMM_current_F13A0',
                'AMMM_current_F14A0',
                'AMMM_current_F26A0',
                'AMMM_current_F32A0'
                ]
    merge['AMMM'] = [
                ['AMMM_current_F04A0','AMMM_current_F40A0'],
                ['AMMM_current_F05A0','AMMM_current_F40C0'],
                ['AMMM_current_F19A0'],
                ['AMMM_current_F51A0','AMMM_current_F52A0','AMMM_current_F52D1','AMMM_current_F52D2','AMMM_current_F52D3','AMMM_current_F52D4'],
                ['AMMM_current_F10A0','AMMM_current_F11A0','AMMM_current_F12A0','AMMM_current_F18A0','AMMM_current_F20A0','AMMM_current_F23A0','AMMM_current_F24A0','AMMM_current_F40B0']
                ]
    # --------------------------------------------------
    trash['APSF'] = [
                'APSF_current_12B1',
                'APSF_current_12C1',
                'APSF_current_12D1',
                'APSF_current_12E1',
                'APSF_current_12G1',
                'APSF_current_82B1',
                'APSF_current_83A1',
                'APSF_current_83B1',
                'APSF_current_88A1',
                'APSF_current_29B1',
                'APSF_current_15A2',
                'APSF_current_16A2',
                'APSF_current_22A1',
                'APSF_current_01A2',
                'APSF_current_02A1',
                'APSF_current_03A3',
                'APSF_current_13A3',
                'APSF_current_14A2',
                'APSF_current_23B1',
                'APSF_current_28A1',
                'APSF_current_30A1',
                'APSF_current_30B2',
                'APSF_current_30C2',
                'APSF_current_31A1',
                'APSF_current_31B1',
                'APSF_current_32A1',
                'APSF_current_33A1',
                'APSF_current_35A1',
                'APSF_current_37A1',
                'APSF_current_38A1',
                'APSF_current_40A1',
                'APSF_current_82A1',
                'APSF_current_89A1'
                ]
    merge['APSF'] = [
                ['APSF_current_06A1','APSF_current_06A2','APSF_current_60B0'],
                ['APSF_current_50A2'],
                ['APSF_current_12H1'],
                ['APSF_current_28A1','APSF_current_60A0','APSF_current_60D0']]
    # --------------------------------------------------
    trash['ASSS'] = [
                'ASSS_current_F57A0',
                'ASSS_current_F58A0',
                'ASSS_current_F59A0',
                'ASSS_current_F60A0',
                'ASSS_current_F63A0',
                ]
    merge['ASSS'] = [
                ['ASSS_current_F01A0','ASSS_current_F01P0'],
                ['ASSS_current_F06A0','ASSS_current_F06P0'],
                ['ASSS_current_F62A0'],
                ['ASSS_current_F53A0','ASSS_current_F55A0','ASSS_current_F56A0']
                ]
    # --------------------------------------------------
    trash['AXXX'] = [
                'AXXX_current_301A',
                'AXXX_current_301B',
                'AXXX_current_301C',
                'AXXX_current_301E',
                'AXXX_current_302E',
                'AXXX_current_305A',
                'AXXX_current_305B',
                'AXXX_current_380A',
                'AXXX_current_380B',
                'AXXX_current_380C',
                'AXXX_current_380R',
                'AXXX_current_381A',
                'AXXX_current_381B',
                'AXXX_current_381C'
                ]
    merge['AXXX'] = [[]]
    # --------------------------------------------------
    trash['AACF'] = [
                'AACF_current_319O',
                'AACF_current_325B',
                'AACF_current_E90A',
                'AACF_current_E90B',
                'AACF_current_U07D',
                'AACF_current_M13A',
                'AACF_current_E92A',
                'AACF_current_325E',
                'AACF_current_325C',
                'AACF_current_325D',
                'AACF_current_M03A',
                ]
    merge['AACF'] = [
                ['AACF_current_M21A','AACF_current_M22A','AACF_current_E70A',],
                ['AACF_current_M02A'],
                ['AACF_current_M06A'],
                ['AACF_current_M07A'],
                ['AACF_current_E03A'],
                ['AACF_current_E05A']
                ]
    # --------------------------------------------------
    # --------------------------------------------------
    # This subsystem looks like a temperature indicator
    # We delete all commands and process it into a single "temperature" feature
    # --------------------------------------------------
    trash['ATMB'] = ['ATMB_current_003K'
                ,'ATMB_current_022K'
                ,'ATMB_current_045K'
                ,'ATMB_current_057K'
                ,'ATMB_current_076K'
                ,'ATMB_current_091K'
                ,'ATMB_current_114K'
                ,'ATMB_current_152K'
                ,'ATMB_current_182K'
                ,'ATMB_current_228K']
    merge['ATMB'] = [[]]
    # Creating a single signal
    atmb_cols = [i for i in pd_data.columns if i.startswith('ATMB_current_')]
    # Zero-valued series with the same index/dtype as the first ATMB column.
    atmb_vals = np.copy(pd_data[atmb_cols[0]])*0
    temps = [3,22,45,57,76,91,114,152,182,228]
    # NOTE(review): the loop starts at i=1 and pairs atmb_cols[i] with
    # temps[i-1]; if atmb_cols keeps the 003K..228K column order this looks
    # off by one (022K weighted by 3) — confirm the intended pairing.
    for i in range(1, np.size(atmb_cols)):
        atmb_vals += pd_data[atmb_cols[i]]*temps[i-1]
    # --------------------------------------------------
    return trash, merge, atmb_vals
| 35.068862 | 173 | 0.554939 |
24082121480303b3d6222291e1445e6c98d4eb4a | 11,584 | py | Python | mobly/controllers/sniffer.py | chuanhsiao/mobly | b65c09c6bd147325311fd27f71a7ee36ff137251 | [
"Apache-2.0"
] | null | null | null | mobly/controllers/sniffer.py | chuanhsiao/mobly | b65c09c6bd147325311fd27f71a7ee36ff137251 | [
"Apache-2.0"
] | null | null | null | mobly/controllers/sniffer.py | chuanhsiao/mobly | b65c09c6bd147325311fd27f71a7ee36ff137251 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
MOBLY_CONTROLLER_CONFIG_NAME = "Sniffer"
def create(configs):
    """Instantiates sniffer controllers from JSON configuration entries.

    Each configuration entry must provide:
        * Type: first-level sniffer type; planned values are 'local' for
          sniffers running on this machine and 'remote' for sniffers
          running elsewhere.
        * SubType: the concrete sniffer implementation to load.
        * Interface: the WLAN interface used to configure the sniffer.
        * BaseConfigs: dict of baseline sniffer settings (keys must be
          Sniffer.CONFIG_KEY_* values); individual captures may override
          them.

    Returns:
        A list of configured Sniffer objects, one per entry.
    """
    sniffers = []
    for entry in configs:
        # Implementations live under
        # mobly.controllers.sniffer_lib.<Type>.<SubType>
        module_path = "mobly.controllers.sniffer_lib.{}.{}".format(
            entry["Type"], entry["SubType"])
        impl_module = importlib.import_module(module_path)
        sniffers.append(
            impl_module.Sniffer(entry["Interface"],
                                logging.getLogger(),
                                base_configs=entry["BaseConfigs"]))
    return sniffers
def destroy(objs):
    """Tears down the sniffers, terminating any capture still in progress."""
    for capture_device in objs:
        try:
            capture_device.stop_capture()
        except SnifferError:
            # Best-effort cleanup: a sniffer with no active capture may
            # legitimately raise here, so the error is swallowed.
            pass
class SnifferError(Exception):
    """Base class for all errors raised by sniffer-related modules."""
class InvalidDataError(Exception):
    """Raised when invalid configuration data is passed to a method."""
class ExecutionError(SnifferError):
    """Raised when configuring the capture device or executing a capture
    operation fails.

    Typical causes: a local sniffer module run without sudo, or out-of-date
    keys for a remote sniffer.
    """
class InvalidOperationError(SnifferError):
    """Raised when a method is invoked while the object is not in a state
    that allows it.

    Certain methods may only be accessed in particular instance states.
    """
class Sniffer(object):
    """Generic behavior contract for a packet sniffer.

    The contract is independent of how or where concrete sniffers are
    implemented: on the local machine or on a remote one. Every method must
    be overridden by a derived class.
    """

    CONFIG_KEY_CHANNEL = "channel"

    def __init__(self, interface, logger, base_configs=None):
        """Constructs a sniffer configured and ready for capture.

        Args:
            interface: String naming the interface used to configure the
                sniffer.
            logger: Mobly logger object.
            base_configs: Dict of baseline configurations (keys are the
                Sniffer.CONFIG_KEY_* values); individual captures may
                override them.

        Returns:
            self: A configured sniffer.

        Raises:
            InvalidDataError: if the configuration data is invalid.
            NoPermissionError: if an error occurs while configuring the
                sniffer.
        """
        raise NotImplementedError("Base class should not be called directly!")

    def get_descriptor(self):
        """Returns a string describing the sniffer; the exact string and its
        format are up to each derived sniffer type.
        """
        raise NotImplementedError("Base class should not be called directly!")

    def get_type(self):
        """Returns the sniffer type string, corresponding to the 'Type' key
        of the sniffer configuration.
        """
        raise NotImplementedError("Base class should not be called directly!")

    def get_subtype(self):
        """Returns the sniffer sub-type string, corresponding to the
        'SubType' key of the sniffer configuration.
        """
        raise NotImplementedError("Base class should not be called directly!")

    def get_interface(self):
        """Returns the interface used to configure the sniffer (e.g.
        'wlan0'), corresponding to the 'Interface' key of the sniffer
        configuration.
        """
        raise NotImplementedError("Base class should not be called directly!")

    def get_capture_file(self):
        """Returns the full path of the current or most recent capture file.

        The sniffer places its captures in the logger directory.
        """
        raise NotImplementedError("Base class should not be called directly!")

    def start_capture(self,
                      override_configs=None,
                      additional_args=None,
                      duration=None,
                      packet_count=None):
        """Starts a non-blocking capture saved under the logger's log path.

        Depending on the sniffer type/subtype and configuration the capture
        may terminate on its own; either way, a terminating call must be
        made, explicitly (stop_capture() or wait_for_capture()) or
        implicitly via a 'with' clause on the returned context: on exit the
        context calls wait_for_capture() when a duration was given,
        otherwise stop_capture(). Use get_capture_file() for the path of the
        resulting capture.

        Args:
            override_configs: Dict combined with the base configurations
                ("BaseConfigs" in the sniffer configuration); keys are
                Sniffer.CONFIG_KEY_* values and determine this capture's
                configuration.
            additional_args: String of extra raw command-line arguments for
                the underlying sniffer; interpretation is sniffer-dependent.
            duration: Number of seconds to capture before the sniffer is
                terminated.
            packet_count: Number of packets after which capture terminates;
                combine with duration to guarantee termination even when
                fewer packets arrive.

        Returns:
            An ActiveCaptureContext process usable with a 'with' clause.

        Raises:
            InvalidDataError: for invalid configurations.
            NoPermissionError: if configuring or running the sniffer fails.
        """
        raise NotImplementedError("Base class should not be called directly!")

    def stop_capture(self):
        """Stops a capture and guarantees it is saved to the configured
        capture file.

        Depending on sniffer type the file may already hold partial results
        (local sniffer) or may not exist until this call (remote sniffer).
        Even when the capture terminates on its own, either this method or
        wait_for_capture() must be called so the file ends up in the correct
        location.

        Raises:
            NoPermissionError: when stopping the capture or saving the
                capture file is not permitted.
        """
        raise NotImplementedError("Base class should not be called directly!")

    def wait_for_capture(self, timeout=None):
        """Waits for a capture to terminate and guarantees the capture file
        is saved to its configured location.

        Either this method or stop_capture() must be called even for
        self-terminating captures, so the capture file is moved correctly.

        Args:
            timeout: Seconds to wait for self-termination; on expiry the
                sniffer is stopped explicitly via stop_capture().

        Raises:
            NoPermissionError: when stopping the capture or saving the
                capture file is not permitted.
        """
        raise NotImplementedError("Base class should not be called directly!")
class ActiveCaptureContext(object):
    """Context manager representing an active sniffer capture.

    Returned by Sniffer.start_capture(); terminates the capture when the
    'with' clause exits. It is syntactic sugar for try/finally.
    """

    _sniffer = None
    _timeout = None

    def __init__(self, sniffer, timeout=None):
        self._sniffer = sniffer
        self._timeout = timeout

    def __enter__(self):
        pass

    def __exit__(self, type, value, traceback):
        sniffer = self._sniffer
        if sniffer is None:
            return
        # With a timeout we let the capture run out (or force-stop on
        # expiry); otherwise stop it immediately.
        if self._timeout is None:
            sniffer.stop_capture()
        else:
            sniffer.wait_for_capture(self._timeout)
        self._sniffer = None
| 39.135135 | 80 | 0.656854 |
202dff46220ef7a0c72af6fc5e402c8d449b8cb7 | 2,404 | py | Python | spot_motion_monitor/config/vimba_camera_config.py | lsst-sitcom/spot_motion_monitor | 3d0242276198126240667ba13e95b7bdf901d053 | [
"BSD-3-Clause"
] | null | null | null | spot_motion_monitor/config/vimba_camera_config.py | lsst-sitcom/spot_motion_monitor | 3d0242276198126240667ba13e95b7bdf901d053 | [
"BSD-3-Clause"
] | 5 | 2020-01-08T23:50:22.000Z | 2020-02-14T18:15:20.000Z | spot_motion_monitor/config/vimba_camera_config.py | lsst-com/spot_motion_monitor | 3d0242276198126240667ba13e95b7bdf901d053 | [
"MIT"
] | null | null | null | # This file is part of spot_motion_monitor.
#
# Developed for LSST System Integration, Test and Commissioning.
#
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
from . import CameraConfig
__all__ = ['VimbaCameraConfig']
class VimbaCameraConfig(CameraConfig):
    """Class that handles the configuration of the Vimba class cameras.

    Attributes
    ----------
    cameraIndex : int
        The current index of the camera if multiple present.
    fullExposureTime : int
        The exposure time (microseconds) in full frame mode.
    modelName : str
        A description of the camera model.
    roiExposureTime : int
        The exposure time (microseconds) in ROI mode.
    roiFluxMinimum : int
        The minimum flux allowed in an ROI.
    """

    def __init__(self):
        """Initialize the class.
        """
        super().__init__()
        # modelName stays None until a configuration dict provides one;
        # the numeric attributes below are the Vimba defaults.
        self.modelName = None
        self.roiFluxMinimum = 2000
        self.roiExposureTime = 8000  # microseconds
        self.fullExposureTime = 8000  # microseconds
        self.cameraIndex = 0

    def fromDict(self, config):
        """Translate config to class attributes.

        Parameters
        ----------
        config : dict
            The configuration to translate.
        """
        # Expected layout: top-level "modelName"/"cameraIndex" plus nested
        # "roi" and "full" dicts (mirrors toDict below).
        self.modelName = config["modelName"]
        self.roiFluxMinimum = config["roi"]["fluxMin"]
        self.roiExposureTime = config["roi"]["exposureTime"]
        self.fullExposureTime = config["full"]["exposureTime"]
        self.cameraIndex = config["cameraIndex"]
        # Let the base class pick up its own (shared) parameters.
        super().fromDict(config)

    def toDict(self, writeEmpty=False):
        """Translate class attributes to configuration dict.

        Parameters
        ----------
        writeEmpty : bool
            Flag to write parameters with None as values.

        Returns
        -------
        dict
            The currently stored configuration.
        """
        # Base class provides the dict skeleton (presumably including the
        # nested "roi"/"full" dicts — defined in CameraConfig, not visible
        # in this chunk).
        config = super().toDict(writeEmpty)
        # modelName is only emitted when set, unless empty values were
        # explicitly requested; the numeric keys always have defaults.
        if writeEmpty or self.modelName is not None:
            config["modelName"] = self.modelName
        config["roi"]["fluxMin"] = self.roiFluxMinimum
        config["roi"]["exposureTime"] = self.roiExposureTime
        config["full"]["exposureTime"] = self.fullExposureTime
        return config
| 31.220779 | 71 | 0.6302 |
c51741f4452a3ff59893245949cf5e5d425267b6 | 12,481 | py | Python | MacetesListaAdjacencia.py | RafaelMunizz/Teoria-dos-Grafos | 1502219c629a3081a815cbcc9d0d3353eb471872 | [
"MIT"
] | null | null | null | MacetesListaAdjacencia.py | RafaelMunizz/Teoria-dos-Grafos | 1502219c629a3081a815cbcc9d0d3353eb471872 | [
"MIT"
] | null | null | null | MacetesListaAdjacencia.py | RafaelMunizz/Teoria-dos-Grafos | 1502219c629a3081a815cbcc9d0d3353eb471872 | [
"MIT"
] | null | null | null | from bibgrafo.grafo_lista_adjacencia import GrafoListaAdjacencia
from bibgrafo.grafo_exceptions import *
from meu_grafo_listaAdjacencia import MeuGrafo
# ---------------------------------------------------------------------------
# Cheat-sheet script: builds a collection of reference graphs used by the
# adjacency-list test-suite (grafo_test) and, at the bottom (inside dead
# triple-quoted strings), shows common access idioms for the MeuGrafo
# adjacency-list implementation.
# ---------------------------------------------------------------------------

############################ grafo_test TESTS ################################

#################### ASSIGNMENT GRAPHS ####################

# Paraíba graph (Roteiro 1)
g_p = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
g_p.adicionaAresta('a1', 'J', 'C')
g_p.adicionaAresta('a2', 'C', 'E')
g_p.adicionaAresta('a3', 'C', 'E')
g_p.adicionaAresta('a4', 'P', 'C')
g_p.adicionaAresta('a5', 'P', 'C')
g_p.adicionaAresta('a6', 'T', 'C')
g_p.adicionaAresta('a7', 'M', 'C')
g_p.adicionaAresta('a8', 'M', 'T')
g_p.adicionaAresta('a9', 'T', 'Z')

# Graph (Roteiro 2)
g_Roteiro2 = MeuGrafo(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'])
g_Roteiro2.adicionaAresta('a1', 'A', 'B')
g_Roteiro2.adicionaAresta('a2', 'A', 'G')
g_Roteiro2.adicionaAresta('a3', 'A', 'J')
g_Roteiro2.adicionaAresta('a4', 'K', 'G')
g_Roteiro2.adicionaAresta('a5', 'K', 'J')
g_Roteiro2.adicionaAresta('a6', 'J', 'G')
g_Roteiro2.adicionaAresta('a7', 'J', 'I')
g_Roteiro2.adicionaAresta('a8', 'G', 'I')
g_Roteiro2.adicionaAresta('a9', 'G', 'H')
g_Roteiro2.adicionaAresta('a10', 'H', 'F')
g_Roteiro2.adicionaAresta('a11', 'F', 'B')
g_Roteiro2.adicionaAresta('a12', 'B', 'G')
g_Roteiro2.adicionaAresta('a13', 'B', 'C')
g_Roteiro2.adicionaAresta('a14', 'C', 'D')
g_Roteiro2.adicionaAresta('a15', 'D', 'E')
g_Roteiro2.adicionaAresta('a16', 'D', 'B')
g_Roteiro2.adicionaAresta('a17', 'B', 'E')

#################### SIMPLE GRAPHS ####################

grafoSimples1 = MeuGrafo(['A', 'B', 'C', 'D', 'E'])
grafoSimples1.adicionaAresta('a1', 'A', 'B')
grafoSimples1.adicionaAresta('a2', 'B', 'D')
grafoSimples1.adicionaAresta('a3', 'B', 'C')
grafoSimples1.adicionaAresta('a4', 'C', 'D')
grafoSimples1.adicionaAresta('a5', 'D', 'E')
grafoSimples1.adicionaAresta('a6', 'C', 'E')

grafoSimples2 = MeuGrafo(['M', 'N', 'O', 'P', 'Q'])
grafoSimples2.adicionaAresta('a1', 'M', 'O')
grafoSimples2.adicionaAresta('a2', 'M', 'P')
grafoSimples2.adicionaAresta('a3', 'O', 'N')
grafoSimples2.adicionaAresta('a4', 'N', 'P')
grafoSimples2.adicionaAresta('a5', 'P', 'Q')

grafoSimples3 = MeuGrafo(['1', '2', '3', '4', '5', '6'])
grafoSimples3.adicionaAresta('a1', '1', '2')
grafoSimples3.adicionaAresta('a2', '1', '3')
grafoSimples3.adicionaAresta('a3', '1', '4')
grafoSimples3.adicionaAresta('a4', '1', '6')
grafoSimples3.adicionaAresta('a5', '3', '4')
grafoSimples3.adicionaAresta('a6', '3', '5')
grafoSimples3.adicionaAresta('a7', '4', '5')
grafoSimples3.adicionaAresta('a8', '2', '3')
grafoSimples3.adicionaAresta('a9', '2', '5')
grafoSimples3.adicionaAresta('a10', '5', '6')
grafoSimples3.adicionaAresta('a11', '4', '6')
grafoSimples3.adicionaAresta('a12', '2', '6')

#################### GRAPHS WITH PARALLEL EDGES ####################

grafoComArestasParalelas1 = MeuGrafo(['A', 'B', 'C', 'D', 'E'])
grafoComArestasParalelas1.adicionaAresta('a1', 'A', 'B')
grafoComArestasParalelas1.adicionaAresta('a2', 'A', 'B')
grafoComArestasParalelas1.adicionaAresta('a3', 'A', 'E')
grafoComArestasParalelas1.adicionaAresta('a4', 'E', 'C')
grafoComArestasParalelas1.adicionaAresta('a5', 'B', 'D')
grafoComArestasParalelas1.adicionaAresta('a6', 'D', 'C')

grafoComArestasParalelas2 = MeuGrafo(['1', '2', '3', '4'])
grafoComArestasParalelas2.adicionaAresta('a1', '1', '2')
grafoComArestasParalelas2.adicionaAresta('a2', '2', '3')
grafoComArestasParalelas2.adicionaAresta('a3', '2', '3')
grafoComArestasParalelas2.adicionaAresta('a4', '3', '4')

#################### GRAPHS WITHOUT PARALLEL EDGES ####################

# Paraíba graph without parallel edges
g_p_sem_paralelas = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
g_p_sem_paralelas.adicionaAresta('a1', 'J', 'C')
g_p_sem_paralelas.adicionaAresta('a2', 'C', 'E')
g_p_sem_paralelas.adicionaAresta('a3', 'P', 'C')
g_p_sem_paralelas.adicionaAresta('a4', 'T', 'C')
g_p_sem_paralelas.adicionaAresta('a5', 'M', 'C')
g_p_sem_paralelas.adicionaAresta('a6', 'M', 'T')
g_p_sem_paralelas.adicionaAresta('a7', 'T', 'Z')

#################### COMPLETE GRAPHS ####################

g_c = MeuGrafo(['J', 'C', 'E', 'P'])
g_c.adicionaAresta('a1', 'J', 'C')
g_c.adicionaAresta('a2', 'J', 'E')
g_c.adicionaAresta('a3', 'J', 'P')
g_c.adicionaAresta('a4', 'E', 'C')
g_c.adicionaAresta('a5', 'P', 'C')
g_c.adicionaAresta('a6', 'P', 'E')

g_c2 = MeuGrafo(['Nina', 'Maria'])
g_c2.adicionaAresta('amiga', 'Nina', 'Maria')

# Single-vertex graph (trivially complete)
g_c3 = MeuGrafo(['J'])

grafoCompleto1 = MeuGrafo(['P', 'Q', 'R', 'S', 'T'])
grafoCompleto1.adicionaAresta('a1', 'P', 'T')
grafoCompleto1.adicionaAresta('a2', 'P', 'S')
grafoCompleto1.adicionaAresta('a3', 'P', 'R')
grafoCompleto1.adicionaAresta('a4', 'P', 'Q')
grafoCompleto1.adicionaAresta('a5', 'Q', 'T')
grafoCompleto1.adicionaAresta('a6', 'R', 'T')
grafoCompleto1.adicionaAresta('a7', 'Q', 'S')
grafoCompleto1.adicionaAresta('a8', 'Q', 'R')
grafoCompleto1.adicionaAresta('a9', 'R', 'S')
grafoCompleto1.adicionaAresta('a10', 'S', 'T')

#################### GRAPHS WITH SELF-LOOPS ####################

g_l1 = MeuGrafo(['A', 'B', 'C', 'D'])
g_l1.adicionaAresta('a1', 'A', 'A')
g_l1.adicionaAresta('a2', 'A', 'B')
g_l1.adicionaAresta('a3', 'A', 'A')

g_l2 = MeuGrafo(['A', 'B', 'C', 'D'])
g_l2.adicionaAresta('a1', 'A', 'B')
g_l2.adicionaAresta('a2', 'B', 'B')
g_l2.adicionaAresta('a3', 'B', 'A')

g_l3 = MeuGrafo(['A', 'B', 'C', 'D'])
g_l3.adicionaAresta('a1', 'C', 'A')
g_l3.adicionaAresta('a2', 'C', 'C')
g_l3.adicionaAresta('a3', 'D', 'D')
g_l3.adicionaAresta('a4', 'D', 'D')

g_l4 = MeuGrafo(['D'])
g_l4.adicionaAresta('a1', 'D', 'D')

g_l5 = MeuGrafo(['C', 'D'])
g_l5.adicionaAresta('a1', 'D', 'C')
g_l5.adicionaAresta('a2', 'C', 'C')

#################### DISCONNECTED GRAPHS ####################

g_d = MeuGrafo(['A', 'B', 'C', 'D'])
g_d.adicionaAresta('asd', 'A', 'B')

# Disconnected Paraíba graph (vertex J is isolated: edge a1 omitted)
g_p_desconexo = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
g_p_desconexo.adicionaAresta('a2', 'C', 'E')
g_p_desconexo.adicionaAresta('a3', 'C', 'E')
g_p_desconexo.adicionaAresta('a4', 'P', 'C')
g_p_desconexo.adicionaAresta('a5', 'P', 'C')
g_p_desconexo.adicionaAresta('a6', 'T', 'C')
g_p_desconexo.adicionaAresta('a7', 'M', 'C')
g_p_desconexo.adicionaAresta('a8', 'M', 'T')
g_p_desconexo.adicionaAresta('a9', 'T', 'Z')

grafoDesconexo1 = MeuGrafo(['A', 'B', 'C', 'D', 'E'])
grafoDesconexo1.adicionaAresta('a1', 'A', 'E')
grafoDesconexo1.adicionaAresta('a2', 'E', 'B')
grafoDesconexo1.adicionaAresta('a3', 'E', 'C')
grafoDesconexo1.adicionaAresta('a4', 'C', 'A')

#################### DFS TEST GRAPHS (expected results) ####################

# Paraíba graph: expected DFS tree starting from J
g_p_DFS_J = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
g_p_DFS_J.adicionaAresta('a1', 'J', 'C')
g_p_DFS_J.adicionaAresta('a2', 'C', 'E')
g_p_DFS_J.adicionaAresta('a4', 'P', 'C')
g_p_DFS_J.adicionaAresta('a6', 'T', 'C')
g_p_DFS_J.adicionaAresta('a8', 'M', 'T')
g_p_DFS_J.adicionaAresta('a9', 'T', 'Z')

# Roteiro2 graph: expected DFS tree starting from K
g_Roteiro2_DFS_K = MeuGrafo(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'])
g_Roteiro2_DFS_K.adicionaAresta('a4', 'K', 'G')
g_Roteiro2_DFS_K.adicionaAresta('a2', 'A', 'G')
g_Roteiro2_DFS_K.adicionaAresta('a1', 'A', 'B')
g_Roteiro2_DFS_K.adicionaAresta('a11', 'F', 'B')
g_Roteiro2_DFS_K.adicionaAresta('a10', 'H', 'F')
g_Roteiro2_DFS_K.adicionaAresta('a13', 'B', 'C')
g_Roteiro2_DFS_K.adicionaAresta('a14', 'C', 'D')
g_Roteiro2_DFS_K.adicionaAresta('a15', 'D', 'E')
g_Roteiro2_DFS_K.adicionaAresta('a3', 'A', 'J')
g_Roteiro2_DFS_K.adicionaAresta('a7', 'J', 'I')

#################### BFS TEST GRAPHS (expected results) ####################

# Paraíba graph: expected BFS tree starting from C
g_p_BFS_C = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
g_p_BFS_C.adicionaAresta('a1', 'J', 'C')
g_p_BFS_C.adicionaAresta('a2', 'C', 'E')
g_p_BFS_C.adicionaAresta('a4', 'P', 'C')
g_p_BFS_C.adicionaAresta('a6', 'T', 'C')
g_p_BFS_C.adicionaAresta('a7', 'M', 'C')
g_p_BFS_C.adicionaAresta('a9', 'T', 'Z')

# Roteiro2 graph: expected BFS tree starting from F
g_Roteiro2_BFS_F = MeuGrafo(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'])
g_Roteiro2_BFS_F.adicionaAresta('a10', 'H', 'F')
g_Roteiro2_BFS_F.adicionaAresta('a11', 'F', 'B')
g_Roteiro2_BFS_F.adicionaAresta('a9', 'G', 'H')
g_Roteiro2_BFS_F.adicionaAresta('a2', 'A', 'G')
g_Roteiro2_BFS_F.adicionaAresta('a4', 'K', 'G')
g_Roteiro2_BFS_F.adicionaAresta('a6', 'J', 'G')
g_Roteiro2_BFS_F.adicionaAresta('a8', 'G', 'I')
g_Roteiro2_BFS_F.adicionaAresta('a13', 'B', 'C')
g_Roteiro2_BFS_F.adicionaAresta('a16', 'D', 'B')
g_Roteiro2_BFS_F.adicionaAresta('a17', 'B', 'E')

#################### ACYCLIC GRAPHS ####################

grafoAciclico1 = MeuGrafo(['1', '2', '3', '4', '5', '6'])
grafoAciclico1.adicionaAresta('a1', '1', '4')
grafoAciclico1.adicionaAresta('a2', '2', '4')
grafoAciclico1.adicionaAresta('a3', '3', '4')
grafoAciclico1.adicionaAresta('a4', '4', '5')
grafoAciclico1.adicionaAresta('a5', '5', '6')

grafoAciclico2 = MeuGrafo(['A', 'B', 'C', 'D', 'E', 'F'])
grafoAciclico2.adicionaAresta('a1', 'A', 'C')
grafoAciclico2.adicionaAresta('a2', 'C', 'E')
grafoAciclico2.adicionaAresta('a3', 'E', 'B')
grafoAciclico2.adicionaAresta('a4', 'E', 'F')
grafoAciclico2.adicionaAresta('a5', 'F', 'D')

grafoAciclico3 = MeuGrafo(['A', 'B', 'C', 'D', 'E', 'F', 'G'])
grafoAciclico3.adicionaAresta('a1', 'A', 'B')
grafoAciclico3.adicionaAresta('a2', 'B', 'D')
grafoAciclico3.adicionaAresta('a3', 'B', 'F')
grafoAciclico3.adicionaAresta('a4', 'A', 'C')
grafoAciclico3.adicionaAresta('a5', 'C', 'G')
grafoAciclico3.adicionaAresta('a6', 'A', 'E')

#################### BIPARTITE GRAPHS ####################

# Complete bipartite graph K3,3: partitions {1,2,3} and {4,5,6}
grafoBipartido1 = MeuGrafo(['1', '2', '3', '4', '5', '6'])
grafoBipartido1.adicionaAresta('a1', '1', '4')
grafoBipartido1.adicionaAresta('a2', '1', '5')
grafoBipartido1.adicionaAresta('a3', '1', '6')
grafoBipartido1.adicionaAresta('a4', '2', '4')
grafoBipartido1.adicionaAresta('a5', '2', '5')
grafoBipartido1.adicionaAresta('a6', '2', '6')
grafoBipartido1.adicionaAresta('a7', '3', '4')
grafoBipartido1.adicionaAresta('a8', '3', '5')
grafoBipartido1.adicionaAresta('a9', '3', '6')

##################### EULERIAN PATH TESTS ###################

grafoEuleriano1 = MeuGrafo(['A', 'B', 'C', 'D'])
grafoEuleriano1.adicionaAresta('a1', 'A', 'B')
grafoEuleriano1.adicionaAresta('a2', 'B', 'C')
grafoEuleriano1.adicionaAresta('a3', 'C', 'D')
grafoEuleriano1.adicionaAresta('a4', 'D', 'A')

grafoEuleriano2 = MeuGrafo(['A', 'B', 'C', 'D', 'E', 'F', 'G'])
grafoEuleriano2.adicionaAresta('a1', 'A', 'B')
grafoEuleriano2.adicionaAresta('a2', 'B', 'C')
grafoEuleriano2.adicionaAresta('a3', 'C', 'A')
grafoEuleriano2.adicionaAresta('a4', 'A', 'G')
grafoEuleriano2.adicionaAresta('a5', 'G', 'C')
grafoEuleriano2.adicionaAresta('a6', 'C', 'D')
grafoEuleriano2.adicionaAresta('a7', 'D', 'G')
grafoEuleriano2.adicionaAresta('a8', 'G', 'F')
grafoEuleriano2.adicionaAresta('a9', 'F', 'D')
grafoEuleriano2.adicionaAresta('a10', 'D', 'E')
grafoEuleriano2.adicionaAresta('a11', 'E', 'F')
grafoEuleriano2.adicionaAresta('a12', 'F', 'A')

grafoEuleriano3 = MeuGrafo(['A', 'B', 'C', 'D', 'E'])
grafoEuleriano3.adicionaAresta('a1', 'A', 'B')
grafoEuleriano3.adicionaAresta('a2', 'B', 'C')
grafoEuleriano3.adicionaAresta('a3', 'C', 'A')
grafoEuleriano3.adicionaAresta('a4', 'A', 'D')
grafoEuleriano3.adicionaAresta('a5', 'D', 'C')
grafoEuleriano3.adicionaAresta('a6', 'C', 'E')
grafoEuleriano3.adicionaAresta('a7', 'E', 'D')

grafoEuleriano4 = MeuGrafo(['A', 'B', 'C'])
grafoEuleriano4.adicionaAresta('a1', 'A', 'B')
grafoEuleriano4.adicionaAresta('a2', 'A', 'C')

grafoEuleriano5 = MeuGrafo(['A', 'B', 'C', 'D', 'E'])
grafoEuleriano5.adicionaAresta('a1', 'A', 'B')
grafoEuleriano5.adicionaAresta('a2', 'B', 'C')
grafoEuleriano5.adicionaAresta('a3', 'C', 'A')
grafoEuleriano5.adicionaAresta('a4', 'A', 'D')
grafoEuleriano5.adicionaAresta('a5', 'D', 'E')
grafoEuleriano5.adicionaAresta('a6', 'E', 'A')

#################### FUNCTION TESTS ####################

# The triple-quoted strings below are deliberately dead code: each one is a
# copy/paste-ready usage example for the MeuGrafo adjacency-list API.
'''
#### LISTA DE ADJACENCIA ###

# PERCORRER VÉRTICES (NÓS)
for v in teste.N:
    print(v)
'''
'''
# PERCORRER ARESTAS
for a in teste.A:
    print(a)
'''
'''
# PERCORRER VÉRTICES QUE LIGAM ARESTAS
for a in teste.A:
    print(teste.A[a])
'''
'''
# PEGAR ÚNICA ARESTAJ
print(teste.getAresta("a1"))
'''
'''
# INFORMAÇÕES DA ARESTA (teste = self em meu_grafo)
print(teste.getAresta("a1"))
print(teste.getAresta("a1").getRotulo())
print(teste.getAresta("a1").getV1())
print(teste.getAresta("a1").getV2())
print(teste.getAresta("a1").getPeso())
'''

print(g_Roteiro2.dfs("A"))
a1c5db30d965d9c81486d82531b53028475bedf5 | 1,619 | py | Python | server/game/test/test_player.py | the-artists-formerly-known-as-spam/hacktm2018 | 3f3523b0b0bf9ed0acf7585015c2d58277c12e42 | [
"Apache-2.0"
] | 1 | 2018-10-11T07:48:03.000Z | 2018-10-11T07:48:03.000Z | server/game/test/test_player.py | the-artists-formerly-known-as-spam/hacktm2018 | 3f3523b0b0bf9ed0acf7585015c2d58277c12e42 | [
"Apache-2.0"
] | 6 | 2021-03-08T19:18:49.000Z | 2022-02-26T03:43:13.000Z | server/game/test/test_player.py | the-artists-formerly-known-as-spam/hacktm2018 | 3f3523b0b0bf9ed0acf7585015c2d58277c12e42 | [
"Apache-2.0"
] | null | null | null | """ Module to test player methods, to make sure movement does not go
out of bounds, and other business logic."""
import game.player as player
from game.exceptions import IllegalMoveException
def test_move_outside_bounds_by_1():
    """Moving 1 step below the lower bound raises and leaves position intact."""
    joe = player.Player("Joe", [0, 0], "player1")
    # Warm-up: a zero-length move is legal and must not change the position.
    joe.move(0, 0)
    assert joe.position == [0, 0]
    for dx, dy in ((-1, 0), (0, -1)):
        try:
            joe.move(dx, dy)
            assert False
        except IllegalMoveException:
            assert joe.position == [0, 0]
def test_move_outside_bounds_by_2():
    """Moving 2 steps below the lower bound raises and leaves position intact."""
    joe = player.Player("Joe", [0, 0], "player1")
    for dx, dy in ((-2, 0), (0, -2)):
        try:
            joe.move(dx, dy)
            assert False
        except IllegalMoveException:
            assert joe.position == [0, 0]
def test_move_outside_bounds_by_2_up():
    """Moving 2 steps past the upper bound raises and leaves position intact."""
    joe = player.Player("Joe", [4, 14], "player1")
    for dx, dy in ((2, 0), (0, 2)):
        try:
            joe.move(dx, dy)
            assert False
        except IllegalMoveException:
            assert joe.position == [4, 14]
def test_move_outside_bounds_by_1_up():
    """Moving 1 step past the upper bound raises and leaves position intact."""
    joe = player.Player("Joe", [4, 14], "player1")
    for dx, dy in ((1, 0), (0, 1)):
        try:
            joe.move(dx, dy)
            assert False
        except IllegalMoveException:
            assert joe.position == [4, 14]
| 23.463768 | 68 | 0.590488 |
a034bcb157f3cc10b1759c144b0a11efaa7091fb | 21,990 | py | Python | custom_components/meross_lan/__init__.py | patrick-blom/meross_lan | dfab3133c6ed00b513cc1500b510e57c8eee58e2 | [
"MIT"
] | 117 | 2021-03-20T16:36:33.000Z | 2022-03-30T02:52:06.000Z | custom_components/meross_lan/__init__.py | patrick-blom/meross_lan | dfab3133c6ed00b513cc1500b510e57c8eee58e2 | [
"MIT"
] | 132 | 2021-03-16T16:46:05.000Z | 2022-03-30T09:49:59.000Z | custom_components/meross_lan/__init__.py | patrick-blom/meross_lan | dfab3133c6ed00b513cc1500b510e57c8eee58e2 | [
"MIT"
] | 20 | 2021-03-20T14:30:32.000Z | 2022-03-13T18:21:56.000Z | """The Meross IoT local LAN integration."""
from typing import Callable, Dict, Optional, Union
from time import time
from datetime import datetime, timedelta
from json import (
dumps as json_dumps,
loads as json_loads,
)
from aiohttp.client_exceptions import ClientConnectionError
from homeassistant.config_entries import ConfigEntry, SOURCE_DISCOVERY
from homeassistant.core import HomeAssistant, callback
from homeassistant.components.mqtt.const import MQTT_DISCONNECTED
from homeassistant.helpers import device_registry
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.update_coordinator import (
DataUpdateCoordinator,
)
from homeassistant.exceptions import ConfigEntryNotReady
from . import merossclient
from .merossclient import (
const as mc, KeyType,
MerossDeviceDescriptor, MerossHttpClient,
build_default_payload_get,
)
from .meross_device import MerossDevice
from logging import WARNING, INFO
from .helpers import (
LOGGER, LOGGER_trap,
mqtt_publish, mqtt_is_connected,
)
from .const import (
DOMAIN, SERVICE_REQUEST,
CONF_HOST, CONF_PROTOCOL, CONF_OPTION_HTTP, CONF_OPTION_MQTT,
CONF_DEVICE_ID, CONF_KEY, CONF_CLOUD_KEY, CONF_PAYLOAD,
CONF_POLLING_PERIOD_DEFAULT,
PARAM_UNAVAILABILITY_TIMEOUT,PARAM_HEARTBEAT_PERIOD,
)
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Meross IoT local LAN component."""
    # Nothing to do at the component level: all shared state (MerossApi) is
    # created lazily by the first config entry in async_setup_entry.
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Meross IoT local LAN from a config entry.

    Handles both the MQTT Hub entry (no CONF_DEVICE_ID in entry.data) and
    per-device entries. MQTT registration is attempted at most once at a
    time (guarded by api.mqtt_subscribing against concurrent entry setups);
    entries that strictly require MQTT are deferred with ConfigEntryNotReady
    until the subscription exists.
    """
    LOGGER.debug("async_setup_entry entry_id = %s", entry.entry_id)
    # The shared MerossApi singleton is created lazily by the first entry.
    api = hass.data.get(DOMAIN)
    if api is None:  # fixed: was 'api == None' — identity check for None
        api = MerossApi(hass)
        hass.data[DOMAIN] = api

    device_id = entry.data.get(CONF_DEVICE_ID)

    if (api.unsub_mqtt is None) and \
        (not api.mqtt_subscribing) and \
        ((device_id is None) or (entry.data.get(CONF_PROTOCOL) != CONF_OPTION_HTTP)):
        # This is the MQTT Hub entry, or a device which could/should use
        # MQTT, and we still haven't registered the MQTT subscription.
        api.mqtt_subscribing = True  # guard ON
        try:
            await api.async_mqtt_register()
        except Exception:
            # Best-effort: entries that actually require MQTT are rejected
            # below with ConfigEntryNotReady when registration failed.
            pass
        api.mqtt_subscribing = False

    # This is a hell of race conditions: the previous mqtt_register could be
    # overlapping (awaited) because of a different ConfigEntry request
    # (where CONF_PROTOCOL != HTTP). We need to be sure to delay-load this
    # entry until MQTT is in place, at least for those directly requiring it.
    if (device_id is None) or (entry.data.get(CONF_PROTOCOL) == CONF_OPTION_MQTT):
        if api.unsub_mqtt is None:
            raise ConfigEntryNotReady("MQTT unavailable")

    if device_id is None:
        # This is the MQTT Hub entry.
        # key could be 'None': it then defaults to "" but allows the
        # key-reply trick.
        api.key = entry.data.get(CONF_KEY)
        api.unsub_entry_update_listener = entry.add_update_listener(api.entry_update_listener)
    else:
        # Device-related entry.
        LOGGER.debug("async_setup_entry device_id = %s", device_id)
        cloud_key = entry.data.get(CONF_CLOUD_KEY)
        if cloud_key is not None:
            # last loaded overwrites existing: shouldn't it be the same?!
            api.cloud_key = cloud_key
        device = api.build_device(device_id, entry)
        device.unsub_entry_update_listener = entry.add_update_listener(device.entry_update_listener)
        device.unsub_updatecoordinator_listener = api.coordinator.async_add_listener(device.updatecoordinator_listener)
        # hass.config_entries.async_setup_platforms is too recent (around
        # April 2021) to rely on: forward each platform manually instead.
        for platform in device.platforms.keys():
            hass.async_create_task(hass.config_entries.async_forward_entry_setup(entry, platform))

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry.

    For a device entry: unloads its platforms, detaches its listeners and
    drops it from the api registry. When the very last entry of the domain
    is being removed, also tears the shared MerossApi state down completely.
    """
    LOGGER.debug("async_unload_entry entry_id = %s", entry.entry_id)
    api: MerossApi = hass.data.get(DOMAIN)
    if api is not None:
        device_id = entry.data.get(CONF_DEVICE_ID)
        if device_id is not None:
            LOGGER.debug("async_unload_entry device_id = %s", device_id)
            # when removing devices we could also need to cleanup platforms
            device = api.devices[device_id]
            if not await hass.config_entries.async_unload_platforms(entry, device.platforms.keys()):
                # A platform refused to unload: abort without touching state.
                return False
            if device.unsub_entry_update_listener is not None:
                device.unsub_entry_update_listener()
                device.unsub_entry_update_listener = None
            if device.unsub_updatecoordinator_listener is not None:
                device.unsub_updatecoordinator_listener()
                device.unsub_updatecoordinator_listener = None
            api.devices.pop(device_id)
        # when removing the last configentry do a complete cleanup
        # (len == 1: this entry is still counted while being unloaded)
        if (not api.devices) and (len(hass.config_entries.async_entries(DOMAIN)) == 1):
            if api.unsub_mqtt_disconnected is not None:
                api.unsub_mqtt_disconnected()
                api.unsub_mqtt_disconnected = None
            if api.unsub_mqtt is not None:
                api.unsub_mqtt()
                api.unsub_mqtt = None
            if api.unsub_entry_update_listener is not None:
                api.unsub_entry_update_listener()
                api.unsub_entry_update_listener = None
            if api.unsub_discovery_callback is not None:
                api.unsub_discovery_callback()
                api.unsub_discovery_callback = None
            hass.data.pop(DOMAIN)
    return True
class MerossApi:
    """Shared integration state for the Meross LAN integration.

    A single instance lives in ``hass.data[DOMAIN]``. It owns the map of
    active MerossDevice objects, runs MQTT discovery of new appliances, and
    dispatches raw requests over MQTT with an HTTP fallback.
    """
    # keys used in the per-device discovery dict to track protocol progress
    KEY_STARTTIME = '__starttime'
    KEY_REQUESTTIME = '__requesttime'
    def __init__(self, hass: HomeAssistant):
        self.hass = hass
        self.key = None  # device key (None until a hub entry provides one)
        self.cloud_key = None
        self.devices: Dict[str, MerossDevice] = {}
        self.discovering: Dict[str, dict] = {}
        self.mqtt_subscribing = False # guard for asynchronous mqtt sub registration
        # unsubscribe callables for the global listeners (None when inactive)
        self.unsub_mqtt = None
        self.unsub_mqtt_disconnected = None
        self.unsub_entry_update_listener = None
        self.unsub_discovery_callback = None
        async def async_update_data():
            """
            data fetch and control moved to MerossDevice
            """
            return None
        self.coordinator = DataUpdateCoordinator(
            hass,
            LOGGER,
            # Name of the data. For logging purposes.
            name=DOMAIN,
            update_method=async_update_data,
            # Polling interval. Will only be polled if there are subscribers.
            update_interval=timedelta(seconds=CONF_POLLING_PERIOD_DEFAULT),
        )
        @callback
        def _request(service_call):
            # service handler: forward a raw namespace/method/payload request
            self.request(
                device_id=service_call.data.get(CONF_DEVICE_ID),
                namespace=service_call.data.get(mc.KEY_NAMESPACE),
                method=service_call.data.get(mc.KEY_METHOD),
                payload=json_loads(service_call.data.get(mc.KEY_PAYLOAD, "{}")),
                key=service_call.data.get(CONF_KEY, self.key),
                host=service_call.data.get(CONF_HOST)
            )
            return
        hass.services.async_register(DOMAIN, SERVICE_REQUEST, _request)
        return
    async def async_mqtt_register(self):
        """Subscribe to the Meross MQTT topics and wire up message routing
        plus the device discovery state machine."""
        # Listen to a message on MQTT.
        @callback
        async def mqtt_receive(msg):
            try:
                mqttpayload = json_loads(msg.payload)
                header = mqttpayload.get(mc.KEY_HEADER)
                method = header.get(mc.KEY_METHOD)
                namespace = header.get(mc.KEY_NAMESPACE)
                payload = mqttpayload.get(mc.KEY_PAYLOAD)
                # topic layout puts the appliance uuid at index 2
                device_id = msg.topic.split("/")[2]
                LOGGER.debug("MerossApi: MQTT RECV device_id:(%s) method:(%s) namespace:(%s)", device_id, method, namespace)
                device = self.devices.get(device_id)
                if device == None:
                    # unknown device: run through the discovery state machine
                    # lookout for any disabled/ignored entry
                    for domain_entry in self.hass.config_entries.async_entries(DOMAIN):
                        if (domain_entry.unique_id == device_id):
                            # entry already present...
                            #if domain_entry.disabled_by == DOMAIN:
                                # we previously disabled this one due to extended unavailability
                                #await self.hass.config_entries.async_set_disabled_by(domain_entry.entry_id, None)
                            # skip discovery anyway
                            msg_reason = "disabled" if domain_entry.disabled_by is not None \
                                else "ignored" if domain_entry.source == "ignore" \
                                    else "unknown"
                            LOGGER_trap(INFO, 14400, "Ignoring discovery for device_id: %s (ConfigEntry is %s)", device_id, msg_reason)
                            return
                    # also skip discovered integrations waiting in HA queue
                    for flow in self.hass.config_entries.flow.async_progress():
                        if (flow.get("handler") == DOMAIN) and (flow.get("context", {}).get("unique_id") == device_id):
                            LOGGER_trap(INFO, 14400, "Ignoring discovery for device_id: %s (ConfigEntry is in progress)", device_id)
                            return
                    replykey = merossclient.get_replykey(header, self.key)
                    if replykey != self.key:
                        LOGGER_trap(WARNING, 300, "Meross discovery key error for device_id: %s", device_id)
                        if self.key is not None:# we're using a fixed key in discovery so ignore this device
                            return
                    discovered = self.discovering.get(device_id)
                    if discovered == None:
                        # new device discovered: try to determine the capabilities
                        self.mqtt_publish_get(device_id, mc.NS_APPLIANCE_SYSTEM_ALL, replykey)
                        epoch = time()
                        self.discovering[device_id] = {
                            MerossApi.KEY_STARTTIME: epoch,
                            MerossApi.KEY_REQUESTTIME: epoch
                        }
                        if self.unsub_discovery_callback is None:
                            # arm the watchdog that retries/expires pending discoveries
                            self.unsub_discovery_callback = async_track_point_in_utc_time(
                                self.hass,
                                self.discovery_callback,
                                datetime.fromtimestamp(epoch + PARAM_UNAVAILABILITY_TIMEOUT + 2)
                            )
                    else:
                        if method == mc.METHOD_GETACK:
                            if namespace == mc.NS_APPLIANCE_SYSTEM_ALL:
                                # first half received: now ask for the ability map
                                discovered[mc.NS_APPLIANCE_SYSTEM_ALL] = payload
                                self.mqtt_publish_get(device_id, mc.NS_APPLIANCE_SYSTEM_ABILITY, replykey)
                                discovered[MerossApi.KEY_REQUESTTIME] = time()
                                return
                            elif namespace == mc.NS_APPLIANCE_SYSTEM_ABILITY:
                                if discovered.get(mc.NS_APPLIANCE_SYSTEM_ALL) is None:
                                    # ABILITY arrived before ALL: re-request ALL and wait
                                    self.mqtt_publish_get(device_id, mc.NS_APPLIANCE_SYSTEM_ALL, replykey)
                                    discovered[MerossApi.KEY_REQUESTTIME] = time()
                                    return
                                # both payloads collected: hand over to the config flow
                                payload.update(discovered[mc.NS_APPLIANCE_SYSTEM_ALL])
                                self.discovering.pop(device_id)
                                if (len(self.discovering) == 0) and self.unsub_discovery_callback:
                                    self.unsub_discovery_callback()
                                    self.unsub_discovery_callback = None
                                await self.hass.config_entries.flow.async_init(
                                    DOMAIN,
                                    context={ "source": SOURCE_DISCOVERY },
                                    data={
                                        CONF_DEVICE_ID: device_id,
                                        CONF_PAYLOAD: payload,
                                        CONF_KEY: replykey
                                    },
                                )
                                return
                else:
                    # known device: route the message to its handler
                    device.mqtt_receive(namespace, method, payload, header)
            except Exception as e:
                LOGGER.debug("MerossApi: mqtt_receive exception:(%s) payload:(%s)", str(e), msg.payload)
            return
        @callback
        def mqtt_disconnected():
            # propagate broker loss so devices can update availability/protocol
            for device in self.devices.values():
                device.mqtt_disconnected()
        self.unsub_mqtt = await self.hass.components.mqtt.async_subscribe(mc.TOPIC_DISCOVERY, mqtt_receive)
        self.unsub_mqtt_disconnected = async_dispatcher_connect(self.hass, MQTT_DISCONNECTED, mqtt_disconnected)
        #self.unsub_mqtt_connected = async_dispatcher_connect(self.hass, MQTT_CONNECTED, mqtt_connected)
    def has_device(self, ipaddress: str, macaddress:str) -> bool:
        """Return True when a registered device matches the given ip or mac
        (used to filter dhcp discovery)."""
        # macaddress from dhcp discovery is already stripped/lower but...
        macaddress = macaddress.replace(':', '').lower()
        for device in self.devices.values():
            if device.descriptor.innerIp == ipaddress:
                return True
            if device.descriptor.macAddress.replace(':', '').lower() == macaddress:
                return True
        else:
            # for/else: loop exhausted without a match
            return False
    def build_device(self, device_id: str, entry: ConfigEntry) -> MerossDevice:
        """
        scans device descriptor to build a 'slightly' specialized MerossDevice
        The base MerossDevice class is a bulk 'do it all' implementation
        but some devices (i.e. Hub) need a (radically?) different behaviour
        """
        descriptor = MerossDeviceDescriptor(entry.data.get(CONF_PAYLOAD, {}))
        # imports are deferred so only the needed specialization gets loaded
        if (mc.KEY_HUB in descriptor.digest):
            from .meross_device_hub import MerossDeviceHub
            device = MerossDeviceHub(self, descriptor, entry)
        elif (mc.KEY_LIGHT in descriptor.digest):
            from .meross_device_bulb import MerossDeviceBulb
            device = MerossDeviceBulb(self, descriptor, entry)
        elif (mc.KEY_GARAGEDOOR in descriptor.digest):
            from .meross_device_cover import MerossDeviceGarage
            device = MerossDeviceGarage(self, descriptor, entry)
        elif (mc.NS_APPLIANCE_ROLLERSHUTTER_STATE in descriptor.ability):
            from .meross_device_cover import MerossDeviceShutter
            device = MerossDeviceShutter(self, descriptor, entry)
        else:
            from .meross_device_switch import MerossDeviceSwitch
            device = MerossDeviceSwitch(self, descriptor, entry)
        self.devices[device_id] = device
        self.update_polling_period()
        try:
            # try block since this is not critical and api has recently changed
            device_registry.async_get(self.hass).async_get_or_create(
                config_entry_id = entry.entry_id,
                connections = {(device_registry.CONNECTION_NETWORK_MAC, descriptor.macAddress)},
                identifiers = {(DOMAIN, device_id)},
                manufacturer = mc.MANUFACTURER,
                name = descriptor.productname,
                model = descriptor.productmodel,
                sw_version = descriptor.firmware.get(mc.KEY_VERSION)
            )
        except:
            # NOTE(review): bare except deliberately swallows registry API changes
            pass
        return device
    def mqtt_publish(self,
        device_id: str,
        namespace: str,
        method: str,
        payload: dict,
        key: KeyType = None,
        messageid: str = None
    ) -> None:
        """Publish a signed Meross message on the device request topic."""
        LOGGER.debug("MerossApi: MQTT SEND device_id:(%s) method:(%s) namespace:(%s)", device_id, method, namespace)
        mqtt_publish(
            self.hass,
            mc.TOPIC_REQUEST.format(device_id),
            json_dumps(merossclient.build_payload(
                namespace, method, payload, key,
                mc.TOPIC_RESPONSE.format(device_id), messageid))
        )
    def mqtt_publish_get(self,
        device_id: str,
        namespace: str,
        key: KeyType = None
    ) -> None:
        """Shortcut: publish a GET for *namespace* with its default query payload."""
        self.mqtt_publish(
            device_id,
            namespace,
            mc.METHOD_GET,
            build_default_payload_get(namespace),
            key
        )
    async def async_http_request(self,
        host: str,
        namespace: str,
        method: str,
        payload: dict,
        key: KeyType = None,
        callback_or_device: Union[Callable, MerossDevice] = None # pylint: disable=unsubscriptable-object
    ) -> None:
        """Send a request over HTTP, reusing a single lazily-created client.

        On success, forwards the response either to a MerossDevice's
        ``receive`` or (for SETACK only) invokes the plain callback.
        Errors are logged, never raised.
        """
        try:
            _httpclient:MerossHttpClient = getattr(self, '_httpclient', None)
            if _httpclient is None:
                _httpclient = MerossHttpClient(host, key, async_get_clientsession(self.hass), LOGGER)
                self._httpclient = _httpclient
            else:
                # retarget the shared client to this host/key
                _httpclient.host = host
                _httpclient.key = key
            response = await _httpclient.async_request(namespace, method, payload)
            r_header = response[mc.KEY_HEADER]
            r_namespace = r_header[mc.KEY_NAMESPACE]
            r_method = r_header[mc.KEY_METHOD]
            if callback_or_device is not None:
                if isinstance(callback_or_device, MerossDevice):
                    callback_or_device.receive( r_namespace, r_method,
                        response[mc.KEY_PAYLOAD], r_header)
                elif (r_method == mc.METHOD_SETACK):
                    #we're actually only using this for SET->SETACK command confirmation
                    callback_or_device()
        except Exception as e:
            LOGGER.warning("MerossApi: error in async_http_request(%s)", str(e) or type(e).__name__)
    def request(self,
        device_id: str,
        namespace: str,
        method: str,
        payload: dict = {},
        key: Union[dict, Optional[str]] = None, # pylint: disable=unsubscriptable-object
        host: str = None,
        callback_or_device: Union[Callable, MerossDevice] = None # pylint: disable=unsubscriptable-object
    ) -> None:
        """
        send a request with an 'adaptable protocol' behaviour i.e. use MQTT if the
        api is registered with the mqtt service else fallback to HTTP
        """
        # NOTE(review): mutable default for `payload` -- safe only while callers
        # never mutate the passed dict; confirm before relying on it.
        #LOGGER.debug("MerossApi: MQTT SEND device_id:(%s) method:(%s) namespace:(%s)", device_id, method, namespace)
        if (self.unsub_mqtt is None) or (device_id is None):
            # MQTT unavailable (or no target device): go through HTTP
            if host is None:
                if device_id is None:
                    LOGGER.warning("MerossApi: cannot call async_http_request (missing device_id and host)")
                    return
                device = self.devices.get(device_id)
                if device is None:
                    LOGGER.warning("MerossApi: cannot call async_http_request (device_id(%s) not found)", device_id)
                    return
                host = device.host
            self.hass.async_create_task(
                self.async_http_request(host, namespace, method, payload, key, callback_or_device)
            )
        else:
            self.mqtt_publish(device_id, namespace, method, payload, key)
    def update_polling_period(self) -> None:
        """
        called whenever a new device is added or a config_entry changes
        """
        # the coordinator polls at the fastest period requested by any device
        polling_period = CONF_POLLING_PERIOD_DEFAULT
        for device in self.devices.values():
            if device.polling_period < polling_period:
                polling_period = device.polling_period
        self.coordinator.update_interval = timedelta(seconds=polling_period)
    @callback
    async def entry_update_listener(self, hass: HomeAssistant, config_entry: ConfigEntry) -> None:
        """Refresh the shared device key when the hub config entry changes."""
        self.key = config_entry.data.get(CONF_KEY)
    @callback
    def discovery_callback(self, _now: datetime):
        """
        async task to keep alive the discovery process:
        activated when any device is initially detected
        this task is not renewed when the list of devices
        under 'discovery' is empty or these became stale
        """
        self.unsub_discovery_callback = None
        if len(discovering := self.discovering) == 0:
            return
        _mqtt_is_connected = mqtt_is_connected(self.hass)
        epoch = time()
        # iterate over a copy since stale entries get popped while scanning
        for device_id, discovered in discovering.copy().items():
            if (epoch - discovered.get(MerossApi.KEY_STARTTIME, 0)) > PARAM_HEARTBEAT_PERIOD:
                # stale entry...remove
                discovering.pop(device_id)
                continue
            if (
                _mqtt_is_connected and
                ((epoch - discovered.get(MerossApi.KEY_REQUESTTIME, 0)) > PARAM_UNAVAILABILITY_TIMEOUT)
            ):
                # re-issue whichever query is still missing an answer
                if discovered.get(mc.NS_APPLIANCE_SYSTEM_ALL) is None:
                    self.mqtt_publish_get(device_id, mc.NS_APPLIANCE_SYSTEM_ALL, self.key)
                else:
                    self.mqtt_publish_get(device_id, mc.NS_APPLIANCE_SYSTEM_ABILITY, self.key)
                discovered[MerossApi.KEY_REQUESTTIME] = epoch
        if len(discovering):
            # re-arm the watchdog while anything is still pending
            self.unsub_discovery_callback = async_track_point_in_utc_time(
                self.hass,
                self.discovery_callback,
                datetime.fromtimestamp(epoch + PARAM_UNAVAILABILITY_TIMEOUT + 2)
            )
| 44.334677 | 135 | 0.605684 |
7883f6063a7fba7f04b655e60038cb46410bf98e | 533 | py | Python | union_find/quick_find.py | ShAlireza/Algorithms | 4b7be5715c4fd3e8d4b8ba3645327ddca7b68858 | [
"MIT"
] | 1 | 2020-04-13T07:40:44.000Z | 2020-04-13T07:40:44.000Z | union_find/quick_find.py | ShAlireza/Algorithms | 4b7be5715c4fd3e8d4b8ba3645327ddca7b68858 | [
"MIT"
] | null | null | null | union_find/quick_find.py | ShAlireza/Algorithms | 4b7be5715c4fd3e8d4b8ba3645327ddca7b68858 | [
"MIT"
class QuickFindUF:
    """
    Quick Find approach

    time complexities:
        connected: O(1)
        union: O(n)

    worst-case time with M union-find operations on a set of N objects:
        M N
    """

    def __init__(self, size):
        # ids[i] is the component id of element i; initially each element
        # is its own component.
        self.ids = [_ for _ in range(size)]

    def connected(self, p, q):
        """Return True when p and q belong to the same component."""
        return self.ids[p] == self.ids[q]

    def union(self, p, q):
        """Merge the components of p and q by relabelling every member
        of p's component with q's component id (O(n)).

        Bug fix vs the original: the component ids must be captured
        *before* the scan -- the old code re-read ``self.ids[p]`` on every
        iteration, so as soon as position ``p`` itself was relabelled the
        remaining members of the component stopped matching. It also
        iterated the *values* of ``ids`` instead of the positions, which
        only coincidentally worked before any union had been performed.
        """
        pid = self.ids[p]
        qid = self.ids[q]
        if pid == qid:
            return
        for index in range(len(self.ids)):
            if self.ids[index] == pid:
                self.ids[index] = qid
| 23.173913 | 75 | 0.5197 |
27af06f3fbf3475a82b6ec915736968f3bdd9455 | 2,321 | py | Python | training_scripts/train_mlp_iris.py | JackHunt/tf-models | 6d22c309e28456865afd1cdc3fb87afd275cddfc | [
"BSD-3-Clause"
] | null | null | null | training_scripts/train_mlp_iris.py | JackHunt/tf-models | 6d22c309e28456865afd1cdc3fb87afd275cddfc | [
"BSD-3-Clause"
] | null | null | null | training_scripts/train_mlp_iris.py | JackHunt/tf-models | 6d22c309e28456865afd1cdc3fb87afd275cddfc | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License
# Copyright (c) 2021, Jack Hunt
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append('..')
import tensorflow as tf
from sklearn import datasets
from models_lib.models.mlp import MLP
def create_dataset(batch_size=4):
    """Load the Iris dataset and wrap it in a batched tf.data pipeline.

    Targets are one-hot encoded (3 classes); batch_size defaults to 4.
    """
    iris = datasets.load_iris()
    features = iris['data']
    labels = tf.keras.utils.to_categorical(iris['target'])
    return tf.data.Dataset.from_tensor_slices((features, labels)).batch(batch_size)
def create_model():
    """Build a sequential classifier around a single sigmoid MLP block
    (hidden layers 16/8/4, 3 outputs)."""
    mlp = MLP([16, 8, 4], 3,
              hidden_act='sigmoid',
              output_act='sigmoid')
    return tf.keras.Sequential([mlp])
def do_training():
    """Train the MLP on Iris with plain SGD (lr=0.1) and an MSE loss."""
    dataset = create_dataset()
    model = create_model()
    optimizer = tf.keras.optimizers.SGD(0.1)
    model.compile(optimizer=optimizer, loss='mse')
    # NOTE(review): Keras documents validation_split as unsupported with
    # tf.data datasets -- confirm it is honoured here.
    model.fit(dataset, epochs=300, validation_split=0.2)
# Script entry point: run the training loop when executed directly.
if __name__=='__main__':
    do_training()
| 35.707692 | 80 | 0.754416 |
b86a42dac87ca701ade786bfe6cec63c77c07882 | 881 | py | Python | deepracing_py/plot_calibration.py | linklab-uva/deepracing | fc25c47658277df029e7399d295d97a75fe85216 | [
"Apache-2.0"
] | 11 | 2020-06-29T15:21:37.000Z | 2021-04-12T00:42:26.000Z | deepracing_py/plot_calibration.py | linklab-uva/deepracing | fc25c47658277df029e7399d295d97a75fe85216 | [
"Apache-2.0"
] | null | null | null | deepracing_py/plot_calibration.py | linklab-uva/deepracing | fc25c47658277df029e7399d295d97a75fe85216 | [
"Apache-2.0"
] | 4 | 2019-01-23T23:36:57.000Z | 2021-07-02T00:18:37.000Z | import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
# Calibration samples: column 0 = Magic F1 steering value, column 1 = vJoy value.
data = np.loadtxt("out.csv", delimiter=',')
# Split by sign of the F1 value; the boundary sample (0) lands in both halves.
# NOTE(review): label mapping (>=0 plotted as "Left Steering") -- confirm
# against the controller's sign convention.
steerleft = data[data[:,0]>=0]
steerright = data[data[:,0]<=0]
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("vJoy vs Magic F1")
ax1.set_xlabel('Magic F1 Value')
ax1.set_ylabel('vJoy Value')
ax1.plot(steerleft[:,0],steerleft[:,1], c='r', label='Left Steering')
ax1.plot(steerright[:,0],steerright[:,1], c='b', label='Right Steering')
# First-order least-squares fit per side: vjoy ~ m*f1 + b.
steerleft_fit = np.polyfit(steerleft[:,0],steerleft[:,1],1)
steerright_fit = np.polyfit(steerright[:,0],steerright[:,1],1)
print("Left-steering equation: vjoy = %f*f1 + %f" % (steerleft_fit[0], steerleft_fit[1]))
print("Right-steering equation: vjoy = %f*f1 + %f" % (steerright_fit[0], steerright_fit[1]))
leg = ax1.legend()
plt.savefig("vjoy_calibration")
plt.show() | 29.366667 | 92 | 0.699205 |
1903da6e08bf8dc573e6b3bccf1e5c02d8fa0767 | 8,623 | py | Python | Lib/hTools2/dialogs/glyphs/points_shift.py | gferreira/hTools2 | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | [
"BSD-3-Clause"
] | 11 | 2015-01-06T15:43:56.000Z | 2019-07-27T00:35:20.000Z | Lib/hTools2/dialogs/glyphs/points_shift.py | gferreira/hTools2 | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | [
"BSD-3-Clause"
] | 2 | 2017-05-17T10:11:46.000Z | 2018-11-21T21:43:43.000Z | Lib/hTools2/dialogs/glyphs/points_shift.py | gferreira/hTools2 | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | [
"BSD-3-Clause"
] | 4 | 2015-01-10T13:58:50.000Z | 2019-12-18T15:40:14.000Z | # [h] shift points in selected glyphs
import hTools2.modules.glyphutils
reload(hTools2.modules.glyphutils)
from mojo.roboFont import CurrentFont, version
from vanilla import *
from hTools2 import hDialog
from hTools2.dialogs.misc import Spinner
from hTools2.modules.fontutils import get_glyphs
from hTools2.modules.glyphutils import *
from hTools2.modules.messages import no_font_open, no_glyph_selected
class shiftPointsDialog(hDialog):
    '''A dialog to select and shift points in the selected glyphs in a font.

    .. image:: imgs/glyphs/points-shift.png

    Points on one side of a reference coordinate (``pos``) along the chosen
    axis are selected and moved by ``delta`` units, optionally across all
    layers. (Python 2 / RoboFont 1.8-2.x code.)
    '''
    # default UI state
    pos = 250
    delta = 125
    side = 1
    axis = 0
    layers = False
    font = None
    glyph_names = []
    def __init__(self):
        """Build and open the floating tool window."""
        self.title = 'shift points'
        self.column1 = 51
        self.width = self.nudge_button*6 + self.padding_x*2 - 5
        self.small_button = (self.width - self.padding_x*2) / 2
        self.height = self.text_height*4 + self.padding_y*9 + self.nudge_button*2 + self.button_height + 5
        self.w = HUDFloatingWindow((self.width, self.height), self.title)
        # position
        x = 0
        y = self.padding_y
        self.w.spinner_pos = Spinner(
                    (x, y),
                    default='100',
                    integer=True,
                    label='pos')
        # delta
        y += self.w.spinner_pos.getPosSize()[3]
        self.w.spinner_delta = Spinner(
                    (x, y),
                    default='100',
                    integer=True,
                    label='delta')
        # axis
        x = self.padding_x
        y += self.w.spinner_delta.getPosSize()[3]
        self.w.axis_label = TextBox(
                    (x, y, self.column1, self.text_height),
                    "axis",
                    sizeStyle=self.size_style)
        x = self.column1
        self.w._axis = RadioGroup(
                    (x, y, -self.padding_x, self.text_height),
                    ["x", "y"],
                    sizeStyle=self.size_style,
                    isVertical=False)
        self.w._axis.set(self.axis)
        # apply buttons
        x = self.padding_x
        y += (self.text_height + self.padding_y)
        self.w.button_minus = SquareButton(
                    (x, y, self.small_button + 1, self.button_height),
                    '-',
                    callback=self.shift_minus_callback)
        x += self.small_button
        self.w.button_plus = SquareButton(
                    (x, y, self.small_button, self.button_height),
                    '+',
                    callback=self.shift_plus_callback)
        # switch sides
        x = self.padding_x
        y += (self.button_height + self.padding_y)
        self.w._side = CheckBox(
                    (x, y, -self.padding_x, self.text_height),
                    "invert side",
                    value=False,
                    sizeStyle=self.size_style)
        y += self.text_height
        self.w._layers = CheckBox(
                    (x, y, -self.padding_x, self.text_height),
                    "all layers",
                    value=self.layers,
                    sizeStyle=self.size_style)
        # open window
        self.w.open()
    # functions
    def _get_parameters(self):
        """Read the current UI values into the instance attributes."""
        self.pos = int(self.w.spinner_pos.value.get())
        self.delta = int(self.w.spinner_delta.value.get())
        self.axis = self.w._axis.get()
        self.side = self.w._side.get()
        self.layers = self.w._layers.get()
    def shift_plus_callback(self, sender):
        """'+' button: shift the selected points by +delta."""
        self._get_parameters()
        self.shift_callback(mode=1)
    def shift_minus_callback(self, sender):
        """'-' button: shift the selected points by -delta."""
        self._get_parameters()
        self.shift_callback(mode=0)
    def shift_callback(self, mode):
        """Apply the shift to all selected glyphs of the current font.

        mode: 1 shifts by +delta, 0 by -delta. Honors the axis/side/layers
        options read by _get_parameters().
        """
        self.font = CurrentFont()
        if self.font is not None:
            glyph_names = get_glyphs(self.font)
            if len(glyph_names) > 0:
                boolstring = ['False', 'True']
                modes = ['minus', 'plus']
                axes = ['x', 'y']
                # set delta value
                if mode == 1:
                    delta = self.delta
                else:
                    delta = -self.delta
                # set side
                if self.axis == 0:
                    sides = ['right', 'left']
                else:
                    sides = ['top', 'bottom']
                # print info
                print 'shifting points in glyphs...\n'
                print '\tposition: %s' % self.pos
                print '\tdelta: %s' % delta
                print '\taxis: %s' % axes[self.axis]
                print '\tmode: %s' % modes[mode]
                print '\tside: %s' % sides[self.side]
                print '\tlayers: %s' % boolstring[self.layers]
                print
                print '\t',
                # transform
                for glyph_name in glyph_names:
                    print glyph_name,
                    # get glyph
                    g = self.font[glyph_name]
                    #---------
                    # shift y
                    #---------
                    if self.axis:
                        # all layers
                        if self.layers:
                            for layer_name in self.font.layerOrder:
                                layer_glyph = g.getLayer(layer_name)
                                layer_glyph.prepareUndo('shift points y')
                                deselect_points(layer_glyph)
                                select_points_y(layer_glyph, self.pos, side=sides[self.side])
                                shift_selected_points_y(layer_glyph, delta)
                                layer_glyph.performUndo()
                                # RF 2.0
                                if version[0] == '2':
                                    layer_glyph.changed()
                                # RF 1.8.X
                                else:
                                    layer_glyph.update()
                        # active layer only
                        else:
                            g.prepareUndo('shift points y')
                            deselect_points(g)
                            select_points_y(g, self.pos, side=sides[self.side])
                            shift_selected_points_y(g, delta)
                            g.performUndo()
                            # RF 2.0
                            if version[0] == '2':
                                g.changed()
                            # RF 1.8.X
                            else:
                                g.update()
                    #---------
                    # shift x
                    #---------
                    else:
                        # all layers
                        if self.layers:
                            for layer_name in self.font.layerOrder:
                                layer_glyph = g.getLayer(layer_name)
                                layer_glyph.prepareUndo('shift points x')
                                deselect_points(layer_glyph)
                                select_points_x(layer_glyph, self.pos, side=sides[self.side])
                                shift_selected_points_x(layer_glyph, delta)
                                layer_glyph.performUndo()
                                # RF 2.0
                                if version[0] == '2':
                                    layer_glyph.changed()
                                # RF 1.8.X
                                else:
                                    layer_glyph.update()
                        # active layer only
                        else:
                            g.prepareUndo('shift points x')
                            deselect_points(g)
                            select_points_x(g, self.pos, side=sides[self.side])
                            shift_selected_points_x(g, delta)
                            g.performUndo()
                            # RF 2.0
                            if version[0] == '2':
                                g.changed()
                            # RF 1.8.X
                            else:
                                g.update()
                # done with glyph
                # done with font
                # RF 2.0
                if version[0] == '2':
                    self.font.changed()
                # RF 1.8.X
                else:
                    self.font.update()
                print
                print '\n...done.\n'
            # no glyph selected
            else:
                print no_glyph_selected
        # no font open
        else:
            print no_font_open
a671f081e67481c660f6a7184b3e365d10b3f344 | 138 | py | Python | kattis/twostones.py | calebclark/competition | 824be38781e6cca8c30092c032e111cec6b9ce4b | [
"MIT"
] | null | null | null | kattis/twostones.py | calebclark/competition | 824be38781e6cca8c30092c032e111cec6b9ce4b | [
"MIT"
] | null | null | null | kattis/twostones.py | calebclark/competition | 824be38781e6cca8c30092c032e111cec6b9ce4b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Take Two Stones
# Kattis "twostones": with an odd number of stones the first player (Alice)
# wins; with an even number the second player (Bob) wins.
from sys import *
n = int(stdin.readline())
if n%2 == 1:
    print "Alice"
else:
    print "Bob"
| 13.8 | 25 | 0.615942 |
dab207523f0b24d1438aad7588f64c598b0c3d8f | 2,072 | py | Python | src/airflow_postgres_plugin/operators/postgres_to_file_operator.py | techalchemy/airflow-postgres-plugin | 57b8d4c8a2d5463a226e8a720e6c84108572a056 | [
"MIT"
] | 1 | 2020-03-22T18:34:32.000Z | 2020-03-22T18:34:32.000Z | src/airflow_postgres_plugin/operators/postgres_to_file_operator.py | techalchemy/airflow-postgres-plugin | 57b8d4c8a2d5463a226e8a720e6c84108572a056 | [
"MIT"
] | 13 | 2020-02-24T19:26:54.000Z | 2022-01-28T22:06:14.000Z | src/airflow_postgres_plugin/operators/postgres_to_file_operator.py | techalchemy/airflow-postgres-plugin | 57b8d4c8a2d5463a226e8a720e6c84108572a056 | [
"MIT"
] | null | null | null | # -*- coding=utf-8 -*-
import logging
import os
import tempfile
from typing import Any, Dict, Optional
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow_postgres_plugin.hooks.postgres_hook import PostgresHook
class PostgresToFileOperator(BaseOperator):
    """Airflow operator that exports the result of a SQL query to a file.

    Delegates the export to PostgresHook.export(); when no filepath is
    given, a temporary ``.csv`` file is created and its path returned.
    """
    # fields rendered through Airflow's jinja templating
    template_fields = ("sql", "sql_args", "filepath", "schema")
    @apply_defaults
    def __init__(
        self,
        conn_id: str,
        sql: str,
        sql_args: str,
        filepath: str,
        schema: str = "public",
        *args,
        **kwargs,
    ):
        super(PostgresToFileOperator, self).__init__(*args, **kwargs)
        self.conn_id = conn_id
        self.sql = sql
        # comma-separated parameter list, split at execute() time
        self.sql_args = sql_args
        self.filepath = filepath
        self.schema = schema
        self._hook: Optional[PostgresHook] = None
    @property
    def hook(self) -> PostgresHook:
        # lazily create the hook on first access
        if self._hook is None:
            self._hook = PostgresHook(self.conn_id, schema=self.schema)
        assert self._hook is not None
        return self._hook
    @hook.setter
    def hook(self, val):
        # allow tests/callers to inject a pre-built hook
        self._hook = val
    def execute(self, context: Dict[str, Any]) -> str:
        """Run the export and return the path of the written file."""
        if not isinstance(self.filepath, str):
            # generate temporary if no filepath given
            new_file = tempfile.NamedTemporaryFile(suffix=".csv", delete=False)
            new_file.close()
            self.filepath = new_file.name
            self.log.debug(
                f"no filepath given, creating temporary file at {self.filepath!r}"
            )
        # interpolated form used only for logging; the parametrized form
        # is what actually gets executed below
        statement = self.sql % tuple(self.sql_args.split(","))
        self.log.info(
            f"exporting data from executing {statement!r} on "
            f"{self.hook!r} to {self.filepath!r}"
        )
        try:
            self.hook.export(self.sql, self.filepath, parameters=self.sql_args.split(","))
        except Exception as exc:
            raise AirflowException(f"Failed exporting data: {exc!r}")
        return self.filepath
| 29.6 | 90 | 0.619208 |
a19059c799cafb45ade5b8d5850747f5da3f84dc | 496 | py | Python | handFiguration/module.py | CAU-OSP-02/T03 | bd5e32eb76aa651d959c86439f13c07d7781004a | [
"MIT"
] | null | null | null | handFiguration/module.py | CAU-OSP-02/T03 | bd5e32eb76aa651d959c86439f13c07d7781004a | [
"MIT"
] | null | null | null | handFiguration/module.py | CAU-OSP-02/T03 | bd5e32eb76aa651d959c86439f13c07d7781004a | [
"MIT"
] | null | null | null | import cv2
import numpy as np
# Function definitions
def hand_img_revert(image):
    """Load *image* from disk as grayscale and binarize it.

    Pixels above 100 become 255, the rest 0 (cv2.THRESH_BINARY).
    """
    gray = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
    _, binary = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
    return binary
def hand_img_rotation(img, degree):
    """Rotate *img* by *degree* degrees (counter-clockwise) around its center.

    Fixes two defects in the original:
    - ``img.shape[:-1]`` only yields (height, width) for 3-channel images
      and fails to unpack for grayscale input; ``img.shape[:2]`` works for
      both.
    - ``cv2.warpAffine`` expects the output size as (width, height); the
      original passed (height, width), which distorts/crops non-square
      images.
    """
    height, width = img.shape[:2]
    center = (int(width / 2), int(height / 2))
    rotation = cv2.getRotationMatrix2D(center, degree, 1)
    return cv2.warpAffine(img, rotation, (width, height))
| 20.666667 | 70 | 0.679435 |
89c245ccc224e27b2c362b266cb9645256ead403 | 848 | py | Python | zine/plugins/myrtle_theme/__init__.py | jace/zine-main | d6a466b0293e26d0ce6a280ae9685fce304c7544 | [
"BSD-3-Clause"
] | null | null | null | zine/plugins/myrtle_theme/__init__.py | jace/zine-main | d6a466b0293e26d0ce6a280ae9685fce304c7544 | [
"BSD-3-Clause"
] | null | null | null | zine/plugins/myrtle_theme/__init__.py | jace/zine-main | d6a466b0293e26d0ce6a280ae9685fce304c7544 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
zine.plugins.myrtle_theme
~~~~~~~~~~~~~~~~~~~~~~~~~
The current default theme for Zine.
:copyright: (c) 2009 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from os.path import join, dirname
# Absolute paths to the theme resources shipped next to this module.
TEMPLATE_FILES = join(dirname(__file__), 'templates')
SHARED_FILES = join(dirname(__file__), 'shared')
# Default pagination rendering options passed to Zine's theme machinery.
THEME_SETTINGS = {
    'pagination.right_threshold': 1,
    'pagination.left_threshold': 1,
    'pagination.threshold': 2,
    'pagination.next_link': True,
    'pagination.prev_link': True,
    'pagination.commata': u'<span class="commata"> ·\n</span>'
}
def setup(app, plugin):
    """Plugin entry point: register the theme and its static files with Zine."""
    app.add_theme('myrtle', TEMPLATE_FILES, plugin.metadata, THEME_SETTINGS)
    app.add_shared_exports('myrtle_theme', SHARED_FILES)
| 31.407407 | 76 | 0.642689 |
5378e93b6e0b607140dd10bf43dab1371ba34e17 | 508 | py | Python | aukro/settings.py | cuteredcat/aukro | c037b822abd54c77a99d0f3fbd1244eaaf583847 | [
"MIT"
] | null | null | null | aukro/settings.py | cuteredcat/aukro | c037b822abd54c77a99d0f3fbd1244eaaf583847 | [
"MIT"
] | null | null | null | aukro/settings.py | cuteredcat/aukro | c037b822abd54c77a99d0f3fbd1244eaaf583847 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This is aukro config file
# Do not change this file, use instance/aukro.conf instead
# Development server bind address and public base URL.
HOST = "localhost"
PORT = 5000
BASE_URL = "http://localhost:5000"
# Flask runtime flags.
DEBUG = True
TESTING = False
# NOTE(review): placeholder secret -- per the header, real values belong in
# instance/aukro.conf and must override this before deployment.
SECRET_KEY = "DuMmY sEcReT kEy"
# Cross-site request forgery protection settings.
CSRF_ENABLED = True
CSRF_SESSION_KEY = "_csrf_token"
# MongoDB connection settings (MongoEngine-style dict).
MONGODB_SETTINGS = {
    "db": "aukro",
    "host": "mongodb://localhost"
}
# Flask-Babel localization defaults.
BABEL_DEFAULT_LOCALE = "ru"
BABEL_DEFAULT_TIMEZONE = "Europe/Kiev"
# Google Analytics
GA_UA = "UA-XXXXXXXX-X"
b23e6b1f94e1cf883ab1bfa91f7a235ca54dead1 | 21,905 | py | Python | components/arduino/tools/platformio-build-esp32s2.py | Whitolf/esp-idf-arduino | 0048da468cc844ae5b43b6f568d44d299d60b48a | [
"MIT"
] | 1 | 2021-05-09T06:57:05.000Z | 2021-05-09T06:57:05.000Z | components/arduino/tools/platformio-build-esp32s2.py | Whitolf/esp-idf-arduino | 0048da468cc844ae5b43b6f568d44d299d60b48a | [
"MIT"
] | null | null | null | components/arduino/tools/platformio-build-esp32s2.py | Whitolf/esp-idf-arduino | 0048da468cc844ae5b43b6f568d44d299d60b48a | [
"MIT"
] | null | null | null | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Arduino
Arduino Wiring-based Framework allows writing cross-platform software to
control devices attached to a wide range of Arduino boards to create all
kinds of creative coding, interactive objects, spaces or physical experiences.
http://arduino.cc/en/Reference/HomePage
"""
# Extends: https://github.com/platformio/platform-espressif32/blob/develop/builder/main.py
from os.path import abspath, isdir, isfile, join, basename
from SCons.Script import DefaultEnvironment
# SCons construction environment provided by PlatformIO.
env = DefaultEnvironment()
platform = env.PioPlatform()
# Root of the installed Arduino-ESP32 framework package; must exist.
FRAMEWORK_DIR = platform.get_package_dir("framework-arduinoespressif32")
assert isdir(FRAMEWORK_DIR)
env.Append(
ASFLAGS=[
"-x", "assembler-with-cpp"
],
CFLAGS=[
"-mlongcalls",
"-std=gnu99",
"-Wno-old-style-declaration"
],
CXXFLAGS=[
"-mlongcalls",
"-std=gnu++11",
"-fexceptions",
"-fno-rtti"
],
CCFLAGS=[
"-ffunction-sections",
"-fdata-sections",
"-fstrict-volatile-bitfields",
"-Wno-error=unused-function",
"-Wno-error=unused-but-set-variable",
"-Wno-error=unused-variable",
"-Wno-error=deprecated-declarations",
"-Wno-unused-parameter",
"-Wno-sign-compare",
"-ggdb",
"-O2",
"-fstack-protector",
"-MMD"
],
LINKFLAGS=[
"-Wl,--cref",
"-fno-rtti",
"-fno-lto",
"-Wl,--gc-sections",
"-Wl,--undefined=uxTopUsedPriority",
"-T", "esp32s2.rom.api.ld",
"-T", "esp32s2.rom.ld",
"-T", "esp32s2.rom.libgcc.ld",
"-T", "esp32s2.rom.newlib-data.ld",
"-T", "esp32s2.rom.newlib-funcs.ld",
"-T", "esp32s2.rom.spiflash.ld",
"-T", "esp32s2_out.ld",
"-T", "esp32s2.project.ld",
"-T", "esp32s2.peripherals.ld",
"-u", "esp_app_desc",
"-u", "pthread_include_pthread_impl",
"-u", "pthread_include_pthread_cond_impl",
"-u", "pthread_include_pthread_local_storage_impl",
"-u", "ld_include_panic_highint_hdl",
"-u", "start_app",
"-u", "vfs_include_syscalls_impl",
"-u", "call_user_start_cpu0",
"-u", "app_main",
"-u", "newlib_include_locks_impl",
"-u", "newlib_include_heap_impl",
"-u", "newlib_include_syscalls_impl",
"-u", "newlib_include_pthread_impl",
"-u", "__cxa_guard_dummy",
"-Wl,-Map=" + join("$BUILD_DIR", basename(env.subst("${PROJECT_DIR}.map")))
],
CPPPATH=[
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "config"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "newlib", "platform_include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "freertos", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "freertos", "xtensa", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_hw_support", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_hw_support", "port", "esp32s2"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_hw_support", "port", "esp32s2", "private_include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "heap", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "log", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "lwip", "include", "apps"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "lwip", "include", "apps", "sntp"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "lwip", "lwip", "src", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "lwip", "port", "esp32", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "lwip", "port", "esp32", "include", "arch"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "lwip", "port", "esp32", "tcp_isn"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "soc", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "soc", "esp32s2"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "soc", "esp32s2", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "hal", "esp32s2", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "hal", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_rom", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_common", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_system", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "xtensa", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "xtensa", "esp32s2", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp32s2", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "driver", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "driver", "esp32s2", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_ringbuf", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "efuse", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "efuse", "esp32s2", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "espcoredump", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_timer", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_ipc", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_pm", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "vfs", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_wifi", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_wifi", "esp32s2", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_event", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_netif", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_eth", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "tcpip_adapter", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "app_trace", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "mbedtls", "port", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "mbedtls", "mbedtls", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "mbedtls", "esp_crt_bundle", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "bootloader_support", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "app_update", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "spi_flash", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "nvs_flash", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "pthread", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "wpa_supplicant", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "wpa_supplicant", "port", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "wpa_supplicant", "include", "esp_supplicant"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "asio", "asio", "asio", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "asio", "port", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "cbor", "port", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "unity", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "unity", "unity", "src"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "unity", "unity", "extras", "fixture", "src"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "cmock", "CMock", "src"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "coap", "port", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "coap", "port", "include", "coap"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "coap", "libcoap", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "coap", "libcoap", "include", "coap2"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "console"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "nghttp", "port", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "nghttp", "nghttp2", "lib", "includes"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp-tls"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp-tls", "esp-tls-crypto"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_adc_cal", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_gdbstub", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_hid", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "tcp_transport", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_http_client", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_http_server", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_https_ota", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_https_server", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "protobuf-c", "protobuf-c"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "protocomm", "include", "common"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "protocomm", "include", "security"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "protocomm", "include", "transports"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "mdns", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_local_ctrl", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "sdmmc", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_serial_slave_link", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_websocket_client", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "expat", "expat", "expat", "lib"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "expat", "port", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "wear_levelling", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "fatfs", "diskio"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "fatfs", "vfs"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "fatfs", "src"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "freemodbus", "common", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "idf_test", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "idf_test", "include", "esp32s2"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "jsmn", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "json", "cJSON"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "libsodium", "libsodium", "src", "libsodium", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "libsodium", "port_include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "mqtt", "esp-mqtt", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "openssl", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "perfmon", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "spiffs", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "freertos", "include", "freertos"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "tinyusb", "tinyusb", "src"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "tinyusb", "additions", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "ulp", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "wifi_provisioning", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp-face", "face_detection", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp-face", "face_recognition", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp-face", "object_detection", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp-face", "image_util", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp-face", "pose_estimation", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp-face", "lib", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_littlefs", "src"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "esp_littlefs", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "include", "fb_gfx", "include"),
join(FRAMEWORK_DIR, "cores", env.BoardConfig().get("build.core"))
],
LIBPATH=[
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "lib"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "ld")
],
LIBS=[
"-lxtensa", "-lesp_pm", "-lmbedtls", "-lefuse", "-lbootloader_support", "-lapp_update", "-lesp_ipc", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lesp_ringbuf", "-ldriver", "-lespcoredump", "-lesp32s2", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lasio", "-lcbor", "-lunity", "-lcmock", "-lcoap", "-lconsole", "-lnghttp", "-lesp-tls", "-lesp_adc_cal", "-lesp_gdbstub", "-lesp_hid", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lesp_https_server", "-lprotobuf-c", "-lprotocomm", "-lmdns", "-lesp_local_ctrl", "-lsdmmc", "-lesp_serial_slave_link", "-lesp_websocket_client", "-lexpat", "-lwear_levelling", "-lfatfs", "-lfreemodbus", "-ljsmn", "-ljson", "-llibsodium", "-lmqtt", "-lopenssl", "-lperfmon", "-lspiffs", "-lulp", "-lwifi_provisioning", "-lesp-face", "-lesp_littlefs", "-lfb_gfx", "-lasio", "-lcbor", "-lcmock", "-lunity", "-lcoap", "-lesp_gdbstub", "-lesp_hid", "-lesp_local_ctrl", "-lesp_https_server", "-lesp_websocket_client", "-lexpat", "-lfreemodbus", "-ljsmn", "-llibsodium", "-lmqtt", "-lperfmon", "-lwifi_provisioning", "-lprotocomm", "-lprotobuf-c", "-ljson", "-lesp-face", "-lpe", "-lfd", "-lfr", "-ldetection_cat_face", "-ldetection", "-ldl", "-lesp_littlefs", "-lfb_gfx", "-lesp_adc_cal", "-lmdns", "-lconsole", "-lfatfs", "-lwear_levelling", "-lopenssl", "-lspiffs", "-ltinyusb", "-lxtensa", "-lesp_pm", "-lmbedtls", "-lefuse", "-lbootloader_support", "-lapp_update", "-lesp_ipc", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lesp_ringbuf", "-ldriver", "-lespcoredump", "-lesp32s2", "-lesp_common", 
"-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lulp", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lrtc", "-lsmartconfig", "-lphy", "-lxtensa", "-lesp_pm", "-lmbedtls", "-lefuse", "-lbootloader_support", "-lapp_update", "-lesp_ipc", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lesp_ringbuf", "-ldriver", "-lespcoredump", "-lesp32s2", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lulp", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lrtc", "-lsmartconfig", "-lphy", "-lxtensa", "-lesp_pm", "-lmbedtls", "-lefuse", "-lbootloader_support", "-lapp_update", "-lesp_ipc", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lesp_ringbuf", "-ldriver", "-lespcoredump", "-lesp32s2", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lulp", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lrtc", "-lsmartconfig", "-lphy", "-lxtensa", "-lesp_pm", "-lmbedtls", "-lefuse", "-lbootloader_support", 
"-lapp_update", "-lesp_ipc", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lesp_ringbuf", "-ldriver", "-lespcoredump", "-lesp32s2", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lulp", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lrtc", "-lsmartconfig", "-lphy", "-lxt_hal", "-lesp32s2", "-lm", "-lnewlib", "-lgcc", "-lstdc++", "-lpthread", "-lapp_trace", "-lgcov", "-lapp_trace", "-lgcov", "-lc"
],
CPPDEFINES=[
"HAVE_CONFIG_H",
("MBEDTLS_CONFIG_FILE", '\\"mbedtls/esp_config.h\\"'),
"UNITY_INCLUDE_CONFIG_H",
"WITH_POSIX",
"_GNU_SOURCE",
("IDF_VER", '\\"v4.3-dev-1720-g494a124d9-dirty\\"'),
"ESP_PLATFORM",
"ARDUINO_ARCH_ESP32",
"ESP32",
("F_CPU", "$BOARD_F_CPU"),
("ARDUINO", 10812),
("ARDUINO_VARIANT", '\\"%s\\"' % env.BoardConfig().get("build.variant").replace('"', "")),
("ARDUINO_BOARD", '\\"%s\\"' % env.BoardConfig().get("name").replace('"', ""))
],
LIBSOURCE_DIRS=[
join(FRAMEWORK_DIR, "libraries")
],
FLASH_EXTRA_IMAGES=[
("0x1000", join(FRAMEWORK_DIR, "tools", "sdk", "esp32s2", "bin", "bootloader_${BOARD_FLASH_MODE}_${__get_board_f_flash(__env__)}.bin")),
("0x8000", join(env.subst("$BUILD_DIR"), "partitions.bin")),
("0xe000", join(FRAMEWORK_DIR, "tools", "partitions", "boot_app0.bin"))
]
)
#
# Target: Build Core Library
#
libs = []
# Board variants may live inside the framework or in a project-local
# directory named by the board manifest's "build.variants_dir".
variants_dir = join(FRAMEWORK_DIR, "variants")
if "build.variants_dir" in env.BoardConfig():
    variants_dir = join("$PROJECT_DIR", env.BoardConfig().get("build.variants_dir"))
if "build.variant" in env.BoardConfig():
    # Expose the variant's headers and compile the variant sources as
    # their own static library.
    env.Append(
        CPPPATH=[
            join(variants_dir, env.BoardConfig().get("build.variant"))
        ]
    )
    libs.append(env.BuildLibrary(
        join("$BUILD_DIR", "FrameworkArduinoVariant"),
        join(variants_dir, env.BoardConfig().get("build.variant"))
    ))
# Build the Arduino core itself in a cloned environment so settings added
# for the core build do not leak into the project environment.
envsafe = env.Clone()
libs.append(envsafe.BuildLibrary(
    join("$BUILD_DIR", "FrameworkArduino"),
    join(FRAMEWORK_DIR, "cores", env.BoardConfig().get("build.core"))
))
# Link framework libraries ahead of any project libraries.
env.Prepend(LIBS=libs)
#
# Generate partition table
#
fwpartitions_dir = join(FRAMEWORK_DIR, "tools", "partitions")
# The board manifest may name a bundled CSV (e.g. "default.csv") or a
# project-local path; prefer the bundled one when it exists.
partitions_csv = env.BoardConfig().get("build.partitions", "default.csv")
env.Replace(
    PARTITIONS_TABLE_CSV=abspath(
        join(fwpartitions_dir, partitions_csv) if isfile(
            join(fwpartitions_dir, partitions_csv)) else partitions_csv))
# Convert the CSV description into the binary partition table image using
# the framework's gen_esp32part.py helper.
partition_table = env.Command(
    join("$BUILD_DIR", "partitions.bin"),
    "$PARTITIONS_TABLE_CSV",
    env.VerboseAction('"$PYTHONEXE" "%s" -q $SOURCE $TARGET' % join(
        FRAMEWORK_DIR, "tools", "gen_esp32part.py"),
        "Generating partitions $TARGET"))
# Make the program target depend on the partition image so it is rebuilt
# whenever the CSV changes.
env.Depends("$BUILD_DIR/$PROGNAME$PROGSUFFIX", partition_table)
| 70.66129 | 4,848 | 0.613467 |
c0054ff923c21307ac81e6d10dd07a2c2f9a5c04 | 55,408 | py | Python | Lib/ufo2ft/outlineCompiler.py | simoncozens/ufo2ft | 910762498441c8480d6512f613146631de093dce | [
"MIT"
] | 1 | 2015-11-03T07:49:22.000Z | 2015-11-03T07:49:22.000Z | Lib/ufo2ft/outlineCompiler.py | simoncozens/ufo2ft | 910762498441c8480d6512f613146631de093dce | [
"MIT"
] | null | null | null | Lib/ufo2ft/outlineCompiler.py | simoncozens/ufo2ft | 910762498441c8480d6512f613146631de093dce | [
"MIT"
] | null | null | null | import logging
import math
from collections import Counter, namedtuple
from io import BytesIO
from types import SimpleNamespace
from fontTools.cffLib import (
CharStrings,
GlobalSubrsIndex,
IndexedStrings,
PrivateDict,
SubrsIndex,
TopDict,
TopDictIndex,
)
from fontTools.misc.arrayTools import unionRect
from fontTools.misc.fixedTools import otRound
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.reverseContourPen import ReverseContourPen
from fontTools.pens.t2CharStringPen import T2CharStringPen
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables._g_l_y_f import USE_MY_METRICS, Glyph
from fontTools.ttLib.tables._h_e_a_d import mac_epoch_diff
from fontTools.ttLib.tables.O_S_2f_2 import Panose
from ufo2ft.constants import COLOR_LAYERS_KEY, COLOR_PALETTES_KEY
from ufo2ft.errors import InvalidFontData
from ufo2ft.fontInfoData import (
dateStringForNow,
dateStringToTimeValue,
getAttrWithFallback,
intListToNum,
normalizeStringForPostscript,
)
from ufo2ft.util import (
_copyGlyph,
calcCodePageRanges,
makeOfficialGlyphOrder,
makeUnicodeToGlyphNameMapping,
)
logger = logging.getLogger(__name__)
# Axis-aligned bounds; fields are xMin, yMin, xMax, yMax.
BoundingBox = namedtuple("BoundingBox", ["xMin", "yMin", "xMax", "yMax"])
# Degenerate bounds returned when no glyph contributes any outline.
EMPTY_BOUNDING_BOX = BoundingBox(0, 0, 0, 0)
def _isNonBMP(s):
for c in s:
if ord(c) > 65535:
return True
return False
def _getVerticalOrigin(font, glyph):
    """Return the rounded vertical origin for *glyph*.

    When the glyph does not carry its own ``verticalOrigin`` attribute
    (or it is None), fall back to the font's OS/2 ``sTypoAscender``, or
    0 when no OS/2 table is present.
    """
    origin = getattr(glyph, "verticalOrigin", None)
    if origin is None:
        os2 = font.get("OS/2")
        origin = os2.sTypoAscender if os2 is not None else 0
    return otRound(origin)
class BaseOutlineCompiler:
    """Create a feature-less outline binary.

    Shared base for the outline compilers; glyph compilation and the
    maxp table are left to subclasses (see compileGlyphs/setupTable_maxp).
    """
    # Four-byte sfnt version string; None here, set by subclasses.
    # makeMissingRequiredGlyphs checks for the TrueType value
    # "\000\001\000\000" to decide whether contours must be reversed.
    sfntVersion = None
    # Default set of sfnt tables to build; may be overridden per instance
    # via the ``tables`` argument of ``__init__``.
    tables = frozenset(
        [
            "head",
            "hmtx",
            "hhea",
            "name",
            "maxp",
            "cmap",
            "OS/2",
            "post",
            "vmtx",
            "vhea",
            "COLR",
            "CPAL",
        ]
    )
    def __init__(
        self,
        font,
        glyphSet=None,
        glyphOrder=None,
        tables=None,
        notdefGlyph=None,
    ):
        """Set up shared compiler state.

        Args:
            font: the source UFO font object.
            glyphSet: optional pre-filtered ``{name: glyph}`` mapping;
                defaults to all glyphs in *font*.
            glyphOrder: optional glyph order; defaults to ``font.glyphOrder``.
            tables: optional collection of table tags to build, replacing
                the class-level default set.
            notdefGlyph: optional glyph to use when ``.notdef`` is missing.
        """
        self.ufo = font
        # use the previously filtered glyphSet, if any
        if glyphSet is None:
            glyphSet = {g.name: g for g in font}
        # Guarantee a .notdef glyph exists before any table is built.
        self.makeMissingRequiredGlyphs(font, glyphSet, self.sfntVersion, notdefGlyph)
        self.allGlyphs = glyphSet
        # store the glyph order
        if glyphOrder is None:
            glyphOrder = font.glyphOrder
        self.glyphOrder = self.makeOfficialGlyphOrder(glyphOrder)
        # make a reusable character mapping
        self.unicodeToGlyphNameMapping = self.makeUnicodeToGlyphNameMapping()
        if tables is not None:
            self.tables = tables
        # cached values defined later on
        self._glyphBoundingBoxes = None
        self._fontBoundingBox = None
        self._compiledGlyphs = None
    def compile(self):
        """
        Compile the OpenType binary.

        Builds the tables listed in ``self.tables`` into a fontTools
        ``TTFont`` and returns it. Vertical metrics and color tables are
        only built when the corresponding data is present in the UFO.
        """
        self.otf = TTFont(sfntVersion=self.sfntVersion)
        # only compile vertical metrics tables if vhea metrics are defined
        vertical_metrics = [
            "openTypeVheaVertTypoAscender",
            "openTypeVheaVertTypoDescender",
            "openTypeVheaVertTypoLineGap",
        ]
        self.vertical = all(
            getAttrWithFallback(self.ufo.info, metric) is not None
            for metric in vertical_metrics
        )
        # COLR/CPAL are only built when both lib keys are present.
        self.colorLayers = (
            COLOR_LAYERS_KEY in self.ufo.lib and COLOR_PALETTES_KEY in self.ufo.lib
        )
        # write the glyph order
        self.otf.setGlyphOrder(self.glyphOrder)
        # populate basic tables
        self.setupTable_head()
        self.setupTable_hmtx()
        self.setupTable_hhea()
        self.setupTable_name()
        self.setupTable_maxp()
        self.setupTable_cmap()
        self.setupTable_OS2()
        self.setupTable_post()
        if self.vertical:
            self.setupTable_vmtx()
            self.setupTable_vhea()
        if self.colorLayers:
            self.setupTable_COLR()
            self.setupTable_CPAL()
        # Remaining table setup and TTX import are handled by methods
        # defined outside this excerpt (subclass/implementation hooks).
        self.setupOtherTables()
        self.importTTX()
        return self.otf
def compileGlyphs(self):
"""Compile glyphs and return dict keyed by glyph name.
**This should not be called externally.**
Subclasses must override this method to handle compilation of glyphs.
"""
raise NotImplementedError
def getCompiledGlyphs(self):
if self._compiledGlyphs is None:
self._compiledGlyphs = self.compileGlyphs()
return self._compiledGlyphs
def makeGlyphsBoundingBoxes(self):
"""
Make bounding boxes for all the glyphs, and return a dictionary of
BoundingBox(xMin, xMax, yMin, yMax) namedtuples keyed by glyph names.
The bounding box of empty glyphs (without contours or components) is
set to None.
The bbox values are integers.
**This should not be called externally.**
Subclasses must override this method to handle the bounds creation for
their specific glyph type.
"""
raise NotImplementedError
@property
def glyphBoundingBoxes(self):
if self._glyphBoundingBoxes is None:
self._glyphBoundingBoxes = self.makeGlyphsBoundingBoxes()
return self._glyphBoundingBoxes
def makeFontBoundingBox(self):
"""
Make a bounding box for the font.
**This should not be called externally.** Subclasses
may override this method to handle the bounds creation
in a different way if desired.
"""
fontBox = None
for glyphBox in self.glyphBoundingBoxes.values():
if glyphBox is None:
continue
if fontBox is None:
fontBox = glyphBox
else:
fontBox = unionRect(fontBox, glyphBox)
if fontBox is None: # unlikely
fontBox = EMPTY_BOUNDING_BOX
return fontBox
@property
def fontBoundingBox(self):
if self._fontBoundingBox is None:
self._fontBoundingBox = self.makeFontBoundingBox()
return self._fontBoundingBox
def makeUnicodeToGlyphNameMapping(self):
"""
Make a ``unicode : glyph name`` mapping for the font.
**This should not be called externally.** Subclasses
may override this method to handle the mapping creation
in a different way if desired.
"""
return makeUnicodeToGlyphNameMapping(self.allGlyphs, self.glyphOrder)
    @staticmethod
    def makeMissingRequiredGlyphs(font, glyphSet, sfntVersion, notdefGlyph=None):
        """
        Add .notdef to the glyph set if it is not present.

        **This should not be called externally.** Subclasses
        may override this method to handle the glyph creation
        in a different way if desired.
        """
        if ".notdef" in glyphSet:
            return
        # TrueType ("\000\001\000\000") glyf outlines use the opposite
        # contour direction from PostScript outlines.
        reverseContour = sfntVersion == "\000\001\000\000"
        if notdefGlyph:
            # Copy the provided glyph so the caller's object is untouched.
            notdefGlyph = _copyGlyph(notdefGlyph, reverseContour=reverseContour)
        else:
            # Synthesize a simple .notdef sized from the font's metrics
            # (half an em wide).
            unitsPerEm = otRound(getAttrWithFallback(font.info, "unitsPerEm"))
            ascender = otRound(getAttrWithFallback(font.info, "ascender"))
            descender = otRound(getAttrWithFallback(font.info, "descender"))
            defaultWidth = otRound(unitsPerEm * 0.5)
            notdefGlyph = StubGlyph(
                name=".notdef",
                width=defaultWidth,
                unitsPerEm=unitsPerEm,
                ascender=ascender,
                descender=descender,
                reverseContour=reverseContour,
            )
        glyphSet[".notdef"] = notdefGlyph
def makeOfficialGlyphOrder(self, glyphOrder):
"""
Make the final glyph order.
**This should not be called externally.** Subclasses
may override this method to handle the order creation
in a different way if desired.
"""
return makeOfficialGlyphOrder(self.allGlyphs, glyphOrder)
# --------------
# Table Builders
# --------------
def setupTable_gasp(self):
if "gasp" not in self.tables:
return
self.otf["gasp"] = gasp = newTable("gasp")
gasp_ranges = dict()
for record in self.ufo.info.openTypeGaspRangeRecords:
rangeMaxPPEM = record["rangeMaxPPEM"]
behavior_bits = record["rangeGaspBehavior"]
rangeGaspBehavior = intListToNum(behavior_bits, 0, 4)
gasp_ranges[rangeMaxPPEM] = rangeGaspBehavior
gasp.gaspRange = gasp_ranges
    def setupTable_head(self):
        """
        Make the head table.

        **This should not be called externally.** Subclasses
        may override or supplement this method to handle the
        table creation in a different way if desired.
        """
        if "head" not in self.tables:
            return
        self.otf["head"] = head = newTable("head")
        font = self.ufo
        head.checkSumAdjustment = 0
        head.tableVersion = 1.0
        # Fixed magic value mandated by the sfnt/OpenType spec.
        head.magicNumber = 0x5F0F3CF5
        # version numbers
        # limit minor version to 3 digits as recommended in OpenType spec:
        # https://www.microsoft.com/typography/otspec/recom.htm
        versionMajor = getAttrWithFallback(font.info, "versionMajor")
        versionMinor = getAttrWithFallback(font.info, "versionMinor")
        fullFontRevision = float("%d.%03d" % (versionMajor, versionMinor))
        head.fontRevision = round(fullFontRevision, 3)
        if head.fontRevision != fullFontRevision:
            logger.warning(
                "Minor version in %s has too many digits and won't fit into "
                "the head table's fontRevision field; rounded to %s.",
                fullFontRevision,
                head.fontRevision,
            )
        # upm
        head.unitsPerEm = otRound(getAttrWithFallback(font.info, "unitsPerEm"))
        # times
        # head timestamps are seconds since the Mac epoch, hence the offset.
        head.created = (
            dateStringToTimeValue(getAttrWithFallback(font.info, "openTypeHeadCreated"))
            - mac_epoch_diff
        )
        head.modified = dateStringToTimeValue(dateStringForNow()) - mac_epoch_diff
        # bounding box
        xMin, yMin, xMax, yMax = self.fontBoundingBox
        head.xMin = otRound(xMin)
        head.yMin = otRound(yMin)
        head.xMax = otRound(xMax)
        head.yMax = otRound(yMax)
        # style mapping: macStyle bit 0 = bold, bit 1 = italic
        styleMapStyleName = getAttrWithFallback(font.info, "styleMapStyleName")
        macStyle = []
        if styleMapStyleName == "bold":
            macStyle = [0]
        elif styleMapStyleName == "bold italic":
            macStyle = [0, 1]
        elif styleMapStyleName == "italic":
            macStyle = [1]
        head.macStyle = intListToNum(macStyle, 0, 16)
        # misc
        head.flags = intListToNum(
            getAttrWithFallback(font.info, "openTypeHeadFlags"), 0, 16
        )
        head.lowestRecPPEM = otRound(
            getAttrWithFallback(font.info, "openTypeHeadLowestRecPPEM")
        )
        head.fontDirectionHint = 2
        head.indexToLocFormat = 0
        head.glyphDataFormat = 0
    def setupTable_name(self):
        """
        Make the name table.

        Explicit records from ``font.info.openTypeNameRecords`` are written
        first and take precedence over the records built from the other
        font.info naming attributes.

        **This should not be called externally.** Subclasses
        may override or supplement this method to handle the
        table creation in a different way if desired.
        """
        if "name" not in self.tables:
            return
        font = self.ufo
        self.otf["name"] = name = newTable("name")
        name.names = []
        # Set name records from font.info.openTypeNameRecords
        for nameRecord in getAttrWithFallback(font.info, "openTypeNameRecords"):
            nameId = nameRecord["nameID"]
            platformId = nameRecord["platformID"]
            platEncId = nameRecord["encodingID"]
            langId = nameRecord["languageID"]
            # on Python 2, plistLib (used by ufoLib) returns unicode strings
            # only when plist data contain non-ascii characters, and returns
            # ascii-encoded bytes when it can. On the other hand, fontTools's
            # name table `setName` method wants unicode strings, so we must
            # decode them first
            nameVal = nameRecord["string"]
            name.setName(nameVal, nameId, platformId, platEncId, langId)
        # Build name records
        familyName = getAttrWithFallback(font.info, "styleMapFamilyName")
        styleName = getAttrWithFallback(font.info, "styleMapStyleName").title()
        preferredFamilyName = getAttrWithFallback(
            font.info, "openTypeNamePreferredFamilyName"
        )
        preferredSubfamilyName = getAttrWithFallback(
            font.info, "openTypeNamePreferredSubfamilyName"
        )
        fullName = f"{preferredFamilyName} {preferredSubfamilyName}"
        # Keys are OpenType name IDs.
        nameVals = {
            0: getAttrWithFallback(font.info, "copyright"),
            1: familyName,
            2: styleName,
            3: getAttrWithFallback(font.info, "openTypeNameUniqueID"),
            4: fullName,
            5: getAttrWithFallback(font.info, "openTypeNameVersion"),
            6: getAttrWithFallback(font.info, "postscriptFontName"),
            7: getAttrWithFallback(font.info, "trademark"),
            8: getAttrWithFallback(font.info, "openTypeNameManufacturer"),
            9: getAttrWithFallback(font.info, "openTypeNameDesigner"),
            10: getAttrWithFallback(font.info, "openTypeNameDescription"),
            11: getAttrWithFallback(font.info, "openTypeNameManufacturerURL"),
            12: getAttrWithFallback(font.info, "openTypeNameDesignerURL"),
            13: getAttrWithFallback(font.info, "openTypeNameLicense"),
            14: getAttrWithFallback(font.info, "openTypeNameLicenseURL"),
            16: preferredFamilyName,
            17: preferredSubfamilyName,
            18: getAttrWithFallback(font.info, "openTypeNameCompatibleFullName"),
            19: getAttrWithFallback(font.info, "openTypeNameSampleText"),
            21: getAttrWithFallback(font.info, "openTypeNameWWSFamilyName"),
            22: getAttrWithFallback(font.info, "openTypeNameWWSSubfamilyName"),
        }
        # don't add typographic names if they are the same as the legacy ones
        if nameVals[1] == nameVals[16]:
            del nameVals[16]
        if nameVals[2] == nameVals[17]:
            del nameVals[17]
        # postscript font name
        if nameVals[6]:
            nameVals[6] = normalizeStringForPostscript(nameVals[6])
        for nameId in sorted(nameVals.keys()):
            nameVal = nameVals[nameId]
            if not nameVal:
                continue
            platformId = 3
            # Use the UCS-4 encoding ID (10) when the string contains
            # characters outside the BMP; otherwise Unicode BMP (1).
            platEncId = 10 if _isNonBMP(nameVal) else 1
            # Windows en-US language ID.
            langId = 0x409
            # Set built name record if not set yet
            if name.getName(nameId, platformId, platEncId, langId):
                continue
            name.setName(nameVal, nameId, platformId, platEncId, langId)
def setupTable_maxp(self):
"""
Make the maxp table.
**This should not be called externally.** Subclasses
must override or supplement this method to handle the
table creation for either CFF or TT data.
"""
raise NotImplementedError
def setupTable_cmap(self):
"""
Make the cmap table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "cmap" not in self.tables:
return
from fontTools.ttLib.tables._c_m_a_p import cmap_format_4
nonBMP = {k: v for k, v in self.unicodeToGlyphNameMapping.items() if k > 65535}
if nonBMP:
mapping = {
k: v for k, v in self.unicodeToGlyphNameMapping.items() if k <= 65535
}
else:
mapping = dict(self.unicodeToGlyphNameMapping)
# mac
cmap4_0_3 = cmap_format_4(4)
cmap4_0_3.platformID = 0
cmap4_0_3.platEncID = 3
cmap4_0_3.language = 0
cmap4_0_3.cmap = mapping
# windows
cmap4_3_1 = cmap_format_4(4)
cmap4_3_1.platformID = 3
cmap4_3_1.platEncID = 1
cmap4_3_1.language = 0
cmap4_3_1.cmap = mapping
# store
self.otf["cmap"] = cmap = newTable("cmap")
cmap.tableVersion = 0
cmap.tables = [cmap4_0_3, cmap4_3_1]
# If we have glyphs outside Unicode BMP, we must set another
# subtable that can hold longer codepoints for them.
if nonBMP:
from fontTools.ttLib.tables._c_m_a_p import cmap_format_12
nonBMP.update(mapping)
# mac
cmap12_0_4 = cmap_format_12(12)
cmap12_0_4.platformID = 0
cmap12_0_4.platEncID = 4
cmap12_0_4.language = 0
cmap12_0_4.cmap = nonBMP
# windows
cmap12_3_10 = cmap_format_12(12)
cmap12_3_10.platformID = 3
cmap12_3_10.platEncID = 10
cmap12_3_10.language = 0
cmap12_3_10.cmap = nonBMP
# update tables registry
cmap.tables = [cmap4_0_3, cmap4_3_1, cmap12_0_4, cmap12_3_10]
    def setupTable_OS2(self):
        """
        Make the OS/2 table.
        **This should not be called externally.** Subclasses
        may override or supplement this method to handle the
        table creation in a different way if desired.
        """
        if "OS/2" not in self.tables:
            return
        self.otf["OS/2"] = os2 = newTable("OS/2")
        font = self.ufo
        # table format version
        os2.version = 0x0004
        # average glyph width; zero-width glyphs are excluded from the average
        os2.xAvgCharWidth = 0
        hmtx = self.otf.get("hmtx")
        if hmtx is not None:
            widths = [width for width, _ in hmtx.metrics.values() if width > 0]
            if widths:
                os2.xAvgCharWidth = otRound(sum(widths) / len(widths))
        # weight and width classes
        os2.usWeightClass = getAttrWithFallback(font.info, "openTypeOS2WeightClass")
        os2.usWidthClass = getAttrWithFallback(font.info, "openTypeOS2WidthClass")
        # embedding
        os2.fsType = intListToNum(
            getAttrWithFallback(font.info, "openTypeOS2Type"), 0, 16
        )
        # subscript, superscript, strikeout values, taken from AFDKO:
        # FDK/Tools/Programs/makeotf/makeotf_lib/source/hotconv/hot.c
        unitsPerEm = getAttrWithFallback(font.info, "unitsPerEm")
        italicAngle = getAttrWithFallback(font.info, "italicAngle")
        xHeight = getAttrWithFallback(font.info, "xHeight")
        def adjustOffset(offset, angle):
            """Adjust Y offset based on italic angle, to get X offset."""
            return offset * math.tan(math.radians(-angle)) if angle else 0
        # for each sub/superscript value: use the explicit fontinfo value when
        # present, otherwise fall back to a fixed fraction of the em (the
        # AFDKO defaults referenced above)
        v = getAttrWithFallback(font.info, "openTypeOS2SubscriptXSize")
        if v is None:
            v = unitsPerEm * 0.65
        os2.ySubscriptXSize = otRound(v)
        v = getAttrWithFallback(font.info, "openTypeOS2SubscriptYSize")
        if v is None:
            v = unitsPerEm * 0.6
        os2.ySubscriptYSize = otRound(v)
        v = getAttrWithFallback(font.info, "openTypeOS2SubscriptYOffset")
        if v is None:
            v = unitsPerEm * 0.075
        os2.ySubscriptYOffset = otRound(v)
        v = getAttrWithFallback(font.info, "openTypeOS2SubscriptXOffset")
        if v is None:
            # shear the X offset by the italic angle so sub/superscripts track
            # the slanted stems
            v = adjustOffset(-os2.ySubscriptYOffset, italicAngle)
        os2.ySubscriptXOffset = otRound(v)
        v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptXSize")
        if v is None:
            v = os2.ySubscriptXSize
        os2.ySuperscriptXSize = otRound(v)
        v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptYSize")
        if v is None:
            v = os2.ySubscriptYSize
        os2.ySuperscriptYSize = otRound(v)
        v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptYOffset")
        if v is None:
            v = unitsPerEm * 0.35
        os2.ySuperscriptYOffset = otRound(v)
        v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptXOffset")
        if v is None:
            v = adjustOffset(os2.ySuperscriptYOffset, italicAngle)
        os2.ySuperscriptXOffset = otRound(v)
        v = getAttrWithFallback(font.info, "openTypeOS2StrikeoutSize")
        if v is None:
            v = getAttrWithFallback(font.info, "postscriptUnderlineThickness")
        os2.yStrikeoutSize = otRound(v)
        v = getAttrWithFallback(font.info, "openTypeOS2StrikeoutPosition")
        if v is None:
            v = xHeight * 0.6 if xHeight else unitsPerEm * 0.22
        os2.yStrikeoutPosition = otRound(v)
        # family class: packed as high byte = class, low byte = subclass
        ibmFontClass, ibmFontSubclass = getAttrWithFallback(
            font.info, "openTypeOS2FamilyClass"
        )
        os2.sFamilyClass = (ibmFontClass << 8) + ibmFontSubclass
        # panose
        data = getAttrWithFallback(font.info, "openTypeOS2Panose")
        panose = Panose()
        panose.bFamilyType = data[0]
        panose.bSerifStyle = data[1]
        panose.bWeight = data[2]
        panose.bProportion = data[3]
        panose.bContrast = data[4]
        panose.bStrokeVariation = data[5]
        panose.bArmStyle = data[6]
        panose.bLetterForm = data[7]
        panose.bMidline = data[8]
        panose.bXHeight = data[9]
        os2.panose = panose
        # Unicode ranges: honor explicit fontinfo values, otherwise let
        # fontTools recompute them from the cmap
        uniRanges = getAttrWithFallback(font.info, "openTypeOS2UnicodeRanges")
        if uniRanges is not None:
            os2.ulUnicodeRange1 = intListToNum(uniRanges, 0, 32)
            os2.ulUnicodeRange2 = intListToNum(uniRanges, 32, 32)
            os2.ulUnicodeRange3 = intListToNum(uniRanges, 64, 32)
            os2.ulUnicodeRange4 = intListToNum(uniRanges, 96, 32)
        else:
            os2.recalcUnicodeRanges(self.otf)
        # codepage ranges
        codepageRanges = getAttrWithFallback(font.info, "openTypeOS2CodePageRanges")
        if codepageRanges is None:
            unicodes = self.unicodeToGlyphNameMapping.keys()
            codepageRanges = calcCodePageRanges(unicodes)
        os2.ulCodePageRange1 = intListToNum(codepageRanges, 0, 32)
        os2.ulCodePageRange2 = intListToNum(codepageRanges, 32, 32)
        # vendor id
        os2.achVendID = getAttrWithFallback(font.info, "openTypeOS2VendorID")
        # vertical metrics
        os2.sxHeight = otRound(getAttrWithFallback(font.info, "xHeight"))
        os2.sCapHeight = otRound(getAttrWithFallback(font.info, "capHeight"))
        os2.sTypoAscender = otRound(
            getAttrWithFallback(font.info, "openTypeOS2TypoAscender")
        )
        os2.sTypoDescender = otRound(
            getAttrWithFallback(font.info, "openTypeOS2TypoDescender")
        )
        os2.sTypoLineGap = otRound(
            getAttrWithFallback(font.info, "openTypeOS2TypoLineGap")
        )
        os2.usWinAscent = otRound(
            getAttrWithFallback(font.info, "openTypeOS2WinAscent")
        )
        os2.usWinDescent = otRound(
            getAttrWithFallback(font.info, "openTypeOS2WinDescent")
        )
        # style mapping (fsSelection bits: 0 = ITALIC, 5 = BOLD, 6 = REGULAR)
        selection = list(getAttrWithFallback(font.info, "openTypeOS2Selection"))
        styleMapStyleName = getAttrWithFallback(font.info, "styleMapStyleName")
        if styleMapStyleName == "regular":
            selection.append(6)
        elif styleMapStyleName == "bold":
            selection.append(5)
        elif styleMapStyleName == "italic":
            selection.append(0)
        elif styleMapStyleName == "bold italic":
            selection += [0, 5]
        os2.fsSelection = intListToNum(selection, 0, 16)
        # character indexes
        unicodes = [i for i in self.unicodeToGlyphNameMapping.keys() if i is not None]
        if unicodes:
            minIndex = min(unicodes)
            maxIndex = max(unicodes)
        else:
            # the font may have *no* unicode values (it really happens!) so
            # there needs to be a fallback. use 0xFFFF, as AFDKO does:
            # FDK/Tools/Programs/makeotf/makeotf_lib/source/hotconv/map.c
            minIndex = 0xFFFF
            maxIndex = 0xFFFF
        if maxIndex > 0xFFFF:
            # the spec says that 0xFFFF should be used
            # as the max if the max exceeds 0xFFFF
            maxIndex = 0xFFFF
        os2.fsFirstCharIndex = minIndex
        os2.fsLastCharIndex = maxIndex
        os2.usBreakChar = 32
        os2.usDefaultChar = 0
        # maximum contextual lookup length
        # NOTE(review): "usMaxContex" (sic) presumably matches the attribute
        # spelling used by fontTools' OS/2 table — verify before renaming.
        os2.usMaxContex = 0
def setupTable_hmtx(self):
"""
Make the hmtx table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "hmtx" not in self.tables:
return
self.otf["hmtx"] = hmtx = newTable("hmtx")
hmtx.metrics = {}
for glyphName, glyph in self.allGlyphs.items():
width = otRound(glyph.width)
if width < 0:
raise ValueError("The width should not be negative: '%s'" % (glyphName))
bounds = self.glyphBoundingBoxes[glyphName]
left = bounds.xMin if bounds else 0
hmtx[glyphName] = (width, left)
def _setupTable_hhea_or_vhea(self, tag):
"""
Make the hhea table or the vhea table. This assume the hmtx or
the vmtx were respectively made first.
"""
if tag not in self.tables:
return
if tag == "hhea":
isHhea = True
else:
isHhea = False
self.otf[tag] = table = newTable(tag)
mtxTable = self.otf.get(tag[0] + "mtx")
font = self.ufo
if isHhea:
table.tableVersion = 0x00010000
else:
table.tableVersion = 0x00011000
# Vertical metrics in hhea, horizontal metrics in vhea
# and caret info.
# The hhea metrics names are formed as:
# "openType" + tag.title() + "Ascender", etc.
# While vhea metrics names are formed as:
# "openType" + tag.title() + "VertTypo" + "Ascender", etc.
# Caret info names only differ by tag.title().
commonPrefix = "openType%s" % tag.title()
if isHhea:
metricsPrefix = commonPrefix
else:
metricsPrefix = "openType%sVertTypo" % tag.title()
metricsDict = {
"ascent": "%sAscender" % metricsPrefix,
"descent": "%sDescender" % metricsPrefix,
"lineGap": "%sLineGap" % metricsPrefix,
"caretSlopeRise": "%sCaretSlopeRise" % commonPrefix,
"caretSlopeRun": "%sCaretSlopeRun" % commonPrefix,
"caretOffset": "%sCaretOffset" % commonPrefix,
}
for otfName, ufoName in metricsDict.items():
setattr(table, otfName, otRound(getAttrWithFallback(font.info, ufoName)))
# Horizontal metrics in hhea, vertical metrics in vhea
advances = [] # width in hhea, height in vhea
firstSideBearings = [] # left in hhea, top in vhea
secondSideBearings = [] # right in hhea, bottom in vhea
extents = []
if mtxTable is not None:
for glyphName in self.allGlyphs:
advance, firstSideBearing = mtxTable[glyphName]
advances.append(advance)
bounds = self.glyphBoundingBoxes[glyphName]
if bounds is None:
continue
if isHhea:
boundsAdvance = bounds.xMax - bounds.xMin
# equation from the hhea spec for calculating xMaxExtent:
# Max(lsb + (xMax - xMin))
extent = firstSideBearing + boundsAdvance
else:
boundsAdvance = bounds.yMax - bounds.yMin
# equation from the vhea spec for calculating yMaxExtent:
# Max(tsb + (yMax - yMin)).
extent = firstSideBearing + boundsAdvance
secondSideBearing = advance - firstSideBearing - boundsAdvance
firstSideBearings.append(firstSideBearing)
secondSideBearings.append(secondSideBearing)
extents.append(extent)
setattr(
table,
"advance%sMax" % ("Width" if isHhea else "Height"),
max(advances) if advances else 0,
)
setattr(
table,
"min%sSideBearing" % ("Left" if isHhea else "Top"),
min(firstSideBearings) if firstSideBearings else 0,
)
setattr(
table,
"min%sSideBearing" % ("Right" if isHhea else "Bottom"),
min(secondSideBearings) if secondSideBearings else 0,
)
setattr(
table,
"%sMaxExtent" % ("x" if isHhea else "y"),
max(extents) if extents else 0,
)
if isHhea:
reserved = range(4)
else:
# vhea.reserved0 is caretOffset for legacy reasons
reserved = range(1, 5)
for i in reserved:
setattr(table, "reserved%i" % i, 0)
table.metricDataFormat = 0
# glyph count
setattr(
table, "numberOf%sMetrics" % ("H" if isHhea else "V"), len(self.allGlyphs)
)
def setupTable_hhea(self):
"""
Make the hhea table. This assumes that the hmtx table was made first.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self._setupTable_hhea_or_vhea("hhea")
def setupTable_vmtx(self):
"""
Make the vmtx table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "vmtx" not in self.tables:
return
self.otf["vmtx"] = vmtx = newTable("vmtx")
vmtx.metrics = {}
for glyphName, glyph in self.allGlyphs.items():
height = otRound(glyph.height)
if height < 0:
raise ValueError(
"The height should not be negative: '%s'" % (glyphName)
)
verticalOrigin = _getVerticalOrigin(self.otf, glyph)
bounds = self.glyphBoundingBoxes[glyphName]
top = bounds.yMax if bounds else 0
vmtx[glyphName] = (height, verticalOrigin - top)
def setupTable_VORG(self):
"""
Make the VORG table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "VORG" not in self.tables:
return
self.otf["VORG"] = vorg = newTable("VORG")
vorg.majorVersion = 1
vorg.minorVersion = 0
vorg.VOriginRecords = {}
# Find the most frequent verticalOrigin
vorg_count = Counter(
_getVerticalOrigin(self.otf, glyph) for glyph in self.allGlyphs.values()
)
vorg.defaultVertOriginY = vorg_count.most_common(1)[0][0]
if len(vorg_count) > 1:
for glyphName, glyph in self.allGlyphs.items():
vertOriginY = _getVerticalOrigin(self.otf, glyph)
if vertOriginY == vorg.defaultVertOriginY:
continue
vorg.VOriginRecords[glyphName] = vertOriginY
vorg.numVertOriginYMetrics = len(vorg.VOriginRecords)
def setupTable_vhea(self):
"""
Make the vhea table. This assumes that the head and vmtx tables were
made first.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self._setupTable_hhea_or_vhea("vhea")
def setupTable_post(self):
"""
Make the post table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "post" not in self.tables:
return
self.otf["post"] = post = newTable("post")
font = self.ufo
post.formatType = 3.0
# italic angle
italicAngle = getAttrWithFallback(font.info, "italicAngle")
post.italicAngle = italicAngle
# underline
underlinePosition = getAttrWithFallback(
font.info, "postscriptUnderlinePosition"
)
post.underlinePosition = otRound(underlinePosition)
underlineThickness = getAttrWithFallback(
font.info, "postscriptUnderlineThickness"
)
post.underlineThickness = otRound(underlineThickness)
post.isFixedPitch = getAttrWithFallback(font.info, "postscriptIsFixedPitch")
# misc
post.minMemType42 = 0
post.maxMemType42 = 0
post.minMemType1 = 0
post.maxMemType1 = 0
def setupTable_COLR(self):
"""
Compile the COLR table.
**This should not be called externally.**
"""
if "COLR" not in self.tables:
return
from fontTools.colorLib.builder import buildCOLR
layerInfo = self.ufo.lib[COLOR_LAYERS_KEY]
glyphMap = self.otf.getReverseGlyphMap()
if layerInfo:
self.otf["COLR"] = buildCOLR(layerInfo, glyphMap=glyphMap)
def setupTable_CPAL(self):
"""
Compile the CPAL table.
**This should not be called externally.**
"""
if "CPAL" not in self.tables:
return
from fontTools.colorLib.builder import buildCPAL
from fontTools.colorLib.errors import ColorLibError
# colorLib wants colors as tuples, plistlib gives us lists
palettes = [
[tuple(color) for color in palette]
for palette in self.ufo.lib[COLOR_PALETTES_KEY]
]
try:
self.otf["CPAL"] = buildCPAL(palettes)
except ColorLibError as e:
raise InvalidFontData("Failed to build CPAL table") from e
def setupOtherTables(self):
"""
Make the other tables. The default implementation does nothing.
**This should not be called externally.** Subclasses
may override this method to add other tables to the
font if desired.
"""
pass
def importTTX(self):
"""
Merge TTX files from data directory "com.github.fonttools.ttx"
**This should not be called externally.** Subclasses
may override this method to handle the bounds creation
in a different way if desired.
"""
import os
prefix = "com.github.fonttools.ttx"
if not hasattr(self.ufo, "data"):
return
if not self.ufo.data.fileNames:
return
for path in self.ufo.data.fileNames:
foldername, filename = os.path.split(path)
if foldername == prefix and filename.endswith(".ttx"):
ttx = self.ufo.data[path].decode("utf-8")
fp = BytesIO(ttx.encode("utf-8"))
# Preserve the original SFNT version when loading a TTX dump.
sfntVersion = self.otf.sfntVersion
try:
self.otf.importXML(fp)
finally:
self.otf.sfntVersion = sfntVersion
class OutlineOTFCompiler(BaseOutlineCompiler):
    """Compile a .otf font with CFF outlines."""
    sfntVersion = "OTTO"
    tables = BaseOutlineCompiler.tables | {"CFF", "VORG"}
    def __init__(
        self,
        font,
        glyphSet=None,
        glyphOrder=None,
        tables=None,
        notdefGlyph=None,
        roundTolerance=None,
        optimizeCFF=True,
    ):
        """
        Initialize the CFF outline compiler.

        *roundTolerance* controls coordinate rounding in the charstrings;
        *optimizeCFF* toggles charstring optimization when compiling. All
        other arguments are forwarded to BaseOutlineCompiler.
        """
        if roundTolerance is not None:
            self.roundTolerance = float(roundTolerance)
        else:
            # round all coordinates to integers by default
            self.roundTolerance = 0.5
        super().__init__(
            font,
            glyphSet=glyphSet,
            glyphOrder=glyphOrder,
            tables=tables,
            notdefGlyph=notdefGlyph,
        )
        self.optimizeCFF = optimizeCFF
        # lazily computed by getDefaultAndNominalWidths()
        self._defaultAndNominalWidths = None
    def getDefaultAndNominalWidths(self):
        """Return (defaultWidthX, nominalWidthX).
        If fontinfo.plist doesn't define these explicitly, compute optimal values
        from the glyphs' advance widths.
        """
        if self._defaultAndNominalWidths is None:
            info = self.ufo.info
            # populate the width values
            if all(
                getattr(info, attr, None) is None
                for attr in ("postscriptDefaultWidthX", "postscriptNominalWidthX")
            ):
                # no custom values set in fontinfo.plist; compute optimal ones
                from fontTools.cffLib.width import optimizeWidths
                widths = [otRound(glyph.width) for glyph in self.allGlyphs.values()]
                defaultWidthX, nominalWidthX = optimizeWidths(widths)
            else:
                defaultWidthX = otRound(
                    getAttrWithFallback(info, "postscriptDefaultWidthX")
                )
                nominalWidthX = otRound(
                    getAttrWithFallback(info, "postscriptNominalWidthX")
                )
            self._defaultAndNominalWidths = (defaultWidthX, nominalWidthX)
        return self._defaultAndNominalWidths
    def compileGlyphs(self):
        """Compile and return the CFF T2CharStrings for this font."""
        defaultWidth, nominalWidth = self.getDefaultAndNominalWidths()
        # The real PrivateDict will be created later on in setupTable_CFF.
        # For convenience here we use a namespace object to pass the default/nominal
        # widths that we need to draw the charstrings when computing their bounds.
        private = SimpleNamespace(
            defaultWidthX=defaultWidth, nominalWidthX=nominalWidth
        )
        compiledGlyphs = {}
        for glyphName in self.glyphOrder:
            glyph = self.allGlyphs[glyphName]
            cs = self.getCharStringForGlyph(glyph, private)
            compiledGlyphs[glyphName] = cs
        return compiledGlyphs
    def makeGlyphsBoundingBoxes(self):
        """
        Make bounding boxes for all the glyphs, and return a dictionary of
        BoundingBox(xMin, xMax, yMin, yMax) namedtuples keyed by glyph names.
        The bounding box of empty glyphs (without contours or components) is
        set to None.
        Check that the float values are within the range of the specified
        self.roundTolerance, and if so use the rounded value; else take the
        floor or ceiling to ensure that the bounding box encloses the original
        values.
        """
        def toInt(value, else_callback):
            # within tolerance: accept the rounded value; otherwise round
            # outward with the supplied floor/ceil callback
            rounded = otRound(value)
            if tolerance >= 0.5 or abs(rounded - value) <= tolerance:
                return rounded
            else:
                return int(else_callback(value))
        tolerance = self.roundTolerance
        glyphBoxes = {}
        charStrings = self.getCompiledGlyphs()
        for name, cs in charStrings.items():
            bounds = cs.calcBounds(charStrings)
            if bounds is not None:
                rounded = []
                # bounds is (xMin, yMin, xMax, yMax): mins round via floor,
                # maxes via ceil, so the box never shrinks
                for value in bounds[:2]:
                    rounded.append(toInt(value, math.floor))
                for value in bounds[2:]:
                    rounded.append(toInt(value, math.ceil))
                bounds = BoundingBox(*rounded)
            if bounds == EMPTY_BOUNDING_BOX:
                bounds = None
            glyphBoxes[name] = bounds
        return glyphBoxes
    def getCharStringForGlyph(self, glyph, private, globalSubrs=None):
        """
        Get a Type2CharString for the *glyph*
        **This should not be called externally.** Subclasses
        may override this method to handle the charstring creation
        in a different way if desired.
        """
        width = glyph.width
        defaultWidth = private.defaultWidthX
        nominalWidth = private.nominalWidthX
        if width == defaultWidth:
            # if width equals the default it can be omitted from charstring
            width = None
        else:
            # subtract the nominal width
            width -= nominalWidth
        if width is not None:
            width = otRound(width)
        pen = T2CharStringPen(width, self.allGlyphs, roundTolerance=self.roundTolerance)
        glyph.draw(pen)
        charString = pen.getCharString(private, globalSubrs, optimize=self.optimizeCFF)
        return charString
    def setupTable_maxp(self):
        """Make the maxp table."""
        if "maxp" not in self.tables:
            return
        self.otf["maxp"] = maxp = newTable("maxp")
        # version 0.5 is the CFF flavor of maxp (numGlyphs only)
        maxp.tableVersion = 0x00005000
        maxp.numGlyphs = len(self.glyphOrder)
    def setupOtherTables(self):
        # CFF always; VORG only for fonts with vertical metrics
        self.setupTable_CFF()
        if self.vertical:
            self.setupTable_VORG()
    def setupTable_CFF(self):
        """Make the CFF table."""
        if not {"CFF", "CFF "}.intersection(self.tables):
            return
        self.otf["CFF "] = cff = newTable("CFF ")
        cff = cff.cff
        # set up the basics
        cff.major = 1
        cff.minor = 0
        cff.hdrSize = 4
        cff.offSize = 4
        cff.fontNames = []
        strings = IndexedStrings()
        cff.strings = strings
        private = PrivateDict(strings=strings)
        private.rawDict.update(private.defaults)
        globalSubrs = GlobalSubrsIndex(private=private)
        topDict = TopDict(GlobalSubrs=globalSubrs, strings=strings)
        topDict.Private = private
        charStrings = topDict.CharStrings = CharStrings(
            file=None,
            charset=None,
            globalSubrs=globalSubrs,
            private=private,
            fdSelect=None,
            fdArray=None,
        )
        charStrings.charStringsAreIndexed = True
        topDict.charset = []
        charStringsIndex = charStrings.charStringsIndex = SubrsIndex(
            private=private, globalSubrs=globalSubrs
        )
        cff.topDictIndex = topDictIndex = TopDictIndex()
        topDictIndex.append(topDict)
        topDictIndex.strings = strings
        cff.GlobalSubrs = globalSubrs
        # populate naming data
        info = self.ufo.info
        psName = getAttrWithFallback(info, "postscriptFontName")
        cff.fontNames.append(psName)
        topDict = cff.topDictIndex[0]
        topDict.version = "%d.%d" % (
            getAttrWithFallback(info, "versionMajor"),
            getAttrWithFallback(info, "versionMinor"),
        )
        # trademark and copyright must be normalized to the character set
        # CFF can store; warn when normalization dropped characters
        trademark = getAttrWithFallback(info, "trademark")
        if trademark:
            trademark = normalizeStringForPostscript(
                trademark.replace("\u00A9", "Copyright")
            )
            if trademark != self.ufo.info.trademark:
                logger.info(
                    "The trademark was normalized for storage in the "
                    "CFF table and consequently some characters were "
                    "dropped: '%s'",
                    trademark,
                )
        if trademark is None:
            trademark = ""
        topDict.Notice = trademark
        copyright = getAttrWithFallback(info, "copyright")
        if copyright:
            copyright = normalizeStringForPostscript(
                copyright.replace("\u00A9", "Copyright")
            )
            if copyright != self.ufo.info.copyright:
                logger.info(
                    "The copyright was normalized for storage in the "
                    "CFF table and consequently some characters were "
                    "dropped: '%s'",
                    copyright,
                )
        if copyright is None:
            copyright = ""
        topDict.Copyright = copyright
        topDict.FullName = getAttrWithFallback(info, "postscriptFullName")
        topDict.FamilyName = getAttrWithFallback(
            info, "openTypeNamePreferredFamilyName"
        )
        topDict.Weight = getAttrWithFallback(info, "postscriptWeightName")
        # populate various numbers
        topDict.isFixedPitch = getAttrWithFallback(info, "postscriptIsFixedPitch")
        topDict.ItalicAngle = getAttrWithFallback(info, "italicAngle")
        underlinePosition = getAttrWithFallback(info, "postscriptUnderlinePosition")
        topDict.UnderlinePosition = otRound(underlinePosition)
        underlineThickness = getAttrWithFallback(info, "postscriptUnderlineThickness")
        topDict.UnderlineThickness = otRound(underlineThickness)
        # populate font matrix: scales font units to the 1-em text space
        unitsPerEm = otRound(getAttrWithFallback(info, "unitsPerEm"))
        topDict.FontMatrix = [1.0 / unitsPerEm, 0, 0, 1.0 / unitsPerEm, 0, 0]
        # populate the width values (zero values are CFF defaults, so omitted)
        defaultWidthX, nominalWidthX = self.getDefaultAndNominalWidths()
        if defaultWidthX:
            private.rawDict["defaultWidthX"] = defaultWidthX
        if nominalWidthX:
            private.rawDict["nominalWidthX"] = nominalWidthX
        # populate hint data
        blueFuzz = otRound(getAttrWithFallback(info, "postscriptBlueFuzz"))
        blueShift = otRound(getAttrWithFallback(info, "postscriptBlueShift"))
        blueScale = getAttrWithFallback(info, "postscriptBlueScale")
        forceBold = getAttrWithFallback(info, "postscriptForceBold")
        blueValues = getAttrWithFallback(info, "postscriptBlueValues")
        if isinstance(blueValues, list):
            blueValues = [otRound(i) for i in blueValues]
        otherBlues = getAttrWithFallback(info, "postscriptOtherBlues")
        if isinstance(otherBlues, list):
            otherBlues = [otRound(i) for i in otherBlues]
        familyBlues = getAttrWithFallback(info, "postscriptFamilyBlues")
        if isinstance(familyBlues, list):
            familyBlues = [otRound(i) for i in familyBlues]
        familyOtherBlues = getAttrWithFallback(info, "postscriptFamilyOtherBlues")
        if isinstance(familyOtherBlues, list):
            familyOtherBlues = [otRound(i) for i in familyOtherBlues]
        stemSnapH = getAttrWithFallback(info, "postscriptStemSnapH")
        if isinstance(stemSnapH, list):
            stemSnapH = [otRound(i) for i in stemSnapH]
        stemSnapV = getAttrWithFallback(info, "postscriptStemSnapV")
        if isinstance(stemSnapV, list):
            stemSnapV = [otRound(i) for i in stemSnapV]
        # only write the blues data if some blues are defined.
        if any((blueValues, otherBlues, familyBlues, familyOtherBlues)):
            private.rawDict["BlueFuzz"] = blueFuzz
            private.rawDict["BlueShift"] = blueShift
            private.rawDict["BlueScale"] = blueScale
            private.rawDict["ForceBold"] = forceBold
            if blueValues:
                private.rawDict["BlueValues"] = blueValues
            if otherBlues:
                private.rawDict["OtherBlues"] = otherBlues
            if familyBlues:
                private.rawDict["FamilyBlues"] = familyBlues
            if familyOtherBlues:
                private.rawDict["FamilyOtherBlues"] = familyOtherBlues
        # only write the stems if both are defined.
        if stemSnapH and stemSnapV:
            private.rawDict["StemSnapH"] = stemSnapH
            private.rawDict["StdHW"] = stemSnapH[0]
            private.rawDict["StemSnapV"] = stemSnapV
            private.rawDict["StdVW"] = stemSnapV[0]
        # populate glyphs
        cffGlyphs = self.getCompiledGlyphs()
        for glyphName in self.glyphOrder:
            charString = cffGlyphs[glyphName]
            charString.private = private
            charString.globalSubrs = globalSubrs
            # add to the font
            if glyphName in charStrings:
                # XXX a glyph already has this name. should we choke?
                glyphID = charStrings.charStrings[glyphName]
                charStringsIndex.items[glyphID] = charString
            else:
                charStringsIndex.append(charString)
                glyphID = len(topDict.charset)
                charStrings.charStrings[glyphName] = glyphID
                topDict.charset.append(glyphName)
        topDict.FontBBox = self.fontBoundingBox
class OutlineTTFCompiler(BaseOutlineCompiler):
    """Compile a .ttf font with TrueType outlines."""
    # sfnt version 1.0 identifies TrueType-flavored fonts
    sfntVersion = "\000\001\000\000"
    tables = BaseOutlineCompiler.tables | {"loca", "gasp", "glyf"}
    def compileGlyphs(self):
        """Compile and return the TrueType glyphs for this font."""
        allGlyphs = self.allGlyphs
        ttGlyphs = {}
        for name in self.glyphOrder:
            glyph = allGlyphs[name]
            pen = TTGlyphPen(allGlyphs)
            try:
                glyph.draw(pen)
            except NotImplementedError:
                # the pen cannot convert this outline; substitute an empty
                # glyph so compilation can continue
                logger.error("%r has invalid curve format; skipped", name)
                ttGlyph = Glyph()
            else:
                ttGlyph = pen.glyph()
            ttGlyphs[name] = ttGlyph
        return ttGlyphs
    def makeGlyphsBoundingBoxes(self):
        """Make bounding boxes for all the glyphs.
        Return a dictionary of BoundingBox(xMin, xMax, yMin, yMax) namedtuples
        keyed by glyph names.
        The bounding box of empty glyphs (without contours or components) is
        set to None.
        """
        glyphBoxes = {}
        ttGlyphs = self.getCompiledGlyphs()
        for glyphName, glyph in ttGlyphs.items():
            # recalcBounds needs the full glyph set to resolve components
            glyph.recalcBounds(ttGlyphs)
            bounds = BoundingBox(glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax)
            if bounds == EMPTY_BOUNDING_BOX:
                bounds = None
            glyphBoxes[glyphName] = bounds
        return glyphBoxes
    def setupTable_maxp(self):
        """Make the maxp table."""
        if "maxp" not in self.tables:
            return
        self.otf["maxp"] = maxp = newTable("maxp")
        # version 1.0 is the TrueType flavor of maxp
        maxp.tableVersion = 0x00010000
        maxp.numGlyphs = len(self.glyphOrder)
        # hinting-related maxima are zeroed: no instructions are compiled here
        maxp.maxZones = 1
        maxp.maxTwilightPoints = 0
        maxp.maxStorage = 0
        maxp.maxFunctionDefs = 0
        maxp.maxInstructionDefs = 0
        maxp.maxStackElements = 0
        maxp.maxSizeOfInstructions = 0
        # NOTE(review): max() raises ValueError when there are no glyphs at
        # all; presumably a .notdef always exists by this point — verify.
        maxp.maxComponentElements = max(
            len(g.components) for g in self.allGlyphs.values()
        )
    def setupTable_post(self):
        """Make a format 2 post table with the compiler's glyph order."""
        super().setupTable_post()
        if "post" not in self.otf:
            return
        # upgrade the base class's format 3 table to format 2, which stores
        # glyph names
        post = self.otf["post"]
        post.formatType = 2.0
        post.extraNames = []
        post.mapping = {}
        post.glyphOrder = self.glyphOrder
    def setupOtherTables(self):
        self.setupTable_glyf()
        # gasp is only written when the UFO defines range records for it
        if self.ufo.info.openTypeGaspRangeRecords:
            self.setupTable_gasp()
    def setupTable_glyf(self):
        """Make the glyf table."""
        # glyf and loca are written together; skip unless both are requested
        if not {"glyf", "loca"}.issubset(self.tables):
            return
        self.otf["loca"] = newTable("loca")
        self.otf["glyf"] = glyf = newTable("glyf")
        glyf.glyphs = {}
        glyf.glyphOrder = self.glyphOrder
        hmtx = self.otf.get("hmtx")
        ttGlyphs = self.getCompiledGlyphs()
        for name in self.glyphOrder:
            ttGlyph = ttGlyphs[name]
            # self.autoUseMyMetrics may be set to None to disable the flagging
            if ttGlyph.isComposite() and hmtx is not None and self.autoUseMyMetrics:
                self.autoUseMyMetrics(ttGlyph, name, hmtx)
            glyf[name] = ttGlyph
    @staticmethod
    def autoUseMyMetrics(ttGlyph, glyphName, hmtx):
        """Set the "USE_MY_METRICS" flag on the first component having the
        same advance width as the composite glyph, no transform and no
        horizontal shift (but allow it to shift vertically).
        This forces the composite glyph to use the possibly hinted horizontal
        metrics of the sub-glyph, instead of those from the "hmtx" table.
        """
        width = hmtx[glyphName][0]
        for component in ttGlyph.components:
            try:
                baseName, transform = component.getComponentInfo()
            except AttributeError:
                # component uses '{first,second}Pt' instead of 'x' and 'y'
                continue
            try:
                baseMetrics = hmtx[baseName]
            except KeyError:
                continue  # ignore missing components
            else:
                # transform[:-1] drops the y-shift, so vertical offsets are OK
                if baseMetrics[0] == width and transform[:-1] == (1, 0, 0, 1, 0):
                    component.flags |= USE_MY_METRICS
                    break
class StubGlyph:
    """
    A minimal glyph-like stand-in used to fill gaps in the provided UFO
    (most notably a synthesized .notdef).
    """

    def __init__(
        self,
        name,
        width,
        unitsPerEm,
        ascender,
        descender,
        unicodes=None,
        reverseContour=False,
    ):
        self.name = name
        self.width = width
        self.unitsPerEm = unitsPerEm
        self.ascender = ascender
        self.descender = descender
        self.unicodes = [] if unicodes is None else unicodes
        self.components = []
        self.anchors = []
        # expose the first codepoint (if any) the way real glyph objects do
        self.unicode = self.unicodes[0] if self.unicodes else None
        if name == ".notdef":
            # only the synthesized .notdef gets an actual outline
            self.draw = self._drawDefaultNotdef
        self.reverseContour = reverseContour

    def __len__(self):
        # pretend to own one contour when drawing the default .notdef box
        return 1 if self.name == ".notdef" else 0

    @property
    def height(self):
        return self.ascender - self.descender

    def draw(self, pen):
        """Draw nothing; replaced in __init__ for .notdef."""
        pass

    def _drawDefaultNotdef(self, pen):
        # Draw contour in PostScript direction (counter-clockwise) by default. Reverse
        # for TrueType.
        if self.reverseContour:
            pen = ReverseContourPen(pen)
        boxWidth = otRound(self.unitsPerEm * 0.5)
        stroke = otRound(self.unitsPerEm * 0.05)
        left = stroke
        right = boxWidth - stroke
        top = self.ascender
        bottom = self.descender

        def outline(points):
            pen.moveTo(points[0])
            for pt in points[1:]:
                pen.lineTo(pt)
            pen.closePath()

        # outer box
        outline(
            [(left, bottom), (right, bottom), (right, top), (left, top), (left, bottom)]
        )
        # inner box, drawn in the opposite direction to cut the counter
        left += stroke
        right -= stroke
        top -= stroke
        bottom += stroke
        outline(
            [(left, bottom), (left, top), (right, top), (right, bottom), (left, bottom)]
        )

    def _get_controlPointBounds(self):
        boundsPen = ControlBoundsPen(None)
        self.draw(boundsPen)
        return boundsPen.bounds

    controlPointBounds = property(_get_controlPointBounds)
| 37.037433 | 88 | 0.604281 |
023dca3a65faf19fa2a0c36ae038246103632706 | 576 | py | Python | simple/projects/migrations/0008_auto_20160511_0837.py | zurfyx/simple | 13b9b4e674cec95af7f2003a55dceda36b7b4184 | [
"MIT"
] | null | null | null | simple/projects/migrations/0008_auto_20160511_0837.py | zurfyx/simple | 13b9b4e674cec95af7f2003a55dceda36b7b4184 | [
"MIT"
] | null | null | null | simple/projects/migrations/0008_auto_20160511_0837.py | zurfyx/simple | 13b9b4e674cec95af7f2003a55dceda36b7b4184 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-11 08:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0007_merge'),
]
operations = [
migrations.RemoveField(
model_name='projectcomment',
name='project',
),
migrations.RemoveField(
model_name='projectcomment',
name='user',
),
migrations.DeleteModel(
name='ProjectComment',
),
]
| 21.333333 | 47 | 0.571181 |
d5bb337d593d85451bb5b08c648945b3ede9774a | 2,030 | py | Python | migrations/versions/1f7a3ddf473b_initial_migration.py | Joseph-Wairimu/pitches | 3f365722409b0df0f4a7831e3516408f04b65e46 | [
"Unlicense"
] | null | null | null | migrations/versions/1f7a3ddf473b_initial_migration.py | Joseph-Wairimu/pitches | 3f365722409b0df0f4a7831e3516408f04b65e46 | [
"Unlicense"
] | null | null | null | migrations/versions/1f7a3ddf473b_initial_migration.py | Joseph-Wairimu/pitches | 3f365722409b0df0f4a7831e3516408f04b65e46 | [
"Unlicense"
] | null | null | null | """Initial Migration
Revision ID: 1f7a3ddf473b
Revises: 154be2cf7790
Create Date: 2022-03-06 15:10:31.949602
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1f7a3ddf473b'  # unique id of this migration script
down_revision = '154be2cf7790'  # parent revision this one applies on top of
branch_labels = None  # no named branch for this revision
depends_on = None  # no cross-branch dependency
def upgrade():
    """Create the users, posts and comments tables.

    Creation order matters: posts.author references users.id, and
    comments references both posts.id and users.id.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.String(length=255), nullable=True),
    sa.Column('password', sa.String(length=255), nullable=True),
    sa.Column('username', sa.String(length=255), nullable=True),
    sa.Column('bio', sa.String(length=255), nullable=True),
    sa.Column('profile_pic_path', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    op.create_table('posts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('post', sa.String(length=255), nullable=True),
    sa.Column('category', sa.String(length=255), nullable=True),
    sa.Column('title', sa.String(length=255), nullable=True),
    sa.Column('vote_count', sa.Integer(), nullable=True),
    sa.Column('added_date', sa.DateTime(), nullable=True),
    sa.Column('author', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['author'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('comments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('comment', sa.Text(), nullable=False),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the comments, posts and users tables (reverse of upgrade).

    Tables are dropped in reverse creation order so foreign-key
    constraints are removed before the tables they reference.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('comments')
    op.drop_table('posts')
    op.drop_table('users')
    # ### end Alembic commands ###
| 33.833333 | 65 | 0.66601 |
e8ee9d8b79e83f4fe4ca40e3dd20087165aa0098 | 1,647 | py | Python | 8_javascript/practice/chankoo/article_list.py | sisobus/WebStudio2019 | 2f659a84647110bcf975525905722931fa7055b3 | [
"MIT"
] | 14 | 2019-03-06T10:32:40.000Z | 2021-11-18T01:44:28.000Z | 8_javascript/practice/chankoo/article_list.py | sisobus/WebStudio2019 | 2f659a84647110bcf975525905722931fa7055b3 | [
"MIT"
] | 35 | 2019-03-13T07:04:02.000Z | 2019-10-08T06:26:45.000Z | 8_javascript/practice/chankoo/article_list.py | sisobus/WebStudio2019 | 2f659a84647110bcf975525905722931fa7055b3 | [
"MIT"
] | 22 | 2019-03-11T11:00:24.000Z | 2019-09-14T06:53:30.000Z | from flask import request
from flask_restful import Resource
from models import db, Article
from utils import serializer
class ArticleList(Resource):
    """REST resource exposing CRUD operations on Article rows.

    Responses are plain status strings (not JSON envelopes), matching the
    existing API contract of this endpoint.
    """

    def _get_articles(self):
        """Return every Article in the database."""
        return Article.query.all()

    def get(self):
        """Return all articles, serialized."""
        articles = self._get_articles()
        return serializer(articles)

    def post(self):
        """Create a new article from the JSON body (user_id, title, content)."""
        r_json = request.get_json()
        new_article = Article(r_json['user_id'], r_json['title'],
                              r_json['content'])
        db.session.add(new_article)
        db.session.commit()
        return 'create {}'.format(repr(new_article))

    def put(self):
        """Update title/content of the article whose id is in the JSON body."""
        r_json = request.get_json()
        put_id = r_json['id']
        title = r_json['title']
        content = r_json['content']
        article = Article.query.filter_by(id=put_id).first()
        if article is None:
            return '{} does not exist'.format(put_id)
        article.title = title
        article.content = content
        db.session.commit()
        return 'update article successfully'

    def delete(self):
        """Delete the article whose id is in the JSON body."""
        r_json = request.get_json()
        del_id = r_json['id']
        article = Article.query.filter_by(id=del_id).first()
        if article is None:
            return '{} does not exist'.format(del_id)
        db.session.delete(article)
        db.session.commit()
        return '{} deleted successfully'.format(del_id)
7e31ae97e813ff693336699684a39ec316943145 | 4,298 | py | Python | evaluate.py | asdas1505/LVAE-MultiGPU | 3dec344af9f6a1d1a22d3e1be969bc86ec906586 | [
"MIT"
] | 64 | 2019-12-23T12:15:15.000Z | 2022-01-17T13:02:20.000Z | evaluate.py | asdas1505/LVAE-MultiGPU | 3dec344af9f6a1d1a22d3e1be969bc86ec906586 | [
"MIT"
] | null | null | null | evaluate.py | asdas1505/LVAE-MultiGPU | 3dec344af9f6a1d1a22d3e1be969bc86ec906586 | [
"MIT"
] | 8 | 2020-01-31T02:32:57.000Z | 2022-03-22T18:21:43.000Z | """
Standalone script for a couple of simple evaluations/tests of trained models.
"""
import argparse
import os
import warnings
import torch
import torch.utils.data
from boilr.eval import BaseOfflineEvaluator
from boilr.utils.viz import img_grid_pad_value
from torchvision.utils import save_image
from experiment.experiment_manager import LVAEExperiment
class Evaluator(BaseOfflineEvaluator):
    """Offline evaluation of a trained model: metrics, samples, reconstructions."""

    def run(self):
        """Evaluate the loaded model and write qualitative results.

        Runs the test procedure (with importance-weighted LL estimation if
        requested), saves batches of prior samples and a grid of input
        reconstructions, and optionally probes per-layer representations.
        """
        torch.set_grad_enabled(False)
        n = 12  # rows/columns of the saved image grids

        e = self._experiment
        e.model.eval()

        # Run evaluation and print results
        results = e.test_procedure(iw_samples=self.args.ll_samples)
        print("Eval results:\n{}".format(results))

        # Save samples
        for i in range(self.args.prior_samples):
            fname = os.path.join(self._img_folder, "samples_{}.png".format(i))
            e.generate_and_save_samples(fname, nrows=n)

        # Save input and reconstructions
        x, y = next(iter(e.dataloaders.test))
        fname = os.path.join(self._img_folder, "reconstructions.png")
        e.generate_and_save_reconstructions(x, fname, nrows=n)

        # Inspect representations learned by each layer
        if self.args.inspect_layer_repr:
            inspect_layer_repr(e.model, self._img_folder, n=n)

    def _add_args(self, parser: argparse.ArgumentParser) -> None:
        """Register evaluation-specific command-line arguments."""
        super(Evaluator, self)._add_args(parser)

        parser.add_argument('--ll',
                            action='store_true',
                            help="estimate log likelihood with importance-"
                            "weighted bound")
        parser.add_argument('--ll-samples',
                            type=int,
                            default=100,
                            dest='ll_samples',
                            metavar='N',
                            help="number of importance-weighted samples for "
                            "log likelihood estimation")
        parser.add_argument('--ps',
                            type=int,
                            default=1,
                            dest='prior_samples',
                            metavar='N',
                            help="number of batches of samples from prior")
        parser.add_argument('--layer-repr',
                            action='store_true',
                            dest='inspect_layer_repr',
                            help='inspect layer representations. Generate '
                            'samples by sampling top layers once, then taking '
                            'many samples from a middle layer, and finally '
                            'sample the downstream layers from the conditional '
                            'mode. Do this for every layer.')

    @classmethod
    def _check_args(cls, args: argparse.Namespace) -> argparse.Namespace:
        """Normalize and validate the parsed arguments."""
        args = super(Evaluator, cls)._check_args(args)

        # A single IW sample is an ordinary ELBO evaluation.
        if not args.ll:
            args.ll_samples = 1
        if args.load_step is not None:
            warnings.warn(
                "Loading weights from specific training step is not supported "
                "for now. The model will be loaded from the last checkpoint.")
        return args
def inspect_layer_repr(model, img_folder, n=8):
    """Inspect representations learned by each layer.

    For each layer index i: layers in ``range(i)`` are sampled from the
    mode, layers in ``range(i + 1, n_layers)`` are held constant, and the
    layer itself is resampled n times; the n*n resulting samples are saved
    as one image grid per layer.
    """
    num_layers = model.n_layers
    for layer_idx in range(num_layers):
        batches = [
            model.sample_prior(n,
                               mode_layers=range(layer_idx),
                               constant_layers=range(layer_idx + 1, num_layers))
            for _ in range(n)
        ]
        grid = torch.cat(batches)
        out_path = os.path.join(img_folder,
                                'sample_mode_layer' + str(layer_idx) + '.png')
        save_image(grid, out_path, nrow=n, pad_value=img_grid_pad_value(grid))
def main():
    """Build the offline evaluator for the LVAE experiment and run it."""
    Evaluator(experiment_class=LVAEExperiment)()


if __name__ == "__main__":
    main()
| 34.66129 | 80 | 0.572127 |
981e9311707d9c22d939d873e0a89793c0ecb446 | 479 | py | Python | utils/gpu.py | Asichurter/MalFusionFSL | 713bf64cc07a3489f42941fd2299837075575ac0 | [
"MIT"
] | 4 | 2021-08-05T06:49:26.000Z | 2021-12-02T09:06:41.000Z | utils/gpu.py | Asichurter/MalFusionFSL | 713bf64cc07a3489f42941fd2299837075575ac0 | [
"MIT"
] | null | null | null | utils/gpu.py | Asichurter/MalFusionFSL | 713bf64cc07a3489f42941fd2299837075575ac0 | [
"MIT"
] | null | null | null | import pynvml
pynvml.nvmlInit()
class GPUManager:
    """Thin wrapper around pynvml for querying one GPU's memory usage."""

    # Divisors for converting a byte count into the requested unit.
    MemUnitIntMap = {
        'B': 1.,
        'K': 1024.,
        'M': 1024**2,
        'G': 1024**3
    }

    def __init__(self, device_id):
        """Grab the NVML handle of the GPU at index ``device_id``."""
        self.Handle = pynvml.nvmlDeviceGetHandleByIndex(device_id)

    def getGPUUsedMem(self, unit='M'):
        """Return the device's currently used memory expressed in ``unit``."""
        used_bytes = pynvml.nvmlDeviceGetMemoryInfo(self.Handle).used
        return used_bytes / self.MemUnitIntMap[unit]
| 22.809524 | 66 | 0.632568 |
2849914e4071780d10fe9c32622b23982cb43ba0 | 25,141 | py | Python | pysapc/SparseAPCluster.py | bioinfocao/pysapc | db27b14d5935e025dbddb2187b2caee351f3c0fc | [
"BSD-3-Clause"
] | 11 | 2017-04-11T22:41:54.000Z | 2022-02-10T15:06:50.000Z | pysapc/SparseAPCluster.py | bioinfocao/pysapc | db27b14d5935e025dbddb2187b2caee351f3c0fc | [
"BSD-3-Clause"
] | 2 | 2018-04-13T09:17:01.000Z | 2020-05-13T04:33:59.000Z | pysapc/SparseAPCluster.py | bioinfocao/pysapc | db27b14d5935e025dbddb2187b2caee351f3c0fc | [
"BSD-3-Clause"
] | 8 | 2017-03-15T11:19:08.000Z | 2021-01-04T18:44:35.000Z | """
Sparse Affinity Propagation (SAP)
Designed for large data set using scipy sparse matrix(affinity/similarity matrix)
Speed optimized with cython
"""
# Authors: Huojun Cao <bioinfocao at gmail.com>
# License: BSD 3 clause
import numpy as np
from datetime import datetime
from scipy.sparse import coo_matrix,csr_matrix,lil_matrix
#from . import sparseAP_cy # cython for calculation speed optimization
import sparseMatrixPrepare
from sparseMatrixPrepare import sparseAP_cy
#########################################################################
def matixToRowColDataArr(X):
    """
    Convert sparse affinity/similarity matrix to numpy array format
    (row_array, col_array, data_array) so the cython update functions can
    work efficiently on it.

    Parameters
    ----------------------
    X: scipy sparse matrix (coo/csr/lil) or anything coo_matrix() accepts,
        e.g. a dense numpy matrix/array.

    Returns
    ----------------------
    (row_array, col_array, data_array): integer row indices, integer column
        indices and floating-point values of the nonzero entries.
    """
    # convert to coo format (from lil,csr,csc)
    if isinstance(X, coo_matrix):
        X_coo = X
    elif isinstance(X, (csr_matrix, lil_matrix)):
        X_coo = X.tocoo()
    else:  # others like numpy matrix could be converted to coo matrix
        X_coo = coo_matrix(X)
    # Upcast matrix to a floating point format (if necessary)
    X_coo = X_coo.asfptype()
    # Cast indices with the builtin `int` dtype: `np.int` was merely an alias
    # for it and was removed in NumPy 1.24, so `astype(np.int)` now raises.
    row_array = X_coo.row.astype(int)
    col_array = X_coo.col.astype(int)
    data_array = X_coo.data
    return row_array, col_array, data_array
def updateR_cython(S_rowBased_data_array, A_rowbased_data_array, R_rowbased_data_array, row_indptr, rowBased_row_array, rowBased_col_array, damping):
    """
    Update Responsibilities Matrix (R)

    The new responsibility of each entry is S minus the row-wise maximum of
    A+S (computed by the cython helper), blended with the previous R values
    via the damping factor.
    """
    row_max = sparseAP_cy.updateR_maxRow(
        A_rowbased_data_array + S_rowBased_data_array, row_indptr)
    undamped = S_rowBased_data_array - row_max
    # Damped update: keep `damping` of the old R, take the rest from the new value.
    return undamped * (1.0 - damping) + R_rowbased_data_array * damping
def updateA_cython(A_rowbased_data_array, R_rowbased_data_array, col_indptr, row_to_col_ind_arr, col_to_row_ind_arr, kk_col_index, damping):
    """
    Update Availabilities Matrix (A)

    Works in column-major element order: clips negative responsibilities
    (except the kk/self entries), runs the column-wise availability update,
    damps against the previous A, and converts back to row-major order.
    """
    a_cols = sparseAP_cy.npArrRearrange_float(A_rowbased_data_array, row_to_col_ind_arr)
    r_cols = sparseAP_cy.npArrRearrange_float(R_rowbased_data_array, row_to_col_ind_arr)
    # Zero out negative responsibilities but keep the kk entries unchanged.
    clipped = np.copy(r_cols)
    clipped[clipped < 0] = 0
    clipped[kk_col_index] = r_cols[kk_col_index]
    fresh = sparseAP_cy.updateA_col(clipped, col_indptr, kk_col_index)
    damped = fresh * (1.0 - damping) + a_cols * damping
    # Back to row-major element order.
    return sparseAP_cy.npArrRearrange_float(damped, col_to_row_ind_arr)
def updateR_cython_para(S_rowBased_data_array, A_rowbased_data_array, R_rowbased_data_array, row_indptr, rowBased_row_array, rowBased_col_array, damping):
    """
    Update Responsibilities Matrix (R), with cython multiprocessing.

    Same computation as updateR_cython, but using the parallel cython helper.
    """
    row_max = sparseAP_cy.updateR_maxRow_para(
        A_rowbased_data_array + S_rowBased_data_array, row_indptr)
    undamped = S_rowBased_data_array - row_max
    # Damped blend of new and previous responsibilities.
    return undamped * (1.0 - damping) + R_rowbased_data_array * damping
def updateA_cython_para(A_rowbased_data_array, R_rowbased_data_array, col_indptr, row_to_col_ind_arr, col_to_row_ind_arr, kk_col_index, damping):
    """
    Update Availabilities Matrix (A), with cython multiprocessing.

    Same computation as updateA_cython, but using the parallel cython helpers.
    """
    a_cols = sparseAP_cy.npArrRearrange_float_para(A_rowbased_data_array, row_to_col_ind_arr)
    r_cols = sparseAP_cy.npArrRearrange_float_para(R_rowbased_data_array, row_to_col_ind_arr)
    # Zero out negative responsibilities but keep the kk entries unchanged.
    clipped = np.copy(r_cols)
    clipped[clipped < 0] = 0
    clipped[kk_col_index] = r_cols[kk_col_index]
    fresh = sparseAP_cy.updateA_col_para(clipped, col_indptr, kk_col_index)
    damped = fresh * (1.0 - damping) + a_cols * damping
    # Back to row-major element order.
    return sparseAP_cy.npArrRearrange_float_para(damped, col_to_row_ind_arr)
def getPreferenceList(preference, nSamplesOri, data_array):
    """
    Expand ``preference`` into a per-sample preference sequence.

    Parameters
    ----------------------
    preference: a numeric scalar (int/float), the string 'min' or 'median',
        or a list/np 1D array of length ``nSamplesOri``.
    nSamplesOri: int, total number of samples.
    data_array: 1D numpy array of similarity values; used to resolve the
        'min'/'median' string forms.

    Returns
    ----------------------
    A sequence of length ``nSamplesOri``: a new list for scalar/string
    input, or the input itself when a list/array was passed.

    Raises
    ----------------------
    ValueError if ``preference`` is none of the accepted forms.
    """
    # numeric value (note: bool is an int subclass and is accepted, as before)
    if isinstance(preference, (float, int)):
        preference_list = [float(preference)] * nSamplesOri
    # string: 'min' / 'median'
    elif isinstance(preference, str):
        if preference == 'min':
            preference = data_array.min()
        elif preference == 'median':
            preference = np.median(data_array)
        else:  # other string
            raise ValueError("Preference should be a numeric scalar, or a string of 'min' / 'median',\
            or a list/np 1D array(length of samples).\n Your input preference is: {0})".format(str(preference)))
        preference_list = [preference] * nSamplesOri
    # list or numpy array of the right length: used as-is
    elif isinstance(preference, (list, np.ndarray)) and len(preference) == nSamplesOri:
        preference_list = preference
    else:
        raise ValueError("Preference should be a numeric scalar, or a str of 'min' / 'median',\
            or a list/np 1D array(length of samples).\n Your input preference is: {0})".format(str(preference)))
    return preference_list
def sparseAffinityPropagation(row_array, col_array, data_array,
                              preference='min', convergence_iter=15, convergence_percentage=0.999999,
                              max_iter=200, damping=0.9, verboseIter=100, parallel=True):
    """
    Sparse Affinity Propagation (SAP) clustering function.
    This function can be called directly if row_array, col_array, data_array are
    available. If called directly, there should be no duplicate datapoints
    (that is, (row_array[i], col_array[i]) should be unique for
    i in range(0, len(row_array))).

    Parameters
    ----------------------
    row_array, col_array, data_array: array-likes giving the row indices,
        column indices and values of the nonzero entries of the sparse
        affinity/similarity matrix. The matrix doesn't need to be symmetric;
        s(A,B) can differ from s(B,A), and s(A,B) may exist while s(B,A) does not.
    preference: a numeric scalar(float), or a str of 'min'/'median', or a
        list/numpy 1D array (length of samples).
        The preference of a datapoint K, p(K), set on the diagonal s(K,K), is the
        priori suitability of K to serve as an exemplar (cluster center); higher
        values lead to more exemplars. A good initial choice is the minimum
        ('min') or median ('median') of the full dense affinity/similarity
        matrix (not of the sparse one).
    convergence_iter: int, optional, default: 15. Number of iterations with no
        change (or change below 1.0-convergence_percentage) in exemplar labels.
    convergence_percentage: float, optional, default: 0.999999.
        Defines the convergence condition; 0.999999 means at most one in a
        million datapoints may change exemplar per "stable" iteration.
    max_iter: int, optional, default: 200.
        Maximum number of iterations. Increase if not converged at max_iter.
    damping: float, optional, default: 0.9. Must be >= 0.5 and < 1.
    verboseIter: int/None, default: 100. 0/None: silent; 1: print every
        iteration; 100: print every 100 iterations.
    parallel: boolean, default: True. Use the cython multiprocessing helpers.

    Returns
    ----------------------
    The exemplars (cluster centers) for each datapoint: a numpy array of row
    indices of the cluster center assigned to each datapoint.
    """
    if (verboseIter is not None) and (verboseIter > 0):
        print('{0}, Starting Sparse Affinity Propagation'.format(datetime.now()))
    # Convert to numpy array if not
    if not isinstance(row_array, np.ndarray):
        row_array = np.asarray(row_array)
    if not isinstance(col_array, np.ndarray):
        col_array = np.asarray(col_array)
    if not isinstance(data_array, np.ndarray):
        data_array = np.asarray(data_array)
    # Make sure row index/col index are int, data is float.
    # NOTE: builtin int/float are used as dtypes because the np.int/np.float
    # aliases were removed in NumPy 1.24 (astype(np.int) now raises).
    row_array, col_array, data_array = row_array.astype(int), col_array.astype(int), data_array.astype(float)
    # Get parameters (nSamplesOri, preference_list, damping)
    nSamplesOri = max((row_array.max(), col_array.max())) + 1
    preference_list = getPreferenceList(preference, nSamplesOri, data_array)
    if damping < 0.5 or damping >= 1:
        raise ValueError('damping must be >= 0.5 and < 1')
    # set diag of affinity/similarity matrix to preference_list
    row_array, col_array, data_array = sparseAP_cy.setDiag(row_array, col_array, data_array, np.asarray(preference_list))
    # reOrder by rowbased
    sortedLeftOriInd = np.lexsort((col_array, row_array)).astype(int)
    if parallel:
        rowBased_row_array = sparseAP_cy.npArrRearrange_int_para(row_array.astype(int), sortedLeftOriInd)
        rowBased_col_array = sparseAP_cy.npArrRearrange_int_para(col_array.astype(int), sortedLeftOriInd)
        S_rowBased_data_array = sparseAP_cy.npArrRearrange_float_para(data_array, sortedLeftOriInd)
    else:
        rowBased_row_array = sparseAP_cy.npArrRearrange_int(row_array.astype(int), sortedLeftOriInd)
        rowBased_col_array = sparseAP_cy.npArrRearrange_int(col_array.astype(int), sortedLeftOriInd)
        S_rowBased_data_array = sparseAP_cy.npArrRearrange_float(data_array, sortedLeftOriInd)
    # For FSAPC to work (specifically the R and A updates), each row/column of the
    # affinity/similarity matrix should have at least two datapoints. Samples not
    # meeting this condition are removed from computation (their exemplars are
    # themselves), or a minimal value of the corresponding column/row is copied.
    rowBased_row_array, rowBased_col_array, S_rowBased_data_array, rowLeftOriDict, singleSampleInds, nSamples = \
        sparseMatrixPrepare.rmSingleSamples(rowBased_row_array, rowBased_col_array, S_rowBased_data_array, nSamplesOri)
    # Initialize matrix A, R; remove degeneracies in data; get
    # col_indptr, row_indptr, row_to_col_ind_arr, col_to_row_ind_arr, kk_col_index
    S_rowBased_data_array, A_rowbased_data_array, R_rowbased_data_array, col_indptr, row_indptr, row_to_col_ind_arr, col_to_row_ind_arr, kk_col_index = \
        sparseMatrixPrepare.preCompute(rowBased_row_array, rowBased_col_array, S_rowBased_data_array)
    # Iterate the R/A updates until the convergence condition is met or max_iter
    # is reached. Convergence: more than convergence_iter iterations where rows
    # keep the exact same clustering, or a clustering whose similarity exceeds
    # convergence_percentage (if set; default allows one change per million
    # datapoints -- FSAPC is designed for large data sets).
    lastLabels, labels = np.empty((0), dtype=int), np.empty((0), dtype=int)
    convergeCount = 0
    for it in range(1, max_iter + 1):
        lastLabels = labels
        if parallel:
            R_rowbased_data_array = updateR_cython_para(
                S_rowBased_data_array, A_rowbased_data_array, R_rowbased_data_array, row_indptr, rowBased_row_array, rowBased_col_array, damping)
            A_rowbased_data_array = updateA_cython_para(
                A_rowbased_data_array, R_rowbased_data_array, col_indptr, row_to_col_ind_arr, col_to_row_ind_arr, kk_col_index, damping)
        else:
            R_rowbased_data_array = updateR_cython(
                S_rowBased_data_array, A_rowbased_data_array, R_rowbased_data_array, row_indptr, rowBased_row_array, rowBased_col_array, damping)
            A_rowbased_data_array = updateA_cython(
                A_rowbased_data_array, R_rowbased_data_array, col_indptr, row_to_col_ind_arr, col_to_row_ind_arr, kk_col_index, damping)
        AR_rowBased_data_array = A_rowbased_data_array + R_rowbased_data_array
        labels = sparseAP_cy.rowMaxARIndex(AR_rowBased_data_array, row_indptr).astype(int)
        # check convergence
        if convergence_percentage is None:
            if np.array_equal(lastLabels, labels) and len(labels) != 0:
                convergeCount += 1
            else:
                convergeCount = 0
        else:
            if sparseAP_cy.arrSamePercent(lastLabels, labels) >= convergence_percentage and len(labels) != 0:
                convergeCount += 1
            else:
                convergeCount = 0
        if convergeCount == convergence_iter and it < max_iter:
            if (verboseIter is not None) and (verboseIter > 0):
                print('{0}, Converged after {1} iterations.'.format(datetime.now(), it))
            break
        elif it == max_iter:
            if (verboseIter is not None) and (verboseIter > 0):
                print('{0}, Max iterations:{1} reached. labels doesnot change for last {2} iterations.'.format(datetime.now(), it, convergeCount))
        else:
            if (verboseIter is not None) and (verboseIter > 0) and ((it - 1) % verboseIter == 0):
                print('{0}, {1} of {2} iterations, labels doesnot change for last {3} iterations.'.format(datetime.now(), it, max_iter, convergeCount))
    # Converting labels back to original sample index
    sampleLables = np.asarray(rowBased_col_array[labels])
    if singleSampleInds is None or len(singleSampleInds) == 0:
        finalLabels = sampleLables
    else:
        finalLabels = [rowLeftOriDict[el] for el in sampleLables]
        for ind in sorted(singleSampleInds):  # re-insert samples removed in rmSingleSamples()
            finalLabels.insert(ind, ind)
        finalLabels = np.asarray(finalLabels)
    return finalLabels
class SAP():
    """
    Sparse Affinity Propagation (SAP) for large data (sparse affinity/similarity matrix)

    To test installation, in python shell, run:
    from pysapc import tests
    tests.testDense()
    tests.testSparse()

    Quick Start:
    Use pysapc to cluster sparse similarity matrix (scipy sparse matrix):
    from pysapc import SAP
    sap=SAP(preference,convergence_iter=convergence_iter,max_iter=max_iter,damping=damping,verboseIter=100)
    sap_exemplars=sap.fit_predict(X) # X should be a scipy sparse similarity matrix

    Parameters
    ----------------------
    X: coo_matrix,csr_matrix,lil_matrix, precomputed sparse affinity/similarity matrix
        (affinity/similarity could be cosine, pearson, euclidean distance, or others).
        The matrix doesn't need to be symmetric: s(A,B) can differ from s(B,A),
        and s(A,B) may exist while s(B,A) does not.
    preference: a numeric scalar(float), or a str of 'min'/'median', or a list/numpy
        1D array (length of samples). The preference of a datapoint K, p(K), set on
        the diagonal s(K,K), is the priori suitability of K to serve as an exemplar
        (cluster center); higher values lead to more exemplars. A good initial
        choice is the minimum ('min') or median ('median') of the full dense
        affinity/similarity matrix (not of the sparse one).
    convergence_iter: int, optional, default: 15. Number of iterations with no
        change (or change below 1.0-convergence_percentage) in exemplar labels.
    convergence_percentage: float, optional, default: 0.999999. Defines the
        convergence condition; 0.999999 means at most one in a million datapoints
        may change exemplar per "stable" iteration.
    max_iter: int, optional, default: 2000. Maximum number of iterations.
    damping: float, optional, default: 0.9. Must be between 0.5 and 1.
    verboseIter: int/None, default: 100. 0/None: silent; 1: print every iteration;
        100: print every 100 iterations.
    parallel: boolean, default: True. Use cython multiprocessing (recommended).

    Attributes
    ----------------
    exemplars_: the cluster centers for each datapoint -- the index (row index
        of the matrix) of the exemplar assigned to each datapoint.

    Notes
    ---------------
    To prepare the sparse matrix, either use a single cutoff for all samples
    (for example keep the top 20 percent of the full matrix) or use per-sample
    cutoffs so each sample keeps K nearest neighbors. Compare clustering results
    from several sparse matrices to find where the result reaches a plateau.

    References
    ----------------
    Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
    Between Data Points", Science Feb. 2007
    """

    def __init__(self, preference=None, convergence_iter=15, convergence_percentage=0.999999,
                 max_iter=2000, damping=0.9, verboseIter=100, parallel=True):
        """Store the clustering configuration; no computation happens here."""
        self.preference = preference
        self.convergence_iter = convergence_iter
        self.convergence_percentage = convergence_percentage
        self.max_iter = max_iter
        self.damping = damping
        self.verboseIter = verboseIter
        self.parallel = parallel
        self.exemplars_ = []  # filled by fit()/fit_predict()

    def _resolvePreference(self, preference):
        """Return the effective preference: the per-call argument overrides the
        instance default; raise ValueError if neither was supplied."""
        if (self.preference is None) and (preference is None):
            raise ValueError("Preference should be a numeric scalar, or a string of 'min' / 'median',\
                or a list/np 1D array(length of samples).\n Your input preference is: {0})".format(str(preference)))
        return preference if preference is not None else self.preference

    def denseToSparseAbvCutoff(self, denseMatrix, cutoff):
        """
        Remove data in denseMatrix below cutoff; convert the rest to sparse.

        Parameters:
        ----------------------
        denseMatrix: dense numpy matrix
        cutoff: int or float

        Returns
        ----------------------
        Scipy csr_matrix
        """
        maskArray = denseMatrix >= cutoff
        sparseMatrix = csr_matrix(
            (np.asarray(denseMatrix[maskArray]).reshape(-1), np.nonzero(maskArray)),
            shape=denseMatrix.shape)
        return sparseMatrix

    def denseToSparseTopPercentage(self, denseMatrix, percentage=10.0):
        """
        Keep the top `percentage` of data points; convert to sparse matrix.

        Parameters:
        ----------------------
        denseMatrix: dense numpy matrix
        percentage: float, default is 10.0
            percentage of top data points to keep (10.0 keeps the top 10%).

        Returns
        ----------------------
        Scipy csr_matrix
        """
        rowN, colN = denseMatrix.shape
        totalN = rowN * colN
        topN = min(int(totalN * (percentage / 100.0)), totalN)
        # NOTE(review): the flatten()[0] indexing assumes a numpy *matrix*
        # input (flatten keeps 2D); a plain ndarray would break here.
        arr = np.array(denseMatrix.flatten())[0]
        cutoff = arr[arr.argsort()[-(topN)]]
        return self.denseToSparseAbvCutoff(denseMatrix, cutoff)

    def fit(self, X, preference=None):
        """
        Apply Sparse Affinity Propagation (SAP) to the precomputed sparse
        affinity/similarity matrix X.

        Parameters
        ----------------------
        X: coo_matrix, csr_matrix or lil_matrix, precomputed sparse
            affinity/similarity matrix.
        preference: optional per-call preference (see class docstring);
            overrides the instance default when given.

        Notes
        ----------------------
        After fitting, the clustering result (exemplars / cluster centers)
        is available through the exemplars_ attribute; fit_predict() returns
        it directly.
        """
        preference_input = self._resolvePreference(preference)
        row_array, col_array, data_array = matixToRowColDataArr(X)
        self.exemplars_ = sparseAffinityPropagation(
            row_array, col_array, data_array,
            preference=preference_input, convergence_iter=self.convergence_iter,
            convergence_percentage=self.convergence_percentage,
            max_iter=self.max_iter, damping=self.damping,
            verboseIter=self.verboseIter, parallel=self.parallel)
        return self

    def fit_predict(self, X, preference=None):
        """
        Apply Sparse Affinity Propagation (SAP) to the precomputed sparse
        affinity/similarity matrix X and return the exemplars.

        Parameters
        ----------------------
        X: coo_matrix, csr_matrix or lil_matrix, precomputed sparse
            affinity/similarity matrix.
        preference: optional per-call preference (see class docstring);
            overrides the instance default when given.

        Returns
        ----------------------
        The exemplars (cluster centers) for each datapoint: index (row index
        of the matrix) of the cluster center assigned to each datapoint.
        """
        # BUGFIX: previously the resolved per-call preference was computed but
        # self.preference was passed instead, silently ignoring the argument.
        self.fit(X, preference=preference)
        return self.exemplars_
| 56.369955 | 162 | 0.707251 |
065a5b5c98e77b571902e6e88743d343961f9788 | 6,419 | py | Python | troveclient/tests/test_instances.py | citrix-openstack-build/python-troveclient | 4a82591c79b622757dc2e6f1c89a65ca4064f9d3 | [
"Apache-2.0"
] | null | null | null | troveclient/tests/test_instances.py | citrix-openstack-build/python-troveclient | 4a82591c79b622757dc2e6f1c89a65ca4064f9d3 | [
"Apache-2.0"
] | null | null | null | troveclient/tests/test_instances.py | citrix-openstack-build/python-troveclient | 4a82591c79b622757dc2e6f1c89a65ca4064f9d3 | [
"Apache-2.0"
] | null | null | null | from testtools import TestCase
from mock import Mock
from troveclient import instances
from troveclient import base
"""
Unit tests for instances.py
"""
class InstanceTest(TestCase):
    """Tests for instances.Instance with a stubbed constructor and manager."""

    def setUp(self):
        super(InstanceTest, self).setUp()
        # Stub __init__ so an Instance can be built without any server state;
        # the original is restored in tearDown.
        self.orig__init = instances.Instance.__init__
        instances.Instance.__init__ = Mock(return_value=None)
        self.instance = instances.Instance()
        self.instance.manager = Mock()

    def tearDown(self):
        super(InstanceTest, self).tearDown()
        instances.Instance.__init__ = self.orig__init

    def test___repr__(self):
        self.instance.name = "instance-1"
        self.assertEqual('<Instance: instance-1>', repr(self.instance))

    def test_list_databases(self):
        expected = ['database1', 'database2']
        self.instance.manager.databases = Mock()
        self.instance.manager.databases.list = Mock(return_value=expected)
        self.assertEqual(expected, self.instance.list_databases())

    def test_delete(self):
        delete_mock = Mock(return_value=None)
        self.instance.manager.delete = delete_mock
        self.instance.delete()
        self.assertEqual(1, delete_mock.call_count)

    def test_restart(self):
        restart_mock = Mock(return_value=None)
        self.instance.manager.restart = restart_mock
        self.instance.id = 1
        self.instance.restart()
        self.assertEqual(1, restart_mock.call_count)
class InstancesTest(TestCase):
    """Unit tests for the instances.Instances manager."""

    def setUp(self):
        super(InstancesTest, self).setUp()
        # Bypass the real constructor and wire up only the attributes the
        # manager methods touch; restored in tearDown.
        self._original_init = instances.Instances.__init__
        instances.Instances.__init__ = Mock(return_value=None)
        self.instances = instances.Instances()
        self.instances.api = Mock()
        self.instances.api.client = Mock()
        self.instances.resource_class = Mock(return_value="instance-1")
        # Make base.getid deterministic for the duration of each test.
        self._original_getid = base.getid
        base.getid = Mock(return_value="instance1")

    def tearDown(self):
        super(InstancesTest, self).tearDown()
        instances.Instances.__init__ = self._original_init
        base.getid = self._original_getid

    def test_create(self):
        # Echo the arguments back so the request payload can be inspected.
        self.instances._create = Mock(
            side_effect=lambda path, body, inst: (path, body, inst))
        path, body, inst = self.instances.create(
            "test-name", 103, "test-volume", ['db1', 'db2'], ['u1', 'u2'])
        self.assertEqual("/instances", path)
        self.assertEqual("instance", inst)
        self.assertEqual(['db1', 'db2'], body["instance"]["databases"])
        self.assertEqual(['u1', 'u2'], body["instance"]["users"])
        self.assertEqual("test-name", body["instance"]["name"])
        self.assertEqual("test-volume", body["instance"]["volume"])
        self.assertEqual(103, body["instance"]["flavorRef"])

    def test__list(self):
        # A response without a body must raise.
        self.instances.api.client.get = Mock(return_value=('resp', None))
        self.assertRaises(Exception, self.instances._list, "url", None)
        # A well-formed body exposes pagination links on the result.
        links = [{'href': 'http://test.net/test_file', 'rel': 'next'}]
        body = Mock()
        body.get = Mock(return_value=links)
        body.__getitem__ = Mock(return_value='instance1')
        self.instances.api.client.get = Mock(return_value=('resp', body))
        self.assertEqual(links, self.instances._list("url", None).links)

    def test_list(self):
        # Pass the arguments straight through so they can be checked.
        self.instances._list = Mock(
            side_effect=lambda path, inst, limit, marker: (path, inst,
                                                           limit, marker))
        self.assertEqual(("/instances", "instances",
                          "test-limit", "test-marker"),
                         self.instances.list("test-limit", "test-marker"))

    def test_get(self):
        self.instances._get = Mock(side_effect=lambda path, inst: (path, inst))
        self.assertEqual(('/instances/instance1', 'instance'),
                         self.instances.get(1))

    def test_delete(self):
        resp = Mock()
        resp.status = 200
        self.instances.api.client.delete = Mock(return_value=(resp, None))
        self.instances.delete('instance1')
        # A non-2xx status is surfaced as an exception.
        resp.status = 500
        self.assertRaises(Exception, self.instances.delete, 'instance1')

    def test__action(self):
        body = Mock()
        resp = Mock()
        resp.status = 200
        self.instances.api.client.post = Mock(return_value=(resp, body))
        self.assertEqual('instance-1', self.instances._action(1, body))
        # With no response body there is nothing to wrap in a resource.
        self.instances.api.client.post = Mock(return_value=(resp, None))
        self.assertEqual(None, self.instances._action(1, body))

    def _set_action_mock(self):
        # Capture the (instance_id, body) pair handed to _action.
        def _capture(instance_id, body):
            self._instance_id = instance_id
            self._body = body

        self._instance_id = None
        self._body = None
        self.instances._action = Mock(side_effect=_capture)

    def test_resize_volume(self):
        self._set_action_mock()
        self.instances.resize_volume(152, 512)
        self.assertEqual(152, self._instance_id)
        self.assertEqual({"resize": {"volume": {"size": 512}}}, self._body)

    def test_resize_instance(self):
        self._set_action_mock()
        self.instances.resize_instance(4725, 103)
        self.assertEqual(4725, self._instance_id)
        self.assertEqual({"resize": {"flavorRef": 103}}, self._body)

    def test_restart(self):
        self._set_action_mock()
        self.instances.restart(253)
        self.assertEqual(253, self._instance_id)
        self.assertEqual({'restart': {}}, self._body)
class InstanceStatusTest(TestCase):
    """The InstanceStatus constants must match the API's literal strings."""

    def test_constants(self):
        # Every status constant is simply its own name as a string.
        for status in ("ACTIVE", "BLOCKED", "BUILD", "FAILED",
                       "REBOOT", "RESIZE", "SHUTDOWN"):
            self.assertEqual(status, getattr(instances.InstanceStatus, status))
ff2dda310cbcd6cb17c9a4925c560fe506776d13 | 117 | py | Python | app/objects/models.py | tsuuki/gulag | 07323980cb7295e8481f2b296be7d2a9a7423b31 | [
"MIT"
] | 4 | 2021-11-09T09:47:24.000Z | 2022-03-17T21:15:20.000Z | app/objects/models.py | tsuuki/gulag | 07323980cb7295e8481f2b296be7d2a9a7423b31 | [
"MIT"
] | 4 | 2022-02-13T08:53:05.000Z | 2022-03-16T19:39:46.000Z | app/objects/models.py | tsuuki/gulag | 07323980cb7295e8481f2b296be7d2a9a7423b31 | [
"MIT"
] | 1 | 2022-03-16T19:32:26.000Z | 2022-03-16T19:32:26.000Z | from pydantic import BaseModel
class OsuBeatmapRequestForm(BaseModel):
    """Request body for a bulk osu! beatmap lookup.

    NOTE(review): field names are capitalized, presumably to match the
    form keys the osu! client sends - confirm against the handler that
    parses this model.
    """

    # Beatmap filenames to resolve.
    Filenames: list[str]
    # Beatmap ids to resolve.
    Ids: list[int]
| 16.714286 | 39 | 0.752137 |
cd670e980b3537e2fa9df1a9af8ea3b466a9cbc6 | 6,079 | py | Python | ingestion/setup.py | shannonbradshaw/OpenMetadata | 07ae09bed147b42864bf8c43b6adacfe7fc1eb52 | [
"Apache-2.0"
] | null | null | null | ingestion/setup.py | shannonbradshaw/OpenMetadata | 07ae09bed147b42864bf8c43b6adacfe7fc1eb52 | [
"Apache-2.0"
] | null | null | null | ingestion/setup.py | shannonbradshaw/OpenMetadata | 07ae09bed147b42864bf8c43b6adacfe7fc1eb52 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Set
from setuptools import find_namespace_packages, setup
def get_long_description():
    """Return the contents of README.md (next to this setup.py).

    Used as the package's ``long_description`` for PyPI.

    Returns:
        str: the full README text.
    """
    root = os.path.dirname(__file__)
    # Force UTF-8 so the build does not depend on the machine's locale
    # encoding (the README contains non-ASCII characters on some platforms).
    with open(os.path.join(root, "README.md"), encoding="utf-8") as f:
        return f.read()
# Core dependencies installed for every openmetadata-ingestion install,
# independent of which connector plugin extras are selected.
base_requirements = {
    "openmetadata-ingestion-core==0.9.0",
    "commonregex",
    "idna<3,>=2.5",
    "click>=7.1.1",
    "typing_extensions>=3.7.4",
    "mypy_extensions>=0.4.3",
    "typing-inspect",
    "pydantic>=1.7.4",
    "pydantic[email]>=1.7.2",
    "google>=3.0.0",
    "google-auth>=1.33.0",
    "python-dateutil>=2.8.1",
    "email-validator>=1.0.3",
    "wheel~=0.36.2",
    "python-jose==3.3.0",
    "sqlalchemy>=1.4.0",
    "sql-metadata~=2.0.0",
    "requests>=2.23",
    "cryptography",
    "Jinja2>=2.11.3",
    "PyYAML",
    "jsonschema",
    "sqllineage==1.3.3",
}

# Extra dependencies for the Django-based report server
# (exposed below as the "report-server" extra).
report_requirements = {
    "asgiref==3.4.1",
    "Django==3.2.7",
    "pytz==2021.1",
    "sqlparse==0.4.2",
}

# Plugin names bundled with the base install.
# NOTE(review): "sql-metadata~=2.0.0" is a requirement string mixed into a
# set of plugin names, and it is already pinned in base_requirements -
# confirm whether it belongs here.
base_plugins = {
    "query-parser",
    "metadata-usage",
    "file-stage",
    "sql-metadata~=2.0.0",
}
# Optional dependencies per connector/feature, exposed as pip extras
# (e.g. `pip install openmetadata-ingestion[bigquery]`).
# NOTE(review): the empty "{}" values below are dicts, not sets (despite the
# Set[str] annotation); this is harmless because only list(dependencies) and
# set.union are applied to them, but set() would be consistent.
plugins: Dict[str, Set[str]] = {
    "airflow-container": {
        "marshmallow-sqlalchemy>=0.26.0",
        "SQLAlchemy-Utils>=0.38.0",
        "pymysql>=1.0.2",
        "requests==2.26.0",
    },
    # NOTE(review): trailing space inside "markupsafe==2.0.1 " looks accidental.
    "airflow-container-1.10.15": {"markupsafe==2.0.1 ", "requests==2.23.0"},
    "amundsen": {"neo4j~=4.4.0"},
    "athena": {"PyAthena[SQLAlchemy]"},
    "atlas": {},
    "azuresql": {"pyodbc"},
    "bigquery": {
        "sqlalchemy-bigquery==1.2.2",
        "pyarrow~=6.0.1",
        "google-cloud-datacatalog==3.6.2",
    },
    "bigquery-usage": {"google-cloud-logging", "cachetools"},
    "docker": {"python_on_whales==0.34.0"},
    "backup": {"boto3~=1.19.12"},
    "dbt": {},
    "druid": {"pydruid>=0.6.2"},
    "elasticsearch": {"elasticsearch==7.13.1"},
    "glue": {"boto3~=1.19.12"},
    "dynamodb": {"boto3~=1.19.12"},
    "hive": {
        "pyhive~=0.6.3",
        "thrift~=0.13.0",
        "sasl==0.3.1",
        "thrift-sasl==0.4.3",
        "presto-types-parser==0.0.2",
    },
    "kafka": {"confluent_kafka>=1.5.0", "fastavro>=1.2.0"},
    "ldap-users": {"ldap3==2.9.1"},
    "looker": {"looker-sdk==21.12.2"},
    "mssql": {"sqlalchemy-pytds>=0.3"},
    "mssql-odbc": {"pyodbc"},
    "mysql": {"pymysql>=1.0.2"},
    "oracle": {"cx_Oracle"},
    "powerbi": {"python-power-bi==0.1.2"},
    "presto": {"pyhive~=0.6.3"},
    "trino": {"sqlalchemy-trino==0.4.1"},
    "postgres": {"pymysql>=1.0.2", "psycopg2-binary", "GeoAlchemy2"},
    "redash": {"redash-toolbelt==0.1.8"},
    "redshift": {"sqlalchemy-redshift==0.8.9", "psycopg2-binary", "GeoAlchemy2"},
    "redshift-usage": {
        "sqlalchemy-redshift==0.8.9",
        "psycopg2-binary",
        "GeoAlchemy2",
    },
    "snowflake": {"snowflake-sqlalchemy<=1.3.2", "cryptography"},
    "snowflake-usage": {"snowflake-sqlalchemy<=1.3.2"},
    "sample-entity": {"faker~=8.1.1"},
    "superset": {},
    "tableau": {"tableau-api-lib==0.1.29"},
    "vertica": {"sqlalchemy-vertica[vertica-python]>=0.0.5"},
    # The report server reuses the Django requirement set defined above.
    "report-server": report_requirements,
    "airflow": {"apache-airflow >= 1.10.2"},
    "salesforce": {"simple_salesforce~=1.11.4"},
    "okta": {"okta~=2.3.0"},
    "mlflow": {"mlflow-skinny~=1.22.0"},
    "sklearn": {"scikit-learn==1.0.2"},
    "db2": {"ibm-db-sa==0.3.7"},
    "clickhouse": {"clickhouse-driver==0.2.3", "clickhouse-sqlalchemy==0.2.0"},
    "databricks": {"sqlalchemy-databricks==0.1.0"},
    "singlestore": {"pymysql>=1.0.2"},
    "azure-sso": {"msal~=1.17.0"},
}
# Developer tooling (code generation, formatting, publishing) - the "dev" extra.
dev = {
    "boto3==1.20.14",
    "botocore==1.23.14",
    "datamodel-code-generator==0.11.14",
    "black==21.12b0",  # required for datamodel-code-generator==0.11.14
    "pycln",
    "docker",
    "google-cloud-storage==1.43.0",
    "twine",
}

# Test-only dependencies - the "test" extra.
test = {
    "isort",
    "pre-commit",
    "pylint",
    "pytest==7.0.0",
    "pytest-cov",
    "faker",
    "coverage",
    # sklearn integration
    "scikit-learn==1.0.2",
    "pandas==1.3.5",
}

# Options for the "build_exe" command (passed via setup(options=...) below);
# the _cffi_backend C extension must be bundled explicitly.
build_options = {"includes": ["_cffi_backend"]}
# Package metadata and entry points. Each key of `plugins` becomes a pip
# extra; the "all" extra bundles everything except the two extras excluded
# below (mutually conflicting / niche installs).
setup(
    name="openmetadata-ingestion",
    version="0.9.2.dev1",
    url="https://open-metadata.org/",
    author="OpenMetadata Committers",
    license="Apache License 2.0",
    description="Ingestion Framework for OpenMetadata",
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    python_requires=">=3.8",
    options={"build_exe": build_options},
    package_dir={"": "src"},
    zip_safe=False,
    dependency_links=[],
    project_urls={
        "Documentation": "https://docs.open-metadata.org/",
        "Source": "https://github.com/open-metadata/OpenMetadata",
    },
    packages=find_namespace_packages(where="./src", exclude=["tests*"]),
    entry_points={
        # `metadata` CLI entry point.
        "console_scripts": ["metadata = metadata.cmd:metadata"],
        # Registers the Airflow provider shipped with this package.
        "apache_airflow_provider": [
            "provider_info = airflow_provider_openmetadata:get_provider_config"
        ],
    },
    install_requires=list(base_requirements),
    extras_require={
        "base": list(base_requirements),
        "dev": list(dev),
        "test": list(test),
        # One extra per connector plugin.
        **{plugin: list(dependencies) for (plugin, dependencies) in plugins.items()},
        "all": list(
            base_requirements.union(
                *[
                    requirements
                    for plugin, requirements in plugins.items()
                    if plugin not in {"airflow-container-1.10.15", "db2"}
                ]
            )
        ),
    },
)
| 30.70202 | 85 | 0.580852 |
9d21e37ad827c7dcea37e14f26fa7b9c170a43e2 | 1,327 | py | Python | Allura/allura/lib/widgets/__init__.py | lym/allura-git | b2b53d0c1ba8b1e48f176ad75cf64675b3545d69 | [
"Apache-2.0"
] | 1 | 2017-07-31T23:13:58.000Z | 2017-07-31T23:13:58.000Z | Allura/allura/lib/widgets/__init__.py | lym/allura-git | b2b53d0c1ba8b1e48f176ad75cf64675b3545d69 | [
"Apache-2.0"
] | null | null | null | Allura/allura/lib/widgets/__init__.py | lym/allura-git | b2b53d0c1ba8b1e48f176ad75cf64675b3545d69 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .discuss import Post, Thread, Discussion
from .subscriptions import SubscriptionForm
from .oauth_widgets import OAuthApplicationForm, OAuthRevocationForm
from .auth_widgets import LoginForm, ForgottenPasswordForm, DisableAccountForm
from .vote import VoteForm
# Explicit public API of the widgets package: limits `from ... import *`
# to the re-exported widget classes above.
__all__ = [
    'Post', 'Thread', 'Discussion', 'SubscriptionForm', 'OAuthApplicationForm', 'OAuthRevocationForm', 'LoginForm',
    'ForgottenPasswordForm', 'DisableAccountForm', 'VoteForm']
| 49.148148 | 115 | 0.73474 |
87aa9faa692a30c59258295073b905e69bfb11a4 | 5,387 | py | Python | calm/dsl/log/logger.py | nutanixdev/calm-dsl | 90e1c583d7b9ac905cdfb3e2ad27f9f930e69831 | [
"Apache-2.0"
] | null | null | null | calm/dsl/log/logger.py | nutanixdev/calm-dsl | 90e1c583d7b9ac905cdfb3e2ad27f9f930e69831 | [
"Apache-2.0"
] | null | null | null | calm/dsl/log/logger.py | nutanixdev/calm-dsl | 90e1c583d7b9ac905cdfb3e2ad27f9f930e69831 | [
"Apache-2.0"
] | null | null | null | import logging
import inspect
from colorlog import ColoredFormatter
import time
import sys
class StdErrFilter(logging.Filter):
    """Filter attached to the stderr stream handler.

    Passes every record at DEBUG severity or above; records below
    DEBUG (custom levels under 10) are dropped.
    """

    def filter(self, rec):
        # True means the record is emitted on stderr.
        return logging.DEBUG <= rec.levelno
class CustomLogging:
    """Thin wrapper around the stdlib ``logging`` module.

    Provides a colorized stderr handler and wrapper log methods
    (``debug`` / ``info`` / ``warning`` / ``error`` / ``exception`` /
    ``critical``) that prepend the caller's line number to every message.
    """

    # Class-level (shared) defaults; mutated via the classmethods below so
    # that a later get_logger() call on any handle picks them up.
    _VERBOSE_LEVEL = 20
    _SHOW_TRACE = False

    # Convenience aliases for the stdlib level constants.
    DEBUG = logging.DEBUG
    INFO = logging.INFO
    WARNING = logging.WARNING
    ERROR = logging.ERROR
    CRITICAL = logging.CRITICAL

    def __init__(self, name):
        """
        Build CustomLogger based on logging module

        Args:
            name(str): name of the module/logger

        Returns:
            None
        """
        self._ch1 = logging.StreamHandler()
        self._ch1.addFilter(StdErrFilter())
        # add custom formatter to console handler
        self.__addCustomFormatter(self._ch1)
        # create custom logger
        self._logger = logging.getLogger(name)
        # add console to logger
        self._logger.addHandler(self._ch1)
        # Add show trace option
        self.show_trace = False

    @staticmethod
    def __add_caller_info(msg):
        # stack[0] is this helper, stack[1] the public wrapper (info()/
        # error()/...), so stack[2] is the actual caller whose line number
        # we want. This is why the wrappers must call this helper directly.
        stack = inspect.stack()
        # filename = stack[2][1]
        # func = stack[2][3]
        ln = stack[2][2]
        # The leading ":<line>]" closes the unbalanced "[%(name)s" prefix in
        # the formatter (see __addCustomFormatter), yielding "[name:line] msg".
        return ":{}] {}".format(ln, msg)

    @classmethod
    def set_verbose_level(cls, lvl):
        # Shared across all handles; applied lazily in get_logger().
        cls._VERBOSE_LEVEL = lvl

    @classmethod
    def enable_show_trace(cls):
        # Shared across all handles; applied lazily in get_logger().
        cls._SHOW_TRACE = True

    def get_logger(self):
        # Re-apply the class-level settings on every access so changes made
        # via set_verbose_level()/enable_show_trace() take effect even after
        # the handle was created.
        self.set_logger_level(self._VERBOSE_LEVEL)
        self.show_trace = self._SHOW_TRACE
        return self._logger

    def get_logging_levels(self):
        # Level names supported by this wrapper, in increasing severity.
        return ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]

    def set_logger_level(self, lvl):
        """sets the logger verbose level"""
        self._logger.setLevel(lvl)

    def info(self, msg, nl=True, **kwargs):
        """
        info log level

        Args:
            msg (str): message to log
            nl (bool): Add newline (default: True)

        Returns:
            None
        """
        logger = self.get_logger()
        if not nl:
            # Temporarily end the record with a space instead of "\n" so a
            # follow-up log line continues on the same console line.
            for handler in logger.handlers:
                handler.terminator = " "
        logger.info(self.__add_caller_info(msg), **kwargs)
        if not nl:
            # Restore the default terminator for subsequent records.
            for handler in logger.handlers:
                handler.terminator = "\n"

    def warning(self, msg, *args, **kwargs):
        """
        warning log level

        Args:
            msg (str): message to log

        Returns:
            None
        """
        logger = self.get_logger()
        return logger.warning(self.__add_caller_info(msg), *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """
        error log level

        Args:
            msg (str): message to log

        Returns:
            None
        """
        logger = self.get_logger()
        if self.show_trace:
            # NOTE(review): logging's ``stack_info`` expects a bool; the
            # sys.exc_info() tuple is merely truthy here, so this prints the
            # current call stack rather than the active exception's
            # traceback. ``exc_info`` may have been intended - confirm.
            kwargs["stack_info"] = sys.exc_info()
        return logger.error(self.__add_caller_info(msg), *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        """
        exception log level

        Args:
            msg (str): message to log

        Returns:
            None
        """
        logger = self.get_logger()
        if self.show_trace:
            # NOTE(review): same stack_info/exc_info concern as in error();
            # Logger.exception already attaches exc_info on its own.
            kwargs["stack_info"] = sys.exc_info()
        return logger.exception(self.__add_caller_info(msg), *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """
        critical log level

        Args:
            msg (str): message to log

        Returns:
            None
        """
        logger = self.get_logger()
        if self.show_trace:
            # NOTE(review): same stack_info/exc_info concern as in error().
            kwargs["stack_info"] = sys.exc_info()
        return logger.critical(self.__add_caller_info(msg), *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        """
        debug log level

        Args:
            msg (str): message to log

        Returns:
            None
        """
        logger = self.get_logger()
        return logger.debug(self.__add_caller_info(msg), *args, **kwargs)

    def __addCustomFormatter(self, ch):
        """
        add ColorFormatter with custom colors for each log level

        Args:
            None

        Returns
            None
        """
        # The unbalanced "[%(name)s" is deliberate: __add_caller_info
        # prefixes each message with ":<line>]", completing the bracket.
        fmt = (
            "[%(asctime)s] "
            "[%(log_color)s%(levelname)s%(reset)s] "
            "[%(name)s%(message)s"
        )
        formatter = ColoredFormatter(
            fmt,
            datefmt="%Y-%m-%d %H:%M:%S",
            reset=True,
            log_colors={
                "DEBUG": "purple",
                "INFO": "green",
                "WARNING": "yellow",
                "ERROR": "red",
                "CRITICAL": "red",
            },
        )
        # Emit timestamps in UTC rather than local time.
        formatter.converter = time.gmtime
        # add formatter to console handler
        ch.setFormatter(formatter)
def get_logging_handle(name):
    """Return a CustomLogging handle for the given module *name*."""
    return CustomLogging(name)
| 22.826271 | 77 | 0.537776 |
7f1713e3f1464ae465aaaf669253fc935e20d89c | 651 | py | Python | tests/kyu_8_tests/test_fake_binary.py | the-zebulan/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 40 | 2016-03-09T12:26:20.000Z | 2022-03-23T08:44:51.000Z | tests/kyu_8_tests/test_fake_binary.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | null | null | null | tests/kyu_8_tests/test_fake_binary.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z | import unittest
from katas.kyu_8.fake_binary import fake_bin
class FakeBinaryTestCase(unittest.TestCase):
    """fake_bin maps each digit 0-4 to '0' and each digit 5-9 to '1'."""

    def _check(self, digits, expected):
        # Shared assertion helper so every case stays a one-liner.
        self.assertEqual(fake_bin(digits), expected)

    def test_equal_1(self):
        self._check('45385593107843568', '01011110001100111')

    def test_equal_2(self):
        self._check('509321967506747', '101000111101101')

    def test_equal_3(self):
        self._check('366058562030849490134388085',
                    '011011110000101010000011011')

    def test_equal_4(self):
        self._check('15889923', '01111100')

    def test_equal_5(self):
        self._check('800857237867', '100111001111')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.