text stringlengths 4 1.02M | meta dict |
|---|---|
import numpy as np
import cv2
# Pixel-to-world conversion factors, based on US highway lane geometry
# (assumes ~30 m of road visible vertically and a 3.7 m lane width spanning
# ~700 px in the warped image — TODO confirm against the calibration images).
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Define 4 source points for perspective transformation (road trapezoid)
src_points = np.float32([[200,720],[453,547],[835,547],[1100,720]])
# Define 4 destination points for perspective transformation (top-down rectangle)
dst_points = np.float32([[320,720],[320,576],[960,576],[960,720]])
def undistort(img, mtx, dist):
    '''
    Removes lens distortion from an image.

    Args:
        img: distorted input image.
        mtx: camera matrix from calibration.
        dist: distortion coefficients from calibration.

    Returns:
        The undistorted image.
    '''
    corrected = cv2.undistort(img, mtx, dist, None, mtx)
    return corrected
def get_eagle_eye(img):
    '''
    Warps the image to a top-down ("eagle eye") view using the module-level
    src_points -> dst_points correspondence.
    '''
    height, width = img.shape[0], img.shape[1]
    # Perspective matrix mapping the road trapezoid onto a rectangle.
    transform = cv2.getPerspectiveTransform(src_points, dst_points)
    return cv2.warpPerspective(img, transform, (width, height),
                               flags=cv2.INTER_LINEAR)
def get_inverse_transform(img):
    '''
    Warps a top-down image back into the original camera perspective
    (inverse of get_eagle_eye), e.g. to overlay detected lane boundaries.
    '''
    height, width = img.shape[0], img.shape[1]
    # Swap source and destination to obtain the inverse mapping.
    inverse = cv2.getPerspectiveTransform(dst_points, src_points)
    return cv2.warpPerspective(img, inverse, (width, height),
                               flags=cv2.INTER_LINEAR)
def draw_points(img, points):
    '''
    Draws the closed polygon defined by `points` onto a copy of the image.
    Used to visualize the perspective-transform source/destination regions.

    Args:
        img: image to draw on (left unmodified; a copy is returned).
        points: sequence of (x, y) vertices; consecutive vertices are joined
            and the last is connected back to the first. Generalized from the
            original fixed 4-point version; 4-point behavior is unchanged.

    Returns:
        A copy of `img` with the polygon drawn in red (BGR (0,0,255)).
    '''
    temp = np.copy(img)
    num_points = len(points)
    for i in range(num_points):
        p_start = tuple(points[i])
        # Wrap around so the polygon is closed.
        p_end = tuple(points[(i + 1) % num_points])
        temp = cv2.line(temp, p_start, p_end, color=(0, 0, 255), thickness=4)
    return temp
def _gradient_x_binary(channel, thresh, kernel_size):
    '''Blur one channel, take the scaled absolute x-Sobel gradient, threshold to 0/1.'''
    blurred = cv2.GaussianBlur(channel, (kernel_size, kernel_size), 0)
    sobelx = cv2.Sobel(blurred, cv2.CV_64F, 1, 0)  # derivative in x
    abs_sobelx = np.absolute(sobelx)  # accentuate lines away from horizontal
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    binary = np.zeros_like(scaled_sobel)
    binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return binary


def get_binary_image(img, l_thresh=(60, 255), v_thresh=(30, 100), y_thresh=(30, 255), kernel_size=15):
    '''
    Applies x-gradient thresholds to several color channels to detect road lanes.

    White lane pixels are taken from the HLS L channel OR the YUV Y channel;
    yellow lane pixels from the YUV V channel. The three identical Sobel
    pipelines of the original were factored into _gradient_x_binary.

    Args:
        img: RGB input image.
        l_thresh, v_thresh, y_thresh: (low, high) gradient thresholds for the
            HLS-L, YUV-V and YUV-Y channels respectively.
        kernel_size: Gaussian blur kernel size (odd).

    Returns:
        A single-channel uint8 binary image (0/1) of candidate lane pixels.
    '''
    img = np.copy(img)
    # np.float was removed in NumPy 1.24; np.float64 is the identical dtype.
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:, :, 1]
    hls_l_binary = _gradient_x_binary(l_channel, l_thresh, kernel_size)
    yuv = cv2.cvtColor(img, cv2.COLOR_RGB2YUV).astype(np.float64)
    y_channel = yuv[:, :, 0]
    v_channel = yuv[:, :, 2]
    yuv_y_binary = _gradient_x_binary(y_channel, y_thresh, kernel_size)
    # White lanes: strong gradient in either luminance channel.
    white = np.zeros_like(yuv_y_binary)
    white[(hls_l_binary == 1) | (yuv_y_binary == 1)] = 1
    # Yellow lanes: band-passed gradient in the V (chroma) channel.
    yellow = _gradient_x_binary(v_channel, v_thresh, kernel_size)
    combined = np.zeros_like(yellow)
    combined[(yellow == 1) | (white == 1)] = 1
    return combined
def process_frame(frame, mtx, dist):
    '''
    Per-frame pipeline: undistortion -> binary thresholding -> top-down warp.

    Returns:
        (undistorted, thresholded, top_down) intermediate images.
    '''
    corrected = undistort(frame, mtx, dist)
    binary = get_binary_image(corrected)
    warped = get_eagle_eye(binary)
    return corrected, binary, warped
def find_lanes_hist(top_down):
    '''
    Finds the base x position of each road lane using a column histogram.

    Sums the binary pixels in each column over the bottom fifth of the image;
    the histogram peaks in the left and right halves give the lane origins.

    Args:
        top_down: 2D binary (0/1) warped image.

    Returns:
        (leftx_base, rightx_base): column indices of the two lane bases.
    '''
    # Take a histogram of the bottom fifth of the image.
    histogram = np.sum(top_down[int(top_down.shape[0] * 4 / 5):, :], axis=0)
    # np.int was removed in NumPy 1.24; the builtin int behaves identically here.
    midpoint = int(histogram.shape[0] / 2)
    # Exclude the outer margins where lanes are not expected to start.
    margin = 200
    leftx_base = np.argmax(histogram[margin:midpoint - margin]) + margin
    rightx_base = np.argmax(histogram[midpoint + margin:top_down.shape[1] - margin]) + midpoint + margin
    return leftx_base, rightx_base
def get_poly_from_last(binary_warped, left_fit, right_fit, margin=100):
    '''
    Fits a polynomial to the detected lanes. Assumes the previous frame was
    properly detected, so pixels are searched within `margin` of the previous
    quadratic fits instead of running a full sliding-window search.

    Returns:
        (left_fit, right_fit, left_curverad, right_curverad), or
        (None, None, 0, 0) when either lane yields no pixels.
    '''
    nonzero = binary_warped.nonzero()
    nz_y = np.array(nonzero[0])
    nz_x = np.array(nonzero[1])

    def _near_fit(fit):
        # Band of width 2*margin centered on the previous polynomial.
        center = fit[0] * (nz_y ** 2) + fit[1] * nz_y + fit[2]
        return (nz_x > center - margin) & (nz_x < center + margin)

    left_lane_inds = _near_fit(left_fit)
    right_lane_inds = _near_fit(right_fit)
    # Extract left and right line pixel positions.
    leftx, lefty = nz_x[left_lane_inds], nz_y[left_lane_inds]
    rightx, righty = nz_x[right_lane_inds], nz_y[right_lane_inds]
    if min(leftx.size, lefty.size, rightx.size, righty.size) == 0:
        return None, None, 0, 0
    # Fit a second order polynomial to each lane.
    new_left_fit = np.polyfit(lefty, leftx, 2)
    new_right_fit = np.polyfit(righty, rightx, 2)
    left_curverad, right_curverad = get_curvature(leftx, lefty, rightx, righty)
    return new_left_fit, new_right_fit, left_curverad, right_curverad
def get_polynomial(top_down, leftx_base, rightx_base, nwindows=9, margin=100, minpix=100, debug=False):
    '''
    Sliding-window lane search: stacks `nwindows` windows of half-width
    `margin` upward from each lane base, recentering on the mean x of the
    pixels found, then fits a second-order polynomial to each lane.

    Args:
        top_down: binary warped (top-down) image.
        leftx_base, rightx_base: starting x columns (e.g. from find_lanes_hist).
        nwindows: number of vertical windows.
        margin: half-width of each window.
        minpix: minimum pixels in a window required to recenter the next one.
        debug: when True, draw windows and lane pixels on the returned image.

    Returns:
        (left_fit, right_fit, left_curverad, right_curverad, out_img), or
        (None, None, 0, 0, None) when either lane yields no pixels.
    '''
    # np.int was removed in NumPy 1.24; the builtin int behaves identically here.
    window_height = int(top_down.shape[0] / nwindows)
    # Identify the x and y positions of all nonzero pixels in the image.
    nonzero = top_down.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions, updated for each window.
    leftx_current = leftx_base
    rightx_current = rightx_base
    left_lane_inds = []
    right_lane_inds = []
    out_img = np.dstack((top_down, top_down, top_down)) * 255
    # Step through the windows one by one.
    for window in range(nwindows):
        # Window boundaries in x and y (and right and left).
        win_y_low = top_down.shape[0] - (window + 1) * window_height
        win_y_high = top_down.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        if debug:
            # Draw the windows on the visualization image.
            cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
            cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Nonzero pixels within each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If > minpix pixels were found, recenter next window on their mean x.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the per-window index arrays.
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    if leftx.size == 0 or rightx.size == 0 or lefty.size == 0 or righty.size == 0:
        return None, None, 0, 0, None
    # Fit a second order polynomial to each lane.
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    if debug:
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    left_curverad, right_curverad = get_curvature(leftx, lefty, rightx, righty)
    return left_fit, right_fit, left_curverad, right_curverad, out_img
def get_polynomial2(top_down, leftx_base, rightx_base, nwindows=5, margin=60, minpix=100, debug=False):
    '''
    Sliding-window lane detection where each window is recentered by linearly
    extrapolating the pixels found so far, so windows can move diagonally and
    follow curved lanes.

    nwindows is the nominal number of windows (the loop runs 2*nwindows steps
    since a window may advance sideways instead of upward).
    margin defines the half-width of the windows.
    minpix sets the minimum number of NEW pixels needed to keep a window.
    A lane stops being tracked after max_empty_win consecutive empty windows.

    Returns:
        (left_fit, right_fit, left_curverad, right_curverad, out_img), or
        (None, None, 0, 0, None) when either lane yields no pixels.
    '''
    # np.int was removed in NumPy 1.24; the builtin int behaves identically here.
    window_height = int(top_down.shape[0] / nwindows)
    # Output image for debug visualization.
    out_img = np.dstack((top_down, top_down, top_down)) * 255
    # All nonzero pixel positions in the image.
    nonzero = top_down.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centers, updated each step.
    leftx_current = leftx_base
    rightx_current = rightx_base
    init_height_left = top_down.shape[0] - np.ceil(window_height / float(2))
    init_height_right = top_down.shape[0] - np.ceil(window_height / float(2))
    # Lists of accepted pixel indices for each lane.
    left_lane_inds = []
    right_lane_inds = []
    # Running sets of all indices seen, seeded with an empty array so
    # np.concatenate always has something to work on.
    temp_left_idx = [np.array([], dtype=int)]
    temp_right_idx = [np.array([], dtype=int)]
    # Stop tracking a lane after this many consecutive windows with no new pixels.
    max_empty_win = 5
    counter_left = 0
    counter_right = 0
    # Step through the windows one by one.
    for window in range(nwindows * 2):
        # Window boundaries in x and y for each lane.
        win_y_low_left = int(init_height_left - np.ceil(window_height / float(2)))
        win_y_high_left = int(init_height_left + np.ceil(window_height / float(2)))
        win_y_low_right = int(init_height_right - np.ceil(window_height / float(2)))
        win_y_high_right = int(init_height_right + np.ceil(window_height / float(2)))
        win_xleft_low = int(leftx_current - margin)
        win_xleft_high = int(leftx_current + margin)
        win_xright_low = int(rightx_current - margin)
        win_xright_high = int(rightx_current + margin)
        if debug:
            # Draw the windows on the visualization image.
            cv2.rectangle(out_img, (win_xleft_low, win_y_low_left), (win_xleft_high, win_y_high_left), (0, 255, 0), 2)
            cv2.rectangle(out_img, (win_xright_low, win_y_low_right), (win_xright_high, win_y_high_right), (0, 255, 0), 2)
        # Nonzero pixels within each window.
        good_left_inds = ((nonzeroy >= win_y_low_left) & (nonzeroy < win_y_high_left) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low_right) & (nonzeroy < win_y_high_right) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Center the next window around a point predicted from the current fit.
        # 1. For the left lane:
        if counter_left < max_empty_win:
            # Only pixels not already collected count toward minpix.
            num_new_points_left = np.setdiff1d(np.array(good_left_inds), np.unique(np.concatenate(temp_left_idx)))
            if num_new_points_left.size > minpix:
                temp_left_idx.append(good_left_inds)
                left_lane_inds.append(good_left_inds)
                counter_left = 0
            else:
                counter_left += 1
            if np.concatenate(temp_left_idx).size == 0:
                # No line was detected.
                return None, None, 0, 0, None
            temp_l_idx = np.unique(np.concatenate(temp_left_idx))
            leftx = nonzerox[temp_l_idx]
            lefty = nonzeroy[temp_l_idx]
            # Linear fits in both directions so the prediction can be clamped
            # to a window edge when the lane exits the window sideways.
            temp_left_fit_x = np.polyfit(lefty, leftx, 1)
            temp_left_fit_y = np.polyfit(leftx, lefty, 1)
            cut_x = int(temp_left_fit_x[0] * win_y_low_left + temp_left_fit_x[1])
            if cut_x > win_xleft_high or cut_x < win_xleft_low:
                # Prediction left the window: advance along the crossed edge.
                if cut_x > win_xleft_high:
                    pos_y = int(temp_left_fit_y[0] * win_xleft_high + temp_left_fit_y[1])
                    pos_x = win_xleft_high
                else:
                    pos_y = int(temp_left_fit_y[0] * win_xleft_low + temp_left_fit_y[1])
                    pos_x = win_xleft_low
            else:
                pos_y = win_y_low_left
                pos_x = cut_x
            init_height_left = int(pos_y)
            leftx_current = int(pos_x)
        # 2. For the right lane (same scheme):
        if counter_right < max_empty_win:
            num_new_points_right = np.setdiff1d(np.array(good_right_inds), np.unique(np.concatenate(temp_right_idx)))
            if num_new_points_right.size > minpix:
                temp_right_idx.append(good_right_inds)
                right_lane_inds.append(good_right_inds)
                counter_right = 0
            else:
                counter_right += 1
            if np.concatenate(temp_right_idx).size == 0:
                # No line was detected.
                return None, None, 0, 0, None
            temp_r_idx = np.unique(np.concatenate(temp_right_idx))
            rightx = nonzerox[temp_r_idx]
            righty = nonzeroy[temp_r_idx]
            temp_right_fit_x = np.polyfit(righty, rightx, 1)
            temp_right_fit_y = np.polyfit(rightx, righty, 1)
            cut_x = int(temp_right_fit_x[0] * win_y_low_right + temp_right_fit_x[1])
            if cut_x > win_xright_high or cut_x < win_xright_low:
                # Prediction left the window: advance along the crossed edge.
                if cut_x > win_xright_high:
                    pos_y = int(temp_right_fit_y[0] * win_xright_high + temp_right_fit_y[1])
                    pos_x = win_xright_high
                else:
                    pos_y = int(temp_right_fit_y[0] * win_xright_low + temp_right_fit_y[1])
                    pos_x = win_xright_low
            else:
                pos_y = win_y_low_right
                pos_x = cut_x
            init_height_right = int(pos_y)
            rightx_current = int(pos_x)
    # Concatenate and dedupe the accepted index arrays.
    left_lane_inds = np.unique(np.concatenate(left_lane_inds))
    right_lane_inds = np.unique(np.concatenate(right_lane_inds))
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    if leftx.size == 0 or rightx.size == 0 or lefty.size == 0 or righty.size == 0:
        return None, None, 0, 0, None
    # Fit a second order polynomial to each lane.
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    if debug:
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    left_curverad, right_curverad = get_curvature(leftx, lefty, rightx, righty)
    return left_fit, right_fit, left_curverad, right_curverad, out_img
def get_curvature(leftx, lefty, rightx, righty):
    '''
    Computes the radius of curvature of each lane, in meters, evaluated
    near the bottom of the image (y = 700 px).
    '''
    y_eval = 700.

    def _radius_m(xs, ys):
        # Fit in world space, then apply R = (1 + (2Ay + B)^2)^(3/2) / |2A|.
        coeffs = np.polyfit(ys * ym_per_pix, xs * xm_per_pix, 2)
        first_deriv = 2 * coeffs[0] * y_eval * ym_per_pix + coeffs[1]
        return ((1 + first_deriv ** 2) ** 1.5) / np.absolute(2 * coeffs[0])

    return _radius_m(leftx, lefty), _radius_m(rightx, righty)
def sanity_check(left_fitx, right_fitx, left_curverad, right_curverad):
    '''
    Validates a pair of lane detections. Returns True when the fit looks
    plausible: sane curvatures, lanes never closer than 1 m, and a bottom
    lane width near the expected 3.7 m.
    '''
    # Reject implausibly tight curvatures.
    if min(left_curverad, right_curverad) < 10:
        return False
    # Horizontal separation between the lanes at every sampled y, in meters.
    separation = (right_fitx - left_fitx) * xm_per_pix
    if np.min(separation) < 1.0:
        return False
    # Lane width at the bottom of the image must be roughly one lane wide.
    bottom_width = (right_fitx[-1] - left_fitx[-1]) * xm_per_pix
    return 2.3 <= bottom_width <= 4.2
def find_position(image_size, left_point, right_point):
    '''
    Finds the position of the car relative to the lane center, in meters,
    assuming the camera is mounted at the horizontal center of the image.
    '''
    image_center = image_size / 2
    lane_center = (left_point + right_point) / 2
    # Convert the pixel offset to meters.
    return (image_center - lane_center) * xm_per_pix
| {
"content_hash": "b857a2d80487f1b7eadc0233622977c5",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 205,
"avg_line_length": 44.009302325581395,
"alnum_prop": 0.6261889663918834,
"repo_name": "camigord/Self-Driving-Car-Nanodegree",
"id": "c0ad64a2f19daf44c9a5cbafaf9557fe3642cbb7",
"size": "18924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "P4-Advanced-Lane-Finding/utils/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1139723"
},
{
"name": "Jupyter Notebook",
"bytes": "16877239"
},
{
"name": "Python",
"bytes": "51094"
}
],
"symlink_target": ""
} |
import os.path
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.conf import settings
from avatar.settings import AVATAR_DEFAULT_URL, AVATAR_MAX_AVATARS_PER_USER
from avatar.util import get_primary_avatar, User
from avatar.models import Avatar
try:
from PIL import Image
dir(Image) # Placate PyFlakes
except ImportError:
import Image
def upload_helper(o, filename):
    """POST the named file from the test-data directory to the avatar_add view.

    Args:
        o: a TestCase instance providing `testdatapath` and `client`.
        filename: file name inside o.testdatapath to upload.

    Returns:
        The (redirect-followed) test client response.
    """
    # `with` guarantees the file is closed even if the POST raises;
    # the original leaked the handle on exception.
    with open(os.path.join(o.testdatapath, filename), "rb") as f:
        response = o.client.post(reverse('avatar_add'), {
            'avatar': f,
        }, follow=True)
    return response
class AvatarUploadTests(TestCase):
    """Exercises avatar upload, rendering and deletion views.

    Some tests assume specific avatar settings; see the inline comments
    (AVATAR_ALLOWED_FILE_EXTS, AVATAR_MAX_SIZE).

    The fail* assertion aliases (failUnlessEqual/failIfEqual) were deprecated
    since Python 2.7/3.1 and removed in Python 3.12; they are replaced here by
    assertEqual/assertNotEqual with identical semantics.
    """

    def setUp(self):
        self.testdatapath = os.path.join(os.path.dirname(__file__), "testdata")
        self.user = User.objects.create_user('test', 'lennon@thebeatles.com', 'testpassword')
        self.user.save()
        self.client.login(username='test', password='testpassword')
        Image.init()

    def testNonImageUpload(self):
        # A non-image upload must come back with form errors.
        response = upload_helper(self, "nonimagefile")
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(response.context['upload_avatar_form'].errors, {})

    def testNormalImageUpload(self):
        response = upload_helper(self, "test.png")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 1)
        self.assertEqual(response.context['upload_avatar_form'].errors, {})
        avatar = get_primary_avatar(self.user)
        self.assertNotEqual(avatar, None)

    def testImageWithoutExtension(self):
        # use with AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
        response = upload_helper(self, "imagefilewithoutext")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 0)  # Redirect only if it worked
        self.assertNotEqual(response.context['upload_avatar_form'].errors, {})

    def testImageWithWrongExtension(self):
        # use with AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
        response = upload_helper(self, "imagefilewithwrongext.ogg")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 0)  # Redirect only if it worked
        self.assertNotEqual(response.context['upload_avatar_form'].errors, {})

    def testImageTooBig(self):
        # use with AVATAR_MAX_SIZE = 1024 * 1024
        response = upload_helper(self, "testbig.png")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 0)  # Redirect only if it worked
        self.assertNotEqual(response.context['upload_avatar_form'].errors, {})

    def testDefaultUrl(self):
        response = self.client.get(reverse('avatar_render_primary', kwargs={
            'user': self.user.username,
            'size': 80,
        }))
        loc = response['Location']
        base_url = getattr(settings, 'STATIC_URL', None)
        if not base_url:
            base_url = settings.MEDIA_URL
        self.assertTrue(base_url in loc)
        self.assertTrue(loc.endswith(AVATAR_DEFAULT_URL))

    def testNonExistingUser(self):
        a = get_primary_avatar("nonexistinguser")
        self.assertEqual(a, None)

    def testThereCanBeOnlyOnePrimaryAvatar(self):
        for i in range(1, 10):
            self.testNormalImageUpload()
        count = Avatar.objects.filter(user=self.user, primary=True).count()
        self.assertEqual(count, 1)

    def testDeleteAvatar(self):
        self.testNormalImageUpload()
        avatar = Avatar.objects.filter(user=self.user)
        self.assertEqual(len(avatar), 1)
        response = self.client.post(reverse('avatar_delete'), {
            'choices': [avatar[0].id],
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 1)
        count = Avatar.objects.filter(user=self.user).count()
        self.assertEqual(count, 0)

    def testDeletePrimaryAvatarAndNewPrimary(self):
        self.testThereCanBeOnlyOnePrimaryAvatar()
        primary = get_primary_avatar(self.user)
        oid = primary.id
        response = self.client.post(reverse('avatar_delete'), {
            'choices': [oid],
        })
        # Deleting the primary avatar must promote another avatar to primary.
        primaries = Avatar.objects.filter(user=self.user, primary=True)
        self.assertEqual(len(primaries), 1)
        self.assertNotEqual(oid, primaries[0].id)
        avatars = Avatar.objects.filter(user=self.user)
        self.assertEqual(avatars[0].id, primaries[0].id)

    def testTooManyAvatars(self):
        for i in range(0, AVATAR_MAX_AVATARS_PER_USER):
            self.testNormalImageUpload()
        count_before = Avatar.objects.filter(user=self.user).count()
        response = upload_helper(self, "test.png")
        count_after = Avatar.objects.filter(user=self.user).count()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.redirect_chain), 0)  # Redirect only if it worked
        self.assertNotEqual(response.context['upload_avatar_form'].errors, {})
        self.assertEqual(count_before, count_after)

    # TODO: testAvatarOrder
    # TODO: testReplaceAvatarWhenMaxIsOne
    # TODO: testHashFileName
    # TODO: testHashUserName
    # TODO: testChangePrimaryAvatar
    # TODO: testDeleteThumbnailAndRecreation
    # TODO: testAutomaticThumbnailCreation
| {
"content_hash": "3525a377da065740fbacf5be01bfa30e",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 98,
"avg_line_length": 41.53383458646616,
"alnum_prop": 0.6591238233164374,
"repo_name": "marcfro/django-avatar",
"id": "6d233d86bdd1653f885e7cd362d90bab36d4e887",
"size": "5524",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "avatar/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
def set_null_to_blank(queryset, fields):
    """Replace None with the empty string on the given fields of each element.

    Every element is saved, even when no field was changed (this mirrors the
    original migration helper's behavior).
    """
    for obj in queryset:
        for name in fields:
            if getattr(obj, name) is None:
                setattr(obj, name, '')
        obj.save()
def run_data_migration(apps, schema_editor):
    """Blank out NULL text fields on OptionSet and Option rows."""
    option_set_model = apps.get_model('options', 'OptionSet')
    option_model = apps.get_model('options', 'Option')
    shared_fields = ['uri', 'uri_prefix', 'key', 'comment']
    set_null_to_blank(option_set_model.objects.all(), shared_fields)
    # Option additionally has a path and five per-language text fields.
    option_fields = ['uri', 'uri_prefix', 'key', 'path', 'comment']
    option_fields += ['text_lang%d' % i for i in range(1, 6)]
    set_null_to_blank(option_model.objects.all(), option_fields)
class Migration(migrations.Migration):
    """Data migration: convert NULL option text fields to empty strings."""

    # Must run after the migration that added the language fields blanked here.
    dependencies = [
        ('options', '0017_add_language_fields'),
    ]

    operations = [
        # Forward-only; no reverse function is provided.
        migrations.RunPython(run_data_migration),
    ]
| {
"content_hash": "ad2a4de9b94c2e0561714edc9358af3f",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 54,
"avg_line_length": 22.565217391304348,
"alnum_prop": 0.5491329479768786,
"repo_name": "rdmorganiser/rdmo",
"id": "38292c851d5d77dd641c39f4402c8e56ccf5647f",
"size": "1112",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rdmo/options/migrations/0018_data_migration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "426256"
},
{
"name": "JavaScript",
"bytes": "110821"
},
{
"name": "Python",
"bytes": "1265092"
},
{
"name": "SCSS",
"bytes": "20373"
}
],
"symlink_target": ""
} |
"""Implement the inner handling for tr-98/181 ManagementServer."""
__author__ = 'dgentry@google.com (Denton Gentry)'
import copy
import datetime
import math
import random
import re
import socket
import time
import urlparse
import google3
import tornado.ioloop
import cwmpbool
import cwmpdate
# Allow unit tests to override with a mock
PERIODIC_CALLBACK = tornado.ioloop.PeriodicCallback
class DefaultSetAcsUrl(object):
    """Fallback SetAcsUrl implementation that declines to store a URL."""

    def SetAcsUrl(self, url):
        # Returning False reports that the URL was not stored.
        return False
class ServerParameters(object):
    """Class to hold parameters of CpeManagementServer."""

    def __init__(self):
        # Session retry backoff parameters; the multiplier is stored
        # in thousandths (2000 -> 2.0), see SessionRetryWait.
        self.CWMPRetryMinimumWaitInterval = 5
        self.CWMPRetryIntervalMultiplier = 2000
        # The default password is trivial. In the initial Inform exchange
        # the ACS generally sets ConnectionRequest{Username,Password}
        # to values which only it knows. If something goes wrong, we want
        # the password to be well known so the ACS can wake us up and
        # try again.
        self.ConnectionRequestPassword = 'cwmp'
        self.ConnectionRequestUsername = 'catawampus'
        self.DefaultActiveNotificationThrottle = 0
        self.EnableCWMP = True
        self._PeriodicInformEnable = True
        # Once every 15 minutes plus or minus one minute (3 minute spread)
        self._PeriodicInformInterval = (15 * 60) + random.randint(-60, 60)
        # Phase reference for periodic informs; 0 means unset, otherwise
        # a date string parsed by cwmpdate (see ConfigurePeriodicInform).
        self._PeriodicInformTime = 0
        # Credentials the CPE presents when contacting the ACS.
        self.Password = ''
        self.Username = ''
class CpeManagementServer(object):
"""Inner class implementing tr-98 & 181 ManagementServer."""
def __init__(self, platform_config, port, ping_path,
acs_url=None, get_parameter_key=None,
start_periodic_session=None, ioloop=None,
restrict_acs_hosts=None):
self.ioloop = ioloop or tornado.ioloop.IOLoop.instance()
self.restrict_acs_hosts = restrict_acs_hosts
self.ValidateAcsUrl(acs_url)
if platform_config:
self.ValidateAcsUrl(platform_config.GetAcsUrl())
self.acs_url = acs_url
self.platform_config = platform_config
self.port = port
self.ping_path = ping_path
self.get_parameter_key = get_parameter_key
self.start_periodic_session = start_periodic_session
self.my_ip = None
self._periodic_callback = None
self._start_periodic_timeout = None
self.config_copy = None
self.config = ServerParameters()
self.ConfigurePeriodicInform()
def StartTransaction(self):
if self.config_copy is None:
self.config_copy = copy.deepcopy(self.config)
def CommitTransaction(self):
self.config_copy = None
def AbandonTransaction(self):
self.config = self.config_copy
self.config_copy = None
self.ConfigurePeriodicInform()
def ValidateAcsUrl(self, value):
"""Checks if the URL passed is acceptable. If not raises an exception."""
if not self.restrict_acs_hosts or not value:
return
# Require https for the url scheme.
split_url = urlparse.urlsplit(value)
if split_url.scheme != 'https':
raise ValueError('The ACS Host must be https: %s' % str(value))
# Iterate over the restrict domain name list and see if one of
# the restricted domain names matches the supplied url host name.
restrict_hosts = re.split(r'[\s,]+', self.restrict_acs_hosts)
for host in restrict_hosts:
# Check the full hostname.
if split_url.hostname == host:
return
# Check against the restrict host of form '.foo.com'
if not host.startswith('.'):
dotted_host = '.' + host
else:
dotted_host = host
if split_url.hostname.endswith(dotted_host):
return
# If we don't find a valid host, raise an exception.
raise ValueError('The ACS Host is not permissible: %s' % str(value))
@property
def CWMPRetryMinimumWaitInterval(self):
return self.config.CWMPRetryMinimumWaitInterval
@CWMPRetryMinimumWaitInterval.setter
def CWMPRetryMinimumWaitInterval(self, value):
self.config.CWMPRetryMinimumWaitInterval = int(value)
@property
def CWMPRetryIntervalMultiplier(self):
return self.config.CWMPRetryIntervalMultiplier
@CWMPRetryIntervalMultiplier.setter
def CWMPRetryIntervalMultiplier(self, value):
self.config.CWMPRetryIntervalMultiplier = int(value)
@property
def ConnectionRequestPassword(self):
return self.config.ConnectionRequestPassword
@ConnectionRequestPassword.setter
def ConnectionRequestPassword(self, value):
self.config.ConnectionRequestPassword = value
@property
def ConnectionRequestUsername(self):
return self.config.ConnectionRequestUsername
@ConnectionRequestUsername.setter
def ConnectionRequestUsername(self, value):
self.config.ConnectionRequestUsername = value
@property
def DefaultActiveNotificationThrottle(self):
return self.config.DefaultActiveNotificationThrottle
@DefaultActiveNotificationThrottle.setter
def DefaultActiveNotificationThrottle(self, value):
self.config.DefaultActiveNotificationThrottle = int(value)
@property
def EnableCWMP(self):
return True
@property
def Password(self):
return self.config.Password
@Password.setter
def Password(self, value):
self.config.Password = value
@property
def Username(self):
return self.config.Username
@Username.setter
def Username(self, value):
self.config.Username = value
def GetURL(self):
return self.acs_url or self.platform_config.GetAcsUrl()
def SetURL(self, value):
self.ValidateAcsUrl(value)
if self.acs_url:
self.acs_url = value
else:
self.platform_config.SetAcsUrl(value)
URL = property(GetURL, SetURL, None, 'tr-98/181 ManagementServer.URL')
def _isIp6Address(self, ip):
# pylint: disable-msg=W0702
try:
socket.inet_pton(socket.AF_INET6, ip)
except:
return False
return True
def _formatIP(self, ip):
if self._isIp6Address(ip):
return '[' + ip + ']'
else:
return ip
def GetConnectionRequestURL(self):
if self.my_ip and self.port and self.ping_path:
path = self.ping_path if self.ping_path[0] != '/' else self.ping_path[1:]
ip = self._formatIP(self.my_ip)
return 'http://{0}:{1!s}/{2}'.format(ip, self.port, path)
else:
return ''
ConnectionRequestURL = property(
GetConnectionRequestURL, None, None,
'tr-98/181 ManagementServer.ConnectionRequestURL')
def GetParameterKey(self):
if self.get_parameter_key is not None:
return self.get_parameter_key()
else:
return ''
ParameterKey = property(GetParameterKey, None, None,
'tr-98/181 ManagementServer.ParameterKey')
def GetPeriodicInformEnable(self):
return self.config._PeriodicInformEnable
def SetPeriodicInformEnable(self, value):
self.config._PeriodicInformEnable = cwmpbool.parse(value)
self.ConfigurePeriodicInform()
PeriodicInformEnable = property(
GetPeriodicInformEnable, SetPeriodicInformEnable, None,
'tr-98/181 ManagementServer.PeriodicInformEnable')
def GetPeriodicInformInterval(self):
return self.config._PeriodicInformInterval
def SetPeriodicInformInterval(self, value):
self.config._PeriodicInformInterval = int(value)
self.ConfigurePeriodicInform()
PeriodicInformInterval = property(
GetPeriodicInformInterval, SetPeriodicInformInterval, None,
'tr-98/181 ManagementServer.PeriodicInformInterval')
def GetPeriodicInformTime(self):
return self.config._PeriodicInformTime
def SetPeriodicInformTime(self, value):
self.config._PeriodicInformTime = value
self.ConfigurePeriodicInform()
PeriodicInformTime = property(
GetPeriodicInformTime, SetPeriodicInformTime, None,
'tr-98/181 ManagementServer.PeriodicInformTime')
def ConfigurePeriodicInform(self):
"""Commit changes to PeriodicInform parameters."""
if self._periodic_callback:
self._periodic_callback.stop()
self._periodic_callback = None
if self._start_periodic_timeout:
self.ioloop.remove_timeout(self._start_periodic_timeout)
self._start_periodic_timeout = None
# Delete the old periodic callback.
if self._periodic_callback:
self._periodic_callback.stop()
self._periodic_callback = None
if (self.config._PeriodicInformEnable and
self.config._PeriodicInformInterval > 0):
msec = self.config._PeriodicInformInterval * 1000
self._periodic_callback = PERIODIC_CALLBACK(self.start_periodic_session,
msec, self.ioloop)
if self.config._PeriodicInformTime:
# PeriodicInformTime is just meant as an offset, not an actual time.
# So if it's 25.5 hours in the future and the interval is 1 hour, then
# the interesting part is the 0.5 hours, not the 25.
#
# timetuple might be in the past, but that's okay; the modulus
# makes sure it's never negative. (ie. (-3 % 5) == 2, in python)
timetuple = cwmpdate.parse(self.config._PeriodicInformTime).timetuple()
offset = ((time.mktime(timetuple) - time.time())
% float(self.config._PeriodicInformInterval))
else:
offset = 0.0
self._start_periodic_timeout = self.ioloop.add_timeout(
datetime.timedelta(seconds=offset), self.StartPeriodicInform)
def StartPeriodicInform(self):
    # Fired by the ioloop timeout scheduled in ConfigurePeriodicInform;
    # from this point the periodic callback runs at the configured interval.
    self._periodic_callback.start()
def SessionRetryWait(self, retry_count):
    """Calculate wait time before next session retry.

    See $SPEC3 section 3.2.1 for a description of the algorithm.

    Args:
      retry_count: integer number of retries attempted so far.
    Returns:
      Number of seconds to wait before initiating next session.
    """
    if retry_count == 0:
        return 0
    periodic_interval = self.config._PeriodicInformInterval
    if periodic_interval <= 0:
        # No usable periodic interval configured; cap the backoff at 30s.
        periodic_interval = 30
    # The backoff window stops growing after ten retries.
    c = 10 if retry_count >= 10 else retry_count
    m = float(self.config.CWMPRetryMinimumWaitInterval)
    k = float(self.config.CWMPRetryIntervalMultiplier) / 1000.0
    start = m * math.pow(k, c - 1)
    stop = start * k
    # Pin start/stop to have a maximum value of PeriodicInformInterval.
    start = int(min(start, periodic_interval / k))
    stop = int(min(stop, periodic_interval))
    if stop <= start:
        # Degenerate window (e.g. multiplier <= 1000); random.randrange
        # would raise ValueError on an empty range.
        return start
    return random.randrange(start, stop)
def main():
    # No standalone behavior; this module is meant to be imported.
    pass


if __name__ == '__main__':
    main()
| {
"content_hash": "658f239be4b3992db587d11708452750",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 79,
"avg_line_length": 31.962848297213622,
"alnum_prop": 0.7026346377373112,
"repo_name": "fengyuanjs/catawampus",
"id": "f4de3584769b2487c71b36709196866f8afd8490",
"size": "11060",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tr/cpe_management_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3855"
},
{
"name": "Python",
"bytes": "664568"
},
{
"name": "Shell",
"bytes": "85447"
}
],
"symlink_target": ""
} |
from platform import machine
from eucaops import Eucaops
import re
import string
from eutester.euinstance import EuInstance
from eutester.eutestcase import EutesterTestCase
from eutester.sshconnection import CommandTimeoutException
class LVMSnapshotUtility(EutesterTestCase):
    """Create and restore LVM snapshots of the root filesystem on the
    component machines of a Eucalyptus deployment (VMware hosts skipped)."""

    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        # Name used for the snapshot logical volume.
        self.parser.add_argument("--name", default="post_config")
        self.get_args()
        # Setup basic eutester object
        self.tester = Eucaops(config_file=self.args.config, password=self.args.password, download_creds=False)

    def clean_method(self):
        pass

    def CreateLVMSnapshot(self):
        '''
        Create LVM Snapshot
        '''
        machines = self.tester.get_component_machines()
        for machine in machines:
            # BUG FIX: the original used `is "vmware"`, which compares object
            # identity and was effectively always False — VMware hosts were
            # never actually skipped.  Use string equality.
            if machine.distro.name == "vmware":
                continue
            machine.sys("lvcreate -l 100%origin -s -n " + self.args.name + " `blkid -L rootfs`", code=0)

    def RestoreLVMSnapshot(self):
        '''
        Restore LVM Snapshot
        '''
        machines = self.tester.get_component_machines()
        check_file = "/root/merge-executed"
        for machine in machines:
            # BUG FIX: identity comparison replaced with equality (see above).
            if machine.distro.name == "vmware":
                continue
            logical_volume = "/dev/vg01/" + self.args.name
            machine.sys("e2label " + logical_volume + " rootfs")
            machine.sys("touch " + check_file)
            machine.sys("lvconvert --merge " + logical_volume, code=0)
            try:
                machine.sys("reboot -f", timeout=2)
            except CommandTimeoutException:
                # Expected: the ssh channel dies as the host reboots.
                pass
        self.tester.sleep(30)
        for machine in machines:
            self.tester.ping(machine.hostname, poll_count=120)
        for machine in machines:
            def ssh_refresh():
                try:
                    machine.refresh_ssh()
                    return True
                except:
                    return False
            self.tester.wait_for_result(ssh_refresh, True, timeout=120)
            # The merge marker must be gone after the snapshot merge.
            machine.sys('ls ' + check_file, code=2)

            def lv_gone():
                try:
                    machine.sys("lvdisplay " + logical_volume, code=5)
                    return True
                except:
                    return False
            self.tester.wait_for_result(lv_gone, True, timeout=240)
            machine.sys("lvcreate -l 100%origin -s -n " + logical_volume + " `blkid -L rootfs`", code=0)

    def get_safe_uptime(self, machine):
        # Best-effort uptime probe; returns None when the host is unreachable.
        uptime = None
        try:
            uptime = machine.get_uptime()
        except: pass
        return uptime
if __name__ == "__main__":
    testcase = LVMSnapshotUtility()
    ### Either use the list of tests passed from config/command line to determine what subset of tests to run
    # (renamed from `list`, which shadowed the builtin)
    test_names = testcase.args.tests or ["CreateLVMSnapshot"]
    ### Convert test suite methods to EutesterUnitTest objects
    unit_list = []
    for test in test_names:
        unit_list.append(testcase.create_testunit_by_name(test))
    ### Run the EutesterUnitTest objects
    result = testcase.run_test_case_list(unit_list, clean_on_exit=True)
exit(result) | {
"content_hash": "76f85e36533738bf7fbdcbc33ef76365",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 111,
"avg_line_length": 34.285714285714285,
"alnum_prop": 0.5782738095238096,
"repo_name": "nagyistoce/eutester",
"id": "33e9d02e7dd1be8dad492c5bcd3c443363d7a33e",
"size": "4837",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "testcases/cloud_admin/lvm_snapshot_utility.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Erlang",
"bytes": "15990"
},
{
"name": "Groovy",
"bytes": "418692"
},
{
"name": "HTML",
"bytes": "1792"
},
{
"name": "Java",
"bytes": "967088"
},
{
"name": "Python",
"bytes": "2543284"
},
{
"name": "RobotFramework",
"bytes": "4827"
},
{
"name": "Shell",
"bytes": "3066"
}
],
"symlink_target": ""
} |
import os
from os import path as op
import shutil
import warnings
import numpy as np
from nose.tools import assert_raises, assert_true, assert_false
from numpy.testing import assert_allclose, assert_array_equal, assert_equal
from mne import pick_types
# from mne.tests.common import assert_dig_allclose
from mne.transforms import apply_trans
from mne.io import read_raw_fif, read_raw_ctf
from mne.io.tests.test_raw import _test_raw_reader
from mne.utils import _TempDir, run_tests_if_main, slow_test
from mne.datasets import testing, spm_face
from mne.io.constants import FIFF
# Root of the CTF test data (only present when mne-testing-data is available).
ctf_dir = op.join(testing.data_path(download=False), 'CTF')
ctf_fname_continuous = 'testdata_ctf.ds'
ctf_fname_1_trial = 'testdata_ctf_short.ds'
ctf_fname_2_trials = 'testdata_ctf_pseudocontinuous.ds'
ctf_fname_discont = 'testdata_ctf_short_discontinuous.ds'
ctf_fname_somato = 'somMDYO-18av.ds'
ctf_fname_catch = 'catch-alp-good-f.ds'

# Expected read block size (in samples) per dataset; verified in test_read_ctf.
block_sizes = {
    ctf_fname_continuous: 12000,
    ctf_fname_1_trial: 4801,
    ctf_fname_2_trials: 12000,
    ctf_fname_discont: 1201,
    ctf_fname_somato: 313,
    ctf_fname_catch: 2500,
}
# Datasets stored as a single trial; test_read_ctf skips the
# buffer_size_sec comparison against the MNE-C conversions for these.
single_trials = (
    ctf_fname_continuous,
    ctf_fname_1_trial,
)

ctf_fnames = tuple(sorted(block_sizes.keys()))
@slow_test
@testing.requires_testing_data
def test_read_ctf():
    """Test CTF reader against MNE-C-converted reference FIF files."""
    temp_dir = _TempDir()
    out_fname = op.join(temp_dir, 'test_py_raw.fif')
    # Create a dummy .eeg file so we can test our reading/application of it
    os.mkdir(op.join(temp_dir, 'randpos'))
    ctf_eeg_fname = op.join(temp_dir, 'randpos', ctf_fname_catch)
    shutil.copytree(op.join(ctf_dir, ctf_fname_catch), ctf_eeg_fname)
    with warnings.catch_warnings(record=True) as w:  # reclassified ch
        raw = _test_raw_reader(read_raw_ctf, directory=ctf_eeg_fname)
    assert_true(all('MISC channel' in str(ww.message) for ww in w))
    picks = pick_types(raw.info, meg=False, eeg=True)
    pos = np.random.RandomState(42).randn(len(picks), 3)
    fake_eeg_fname = op.join(ctf_eeg_fname, 'catch-alp-good-f.eeg')
    # Create a bad file
    with open(fake_eeg_fname, 'wb') as fid:
        fid.write('foo\n'.encode('ascii'))
    assert_raises(RuntimeError, read_raw_ctf, ctf_eeg_fname)
    # Create a good file
    with open(fake_eeg_fname, 'wb') as fid:
        for ii, ch_num in enumerate(picks):
            args = (str(ch_num + 1), raw.ch_names[ch_num],) + tuple(
                '%0.5f' % x for x in 100 * pos[ii])  # convert to cm
            fid.write(('\t'.join(args) + '\n').encode('ascii'))
    pos_read_old = np.array([raw.info['chs'][p]['loc'][:3] for p in picks])
    with warnings.catch_warnings(record=True) as w:  # reclassified channel
        raw = read_raw_ctf(ctf_eeg_fname)  # read modified data
    assert_true(all('MISC channel' in str(ww.message) for ww in w))
    pos_read = np.array([raw.info['chs'][p]['loc'][:3] for p in picks])
    assert_allclose(apply_trans(raw.info['ctf_head_t'], pos), pos_read,
                    rtol=1e-5, atol=1e-5)
    assert_true((pos_read == pos_read_old).mean() < 0.1)
    shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_randpos_raw.fif'),
                op.join(temp_dir, 'randpos', 'catch-alp-good-f.ds_raw.fif'))
    # Create a version with no hc, starting out *with* EEG pos (error)
    # NOTE(review): this mkdir uses 'nohc' while the paths below use 'no_hc';
    # copytree creates the 'no_hc' parent itself, so 'nohc' is a stray,
    # unused directory — confirm which name was intended.
    os.mkdir(op.join(temp_dir, 'nohc'))
    ctf_no_hc_fname = op.join(temp_dir, 'no_hc', ctf_fname_catch)
    shutil.copytree(ctf_eeg_fname, ctf_no_hc_fname)
    remove_base = op.join(ctf_no_hc_fname, op.basename(ctf_fname_catch[:-3]))
    os.remove(remove_base + '.hc')
    with warnings.catch_warnings(record=True):  # no coord tr
        assert_raises(RuntimeError, read_raw_ctf, ctf_no_hc_fname)
    os.remove(remove_base + '.eeg')
    shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_nohc_raw.fif'),
                op.join(temp_dir, 'no_hc', 'catch-alp-good-f.ds_raw.fif'))
    # All our files
    use_fnames = [op.join(ctf_dir, c) for c in ctf_fnames]
    for fname in use_fnames:
        raw_c = read_raw_fif(fname + '_raw.fif', preload=True)
        with warnings.catch_warnings(record=True) as w:  # reclassified ch
            raw = read_raw_ctf(fname)
        assert_true(all('MISC channel' in str(ww.message) for ww in w))
        # check info match
        assert_array_equal(raw.ch_names, raw_c.ch_names)
        assert_allclose(raw.times, raw_c.times)
        assert_allclose(raw._cals, raw_c._cals)
        for key in ('version', 'usecs'):
            assert_equal(raw.info['meas_id'][key], raw_c.info['meas_id'][key])
        py_time = raw.info['meas_id']['secs']
        c_time = raw_c.info['meas_id']['secs']
        max_offset = 24 * 60 * 60  # probably overkill but covers timezone
        assert_true(c_time - max_offset <= py_time <= c_time)
        for t in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            assert_allclose(raw.info[t]['trans'], raw_c.info[t]['trans'],
                            rtol=1e-4, atol=1e-7)
        for key in ('acq_pars', 'acq_stim', 'bads',
                    'ch_names', 'custom_ref_applied', 'description',
                    'events', 'experimenter', 'highpass', 'line_freq',
                    'lowpass', 'nchan', 'proj_id', 'proj_name',
                    'projs', 'sfreq', 'subject_info'):
            assert_equal(raw.info[key], raw_c.info[key], key)
        if op.basename(fname) not in single_trials:
            # We don't force buffer size to be smaller like MNE-C
            assert_equal(raw.info['buffer_size_sec'],
                         raw_c.info['buffer_size_sec'])
        # Compensation (comps) structures must match the C conversion.
        assert_equal(len(raw.info['comps']), len(raw_c.info['comps']))
        for c1, c2 in zip(raw.info['comps'], raw_c.info['comps']):
            for key in ('colcals', 'rowcals'):
                assert_allclose(c1[key], c2[key])
            assert_equal(c1['save_calibrated'], c2['save_calibrated'])
            for key in ('row_names', 'col_names', 'nrow', 'ncol'):
                assert_array_equal(c1['data'][key], c2['data'][key])
            assert_allclose(c1['data']['data'], c2['data']['data'], atol=1e-7,
                            rtol=1e-5)
        assert_allclose(raw.info['hpi_results'][0]['coord_trans']['trans'],
                        raw_c.info['hpi_results'][0]['coord_trans']['trans'],
                        rtol=1e-5, atol=1e-7)
        # Per-channel metadata comparison.
        assert_equal(len(raw.info['chs']), len(raw_c.info['chs']))
        for ii, (c1, c2) in enumerate(zip(raw.info['chs'], raw_c.info['chs'])):
            for key in ('kind', 'scanno', 'unit', 'ch_name', 'unit_mul',
                        'range', 'coord_frame', 'coil_type', 'logno'):
                if c1['ch_name'] == 'RMSP' and \
                        'catch-alp-good-f' in fname and \
                        key in ('kind', 'unit', 'coord_frame', 'coil_type',
                                'logno'):
                    continue  # XXX see below...
                assert_equal(c1[key], c2[key], err_msg=key)
            for key in ('cal',):
                assert_allclose(c1[key], c2[key], atol=1e-6, rtol=1e-4,
                                err_msg='raw.info["chs"][%d][%s]' % (ii, key))
            # XXX 2016/02/24: fixed bug with normal computation that used
            # to exist, once mne-C tools are updated we should update our FIF
            # conversion files, then the slices can go away (and the check
            # can be combined with that for "cal")
            for key in ('loc',):
                if c1['ch_name'] == 'RMSP' and 'catch-alp-good-f' in fname:
                    continue
                assert_allclose(c1[key][:3], c2[key][:3], atol=1e-6, rtol=1e-4,
                                err_msg='raw.info["chs"][%d][%s]' % (ii, key))
                assert_allclose(c1[key][9:12], c2[key][9:12], atol=1e-6,
                                rtol=1e-4,
                                err_msg='raw.info["chs"][%d][%s]' % (ii, key))
        if fname.endswith('catch-alp-good-f.ds'):  # omit points from .pos file
            raw.info['dig'] = raw.info['dig'][:-10]
        # XXX: Next test would fail because c-tools assign the fiducials from
        # CTF data as HPI. Should eventually clarify/unify with Matti.
        # assert_dig_allclose(raw.info, raw_c.info)
        # check data match
        raw_c.save(out_fname, overwrite=True, buffer_size_sec=1.)
        raw_read = read_raw_fif(out_fname)
        # so let's check tricky cases based on sample boundaries
        rng = np.random.RandomState(0)
        pick_ch = rng.permutation(np.arange(len(raw.ch_names)))[:10]
        bnd = int(round(raw.info['sfreq'] * raw.info['buffer_size_sec']))
        assert_equal(bnd, raw._raw_extras[0]['block_size'])
        assert_equal(bnd, block_sizes[op.basename(fname)])
        slices = (slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd),
                  slice(3, 300), slice(None))
        if len(raw.times) >= 2 * bnd:  # at least two complete blocks
            slices = slices + (slice(bnd, 2 * bnd), slice(bnd, bnd + 1),
                               slice(0, bnd + 100))
        for sl_time in slices:
            assert_allclose(raw[pick_ch, sl_time][0],
                            raw_c[pick_ch, sl_time][0])
            assert_allclose(raw_read[pick_ch, sl_time][0],
                            raw_c[pick_ch, sl_time][0])
        # all data / preload
        with warnings.catch_warnings(record=True) as w:  # reclassified ch
            raw = read_raw_ctf(fname, preload=True)
        assert_true(all('MISC channel' in str(ww.message) for ww in w))
        assert_allclose(raw[:][0], raw_c[:][0])
        raw.plot(show=False)  # Test plotting with ref_meg channels.
        assert_raises(ValueError, raw.plot, order='selection')
    # Error conditions on the reader itself.
    assert_raises(TypeError, read_raw_ctf, 1)
    assert_raises(ValueError, read_raw_ctf, ctf_fname_continuous + 'foo.ds')
    # test ignoring of system clock
    read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'ignore')
    assert_raises(ValueError, read_raw_ctf,
                  op.join(ctf_dir, ctf_fname_continuous), 'foo')
@spm_face.requires_spm_data
def test_read_spm_ctf():
    """Test CTF reader with omitted samples."""
    data_path = spm_face.data_path()
    raw_fname = op.join(data_path, 'MEG', 'spm',
                        'SPM_CTF_MEG_example_faces1_3D.ds')
    raw = read_raw_ctf(raw_fname)
    rec_extras = raw._raw_extras[0]
    # Some samples are omitted, so n_samp must be below the total count.
    assert_equal(rec_extras['n_samp'], raw.n_times)
    assert_false(rec_extras['n_samp'] == rec_extras['n_samp_tot'])
    # Test that LPA, nasion and RPA are correct.
    frames = np.array([d['coord_frame'] for d in raw.info['dig']])
    assert_true(np.all(frames == FIFF.FIFFV_COORD_HEAD))
    fiducials = {d['ident']: d['r'] for d in raw.info['dig']}
    assert_true(fiducials[1][0] < fiducials[2][0] < fiducials[3][0])  # x coord
    assert_true(fiducials[1][1] < fiducials[2][1])  # y coord
    assert_true(fiducials[3][1] < fiducials[2][1])  # y coord
    for ident in fiducials.keys():
        assert_allclose(fiducials[ident][2], 0, atol=1e-6)  # z coord
# mne.utils helper (imported above): runs this module's tests when executed
# directly as a script.
run_tests_if_main()
| {
"content_hash": "3b8e76c25c96c87057ec44fe8b1268d9",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 79,
"avg_line_length": 48.92,
"alnum_prop": 0.5829017897701463,
"repo_name": "nicproulx/mne-python",
"id": "dbfa013f6ebd076cbd79297c8c419cf24c5fddf9",
"size": "11085",
"binary": false,
"copies": "2",
"ref": "refs/heads/placeholder",
"path": "mne/io/ctf/tests/test_ctf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3723"
},
{
"name": "Python",
"bytes": "5866703"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
class Solution(object):
    def read(self, buf, n):
        """
        :type buf: Destination buffer (List[str])
        :type n: Maximum number of characters to read (int)
        :rtype: The number of characters read (int)
        """
        total = 0
        while total < n:
            # Pull the next batch of (up to) four characters from the file.
            chunk = [''] * 4
            got = read4(chunk)
            if not got:
                break  # end of file reached
            # Copy into the caller's buffer, never exceeding the limit n.
            for ch in chunk[:min(got, n - total)]:
                buf[total] = ch
                total += 1
        return total
| {
"content_hash": "79d6578284e2626c5aa13b5a55c6da94",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 59,
"avg_line_length": 29.136363636363637,
"alnum_prop": 0.43993759750390016,
"repo_name": "rx2130/Leetcode",
"id": "8ac6e61eb3f6ed74cba9c989676f40a47a97628c",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/157 Read N Characters Given Read4.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "277012"
}
],
"symlink_target": ""
} |
class PriorityQueue():
    """Binary-heap priority queue over items exposing a ``priority`` attribute.

    The previous implementation was non-functional: ``__init__`` indexed an
    empty list (and referenced the misspelled name ``maxheap``), the sift
    helpers ignored ``self`` and were called unqualified, ``pop`` called
    ``list.remove`` without an argument, and the bounds checks indexed past
    the end of the heap.  This rewrite keeps the public interface
    (``maxHeap`` flag, ``insert``, ``pop``, ``peek``) and makes it work.
    """

    def __init__(self, maxHeap=True):
        # Index 0 is an unused sentinel so the children of node i live at
        # 2*i and 2*i + 1, matching the original indexing scheme.
        self.heap = [None]
        self.heapsize = 1  # sentinel included, as in the original
        self.maxheap = maxHeap

    def _higher(self, i, j):
        # True when the item at index i belongs above the item at index j.
        if self.maxheap:
            return self.heap[i].priority > self.heap[j].priority
        return self.heap[i].priority < self.heap[j].priority

    def _sift_up(self, i):
        # Restore heap order along the path from leaf i up to the root.
        while i > 1 and self._higher(i, i // 2):
            self.heap[i], self.heap[i // 2] = self.heap[i // 2], self.heap[i]
            i //= 2

    def _sift_down(self, i):
        # Restore heap order from node i downward.
        last = len(self.heap) - 1
        while True:
            left, right, best = 2 * i, 2 * i + 1, i
            if left <= last and self._higher(left, best):
                best = left
            if right <= last and self._higher(right, best):
                best = right
            if best == i:
                return
            self.heap[i], self.heap[best] = self.heap[best], self.heap[i]
            i = best

    def insert(self, item):
        """Add *item* (must expose a ``priority`` attribute)."""
        self.heap.append(item)
        self.heapsize += 1
        self._sift_up(len(self.heap) - 1)

    def pop(self):
        """Remove and return the best-priority item, or None when empty."""
        if len(self.heap) <= 1:
            return None
        top = self.heap[1]
        tail = self.heap.pop()
        self.heapsize -= 1
        if len(self.heap) > 1:
            self.heap[1] = tail
            self._sift_down(1)
        return top

    def peek(self):
        """Return the best-priority item without removing it, or None."""
        if len(self.heap) <= 1:
            return None
        return self.heap[1]
| {
"content_hash": "9174b89df649570c99d33924f627aa14",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 71,
"avg_line_length": 32.29824561403509,
"alnum_prop": 0.5258011950027159,
"repo_name": "CharlesGust/data_structures",
"id": "3730ba23f9cba34dae7bbc3bfc298f200da434e4",
"size": "1841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "priorityq/priority_queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76735"
}
],
"symlink_target": ""
} |
"""Tokenizer that uses a Hub module."""
from tensorflow.python.eager import monitoring
from tensorflow_text.python.ops import hub_module_splitter
from tensorflow_text.python.ops.tokenization import TokenizerWithOffsets
# Monitoring counter for HubModuleTokenizer instantiations.
# NOTE(review): nothing in this chunk increments it — confirm it is bumped
# wherever tokenizers are actually created.
_tf_text_hub_module_tokenizer_create_counter = monitoring.Counter(
    '/nlx/api/python/hub_module_tokenizer_create_counter',
    'Counter for number of HubModuleTokenizers created in Python.')
class HubModuleTokenizer(TokenizerWithOffsets):
    r"""Tokenizer backed by a TF Hub segmentation module.

    This is a thin wrapper around an internal HubModuleSplitter that exposes
    the identical functionality under 'token'-based method names: tokenize()
    instead of the more general (and less informative) split().

    Example:
    >>> HUB_MODULE = "https://tfhub.dev/google/zh_segmentation/1"
    >>> segmenter = HubModuleTokenizer(HUB_MODULE)
    >>> segmenter.tokenize(["新华社北京"])
    <tf.RaggedTensor [[b'\xe6\x96\xb0\xe5\x8d\x8e\xe7\xa4\xbe',
    b'\xe5\x8c\x97\xe4\xba\xac']]>

    The tokens can also be requested together with their byte offsets:
    >>> HUB_MODULE = "https://tfhub.dev/google/zh_segmentation/1"
    >>> segmenter = HubModuleTokenizer(HUB_MODULE)
    >>> pieces, starts, ends = segmenter.tokenize_with_offsets(["新华社北京"])
    >>> print("pieces: %s starts: %s ends: %s" % (pieces, starts, ends))
    pieces: <tf.RaggedTensor [[b'\xe6\x96\xb0\xe5\x8d\x8e\xe7\xa4\xbe',
    b'\xe5\x8c\x97\xe4\xba\xac']]>
    starts: <tf.RaggedTensor [[0, 9]]>
    ends: <tf.RaggedTensor [[9, 15]]>
    """

    def __init__(self, hub_module_handle):
        """Initializes a new HubModuleTokenizer instance.

        Args:
          hub_module_handle: A string handle accepted by hub.load(); e.g. a
            local path to a directory containing a module, or a handle to a
            module uploaded to https://tfhub.dev
        """
        super(HubModuleTokenizer, self).__init__()
        splitter = hub_module_splitter.HubModuleSplitter(hub_module_handle)
        self._splitter = splitter

    def tokenize_with_offsets(self, input_strs):
        """Tokenizes UTF-8 strings into words with [start, end) byte offsets.

        Args:
          input_strs: An N-dimensional `Tensor` or `RaggedTensor` of UTF-8
            strings.

        Returns:
          A tuple `(tokens, start_offsets, end_offsets)` where `tokens` is a
          `RaggedTensor` of token strings, and `start_offsets`/`end_offsets`
          are `RaggedTensor`s of int64 byte offsets delimiting each token
          within its source string (end offsets point one past the token).
        """
        result = self._splitter.split_with_offsets(input_strs)
        return result

    def tokenize(self, input_strs):
        """Tokenizes UTF-8 strings into words.

        Args:
          input_strs: An N-dimensional `Tensor` or `RaggedTensor` of UTF-8
            strings.

        Returns:
          A `RaggedTensor` of segmented text: the input shape plus one added
          ragged dimension holding each string's tokens.
        """
        result = self._splitter.split(input_strs)
        return result
| {
"content_hash": "a63c11adb82e851bb0169751f3f32b41",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 80,
"avg_line_length": 41.7125,
"alnum_prop": 0.6745579862151633,
"repo_name": "nwjs/chromium.src",
"id": "17dc98303e5f74a809f39036b1db23b75a430d7b",
"size": "3953",
"binary": false,
"copies": "9",
"ref": "refs/heads/nw70",
"path": "third_party/tensorflow-text/src/tensorflow_text/python/ops/hub_module_tokenizer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from pyshark import LiveCapture
class RemoteCapture(LiveCapture):
    """A capture which is performed on a remote machine which has an rpcapd service running."""

    def __init__(self, remote_host, remote_interface, remote_port=2002, bpf_filter=None, only_summaries=False,
                 decryption_key=None, encryption_type='wpa-pwk', decode_as=None,
                 disable_protocol=None, tshark_path=None, override_prefs=None, eventloop=None, debug=False):
        """Creates a new remote capture that connects to a machine running rpcapd.

        Use the sniff() method to get packets.
        Note: rpcapd on the remote machine must run in null-authentication
        mode (-n); be warned that the traffic is unencrypted!

        :param remote_host: The remote host to capture on (IP or hostname); should be running rpcapd.
        :param remote_interface: The true interface name on the remote machine
            (on Windows this is \\Device\\NPF_..., not the display name).
        :param remote_port: The remote port the rpcapd service is listening on.
        :param bpf_filter: A BPF (tcpdump) filter to apply on the cap before reading.
        :param only_summaries: Only produce packet summaries — much faster, but very little detail.
        :param decryption_key: Key used to encrypt and decrypt captured traffic.
        :param encryption_type: Encryption standard of the captured traffic:
            'WEP', 'WPA-PWD' or 'WPA-PWK' (default).
        :param decode_as: Dict of {decode_criterion_string: decode_as_protocol}
            forcing tshark to decode traffic it otherwise would not, e.g.
            {'tcp.port==8888': 'http'}. See the tshark documentation.
        :param tshark_path: Path of the tshark binary.
        :param override_prefs: Dict of tshark preferences to override,
            {PREFERENCE_NAME: PREFERENCE_VALUE, ...}.
        :param disable_protocol: Tells tshark to remove a dissector for a specific protocol.
        """
        remote_url = 'rpcap://%s:%d/%s' % (remote_host, remote_port, remote_interface)
        super(RemoteCapture, self).__init__(
            remote_url,
            bpf_filter=bpf_filter,
            only_summaries=only_summaries,
            decryption_key=decryption_key,
            encryption_type=encryption_type,
            tshark_path=tshark_path,
            decode_as=decode_as,
            disable_protocol=disable_protocol,
            override_prefs=override_prefs,
            eventloop=eventloop,
            debug=debug)
| {
"content_hash": "d5f21b6522d8552541944aba37644a8a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 125,
"avg_line_length": 75.27027027027027,
"alnum_prop": 0.6664272890484739,
"repo_name": "KimiNewt/pyshark",
"id": "eeb390441c40bd3f4084f3d58dc2d0216e54b651",
"size": "2785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyshark/capture/remote_capture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127807"
}
],
"symlink_target": ""
} |
import time
from ethereum.utils import sha3
import rlp
from rlp.utils import encode_hex
from ethereum import processblock
from synchronizer import Synchronizer
from ethereum.slogging import get_logger
from ethereum.chain import Chain
from ethereum.blocks import Block, VerificationFailed
from ethereum.transactions import Transaction
from devp2p.service import WiredService
import eth_protocol
import gevent
import gevent.lock
from gevent.queue import Queue
log = get_logger('eth.chainservice')


# Monkey-patch transaction application so the gevent scheduler gets a chance
# to run between transaction replays.
processblock_apply_transaction = processblock.apply_transaction


def apply_transaction(block, tx):
    """Yield briefly to gevent, then delegate to the original implementation."""
    log.debug('apply_transaction ctx switch', at=time.time())
    gevent.sleep(0.001)
    return processblock_apply_transaction(block, tx)

processblock.apply_transaction = apply_transaction

rlp_hash_hex = lambda data: encode_hex(sha3(rlp.encode(data)))
class DuplicatesFilter(object):
    """Bounded membership filter with least-recently-seen eviction.

    Remembers up to ``max_items`` recently seen values; ``known`` reports
    whether a value was already seen and refreshes its recency.
    """

    def __init__(self, max_items=128):
        self.max_items = max_items
        self.filter = list()

    def known(self, data):
        """Return True if *data* was seen before; record/refresh it either way.

        Bug fixed: on a hit the old code rotated the *oldest* entry to the
        back (``filter.append(filter.pop(0))``) instead of refreshing the
        matched entry, corrupting the recency order.
        """
        if data in self.filter:
            # Refresh the recency of the matched entry.
            self.filter.remove(data)
            self.filter.append(data)
            return True
        self.filter.append(data)
        if len(self.filter) > self.max_items:
            self.filter.pop(0)  # evict the least recently seen value
        return False
class ChainService(WiredService):

    """
    Manages the chain and requests to it.
    """
    # required by BaseService
    name = 'chain'
    default_config = dict(eth=dict(network_id=0))
    # required by WiredService
    wire_protocol = eth_protocol.ETHProtocol  # create for each peer
    # initialized after configure:
    chain = None
    genesis = None
    synchronizer = None
    config = None
    # bounds on the pending block / transaction queues (see __init__)
    block_queue_size = 1024
    transaction_queue_size = 1024
    # cumulative gas / wall-clock counters maintained by gpsec()
    processed_gas = 0
    processed_elapsed = 0
def __init__(self, app):
    """Wire the service into *app*: db, chain, synchronizer and queues."""
    self.config = app.config
    self.db = app.services.db
    assert self.db is not None
    super(ChainService, self).__init__(app)
    log.info('initializing chain')
    coinbase = app.services.accounts.coinbase
    self.chain = Chain(self.db, new_head_cb=self._on_new_head, coinbase=coinbase)
    log.info('chain at', number=self.chain.head.number)
    self.synchronizer = Synchronizer(self, force_sync=None)
    self.block_queue = Queue(maxsize=self.block_queue_size)
    self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
    # _add_blocks runs in its own greenlet; this flag keeps at most one alive
    self.add_blocks_lock = False
    # serializes chain mutation between add_transaction and _add_blocks
    self.add_transaction_lock = gevent.lock.Semaphore()
    self.broadcast_filter = DuplicatesFilter()
    self.on_new_head_cbs = []
    self.on_new_head_candidate_cbs = []
@property
def is_syncing(self):
    """True while the synchronizer has a sync task in flight."""
    has_task = self.synchronizer.synctask is not None
    return has_task
def _on_new_head(self, block):
for cb in self.on_new_head_cbs:
cb(block)
self._on_new_head_candidate() # we implicitly have a new head_candidate
def _on_new_head_candidate(self):
for cb in self.on_new_head_candidate_cbs:
cb(self.chain.head_candidate)
def add_transaction(self, tx, origin=None):
    """Add *tx* to the head candidate and, on success, broadcast it.

    Bug fixed: the lock was acquired/released manually, so an exception
    from chain.add_transaction left the semaphore held forever; the
    context manager releases it on every path.
    """
    assert isinstance(tx, Transaction)
    log.debug('add_transaction', locked=self.add_transaction_lock.locked())
    with self.add_transaction_lock:
        success = self.chain.add_transaction(tx)
    if success:
        self._on_new_head_candidate()
        self.broadcast_transaction(tx, origin=origin)  # asap
def add_block(self, t_block, proto):
    "adds a block to the block_queue and spawns _add_block if not running"
    self.block_queue.put((t_block, proto))  # blocks if full
    if self.add_blocks_lock:
        return
    self.add_blocks_lock = True  # need to lock here (ctx switch is later)
    gevent.spawn(self._add_blocks)
def add_mined_block(self, block):
    """Insert a locally mined block and announce it to peers."""
    log.debug('adding mined block', block=block)
    assert block.check_pow()
    if not self.chain.add_block(block):
        return
    log.info('added', block=block, ts=time.time())
    assert block == self.chain.head
    self.broadcast_newblock(block, chain_difficulty=block.chain_difficulty())
def knows_block(self, block_hash):
    """True if the block is already in the chain or queued for processing."""
    if block_hash in self.chain:
        return True
    # check if queued or processed (idiomatic any() replaces the index loop)
    return any(t_block.header.hash == block_hash
               for t_block, _proto in self.block_queue.queue)
def _add_blocks(self):
    """Drain the block queue; runs in a dedicated greenlet.

    Blocks are peek()ed first so knows_block() still sees them while they
    are being processed, and only get()ed once handled.  add_blocks_lock
    ensures a single instance of this greenlet (see add_block);
    add_transaction_lock keeps transaction replay exclusive.
    """
    log.debug('add_blocks', qsize=self.block_queue.qsize(),
              add_tx_lock=self.add_transaction_lock.locked())
    assert self.add_blocks_lock is True
    self.add_transaction_lock.acquire()
    try:
        while not self.block_queue.empty():
            t_block, proto = self.block_queue.peek()  # peek: knows_block while processing
            # skip blocks we already have
            if t_block.header.hash in self.chain:
                log.warn('known block', block=t_block)
                self.block_queue.get()
                continue
            # cannot import without the parent present
            if t_block.header.prevhash not in self.chain:
                log.warn('missing parent', block=t_block)
                self.block_queue.get()
                continue
            # FIXME, this is also done in validation and in synchronizer for new_blocks
            if not t_block.header.check_pow():
                log.warn('invalid pow', block=t_block, FIXME='ban node')
                self.block_queue.get()
                continue
            try:  # deserialize
                st = time.time()
                block = t_block.to_block(db=self.chain.db)
                elapsed = time.time() - st
                log.debug('deserialized', elapsed='%.4fs' % elapsed,
                          gas_used=block.gas_used, gpsec=self.gpsec(block.gas_used, elapsed))
            except processblock.InvalidTransaction as e:
                log.warn('invalid transaction', block=t_block, error=e, FIXME='ban node')
                self.block_queue.get()
                continue
            except VerificationFailed as e:
                log.warn('verification failed', error=e, FIXME='ban node')
                self.block_queue.get()
                continue
            if self.chain.add_block(block):
                log.info('added', block=block, ts=time.time())
            self.block_queue.get()  # remove block from queue (we peeked only)
            # yield to the scheduler between blocks
            gevent.sleep(0.001)
    finally:
        self.add_blocks_lock = False
        self.add_transaction_lock.release()
def gpsec(self, gas_spent=0, elapsed=0):
    """Accumulate counters and return cumulative gas-per-second throughput."""
    self.processed_gas += gas_spent
    self.processed_elapsed += elapsed
    # the epsilon avoids division by zero before any time has elapsed
    rate = self.processed_gas / (0.001 + self.processed_elapsed)
    return int(rate)
def broadcast_newblock(self, block, chain_difficulty=None, origin=None):
    """Relay *block* to all peers except the one it came from."""
    if not chain_difficulty:
        # difficulty can only be derived for blocks we already have
        assert block.hash in self.chain
        chain_difficulty = block.chain_difficulty()
    assert isinstance(block, (eth_protocol.TransientBlock, Block))
    if self.broadcast_filter.known(block.header.hash):
        log.debug('already broadcasted block')
        return
    log.debug('broadcasting newblock', origin=origin)
    exclude = [origin.peer] if origin else []
    bcast = self.app.services.peermanager.broadcast
    bcast(eth_protocol.ETHProtocol, 'newblock', args=(block, chain_difficulty),
          exclude_peers=exclude)
def broadcast_transaction(self, tx, origin=None):
    """Relay *tx* to all peers except the one it came from."""
    assert isinstance(tx, Transaction)
    if self.broadcast_filter.known(tx.hash):
        log.debug('already broadcasted tx')
        return
    log.debug('broadcasting tx', origin=origin)
    exclude = [origin.peer] if origin else []
    bcast = self.app.services.peermanager.broadcast
    bcast(eth_protocol.ETHProtocol, 'transactions', args=(tx,),
          exclude_peers=exclude)
# wire protocol receivers ###########
def on_wire_protocol_start(self, proto):
    """Register receive callbacks on a fresh peer and send our status."""
    log.debug('on_wire_protocol_start', proto=proto)
    assert isinstance(proto, self.wire_protocol)
    # register callbacks
    proto.receive_status_callbacks.append(self.on_receive_status)
    proto.receive_transactions_callbacks.append(self.on_receive_transactions)
    proto.receive_getblockhashes_callbacks.append(self.on_receive_getblockhashes)
    proto.receive_blockhashes_callbacks.append(self.on_receive_blockhashes)
    proto.receive_getblocks_callbacks.append(self.on_receive_getblocks)
    proto.receive_blocks_callbacks.append(self.on_receive_blocks)
    proto.receive_newblock_callbacks.append(self.on_receive_newblock)
    # send status
    head = self.chain.head
    proto.send_status(chain_difficulty=head.chain_difficulty(), chain_head_hash=head.hash,
                      genesis_hash=self.chain.genesis.hash)
def on_wire_protocol_stop(self, proto):
    # Called by devp2p when an ETH protocol session ends; logging only.
    assert isinstance(proto, self.wire_protocol)
    log.debug('on_wire_protocol_stop', proto=proto)
def on_receive_status(self, proto, eth_version, network_id, chain_difficulty, chain_head_hash,
                      genesis_hash):
    """Handle a peer's status message.

    Validates the protocol version, network id, and genesis hash (raising
    ETHProtocolError on mismatch), then hands the peer's head to the
    synchronizer and pushes our pending transactions to the peer.
    """
    log.debug('status received', proto=proto, eth_version=eth_version)
    assert eth_version == proto.version, (eth_version, proto.version)
    # a differing network id means the peer is on another network entirely
    if network_id != self.config['eth'].get('network_id', proto.network_id):
        log.warn("invalid network id", remote_network_id=network_id,
                 expected_network_id=self.config['eth'].get('network_id', proto.network_id))
        raise eth_protocol.ETHProtocolError('wrong network_id')
    # check genesis
    if genesis_hash != self.chain.genesis.hash:
        log.warn("invalid genesis hash", remote_id=proto, genesis=genesis_hash.encode('hex'))
        raise eth_protocol.ETHProtocolError('wrong genesis block')
    # request chain: let the synchronizer decide whether to download from this peer
    self.synchronizer.receive_status(proto, chain_head_hash, chain_difficulty)
    # send transactions we currently hold so the new peer can relay them
    transactions = self.chain.get_transactions()
    if transactions:
        log.debug("sending transactions", remote_id=proto)
        proto.send_transactions(*transactions)
# transactions
def on_receive_transactions(self, proto, transactions):
    """Add each transaction received from a remote peer to our pool."""
    log.debug('remote_transactions_received', count=len(transactions), remote_id=proto)
    for transaction in transactions:
        self.add_transaction(transaction, origin=proto)
# blockhashes ###########
def on_receive_getblockhashes(self, proto, child_block_hash, count):
    """Serve a peer's getblockhashes request.

    Walks parent links backwards starting from child_block_hash and sends
    up to `count` (capped at the protocol maximum) ancestor hashes.
    """
    log.debug("handle_get_blockhashes", count=count, block_hash=encode_hex(child_block_hash))
    # never exceed the protocol's advertised maximum batch size
    max_hashes = min(count, self.wire_protocol.max_getblockhashes_count)
    found = []
    if child_block_hash not in self.chain:
        # we don't know the starting block: reply with an empty batch
        log.debug("unknown block")
        proto.send_blockhashes(*[])
        return
    last = child_block_hash
    while len(found) < max_hashes:
        try:
            # lazily decode the stored block; presumably [0][0] is the
            # header's parent hash (one step towards genesis) -- TODO
            # confirm against the block RLP layout
            last = rlp.decode_lazy(self.chain.db.get(last))[0][0]
        except KeyError:
            # this can happen if we started a chain download, which did not complete
            # should not happen if the hash is part of the canonical chain
            log.warn('KeyError in getblockhashes', hash=last)
            break
        if last:
            found.append(last)
        else:
            # empty parent hash: we walked off the end of the chain
            break
    log.debug("sending: found block_hashes", count=len(found))
    proto.send_blockhashes(*found)
def on_receive_blockhashes(self, proto, blockhashes):
    """Forward a batch of remote block hashes to the synchronizer."""
    if not blockhashes:
        log.debug("recv 0 remote block hashes, signifying genesis block")
    else:
        log.debug("on_receive_blockhashes", count=len(blockhashes), remote_id=proto,
                  first=encode_hex(blockhashes[0]), last=encode_hex(blockhashes[-1]))
    self.synchronizer.receive_blockhashes(proto, blockhashes)
# blocks ################
def on_receive_getblocks(self, proto, blockhashes):
    """Look up the requested blocks by hash and send back the ones we have."""
    log.debug("on_receive_getblocks", count=len(blockhashes))
    limit = self.wire_protocol.max_getblocks_count
    blocks = []
    for block_hash in blockhashes[:limit]:
        try:
            blocks.append(self.chain.db.get(block_hash))
        except KeyError:
            log.debug("unknown block requested", block_hash=encode_hex(block_hash))
    if blocks:
        log.debug("found", count=len(blocks))
        proto.send_blocks(*blocks)
def on_receive_blocks(self, proto, transient_blocks):
    """Hand a batch of downloaded blocks to the synchronizer."""
    if transient_blocks:
        highest = max(tb.header.number for tb in transient_blocks)
    else:
        highest = 0
    log.debug("recv blocks", count=len(transient_blocks), remote_id=proto,
              highest_number=highest)
    if transient_blocks:
        self.synchronizer.receive_blocks(proto, transient_blocks)
def on_receive_newblock(self, proto, block, chain_difficulty):
    """Hand a freshly announced block to the synchronizer."""
    log.debug("recv newblock", block=block, remote_id=proto)
    self.synchronizer.receive_newblock(proto, block, chain_difficulty)
| {
"content_hash": "b7571fe90a632110563b15fad3e9d290",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 98,
"avg_line_length": 41.11926605504587,
"alnum_prop": 0.6212256433139968,
"repo_name": "heikoheiko/pyethapp",
"id": "b0d5e977c9d8a2a077e5cf4d3ae61592d87c9317",
"size": "13503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyethapp/eth_service.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "125044"
}
],
"symlink_target": ""
} |
"""The standard python Queue.Queue class has the join() method for
blocking until all work is done on the queue (signaled by the
task_done() method). Unfortuntely, if you want to stop early
(e.g. due to a fatal error), there is no way to do this.
This is problematic with multi-threading programs.
To address this, we subclass from Queue.Queue and add an abort_request()
method. If called, this will set a flag on the queue. It will wake any
threads blocked on the queue and cause any queue calls to throw
a AbortRequested exception.
Unfortunately, we have to copy some code from the base class, as there's
no good way to include our subclass code in the same lock aquisition.
We also provide a WakeableQueueWorker class which subclasses threading.Thread
and provides all the control flow for workers and the master thread.
"""
import Queue as Q
import threading
from time import time as _time
import time
import sys
class AbortRequested(Exception):
    """Raised by queue operations after some thread has called the
    abort_request() method on the queue."""
    pass
class Queue(Q.Queue):
    """Subclass of Queue.Queue with abort_request() functionality.

    Once abort_request() has been called, any blocked or subsequent
    put()/get()/task_done()/join() call raises AbortRequested.  The
    blocking methods are copied from the Python 2 Queue.Queue base class
    with abort checks woven into the same lock acquisition.
    """
    def __init__(self, maxsize=0):
        Q.Queue.__init__(self, maxsize=maxsize)
        # set under all_tasks_done by abort_request(); checked by every op
        self.abort_requested = False
        # instrumentation: cumulative seconds consumers spent blocked in get()
        self.total_wait_time = 0.0
        # instrumentation: number of successful get() calls
        self.request_count = 0
        # instrumentation: number of get() calls that had to block
        self.wait_count = 0
    def abort_request(self):
        """Set the abort flag and wake every thread blocked on the queue.

        All three condition variables share the base class's single mutex,
        so holding all_tasks_done also lets us notify not_empty/not_full.
        """
        with self.all_tasks_done:
            if not self.abort_requested:
                self.abort_requested = True
                self.all_tasks_done.notify_all()
                self.not_empty.notify_all()
                self.not_full.notify_all()
    def task_done(self):
        """Like Queue.task_done(), but raises AbortRequested if an abort
        has been requested."""
        self.all_tasks_done.acquire()
        try:
            if self.abort_requested:
                raise AbortRequested
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                if unfinished < 0:
                    raise ValueError('task_done() called too many times')
                # all work accounted for: wake any join()ers
                self.all_tasks_done.notify_all()
            self.unfinished_tasks = unfinished
        finally:
            self.all_tasks_done.release()
    def join(self):
        """Block until all items are processed, or raise AbortRequested if
        an abort is requested while (or before) waiting."""
        self.all_tasks_done.acquire()
        try:
            while self.unfinished_tasks and not self.abort_requested:
                self.all_tasks_done.wait()
            if self.abort_requested:
                raise AbortRequested
        finally:
            self.all_tasks_done.release()
    def put(self, item, block=True, timeout=None):
        """Like Queue.put(), but wakes up and raises AbortRequested when an
        abort is requested while blocked on a full queue."""
        self.not_full.acquire()
        try:
            if self.abort_requested:
                raise AbortRequested
            if self.maxsize > 0:
                if not block:
                    if self._qsize() == self.maxsize:
                        raise Q.Full
                elif timeout is None:
                    # wait until space frees up or someone aborts
                    while self._qsize() == self.maxsize and (not self.abort_requested):
                        self.not_full.wait()
                    if self.abort_requested:
                        raise AbortRequested
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    endtime = _time() + timeout
                    while self._qsize() == self.maxsize and (not self.abort_requested):
                        remaining = endtime - _time()
                        if remaining <= 0.0:
                            raise Q.Full
                        self.not_full.wait(remaining)
                    if self.abort_requested:
                        raise AbortRequested
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
        finally:
            self.not_full.release()
    def put_multiple(self, items):
        """Append several items under a single lock acquisition.

        NOTE(review): this bypasses the maxsize bound and the _put() hook,
        and wakes only one consumer via notify() even when adding many
        items -- confirm this is intended.
        """
        self.not_full.acquire()
        try:
            if self.abort_requested:
                raise AbortRequested
            self.queue.extend(items)
            self.unfinished_tasks += len(items)
            self.not_empty.notify()
        finally:
            self.not_full.release()
    def get(self, block=True, timeout=None):
        """Like Queue.get(), but wakes up and raises AbortRequested when an
        abort is requested while blocked on an empty queue.  Also records
        wait-time statistics (total_wait_time, wait_count, request_count).
        """
        self.not_empty.acquire()
        # tracks whether this call had to block at all (for the counters)
        waited = False
        try:
            if self.abort_requested:
                raise AbortRequested
            if not block:
                if not self._qsize():
                    raise Q.Empty
            elif timeout is None:
                while not self._qsize() and (not self.abort_requested):
                    wait_start = _time()
                    self.not_empty.wait()
                    self.total_wait_time += (_time() - wait_start)
                    waited = True
                if self.abort_requested:
                    raise AbortRequested
                if waited:
                    self.wait_count += 1
            elif timeout < 0:
                raise ValueError("'timeout' must be a non-negative number")
            else:
                endtime = _time() + timeout
                while not self._qsize() and (not self.abort_requested):
                    waited = True
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Q.Empty
                    self.not_empty.wait(remaining)
                if self.abort_requested:
                    raise AbortRequested
                if waited:
                    self.wait_count += 1
                    # approximate: time actually waited out of the timeout
                    self.total_wait_time += max(float(timeout) - remaining, 0.0)
            item = self._get()
            self.request_count += 1
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()
class WorkerStatus(object):
    """Enumeration of the lifecycle states a WakeableQueueWorker can be in."""
    NOT_STARTED = 0
    RUNNING = 1
    STOPPED = 2
    ABORT_REQUESTED = 3
    ABORTED = 4
    INTERNAL_ERROR = 5
class WakeableQueueWorker(threading.Thread):
    """A generic Worker thread class that functions with the
    WakeableQueue. It processes batches of work and tells the
    queue when done via the task_done() method. When a stop_msg
    is received, it finishes up and stops. If an exception is
    thrown, it catches the exception, does an abort_request()
    on the queue, tidies up, and stops. If another thread does
    an abort_request(), it tidies up and stops.

    NOTE: uses Python 2 exception syntax ('except Exception, e').
    """
    def __init__(self, worker_id, queue, stop_msg, logger):
        threading.Thread.__init__(self, name=worker_id)
        self.worker_id = worker_id
        self.queue = queue
        self.stop_msg = stop_msg  # if we get this message, we stop
        self.logger = logger
        # lifecycle state, one of the WorkerStatus constants
        self.status = WorkerStatus.NOT_STARTED
        self.exc_info = None  # if there's a fatal error put it here
    def process_batch(self, data):
        """Process a batch from the queue (override in subclasses)."""
        pass
    def handle_abort_request(self):
        """Put any cleanup needed if we are aborting here (override)."""
        pass
    def finish_processing(self):
        """Called before we stop, when all the work was processed successfully."""
        pass
    def run(self):
        """Main worker loop.

        Pulls batches until stop_msg arrives.  On AbortRequested (raised
        by the queue after another thread aborted), tidy up and mark
        ABORTED.  On any other exception, record it in exc_info, tidy up,
        abort the queue so everyone else stops, and mark ABORT_REQUESTED.
        """
        self.status = WorkerStatus.RUNNING
        self.logger.info("Worker %s running" % self.worker_id)
        try:
            data = self.queue.get(block=True)
            while True:
                if data==self.stop_msg:
                    self.logger.info("Worker %s received stop message" % self.worker_id)
                    break
                self.process_batch(data)
                # account for the batch we just finished
                self.queue.task_done()
                data = self.queue.get(block=True)
        except AbortRequested:
            # some other thread asked everyone to stop
            self.logger.info("Worker %s got AbortRequested exception" % self.worker_id)
            try:
                self.handle_abort_request()
            except Exception, e:
                self.logger.exception("Worker %s: Got an exception in handle_abort_request(): %s(%s)" %
                                      (self.worker_id, e.__class__.__name__, unicode(e)))
            self.status = WorkerStatus.ABORTED
            return
        except Exception, e:
            # fatal error in this worker: remember it for the master thread
            # and abort the queue so the other workers stop too
            self.exc_info = sys.exc_info()
            self.logger.exception("Got a fatal exception in worker thread %s: %s(%s)" %
                                  (self.worker_id, e.__class__.__name__, unicode(e)))
            try:
                self.handle_abort_request()
            except Exception, e:
                self.logger.exception("Worker %s: Got an exception in handle_abort_request(): %s(%s)" %
                                      (self.worker_id, e.__class__.__name__, unicode(e)))
            self.status = WorkerStatus.ABORT_REQUESTED
            self.queue.abort_request()
            return
        try:
            self.finish_processing()
        except Exception, e:
            self.logger.exception("Worker %s: Got an exception in finish_processing(): %s(%s)" %
                                  (self.worker_id, e.__class__.__name__, unicode(e)))
            self.status = WorkerStatus.INTERNAL_ERROR
            self.logger.exception("Worker %s set status to INTERNAL_ERROR" % self.worker_id)
            self.queue.task_done()  # the master thread should pick up the error status
            raise
        self.status = WorkerStatus.STOPPED
        self.logger.info("worker %s: calling task_done() for the exit message" % self.worker_id)
        self.queue.task_done()  # let the master know we are done
        self.logger.info("Worker %s shutting down normally " % self.worker_id)
    @staticmethod
    def run_workers(workers, queue, stop_msg, logger):
        """This is a function to be called from the master thread to start the
        workers after the queue has been primed, wait for them to finish the
        work, and then stop them. It also handles fatal errors as well.
        It re-raises AbortRequested if an abort was requested.
        """
        logger.info("Starting workers...")
        for w in workers:
            w.start()
        try:
            logger.info("Waiting for worker threads to complete...")
            queue.join()
        except AbortRequested:
            logger.error("Got an abort request from a worker, waiting for others to shut down...")
            # poll until every worker has acknowledged the abort
            while True:
                aborted_cnt = 0
                for w in workers:
                    if w.status != WorkerStatus.ABORTED and \
                       w.status != WorkerStatus.ABORT_REQUESTED:
                        logger.info("Worker %s has status %s" % (w.worker_id, w.status))
                    else:
                        aborted_cnt += 1
                if aborted_cnt == len(workers):
                    break
                else:
                    time.sleep(2.0)
            logger.info("All workers now stopped from abort request")
            raise
        assert queue.empty(), "Workers stopped, but there was still %d messages in queue" %\
            queue.qsize()
        logger.info("All work is complete, sending stop messages...")
        # one stop message per worker; each worker consumes exactly one
        for w in workers:
            queue.put(stop_msg)
        logger.info("Waiting for workers to stop")
        queue.join()
        for w in workers:
            assert w.status == WorkerStatus.STOPPED, \
                "Worker %s not stopped, status was %s" % \
                (w.worker_id, w.status)
        logger.info("All workers stopped")
| {
"content_hash": "271d3184cf9be77e9df5b14ce6110eb9",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 104,
"avg_line_length": 39.207612456747405,
"alnum_prop": 0.5469067160886065,
"repo_name": "quaddra/engage-utils",
"id": "cd416571544e923d638a5a30060949ac9501518b",
"size": "11331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engage_utils/wakeable_queue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "83"
},
{
"name": "Python",
"bytes": "295243"
}
],
"symlink_target": ""
} |
def can_build(env, platform):
    """This module can be built on every platform."""
    return True
def configure(env):
    """No build-environment configuration is needed for this module."""
    pass
def get_doc_classes():
    """Return the names of the classes documented by this module."""
    return [
        "NoiseTexture",
        "OpenSimplexNoise",
    ]
def get_doc_path():
    """Return the module-relative directory holding the class docs."""
    return "doc_classes"
| {
"content_hash": "819cbcffdb87b579c46db4c28e379b19",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 29,
"avg_line_length": 12.941176470588236,
"alnum_prop": 0.5909090909090909,
"repo_name": "Faless/godot",
"id": "90b85dbd70d12440a62314485a569185141f383a",
"size": "220",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "modules/opensimplex/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AIDL",
"bytes": "1633"
},
{
"name": "C",
"bytes": "820090"
},
{
"name": "C#",
"bytes": "954253"
},
{
"name": "C++",
"bytes": "36679303"
},
{
"name": "CMake",
"bytes": "538"
},
{
"name": "GAP",
"bytes": "62"
},
{
"name": "GDScript",
"bytes": "59146"
},
{
"name": "GLSL",
"bytes": "802565"
},
{
"name": "Java",
"bytes": "512268"
},
{
"name": "JavaScript",
"bytes": "184665"
},
{
"name": "Kotlin",
"bytes": "16172"
},
{
"name": "Makefile",
"bytes": "1421"
},
{
"name": "Objective-C",
"bytes": "20550"
},
{
"name": "Objective-C++",
"bytes": "315906"
},
{
"name": "PowerShell",
"bytes": "2713"
},
{
"name": "Python",
"bytes": "427946"
},
{
"name": "Shell",
"bytes": "27482"
}
],
"symlink_target": ""
} |
"""Utility functions for tag databases."""
import re
from hashlib import sha256
class TaggedSeries(object):
    """A parsed tagged metric path: a metric name plus a tag -> value dict.

    Understands both the carbon format (``metric;tag=value;...``) and the
    openmetrics format (``metric{tag="value",...}``).
    """
    # characters that may never appear in a tag name / in a tag value
    prohibitedTagChars = ';!^='
    prohibitedValueChars = ';~'

    @classmethod
    def validateCharacters(cls, tag, value):
        """validate that there are no prohibited characters in given tag/value"""
        for char in cls.prohibitedTagChars:
            if char in tag:
                return False
        for char in cls.prohibitedValueChars:
            if char in value:
                return False
        return True

    @classmethod
    def parse(cls, path):
        """Parse *path* in either supported format; return a TaggedSeries."""
        # if path is in openmetrics format: metric{tag="value",...}
        if path[-2:] == '"}' and '{' in path:
            return cls.parse_openmetrics(path)
        # path is a carbon path with optional tags: metric;tag=value;...
        return cls.parse_carbon(path)

    @classmethod
    def parse_openmetrics(cls, path):
        """parse a path in openmetrics format: metric{tag="value",...}
        https://github.com/RichiH/OpenMetrics
        """
        # FIX: split on the *first* brace only (maxsplit=1).  With
        # maxsplit=2, a '{' inside a quoted tag value produced a third
        # field and made this 2-tuple unpacking raise ValueError.
        (metric, rawtags) = path[0:-1].split('{', 1)
        if not metric:
            raise Exception('Cannot parse path %s, no metric found' % path)
        tags = {}
        while len(rawtags) > 0:
            # tag="value" terminated by ',' or end of string; values may
            # contain backslash-escaped '"' and '\'.
            # FIX: the separator group is the non-capturing '(?:,|$)';
            # the previous '(:?,|$)' was a typo that made it a capturing
            # group matching an optional literal colon before the comma.
            m = re.match(r'([^=]+)="((?:[\\]["\\]|[^"\\])+)"(?:,|$)', rawtags)
            if not m:
                raise Exception('Cannot parse path %s, invalid segment %s' % (path, rawtags))
            tag = m.group(1)
            # unescape \" and \\ inside the quoted value
            value = m.group(2).replace(r'\"', '"').replace(r'\\', '\\')
            if not cls.validateCharacters(tag, value):
                raise Exception('Tag/Value contains invalid characters: %s/%s' % (tag, value))
            tags[tag] = value
            rawtags = rawtags[len(m.group(0)):]
        tags['name'] = metric
        return cls(metric, tags)

    @classmethod
    def parse_carbon(cls, path):
        """parse a carbon path with optional tags: metric;tag=value;..."""
        segments = path.split(';')
        metric = segments[0]
        if not metric:
            raise Exception('Cannot parse path %s, no metric found' % path)
        tags = {}
        for segment in segments[1:]:
            tag = segment.split('=', 1)
            if len(tag) != 2 or not tag[0]:
                raise Exception('Cannot parse path %s, invalid segment %s' % (path, segment))
            if not cls.validateCharacters(*tag):
                raise Exception('Tag/Value contains invalid characters: %s/%s' % (tag[0], tag[1]))
            tags[tag[0]] = tag[1]
        tags['name'] = metric
        return cls(metric, tags)

    @staticmethod
    def format(tags):
        """Serialize *tags* canonically: name first, then ';tag=value'
        segments sorted by tag."""
        return tags.get('name', '') + ''.join(sorted([
            ';%s=%s' % (tag, value)
            for tag, value in tags.items()
            if tag != 'name'
        ]))

    @staticmethod
    def encode(metric, sep='.', hash_only=False):
        """
        Helper function to encode tagged series for storage in whisper etc

        When tagged series are detected, they are stored in a separate hierarchy of folders
        under a top-level _tagged folder, where subfolders are created by using the first 3 hex
        digits of the sha256 hash of the tagged metric path (4096 possible folders), and
        second-level subfolders are based on the following 3 hex digits (another 4096 possible
        folders) for a total of 4096^2 possible subfolders. The metric files themselves are
        created with any . in the metric path replaced with _DOT_, to avoid any issues where
        metrics, tags or values containing a '.' would end up creating further subfolders. This
        helper is used by both whisper and ceres, but by design each carbon database and
        graphite-web finder is responsible for handling its own encoding so that different
        backends can create their own schemes if desired.

        The hash_only parameter can be set to True to use the hash as the filename instead of a
        human-readable name. This avoids issues with filename length restrictions, at the
        expense of being unable to decode the filename and determine the original metric name.

        A concrete example:

        .. code-block:: none

          some.metric;tag1=value2;tag2=value.2

          with sha256 hash starting effaae would be stored in:

          _tagged/eff/aae/some_DOT_metric;tag1=value2;tag2=value_DOT_2.wsp (whisper)
          _tagged/eff/aae/some_DOT_metric;tag1=value2;tag2=value_DOT_2 (ceres)
        """
        if ';' in metric:
            metric_hash = sha256(metric.encode('utf8')).hexdigest()
            return sep.join([
                '_tagged',
                metric_hash[0:3],
                metric_hash[3:6],
                metric_hash if hash_only else metric.replace('.', '_DOT_')
            ])
        # metric isn't tagged, just replace dots with the separator and trim any leading separator
        return metric.replace('.', sep).lstrip(sep)

    @staticmethod
    def decode(path, sep='.'):
        """
        Helper function to decode tagged series from storage in whisper etc
        """
        if path.startswith('_tagged'):
            # drop the _tagged/xxx/yyy prefix and restore the dots
            return path.split(sep, 3)[-1].replace('_DOT_', '.')
        # metric isn't tagged, just replace the separator with dots
        return path.replace(sep, '.')

    def __init__(self, metric, tags, series_id=None):
        self.metric = metric   # bare metric name (without tags)
        self.tags = tags       # dict including the 'name' pseudo-tag
        self.id = series_id    # optional database id of this series
    @property
    def path(self):
        """Canonical carbon-format path for this series."""
        return self.__class__.format(self.tags)
| {
"content_hash": "b7479a0f1b8f8e2bf1e8ec017ce6d66d",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 98,
"avg_line_length": 32.438709677419354,
"alnum_prop": 0.6453858392999204,
"repo_name": "mcoolive/graphite-web",
"id": "632cd753f2ea8fe43d8f4ef7ba8a4645b68da2b7",
"size": "5028",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webapp/graphite/tags/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "149965"
},
{
"name": "HTML",
"bytes": "21170"
},
{
"name": "JavaScript",
"bytes": "1679914"
},
{
"name": "Perl",
"bytes": "857"
},
{
"name": "Python",
"bytes": "754908"
},
{
"name": "Ruby",
"bytes": "1950"
},
{
"name": "Shell",
"bytes": "1113"
}
],
"symlink_target": ""
} |
import logging
from neon import NervanaObject
from neon.backends import Autodiff
logger = logging.getLogger(__name__)
class Layer(NervanaObject):
    """
    Generic neural network layer; every concrete layer type derives
    from this class.

    Arguments:
        name (string): Name identifying this layer (in logs, etc.)
    """
    def __init__(self, name="layer"):
        super(Layer, self).__init__(name)
        self.outputs = None
        self.deltas = None
        self.has_params = False

    def __str__(self):
        """Return a printable representation of the layer."""
        return '{} {}'.format(self.__class__.__name__, self.name)

    def fprop(self, inputs, inference=False):
        """
        Apply the forward pass transformation; concrete layers must
        implement this.

        Arguments:
            inputs (Tensor): input data

        Returns:
            Tensor: output data
        """
        raise NotImplementedError

    def _fprop_inference(self, inputs):
        """
        Forward transformation specialized for inference only; may skip
        computation that is only needed for training.  Calling bprop
        afterwards is not valid.

        Arguments:
            inputs (Tensor): input data

        Returns:
            Tensor: output data
        """
        raise NotImplementedError

    def bprop(self, error):
        """
        Apply the backward pass transformation; concrete layers must
        implement this.

        Arguments:
            error (Tensor): deltas back propagated from the adjacent higher layer

        Returns:
            Tensor: deltas to propagate to the adjacent lower layer
        """
        raise NotImplementedError

    def serialize(self):
        """
        Get state parameters for this layer.

        Returns:
            the layer's parameters when it has any, otherwise None
        """
        return self.get_params() if self.has_params else None
class Pooling(Layer):
    """
    Pooling layer implementation.

    Arguments:
        fshape (int, tuple(int, int)): one or two dimensional shape
            of pooling window
        op (str, optional): pooling operation in [max, avg]. Defaults to "max"
        strides (int, dict, optional): strides to apply pooling window
            over. An int applies to both dimensions, or a dict with str_h
            and str_w applies to h and w dimensions distinctly. Defaults
            to str_w = str_h = None
        padding (int, dict, optional): padding to apply to edges of
            input. An int applies to both dimensions, or a dict with pad_h
            and pad_w applies to h and w dimensions distinctly. Defaults
            to pad_w = pad_h = None
        name (str, optional): layer name. Defaults to "PoolingLayer"
    """
    def __init__(self, fshape, op="max", strides={}, padding={},
                 name="PoolingLayer"):
        super(Pooling, self).__init__(name)
        # full 3D parameter dict handed to the backend's pool_layer();
        # the 2D shorthands below overwrite the relevant entries
        self.poolparams = {'str_h': None, 'str_w': None, 'str_d': None, 'str_j': None,
                           'pad_h': 0, 'pad_w': 0, 'pad_d': 0, 'pad_j': 0,
                           'J': 1, 'T': 1, 'D': 1, 'op': op}  # 3D parameters
        # keep args around in __dict__ for get_description
        self.op = op
        self.fshape = fshape
        self.strides = strides
        self.padding = padding
        # normalize scalar / tuple shorthands into backend parameter dicts
        if isinstance(fshape, int):
            fshape = {'R': fshape, 'S': fshape}
        elif isinstance(fshape, tuple):
            fshape = {'R': fshape[0], 'S': fshape[1]}
        if isinstance(strides, int):
            strides = {'str_h': strides, 'str_w': strides}
        if isinstance(padding, int):
            padding = {'pad_h': padding, 'pad_w': padding}
        for d in [fshape, strides, padding]:
            self.poolparams.update(d)
        # backend pooling layer object, created lazily on first fprop
        self.nglayer = None
    def init_buffers(self, inputs):
        """Lazily create the backend pooling layer and the output/delta
        buffers, sized from the first minibatch seen."""
        self.inputs = inputs
        if self.nglayer is None:
            # inputs must carry their (C, H, W) layer shape
            assert hasattr(self.inputs, 'lshape')
            self.poolparams['C'] = self.inputs.lshape[0]
            self.poolparams['H'] = self.inputs.lshape[1]
            self.poolparams['W'] = self.inputs.lshape[2]
            self.poolparams['N'] = self.be.bsz
            self.nglayer = self.be.pool_layer(self.be.default_dtype, **self.poolparams)
            self.outputs = self.be.iobuf(self.nglayer.nOut, self.outputs)
            self.outputs.lshape = (self.nglayer.K, self.nglayer.P, self.nglayer.Q)
            self.deltas = self.be.iobuf(self.inputs.shape[0], self.deltas)
    def fprop(self, inputs, inference=False):
        """Forward pooling pass, delegated to the backend."""
        self.init_buffers(inputs)
        self.be.fprop_pool(self.nglayer, inputs, self.outputs)
        return self.outputs
    def bprop(self, error):
        """Backward pooling pass, delegated to the backend."""
        self.be.bprop_pool(self.nglayer, self.inputs, error, self.deltas)
        return self.deltas
class ParameterLayer(Layer):
    """
    Intermediate class used for common functionality for any layer with weights.
    Not intended to be used directly.

    Arguments:
        init (Initializer, optional): Initializer object to use for
            initializing layer weights
        name (str, optional): layer name. Defaults to "ParameterLayer"
    """
    def __init__(self, init=None, name="ParameterLayer"):
        super(ParameterLayer, self).__init__(name)
        self.has_params = True
        self.init = init
        # weight tensor, allocated lazily on first fprop
        self.W = None
        # set by the subclass's init_buffers() before weights are allocated
        self.weight_shape = None
        # optimizer state tensors (e.g. momentum buffers)
        self.states = []
    def fprop(self, inputs, inference=False):
        """Allocate buffers and (on first call) the weights; subclasses
        call this and then perform the actual computation.

        Relies on the subclass providing init_buffers() to set
        self.weight_shape.
        """
        self.init_buffers(inputs)
        if self.W is None:
            self.init_params(self.weight_shape)
    def init_params(self, shape):
        """
        Allocate layer parameter buffers and initialize them with the
        supplied initializer.  Also allocates the matching gradient
        buffer dW.

        Arguments:
            shape (int, tuple): shape to allocate for layer parameter
                buffers.
        """
        self.W = self.be.empty(shape)
        self.dW = self.be.empty_like(self.W)
        self.init.fill(self.W)
    def get_params(self):
        """
        Get layer parameters, gradients, and states for optimization.

        Returns:
            tuple: ((W, dW), states)
        """
        return ((self.W, self.dW), self.states)
    def get_params_serialize(self, keep_states=True):
        """
        Get layer parameters. All parameters are needed for optimization, but
        only Weights are serialized.

        Arguments:
            keep_states (bool): Control whether all parameters are returned
                or just weights for serialization. Defaults to True.
        """
        serial_dict = {'params': self.W.asnumpyarray()}
        if keep_states:
            serial_dict['states'] = [s.asnumpyarray() for s in self.states]
        return serial_dict
    def set_params(self, W):
        """
        Set layer parameters (weights). Allocate space for other parameters but
        do not initialize them.

        Arguments:
            W (Tensor): Tensor containing weights to use.
        """
        self.W = self.be.array(W)
        self.dW = self.be.empty_like(self.W)
    def set_states(self, states):
        # restore optimizer state tensors from host arrays
        self.states = [self.be.array(x) for x in states]
class Convolution(ParameterLayer):
    """
    Convolutional layer implementation.

    Arguments:
        fshape (tuple(int)): three dimensional shape of convolution window
        strides (int, dict, optional): strides to apply convolution
            window over. An int applies to both dimensions, or a dict with
            str_h and str_w applies to h and w dimensions distinctly. Defaults
            to str_w = str_h = None
        padding (int, dict, optional): padding to apply to edges of
            input. An int applies to both dimensions, or a dict with pad_h
            and pad_w applies to h and w dimensions distinctly. Defaults
            to pad_w = pad_h = None
        init (Initializer, optional): Initializer object to use for
            initializing layer weights
        name (str, optional): layer name. Defaults to "ConvolutionLayer"
    """
    def __init__(self, fshape, strides={}, padding={}, init=None, name="ConvolutionLayer"):
        super(Convolution, self).__init__(init, name)
        # backend conv layer object, created lazily in init_buffers
        self.nglayer = None
        # full 3D parameter dict handed to the backend's conv_layer();
        # the 2D shorthands below overwrite the relevant entries
        self.convparams = {'str_h': 1, 'str_w': 1, 'str_d': 1,
                           'pad_h': 0, 'pad_w': 0, 'pad_d': 0,
                           'T': 1, 'D': 1}  # 3D parameters
        # keep around args in __dict__ for get_description.
        self.fshape = fshape
        self.strides = strides
        self.padding = padding
        if isinstance(fshape, tuple):
            # (R, S, K) = filter height, filter width, number of filters
            fshape = {'R': fshape[0], 'S': fshape[1], 'K': fshape[2]}
        if isinstance(strides, int):
            strides = {'str_h': strides, 'str_w': strides}
        if isinstance(padding, int):
            padding = {'pad_h': padding, 'pad_w': padding}
        for d in [fshape, strides, padding]:
            self.convparams.update(d)
    def init_buffers(self, inputs):
        """
        Helper for allocating output and delta buffers (but not initializing
        them)

        Arguments:
            inputs (Tensor): tensor used for frop inputs, used to determine
                shape of buffers being allocated.
        """
        self.inputs = inputs
        if not self.nglayer:
            # inputs must carry their (C, H, W) layer shape
            assert hasattr(self.inputs, 'lshape')
            self.convparams['C'] = self.inputs.lshape[0]
            self.convparams['H'] = self.inputs.lshape[1]
            self.convparams['W'] = self.inputs.lshape[2]
            self.convparams['N'] = self.be.bsz
            self.nglayer = self.be.conv_layer(self.be.default_dtype, **self.convparams)
            self.outputs = self.be.iobuf(self.nglayer.nOut, self.outputs)
            self.outputs.lshape = (self.nglayer.K, self.nglayer.P, self.nglayer.Q)
            self.deltas = self.be.iobuf(self.inputs.shape[0], self.deltas)
            if self.weight_shape is None:
                self.weight_shape = self.nglayer.dimF2  # (C * R * S, K)
    def fprop(self, inputs, inference=False):
        """Forward convolution; parent fprop allocates buffers/weights."""
        super(Convolution, self).fprop(inputs)
        self.be.fprop_conv(self.nglayer, inputs, self.W, self.outputs)
        return self.outputs
    def bprop(self, error, do_acts=True):
        """Backward pass: always accumulate weight gradients into dW;
        propagate deltas to the previous layer only when do_acts is True."""
        if do_acts:
            self.be.bprop_conv(self.nglayer, self.W, error, self.deltas)
        self.be.update_conv(self.nglayer, self.inputs, error, self.dW)
        return self.deltas
class Deconv(ParameterLayer):
    """
    Deconvolutional layer implementation.

    Arguments:
        fshape (tuple): three dimensional shape of convolution window
        strides (int, dict, optional): strides to apply convolution
            window over. An int applies to both dimensions, or a dict with
            str_h and str_w applies to h and w dimensions distinctly. Defaults
            to str_w = str_h = None
        padding (int, dict, optional): padding to apply to edges of
            input. An int applies to both dimensions, or a dict with pad_h
            and pad_w applies to h and w dimensions distinctly. Defaults
            to pad_w = pad_h = None
        init (Initializer, optional): Initializer object to use for
            initializing layer weights
        name (str, optional): layer name. Defaults to "DeconvolutionLayer"
    """
    def __init__(self, fshape, strides={}, padding={}, init=None, name="DeconvolutionLayer"):
        super(Deconv, self).__init__(init, name)
        # backend deconv layer object, created lazily in init_buffers
        self.nglayer = None
        # full 3D parameter dict handed to the backend's deconv_layer()
        self.deconvparams = {'str_h': 1, 'str_w': 1, 'str_d': 1,
                             'pad_h': 0, 'pad_w': 0, 'pad_d': 0}
        # keep around args in __dict__ for get_description.
        self.fshape = fshape
        self.strides = strides
        self.padding = padding
        if isinstance(fshape, tuple):
            # fshape[2] should now map to C (nifm)
            fshape = {'R': fshape[0], 'S': fshape[1], 'C': fshape[2]}
        if isinstance(strides, int):
            strides = {'str_h': strides, 'str_w': strides}
        if isinstance(padding, int):
            padding = {'pad_h': padding, 'pad_w': padding}
        for d in [fshape, strides, padding]:
            self.deconvparams.update(d)
    def init_buffers(self, inputs):
        """Lazily create the backend deconv layer plus output/delta buffers."""
        self.inputs = inputs
        if not self.nglayer:
            assert hasattr(self.inputs, 'lshape')
            # We switch H, W and C with P, Q and K
            # so that in the GPU, we can reverse calculate
            # H and W
            self.deconvparams['K'] = self.inputs.lshape[0]
            self.deconvparams['P'] = self.inputs.lshape[1]
            self.deconvparams['Q'] = self.inputs.lshape[2]
            self.deconvparams['N'] = self.be.bsz
            self.nglayer = self.be.deconv_layer(self.be.default_dtype, **self.deconvparams)
            self.outputs = self.be.iobuf(self.nglayer.dimI2[0], self.outputs)
            self.outputs.lshape = (self.nglayer.C, self.nglayer.H, self.nglayer.W)
            self.deltas = self.be.iobuf(self.inputs.shape[0], self.deltas)
            if self.weight_shape is None:
                self.weight_shape = self.nglayer.dimF2  # (C * R * S, K)
    def fprop(self, inputs, inference=False):
        """
        fprop for deconv is equivalent to bprop for conv.
        bprop_conv takes in error and deltas as "E" and "grad_I"
        for deconv, bprop_conv will take in input as "E" and output as "grad_I"
        """
        super(Deconv, self).fprop(inputs)
        self.be.bprop_conv(layer=self.nglayer, F=self.W, E=inputs, grad_I=self.outputs)
        return self.outputs
    def bprop(self, error, do_acts=True):
        """
        bprop for deconv is equivalent to fprop for conv.
        fprop_conv takes input and output as "I" and "O".
        for deconv, fprop_conv will take error as input and delta as output
        """
        if do_acts:
            self.be.fprop_conv(self.nglayer, error, self.W, self.deltas)
        self.be.update_conv(self.nglayer, error, self.inputs, self.dW)
        return self.deltas
class Linear(ParameterLayer):
    """
    A fully connected layer implemented as the dot product of inputs and
    weights.

    Arguments:
        nout (int, tuple): Desired size or shape of layer output
        init (Initializer, optional): Initializer object to use for
            initializing layer weights
        name (str, optional): Layer name. Defaults to "LinearLayer"
    """
    def __init__(self, nout, init, name="LinearLayer"):
        super(Linear, self).__init__(init, name)
        self.nout = nout
    def init_buffers(self, inputs):
        """Allocate output/delta buffers on first call, handling both the
        plain minibatch case and the recurrent (unrolled) case."""
        self.inputs = inputs
        if self.outputs is None:
            self.nin = inputs.shape[0]
            # non recurrent case:
            if inputs.shape[1] == self.be.bsz:
                self.outputs = self.be.iobuf(self.nout)
                self.deltas = self.be.iobuf(self.nin)
            else:
                # recurrent case: second dim is nsteps * batch size
                # NOTE(review): '/' is integer division under Python 2;
                # would produce a float under Python 3
                self.nsteps = inputs.shape[1] / self.be.bsz
                self.outputs = self.be.iobuf((self.nout, self.nsteps))
                self.deltas = self.be.iobuf((self.nin, self.nsteps))
            if self.weight_shape is None:
                self.weight_shape = (self.nout, inputs.shape[0])
    def fprop(self, inputs, inference=False):
        """Forward pass: outputs = W . inputs (parent allocates buffers)."""
        super(Linear, self).fprop(inputs)
        self.be.compound_dot(A=self.W, B=inputs, C=self.outputs)
        return self.outputs
    def bprop(self, error, do_acts=True):
        """Backward pass: always accumulate dW = error . inputs^T; propagate
        deltas = W^T . error only when do_acts is True."""
        if do_acts:
            self.be.compound_dot(A=self.W.T, B=error, C=self.deltas)
        self.be.compound_dot(A=error, B=self.inputs.T, C=self.dW)
        return self.deltas
class Bias(ParameterLayer):
    """
    A bias layer implemented that adds a learned bias to inputs and produces
    outputs of the same shape.

    Arguments:
        init (Initializer, optional): Initializer object to use for
            initializing layer bias
        name (str, optional): Layer name. Defaults to "BiasLayer"
    """
    def __init__(self, init, name="BiasLayer"):
        super(Bias, self).__init__(init, name)
        # (bsize, -1) view of outputs used for the broadcast add
        self.reshaped_outputs = None
    def init_buffers(self, inputs):
        """Bias operates in place: outputs aliases inputs, with a reshaped
        (bsize, n) view used to broadcast the bias over each row."""
        self.inputs = inputs
        self.outputs = inputs
        if self.reshaped_outputs is None:
            if hasattr(inputs, 'lshape'):
                # one bias per feature map
                self.bsize = inputs.lshape[0]
            else:
                self.bsize = inputs.shape[0]
            # NOTE(review): '/' relies on Python 2 integer division
            self.reshaped_outputs = self.outputs.reshape((self.bsize,
                                                          self.outputs.size /
                                                          self.bsize))
            if self.weight_shape is None:
                self.weight_shape = (self.bsize, 1)
    def fprop(self, inputs, inference=False):
        """Add the learned bias in place and return the (aliased) outputs."""
        super(Bias, self).fprop(inputs)
        # reshaped_outputs is a different view of outputs, which is
        # the same as inputs (we call it outputs for naming reasons)
        self.reshaped_outputs[:] = self.reshaped_outputs + self.W
        return self.outputs
    def bprop(self, error):
        """Accumulate the bias gradient (row-sum of error); error is passed
        through unchanged as the deltas."""
        # NOTE(review): truthiness of a backend tensor here -- presumably
        # the deltas view is created only once; confirm backend semantics
        if not self.deltas:
            self.deltas = error.reshape(self.reshaped_outputs.shape)
        self.be.sum(self.deltas, axis=1, out=self.dW)
        return error
class Activation(Layer):
    """
    Layer that applies an elementwise transform to its inputs, producing
    outputs of identical shape.

    Typically used to implement nonlinearities applied after another layer's
    activations.

    Arguments:
        transform (Transform): a transform object with fprop and bprop
            functions to apply
        name (str, optional): Layer name. Defaults to "ActivationLayer"
    """
    def __init__(self, transform, name="ActivationLayer"):
        super(Activation, self).__init__(name)
        self.transform = transform

    def init_buffers(self, inputs):
        # The transform is applied in place, so the output buffer is simply
        # an alias of the input buffer.
        self.inputs = inputs
        if self.outputs is None:
            self.outputs = inputs

    def fprop(self, inputs, inference=False):
        """Apply the transform in place and return the (aliased) outputs."""
        self.init_buffers(inputs)
        out = self.outputs
        out[:] = self.transform(self.inputs)
        return out

    def bprop(self, error):
        """Scale the incoming error by the transform's derivative, in place."""
        grad = self.transform.bprop(self.inputs)
        error[:] = grad * error
        return error
class Affine(list):
    """
    A linear layer bundled with an optional learned bias, batch norm and
    activation, represented as a plain list of the individual layers.

    Arguments:
        nout (int, tuple): Desired size or shape of layer output
        init (Initializer): Initializer object to use for
            initializing layer weights
        bias (Initializer, optional): initializer for bias parameters; no
            Bias layer is appended when omitted
        batch_norm (bool, optional): append a BatchNorm layer when True
        activation (Transform, optional): a transform object with fprop and
            bprop functions to apply; no Activation layer is appended when
            omitted
        linear_name (str): the name to call the Linear layer. Defaults to 'LinearLayer'.
        bias_name (str): the name to call the Bias layer. Defaults to 'BiasLayer'.
        act_name (str): the name to call the Activation layer. Defaults to 'ActivationLayer'.
    """
    def __init__(self, nout, init, bias=None, batch_norm=False, activation=None,
                 linear_name='LinearLayer', bias_name='BiasLayer',
                 act_name='ActivationLayer'):
        super(Affine, self).__init__()
        sublayers = [Linear(nout, init, name=linear_name)]
        if bias is not None:
            sublayers.append(Bias(init=bias, name=bias_name))
        if batch_norm:
            sublayers.append(BatchNorm())
        if activation is not None:
            sublayers.append(Activation(transform=activation, name=act_name))
        self.extend(sublayers)
class Conv(list):
    """
    A convolutional layer with a learned bias and activation, implemented as a
    list composing separate Convolution, Bias and Activation layers.

    Arguments:
        fshape (tuple(int)): three dimensional shape of convolution window
        init (Initializer, optional): Initializer object to use for
            initializing layer weights and bias
        strides (int, dict, optional): strides to apply convolution
            window over. An int applies to both dimensions, or a dict with
            str_h and str_w applies to h and w dimensions distinctly. Defaults
            to str_w = str_h = None
        pad (int, dict, optional): padding to apply to edges of
            input. An int applies to both dimensions, or a dict with pad_h
            and pad_w applies to h and w dimensions distinctly. Defaults
            to pad_w = pad_h = None
        bias (Initializer): an initializer to use for bias parameters
        activation (Transform): a transform object with fprop and bprop
            functions to apply
        conv_name (str): the name to call the Convolutional layer. Defaults to 'ConvolutionLayer'
        bias_name (str): the name to call the Bias layer. Defaults to 'BiasLayer'
        act_name (str): the name to call the Activation layer. Defaults to 'ActivationLayer'.
    """
    # NOTE: the {} defaults for strides/pad are kept for interface
    # compatibility; they are only read here, never mutated.
    def __init__(self, fshape, init, strides={}, pad={}, bias=None, batch_norm=False,
                 activation=None, conv_name='ConvolutionLayer',
                 bias_name='BiasLayer', act_name='ActivationLayer'):
        list.__init__(self)
        # Forward the caller-supplied names so the sublayers are identifiable;
        # previously conv_name/bias_name/act_name were accepted but ignored
        # (Affine, the parallel container above, does forward its names).
        self.append(Convolution(fshape=fshape, strides=strides, padding=pad,
                                init=init, name=conv_name))
        if bias is not None:
            self.append(Bias(init=bias, name=bias_name))
        if batch_norm:
            self.append(BatchNorm())
        if activation is not None:
            self.append(Activation(transform=activation, name=act_name))
class Dropout(Layer):
    """
    A dropout layer.

    During training, multiplies the input elementwise by a freshly drawn
    binary keep mask (same shape as the input); the fraction of ones in the
    mask is controlled by the keep parameter. During inference the input is
    simply scaled by keep.

    Arguments:
        keep (float): fraction of the inputs that should be stochastically kept.
    """
    def __init__(self, keep=0.5, name="droplayer"):
        super(Dropout, self).__init__(name)
        self.keep = keep
        self.keep_mask = None

    def init_buffers(self, inputs, inference):
        self.inputs = inputs
        if self.outputs is None:
            self.outputs = self.be.zeros(inputs.shape)
            if hasattr(inputs, 'lshape'):
                self.outputs.lshape = inputs.lshape
            # the mask buffer is only needed during training
            if not inference:
                self.keep_mask = self.be.zeros(inputs.shape)

    def fprop(self, inputs, inference=False):
        """Draw a new keep mask and apply it (training), or scale (inference)."""
        self.init_buffers(inputs, inference)
        if inference:
            return self._fprop_inference(inputs)
        self.be.make_binary_mask(self.keep_mask, self.keep)
        self.outputs[:] = self.keep_mask * inputs
        return self.outputs

    def _fprop_inference(self, inputs):
        # approximate the expectation over masks by scaling with keep
        self.outputs[:] = inputs * self.keep
        return self.outputs

    def bprop(self, error, do_acts=False):
        """Pass the error through the same keep mask used in fprop."""
        if self.deltas is None:
            self.deltas = error
        self.deltas[:] = self.keep_mask * error
        return self.deltas
class GeneralizedCost(NervanaObject):
    """
    A cost layer that applies the provided cost function and computes errors
    with respect to inputs and targets.

    Arguments:
        costfunc (Cost): class with costfunc that computes errors
    """
    def __init__(self, costfunc, name=None):
        super(GeneralizedCost, self).__init__(name)
        self.costfunc = costfunc
        self.cost = self.be.empty((1, 1))
        self.outputs = None
        self.deltas = None

    def get_cost(self, inputs, targets):
        """
        Compute the cost function over the inputs and targets.

        Arguments:
            inputs (Tensor): Tensor containing input values to be compared to
                targets
            targets (Tensor): Tensor containing target values.

        Returns:
            Tensor containing cost
        """
        if self.outputs is None:
            self.nstep = 1  # Non-recurrent case
            if inputs.shape[1] != self.be.bsz:
                # Recurrent case: floor division keeps nstep an integer on
                # both python 2 and python 3 (iobuf expects integral shapes).
                self.nstep = inputs.shape[1] // self.be.bsz
            # For non recurrent case, this is the same as be.iobuf(1)
            self.outputs = self.be.iobuf((1, self.nstep))
        self.outputs[:] = self.costfunc(inputs, targets)
        self.cost[:] = self.be.mean(self.outputs, axis=1)
        return self.cost

    def get_errors(self, inputs, targets):
        """
        Compute the derivative of the cost function

        Arguments:
            inputs (Tensor): Tensor containing input values to be compared to
                targets
            targets (Tensor): Tensor containing target values.

        Returns:
            Tensor of same shape as the inputs containing their respective
            deltas.
        """
        if self.deltas is None:
            self.deltas = self.be.empty_like(inputs)
        self.deltas[:] = self.costfunc.bprop(inputs, targets)
        return self.deltas
class GeneralizedCostMask(GeneralizedCost):
    """
    A cost layer that applies the provided cost function and computes errors
    with respect to inputs and targets. Applies mask to deltas.

    Arguments:
        costfunc (Cost): class with costfunc that computes errors
    """
    def get_cost(self, inputs, targets_mask):
        """
        Compute the cost function over the masked inputs and targets.

        Arguments:
            inputs (Tensor): Tensor containing input values to be compared to
                targets
            targets_mask ((Tensor, Tensor)): Tuple with Tensor target values and Tensor mask

        Returns:
            Tensor containing cost
        """
        targets, mask = targets_mask
        if self.outputs is None:
            self.nstep = 1  # Non-recurrent case
            if inputs.shape[1] != self.be.bsz:
                # Recurrent case: floor division keeps nstep an integer on
                # both python 2 and python 3 (iobuf expects integral shapes).
                self.nstep = inputs.shape[1] // self.be.bsz
            # For non recurrent case, this is the same as be.iobuf(1)
            self.outputs = self.be.iobuf((1, self.nstep))
        masked_input = inputs * mask
        self.outputs[:] = self.costfunc(masked_input, targets)
        self.cost[:] = self.be.mean(self.outputs, axis=1)
        return self.cost

    def get_errors(self, inputs, targets_mask):
        """
        Compute the derivative of the cost function, zeroed where the mask is 0.

        Arguments:
            inputs (Tensor): Tensor containing input values to be compared to
                targets
            targets_mask ((Tensor, Tensor)): Tuple with Tensor target values and Tensor mask

        Returns:
            Tensor of same shape as the inputs containing their respective
            deltas.
        """
        targets, mask = targets_mask
        if self.deltas is None:
            self.deltas = self.be.empty_like(inputs)
        self.deltas[:] = self.costfunc.bprop(inputs, targets) * mask
        return self.deltas
class BatchNorm(Layer):
    """
    A batch normalization layer as described in [Ioffe]_

    Normalizes a batch worth of inputs by subtracting batch mean and dividing by
    batch variance. Then scales by learned factor gamma and shifts by learned bias beta.

    Notes:

    .. [Ioffe] arXiv:1502.03167
    """
    def __init__(self, rho=0.99, eps=1e-6, name="BatchNormLayer"):
        super(BatchNorm, self).__init__(name)
        self.allparams = None
        self.x = None  # used to point to reshaped view of inputs
        self.xhat = None
        self.has_params = True
        self.outputs = None
        self.rho = rho  # decay rate for the running (inference) statistics
        self.eps = eps  # numerical fuzz added to the variance before sqrt
        self.states = [[] for i in range(2)]  # optimizer state, one per param

    def set_bn_shape(self, inputs):
        """Compute the 2D (nfm, rest) shape over which stats are taken."""
        self.nfm = inputs.shape[0] if not hasattr(inputs, 'lshape') else inputs.lshape[0]
        # Floor division keeps the shape integral under python 3 as well.
        self.bn_shape = (self.nfm, inputs.size // self.nfm)

    def init_buffers(self, inputs):
        """Allocate output buffer, reshaped views, and per-feature stat buffers."""
        self.inputs = inputs
        if self.x is None:
            self.nout = self.inputs.shape[0]
            self.outputs = self.be.iobuf(self.nout)
            if hasattr(self.inputs, 'lshape'):
                self.outputs.lshape = self.inputs.lshape
            # This is for local layers -- the first dimension should be number of featuremaps
            self.set_bn_shape(self.inputs)
            self.x = self.inputs.reshape(self.bn_shape)
            self.y = self.outputs.reshape(self.bn_shape)
            self.xvar = self.be.zeros((self.nfm, 1))
            self.xmean = self.be.zeros((self.nfm, 1))

    def init_params(self, dim0):
        """Create learnable (beta, gamma), their gradients, and inference stats."""
        self.beta = self.be.zeros((dim0, 1))
        self.gamma = self.be.ones((dim0, 1))
        self.params = [self.beta, self.gamma]
        self.grad_params = [self.be.zeros_like(p) for p in self.params]
        self.inf_params = [self.be.zeros_like(p) for p in self.params]
        (self.grad_beta, self.grad_gamma) = self.grad_params
        (self.gmean, self.gvar) = self.inf_params
        self.allparams = self.params + self.inf_params
        self.plist = [((p, g), s) for p, g, s in zip(self.params, self.grad_params, self.states)]

    def fprop(self, inputs, inference=False):
        """
        Normalize inputs (x) over batch mean and variance.
        xhat = (x - xmean) / xvar

        Scale and shift normalized inputs (xhat) by learned parameters gamma and beta.
        y = xhat * gamma + beta

        Accumulate partial results to global mean and variance buffers used for inference.
        """
        if inference:
            return self._fprop_inference(inputs)
        self.init_buffers(inputs)
        if self.allparams is None:
            self.init_params(self.nfm)
        # These are cached op-trees
        self.xvar[:] = self.be.var(self.x, axis=1)
        self.xmean[:] = self.be.mean(self.x, axis=1)
        self.xhat = (self.x - self.xmean) / self.be.sqrt(self.xvar + self.eps)
        # exponential moving averages used at inference time
        self.gmean[:] = self.gmean * self.rho + (1.0 - self.rho) * self.xmean
        self.gvar[:] = self.gvar * self.rho + (1.0 - self.rho) * self.xvar
        self.y[:] = self.xhat * self.gamma + self.beta
        return self.outputs

    def _fprop_inference(self, inputs):
        """
        Apply one linear transformation that captures normalization, gamma scaling and beta shift.
        """
        self.init_buffers(inputs)
        xhat = (self.x - self.gmean) / self.be.sqrt(self.gvar + self.eps)  # Op-tree only
        self.y[:] = xhat * self.gamma + self.beta
        return self.outputs

    def bprop(self, error):
        """
        Compute gradients for learning gamma and beta as well as layer weights.
        """
        # Compare against None explicitly so we never truth-test the tensor
        # view that deltas becomes after the first call.
        if self.deltas is None:
            self.deltas = error.reshape(self.bn_shape)
        self.grad_gamma[:] = self.be.sum(self.xhat * self.deltas, axis=1)
        self.grad_beta[:] = self.be.sum(self.deltas, axis=1)
        xtmp = (self.xhat * self.grad_gamma + self.grad_beta) / float(self.x.shape[1])
        self.deltas[:] = self.gamma * (self.deltas - xtmp) / self.be.sqrt(self.xvar + self.eps)
        return error

    def get_params(self):
        return self.plist

    def get_params_serialize(self, keep_states=True):
        """Serialize params (beta, gamma, gmean, gvar) and optionally optimizer states."""
        serial_dict = {'params': [p.asnumpyarray() for p in self.allparams]}
        if keep_states:
            serial_dict['states'] = [[s.asnumpyarray() for s in slist] for slist in self.states]
        return serial_dict

    def set_params(self, allparams):
        """Restore params from a sequence ordered as (beta, gamma, gmean, gvar)."""
        self.allparams = [self.be.array(x) for x in allparams]
        self.params = self.allparams[:2]
        self.inf_params = self.allparams[2:]
        self.grad_params = [self.be.zeros_like(p) for p in self.params]
        (self.beta, self.gamma) = self.params
        (self.grad_beta, self.grad_gamma) = self.grad_params
        (self.gmean, self.gvar) = self.inf_params
        self.plist = [((p, g), s) for p, g, s in zip(self.params, self.grad_params, self.states)]

    def set_states(self, states):
        self.states = [[self.be.array(x) for x in slist] for slist in states]
class BatchNormAutodiff(BatchNorm):
    """
    An example to use autodiff in batchnorm.
    """
    def __init__(self, rho=0.99, eps=1e-6, name="BatchNormAutodiffLayer"):
        super(BatchNormAutodiff, self).__init__(rho, eps, name)

    def get_forward_optree(self):
        """
        Initialize the fprop optree for batchnorm.
        """
        # get fprop op-tree
        xvar = self.be.var(self.x, axis=1)
        xmean = self.be.mean(self.x, axis=1)
        xhat = (self.x - xmean) / self.be.sqrt(xvar + self.eps)
        return xhat * self.gamma + self.beta

    def fprop(self, inputs, inference=False):
        """
        Compute the actual fprop from op-tree, update the global estimations
        """
        if inference:
            return self._fprop_inference(inputs)
        self.init_buffers(inputs)
        if self.allparams is None:
            self.init_params(self.nfm)
            self.fprop_op_tree = self.get_forward_optree()
        # the actual f-prop
        self.y[:] = self.fprop_op_tree
        # for inference
        self.gmean[:] = (self.gmean * self.rho + (1.0 - self.rho) * self.be.mean(self.x, axis=1))
        self.gvar[:] = (self.gvar * self.rho + (1.0 - self.rho) * self.be.var(self.x, axis=1))
        return self.outputs

    def bprop(self, error):
        """
        Use Autodiff.back_prop_grad to back propagate gradients for the
        corresponding tensors.
        """
        # Compare against None explicitly so we never truth-test the tensor
        # view that deltas becomes after the first call.
        if self.deltas is None:
            self.deltas = error.reshape(self.bn_shape)
        # autodiff will automatically cache and reuse the object
        # if we know the `error` buffer at init, we can also create the autodiff
        # object at layer's init
        ad = Autodiff(self.fprop_op_tree, self.be, next_error=self.deltas)
        # back propagate
        ad.back_prop_grad([self.x, self.gamma, self.beta],
                          [self.deltas, self.grad_gamma, self.grad_beta])
        return error
| {
"content_hash": "0d114709c4b8b5e8940d41edd14eb2b5",
"timestamp": "",
"source": "github",
"line_count": 902,
"max_line_length": 98,
"avg_line_length": 37.310421286031044,
"alnum_prop": 0.5977001247994295,
"repo_name": "misko/neon",
"id": "d9674b1c6d2550e9d8ee95263b37ce35b1829901",
"size": "34395",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neon/layers/layer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6534"
},
{
"name": "C++",
"bytes": "13448"
},
{
"name": "CSS",
"bytes": "810211"
},
{
"name": "Cuda",
"bytes": "87750"
},
{
"name": "Makefile",
"bytes": "8982"
},
{
"name": "Python",
"bytes": "777025"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
# Distribution name as it will appear on an index server:
name="PyEmailWatcher",
# Initial release version:
version="0.1",
# Author details:
author="Jason",
# Python packages to include:
packages=['pyemailwatcher'],
# Also include non-code files declared in MANIFEST.in:
include_package_data=True,
# Project details (homepage not yet published):
#url="http://pypi.python.org/pypi/MyApplication_v010/",
#
license="MIT",
# description="",
# long_description=open("README.md").read(),
# Runtime dependencies (none yet):
#install_requires=[ ],
) | {
"content_hash": "5ee9af7f2cc22255ceb1b7426feed080",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 59,
"avg_line_length": 18.35483870967742,
"alnum_prop": 0.632688927943761,
"repo_name": "jasongwartz/PyEmailWatcher",
"id": "8f9f64c182c13e028609f1236ef9c2ea25b7a1d6",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3798"
}
],
"symlink_target": ""
} |
import os
import tempfile
import unittest
from pybel import BELGraph
from pybel.dsl import gene, protein, rna
from pybel.manager import Manager
from pybel.testing.utils import n
HGNC = 'HGNC'  # namespace shared by every example node below
dir_path = os.path.dirname(os.path.realpath(__file__))  # directory containing this module
class ManagerMixin(unittest.TestCase):
    """TestCase mixin that provides a throwaway SQLite-backed Manager."""

    def setUp(self):
        """Create a temporary database file and connect a Manager to it."""
        super(ManagerMixin, self).setUp()
        self.db_fd, self.db_file = tempfile.mkstemp()
        self.connection = 'sqlite:///{}'.format(self.db_file)
        self.manager = Manager(connection=self.connection)

    def tearDown(self):
        """Close and remove the temporary database file."""
        os.close(self.db_fd)
        os.unlink(self.db_file)
# Shared DSL node objects (all in the HGNC namespace) reused by the example
# graph builders below.
protein_a = protein(namespace=HGNC, name='a')
protein_b = protein(namespace=HGNC, name='b')
gene_c = gene(namespace=HGNC, name='c')
rna_d = rna(namespace=HGNC, name='d')
protein_e = protein(namespace=HGNC, name='e')
gene_f = gene(namespace=HGNC, name='f')
protein_g = protein(namespace=HGNC, name='g')
protein_h = protein(namespace=HGNC, name='h')
protein_i = protein(namespace=HGNC, name='i')
protein_j = protein(namespace=HGNC, name='j')
def make_graph_1() -> BELGraph:
    """Make an example graph.

    A -> B
    D -> A
    C -| B
    """
    graph = BELGraph(
        name='PyBEL Tools Example Network 1',
        version='1.1.0',
        description='Example Network for PyBEL Tools Tests',
        authors='Daniel Domingo-Fernández and Charles Tapley Hoyt',
        contact='charles.hoyt@scai.fraunhofer.de',
    )
    graph.annotation_list['Annotation'] = {'foo'}
    graph.add_node_from_data(protein_a)
    graph.add_node_from_data(protein_b)
    graph.add_node_from_data(gene_c)
    graph.add_node_from_data(rna_d)
    graph.add_increases(
        protein_a,
        protein_b,
        citation='1',
        evidence='Evidence 1',
        annotations={'Annotation': 'foo'}
    )
    graph.add_increases(
        rna_d,
        protein_a,
        citation='2',
        evidence='Evidence 2',
        annotations={'Annotation': 'foo'}
    )
    graph.add_decreases(
        gene_c,
        protein_b,
        citation='3',
        evidence='Evidence 3',
        annotations={'Annotation': 'foo'}
    )
    return graph
def make_graph_2() -> BELGraph:
    """Make an example graph.

    E -> B
    F -> E
    """
    graph = BELGraph(
        name='PyBEL Tools Example Network 2',
        version='1.0.0',
        description='Example Network for PyBEL Tools Tests',
        authors='Daniel Domingo-Fernández and Charles Tapley Hoyt',
        contact='charles.hoyt@scai.fraunhofer.de',
    )
    graph.annotation_list['Annotation'] = {'foo'}
    for node in (gene_f, protein_e, protein_b):
        graph.add_node_from_data(node)
    graph.add_increases(
        protein_e, protein_b,
        citation='1', evidence='Evidence 1',
        annotations={'Annotation': 'foo'},
    )
    graph.add_increases(
        gene_f, protein_e,
        citation='2', evidence='Evidence 2',
        annotations={'Annotation': 'foo2'},
    )
    return graph
def make_graph_3() -> BELGraph:
    """Make an example graph.

    A -> B -| C
    D -| F -> C
    C -| F
    C -- G
    """
    graph = BELGraph(
        name='PyBEL Tools Example Network 3',
        version='1.0.0',
        description='Example Network for PyBEL Tools Tests',
        authors='Daniel Domingo-Fernández and Charles Tapley Hoyt',
        contact='charles.hoyt@scai.fraunhofer.de',
    )
    # (adder method, source, target) triples; each edge gets a fresh
    # random citation and evidence string
    edge_specs = [
        (graph.add_increases, protein_a, protein_b),
        (graph.add_decreases, protein_b, gene_c),
        (graph.add_decreases, rna_d, gene_f),
        (graph.add_increases, protein_e, gene_f),
        (graph.add_increases, gene_f, gene_c),
        (graph.add_association, gene_c, protein_g),
    ]
    for add_edge, source, target in edge_specs:
        add_edge(source, target, citation=n(), evidence=n())
    return graph
def make_graph_4() -> BELGraph:
    """Make an example graph.

    A -> B
    B -| C
    B -| D
    B -| E
    B -| F
    B -> G

    B -> H
    B -| H

    B -> I
    B -- J
    """
    graph = BELGraph(
        name='PyBEL Tools Example Network 4',
        version='1.0.0',
        description='Example Network for PyBEL Tools Tests',
        authors='Daniel Domingo-Fernández and Charles Tapley Hoyt',
        contact='charles.hoyt@scai.fraunhofer.de',
    )
    # (adder method, source, target) triples; each edge gets a fresh
    # random citation and evidence string
    edge_specs = [
        (graph.add_increases, protein_a, protein_b),
        (graph.add_decreases, protein_b, gene_c),
        (graph.add_decreases, protein_b, rna_d),
        (graph.add_decreases, protein_b, protein_e),
        (graph.add_decreases, protein_b, gene_f),
        (graph.add_increases, protein_b, protein_g),
        (graph.add_decreases, protein_b, protein_h),
        (graph.add_increases, protein_b, protein_h),
        (graph.add_increases, protein_b, protein_i),
        (graph.add_association, protein_b, protein_j),
    ]
    for add_edge, source, target in edge_specs:
        add_edge(source, target, citation=n(), evidence=n())
    return graph
# Module-level singleton instances of the example graphs, built once at import.
example_1 = make_graph_1()
example_2 = make_graph_2()
example_3 = make_graph_3()
example_4 = make_graph_4()
class ExampleNetworkMixin(unittest.TestCase):
    """A mixin that gives a class access to example networks"""
    def setUp(self):
        """Build a fresh copy of each example graph for every test.

        NOTE(review): attribute naming is inconsistent (``graph_1`` vs
        ``network2``/``network3``/``network4``); kept as-is because
        subclasses may rely on these exact names.
        """
        super(ExampleNetworkMixin, self).setUp()
        self.graph_1 = make_graph_1()
        self.network2 = make_graph_2()
        self.network3 = make_graph_3()
        self.network4 = make_graph_4()
| {
"content_hash": "00448886394ce386180c203180cc2f00",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 75,
"avg_line_length": 27.79591836734694,
"alnum_prop": 0.6209618208516887,
"repo_name": "pybel/pybel-tools",
"id": "0f170e53786416ce388a49232a2d72fbe193d6df",
"size": "5477",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "40316"
},
{
"name": "JavaScript",
"bytes": "462"
},
{
"name": "Jupyter Notebook",
"bytes": "793365"
},
{
"name": "Python",
"bytes": "385330"
}
],
"symlink_target": ""
} |
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.cli import CLI
from mininet.node import UserSwitch,RemoteController
from mininet.term import makeTerm
import os, time
class MyTopo(Topo):
    """Simple topology example: two hosts joined by three parallel paths."""

    def __init__(self):
        """Create the custom topology.

        h1 - s1 - {s2, s3, s4} - s5 - h2
        """
        Topo.__init__(self)
        # nodes
        host1 = self.addHost('h1', ip='10.0.0.1/24')
        host2 = self.addHost('h2', ip='10.0.0.2/24')
        s1, s2, s3, s4, s5 = [self.addSwitch('s%d' % i) for i in range(1, 6)]
        # links (with explicit port numbers on each endpoint)
        self.addLink(host1, s1, 1, 1)
        self.addLink(s1, s2, 2, 1)
        self.addLink(s1, s3, 3, 1)
        self.addLink(s1, s4, 4, 1)
        self.addLink(s2, s5, 2, 1)
        self.addLink(s3, s5, 2, 2)
        self.addLink(s4, s5, 2, 3)
        self.addLink(s5, host2, 4, 1)
# --- Start the ryu controller in its own xterm window ---
os.system("xterm -e 'ryu-manager ~/ryu/ryu/app/openstate/playground/forwarding_consistency_many_to_1_ctrl.py'&")
# --- Start mininet ---
# `topos` allows `mn --custom` to discover this topology by name.
topos = { 'mytopo': ( lambda: MyTopo() ) }
mytopo=MyTopo()
time.sleep(1)  # give the controller a moment to come up before mininet connects
print("\n********************************** HELP *********************************************")
print("Type \"python ~/ryu/ryu/app/openstate/echo_server.py 200\" in h2's xterm")
print("Type \"nc 10.0.0.2 200\" in h1's xterm")
print("Watching the tcpdump results, it is possible to see that forwarding consistency is guaranteed\n"
"In order to test new path selection, close and reopen netcat")
print("\nTo exit type \"ctrl+D\" or exit")
print("*************************************************************************************")
net = Mininet(topo=mytopo,switch=UserSwitch,controller=RemoteController,cleanup=True,autoSetMacs=True,autoStaticArp=True,listenPort=6634)
net.start()
# tcpdump windows on the three alternative paths (ingress of s2, s3, s4)
os.system("xterm -e 'tcpdump -i s2-eth1'&")
os.system("xterm -e 'tcpdump -i s3-eth1'&")
os.system("xterm -e 'tcpdump -i s4-eth1'&")
h1,h2 = net.hosts[0], net.hosts[1]
makeTerm(h1)
makeTerm(h2)
CLI(net)  # interactive mininet shell; blocks until the user exits
# teardown: stop the network, clear mininet state, kill the controller
net.stop()
os.system("sudo mn -c")
os.system("kill -9 $(pidof -x ryu-manager)")
| {
"content_hash": "627aee8551d0cb83eb420582256af400",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 137,
"avg_line_length": 33.95454545454545,
"alnum_prop": 0.6207050423917894,
"repo_name": "Tesi-Luca-Davide/ryu",
"id": "f7253f2da62ae21d3ff22ab8085cb80e2d6cdb84",
"size": "2260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ryu/app/openstate/playground/start_many_to_1_ctrl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8269"
},
{
"name": "CSS",
"bytes": "7182"
},
{
"name": "Erlang",
"bytes": "871862"
},
{
"name": "HTML",
"bytes": "4648"
},
{
"name": "JavaScript",
"bytes": "47740"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "5511220"
},
{
"name": "Shell",
"bytes": "14605"
}
],
"symlink_target": ""
} |
from nailgun.errors.base import NailgunException
default_messages = {
# common errors
"InvalidData": "Invalid data received",
"AlreadyExists": "Object already exists",
"DumpRunning": "Dump already running",
# REST errors
"CannotDelete": "Can't delete object",
"CannotCreate": "Can't create object",
"NotAllowed": "Action is not allowed",
"InvalidField": "Invalid field specified for object",
"ObjectNotFound": "Object not found in DB",
# node discovering errors
"InvalidInterfacesInfo": "Invalid interfaces info",
"InvalidMetadata": "Invalid metadata specified for node",
"CannotFindNodeIDForDiscovering": "Cannot find node for discovering",
# deployment errors
"CheckBeforeDeploymentError": "Pre-Deployment check wasn't successful",
"DeploymentAlreadyStarted": "Deployment already started",
"DeploymentNotRunning": "Deployment is not running",
"NoDeploymentTasks": "Deployment tasks not found for specific release in the database",
"DeletionAlreadyStarted": "Environment removal already started",
"StopAlreadyRunning": "Stopping deployment already initiated",
"FailedProvisioning": "Failed to start provisioning",
"WrongNodeStatus": "Wrong node status",
"NodeOffline": "Node is offline",
"NotEnoughControllers": "Not enough controllers",
"RedHatSetupError": "Red Hat setup error",
"TaskAlreadyRunning": "A task is already running",
"InvalidReleaseId": "Release Id is invalid",
"InvalidOperatingSystem": "Invalid operating system",
"CannotFindPluginForRelease": "Cannot find plugin for the release",
"UnavailableRelease": "Release is unavailable",
"ControllerInErrorState": ("One of the cluster controllers is in error "
"state, please, eliminate the problem prior "
"to proceeding further"),
# mongo errors
"ExtMongoCheckerError": "Mongo nodes shouldn`t be used with external mongo",
"MongoNodesCheckError": "Mongo nodes have to be present if ceilometer is chosen",
# disk errors
"NotEnoughFreeSpace": "Not enough free space",
"NotEnoughOsdNodes": "Not enough OSD nodes",
# network errors
"AdminNetworkNotFound": "Admin network info not found",
"InvalidNetworkCidr": "Invalid network CIDR",
"InvalidNetworkVLANIDs": "Invalid network VLAN IDs",
"AssignIPError": "Failed to assign IP to node",
"NetworkCheckError": "Network checking failed",
"CantRemoveOldVerificationTask": "Can't remove old verification task, still running",
"OutOfVLANs": "Not enough available VLAN IDs",
"OutOfIPs": "Not enough free IP addresses in pool",
"NoSuitableCIDR": "Cannot find suitable CIDR",
"CanNotFindInterface": "Cannot find interface",
"CanNotDetermineEndPointIP": "Cannot determine end point IP",
"CanNotFindNetworkForNode": "Cannot find network for node",
"NetworkRoleConflict": "Cannot override existing network role",
"NetworkTemplateMissingRoles": "Roles are missing from network template",
"NetworkTemplateMissingNetRoles": "Network roles are missing",
# RPC errors
"CannotFindTask": "Cannot find task",
# expression parser errors
"LexError": "Illegal character",
"ParseError": "Synxtax error",
"UnknownModel": "Unknown model",
# Tracking errors
"TrackingError": "Action failed",
# Zabbix errors
"CannotMakeZabbixRequest": "Can't make a request to Zabbix",
"ZabbixRequestError": "Zabbix request returned an error",
# Plugin errors
"PackageVersionIsNotCompatible": "Package version is not compatible",
# Extensions
"CannotFindExtension": "Cannot find extension",
# unknown
"UnknownError": "Unknown error"
}
class ErrorFactory(object):
    """Expose one NailgunException subclass per entry in default_messages.

    Each attribute (e.g. ``errors.InvalidData``) is a dynamically created
    exception class whose ``message`` defaults to the mapped text.
    """
    def __init__(self):
        # items() instead of iteritems() keeps this working on both
        # python 2 and python 3; the dict is small, so the py2 list copy
        # is negligible.
        for name, msg in default_messages.items():
            setattr(self, name, self._build_exc(name, msg))

    def _build_exc(self, name, msg):
        """Create a NailgunException subclass called *name* with message *msg*."""
        return type(
            name,
            (NailgunException,),
            {
                "message": msg
            }
        )


# Singleton through which the rest of the codebase raises/catches errors.
errors = ErrorFactory()
| {
"content_hash": "3510dcc5da9bf18661c451d749bf0ca0",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 91,
"avg_line_length": 37.908256880733944,
"alnum_prop": 0.6870764762826719,
"repo_name": "SmartInfrastructures/fuel-web-dev",
"id": "08b55f95a0234312098de954dabd4005d04b7f70",
"size": "4767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/errors/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "91131"
},
{
"name": "HTML",
"bytes": "7949"
},
{
"name": "JavaScript",
"bytes": "945307"
},
{
"name": "Mako",
"bytes": "1943"
},
{
"name": "Python",
"bytes": "3961568"
},
{
"name": "Ruby",
"bytes": "14701"
},
{
"name": "Shell",
"bytes": "24392"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles the two divergent 0008 branches of the
    # ``geo`` app. No schema operations are required.
    dependencies = [
        ('geo', '0008_auto_20181129_1451'),
        ('geo', '0008_auto_20180918_1037'),
    ]
    operations = [
    ]
| {
"content_hash": "78a8aea9d5066d1e2918a12e8255cda3",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 43,
"avg_line_length": 18.357142857142858,
"alnum_prop": 0.622568093385214,
"repo_name": "onepercentclub/bluebottle",
"id": "c7975f5139c44adb12572f0f08eae199ee2a2be9",
"size": "330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/geo/migrations/0009_merge_20190121_1425.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the 'decomposition' app.

    Creates the decomposition models and links them to models in the
    'basicviz' app (Experiment, Document, Feature, Mass2Motif).
    """

    initial = True

    dependencies = [
        ('basicviz', '0049_auto_20170216_2228'),
    ]

    operations = [
        # A named decomposition run, attached to a basicviz Experiment.
        migrations.CreateModel(
            name='Decomposition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True)),
                ('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Experiment')),
            ],
        ),
        # Per-document intensity of a feature within a decomposition.
        migrations.CreateModel(
            name='DecompositionFeatureInstance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('intensity', models.FloatField()),
                ('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Document')),
            ],
        ),
        # Per-document intensity of a global feature.
        migrations.CreateModel(
            name='DocumentGlobalFeature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('intensity', models.FloatField()),
                ('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Document')),
            ],
        ),
        # Mapping table between global features and local (basicviz) features;
        # its FK fields are attached below via AddField.
        migrations.CreateModel(
            name='FeatureMap',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        # A named collection of global features.
        migrations.CreateModel(
            name='FeatureSet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('description', models.CharField(max_length=1024, null=True)),
            ],
        ),
        # A feature defined by an m/z window, belonging to a FeatureSet.
        migrations.CreateModel(
            name='GlobalFeature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('min_mz', models.FloatField()),
                ('max_mz', models.FloatField()),
                ('featureset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.FeatureSet')),
            ],
        ),
        # Wrapper around a basicviz Mass2Motif for use in decompositions.
        migrations.CreateModel(
            name='GlobalMotif',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('originalmotif', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Mass2Motif')),
            ],
        ),
        # Probability of a global feature under a global motif.
        migrations.CreateModel(
            name='GlobalMotifGlobalFeature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('probability', models.FloatField()),
                ('feature', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature')),
                ('motif', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalMotif')),
            ],
        ),
        # Foreign keys added after model creation (they reference models
        # defined later in this same migration).
        migrations.AddField(
            model_name='featuremap',
            name='globalfeature',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature'),
        ),
        migrations.AddField(
            model_name='featuremap',
            name='localfeature',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Feature'),
        ),
        migrations.AddField(
            model_name='documentglobalfeature',
            name='feature',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature'),
        ),
        migrations.AddField(
            model_name='decompositionfeatureinstance',
            name='feature',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature'),
        ),
    ]
| {
"content_hash": "b537e2d3b2996fd3f9bbc934b4f86017",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 126,
"avg_line_length": 44.3,
"alnum_prop": 0.5801354401805869,
"repo_name": "sdrogers/ms2ldaviz",
"id": "c4b4a193daf997395eb2ecf9e42593876e013da5",
"size": "4500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ms2ldaviz/decomposition/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "155389"
},
{
"name": "Dockerfile",
"bytes": "324"
},
{
"name": "HTML",
"bytes": "281089"
},
{
"name": "JavaScript",
"bytes": "564464"
},
{
"name": "Jupyter Notebook",
"bytes": "22354299"
},
{
"name": "Python",
"bytes": "897444"
},
{
"name": "Shell",
"bytes": "561"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from lib.rnn_cells.base_cell import BaseCell
from lib import linalg
#***************************************************************
class RNNCell(BaseCell):
  """Vanilla (Elman-style) recurrent cell.

  One affine transform of the concatenated [inputs; state] followed by the
  configured nonlinearity; the cell's output doubles as its next state.
  """

  #=============================================================
  def __init__(self, *args, **kwargs):
    """Delegate all configuration to BaseCell."""
    super(RNNCell, self).__init__(*args, **kwargs)

  #=============================================================
  def __call__(self, inputs, state, scope=None):
    """Run one timestep; returns (output, new_state), which coincide here."""
    if self.recur_diag_bilin:
      # Augment the input with the elementwise product of its two halves.
      left, right = tf.split(1, 2, inputs)
      inputs = tf.concat(1, [left * right, left, right])
    with tf.variable_scope(scope or type(self).__name__):
      preact = linalg.linear([inputs, state],
                             self.output_size,
                             add_bias=False,
                             moving_params=self.moving_params)
      new_hidden = self.recur_func(preact)
    return new_hidden, new_hidden

  #=============================================================
  @property
  def state_size(self):
    """A plain RNN's recurrent state has the same width as its output."""
    return self.output_size
| {
"content_hash": "ec2a88e85c6939a6c23ea6116324b98a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 66,
"avg_line_length": 31.58974358974359,
"alnum_prop": 0.4602272727272727,
"repo_name": "tdozat/Parser",
"id": "d44584022f8a3adccb9f1ac7d841d4e7f4c37728",
"size": "1860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/rnn_cells/rnn_cell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "53523"
},
{
"name": "Python",
"bytes": "179632"
}
],
"symlink_target": ""
} |
""" Unit Testing for Day 1
Markus Foote
2017
Call me like this (runs all tests):
$ python -m unittest discover tests
from the /2017 directory
or (only this file's tests):
$ python -m unittest test1
from the /tests directory with appropriate PYTHONPATH for the day1 file
"""
import unittest
from y2017.day1 import *
class TestDay1(unittest.TestCase):
    """Tests for Advent of Code 2017, day 1 (inverse captcha)."""

    def test_part_A(self):
        # (captcha string, expected sum) pairs from the puzzle examples.
        cases = (
            ('1122', 3),
            ('1111', 4),
            ('1234', 0),
            ('91212129', 9),
        )
        for captcha, expected in cases:
            self.assertEqual(inverse_captcha(captcha), expected)

    def test_part_B(self):
        # Half-way-around variant examples.
        cases = (
            ('1212', 6),
            ('1221', 0),
            ('123425', 4),
            ('123123', 12),
            ('12131415', 4),
        )
        for captcha, expected in cases:
            self.assertEqual(inverse_captcha_half(captcha), expected)
| {
"content_hash": "71ac3e3e740f7e3505c1ecbad50dbcd5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 34.392857142857146,
"alnum_prop": 0.6645898234683282,
"repo_name": "martakus/advent-of-code",
"id": "48a1f53d566a366724bdea1095c6f01158d0cdee",
"size": "963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "y2017/tests/test1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1076"
},
{
"name": "Python",
"bytes": "15886"
}
],
"symlink_target": ""
} |
# IntelliJ PyTypeCheckerInspection test fixture: the inline <warning ...>
# markup encodes the expected type-checker highlights, so this file is
# intentionally not valid Python syntax.
def test():
    abs(False)
    int(10)
    long(False)
    float(False)
    complex(False)
    divmod(False, False)
    divmod(<warning descr="Expected type '_N2', got 'str' instead">'foo'</warning>, <warning descr="Expected type '_N2', got 'unicode' instead">u'bar'</warning>)
    pow(False, True)
    round<warning descr="Unexpected type(s):(bool, str)Possible types:(SupportsFloat, int)(float, int)">(False, 'foo')</warning>
| {
"content_hash": "4663ade89898232b0abb1bdb16c6caa0",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 161,
"avg_line_length": 42.8,
"alnum_prop": 0.6518691588785047,
"repo_name": "leafclick/intellij-community",
"id": "a6bea4418bf88624a68b4300db8107a8e1043527",
"size": "428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/testData/inspections/PyTypeCheckerInspection/BuiltinNumeric.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""Build script for the Cython demo extensions ``fastloop`` and ``thread_demo``.

Fixes relative to the original: removed dead commented-out code, stopped
reusing/shadowing a single ``ext_modules`` name for two different builds,
and hoisted the duplicated optimization flags into one constant.
"""
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from Cython.Distutils import build_ext

# Compiler flags shared by both extensions.
OPT_FLAGS = ["-O3", "-ffast-math", "-march=native"]

# Serial demo extension.
fastloop_modules = [
    Extension(
        "fastloop",
        ["fastloop.pyx"],
        libraries=["m"],
        extra_compile_args=OPT_FLAGS,
    )
]
setup(
    name="fastloop",
    cmdclass={"build_ext": build_ext},
    ext_modules=fastloop_modules,
)

# OpenMP-threaded demo extension (needs -fopenmp at compile AND link time).
thread_demo_modules = [
    Extension(
        "thread_demo",
        ["thread_demo.pyx"],
        libraries=["m"],
        extra_compile_args=OPT_FLAGS + ["-fopenmp"],
        extra_link_args=["-fopenmp"],
    )
]
setup(
    name="thread_demo",
    cmdclass={"build_ext": build_ext},
    ext_modules=thread_demo_modules,
)
| {
"content_hash": "ccb8b1c2a43285a513a0534b7a3ba0eb",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 88,
"avg_line_length": 23.236842105263158,
"alnum_prop": 0.5685164212910532,
"repo_name": "nealbob/nealbob.github.io",
"id": "551e66d8e155f5424828ecc0e224cb3f98542c73",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "117748"
},
{
"name": "CSS",
"bytes": "19682"
},
{
"name": "Cython",
"bytes": "4243"
},
{
"name": "HTML",
"bytes": "501193"
},
{
"name": "JavaScript",
"bytes": "140793"
},
{
"name": "Jupyter Notebook",
"bytes": "40404275"
},
{
"name": "Python",
"bytes": "16315"
},
{
"name": "Ruby",
"bytes": "48"
}
],
"symlink_target": ""
} |
"""
Bot launcher.
This class is responsible for bootstrapping the bot.
"""
import os
import shutil
import sys
from ruamel import yaml
from joku.core.bot import Jokusoramame
def main():
    """Bootstrap and run the bot.

    The config file path may be given as the first CLI argument and
    defaults to ``config.yml``; if that file does not exist it is seeded
    from the bundled example config.
    """
    config = sys.argv[1] if len(sys.argv) > 1 else "config.yml"

    if not os.path.exists(config):
        shutil.copy("config.example.yml", config)

    bot = Jokusoramame(config_file=config)
    bot.logger.info("Launching Jokusoramame in autosharded mode...")

    try:
        bot.run()
    except (KeyboardInterrupt, EOFError):
        pass

    # Suppress any late exceptions raised during event-loop teardown.
    bot.loop.set_exception_handler(lambda *args, **kwargs: None)


if __name__ == "__main__":
    main()
| {
"content_hash": "7d4103607453932cbcc3f706e11ec244",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 20.571428571428573,
"alnum_prop": 0.6472222222222223,
"repo_name": "MJB47/Jokusoramame",
"id": "c731623d2ae9b437c55ac12c9f46f07de2e6172a",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4220"
},
{
"name": "Lua",
"bytes": "7014"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "305451"
},
{
"name": "Shell",
"bytes": "479"
}
],
"symlink_target": ""
} |
"""PyGlove's built-in early stopping policies."""
# pylint: disable=g-bad-import-order
from pyglove.ext.early_stopping.base import EarlyStopingPolicyBase
from pyglove.ext.early_stopping.base import And
from pyglove.ext.early_stopping.base import Or
from pyglove.ext.early_stopping.base import Not
from pyglove.ext.early_stopping.step_wise import early_stop_by_rank
from pyglove.ext.early_stopping.step_wise import early_stop_by_value
from pyglove.ext.early_stopping.step_wise import StepWise
# pylint: enable=g-bad-import-order
| {
"content_hash": "d7fbbc46fc3a29e27314ea843ac60bef",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 68,
"avg_line_length": 38,
"alnum_prop": 0.8101503759398496,
"repo_name": "google/pyglove",
"id": "386e76793419270a2bc70ad0ca9c8ca141f609ab",
"size": "1116",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pyglove/ext/early_stopping/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1796188"
}
],
"symlink_target": ""
} |
"""
For when multiple database are used to store information. Generally this works well for storage but getting from
both databases is generally discouraged.
"""
from .datastore import DataStore, DataStoreException
class MultiStore(DataStore):
    """
    MultiStore performs the same command on all passed DataStore objects, then
    asserts that the results are the same. After passing this check the first
    result is returned.

    Generally this works well for storage, but note that every read is also
    issued to every underlying store.
    """

    def __init__(self, dbs):
        """
        :param dbs: list of DataStore implementations to fan commands out to
        :raise DataStoreException: if fewer than two stores are supplied
        """
        super(MultiStore, self).__init__()
        self.dbs = dbs
        if len(self.dbs) <= 1:
            raise DataStoreException("The MultiStore is designed to be used with multiple databases. "
                                     "Expected > 1 dbs, got {}".format(len(self.dbs)))

    @staticmethod
    def _all_results_equal(results):
        """Raise a DataStoreException unless every result agrees with the first."""
        if not results:
            return
        first = results[0]
        for result in results:
            if result != first:
                raise DataStoreException("Returned results from multiple databases were different. "
                                         "Got {} and {}.".format(first, result))

    def _call_function(self, function_name, args):
        """
        Invoke ``function_name(*args)`` on every underlying store, verify that
        all stores returned the same value, and return the list of results.
        """
        results = [getattr(db, function_name)(*args) for db in self.dbs]
        self._all_results_equal(results)
        return results

    def get_device(self, device_name):
        """
        See @DataStore description
        """
        super(MultiStore, self).get_device(device_name)
        return self._call_function("get_device", [device_name])[0]

    def list_devices(self, filters=None):
        """
        See @DataStore description
        """
        super(MultiStore, self).list_devices(filters)
        return self._call_function("list_devices", [filters])[0]

    def set_device(self, device_info):
        """
        See @DataStore description
        """
        super(MultiStore, self).set_device(device_info)
        return self._call_function("set_device", [device_info])[0]

    def delete_device(self, device_name):
        """
        See @DataStore description
        """
        super(MultiStore, self).delete_device(device_name)
        return self._call_function("delete_device", [device_name])[0]

    def get_device_history(self, device_name=None):
        """
        See @DataStore description
        """
        super(MultiStore, self).get_device_history(device_name)
        return self._call_function("get_device_history", [device_name])[0]

    def get_profile(self, profile_name=None):
        """
        See @DataStore description
        """
        super(MultiStore, self).get_profile(profile_name)
        return self._call_function("get_profile", [profile_name])[0]

    def list_profiles(self, filters=None):
        """
        See @DataStore description
        """
        super(MultiStore, self).list_profiles(filters)
        return self._call_function("list_profiles", [filters])[0]

    def set_profile(self, profile_info):
        """
        See @DataStore description
        """
        super(MultiStore, self).set_profile(profile_info)
        return self._call_function("set_profile", [profile_info])[0]

    def delete_profile(self, profile_name):
        """
        See @DataStore description
        """
        super(MultiStore, self).delete_profile(profile_name)
        return self._call_function("delete_profile", [profile_name])[0]

    def list_logs(self, device_name=None, limit=100):
        """
        See @DataStore description
        """
        super(MultiStore, self).list_logs(device_name, limit)
        return self._call_function("list_logs", [device_name, limit])[0]

    def list_logs_between_timeslice(self, begin, end, device_name=None, limit=100):
        """
        See @DataStore description
        """
        super(MultiStore, self).list_logs_between_timeslice(begin, end, device_name, limit)
        return self._call_function("list_logs_between_timeslice", [begin, end, device_name, limit])[0]

    def add_log(self, level, msg, device_name=None, process=None):
        """
        See @DataStore description
        """
        # BUG FIX: the base-class call previously passed
        # (level, process, msg, device_name), swapping msg/process relative
        # to this method's own signature. Pass arguments in signature order.
        super(MultiStore, self).add_log(level, msg, device_name, process)
        return self._call_function("add_log", [level, msg, device_name, process])[0]

    def get_configuration_value(self, key=None):
        """
        See @DataStore description
        """
        super(MultiStore, self).get_configuration_value(key)
        return self._call_function("get_configuration_value", [key])[0]

    def list_configuration(self):
        """
        See @DataStore description
        """
        super(MultiStore, self).list_configuration()
        return self._call_function("list_configuration", [])[0]

    def set_configuration(self, key, value):
        """
        See @DataStore description
        """
        super(MultiStore, self).set_configuration(key, value)
        return self._call_function("set_configuration", [key, value])[0]

    def delete_configuration(self, key):
        """
        See @DataStore description
        """
        super(MultiStore, self).delete_configuration(key)
        return self._call_function("delete_configuration", [key])[0]

    def list_groups(self):
        """
        See @DataStore description
        """
        super(MultiStore, self).list_groups()
        return self._call_function("list_groups", [])[0]

    def get_group_devices(self, group):
        """
        See @DataStore description
        """
        super(MultiStore, self).get_group_devices(group)
        return self._call_function("get_group_devices", [group])[0]

    def add_to_group(self, device_list, group):
        """
        See @DataStore description
        """
        super(MultiStore, self).add_to_group(device_list, group)
        return self._call_function("add_to_group", [device_list, group])[0]

    def remove_from_group(self, device_list, group):
        """
        See @DataStore description
        """
        super(MultiStore, self).remove_from_group(device_list, group)
        return self._call_function("remove_from_group", [device_list, group])[0]

    # UTIL FUNCTIONS
    def get_device_types(self):
        """
        See @DataStore description
        """
        super(MultiStore, self).get_device_types()
        return self._call_function("get_device_types", [])[0]

    def get_log_levels(self):
        """
        See @DataStore description
        """
        super(MultiStore, self).get_log_levels()
        return self._call_function("get_log_levels", [])[0]

    # CANNED QUERIES
    # NOTE(review): the base-class hooks below are invoked without
    # device_name, as in the original code — confirm whether the base class
    # expects to receive it.
    def get_node(self, device_name=None):
        """
        See @DataStore description
        """
        super(MultiStore, self).get_node()
        return self._call_function("get_node", [device_name])[0]

    def get_bmc(self, device_name=None):
        """
        See @DataStore description
        """
        super(MultiStore, self).get_bmc()
        return self._call_function("get_bmc", [device_name])[0]

    def get_pdu(self, device_name=None):
        """
        See @DataStore description
        """
        super(MultiStore, self).get_pdu()
        return self._call_function("get_pdu", [device_name])[0]

    def get_profile_devices(self, profile_name):
        """
        See @DataStore description
        """
        super(MultiStore, self).get_profile_devices(profile_name)
        return self._call_function("get_profile_devices", [profile_name])[0]

    def export_to_file(self, file_location):
        """
        See @DataStore description
        """
        super(MultiStore, self).export_to_file(file_location)
        return self._call_function("export_to_file", [file_location])[0]

    def import_from_file(self, file_location):
        """
        See @DataStore description
        """
        super(MultiStore, self).import_from_file(file_location)
        return self._call_function("import_from_file", [file_location])[0]
| {
"content_hash": "7b8f45517768a40ea6f61a1e5b14ec2a",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 117,
"avg_line_length": 33.19083969465649,
"alnum_prop": 0.5911913523459061,
"repo_name": "intel-ctrlsys/actsys",
"id": "f63b1c4d83ad3683b26f7de6f05f621f5d37fb0d",
"size": "8757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datastore/datastore/multistore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "11641"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1048209"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the Corellian supreme durasteel ship armor."""
    obj = Tangible()

    obj.template = "object/tangible/ship/components/armor/shared_arm_corellian_supreme_durasteel.iff"
    obj.attribute_template_id = 8
    obj.stfName("space/space_item", "arm_corellian_supreme_durasteel_n")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return obj
"content_hash": "cf500ac95fdf77b42ed22161f8b1e1ce",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 101,
"avg_line_length": 27.76923076923077,
"alnum_prop": 0.7257617728531855,
"repo_name": "obi-two/Rebelion",
"id": "5c9170a3d5fe004bcd403e140f25f82a5d7a2baf",
"size": "506",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/ship/components/armor/shared_arm_corellian_supreme_durasteel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""
Copied+modified from rest_framework.negotiation, which is licensed under the BSD license:
*******************************************************************************
Copyright (c) 2011-2016, Tom Christie
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
Content negotiation deals with selecting an appropriate renderer given the
incoming request. Typically this will be based on the request's Accept header.
"""
from __future__ import unicode_literals
from django.http import Http404
from api import exceptions
from api.settings import HTTP_HEADER_ENCODING
from api.utils.mediatypes import MediaType, media_type_matches, order_by_precedence
class DefaultContentNegotiation(object):
    """
    Content negotiation strategy mirroring DRF's default: choose a parser by
    the request's Content-Type header, and a renderer by the Accept header
    (most specific acceptable media types tried first).
    """

    # noinspection PyMethodMayBeStatic
    def select_parser(self, request, parsers):
        """
        Given a list of parsers and a media type, return the appropriate
        parser to handle the incoming request.

        Returns None when no parser matches the request's content type.
        """
        for parser in parsers:
            if media_type_matches(parser.media_type, request.content_type):
                return parser
        return None

    def select_renderer(self, request, renderers, format_suffix=None):
        """
        Given a request and a list of renderers, return a two-tuple of:
        (renderer, media type).

        Raises NotAcceptable when no renderer satisfies the Accept header
        (and Http404, via filter_renderers, when a format suffix matches
        no renderer).
        """
        if format_suffix:
            renderers = self.filter_renderers(renderers, format_suffix)
        accepts = self.get_accept_list(request)
        # Check the acceptable media types against each renderer,
        # attempting more specific media types first
        # NB. The inner loop here isn't as bad as it first looks :)
        # Worst case is we're looping over len(accept_list) * len(self.renderers)
        for media_type_set in order_by_precedence(accepts):
            for renderer in renderers:
                for media_type in media_type_set:
                    if media_type_matches(renderer.media_type, media_type):
                        # Return the most specific media type as accepted.
                        media_type_wrapper = MediaType(media_type)
                        if MediaType(renderer.media_type).precedence > media_type_wrapper.precedence:
                            # Eg client requests '*/*'
                            # Accepted media type is 'application/json'
                            # Re-attach any client-supplied params (e.g. indent)
                            # to the renderer's concrete media type.
                            full_media_type = ';'.join(
                                (renderer.media_type,) +
                                tuple('{0}={1}'.format(
                                    key, value.decode(HTTP_HEADER_ENCODING))
                                    for key, value in media_type_wrapper.params.items()))
                            return renderer, full_media_type
                        else:
                            # Eg client requests 'application/json; indent=8'
                            # Accepted media type is 'application/json; indent=8'
                            return renderer, media_type
        raise exceptions.NotAcceptable(available_renderers=renderers)

    # noinspection PyMethodMayBeStatic
    def filter_renderers(self, renderers, format_suffix):
        """
        If there is a '.json' style format suffix, filter the renderers
        so that we only negotiate against those that accept that format.

        Raises Http404 when the suffix matches no renderer.
        """
        renderers = [renderer for renderer in renderers if renderer.format == format_suffix]
        if not renderers:
            raise Http404
        return renderers

    # noinspection PyMethodMayBeStatic
    def get_accept_list(self, request):
        """
        Given the incoming request, return a tokenised list of media type strings.

        Defaults to ['*/*'] when no Accept header was sent.
        """
        header = request.META.get('HTTP_ACCEPT', '*/*')
        return [token.strip() for token in header.split(',')]
| {
"content_hash": "f7873c96d2e789b8b71b864065e92c68",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 101,
"avg_line_length": 45.85454545454545,
"alnum_prop": 0.6379857256145915,
"repo_name": "erigones/esdc-ce",
"id": "0b1aef2dfd956947f9e50579a13d2104a600bcc9",
"size": "5044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/negotiation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2728"
},
{
"name": "C",
"bytes": "8581"
},
{
"name": "CSS",
"bytes": "146461"
},
{
"name": "DTrace",
"bytes": "2250"
},
{
"name": "Erlang",
"bytes": "18842"
},
{
"name": "HTML",
"bytes": "473343"
},
{
"name": "JavaScript",
"bytes": "679240"
},
{
"name": "Jinja",
"bytes": "29584"
},
{
"name": "PLpgSQL",
"bytes": "17954"
},
{
"name": "Perl",
"bytes": "93955"
},
{
"name": "Python",
"bytes": "3124524"
},
{
"name": "Ruby",
"bytes": "56"
},
{
"name": "SCSS",
"bytes": "82814"
},
{
"name": "Shell",
"bytes": "281885"
}
],
"symlink_target": ""
} |
import unittest
import numpy
from yann.modules.visualizer import save_images
from yann.modules.visualizer import visualizer as v
try:
    # BUG FIX: the original imported only ``Mock`` here, leaving ``patch``
    # (used by every @patch decorator below) unbound whenever the stdlib
    # unittest.mock was available. Import both names in each branch.
    from unittest.mock import Mock, patch
except ImportError:
    # Python 2: fall back to the third-party ``mock`` backport.
    from mock import Mock, patch
class TestVisualizer(unittest.TestCase):
def setUp(self):
self.input_ndarray = numpy.zeros((1,10,10,1))
self.input_ndarray_multichannel_3 = numpy.zeros((3,10,10,1))
self.input_ndarray_multichannel_2 = numpy.zeros((2, 10, 10, 3))
self.verbose = 3
self.prefix = "."
self.input_ndarray_dim_2 = numpy.zeros((1, 1))
self.input_ndarray_dim_2_case_else_case = numpy.zeros((3, 10))
self.visualizer_init_args = {
'id':'test',
'root':'test',
'frequency':'test',
'sample_size':'test',
'rgb_filters':'test',
'debug_functions':'test',
'debug_layers':'test',
'save_confusion': True
}
self.wrong_path = 'C:/wrong/path'
self.input_batch_size = 1
self.indices_size = (self.input_batch_size,)
@patch('yann.modules.visualizer.imsave')
def test1_save_images_channels_1(self,mock_imsave):
try:
mock_imsave.return_value = ""
save_images(imgs= self.input_ndarray, prefix = self.prefix, is_color= True, verbose = self.verbose)
self.assertEqual(True, True)
except Exception, c:
self.assertEqual(True, False)
@patch('yann.modules.visualizer.imsave')
def test2_save_images_channels_3(self,mock_imsave):
try:
mock_imsave.return_value = ""
save_images(imgs= self.input_ndarray_multichannel_3, prefix = self.prefix, is_color= True, verbose = self.verbose)
self.assertEqual(True, True)
except Exception, c:
self.assertEqual(True, False)
@patch('yann.modules.visualizer.imsave')
def test3_save_images_channels_2(self,mock_imsave):
try:
mock_imsave.return_value = ""
save_images(imgs= self.input_ndarray_multichannel_2, prefix = self.prefix, is_color= True, verbose = self.verbose)
self.assertEqual(True, True)
except Exception, c:
self.assertEqual(True, False)
@patch('yann.modules.visualizer.imsave')
def test4_save_images_channels_dim_2(self,mock_imsave):
try:
mock_imsave.return_value = ""
save_images(imgs= self.input_ndarray_dim_2, prefix = self.prefix, is_color= True, verbose = self.verbose)
self.assertEqual(True, True)
except Exception, c:
self.assertEqual(True, False)
@patch('yann.modules.visualizer.imsave')
def test5_save_images_channels_dim_2_else_case(self,mock_imsave):
try:
mock_imsave.return_value = ""
save_images(imgs= self.input_ndarray_dim_2_case_else_case, prefix = self.prefix, is_color= True, verbose = self.verbose)
self.assertEqual(True, True)
except Exception, c:
self.assertEqual(True, False)
@patch('os.getcwd')
@patch('os.makedirs')
def test6_visualizer_no_vals(self,mock_makedir,mock_getcwd):
mock_getcwd.return_value = self.wrong_path
mock_makedir.return_value = ""
try:
v(
visualizer_init_args ={},
verbose = self.verbose)
self.assertEqual(True,True)
except Exception:
self.assertEqual(True,False)
@patch('os.getcwd')
@patch('os.makedirs')
def test7_visualizer_vals(self,mock_makedir,mock_getcwd):
mock_getcwd.return_value = self.wrong_path
mock_makedir.return_value = ""
try:
v(
visualizer_init_args =self.visualizer_init_args,
verbose = self.verbose)
self.assertEqual(True,True)
except Exception:
self.assertEqual(True,False)
@patch('os.getcwd')
@patch('os.makedirs')
def test8_visualizer_initialize(self,mock_makedir,mock_getcwd):
mock_getcwd.return_value = self.wrong_path
mock_makedir.return_value = ""
self.v = v(
visualizer_init_args = {},
verbose = self.verbose)
self.assertEqual(True,True)
self.v.sample_size = self.input_batch_size
self.v.initialize(batch_size=3, verbose= self.verbose)
self.assertEqual(self.v.indices.shape,self.indices_size)
@patch('yann.modules.visualizer.static_theano_print')
@patch('yann.modules.visualizer.static_printer_import')
@patch('yann.modules.visualizer.dynamic_printer_import')
@patch('os.getcwd')
@patch('os.makedirs')
def test9_theano_function_visualizer_import_true(self,mock_makedir,mock_getcwd,mock_dynamic,mock_static,mock_theano_print):
mock_dynamic.return_value = True
mock_static.return_value = True
mock_getcwd.return_value = self.wrong_path
mock_makedir.return_value = ""
mock_theano_print.return_value = ""
self.v = v(
visualizer_init_args = {},
verbose = self.verbose)
self.assertEqual(True,True)
self.v.sample_size = self.input_batch_size
self.v.initialize(batch_size=3, verbose= self.verbose)
self.v.theano_function_visualizer(function= "",
short_variable_names = False,
format ='pdf',
verbose = self.verbose)
self.assertEqual(self.v.indices.shape,self.indices_size)
@patch('theano.printing.pydot_imported')
@patch('yann.modules.visualizer.static_theano_print')
@patch('yann.modules.visualizer.static_printer_import')
@patch('yann.modules.visualizer.dynamic_printer_import')
@patch('os.getcwd')
@patch('os.makedirs')
def test10_theano_function_visualizer_exp_static(self,mock_makedir,mock_getcwd,mock_dynamic,mock_static,mock_theano_print,mock_pydot):
mock_dynamic.return_value = False
mock_static.return_value = True
mock_getcwd.return_value = self.wrong_path
mock_makedir.return_value = ""
mock_theano_print.side_effect = OSError('abc')
mock_pydot.return_value = False
try:
self.v = v(
visualizer_init_args = {},
verbose = self.verbose)
self.v.sample_size = self.input_batch_size
self.v.initialize(batch_size=3, verbose= self.verbose)
self.v.theano_function_visualizer(function="",
short_variable_names="abs",
format='pdf1',
verbose=self.verbose)
self.assertEqual(True,False)
except Exception:
self.assertEqual(True,True)
@patch('theano.printing.pydot_imported')
@patch('yann.modules.visualizer.static_theano_print')
@patch('yann.modules.visualizer.static_printer_import')
@patch('yann.modules.visualizer.dynamic_printer_import')
@patch('os.getcwd')
@patch('os.makedirs')
def test11_theano_function_visualizer_exp_dynamic(self,mock_makedir,mock_getcwd,mock_dynamic,mock_static,mock_theano_print,mock_pydot):
mock_dynamic.return_value = True
mock_static.return_value = False
mock_getcwd.return_value = self.wrong_path
mock_makedir.return_value = ""
mock_theano_print.side_effect = OSError('abc')
mock_pydot.return_value = False
try:
self.v = v(
visualizer_init_args = {},
verbose = self.verbose)
self.v.sample_size = self.input_batch_size
self.v.initialize(batch_size=3, verbose= self.verbose)
self.v.theano_function_visualizer(function="",
short_variable_names="abs",
format='pdf1',
verbose=self.verbose)
self.assertEqual(True,False)
except Exception:
self.assertEqual(True,True)
    @patch('yann.modules.visualizer.save_images')
    @patch('yann.modules.visualizer.static_theano_print')
    @patch('yann.modules.visualizer.static_printer_import')
    @patch('yann.modules.visualizer.dynamic_printer_import')
    @patch('os.getcwd')
    @patch('os.makedirs')
    def test12_theano_function_visualizer_visualize_images(self,mock_makedir,mock_getcwd,mock_dynamic,mock_static,mock_theano_print,mock_save_images):
        # Happy path: with printing and image saving stubbed out,
        # visualize_images should complete without raising.
        mock_dynamic.return_value = True
        mock_static.return_value = True
        mock_getcwd.return_value = self.wrong_path
        mock_makedir.return_value = ""
        mock_theano_print.return_value = ""
        mock_save_images.return_value = ""
        try:
            self.v = v(
                    visualizer_init_args = {},
                    verbose = self.verbose)
            self.assertEqual(True,True)
            self.v.sample_size = self.input_batch_size
            self.v.initialize(batch_size=3, verbose= self.verbose)
            self.v.visualize_images(imgs= self.input_ndarray, loc = None, verbose = self.verbose)
            self.assertEqual(True,True)
        except Exception:
            # Any exception here is a regression.
            self.assertEqual(True,False)
# @patch('yann.modules.visualizer.save_images')
# @patch('yann.modules.visualizer.static_theano_print')
# @patch('yann.modules.visualizer.static_printer_import')
# @patch('yann.modules.visualizer.dynamic_printer_import')
# @patch('os.getcwd')
# @patch('os.makedirs')
# def test13_theano_function_visualizer_visualize_activities(self,mock_makedir,mock_getcwd,mock_dynamic,mock_static,mock_theano_print,mock_save_images):
# mock_dynamic.return_value = True
# mock_static.return_value = True
# mock_getcwd.return_value = self.wrong_path
# mock_makedir.return_value = ""
# mock_theano_print.return_value = ""
# mock_save_images.return_value = ""
# # try:
# self.v = v(
# visualizer_init_args = {},
# verbose = self.verbose)
# self.assertEqual(True,True)
# self.v.sample_size = self.input_batch_size
# self.v.initialize(batch_size=3, verbose= self.verbose)
# self.layer_activities ={ 'a': ['xyz1', 'xyz2'],
# 'b': ['xyz3', 'xyz4'],
# 'c': ['xyz5'],
# 'd': ['xyz6']}
# self.v.visualize_activities(layer_activities=self.layer_activities, epoch =0, index = 0, verbose = self.verbose)
# self.assertEqual(True,True)
# except Exception:
# self.assertEqual(True,False)
| {
"content_hash": "2e294e1d12afa3673c2c37a438fd616f",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 156,
"avg_line_length": 44.164,
"alnum_prop": 0.5826464994112852,
"repo_name": "ragavvenkatesan/Convolutional-Neural-Networks",
"id": "48fa2bc8890165c06dc1a95251d0441caaa84057",
"size": "11041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/modules/test_visualizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "2002"
},
{
"name": "Python",
"bytes": "171871"
}
],
"symlink_target": ""
} |
from euca2ools.commands.argtypes import delimited_list
from euca2ools.commands.elasticloadbalancing import ELBRequest
from requestbuilder import Arg
from requestbuilder.mixins import TabifyingMixin
def instance_id(inst_as_str):
    """Wrap a bare instance ID string in the request-parameter dict form
    expected by the ELB API ('Instances.member' list items)."""
    return dict(InstanceId=inst_as_str)
class DeregisterInstancesFromLoadBalancer(ELBRequest, TabifyingMixin):
    # euca2ools ELB command (Python 2 codebase: note the print statement
    # in print_result) that removes instances from a load balancer.
    DESCRIPTION = 'Remove one or more instances from a load balancer'
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='name of the load balancer to modify (required)'),
            Arg('--instances', dest='Instances.member', required=True,
                metavar='INSTANCE1,INSTANCE2,...',
                # Each comma-separated ID is wrapped by instance_id() into
                # the {'InstanceId': ...} member form.
                type=delimited_list(',', item_type=instance_id),
                help='''IDs of the instances to remove from the load balancer
                (required)''')]
    LIST_TAGS = ['Instances']
    def print_result(self, result):
        # One tab-separated 'INSTANCE <id>' line per remaining instance.
        for instance in result.get('Instances', []):
            print self.tabify(('INSTANCE', instance.get('InstanceId')))
| {
"content_hash": "59ca6e86b96f72bfe4e072a0216a28f0",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 77,
"avg_line_length": 42.666666666666664,
"alnum_prop": 0.671875,
"repo_name": "vasiliykochergin/euca2ools",
"id": "48d0e5be00e413ebf10099a60ad84f3a48447129",
"size": "2366",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "euca2ools/commands/elasticloadbalancing/deregisterinstancesfromloadbalancer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1220919"
},
{
"name": "Shell",
"bytes": "872"
}
],
"symlink_target": ""
} |
from scipy import *
from ephem import *
from astropy import *
from astropy import units
# Vasaant Krishnan
def wave(freq):
    """Convert a frequency to its wavelength (lambda = c / nu).

    `c` is the speed of light supplied by the wildcard imports at module top.
    """
    return c / freq
def freq(wave):
    """Convert a wavelength to its frequency (nu = c / lambda).

    `c` is the speed of light supplied by the wildcard imports at module top.
    """
    return c / wave
# Adaptation of J. Mac's MATlAB weighted mean function
def wmean(x, W, rms=False):
    """Weighted mean of x with weights W.

    Adapted from J. Mac's MATLAB weighted-mean function.  When ``rms`` is
    True, also return the weighted rms scatter about the mean (0 for a
    single-element input).
    """
    mean_val = sum(multiply(x, W)) / sum(W)
    if len(x) == 1:
        scatter = 0
    else:
        deviations = [(xi - mean_val) ** 2 for xi in x]
        scatter = sqrt(sum(multiply(deviations, W)) / sum(W))
    if rms:
        return mean_val, scatter
    return mean_val
# Variance Weighted Mean. Wednesday, 03 August 2016
def varmean(x,W,rms=False):
if len(x) == 1:
wrms = 0
else:
W = [1/(i**2) for i in W]
varmean = sum(multiply(x,W))/sum(W)
err = sqrt(1/sum(W))
if rms:
return varmean,err
else:
return varmean
# mas/yr to km/s, Sunday, 11 September 2016
def masyr2kms(asyr,parsecDistance):
    """Convert an angular proper motion to a linear speed in km/s at a
    given distance.

    NOTE(review): despite the function name (mas/yr), `as2rad` converts
    *arcseconds* to radians, so `asyr` is treated as arcsec/yr here --
    confirm the intended input unit against callers.
    """
    # Constant conversions:
    parsec2km = units.pc.to(units.km) * parsecDistance
    yr2sec = 365.2425 * 24 * 60 * 60
    as2rad = radians(1./3600.)
    # Variable conversions:
    theta = asyr * as2rad # arcsec/yr --> radians/yr
    theta = theta/yr2sec # radians/yr --> radians/sec
    kms = parsec2km*theta
    return kms
| {
"content_hash": "c5b50b5368b21b6820bfe0acb306333f",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 24.389830508474578,
"alnum_prop": 0.5865184155663655,
"repo_name": "ekadhanda/bin",
"id": "5309a8b717a7a6da3051b34a06035c10f31244e5",
"size": "1439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "256802"
},
{
"name": "Fortran",
"bytes": "2243850"
},
{
"name": "Gnuplot",
"bytes": "568"
},
{
"name": "Limbo",
"bytes": "7427"
},
{
"name": "M",
"bytes": "400712"
},
{
"name": "Makefile",
"bytes": "452"
},
{
"name": "Matlab",
"bytes": "1899618"
},
{
"name": "Objective-C",
"bytes": "1705"
},
{
"name": "Perl",
"bytes": "209638"
},
{
"name": "Python",
"bytes": "350904"
},
{
"name": "Shell",
"bytes": "321"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import responses
from exam import fixture
from sentry.utils.compat.mock import Mock
from requests.exceptions import SSLError
import sentry.identity
from sentry.identity.oauth2 import OAuth2CallbackView
from sentry.identity.pipeline import IdentityProviderPipeline
from sentry.identity.providers.dummy import DummyProvider
from sentry.testutils import TestCase
class OAuth2CallbackViewTest(TestCase):
    """Tests for OAuth2CallbackView.exchange_token against a mocked
    token endpoint (via the `responses` library)."""

    def setUp(self):
        # NOTE(review): self.user is read before create_user() rebinds it;
        # presumably the base TestCase fixture supplies an initial
        # self.user -- confirm against sentry.testutils.
        self.org = self.create_organization(owner=self.user)
        self.user = self.create_user("foo@example.com")
        sentry.identity.register(DummyProvider)
        super(OAuth2CallbackViewTest, self).setUp()

    def tearDown(self):
        super(OAuth2CallbackViewTest, self).tearDown()
        # Keep the global identity-provider registry clean for other tests.
        sentry.identity.unregister(DummyProvider)

    @fixture
    def view(self):
        # View under test, pointed at a fake token endpoint.
        return OAuth2CallbackView(
            access_token_url="https://example.org/oauth/token",
            client_id=123456,
            client_secret="secret-value",
        )

    @responses.activate
    def test_exchange_token_success(self):
        # Happy path: the endpoint returns JSON containing the token.
        responses.add(
            responses.POST, "https://example.org/oauth/token", json={"token": "a-fake-token"}
        )
        pipeline = IdentityProviderPipeline(request=Mock(), provider_key="dummy")
        code = "auth-code"
        result = self.view.exchange_token(None, pipeline, code)
        assert "token" in result
        assert "a-fake-token" == result["token"]

    @responses.activate
    def test_exchange_token_ssl_error(self):
        # SSL failures are reported as an error payload, not raised.
        def ssl_error(request):
            raise SSLError("Could not build connection")
        responses.add_callback(
            responses.POST, "https://example.org/oauth/token", callback=ssl_error
        )
        pipeline = IdentityProviderPipeline(request=Mock(), provider_key="dummy")
        code = "auth-code"
        result = self.view.exchange_token(None, pipeline, code)
        assert "token" not in result
        assert "error" in result
        assert "error_description" in result
        assert "SSL" in result["error_description"]

    @responses.activate
    def test_exchange_token_no_json(self):
        # A non-JSON (empty) body also surfaces as an error payload.
        responses.add(responses.POST, "https://example.org/oauth/token", body="")
        pipeline = IdentityProviderPipeline(request=Mock(), provider_key="dummy")
        code = "auth-code"
        result = self.view.exchange_token(None, pipeline, code)
        assert "token" not in result
        assert "error" in result
        assert "error_description" in result
        assert "JSON" in result["error_description"]
| {
"content_hash": "21a07a4285df17d8a41b325d633bb8fd",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 93,
"avg_line_length": 37.01428571428571,
"alnum_prop": 0.6719413353917406,
"repo_name": "beeftornado/sentry",
"id": "f94fb777ddbbcd5f92dfdf7e84d6edf5680965e9",
"size": "2591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/identity/test_oauth2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
"""Test the base functions of the media player."""
import base64
from homeassistant.components import media_player
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
async def test_get_image(hass, hass_ws_client, caplog):
    """Test get image via WS command."""
    await async_setup_component(
        hass, "media_player", {"media_player": {"platform": "demo"}}
    )
    await hass.async_block_till_done()
    client = await hass_ws_client(hass)
    # Stub the entity's image fetch so the WS layer is tested in isolation.
    with patch(
        "homeassistant.components.media_player.MediaPlayerEntity."
        "async_get_media_image",
        return_value=(b"image", "image/jpeg"),
    ):
        await client.send_json(
            {
                "id": 5,
                "type": "media_player_thumbnail",
                "entity_id": "media_player.bedroom",
            }
        )
        msg = await client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    assert msg["result"]["content_type"] == "image/jpeg"
    # The WS payload carries the image bytes base64-encoded.
    assert msg["result"]["content"] == base64.b64encode(b"image").decode("utf-8")
    # The command still works but must log its deprecation.
    assert "media_player_thumbnail is deprecated" in caplog.text
async def test_get_image_http(hass, aiohttp_client):
    """Test get image via http command."""
    await async_setup_component(
        hass, "media_player", {"media_player": {"platform": "demo"}}
    )
    await hass.async_block_till_done()
    state = hass.states.get("media_player.bedroom")
    # Without a remotely accessible image there is no local-proxy attribute.
    assert "entity_picture_local" not in state.attributes
    client = await aiohttp_client(hass.http.app)
    with patch(
        "homeassistant.components.media_player.MediaPlayerEntity."
        "async_get_media_image",
        return_value=(b"image", "image/jpeg"),
    ):
        # Fetch through the entity_picture proxy URL.
        resp = await client.get(state.attributes["entity_picture"])
        content = await resp.read()
    assert content == b"image"
async def test_get_image_http_remote(hass, aiohttp_client):
    """Test get image url via http command."""
    # Force the entity to report its image as remotely accessible so the
    # entity_picture_local attribute is exposed.
    with patch(
        "homeassistant.components.media_player.MediaPlayerEntity."
        "media_image_remotely_accessible",
        return_value=True,
    ):
        await async_setup_component(
            hass, "media_player", {"media_player": {"platform": "demo"}}
        )
        await hass.async_block_till_done()
        state = hass.states.get("media_player.bedroom")
        assert "entity_picture_local" in state.attributes
        client = await aiohttp_client(hass.http.app)
        with patch(
            "homeassistant.components.media_player.MediaPlayerEntity."
            "async_get_media_image",
            return_value=(b"image", "image/jpeg"),
        ):
            # The local proxy URL must still serve the image bytes.
            resp = await client.get(state.attributes["entity_picture_local"])
            content = await resp.read()
        assert content == b"image"
async def test_get_async_get_browse_image(hass, aiohttp_client, hass_ws_client):
    """Test get browse image."""
    await async_setup_component(
        hass, "media_player", {"media_player": {"platform": "demo"}}
    )
    await hass.async_block_till_done()
    # Reach into the entity component registry to get the entity object,
    # since get_browse_image_url is a method on the entity itself.
    entity_comp = hass.data.get("entity_components", {}).get("media_player")
    assert entity_comp
    player = entity_comp.get_entity("media_player.bedroom")
    assert player
    client = await aiohttp_client(hass.http.app)
    with patch(
        "homeassistant.components.media_player.MediaPlayerEntity."
        "async_get_browse_image",
        return_value=(b"image", "image/jpeg"),
    ):
        url = player.get_browse_image_url("album", "abcd")
        resp = await client.get(url)
        content = await resp.read()
    assert content == b"image"
def test_deprecated_base_class(caplog):
    """Test deprecated base class."""

    # Subclassing the legacy MediaPlayerDevice must emit a deprecation log
    # naming the subclass on instantiation.
    class CustomMediaPlayer(media_player.MediaPlayerDevice):
        pass

    CustomMediaPlayer()
    assert "MediaPlayerDevice is deprecated, modify CustomMediaPlayer" in caplog.text
async def test_media_browse(hass, hass_ws_client):
    """Test browsing media."""
    await async_setup_component(
        hass, "media_player", {"media_player": {"platform": "demo"}}
    )
    await hass.async_block_till_done()
    client = await hass_ws_client(hass)
    # Enable only the BROWSE_MEDIA feature on the demo player and stub the
    # browse call; first request passes explicit content type/id.
    with patch(
        "homeassistant.components.demo.media_player.YOUTUBE_PLAYER_SUPPORT",
        media_player.SUPPORT_BROWSE_MEDIA,
    ), patch(
        "homeassistant.components.media_player.MediaPlayerEntity." "async_browse_media",
        return_value={"bla": "yo"},
    ) as mock_browse_media:
        await client.send_json(
            {
                "id": 5,
                "type": "media_player/browse_media",
                "entity_id": "media_player.bedroom",
                "media_content_type": "album",
                "media_content_id": "abcd",
            }
        )
        msg = await client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    assert msg["result"] == {"bla": "yo"}
    # Content type and id are forwarded to the entity's browse call.
    assert mock_browse_media.mock_calls[0][1] == ("album", "abcd")
    # Second request omits content type/id (browse from the root).
    with patch(
        "homeassistant.components.demo.media_player.YOUTUBE_PLAYER_SUPPORT",
        media_player.SUPPORT_BROWSE_MEDIA,
    ), patch(
        "homeassistant.components.media_player.MediaPlayerEntity." "async_browse_media",
        return_value={"bla": "yo"},
    ):
        await client.send_json(
            {
                "id": 6,
                "type": "media_player/browse_media",
                "entity_id": "media_player.bedroom",
            }
        )
        msg = await client.receive_json()
    assert msg["id"] == 6
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    assert msg["result"] == {"bla": "yo"}
| {
"content_hash": "bb241f0fc8eee89e1ad1cd1264838aa1",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 88,
"avg_line_length": 31.155913978494624,
"alnum_prop": 0.6098360655737705,
"repo_name": "tboyce021/home-assistant",
"id": "9434fb1a411e4e7c4a2083cad8b2e07e850ac685",
"size": "5795",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/media_player/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "28861968"
},
{
"name": "Shell",
"bytes": "4815"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup

# Resolve paths relative to this file so builds work from any CWD.
setup_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(setup_dir, 'README.rst')) as readme_file:
    readme = readme_file.read()
with open(os.path.join(setup_dir, 'HISTORY.rst')) as history_file:
    history = history_file.read()

setup(
    name='pyope',
    version='0.2.2',
    description='Implementation of symmetric order-preserving encryption scheme',
    # PyPI long description is README followed by the changelog.
    long_description=readme + '\n\n' + history,
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Topic :: Security :: Cryptography',
    ],
    url='https://github.com/tonyo/pyope/',
    author='Anton Ovchinnikov',
    author_email='anton.ovchi2nikov@gmail.com',
    license='MIT',
    packages=['pyope'],
    install_requires=[
        'cryptography>=1.1',
        'six>=1.5.0',
    ],
)
| {
"content_hash": "72a80ac9f84e17458d7adef11ace2a03",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 81,
"avg_line_length": 28.516129032258064,
"alnum_prop": 0.6357466063348416,
"repo_name": "rev112/pyope",
"id": "6dc8f2b80068b6f5d81f4d98b4f0f3a3135ed239",
"size": "884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2316"
},
{
"name": "Python",
"bytes": "21564"
}
],
"symlink_target": ""
} |
"""annotate fusion outputs from STAR and Tophat
Supported:
oncofuse: http://www.unav.es/genetica/oncofuse.html
github: https://github.com/mikessh/oncofuse
"""
from __future__ import print_function
import os
import pysam
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.provenance import do
# ## oncofuse fusion trancript detection
def run(data):
    """Run Oncofuse on aligner fusion output for one sample.

    cmd line: java -Xmx1G -jar Oncofuse.jar input_file input_type tissue_type output_file

    Returns the path to the Oncofuse output file, or None when the genome
    build is unsupported or no fusion input file exists.

    Fixes over the original: a bare ``except:`` (which also swallowed
    SystemExit/KeyboardInterrupt) is narrowed to ``except Exception``, and a
    redundant ``if not file_exists(out_file)`` re-check after the early
    return was removed.
    """
    config = data["config"]
    genome_build = data.get("genome_build", "")
    input_type, input_dir, input_file = _get_input_para(data)
    if genome_build == "GRCh37":  # assume genome_build is hg19 otherwise
        # Oncofuse expects hg19-style chromosome names; translate b37 output.
        if config["algorithm"].get("aligner") in ["star"]:
            input_file = _fix_star_junction_output(input_file)
        if config["algorithm"].get("aligner") in ["tophat", "tophat2"]:
            input_file = _fix_tophat_junction_output(input_file)
    elif "hg19" not in genome_build:
        return None
    # Handle cases when the fusion file doesn't exist.
    if not file_exists(input_file):
        return None
    out_file = os.path.join(input_dir, "oncofuse_out.txt")
    if file_exists(out_file):
        return out_file
    oncofuse = config_utils.get_program("oncofuse", config)
    tissue_type = _oncofuse_tissue_arg_from_config(data)
    resources = config_utils.get_resources("oncofuse", config)
    cl = ["java"]
    cl += resources.get("jvm_opts", ["-Xms750m", "-Xmx5g"])
    with file_transaction(data, out_file) as tx_out_file:
        cl += ["-jar", oncofuse, input_file, input_type, tissue_type, tx_out_file]
        cmd = " ".join(cl)
        try:
            do.run(cmd, "oncofuse fusion detection", data)
        except Exception:
            # Best-effort: leave a stub output marking the failure rather
            # than aborting the whole pipeline.
            do.run("touch %s && echo '# failed' >> %s" % (tx_out_file, tx_out_file),
                   "oncofuse failed", data)
    return out_file
def is_non_zero_file(fpath):
    """Return True if *fpath* exists, is a regular file, and is non-empty."""
    # The original `True if cond else False` was redundant; the boolean
    # expression itself is the result.
    return os.path.isfile(fpath) and os.path.getsize(fpath) > 0
def _get_input_para(data):
    """Work out the Oncofuse input type tag, alignment directory and fusion
    file path for the sample's aligner (tophat/tophat2 or star).

    Returns (input_type, align_dir, fusion_file) or None for other aligners.
    """
    TOPHAT_FUSION_OUTFILE = "fusions.out"
    STAR_FUSION_OUTFILE = "Chimeric.out.junction"
    config = data["config"]
    # Disambiguation against another genome adds a per-genome subdirectory.
    is_disambiguate = len(config["algorithm"].get("disambiguate", [])) > 0
    aligner = config["algorithm"].get("aligner")
    if aligner == "tophat2":
        aligner = "tophat"
    names = data["rgnames"]
    # Set some default hard filters encoded in the input-type tag:
    N = 2  # min. spanning reads
    M = 4  # min. supporting reads (spanning + encompassing)
    align_dir_parts = os.path.join(data["dirs"]["work"], "align", names["sample"])
    align_dir_parts = os.path.join(align_dir_parts, data["genome_build"]) if is_disambiguate else align_dir_parts
    if aligner in ["tophat", "tophat2"]:
        # Tophat writes per-lane output under a lane_aligner directory.
        align_dir_parts = os.path.join(data["dirs"]["work"], "align", names["sample"], names["lane"]+"_%s" % aligner)
        return "tophat-%d-%d" % (N,M), align_dir_parts, os.path.join(align_dir_parts, TOPHAT_FUSION_OUTFILE)
    if aligner in ["star"]:
        star_junction_file = os.path.join(align_dir_parts, names["lane"]+STAR_FUSION_OUTFILE)
        if is_disambiguate:
            # When disambiguating, prefer an already-filtered junction file;
            # otherwise build it from the contamination-genome BAM.
            contamination_bam = data["disambiguate"][ config["algorithm"]["disambiguate"][0] ]
            disambig_out_file = star_junction_file + "_disambiguated"
            if file_exists(disambig_out_file):
                star_junction_file = disambig_out_file
            elif file_exists(star_junction_file) and file_exists(contamination_bam):
                star_junction_file = _disambiguate_star_fusion_junctions(star_junction_file, contamination_bam,
                                                                         disambig_out_file, data)
        return "rnastar-%d-%d" % (N,M), align_dir_parts, star_junction_file
    # Unsupported aligner for fusion calling.
    return None
def _fix_tophat_junction_output(chimeric_out_junction_file):
    """Rewrite Tophat fusions.out with hg19-style chromosome names.

    The first tab field is 'chrA-chrB' in GRCh37 naming; both sides are
    translated with _h37tohg19 and lines with untranslatable chromosomes
    are dropped.  Returns the path of the new file (input + '.hg19').

    Fix: the original translated each chromosome twice (bound the results
    to leftchr/rightchr, then called _h37tohg19 again when formatting);
    the already-computed values are now reused.
    """
    out_file = chimeric_out_junction_file + ".hg19"
    with open(out_file, "w") as out_handle:
        with open(chimeric_out_junction_file, "r") as in_handle:
            for line in in_handle:
                parts = line.split("\t")
                left, right = parts[0].split("-")
                leftchr = _h37tohg19(left)
                rightchr = _h37tohg19(right)
                if not leftchr or not rightchr:
                    continue
                parts[0] = "%s-%s" % (leftchr, rightchr)
                out_handle.write("\t".join(parts))
    return out_file
def _fix_star_junction_output(chimeric_out_junction_file):
    """Rewrite STAR Chimeric.out.junction with hg19-style chromosome names.

    Fields 0 and 3 hold the donor/acceptor chromosomes; lines whose
    chromosomes have no hg19 equivalent are dropped.  Returns the path of
    the new file (input + '.hg19').
    """
    out_file = chimeric_out_junction_file + ".hg19"
    with open(out_file, "w") as out_handle:
        with open(chimeric_out_junction_file, "r") as in_handle:
            for record in in_handle:
                fields = record.split("\t")
                fields[0] = _h37tohg19(fields[0])
                fields[3] = _h37tohg19(fields[3])
                if fields[0] and fields[3]:
                    out_handle.write("\t".join(fields))
    return out_file
def _h37tohg19(chromosome):
MAX_CHROMOSOMES = 23
if chromosome in [str(x) for x in range(1, MAX_CHROMOSOMES)] + ["X", "Y"]:
new_chrom = "chr%s" % chromosome
elif chromosome == "MT":
new_chrom = "chrM"
# not a supported chromosome
else:
return None
return new_chrom
def _oncofuse_tissue_arg_from_config(data):
"""Retrieve oncofuse arguments supplied through input configuration.
tissue_type is the library argument, which tells Oncofuse to use its
own pre-built gene expression libraries. There are four pre-built
libraries, corresponding to the four supported tissue types:
EPI (epithelial origin),
HEM (hematological origin),
MES (mesenchymal origin) and
AVG (average expression, if tissue source is unknown).
"""
SUPPORTED_TISSUE_TYPE = ["EPI", "HEM", "MES", "AVG"]
if data.get("metadata", {}).get("tissue") in SUPPORTED_TISSUE_TYPE:
return data.get("metadata", {}).get("tissue")
else:
return "AVG"
def _disambiguate_star_fusion_junctions(star_junction_file, contamination_bam, disambig_out_file, data):
    """Disambiguate detected fusions based on alignments to another species.

    Junction records whose read name also aligns in the contamination BAM
    are removed.  Returns the path to the disambiguated junction file.

    Fixes over the original: the junction input file and the output handle
    were opened but never closed -- in particular the output was written
    inside file_transaction without a close/flush, risking a truncated
    file when the transaction moved it into place.
    """
    fusiondict = {}
    with open(star_junction_file, "r") as in_handle:
        for my_line in in_handle:
            my_line_split = my_line.strip().split("\t")
            if len(my_line_split) < 10:
                continue
            # Field 9 is the read name; keep the full record per read.
            fusiondict[my_line_split[9]] = my_line.strip("\n")
    samfile = pysam.Samfile(contamination_bam, "rb")
    try:
        for my_read in samfile:
            if 0x4 & my_read.flag or my_read.is_secondary:  # flag 0x4 means unaligned
                continue
            # Read also aligns to the contamination genome: drop the fusion.
            if my_read.qname in fusiondict:
                fusiondict.pop(my_read.qname)
    finally:
        samfile.close()
    with file_transaction(data, disambig_out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for my_key in fusiondict:
                print(fusiondict[my_key], file=out_handle)
    return disambig_out_file
| {
"content_hash": "c8f307f963ab006cf6e4cb6511e7d3ba",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 117,
"avg_line_length": 42.58682634730539,
"alnum_prop": 0.6190944881889764,
"repo_name": "Cyberbio-Lab/bcbio-nextgen",
"id": "4891a7adf947a7743b1fcf423ea1bc24d5796475",
"size": "7112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcbio/rnaseq/oncofuse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1656627"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "14377"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from dials.command_line import missing_reflections
def test_l_cysteine_4_sweeps_scaled(dials_data, capsys):
    """Regression-check the missing_reflections report for the scaled
    l-cysteine dataset (asserted numbers are fixture-specific)."""
    data = dials_data("l_cysteine_4_sweeps_scaled", pathlib=True)
    missing_reflections.run(
        args=[str(data / "scaled_30.expt"), str(data / "scaled_30.refl")]
    )
    captured = capsys.readouterr()
    assert "Completeness in resolution range: 0.754473" in captured.out
    assert "Completeness with d_max=infinity: 0.753543" in captured.out
    assert "# reflections | % missing | Resolution range (Å)" in captured.out
    assert "260 | 16 | 1.37-0.59" in captured.out
def test_vmxi_proteinase_k_sweeps_integrated(dials_data, capsys):
    """Regression-check the report when combining two integrated sweeps
    of the VMXi proteinase K dataset."""
    data = dials_data("vmxi_proteinase_k_sweeps", pathlib=True)
    # Two experiment/reflection pairs are passed in a single run.
    missing_reflections.run(
        args=[
            str(data / "experiments_0.expt"),
            str(data / "reflections_0.refl"),
            str(data / "experiments_1.expt"),
            str(data / "reflections_1.refl"),
        ]
    )
    captured = capsys.readouterr()
    assert "Completeness in resolution range: 0.781833" in captured.out
    assert "Completeness with d_max=infinity: 0.7818" in captured.out
    assert "# reflections | % missing | Resolution range (Å)" in captured.out
    assert "4899 | 20.7 | 2.36-1.80" in captured.out
    assert "190 | 0.8 | 2.36-1.80" in captured.out
def test_insulin_scaled(dials_data, capsys):
    """Regression-check the report for the scaled insulin dataset."""
    data = dials_data("insulin_processed", pathlib=True)
    missing_reflections.run(args=[str(data / "scaled.expt"), str(data / "scaled.refl")])
    captured = capsys.readouterr()
    assert "Resolution range: 55.2195 1.45064" in captured.out
    assert "Completeness in resolution range: 0.792288" in captured.out
    assert "Completeness with d_max=infinity: 0.792288" in captured.out
    assert "# reflections | % missing | Resolution range (Å)" in captured.out
    # Either count is accepted: the region size varies by one reflection
    # between environments.
    assert (
        "2925 | 20.6 | 1.84-1.45" in captured.out
        or "2924 | 20.6 | 1.84-1.45" in captured.out
    )
    assert "163 | 1.1 | 1.57-1.45" in captured.out
def test_insulin_scaled_d_min_d_max(dials_data, capsys):
    """Check that restricting the resolution range leaves no connected
    regions of missing reflections for the insulin dataset."""
    data = dials_data("insulin_processed", pathlib=True)
    missing_reflections.run(
        args=[
            str(data / "scaled.expt"),
            str(data / "scaled.refl"),
            "d_min=1.863199",  # inscribed circle
            "d_max=55",
            "min_component_size=10",
        ]
    )
    captured = capsys.readouterr()
    assert "Resolution range: 39.0461 1.86463" in captured.out
    assert "Completeness in resolution range: 0.996462" in captured.out
    assert "Completeness with d_max=infinity: 0.996315" in captured.out
    assert "No connected regions of missing reflections identified" in captured.out
| {
"content_hash": "8ecf9456f28f37ca0e05b4d90b97e13a",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 88,
"avg_line_length": 42.303030303030305,
"alnum_prop": 0.6429083094555874,
"repo_name": "dials/dials",
"id": "8059794da12ca2777b2ddd025f35990cd3480a07",
"size": "2795",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/command_line/test_missing_reflections.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "379"
},
{
"name": "C++",
"bytes": "1758129"
},
{
"name": "CMake",
"bytes": "34388"
},
{
"name": "Dockerfile",
"bytes": "329"
},
{
"name": "Gherkin",
"bytes": "400"
},
{
"name": "HTML",
"bytes": "25033"
},
{
"name": "Makefile",
"bytes": "76"
},
{
"name": "Python",
"bytes": "6147100"
},
{
"name": "Shell",
"bytes": "6419"
}
],
"symlink_target": ""
} |
from datetime import datetime
from operator import methodcaller
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
from pandas.core.indexes.datetimes import date_range
from pandas.core.resample import TimeGrouper
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
# Shared module-level fixture: 1000 daily random observations from 2000-01-01.
# NOTE: test_count() below mutates this Series in place (NaN every 3rd row).
test_series = Series(np.random.randn(1000),
                     index=date_range('1/1/2000', periods=1000))
def test_apply():
    """TimeGrouper apply matches a plain groupby-by-year apply."""
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        # pd.TimeGrouper is deprecated; constructing it warns.
        grouper = pd.TimeGrouper(freq='A', label='right', closed='right')
    grouped = test_series.groupby(grouper)

    def f(x):
        # Keep the three largest values of each group.
        return x.sort_values()[-3:]

    applied = grouped.apply(f)
    expected = test_series.groupby(lambda x: x.year).apply(f)
    # Drop the differing outer group level so only values/inner index compare.
    applied.index = applied.index.droplevel(0)
    expected.index = expected.index.droplevel(0)
    assert_series_equal(applied, expected)
def test_count():
    """TimeGrouper/resample count match a plain groupby-by-year count."""
    # NOTE: mutates the shared module-level test_series in place.
    test_series[::3] = np.nan
    expected = test_series.groupby(lambda x: x.year).count()
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        grouper = pd.TimeGrouper(freq='A', label='right', closed='right')
    result = test_series.groupby(grouper).count()
    # Align index labels; only the counts themselves are under test.
    expected.index = result.index
    assert_series_equal(result, expected)
    result = test_series.resample('A').count()
    expected.index = result.index
    assert_series_equal(result, expected)
def test_numpy_reduction():
    """resample().prod() matches groupby-by-year agg with np.prod."""
    result = test_series.resample('A', closed='right').prod()
    expected = test_series.groupby(lambda x: x.year).agg(np.prod)
    expected.index = result.index
    assert_series_equal(result, expected)
def test_apply_iteration():
    """Grouped apply with group_keys=False keeps the original index (#2300)."""
    N = 1000
    ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
    df = DataFrame({'open': 1, 'close': 2}, index=ind)
    tg = TimeGrouper('M')
    _, grouper, _ = tg._get_grouper(df)
    # Errors previously occurred here
    grouped = df.groupby(grouper, group_keys=False)

    def f(df):
        return df['close'] / df['open']

    # it works!
    result = grouped.apply(f)
    tm.assert_index_equal(result.index, df.index)
@pytest.mark.parametrize('name, func', [
    ('Int64Index', tm.makeIntIndex),
    ('Index', tm.makeUnicodeIndex),
    ('Float64Index', tm.makeFloatIndex),
    ('MultiIndex', lambda m: tm.makeCustomIndex(m, 2))
])
def test_fails_on_no_datetime_index(name, func):
    """Grouping by TimeGrouper on a non-datetime index raises TypeError
    mentioning the offending index type."""
    n = 2
    index = func(n)
    df = DataFrame({'a': np.random.randn(n)}, index=index)
    msg = ("Only valid with DatetimeIndex, TimedeltaIndex "
           "or PeriodIndex, but got an instance of '{}'".format(name))
    with pytest.raises(TypeError, match=msg):
        df.groupby(TimeGrouper('D'))
def test_aaa_group_order():
    """TimeGrouper groups preserve the original row order (GH 12840).

    Check TimeGrouper performs stable sorts: the date key cycles through
    five days, so each group must be every 5th row of the frame.
    """
    n = 20
    data = np.random.randn(n, 4)
    df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
    df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
                 datetime(2013, 1, 3), datetime(2013, 1, 4),
                 datetime(2013, 1, 5)] * 4
    grouped = df.groupby(TimeGrouper(key='key', freq='D'))
    tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 1)),
                          df[::5])
    tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 2)),
                          df[1::5])
    tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 3)),
                          df[2::5])
    tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 4)),
                          df[3::5])
    tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 5)),
                          df[4::5])
def test_aggregate_normal(resample_method):
    """Check TimeGrouper's aggregation is identical as normal groupby."""
    if resample_method == 'ohlc':
        pytest.xfail(reason='DataError: No numeric types to aggregate')

    # Same data grouped two ways: by an int key and by an equivalent
    # daily datetime key through TimeGrouper.
    data = np.random.randn(20, 4)
    normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
    normal_df['key'] = [1, 2, 3, 4, 5] * 4
    dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
    dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
                    datetime(2013, 1, 3), datetime(2013, 1, 4),
                    datetime(2013, 1, 5)] * 4
    normal_grouped = normal_df.groupby('key')
    dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
    expected = getattr(normal_grouped, resample_method)()
    dt_result = getattr(dt_grouped, resample_method)()
    # Map the int group labels onto the corresponding dates for comparison.
    expected.index = date_range(start='2013-01-01', freq='D',
                                periods=5, name='key')
    tm.assert_equal(expected, dt_result)

    # if TimeGrouper is used included, 'nth' doesn't work yet
    """
    for func in ['nth']:
        expected = getattr(normal_grouped, func)(3)
        expected.index = date_range(start='2013-01-01',
                                    freq='D', periods=5, name='key')
        dt_result = getattr(dt_grouped, func)(3)
        assert_frame_equal(expected, dt_result)
    """
@pytest.mark.parametrize('method, method_args, unit', [
    ('sum', dict(), 0),
    ('sum', dict(min_count=0), 0),
    ('sum', dict(min_count=1), np.nan),
    ('prod', dict(), 1),
    ('prod', dict(min_count=0), 1),
    ('prod', dict(min_count=1), np.nan)
])
def test_resample_entirly_nat_window(method, method_args, unit):
    """An all-NaN resample window yields the identity element of the
    reduction, or NaN once min_count >= 1.

    (Function name typo 'entirly' kept: renaming would change pytest ids.)
    """
    s = pd.Series([0] * 2 + [np.nan] * 2,
                  index=pd.date_range('2017', periods=4))
    result = methodcaller(method, **method_args)(s.resample("2d"))
    expected = pd.Series([0.0, unit],
                         index=pd.to_datetime(['2017-01-01',
                                               '2017-01-03']))
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func, fill_value', [
    ('min', np.nan),
    ('max', np.nan),
    ('sum', 0),
    ('prod', 1),
    ('count', 0),
])
def test_aggregate_with_nat(func, fill_value):
    """Check TimeGrouper's aggregation is identical as normal groupby.

    If NaT is included, 'var', 'std', 'mean', 'first', 'last' and 'nth'
    don't work yet.  The NaT rows are dropped by TimeGrouper, so the
    expected frame has the missing day padded with the reduction's
    fill value.
    """
    n = 20
    data = np.random.randn(n, 4).astype('int64')
    normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
    normal_df['key'] = [1, 2, np.nan, 4, 5] * 4
    dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
    dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT,
                    datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4
    normal_grouped = normal_df.groupby('key')
    dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
    normal_result = getattr(normal_grouped, func)()
    dt_result = getattr(dt_grouped, func)()
    # Pad the day that exists only as NaT in the datetime key.
    pad = DataFrame([[fill_value] * 4], index=[3],
                    columns=['A', 'B', 'C', 'D'])
    expected = normal_result.append(pad)
    expected = expected.sort_index()
    expected.index = date_range(start='2013-01-01', freq='D',
                                periods=5, name='key')
    assert_frame_equal(expected, dt_result)
    assert dt_result.index.name == 'key'
def test_aggregate_with_nat_size():
    # GH 9925: size() through a daily TimeGrouper must match a plain
    # groupby size(), with the NaT day showing up as a zero-count bin.
    n = 20
    values = np.random.randn(n, 4).astype('int64')

    normal_df = DataFrame(values, columns=['A', 'B', 'C', 'D'])
    normal_df['key'] = [1, 2, np.nan, 4, 5] * 4

    dt_df = DataFrame(values, columns=['A', 'B', 'C', 'D'])
    dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT,
                    datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4

    dt_result = dt_df.groupby(TimeGrouper(key='key', freq='D')).size()

    # Build the expectation from the normal grouping plus an empty bin for
    # the missing key, then relabel with the full daily date range.
    expected = normal_df.groupby('key').size().append(Series([0], index=[3]))
    expected = expected.sort_index()
    expected.index = date_range(start='2013-01-01', freq='D',
                                periods=5, name='key')

    assert_series_equal(expected, dt_result)
    assert dt_result.index.name == 'key'
def test_repr():
    # GH18203: TimeGrouper.__repr__ must spell out every constructor
    # argument, including the defaults.
    grouper = TimeGrouper(key='A', freq='H')
    expected = ("TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, "
                "closed='left', label='left', how='mean', "
                "convention='e', base=0)")
    assert repr(grouper) == expected
@pytest.mark.parametrize('method, method_args, expected_values', [
    ('sum', dict(), [1, 0, 1]),
    ('sum', dict(min_count=0), [1, 0, 1]),
    ('sum', dict(min_count=1), [1, np.nan, 1]),
    ('sum', dict(min_count=2), [np.nan, np.nan, np.nan]),
    ('prod', dict(), [1, 1, 1]),
    ('prod', dict(min_count=0), [1, 1, 1]),
    ('prod', dict(min_count=1), [1, np.nan, 1]),
    ('prod', dict(min_count=2), [np.nan, np.nan, np.nan]),
])
def test_upsample_sum(method, method_args, expected_values):
    # Upsampling two hourly points to 30-minute bins creates an empty
    # middle bin; sum/prod on that bin must honour min_count (identity
    # value by default, NaN once min_count exceeds the bin's size).
    s = pd.Series(1, index=pd.date_range("2017", periods=2, freq="H"))
    resampled = s.resample("30T")
    index = pd.to_datetime(['2017-01-01T00:00:00',
                            '2017-01-01T00:30:00',
                            '2017-01-01T01:00:00'])
    result = methodcaller(method, **method_args)(resampled)
    expected = pd.Series(expected_values, index=index)
    tm.assert_series_equal(result, expected)
| {
"content_hash": "e563d09d5968382d4358fc226b1d562b",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 73,
"avg_line_length": 34.80451127819549,
"alnum_prop": 0.581551090948369,
"repo_name": "MJuddBooth/pandas",
"id": "2f330d1f2484b7ffaf5247235862c8180a8f5d5f",
"size": "9258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/resample/test_time_grouper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406766"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14858932"
},
{
"name": "Shell",
"bytes": "29575"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
from unittest import TestCase
from tempfile import mktemp
import shutil
import datetime
import time
from httmock import all_requests, response, HTTMock
from mock import patch, Mock
@all_requests
def clld(url, request):
    """httmock handler that simulates a clld server (glottolog.org).

    Maps a known request path to a (link-header, content-type, body)
    triple and serves it as an HTTP 200 response; any other path yields
    a 404.
    """
    res = {
        '/resource/languoid/id/stan1295.json': (
            '<http://glottolog.org/>; rel="canonical"; type="text/html"',
            'application/json',
            ('{"id": "stan1295", "name": "Standard German"}')),
        '/resource/languoid/id/stan1295.rdf': (
            '',
            'application/rdf+xml; charset=utf8',
            ("""\
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:dcterms="http://purl.org/dc/terms/">
<rdf:Description rdf:about="http://glottolog.org/resource/languoid/id/stan1295">
<dcterms:isReferencedBy rdf:resource="http://glottolog.org/resource/reference/id/7242"/>
</rdf:Description>
</rdf:RDF>
""")),
    }.get(url.path)
    if res is None:
        return response(404, 'not found', {}, None, 5, request)
    return response(
        200, res[2], {'content-type': res[1], 'link': res[0]}, None, 5, request)
class Tests(TestCase):
    """Tests for clldclient.cache.Cache, served entirely from the mocked
    HTTP handler above so no network access takes place."""

    def setUp(self):
        # NOTE(review): mktemp only reserves a name (race-prone, deprecated);
        # presumably Cache creates the directory itself — confirm before
        # switching to mkdtemp.
        self.tmp = mktemp()

    def tearDown(self):
        if os.path.exists(self.tmp):
            shutil.rmtree(self.tmp, ignore_errors=True)

    def test_Cache(self):
        from clldclient.cache import Cache
        # Redirect the cache directory into the temp path and serve all
        # HTTP traffic from the `clld` mock.
        with patch('clldclient.cache.user_cache_dir', Mock(return_value=self.tmp)):
            with HTTMock(clld):
                cache = Cache()
                r1 = cache.get('http://glottolog.org/resource/languoid/id/stan1295.json')
                self.assertEqual(r1.content['id'], 'stan1295')
                self.assertEqual(r1.canonical_url, 'http://glottolog.org/')
                # A repeated fetch must come from the cache (same timestamp)
                r2 = cache.get('http://glottolog.org/resource/languoid/id/stan1295.json')
                self.assertEqual(r1.created, r2.created)
                # ...and drop() must force a fresh fetch.
                cache.drop()
                r2 = cache.get('http://glottolog.org/resource/languoid/id/stan1295.json')
                self.assertNotEqual(r1.created, r2.created)
                self.assertRaises(KeyError, cache.get, 'http://glottolog.org/unknown')
                self.assertEqual(cache.get('http://glottolog.org/unknown', default=1), 1)
                # RDF responses are parsed (content exposes .triples) and
                # fall back to the request URL as canonical.
                res = cache.get('http://glottolog.org/resource/languoid/id/stan1295.rdf')
                self.assertEqual(res.mimetype, 'application/rdf+xml')
                self.assertEqual(
                    res.canonical_url,
                    'http://glottolog.org/resource/languoid/id/stan1295.rdf')
                assert hasattr(res.content, 'triples')
                self.assertEqual(res.links, [])
                # purge(host=...) removes exactly the entries stats() reports.
                cached = {r[0]: r[1] for r in cache.stats()}['glottolog.org']
                self.assertEqual(cached, cache.purge(host='glottolog.org'))
                # Time-windowed purge: nothing before `now`, one entry after.
                now = datetime.datetime.utcnow()
                time.sleep(0.2)
                cache.get('http://glottolog.org/resource/languoid/id/stan1295.json')
                self.assertEqual(0, cache.purge(before=now, host='glottolog.org'))
                self.assertEqual(1, cache.purge(after=now, host='glottolog.org'))
| {
"content_hash": "1ef19639872567d3abb8f88a9b411ac8",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 96,
"avg_line_length": 42.96,
"alnum_prop": 0.5949720670391061,
"repo_name": "clld/clldclient",
"id": "4a90361805ebd70f8920237aafc15f95e84077b0",
"size": "3237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clldclient/tests/test_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "36825"
}
],
"symlink_target": ""
} |
import requests
import json
import logging
import csv
import os
import pdb
from elasticsearch import Elasticsearch
import math
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
CSV_FILE_DIRECTORY = 'csv_files'
JSON_FILE_DIRECTORY = 'json_files'
def get_csv_files():
    """Download one CSV export per data-model entry from basleratlas.ch.

    Reads ``../scripts/datamodel.json`` and, for every indicator whose
    ``Ebenen`` field contains the ``wbe`` layer, fetches the CSV export
    and writes it to ``CSV_FILE_DIRECTORY/<key>-<layer>.csv``.  Already
    downloaded files are skipped, so the function can be re-run.
    """
    logger.info("Start downloading data")
    # BUG FIX: the file handle returned by open() was never closed.
    with open('../scripts/datamodel.json', 'r') as fp:
        data_model = json.load(fp)
    headers = {'authority': 'basleratlas.ch',
               'cookie': '_ga=GA1.2.1267730810.1508505504; _gid=GA1.2.531338445.1509109332',
               'upgrade-insecure-requests': '1',
               'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
               # BUG FIX: header name was misspelled 'accept-encodingi', so
               # the header was never recognised by the server.
               'accept-encoding': 'gzip, deflate, br',
               'accept-language': 'de-DE,de;q=0.8,en-US;q=0.6,en;q=0.4,es;q=0.2,it;q=0.2',
               'cache-control': 'max-age=0',
               'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}
    for key, value in data_model.items():
        # Renamed from `id` to avoid shadowing the builtin.
        dataset_id = value['id']
        if "wbe" not in value['Ebenen']:
            logger.info("Skip {} because wbe does not exists".format(dataset_id))
            continue
        layer = value['Ebenen'].split(',')
        target = '{}/{}-{}.csv'.format(CSV_FILE_DIRECTORY, key, layer[0])
        # BUG FIX: the existence check looked for 'csv_files/<key>.csv',
        # which never matches the '<key>-<layer>.csv' name actually written,
        # so every file was re-downloaded on each run.
        if os.path.exists(target):
            logger.info("File already exists")
            continue
        logger.info("Get data for {}".format(key))
        url = 'https://basleratlas.ch/geoclip_data_csv.php?iID={}&ngeo={}'.format(dataset_id, 'wbe')
        logger.info("URL: {}".format(url))
        # get the data
        r = requests.get(url=url, headers=headers, verify=True)
        # Was a bare print(); route through the module logger instead.
        logger.info("HTTP status: %s", r.status_code)
        # Write csv files
        with open(target, 'w') as csv_file:
            csv_file.write(r.text)
def search_area(id, areas):
    """Return the WOV_ID of the feature whose BEZ_ID equals *id*.

    Arguments:
        id: value compared against each feature's ``BEZ_ID`` property.
        areas: GeoJSON-like dict with a ``features`` list.

    Returns:
        The matching feature's ``WOV_ID`` property, or None when no
        feature matches (or the match has no WOV_ID).
    """
    for feature in areas['features']:
        prop = feature.get('properties', {})
        if not prop:
            # BUG FIX: the original returned None as soon as a single
            # feature lacked properties, hiding matches in later features.
            # Log and skip the malformed feature instead.
            logger.error("Do not find properties in feature")
            continue
        if prop.get('BEZ_ID') == id:
            logger.debug("Found WOV_ID: {}".format(prop.get('WOV_ID', None)))
            return prop.get('WOV_ID', None)
    return None
def generate_json_from_csv():
    """Convert every downloaded CSV file into an enriched JSON file.

    Each row is joined with its district (``WOV_ID`` resolved via
    ``search_area`` against ``../data/json/Bezirke.json``) and with the
    indicator metadata from ``../scripts/datamodel.json``, then written to
    ``JSON_FILE_DIRECTORY`` under the same base name.
    """
    with open('../data/json/Bezirke.json', 'r') as fp:
        areas = json.load(fp)
    # Cache of BEZ_ID -> WOV_ID lookups so each district is resolved once.
    areas_wov_id = {}
    # BUG FIX: the metadata file was re-opened (and never closed) once per
    # CSV file inside the loop; load it a single time up front.
    with open('../scripts/datamodel.json', 'r') as fp:
        meta_data = json.load(fp)
    for file in os.listdir(CSV_FILE_DIRECTORY):
        logger.info("Read CSV File {}/{}".format(CSV_FILE_DIRECTORY, file))
        # File names look like "<key>-<level>.csv".
        level = file.split('-')[-1].split('.')[0]
        logger.debug("level: {}".format(level))
        key = ''.join(file.split('-')[0])
        logger.debug("Key: {}".format(key))
        fieldnames = [level, "jahr", key]
        logger.debug("Try to parse file {}".format(file))
        try:
            with open('{}/{}'.format(CSV_FILE_DIRECTORY, file), 'r') as csvfile:
                reader = csv.DictReader(csvfile, fieldnames, delimiter=';')
                data = []
                for idx, row in enumerate(reader):
                    # Ignore first entry because it is header
                    if idx == 0:
                        continue
                    if row[level] not in areas_wov_id:
                        logger.info("Do not find {}".format(row[level]))
                        areas_wov_id[row[level]] = search_area(row[level], areas)
                    wov_id = areas_wov_id[row[level]]
                    count = float(row.get(key, "0"))
                    if math.isnan(count):
                        count = None
                    data.append({
                        'key': key,
                        level: row[level],
                        'wov_id': wov_id,
                        'year': int(row['jahr']),
                        'count': count,
                        'indicator': meta_data[key]
                    })
            if data:
                with open('{}/{}'.format(JSON_FILE_DIRECTORY, file.replace('csv', 'json')), 'w') as json_file:
                    json_file.write(json.dumps(data, indent=4))
        except TypeError:
            # BUG FIX: the original message mixed "{}" placeholders with
            # logging's lazy %-args (so `file` was never interpolated) and
            # then dropped into pdb.set_trace(), which hangs unattended
            # runs.  Log the traceback and move on.
            logger.exception("Could not parse file {}".format(file))
            continue
def import_to_elastic():
    """Index every JSON file from JSON_FILE_DIRECTORY into Elasticsearch.

    Documents go to the local 'baselhack' index with sequential integer
    ids; entries the server does not report as 'created' are logged and
    skipped without consuming an id.
    """
    es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
    idx = 0
    for file in os.listdir(JSON_FILE_DIRECTORY):
        # NOTE(review): file handle from open() is never closed — consider
        # a `with` block.
        json_data = json.load(open('{}/{}'.format(JSON_FILE_DIRECTORY, file), 'r'))
        logger.info("process {}".format(file))
        for data in json_data:
            # Feed the indicator title words to the completion suggester.
            # NOTE(review): .get('title') may return None, making .split()
            # raise AttributeError — confirm every indicator has a title.
            data['autoComplete'] = {}
            data['autoComplete']['input'] = data.get('indicator', {}).get('title').split()
            req = es.index(index='baselhack', doc_type='dataset', id=idx, body=data)
            if req['result'] != 'created':
                logger.error("Could not save entry for {}".format(data))
                continue
            idx += 1
def main():
    """Script entry point.

    The download and conversion steps are commented out so that, by
    default, only the Elasticsearch import runs; uncomment them to
    rebuild the CSV/JSON files first.
    """
    # get_csv_files()
    # generate_json_from_csv()
    import_to_elastic()


if __name__ == "__main__":
    main()
"content_hash": "0c4ead91c31d4c7d181b5a88cea67d41",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 137,
"avg_line_length": 37.8134328358209,
"alnum_prop": 0.5304914150384843,
"repo_name": "FUUbi/07_BaselStats",
"id": "7dc10cc76398bea1ca1cdafb9825629a11005931",
"size": "5068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "database/getData.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3147"
},
{
"name": "Go",
"bytes": "290"
},
{
"name": "HTML",
"bytes": "14099"
},
{
"name": "JavaScript",
"bytes": "4188"
},
{
"name": "Python",
"bytes": "14639"
},
{
"name": "Shell",
"bytes": "2863"
},
{
"name": "TypeScript",
"bytes": "464596"
}
],
"symlink_target": ""
} |
import pkg_resources
import plone.testing
import zeit.cms.repository.interfaces
import zeit.cms.testing
import zeit.content.image.image
import zeit.content.image.interfaces
import zeit.content.image.testing
import zeit.imp.tests
import zeit.push.testing
import zeit.workflow.testing
import zope.component
# Product configuration consumed by the ZCML layers below; the scale and
# gallery-type sources point at XML fixtures shipped with this package.
product_config = """
<product-config zeit.content.gallery>
scale-source file://%s
ticket-secret All work and no play makes jack a dull boy
gallery-types-url file://%s
</product-config>
""" % (
    pkg_resources.resource_filename(__name__, 'scales.xml'),
    pkg_resources.resource_filename(__name__, 'gallery-types.xml'),
)

# Functional-testing layer stack: ZCML registration, Urban Airship push
# templates on top, and a plain marker layer for gallery tests.
ZCML_LAYER = zeit.cms.testing.ZCMLLayer(
    'ftesting.zcml',
    product_config=(
        zeit.cms.testing.cms_product_config +
        zeit.content.image.testing.product_config +
        zeit.imp.tests.product_config +
        zeit.push.testing.product_config +
        product_config))
PUSH_LAYER = zeit.push.testing.UrbanairshipTemplateLayer(
    name='UrbanairshipTemplateLayer', bases=(ZCML_LAYER,))
LAYER = plone.testing.Layer(bases=(PUSH_LAYER,), name='GalleryLayer')

# Separate layer stack that additionally wires up the workflow engine
# (note: uses a different ZCML file and omits the image product config).
WORKFLOW_ZCML_LAYER = zeit.cms.testing.ZCMLLayer(
    'ftesting-workflow.zcml',
    product_config=(
        zeit.cms.testing.cms_product_config +
        zeit.imp.tests.product_config +
        zeit.workflow.testing.product_config +
        zeit.push.testing.product_config +
        product_config))
WORKFLOW_LAYER = plone.testing.Layer(
    name='GalleryWorkflowLayer', module=__name__,
    bases=(WORKFLOW_ZCML_LAYER, zeit.workflow.testing.SCRIPTS_LAYER))
def add_image(folder, filename, name=None):
    """Create a LocalImage from a test-data file and store it in the repository.

    Arguments:
        folder: name of the repository folder to store the image in.
        filename: file name inside ``browser/testdata``.
        name: target name in the repository; defaults to *filename*.
    """
    if name is None:
        name = filename
    filename = pkg_resources.resource_filename(
        __name__, 'browser/testdata/' + filename)
    # BUG FIX: the read handle was never closed.
    with open(filename, 'rb') as f:
        test_data = f.read()
    image = zeit.content.image.image.LocalImage()
    image.__name__ = name
    image.contentType = 'image/jpeg'
    # NOTE(review): LocalImage.open() presumably returns a file-like
    # object; close it explicitly so the data is flushed deterministically.
    handle = image.open('w')
    try:
        handle.write(test_data)
    finally:
        handle.close()
    metadata = zeit.content.image.interfaces.IImageMetadata(image)
    metadata.copyright = ((u'ZEIT online', u'http://www.zeit.de'),)
    metadata.caption = u'Nice image'
    repository = zope.component.getUtility(
        zeit.cms.repository.interfaces.IRepository)
    repository[folder][name] = image
| {
"content_hash": "35e47378de911db2904d0c0bc873e64a",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 69,
"avg_line_length": 30.783783783783782,
"alnum_prop": 0.7006145741878841,
"repo_name": "ZeitOnline/zeit.content.gallery",
"id": "9057e058e7154f7fd03d773621d3aa2639be863c",
"size": "2278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zeit/content/gallery/testing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "70896"
},
{
"name": "Batchfile",
"bytes": "113"
},
{
"name": "CSS",
"bytes": "2935"
},
{
"name": "JavaScript",
"bytes": "65363"
},
{
"name": "Python",
"bytes": "56979"
}
],
"symlink_target": ""
} |
import json
import logging
import os
import tempfile
import capture
import maya.cmds as cmds
from .vendor.Qt import QtCore, QtWidgets, QtGui
from . import lib
from . import plugin
from . import presets
from . import version
from . import tokens
from .accordion import AccordionWidget
log = logging.getLogger("Capture Gui")
class ClickLabel(QtWidgets.QLabel):
    """A QLabel that emits a clicked signal when clicked upon."""

    # Emitted on every mouse button release over the label.
    clicked = QtCore.Signal()

    def mouseReleaseEvent(self, event):
        # Notify listeners first, then let QLabel finish its normal
        # release handling.
        self.clicked.emit()
        return super(ClickLabel, self).mouseReleaseEvent(event)
class PreviewWidget(QtWidgets.QWidget):
    """The playblast image preview widget.

    Upon refresh it will retrieve the options through the function set as
    `options_getter` and make a call to `capture.capture()` for a single
    frame (playblasted) snapshot. The result is displayed as image.
    """

    # Fixed thumbnail size in pixels; also forced onto the capture call.
    preview_width = 320
    preview_height = 180

    def __init__(self, options_getter, validator, parent=None):
        # options_getter: callable returning the current capture options
        # dict; validator: callable returning True when capturing may run.
        QtWidgets.QWidget.__init__(self, parent=parent)

        # Add attributes
        self.options_getter = options_getter
        self.validator = validator

        self.preview = ClickLabel()
        self.preview.setFixedWidth(self.preview_width)
        self.preview.setFixedHeight(self.preview_height)

        tip = "Click to force a refresh"
        self.preview.setToolTip(tip)
        self.preview.setStatusTip(tip)

        # region Build
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.setAlignment(QtCore.Qt.AlignHCenter)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(self.layout)
        self.layout.addWidget(self.preview)
        # endregion Build

        # Connect widgets to functions
        self.preview.clicked.connect(self.refresh)

    def refresh(self):
        """Refresh the playblast preview"""
        frame = cmds.currentTime(query=True)

        # When playblasting outside of an undo queue it seems that undoing
        # actually triggers a reset to frame 0. As such we sneak in the current
        # time into the undo queue to enforce correct undoing.
        cmds.currentTime(frame, update=True)

        # check if plugin outputs are correct
        valid = self.validator()
        if not valid:
            return

        with lib.no_undo():
            options = self.options_getter()

            tempdir = tempfile.mkdtemp()

            # override settings that are constants for the preview
            options = options.copy()
            options['filename'] = None
            options['complete_filename'] = os.path.join(tempdir, "temp.jpg")
            options['width'] = self.preview_width
            options['height'] = self.preview_height
            options['viewer'] = False
            options['frame'] = frame
            options['off_screen'] = True
            options['format'] = "image"
            options['compression'] = "jpg"
            options['sound'] = None

            fname = capture.capture(**options)
            if not fname:
                log.warning("Preview failed")
                return

            # Display the snapshot, then remove the temp file straight away.
            image = QtGui.QPixmap(fname)
            self.preview.setPixmap(image)
            os.remove(fname)

    def showEvent(self, event):
        """Initialize when shown"""
        self.refresh()
        event.accept()
class PresetWidget(QtWidgets.QWidget):
    """Preset Widget

    Allows the user to set preferences and create presets to load before
    capturing.
    """

    # Emitted with the preset's contents after a preset has been loaded.
    preset_loaded = QtCore.Signal(dict)
    # Emitted when the user requests the configuration dialog.
    config_opened = QtCore.Signal()

    id = "Presets"
    label = "Presets"

    def __init__(self, inputs_getter, parent=None):
        QtWidgets.QWidget.__init__(self, parent=parent)

        # Callable returning the current inputs of all plug-in widgets.
        self.inputs_getter = inputs_getter

        layout = QtWidgets.QHBoxLayout(self)
        layout.setAlignment(QtCore.Qt.AlignCenter)
        layout.setContentsMargins(0, 0, 0, 0)

        # Renamed local from `presets` to avoid shadowing the imported
        # `presets` module used further down.
        presets_combo = QtWidgets.QComboBox()
        presets_combo.setFixedWidth(220)
        # The "*" entry means "no preset / modified settings".
        presets_combo.addItem("*")

        # Icons
        icon_path = os.path.join(os.path.dirname(__file__), "resources")
        save_icon = os.path.join(icon_path, "save.png")
        load_icon = os.path.join(icon_path, "import.png")
        config_icon = os.path.join(icon_path, "config.png")

        # Create buttons
        save = QtWidgets.QPushButton()
        save.setIcon(QtGui.QIcon(save_icon))
        save.setFixedWidth(30)
        save.setToolTip("Save Preset")
        save.setStatusTip("Save Preset")

        load = QtWidgets.QPushButton()
        load.setIcon(QtGui.QIcon(load_icon))
        load.setFixedWidth(30)
        load.setToolTip("Load Preset")
        load.setStatusTip("Load Preset")

        config = QtWidgets.QPushButton()
        config.setIcon(QtGui.QIcon(config_icon))
        config.setFixedWidth(30)
        config.setToolTip("Preset configuration")
        config.setStatusTip("Preset configuration")

        layout.addWidget(presets_combo)
        layout.addWidget(save)
        layout.addWidget(load)
        layout.addWidget(config)

        # Make available for all methods
        self.presets = presets_combo
        self.config = config
        self.load = load
        self.save = save

        # Signals
        self.save.clicked.connect(self.on_save_preset)
        self.load.clicked.connect(self.import_preset)
        self.config.clicked.connect(self.config_opened)
        self.presets.currentIndexChanged.connect(self.load_active_preset)

        self._process_presets()

    def _process_presets(self):
        """Adds all preset files from preset paths to the Preset widget.

        Returns:
            None
        """
        for presetfile in presets.discover():
            self.add_preset(presetfile)

    def import_preset(self):
        """Load preset files to override output values"""
        path = self._default_browse_path()
        filters = "Text file (*.json)"
        dialog = QtWidgets.QFileDialog
        filename, _ = dialog.getOpenFileName(self, "Open preference file",
                                             path, filters)
        if not filename:
            return

        # create new entry in combobox
        self.add_preset(filename)

        # read file
        return self.load_active_preset()

    def load_active_preset(self):
        """Load the active preset.

        Returns:
            dict: The preset inputs.
        """
        current_index = self.presets.currentIndex()
        filename = self.presets.itemData(current_index)
        if not filename:
            return {}

        preset = lib.load_json(filename)

        # Emit preset load signal
        log.debug("Emitting preset_loaded: {0}".format(filename))
        self.preset_loaded.emit(preset)

        # Ensure we preserve the index after loading the changes
        # for all the plugin widgets
        self.presets.blockSignals(True)
        self.presets.setCurrentIndex(current_index)
        self.presets.blockSignals(False)

        return preset

    def add_preset(self, filename):
        """Add the filename to the preset list.

        This also sets the index to the filename.

        Returns:
            int: Index of the (possibly pre-existing) combobox entry.
        """
        filename = os.path.normpath(filename)
        if not os.path.exists(filename):
            log.warning("Preset file does not exist: {0}".format(filename))
            return

        label = os.path.splitext(os.path.basename(filename))[0]
        item_count = self.presets.count()

        paths = [self.presets.itemData(i) for i in range(item_count)]
        if filename in paths:
            log.info("Preset is already in the "
                     "presets list: {0}".format(filename))
            item_index = paths.index(filename)
        else:
            self.presets.addItem(label, userData=filename)
            item_index = item_count

        # Select it without re-triggering load_active_preset.
        self.presets.blockSignals(True)
        self.presets.setCurrentIndex(item_index)
        self.presets.blockSignals(False)

        return item_index

    def _default_browse_path(self):
        """Return the current browse path for save/load preset.

        If a preset is currently loaded it will use that specific path
        otherwise it will go to the last registered preset path.

        Returns:
            str: Path to use as default browse location.
        """
        current_index = self.presets.currentIndex()
        path = self.presets.itemData(current_index)
        if not path:
            # Fallback to last registered preset path
            paths = presets.preset_paths()
            if paths:
                path = paths[-1]
        return path

    def save_preset(self, inputs):
        """Save inputs to a file"""
        path = self._default_browse_path()
        filters = "Text file (*.json)"
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(self,
                                                            "Save preferences",
                                                            path,
                                                            filters)
        if not filename:
            return

        with open(filename, "w") as f:
            json.dump(inputs, f, sort_keys=True,
                      indent=4, separators=(',', ': '))

        self.add_preset(filename)

        return filename

    def get_presets(self):
        """Return all currently listed presets"""
        configurations = [self.presets.itemText(i) for
                          i in range(self.presets.count())]
        return configurations

    def on_save_preset(self):
        """Save the inputs of all the plugins in a preset."""
        inputs = self.inputs_getter(as_preset=True)
        self.save_preset(inputs)

    def apply_inputs(self, settings):
        """Restore the previously selected preset, if any."""
        path = settings.get("selected", None)
        # BUG FIX: when no preset was ever stored, `path` is None and the
        # original fell through to os.path.exists(None), raising TypeError.
        if not path:
            return

        index = self.presets.findData(path)
        if index == -1:
            # If the last loaded preset still exists but wasn't on the
            # "discovered preset paths" then add it.
            if os.path.exists(path):
                log.info("Adding previously selected preset explicitly: %s",
                         path)
                self.add_preset(path)
                return
            else:
                log.warning("Previously selected preset is not available: %s",
                            path)
                index = 0

        self.presets.setCurrentIndex(index)

    def get_inputs(self, as_preset=False):
        if as_preset:
            # Don't save the current preset into the preset because
            # that would just be recursive and make no sense
            return {}
        else:
            current_index = self.presets.currentIndex()
            selected = self.presets.itemData(current_index)
            return {"selected": selected}
class App(QtWidgets.QWidget):
    """The main application in which the widgets are placed"""

    # Signals
    options_changed = QtCore.Signal(dict)
    playblast_start = QtCore.Signal(dict)
    playblast_finished = QtCore.Signal(dict)
    viewer_start = QtCore.Signal(dict)

    # Attributes
    object_name = "CaptureGUI"
    # Sections a plug-in widget may declare; anything else is rejected.
    application_sections = ["config", "app"]

    def __init__(self, title, parent=None):
        QtWidgets.QWidget.__init__(self, parent=parent)

        # Settings
        # Remove pointer for memory when closed
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.settingfile = self._ensure_config_exist()
        self.plugins = {"app": list(),
                        "config": list()}
        self._config_dialog = None
        self._build_configuration_dialog()

        # region Set Attributes
        title_version = "{} v{}".format(title, version.version)
        self.setObjectName(self.object_name)
        self.setWindowTitle(title_version)
        self.setMinimumWidth(380)
        # Set dialog window flags so the widget can be correctly parented
        # to Maya main window
        self.setWindowFlags(self.windowFlags() | QtCore.Qt.Dialog)
        self.setProperty("saveWindowPref", True)
        # endregion Set Attributes

        self.layout = QtWidgets.QVBoxLayout()
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(self.layout)

        # Add accordion widget (Maya attribute editor style)
        self.widgetlibrary = AccordionWidget(self)
        self.widgetlibrary.setRolloutStyle(AccordionWidget.Maya)

        # Add separate widgets
        self.widgetlibrary.addItem("Preview",
                                   PreviewWidget(self.get_outputs,
                                                 self.validate,
                                                 parent=self),
                                   collapsed=True)
        self.presetwidget = PresetWidget(inputs_getter=self.get_inputs,
                                         parent=self)
        self.widgetlibrary.addItem("Presets", self.presetwidget)

        # add plug-in widgets
        for widget in plugin.discover():
            self.add_plugin(widget)

        self.layout.addWidget(self.widgetlibrary)

        # add standard buttons
        self.apply_button = QtWidgets.QPushButton("Capture")
        self.layout.addWidget(self.apply_button)

        # default actions
        self.apply_button.clicked.connect(self.apply)

        # signals and slots
        self.presetwidget.config_opened.connect(self.show_config)
        self.presetwidget.preset_loaded.connect(self.apply_inputs)

        # Restore the inputs of the previous session.
        self.apply_inputs(self._read_widget_configuration())

    def apply(self):
        """Run capture action with current settings"""
        valid = self.validate()
        if not valid:
            return

        options = self.get_outputs()
        filename = options.get("filename", None)

        self.playblast_start.emit(options)

        # The filename can be `None` when the
        # playblast will *not* be saved.
        if filename is not None:
            # Format the tokens in the filename
            filename = tokens.format_tokens(filename, options)

            # expand environment variables
            filename = os.path.expandvars(filename)

            # Make relative paths absolute to the "images" file rule by default
            if not os.path.isabs(filename):
                root = lib.get_project_rule("images")
                filename = os.path.join(root, filename)

            # normalize (to remove double slashes and alike)
            filename = os.path.normpath(filename)
            options["filename"] = filename

        # Perform capture and store returned filename with extension
        options["filename"] = lib.capture_scene(options)

        self.playblast_finished.emit(options)
        filename = options["filename"]  # get filename after callbacks

        # Show viewer
        viewer = options.get("viewer", False)
        if viewer:
            if filename and os.path.exists(filename):
                self.viewer_start.emit(options)
                lib.open_file(filename)
            else:
                raise RuntimeError("Can't open playblast because file "
                                   "doesn't exist: {0}".format(filename))

        return filename

    def apply_inputs(self, inputs):
        """Apply all the settings of the widgets.

        Arguments:
            inputs (dict): input values per plug-in widget

        Returns:
            None
        """
        if not inputs:
            return

        widgets = self._get_plugin_widgets()
        widgets.append(self.presetwidget)
        for widget in widgets:
            widget_inputs = inputs.get(widget.id, None)
            if not widget_inputs:
                continue
            widget.apply_inputs(widget_inputs)

    def show_config(self):
        """Show the advanced configuration"""
        # calculate center of main widget
        geometry = self.geometry()
        self._config_dialog.move(QtCore.QPoint(geometry.x() + 30,
                                               geometry.y()))
        self._config_dialog.show()

    def add_plugin(self, plugin):
        """Add an options widget plug-in to the UI"""
        if plugin.section not in self.application_sections:
            log.warning("{}'s section is invalid: "
                        "{}".format(plugin.label, plugin.section))
            return

        widget = plugin(parent=self)
        widget.initialize()
        widget.options_changed.connect(self.on_widget_settings_changed)
        self.playblast_finished.connect(widget.on_playblast_finished)

        # Add to plug-ins in its section
        self.plugins[widget.section].append(widget)

        # Implement additional settings depending on section
        if widget.section == "app":
            if not widget.hidden:
                item = self.widgetlibrary.addItem(widget.label, widget)
                # connect label change behaviour
                widget.label_changed.connect(item.setTitle)

        # Add the plugin in a QGroupBox to the configuration dialog
        if widget.section == "config":
            layout = self._config_dialog.layout()

            # create group box
            group_widget = QtWidgets.QGroupBox(widget.label)
            group_layout = QtWidgets.QVBoxLayout(group_widget)
            group_layout.addWidget(widget)

            layout.addWidget(group_widget)

    def validate(self):
        """Validate whether the outputs of the widgets are good.

        Returns:
            bool: Whether it's valid to capture the current settings.
        """
        errors = list()
        for widget in self._get_plugin_widgets():
            widget_errors = widget.validate()
            if widget_errors:
                errors.extend(widget_errors)

        if errors:
            message_title = "%s Validation Error(s)" % len(errors)
            message = "\n".join(errors)
            QtWidgets.QMessageBox.critical(self,
                                           message_title,
                                           message,
                                           QtWidgets.QMessageBox.Ok)
            return False

        return True

    def get_outputs(self):
        """Return settings for a capture as currently set in the Application.

        Returns:
            dict: Current output settings
        """
        # Get settings from widgets
        outputs = dict()
        for widget in self._get_plugin_widgets():
            widget_outputs = widget.get_outputs()
            if not widget_outputs:
                continue

            for key, value in widget_outputs.items():
                # We merge dictionaries by updating them so we have
                # the "mixed" values of both settings
                if isinstance(value, dict) and key in outputs:
                    outputs[key].update(value)
                else:
                    outputs[key] = value

        return outputs

    def get_inputs(self, as_preset=False):
        """Return the inputs per plug-in widgets by `plugin.id`.

        Returns:
            dict: The inputs per widget
        """
        inputs = dict()
        # Here we collect all the widgets from which we want to store the
        # current inputs. This will be restored in the next session
        # The preset widget is added to make sure the user starts with the
        # previously selected preset configuration
        config_widgets = self._get_plugin_widgets()
        config_widgets.append(self.presetwidget)
        for widget in config_widgets:
            widget_inputs = widget.get_inputs(as_preset=as_preset)
            if not isinstance(widget_inputs, dict):
                # BUG FIX: the original did a bare `return` here, making the
                # whole method yield None and corrupting the stored settings
                # whenever a single widget misbehaved; log and skip instead.
                log.error("Widget inputs are not a dictionary "
                          "'{}': {}".format(widget.id, widget_inputs))
                continue
            if not widget_inputs:
                continue
            inputs[widget.id] = widget_inputs

        return inputs

    def on_widget_settings_changed(self):
        """Set current preset to '*' on settings change"""
        # BUG FIX: options_changed is declared as Signal(dict); the original
        # emitted the bound method `self.get_outputs` instead of calling it.
        self.options_changed.emit(self.get_outputs())
        self.presetwidget.presets.setCurrentIndex(0)

    def _build_configuration_dialog(self):
        """Build a configuration to store configuration widgets in"""
        dialog = QtWidgets.QDialog(self)
        dialog.setWindowTitle("Capture - Preset Configuration")
        QtWidgets.QVBoxLayout(dialog)

        self._config_dialog = dialog

    def _ensure_config_exist(self):
        """Create the configuration file if it does not exist yet.

        Returns:
            unicode: filepath of the configuration file
        """
        userdir = os.path.expanduser("~")
        capturegui_dir = os.path.join(userdir, "CaptureGUI")
        capturegui_inputs = os.path.join(capturegui_dir, "capturegui.json")
        if not os.path.exists(capturegui_dir):
            os.makedirs(capturegui_dir)

        if not os.path.isfile(capturegui_inputs):
            # Touch the file without leaving a dangling handle.
            open(capturegui_inputs, "w").close()

        return capturegui_inputs

    def _store_widget_configuration(self):
        """Store all used widget settings in the local json file"""
        inputs = self.get_inputs(as_preset=False)
        path = self.settingfile

        with open(path, "w") as f:
            log.debug("Writing JSON file: {0}".format(path))
            json.dump(inputs, f, sort_keys=True,
                      indent=4, separators=(',', ': '))

    def _read_widget_configuration(self):
        """Read the stored widget inputs"""
        inputs = {}
        path = self.settingfile

        if not os.path.isfile(path) or os.stat(path).st_size == 0:
            return inputs

        with open(path, "r") as f:
            log.debug("Reading JSON file: {0}".format(path))
            try:
                inputs = json.load(f)
            except ValueError as error:
                log.error(str(error))

        return inputs

    def _get_plugin_widgets(self):
        """List all plug-in widgets.

        Returns:
            list: The plug-in widgets in *all* sections
        """
        widgets = list()
        for section in self.plugins.values():
            widgets.extend(section)

        return widgets

    # override close event to ensure the input are stored
    def closeEvent(self, event):
        """Store current configuration upon closing the application."""
        self._store_widget_configuration()
        for section_widgets in self.plugins.values():
            for widget in section_widgets:
                widget.uninitialize()
        event.accept()
| {
"content_hash": "3f9116b83b2728b578620f33d2774473",
"timestamp": "",
"source": "github",
"line_count": 711,
"max_line_length": 79,
"avg_line_length": 31.77496483825598,
"alnum_prop": 0.5846759915014165,
"repo_name": "BigRoy/maya-capture-gui",
"id": "1860b084ba351161c167a1eb90a6c18adc840be0",
"size": "22592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capture_gui/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127227"
}
],
"symlink_target": ""
} |
"""WebElement implementation."""
import os
import zipfile
try:
from StringIO import StringIO
except ImportError: # 3+
from io import StringIO
import base64
from .command import Command
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class WebElement(object):
"""Represents an HTML element.
Generally, all interesting operations to do with interacting with a page
will be performed through this interface."""
    def __init__(self, parent, id_):
        # parent: the driver instance that found this element and executes
        # commands on its behalf; id_: the server-assigned element id.
        self._parent = parent
        self._id = id_
    @property
    def tag_name(self):
        """Gets this element's tagName property (round-trips to the remote end)."""
        return self._execute(Command.GET_ELEMENT_TAG_NAME)['value']
    @property
    def text(self):
        """Gets the text of the element (round-trips to the remote end)."""
        return self._execute(Command.GET_ELEMENT_TEXT)['value']
    def click(self):
        """Clicks the element."""
        self._execute(Command.CLICK_ELEMENT)
    def submit(self):
        """Submits a form (the element or its enclosing form, per the wire protocol)."""
        self._execute(Command.SUBMIT_ELEMENT)
def clear(self):
"""Clears the text if it's a text entry element."""
self._execute(Command.CLEAR_ELEMENT)
def get_attribute(self, name):
"""Gets the attribute value."""
resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name})
attributeValue = ''
if resp['value'] is None:
attributeValue = None
else:
attributeValue = resp['value']
if attributeValue.lower() in ('true', 'false'):
attributeValue = attributeValue.lower()
return attributeValue
def is_selected(self):
"""Whether the element is selected."""
return self._execute(Command.IS_ELEMENT_SELECTED)['value']
def is_enabled(self):
"""Whether the element is enabled."""
return self._execute(Command.IS_ELEMENT_ENABLED)['value']
def find_element_by_id(self, id_):
"""Finds element by id."""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
"""Find element by name."""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
"""Finds element by link text."""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
"""Finds element by xpath."""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""Finds elements within the elements by xpath."""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
"""Finds an element by their class name."""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""Finds elements by their class name."""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""Find and return an element by CSS selector."""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""Find and return list of multiple elements by CSS selector."""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def send_keys(self, *value):
"""Simulates typing into the element."""
# transfer file to another machine only if remote driver is used
# the same behaviour as for java binding
if self.parent._is_remote:
local_file = LocalFileDetector.is_local_file(*value)
if local_file is not None:
value = self._upload(local_file)
typing = []
for val in value:
if isinstance(val, Keys):
typing.append(val)
elif isinstance(val, int):
val = str(val)
for i in range(len(val)):
typing.append(val[i])
else:
for i in range(len(val)):
typing.append(val[i])
self._execute(Command.SEND_KEYS_TO_ELEMENT, {'value': typing})
# RenderedWebElement Items
def is_displayed(self):
"""Whether the element would be visible to a user"""
return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
@property
def location_once_scrolled_into_view(self):
"""CONSIDERED LIABLE TO CHANGE WITHOUT WARNING. Use this to discover where on the screen an
element is so that we can click it. This method should cause the element to be scrolled
into view.
Returns the top lefthand corner location on the screen, or None if the element is not visible"""
return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value']
@property
def size(self):
""" Returns the size of the element """
size = self._execute(Command.GET_ELEMENT_SIZE)['value']
new_size = {}
new_size["height"] = size["height"]
new_size["width"] = size["width"]
return new_size
def value_of_css_property(self, property_name):
""" Returns the value of a CSS property """
return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
{'propertyName': property_name})['value']
@property
def location(self):
""" Returns the location of the element in the renderable canvas"""
old_loc = self._execute(Command.GET_ELEMENT_LOCATION)['value']
new_loc = {"x": old_loc['x'],
"y": old_loc['y']}
return new_loc
@property
def parent(self):
return self._parent
@property
def id(self):
return self._id
def __eq__(self, element):
if self._id == element.id:
return True
else:
return self._execute(Command.ELEMENT_EQUALS, {'other': element.id})['value']
# Private Methods
def _execute(self, command, params=None):
"""Executes a command against the underlying HTML element.
Args:
command: The name of the command to _execute as a string.
params: A dictionary of named parameters to send with the command.
Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {}
params['id'] = self._id
return self._parent.execute(command, params)
def find_element(self, by=By.ID, value=None):
if isinstance(by, tuple) or isinstance(value, int) or value==None:
raise InvalidSelectorException("Invalid locator values passed in")
return self._execute(Command.FIND_CHILD_ELEMENT,
{"using": by, "value": value})['value']
def find_elements(self, by=By.ID, value=None):
if isinstance(by, tuple) or isinstance(value, int) or value==None:
raise InvalidSelectorException("Invalid locator values passed in")
return self._execute(Command.FIND_CHILD_ELEMENTS,
{"using": by, "value": value})['value']
def _upload(self, filename):
fp = StringIO()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
zipped.write(filename, os.path.split(filename)[1])
zipped.close()
try:
return self._execute(Command.UPLOAD_FILE,
{'file': base64.encodestring(fp.getvalue())})['value']
except WebDriverException as e:
if "Unrecognized command: POST" in e.__str__():
return filename
elif "Command not found: POST " in e.__str__():
return filename
elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__():
return filename
else:
raise e
class LocalFileDetector(object):
    """Detects whether the keys sent to an element name a local file."""

    @classmethod
    def is_local_file(cls, *keys):
        """Return the path assembled from *keys* if it names a local file.

        Args:
            keys: the values passed to send_keys() — strings, ints, or Keys.

        Returns:
            The joined path when it points at an existing local file,
            otherwise None.
        """
        typing = []
        for val in keys:
            if isinstance(val, Keys):
                typing.append(val)
            elif isinstance(val, int):
                val = str(val)
                for i in range(len(val)):
                    typing.append(val[i])
            else:
                for i in range(len(val)):
                    typing.append(val[i])
        file_path = ''.join(typing)
        # Bug fix: the original compared `file_path is ''`, relying on
        # CPython string interning; use a truthiness check instead.
        if not file_path:
            return None
        try:
            if os.path.isfile(file_path):
                return file_path
        except (TypeError, ValueError):
            # Bug fix: narrowed from a bare `except:` — os.path.isfile only
            # raises for malformed paths (e.g. embedded NUL bytes).
            pass
        return None
| {
"content_hash": "fb864834a433f8cee2bbbc1151dbca40",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 104,
"avg_line_length": 35.065693430656935,
"alnum_prop": 0.599188176519567,
"repo_name": "neumerance/deploy",
"id": "0df87c87a40c301b29e51cf9215e3862074b8842",
"size": "10207",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/selenium/webdriver/remote/webelement.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49399"
},
{
"name": "CSS",
"bytes": "769836"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Erlang",
"bytes": "31042"
},
{
"name": "JavaScript",
"bytes": "642626"
},
{
"name": "PHP",
"bytes": "3858"
},
{
"name": "Perl",
"bytes": "386749"
},
{
"name": "Python",
"bytes": "23358678"
},
{
"name": "Racket",
"bytes": "28441"
},
{
"name": "Ruby",
"bytes": "453"
},
{
"name": "Shell",
"bytes": "29414"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from rest_framework import generics
from rest_framework import serializers as ser
from api.base import utils
from api.base.exceptions import Conflict
from api.base.exceptions import JSONAPIAttributeException
from api.base.serializers import JSONAPISerializer
from api.base.serializers import LinksField
from api.base.serializers import RelationshipField
from api.base.serializers import HideIfProviderCommentsAnonymous
from api.base.serializers import HideIfProviderCommentsPrivate
from osf.exceptions import InvalidTriggerError
from osf.models import PreprintService, NodeRequest, PreprintRequest
from osf.utils.workflows import DefaultStates, DefaultTriggers, ReviewStates, ReviewTriggers
from osf.utils import permissions
class ReviewableCountsRelationshipField(RelationshipField):
    """Relationship field that can attach reviewable state counts to meta."""

    def __init__(self, *args, **kwargs):
        # Opt the field into state counts unless the caller said otherwise.
        related_meta = kwargs.get('related_meta') or {}
        related_meta.setdefault('include_state_counts', True)
        kwargs['related_meta'] = related_meta
        super(ReviewableCountsRelationshipField, self).__init__(*args, **kwargs)

    def get_meta_information(self, metadata, provider):
        # Work on a copy -- the incoming dict's mutability is questionable.
        metadata = dict(metadata or {})
        # Pop the sentinel unconditionally so it never leaks into the output.
        # It also distinguishes related_meta from self_meta, since this
        # method is invoked with both.
        wants_counts = metadata.pop('include_state_counts', False)
        request = self.context['request']
        # Counts are opt-in via ?related_counts=...
        counts_requested = utils.is_truthy(request.query_params.get('related_counts', False))
        # Counts are only exposed on detail routes.
        view = self.context.get('view')
        on_detail_route = bool(view) and not isinstance(view, generics.ListAPIView)
        if wants_counts and counts_requested and on_detail_route:
            # Finally, require users to have view_actions permissions.
            auth = utils.get_user_auth(request)
            if auth and auth.logged_in and auth.user.has_perm('view_actions', provider):
                metadata.update(provider.get_reviewable_state_counts())
        return super(ReviewableCountsRelationshipField, self).get_meta_information(metadata, provider)
class TargetRelationshipField(RelationshipField):
    """Relationship field whose target model class is injected per serializer."""

    _target_class = None

    def __init__(self, *args, **kwargs):
        # Subclass users supply the model via the `target_class` kwarg.
        self._target_class = kwargs.pop('target_class', None)
        super(TargetRelationshipField, self).__init__(*args, **kwargs)

    @property
    def TargetClass(self):
        if not self._target_class:
            raise NotImplementedError()
        return self._target_class

    def get_object(self, object_id):
        return self.TargetClass.load(object_id)

    def to_internal_value(self, data):
        return {'target': self.get_object(data)}
class BaseActionSerializer(JSONAPISerializer):
    """Common JSON-API serializer for state-machine actions on a target."""
    # Fields clients may filter on via query parameters.
    filterable_fields = frozenset([
        'id',
        'trigger',
        'from_state',
        'to_state',
        'date_created',
        'date_modified',
        'target',
    ])
    id = ser.CharField(source='_id', read_only=True)
    # The state-machine transition the client wants to run (see create()).
    trigger = ser.ChoiceField(choices=DefaultTriggers.choices())
    comment = ser.CharField(max_length=65535, required=False)
    # States are machine-managed: exposed read-only, changed only via triggers.
    from_state = ser.ChoiceField(choices=DefaultStates.choices(), read_only=True)
    to_state = ser.ChoiceField(choices=DefaultStates.choices(), read_only=True)
    date_created = ser.DateTimeField(source='created', read_only=True)
    date_modified = ser.DateTimeField(source='modified', read_only=True)
    creator = RelationshipField(
        read_only=True,
        related_view='users:user-detail',
        related_view_kwargs={'user_id': '<creator._id>'},
        filter_key='creator__guids___id',
        always_embed=True,
    )
    links = LinksField(
        {
            'self': 'get_action_url',
        }
    )
    def get_absolute_url(self, obj):
        """Return the canonical URL of this action (same as its detail link)."""
        return self.get_action_url(obj)
    def get_action_url(self, obj):
        """Build the absolute action-detail URL for the requested API version."""
        return utils.absolute_reverse('actions:action-detail', kwargs={'action_id': obj._id, 'version': self.context['request'].parser_context['kwargs']['version']})
    def create(self, validated_data):
        """Dispatch the named trigger to the target's run_* method.

        Raises:
            Conflict: if the trigger is not valid from the current state.
            JSONAPIAttributeException: if the trigger name matches no branch.
        """
        trigger = validated_data.pop('trigger')
        user = validated_data.pop('user')
        target = validated_data.pop('target')
        comment = validated_data.pop('comment', '')
        permissions = validated_data.pop('permissions', '')
        visible = validated_data.pop('visible', '')
        try:
            if trigger == DefaultTriggers.ACCEPT.value:
                return target.run_accept(user=user, comment=comment, permissions=permissions, visible=visible)
            if trigger == DefaultTriggers.REJECT.value:
                return target.run_reject(user, comment)
            if trigger == DefaultTriggers.EDIT_COMMENT.value:
                return target.run_edit_comment(user, comment)
            if trigger == DefaultTriggers.SUBMIT.value:
                return target.run_submit(user)
        except InvalidTriggerError as e:
            # Invalid transition from the current state
            # NOTE(review): e.message is Python 2 only; use str(e) if this
            # module ever moves to Python 3.
            raise Conflict(e.message)
        else:
            # Reached only when no branch above matched (they all return).
            raise JSONAPIAttributeException(attribute='trigger', detail='Invalid trigger.')
    class Meta:
        type_ = 'actions'
        abstract = True
class ReviewActionSerializer(BaseActionSerializer):
    """Serializer for moderation actions on preprints."""
    class Meta:
        type_ = 'review-actions'
    # Extends the base filterable fields with 'provider'.
    filterable_fields = frozenset([
        'id',
        'trigger',
        'from_state',
        'to_state',
        'date_created',
        'date_modified',
        'provider',
        'target',
    ])
    # comment/creator are wrapped so visibility follows the provider's
    # comment privacy/anonymity settings.
    comment = HideIfProviderCommentsPrivate(ser.CharField(max_length=65535, required=False))
    trigger = ser.ChoiceField(choices=ReviewTriggers.choices())
    from_state = ser.ChoiceField(choices=ReviewStates.choices(), read_only=True)
    to_state = ser.ChoiceField(choices=ReviewStates.choices(), read_only=True)
    provider = RelationshipField(
        read_only=True,
        related_view='providers:preprint-providers:preprint-provider-detail',
        related_view_kwargs={'provider_id': '<target.provider._id>'},
        filter_key='target__provider___id',
    )
    creator = HideIfProviderCommentsAnonymous(RelationshipField(
        read_only=True,
        related_view='users:user-detail',
        related_view_kwargs={'user_id': '<creator._id>'},
        filter_key='creator__guids___id',
        always_embed=True,
    ))
    target = TargetRelationshipField(
        target_class=PreprintService,
        read_only=False,
        required=True,
        related_view='preprints:preprint-detail',
        related_view_kwargs={'preprint_id': '<target._id>'},
        filter_key='target__guids___id',
    )
    def create(self, validated_data):
        """Handle the WITHDRAW trigger here; defer all others to the base class."""
        trigger = validated_data.get('trigger')
        if trigger != ReviewTriggers.WITHDRAW.value:
            return super(ReviewActionSerializer, self).create(validated_data)
        user = validated_data.pop('user')
        target = validated_data.pop('target')
        comment = validated_data.pop('comment', '')
        try:
            return target.run_withdraw(user=user, comment=comment)
        except InvalidTriggerError as e:
            # Invalid transition from the current state
            raise Conflict(e.message)
        else:
            # NOTE(review): unreachable -- the try block always returns or
            # raises; kept for symmetry with BaseActionSerializer.create.
            raise JSONAPIAttributeException(attribute='trigger', detail='Invalid trigger.')
class NodeRequestActionSerializer(BaseActionSerializer):
    """Serializer for actions taken on node access requests."""
    class Meta:
        type_ = 'node-request-actions'
    target = TargetRelationshipField(
        target_class=NodeRequest,
        read_only=False,
        required=True,
        related_view='requests:request-detail',
        related_view_kwargs={'request_id': '<target._id>'},
    )
    # NOTE(review): the field name shadows the module-level `permissions`
    # import inside this class body; the RHS below still resolves to the
    # module because the class attribute is bound only after evaluation.
    permissions = ser.ChoiceField(choices=permissions.PERMISSIONS, required=False)
    visible = ser.BooleanField(default=True, required=False)
class PreprintRequestActionSerializer(BaseActionSerializer):
    """Serializer for actions taken on preprint requests (e.g. withdrawals)."""
    class Meta:
        type_ = 'preprint-request-actions'
    target = TargetRelationshipField(
        target_class=PreprintRequest,
        read_only=False,
        required=True,
        related_view='requests:request-detail',
        related_view_kwargs={'request_id': '<target._id>'},
    )
| {
"content_hash": "f931ef3eb4ce9f79ab93bbf347b3e3d9",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 165,
"avg_line_length": 37.17256637168141,
"alnum_prop": 0.6618259730984407,
"repo_name": "sloria/osf.io",
"id": "70b197e2ca06e70f93532651481e74733b49dda2",
"size": "8425",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api/actions/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109070"
},
{
"name": "Dockerfile",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "263083"
},
{
"name": "JavaScript",
"bytes": "1856674"
},
{
"name": "Mako",
"bytes": "690812"
},
{
"name": "Python",
"bytes": "8397175"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
import copy
import typing
from types import SimpleNamespace
from federatedml.param.base_param import BaseParam
from federatedml.param.cross_validation_param import CrossValidationParam
from federatedml.param.predict_param import PredictParam
from federatedml.param.callback_param import CallbackParam
import json
class HomoNNParam(BaseParam):
    """
    Parameters used for Homo Neural Network.
    Parameters
    ----------
    secure_aggregate : bool
        enable secure aggregation or not, defaults to True.
    aggregate_every_n_epoch : int
        aggregate model every n epoch, defaults to 1.
    config_type : {"nn", "keras", "pytorch"}
        config type (the set accepted by check()).
    nn_define : dict
        a dict represents the structure of neural network.
    optimizer : str or dict
        optimizer method, accept following types:
        1. a string, one of "Adadelta", "Adagrad", "Adam", "Adamax", "Nadam", "RMSprop", "SGD"
        2. a dict, with a required key-value pair keyed by "optimizer",
            with optional key-value pairs such as learning rate.
        defaults to "SGD"
    loss : str
        loss
    metrics: str or list of str
        metrics
    max_iter: int
        the maximum iteration for aggregation in training.
    batch_size : int
        batch size when updating model.
        -1 means use all data in a batch. i.e. Not to use mini-batch strategy.
        defaults to -1.
    early_stop : {'diff', 'weight_diff', 'abs'}
        Method used to judge converge or not.
            a) diff: Use difference of loss between two iterations to judge whether converge.
            b) weight_diff: Use difference between weights of two consecutive iterations
            c) abs: Use the absolute value of loss to judge whether converge. i.e. if loss < eps, it is converged.
    encode_label : bool
        encode label to one_hot.
    """
    def __init__(
        self,
        api_version: int = 0,
        secure_aggregate: bool = True,
        aggregate_every_n_epoch: int = 1,
        config_type: str = "nn",
        nn_define: dict = None,
        optimizer: typing.Union[str, dict, SimpleNamespace] = "SGD",
        loss: str = None,
        metrics: typing.Union[str, list] = None,
        max_iter: int = 100,
        batch_size: int = -1,
        early_stop: typing.Union[str, dict, SimpleNamespace] = "diff",
        encode_label: bool = False,
        # Mutable default instances are safe here: they are deep-copied below.
        predict_param=PredictParam(),
        cv_param=CrossValidationParam(),
        callback_param=CallbackParam(),
    ):
        super(HomoNNParam, self).__init__()
        self.api_version = api_version
        self.secure_aggregate = secure_aggregate
        self.aggregate_every_n_epoch = aggregate_every_n_epoch
        self.config_type = config_type
        # NOTE(review): defaults to a list, but for config_type "keras" the
        # pb round-trip below treats nn_define as a single dict -- confirm
        # that keras callers always pass an explicit dict.
        self.nn_define = nn_define or []
        self.encode_label = encode_label
        self.batch_size = batch_size
        self.max_iter = max_iter
        self.early_stop = early_stop
        self.metrics = metrics
        self.optimizer = optimizer
        self.loss = loss
        self.predict_param = copy.deepcopy(predict_param)
        self.cv_param = copy.deepcopy(cv_param)
        self.callback_param = copy.deepcopy(callback_param)
    def check(self):
        # Validate config_type and normalize the user-facing specs into
        # SimpleNamespace/list forms used by the rest of the class.
        supported_config_type = ["nn", "keras", "pytorch"]
        if self.config_type not in supported_config_type:
            raise ValueError(f"config_type should be one of {supported_config_type}")
        self.early_stop = _parse_early_stop(self.early_stop)
        self.metrics = _parse_metrics(self.metrics)
        self.optimizer = _parse_optimizer(self.optimizer)
    def generate_pb(self):
        """Serialize this param object into a HomoNNParam protobuf message."""
        from federatedml.protobuf.generated import nn_model_meta_pb2
        pb = nn_model_meta_pb2.HomoNNParam()
        pb.secure_aggregate = self.secure_aggregate
        pb.encode_label = self.encode_label
        pb.aggregate_every_n_epoch = self.aggregate_every_n_epoch
        pb.config_type = self.config_type
        # nn/pytorch store one JSON string per layer; keras stores the whole
        # model config as a single JSON string.
        if self.config_type == "nn":
            for layer in self.nn_define:
                pb.nn_define.append(json.dumps(layer))
        elif self.config_type == "keras":
            pb.nn_define.append(json.dumps(self.nn_define))
        elif self.config_type == "pytorch":
            for layer in self.nn_define:
                pb.nn_define.append(json.dumps(layer))
        pb.batch_size = self.batch_size
        pb.max_iter = self.max_iter
        # Assumes check() already ran: early_stop/optimizer are namespaces.
        pb.early_stop.early_stop = self.early_stop.converge_func
        pb.early_stop.eps = self.early_stop.eps
        for metric in self.metrics:
            pb.metrics.append(metric)
        pb.optimizer.optimizer = self.optimizer.optimizer
        pb.optimizer.args = json.dumps(self.optimizer.kwargs)
        pb.loss = self.loss
        return pb
    def restore_from_pb(self, pb, is_warm_start_mode: bool = False):
        """Populate this param object from a HomoNNParam protobuf message.

        In warm-start mode, max_iter/optimizer/early_stop keep their current
        (user-supplied) values instead of the persisted ones.
        """
        self.secure_aggregate = pb.secure_aggregate
        self.encode_label = pb.encode_label
        self.aggregate_every_n_epoch = pb.aggregate_every_n_epoch
        self.config_type = pb.config_type
        # Mirror of the nn_define encoding used in generate_pb().
        if self.config_type == "nn":
            for layer in pb.nn_define:
                self.nn_define.append(json.loads(layer))
        elif self.config_type == "keras":
            self.nn_define = json.loads(pb.nn_define[0])
        elif self.config_type == "pytorch":
            for layer in pb.nn_define:
                self.nn_define.append(json.loads(layer))
        else:
            raise ValueError(f"{self.config_type} is not supported")
        self.batch_size = pb.batch_size
        if not is_warm_start_mode:
            self.max_iter = pb.max_iter
            self.optimizer = _parse_optimizer(
                dict(optimizer=pb.optimizer.optimizer, **json.loads(pb.optimizer.args))
            )
            self.early_stop = _parse_early_stop(
                dict(early_stop=pb.early_stop.early_stop, eps=pb.early_stop.eps)
            )
        self.metrics = list(pb.metrics)
        self.loss = pb.loss
        return pb
def _parse_metrics(param):
"""
Examples:
1. "metrics": "Accuracy"
2. "metrics": ["Accuracy"]
"""
if not param:
return []
elif isinstance(param, str):
return [param]
elif isinstance(param, list):
return param
else:
raise ValueError(f"invalid metrics type: {type(param)}")
def _parse_optimizer(param):
"""
Examples:
1. "optimize": "SGD"
2. "optimize": {
"optimizer": "SGD",
"learning_rate": 0.05
}
"""
kwargs = {}
if isinstance(param, str):
return SimpleNamespace(optimizer=param, kwargs=kwargs)
elif isinstance(param, dict):
optimizer = param.get("optimizer", kwargs)
if not optimizer:
raise ValueError(f"optimizer config: {param} invalid")
kwargs = {k: v for k, v in param.items() if k != "optimizer"}
return SimpleNamespace(optimizer=optimizer, kwargs=kwargs)
else:
raise ValueError(f"invalid type for optimize: {type(param)}")
def _parse_early_stop(param):
"""
Examples:
1. "early_stop": "diff"
2. "early_stop": {
"early_stop": "diff",
"eps": 0.0001
}
"""
default_eps = 0.0001
if isinstance(param, str):
return SimpleNamespace(converge_func=param, eps=default_eps)
elif isinstance(param, dict):
early_stop = param.get("early_stop", param.get("converge_func"))
eps = param.get("eps", default_eps)
if not early_stop:
raise ValueError(f"early_stop config: {param} invalid")
return SimpleNamespace(converge_func=early_stop, eps=eps)
else:
raise ValueError(f"invalid type for early_stop: {type(param)}")
| {
"content_hash": "f8648f98e80bdf04f5ba01b2480530da",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 114,
"avg_line_length": 34.83783783783784,
"alnum_prop": 0.6069304370312905,
"repo_name": "FederatedAI/FATE",
"id": "ebb3e67c84f93f7bd72130282bc9f45527fc8460",
"size": "8399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/federatedml/param/homo_nn_param.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
} |
"""Conan recipe package for Google FlatBuffers
"""
import os
from conans import ConanFile, CMake, tools
class FlatbuffersConan(ConanFile):
    """Conan recipe building FlatBuffers from the bundled sources via CMake."""
    # Package identity and metadata consumed by Conan.
    name = "flatbuffers"
    version = "1.8.0"
    license = "https://github.com/google/flatbuffers/blob/master/LICENSE.txt"
    url = "https://github.com/google/flatbuffers"
    description = "Memory Efficient Serialization Library"
    settings = "os", "compiler", "build_type", "arch", "os_build", "arch_build"
    options = {"shared": [True, False]}
    default_options = "shared=False"
    generators = "cmake"
    exports = "LICENSE.txt"
    # Sources copied into the build folder (recipe lives in-tree).
    exports_sources = ["CMake/*", "include/*", "src/*", "grpc/*", "CMakeLists.txt"]
    def _inject_magic_lines(self):
        """Inject Conan setup in cmake file to solve external dependencies.
        """
        # Rewrites the project() line in place so the Conan toolchain is
        # loaded before any target is defined.
        conan_magic_lines = '''project(FlatBuffers)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
'''
        tools.replace_in_file("CMakeLists.txt", "project(FlatBuffers)", conan_magic_lines)
    def build(self):
        """Configure, build and install FlatBuffers using CMake.
        """
        self._inject_magic_lines()
        cmake = CMake(self)
        # Tests are skipped; shared/static follows the package option.
        cmake.definitions["FLATBUFFERS_BUILD_TESTS"] = False
        cmake.definitions["FLATBUFFERS_BUILD_SHAREDLIB"] = self.options.shared
        cmake.configure()
        cmake.build()
        cmake.install()
    def package(self):
        """Copy Flatbuffers' artifacts to package folder
        """
        self.copy(pattern="LICENSE.txt", dst="licenses")
        self.copy(pattern="flathash*", dst="bin", src="bin")
    def package_info(self):
        """Collect built libraries names and solve flatc path.
        """
        self.cpp_info.libs = tools.collect_libs(self)
        # Expose the bin dir so consumers can run flatc/flathash.
        self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
| {
"content_hash": "0a2063f7074053f259a14f01bfd87b44",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 90,
"avg_line_length": 37.04,
"alnum_prop": 0.6328293736501079,
"repo_name": "chenkai036/flatbuffers",
"id": "aac7606d0a5c514514c7302254669c33041c9ae2",
"size": "1899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conanfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4995"
},
{
"name": "C",
"bytes": "1809"
},
{
"name": "C#",
"bytes": "178205"
},
{
"name": "C++",
"bytes": "1110133"
},
{
"name": "CMake",
"bytes": "22530"
},
{
"name": "Go",
"bytes": "142871"
},
{
"name": "Java",
"bytes": "131823"
},
{
"name": "JavaScript",
"bytes": "124600"
},
{
"name": "Makefile",
"bytes": "12315"
},
{
"name": "PHP",
"bytes": "147705"
},
{
"name": "Python",
"bytes": "139551"
},
{
"name": "Shell",
"bytes": "16499"
},
{
"name": "TypeScript",
"bytes": "76589"
}
],
"symlink_target": ""
} |
"""A library to generate and store the manifests for cros builders to use."""
from __future__ import print_function
import cPickle
import fnmatch
import glob
import logging
import os
import re
import shutil
import tempfile
from chromite.cbuildbot import constants
from chromite.cbuildbot import repository
from chromite.lib import cidb
from chromite.lib import cros_build_lib
from chromite.lib import git
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib import timeout_util
# GS location where per-builder status files are stored.
BUILD_STATUS_URL = '%s/builder-status' % constants.MANIFEST_VERSIONS_GS_URL
# Scratch branch used when pushing version-bump commits (see _PushGitChanges).
PUSH_BRANCH = 'temp_auto_checkin_branch'
# NOTE(review): not referenced in this chunk; presumably a retry budget for
# git/GS operations elsewhere in the module -- confirm before changing.
NUM_RETRIES = 20
class VersionUpdateException(Exception):
  """Raised when the version file cannot be updated."""
class StatusUpdateException(Exception):
  """Raised when the builder status cannot be updated."""
class GenerateBuildSpecException(Exception):
  """Raised when generating a buildspec for the build fails."""
class BuildSpecsValueError(Exception):
  """Raised upon encountering invalid buildspec values."""
def RefreshManifestCheckout(manifest_dir, manifest_repo):
  """Checks out manifest-versions into the manifest directory.

  If a repository is already present, it will be cleansed of any local
  changes and restored to its pristine state, checking out the origin.
  """
  def _TryUpdateExisting():
    """Attempt an in-place update of the checkout; return True on success."""
    origin = git.RunGit(manifest_dir, ['config', 'remote.origin.url'],
                        error_code_ok=True)
    if origin.returncode != 0 or origin.output.rstrip() != manifest_repo:
      # Wrong (or unreadable) remote -- caller must re-clone.
      return False
    logging.info('Updating manifest-versions checkout.')
    try:
      git.RunGit(manifest_dir, ['gc', '--auto'])
      git.CleanAndCheckoutUpstream(manifest_dir)
    except cros_build_lib.RunCommandError:
      logging.warning('Could not update manifest-versions checkout.')
      return False
    return True

  fresh_clone_needed = True
  if os.path.exists(manifest_dir):
    fresh_clone_needed = not _TryUpdateExisting()
  else:
    logging.info('No manifest-versions checkout exists at %s', manifest_dir)
  if fresh_clone_needed:
    logging.info('Cloning fresh manifest-versions checkout.')
    osutils.RmDir(manifest_dir, ignore_missing=True)
    repository.CloneGitRepo(manifest_dir, manifest_repo)
def _PushGitChanges(git_repo, message, dry_run=True, push_to=None):
  """Push the final commit into the git repo.

  Args:
    git_repo: git repo to push
    message: Commit message
    dry_run: If true, don't actually push changes to the server
    push_to: A git.RemoteRef object specifying the remote branch to push to.
      Defaults to the tracking branch of the current branch.
  """
  if push_to is None:
    # Resolve the default destination from the tracking branch.
    remote, branch = git.GetTrackingBranch(
        git_repo, for_checkout=False, for_push=True)
    push_to = git.RemoteRef(remote, branch)
  git.RunGit(git_repo, ['add', '-A'])
  # It's possible that while we are running on dry_run, someone has already
  # committed our change.
  try:
    git.RunGit(git_repo, ['commit', '-m', message])
  except cros_build_lib.RunCommandError:
    if not dry_run:
      raise
    return
  git.GitPush(git_repo, PUSH_BRANCH, push_to, dryrun=dry_run, force=dry_run)
def CreateSymlink(src_file, dest_file):
  """Creates a relative symlink from src to dest with optional removal of file.

  More robust symlink creation that creates a relative symlink from src_file
  to dest_file.  Useful for multiple calls of CreateSymlink where you are
  using the dest_file location to store information about the status of the
  src_file.

  Args:
    src_file: source for the symlink
    dest_file: destination for the symlink
  """
  link_dir = os.path.dirname(dest_file)
  # Replace any stale link and make sure the destination directory exists.
  osutils.SafeUnlink(dest_file)
  osutils.SafeMakedirs(link_dir)
  link_target = os.path.relpath(src_file, link_dir)
  logging.debug('Linking %s to %s', link_target, dest_file)
  os.symlink(link_target, dest_file)
class VersionInfo(object):
"""Class to encapsulate the Chrome OS version info scheme.
You can instantiate this class in three ways.
1) using a version file, specifically chromeos_version.sh,
which contains the version information.
2) passing in a string with the 3 version components.
3) using a source repo and calling from_repo().
Args:
version_string: Optional 3 component version string to parse. Contains:
build_number: release build number.
branch_build_number: current build number on a branch.
patch_number: patch number.
chrome_branch: If version_string specified, specify chrome_branch i.e. 13.
incr_type: How we should increment this version -
chrome_branch|build|branch|patch
version_file: version file location.
"""
# Pattern for matching build name format. Includes chrome branch hack.
VER_PATTERN = r'(\d+).(\d+).(\d+)(?:-R(\d+))*'
KEY_VALUE_PATTERN = r'%s=(\d+)\s*$'
VALID_INCR_TYPES = ('chrome_branch', 'build', 'branch', 'patch')
def __init__(self, version_string=None, chrome_branch=None,
incr_type='build', version_file=None):
if version_file:
self.version_file = version_file
logging.debug('Using VERSION _FILE = %s', version_file)
self._LoadFromFile()
else:
match = re.search(self.VER_PATTERN, version_string)
self.build_number = match.group(1)
self.branch_build_number = match.group(2)
self.patch_number = match.group(3)
self.chrome_branch = chrome_branch
self.version_file = None
self.incr_type = incr_type
@classmethod
def from_repo(cls, source_repo, **kwargs):
kwargs['version_file'] = os.path.join(source_repo, constants.VERSION_FILE)
return cls(**kwargs)
def _LoadFromFile(self):
"""Read the version file and set the version components"""
with open(self.version_file, 'r') as version_fh:
for line in version_fh:
if not line.strip():
continue
match = self.FindValue('CHROME_BRANCH', line)
if match:
self.chrome_branch = match
logging.debug('Set the Chrome branch number to:%s',
self.chrome_branch)
continue
match = self.FindValue('CHROMEOS_BUILD', line)
if match:
self.build_number = match
logging.debug('Set the build version to:%s', self.build_number)
continue
match = self.FindValue('CHROMEOS_BRANCH', line)
if match:
self.branch_build_number = match
logging.debug('Set the branch version to:%s',
self.branch_build_number)
continue
match = self.FindValue('CHROMEOS_PATCH', line)
if match:
self.patch_number = match
logging.debug('Set the patch version to:%s', self.patch_number)
continue
logging.debug(self.VersionString())
def FindValue(self, key, line):
"""Given the key find the value from the line, if it finds key = value
Args:
key: key to look for
line: string to search
Returns:
None: on a non match
value: for a matching key
"""
match = re.search(self.KEY_VALUE_PATTERN % (key,), line)
return match.group(1) if match else None
def IncrementVersion(self):
"""Updates the version file by incrementing the patch component.
Args:
message: Commit message to use when incrementing the version.
dry_run: Git dry_run.
"""
if not self.incr_type or self.incr_type not in self.VALID_INCR_TYPES:
raise VersionUpdateException('Need to specify the part of the version to'
' increment')
if self.incr_type == 'chrome_branch':
self.chrome_branch = str(int(self.chrome_branch) + 1)
# Increment build_number for 'chrome_branch' incr_type to avoid
# crbug.com/213075.
if self.incr_type in ('build', 'chrome_branch'):
self.build_number = str(int(self.build_number) + 1)
self.branch_build_number = '0'
self.patch_number = '0'
elif self.incr_type == 'branch' and self.patch_number == '0':
self.branch_build_number = str(int(self.branch_build_number) + 1)
else:
self.patch_number = str(int(self.patch_number) + 1)
return self.VersionString()
def UpdateVersionFile(self, message, dry_run, push_to=None):
  """Update the version file with our current version.

  Rewrites each known KEY=VALUE line in the checked-out version file to the
  values held by this instance, then commits and pushes the result on
  PUSH_BRANCH.

  Args:
    message: Commit message used for the version-bump commit.
    dry_run: Passed through to _PushGitChanges as its dry_run flag.
    push_to: Optional push target, passed through to _PushGitChanges.

  Raises:
    VersionUpdateException: If this instance has no associated version_file.
  """
  if not self.version_file:
    raise VersionUpdateException('Cannot call UpdateVersionFile without '
                                 'an associated version_file')

  # Keys we know how to rewrite, paired with their current values.
  components = (('CHROMEOS_BUILD', self.build_number),
                ('CHROMEOS_BRANCH', self.branch_build_number),
                ('CHROMEOS_PATCH', self.patch_number),
                ('CHROME_BRANCH', self.chrome_branch))

  with tempfile.NamedTemporaryFile(prefix='mvp') as temp_fh:
    with open(self.version_file, 'r') as source_version_fh:
      for line in source_version_fh:
        # Substitute the new value on any line holding a known key; lines
        # matching no key pass through unchanged.
        for key, value in components:
          line = re.sub(self.KEY_VALUE_PATTERN % (key,),
                        '%s=%s\n' % (key, value), line)
        temp_fh.write(line)
      temp_fh.flush()
      repo_dir = os.path.dirname(self.version_file)
      try:
        git.CreateBranch(repo_dir, PUSH_BRANCH)
        # Only overwrite the real file once the whole rewrite succeeded.
        shutil.copyfile(temp_fh.name, self.version_file)
        _PushGitChanges(repo_dir, message, dry_run=dry_run, push_to=push_to)
      finally:
        # Update to the remote version that contains our changes. This is needed
        # to ensure that we don't build a release using a local commit.
        git.CleanAndCheckoutUpstream(repo_dir)
def VersionString(self):
  """Return the platform version formatted as 'build.branch.patch'."""
  parts = (self.build_number, self.branch_build_number, self.patch_number)
  return '%s.%s.%s' % parts
@classmethod
def VersionCompare(cls, version_string):
  """Parse a LKGM version string into a comparable sequence of ints."""
  info = cls(version_string)
  components = [info.build_number, info.branch_build_number,
                info.patch_number]
  return map(int, components)
def BuildPrefix(self):
  """Return the build prefix to match the buildspecs in manifest-versions."""
  if self.incr_type != 'branch':
    # Default to build incr_type: no prefix restriction.
    return ''
  if self.patch_number == '0':
    return '%s.' % self.build_number
  return '%s.%s.' % (self.build_number, self.branch_build_number)
class BuilderStatus(object):
  """Snapshot of a single builder run's outcome."""

  MISSING_MESSAGE = ('Unknown run, it probably never started:'
                     ' %(builder)s, version %(version)s')

  def __init__(self, status, message, dashboard_url=None):
    """Record a builder's status.

    Args:
      status: Status string (should be one of STATUS_FAILED, STATUS_PASSED,
        STATUS_INFLIGHT, or STATUS_MISSING).
      message: A failures_lib.BuildFailureMessage object with details
        of builder failure. Or, None.
      dashboard_url: Optional url linking to builder dashboard for this build.
    """
    self.status = status
    self.message = message
    self.dashboard_url = dashboard_url

  # Convenience predicates over self.status.

  def Failed(self):
    """Return True if this builder failed."""
    return self.status == constants.BUILDER_STATUS_FAILED

  def Passed(self):
    """Return True if this builder passed."""
    return self.status == constants.BUILDER_STATUS_PASSED

  def Inflight(self):
    """Return True if this builder is still inflight."""
    return self.status == constants.BUILDER_STATUS_INFLIGHT

  def Missing(self):
    """Return True if no status was recorded for this builder."""
    return self.status == constants.BUILDER_STATUS_MISSING

  def Completed(self):
    """Return True if this builder reached a terminal status."""
    return self.status in constants.BUILDER_COMPLETED_STATUSES

  @classmethod
  def GetCompletedStatus(cls, success):
    """Map a boolean success flag to the matching terminal status constant.

    Args:
      success: Whether the build was successful or not.
    """
    return (constants.BUILDER_STATUS_PASSED if success
            else constants.BUILDER_STATUS_FAILED)

  def AsFlatDict(self):
    """Flatten this status into a json-able dict of plain strings.

    Returns:
      A dict with 'status', 'message' and 'reason' keys, every value
      stringified. 'dashboard_url' is included only when it is set.
    """
    reason = None if self.message is None else self.message.reason
    flattened = {'status' : str(self.status),
                 'message' : str(self.message),
                 'reason' : str(reason)}
    if self.dashboard_url is not None:
      flattened['dashboard_url'] = str(self.dashboard_url)
    return flattened

  def AsPickledDict(self):
    """Return this status pickled as a {status, message, dashboard_url} dict."""
    payload = {'status': self.status,
               'message': self.message,
               'dashboard_url': self.dashboard_url}
    return cPickle.dumps(payload)
class BuildSpecsManager(object):
  """A Class to manage buildspecs and their states.

  Orchestrates the manifest-versions checkout: finding/creating buildspecs,
  publishing them, and recording per-builder pass/fail status (via symlinks
  in the manifest repo and pickled BuilderStatus files in Google Storage).
  """

  # Seconds to sleep between polls of slave build statuses.
  SLEEP_TIMEOUT = 1 * 60

  def __init__(self, source_repo, manifest_repo, build_names, incr_type, force,
               branch, manifest=constants.DEFAULT_MANIFEST, dry_run=True,
               master=False):
    """Initializes a build specs manager.

    Args:
      source_repo: Repository object for the source code.
      manifest_repo: Manifest repository for manifest versions / buildspecs.
      build_names: Identifiers for the build. Must match cbuildbot_config
        entries. If multiple identifiers are provided, the first item in the
        list must be an identifier for the group.
      incr_type: How we should increment this version - build|branch|patch
      force: Create a new manifest even if there are no changes.
      branch: Branch this builder is running on.
      manifest: Manifest to use for checkout. E.g. 'full' or 'buildtools'.
      dry_run: Whether we actually commit changes we make or not.
      master: Whether we are the master builder.
    """
    self.cros_source = source_repo
    buildroot = source_repo.directory
    # Internal manifests get their own checkout directory.
    if manifest_repo.startswith(constants.INTERNAL_GOB_URL):
      self.manifest_dir = os.path.join(buildroot, 'manifest-versions-internal')
    else:
      self.manifest_dir = os.path.join(buildroot, 'manifest-versions')

    self.manifest_repo = manifest_repo
    self.build_names = build_names
    self.incr_type = incr_type
    self.force = force
    self.branch = branch
    self.manifest = manifest
    self.dry_run = dry_run
    self.master = master

    # Directories and specifications are set once we load the specs.
    self.all_specs_dir = None
    self.pass_dirs = None
    self.fail_dirs = None

    # Specs.
    self.latest = None
    self._latest_status = None
    self.latest_unprocessed = None
    self.compare_versions_fn = VersionInfo.VersionCompare

    self.current_version = None
    self.rel_working_dir = ''

  def _LatestSpecFromList(self, specs):
    """Find the latest spec in a list of specs.

    Args:
      specs: List of specs.

    Returns:
      The latest spec if specs is non-empty.
      None otherwise.
    """
    if specs:
      return max(specs, key=self.compare_versions_fn)

  def _LatestSpecFromDir(self, version_info, directory):
    """Returns the latest buildspec that match '*.xml' in a directory.

    Args:
      version_info: A VersionInfo object which will provide a build prefix
        to match for.
      directory: Directory of the buildspecs.

    Returns:
      The latest matching spec name (without the '.xml' extension), or None
      if the directory does not exist (implicit).
    """
    if os.path.exists(directory):
      match_string = version_info.BuildPrefix() + '*.xml'
      specs = fnmatch.filter(os.listdir(directory), match_string)
      return self._LatestSpecFromList([os.path.splitext(m)[0] for m in specs])

  def RefreshManifestCheckout(self):
    """Checks out manifest versions into the manifest directory."""
    # Delegates to the module-level helper of the same name.
    RefreshManifestCheckout(self.manifest_dir, self.manifest_repo)

  def InitializeManifestVariables(self, version_info=None, version=None):
    """Initializes manifest-related instance variables.

    Args:
      version_info: Info class for version information of cros. If None,
        version must be specified instead.
      version: Requested version. If None, build the latest version.

    Returns:
      Whether the requested version was found.
    """
    assert version_info or version, 'version or version_info must be specified'
    working_dir = os.path.join(self.manifest_dir, self.rel_working_dir)
    specs_for_builder = os.path.join(working_dir, 'build-name', '%(builder)s')
    buildspecs = os.path.join(working_dir, 'buildspecs')

    # If version is specified, find out what Chrome branch it is on.
    if version is not None:
      dirs = glob.glob(os.path.join(buildspecs, '*', version + '.xml'))
      if len(dirs) == 0:
        return False
      assert len(dirs) <= 1, 'More than one spec found for %s' % version
      # Specs live under buildspecs/<chrome_branch>/<version>.xml.
      dir_pfx = os.path.basename(os.path.dirname(dirs[0]))
      version_info = VersionInfo(chrome_branch=dir_pfx, version_string=version)
    else:
      dir_pfx = version_info.chrome_branch

    self.all_specs_dir = os.path.join(buildspecs, dir_pfx)
    self.pass_dirs, self.fail_dirs = [], []
    for build_name in self.build_names:
      specs_for_build = specs_for_builder % {'builder': build_name}
      self.pass_dirs.append(
          os.path.join(specs_for_build, constants.BUILDER_STATUS_PASSED,
                       dir_pfx))
      self.fail_dirs.append(
          os.path.join(specs_for_build, constants.BUILDER_STATUS_FAILED,
                       dir_pfx))

    # Calculate the status of the latest build, and whether the build was
    # processed.
    if version is None:
      self.latest = self._LatestSpecFromDir(version_info, self.all_specs_dir)
      if self.latest is not None:
        self._latest_status = self.GetBuildStatus(self.build_names[0],
                                                  self.latest)
        if self._latest_status.Missing():
          self.latest_unprocessed = self.latest

    return True

  def GetCurrentVersionInfo(self):
    """Returns the current version info from the version file."""
    version_file_path = self.cros_source.GetRelativePath(constants.VERSION_FILE)
    return VersionInfo(version_file=version_file_path, incr_type=self.incr_type)

  def HasCheckoutBeenBuilt(self):
    """Checks to see if we've previously built this checkout."""
    if self._latest_status and self._latest_status.Passed():
      latest_spec_file = '%s.xml' % os.path.join(
          self.all_specs_dir, self.latest)
      # We've built this checkout before if the manifest isn't different than
      # the last one we've built.
      return not self.cros_source.IsManifestDifferent(latest_spec_file)
    else:
      # We've never built this manifest before so this checkout is always new.
      return False

  def CreateManifest(self):
    """Returns the path to a new manifest based on the current checkout."""
    new_manifest = tempfile.mkstemp('manifest_versions.manifest')[1]
    osutils.WriteFile(new_manifest,
                      self.cros_source.ExportManifest(mark_revision=True))
    return new_manifest

  def GetNextVersion(self, version_info):
    """Returns the next version string that should be built."""
    version = version_info.VersionString()
    if self.latest == version:
      # The current version was already published; bump it and commit the
      # new version file before reusing the number.
      message = ('Automatic: %s - Updating to a new version number from %s' %
                 (self.build_names[0], version))
      version = version_info.IncrementVersion()
      version_info.UpdateVersionFile(message, dry_run=self.dry_run)
      assert version != self.latest
      cros_build_lib.Info('Incremented version number to %s', version)
    return version

  def PublishManifest(self, manifest, version, build_id=None):
    """Publishes the manifest as the manifest for the version to others.

    Args:
      manifest: Path to manifest file to publish.
      version: Manifest version string, e.g. 6102.0.0-rc4
      build_id: Optional integer giving build_id of the build that is
        publishing this manifest. If specified and non-negative,
        build_id will be included in the commit message.
    """
    # Note: This commit message is used by master.cfg for figuring out when to
    # trigger slave builders.
    commit_message = 'Automatic: Start %s %s %s' % (self.build_names[0],
                                                    self.branch, version)
    if build_id is not None and build_id >= 0:
      commit_message += '\nCrOS-Build-Id: %s' % build_id

    logging.info('Publishing build spec for: %s', version)
    logging.info('Publishing with commit message: %s', commit_message)
    logging.debug('Manifest contents below.\n%s', osutils.ReadFile(manifest))

    # Copy the manifest into the manifest repository.
    spec_file = '%s.xml' % os.path.join(self.all_specs_dir, version)
    osutils.SafeMakedirs(os.path.dirname(spec_file))
    shutil.copyfile(manifest, spec_file)

    # Actually push the manifest.
    self.PushSpecChanges(commit_message)

  def DidLastBuildFail(self):
    """Returns True if the last build failed."""
    return self._latest_status and self._latest_status.Failed()

  @staticmethod
  def GetBuildStatus(builder, version, retries=NUM_RETRIES):
    """Returns a BuilderStatus instance for the given the builder.

    Args:
      builder: Builder to look at.
      version: Version string.
      retries: Number of retries for getting the status.

    Returns:
      A BuilderStatus instance containing the builder status and any optional
      message associated with the status passed by the builder. If no status
      is found for this builder then the returned BuilderStatus object will
      have status STATUS_MISSING.
    """
    url = BuildSpecsManager._GetStatusUrl(builder, version)
    ctx = gs.GSContext(retries=retries)
    try:
      output = ctx.Cat(url)
    except gs.GSNoSuchKey:
      return BuilderStatus(constants.BUILDER_STATUS_MISSING, None)

    return BuildSpecsManager._UnpickleBuildStatus(output)

  @staticmethod
  def GetSlaveStatusesFromCIDB(master_build_id):
    """Get statuses of slaves associated with |master_build_id|.

    Args:
      master_build_id: Master build id to check.

    Returns:
      A dictionary mapping the slave name to a status in
      BuildStatus.ALL_STATUSES.
    """
    status_dict = dict()
    db = cidb.CIDBConnectionFactory.GetCIDBConnectionForBuilder()
    assert db, 'No database connection to use.'
    status_list = db.GetSlaveStatuses(master_build_id)
    for d in status_list:
      status_dict[d['build_config']] = d['status']
    return status_dict

  def GetBuildersStatus(self, master_build_id, builders_array, timeout=3 * 60):
    """Get the statuses of the slave builders of the master.

    This function checks the status of slaves in |builders_array|. It
    queries CIDB for all builds associated with the |master_build_id|,
    then filters out builds that are not in |builders_array| (e.g.,
    slaves that are not important).

    Args:
      master_build_id: Master build id to check.
      builders_array: A list of the names of the builders to check.
      timeout: Number of seconds to wait for the results.

    Returns:
      A build-names->status dictionary of build statuses.
    """
    builders_completed = set()

    def _GetStatusesFromDB():
      """Helper function that iterates through current statuses."""
      status_dict = self.GetSlaveStatusesFromCIDB(master_build_id)
      for builder in set(builders_array) - set(status_dict.keys()):
        logging.warn('No status found for builder %s.', builder)

      latest_completed = set(
          [b for b, s in status_dict.iteritems() if s in
           constants.BUILDER_COMPLETED_STATUSES and b in builders_array])
      for builder in sorted(latest_completed - builders_completed):
        logging.info('Builder %s completed with status "%s".',
                     builder, status_dict[builder])
      builders_completed.update(latest_completed)

      if len(builders_completed) < len(builders_array):
        logging.info('Still waiting for the following builds to complete: %r',
                     sorted(set(builders_array).difference(builders_completed)))
        return None
      else:
        return 'Builds completed.'

    def _PrintRemainingTime(remaining):
      logging.info('%s until timeout...', remaining)

    # Check for build completion until all builders report in.
    try:
      builds_succeeded = timeout_util.WaitForSuccess(
          lambda x: x is None,
          _GetStatusesFromDB,
          timeout,
          period=self.SLEEP_TIMEOUT,
          side_effect_func=_PrintRemainingTime)
    except timeout_util.TimeoutError:
      builds_succeeded = None

    # Actually fetch the BuildStatus pickles from Google Storage.
    builder_statuses = {}
    for builder in builders_array:
      logging.debug("Checking for builder %s's status", builder)
      builder_status = self.GetBuildStatus(builder, self.current_version)
      builder_statuses[builder] = builder_status

    if not builds_succeeded:
      logging.error('Not all builds finished before timeout (%d minutes)'
                    ' reached.', int((timeout / 60) + 0.5))

    return builder_statuses

  @staticmethod
  def _UnpickleBuildStatus(pickle_string):
    """Returns a BuilderStatus instance from a pickled string."""
    try:
      status_dict = cPickle.loads(pickle_string)
    except (cPickle.UnpicklingError, AttributeError, EOFError,
            ImportError, IndexError, TypeError) as e:
      # The above exceptions are listed as possible unpickling exceptions
      # by http://docs.python.org/2/library/pickle
      # In addition to the exceptions listed in the doc, we've also observed
      # TypeError in the wild.
      logging.warning('Failed with %r to unpickle status file.', e)
      return BuilderStatus(constants.BUILDER_STATUS_FAILED, message=None)
    return BuilderStatus(**status_dict)

  def GetLatestPassingSpec(self):
    """Get the last spec file that passed in the current branch."""
    version_info = self.GetCurrentVersionInfo()
    return self._LatestSpecFromDir(version_info, self.pass_dirs[0])

  def GetLocalManifest(self, version=None):
    """Return path to local copy of manifest given by version.

    Returns:
      Path of |version|. By default if version is not set, returns the path
      of the current version.

    Raises:
      BuildSpecsValueError: If InitializeManifestVariables has not been
        called yet.
    """
    if not self.all_specs_dir:
      raise BuildSpecsValueError('GetLocalManifest failed, BuildSpecsManager '
                                 'instance not yet initialized by call to '
                                 'InitializeManifestVariables.')
    if version:
      return os.path.join(self.all_specs_dir, version + '.xml')
    elif self.current_version:
      return os.path.join(self.all_specs_dir, self.current_version + '.xml')

    return None

  def BootstrapFromVersion(self, version):
    """Initialize a manifest from a release version returning the path to it."""
    # Only refresh the manifest checkout if needed.
    if not self.InitializeManifestVariables(version=version):
      self.RefreshManifestCheckout()
      if not self.InitializeManifestVariables(version=version):
        raise BuildSpecsValueError('Failure in BootstrapFromVersion. '
                                   'InitializeManifestVariables failed after '
                                   'RefreshManifestCheckout for version '
                                   '%s.' % version)

    # Return the current manifest.
    self.current_version = version
    return self.GetLocalManifest(self.current_version)

  def CheckoutSourceCode(self):
    """Syncs the cros source to the latest git hashes for the branch."""
    self.cros_source.Sync(self.manifest)

  def GetNextBuildSpec(self, retries=NUM_RETRIES, build_id=None):
    """Returns a path to the next manifest to build.

    Args:
      retries: Number of retries for updating the status.
      build_id: Optional integer cidb id of this build, which will be used to
        annotate the manifest-version commit if one is created.

    Raises:
      GenerateBuildSpecException in case of failure to generate a buildspec
    """
    last_error = None
    for index in range(0, retries + 1):
      try:
        self.CheckoutSourceCode()

        version_info = self.GetCurrentVersionInfo()
        self.RefreshManifestCheckout()
        self.InitializeManifestVariables(version_info)

        if not self.force and self.HasCheckoutBeenBuilt():
          return None

        # If we're the master, always create a new build spec. Otherwise,
        # only create a new build spec if we've already built the existing
        # spec.
        if self.master or not self.latest_unprocessed:
          git.CreatePushBranch(PUSH_BRANCH, self.manifest_dir, sync=False)
          version = self.GetNextVersion(version_info)
          new_manifest = self.CreateManifest()
          self.PublishManifest(new_manifest, version, build_id=build_id)
        else:
          version = self.latest_unprocessed

        self.current_version = version
        return self.GetLocalManifest(version)
      except cros_build_lib.RunCommandError as e:
        last_error = 'Failed to generate buildspec. error: %s' % e
        logging.error(last_error)
        logging.error('Retrying to generate buildspec: Retry %d/%d', index + 1,
                      retries)

    # Cleanse any failed local changes and throw an exception.
    self.RefreshManifestCheckout()
    raise GenerateBuildSpecException(last_error)

  @staticmethod
  def _GetStatusUrl(builder, version):
    """Get the status URL in Google Storage for a given builder / version."""
    return os.path.join(BUILD_STATUS_URL, version, builder)

  def _UploadStatus(self, version, status, message=None, fail_if_exists=False,
                    dashboard_url=None):
    """Upload build status to Google Storage.

    Args:
      version: Version number to use. Must be a string.
      status: Status string.
      message: A failures_lib.BuildFailureMessage object with details
        of builder failure, or None (default).
      fail_if_exists: If set, fail if the status already exists.
      dashboard_url: Optional url linking to builder dashboard for this build.

    Raises:
      GenerateBuildSpecException: If fail_if_exists is set and a status file
        already exists for any of self.build_names.
    """
    data = BuilderStatus(status, message, dashboard_url).AsPickledDict()

    gs_version = None
    # This HTTP header tells Google Storage to return the PreconditionFailed
    # error message if the file already exists. Unfortunately, with new versions
    # of gsutil, PreconditionFailed is sometimes returned erroneously, so we've
    # replaced this check with an Exists check below instead.
    # TODO(davidjames): Revert CL:223267 when Google Storage is fixed.
    #if fail_if_exists:
    #  gs_version = 0

    logging.info('Recording status %s for %s', status, self.build_names)
    for build_name in self.build_names:
      url = BuildSpecsManager._GetStatusUrl(build_name, version)

      ctx = gs.GSContext(dry_run=self.dry_run)
      # Check if the file already exists.
      if fail_if_exists and not self.dry_run and ctx.Exists(url):
        raise GenerateBuildSpecException('Builder already inflight')
      # Do the actual upload.
      ctx.Copy('-', url, input=data, version=gs_version)

  def UploadStatus(self, success, message=None, dashboard_url=None):
    """Uploads the status of the build for the current build spec.

    Args:
      success: True for success, False for failure
      message: A failures_lib.BuildFailureMessage object with details
        of builder failure, or None (default).
      dashboard_url: Optional url linking to builder dashboard for this build.
    """
    status = BuilderStatus.GetCompletedStatus(success)
    self._UploadStatus(self.current_version, status, message=message,
                       dashboard_url=dashboard_url)

  def SetInFlight(self, version, dashboard_url=None):
    """Marks the buildspec as inflight in Google Storage."""
    try:
      self._UploadStatus(version, constants.BUILDER_STATUS_INFLIGHT,
                         fail_if_exists=True,
                         dashboard_url=dashboard_url)
    except gs.GSContextPreconditionFailed:
      raise GenerateBuildSpecException('Builder already inflight')
    except gs.GSContextException as e:
      raise GenerateBuildSpecException(e)

  def _SetPassSymlinks(self, success_map):
    """Marks the buildspec as passed by creating a symlink in passed dir.

    Args:
      success_map: Map of config names to whether they succeeded.
    """
    src_file = '%s.xml' % os.path.join(self.all_specs_dir, self.current_version)
    for i, build_name in enumerate(self.build_names):
      # pass_dirs/fail_dirs are parallel to build_names (see
      # InitializeManifestVariables).
      if success_map[build_name]:
        sym_dir = self.pass_dirs[i]
      else:
        sym_dir = self.fail_dirs[i]
      dest_file = '%s.xml' % os.path.join(sym_dir, self.current_version)
      status = BuilderStatus.GetCompletedStatus(success_map[build_name])
      logging.debug('Build %s: %s -> %s', status, src_file, dest_file)
      CreateSymlink(src_file, dest_file)

  def PushSpecChanges(self, commit_message):
    """Pushes any changes you have in the manifest directory."""
    _PushGitChanges(self.manifest_dir, commit_message, dry_run=self.dry_run)

  def UpdateStatus(self, success_map, message=None, retries=NUM_RETRIES,
                   dashboard_url=None):
    """Updates the status of the build for the current build spec.

    Args:
      success_map: Map of config names to whether they succeeded.
      message: Message accompanied with change in status.
      retries: Number of retries for updating the status
      dashboard_url: Optional url linking to builder dashboard for this build.

    Raises:
      StatusUpdateException: If all retries fail.
    """
    last_error = None
    if message:
      logging.info('Updating status with message %s', message)
    for index in range(0, retries + 1):
      try:
        self.RefreshManifestCheckout()
        git.CreatePushBranch(PUSH_BRANCH, self.manifest_dir, sync=False)
        success = all(success_map.values())
        commit_message = ('Automatic checkin: status=%s build_version %s for '
                          '%s' % (BuilderStatus.GetCompletedStatus(success),
                                  self.current_version,
                                  self.build_names[0]))

        self._SetPassSymlinks(success_map)

        self.PushSpecChanges(commit_message)
      except cros_build_lib.RunCommandError as e:
        # NOTE(review): e.message is Python 2 only semantics.
        last_error = ('Failed to update the status for %s with the '
                      'following error %s' % (self.build_names[0],
                                              e.message))
        logging.error(last_error)
        logging.error('Retrying to generate buildspec: Retry %d/%d', index + 1,
                      retries)
      else:
        # Upload status to Google Storage as well.
        self.UploadStatus(success, message=message, dashboard_url=dashboard_url)
        return

    # Cleanse any failed local changes and throw an exception.
    self.RefreshManifestCheckout()
    raise StatusUpdateException(last_error)
| {
"content_hash": "d8ec6b7285c438343bc218a3c51232ca",
"timestamp": "",
"source": "github",
"line_count": 920,
"max_line_length": 80,
"avg_line_length": 38.323913043478264,
"alnum_prop": 0.6667422996199444,
"repo_name": "mxOBS/deb-pkg_trusty_chromium-browser",
"id": "3aa28bd3e9e5521573b251e0c00b30ddf545a0e5",
"size": "35428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/cbuildbot/manifest_version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "230130"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "12435900"
},
{
"name": "C++",
"bytes": "264378706"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "795726"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Emacs Lisp",
"bytes": "2360"
},
{
"name": "Go",
"bytes": "31783"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "19491230"
},
{
"name": "Java",
"bytes": "7637875"
},
{
"name": "JavaScript",
"bytes": "12723911"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "14392"
},
{
"name": "Makefile",
"bytes": "208315"
},
{
"name": "Objective-C",
"bytes": "1460032"
},
{
"name": "Objective-C++",
"bytes": "7760068"
},
{
"name": "PLpgSQL",
"bytes": "175360"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "427212"
},
{
"name": "Python",
"bytes": "11447382"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104846"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1208350"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
} |
"""
Measurement channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/integrations/zha/
"""
import logging
import zigpy.zcl.clusters.measurement as measurement
from . import AttributeListeningChannel
from .. import registries
from ..const import (
REPORT_CONFIG_DEFAULT,
REPORT_CONFIG_IMMEDIATE,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_MIN_INT,
)
_LOGGER = logging.getLogger(__name__)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.FlowMeasurement.cluster_id)
class FlowMeasurement(AttributeListeningChannel):
    """Attribute-listening channel for the ZCL Flow Measurement cluster."""

    REPORT_CONFIG = [
        {"attr": "measured_value", "config": REPORT_CONFIG_DEFAULT}
    ]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
    measurement.IlluminanceLevelSensing.cluster_id
)
class IlluminanceLevelSensing(AttributeListeningChannel):
    """Attribute-listening channel for the ZCL Illuminance Level Sensing cluster."""

    REPORT_CONFIG = [
        {"attr": "level_status", "config": REPORT_CONFIG_DEFAULT}
    ]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
    measurement.IlluminanceMeasurement.cluster_id
)
class IlluminanceMeasurement(AttributeListeningChannel):
    """Attribute-listening channel for the ZCL Illuminance Measurement cluster."""

    REPORT_CONFIG = [
        {"attr": "measured_value", "config": REPORT_CONFIG_DEFAULT}
    ]
@registries.BINARY_SENSOR_CLUSTERS.register(measurement.OccupancySensing.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.OccupancySensing.cluster_id)
class OccupancySensing(AttributeListeningChannel):
    """Attribute-listening channel for the ZCL Occupancy Sensing cluster."""

    # Occupancy changes are reported immediately rather than batched.
    REPORT_CONFIG = [
        {"attr": "occupancy", "config": REPORT_CONFIG_IMMEDIATE}
    ]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.PressureMeasurement.cluster_id)
class PressureMeasurement(AttributeListeningChannel):
    """Attribute-listening channel for the ZCL Pressure Measurement cluster."""

    REPORT_CONFIG = [
        {"attr": "measured_value", "config": REPORT_CONFIG_DEFAULT}
    ]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.RelativeHumidity.cluster_id)
class RelativeHumidity(AttributeListeningChannel):
    """Attribute-listening channel for the ZCL Relative Humidity cluster."""

    # Reportable change of 50 between the default min/max intervals —
    # presumably 0.5% given the cluster's 0.01% units; TODO confirm.
    REPORT_CONFIG = [
        {"attr": "measured_value",
         "config": (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 50)}
    ]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
    measurement.TemperatureMeasurement.cluster_id
)
class TemperatureMeasurement(AttributeListeningChannel):
    """Attribute-listening channel for the ZCL Temperature Measurement cluster."""

    # Reportable change of 50 between the default min/max intervals —
    # presumably 0.5°C given the cluster's 0.01°C units; TODO confirm.
    REPORT_CONFIG = [
        {"attr": "measured_value",
         "config": (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 50)}
    ]
| {
"content_hash": "375305f5a525cb523d7afc876787db6a",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 88,
"avg_line_length": 30.662790697674417,
"alnum_prop": 0.7375805839969662,
"repo_name": "joopert/home-assistant",
"id": "369ecb69aa1020e8f10e33ee540451788dfbc0d1",
"size": "2637",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zha/core/channels/measurement.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
import unittest
import experiments.ud_xilinx.command as UdBoardCommand
from experiments.ud_xilinx.command import UdBoardSimpleCommand
from experiments.ud_xilinx.command import ChangeSwitchCommand, SetPulseCommand, ClockActivationCommand, ClockDeactivationCommand
import experiments.ud_xilinx.exc as UdXilinxExperimentErrors
class UdBoardCommandTestCase(unittest.TestCase):
    """Tests for UD Xilinx board command parsing, rendering and code mapping.

    Note: assertEquals is a deprecated alias of assertEqual (removed in
    Python 3.12), so assertEqual is used throughout.
    """

    def test_udboard(self):
        """A compound command string parses into the expected code tuple."""
        cmd = UdBoardCommand.UdBoardCommand(
            "SetPulse on 3, ChangeSwitch on 0, ClockActivation on 250")
        codes = cmd.get_codes()
        self.assertEqual(codes, (27, 1, 32))

        # An unparseable command string must be rejected.
        self.assertRaises(
            UdXilinxExperimentErrors.InvalidUdBoardCommandError,
            UdBoardCommand.UdBoardCommand,
            "foo"
        )

    def test_str(self):
        """Each command object renders back to its canonical string form."""
        cases = [
            # ChangeSwitch
            ("ChangeSwitch on 0", ChangeSwitchCommand("on", 0)),
            ("ChangeSwitch off 0", ChangeSwitchCommand("off", 0)),
            ("ChangeSwitch on 9", ChangeSwitchCommand("on", 9)),
            ("ChangeSwitch off 9", ChangeSwitchCommand("off", 9)),
            # SetPulse
            ("SetPulse on 0", SetPulseCommand("on", 0)),
            ("SetPulse off 0", SetPulseCommand("off", 0)),
            ("SetPulse on 3", SetPulseCommand("on", 3)),
            ("SetPulse off 3", SetPulseCommand("off", 3)),
            # ClockActivation
            ("ClockActivation on 250", ClockActivationCommand(250)),
            ("ClockActivation on 500", ClockActivationCommand(500)),
            ("ClockActivation on 2000", ClockActivationCommand(2000)),
            ("ClockActivation off", ClockDeactivationCommand()),
        ]
        for expected, command in cases:
            self.assertEqual(expected, str(command))

    def test_bounds(self):
        """Boundary command strings map onto the expected numeric codes."""
        cases = [
            # ChangeSwitch
            (1, "ChangeSwitch on 0"),
            (2, "ChangeSwitch off 0"),
            (19, "ChangeSwitch on 9"),
            (20, "ChangeSwitch off 9"),
            # SetPulse
            (21, "SetPulse on 0"),
            (22, "SetPulse off 0"),
            (27, "SetPulse on 3"),
            (28, "SetPulse off 3"),
            # ClockActivation
            (32, "ClockActivation on 250"),
            (33, "ClockActivation on 500"),
            (36, "ClockActivation on 2000"),
            (37, "ClockActivation off"),
        ]
        for expected_code, command_text in cases:
            self.assertEqual(
                expected_code,
                UdBoardSimpleCommand.create(command_text).get_code()
            )
# Removed the long-dead, commented-out suite() helper; unittest discovers
# the TestCase on its own.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "14d0c674fd7a5b4110f8601f090f1fcf",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 128,
"avg_line_length": 29.437956204379564,
"alnum_prop": 0.5576493925117778,
"repo_name": "weblabdeusto/weblabdeusto",
"id": "7065fdab94e12056998a00067aae5d2a674ffcc1",
"size": "4411",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/src/test/unit/experiments/ud_xilinx/test_commands.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP.NET",
"bytes": "4785"
},
{
"name": "ActionScript",
"bytes": "8508"
},
{
"name": "Batchfile",
"bytes": "7753"
},
{
"name": "C",
"bytes": "19456"
},
{
"name": "C#",
"bytes": "315160"
},
{
"name": "C++",
"bytes": "9547"
},
{
"name": "CSS",
"bytes": "202991"
},
{
"name": "CoffeeScript",
"bytes": "39146"
},
{
"name": "Go",
"bytes": "7076"
},
{
"name": "HTML",
"bytes": "620835"
},
{
"name": "Java",
"bytes": "856300"
},
{
"name": "JavaScript",
"bytes": "1606001"
},
{
"name": "Less",
"bytes": "13422"
},
{
"name": "Makefile",
"bytes": "24995"
},
{
"name": "Mako",
"bytes": "1236"
},
{
"name": "PHP",
"bytes": "159985"
},
{
"name": "Python",
"bytes": "3739523"
},
{
"name": "Shell",
"bytes": "7880"
},
{
"name": "Smarty",
"bytes": "42585"
},
{
"name": "VHDL",
"bytes": "5874"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy
from ..supervised import svm
from ..supervised.classifier import ctransforms
def expected_impacts(D, labels, U):
    '''
    EIs = expected_impacts(D,labels,U)

    Compute the expected impact of labeling each element of U.

    Eis[i]: P(label[i] == 1) * IMPACT(label[i] == 1) + P(label[i] == 0) * IMPACT(label[i] == 0)

    where IMPACT is the number of predictions over U that flip when the
    candidate point is added to the training set with the given label.

    Parameters
    ----------
    D : array-like
        Labeled training examples (one row per example).
    labels : sequence
        Binary (0/1) labels for the rows of D; length must match D.
    U : array-like
        Unlabeled candidate examples to score.

    Returns
    -------
    impacts : list
        One expected-impact value per element of U.
    '''
    # Fixed typo in the assertion message ("lenght" -> "length").
    assert len(D) == len(labels), 'Nr of labeled examples should match length of labels vector'
    K = svm.rbf_kernel(20000)
    # Probability model: raw SVM decision values passed through a sigmoid.
    prob_classifier = ctransforms(svm.svm_raw(kernel=K, C=4), svm.svm_sigmoidal_correction())
    # Hard-label model used to measure how predictions over U change.
    label_classifier = ctransforms(svm.svm_raw(kernel=K, C=4), svm.svm_binary())
    prob_classifier.train(D, labels)
    u_probs = prob_classifier(U)
    u_labels = (u_probs > .5)
    impacts = []
    for u, p in zip(U, u_probs):
        # Progress indicator: one candidate scored per iteration.
        print(len(impacts))
        # Retrain once assuming u's label is 0, once assuming it is 1,
        # counting how many predictions over U flip in each scenario.
        label_classifier.train(numpy.vstack((D, u)), numpy.hstack((labels, [0])))
        u_labels_0 = label_classifier(U)
        label_classifier.train(numpy.vstack((D, u)), numpy.hstack((labels, [1])))
        u_labels_1 = label_classifier(U)
        e_impact = (1. - p) * (u_labels != u_labels_0).sum() + p * (u_labels != u_labels_1).sum()
        impacts.append(e_impact)
    return impacts
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
| {
"content_hash": "8e38a84096d0ebf4fa87b8a013378c4e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 96,
"avg_line_length": 33.526315789473685,
"alnum_prop": 0.6365777080062794,
"repo_name": "pombredanne/milk",
"id": "e97b70c9d8482e3067e3386fadd3b91d62f1a0e7",
"size": "2437",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "milk/active/eimpact.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "58673"
},
{
"name": "Makefile",
"bytes": "360"
},
{
"name": "Python",
"bytes": "276895"
},
{
"name": "Shell",
"bytes": "200"
}
],
"symlink_target": ""
} |
import os
import io
import unittest
import json
from . import chargeitemdefinition
from .fhirdate import FHIRDate
class ChargeItemDefinitionTests(unittest.TestCase):
    """Round-trip tests for the FHIR ChargeItemDefinition resource.

    Each test loads an example JSON file, checks its fields, serializes the
    parsed instance back to JSON, re-parses it, and checks the fields again.
    """
    def instantiate_from(self, filename):
        """Load `filename` from FHIR_UNITTEST_DATADIR and parse it as a ChargeItemDefinition."""
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("ChargeItemDefinition", js["resourceType"])
        return chargeitemdefinition.ChargeItemDefinition(js)
    def testChargeItemDefinition1(self):
        """Parse, check, serialize and re-check the device example."""
        inst = self.instantiate_from("chargeitemdefinition-device-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a ChargeItemDefinition instance")
        self.implChargeItemDefinition1(inst)
        js = inst.as_json()
        self.assertEqual("ChargeItemDefinition", js["resourceType"])
        inst2 = chargeitemdefinition.ChargeItemDefinition(js)
        self.implChargeItemDefinition1(inst2)
    def implChargeItemDefinition1(self, inst):
        """Field-by-field expectations for the device example."""
        self.assertEqual(inst.applicability[0].description, "Verify ChargeItem pertains to Device 12345")
        self.assertEqual(inst.applicability[0].expression, "%context.service.suppliedItem='Device/12345'")
        self.assertEqual(inst.applicability[0].language, "text/fhirpath")
        self.assertEqual(inst.description, "Financial details for custom made device")
        self.assertEqual(inst.id, "device")
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].amount.currency, "EUR")
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].amount.value, 67.44)
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].code, "VK")
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].display, "Verkaufspreis (netto)")
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].system, "http://fhir.de/CodeSystem/billing-attributes")
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].type, "base")
        self.assertEqual(inst.propertyGroup[1].applicability[0].description, "Gültigkeit Steuersatz")
        self.assertEqual(inst.propertyGroup[1].applicability[0].expression, "%context.occurenceDateTime > '2018-04-01'")
        self.assertEqual(inst.propertyGroup[1].applicability[0].language, "text/fhirpath")
        self.assertEqual(inst.propertyGroup[1].priceComponent[0].code.coding[0].code, "MWST")
        self.assertEqual(inst.propertyGroup[1].priceComponent[0].code.coding[0].display, "Mehrwersteuersatz")
        self.assertEqual(inst.propertyGroup[1].priceComponent[0].code.coding[0].system, "http://fhir.de/CodeSystem/billing-attributes")
        self.assertEqual(inst.propertyGroup[1].priceComponent[0].factor, 1.19)
        self.assertEqual(inst.propertyGroup[1].priceComponent[0].type, "tax")
        self.assertEqual(inst.propertyGroup[2].applicability[0].description, "Gültigkeit Steuersatz")
        self.assertEqual(inst.propertyGroup[2].applicability[0].expression, "%context.occurenceDateTime <= '2018-04-01'")
        self.assertEqual(inst.propertyGroup[2].applicability[0].language, "text/fhirpath")
        self.assertEqual(inst.propertyGroup[2].priceComponent[0].code.coding[0].code, "MWST")
        self.assertEqual(inst.propertyGroup[2].priceComponent[0].code.coding[0].display, "Mehrwersteuersatz")
        self.assertEqual(inst.propertyGroup[2].priceComponent[0].code.coding[0].system, "http://fhir.de/CodeSystem/billing-attributes")
        self.assertEqual(inst.propertyGroup[2].priceComponent[0].factor, 1.07)
        self.assertEqual(inst.propertyGroup[2].priceComponent[0].type, "tax")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.url, "http://sap.org/ChargeItemDefinition/device-123")
    def testChargeItemDefinition2(self):
        """Parse, check, serialize and re-check the EBM example."""
        inst = self.instantiate_from("chargeitemdefinition-ebm-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a ChargeItemDefinition instance")
        self.implChargeItemDefinition2(inst)
        js = inst.as_json()
        self.assertEqual("ChargeItemDefinition", js["resourceType"])
        inst2 = chargeitemdefinition.ChargeItemDefinition(js)
        self.implChargeItemDefinition2(inst2)
    def implChargeItemDefinition2(self, inst):
        """Field-by-field expectations for the EBM example."""
        self.assertEqual(inst.applicability[0].description, "Excludes billing code 13250 for same Encounter")
        self.assertEqual(inst.applicability[0].expression, "[some CQL expression]")
        self.assertEqual(inst.applicability[0].language, "text/cql")
        self.assertEqual(inst.applicability[1].description, "Applies only once per Encounter")
        self.assertEqual(inst.applicability[1].expression, "[some CQL expression]")
        self.assertEqual(inst.applicability[1].language, "text/CQL")
        self.assertEqual(inst.code.coding[0].code, "30110")
        self.assertEqual(inst.code.coding[0].display, "Allergologiediagnostik I")
        self.assertEqual(inst.code.coding[0].system, "http://fhir.de/CodingSystem/kbv/ebm")
        self.assertEqual(inst.description, "Allergologisch-diagnostischer Komplex zur Diagnostik und/oder zum Ausschluss einer (Kontakt-)Allergie vom Spättyp (Typ IV), einschl. Kosten")
        self.assertEqual(inst.effectivePeriod.end.date, FHIRDate("2018-06-30").date)
        self.assertEqual(inst.effectivePeriod.end.as_json(), "2018-06-30")
        self.assertEqual(inst.effectivePeriod.start.date, FHIRDate("2018-04-01").date)
        self.assertEqual(inst.effectivePeriod.start.as_json(), "2018-04-01")
        self.assertEqual(inst.id, "ebm")
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].amount.currency, "EUR")
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].amount.value, 67.44)
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].code, "gesamt-euro")
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].display, "Gesamt (Euro)")
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].system, "http://fhir.de/CodeSystem/kbv/ebm-attribute")
        self.assertEqual(inst.propertyGroup[0].priceComponent[0].type, "base")
        self.assertEqual(inst.propertyGroup[0].priceComponent[1].code.coding[0].code, "gesamt-punkte")
        self.assertEqual(inst.propertyGroup[0].priceComponent[1].code.coding[0].display, "Gesamt (Punkte)")
        self.assertEqual(inst.propertyGroup[0].priceComponent[1].code.coding[0].system, "http://fhir.de/CodeSystem/kbv/ebm-attribute")
        self.assertEqual(inst.propertyGroup[0].priceComponent[1].factor, 633)
        self.assertEqual(inst.propertyGroup[0].priceComponent[1].type, "informational")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.url, "http://fhir.de/ChargeItemDefinition/kbv/ebm-30110")
        self.assertEqual(inst.version, "2-2018")
| {
"content_hash": "38b0371d5d5ed3ff0042254e896a339f",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 185,
"avg_line_length": 70.63,
"alnum_prop": 0.7217896078153759,
"repo_name": "all-of-us/raw-data-repository",
"id": "79ef9a423ce71bb7c0c629c8aa96c240fd94dd49",
"size": "7197",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/lib_fhir/fhirclient_4_0_0/models/chargeitemdefinition_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
import hashlib
from django.db import models
from django.conf import settings
from django.db.models import signals
from django.contrib.auth.models import User
from django.db.models import Q
from cms.managers import BlogPostManager
from cms.storage import OverwriteStorage
import os
MEMBER_IMAGE_FOLDER = 'member'
def make_member_image_name(instance, filename):
    """Build the storage path member/<pk><ext> for a Member image."""
    # The primary key is part of the stored name, so the row must exist first.
    if instance.pk is None:
        raise Exception('save Member instance before saving ImageField')
    extension = os.path.splitext(filename)[1].lower()
    return os.path.join(MEMBER_IMAGE_FOLDER, '%s%s' % (instance.pk, extension))
PROJECT_IMAGE_FOLDER = 'project'
def make_project_image_name(instance, filename):
    """Build the storage path project/<pk><ext> for a Project image."""
    # The file name embeds the pk, so the instance must be saved beforehand.
    if instance.pk is None:
        raise Exception('save Project instance before saving ImageField')
    _, ext = os.path.splitext(filename)
    return os.path.join(PROJECT_IMAGE_FOLDER, str(instance.pk) + ext.lower())
SPONSOR_IMAGE_FOLDER = 'sponsor'
def make_sponsor_image_name(instance, filename):
    """Build the storage path sponsor/<pk><ext> for a Sponsor image."""
    # A saved row (with a pk) is required to derive the file name.
    if instance.pk is None:
        raise Exception('save Sponsor instance before saving ImageField')
    suffix = os.path.splitext(filename)[1].lower()
    return os.path.join(SPONSOR_IMAGE_FOLDER, '{0}{1}'.format(instance.pk, suffix))
class Member(models.Model):
    """Profile attached one-to-one to a Django auth User."""
    GROUP_CHOICES = (
        (u'graduate', u'Graduate'),
        (u'undergraduate', u'Undergraduate'),
        (u'faculty', u'Faculty')
    )
    CLASS_CHOICES = (
        (u'freshman', u'Freshman'),
        (u'sophomore', u'Sophomore'),
        (u'junior', u'Junior'),
        (u'senior', u'Senior')
    )
    STATUS_EMPTY = 0 # this member is pending "creation" by its owner
    STATUS_ACTIVE = 1 # this member has been created
    STATUS_ARCHIVED = 2 # this member has been archived and is frozen
    STATUS_CHOICES = (
        (STATUS_EMPTY, u'Empty'),
        (STATUS_ACTIVE, u'Active'),
        (STATUS_ARCHIVED, u'Archived')
    )
    # unique=True makes this effectively a one-to-one link to User.
    user = models.ForeignKey(User, related_name='profile', unique=True)
    group = models.CharField(max_length=255, choices=GROUP_CHOICES)
    # classification only applies to undergraduates; blank otherwise.
    classification = models.CharField(max_length=255, choices=CLASS_CHOICES, blank=True)
    hometown = models.CharField(max_length=255, blank=True)
    interests = models.TextField(blank=True)
    homepage = models.URLField(blank=True)
    blurb = models.TextField(blank=True)
    image = models.ImageField(upload_to=make_member_image_name, storage=OverwriteStorage(), blank=True)
    status = models.IntegerField(choices=STATUS_CHOICES)
    #activation_key = models.CharField(max_length=255, blank=True)
    def __unicode__(self):
        return unicode(self.user.get_full_name())
    @models.permalink
    def get_absolute_url(self):
        return ('cms:profile_url', (self.pk,), {})
    def generate_hashed_email(self):
        # md5 hex digest of the user's e-mail address
        # (presumably for Gravatar-style avatar lookup -- confirm).
        return hashlib.md5(self.user.email).hexdigest()
    def get_coordinated_projects(self):
        """Return the projects this member coordinates."""
        return Project.objects.filter(pk__in=ProjectMember.objects.filter(member__pk=self.pk, is_coordinator=True).values_list('project__pk', flat=True))
    @staticmethod
    def get_possible_project_members():
        """Return all non-archived members, ordered by name."""
        # need to allow member one year old as well since activity status is predicated on project membership
        # but you can't create a project for a new year without a project coordinator
        # hence, the one year offset avoids the chicken-and-the-egg problem
        #return Member.objects.filter(Q(status=Member.STATUS_ACTIVE) | Q(pk__in=ProjectMember.objects.filter(project__year__gte= \
        #    settings.CURRENT_YEAR-1).distinct().values_list('member'))).order_by('user__first_name', 'user__last_name')
        return Member.objects.exclude(status=Member.STATUS_ARCHIVED).order_by('user__first_name', 'user__last_name')
    class Meta:
        verbose_name = 'member'
        verbose_name_plural = 'members'
        ordering = ['user__first_name']
class Project(models.Model):
    """A yearly project with members linked through ProjectMember."""
    STATUS_EMPTY = 0 # this project is pending "creation" by coordinator
    STATUS_ACTIVE = 1 # this project has been created and is editable by coordinators
    STATUS_ARCHIVED = 2 # this project has been archived and is frozen
    STATUS_CHOICES = (
        (STATUS_EMPTY, u'Empty'),
        (STATUS_ACTIVE, u'Active'),
        (STATUS_ARCHIVED, u'Archived')
    )
    CATEGORY_OTHER = 0
    CATEGORY_OUTREACH = 1
    CATEGORY_RESEARCH = 2
    CATEGORY_SERVICE = 3
    CATEGORY_INTERNSHIP = 4
    CATEGORY_ORGANIZATIONAL = 5
    CATEGORY_CHOICES = (
        (CATEGORY_OUTREACH, u'Outreach'),
        (CATEGORY_RESEARCH, u'Research'),
        (CATEGORY_SERVICE, u'Service'),
        (CATEGORY_INTERNSHIP, u'Internship'),
        (CATEGORY_ORGANIZATIONAL, u'Organizational'),
        (CATEGORY_OTHER, u'Other'),
    )
    title = models.CharField(max_length=255)
    description = models.TextField()
    image = models.ImageField(upload_to=make_project_image_name, storage=OverwriteStorage(), blank=True)
    #active = models.BooleanField(default=False)
    status = models.IntegerField(choices=STATUS_CHOICES)
    category = models.IntegerField(choices=CATEGORY_CHOICES, default=CATEGORY_OTHER)
    year = models.IntegerField()
    members = models.ManyToManyField(Member, through='ProjectMember')
    # Link to the previous year's edition of the same project, if any.
    parent = models.ForeignKey('self', blank=True, null=True, on_delete=models.SET_NULL)
    def __unicode__(self):
        return unicode('%s %d' % (self.title, self.year))
    @models.permalink
    def get_absolute_url(self):
        return ('cms:projects_year_pk_url', (), {'year': self.year, 'pk': self.pk})
    def is_member_coordinator(self, member):
        """Return True if `member` is registered as a coordinator of this project."""
        return ProjectMember.objects.filter(member__pk=member.pk, project__pk=self.pk, is_coordinator=True).count() != 0
    class Meta:
        verbose_name = 'project'
        verbose_name_plural = 'projects'
class ProjectMember(models.Model):
    """Through-model linking a Project to a Member or an outside volunteer."""
    project = models.ForeignKey(Project)
    # member is null for outside volunteers; volunteer_name holds their name.
    member = models.ForeignKey(Member, blank=True, null=True)
    role = models.CharField(max_length=255, blank=True)
    volunteer_name = models.CharField(max_length=255, blank=True)
    is_coordinator = models.BooleanField()
    class Meta:
        ordering = [ '-is_coordinator', 'member__user__first_name', 'volunteer_name' ]
    def is_volunteer(self):
        """Return True when this row represents a volunteer (no linked Member)."""
        return self.member is None
    def get_full_name(self):
        """Return the display name: the volunteer's name, or the member's full name."""
        # Bug fix: is_volunteer is a method; the previous bare reference
        # `self.is_volunteer` was always truthy, so members' names were
        # never returned by this branch.
        if self.is_volunteer():
            return unicode(self.volunteer_name)
        else:
            return self.member.user.get_full_name()
    def __unicode__(self):
        if self.role:
            return unicode('%s (%s)' % (self.get_full_name(), self.role))
        else:
            return self.get_full_name()
class News(models.Model):
    """A dated news article with a short description and full content."""
    title = models.CharField(max_length=255)
    # Short teaser shown in listings (capped at 200 characters).
    description = models.TextField(max_length=200)
    content = models.TextField()
    date = models.DateTimeField()
    slug = models.SlugField(max_length=100)
    def __unicode__(self):
        return unicode(self.title)
    class Meta:
        verbose_name = 'news'
        verbose_name_plural = 'news'
class Page(models.Model):
    """A static CMS page, optionally surfaced on the homepage or top menu."""
    title = models.CharField(max_length=255)
    content = models.TextField()
    slug = models.SlugField(max_length=100, unique=True)
    # Ordering weight for menu placement (lower presumably sorts first -- confirm in views).
    weight = models.IntegerField(default=0)
    pub_front_page = models.BooleanField(default=False, verbose_name='Publish on homepage')
    pub_menu = models.BooleanField(default=False, verbose_name='Publish on top menu')
    def __unicode__(self):
        return unicode(self.title)
    @models.permalink
    def get_absolute_url(self):
        return ('cms:page_url', (), {'slug': self.slug})
class Tag(models.Model):
    """A simple name tag attached to blog posts."""
    name = models.CharField(max_length=255)
    def __unicode__(self):
        return unicode(self.name)
class BlogPost(models.Model):
    """A member-authored blog post, newest first."""
    author = models.ForeignKey(Member)
    title = models.CharField(max_length=255)
    # date is set once at creation; edit_date updates on every save.
    date = models.DateTimeField(auto_now_add=True)
    edit_date = models.DateTimeField(auto_now=True)
    post = models.TextField(help_text='HTML is allowed')
    tags = models.ManyToManyField(Tag, blank=True, related_name='blogposts')
    objects = BlogPostManager()
    @models.permalink
    def get_absolute_url(self):
        return ('cms:blog_post_url', (), {'pk': self.author.pk, 'blog_pk': self.pk})
    def __unicode__(self):
        return unicode('%s by %s' % (self.title, self.author))
    class Meta:
        verbose_name = 'blog post'
        verbose_name_plural = 'blog posts'
        ordering = ['-date']
class Sponsor(models.Model):
    """A sponsor with a logo image stored under sponsor/<pk>."""
    name = models.CharField(max_length=255)
    image = models.ImageField(upload_to=make_sponsor_image_name, storage=OverwriteStorage(), blank=True)
    def __unicode__(self):
        return unicode(self.name)
    @models.permalink
    def get_absolute_url(self):
        # Bug fix: @models.permalink expects a (viewname, args, kwargs)
        # tuple; the previous `('cms:sponsors_url')` was just a
        # parenthesised string, so reverse() received its first character.
        return ('cms:sponsors_url', (), {})
    def save(self, *args, **kwargs):
        """Save twice: once without the image to obtain a pk, then with it.

        The upload_to callback (make_sponsor_image_name) needs a primary
        key to build the image file name, hence the double save.
        Accepts and forwards Django's save() arguments (force_insert,
        using, ...) to the first save for backward compatibility.
        """
        img_tmp = self.image
        self.image = None
        super(Sponsor, self).save(*args, **kwargs)
        self.image = img_tmp
        # The second save is an update of the now-existing row.
        return super(Sponsor, self).save()
    class Meta:
        verbose_name = 'sponsor'
        verbose_name_plural = 'sponsors'
| {
"content_hash": "b0bda611bd1268362c74eee83a00a6ec",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 153,
"avg_line_length": 37.97570850202429,
"alnum_prop": 0.6353944562899787,
"repo_name": "ncsu-stars/Stars-CMS",
"id": "96337c156b1cac9d8bfad6aec5b355c40eff1f87",
"size": "9380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "12004"
},
{
"name": "HTML",
"bytes": "60895"
},
{
"name": "JavaScript",
"bytes": "60468"
},
{
"name": "Python",
"bytes": "247430"
}
],
"symlink_target": ""
} |
""" Copyright 2015 Akamai Technologies, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Sample client for CCU
Note that in order for this to work you need to provision credentials
specifically for CCU - you cannot extend existing credentials to add
CCU as it's managed under "CCU" in the API credential system.
Configure->Organization->Manage APIs
Select "CCU APIs"
Create client collections/clients
Add authorization
Put the credentials in ~/.edgerc as demonstrated by api-kickstart/sample_edgerc
"""
import requests, logging, json
from http_calls import EdgeGridHttpCaller
from random import randint
from akamai.edgegrid import EdgeGridAuth
from config import EdgeGridConfig
from urlparse import urljoin
import urllib
import os
# Module-level setup: build an authenticated session from the "ccu"
# section of the EdgeGrid configuration (~/.edgerc).
session = requests.Session()
debug = False
verbose = False
section_name = "ccu"
# If all parameters are set already, use them. Otherwise
# use the config
config = EdgeGridConfig({"verbose":False},section_name)
if hasattr(config, "debug") and config.debug:
    debug = True
if hasattr(config, "verbose") and config.verbose:
    verbose = True
# Set the config options
session.auth = EdgeGridAuth(
            client_token=config.client_token,
            client_secret=config.client_secret,
            access_token=config.access_token
)
if hasattr(config, 'headers'):
    session.headers.update(config.headers)
baseurl = '%s://%s/' % ('https', config.host)
# Shared helper that signs and issues all API calls below.
httpCaller = EdgeGridHttpCaller(session, debug, verbose, baseurl)
def getQueue():
    """Fetch the default CCU purge queue and report its current length."""
    queue_status = httpCaller.getResult('/ccu/v2/queues/default')
    queue_length = int(queue_status['queueLength'])
    print ("The queue currently has %s items in it" % queue_length)
def checkProgress(resource):
    """Return the purge status object for the given progress URI."""
    return httpCaller.getResult(resource)
def postPurgeRequest(action = "invalidate"):
    """Queue a purge ("invalidate" by default) for the sample URL.

    :param action: CCU purge action, e.g. "invalidate" or "remove".
    :returns: the API response, including the progressUri to poll.
    """
    purge_obj = {
        "action" : action,
        "objects" : [
            "http://bc.akamaiapibootcamp.com/index.html"
        ]
    }
    # Bug fix: the message previously interpolated the builtin `type`
    # instead of the requested action.
    print ("Adding %s to queue - %s" % (action, json.dumps(purge_obj)))
    purge_post_result = httpCaller.postResult('/ccu/v2/queues/default', json.dumps(purge_obj))
    return purge_post_result
if __name__ == "__main__":
    # Demo flow: show the queue length, enqueue an invalidation, then
    # poll the returned progress URI for the recommended wait time.
    getQueue()
    purge_post_result = postPurgeRequest()
    check_result = checkProgress(purge_post_result["progressUri"])
    seconds_to_wait = check_result['pingAfterSeconds']
    print "You should wait %s seconds before checking queue again..." % seconds_to_wait
| {
"content_hash": "af159971a2694292a88a0dbe08a2e8a9",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 91,
"avg_line_length": 30.51578947368421,
"alnum_prop": 0.743704725767506,
"repo_name": "dshafik/api-kickstart",
"id": "17dc4c2565630ca3209095af4f856ea7ee75da32",
"size": "2922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/python/ccu.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "27281"
}
],
"symlink_target": ""
} |
import termcolor
import sys
class StrUtils:
    """Static helpers for fixed-width, colorized console log output."""

    def __init__(self):
        pass

    @staticmethod
    def print_log_row(op_code, url, status_code, documented_reason, body, curlcommand):
        """Print one column-aligned log row for a fuzzed request.

        :param curlcommand: either a plain string or an object whose
            .get() returns the curl command string.
        """
        # isinstance instead of `type(x) == str`: also accepts str subclasses.
        if isinstance(curlcommand, str):
            curlstr = curlcommand
        else:
            curlstr = curlcommand.get()
        print(termcolor.colored(StrUtils.fill_string_up_with_blanks(op_code, 7), color='red') + ' | ' +
              termcolor.colored(StrUtils.fill_string_up_with_blanks(url, 100), color='green') + ' |-| ' +
              StrUtils.fill_string_up_with_blanks(status_code, 3) + ' | ' +
              StrUtils.fill_string_up_with_blanks(documented_reason, 20) + ' | ' +
              body + ' | ' + curlstr)

    @staticmethod
    def fill_string_up_with_blanks(fillup_string, num_chars):
        """Right-pad fillup_string with spaces to num_chars (never truncates).

        None is rendered as the literal 'None ' regardless of num_chars,
        matching the original behavior.
        """
        if fillup_string is None:
            return 'None '
        # str.ljust pads to the target width and leaves longer strings
        # untouched -- identical to the original manual padding loop.
        return fillup_string.ljust(num_chars)
| {
"content_hash": "4dcf18dd30770dc60e6f734dfb622ec4",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 105,
"avg_line_length": 34.60606060606061,
"alnum_prop": 0.5674255691768827,
"repo_name": "Teebytes/TnT-Fuzzer",
"id": "833059a12a75ffd4db20c5c7f08885fe705577d2",
"size": "1142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tntfuzzer/utils/strutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "295"
},
{
"name": "Python",
"bytes": "51826"
}
],
"symlink_target": ""
} |
""" This package contains the WorkTree object. """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import abc
import ntpath
import locale
import operator
import posixpath
import six
import qibuild.config
import qisys.sh
import qisys.qixml
import qisys.project
import qisys.command
from qisys import ui
class WorkTree(object):
    """ This class represent a :term:`worktree`. """
    def __init__(self, root, sanity_check=True):
        """
        Construct a new worktree
        :param root: The root directory of the worktree.
        :param allow_nested: Allow nested worktrees.
        """
        # NOTE(review): the docstring mentions `allow_nested` but the actual
        # parameter is `sanity_check` -- confirm which name is intended.
        if six.PY2 and isinstance(root, str):
            root = root.decode("utf-8")
        if not os.path.exists(root):
            raise Exception(""" \
Could not open WorkTree in {0}.
This path does not exist
""".format(root))
        self._observers = list()
        self.root = root
        self.cache = self.load_cache()
        # Re-parse every qiproject.xml to visit the subprojects
        self.projects = list()
        self.load_projects()
        if sanity_check:
            self.check()
        self.register_self()
    def register_self(self):
        """ Register to the global list of all worktrees in
        ~/.config/qi/qibuild.xml
        """
        qibuild_cfg = qibuild.config.QiBuildConfig()
        to_read = qibuild.config.get_global_cfg_path()
        qibuild_cfg.read(to_read, create_if_missing=True)
        # Only write the config back if this worktree was not known yet.
        if qibuild_cfg.add_worktree(self.root):
            qibuild_cfg.write()
    def register(self, observer):
        """ Called when an observer wants to be notified about project changes """
        self._observers.append(observer)
    def load_cache(self):
        """ Load the worktree.xml file. """
        self.projects = list()
        # Create an empty cache file on first use.
        if not os.path.exists(self.worktree_xml):
            qisys.sh.mkdir(self.dot_qi)
            with open(self.worktree_xml, "w") as fp:
                fp.write("<worktree />\n")
        cache = WorkTreeCache(self.worktree_xml)
        # Remove non-existing sources
        for src in cache.get_srcs():
            if not os.path.exists(os.path.join(self.root, src)):
                cache.remove_src(src)
        return cache
    def reload(self):
        """
        Re-read every qiproject.xml.
        Useful when projects are added or removed, or when qiproject.xml change
        """
        self.cache = self.load_cache()
        self.load_projects()
        for observer in self._observers:
            observer.reload()
    def check(self):
        """ Perform a few sanity checks """
        # Check that we are not in an other worktree:
        parent_worktree = guess_worktree(os.path.join(self.root, ".."))
        if parent_worktree and parent_worktree != self.root:
            ui.warning("""Nested worktrees detected:
{0} is already in a worktree
(in {1})
""".format(self.root, parent_worktree))
    @property
    def dot_qi(self):
        """ Get the dot_qi directory. """
        # Created on demand so callers never see a missing .qi directory.
        res = os.path.join(self.root, ".qi")
        qisys.sh.mkdir(res)
        return res
    @property
    def worktree_xml(self):
        """ Get the path to .qi/worktree.xml """
        worktree_xml = os.path.join(self.dot_qi, "worktree.xml")
        if not os.path.exists(worktree_xml):
            with open(worktree_xml, "w") as fp:
                fp.write("<worktree />")
        return worktree_xml
    def has_project(self, path):
        """ Return True if the Path is a Projet """
        src = self.normalize_path(path)
        srcs = (self.normalize_path(p.src) for p in self.projects)
        return src in srcs
    def load_projects(self):
        """ For every project in cache, re-read the subprojects and and them to the list """
        self.projects = list()
        srcs = self.cache.get_srcs()
        for src in srcs:
            project = qisys.project.WorkTreeProject(self, src)
            project.parse_qiproject_xml()
            self.projects.append(project)
        # Use a set to de-duplicate projects found via several parents.
        res = set(self.projects)
        for project in self.projects:
            self._rec_parse_sub_projects(project, res)
        self.projects = sorted(res, key=operator.attrgetter("src"))
    def _rec_parse_sub_projects(self, project, res):
        """ Recursively parse every project and subproject, filling up the res list. """
        for sub_project_src in project.subprojects:
            src = os.path.join(project.src, sub_project_src)
            src = qisys.sh.to_posix_path(src)
            sub_project = qisys.project.WorkTreeProject(self, src)
            sub_project.parse_qiproject_xml()
            res.add(sub_project)
            self._rec_parse_sub_projects(sub_project, res)
    def get_project(self, src, raises=False):
        """
        Get a project
        :param src: a absolute path, or a path relative to the worktree
        :param raises: Raises if project is not found
        :returns: a :py:class:`.WorkTreeProject` instance or None if raises is
            False and project is not found
        """
        src = self.normalize_path(src)
        if not self.has_project(src):
            if not raises:
                return None
            mess = ui.did_you_mean("No project in '%s'\n" % src,
                                   src, [self.normalize_path(x.src) for x in self.projects])
            raise WorkTreeError(mess)
        match = (p for p in self.projects if self.normalize_path(p.src) == src)
        # Generator protocol differs between Python 2 and 3.
        if six.PY3:
            return match.__next__()
        return match.next()
    def add_project(self, path):
        """
        Add a project to a worktree
        :param path: path to the project, can be absolute,
            or relative to the worktree root
        """
        src = self.normalize_path(path)
        if self.has_project(src):
            mess = "Could not add project to worktree\n"
            mess += "Path %s is already registered\n" % src
            mess += "Current worktree: %s" % self.root
            raise WorkTreeError(mess)
        self.cache.add_src(src)
        self.load_projects()
        project = self.get_project(src)
        for observer in self._observers:
            observer.reload()
        return project
    def remove_project(self, path, from_disk=False):
        """
        Remove a project from a worktree
        :param path: path to the project, can be absolute,
            or relative to the worktree root
        :param from_disk: also erase project files from disk
        """
        src = self.normalize_path(path)
        project = self.get_project(src, raises=True)
        if from_disk:
            qisys.sh.rm(project.path)
        self.cache.remove_src(src)
        self.load_projects()
        for observer in self._observers:
            observer.reload()
    def move_project(self, path, new_path):
        """ Move a project from a worktree """
        src = self.normalize_path(path)
        new_src = self.normalize_path(new_path)
        # Existence check only; the project object itself is unused.
        __project = self.get_project(src, raises=True)
        if self.has_project(new_src):
            mess = "Could not move project\n"
            mess += "Path %s is already registered\n" % src
            mess += "Current worktree: %s" % self.root
            raise WorkTreeError(mess)
        self.cache.remove_src(src)
        self.cache.add_src(new_src)
        self.load_projects()
        for observer in self._observers:
            observer.reload()
    def normalize_path(self, path):
        """ Make sure the path is a POSIX path, relative to the worktree root """
        if os.path.isabs(path):
            path = os.path.relpath(path, start=self.root)
        path = path.replace(ntpath.sep, posixpath.sep)
        # Windows paths are case-insensitive; normalize for comparison.
        if os.name == 'nt':
            path = path.lower()
        return path
    def __repr__(self):
        """ String Representation """
        res = "<WorkTree in %s\n" % self.root
        res += repr_list_projects(self.projects)
        res += ">\n"
        return res
def repr_list_projects(projects, name="projects"):
    """ Return a one-line description of a project list, or "" when empty """
    if not projects:
        return ""
    chunks = [name]
    chunks.extend("(%s) %s, " % (index, project.src)
                  for index, project in enumerate(projects))
    chunks.append("\n")
    return "".join(chunks)
def is_worktree(path):
    """ Return True if path contains a .qi directory """
    dot_qi = os.path.join(path, ".qi")
    return os.path.isdir(dot_qi)
def guess_worktree(cwd=None, raises=False):
    """ Walk up from cwd until a directory containing .qi is found.

    :returns: the worktree root, or None when no ancestor is a worktree
        (unless raises is True, in which case NotInWorkTree is raised)
    """
    if cwd is None:
        cwd = os.getcwd()
    head = qisys.sh.to_native_path(cwd)
    tail = True
    while tail:
        if is_worktree(head):
            return head
        head, tail = os.path.split(head)
    if raises:
        raise NotInWorkTree()
    return None
class WorkTreeObserver(object):
    """
    To be subclasses for objects willing to be notified
    when a project is added or removed from the worktree
    """
    # NOTE(review): `__metaclass__` only has an effect on Python 2; on
    # Python 3 this class is not actually abstract. Confirm whether
    # six.with_metaclass should be used for cross-version support.
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def reload(self):
        """ Called after the worktree's project list has changed """
        pass
class WorkTreeCache(object):
    """ Cache the paths to all the projects registered in a worktree,
    backed by the worktree.xml file
    """
    def __init__(self, xml_path):
        """ Parse the cache file at xml_path """
        self.xml_path = xml_path
        self.xml_root = qisys.qixml.read(xml_path).getroot()
    def _flush(self):
        """ Write the in-memory tree back to disk """
        qisys.qixml.write(self.xml_root, self.xml_path)
    def add_src(self, src):
        """ Add a new source to the cache """
        elem = qisys.qixml.etree.Element("project")
        elem.set("src", src)
        self.xml_root.append(elem)
        self._flush()
    def remove_src(self, src):
        """ Remove one source from the cache """
        for elem in self.xml_root.findall("project"):
            if elem.get("src") == src:
                self.xml_root.remove(elem)
        self._flush()
    def get_srcs(self):
        """ Get all the sources registered in the cache """
        return [qisys.qixml.parse_required_attr(elem, "src")
                for elem in self.xml_root.findall("project")]
class WorkTreeError(Exception):
    """ Base error for worktree operations (duplicate/missing projects). """
class NotInWorkTree(Exception):
    """ Raised when no enclosing worktree can be found. """
    def __str__(self):
        """ String Representation """
        return """ Could not guess worktree from current working directory
Here is what you can do :
 - try from a valid work tree
 - specify an existing work tree with --work-tree PATH
 - create a new work tree with `qibuild init`
"""
class NoSuchProject(Exception):
    """ Raised when a named project cannot be found """
    def __init__(self, name, message):
        """ Store the missing project's name and a human-readable message """
        super(NoSuchProject, self).__init__()
        self.name = name
        self.message = message
    def __str__(self):
        """ The stored message is the whole representation """
        return self.message
| {
"content_hash": "5562b44a059406842f142e0af57822c0",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 92,
"avg_line_length": 33.13095238095238,
"alnum_prop": 0.5885734818541143,
"repo_name": "aldebaran/qibuild",
"id": "10dc650a601d7a23ac22b2f51a6ded5e78a23a16",
"size": "11329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/qisys/worktree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6892"
},
{
"name": "C++",
"bytes": "23130"
},
{
"name": "CMake",
"bytes": "292637"
},
{
"name": "Makefile",
"bytes": "755"
},
{
"name": "Nix",
"bytes": "563"
},
{
"name": "Python",
"bytes": "1581825"
},
{
"name": "SWIG",
"bytes": "306"
},
{
"name": "Shell",
"bytes": "888"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
import os.path
import re
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.compat import samefile
from pip._internal.exceptions import BadCommand
from pip._internal.utils.misc import display_path
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.vcs import VersionControl, vcs
# Short local aliases for the vendored urllib parse helpers used below.
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
# Forty hexadecimal characters: the textual form of a full git SHA-1.
# Note: used with .match(), so it is anchored at the start only.
HASH_REGEX = re.compile('[a-fA-F0-9]{40}')

def looks_like_hash(sha):
    """Return True if *sha* begins with 40 hex characters, like a git SHA."""
    return HASH_REGEX.match(sha) is not None
class Git(VersionControl):
    """pip's version-control backend for git repositories."""
    name = 'git'
    dirname = '.git'
    repo_name = 'clone'
    schemes = (
        'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
    )
    # Prevent the user's environment variables from interfering with pip:
    # https://github.com/pypa/pip/issues/1130
    unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')
    default_arg_rev = 'HEAD'
    def __init__(self, url=None, *args, **kwargs):
        # Works around an apparent Git bug
        # (see https://article.gmane.org/gmane.comp.version-control.git/146500)
        # For git+file URLs: convert backslashes to slashes and collapse
        # redundant leading slashes in the path before re-assembling the URL.
        if url:
            scheme, netloc, path, query, fragment = urlsplit(url)
            if scheme.endswith('file'):
                initial_slashes = path[:-len(path.lstrip('/'))]
                newpath = (
                    initial_slashes +
                    urllib_request.url2pathname(path)
                    .replace('\\', '/').lstrip('/')
                )
                url = urlunsplit((scheme, netloc, newpath, query, fragment))
                after_plus = scheme.find('+') + 1
                # Re-split around the '+' so the 'git+' prefix survives.
                url = scheme[:after_plus] + urlunsplit(
                    (scheme[after_plus:], netloc, newpath, query, fragment),
                )
        super(Git, self).__init__(url, *args, **kwargs)
    def get_base_rev_args(self, rev):
        # A git revision needs no extra flags; pass it through unchanged.
        return [rev]
    def get_git_version(self):
        """Return the installed git version as a packaging version object."""
        VERSION_PFX = 'git version '
        version = self.run_command(['version'], show_stdout=False)
        if version.startswith(VERSION_PFX):
            version = version[len(VERSION_PFX):].split()[0]
        else:
            version = ''
        # get first 3 positions of the git version because
        # on windows it is x.y.z.windows.t, and this parses as
        # LegacyVersion which is always smaller than a Version.
        version = '.'.join(version.split('.')[:3])
        return parse_version(version)
    def export(self, location):
        """Export the Git repository at the url to the destination location"""
        if not location.endswith('/'):
            location = location + '/'
        # Clone into a temp dir, then copy the work tree (without .git)
        # into `location` via checkout-index.
        with TempDirectory(kind="export") as temp_dir:
            self.unpack(temp_dir.path)
            self.run_command(
                ['checkout-index', '-a', '-f', '--prefix', location],
                show_stdout=False, cwd=temp_dir.path
            )
    def get_revision_sha(self, dest, rev):
        """
        Return a commit hash for the given revision if it names a remote
        branch or tag. Otherwise, return None.
        Args:
          dest: the repository directory.
          rev: the revision name.
        """
        # Pass rev to pre-filter the list.
        output = self.run_command(['show-ref', rev], cwd=dest,
                                  show_stdout=False, on_returncode='ignore')
        refs = {}
        for line in output.strip().splitlines():
            try:
                sha, ref = line.split()
            except ValueError:
                # Include the offending line to simplify troubleshooting if
                # this error ever occurs.
                raise ValueError('unexpected show-ref line: {!r}'.format(line))
            refs[ref] = sha
        branch_ref = 'refs/remotes/origin/{}'.format(rev)
        tag_ref = 'refs/tags/{}'.format(rev)
        # Prefer a matching remote branch over a tag of the same name.
        return refs.get(branch_ref) or refs.get(tag_ref)
    def check_rev_options(self, dest, rev_options):
        """Check the revision options before checkout.
        Returns a new RevOptions object for the SHA1 of the branch or tag
        if found.
        Args:
          rev_options: a RevOptions object.
        """
        rev = rev_options.arg_rev
        sha = self.get_revision_sha(dest, rev)
        if sha is not None:
            return rev_options.make_new(sha)
        # Do not show a warning for the common case of something that has
        # the form of a Git commit hash.
        if not looks_like_hash(rev):
            logger.warning(
                "Did not find branch or tag '%s', assuming revision or ref.",
                rev,
            )
        return rev_options
    def is_commit_id_equal(self, dest, name):
        """
        Return whether the current commit hash equals the given name.
        Args:
          dest: the repository directory.
          name: a string name.
        """
        if not name:
            # Then avoid an unnecessary subprocess call.
            return False
        return self.get_revision(dest) == name
    def fetch_new(self, dest, url, rev_options):
        # Clone `url` into `dest`, then check out the requested revision.
        rev_display = rev_options.to_display()
        logger.info(
            'Cloning %s%s to %s', url, rev_display, display_path(dest),
        )
        self.run_command(['clone', '-q', url, dest])
        if rev_options.rev:
            # Then a specific revision was requested.
            rev_options = self.check_rev_options(dest, rev_options)
            # Only do a checkout if the current commit id doesn't match
            # the requested revision.
            if not self.is_commit_id_equal(dest, rev_options.rev):
                rev = rev_options.rev
                # Only fetch the revision if it's a ref
                if rev.startswith('refs/'):
                    self.run_command(
                        ['fetch', '-q', url] + rev_options.to_args(),
                        cwd=dest,
                    )
                    # Change the revision to the SHA of the ref we fetched
                    rev = 'FETCH_HEAD'
                self.run_command(['checkout', '-q', rev], cwd=dest)
        #: repo may contain submodules
        self.update_submodules(dest)
    def switch(self, dest, url, rev_options):
        # Re-point the existing checkout at `url`, then check out the rev.
        self.run_command(['config', 'remote.origin.url', url], cwd=dest)
        cmd_args = ['checkout', '-q'] + rev_options.to_args()
        self.run_command(cmd_args, cwd=dest)
        self.update_submodules(dest)
    def update(self, dest, rev_options):
        # First fetch changes from the default remote
        if self.get_git_version() >= parse_version('1.9.0'):
            # fetch tags in addition to everything else
            self.run_command(['fetch', '-q', '--tags'], cwd=dest)
        else:
            self.run_command(['fetch', '-q'], cwd=dest)
        # Then reset to wanted revision (maybe even origin/master)
        rev_options = self.check_rev_options(dest, rev_options)
        cmd_args = ['reset', '--hard', '-q'] + rev_options.to_args()
        self.run_command(cmd_args, cwd=dest)
        #: update submodules
        self.update_submodules(dest)
    def get_url(self, location):
        """Return URL of the first remote encountered."""
        remotes = self.run_command(
            ['config', '--get-regexp', r'remote\..*\.url'],
            show_stdout=False, cwd=location,
        )
        remotes = remotes.splitlines()
        found_remote = remotes[0]
        # Prefer the "origin" remote when one is configured.
        for remote in remotes:
            if remote.startswith('remote.origin.url '):
                found_remote = remote
                break
        url = found_remote.split(' ')[1]
        return url.strip()
    def get_revision(self, location):
        # SHA of the currently checked-out commit.
        current_rev = self.run_command(
            ['rev-parse', 'HEAD'], show_stdout=False, cwd=location,
        )
        return current_rev.strip()
    def _get_subdirectory(self, location):
        """Return the relative path of setup.py to the git repo root."""
        # find the repo root
        git_dir = self.run_command(['rev-parse', '--git-dir'],
                                   show_stdout=False, cwd=location).strip()
        if not os.path.isabs(git_dir):
            git_dir = os.path.join(location, git_dir)
        root_dir = os.path.join(git_dir, '..')
        # find setup.py
        orig_location = location
        while not os.path.exists(os.path.join(location, 'setup.py')):
            last_location = location
            location = os.path.dirname(location)
            if location == last_location:
                # We've traversed up to the root of the filesystem without
                # finding setup.py
                logger.warning(
                    "Could not find setup.py for directory %s (tried all "
                    "parent directories)",
                    orig_location,
                )
                return None
        # relative path of setup.py to repo root
        if samefile(root_dir, location):
            return None
        return os.path.relpath(location, root_dir)
    def get_src_requirement(self, dist, location):
        # Build a 'git+URL@rev#egg=name' requirement string for `dist`.
        repo = self.get_url(location)
        if not repo.lower().startswith('git:'):
            repo = 'git+' + repo
        egg_project_name = dist.egg_name().split('-', 1)[0]
        if not repo:
            return None
        current_rev = self.get_revision(location)
        req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)
        subdirectory = self._get_subdirectory(location)
        if subdirectory:
            req += '&subdirectory=' + subdirectory
        return req
    def get_url_rev(self, url):
        """
        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes don't
        work with a ssh:// scheme (e.g. GitHub). But we need a scheme for
        parsing. Hence we remove it again afterwards and return it as a stub.
        """
        if '://' not in url:
            assert 'file:' not in url
            url = url.replace('git+', 'git+ssh://')
            url, rev = super(Git, self).get_url_rev(url)
            url = url.replace('ssh://', '')
        else:
            url, rev = super(Git, self).get_url_rev(url)
        return url, rev
    def update_submodules(self, location):
        # No-op unless the checkout actually declares submodules.
        if not os.path.exists(os.path.join(location, '.gitmodules')):
            return
        self.run_command(
            ['submodule', 'update', '--init', '--recursive', '-q'],
            cwd=location,
        )
    @classmethod
    def controls_location(cls, location):
        # True when `location` appears to live inside a git checkout.
        if super(Git, cls).controls_location(location):
            return True
        try:
            # 'git rev-parse' exits non-zero outside of a work tree.
            r = cls().run_command(['rev-parse'],
                                  cwd=location,
                                  show_stdout=False,
                                  on_returncode='ignore')
            return not r
        except BadCommand:
            logger.debug("could not determine if %s is under git control "
                         "because git is not available", location)
            return False
vcs.register(Git)
| {
"content_hash": "149d9e4a4d452fcf59d3e4ad3333a89c",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 79,
"avg_line_length": 36.48867313915858,
"alnum_prop": 0.5551219512195122,
"repo_name": "astaninger/speakout",
"id": "9ee2e012f95b21510e1f2695729db9cbd6bc9eb0",
"size": "11275",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "venv/lib/python3.6/site-packages/pip/_internal/vcs/git.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5936"
},
{
"name": "CSS",
"bytes": "13728"
},
{
"name": "HTML",
"bytes": "44680"
},
{
"name": "JavaScript",
"bytes": "7320"
},
{
"name": "Python",
"bytes": "4680248"
},
{
"name": "Shell",
"bytes": "3248"
}
],
"symlink_target": ""
} |
def f(x, initial=10, half_life=5.27):
    """Exponential decay of a radioactive sample.

    x: elapsed time (same units as half_life)
    initial: starting activity (default 10, as in the original)
    half_life: decay half-life (default 5.27, cobalt-60 in years)
    returns: float, activity remaining after time x
    """
    import math
    # exp(ln(0.5)/half_life * x) halves the value every `half_life` units.
    return initial * math.e ** (math.log(0.5) / half_life * x)
def f1(x, initial=400, half_life=3.66):
    """Exponential decay of a radioactive sample.

    x: elapsed time (same units as half_life)
    initial: starting activity (default 400, as in the original)
    half_life: decay half-life (default 3.66)
    returns: float, activity remaining after time x
    """
    import math
    # exp(ln(0.5)/half_life * x) halves the value every `half_life` units.
    return initial * math.e ** (math.log(0.5) / half_life * x)
def radiationExposure(start, stop, step):
    '''
    Riemann-sum approximation of total radiation exposure.

    Sums rectangles of width `step` whose heights come from f (defined
    elsewhere), walking from start up to (but excluding) stop.

    start: integer, the time at which exposure begins
    stop: integer, the time at which exposure ends
    step: float, the width of each rectangle; assumed to partition the
          interval evenly
    returns: float, the amount of radiation exposed to between start and
             stop times
    '''
    total = 0
    t = start
    while t < stop:
        total += f(t) * step
        t += step
    return total
# Spot checks over sample intervals (Python 2 print statements).
print radiationExposure(0, 5, 1)
print radiationExposure(5, 11, 1)
print radiationExposure(0, 11, 1)
print radiationExposure(0, 4, 0.25)
"content_hash": "48ec3559f847b73dc9fb3ab9da0d5bee",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 65,
"avg_line_length": 27.23076923076923,
"alnum_prop": 0.6290018832391714,
"repo_name": "ahmedraza007/6.00.1x-Introduction-to-Computer-Science-and-Programming-Using-Python",
"id": "3dc06c7642ea59eb188a18e75ea29d48c9f8bdac",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/radiationExposure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "217904"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, Optional, Mapping, Union
from .layered_mapping import LayeredMapping
def capture_context(
    context: Optional[Union[int, Mapping[str, Any]]] = 0
) -> Optional[Mapping[str, Any]]:
    """
    Explicitly capture the evaluation context for later formula
    materialisations.

    Useful for libraries wrapping Formulaic: it decouples context capture
    from the (possibly several frames removed) materialization call, while
    keeping Formulaic itself a black box.

    Args:
        context: Either a frame offset from the caller's frame (0, the
            default, captures the caller's scope) or an explicit mapping of
            variable names to values. Wrappers capturing user context must
            account for their own extra frames.

    Returns:
        The context to pass as `.get_model_matrix(..., context=<this>)`.
    """
    if not isinstance(context, int):
        # Already an explicit mapping (or None): hand it straight back.
        return context
    if not hasattr(sys, "_getframe"):
        # Interpreter without frame introspection support.
        return None  # pragma: no cover
    # +1 skips this function's own frame so the offset is caller-relative.
    frame = sys._getframe(context + 1)
    return LayeredMapping(frame.f_locals, frame.f_globals)
| {
"content_hash": "36bc8e424bae6cebabbd719386a963a2",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 80,
"avg_line_length": 44.54761904761905,
"alnum_prop": 0.6942811330839124,
"repo_name": "matthewwardrop/formulaic",
"id": "a1e4cb4952337f8d3ecc6d499c2483ce0f29cb63",
"size": "1871",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "formulaic/utils/context.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "372702"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
from genda.pysam_callbacks.allele_counter import AlleleCounter
from pysam.calignmentfile import AlignedSegment
def make_read(aligned_read, pos, cigar):
    """Position *aligned_read* at *pos* with *cigar* and return it."""
    aligned_read.pos, aligned_read.cigar = pos, cigar
    return aligned_read
class FakeAlignment(AlignedSegment):
    """ Minimal stand-in for a sequenced read.
    """
    def __init__(self, position, seq, cigar):
        """ Populate only the fields the allele counter inspects.
        """
        self.pos = position
        self.seq = seq
        self.cigar = cigar
        self.is_duplicate = False
        self.mapq = 255
        self.qual = 'I' * len(seq)
class testCounter(unittest.TestCase):
    """Unit tests for AlleleCounter on a synthetic 10 bp read.

    Counts are always compared in A, C, G, T order. The original asserted
    [A_n, G_n, G_n, T_n] -- duplicating G_n and omitting C_n -- which made
    the expected [0, 0, 1, 0] for a G call unsatisfiable; fixed to use C_n
    as the second element in every assertion.
    """
    #:TODO read actual bam file
    def setUp(self):
        # Read positions in BAM are zero-indexed while SNP positions are almost
        # always 1-indexed
        self.read = AlignedSegment()
        self.read.seq = "ATTAGGATAG"
        self.mapq = 255
        self.qual = len(self.read.seq) * 'I'
    def testRegular(self):
        # Position should be an G
        regular_read = make_read(self.read, 24, [(0,10)])
        test = AlleleCounter("chr1", 29)
        test(regular_read)
        np.testing.assert_equal(
            np.asarray([test.A_n, test.C_n,
                        test.G_n, test.T_n]),
            np.asarray([0,0,1,0]))
    def testSNPinIntron(self):
        # SNP falls inside the skipped (intron) CIGAR segment: nothing counted.
        snp_in_intron = make_read(self.read, 24, [(0,5), (3,5), (0,5)])
        test = AlleleCounter("chr1", 31)
        test(snp_in_intron)
        t = np.asarray([test.A_n, test.C_n,
                        test.G_n, test.T_n])
        np.testing.assert_equal(t, np.asarray([0,0,0,0]))
    def testSecondExon(self):
        # SNP lands in the second matched block after the skip: expect a T.
        snp_in_second_exon = make_read(self.read, 24, [(0,5),(3,5),(0,5)])
        test = AlleleCounter("chr1", 37)
        test(snp_in_second_exon)
        t = np.asarray([test.A_n, test.C_n,
                        test.G_n, test.T_n])
        np.testing.assert_equal(t, np.asarray([0,0,0,1]))
    """
    Not yet implemented
    def testIndels(self):
        indel = FakeAlignment(25, self.seq, [(0,20), (3, 45), (0,10)])
        pass
    """
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "6fa0618e315cd7a186adae30e3199fc8",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 79,
"avg_line_length": 27.641025641025642,
"alnum_prop": 0.5709647495361782,
"repo_name": "jeffhsu3/genda",
"id": "47b0084f440c9952a5e364573e66567267e6720f",
"size": "2156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/allele_counter_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2264"
},
{
"name": "Python",
"bytes": "298119"
}
],
"symlink_target": ""
} |
from celery import task
from openpds.core.models import Profile, Notification, Device
from bson import ObjectId
from pymongo import Connection
from django.conf import settings
import time
from datetime import date, timedelta
import json
import pdb
import math
import cluster
from gcm import GCM
from openpds.core.models import Profile
from SPARQLWrapper import SPARQLWrapper, JSON
from collections import Counter
# Shared MongoDB connection; host/port come from Django settings when set,
# otherwise pymongo falls back to its own defaults.
connection = Connection(
    host=getattr(settings, "MONGODB_HOST", None),
    port=getattr(settings, "MONGODB_PORT", None)
)
def getDBName(profile):
    """Return the per-user Mongo database name derived from the profile UUID."""
    uuid_part = str(profile.uuid).replace("-", "_")
    return "User_" + uuid_part
def getTopAccessPointsForTimeRange(collection, start, end):
    """Return the bssids of the (up to 5) strongest wifi probes in [start, end).

    Returns None when no wifi probe documents fall inside the window.
    """
    query = {"key": {"$regex": "WifiProbe$"},
             "time": {"$gte": start, "$lt": end}}
    cursor = collection.find(query)
    if cursor.count() == 0:
        return None
    readings = [doc["value"] for doc in cursor]
    if len(readings) < 5:
        return [reading["bssid"] for reading in readings]
    # Strongest first: higher signal level means a stronger access point.
    by_strength = sorted(readings, key=lambda value: -value["level"])
    return [reading["bssid"] for reading in by_strength[0:5]]
def getTopAccessPointsForUser(profile):
    """Collect the strongest wifi bssids seen by *profile* over recent days.

    Scans the user's funf collection in 10 minute windows, starting three
    days ago and stopping four hours before now, and returns the union of
    the top access points found in each window as a set of bssids.
    """
    funf = connection[getDBName(profile)]["funf"]
    currentTime = time.time()
    today = date.fromtimestamp(currentTime)
    firstTime = time.mktime((today - timedelta(days=3)).timetuple())
    # One window start every 600 s; stop 4 hours short of "now".
    startTimes = [start for start in range(int(firstTime), int(currentTime) - 3600*4, 600)]
    allTopAccessPoints = []
    for startTime in startTimes:
        topAccessPoints = getTopAccessPointsForTimeRange(funf, startTime, startTime + 600)
        if topAccessPoints is not None:
            allTopAccessPoints.extend(topAccessPoints)
    # set() alone suffices: sorting before building a set has no effect,
    # and callers only use len() and intersection() on the result.
    return set(allTopAccessPoints)
def getTopAccessPoints():
    """For every profile, measure how unique their top-wifi set is.

    Compares each user's set of top access points with every other user's
    and stores a "Uniqueness" answer document in that user's Mongo DB.
    (Python 2 module: note the `<>` operator and print statements.)
    """
    profiles = Profile.objects.all()
    accessPoints = {}
    intersections = {}
    notUnique = {}
    for profile in profiles:
        accessPoints[profile.uuid] = getTopAccessPointsForUser(profile)
    for profile in profiles:
        if len(accessPoints[profile.uuid]) > 0:
            unique = True
            intersections[profile.uuid] = {}
            notUnique[profile.uuid] = []
            maxIntersection = 0
            #print profile.uuid
            for profile2 in profiles:
                intersections[profile.uuid][profile2.uuid] = accessPoints[profile.uuid].intersection(accessPoints[profile2.uuid])
                notUnique[profile.uuid].extend(intersections[profile.uuid][profile2.uuid])
                # Only other users count toward (non-)uniqueness; the loop
                # also visits profile itself, which is skipped here.
                if profile.uuid <> profile2.uuid:
                    # Track the largest overlap with any other user.
                    if len(intersections[profile.uuid][profile2.uuid]) > maxIntersection:
                        maxIntersection = len(intersections[profile.uuid][profile2.uuid])
                    # Another user sharing all of our APs means not unique.
                    if len(intersections[profile.uuid][profile2.uuid]) >= len(accessPoints[profile.uuid]):
                        unique = False
            notUnique[profile.uuid] = set(notUnique[profile.uuid])
            # Replace any previous Uniqueness answer with a fresh document.
            answerCollection = connection[getDBName(profile)]["answer"]
            answerCollection.remove({ "key": "Uniqueness" })
            answer = { "key": "Uniqueness" }
            answer["value"] = { "message": "Your data uniquely identifies you" if unique else "Your data is not unique" }
            answer["value"]["wifi_count"] = len(accessPoints[profile.uuid])
            answer["value"]["unique_wifi_count"] = len(accessPoints[profile.uuid]) - maxIntersection
            answerCollection.save(answer)
            #print accessPoints[profile.uuid]
            print str(len(accessPoints[profile.uuid])) + "," + str(maxIntersection)
            #print intersections[profile.uuid]
            #print answer
            #print "\n"
| {
"content_hash": "a4a1ad494baf64edefa290b77d7dc918",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 129,
"avg_line_length": 42.86046511627907,
"alnum_prop": 0.6559956592512208,
"repo_name": "patcon/openPDS",
"id": "e648f0756d6c823b96e972e1dc4615f0be833813",
"size": "3686",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openpds/questions/unique_tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14624"
},
{
"name": "HTML",
"bytes": "58028"
},
{
"name": "JavaScript",
"bytes": "211226"
},
{
"name": "Python",
"bytes": "253483"
}
],
"symlink_target": ""
} |
"""
Script for generating a log file from the archive.
"""
import argparse
from datetime import datetime, timedelta
import os
import sys
try:
from ArchiverAccess.archive_data_file_creator import ArchiveDataFileCreator
except ImportError:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
from ArchiverAccess.archive_data_file_creator import ArchiveDataFileCreator
from ArchiverAccess.archive_time_period import ArchiveTimePeriod
from ArchiverAccess.archiver_data_source import ArchiverDataSource
from genie_python.mysql_abstraction_layer import SQLAbstraction
from server_common.utilities import parse_date_time_arg_exit_on_fail
def create_log(pv_names, time_period, filename, host="127.0.0.1"):
    """
    Write a log of the given PVs to *filename*: their initial values at the
    start of *time_period*, followed by every change during the period.
    Reads from the archive MySQL database on *host*.
    """
    sql_layer = SQLAbstraction("archive", "report", "$report", host=host)
    data_source = ArchiverDataSource(sql_layer)
    with open(filename, "w") as log_file:
        log_file.write("Initial values\n")
        initial = data_source.initial_values(pv_names, time_period.start_time)
        for pv_name, val in zip(pv_names, initial):
            log_file.write("{}, {}\n".format(pv_name, val))
        log_file.write("Changes for time period\n")
        changes = data_source.changes_generator(pv_names, time_period)
        for time_stamp, index, value in changes:
            stamp = time_stamp.strftime("%Y-%m-%dT%H:%M:%S.%f")
            log_file.write("{}, {}, {}\n".format(stamp, pv_names[index], value))
if __name__ == '__main__':
    # Command line entry point: parse arguments, build the archive time
    # period and write the requested log file.
    description = "Create a log of events from the archive. E.g. python log_event_generator.py " \
                  "--start_time 2018-01-10T09:00:00 --end_time 2018-01-11T8:10:00 --host ndximat " \
                  "--filename.csv " \
                  "IN:IMAT:MOT:MTR0101.RBV IN:IMAT:MOT:MTR0102.RBV"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--end_time", "-e", help="End time", required=True)
    parser.add_argument("--start_time", "-s", help="Start time for sample iso date, 2018-12-20T16:01:02", required=True)
    parser.add_argument("--host", default="localhost", help="Host to get data from defaults to localhost")
    parser.add_argument("--filename", "-f", default="log.log",
                        help="Filename to use for the log file.")
    parser.add_argument("--default_field", default="VAL",
                        help="If the pv has no field add this field to it.")
    parser.add_argument("pv_names", nargs="+", help="Each pv appearing in the data")
    args = parser.parse_args()
    data_start_time = parse_date_time_arg_exit_on_fail(args.start_time)
    data_end_time = parse_date_time_arg_exit_on_fail(args.end_time)
    # in the time period the time delta sets how close the change record end time is to the date end time
    the_time_period = ArchiveTimePeriod(data_start_time, timedelta(seconds=1), finish_time=data_end_time)
    create_log(args.pv_names, the_time_period, filename=args.filename, host=args.host)
| {
"content_hash": "763fe162d7d01bc9ea287532d8ac5e6f",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 120,
"avg_line_length": 46.6865671641791,
"alnum_prop": 0.6835038363171355,
"repo_name": "ISISComputingGroup/EPICS-inst_servers",
"id": "17a4f1be38df4b030a6e1591cadd5aed1e4bd6ed",
"size": "3974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/log_event_generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6451"
},
{
"name": "Python",
"bytes": "1060148"
},
{
"name": "Shell",
"bytes": "4460"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from django.db.models import get_model
from django.contrib.auth import login
from django.contrib.auth.models import User
from django.conf import settings
from la_facebook.la_fb_logging import logger
# Prefix used to namespace Facebook-derived identifiers stored locally.
LA_FACEBOOK_PROFILE_PREFIX = 'fb-'
# Facebook Graph API endpoint for the authenticated user's profile.
FACEBOOK_GRAPH_TARGET = "https://graph.facebook.com/me"
def get_default_redirect(request, fallback_url=settings.LOGIN_REDIRECT_URL, redirect_field_name="next", session_key_value="redirect_to"):
    """
    Work out where to send the user after login, checking in order:
    - a REQUEST value (GET or POST), named "next" by default
    - the session entry named by session_key_value, when a session exists
    - fallback_url (settings.LOGIN_REDIRECT_URL by default)
    """
    redirect_to = request.REQUEST.get(redirect_field_name)
    if not redirect_to and hasattr(request, "session"):
        redirect_to = request.session.get(session_key_value)
    # light security check -- make sure redirect_to isn't garbage.
    suspicious = not redirect_to or "://" in redirect_to or " " in redirect_to
    return fallback_url if suspicious else redirect_to
class BaseFacebookCallback(object):
    """Template for post-OAuth callbacks.

    Subclasses implement the fetch/lookup/redirect hooks below; __call__
    wires them together to resolve a django user and persist the token.
    """
    def __call__(self, request, access, token):
        # Anonymous requester: resolve (or let a hook create) the matching
        # django user from the fetched facebook profile data.
        if not request.user.is_authenticated():
            logger.debug("BaseFacebookCallback.__call__: request.user not authenticated")
            authenticated = False
            user_data = self.fetch_user_data(request, access, token)
            user = self.lookup_user(request, access, user_data)
            if user is None:
                logger.debug("BaseFacebookCallback.__call__: no existing django user found for this facebook identifier")
                ret = self.handle_no_user(request, access, token, user_data)
                # allow handle_no_user to create a user if need be
                if isinstance(ret, User):
                    logger.debug("BaseFacebookCallback.__call__: self.handle_no_user returned valid django user")
                    user = ret
            else:
                logger.debug("BaseFacebookCallback.__call__: existing django user found for this facebook identifier")
                ret = self.handle_unauthenticated_user(request, user, access, token, user_data)
                # A hook may short-circuit with its own HTTP response.
                if isinstance(ret, HttpResponse):
                    return ret
        else:
            logger.debug("BaseFacebookCallback.__call__: request.user is authenticated")
            authenticated = True
            user = request.user
        redirect_to = self.redirect_url(request)
        if user:
            kwargs = {}
            if not authenticated:
                kwargs["identifier"] = self.identifier_from_data(user_data)
            logger.debug("BaseFacebookCallback.__call__: Persisting user token")
            access.persist(user, token, **kwargs)
        return redirect(redirect_to)
    def fetch_user_data(self, request, access, token):
        # Subclass hook: retrieve profile data for the token's user.
        raise NotImplementedError("Callbacks must have a fetch_user_data method")
    def lookup_user(self, request, access, user_data):
        # Subclass hook: map facebook profile data to a django user (or None).
        raise NotImplementedError("Callbacks must have a lookup_user method")
    def redirect_url(self, request):
        # Subclass hook: where to send the user after the callback completes.
        raise NotImplementedError("Callbacks must have a redirect_url method")
    def handle_no_user(self, request, access, token, user_data):
        raise NotImplementedError("Callbacks must have a handle_no_user method")
    def handle_unauthenticated_user(self, request, user, access, token, user_data):
        raise NotImplementedError("Callbacks must have a handle_unauthenticated_user method")
    def update_profile_from_graph(self, request, access, token, profile):
        raise NotImplementedError("Callbacks must have a update_profile_from_graph method")
    def create_profile(self, request, access, token, user):
        raise NotImplementedError("Callbacks must have a create_profile method")
    def create_user(self, request, access, token, user_data):
        raise NotImplementedError("Callbacks must have a create_user method")
    def identifier_from_data(self, data):
        # @@@ currently this is being used to make/lookup users and we don't
        # want a clash between services. need to look into this more.
        return LA_FACEBOOK_PROFILE_PREFIX + data["id"]
    def login_user(self, request, user):
        """ Log *user* in via the default ModelBackend. """
        user.backend = "django.contrib.auth.backends.ModelBackend"
        # NOTE(review): the two adjacent string literals concatenate with no
        # space, producing "...user Xwith ModelBackend" in the log output.
        logger.debug("BaseFacebookCallback.login_user: logging in user %s" \
            "with ModelBackend" % str(user).strip())
        login(request, user)
| {
"content_hash": "e111a39208a0a12e7684340862ea7477",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 137,
"avg_line_length": 45.32380952380952,
"alnum_prop": 0.6604328640470687,
"repo_name": "pydanny/django-la-facebook",
"id": "9652e66db850044e83de6ed197ae8c35688508c3",
"size": "4759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "la_facebook/callbacks/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46421"
}
],
"symlink_target": ""
} |
"""
analysis.py
Functions to read and plot figures from the batch simulation results.
Can call readPlotNa() or readPlotNMDA() from __main__() or import analysis and call interactively.
"""
import utils
import json
import matplotlib.pyplot as plt
def plotfINa(dataFolder, batchLabel, params, data):
    """Plot an f-I curve per dendritic Na level; save rates (JSON) and figure (PNG)."""
    utils.setPlotFormat(numColors = 8)
    Pvals = params[0]['values']
    Ivals = params[1]['values']
    # Map each parameter value to its row/column index in the rate grid.
    Pindex_of = {val: i for i, val in enumerate(Pvals)}
    Iindex_of = {val: i for i, val in enumerate(Ivals)}
    rates = [[0] * len(Pvals) for _ in Ivals]
    for key, d in data.items():
        rate = len(d['simData']['spkt'])
        pcol = Pindex_of[d['paramValues'][0]]
        irow = Iindex_of[d['paramValues'][1]]
        rates[irow][pcol] = rate
        print(d['paramValues'])
        print(rate)
    filename = '%s/%s/%s_fIcurve.json' % (dataFolder, batchLabel, batchLabel)
    with open(filename, 'w') as fileObj:
        json.dump(rates, fileObj)
    plt.figure()
    handles = plt.plot(rates, marker='o', markersize=10)
    plt.xlabel('Somatic current injection (nA)')
    plt.xticks(list(range(len(Ivals)))[::2], Ivals[::2])
    plt.ylabel('Frequency (Hz)')
    plt.legend(handles, params[0]['values'], title = 'dend Na', loc=2)
    plt.savefig('%s/%s/%s_fIcurve.png' % (dataFolder, batchLabel, batchLabel))
    plt.show()
def plotNMDA(dataFolder, batchLabel, params, data, somaLabel='soma', stimRange=[5000,10000]):
    """Plot somatic EPSP amplitude vs NetStim weight per NMDA tau1; save JSON and PNG."""
    utils.setPlotFormat(numColors = 8)
    Pvals = params[0]['values']
    Wvals = params[1]['values']
    # Map each parameter value to its row/column index in the EPSP grid.
    Pindex_of = {val: i for i, val in enumerate(Pvals)}
    Windex_of = {val: i for i, val in enumerate(Wvals)}
    epsps = [[0] * len(Pvals) for _ in Wvals]
    for key, d in data.items():
        cellLabel = list(d['simData']['V_soma'].keys())[0]
        vsoma = d['simData']['V_' + somaLabel][cellLabel]
        # EPSP amplitude: peak within the stimulus window minus the
        # membrane potential just before the stimulus starts.
        epsp = max(vsoma[stimRange[0]:stimRange[1]]) - vsoma[stimRange[0] - 1]
        wrow = Windex_of[d['paramValues'][1]]
        pcol = Pindex_of[d['paramValues'][0]]
        epsps[wrow][pcol] = epsp
        print(d['paramValues'])
        print(epsp)
    filename = '%s/%s/%s_epsp.json' % (dataFolder, batchLabel, batchLabel)
    with open(filename, 'w') as fileObj:
        json.dump(epsps, fileObj)
    plt.figure()
    handles = plt.plot(epsps, marker='o', markersize=10)
    plt.xlabel('Weight (of NetStim connection)')
    plt.ylabel('Somatic EPSP amplitude (mV) in response to 1 NetStim spike')
    plt.xticks(list(range(len(Wvals)))[::2], Wvals[::2])
    plt.legend(handles, params[0]['values'], title = 'NMDA tau1 (ms)', loc=2)
    plt.savefig('%s/%s/%s_epsp.png' % (dataFolder, batchLabel, batchLabel))
    plt.show()
def readPlotNa():
    """Load the saved batchNa results and plot the f-I curve."""
    folder, label = 'data/', 'batchNa'
    params, data = utils.readBatchData(folder, label, loadAll=0, saveAll=1,
                                       vars=None, maxCombs=None)
    plotfINa(folder, label, params, data)
def readPlotNMDA():
    """Load the saved batchNMDA results and plot the EPSP curves."""
    folder, label = 'data/', 'batchNMDA'
    params, data = utils.readBatchData(folder, label, loadAll=0, saveAll=1,
                                       vars=None, maxCombs=None)
    plotNMDA(folder, label, params, data)
# Main code
# Run the sodium analysis by default; switch the comments to run NMDA.
if __name__ == '__main__':
    readPlotNa()
    # readPlotNMDA()
"content_hash": "680618092c4735c9f4b304c29f209ed2",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 110,
"avg_line_length": 33.313131313131315,
"alnum_prop": 0.6397816858702243,
"repo_name": "Neurosim-lab/netpyne",
"id": "fc11f8567b313ac09ca2d3d0541e4bc288c2afd5",
"size": "3298",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "examples/batchCellMapping/analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "25324"
},
{
"name": "Jupyter Notebook",
"bytes": "2588467"
},
{
"name": "Python",
"bytes": "1802020"
},
{
"name": "Shell",
"bytes": "915"
}
],
"symlink_target": ""
} |
"""Binary for training translation models and decoding from them.
Running this program without --decode will download the WMT corpus into
the directory specified as --data_dir and tokenize it in a very basic way,
and then start training a model saving checkpoints to --train_dir.
Running with --decode starts an interactive loop so you can see how
the current checkpoint translates English sentences into French.
See the following papers for more information on neural translation models.
* http://arxiv.org/abs/1409.3215
* http://arxiv.org/abs/1409.0473
* http://arxiv.org/pdf/1412.2007v2.pdf
Adapted by Motoki Wu.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from DeepLearning.Utils import data_utils
from DeepLearning.Seq2Seq import seq2seq_model
from DeepLearning.Utils.prepare_corpus import tokenizer, DOCTOR_VOCAB_MAX, PERSON_VOCAB_MAX, DOCTOR_VOCAB_PATH, PERSON_TRAIN_IDS_PATH,DOCTOR_TRAIN_IDS_PATH, PERSON_DEV_IDS_PATH, DOCTOR_DEV_IDS_PATH, PERSON_VOCAB_PATH
from tensorflow.python.platform import gfile
# Canned Doctor Who prompts consumed by example() for a quick demo decode.
sentences = ["what is your name ?", "are you the doctor ?", "What's that noise ?" ,"(wearing the sunglasses) So what was it ? Your ghost." ," Why did they only come out at night ?", "What will happen to them ?", "What do I do now ?", "I do. You keep going. You have to. Take it from me, there is a whole world out there. A galaxy, a life. What would O'Donnell have wanted ?", "What will UNIT do with the ghosts ?", "Here's what I don't understand. You did change the future. You stopped the Fisher King from returning .", "And saying the chamber will open ?","Smart.", "How do you mean ?"]

# Optimization hyper-parameters.
tf.app.flags.DEFINE_float("learning_rate", 0.5, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
                          "Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
                          "Clip gradients to this norm.")
# Model size / training loop configuration.
tf.app.flags.DEFINE_integer("batch_size", 64,
                            "Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 3, "Number of layers in the model.")
tf.app.flags.DEFINE_string("data_dir", "/tmp", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "/tmp", "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
                            "Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200,
                            "How many training steps to do per checkpoint.")
# Mode selectors, consumed by main() in priority order:
# self_test > decode > example > (default) train.
tf.app.flags.DEFINE_boolean("decode", False,
                            "Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("example", False,
                            "Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False,
                            "Run a self-test if this is set to True.")
"""
PERSON ~ User questions
DOCTOR ~ ANSWARE
TensorFlow examples goes from EN -> FR.
This script goes from MODERN -> ORIGINAL.
"""
from DeepLearning.Utils import CACHE_DIR
from DeepLearning.Utils.data_utils import create_vocabulary, data_to_token_ids
from DeepLearning.Utils.genTrainingData import PERSON_FILENAME, DOCTOR_FILENAME, TRAIN_SUFFIX, DEV_SUFFIX
from DeepLearning.Utils.genTrainingData import PERSON_PATH, DOCTOR_PATH, PERSON_TRAIN_PATH, PERSON_DEV_PATH, DOCTOR_TRAIN_PATH, DOCTOR_DEV_PATH
# Corpus paths: the "en" side is PERSON (user questions), the "fr" side is
# DOCTOR (answers) — names kept from the original EN->FR tutorial code.
tf.app.flags.DEFINE_string("en_train", PERSON_TRAIN_IDS_PATH, "modern train ids path")
tf.app.flags.DEFINE_string("fr_train", DOCTOR_TRAIN_IDS_PATH, "original train ids path")
tf.app.flags.DEFINE_string("en_dev", PERSON_DEV_IDS_PATH, "modern dev ids path")
tf.app.flags.DEFINE_string("fr_dev", DOCTOR_DEV_IDS_PATH, "original dev ids path")
tf.app.flags.DEFINE_string("en_vocab", PERSON_VOCAB_PATH, "modern vocab path")
tf.app.flags.DEFINE_string("fr_vocab", DOCTOR_VOCAB_PATH, "original vocab path")
tf.app.flags.DEFINE_integer("en_vocab_size", PERSON_VOCAB_MAX, "modern vocabulary size")
tf.app.flags.DEFINE_integer("fr_vocab_size", DOCTOR_VOCAB_MAX, "original vocabulary size")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
_buckets = [(5, 10), (10, 15), (20, 25), (50, 50)] #, (70, 80), (180, 198)] # TODO: maybe filter out long sentences?
def read_data(source_path, target_path, max_size=None):
    """Read data from source and target files and put into buckets.
    Args:
        source_path: path to the files with token-ids for the source language.
        target_path: path to the file with token-ids for the target language;
            it must be aligned with the source file: n-th line contains the desired
            output for n-th line from the source_path.
        max_size: maximum number of lines to read, all other will be ignored;
            if 0 or None, data files will be read completely (no limit).
    Returns:
        data_set: a list of length len(_buckets); data_set[n] contains a list of
            (source, target) pairs read from the provided data files that fit
            into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
            len(target) < _buckets[n][1]; source and target are lists of token-ids.
    """
    data_set = [[] for _ in _buckets]
    with gfile.GFile(source_path, mode="r") as source_file:
        with gfile.GFile(target_path, mode="r") as target_file:
            source, target = source_file.readline(), target_file.readline()
            counter = 0
            while source and target and (not max_size or counter < max_size):
                counter += 1
                if counter % 100000 == 0:
                    print(" reading data line %d" % counter)
                    sys.stdout.flush()
                # Truncate both sides to 50 tokens so every pair can fit the
                # largest bucket; longer tails are silently dropped.
                source_ids = [int(x) for x in source.split()][:50] # TODO: hmm
                target_ids = [int(x) for x in target.split()][:50]
                target_ids.append(data_utils.EOS_ID)
                # Put the pair into the first (smallest) bucket it fits;
                # pairs too large for every bucket are discarded.
                for bucket_id, (source_size, target_size) in enumerate(_buckets):
                    if len(source_ids) < source_size and len(target_ids) < target_size:
                        data_set[bucket_id].append([source_ids, target_ids])
                        break
                source, target = source_file.readline(), target_file.readline()
    return data_set
def create_model(session, forward_only):
    """Create translation model and initialize or load parameters in session.

    Args:
        session: active tf.Session in which variables will live.
        forward_only: if True, build the model for inference only
            (no backward pass / parameter updates).

    Returns:
        A seq2seq_model.Seq2SeqModel restored from the newest checkpoint in
        FLAGS.train_dir when one exists, otherwise freshly initialized.
    """
    print("en_vocab_size", FLAGS.en_vocab_size)
    print("fr_vocab_size", FLAGS.fr_vocab_size)
    model = seq2seq_model.Seq2SeqModel(
        FLAGS.en_vocab_size, FLAGS.fr_vocab_size, _buckets,
        FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
        FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,
        forward_only=forward_only)
    # Resume from a checkpoint when one is available; otherwise start fresh.
    ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
    if ckpt and gfile.Exists(ckpt.model_checkpoint_path):
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        model.saver.restore(session, ckpt.model_checkpoint_path)
    else:
        print("Created model with fresh parameters.")
        # session.run(tf.variables.initialize_all_variables())
        session.run(tf.initialize_all_variables())
    return model
def train():
    """Train a en->fr translation model using WMT data.

    Reads the PERSON/DOCTOR token-id files named by flags, buckets them,
    and runs the training loop forever, checkpointing and evaluating every
    FLAGS.steps_per_checkpoint steps.
    """
    # Prepare WMT data.
    # print("Preparing WMT data in %s" % FLAGS.data_dir)
    # en_train, fr_train, en_dev, fr_dev, _, _ = data_utils.prepare_wmt_data(
    #     FLAGS.data_dir, FLAGS.en_vocab_size, FLAGS.fr_vocab_size)
    en_train = FLAGS.en_train
    fr_train = FLAGS.fr_train
    en_dev = FLAGS.en_dev
    fr_dev = FLAGS.fr_dev
    print("en_train", en_train)
    print("fr_train", fr_train)
    print("en_dev", en_dev)
    print("fr_dev", fr_dev)
    with tf.Session() as sess:
        # Create model.
        print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
        model = create_model(sess, False)
        # Read data into buckets and compute their sizes.
        print ("Reading development and training data (limit: %d)."
               % FLAGS.max_train_data_size)
        dev_set = read_data(en_dev, fr_dev)
        train_set = read_data(en_train, fr_train, FLAGS.max_train_data_size)
        train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
        train_total_size = float(sum(train_bucket_sizes))
        # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
        # to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
        # the size of the i-th training bucket, as used later.
        train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                               for i in xrange(len(train_bucket_sizes))]
        # This is the training loop.
        step_time, loss = 0.0, 0.0
        current_step = 0
        previous_losses = []
        while True:
            # Choose a bucket according to data distribution. We pick a random number
            # in [0, 1] and use the corresponding interval in train_buckets_scale.
            random_number_01 = np.random.random_sample()
            bucket_id = min([i for i in xrange(len(train_buckets_scale))
                             if train_buckets_scale[i] > random_number_01])
            # Get a batch and make a step.
            start_time = time.time()
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                train_set, bucket_id)
            # print("encoder_inputs", "-"*80)
            # print(encoder_inputs)
            # print("decoder_inputs", "-"*80)
            # print(decoder_inputs)
            # print("bucket_id", bucket_id)
            _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                         target_weights, bucket_id, False)
            # step_time/loss are running averages over one checkpoint window.
            step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
            loss += step_loss / FLAGS.steps_per_checkpoint
            current_step += 1
            print("loss", loss)
            sys.stdout.flush()
            # Once in a while, we save checkpoint, print statistics, and run evals.
            if current_step % FLAGS.steps_per_checkpoint == 0:
                # Print statistics for the previous epoch.
                perplexity = math.exp(loss) if loss < 300 else float('inf')
                print ("global step %d learning rate %.4f step-time %.2f perplexity "
                       "%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
                                 step_time, perplexity))
                # Decrease learning rate if no improvement was seen over last 3 times.
                if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
                    sess.run(model.learning_rate_decay_op)
                previous_losses.append(loss)
                # Save checkpoint and zero timer and loss.
                checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
                model.saver.save(sess, checkpoint_path, global_step=model.global_step)
                step_time, loss = 0.0, 0.0
                # Run evals on development set and print their perplexity.
                for bucket_id in xrange(len(_buckets)):
                    encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                        dev_set, bucket_id)
                    _, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                                 target_weights, bucket_id, True)
                    eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')
                    print(" eval: bucket %d perplexity %.2f" % (bucket_id, eval_ppx))
                sys.stdout.flush()
def example():
    """Greedy-decode the module-level `sentences` list and print each reply."""
    with tf.Session() as sess:
        # Create model and load parameters.
        model = create_model(sess, True)
        model.batch_size = 1 # We decode one sentence at a time.
        # Load vocabularies.
        # en_vocab_path = os.path.join(FLAGS.data_dir,
        #                              "vocab%d.en" % FLAGS.en_vocab_size)
        # fr_vocab_path = os.path.join(FLAGS.data_dir,
        #                              "vocab%d.fr" % FLAGS.fr_vocab_size)
        en_vocab_path = FLAGS.en_vocab
        fr_vocab_path = FLAGS.fr_vocab
        print("en_vocab_path", FLAGS.en_vocab)
        print("fr_vocab_path", FLAGS.fr_vocab)
        en_vocab, _ = data_utils.initialize_vocabulary(en_vocab_path)
        _, rev_fr_vocab = data_utils.initialize_vocabulary(fr_vocab_path)
        for sentence in sentences:
            # Get token-ids for the input sentence.
            token_ids = data_utils.sentence_to_token_ids(sentence, en_vocab, tokenizer=tokenizer)
            # Which bucket does it belong to?
            bucket_id = min([b for b in xrange(len(_buckets))
                             if _buckets[b][0] > len(token_ids)])
            # Get a 1-element batch to feed the sentence to the model.
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                {bucket_id: [(token_ids, [])]}, bucket_id)
            # Get output logits for the sentence.
            _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
                                             target_weights, bucket_id, True)
            # This is a greedy decoder - outputs are just argmaxes of output_logits.
            outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
            # If there is an EOS symbol in outputs, cut them at that point.
            if data_utils.EOS_ID in outputs:
                outputs = outputs[:outputs.index(data_utils.EOS_ID)]
            # Print out sentence corresponding to outputs.
            print(" Person: "+sentence)
            print(" Doctor: ")
            print(" ".join([rev_fr_vocab[output] for output in outputs]))
def decode():
    """Interactive loop: read sentences from stdin and print the decoded reply."""
    with tf.Session() as sess:
        # Create model and load parameters.
        model = create_model(sess, True)
        model.batch_size = 1 # We decode one sentence at a time.
        # Load vocabularies.
        # en_vocab_path = os.path.join(FLAGS.data_dir,
        #                              "vocab%d.en" % FLAGS.en_vocab_size)
        # fr_vocab_path = os.path.join(FLAGS.data_dir,
        #                              "vocab%d.fr" % FLAGS.fr_vocab_size)
        en_vocab_path = FLAGS.en_vocab
        fr_vocab_path = FLAGS.fr_vocab
        print("Person_vocab_path", FLAGS.en_vocab)
        print("Doctor_vocab_path", FLAGS.fr_vocab)
        en_vocab, _ = data_utils.initialize_vocabulary(en_vocab_path)
        _, rev_fr_vocab = data_utils.initialize_vocabulary(fr_vocab_path)
        # Decode from standard input.
        sys.stdout.write("> ")
        sys.stdout.flush()
        sentence = sys.stdin.readline()
        while sentence:
            # Get token-ids for the input sentence.
            token_ids = data_utils.sentence_to_token_ids(sentence, en_vocab, tokenizer=tokenizer)
            # Which bucket does it belong to?
            bucket_id = min([b for b in xrange(len(_buckets))
                             if _buckets[b][0] > len(token_ids)])
            # Get a 1-element batch to feed the sentence to the model.
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                {bucket_id: [(token_ids, [])]}, bucket_id)
            # Get output logits for the sentence.
            _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
                                             target_weights, bucket_id, True)
            # This is a greedy decoder - outputs are just argmaxes of output_logits.
            outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
            # If there is an EOS symbol in outputs, cut them at that point.
            if data_utils.EOS_ID in outputs:
                outputs = outputs[:outputs.index(data_utils.EOS_ID)]
            # Print out French sentence corresponding to outputs.
            print(" ".join([rev_fr_vocab[output] for output in outputs]))
            print("> ", end="")
            sys.stdout.flush()
            sentence = sys.stdin.readline()
def self_test():
    """Test the translation model.

    Builds a tiny model on fake data and runs 5 training steps, just to
    verify the graph wiring; no assertions are made on the loss.
    """
    with tf.Session() as sess:
        print("Self-test for neural translation model.")
        # Create model with vocabularies of 10, 2 small buckets, 2 layers of 32.
        model = seq2seq_model.Seq2SeqModel(10, 10, [(3, 3), (6, 6)], 32, 2,
                                           5.0, 32, 0.3, 0.99, num_samples=8)
        # sess.run(tf.variables.initialize_all_variables())
        sess.run(tf.initialize_all_variables())
        # Fake data set for both the (3, 3) and (6, 6) bucket.
        data_set = ([([1, 1], [2, 2]), ([3, 3], [4]), ([5], [6])],
                    [([1, 1, 1, 1, 1], [2, 2, 2, 2, 2]), ([3, 3, 3], [5, 6])])
        for _ in xrange(5): # Train the fake model for 5 steps.
            bucket_id = random.choice([0, 1])
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                data_set, bucket_id)
            print("en", encoder_inputs)
            print("dec", decoder_inputs)
            model.step(sess, encoder_inputs, decoder_inputs, target_weights,
                       bucket_id, False)
def main(_):
    """Entry point: choose a run mode from the command-line flags.

    Flag precedence mirrors the original checks: --self_test wins over
    --decode, which wins over --example; with none set we train.
    """
    mode_dispatch = (
        (FLAGS.self_test, self_test),
        (FLAGS.decode, decode),
        (FLAGS.example, example),
    )
    for flag_value, handler in mode_dispatch:
        if flag_value:
            handler()
            return
    train()


if __name__ == "__main__":
    tf.app.run()
| {
"content_hash": "7f6763c390d47fd926b662c18432b903",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 589,
"avg_line_length": 45.91256830601093,
"alnum_prop": 0.6479409664365627,
"repo_name": "neuralconcept/DrWhoAI",
"id": "83056ca3369b6262a72a38cb1e5fdd3221378b65",
"size": "16804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DeepLearning/Seq2Seq/AIBot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "58914"
},
{
"name": "Shell",
"bytes": "785"
}
],
"symlink_target": ""
} |
class OutputHandler(object):
    """Base output handler: a callable that hands items back unchanged.

    Subclasses override ``__call__`` to actually emit the item somewhere.
    """

    def __init__(self, cmdapp):
        # Keep a reference to the owning command application.
        self.cmdapp = cmdapp

    def __call__(self, item):
        # Identity behaviour: pass the item straight through.
        return item
class StdoutOutputHandler(OutputHandler):
    '''Handler that writes each item to standard output.'''
    # NOTE(review): Python 2 print statement — this module is py2-only.
    def __call__(self, item):
        print item
"content_hash": "a4b8ef19c9c6566896edfd0ff36ce332",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 24.615384615384617,
"alnum_prop": 0.590625,
"repo_name": "denz/swarm-crawler",
"id": "3ce0f83a75e6c8452171832a94ebf6bb0bcb1212",
"size": "320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swarm_crawler/output.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "3656"
},
{
"name": "Python",
"bytes": "57865"
}
],
"symlink_target": ""
} |
def get_primes(value):
    """Return True if ``value`` is a prime number, False otherwise.

    Fixes the original, which returned ``value`` unconditionally (making
    everything below dead code); the dead loop also started ``range`` at 0
    (ZeroDivisionError on ``value % 0``) and returned a verdict after
    inspecting only the first candidate divisor.
    """
    # 0, 1 and negatives are not prime by definition.
    if value < 2:
        return False
    # Trial division up to sqrt(value) is sufficient: any composite has a
    # factor no larger than its square root.
    for divisor in range(2, int(value ** 0.5) + 1):
        if value % divisor == 0:
            return False
    return True


print(get_primes(20))
| {
"content_hash": "356e78bbd25e9d64c647e045e4930230",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 22,
"avg_line_length": 13,
"alnum_prop": 0.6363636363636364,
"repo_name": "Erianafranky/andela_bootcamp",
"id": "c5069b19bdbce19fabc5f702575e55b1b7cb8637",
"size": "143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Week1/Self_Learning_Clinic/Primes/prime_numbers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "816"
},
{
"name": "Python",
"bytes": "13269"
}
],
"symlink_target": ""
} |
"""System tests for read_gbq using the BigQuery Storage API."""
import functools
import uuid
import pytest
pytest.importorskip("google.cloud.bigquery", minversion="1.24.0")
@pytest.fixture
def method_under_test(project_id, credentials):
    """Return ``read_gbq`` pre-bound to the test project and credentials."""
    import pandas_gbq

    return functools.partial(
        pandas_gbq.read_gbq, project_id=project_id, credentials=credentials
    )
@pytest.mark.parametrize(
    "query_string",
    (
        ("SELECT * FROM (SELECT 1) WHERE TRUE = FALSE;"),
        (
            "SELECT * FROM (SELECT TIMESTAMP('2020-02-11 16:33:32-06:00')) WHERE TRUE = FALSE;"
        ),
    ),
)
def test_empty_results(method_under_test, query_string):
    """Test with an empty dataframe.
    See: https://github.com/pydata/pandas-gbq/issues/299
    """
    # A zero-row result must still come back as a well-formed DataFrame.
    df = method_under_test(
        query_string,
        use_bqstorage_api=True,
    )
    assert len(df.index) == 0
def test_large_results(random_dataset, method_under_test):
    """Fetch 10M rows through the BigQuery Storage API.

    Writes results to a uniquely-named destination table in the throwaway
    dataset so the large result set can be downloaded via the storage API.
    """
    df = method_under_test(
        """
        SELECT
          total_amount,
          passenger_count,
          trip_distance
        FROM `bigquery-public-data.new_york.tlc_green_trips_2014`
        -- Select non-null rows for no-copy conversion from Arrow to pandas.
        WHERE total_amount IS NOT NULL
          AND passenger_count IS NOT NULL
          AND trip_distance IS NOT NULL
        LIMIT 10000000
        """,
        use_bqstorage_api=True,
        configuration={
            "query": {
                "destinationTable": {
                    "projectId": random_dataset.project,
                    "datasetId": random_dataset.dataset_id,
                    # Unique table name so concurrent runs don't collide.
                    "tableId": "".join(
                        [
                            "test_read_gbq_w_bqstorage_api_",
                            str(uuid.uuid4()).replace("-", "_"),
                        ]
                    ),
                },
                "writeDisposition": "WRITE_TRUNCATE",
            }
        },
    )
    assert len(df) == 10000000
| {
"content_hash": "4b6643f3e727ed3befe7066825037380",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 95,
"avg_line_length": 27.34246575342466,
"alnum_prop": 0.5420841683366734,
"repo_name": "googleapis/python-bigquery-pandas",
"id": "cfb31ea81a6eb9c2e9ad842e3ca7b1a1ca1199d1",
"size": "2160",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/system/test_read_gbq_with_bqstorage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "249027"
},
{
"name": "Shell",
"bytes": "32416"
}
],
"symlink_target": ""
} |
"""add event triggers table
Revision ID: 012
Revises: 011
Create Date: 2016-03-04 09:49:52.481791
"""
# revision identifiers, used by Alembic.
revision = '012'
down_revision = '011'
from alembic import op
import sqlalchemy as sa
from mistral.db.sqlalchemy import types as st
def upgrade():
    """Create the event_triggers_v2 table.

    One row per (exchange, topic, event, workflow, project) subscription,
    enforced unique; indexed for lookups by project + workflow.
    """
    op.create_table(
        'event_triggers_v2',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('project_id', sa.String(length=80), nullable=True),
        sa.Column('scope', sa.String(length=80), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=200), nullable=True),
        sa.Column('workflow_id', sa.String(length=36), nullable=False),
        sa.Column('exchange', sa.String(length=80), nullable=False),
        sa.Column('topic', sa.String(length=80), nullable=False),
        sa.Column('event', sa.String(length=80), nullable=False),
        sa.Column('workflow_params', st.JsonEncoded(), nullable=True),
        sa.Column('workflow_input', st.JsonEncoded(), nullable=True),
        # Keystone trust id used to execute the workflow on behalf of the owner.
        sa.Column('trust_id', sa.String(length=80), nullable=True),
        sa.ForeignKeyConstraint(
            ['workflow_id'],
            ['workflow_definitions_v2.id'],
        ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint(
            'exchange',
            'topic',
            'event',
            'workflow_id',
            'project_id'
        ),
        sa.Index(
            'event_triggers_v2_project_id_workflow_id',
            'project_id', 'workflow_id'
        )
    )
| {
"content_hash": "2829256c1d7b3812c49f8b80914012d1",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 71,
"avg_line_length": 31.98076923076923,
"alnum_prop": 0.5995189416716777,
"repo_name": "openstack/mistral",
"id": "b934543fefe23985b45dab57ef3f1e65b65733da",
"size": "2250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/db/sqlalchemy/migration/alembic_migrations/versions/012_add_event_triggers_v2_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2091"
},
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "2617595"
},
{
"name": "Shell",
"bytes": "26731"
}
],
"symlink_target": ""
} |
import unittest
from inspector.src.twitter import Twitter
from inspector.src.geocoder import CachedGeocoder
from datetime import datetime, timedelta
class TestTwitter(unittest.TestCase):
    """ Test twitter class (exercises the live Twitter search API). """

    def setUp(self):
        self.twitter = Twitter()

    def test_twitter_constants(self):
        """ Check twitter class has correct values """
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual instead.
        self.assertEqual(self.twitter.query_string, u'{query} since:{since} until:{until} {emotion}')
        self.assertEqual(self.twitter.date_format, '%Y-%m-%d')
        self.assertEqual(self.twitter.geocode, u'44.948056,34.104167,250km')
        self.assertEqual(self.twitter.timeout, 1)

    def test_twitter_search(self):
        """ Test search tweets """
        self.twitter.query_string = u'#test lang:en'
        self.twitter.timeout = 0
        search_list = self.twitter.search(count=10)
        for tweet in search_list['statuses']:
            self.assertTrue(tweet['text'])

    def test_twitter_search_interval(self):
        """ Test search tweets """
        self.twitter.query_string = u'#test lang:en'
        self.twitter.timeout = 0
        # Restrict the search window to the last 24 hours.
        since = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
        search_list = self.twitter.search_interval(query=u'#test lang:en', since=since)
        for tweet in search_list:
            self.assertTrue(tweet['text'])

    def test_tweet2coord(self):
        """ Search tweets and get it coords """
        self.twitter.query_string = u'#test lang:en'
        self.twitter.timeout = 0
        search_list = self.twitter.search(count=10)['statuses']
        geocoder = CachedGeocoder()
        coords = geocoder.tweets_to_coords(search_list)
        for coord in coords:
            self.assertTrue(isinstance(coord[1], tuple))
| {
"content_hash": "1f4cdf00d70ffcf9ecd537c25061c62a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 102,
"avg_line_length": 37.851063829787236,
"alnum_prop": 0.6430578976953345,
"repo_name": "Samael500/social-inspector",
"id": "9e662571957ed4f7747d1996e336e49e2a54c02b",
"size": "1804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inspector/tests/tests_twitter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "578"
},
{
"name": "Python",
"bytes": "21109"
}
],
"symlink_target": ""
} |
from qtpy import QtCore
class SignalContainer(QtCore.QObject):
    """Owns the app-wide signals; Qt signals must live on a QObject subclass."""

    data_file_created = QtCore.Signal()
    data_file_written = QtCore.Signal()
    queue_relinquishing_control = QtCore.Signal()
    queue_taking_control = QtCore.Signal()


# Module-level singleton so callers can import the bound signals directly.
_signal_container = SignalContainer()
data_file_created = _signal_container.data_file_created
data_file_written = _signal_container.data_file_written
queue_relinquishing_control = _signal_container.queue_relinquishing_control
queue_taking_control = _signal_container.queue_taking_control
| {
"content_hash": "f67d254431e463580fabbdd23ebe0e6d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 33.0625,
"alnum_prop": 0.775047258979206,
"repo_name": "wright-group/PyCMDS",
"id": "2f60db5b133984a387fe852913568326c7fb7262",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycmds/somatic/signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "212"
},
{
"name": "C",
"bytes": "15060"
},
{
"name": "C++",
"bytes": "43260"
},
{
"name": "Python",
"bytes": "516847"
}
],
"symlink_target": ""
} |
import re
import shortuuid
from enum import Enum
import oa_evidence
from app import db
from app import logger
from pdf_url import PdfUrl
from reported_noncompliant_copies import is_reported_noncompliant_url
from util import clean_doi
from util import is_doi_url
def url_sort_score(url):
    """Score a URL for sorting; lower (more negative) sorts earlier.

    Each marker contributes independently when it appears anywhere in the
    lower-cased URL: PMC/arXiv/.edu hosts are preferred, citeseerx and ftp
    are penalized, and a "pdf" substring breaks ties in favour of PDFs.
    """
    weighted_markers = (
        # pmc results are better than IR results, if we've got them
        ("europepmc.org", -50),
        ("/pmc/", -45),
        # arxiv results are better than IR results, if we've got them
        ("arxiv", -40),
        (".edu", -30),
        ("citeseerx", +10),
        # ftp is really bad
        ("ftp", +60),
        # break ties
        ("pdf", -1),
    )
    lowered = url.lower()
    # otherwise whatever we've got: unmatched URLs score 0
    return sum(weight for marker, weight in weighted_markers if marker in lowered)
def validate_pdf_urls(open_locations):
    """Set each location's ``pdf_url_valid`` flag using the PdfUrl blacklist.

    Only locations whose flag is still None are checked. A pdf_url is
    considered invalid when the pdf_url table has recorded it as not
    actually serving a PDF.
    """
    unvalidated = [x for x in open_locations if x.pdf_url_valid is None]
    if unvalidated:
        # One query for all unvalidated URLs; collect only the known-bad ones.
        bad_pdf_urls = {
            x.url for x in
            PdfUrl.query.filter(
                PdfUrl.url.in_([x.pdf_url for x in unvalidated]),
                PdfUrl.is_pdf.is_(False)
            ).all()
        }
        for location in unvalidated:
            location.pdf_url_valid = (
                location.pdf_url not in bad_pdf_urls
                # get rid of this, make PDF checker more robust
                or 'journal.csj.jp/doi/pdf' in location.pdf_url
            )
            if not location.pdf_url_valid:
                logger.info(u'excluding location with bad pdf url: {}'.format(location))
class OAStatus(Enum):
    """Open-access "color", roughly ordered from least to most open."""
    closed = 'closed'
    green = 'green'
    bronze = 'bronze'
    hybrid = 'hybrid'
    gold = 'gold'
def oa_status_sort_key(location):
    """Sort key ranking a location's OA status from closed (0) to gold (4).

    Unknown statuses fall back to 0, matching the closed rank.
    """
    ordering = (
        OAStatus.closed,
        OAStatus.green,
        OAStatus.bronze,
        OAStatus.hybrid,
        OAStatus.gold,
    )
    rank = {status: position for position, status in enumerate(ordering)}
    return rank.get(location.oa_status, 0)
class OpenLocation(db.Model):
    """One open-access location (PDF and/or landing-page URL) for a publication,
    plus the provenance ("evidence") used to classify its OA status."""

    id = db.Column(db.Text, primary_key=True)
    pub_id = db.Column(db.Text, db.ForeignKey('pub.id'))
    doi = db.Column(db.Text) # denormalized from Publication for ease of interpreting
    pdf_url = db.Column(db.Text)
    metadata_url = db.Column(db.Text)
    license = db.Column(db.Text)
    evidence = db.Column(db.Text)
    updated = db.Column(db.DateTime)
    error = db.Column(db.Text)

    def __init__(self, **kwargs):
        # Runtime-only attributes (not mapped columns) get defaults here.
        self.id = shortuuid.uuid()[0:20]
        self.doi = ""
        self.match = {}
        self.pmh_id = None
        self.endpoint_id = None
        self.base_doc = None
        self.version = None
        self.error = ""
        self.pdf_url_valid = None
        self.institution = None
        super(OpenLocation, self).__init__(**kwargs)

    @property
    def has_license(self):
        """True when a real license is recorded ("unknown" doesn't count)."""
        if not self.license:
            return False
        if self.license == "unknown":
            return False
        return True

    @property
    def best_url(self):
        # Prefer a direct PDF link over the landing page.
        return self.pdf_url or self.metadata_url

    @property
    def best_url_is_pdf(self):
        # None when there is no URL at all, else whether best_url is the PDF.
        if not self.best_url:
            return None
        if self.pdf_url:
            return True
        return False

    @property
    def is_reported_noncompliant(self):
        # Either URL appearing on the publisher-reported noncompliant list
        # marks the whole location.
        if is_reported_noncompliant_url(self.doi, self.pdf_url) or is_reported_noncompliant_url(self.doi, self.metadata_url):
            return True
        return False

    @property
    def is_gold(self):
        # Gold: hosted by a fully-OA journal (evidence prefix set in oa_evidence).
        return self.best_url and self.display_evidence.startswith(oa_evidence.oa_journal_prefix)

    @property
    def is_green(self):
        # Green: hosted by an OA repository.
        return self.best_url and self.display_evidence.startswith('oa repository')

    @property
    def is_hybrid(self):
        # Hybrid: publisher-hosted (not gold/green) with an explicit license.
        return self.best_url and not (self.is_gold or self.is_green) and self.has_license

    @property
    def is_bronze(self):
        # Bronze: publisher-hosted, free to read, but no license.
        if self.best_url and not (self.is_gold or self.is_green) and not self.has_license:
            return True
        # A bare doi.org URL matching this record's DOI also counts as bronze
        # when no other color applies.
        if is_doi_url(self.best_url):
            return (
                clean_doi(self.best_url, return_none_if_error=True) == self.doi
                and not (self.is_gold or self.is_hybrid or self.is_green)
            )
        return False

    @property
    def display_evidence(self):
        return self.evidence or ''

    @property
    def host_type_calculated(self):
        # Any publisher-side color implies a publisher host; otherwise repository.
        if self.is_gold or self.is_hybrid or self.is_bronze:
            return "publisher"
        return "repository"

    @property
    def host_type(self):
        # host_type_set, when present, is a manual override of the calculation.
        if hasattr(self, "host_type_set"):
            return self.host_type_set
        else:
            return self.host_type_calculated

    @property
    def is_doaj_journal(self):
        return "doaj" in self.display_evidence

    @property
    def display_updated(self):
        # `updated` may be a datetime or already a string; tolerate both.
        if self.updated:
            try:
                return self.updated.isoformat()
            except AttributeError:
                return self.updated
        return None

    @property
    def oa_status(self):
        """Map this location to an OAStatus, defaulting to green for any
        open location that doesn't match a more specific color."""
        if self.is_gold:
            return OAStatus.gold
        if self.is_hybrid:
            return OAStatus.hybrid
        if self.is_bronze:
            return OAStatus.bronze
        if self.is_green:
            return OAStatus.green
        if not self.best_url:
            return OAStatus.closed
        if not self.display_evidence:
            logger.info(u"should have evidence for {} but none".format(self.id))
        return OAStatus.green

    @property
    def is_pmc(self):
        if self.best_url and re.findall(u"ncbi.nlm.nih.gov/pmc", self.best_url):
            return True
        return False

    @property
    def sort_score(self):
        """Ranking score for choosing the best location; lower sorts first."""
        score = 0
        if self.host_type == "publisher":
            score += -1000
        # Manually-curated / observed OA journals are slightly less trusted.
        if self.display_evidence in [oa_evidence.oa_journal_manual, oa_evidence.oa_journal_observed]:
            score += 100
        if self.version == "publishedVersion":
            score += -600
            if self.metadata_url == u"https://doi.org/{}".format(self.doi):
                score += -200
        elif self.version == "acceptedVersion":
            score += -400
        elif self.version == "submittedVersion":
            score += -200
        # otherwise maybe version is null. sort that to the bottom
        # this is very important
        if self.pdf_url:
            score += -100
        # if had a doi match, give it a little boost because more likely a perfect match (negative is good)
        if "doi" in self.display_evidence:
            score += -10
        # let the repos sort themselves out
        score += url_sort_score(self.best_url)
        return score

    def __repr__(self):
        return u"<OpenLocation ({}) {} {} {} {}>".format(self.id, self.doi, self.display_evidence, self.pdf_url, self.metadata_url)

    def to_dict(self):
        """Serialize for the v1 API response."""
        response = {
            "pdf_url": self.pdf_url,
            "metadata_url": self.metadata_url,
            "license": self.license,
            "evidence": self.display_evidence,
            "pmh_id": self.pmh_id,
            "endpoint_id": self.endpoint_id,
            "oa_color": self.oa_status.value,
            "version": self.version
        }
        if self.is_reported_noncompliant:
            response["reported_noncompliant"] = True
        return response

    def to_dict_v2(self):
        """Serialize for the v2 API response."""
        # is_best is set externally on the chosen location; default False.
        if hasattr(self, "is_best"):
            is_best = self.is_best
        else:
            is_best = False
        response = {
            "updated": self.display_updated,
            "url": self.best_url,
            "url_for_pdf": self.pdf_url,
            "url_for_landing_page": self.metadata_url,
            "evidence": self.display_evidence,
            "license": self.license,
            "version": self.version,
            "host_type": self.host_type,
            "is_best": is_best,
            "pmh_id": self.pmh_id,
            "endpoint_id": self.endpoint_id,
            "repository_institution": self.institution,
        }
        if self.is_reported_noncompliant:
            response["reported_noncompliant"] = True
        return response
| {
"content_hash": "3dfff1fb80bea98711d1a1901b18ec2c",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 131,
"avg_line_length": 27.661073825503355,
"alnum_prop": 0.568239718549072,
"repo_name": "Impactstory/oadoi",
"id": "30641eb54b70c6ce110b15452c9083412792b8de",
"size": "8269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open_location.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2022"
},
{
"name": "PLpgSQL",
"bytes": "11112"
},
{
"name": "Python",
"bytes": "509115"
},
{
"name": "Shell",
"bytes": "3599"
},
{
"name": "TSQL",
"bytes": "596"
}
],
"symlink_target": ""
} |
from wtforms import Form
from wtforms import TextField, SelectField
from wtforms.validators import DataRequired, ValidationError, Required
from ..base import BaseReactForm
class ReactForm(BaseReactForm):
    ''' Class that creates a Reaction form for the dashboard '''

    title = "Heroku: Restart All Dynos"
    description = """
    <P>
    This reaction provides the ability to restart all Dynos within the specified Heroku Application. This reaction will send an API request to Heroku to restart all Dynos, the Dyno will be restarted whether it is already running or not. This reaction should only be used in events that cover a full failure or within environments that can tolerate the time it takes to restart Dynos.
    </P>
    """

    # Copy before updating: the original bound `placeholders` to
    # BaseReactForm.placeholders and then called update() on it, mutating the
    # base class's shared dict and leaking these keys into every other
    # reaction form. A shallow copy keeps the base entries without the leak.
    placeholders = dict(BaseReactForm.placeholders)
    placeholders.update({
        'appname': 'StormyWaters58',
        'cmd': 'bash script.sh',
    })

    apikey = TextField(
        "API Key",
        description=BaseReactForm.descriptions['apikey'],
        validators=[DataRequired(message='API Key is a required field')]
    )
    appname = TextField(
        "Application Name",
        description=BaseReactForm.descriptions['heroku']['appname'],
        validators=[
            DataRequired(message='Application Name is a required field')
        ]
    )
    call_on = SelectField(
        "Call On",
        description=BaseReactForm.descriptions['callon'],
        choices=[
            ('false', 'False Monitors'),
            ('true', 'True Monitors')
        ],
        validators=[DataRequired(message='Call On is a required field')]
    )


if __name__ == '__main__':
    pass
| {
"content_hash": "fce92583db6c8516dbfb34224f374faa",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 385,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.6629969418960244,
"repo_name": "madflojo/cloudroutes-service",
"id": "0f8b6a4e5968b71fe2ba07015777ce04c764f9fa",
"size": "1904",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/web/reactionforms/heroku-restart-all-dynos/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17816"
},
{
"name": "HTML",
"bytes": "227943"
},
{
"name": "JavaScript",
"bytes": "3271"
},
{
"name": "Python",
"bytes": "678083"
},
{
"name": "Shell",
"bytes": "5859"
}
],
"symlink_target": ""
} |
import pexpect
import time
import unittest
import node
LEADER = 1
ROUTER1 = 2
class Cert_5_1_06_RemoveRouterId(unittest.TestCase):
    """Thread cert 5.1.6: after the leader releases ROUTER1's router ID,
    ROUTER1 must regain the 'router' role and remain reachable."""

    def setUp(self):
        # Two-node topology: LEADER <-> ROUTER1, whitelisted to each other
        # so they can only communicate with one another.
        self.nodes = {}
        for i in range(1,3):
            self.nodes[i] = node.Node(i)

        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
        self.nodes[LEADER].enable_whitelist()

        self.nodes[ROUTER1].set_panid(0xface)
        self.nodes[ROUTER1].set_mode('rsdn')
        self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER1].enable_whitelist()

    def tearDown(self):
        # values() works on both Python 2 and 3 (the original used the
        # Python-2-only itervalues()), and the loop variable no longer
        # shadows the imported `node` module.
        for n in self.nodes.values():
            n.stop()
        del self.nodes

    def test(self):
        self.nodes[LEADER].start()
        self.nodes[LEADER].set_state('leader')
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        self.nodes[ROUTER1].start()
        time.sleep(3)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')

        rloc16 = self.nodes[ROUTER1].get_addr16()
        for addr in self.nodes[ROUTER1].get_addrs():
            self.nodes[LEADER].ping(addr)

        # The router ID is the top 6 bits of the RLOC16 (hence >> 10).
        self.nodes[LEADER].release_router_id(rloc16 >> 10)
        time.sleep(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')

        for addr in self.nodes[ROUTER1].get_addrs():
            self.nodes[LEADER].ping(addr)
if __name__ == '__main__':
    # Run this certification test directly via the unittest CLI.
    unittest.main()
| {
"content_hash": "dc5d9d88a9c39d6c62ff38a399496b21",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 74,
"avg_line_length": 29.384615384615383,
"alnum_prop": 0.612565445026178,
"repo_name": "JiahuiZHONG/Internship_Thread",
"id": "b43d9015f2161e4d7832d362a72448ea1adb076a",
"size": "3120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/Cert_5_1_06_RemoveRouterId.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "65304"
},
{
"name": "C++",
"bytes": "913701"
},
{
"name": "M4",
"bytes": "14011"
},
{
"name": "Makefile",
"bytes": "41623"
},
{
"name": "Python",
"bytes": "291017"
},
{
"name": "Shell",
"bytes": "3688"
}
],
"symlink_target": ""
} |
from PyQt5 import QtCore, QtGui, QtWidgets
# NOTE(review): this class matches the layout of pyuic5-generated code
# (setupUi/retranslateUi, object names, bottom-of-file import) -- if it was
# generated from a .ui file, regenerate instead of hand-editing. TODO confirm.
class Ui_MainWindow(object):
    """Main window UI: a line selector row, a 'Filters' group
    (branch/bus/equipment-letter), and an 'Equipment' group holding
    copy/delete/clear buttons above a sortable table view."""

    def setupUi(self, MainWindow):
        """Create and lay out every widget of MainWindow (no signal wiring
        beyond connectSlotsByName)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(847, 761)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # --- top row: "Line" label + editable combo box ---
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.label_5 = QtWidgets.QLabel(self.centralwidget)
        self.label_5.setObjectName("label_5")
        self.horizontalLayout_2.addWidget(self.label_5)
        self.selectLine = QtWidgets.QComboBox(self.centralwidget)
        self.selectLine.setMinimumSize(QtCore.QSize(400, 0))
        self.selectLine.setEditable(True)
        self.selectLine.setInsertPolicy(QtWidgets.QComboBox.NoInsert)
        self.selectLine.setObjectName("selectLine")
        self.horizontalLayout_2.addWidget(self.selectLine)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.verticalLayout_2.addLayout(self.horizontalLayout_2)
        # --- "Filters" group: form rows for branch (0), bus (1), letter (2) ---
        self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox.setObjectName("groupBox")
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.groupBox)
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.formLayout = QtWidgets.QFormLayout()
        self.formLayout.setObjectName("formLayout")
        self.label = QtWidgets.QLabel(self.groupBox)
        self.label.setObjectName("label")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label)
        self.label_3 = QtWidgets.QLabel(self.groupBox)
        self.label_3.setObjectName("label_3")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_3)
        self.label_2 = QtWidgets.QLabel(self.groupBox)
        self.label_2.setObjectName("label_2")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_2)
        self.filterBranch = QtWidgets.QComboBox(self.groupBox)
        self.filterBranch.setEnabled(True)
        self.filterBranch.setEditable(True)
        self.filterBranch.setObjectName("filterBranch")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.filterBranch)
        self.filterBus = QtWidgets.QComboBox(self.groupBox)
        self.filterBus.setEditable(True)
        self.filterBus.setInsertPolicy(QtWidgets.QComboBox.NoInsert)
        self.filterBus.setObjectName("filterBus")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.filterBus)
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.filterEquipment = QtWidgets.QLineEdit(self.groupBox)
        self.filterEquipment.setMaximumSize(QtCore.QSize(50, 16777215))
        self.filterEquipment.setMaxLength(8)
        self.filterEquipment.setObjectName("filterEquipment")
        self.horizontalLayout_3.addWidget(self.filterEquipment)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.formLayout.setLayout(2, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_3)
        self.horizontalLayout_4.addLayout(self.formLayout)
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem2)
        self.verticalLayout_2.addWidget(self.groupBox)
        # --- "Equipment" group: tool buttons + sortable table view ---
        self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox_2.setObjectName("groupBox_2")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox_2)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.copyButton = QtWidgets.QToolButton(self.groupBox_2)
        self.copyButton.setObjectName("copyButton")
        self.horizontalLayout.addWidget(self.copyButton)
        self.deleteButton = QtWidgets.QToolButton(self.groupBox_2)
        self.deleteButton.setObjectName("deleteButton")
        self.horizontalLayout.addWidget(self.deleteButton)
        self.clearSelection = QtWidgets.QToolButton(self.groupBox_2)
        self.clearSelection.setObjectName("clearSelection")
        self.horizontalLayout.addWidget(self.clearSelection)
        spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem3)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # CustomTableView is a promoted widget; its import sits at the
        # bottom of this file (pyuic convention for promoted classes).
        self.tableView = CustomTableView(self.groupBox_2)
        self.tableView.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.tableView.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.tableView.setSortingEnabled(True)
        self.tableView.setObjectName("tableView")
        self.verticalLayout.addWidget(self.tableView)
        self.verticalLayout_2.addWidget(self.groupBox_2)
        MainWindow.setCentralWidget(self.centralwidget)
        # --- menu bar, status bar, File->Exit action ---
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 847, 21))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.actionExit = QtWidgets.QAction(MainWindow)
        self.actionExit.setObjectName("actionExit")
        self.menuFile.addAction(self.actionExit)
        self.menubar.addAction(self.menuFile.menuAction())

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (kept separate so translations can
        be re-applied at runtime)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label_5.setText(_translate("MainWindow", "Line"))
        self.selectLine.setCurrentText(_translate("MainWindow", "Select Line"))
        self.groupBox.setTitle(_translate("MainWindow", "Filters"))
        self.label.setText(_translate("MainWindow", "Equipment Letter"))
        self.label_3.setText(_translate("MainWindow", "Bus"))
        self.label_2.setText(_translate("MainWindow", "Branch"))
        self.groupBox_2.setTitle(_translate("MainWindow", "Equipment"))
        self.copyButton.setText(_translate("MainWindow", "Copy"))
        self.deleteButton.setText(_translate("MainWindow", "Delete"))
        self.clearSelection.setText(_translate("MainWindow", "Clear Selection"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.actionExit.setText(_translate("MainWindow", "Exit"))
from customtableview import CustomTableView
| {
"content_hash": "0978e0f33e962c1f529aa84b8933a284",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 115,
"avg_line_length": 58.99186991869919,
"alnum_prop": 0.7273980154355016,
"repo_name": "cwebber314/pyqt_db",
"id": "797294dcaa0b41fcaa472616bc3b74e212fb604a",
"size": "7456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sql_example_ui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "60535"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.test import SimpleTestCase
from tests.testapp.models import TestModel
from tinymce.widgets import AdminTinyMCE
class TestModels(SimpleTestCase):
    """Verify that TestModel's HTML field is rendered with the TinyMCE
    admin widget when exposed through a ModelAdmin form."""

    def setUp(self):
        self.site = AdminSite()

    def test_htmlfield(self):
        model_admin = admin.ModelAdmin(TestModel, self.site)
        form_class = model_admin.get_form(None)
        widget = form_class.base_fields["foobar"].widget
        self.assertIsInstance(widget, AdminTinyMCE)
| {
"content_hash": "016c6e8f3a32850b629d2f37a743bbca",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 91,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.7574468085106383,
"repo_name": "aljosa/django-tinymce",
"id": "74ccfdca58cd302cadc1343ed93b659239942178",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "675"
},
{
"name": "JavaScript",
"bytes": "2725"
},
{
"name": "Makefile",
"bytes": "1130"
},
{
"name": "Python",
"bytes": "38831"
},
{
"name": "Shell",
"bytes": "530"
}
],
"symlink_target": ""
} |
import sys
import re
import operator
# Python 2 script: reads a tab-separated file of extracted tag sequences and
# collapses runs of repeated POS tags into regex-like patterns ("NN NN" ->
# "(NN)+"), summing the occurrence counts of sequences that collapse to the
# same pattern.
if __name__ == "__main__":
    try:
        common_tags_filename = sys.argv[1]
        tags_file = open(common_tags_filename, "r")
        try:
            tmp_sequences = {}
            # Matches an escaped "<tag>\+" chunk; group 2 is the bare tag.
            pos_general_regex = re.compile(r"((\w+)\\\+)")
            for ct in tags_file:
                ct_fields = ct.strip().split("\t")
                prev_pos_tag = ""
                pos_tags_string = ""
                # Field 2 holds the whitespace-separated POS tag sequence.
                pos_tags = ct_fields[2].split()
                for pt in pos_tags:
                    if prev_pos_tag == pt:
                        # Repeated tag: mark the run with a single "+".
                        if pos_tags_string and pos_tags_string[-1] != "+":
                            pos_tags_string += "+"
                    else:
                        pos_tags_string += (" " + pt)
                    prev_pos_tag = pt
                # Sequences identical up to repetition share one seq_id.
                seq_id = pos_tags_string.replace("+", "")
                tmp_sequences.setdefault(seq_id, [])
                tmp_sequences[seq_id].append(ct_fields[1:] + [pos_tags_string.strip()])
            sequences = {}
            for seq_id, eq_seqs in tmp_sequences.items():
                # Keep the longest collapsed form as the representative and
                # sum the occurrence counts (field 0) of all equivalents.
                main_seq = ""
                tmp_sum_seq_ocurrences = 0
                for eq in eq_seqs:
                    if len(eq[2]) > len(main_seq):
                        main_seq = eq[2]
                    tmp_sum_seq_ocurrences += int(eq[0])
                # Escape, then rewrite "tag\+" chunks as "(tag)+" regexes.
                escaped_main_seq = re.escape(main_seq)
                main_seq = re.sub(pos_general_regex, r"(\g<2>)+", escaped_main_seq)
                sequences.setdefault(main_seq, 0)
                sequences[main_seq] += tmp_sum_seq_ocurrences
            print "\n".join(["POSREGEX" + str(i) + "\t" + str(s[1]) + "\t" + s[0]
                             for i, s in enumerate(sorted(sequences.items(), key=operator.itemgetter(1)), start=1)])
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        except:
            print >> sys.stderr, "E) Sequences: ", sys.exc_info()
    # NOTE(review): this outer bare except doubles as the missing-argument
    # usage message; it too swallows all exception types.
    except:
        print >> sys.stderr
        print >> sys.stderr, "usage: python", sys.argv[0], "<extracted_sequences>"
        print >> sys.stderr, "example:"
        print >> sys.stderr, "  python", sys.argv[0], "train_common_pos_tags.dat output.dat"
        print >> sys.stderr, "Error: ", sys.exc_info()
| {
"content_hash": "93d9ea0cb08f589d5e9db27bbce6c1a5",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 103,
"avg_line_length": 43.54,
"alnum_prop": 0.4648598989435002,
"repo_name": "snovd/test-scripts",
"id": "c664eb2ea4596425bf3867da349a3d37a8e69276",
"size": "2196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kpext/kp_seq_collapse_regex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "27598"
},
{
"name": "Python",
"bytes": "128312"
},
{
"name": "Shell",
"bytes": "14499"
}
],
"symlink_target": ""
} |
import os
from .executable import Executable
def get_tool_path(name):
    """Find the path to one of problemtools' external tools.

    Args:
        name (str): which tool is wanted (one of [default_grader,
        default_validator, interactive, checktestdata, viva.sh])

    Returns:
        str, path to the tool, or None if the tool was not found.
    """
    here = os.path.dirname(__file__)
    # Checked in order: installed layout, then source-tree layout where the
    # tool lives in its own sub-directory named after the file's stem.
    candidates = [
        os.path.join(here, '..', 'support', name),
        os.path.join(here, '..', '..', 'support',
                     os.path.splitext(name)[0], name),
    ]
    return __locate_executable(candidates)
def get_tool(name):
    """Get an Executable instance for one of problemtools' external tools.

    Args:
        name(str): same as for get_tool_path

    Returns:
        problemtools.run.Executable object for the tool, or None if
        the tool was not found.
    """
    path = get_tool_path(name)
    if path is None:
        return None
    return Executable(path)
def __locate_executable(candidate_paths):
    """Find executable among a set of paths.

    Args:
        candidate_paths (list of str): list of locations in which to
        look for an executable file.

    Returns:
        str, first entry of candidate_paths that is an executable
        file, or None if no such entry.
    """
    for candidate in candidate_paths:
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None
| {
"content_hash": "bc35870c5a611cba828e914be809b299",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 79,
"avg_line_length": 32.95744680851064,
"alnum_prop": 0.5648805681084571,
"repo_name": "ghamerly/problemtools",
"id": "78408cc2612b21d0ce336e97f7817fde274ac334",
"size": "1549",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "problemtools/run/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "14066"
},
{
"name": "CSS",
"bytes": "1277"
},
{
"name": "Dockerfile",
"bytes": "702"
},
{
"name": "HTML",
"bytes": "1101"
},
{
"name": "Makefile",
"bytes": "859"
},
{
"name": "Python",
"bytes": "152337"
},
{
"name": "Shell",
"bytes": "4606"
},
{
"name": "TeX",
"bytes": "19397"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ShowspikesValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``layout.xaxis.showspikes`` property."""

    def __init__(self, plotly_name="showspikes", parent_name="layout.xaxis", **kwargs):
        # Honour a caller-supplied edit_type, defaulting to "modebar".
        edit_type = kwargs.pop("edit_type", "modebar")
        super(ShowspikesValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
| {
"content_hash": "99764e4a550f9bbb6c64a9e293d767f3",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 87,
"avg_line_length": 37.72727272727273,
"alnum_prop": 0.636144578313253,
"repo_name": "plotly/plotly.py",
"id": "44f0c8a5c4d91b0dcd3b7a5979460d61cd06e83f",
"size": "415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/xaxis/_showspikes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import sys

# Make the local androguard checkout importable when running from the repo.
PATH_INSTALL = "./"
sys.path.append( PATH_INSTALL )
from androguard.core.bytecodes import dvm, apk
# Sample APK analyzed by this demo (Zitmo malware sample shipped with the repo).
TEST = "../apks/Zitmo.apk"
# Parse the APK container and print a summary of its contents.
a = apk.APK( TEST )
a.show()
# Parse the classes.dex bytecode extracted from the APK.
j = dvm.DalvikVMFormat( a.get_dex() )
# SHOW CLASS (verbose)
#j.show()
| {
"content_hash": "fd7d07bf6c76c49743e0581ebb4b1bc9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 47,
"avg_line_length": 15.25,
"alnum_prop": 0.6598360655737705,
"repo_name": "congthuc/androguard-2.0-custom",
"id": "748d4e980d768115456a314775edd97abd3421e3",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/apk_format_1.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "379022"
},
{
"name": "C++",
"bytes": "57006"
},
{
"name": "Makefile",
"bytes": "6008"
},
{
"name": "Python",
"bytes": "27606868"
}
],
"symlink_target": ""
} |
"""Benchmark halLodExtract by extracting with a bunch of different
stepsizes then seeing how much smaller they get. Also (optionally)
use mafComparator (https://github.com/dentearl/mafTools) together with
comparatorSummarizer.py (https://github.com/dentearl/mwgAlignAnalysis)
"""
import argparse
import os
import sys
import copy
import subprocess
import time
from collections import defaultdict
from hal.stats.halStats import runShellCommand
from hal.stats.halStats import getHalGenomes
from hal.stats.halStats import getHalNumSegments
from hal.lod.halLodInterpolate import runHalLodExtract
from hal.lod.halLodInterpolate import makePath
from hal.lod.halLodInterpolate import getSteps
def getHalTotalSegments(halPath):
    """Sum the (top, bottom) segment counts over every genome in a HAL file.

    Returns a (totalTop, totalBottom) tuple."""
    totalTop = 0
    totalBottom = 0
    for genome in getHalGenomes(halPath):
        top, bottom = getHalNumSegments(halPath, genome)
        totalTop += top
        totalBottom += bottom
    return (totalTop, totalBottom)
def makeMaf(inHalPath, outDir, step, overwrite, doMaf):
    """Export a HAL (the original for step 0, else the LOD-extracted one)
    to MAF via hal2maf, skipping the export if the MAF already exists and
    overwrite is False, or if doMaf is False."""
    if step > 0:
        srcHalPath = makePath(inHalPath, outDir, step, "lod", "hal")
    else:
        srcHalPath = inHalPath
    outMafPath = makePath(inHalPath, outDir, step, "out", "maf")
    if doMaf and (overwrite or not os.path.isfile(outMafPath)):
        runShellCommand("hal2maf %s %s" % (srcHalPath, outMafPath))
def compMaf(inHalPath, outDir, step, overwrite, doMaf):
    """Compare the step-N MAF against the baseline (step 0) MAF.

    Runs mafComparator twice -- exact and with a --near <step> tolerance --
    and summarizes each XML report with comparatorSummarizer.py.  Each pass
    is skipped unless doMaf is set and (overwrite or its XML is missing)."""
    srcMaf = makePath(inHalPath, outDir, 0, "out", "maf")
    tgtMaf = makePath(inHalPath, outDir, step, "out", "maf")

    # Exact comparison.
    xmlPath = makePath(inHalPath, outDir, step, "comp", "xml")
    sumPath = makePath(inHalPath, outDir, step, "comp", "txt")
    if doMaf and (overwrite or not os.path.isfile(xmlPath)):
        compareCmd = "mafComparator --maf1 %s --maf2 %s --out %s --samples 100000" % (srcMaf, tgtMaf, xmlPath)
        runShellCommand(compareCmd)
        summarizeCmd = "comparatorSummarizer.py --xml %s > %s " % (xmlPath, sumPath)
        runShellCommand(summarizeCmd)

    # Tolerant comparison: positions within `step` bases count as matches.
    xmlNearPath = makePath(inHalPath, outDir, step, "comp_near", "xml")
    sumNearPath = makePath(inHalPath, outDir, step, "comp_near", "txt")
    if doMaf and (overwrite or not os.path.isfile(xmlNearPath)):
        nearCmd = "mafComparator --maf1 %s --maf2 %s --out %s --near %d --samples 100000" % (srcMaf, tgtMaf, xmlNearPath, int(step))
        runShellCommand(nearCmd)
        nearSummarizeCmd = "comparatorSummarizer.py --xml %s > %s " % (xmlNearPath, sumNearPath)
        runShellCommand(nearSummarizeCmd)
def getPrecisionRecall(inHalPath, outDir, step, doMaf):
    """Read precision/recall numbers from the comparatorSummarizer output.

    Returns [precision, recall, precisionNear, recallNear].  When doMaf is
    False no summary files exist, so step 0 (self-comparison) reports
    perfect scores and any other step reports zeros.
    """
    if not doMaf:
        return [1., 1., 1., 1.] if step == 0 else [0., 0., 0., 0.]

    # Exact-comparison summary: the numbers live on the third line.
    # (The original leaked both file handles; `with` closes them.)
    sumPath = makePath(inHalPath, outDir, step, "comp", "txt")
    with open(sumPath, "r") as sumFile:
        for _ in range(3):
            line = next(sumFile)
    tokens = line.split()
    assert tokens[2] == "self)"

    # --near comparison summary, same layout.
    sumNearPath = makePath(inHalPath, outDir, step, "comp_near", "txt")
    with open(sumNearPath, "r") as sumNearFile:
        for _ in range(3):
            lineNear = next(sumNearFile)
    tokensNear = lineNear.split()
    assert tokensNear[2] == "self)"

    return [float(tokens[3]), float(tokens[4]),
            float(tokensNear[3]), float(tokensNear[4])]
def getScanTime(inHalPath, outDir, step):
    """Time a halBranchMutations scan of the first non-root genome on the
    HAL for this step.  Returns the elapsed wall-clock seconds as a
    one-element list (so callers can concatenate it onto a row)."""
    if step > 0:
        srcHalPath = makePath(inHalPath, outDir, step, "lod", "hal")
    else:
        srcHalPath = inHalPath
    genomes = getHalGenomes(inHalPath)
    assert len(genomes) > 1
    genName = genomes[1]
    bedPath = makePath(inHalPath, outDir, step, genName, "bed")
    startTime = time.time()
    runShellCommand("halBranchMutations %s %s --refFile %s" % (
        srcHalPath, genName, bedPath))
    return [time.time() - startTime]
def printTable(table):
    """Print the benchmark table as CSV rows sorted by step.

    The first three columns of each row (size in kb, top/bottom segment
    counts) are each followed by their fraction of the step-0 baseline."""
    print("Step, kb,, nTop,, nBottom,, Prec., Recall, PrecNear, RecallNear, ScanTime")
    baseline = table[0]
    for step, data in sorted(table.items()):
        fields = ["%d" % step]
        for idx, elem in enumerate(data):
            fields.append(str(elem))
            if idx <= 2:
                orig = baseline[idx]
                # Guard against a zero baseline (fraction defaults to 1).
                frac = float(elem) / float(orig) if orig > 0 else 1
                fields.append("%f" % frac)
        print(", ".join(fields))
def runSteps(inHalPath, outDir, maxBlock, scale, steps, overwrite, doMaf,
             keepSeq, trans, inMemory):
    """Run the LOD benchmark: extract each level of detail, optionally make
    and compare MAFs, and collect per-step stats.

    Returns a dict: step -> [size_kb, nTop, nBottom, prec, recall,
    precNear, recallNear, scanTime], with step 0 holding the baseline
    numbers for the unmodified input HAL.
    """
    table = defaultdict(list)
    # Baseline row (step 0): the original HAL, perfect precision/recall.
    makeMaf(inHalPath, outDir, 0, overwrite, doMaf)
    table[0] = [os.path.getsize(inHalPath) / 1024]
    table[0] += list(getHalTotalSegments(inHalPath))
    table[0] += getPrecisionRecall(inHalPath, outDir, 0, False)
    table[0] += getScanTime(inHalPath, outDir, 0)
    if steps is None:
        steps = getSteps(inHalPath, maxBlock, scale)
    for stepIdx in range(1,len(steps)):
        step = steps[stepIdx]
        outPath = makePath(inHalPath, outDir, step, "lod", "hal")
        srcPath = inHalPath
        # With --trans, each LOD is derived from the previous LOD instead
        # of from the original HAL.
        if trans is True and stepIdx > 1:
            srcPath = makePath(inHalPath, outDir, steps[stepIdx-1],
                               "lod", "hal")
        if overwrite is True or not os.path.isfile(outPath):
            stepScale = (scale ** stepIdx)
            runHalLodExtract(srcPath, outPath, stepScale, keepSeq, inMemory)
        makeMaf(inHalPath, outDir, step, overwrite, doMaf)
        compMaf(inHalPath, outDir, step, overwrite, doMaf)
        table[step] = [os.path.getsize(outPath) / 1024]
        table[step] += list(getHalTotalSegments(outPath))
        table[step] += getPrecisionRecall(inHalPath, outDir, step, doMaf)
        table[step] += getScanTime(inHalPath, outDir, step)
    return table
def main(argv=None):
    """Parse command-line options, run the benchmark, and print the table."""
    if argv is None:
        argv = sys.argv
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("hal", help="input hal")
    parser.add_argument("outDir", help="output dir")
    parser.add_argument("--maxBlock",
                        help="maximum desired number of blocks to ever "
                        "display at once.", type=int,
                        default=500)
    parser.add_argument("--scale",
                        help="scaling factor between two successive levels"
                        " of detail", type=float,
                        default=10.0)
    parser.add_argument("--steps",
                        help="comma-separated list of sampling steps to test"
                        " for each level of "
                        "detail. Overrides --scale and --maxBlock options",
                        type=str, default=None)
    parser.add_argument("--overwrite",action="store_true", default=False)
    parser.add_argument("--maf",action="store_true", default=False)
    parser.add_argument("--keepSequences",action="store_true", default=False)
    parser.add_argument("--trans", help="Generate level of detail X from "
                        "X-1. By default, all levels of detail are generated "
                        "from the original HAL (X=0)",
                        action="store_true", default=False)
    parser.add_argument("--inMemory", help="Load entire hdf5 arrays into "
                        "memory, overriding cache.",
                        action="store_true", default=False)

    args = parser.parse_args()

    if not os.path.exists(args.outDir):
        os.makedirs(args.outDir)

    # MAF comparison requires sequence data in the extracted HALs.
    if args.maf is True:
        args.keepSequences = True

    steps = None
    if args.steps is not None:
        # Prepend step 0 (the unmodified baseline) to the user's list.
        steps = [0] + [int(x) for x in args.steps.split(",")]
        assert steps[1] > 0

    table = runSteps(args.hal, args.outDir, args.maxBlock, args.scale,
                     steps, args.overwrite, args.maf, args.keepSequences,
                     args.trans, args.inMemory)

    # print table
    printTable(table)
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| {
"content_hash": "ddcb01456093fc5dba43b20b239e7126",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 89,
"avg_line_length": 39.57,
"alnum_prop": 0.6000758150113723,
"repo_name": "glennhickey/hal",
"id": "11b012fe7e968611b234ebe155c4fef4ce922763",
"size": "8110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lod/halLodBenchmark.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "534"
},
{
"name": "C",
"bytes": "35807"
},
{
"name": "C++",
"bytes": "1432249"
},
{
"name": "CSS",
"bytes": "29478"
},
{
"name": "HTML",
"bytes": "3429146"
},
{
"name": "JavaScript",
"bytes": "45166"
},
{
"name": "Makefile",
"bytes": "27960"
},
{
"name": "Perl",
"bytes": "2517"
},
{
"name": "Python",
"bytes": "339888"
},
{
"name": "R",
"bytes": "2779"
},
{
"name": "Shell",
"bytes": "1286"
}
],
"symlink_target": ""
} |
"""
experiment_poincare_1a.py
Poincare map generation on Colluci Nunez
Author: Yuan Wang
"""
from thesis_utils import *
from thesis_defaults import *
from thesis_poincare_utils import *
from thesis_plot_utils import *
import scipy.integrate as integrate
import scipy.special as special
from scipy.integrate import quad
from scipy.optimize import newton
import numdifftools as nd
from evolution import *
from experiment import *
class ExperimentPoincare1a(Experiment):
    """Poincare map generation experiment (Colluci-Nunez system).

    Integrates the 4-D state (w, x, y, z) with explicit forward-Euler steps
    and records crossings of a configured hyperplane, then saves a Poincare
    plot of the crossing points.
    """

    def setParams(self, hyperplane = HyperPlane(1, 1, 1, 1, 4), T = 1000, start_pt = default_start):
        """Store the hyperplane, total simulation time T, and start point.

        NOTE(review): the HyperPlane default is built once at class
        definition time and shared across calls -- harmless as long as
        HyperPlane instances are immutable; confirm.
        """
        self.hyperplane = hyperplane
        self.params['T'] = T
        self.params['start_pt'] = start_pt
        self.saveParams()

    def run(self, T = None, dt = 0.01, stepCnt = 10000):
        """Simulate path, collecting Poincare crossings.

        If stepCnt is given, dt is derived from T/stepCnt; otherwise
        stepCnt is derived from T/dt.  (Fixed: compare with `is None`
        rather than `== None`.)
        """
        start_pt = self.params['start_pt']
        if T is None:
            T = self.params['T']
        if stepCnt is not None:
            dt = float(T) / stepCnt
        else:
            stepCnt = math.ceil(T / dt)
        # dt = 0.01
        # stepCnt = 100000

        # Need one more for the initial values
        ws = np.empty((stepCnt + 1,))
        xs = np.empty((stepCnt + 1,))
        ys = np.empty((stepCnt + 1,))
        zs = np.empty((stepCnt + 1,))
        crossings = np.empty((stepCnt + 1,))
        pts = np.empty((stepCnt + 1,))  # signed value of hyperplane at each step

        ws[0], xs[0], ys[0], zs[0] = start_pt[0], start_pt[1], start_pt[2], start_pt[3]
        current_pt = list(start_pt)
        crossings[0] = 0
        pts[0] = self.hyperplane(current_pt)
        intersect_checker = IntersectChecker(self.hyperplane)
        trace = [ws, xs, ys, zs]
        ## for tracking min/max/mean of path, relative to hyperplane

        # Stepping through "time" with explicit Euler updates.
        self.print("\n\nCrossings:")
        for i in range(stepCnt):
            # Derivatives of the W, X, Y, Z state
            derivs = self.evo(current_pt)
            old_pt = list(current_pt)
            ## compute new point
            for j in range(4):
                trace[j][i + 1] = old_pt[j] + (derivs[j] * dt)
                current_pt[j] = trace[j][i + 1]
            pts[i + 1] = self.hyperplane(current_pt)
            crossings[i + 1] = intersect_checker(current_pt)
            # print(hyperplane(pt))
            if crossings[i + 1] != 0:
                self.print((ws[i + 1], xs[i + 1], ys[i + 1], zs[i + 1]))

        # Summary of the hyperplane-relative path values.
        self.print("\nMax:")
        self.print(max(pts))
        self.print("Min:")
        self.print(min(pts))
        self.print("Av:")
        self.print(sum(pts) / len(pts))

        # Keep only the samples where a crossing occurred, then plot them.
        ws, xs, ys, zs = poincareExtract(ws, xs, ys, zs, crossings)
        # for i in range(len(ws)):
        #     self.print( "(" + str(ws[i]) + ", " + str(xs[i]) + ", " + str(ys[i]) + ", " + str(zs[i]) + ")" )
        self.savePlot(poincarePlot(ws, xs, ys, zs, str(self.hyperplane)))
        # if expmt == 'accumulate':
        #     return [ws, xs, ys, zs, crossings]
def main():
    """
    Testing: run the Poincare experiment once on the Colluci-Nunez system.
    """
    print("============")
    #evo = Evolution_1a(lmbda = lmbda_set_1)
    evo = Evolution_ColluciNunez()
    print(evo)
    expmt = ExperimentPoincare1a( evo = evo,
                                  title = "Poincare map generation, Colluci Nunez",
                                  descr = "Leveraging Poincare maps to gain insights about our system")
    # expmt.setParams(T = 4, start_pt = default_start)
    # Hyperplane coefficients chosen for this system; T = 4 time units.
    expmt.setParams(hyperplane = HyperPlane(-4, 12, 2, -10, -1.2),
                    T = 4,
                    start_pt = default_start )
    print("============")
    print(expmt)
    expmt.run(T = None, stepCnt = 1000)
if __name__=="__main__":
    # Run the demo experiment when invoked as a script.
    main()
| {
"content_hash": "923243172b5384ed52e1c369284c1475",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 110,
"avg_line_length": 27.268115942028984,
"alnum_prop": 0.5259101780494286,
"repo_name": "yuanagain/seniorthesis",
"id": "c9b8aece9d202af4f104f199a637d13292887f33",
"size": "3763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/experiment_poincare1a.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "246695"
},
{
"name": "C++",
"bytes": "3399079"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "9868"
},
{
"name": "HTML",
"bytes": "128332"
},
{
"name": "JavaScript",
"bytes": "23881"
},
{
"name": "Jupyter Notebook",
"bytes": "86661"
},
{
"name": "Makefile",
"bytes": "76057"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "22981564"
},
{
"name": "Shell",
"bytes": "4071"
}
],
"symlink_target": ""
} |
"""
WSGI config for edjango project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Ensure Django knows which settings module to use before the WSGI
# application object is constructed.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edjango.settings")

# Module-level WSGI callable served by the web server.
application = get_wsgi_application()
| {
"content_hash": "c985a3bdcb4a80d393e4d39961e5fd10",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.4375,
"alnum_prop": 0.7698209718670077,
"repo_name": "sarusso/eDjango",
"id": "3abe3f5ce28d647ab3fbe988daf7d90b7b555743",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edjango/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "33310"
}
],
"symlink_target": ""
} |
def main():
    """Run the find_diff self-test suite; returns 0 on completion."""
    # Test suite: [str1, str2, expected extra char]; the all-None row is
    # expected to raise TypeError.
    tests = [
        [None, None, None],  # Should throw a TypeError
        ['abcd', 'abcde', 'e'],
        ['aaabbcdd', 'abdbacade', 'e'],
        ['abdbacade', 'aaabbcdd', 'e']
    ]
    for item in tests:
        try:
            temp_result = find_diff(item[0], item[1])
            if temp_result[0] == item[2]:
                print('PASSED: find_diff({}, {}) returned {}'.format(item[0], item[1], temp_result))
            else:
                print('FAILED: find_diff({}, {}) returned {}, should have returned {}'.format(item[0], item[1], temp_result, item[2]))
        except TypeError:
            print('PASSED TypeError test')
    # (Removed an unreachable bare `return` that followed this statement.)
    return 0
def find_diff(str1, str2):
    '''
    Finds the one additional character in str2 vs. str1
    Input: two strings
    Output: char (one additional character in the longer string)
    Raises: TypeError if either input is None
    Assumes the longer string contains all characters of the shorter one
    (with multiplicity), plus exactly one additional character
    '''
    if str1 is None or str2 is None:
        raise TypeError
    shorter, longer = (str1, str2) if len(str1) < len(str2) else (str2, str1)
    # Use the difference of character-code sums rather than a set
    # difference: the set approach fails when the extra character already
    # occurs in the shorter string (e.g. 'ab' vs 'abb' -> empty set).
    return chr(sum(map(ord, longer)) - sum(map(ord, shorter)))
if __name__ == '__main__':
    # Run the self-test suite when executed as a script.
    main()
| {
"content_hash": "c7610c0bf6b5e3873fc2771dfa29ea24",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 134,
"avg_line_length": 27.863636363636363,
"alnum_prop": 0.5570962479608483,
"repo_name": "HKuz/Test_Code",
"id": "2380ef8fbb0690534d6a375fb9affc8fce7fca07",
"size": "1253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Problems/stringDiff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "259192"
}
],
"symlink_target": ""
} |
import numpy
from .base import Sequence
class Binary(Sequence):
    """Binary sequence: one all-minimum (black) picture followed by one
    all-maximum (white) picture at 1-bit depth."""
    # TODO complete docstring.

    def __init__(self, picture_time):
        nb_bit_planes = 1  # bit depth of the pictures
        nb_pictures = 2  # number of pictures
        Sequence.__init__(self, nb_bit_planes, nb_pictures)
        self.picture_time = picture_time

    def get_user_array(self):
        """Get stimulus frames."""
        dtype = numpy.uint8
        width, height = self.device.get_resolution()
        size = width * height
        info = numpy.iinfo(dtype)
        # Outer product of [min, max] with a ones-vector: `size` minimum
        # values followed by `size` maximum values.
        extremes = numpy.array([info.min, info.max], dtype=info.dtype)
        frames = numpy.kron(extremes, numpy.ones(size, dtype=info.dtype))
        return frames
| {
"content_hash": "82747e3899eb1b7e0ccaec48bf27184c",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 98,
"avg_line_length": 27.5,
"alnum_prop": 0.6012987012987013,
"repo_name": "BaptisteLefebvre/pyalp",
"id": "139643e5ade389bf2976cc4afba1d9bf8d4450b9",
"size": "770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyalp/sequence/binary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127366"
}
],
"symlink_target": ""
} |
import setuptools
# Read the long description from the README so the package index shows it.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="PySysC-SCC",
    version="0.0.1",
    author="MINRES Technologies GmbH",
    author_email="info@minres.com",
    # Fixed typo in the user-visible summary: "intergration" -> "integration".
    description="SCC python modules for integration in PySysC",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Minres/SystemC-Components.git",
    project_urls={
        "Bug Tracker": "https://github.com/Minres/SystemC-Components.git/issues",
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3.6",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
    ],
    package_dir={"": "src"},
    packages=setuptools.find_namespace_packages(where="src"),
    python_requires=">=3.6",
    install_requires=[
        'PySysC'
    ],
)
"content_hash": "e85007192dea4215e03eef455d17929e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 81,
"avg_line_length": 33.70967741935484,
"alnum_prop": 0.6392344497607656,
"repo_name": "Minres/SystemC-Components",
"id": "18780586b95d617ffa67c62a590f4227297b2db6",
"size": "1045",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "contrib/pysysc/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6897"
},
{
"name": "C++",
"bytes": "935787"
},
{
"name": "CMake",
"bytes": "87821"
},
{
"name": "GDB",
"bytes": "496"
},
{
"name": "HTML",
"bytes": "4607"
},
{
"name": "JavaScript",
"bytes": "1486413"
},
{
"name": "Python",
"bytes": "3745"
},
{
"name": "Shell",
"bytes": "783"
}
],
"symlink_target": ""
} |
import functools
import eventlet
import netaddr
import six
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
def ensure_string_keys(d):
    """Return a copy of dict *d* with every key coerced to a native str.

    Needed because keyword expansion (``cls(**d)``) rejects unicode keys
    on Python 2 (http://bugs.python.org/issue4978).
    """
    # Use items() instead of the Python-2-only iteritems() so this works
    # under both Python 2.7 and Python 3.
    return {str(k): v for k, v in d.items()}
# Constants for the 'vif_type' field in VIF class
# Each value names one virtual-interface plugging mechanism.
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_IVS = 'ivs'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_MLNX_DIRECT = 'mlnx_direct'
VIF_TYPE_MIDONET = 'midonet'
VIF_TYPE_OTHER = 'other'
# Constant for max length of network interface names
# eg 'bridge' in the Network class or 'devname' in
# the VIF class
NIC_NAME_LEN = 14
class Model(dict):
    """Base dict subclass shared by the network model classes.

    Adds a readable repr plus helpers that stash any extra keyword
    arguments under a 'meta' sub-dict.
    """

    def __repr__(self):
        return '{0}({1})'.format(self.__class__.__name__,
                                 dict.__repr__(self))

    def _set_meta(self, kwargs):
        # Honor an explicit meta=... kwarg, then fold every remaining
        # keyword argument into it as free-form metadata.
        meta = kwargs.pop('meta', {})
        meta.update(kwargs)
        self['meta'] = meta

    def get_meta(self, key, default=None):
        """calls get(key, default) on self['meta']."""
        return self['meta'].get(key, default)
class IP(Model):
    """Represents an IP address in Nova."""

    def __init__(self, address=None, type=None, **kwargs):
        super(IP, self).__init__()
        self['address'] = address
        self['type'] = type
        self['version'] = kwargs.pop('version', None)
        self._set_meta(kwargs)
        # Derive the IP version from the address when not supplied.
        if self['address'] and not self['version']:
            try:
                self['version'] = netaddr.IPAddress(self['address']).version
            except netaddr.AddrFormatError:
                raise exception.InvalidIpAddressError(
                    _("Invalid IP format %s") % self['address'])

    def __eq__(self, other):
        return all(self[field] == other[field]
                   for field in ('address', 'type', 'version'))

    def __ne__(self, other):
        return not self == other

    def is_in_subnet(self, subnet):
        """Return True when this address falls inside subnet's CIDR."""
        if not (self['address'] and subnet['cidr']):
            return False
        return (netaddr.IPAddress(self['address']) in
                netaddr.IPNetwork(subnet['cidr']))

    @classmethod
    def hydrate(cls, ip):
        """Rebuild an IP from its dict form; falsy input yields None."""
        return cls(**ensure_string_keys(ip)) if ip else None
class FixedIP(IP):
    """Represents a Fixed IP address in Nova."""

    def __init__(self, floating_ips=None, **kwargs):
        super(FixedIP, self).__init__(**kwargs)
        self['floating_ips'] = floating_ips or []
        # Default the address type when the caller did not provide one.
        if not self['type']:
            self['type'] = 'fixed'

    def add_floating_ip(self, floating_ip):
        """Associate a floating IP, ignoring duplicates."""
        if floating_ip not in self['floating_ips']:
            self['floating_ips'].append(floating_ip)

    def floating_ip_addresses(self):
        """Return the address of every associated floating IP."""
        return [fip['address'] for fip in self['floating_ips']]

    @staticmethod
    def hydrate(fixed_ip):
        obj = FixedIP(**ensure_string_keys(fixed_ip))
        obj['floating_ips'] = [IP.hydrate(fip)
                               for fip in obj['floating_ips']]
        return obj

    def __eq__(self, other):
        fields = ('address', 'type', 'version', 'floating_ips')
        return all(self[f] == other[f] for f in fields)

    def __ne__(self, other):
        return not self == other
class Route(Model):
    """Represents an IP Route in Nova."""

    def __init__(self, cidr=None, gateway=None, interface=None, **kwargs):
        super(Route, self).__init__()
        self.update(cidr=cidr, gateway=gateway, interface=interface)
        self._set_meta(kwargs)

    @classmethod
    def hydrate(cls, route):
        """Rebuild a Route (and its gateway IP) from dict form."""
        obj = cls(**ensure_string_keys(route))
        obj['gateway'] = IP.hydrate(obj['gateway'])
        return obj
class Subnet(Model):
    """Represents a Subnet in Nova."""

    def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
                 routes=None, **kwargs):
        super(Subnet, self).__init__()
        self['cidr'] = cidr
        self['dns'] = dns or []
        self['gateway'] = gateway
        self['ips'] = ips or []
        self['routes'] = routes or []
        self['version'] = kwargs.pop('version', None)
        self._set_meta(kwargs)
        # Derive the IP version from the CIDR when not given explicitly.
        if self['cidr'] and not self['version']:
            self['version'] = netaddr.IPNetwork(self['cidr']).version

    def __eq__(self, other):
        fields = ('cidr', 'dns', 'gateway', 'ips', 'routes', 'version')
        return all(self[f] == other[f] for f in fields)

    def __ne__(self, other):
        return not self == other

    def _append_unique(self, key, value):
        # Shared helper: append to self[key] unless already present.
        if value not in self[key]:
            self[key].append(value)

    def add_route(self, new_route):
        self._append_unique('routes', new_route)

    def add_dns(self, dns):
        self._append_unique('dns', dns)

    def add_ip(self, ip):
        self._append_unique('ips', ip)

    def as_netaddr(self):
        """Convenience helper returning the cidr as a netaddr object."""
        return netaddr.IPNetwork(self['cidr'])

    @classmethod
    def hydrate(cls, subnet):
        """Rebuild a Subnet and all of its nested objects from dict form."""
        obj = cls(**ensure_string_keys(subnet))
        obj['dns'] = [IP.hydrate(entry) for entry in obj['dns']]
        obj['ips'] = [FixedIP.hydrate(entry) for entry in obj['ips']]
        obj['routes'] = [Route.hydrate(entry) for entry in obj['routes']]
        obj['gateway'] = IP.hydrate(obj['gateway'])
        return obj
class Network(Model):
    """Represents a Network in Nova."""

    def __init__(self, id=None, bridge=None, label=None,
                 subnets=None, **kwargs):
        super(Network, self).__init__()
        self['id'] = id
        self['bridge'] = bridge
        self['label'] = label
        self['subnets'] = subnets or []
        self._set_meta(kwargs)

    def add_subnet(self, subnet):
        """Attach a subnet, ignoring duplicates."""
        if subnet not in self['subnets']:
            self['subnets'].append(subnet)

    @classmethod
    def hydrate(cls, network):
        # Falsy input (None/empty) is passed through unchanged.
        if not network:
            return network
        obj = cls(**ensure_string_keys(network))
        obj['subnets'] = [Subnet.hydrate(subnet)
                          for subnet in obj['subnets']]
        return obj

    def __eq__(self, other):
        fields = ('id', 'bridge', 'label', 'subnets')
        return all(self[f] == other[f] for f in fields)

    def __ne__(self, other):
        return not self == other
class VIF8021QbgParams(Model):
    """Represents the parameters for a 802.1qbg VIF."""

    def __init__(self, managerid, typeid, typeidversion, instanceid):
        # All four parameters are stored directly as dict entries.
        self.update(managerid=managerid, typeid=typeid,
                    typeidversion=typeidversion, instanceid=instanceid)
class VIF8021QbhParams(Model):
    """Represents the parameters for a 802.1qbh VIF."""

    def __init__(self, profileid):
        self.update(profileid=profileid)
class VIF(Model):
    """Represents a Virtual Interface in Nova."""
    def __init__(self, id=None, address=None, network=None, type=None,
                 devname=None, ovs_interfaceid=None,
                 qbh_params=None, qbg_params=None, active=False,
                 **kwargs):
        super(VIF, self).__init__()
        self['id'] = id
        # MAC address of the interface.
        self['address'] = address
        # Normalize falsy networks (e.g. {}) to None.
        self['network'] = network or None
        self['type'] = type
        self['devname'] = devname
        self['ovs_interfaceid'] = ovs_interfaceid
        self['qbh_params'] = qbh_params
        self['qbg_params'] = qbg_params
        self['active'] = active
        # Any remaining kwargs are stashed under self['meta'].
        self._set_meta(kwargs)
    def __eq__(self, other):
        keys = ['id', 'address', 'network', 'type', 'devname',
                'ovs_interfaceid', 'qbh_params', 'qbg_params',
                'active']
        return all(self[k] == other[k] for k in keys)
    def __ne__(self, other):
        return not self.__eq__(other)
    def fixed_ips(self):
        # Flatten all FixedIPs of every subnet on this VIF's network.
        # NOTE: raises TypeError if self['network'] is None.
        return [fixed_ip for subnet in self['network']['subnets']
                for fixed_ip in subnet['ips']]
    def floating_ips(self):
        # Flatten the floating IPs attached to each fixed IP.
        return [floating_ip for fixed_ip in self.fixed_ips()
                for floating_ip in fixed_ip['floating_ips']]
    def labeled_ips(self):
        """Returns the list of all IPs
        The return value looks like this flat structure::
            {'network_label': 'my_network',
             'network_id': 'n8v29837fn234782f08fjxk3ofhb84',
             'ips': [{'address': '123.123.123.123',
                      'version': 4,
                      'type': 'fixed',
                      'meta': {...}},
                     {'address': '124.124.124.124',
                      'version': 4,
                      'type': 'floating',
                      'meta': {...}},
                     {'address': 'fe80::4',
                      'version': 6,
                      'type': 'fixed',
                      'meta': {...}}]
        """
        if self['network']:
            # remove unnecessary fields on fixed_ips
            # Round-tripping a FixedIP through IP(**...) moves its unknown
            # keys (including 'floating_ips') into 'meta' via _set_meta.
            ips = [IP(**ensure_string_keys(ip)) for ip in self.fixed_ips()]
            for ip in ips:
                # remove floating ips from IP, since this is a flat structure
                # of all IPs
                del ip['meta']['floating_ips']
            # add floating ips to list (if any)
            ips.extend(self.floating_ips())
            return {'network_label': self['network']['label'],
                    'network_id': self['network']['id'],
                    'ips': ips}
        # NOTE: returns a list here but a dict above; callers must handle both.
        return []
    @classmethod
    def hydrate(cls, vif):
        # Rebuild a VIF (and its nested Network) from dict form.
        vif = cls(**ensure_string_keys(vif))
        vif['network'] = Network.hydrate(vif['network'])
        return vif
def get_netmask(ip, subnet):
    """Returns the netmask appropriate for injection into a guest.

    For IPv4 this is a dotted-quad netmask string; for IPv6 it is the
    integer prefix length, which is what guests expect.
    """
    if ip['version'] == 4:
        return str(subnet.as_netaddr().netmask)
    # Use the public prefixlen property instead of reaching into the
    # private _prefixlen attribute; it returns the same integer.
    return subnet.as_netaddr().prefixlen
class NetworkInfo(list):
    """Stores and manipulates network information for a Nova instance."""

    # A NetworkInfo is simply a list of VIF objects.

    def fixed_ips(self):
        """Returns all fixed_ips without floating_ips attached."""
        ips = []
        for vif in self:
            ips.extend(vif.fixed_ips())
        return ips

    def floating_ips(self):
        """Returns all floating_ips."""
        ips = []
        for vif in self:
            ips.extend(vif.floating_ips())
        return ips

    @classmethod
    def hydrate(cls, network_info):
        # Accept either a JSON string or an already-decoded list of dicts.
        if isinstance(network_info, six.string_types):
            network_info = jsonutils.loads(network_info)
        return cls(VIF.hydrate(vif) for vif in network_info)

    def json(self):
        """Serialize the whole list of VIFs to a JSON string."""
        return jsonutils.dumps(self)
class NetworkInfoAsyncWrapper(NetworkInfo):
    """Wrapper around NetworkInfo that allows retrieving NetworkInfo
    in an async manner.
    This allows one to start querying for network information before
    you know you will need it. If you have a long-running
    operation, this allows the network model retrieval to occur in the
    background. When you need the data, it will ensure the async
    operation has completed.
    As an example:
    def allocate_net_info(arg1, arg2)
        return call_neutron_to_allocate(arg1, arg2)
    network_info = NetworkInfoAsyncWrapper(allocate_net_info, arg1, arg2)
    [do a long running operation -- real network_info will be retrieved
    in the background]
    [do something with network_info]
    """
    def __init__(self, async_method, *args, **kwargs):
        # Kick off the retrieval immediately in an eventlet greenthread.
        self._gt = eventlet.spawn(async_method, *args, **kwargs)
        methods = ['json', 'fixed_ips', 'floating_ips']
        for method in methods:
            fn = getattr(self, method)
            # Rebind each public method on the instance to a wrapper that
            # first waits for the greenthread to complete.
            wrapper = functools.partial(self._sync_wrapper, fn)
            functools.update_wrapper(wrapper, fn)
            setattr(self, method, wrapper)
    def _sync_wrapper(self, wrapped, *args, **kwargs):
        """Synchronize the model before running a method."""
        self.wait()
        return wrapped(*args, **kwargs)
    # The dunder methods below cannot be rebound per-instance (Python looks
    # them up on the type), so each one synchronizes explicitly.
    def __getitem__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__getitem__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __iter__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__iter__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __len__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__len__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __str__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__str__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __repr__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__repr__
        return self._sync_wrapper(fn, *args, **kwargs)
    def wait(self, do_raise=True):
        """Wait for async call to finish."""
        if self._gt is not None:
            try:
                # NOTE(comstud): This looks funky, but this object is
                # subclassed from list. In other words, 'self' is really
                # just a list with a bunch of extra methods. So this
                # line just replaces the current list (which should be
                # empty) with the result.
                self[:] = self._gt.wait()
            except Exception:
                if do_raise:
                    raise
            finally:
                # Only ever wait once; subsequent calls are no-ops.
                self._gt = None
| {
"content_hash": "101dfa576472242eb6c053c469697cc9",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 79,
"avg_line_length": 32.719424460431654,
"alnum_prop": 0.5653034300791556,
"repo_name": "ycl2045/nova-master",
"id": "cba086dfca019c65e5e809bd06deb0db3cf203b8",
"size": "14280",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/network/model.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "2035"
},
{
"name": "Python",
"bytes": "13677408"
},
{
"name": "R",
"bytes": "7817"
},
{
"name": "Ruby",
"bytes": "851"
},
{
"name": "Shell",
"bytes": "14571"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Auto-generated migration: make sms.pub_date default to datetime.now."""

    dependencies = [
        ('twilio', '0002_sms_pub_date'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sms',
            name='pub_date',
            # Callable default: evaluated at save time, not at migration time.
            field=models.DateTimeField(default=datetime.datetime.now, verbose_name=b'date published'),
        ),
    ]
| {
"content_hash": "06ca39d787eecfca648ab885c31af072",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 102,
"avg_line_length": 23.05263157894737,
"alnum_prop": 0.6210045662100456,
"repo_name": "niksolaz/PySms",
"id": "e04b58d4b01aefa60b3d42717c6d07edd378e5d0",
"size": "462",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PySms/twilio/migrations/0003_auto_20150403_1431.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2751"
},
{
"name": "Python",
"bytes": "9365"
}
],
"symlink_target": ""
} |
class SystemProperties:
    """Thin wrapper around a dict of system property key/value pairs."""

    def __init__(self, properties=None):
        # Start from an empty mapping unless initial properties are given.
        self.system_properties = properties if properties else {}

    def add_property(self, key, value):
        """Set (or overwrite) a single property."""
        self.system_properties[key] = value

    def to_dict(self):
        """Return the underlying properties mapping."""
        return self.system_properties
| {
"content_hash": "6959472c593fc4c935a925c976f9ee9d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 51,
"avg_line_length": 29.727272727272727,
"alnum_prop": 0.6330275229357798,
"repo_name": "tbeckham/DeploymentManager",
"id": "e4b790ac372fd34724d96612d16c937e77f3285b",
"size": "941",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "config_manager/eucalyptus/system_properties.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "15666"
},
{
"name": "JavaScript",
"bytes": "9683"
},
{
"name": "Python",
"bytes": "132178"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2011, The MITRE Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed by the author.
4. Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import sys
import StringIO
class RedirectStdError(object):
    """Context manager that captures sys.stderr into a StringIO buffer."""

    def __init__(self):
        self._stderr = StringIO.StringIO()

    def __enter__(self):
        # Flush and stash the real stderr, then swap in the capture buffer.
        self.save_stderr = sys.stderr
        self.save_stderr.flush()
        sys.stderr = self._stderr
        return sys.stderr

    def __exit__(self, exc_type, exc_value, traceback):
        # Flush the capture buffer and restore the original stream.
        self._stderr.flush()
        sys.stderr = self.save_stderr
class RedirectStdOut(object):
    """Context manager that captures sys.stdout into a StringIO buffer."""

    def __init__(self):
        self._stdout = StringIO.StringIO()

    def __enter__(self):
        # Flush and stash the real stdout, then swap in the capture buffer.
        self.save_stdout = sys.stdout
        self.save_stdout.flush()
        sys.stdout = self._stdout
        return sys.stdout

    def __exit__(self, exc_type, exc_value, traceback):
        # Flush the capture buffer and restore the original stream.
        self._stdout.flush()
        sys.stdout = self.save_stdout
| {
"content_hash": "a4be2e28cfcd619a18e509a5e962ac3a",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 77,
"avg_line_length": 32.72727272727273,
"alnum_prop": 0.7095238095238096,
"repo_name": "nemonik/Intellect",
"id": "ed85b1a533b274087bdb6a4cb0039c19161824f2",
"size": "2554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intellect/IO.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "GAP",
"bytes": "23349"
},
{
"name": "Python",
"bytes": "480350"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
import views
# URL routes: the index view plus several static template-only pages.
# NOTE(review): patterns() was deprecated in Django 1.8+ — confirm the
# project's Django version before modernizing to a plain list.
urlpatterns = patterns(
    '',
    url(r'^$', views.index),
    url(r'^donate.html$', TemplateView.as_view(template_name='donate.html')),
    url(r'^about.html$', TemplateView.as_view(template_name='about.html')),
    url(r'^todo-list.html$',
        TemplateView.as_view(template_name='todo-list.html')),
    url(r'^hiring.html$', TemplateView.as_view(template_name='hiring.html')),
    url(r'^privatepolicy.html$', TemplateView.as_view(
        template_name='privatepolicy.html')),
    url(r'^404.html$', TemplateView.as_view(template_name='404.html')),
)
| {
"content_hash": "64bdf10114ee63b1aa22430c6315d73c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 77,
"avg_line_length": 39.05882352941177,
"alnum_prop": 0.6822289156626506,
"repo_name": "leVirve/NTHU_Course",
"id": "a0fa254832f4fc410a641033d78ba905c2a518e3",
"size": "664",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "index/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "61977"
},
{
"name": "HTML",
"bytes": "35840"
},
{
"name": "JavaScript",
"bytes": "16495"
},
{
"name": "Python",
"bytes": "57040"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.volumes \
.cgroups import workflows as vol_cgroup_workflows
from openstack_dashboard.dashboards.project.volumes \
.cgroups import forms as vol_cgroup_forms
from openstack_dashboard.dashboards.project.volumes \
.cgroups import tables as vol_cgroup_tables
from openstack_dashboard.dashboards.project.volumes \
.cgroups import tabs as vol_cgroup_tabs
CGROUP_INFO_FIELDS = ("name",
"description")
INDEX_URL = "horizon:project:volumes:index"
class CreateView(workflows.WorkflowView):
    """Workflow view for creating a new volume consistency group."""
    workflow_class = vol_cgroup_workflows.CreateCGroupWorkflow
    template_name = 'project/volumes/cgroups/create.html'
    page_title = _("Create Volume Consistency Group")
class UpdateView(forms.ModalFormView):
    """Modal form view for editing a consistency group's name/description."""

    template_name = 'project/volumes/cgroups/update.html'
    modal_header = _("Edit Consistency Group")
    form_class = vol_cgroup_forms.UpdateForm
    success_url = reverse_lazy('horizon:project:volumes:index')
    submit_url = "horizon:project:volumes:cgroups:update"
    submit_label = _("Submit")
    page_title = modal_header

    def get_initial(self):
        cgroup = self.get_object()
        return {
            'cgroup_id': self.kwargs["cgroup_id"],
            'name': cgroup.name,
            'description': cgroup.description,
        }

    def get_context_data(self, **kwargs):
        context = super(UpdateView, self).get_context_data(**kwargs)
        cgroup_id = self.kwargs['cgroup_id']
        context['cgroup_id'] = cgroup_id
        context['submit_url'] = reverse(self.submit_url, args=(cgroup_id,))
        return context

    def get_object(self):
        # Cache the consistency group; on API failure redirect to the index.
        cgroup_id = self.kwargs['cgroup_id']
        try:
            self._object = cinder.volume_cgroup_get(self.request, cgroup_id)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve consistency group '
                                'details.'),
                              redirect=reverse(INDEX_URL))
        return self._object
class RemoveVolumesView(forms.ModalFormView):
    """Modal form view for removing volumes from a consistency group."""

    template_name = 'project/volumes/cgroups/remove_vols.html'
    modal_header = _("Remove Volumes from Consistency Group")
    form_class = vol_cgroup_forms.RemoveVolsForm
    success_url = reverse_lazy('horizon:project:volumes:index')
    submit_url = "horizon:project:volumes:cgroups:remove_volumes"
    submit_label = _("Submit")
    page_title = modal_header

    def get_initial(self):
        cgroup = self.get_object()
        return {
            'cgroup_id': self.kwargs["cgroup_id"],
            'name': cgroup.name,
        }

    def get_context_data(self, **kwargs):
        context = super(RemoveVolumesView, self).get_context_data(**kwargs)
        cgroup_id = self.kwargs['cgroup_id']
        context['cgroup_id'] = cgroup_id
        context['submit_url'] = reverse(self.submit_url, args=(cgroup_id,))
        return context

    def get_object(self):
        # Cache the consistency group; on API failure redirect to the index.
        cgroup_id = self.kwargs['cgroup_id']
        try:
            self._object = cinder.volume_cgroup_get(self.request, cgroup_id)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve consistency group '
                                'details.'),
                              redirect=reverse(INDEX_URL))
        return self._object
class DeleteView(forms.ModalFormView):
    """Modal form view for deleting a consistency group."""

    template_name = 'project/volumes/cgroups/delete.html'
    modal_header = _("Delete Consistency Group")
    form_class = vol_cgroup_forms.DeleteForm
    success_url = reverse_lazy('horizon:project:volumes:index')
    submit_url = "horizon:project:volumes:cgroups:delete"
    submit_label = modal_header
    page_title = modal_header

    def get_initial(self):
        cgroup = self.get_object()
        return {
            'cgroup_id': self.kwargs["cgroup_id"],
            'name': cgroup.name,
        }

    def get_context_data(self, **kwargs):
        context = super(DeleteView, self).get_context_data(**kwargs)
        cgroup_id = self.kwargs['cgroup_id']
        context['cgroup_id'] = cgroup_id
        context['submit_url'] = reverse(self.submit_url, args=(cgroup_id,))
        return context

    def get_object(self):
        # Cache the consistency group; on API failure redirect to the index.
        cgroup_id = self.kwargs['cgroup_id']
        try:
            self._object = cinder.volume_cgroup_get(self.request, cgroup_id)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve consistency group '
                                'details.'),
                              redirect=reverse(INDEX_URL))
        return self._object
class ManageView(workflows.WorkflowView):
    """Workflow view for managing the volumes of a consistency group."""

    workflow_class = vol_cgroup_workflows.UpdateCGroupWorkflow

    def get_context_data(self, **kwargs):
        context = super(ManageView, self).get_context_data(**kwargs)
        context['cgroup_id'] = self.kwargs["cgroup_id"]
        return context

    def _get_object(self, *args, **kwargs):
        # Fetch the consistency group; on API failure redirect to the index.
        group_id = self.kwargs['cgroup_id']
        try:
            group = cinder.volume_cgroup_get(self.request, group_id)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve consistency group '
                                'details.'),
                              redirect=reverse(INDEX_URL))
        return group

    def get_initial(self):
        group = self._get_object()
        return {'cgroup_id': group.id,
                'name': group.name,
                'description': group.description,
                'vtypes': getattr(group, "volume_types")}
class CreateSnapshotView(forms.ModalFormView):
    """Modal form view that snapshots every volume in a consistency group,
    enforcing the tenant's snapshot quota before showing the form.
    """
    form_class = vol_cgroup_forms.CreateSnapshotForm
    modal_header = _("Create Consistency Group Snapshot")
    template_name = 'project/volumes/cgroups/create_snapshot.html'
    submit_label = _("Create Snapshot")
    submit_url = "horizon:project:volumes:cgroups:create_snapshot"
    success_url = reverse_lazy('horizon:project:volumes:cg_snapshots_tab')
    page_title = modal_header
    def get_context_data(self, **kwargs):
        context = super(CreateSnapshotView, self).get_context_data(**kwargs)
        context['cgroup_id'] = self.kwargs['cgroup_id']
        args = (self.kwargs['cgroup_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        try:
            # get number of snapshots we will be creating
            # (one snapshot per volume in the consistency group)
            search_opts = {'consistencygroup_id': context['cgroup_id']}
            volumes = api.cinder.volume_list(self.request,
                                             search_opts=search_opts)
            num_volumes = len(volumes)
            usages = quotas.tenant_limit_usages(self.request)
            if usages['snapshotsUsed'] + num_volumes > \
                    usages['maxTotalSnapshots']:
                raise ValueError(_('Unable to create snapshots due to '
                                   'exceeding snapshot quota limit.'))
            else:
                usages['numRequestedItems'] = num_volumes
            context['usages'] = usages
        except ValueError as e:
            # Quota exceeded: surface the message and abort rendering.
            # NOTE(review): e.message is Python 2 only — confirm before
            # porting to Python 3.
            exceptions.handle(self.request, e.message)
            return None
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve consistency '
                                'group information.'))
        return context
    def get_initial(self):
        return {'cgroup_id': self.kwargs["cgroup_id"]}
class CloneCGroupView(forms.ModalFormView):
    """Modal form view that clones a consistency group (and all of its
    volumes), enforcing the tenant's volume quota before showing the form.
    """
    form_class = vol_cgroup_forms.CloneCGroupForm
    modal_header = _("Clone Consistency Group")
    template_name = 'project/volumes/cgroups/clone_cgroup.html'
    submit_label = _("Clone Consistency Group")
    submit_url = "horizon:project:volumes:cgroups:clone_cgroup"
    success_url = reverse_lazy('horizon:project:volumes:cgroups_tab')
    page_title = modal_header
    def get_context_data(self, **kwargs):
        context = super(CloneCGroupView, self).get_context_data(**kwargs)
        context['cgroup_id'] = self.kwargs['cgroup_id']
        args = (self.kwargs['cgroup_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        try:
            # get number of volumes we will be creating
            # (cloning duplicates every volume in the group)
            cgroup_id = context['cgroup_id']
            search_opts = {'consistencygroup_id': cgroup_id}
            volumes = api.cinder.volume_list(self.request,
                                             search_opts=search_opts)
            num_volumes = len(volumes)
            usages = quotas.tenant_limit_usages(self.request)
            if usages['volumesUsed'] + num_volumes > \
                    usages['maxTotalVolumes']:
                raise ValueError(_('Unable to create consistency group due to '
                                   'exceeding volume quota limit.'))
            else:
                usages['numRequestedItems'] = num_volumes
            context['usages'] = usages
        except ValueError as e:
            # Quota exceeded: surface the message and abort rendering.
            # NOTE(review): e.message is Python 2 only — confirm before
            # porting to Python 3.
            exceptions.handle(self.request, e.message)
            return None
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve consistency '
                                'group information.'))
        return context
    def get_initial(self):
        return {'cgroup_id': self.kwargs["cgroup_id"]}
class DetailView(tabs.TabView):
    """Tabbed detail view for a single consistency group."""
    tab_group_class = vol_cgroup_tabs.CGroupsDetailTabs
    template_name = 'horizon/common/_detail.html'
    page_title = "{{ cgroup.name|default:cgroup.id }}"
    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        cgroup = self.get_data()
        # Render the row actions of the cgroups table for this group so the
        # detail page exposes the same action dropdown.
        table = vol_cgroup_tables.VolumeCGroupsTable(self.request)
        context["cgroup"] = cgroup
        context["url"] = self.get_redirect_url()
        context["actions"] = table.render_row_actions(cgroup)
        return context
    @memoized.memoized_method
    def get_data(self):
        # Fetch the cgroup once per request (memoized) and enrich it with
        # resolved volume-type names and member volume names.
        try:
            cgroup_id = self.kwargs['cgroup_id']
            cgroup = api.cinder.volume_cgroup_get(self.request,
                                                  cgroup_id)
            cgroup.volume_type_names = []
            for vol_type_id in cgroup.volume_types:
                vol_type = api.cinder.volume_type_get(self.request,
                                                      vol_type_id)
                cgroup.volume_type_names.append(vol_type.name)
            cgroup.volume_names = []
            search_opts = {'consistencygroup_id': cgroup_id}
            volumes = api.cinder.volume_list(self.request,
                                             search_opts=search_opts)
            for volume in volumes:
                cgroup.volume_names.append(volume.name)
        except Exception:
            # Any API failure sends the user back to the volumes index.
            redirect = self.get_redirect_url()
            exceptions.handle(self.request,
                              _('Unable to retrieve consistency group '
                                'details.'),
                              redirect=redirect)
        return cgroup
    @staticmethod
    def get_redirect_url():
        return reverse('horizon:project:volumes:index')
    def get_tabs(self, request, *args, **kwargs):
        cgroup = self.get_data()
        return self.tab_group_class(request, cgroup=cgroup, **kwargs)
| {
"content_hash": "3e69d0305a285765659294ddc0db9e49",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 79,
"avg_line_length": 39.651006711409394,
"alnum_prop": 0.5987643872714963,
"repo_name": "sandvine/horizon",
"id": "5f120e122dd80238f6848ba7b51f8c6a36fcc36a",
"size": "12389",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/volumes/cgroups/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "95623"
},
{
"name": "HTML",
"bytes": "550017"
},
{
"name": "JavaScript",
"bytes": "1710525"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "5260446"
},
{
"name": "Shell",
"bytes": "19049"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import copy
import json
import re
from urllib.parse import urlparse
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
class Vg(Service, OpenGraphThumbMixin):
    """Stream extractor for vg.no / vgtv.no video pages."""

    supported_domains = ["vg.no", "vgtv.no"]

    def get(self):
        """Yield stream candidates (HDS, HLS, MP4) for the page's video."""
        page = self.get_urldata()
        match = re.search(r'data-videoid="([^"]+)"', page)
        if not match:
            # Fall back to the numeric id embedded in the URL fragment.
            match = re.search(r"video/(\d+)/", urlparse(self.url).fragment)
            if not match:
                yield ServiceError("Can't find video file for: {}".format(self.url))
                return
        videoid = match.group(1)
        api_url = "http://svp.vg.no/svp/api/v1/vgtv/assets/{}?appName=vgtv-website".format(videoid)
        jsondata = json.loads(self.http.request("get", api_url).text)
        self.output["title"] = jsondata["title"]
        stream_urls = jsondata["streamUrls"]
        if "hds" in stream_urls:
            streams = hdsparse(
                self.config,
                self.http.request("get", stream_urls["hds"], params={"hdcore": "3.7.0"}),
                stream_urls["hds"],
                output=self.output,
            )
            for key in list(streams.keys()):
                yield streams[key]
        if "hls" in stream_urls:
            streams = hlsparse(
                self.config, self.http.request("get", stream_urls["hls"]), stream_urls["hls"], output=self.output
            )
            for key in list(streams.keys()):
                yield streams[key]
        if "mp4" in stream_urls:
            yield HTTP(copy.copy(self.config), stream_urls["mp4"], output=self.output)
| {
"content_hash": "f5714b648305ecc2f313e3033922d3ba",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 135,
"avg_line_length": 38.734693877551024,
"alnum_prop": 0.5958904109589042,
"repo_name": "olof/svtplay-dl",
"id": "d6eea127711ea16a7d2a8d7b53b3da7af6014624",
"size": "1898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/svtplay_dl/service/vg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3816"
},
{
"name": "Python",
"bytes": "278876"
},
{
"name": "Shell",
"bytes": "2423"
}
],
"symlink_target": ""
} |
"""5
Revision ID: 3ec4ff2461dd
Revises: 57ca0cf95e25
Create Date: 2014-03-28 19:48:38.056555
"""
# revision identifiers, used by Alembic.
revision = '3ec4ff2461dd'
down_revision = '57ca0cf95e25'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision; Alembic generated no schema changes."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
def downgrade():
    """Revert this revision; Alembic autogenerated no operations for it."""
    pass
| {
"content_hash": "5308fd1e3ccb57413a1d40642d752e17",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 63,
"avg_line_length": 19,
"alnum_prop": 0.6821862348178138,
"repo_name": "SuperQuest/v1",
"id": "9112b19857f44e90a264aaa1ac7dde0aa1edf57a",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/3ec4ff2461dd_5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3082"
},
{
"name": "JavaScript",
"bytes": "645"
},
{
"name": "Python",
"bytes": "120672"
}
],
"symlink_target": ""
} |
from visual_dynamics.envs import Env
class RosEnv(Env):
    """Stub ROS-backed environment; no behavior beyond the base Env yet."""
    pass
| {
"content_hash": "161a787506ba53289a2345946d82077d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 36,
"avg_line_length": 13.4,
"alnum_prop": 0.7313432835820896,
"repo_name": "alexlee-gk/visual_dynamics",
"id": "257e1d1616606a6abf644c17e9d1f47f075bdc33",
"size": "67",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visual_dynamics/envs/ros_env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "926637"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import vim
def GetCurrentBufferNumber():
  """Return the number of the buffer shown in the current window."""
  current_buffer = vim.current.buffer
  return current_buffer.number
def BufNumberToName( bufnr ):
  """Return the name of the buffer numbered *bufnr* (via Vim's bufname())."""
  expr = 'bufname({0})'.format( bufnr )
  return vim.eval( expr )
def GetCurrentTopLine():
  """Return the 1-based number of the first line visible in the current window."""
  return GetIntValue( 'line("w0")' )
def GetCurrentBottomLine():
  """Return the 1-based number of the last line visible in the current window."""
  return GetIntValue( 'line("w$")' )
def GetCurrentWindowHeight():
  """Return the height, in lines, of the current window."""
  height = vim.current.window.height
  return int( height )
def GetCurrentFileType():
  """Return the 'filetype' option of the current buffer as unicode."""
  ft = vim.current.buffer.options[ 'filetype' ]
  return _ToUnicode( ft )
def GetFileType( bufnr ):
  """Return the 'filetype' option of buffer *bufnr* as unicode."""
  return _ToUnicode( vim.buffers[ bufnr ].options[ 'filetype' ] )
def GetBufferLen( bufnr ):
  """Return the number of lines in buffer *bufnr*."""
  buf = vim.buffers[ bufnr ]
  return len( buf )
def GetLineLen( bufnr, line ):
  """Return the length of the 1-based *line* in buffer *bufnr*."""
  # Vim line numbers are 1-based, but the python buffer list is 0-based.
  buf = vim.buffers[ bufnr ]
  return len( buf[ line - 1 ] )
def GetIntValue( name ):
  """Evaluate the Vim expression *name* and return the result as an int."""
  value = vim.eval( name )
  return int( value )
def PostVimWarning( message ):
  """Display *message* highlighted as a warning and keep it in Vim's
  message history (:echom)."""
  # Displaying a new message while previous ones are still on the status line
  # might lead to a hit-enter prompt or the message appearing without a
  # newline so we do a redraw first.
  vim.command( 'redraw' )
  vim.command( 'echohl WarningMsg' )
  # Inside a single-quoted Vim string a quote is escaped by doubling it;
  # without this, a message containing ' breaks the :echom command line.
  escaped = _ToUnicode( message ).replace( "'", "''" )
  vim.command( "echom '{0}'".format( escaped ) )
  vim.command( 'echohl None' )
def _ToUnicode( value ):
  """Coerce *value* to a unicode string; byte strings are decoded as utf8."""
  if not value:
    return str()
  if isinstance( value, bytes ):
    # All incoming text should be utf8
    return value.decode( 'utf8' )
  if isinstance( value, str ):
    return value
  return str( value )
| {
"content_hash": "a4ea0ce414ac6161761e5af3ec84906a",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 23.92753623188406,
"alnum_prop": 0.6608116293155664,
"repo_name": "davits/DyeVim",
"id": "c2bf6707a96e1440e36e842b3ae91d0beb93deaa",
"size": "2796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/dye/utils/vimsupport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55158"
},
{
"name": "Vim script",
"bytes": "21006"
}
],
"symlink_target": ""
} |
"""Interfaces for the quadrature package."""
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from .base_gp import IBaseGaussianProcess # noqa: F401
from .standard_kernels import ( # noqa: F401
IRBF,
IBrownian,
IProductBrownian,
IProductMatern12,
IProductMatern32,
IProductMatern52,
IStandardKernel,
)
__all__ = [
"IBaseGaussianProcess",
"IStandardKernel",
"IBrownian",
"IRBF",
"IProductBrownian",
"IProductMatern12",
"IProductMatern32",
"IProductMatern52",
]
| {
"content_hash": "5be2bd68f0e0b2c8f1c88c0bdc4a2e64",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 73,
"avg_line_length": 21.814814814814813,
"alnum_prop": 0.6842105263157895,
"repo_name": "EmuKit/emukit",
"id": "b92d6a366fa4817466b3b5d0c9a00ec8d4ff1713",
"size": "589",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "emukit/quadrature/interfaces/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "972291"
},
{
"name": "Stan",
"bytes": "1413"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from .webfront import dispatcher
from ws4py.websocket import EchoWebSocket
from ws4py.server.wsgiutils import WebSocketWSGIApplication
class WebSocketHandler(EchoWebSocket):
    """
    Per-connection websocket handler.

    Currently inherits EchoWebSocket's echo behavior unchanged; the
    __init__ override is a pass-through placeholder for future state.
    """
    def __init__(self, *args, **kwargs):
        super(WebSocketHandler, self).__init__(*args, **kwargs)
class WebsocketEngine(object):
    """
    Engine exposing a websocket endpoint on the shared webfront dispatcher.
    """
    def start(self, ctx=None):
        # Mount the websocket WSGI application at /ws on the dispatcher.
        dispatcher.mount('/ws', WebSocketWSGIApplication(
            handler_cls=WebSocketHandler))

    def stop(self, ctx=None):
        # No teardown required; the dispatcher owns the mounted app.
        pass
"content_hash": "77ad7c35b8d6c293644ba6ca69fd3573",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 63,
"avg_line_length": 29.95,
"alnum_prop": 0.6894824707846411,
"repo_name": "eavatar/ava",
"id": "96a79dbb7b11654de0fc454fa7be5931801a2c40",
"size": "623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/eavatar.ava/ava/core/websocket.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "HTML",
"bytes": "460"
},
{
"name": "Makefile",
"bytes": "549"
},
{
"name": "Python",
"bytes": "156900"
},
{
"name": "Shell",
"bytes": "395"
}
],
"symlink_target": ""
} |
"""
Basic data structures needed to support dialogue system.
"""
from __future__ import print_function, unicode_literals
from collections import defaultdict
#noinspection PyUnresolvedReferences
from random import random, randrange
import textwrap
#todo: complex boolean conditionals
class Condition(object):
    """
    A test of a single variable in the global dialogue state.
    """
    def __init__(self, condition):
        self.variable = condition["variable"]
        self.operation = condition["operation"]
        self.value = condition.get("value", None)

    def apply(self, globals):
        """
        Evaluate this condition against the global state dict.
        :param globals:
        """
        current = globals[self.variable]
        # "set"/"unset" are boolean tests and need no explicit value.
        # NOTE(review): this check inspects the stored global value, not
        # self.value -- preserved as written; confirm the intent.
        if self.operation not in ("set", "unset") and current is None:
            raise ValueError("Non boolean op with no value")
        comparisons = {
            ">": lambda: current > self.value,
            "<": lambda: current < self.value,
            "=": lambda: current == self.value,
            "==": lambda: current == self.value,
            ">=": lambda: current >= self.value,
            "<=": lambda: current <= self.value,
            "set": lambda: current == 1,
            "unset": lambda: current == 0,
        }
        # Unknown operations fall through to None and raise TypeError when
        # called, matching the original dispatch behavior.
        return comparisons.get(self.operation)()

    def __repr__(self):
        return "<variable: %s, operation: %s, value: %s>" % (
            self.variable, self.operation, self.value)
class Effect(object):
    """
    A mutation of the global dialogue state triggered by a response.
    """
    # Text types differ between Python 2 (str/unicode) and Python 3 (str);
    # the original tested only `unicode`, a NameError on Python 3.
    try:
        _text_types = (str, unicode)  # noqa: F821 -- Python 2
    except NameError:
        _text_types = (str,)  # Python 3

    def __init__(self, effect):
        self.variable = effect["variable"]
        self.operation = effect["operation"]
        self.value = effect.get("value", None)

    def apply(self, globals):
        """
        Applies an effect to the global state dict.

        String values are resolved at apply time: "eval:<expr>" evaluates
        <expr>; any other string is read as the name of another global.
        (The original cached the resolved value on self.value, so later
        applies reused a stale resolution; we now resolve locally each time.)

        :param globals:
        """
        value = self.value
        if isinstance(value, self._text_types):
            if value.startswith("eval:"):
                # SECURITY: eval() of dialogue-file content -- only load
                # dialogue data from trusted sources.
                value = eval(value[5:])
            else:
                value = globals[value]
        mutation = {"+": lambda x: x + value,
                    "-": lambda x: x - value,
                    "=": lambda x: value,
                    "set": lambda x: 1,
                    "unset": lambda x: 0}.get(self.operation)
        globals[self.variable] = mutation(globals[self.variable])
class Dialogue(object):
    """
    The Dialogue class is the public API to the dialogue system.

    It owns the global conversation state (a defaultdict(int) seeded from
    prompt_dict["defaults"]), the prompt graph, and the current prompt id.
    Callers alternate get_prompt()/get_responses() with answer() until
    is_done() returns True.
    """
    def __init__(self, prompt_dict):
        # Unknown variables default to 0, so conditions/effects may refer
        # to variables that were never explicitly initialised.
        self.globals = defaultdict(int)
        self.globals.update(prompt_dict["defaults"])
        self.prompts = {}
        self._create_prompts(prompt_dict["prompts"])
        # The conversation always starts at prompt id 0.
        self.current_prompt = 0
        self.done = False

    def _create_prompts(self, prompt_list):
        # Index prompts by id for O(1) transition lookups.
        for prompt in prompt_list:
            self.prompts[prompt["id"]] = Prompt(prompt)

    def is_done(self):
        """
        Returns true if there is nothing left to do in the dialogue.
        """
        return self.done

    def get_prompt(self):
        """
        Returns the prompt from the current conversation node.

        Side effect: marks the dialogue done when the current prompt has no
        responses (a terminal node).  Returns None once finished.
        """
        if self.done is False:
            if not self.prompts[self.current_prompt].responses:
                self.done = True
            return self.prompts[self.current_prompt].get_prompt()
        else:
            return None

    def get_responses(self):
        """
        Returns the list of available responses.

        Only responses whose preconditions hold against the current global
        state are included; returns None once the dialogue is finished.
        """
        if self.done is False:
            return self.prompts[self.current_prompt].get_responses(
                self.globals)
        else:
            return None

    def answer(self, response_ix):
        """
        Answers the prompt with the chosen response index. Responses are
        0-indexed.

        Raises a plain Exception if the dialogue has already finished.
        :param response_ix:
        """
        if not self.done:
            # Re-applies the same precondition filter as get_responses(), so
            # indices line up only while the global state is unchanged in
            # between the two calls.
            active_responses = [response for response in
                                self.prompts[self.current_prompt].responses if
                                all([precondition.apply(self.globals) for
                                     precondition in response.preconditions])]
            chosen_response = active_responses[response_ix]
            chosen_response.apply_effects(self.globals)
            next = chosen_response.get_next(self.globals)
            self.current_prompt = next
            # A transition target of -1 terminates the conversation.
            if self.current_prompt == -1:
                self.done = True
        else:
            raise Exception("Trying to answer a finished dialog")

    def get_globals(self):
        """
        Return the current dialog state.
        """
        return self.globals
class Prompt(object):
    """
    One node in the conversation graph: the lines spoken at this node plus
    the player responses that leave it.
    """
    def __init__(self, prompt):
        self.texts = prompt["text"]
        self.id = prompt["id"]
        self.responses = []
        self._create_responses(prompt["responses"])

    def _create_responses(self, response_list):
        # Wrap each raw response dict in a Response object.
        self.responses.extend(Response(raw) for raw in response_list)

    def get_prompt(self):
        """
        Returns a list of 2-tuples (speaker, prompt)
        """
        return self.texts

    def get_responses(self, globals):
        """
        Returns a list of response texts available given the current global
        state.
        :param globals:
        """
        available = []
        for response in self.responses:
            if all(precondition.apply(globals)
                   for precondition in response.preconditions):
                available.append(response.text)
        return available
class Response(object):
    """
    A player response: its text, the preconditions gating it, the effects
    it triggers, and the conditional transitions to the next prompt.
    """
    def __init__(self, response):
        self.text = response["text"]
        self.effects = []
        self.transitions = []
        self.preconditions = []
        self._create_preconditions(response.get("preconditions", []))
        self._create_effects(response.get("effects", []))
        self._create_transitions(response["transitions"])

    def _create_preconditions(self, preconditions_list):
        self.preconditions.extend(
            Condition(raw) for raw in preconditions_list)

    def _create_effects(self, effect_list):
        self.effects.extend(Effect(raw) for raw in effect_list)

    def _create_transitions(self, next_list):
        for raw in next_list:
            guards = [Condition(condition)
                      for condition in raw["conditions"]]
            self.transitions.append((raw["target"], guards))

    def get_next(self, globals):
        """
        Returns the id of the first transition whose conditions all hold,
        or -1 when none applies (which ends the dialogue).
        :param globals:
        """
        for target, conditions in self.transitions:
            if all(cond.apply(globals) for cond in conditions):
                return target
        return -1

    def apply_effects(self, globals):
        """
        Applies the effects associated with choosing this response
        :param globals:
        """
        for effect in self.effects:
            effect.apply(globals)
class ConsoleEngine(object):
    """
    A simple console engine to demonstrate running a conversation.
    """
    def __init__(self, dialog):
        self.dialog = dialog

    def print_prompts(self, prompts):
        """
        Pretty-prints a list of (speaker, text) prompts in two columns.
        :param prompts:
        """
        if not prompts:
            # Nothing to show (e.g. the dialogue finished immediately);
            # the original crashed on max() of an empty sequence here.
            return
        longest_name = max([len(speaker) for speaker, prompt in prompts]) + 4
        column_two = 80 - longest_name
        for speaker, prompt in prompts:
            # textwrap.wrap("") returns [], so pad with one empty line to
            # keep the speaker column and avoid an IndexError below.
            prompt_lines = textwrap.wrap(prompt, column_two) or ['']
            print("{0:{width}}{1:{width_2}}".format(speaker + ":",
                                                    prompt_lines[0],
                                                    width=longest_name,
                                                    width_2=column_two))
            for prompt_line in prompt_lines[1:]:
                print("{0:{width}}{1:{width_2}}".format("", prompt_line,
                                                        width=longest_name,
                                                        width_2=column_two))
            print()

    def run(self):
        """
        Runs the conversation until it is done, reading choices from stdin.
        """
        # Python 2's input() would eval() whatever the user types (and a
        # plain number typed there skips the int() error handling); use
        # raw_input on Python 2 and the safe builtin input on Python 3.
        try:
            read_line = raw_input  # noqa: F821 -- Python 2
        except NameError:
            read_line = input
        while True:
            prompts = self.dialog.get_prompt()
            self.print_prompts(prompts)
            if self.dialog.is_done():
                break
            ix = 1
            for response in self.dialog.get_responses():
                print("%d) %s" % (ix, response))
                ix += 1
            while True:
                res = read_line("> ")
                try:
                    res = int(res)
                except ValueError:
                    print("Response must be an int between %d and %d" % (
                        1, ix - 1))
                    continue
                if res < 1 or res > ix - 1:
                    print("Response must be between %d and %d" % (1, ix - 1))
                else:
                    self.dialog.answer(res - 1)
                    break
            if self.dialog.is_done():
                break
            print()
| {
"content_hash": "c65b3517aaa8efee29dee5f376f5407d",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 78,
"avg_line_length": 31.880546075085324,
"alnum_prop": 0.5268172572529708,
"repo_name": "dfuentes/dialogue",
"id": "e359fa77c1e0b01eb69892c39d7c3ffcc367973d",
"size": "9357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dialogue/dialogue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6357"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationradiusaction(base_resource) :
    """ Configuration for RADIUS action resource. """
    # NOTE(review): this class follows the NITRO SDK's repetitive
    # property/bulk-request pattern and appears auto-generated; prefer
    # regenerating over hand-editing.
    def __init__(self) :
        # Writable configuration attributes (sent to the NITRO API).
        self._name = ""
        self._serverip = ""
        self._servername = ""
        self._serverport = 0
        self._authtimeout = 0
        self._radkey = ""
        self._radnasip = ""
        self._radnasid = ""
        self._radvendorid = 0
        self._radattributetype = 0
        self._radgroupsprefix = ""
        self._radgroupseparator = ""
        self._passencoding = ""
        self._ipvendorid = 0
        self._ipattributetype = 0
        self._accounting = ""
        self._pwdvendorid = 0
        self._pwdattributetype = 0
        self._defaultauthenticationgroup = ""
        self._callingstationid = ""
        # Read-only attributes reported by the appliance.
        self._ipaddress = ""
        self._success = 0
        self._failure = 0
        # Name-mangled to _authenticationradiusaction___count; holds the
        # resource count returned by count()/count_filtered().
        self.___count = 0
    @property
    def name(self) :
        ur"""Name for the RADIUS action.
        Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after the RADIUS action is added.<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e
    @name.setter
    def name(self, name) :
        ur"""Name for the RADIUS action.
        Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after the RADIUS action is added.<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e
    @property
    def serverip(self) :
        ur"""IP address assigned to the RADIUS server.<br/>Minimum length = 1.
        """
        try :
            return self._serverip
        except Exception as e:
            raise e
    @serverip.setter
    def serverip(self, serverip) :
        ur"""IP address assigned to the RADIUS server.<br/>Minimum length = 1
        """
        try :
            self._serverip = serverip
        except Exception as e:
            raise e
    @property
    def servername(self) :
        ur"""RADIUS server name as a FQDN. Mutually exclusive with RADIUS IP address.<br/>Minimum length = 1.
        """
        try :
            return self._servername
        except Exception as e:
            raise e
    @servername.setter
    def servername(self, servername) :
        ur"""RADIUS server name as a FQDN. Mutually exclusive with RADIUS IP address.<br/>Minimum length = 1
        """
        try :
            self._servername = servername
        except Exception as e:
            raise e
    @property
    def serverport(self) :
        ur"""Port number on which the RADIUS server listens for connections.<br/>Minimum length = 1.
        """
        try :
            return self._serverport
        except Exception as e:
            raise e
    @serverport.setter
    def serverport(self, serverport) :
        ur"""Port number on which the RADIUS server listens for connections.<br/>Minimum length = 1
        """
        try :
            self._serverport = serverport
        except Exception as e:
            raise e
    @property
    def authtimeout(self) :
        ur"""Number of seconds the NetScaler appliance waits for a response from the RADIUS server.<br/>Default value: 3<br/>Minimum length = 1.
        """
        try :
            return self._authtimeout
        except Exception as e:
            raise e
    @authtimeout.setter
    def authtimeout(self, authtimeout) :
        ur"""Number of seconds the NetScaler appliance waits for a response from the RADIUS server.<br/>Default value: 3<br/>Minimum length = 1
        """
        try :
            self._authtimeout = authtimeout
        except Exception as e:
            raise e
    @property
    def radkey(self) :
        ur"""Key shared between the RADIUS server and the NetScaler appliance.
        Required to allow the NetScaler appliance to communicate with the RADIUS server.<br/>Minimum length = 1.
        """
        try :
            return self._radkey
        except Exception as e:
            raise e
    @radkey.setter
    def radkey(self, radkey) :
        ur"""Key shared between the RADIUS server and the NetScaler appliance.
        Required to allow the NetScaler appliance to communicate with the RADIUS server.<br/>Minimum length = 1
        """
        try :
            self._radkey = radkey
        except Exception as e:
            raise e
    @property
    def radnasip(self) :
        ur"""If enabled, the NetScaler appliance IP address (NSIP) is sent to the RADIUS server as the Network Access Server IP (NASIP) address.
        The RADIUS protocol defines the meaning and use of the NASIP address.<br/>Possible values = ENABLED, DISABLED.
        """
        try :
            return self._radnasip
        except Exception as e:
            raise e
    @radnasip.setter
    def radnasip(self, radnasip) :
        ur"""If enabled, the NetScaler appliance IP address (NSIP) is sent to the RADIUS server as the Network Access Server IP (NASIP) address.
        The RADIUS protocol defines the meaning and use of the NASIP address.<br/>Possible values = ENABLED, DISABLED
        """
        try :
            self._radnasip = radnasip
        except Exception as e:
            raise e
    @property
    def radnasid(self) :
        ur"""If configured, this string is sent to the RADIUS server as the Network Access Server ID (NASID).
        """
        try :
            return self._radnasid
        except Exception as e:
            raise e
    @radnasid.setter
    def radnasid(self, radnasid) :
        ur"""If configured, this string is sent to the RADIUS server as the Network Access Server ID (NASID).
        """
        try :
            self._radnasid = radnasid
        except Exception as e:
            raise e
    @property
    def radvendorid(self) :
        ur"""RADIUS vendor ID attribute, used for RADIUS group extraction.<br/>Minimum length = 1.
        """
        try :
            return self._radvendorid
        except Exception as e:
            raise e
    @radvendorid.setter
    def radvendorid(self, radvendorid) :
        ur"""RADIUS vendor ID attribute, used for RADIUS group extraction.<br/>Minimum length = 1
        """
        try :
            self._radvendorid = radvendorid
        except Exception as e:
            raise e
    @property
    def radattributetype(self) :
        ur"""RADIUS attribute type, used for RADIUS group extraction.<br/>Minimum length = 1.
        """
        try :
            return self._radattributetype
        except Exception as e:
            raise e
    @radattributetype.setter
    def radattributetype(self, radattributetype) :
        ur"""RADIUS attribute type, used for RADIUS group extraction.<br/>Minimum length = 1
        """
        try :
            self._radattributetype = radattributetype
        except Exception as e:
            raise e
    @property
    def radgroupsprefix(self) :
        ur"""RADIUS groups prefix string.
        This groups prefix precedes the group names within a RADIUS attribute for RADIUS group extraction.
        """
        try :
            return self._radgroupsprefix
        except Exception as e:
            raise e
    @radgroupsprefix.setter
    def radgroupsprefix(self, radgroupsprefix) :
        ur"""RADIUS groups prefix string.
        This groups prefix precedes the group names within a RADIUS attribute for RADIUS group extraction.
        """
        try :
            self._radgroupsprefix = radgroupsprefix
        except Exception as e:
            raise e
    @property
    def radgroupseparator(self) :
        ur"""RADIUS group separator string
        The group separator delimits group names within a RADIUS attribute for RADIUS group extraction.
        """
        try :
            return self._radgroupseparator
        except Exception as e:
            raise e
    @radgroupseparator.setter
    def radgroupseparator(self, radgroupseparator) :
        ur"""RADIUS group separator string
        The group separator delimits group names within a RADIUS attribute for RADIUS group extraction.
        """
        try :
            self._radgroupseparator = radgroupseparator
        except Exception as e:
            raise e
    @property
    def passencoding(self) :
        ur"""Encoding type for passwords in RADIUS packets that the NetScaler appliance sends to the RADIUS server.<br/>Default value: pap<br/>Possible values = pap, chap, mschapv1, mschapv2.
        """
        try :
            return self._passencoding
        except Exception as e:
            raise e
    @passencoding.setter
    def passencoding(self, passencoding) :
        ur"""Encoding type for passwords in RADIUS packets that the NetScaler appliance sends to the RADIUS server.<br/>Default value: pap<br/>Possible values = pap, chap, mschapv1, mschapv2
        """
        try :
            self._passencoding = passencoding
        except Exception as e:
            raise e
    @property
    def ipvendorid(self) :
        ur"""Vendor ID of the intranet IP attribute in the RADIUS response.
        NOTE: A value of 0 indicates that the attribute is not vendor encoded.
        """
        try :
            return self._ipvendorid
        except Exception as e:
            raise e
    @ipvendorid.setter
    def ipvendorid(self, ipvendorid) :
        ur"""Vendor ID of the intranet IP attribute in the RADIUS response.
        NOTE: A value of 0 indicates that the attribute is not vendor encoded.
        """
        try :
            self._ipvendorid = ipvendorid
        except Exception as e:
            raise e
    @property
    def ipattributetype(self) :
        ur"""Remote IP address attribute type in a RADIUS response.<br/>Minimum length = 1.
        """
        try :
            return self._ipattributetype
        except Exception as e:
            raise e
    @ipattributetype.setter
    def ipattributetype(self, ipattributetype) :
        ur"""Remote IP address attribute type in a RADIUS response.<br/>Minimum length = 1
        """
        try :
            self._ipattributetype = ipattributetype
        except Exception as e:
            raise e
    @property
    def accounting(self) :
        ur"""Whether the RADIUS server is currently accepting accounting messages.<br/>Possible values = ON, OFF.
        """
        try :
            return self._accounting
        except Exception as e:
            raise e
    @accounting.setter
    def accounting(self, accounting) :
        ur"""Whether the RADIUS server is currently accepting accounting messages.<br/>Possible values = ON, OFF
        """
        try :
            self._accounting = accounting
        except Exception as e:
            raise e
    @property
    def pwdvendorid(self) :
        ur"""Vendor ID of the attribute, in the RADIUS response, used to extract the user password.<br/>Minimum length = 1.
        """
        try :
            return self._pwdvendorid
        except Exception as e:
            raise e
    @pwdvendorid.setter
    def pwdvendorid(self, pwdvendorid) :
        ur"""Vendor ID of the attribute, in the RADIUS response, used to extract the user password.<br/>Minimum length = 1
        """
        try :
            self._pwdvendorid = pwdvendorid
        except Exception as e:
            raise e
    @property
    def pwdattributetype(self) :
        ur"""Vendor-specific password attribute type in a RADIUS response.<br/>Minimum length = 1.
        """
        try :
            return self._pwdattributetype
        except Exception as e:
            raise e
    @pwdattributetype.setter
    def pwdattributetype(self, pwdattributetype) :
        ur"""Vendor-specific password attribute type in a RADIUS response.<br/>Minimum length = 1
        """
        try :
            self._pwdattributetype = pwdattributetype
        except Exception as e:
            raise e
    @property
    def defaultauthenticationgroup(self) :
        ur"""This is the default group that is chosen when the authentication succeeds in addition to extracted groups.<br/>Maximum length = 64.
        """
        try :
            return self._defaultauthenticationgroup
        except Exception as e:
            raise e
    @defaultauthenticationgroup.setter
    def defaultauthenticationgroup(self, defaultauthenticationgroup) :
        ur"""This is the default group that is chosen when the authentication succeeds in addition to extracted groups.<br/>Maximum length = 64
        """
        try :
            self._defaultauthenticationgroup = defaultauthenticationgroup
        except Exception as e:
            raise e
    @property
    def callingstationid(self) :
        ur"""Send Calling-Station-ID of the client to the RADIUS server. IP Address of the client is sent as its Calling-Station-ID.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
        """
        try :
            return self._callingstationid
        except Exception as e:
            raise e
    @callingstationid.setter
    def callingstationid(self, callingstationid) :
        ur"""Send Calling-Station-ID of the client to the RADIUS server. IP Address of the client is sent as its Calling-Station-ID.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
        """
        try :
            self._callingstationid = callingstationid
        except Exception as e:
            raise e
    @property
    def ipaddress(self) :
        ur"""IP address.
        """
        # Read-only attribute: no setter is generated for it.
        try :
            return self._ipaddress
        except Exception as e:
            raise e
    @property
    def success(self) :
        # Read-only counter reported by the appliance; exact semantics not
        # defined here -- presumably successful authentications. TODO confirm.
        try :
            return self._success
        except Exception as e:
            raise e
    @property
    def failure(self) :
        # Read-only counter reported by the appliance; exact semantics not
        # defined here -- presumably failed authentications. TODO confirm.
        try :
            return self._failure
        except Exception as e:
            raise e
    def _get_nitro_response(self, service, response) :
        ur""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(authenticationradiusaction_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # errorcode 444 means the session is no longer valid.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.authenticationradiusaction
        except Exception as e :
            raise e
    def _get_object_name(self) :
        ur""" Returns the value of object identifier argument
        """
        try :
            if self.name is not None :
                return str(self.name)
            return None
        except Exception as e :
            raise e
    @classmethod
    def add(cls, client, resource) :
        ur""" Use this API to add authenticationradiusaction.
        """
        # Accepts either a single resource or a list; list inputs are sent
        # as one bulk request.
        try :
            if type(resource) is not list :
                addresource = authenticationradiusaction()
                addresource.name = resource.name
                addresource.serverip = resource.serverip
                addresource.servername = resource.servername
                addresource.serverport = resource.serverport
                addresource.authtimeout = resource.authtimeout
                addresource.radkey = resource.radkey
                addresource.radnasip = resource.radnasip
                addresource.radnasid = resource.radnasid
                addresource.radvendorid = resource.radvendorid
                addresource.radattributetype = resource.radattributetype
                addresource.radgroupsprefix = resource.radgroupsprefix
                addresource.radgroupseparator = resource.radgroupseparator
                addresource.passencoding = resource.passencoding
                addresource.ipvendorid = resource.ipvendorid
                addresource.ipattributetype = resource.ipattributetype
                addresource.accounting = resource.accounting
                addresource.pwdvendorid = resource.pwdvendorid
                addresource.pwdattributetype = resource.pwdattributetype
                addresource.defaultauthenticationgroup = resource.defaultauthenticationgroup
                addresource.callingstationid = resource.callingstationid
                return addresource.add_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    addresources = [ authenticationradiusaction() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        addresources[i].name = resource[i].name
                        addresources[i].serverip = resource[i].serverip
                        addresources[i].servername = resource[i].servername
                        addresources[i].serverport = resource[i].serverport
                        addresources[i].authtimeout = resource[i].authtimeout
                        addresources[i].radkey = resource[i].radkey
                        addresources[i].radnasip = resource[i].radnasip
                        addresources[i].radnasid = resource[i].radnasid
                        addresources[i].radvendorid = resource[i].radvendorid
                        addresources[i].radattributetype = resource[i].radattributetype
                        addresources[i].radgroupsprefix = resource[i].radgroupsprefix
                        addresources[i].radgroupseparator = resource[i].radgroupseparator
                        addresources[i].passencoding = resource[i].passencoding
                        addresources[i].ipvendorid = resource[i].ipvendorid
                        addresources[i].ipattributetype = resource[i].ipattributetype
                        addresources[i].accounting = resource[i].accounting
                        addresources[i].pwdvendorid = resource[i].pwdvendorid
                        addresources[i].pwdattributetype = resource[i].pwdattributetype
                        addresources[i].defaultauthenticationgroup = resource[i].defaultauthenticationgroup
                        addresources[i].callingstationid = resource[i].callingstationid
                result = cls.add_bulk_request(client, addresources)
            return result
        except Exception as e :
            raise e
    @classmethod
    def delete(cls, client, resource) :
        ur""" Use this API to delete authenticationradiusaction.
        """
        # resource may be a name string, a resource object, or a list of
        # either; list inputs are sent as one bulk request.
        try :
            if type(resource) is not list :
                deleteresource = authenticationradiusaction()
                if type(resource) != type(deleteresource):
                    deleteresource.name = resource
                else :
                    deleteresource.name = resource.name
                return deleteresource.delete_resource(client)
            else :
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ authenticationradiusaction() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].name = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ authenticationradiusaction() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].name = resource[i].name
                result = cls.delete_bulk_request(client, deleteresources)
            return result
        except Exception as e :
            raise e
    @classmethod
    def update(cls, client, resource) :
        ur""" Use this API to update authenticationradiusaction.
        """
        # Mirrors add(): copies every writable attribute, single or bulk.
        try :
            if type(resource) is not list :
                updateresource = authenticationradiusaction()
                updateresource.name = resource.name
                updateresource.serverip = resource.serverip
                updateresource.servername = resource.servername
                updateresource.serverport = resource.serverport
                updateresource.authtimeout = resource.authtimeout
                updateresource.radkey = resource.radkey
                updateresource.radnasip = resource.radnasip
                updateresource.radnasid = resource.radnasid
                updateresource.radvendorid = resource.radvendorid
                updateresource.radattributetype = resource.radattributetype
                updateresource.radgroupsprefix = resource.radgroupsprefix
                updateresource.radgroupseparator = resource.radgroupseparator
                updateresource.passencoding = resource.passencoding
                updateresource.ipvendorid = resource.ipvendorid
                updateresource.ipattributetype = resource.ipattributetype
                updateresource.accounting = resource.accounting
                updateresource.pwdvendorid = resource.pwdvendorid
                updateresource.pwdattributetype = resource.pwdattributetype
                updateresource.defaultauthenticationgroup = resource.defaultauthenticationgroup
                updateresource.callingstationid = resource.callingstationid
                return updateresource.update_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    updateresources = [ authenticationradiusaction() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].name = resource[i].name
                        updateresources[i].serverip = resource[i].serverip
                        updateresources[i].servername = resource[i].servername
                        updateresources[i].serverport = resource[i].serverport
                        updateresources[i].authtimeout = resource[i].authtimeout
                        updateresources[i].radkey = resource[i].radkey
                        updateresources[i].radnasip = resource[i].radnasip
                        updateresources[i].radnasid = resource[i].radnasid
                        updateresources[i].radvendorid = resource[i].radvendorid
                        updateresources[i].radattributetype = resource[i].radattributetype
                        updateresources[i].radgroupsprefix = resource[i].radgroupsprefix
                        updateresources[i].radgroupseparator = resource[i].radgroupseparator
                        updateresources[i].passencoding = resource[i].passencoding
                        updateresources[i].ipvendorid = resource[i].ipvendorid
                        updateresources[i].ipattributetype = resource[i].ipattributetype
                        updateresources[i].accounting = resource[i].accounting
                        updateresources[i].pwdvendorid = resource[i].pwdvendorid
                        updateresources[i].pwdattributetype = resource[i].pwdattributetype
                        updateresources[i].defaultauthenticationgroup = resource[i].defaultauthenticationgroup
                        updateresources[i].callingstationid = resource[i].callingstationid
                result = cls.update_bulk_request(client, updateresources)
            return result
        except Exception as e :
            raise e
    @classmethod
    def unset(cls, client, resource, args) :
        ur""" Use this API to unset the properties of authenticationradiusaction resource.
        Properties that need to be unset are specified in args array.
        """
        try :
            if type(resource) is not list :
                unsetresource = authenticationradiusaction()
                if type(resource) != type(unsetresource):
                    unsetresource.name = resource
                else :
                    unsetresource.name = resource.name
                return unsetresource.unset_resource(client, args)
            else :
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        unsetresources = [ authenticationradiusaction() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            unsetresources[i].name = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        unsetresources = [ authenticationradiusaction() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            unsetresources[i].name = resource[i].name
                result = cls.unset_bulk_request(client, unsetresources, args)
            return result
        except Exception as e :
            raise e
    @classmethod
    def get(cls, client, name="", option_="") :
        ur""" Use this API to fetch all the authenticationradiusaction resources that are configured on netscaler.
        """
        # name may be empty (fetch all), a single name, or a list of names.
        try :
            if not name :
                obj = authenticationradiusaction()
                response = obj.get_resources(client, option_)
            else :
                if type(name) != cls :
                    if type(name) is not list :
                        obj = authenticationradiusaction()
                        obj.name = name
                        response = obj.get_resource(client, option_)
                    else :
                        if name and len(name) > 0 :
                            response = [authenticationradiusaction() for _ in range(len(name))]
                            obj = [authenticationradiusaction() for _ in range(len(name))]
                            for i in range(len(name)) :
                                obj[i] = authenticationradiusaction()
                                obj[i].name = name[i]
                                response[i] = obj[i].get_resource(client, option_)
            return response
        except Exception as e :
            raise e
    @classmethod
    def get_filtered(cls, client, filter_) :
        ur""" Use this API to fetch filtered set of authenticationradiusaction resources.
        filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = authenticationradiusaction()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            return response
        except Exception as e :
            raise e
    @classmethod
    def count(cls, client) :
        ur""" Use this API to count the authenticationradiusaction resources configured on NetScaler.
        """
        try :
            obj = authenticationradiusaction()
            option_ = options()
            option_.count = True
            response = obj.get_resources(client, option_)
            if response :
                # ___count is name-mangled, hence the __dict__ lookup.
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e
    @classmethod
    def count_filtered(cls, client, filter_) :
        ur""" Use this API to count filtered the set of authenticationradiusaction resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = authenticationradiusaction()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            if response :
                # ___count is name-mangled, hence the __dict__ lookup.
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e
	class Passencoding:
		"""Permitted string constants for the passencoding property."""
		pap = "pap"
		chap = "chap"
		mschapv1 = "mschapv1"
		mschapv2 = "mschapv2"
	class Callingstationid:
		"""Permitted string constants for the callingstationid property."""
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Accounting:
		"""Permitted string constants for the accounting property."""
		ON = "ON"
		OFF = "OFF"
	class Radnasip:
		"""Permitted string constants for the radnasip property."""
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
class authenticationradiusaction_response(base_response) :
	"""NITRO response wrapper carrying a list of authenticationradiusaction
	payload objects alongside the standard response metadata fields
	(errorcode, message, severity, sessionid).
	"""
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate the payload list with `length` empty resource
		# objects to be filled in during response deserialization.
		self.authenticationradiusaction = [authenticationradiusaction() for _ in range(length)]
| {
"content_hash": "48b5337a5daa2bc93a08913945d09457",
"timestamp": "",
"source": "github",
"line_count": 732,
"max_line_length": 296,
"avg_line_length": 32.83196721311475,
"alnum_prop": 0.715849040902093,
"repo_name": "benfinke/ns_python",
"id": "c7991019deb8cf7c8111cf3390a43f1edc5c872d",
"size": "24647",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationradiusaction.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21836782"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.