text stringlengths 8 6.05M |
|---|
# Functional programming demos: pure functions with map, filter, zip and reduce.
# some useful functions in python
from functools import reduce

my_list = [1, 2, 3]

# Manual version that map() replaces:
# def multiply_by2(li):
#     new_list = []
#     for item in li:
#         new_list.append(item * 2)
#     return new_list

# Using the map function: apply a function to every element.
def multiply_by2(item):
    return item * 2

map_result = list(map(multiply_by2, my_list))
print(map_result, '\n')

# using lambda expression to produce the same result
map_result2 = list(map(lambda item: item * 2, my_list))
print(map_result2, '\n')

# the filter function: keep only elements for which the predicate is True.
def check_odd(item):
    return item % 2 != 0

filter_result = list(filter(check_odd, my_list))
print(filter_result, '\n')

# using lamda expression
filter_result2 = list(filter(lambda item: item % 2 != 0, my_list))
print(filter_result2, '\n')

# using the zip() function: pair up elements from two iterables.
your_list = [10, 20, 30, 40, 50]
zip_result = list(zip(my_list, your_list))
print(zip_result)

# using reduce(): fold the list into a single value, starting from 0.
def accumulator(acc, item):
    print(acc, item)  # shows the running total at each step
    return acc + item

reduce_result = reduce(accumulator, my_list, 0)
print(reduce_result, '\n')

# lambda Expressions: same reduction without a named function.
reduce_result2 = reduce(lambda acc, item: acc + item, my_list, 0)
print(reduce_result2)
# lambda param: action(param)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 16:15:15 2020
@author: chenhj
"""
from pygal_maps_world.i18n import COUNTRIES
def get_code(country_name):
    """Return the pygal country code matching `country_name`, or None if unknown."""
    for country_code, known_name in COUNTRIES.items():
        if known_name == country_name:
            return country_code
# Read lcpython.txt, reverse the order of its words, and write them to output.txt.
# Fix: the original opened both files a second time (`wordsfile`/`newfile`) and
# never closed those handles; all I/O now goes through the managed `with` block.
with open("C:\\Users\\Anna\\Desktop\\Learning Community\\lcpython.txt", "r") as infile, open("C:\\Users\\Anna\\Desktop\\Learning Community\\output.txt", "w") as outfile:
    content = infile.read()
    words = content.split()
    words.reverse()
    print(words)
    for w in words:
        outfile.write(w + " ")
    print(outfile)
|
# Register the custom Baxter environment with Gym so it can be created
# later via gym.make('baxter_env-v0').
from gym.envs.registration import register

register(
    id='baxter_env-v0',
    entry_point='baxter_env.envs:BaxterEnv',  # "module.path:ClassName" resolved lazily by Gym
)
import cv2 as cv
import numpy as np
np.set_printoptions(threshold=np.inf)
import math
import copy
# @class  Binary-region (connected component) info
# @member label        label assigned to the region
# @member size         number of pixels in the region
# @member topLeft      top-left corner of the region's bounding box
# @member bottomRight  bottom-right corner of the region's bounding box
class BwArea:
    """Record for one binary connected component.

    Tracks the component's label, pixel count and axis-aligned bounding box
    (topLeft / bottomRight, both stored as [row, col]).
    """

    def __init__(self, _label=-1, _size=0):
        self.label = _label
        self.size = _size
        # Sentinel corners: the first updatePos() call overwrites both.
        self.topLeft = [99999999, 99999999]
        self.bottomRight = [-1, -1]

    def updatePos(self, _x, _y):
        """Grow the bounding box to include point (_x, _y) and return self."""
        self.topLeft = [min(self.topLeft[0], _x), min(self.topLeft[1], _y)]
        self.bottomRight = [max(self.bottomRight[0], _x), max(self.bottomRight[1], _y)]
        return self
# @fn  Two-pass connected-component labelling
# @param src  input binary image (pixel values 0 / 255)
# @return list of BwArea records, one per connected region (index 0 unused)
def twoPass(src):
    # uint16 so more than 255 provisional labels can be stored
    dst = np.zeros(src.shape, dtype='uint16')
    rows = src.shape[0]
    cols = src.shape[1]
    MAX_PIXEL_VAL = 255
    # two-pass
    label = np.uint16(1)
    # union-find: index is a label, uf[label] is the mapped parent label
    uf = list([np.uint16(0)])
    for i in range(0, rows):
        for j in range(0, cols):
            # foreground pixel
            if(src[i][j] == MAX_PIXEL_VAL):
                top = 0
                left = 0
                if(i-1 >= 0):
                    top = dst[i-1][j]
                if(j-1 >= 0):
                    left = dst[i][j-1]
                # neither left nor top labelled: assign a new label, add to union-find
                if(top == 0 and left == 0):
                    dst[i][j] = label
                    uf.append(label)
                    label += 1
                # both left and top carry labels
                elif(top > 0 and left > 0):
                    # equal labels: copy either one
                    if(top == left):
                        dst[i][j] = top
                    # different labels: take the smaller and union the two sets
                    else:
                        minVal = min(top,left)
                        maxVal = max(top,left)
                        dst[i][j] = np.uint16(minVal)
                        uf[maxVal] = np.uint16(minVal)
                # exactly one neighbour labelled: inherit its label
                elif(top > 0 or left > 0):
                    dst[i][j] = max(top,left)
    # Collapse union-find chains so each label maps into its set's root,
    # collecting the unique root labels along the way.
    uniqueLabel = list()
    for i in range(1,len(uf)):
        if(uf[i] == i):
            uniqueLabel.append(uf[i])
            continue
        mark = uf[i]
        # A label mapping to itself is a root; otherwise follow parent links up to the root.
        while(uf[mark] != mark):
            mark = uf[mark]
        uf[i] = mark
    # Build a dict remapping the unique root labels to a dense range starting at 1.
    mappedLabel = dict()
    for i in range(0,len(uniqueLabel)):
        mappedLabel[uniqueLabel[i]] = i+1
    # apply the mapping
    for i in range(1,len(uf)):
        uf[i] = mappedLabel[uf[i]]
    # build the area list (slot 0 is a background placeholder)
    areas = list()
    for i in range(0,len(mappedLabel)+1):
        areas.append(BwArea(i))
    # Second pass: unify labels inside each component and update the areas list.
    for i in range(0,rows):
        for j in range(0,cols):
            if(dst[i][j] > 0):
                dst[i][j] = uf[dst[i][j]]
                areas[dst[i][j]].size += 1
                areas[dst[i][j]] = areas[dst[i][j]].updatePos(i,j)
    # colour each connected component differently for visualisation
    labeledImg = copy.deepcopy(src)
    labeledImg = cv.cvtColor(labeledImg,cv.COLOR_GRAY2BGR)
    for i in range(0,rows):
        for j in range(0,cols):
            labeledImg[i][j] = [(dst[i][j]*121)%255,(dst[i][j]*246)%255,(dst[i][j]*336)%255]
    # draw each component's numeric label at its bottom-right corner
    for i in range(1,len(areas)):
        cv.putText(labeledImg,str(areas[i].label),
                   (areas[i].bottomRight[1],areas[i].bottomRight[0]),
                   cv.FONT_HERSHEY_SIMPLEX,1,(255,255,255),1)
    cv.imshow("labeledImg",labeledImg)
    return areas
# Load the test image as grayscale, binarize it, then label its components.
# Fixes: renamed `input` -> `img` (was shadowing the builtin input()) and
# fail fast when the image cannot be read (imread returns None silently).
img = cv.imread("F://MBR.bmp", cv.IMREAD_GRAYSCALE)
if img is None:
    raise FileNotFoundError("could not read F://MBR.bmp")
# cv.threshold returns (retval, image); keep only the binarized image
img = cv.threshold(img, 20, 255, cv.THRESH_BINARY)[1]
areas = twoPass(img)
cv.imshow("input", img)
cv.waitKey(0)
|
"""
These are common models for multi resource.
"""
from dataclasses import dataclass, field
from typing import Optional, List
from .base import BaseModel
@dataclass
class Thumbnail(BaseModel):
    """
    A class representing the thumbnail resource info.

    Refer: https://developers.google.com/youtube/v3/docs/channels#snippet.thumbnails.(key).url
    """

    url: Optional[str] = field(default=None)
    width: Optional[int] = field(default=None, repr=False)   # pixels
    height: Optional[int] = field(default=None, repr=False)  # pixels
@dataclass
class Thumbnails(BaseModel):
    """
    A class representing the multi thumbnail resource info.

    One optional `Thumbnail` per size key that YouTube may return.
    Refer: https://developers.google.com/youtube/v3/docs/channels#snippet.thumbnails
    """

    default: Optional[Thumbnail] = field(default=None)
    medium: Optional[Thumbnail] = field(default=None, repr=False)
    high: Optional[Thumbnail] = field(default=None, repr=False)
    standard: Optional[Thumbnail] = field(default=None, repr=False)
    maxres: Optional[Thumbnail] = field(default=None, repr=False)
@dataclass
class Topic(BaseModel):
    """
    A class representing the channel topic info. this model also suitable for video.

    Refer:
        https://developers.google.com/youtube/v3/docs/channels#topicDetails.topicIds[]
        https://developers.google.com/youtube/v3/docs/videos#topicDetails.topicIds[]

    This model is customized for parsing topic id. YouTube Data Api not return this.
    """

    id: Optional[str] = field(default=None)
    description: Optional[str] = field(default=None)
@dataclass
class BaseTopicDetails(BaseModel):
    """
    This is the base model for channel or video topic details.
    """

    # Fix: the default is None, so the annotation must be Optional[List[str]]
    # (it was annotated as a bare List[str]).
    topicIds: Optional[List[str]] = field(default=None, repr=False)

    def get_full_topics(self):
        """
        Convert topicIds list to Topic model list.

        :return: List[Topic] (empty when topicIds is unset)
        """
        # Local import to avoid a circular dependency with the package root.
        from pyyoutube import TOPICS

        if not self.topicIds:
            return []
        return [
            Topic.from_dict({"id": topic_id, "description": TOPICS.get(topic_id)})
            for topic_id in self.topicIds
        ]
@dataclass
class Localized(BaseModel):
    """
    A class representing the channel or video snippet localized info.

    Refer:
        https://developers.google.com/youtube/v3/docs/channels#snippet.localized
        https://developers.google.com/youtube/v3/docs/videos#snippet.localized
    """

    title: Optional[str] = field(default=None)
    description: Optional[str] = field(default=None, repr=False)
@dataclass
class PageInfo(BaseModel):
    """
    This is data model for save paging data.

    Note:
        totalResults is only an approximation/estimate.
    Refer:
        https://stackoverflow.com/questions/43507281/totalresults-count-doesnt-match-with-the-actual-results-returned-in-youtube-v3
    """

    totalResults: Optional[int] = field(default=None)
    resultsPerPage: Optional[int] = field(default=None)
@dataclass
class BaseApiResponse(BaseModel):
    """
    This is Data Api response structure when retrieve data.
    They both have same response structure, but items.

    Refer:
        https://developers.google.com/youtube/v3/docs/channels/list#response_1
        https://developers.google.com/youtube/v3/docs/playlistItems/list#response_1
    """

    kind: Optional[str] = field(default=None)
    etag: Optional[str] = field(default=None, repr=False)
    nextPageToken: Optional[str] = field(default=None, repr=False)
    prevPageToken: Optional[str] = field(default=None, repr=False)
    pageInfo: Optional[PageInfo] = field(default=None, repr=False)
@dataclass
class BaseResource(BaseModel):
    """
    This is a base model for different resource type.

    Refer: https://developers.google.com/youtube/v3/docs#resource-types
    """

    kind: Optional[str] = field(default=None)
    etag: Optional[str] = field(default=None, repr=False)
    id: Optional[str] = field(default=None)
@dataclass
class ResourceId(BaseModel):
    """
    A class representing the subscription snippet resource info.

    Refer:
        1. https://developers.google.com/youtube/v3/docs/playlistItems#snippet.resourceId
        2. https://developers.google.com/youtube/v3/docs/subscriptions#snippet.resourceId
        3. https://developers.google.com/youtube/v3/docs/activities#contentDetails.social.resourceId
    """

    kind: Optional[str] = field(default=None)
    # Only one of the following ids is set, depending on `kind`.
    videoId: Optional[str] = field(default=None)
    channelId: Optional[str] = field(default=None)
    playlistId: Optional[str] = field(default=None)
@dataclass
class Player(BaseModel):
    """
    A class representing the video,playlist player info.

    Refer:
        https://developers.google.com/youtube/v3/docs/videos#player
    """

    embedHtml: Optional[str] = field(default=None)
    # Important:
    # follows attributions maybe not exists.
    embedHeight: Optional[int] = field(default=None, repr=False)
    embedWidth: Optional[int] = field(default=None, repr=False)
|
from ..torch_core import *
from ..data import *
import functools
__all__ = ['Image', 'ImageBBox', 'ImageBase', 'ImageMask', 'RandTransform', 'TfmAffine', 'TfmCoord', 'TfmCrop', 'TfmLighting',
'TfmPixel', 'Transform', 'affine_grid', 'affine_mult', 'apply_perspective', 'apply_tfms', 'brightness', 'compute_zs_mat',
'contrast', 'crop', 'crop_pad', 'dihedral', 'find_coeffs', 'flip_lr', 'get_crop_target', 'get_default_args',
'get_resize_target', 'get_transforms', 'get_zoom_mat', 'grid_sample', 'jitter', 'log_uniform', 'logit', 'logit_', 'pad',
'perspective_warp', 'rand_bool', 'rand_crop', 'rand_int', 'rand_zoom', 'resolve_tfms', 'rotate', 'round_multiple', 'skew',
'squish', 'symmetric_warp', 'tilt', 'uniform', 'uniform_int', 'zoom', 'zoom_crop', 'zoom_squish']
def logit(x:Tensor)->Tensor:
    "Inverse of sigmoid: log(x / (1 - x))."
    return ((1/x) - 1).log().neg()
def logit_(x:Tensor)->Tensor:
    "In-place logit of `x`: mutates and returns the same tensor."
    x.reciprocal_()
    x.sub_(1)
    x.log_()
    return x.neg_()
def uniform(low:Number, high:Number, size:List[int]=None)->FloatOrTensor:
    "Draw 1 or shape=`size` random floats from uniform dist: min=`low`, max=`high`"
    if size is None:
        return random.uniform(low, high)
    return torch.FloatTensor(*listify(size)).uniform_(low, high)
def log_uniform(low, high, size=None)->FloatOrTensor:
    "Draw 1 or shape=`size` random floats from uniform dist: min=log(`low`), max=log(`high`)"
    sample = uniform(log(low), log(high), size)
    if size is None:
        return exp(sample)
    return sample.exp_()
def rand_bool(p:float, size=None)->BoolOrTensor:
    "Draw 1 or shape=`size` random booleans (True occuring probability p)"
    draws = uniform(0, 1, size)
    return draws < p
def uniform_int(low:Number, high:Number, size:Optional[List[int]]=None)->FloatOrTensor:
    "Generate int or tensor `size` of ints from uniform(`low`,`high`)"
    if size is None:
        return random.randint(low, high)
    return torch.randint(low, high, size)
def get_default_args(func:Callable):
    "Return a dict mapping `func`'s parameter names to their default values."
    params = inspect.signature(func).parameters
    empty = inspect.Parameter.empty
    return {name: p.default for name, p in params.items() if p.default is not empty}
class ImageBase(ItemBase):
    "Img based `Dataset` items derive from this. Subclass to handle lighting, pixel, etc"
    # The four transform hooks are identity no-ops here; `Image` overrides them.
    def lighting(self, func:LightingFunc, *args, **kwargs)->'ImageBase': return self
    def pixel(self, func:PixelFunc, *args, **kwargs)->'ImageBase': return self
    def coord(self, func:CoordFunc, *args, **kwargs)->'ImageBase': return self
    def affine(self, func:AffineFunc, *args, **kwargs)->'ImageBase': return self
    def set_sample(self, **kwargs)->'ImageBase':
        "Set parameters that control how we `grid_sample` the image after transforms are applied"
        self.sample_kwargs = kwargs
        return self
    def clone(self)->'ImageBase':
        "Clones this item and its `data`"
        return self.__class__(self.data.clone())
class Image(ImageBase):
    "Supports appying transforms to image data"
    def __init__(self, px)->'Image':
        "create from raw tensor image data `px`"
        self._px = px
        self._logit_px=None       # lazily computed logit of the pixels (lighting tfms)
        self._flow=None           # queued coordinate grid (None = identity)
        self._affine_mat=None     # queued affine matrix (None = identity)
        self.sample_kwargs = {}
    @property
    def shape(self)->Tuple[int,int,int]:
        "Returns (ch, h, w) for this image"
        return self._px.shape
    @property
    def size(self)->Tuple[int,int]:
        "Returns (h, w) for this image"
        return self.shape[-2:]
    @property
    def device(self)->torch.device: return self._px.device
    def __repr__(self): return f'{self.__class__.__name__} ({self.shape})'
    def refresh(self)->None:
        "Applies any logit or affine transfers that have been queued up"
        if self._logit_px is not None:
            self._px = self._logit_px.sigmoid_()
            self._logit_px = None
        if self._affine_mat is not None or self._flow is not None:
            # reading `self.flow` folds any pending affine matrix into the grid
            self._px = grid_sample(self._px, self.flow, **self.sample_kwargs)
            self.sample_kwargs = {}
            self._flow = None
        return self
    @property
    def px(self)->TensorImage:
        "Get the tensor pixel buffer"
        self.refresh()
        return self._px
    @px.setter
    def px(self,v:TensorImage)->None:
        "Set the pixel buffer to `v`"
        self._px=v
    @property
    def flow(self)->FlowField:
        "Access the flow-field grid after applying queued affine transforms"
        if self._flow is None:
            self._flow = affine_grid(self.shape)
        if self._affine_mat is not None:
            self._flow = affine_mult(self._flow,self._affine_mat)
            self._affine_mat = None
        return self._flow
    @flow.setter
    def flow(self,v:FlowField): self._flow=v
    def lighting(self, func:LightingFunc, *args:Any, **kwargs:Any)->'Image':
        "Equivalent to `image = sigmoid(func(logit(image)))`"
        self.logit_px = func(self.logit_px, *args, **kwargs)
        return self
    def pixel(self, func:PixelFunc, *args, **kwargs)->'Image':
        "Equivalent to `image.px = func(image.px)`"
        self.px = func(self.px, *args, **kwargs)
        return self
    def coord(self, func:CoordFunc, *args, **kwargs)->'Image':
        "Equivalent to `image.flow = func(image.flow, image.size)`"
        self.flow = func(self.flow, self.shape, *args, **kwargs)
        return self
    def affine(self, func:AffineFunc, *args, **kwargs)->'Image':
        "Equivalent to `image.affine_mat = image.affine_mat @ func()`"
        m = tensor(func(*args, **kwargs)).to(self.device)
        self.affine_mat = self.affine_mat @ m
        return self
    def resize(self, size:Union[int,TensorImageSize])->'Image':
        "Resize the image to `size`, size can be a single int"
        assert self._flow is None
        if isinstance(size, int): size=(self.shape[0], size, size)
        self.flow = affine_grid(size)
        return self
    @property
    def affine_mat(self)->AffineMatrix:
        "Get the affine matrix that will be applied by `refresh`"
        if self._affine_mat is None:
            # identity until a transform composes something onto it
            self._affine_mat = torch.eye(3).to(self.device)
        return self._affine_mat
    @affine_mat.setter
    def affine_mat(self,v)->None: self._affine_mat=v
    @property
    def logit_px(self)->LogitTensorImage:
        "Get logit(image.px)"
        if self._logit_px is None: self._logit_px = logit_(self.px)
        return self._logit_px
    @logit_px.setter
    def logit_px(self,v:LogitTensorImage)->None: self._logit_px=v
    def show(self, ax:plt.Axes=None, **kwargs:Any)->None:
        "Plots the image into `ax`"
        show_image(self.px, ax=ax, **kwargs)
    @property
    def data(self)->TensorImage:
        "Returns this images pixels as a tensor"
        return self.px
class ImageMask(Image):
    "Class for image segmentation target"
    # lighting transforms must not change class indices, so it is a no-op
    def lighting(self, func:LightingFunc, *args:Any, **kwargs:Any)->'Image': return self
    def refresh(self):
        # masks hold discrete labels: always resample with nearest-neighbour
        self.sample_kwargs['mode'] = 'nearest'
        return super().refresh()
class ImageBBox(ImageMask):
    "Image class for bbox-style annotations"
    def clone(self):
        return self.__class__(self.px.clone())
    @classmethod
    def create(cls, bboxes:Collection[Collection[int]], h:int, w:int) -> 'ImageBBox':
        "Creates an ImageBBox object from bboxes"
        # one binary mask channel per box, so geometric tfms apply to boxes too
        pxls = torch.zeros(len(bboxes),h, w).long()
        for i,bbox in enumerate(bboxes):
            pxls[i,bbox[0]:bbox[2]+1,bbox[1]:bbox[3]+1] = 1
        return cls(pxls)
    @property
    def data(self) -> LongTensor:
        # recover (top, left, bottom, right) per mask channel that still has pixels
        bboxes = []
        for i in range(self.px.size(0)):
            idxs = torch.nonzero(self.px[i])
            if len(idxs) != 0:
                bboxes.append(torch.tensor([idxs[:,0].min(), idxs[:,1].min(), idxs[:,0].max(), idxs[:,1].max()])[None])
        # NOTE(review): torch.cat raises if every box was transformed away (empty list) - confirm intended
        return torch.cat(bboxes, 0).squeeze()
class Transform():
    "Utility class for adding probability and wrapping support to transform funcs"
    _wrap=None   # name of the Image method to route through ('pixel','affine',...)
    order=0      # sort key controlling application order in apply_tfms
    def __init__(self, func:Callable, order:Optional[int]=None)->None:
        "Create a transform for `func` and assign it an priority `order`, attach to Image class"
        if order is not None: self.order=order
        self.func=func
        functools.update_wrapper(self, self.func)
        # parameter annotations double as random-value resolvers (see RandTransform.resolve)
        self.params = copy(func.__annotations__)
        self.def_args = get_default_args(func)
        # expose the transform as a method on Image
        setattr(Image, func.__name__,
                lambda x, *args, **kwargs: self.calc(x, *args, **kwargs))
    def __call__(self, *args:Any, p:float=1., is_random:bool=True, **kwargs:Any)->Image:
        "Calc now if `args` passed; else create a transform called prob `p` if `random`"
        if args: return self.calc(*args, **kwargs)
        else: return RandTransform(self, kwargs=kwargs, is_random=is_random, p=p)
    def calc(self, x:Image, *args:Any, **kwargs:Any)->Image:
        "Apply to image `x`, wrapping it if necessary"
        if self._wrap: return getattr(x, self._wrap)(self.func, *args, **kwargs)
        else: return self.func(x, *args, **kwargs)
    @property
    def name(self)->str: return self.__class__.__name__
    def __repr__(self)->str: return f'{self.name} ({self.func.__name__})'
# Type aliases: a single transform, a collection of them, or nothing at all.
TfmList = Union[Transform, Collection[Transform]]
Tfms = Optional[TfmList]

# Decorator for lighting transforms: routes the func through Image.lighting (order 8).
class TfmLighting(Transform): order,_wrap = 8,'lighting'
@dataclass
class RandTransform():
    "Wraps `Transform` to add randomized execution"
    tfm:Transform
    kwargs:dict
    p:int=1.0                                   # probability of actually running
    resolved:dict = field(default_factory=dict) # concrete args bound by resolve()
    do_run:bool = True
    is_random:bool = True
    def __post_init__(self): functools.update_wrapper(self, self.tfm)
    def resolve(self)->None:
        "Bind any random variables needed tfm calc"
        if not self.is_random:
            self.resolved = {**self.tfm.def_args, **self.kwargs}
            return
        self.resolved = {}
        # for each param passed to tfm...
        for k,v in self.kwargs.items():
            # ...if it's annotated, call that fn...
            if k in self.tfm.params:
                rand_func = self.tfm.params[k]
                self.resolved[k] = rand_func(*listify(v))
            # ...otherwise use the value directly
            else: self.resolved[k] = v
        # use defaults for any args not filled in yet
        for k,v in self.tfm.def_args.items():
            if k not in self.resolved: self.resolved[k]=v
        # anything left over must be callable without params
        for k,v in self.tfm.params.items():
            if k not in self.resolved: self.resolved[k]=v()
        self.do_run = rand_bool(self.p)
    @property
    def order(self)->int: return self.tfm.order
    def __call__(self, x:Image, *args, **kwargs)->Image:
        "Randomly execute our tfm on `x`"
        return self.tfm(x, *args, **{**self.resolved, **kwargs}) if self.do_run else x
@TfmLighting
def brightness(x, change:uniform):
    "`change` brightness of image `x`"
    # operates in logit space: adding logit(change) shifts overall brightness
    return x.add_(scipy.special.logit(change))
@TfmLighting
def contrast(x, scale:log_uniform):
    "`scale` contrast of image `x`"
    # multiplying in logit space scales contrast about the midpoint
    return x.mul_(scale)
def resolve_tfms(tfms:TfmList):
    "Resolve every tfm in `tfms`"
    for tfm in listify(tfms):
        tfm.resolve()
def apply_tfms(tfms:TfmList, x:Image, do_resolve:bool=True):
    "Apply all the `tfms` to `x`, if `do_resolve` refresh all the random args"
    # NOTE(review): shadowed by the richer apply_tfms defined later in this module,
    # so this definition is effectively dead at import time.
    if not tfms: return x
    tfms = listify(tfms)
    if do_resolve: resolve_tfms(tfms)
    x = x.clone()
    for tfm in tfms: x = tfm(x)
    return x
def grid_sample(x:TensorImage, coords:FlowField, mode:str='bilinear', padding_mode:str='reflect')->TensorImage:
    "Grab pixels in `coords` from `input` sampling by `mode`. pad is reflect or zeros."
    # translate fastai's 'reflect' to torch's 'reflection'
    pm = 'reflection' if padding_mode == 'reflect' else padding_mode
    batched = F.grid_sample(x[None], coords, mode=mode, padding_mode=pm)
    return batched[0]
def affine_grid(size:TensorImageSize)->FlowField:
    "Build an identity flow-field grid in [-1,1] coords for an image of `size` (ch, h, w)."
    size = ((1,)+size)
    N, C, H, W = size
    grid = FloatTensor(N, H, W, 2)
    linear_points = torch.linspace(-1, 1, W) if W > 1 else tensor([-1])
    # Fix: torch.ger is a deprecated alias - torch.outer is the supported spelling.
    grid[:, :, :, 0] = torch.outer(torch.ones(H), linear_points).expand_as(grid[:, :, :, 0])
    linear_points = torch.linspace(-1, 1, H) if H > 1 else tensor([-1])
    grid[:, :, :, 1] = torch.outer(linear_points, torch.ones(W)).expand_as(grid[:, :, :, 1])
    return grid
def affine_mult(c:FlowField,m:AffineMatrix)->FlowField:
    "Multiply `c` by `m` - can adjust for rectangular shaped `c`"
    if m is None:
        return c
    sz = c.size()
    _, h, w, _ = sz
    # NOTE: rescales `m` in place to compensate for non-square grids
    m[0,1] *= h/w
    m[1,0] *= w/h
    flat = c.view(-1,2)
    flat = torch.addmm(m[:2,2], flat, m[:2,:2].t())
    return flat.view(sz)
class TfmAffine(Transform):
    "Wraps affine tfm funcs"
    # applied via Image.affine at priority 5
    order,_wrap = 5,'affine'

class TfmPixel(Transform):
    "Wraps pixel tfm funcs"
    # applied via Image.pixel at priority 10
    order,_wrap = 10,'pixel'
@TfmAffine
def rotate(degrees:uniform):
    "Affine func that rotates the image"
    # standard 2D rotation matrix in homogeneous coordinates
    angle = degrees * math.pi / 180
    return [[cos(angle), -sin(angle), 0.],
            [sin(angle), cos(angle), 0.],
            [0. , 0. , 1.]]
def get_zoom_mat(sw: float, sh: float, c: float, r: float) -> AffineMatrix:
    "`sw`,`sh` scale width,height - `c`,`r` focus col,row"
    top = [sw, 0, c]
    middle = [0, sh, r]
    bottom = [0, 0, 1.]
    return [top, middle, bottom]
@TfmAffine
def zoom(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5):
    "Zoom image by `scale`. `row_pct`,`col_pct` select focal point of zoom"
    # shift the focal point so it stays fixed while scaling
    s = 1-1/scale
    col_c = s * (2*col_pct - 1)
    row_c = s * (2*row_pct - 1)
    return get_zoom_mat(1/scale, 1/scale, col_c, row_c)
@TfmAffine
def squish(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5):
    "Squish image by `scale`. `row_pct`,`col_pct` select focal point of zoom"
    # scale<=1 squishes horizontally; otherwise vertically by 1/scale
    if scale <= 1:
        col_c = (1-scale) * (2*col_pct - 1)
        return get_zoom_mat(scale, 1, col_c, 0.)
    else:
        row_c = (1-1/scale) * (2*row_pct - 1)
        return get_zoom_mat(1, 1/scale, 0., row_c)
# Decorator for coord transforms: routes the func through Image.coord (order 4).
class TfmCoord(Transform): order,_wrap = 4,'coord'

@TfmCoord
def jitter(c, size, magnitude:uniform):
    "Shift every coord by a random offset within +/-`magnitude`."
    return c.add_((torch.rand_like(c)-0.5)*magnitude*2)
@TfmPixel
def flip_lr(x):
    "Flip `x` horizontally (dim 2 is width)."
    return x.flip(2)
@TfmPixel
def dihedral(x, k:partial(uniform_int,0,8)):
    "Randomly flip `x` image based on k"
    # k's low two bits select flips; bit 2 transposes - yielding the 8 dihedral symmetries
    flips=[]
    if k&1: flips.append(1)
    if k&2: flips.append(2)
    if flips: x = torch.flip(x,flips)
    if k&4: x = x.transpose(1,2)
    return x.contiguous()
# order=-10 so padding happens before other pixel transforms
@partial(TfmPixel, order=-10)
def pad(x, padding, mode='reflect'):
    "Pad `x` with `padding` pixels. `mode` fills in space ('reflect','zeros',etc)"
    return F.pad(x[None], (padding,)*4, mode=mode)[0]
@TfmPixel
def crop(x, size, row_pct:uniform=0.5, col_pct:uniform=0.5):
    "Crop `x` to `size` pixels. `row_pct`,`col_pct` select focal point of crop"
    size = listify(size,2)
    rows,cols = size
    # pct of 0/0.5/1 puts the crop window at the start/centre/end
    row = int((x.size(1)-rows+1) * row_pct)
    col = int((x.size(2)-cols+1) * col_pct)
    return x[:, row:row+rows, col:col+cols].contiguous()
# Crop transforms run last (order 99), after all other pixel transforms.
class TfmCrop(TfmPixel): order=99

@TfmCrop
def crop_pad(x, size, padding_mode='reflect',
             row_pct:uniform = 0.5, col_pct:uniform = 0.5):
    "Crop and pad tfm - `row_pct`,`col_pct` sets focal point"
    if padding_mode=='zeros': padding_mode='constant'
    size = listify(size,2)
    if x.shape[1:] == size: return x
    rows,cols = size
    if x.size(1)<rows or x.size(2)<cols:
        # pad symmetrically so the requested size fits before cropping
        row_pad = max((rows-x.size(1)+1)//2, 0)
        col_pad = max((cols-x.size(2)+1)//2, 0)
        x = F.pad(x[None], (col_pad,col_pad,row_pad,row_pad), mode=padding_mode)[0]
    row = int((x.size(1)-rows+1)*row_pct)
    col = int((x.size(2)-cols+1)*col_pct)
    x = x[:, row:row+rows, col:col+cols]
    return x.contiguous() # without this, get NaN later - don't know why
def round_multiple(x:int, mult:int)->int:
    "Calc `x` to nearest multiple of `mult`"
    quotient = int(x / mult + 0.5)
    return quotient * mult
def get_crop_target(target_px:Union[int,Tuple[int,int]], mult:int=32)->Tuple[int,int]:
    "Calc crop shape of `target_px` to nearest multiple of `mult`"
    rows, cols = listify(target_px, 2)
    return tuple(round_multiple(v, mult) for v in (rows, cols))
def get_resize_target(img, crop_target, do_crop=False)->TensorImageSize:
    "Calc size of `img` to fit in `crop_target` - adjust based on `do_crop`"
    if crop_target is None:
        return None
    ch, rows, cols = img.shape
    target_rows, target_cols = crop_target
    # cropping covers the target (min ratio); fitting stays inside it (max ratio)
    pick = min if do_crop else max
    ratio = pick(rows / target_rows, cols / target_cols)
    return ch, round(rows / ratio), round(cols / ratio)
def apply_tfms(tfms:TfmList, x:TensorImage, do_resolve:bool=True,
               xtra:Optional[Dict[Transform,dict]]=None, size:Optional[Union[int,TensorImageSize]]=None,
               mult:int=32, do_crop:bool=True, padding_mode:str='reflect', **kwargs:Any)->TensorImage:
    "Apply all `tfms` to `x` - `do_resolve`: bind random args - size,mult used to crop/pad"
    # NOTE(review): this definition shadows the simpler apply_tfms defined earlier in the module.
    if tfms or xtra or size:
        if not xtra: xtra={}
        tfms = sorted(listify(tfms), key=lambda o: o.tfm.order)
        if do_resolve: resolve_tfms(tfms)
        x = x.clone()
        x.set_sample(padding_mode=padding_mode, **kwargs)
        if size:
            # snap the requested size to a multiple of `mult`, then resize to cover it
            crop_target = get_crop_target(size, mult=mult)
            target = get_resize_target(x, crop_target, do_crop=do_crop)
            x.resize(target)
        # crop-style tfms additionally receive the target size and padding mode
        size_tfms = [o for o in tfms if isinstance(o.tfm,TfmCrop)]
        for tfm in tfms:
            if tfm.tfm in xtra: x = tfm(x, **xtra[tfm.tfm])
            elif tfm in size_tfms: x = tfm(x, size=size, padding_mode=padding_mode)
            else: x = tfm(x)
    return x
def rand_zoom(*args, **kwargs):
    "Random zoom tfm"
    # randomise the focal point over the whole image
    return zoom(*args, row_pct=(0,1), col_pct=(0,1), **kwargs)

def rand_crop(*args, **kwargs):
    "Random crop and pad"
    return crop_pad(*args, row_pct=(0,1), col_pct=(0,1), **kwargs)
def zoom_crop(scale, do_rand=False, p=1.0):
    "Randomly zoom and/or crop"
    if do_rand:
        zoom_fn, crop_fn = rand_zoom, rand_crop
    else:
        zoom_fn, crop_fn = zoom, crop_pad
    return [zoom_fn(scale=scale, p=p), crop_fn()]
def find_coeffs(orig_pts:Points, targ_pts:Points)->Tensor:
    "Find 8 coeff mentioned [here](https://web.archive.org/web/20150222120106/xenia.media.mit.edu/~cwren/interpolator/)"
    matrix = []
    #The equations we'll need to solve.
    for p1, p2 in zip(targ_pts, orig_pts):
        matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0]*p1[0], -p2[0]*p1[1]])
        matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1]*p1[0], -p2[1]*p1[1]])
    A = FloatTensor(matrix)
    B = FloatTensor(orig_pts).view(8)
    #The 8 scalars we seek are solution of AX = B
    # Fix: torch.gesv was removed from PyTorch; torch.linalg.solve is the
    # supported replacement and returns the solution directly.
    return torch.linalg.solve(A, B)
def apply_perspective(coords:FlowField, coeffs:Points)->FlowField:
    "Transform `coords` with `coeffs`"
    orig_size = coords.size()
    # flatten every grid point into an N x 2 list of (x, y) pairs
    flat = coords.view(-1,2)
    # build the 3x3 homography: the 8 coeffs plus a trailing 1
    mat = torch.cat([coeffs, FloatTensor([1])]).view(3,3)
    flat = torch.addmm(mat[:,2], flat, mat[:,:2].t())
    # divide by the homogeneous coordinate
    flat.mul_(1/flat[:,2].unsqueeze(1))
    return flat[:,:2].view(orig_size)
# Corner points of the [-1,1] coordinate square, in (x, y) order.
_orig_pts = [[-1,-1], [-1,1], [1,-1], [1,1]]

def _perspective_warp(c:FlowField, targ_pts:Points):
    "Apply warp to `targ_pts` from `_orig_pts` to `c` `FlowField`"
    return apply_perspective(c, find_coeffs(_orig_pts, targ_pts))
@TfmCoord
def perspective_warp(c, img_size, magnitude:partial(uniform,size=8)=0):
    "Apply warp to `c` and with size `img_size` with `magnitude` amount"
    # the 8 random values are x/y offsets for each of the 4 corners
    magnitude = magnitude.view(4,2)
    targ_pts = [[x+m for x,m in zip(xs, ms)] for xs, ms in zip(_orig_pts, magnitude)]
    return _perspective_warp(c, targ_pts)
@TfmCoord
def symmetric_warp(c, img_size, magnitude:partial(uniform,size=4)=0):
    "Apply warp to `c` with size `img_size` and `magnitude` amount"
    # 4 magnitudes shared between opposite corners keeps the warp symmetric
    m = listify(magnitude, 4)
    targ_pts = [[-1-m[3],-1-m[1]], [-1-m[2],1+m[1]], [1+m[3],-1-m[0]], [1+m[2],1+m[0]]]
    return _perspective_warp(c, targ_pts)
def rand_int(low:int,high:int)->int:
    "Draw one random integer in [`low`, `high`] inclusive."
    return random.randint(low, high)
@TfmCoord
def tilt(c, img_size, direction:rand_int, magnitude:uniform=0):
    "Tilt `c` field and resize to`img_size` with random `direction` and `magnitude`"
    # direction selects which edge (0-3) leans away by `magnitude`
    orig_pts = [[-1,-1], [-1,1], [1,-1], [1,1]]
    if direction == 0: targ_pts = [[-1,-1], [-1,1], [1,-1-magnitude], [1,1+magnitude]]
    elif direction == 1: targ_pts = [[-1,-1-magnitude], [-1,1+magnitude], [1,-1], [1,1]]
    elif direction == 2: targ_pts = [[-1,-1], [-1-magnitude,1], [1,-1], [1+magnitude,1]]
    elif direction == 3: targ_pts = [[-1-magnitude,-1], [-1,1], [1+magnitude,-1], [1,1]]
    coeffs = find_coeffs(orig_pts, targ_pts)
    return apply_perspective(c, coeffs)
@TfmCoord
def skew(c, img_size, direction:rand_int, magnitude:uniform=0):
    "Skew `c` field and resize to`img_size` with random `direction` and `magnitude`"
    # direction (0-7) selects which single corner coordinate moves by `magnitude`
    orig_pts = [[-1,-1], [-1,1], [1,-1], [1,1]]
    if direction == 0: targ_pts = [[-1-magnitude,-1], [-1,1], [1,-1], [1,1]]
    elif direction == 1: targ_pts = [[-1,-1-magnitude], [-1,1], [1,-1], [1,1]]
    elif direction == 2: targ_pts = [[-1,-1], [-1-magnitude,1], [1,-1], [1,1]]
    elif direction == 3: targ_pts = [[-1,-1], [-1,1+magnitude], [1,-1], [1,1]]
    elif direction == 4: targ_pts = [[-1,-1], [-1,1], [1+magnitude,-1], [1,1]]
    elif direction == 5: targ_pts = [[-1,-1], [-1,1], [1,-1-magnitude], [1,1]]
    elif direction == 6: targ_pts = [[-1,-1], [-1,1], [1,-1], [1+magnitude,1]]
    elif direction == 7: targ_pts = [[-1,-1], [-1,1], [1,-1], [1,1+magnitude]]
    coeffs = find_coeffs(orig_pts, targ_pts)
    return apply_perspective(c, coeffs)
def get_transforms(do_flip:bool=True, flip_vert:bool=False, max_rotate:float=10., max_zoom:float=1.1,
                   max_lighting:float=0.2, max_warp:float=0.2, p_affine:float=0.75,
                   p_lighting:float=0.75, xtra_tfms:float=None)->Collection[Transform]:
    "Utility func to easily create list of `flip`, `rotate`, `zoom`, `warp`, `lighting` transforms"
    res = [rand_crop()]
    if do_flip: res.append(dihedral() if flip_vert else flip_lr(p=0.5))
    if max_warp: res.append(symmetric_warp(magnitude=(-max_warp,max_warp), p=p_affine))
    if max_rotate: res.append(rotate(degrees=(-max_rotate,max_rotate), p=p_affine))
    if max_zoom>1: res.append(rand_zoom(scale=(1.,max_zoom), p=p_affine))
    if max_lighting:
        res.append(brightness(change=(0.5*(1-max_lighting), 0.5*(1+max_lighting)), p=p_lighting))
        res.append(contrast(scale=(1-max_lighting, 1/(1-max_lighting)), p=p_lighting))
    # returns a (train, valid) pair: random tfms for training, plain crop_pad for validation
    return (res + listify(xtra_tfms), [crop_pad()])
#To keep?
def compute_zs_mat(sz:TensorImageSize, scale:float, squish:float,
                   invert:bool, row_pct:float, col_pct:float)->AffineMatrix:
    "Utility routine to compute zoom/squish matrix"
    # NOTE(review): despite the scalar annotations, scale/squish/invert are iterated,
    # so they are expected to be sequences of candidate draws - confirm with callers.
    orig_ratio = math.sqrt(sz[2]/sz[1])
    for s,r,i in zip(scale,squish, invert):
        s,r = math.sqrt(s),math.sqrt(r)
        if s * r <= 1 and s / r <= 1: #Test if we are completely inside the picture
            w,h = (s/r, s*r) if i else (s*r,s/r)
            w /= orig_ratio
            h *= orig_ratio
            col_c = (1-w) * (2*col_pct - 1)
            row_c = (1-h) * (2*row_pct - 1)
            return get_zoom_mat(w, h, col_c, row_c)
    #Fallback, hack to emulate a center crop without cropping anything yet.
    if orig_ratio > 1: return get_zoom_mat(1/orig_ratio**2, 1, 0, 0.)
    else: return get_zoom_mat(1, orig_ratio**2, 0, 0.)
@TfmCoord
def zoom_squish(c, size, scale:uniform=1.0, squish:uniform=1.0, invert:rand_bool=False,
                row_pct:uniform=0.5, col_pct:uniform=0.5):
    "Apply a combined zoom/squish warp to coord grid `c`."
    #This is intended for scale, squish and invert to be of size 10 (or whatever) so that the transform
    #can try a few zoom/squishes before falling back to center crop (like torchvision.RandomResizedCrop)
    m = compute_zs_mat(size, scale, squish, invert, row_pct, col_pct)
    return affine_mult(c, FloatTensor(m))
|
import argparse
import os
import logging
import sys
import time
import gzip
import pickle
from shutil import copy
from math import factorial
import yaml
import numpy as np
import tensorflow as tf
from lm.stitch import init_lm_checkpoints
from cos.tfidf import get_idf_vector
from model import get_model
from utils import get_vocabs, get_embeddings, bad_words, unk_idx
from neighbor_function import get_extractive_initial_states
# Route INFO-level logs to stdout; main() adds a per-run file handler later.
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
def main():
    """Parse CLI args, set up output dirs/logging, load and validate config, then run.

    Raises:
        ValueError: if config['mode'] is not an extractive mode.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_file', type=str, default='test/input.txt')
    parser.add_argument('--config', type=str, default='configs/default.yaml')
    parser.add_argument('--output_dir', type=str, default='outputs/default')
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--from_line', type=int, default=0)
    parser.add_argument('--to_line', type=int, default=-1)
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)
    log_dir = os.path.join(args.output_dir, 'logs')
    os.makedirs(log_dir, exist_ok=True)
    log_file = os.path.join(log_dir, 'log_{}_{}.txt'.format(args.from_line, args.to_line))
    file_handler = logging.FileHandler(log_file)
    logging.getLogger().addHandler(file_handler)

    # to_line == -1 means "to end of file": count the input lines.
    # Fix: close the handle (it was previously leaked via a bare open()).
    if args.to_line == -1:
        with open(args.input_file) as f:
            args.to_line = sum(1 for _ in f)

    for k, v in vars(args).items():
        logging.info('{}: {}'.format(k, v))

    # Fix: close the config file handle instead of leaking it.
    with open(args.config) as f:
        config = yaml.safe_load(f)
    copy(args.config, os.path.join(args.output_dir, 'config.yaml'))
    copy(config['vocab_file'], os.path.join(args.output_dir, 'vocab.txt'))
    if config['mode'] not in ['extractive', 'extractive_norm']:
        raise ValueError('mode not supported: {}'.format(config['mode']))
    run(args, config)
def run(args, config):
    """Summarize every selected input line: run all batches through the model,
    pick the highest-scoring state, and dump per-line outputs to gzip pickles.
    """
    time_start = time.time()
    print('time_start: {}'.format(str(time_start)))
    sess, model_inputs, model_outputs, embeddings, word2idx, idx2word = build_graph(config)
    lines = get_lines(args)
    all_max_scores = list()
    for line_id, sentence in lines.items():
        logging.info('line id: {}'.format(line_id))
        # Each line gets its own output directory: <output_dir>/lines/<line_id>.
        line_output_dir = os.path.join(args.output_dir, 'lines', str(line_id))
        os.makedirs(line_output_dir, exist_ok=True)
        all_outputs = list()
        line_time_start = time.time()
        for batch_id, batch in enumerate(get_batches(sentence, line_id, args.batch_size, args, config, word2idx)):
            x_sentence, sentence_length, initial_state, summary_length, num_steps = batch
            logging.info('batch id: {}'.format(batch_id))
            print(x_sentence)
            feed_dict = {
                model_inputs['sentence']: x_sentence,
                model_inputs['sentence_length']: sentence_length,
                model_inputs['initial_state']: initial_state.state,
                model_inputs['initial_internal_state']: initial_state.internal_state,
                model_inputs['summary_length']: summary_length,
                model_inputs['num_steps']: num_steps
            }
            outputs = sess.run(model_outputs, feed_dict=feed_dict)
            all_outputs.append(outputs)
        # Batches are concatenated along axis=1 (the restart/batch dimension --
        # presumably; confirm against the model's output shapes).
        states = np.concatenate([o['states'] for o in all_outputs], axis=1)
        scores = np.concatenate([o['scores'] for o in all_outputs], axis=1)
        # Best summary = state at the argmax of the flattened score matrix.
        max_idx = np.unravel_index(np.argmax(scores), scores.shape)
        max_score = scores[max_idx]
        all_max_scores.append(max_score)
        max_state = states[max_idx]
        logging.info('max_score: {}'.format(max_score))
        logging.info('max_state: {}'.format(max_state))
        logging.info('summary: {}'.format(' '.join([idx2word[idx] for idx in max_state])))
        logging.info('run_time: {}'.format(int(time.time()-line_time_start)))
        outputs = dict(states=states,
                       scores=scores,
                       sentence=sentence,
                       line_id=line_id,
                       time=time.time()-line_time_start)
        with gzip.open(os.path.join(line_output_dir, 'outputs.pickle.gzip'), 'wb') as f:
            pickle.dump(outputs, f)
        logging.info('')
    logging.info('avg max score: {}'.format(np.mean(all_max_scores)))
    logging.info('end: {}'.format(str(time.time())))
    logging.info('runtime: {}'.format(str(time.time() - time_start)))
def build_graph(config):
    """Build the TF graph and session for the configured metrics.

    Returns (sess, model_inputs, model_outputs, embeddings, word2idx, idx2word).
    """
    word2idx, idx2word = get_vocabs(config['vocab_file'])
    embeddings = get_embeddings(word2idx, config['s2v_file'])
    # Default each metric's weight to 1 when no 'weights' list is configured.
    weights = config.get('weights', [1 for _ in config['metrics']])
    assert len(config['metrics']) == len(weights)
    metrics = {m: {'weight': w} for m, w in zip(config['metrics'], weights)}
    if 'lm' in metrics:
        # Language-model metric needs checkpoint dirs and the vocab size.
        metrics['lm'].update(dict(forward=config['lm_save_dir'],
                                  reverse=config.get('lm_rev_save_dir', None),
                                  num_words=len(word2idx)))
    if 'cos' in metrics:
        # Cosine metric optionally weights embeddings by IDF.
        idf_file = config.get('idf_file', None)
        if idf_file is not None:
            metrics['cos'].update(dict(idf=get_idf_vector(idf_file, word2idx), embeddings=embeddings))
        else:
            metrics['cos'].update(dict(embeddings=embeddings))
    # allow_growth avoids grabbing all GPU memory up front.
    sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    sess = tf.Session(config=sess_config)
    model_inputs, model_outputs = get_model(metrics, mode=config['mode'])
    if 'lm' in metrics:
        # Restore LM weights before initializing the remaining variables.
        init_lm_checkpoints(metrics['lm'])
    sess.run(tf.global_variables_initializer())
    return sess, model_inputs, model_outputs, embeddings, word2idx, idx2word
def get_lines(args):
    """Return {line_id: token_list} for input lines with from_line <= id < to_line.

    fix: the original never closed the file handle; it also scanned every
    remaining line after `to_line` was passed. We now use a context manager
    and stop reading as soon as the window is exhausted.
    """
    lines = {}
    with open(args.input_file) as f:
        for line_id, line in enumerate(f):
            if line_id >= args.to_line:
                break  # past the window -- nothing more to collect
            if line_id >= args.from_line:
                lines[line_id] = line.split()
    return lines
def get_summary_length(config, sentence_length):
    """Resolve the target summary length from the config.

    The 'summary_length' entry is either an absolute word count (default 8)
    or a percentage string like '30p', meaning 30% of sentence_length
    (rounded, but never less than 1). Returned as an int32 numpy scalar.
    """
    target = config.get('summary_length', 8)
    if isinstance(target, str) and target.endswith('p'):
        fraction = int(target[:-1]) / 100.0
        target = max(1, int(round(sentence_length * fraction)))
    return np.asarray(target, dtype=np.int32)
def get_batches(sentence, line_id, batch_size, args, config, word2idx):
    """Yield (x_sentence, sentence_length, initial_state, summary_length, num_steps)
    batches of initial states for the hill-climbing search over one sentence.
    """
    # Summary length is computed from the ORIGINAL length, before filtering.
    orig_sentence_length = len(sentence)
    sentence = [w for w in sentence if w not in bad_words]
    # Out-of-vocabulary words map to unk_idx.
    x_sentence = np.asarray([word2idx.get(w, unk_idx) for w in sentence], dtype=np.int32)
    sentence_length = np.asarray(len(sentence), dtype=np.int32)
    summary_length = get_summary_length(config, orig_sentence_length)
    logging.info('sentence: {}'.format(' '.join(sentence)))
    logging.info('sentence_length: {}'.format(sentence_length))
    logging.info('summary_length: {}'.format(summary_length))
    # Search budget scales with sentence_length * summary_length^2.
    num_steps = max(1, int((sentence_length * summary_length**2) * config.get('steps_factor', 0.1)))
    num_restarts = max(1, int((sentence_length * summary_length**2) * config.get('restarts_factor', 0.01)))
    #num_steps = min(800, num_steps)
    #num_restarts = min(300, num_restarts)
    #batch_size = int(batch_size * 11 / summary_length)
    num_evaluations = num_steps * num_restarts
    logging.info('number of evaluations: {}'.format(num_evaluations))
    logging.info('number of restarts: {}'.format(num_restarts))
    logging.info('number of steps: {}'.format(num_steps))
    if config['mode'] == 'extractive':
        # n-choose-k count of possible extractive summaries; when random search
        # would cost more than enumerating them all, go exhaustive instead.
        num_exhaustive = int(factorial(sentence_length) / factorial(summary_length) / factorial(max(1, sentence_length - summary_length)))
        exhaustive = num_evaluations > num_exhaustive and config.get('allow_exhaustive', False)
        if exhaustive:
            logging.info('roughly number of exhaustive evaluations: {}'.format(num_exhaustive))
    for initial_state in get_extractive_initial_states(num_restarts,
                                                       batch_size,
                                                       x_sentence,
                                                       summary_length,
                                                       exhaustive=exhaustive):
        if exhaustive:
            # Exhaustive mode evaluates each candidate once: no climbing steps.
            num_steps = 0
        yield x_sentence, sentence_length, initial_state, summary_length, num_steps
if __name__ == '__main__':
main()
|
import unittest
from katas.kyu_8.ones_and_zeros import binary_array_to_number
class BinaryArrayToNumberTestCase(unittest.TestCase):
    """Tests for binary_array_to_number: a list of binary digits (MSB first)
    converted to its integer value."""
    def test_equal_1(self):
        self.assertEqual(binary_array_to_number([0, 0, 0, 1]), 1)
    def test_equal_2(self):
        self.assertEqual(binary_array_to_number([0, 0, 1, 0]), 2)
    def test_equal_3(self):
        self.assertEqual(binary_array_to_number([1, 1, 1, 1]), 15)
    def test_equal_4(self):
        self.assertEqual(binary_array_to_number([0, 1, 1, 0]), 6)
|
# 全局配置
import xadmin
from .models import EmailVerifyRecord
from xadmin import views
from .models import Signon
"""
使用Xadmin的主题功能。
把全站的配置放在users\admin.py中:
绑定之后,后台可以选择自己喜欢的主题
"""
# Create xadmin's most basic admin settings class; it gets bound to the views below.
class BaseSetting(object):
    # Enable bootswatch so a theme can be picked in the admin UI.
    use_bootswatch = True
class SignonAdmin(object):
    """xadmin options for the Signon model."""
    # Columns shown in the change list.
    list_display = ['username','password']
    # Fields covered by the search box.
    search_fields = ['username']
    # Right-hand side filters.
    list_filter = ['username']
# Site-wide overrides; the attribute names are fixed by xadmin.
class GlobalSettings(object):
    # Override the admin site title.
    site_title = 'CareFree后台管理系统'
    # Override the admin site footer.
    site_footer = 'Copyright © 2018 CareFree Systems Incorporated. All rights reserved. 开发成员:陈志轩,廖智勇,王鹭星.张恺庭,黄凯'
    # Collapse the left menu into an accordion.
    menu_style = 'accordion'
# With xadmin these option classes inherit from object, not from admin.ModelAdmin.
class EmailVerifyRecordAdmin(object):
    # Columns shown in the change list.
    list_display = ['code', 'email', 'send_type', 'send_time']
    # Fields covered by the search box.
    search_fields = ['code']
    # Right-hand side filters.
    list_filter = ['code', 'email', 'send_type', 'send_time']
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
# Bind the basic settings (theme switching) to every admin view.
xadmin.site.register(views.BaseAdminView, BaseSetting)
# Register the title/footer/menu overrides on the common admin view.
xadmin.site.register(views.CommAdminView, GlobalSettings)
xadmin.site.register(Signon,SignonAdmin)
|
# Dictionaries - sum and mean of the stored grades.
dicionario = {'n1': 8.3, 'n2': 7.2}
# 1) sum of all values
soma = sum(dicionario.values())
print(soma)
# 2) mean of the values
media = soma / len(dicionario)
print(media)
|
def digitize(n):
    """Return the decimal digits of non-negative integer n, most significant first.

    fix: the original returned a lazy `map` object, which can only be
    iterated once and compares unequal to any list; materialize a list so
    the result is re-iterable and directly comparable.
    """
    return [int(digit) for digit in str(n)]
|
from avaliacao import *
from simulado import *
from template_avaliacao import * |
import serial
import usb.core
import usb.util
# Follow this: https://www.youtube.com/watch?v=xH_y05pIDTo
# Python script to handle data from RPi to microbit and back again
# To do:
"""
Function to read from USB (specific USB port)
Function to write to USB (specific port)
Function to package data correctly for USB
Main() function - Should act as an API
"""
# Queue class taken from:
# http://interactivepython.org/courselib/static/pythonds/BasicDS/ImplementingaQueueinPython.html
class Queue:
    """Simple FIFO queue backed by a list (insert at the front, pop from the end)."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        return self.items == []

    def enqueue(self, item):
        self.items.insert(0, item)

    def dequeue(self):
        return self.items.pop()

    def size(self):
        return len(self.items)

    def to_list(self):
        # fix: the original read self.mutex / self.queue (attributes of the
        # stdlib queue.Queue, which this class is not) and always raised
        # AttributeError; return a snapshot of our own backing list instead.
        return list(self.items)

    def __iter__(self):
        # Iterate oldest-first so `for item in q` sees FIFO order.
        return iter(reversed(self.items))


out = serial.Serial(port='/dev/ttyAMA0', baudrate=115200, timeout=1)
# fix: prePack/incoming were created BEFORE `class Queue` was defined, which
# raised NameError as soon as this module was imported. The class definition
# now precedes its first use.
prePack = Queue()
incoming = Queue()
# Function to send data over USB ()
def send(data):
    """Stringify *data*, queue it, then flush the queue to the serial port.

    fix: the original `while True` loop never terminated -- once `data` was a
    str it hit `continue` forever, and after converting a non-str it looped
    straight back into that same dead branch, so nothing was ever written.
    The convert/enqueue/flush sequence now runs exactly once per call.
    """
    # serial must send data as strings
    if not isinstance(data, str):
        data = str(data)
    # add data to queue
    prePack.enqueue(data)
    # Currently data is stored like so: prePack[data1, data2, data3[data3a, data3b, data3c]]
    # Use something similar to: "'{0}F{1}S{2}T'.format(first_data, second_data, third_data)" to package data?
    # Some consideration for packet contents and flags must be given so as to successfully
    # decide between which pieces of information to handle with USB receive
    # Drain the queue oldest-first onto the wire.
    while not prePack.isEmpty():
        out.write(prePack.dequeue())
# Function to receive data over USB (New blocks)
def recieve(data):
    """Poll the serial port forever, queueing each received line into `incoming`.

    NOTE(review): the `data` parameter is unused and the loop has no exit
    condition -- presumably meant to run on its own thread; confirm with the
    eventual caller. (Name keeps the original 'recieve' spelling.)
    """
    # Think about how data is going to be sent via USB (probs as strings)
    while True:
        incoming.enqueue(str(out.readline()))
# split data if needed else return as a string
# Must split extra data?
# Use following to test port numbers:
# python -m serial.tools.list_ports
# Use this to access port in terminal for testing:
# python -m serial.tools.miniterm <port_name>
# Reading for USB:
# http://www.brainboxes.com/faq/items/what-is-rts--cts-hardware-flow-control-
# https://pyserial.readthedocs.io/en/latest/pyserial_api.html
|
"""Internal library."""
from io import BytesIO
import os
import struct
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from urllib.parse import quote
from django.conf import settings
from django.utils.encoding import force_bytes, smart_bytes
from django.utils.translation import gettext as _
from modoboa.lib.exceptions import InternalError
from modoboa.parameters import tools as param_tools
def init_storage_dir():
    """Create the directory where documents will be stored.

    Raises InternalError when the configured directory exists but is not
    writable, or when it cannot be created.
    """
    storage_dir = param_tools.get_global_parameter("storage_dir")
    if os.path.exists(storage_dir):
        if os.access(storage_dir, os.W_OK):
            return
        raise InternalError(
            _("Can't write to selected directory to store PDF "
              "documents")
        )
    try:
        os.mkdir(storage_dir)
    except (OSError, IOError) as inst:
        raise InternalError(
            _("Failed to create the directory that will contain "
              "PDF documents (%s)") % inst
        )
def get_creds_filename(account):
    """Return the full path of the credentials PDF for *account*."""
    directory = param_tools.get_global_parameter("storage_dir")
    return os.path.join(directory, "{}.pdf".format(account.username))
def delete_credentials(account):
    """Remove the account's credentials file if it exists (no-op otherwise)."""
    fname = get_creds_filename(account)
    if os.path.exists(fname):
        os.remove(fname)
def _get_cipher(iv):
    """Return an AES-CBC Cipher keyed from the first 32 chars of SECRET_KEY."""
    secret = smart_bytes(settings.SECRET_KEY[:32])
    return Cipher(
        algorithms.AES(force_bytes(secret)),
        modes.CBC(iv),
        backend=default_backend()
    )
def crypt_and_save_to_file(content, filename, length, chunksize=64*512):
    """Encrypt *content* (a file-like object) and write it to *filename*.

    File layout: 8-byte little-endian plaintext length, 16-byte random IV,
    then the AES-CBC ciphertext; the final partial block is space-padded to
    a 16-byte boundary.
    """
    iv = os.urandom(16)
    encryptor = _get_cipher(iv).encryptor()
    with open(filename, "wb") as fp:
        fp.write(struct.pack(b"<Q", length))
        fp.write(iv)
        while True:
            chunk = content.read(chunksize)
            if not chunk:
                break
            remainder = len(chunk) % 16
            if remainder:
                # Pad the last partial block with spaces (trimmed on decrypt
                # via the stored length header).
                chunk += b" " * (16 - remainder)
            fp.write(encryptor.update(force_bytes(chunk)))
        fp.write(encryptor.finalize())
def decrypt_file(filename, chunksize=24*1024):
    """Decrypt a file produced by crypt_and_save_to_file and return the plaintext bytes."""
    output = BytesIO()
    with open(filename, "rb") as fp:
        # Header: 8-byte little-endian original length, then the 16-byte IV.
        origsize = struct.unpack(b"<Q", fp.read(struct.calcsize(b"Q")))[0]
        iv = fp.read(16)
        decryptor = _get_cipher(iv).decryptor()
        for chunk in iter(lambda: fp.read(chunksize), b""):
            output.write(decryptor.update(chunk))
        output.write(decryptor.finalize())
    # Drop the space padding added at encryption time.
    output.truncate(origsize)
    return output.getvalue()
def get_document_logo():
    """Return the path of the logo to embed in documents, or None when absent.

    Prefers the custom logo (settings.MODOBOA_CUSTOM_LOGO, resolved under
    MEDIA_ROOT); falls back to the bundled static logo.
    """
    try:
        custom = settings.MODOBOA_CUSTOM_LOGO
    except AttributeError:
        logo = os.path.join(settings.STATIC_ROOT, "css/modoboa-new.png")
    else:
        logo = os.path.join(settings.MEDIA_ROOT, os.path.basename(custom))
    return logo if os.path.isfile(logo) else None
def rfc_6266_content_disposition(filename):
    """Build an RFC 6266 Content-Disposition header value for *filename*.

    Copied from upcoming django 4.2. TBR on release.
    ASCII names go into a quoted `filename=` parameter (backslashes and
    quotes escaped); anything else uses the RFC 5987 `filename*=` form.
    """
    # TODO : remove me when updating to django 4.2
    try:
        filename.encode("ascii")
    except UnicodeEncodeError:
        file_expr = "filename*=utf-8''{}".format(quote(filename))
    else:
        escaped = filename.replace("\\", "\\\\").replace('"', r"\"")
        file_expr = 'filename="{}"'.format(escaped)
    return f"attachment; {file_expr}"
|
import configparser
import praw
import scraper
import re
import csv
# Credentials and target subreddit come from config.ini.
config = configparser.ConfigParser()
config.read("config.ini")
# Authenticated PRAW session for the moderation bot.
reddit = praw.Reddit(client_id=config.get("ACCOUNT", "CLIENT_ID"),
                     client_secret=config.get("ACCOUNT", "CLIENT_SECRET"),
                     username=config.get("ACCOUNT", "USERNAME"),
                     password=config.get("ACCOUNT", "PASSWORD"),
                     user_agent="HeadlineScraping, created by u/ItsTheRedditPolice")
subreddit = config.get("SUBREDDIT", "NAME")
user = reddit.user.me()
# Domains whose headlines we can scrape; populated from domains.csv by load_db().
domain_list = []
def extract_domain(string):
    """Extract the bare domain from a URL.

    E.g. https://www.google.com/x -> ['google.com'], so results can be
    compared against the entries in domains.csv. Returns a (possibly empty)
    list of matches.
    """
    regex = r"^(?:https?:\/\/)?(?:[^@\/\n]+@)?(?:www\.)?([^:\/\n]+)"
    # re.findall already returns a list of the captured groups; the original
    # wrapped it in a redundant `[x for x in url]` copy.
    return re.findall(regex, string)
def load_db():
    """Load domains.csv into the module-level domain_list (one domain per row).

    fix: the original bound the joined row to a local named `str`, shadowing
    the builtin; renamed, and the row-join feeds append directly.
    """
    with open("domains.csv", "r") as f:
        for row in csv.reader(f):
            domain_list.append("".join(row))
def scan_submissions():
    """Stream new link submissions; remove any whose title differs from the
    scraped article headline, leaving a stickied mod comment."""
    try:
        for submission in reddit.subreddit(subreddit).stream.submissions(skip_existing=True):
            if not submission.is_self:
                link = submission.url
                # NOTE(review): extract_domain() returns [] for unmatched
                # input, so [0] can raise IndexError here -- confirm inputs.
                link_domain = extract_domain(link)[0]
                for domain in domain_list:
                    if domain == link_domain:
                        title = submission.title
                        # Scraper module exposes one function per supported
                        # site, named after the domain's first label.
                        domain_name = link_domain.split(".")[0]
                        headline = getattr(scraper, domain_name)(link) # call a function using a variable name "getattr(module, varname)(args)"
                        # TODO: compare submission titles to headline and do the most appropriate action
                        if headline != title:
                            comment = submission.reply("Hey, your submission has been removed! \n\n "
                                                       "Reason: \n\n "
                                                       "```Submission Title does not match the one in the article you "
                                                       "linked to!``` \n\n Please repost your submission and be sure to make the title "
                                                       "the **exact same** as the headline in the article! Thank you! \n\n ^^I ^^am ^^a ^^bot")
                            comment.mod.distinguish(how="yes", sticky=True)
                            submission.mod.remove(spam=False, mod_note="Title did not match headline.")
    except Exception as e:
        # NOTE(review): broad catch ends the stream on ANY error (including
        # network hiccups) after printing; consider retrying instead.
        print(f"** ERROR: {e}")
def initialise():
    """Load the domain whitelist, then start the (blocking) submission scan."""
    load_db()
    scan_submissions()
# Kicks everything off at import time; scan_submissions blocks indefinitely.
initialise()
# Python Standard Libraries
# N/A
# Third-Party Libraries
from django.db import IntegrityError
from rest_framework.decorators import api_view, permission_classes, renderer_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.status import HTTP_201_CREATED
from rest_framework.status import HTTP_401_UNAUTHORIZED
# Custom Libraries
from . import sign_up_serializer
from . import sign_up_renderer
# Documentation link
# http://www.django-rest-framework.org/api-guide/permissions/#object-level-permissions
# http://www.django-rest-framework.org/api-guide/permissions/#api-reference
@api_view(["POST"])
@permission_classes((AllowAny,))
# Response is implicitly provided in the context of the renderer now
@renderer_classes((sign_up_renderer.SignUpJSONRenderer,))
def sign_up(request):
    """Create a new user account from the POSTed payload.

    Returns 201 with the serialized user on success, 401 with an "errors"
    payload on validation failure or when the user already exists.

    fix: the original caught bare Exception and answered the duplicate-user
    case with `user_serializer.errors`, which is EMPTY after a successful
    is_valid() -- callers got a useless {} payload. We now catch the
    database IntegrityError specifically and say what happened.
    """
    user_serializer = sign_up_serializer.SignUpSerializer(data=request.data)
    if not user_serializer.is_valid():
        response_data = {"errors": user_serializer.errors}
        return Response(response_data,
                        status=HTTP_401_UNAUTHORIZED)
    try:
        user_serializer.save()
    except IntegrityError:
        # user already exists (unique constraint raced past validation)
        response_data = {"errors": {"detail": "A user with these credentials already exists."}}
        return Response(response_data,
                        status=HTTP_401_UNAUTHORIZED)
    return Response(user_serializer.data,
                    status=HTTP_201_CREATED)
|
'''
(1)[[0 for i in range(len(s))]for i in range(len(s))] 二维dp 数组的生成方法
(2)注意是 if ... elif 而不是 eles if
(3)内层循环是 for j in range(i + 1)
(4)python 中是 substring()方法就是直接截取 s[start : end + 1]
'''
class Solution:
    def longestPalindrome(self, s: str) -> str:
        """Return the longest palindromic substring of s (O(n^2) interval DP).

        is_pal[j][i] records whether s[j..i] is a palindrome; a span is a
        palindrome when its endpoints match and the inner span (if any) is.
        """
        if not s:
            return s
        n = len(s)
        is_pal = [[False] * n for _ in range(n)]
        best_start, best_len = 0, 0
        for end in range(n):
            for begin in range(end, -1, -1):
                if s[end] != s[begin]:
                    continue
                # Length <= 2 needs only matching endpoints; otherwise
                # inherit from the interior span.
                if end - begin <= 1 or is_pal[begin + 1][end - 1]:
                    is_pal[begin][end] = True
                    if end - begin + 1 > best_len:
                        best_len = end - begin + 1
                        best_start = begin
        return s[best_start: best_start + best_len]
|
# Read n pairs from stdin and print the largest pair sum (0 when n == 0).
n = int(input())
ans = 0
for _ in range(n):
    a, b = (int(tok) for tok in input().split())
    if a + b > ans:
        ans = a + b
print(ans)
import time
import threading
import datetime
import math
# Output file where (system clock, estimated clock) sample pairs are appended.
plot_file = "time-8.txt"
# Our own address; packets we sent ourselves are excluded from delay statistics.
MY_IP = "192.168.0.110"
class Stream:
    """
    Handles one network stream, identified by an IP pair:
        src_ip | dest_ip
        ip1    | ip2

    A background thread started in __init__ (_decider) watches incoming
    packets until the traffic is classified as an HLS/DASH media stream or
    blacklisted; once decided, every new packet updates a clock estimate
    (_hourCalculate) that is appended to plot_file.

    NOTE(review): every attribute below is a CLASS attribute holding mutable
    state -- all Stream instances share the same lists unless an instance
    assignment shadows them. Looks unintended when tracking several streams
    at once; confirm before relying on per-stream isolation.
    NOTE(review): _HLS and _DASH are both 0, so stream_type cannot actually
    distinguish the two protocols (and the default 0 already equals both).
    """
    ips = ""
    timestamps = []
    clocks = []
    delays = []
    packetNr = 0
    recordedTime = [] # Used to calculate delay (including processing)
    isStream = False
    blacklisted = False
    isSynchronizer = False
    av = [] # per-packet flag: payload mentioned -audio / -video (HLS)
    content_type_hls = [] # list of content_types for hls
    content_type_dash = [] # list of content_types for dash
    stream_type = 0;
    # constants
    _debugLevel = 0 # 1, 2, 3
    _HLS = 0
    _DASH = 0
    # average calculator delays
    average_delay = 0
    calc_delays = []
    sum_of_delays = 0
    # decider variables
    packetsChecked = 0
    # Hour synchronizer variables
    synced_hour = ""
    num_pckg = 0
    minimum_num_to_sync = -1
    # Threads
    #decider_thread = threading.Thread(target=self._decider)
    #clock_calculator_thread = threading.Thread(target=self._hourCalculate)
    def __init__(self, ips, debug=0):
        """
        The constructor that initializes this stream with the given ip.
        Debug level is optional (0 = silent, 3 = most verbose).
        Also starts the decider thread immediately.
        """
        self.ips = ips
        self._debugLevel = debug
        if self._debugLevel >= 3:
            print("Stream ["+self.ips+"] initialized")
        threading.Thread(target=self._decider).start()
    def record_packet(self, timestamp, clock, av, cth, ctd, src):
        """Record one observed packet: its timestamp, any extracted clock
        strings, audio/video and content-type markers, and the source IP
        (used to skip our own packets in the delay stats)."""
        self.timestamps.append(timestamp)
        self.clocks.append(clock)
        self.av.append(av)
        self.content_type_hls.append(cth)
        self.content_type_dash.append(ctd)
        if cth:
            self.stream_type = self._HLS
        elif ctd:
            self.stream_type = self._DASH
        self.packetNr += 1
        # calculate delay and delay_average (incoming packets only)
        if src != MY_IP:
            self.recordedTime.append(time.time())
            self._calculateAverage()
        #print(self.calc_delays)
        #self._calculateAverage()
        #if len(self.calc_delays) > 0:
        #    print ""+str(self.calc_delays[-1])+" - "+str((src == MY_IP))
        if self._debugLevel >= 2:
            print("Stream "+self.ips+" has a new packet")
        if self._debugLevel >= 3:
            print(self.timestamps)
            print(self.clocks)
            print(self.av)
            if self.stream_type == self._HLS:
                #print(self.content_type_hls)
                print("HLS Stream type")
            elif self.stream_type == self._DASH:
                #print(self.content_type_dash)
                print("DASH Stream type")
            print(self.recordedTime)
            print("Average Delay is: "+str(self.average_delay))
        if self.isStream:
            self._hourCalculate()
    def _calculateAverage(self):
        """
        Update the running average of inter-packet arrival delay.
        Called once per new (incoming) packet instead of running as its own
        thread: a continuously-polling thread per stream would be wasteful.
        """
        if len(self.recordedTime) >= 2:
            last_delay = self.recordedTime[-1] - self.recordedTime[-2]
            self.calc_delays.append(last_delay)
            self.sum_of_delays += last_delay
            self.average_delay = self.sum_of_delays / float(len(self.calc_delays))
    def _decider(self):
        """
        Runs on its own thread: routinely re-checks packets recorded since the
        last pass and decides whether this traffic is a media stream.
        A stream is decided once at least one packet showed each of:
        a clock value (implies HTTP), an audio/video marker, and an HLS/DASH
        content type. Runs until decided or blacklisted.

        NOTE(review): any spoofed packets carrying these fields would also be
        decided as a stream; the original notes suggest either checking the
        client/server exchange both ways or learning thresholds from data.
        """
        hasAV = False #Audio - Video
        hasCT = False #Content_Type
        hasCL = False #Clock
        if self._debugLevel >= 2:
            print "Decider Started for stream "+self.ips
        while not self.isStream and not self.blacklisted:
            #TODO: Check each list, filter
            #IDEA: Check the delay between packets, if too large, discard
            #IDEA: Check contents with threshold, if no (any) between a num. of packets, discard
            #IDEA: Upper one, if no (any) until the end, discard (requires min. nr. packets)
            #IDEA: Decide faster if all (any) are present -> Increase performance
            for i in range(self.packetsChecked, self.packetNr):
                if len(self.clocks[i]) > 0:
                    hasCL = True
                if self.av[i] == True:
                    hasAV = True
                if self.stream_type == self._HLS or self.stream_type == self._DASH:
                    hasCT = True
                self.packetsChecked = i
                if hasCL and hasAV and hasCT:
                    self.decideStream()
                    break
    def _hourCalculate(self):
        """
        Estimate the stream's wall clock from the latest extracted clock
        value plus the measured average delay; called for every new packet
        after the stream is decided. Appends (system hour, calculated hour)
        to plot_file for later comparison.
        """
        clock_t = self.clocks[-1]
        if len(clock_t) <= 0:
            if self._debugLevel >= 3:
                print "Stream: "+self.ips+": Has no clock, waiting for next packet"
            return
        else:
            # clock_t fields 4/5/6 hold hour/minute/seconds -- presumably a
            # parsed HTTP Date; +1 looks like a timezone offset. TODO confirm.
            c_hour = int(clock_t[4])+1
            #c_hour = clock_t[4]
            c_minute = clock_t[5]
            c_seconds = float(clock_t[6]) + math.ceil(self.average_delay)
            min = 0
            sec = c_seconds
            if c_seconds >= 60:
                # carry overflowing seconds into minutes, e.g. 127s -> 2min 7s
                # NOTE(review): `min` shadows the builtin, and `/` here is
                # Python 2 integer division.
                min = int(c_seconds) / int(60)
                sec = int(c_seconds) % int(60)
            c_minute = int(c_minute) + min
            c_seconds = sec
            c_time = ""+str(c_hour)+":"+str(c_minute)+":"+str(c_seconds)
            #x = time.strptime(c_time,'%H:%M:%S')
            #c_sec = datetime.timedelta(hours=x.tm_hour,minutes=x.tm_min,seconds=x.tm_sec).total_seconds()
            #add the delay
            #c_sec += delay
            #cc_time = str(datetime.timedelta(c_sec)) #to convert back
            self.synced_hour = c_time
            self.num_pckg = self.packetNr
            if self.minimum_num_to_sync < 0:
                self.minimum_num_to_sync = self.packetNr
            if self._debugLevel >= 1:
                print "Stream: "+self.ips+": Successfully Synchronized clock"
            self.isSynchronizer = True
            # Append one (system hour, calculated hour) sample for plotting.
            f = open(plot_file, 'a')
            calc_hour = c_time
            sys_hour = str(datetime.datetime.now().strftime('%H:%M:%S'))
            f.write(""+str(sys_hour)+","+str(calc_hour)+"\n")
            f.close()
            print "D: "+str(self.average_delay)
    def blacklist(self):
        """
        Mark the stream as blacklisted and clear all recorded packet data
        (also stops the decider loop, which checks this flag).
        """
        self.blacklisted = True
        self.timestamps = []
        self.clocks = []
        self.av = []
        self.content_type_hls = []
        self.content_type_dash = []
        self.packetNr = 0
        self.recordedTime = []
        if self._debugLevel >= 3:
            print("Stream "+str(self.ips)+" is blacklisted successfully!")
    def decideStream(self):
        """
        Mark the stream as decided (equivalent to having filtered it).
        The hour-calculation work deliberately does not start until the
        stream is decided, for security reasons; from now on each
        record_packet call triggers _hourCalculate.
        """
        self.isStream =True
        if self._debugLevel >= 1:
            print "Stream "+self.ips+" is DECIDED!"
        #TODO: Clean up not needed parts
        #clock_calculator_thread.start() #No need for thread
|
from django.apps import AppConfig
class BgstatsConfig(AppConfig):
    """Django application configuration for the bgstats app."""
    name = 'bgstats'
|
from django.contrib import admin
from .models import goovi_db
# Expose goovi_db in the Django admin with the default ModelAdmin options.
admin.site.register(goovi_db)
|
#!/usr/bin/python
# -*- coding: cp936 -*-
import sqlite3
import csv
import xlrd
import xlwt
import pandas as pd
from SQLiteDataProcessing.clientLoginEventUtility import *
from ExcelToSQLite.importNewAccountToSQLite import *
from UpdateSQliteTables.updateNewAccount import *
from UpdateSQliteTables.updateLeftMarketPer import *
from ExcelToSQLite.importClientLoginFolderToSQLite import *
from ExcelToSQLite.importHisclientLoginEventToSQLite import *
'''
1. import newacc
2. 拿到sheet2
3. 根据sheet2去update newacc
'''
class getSheet2FromSQLite:
def __init__(self):
self.dataframe = None
self.leftMarketPerIds = []
self.internetReferUsers = []
def generateSheet2ExcelFromSQLite(self):
# import clean newacc to SQLite
print('Preparing to import clean newaccount')
importNewAccountToSQLite()
print('Finsh import clean newaccount')
# import client login
print('Preparing to import this month client login')
importClientLoginFolderToSQLite()
print('Finsh import this month client login')
# import history client login
print('Preparing to import history client login')
importHisClientLoginEventToSQLite()
print('Finsh import history client login')
# checkMarketRelation 函数检查营销人员编号是不是填写,如果没有填写,用以下方法找【离职人员的营销关系】:
# 1. 查看QDBM字段
# 1)如果该字段是以'_'隔开的形式,那么被隔开的第3个字符串, 可能是空字符串/4位营销编码/8位营销编码,如果是一个8位的营销人员编码,则说明该人员因为离职才没有填写,营销人员编码直接写营销部代码;
# * marketperid = 该字段前4位编码
# * marketdepid = 该字段前4位编码
# * marketdepname = 营销营业部代码对应的营销部名称(在input/《营销人员和营业部列表》excel的branchlist表单中查找到对应的支行名称 //在db的marketdep表中查找)
# 2)如果QDBM字段中找不到这个8位的营销编码
# 2.查看newacc表格中TJRSJ字段
# 1)如果该字段是8位编码,那么说明这个员工已经离职,营销人员编码直接写写营业部代码:
# * marketperid = 该字段的前4位编码
# * marketdepid = 该字段的前4位编码
# * marketdepname = 营销营业部代码对应的营销部名称(在input/《营销人员和营业部列表》excel的branchlist表单中查找到对应的支行名称 //在db的marketdep表中查找)
# 2)如果不是8位编码,什么都不做,marketperid, marketdepid, marketdepname 都空着
class CheckMarketRelation:
""" 用于补充营销关系 """
""" return: 1. 营销人员编码(marketperid) 2. 营销营业部编码(marketdepid)"""
def check(marketperid, qdbm, tjrsj, marketdepid):
#print('Here is checkMarketrelation')
mpi = marketperid
mdi = marketdepid
mpiOriginal = None
#print('checkMarketRelation 1')
if marketperid is None:
#print('marketperid is none')
try:
#查QDBM
"""
print(qdbm)
print(len(str(qdbm).split('_')))
print(str(qdbm).split('_'))
print(len(str(qdbm).split('_')[2]))
print(tjrsj)
print( len(str(tjrsj)) )
"""
if not (qdbm is None) and len(str(qdbm).split('_')) >=3:
#print('check 1')
mpi = str(qdbm).split('_')[2] #被'_'隔开的第3个字符串赋给mpi,可能是4位数,也可能位空,也可能是8位
if len(str(qdbm).split('_')[2]) == 8:
#register leftperid
self.leftMarketPerIds.append(str(qdbm).split('_')[2])
mpi = str(qdbm).split('_')[2][0:4]
mpiOriginal = str(qdbm).split('_')[2]
mdi = mpi
else:
#查TJRSJ
if not(tjrsj is None) and len(str(tjrsj).strip()) == 8:
#print('check 2')
# register leftperid
self.leftMarketPerIds.append(str(tjrsj).strip())
mpi = str(tjrsj)[0:4]
mdi = mpi
mpiOriginal = str(tjrsj)
except Exception:
print('exception in check Marketrelation')
#print(mpi)
#print(mdi)
#mpiOriginal一定是8位的
return str(mpi), str(mdi), str(mpiOriginal)
# 打开数据库连接以及需要使用的表格文档
# open('sheet3_baseline.csv', 'rt',
# encoding='utf-8', newline='') as src,
print('Preparing to write in sheet2')
with sqlite3.connect('C:\sqlite\db\hxdata.db') as db:
# 补充用户月登陆天数所需的dictionary
# 年份:09|| 2009
# 月份 2 || 12
loginDaysDict = clientLoginEventUtility().getClientLoginDaysInYearMonth(19, 8)
# src 为比较范本
"""
workbooksrc = xlrd.open_workbook('D:\DataTool\dataTool.xls')
src = workbooksrc.sheet_by_name('Sheet3')
"""
# 登记用户自定义函数到db(写在sql语句中暂时报错:user-defined function raised exception)
# db.create_function('checkMarketRelation', 4, CheckMarketRelation())
workbookdes = xlwt.Workbook()
dst = workbookdes.add_sheet('sheet2')
sqStatement = 'SELECT newaccount.khdate, newaccount.khcode, newaccount.usrnameshort, newaccount.usrname,\
newaccount.khusrmobile, newaccount.lddepid, newaccount.lddepname,\
newaccount.marketperid, newaccount.qdbm, newaccount.tjrsj, newaccount.marketdepid,\
newaccount.marketpername, newaccount.marketpertype, newaccount.marketpermobile, newaccount.marketdepname,\
newaccount.isLeftMarketPer\
FROM newaccount\
WHERE newaccount.tjrsj IS NOT NULL;'
#抬头补充
dst.write(0, 0, '开户时间') #A
dst.write(0, 1, '交易账号') #B
dst.write(0, 2, '客户简称') #C
dst.write(0, 3, '客户名称') #D
dst.write(0, 4, '开户手机号') #E
dst.write(0, 5, '本月登陆天数') #F
dst.write(0, 6, '落地营业部代码') #G
dst.write(0, 7, '落地营业部名称') #H
dst.write(0, 8, '营销人员编码') #I
dst.write(0, 9, '营销人员名称') #J
dst.write(0, 10, '营销人员类别') #K
dst.write(0, 11, '营销人员手机号') #L
dst.write(0, 12, '营销营业部代码') #M
dst.write(0, 13, '营销营业部名称') #N
dst.write(0, 14, '营销人员变更前原值') #O
row = 1
for khdate, khcode, usrnameshort, usrname,\
khusrmobile, lddepid, lddepname,\
marketperid, qdbm, tjrsj, marketdepid,\
marketpername, marketpertype, marketpermobile, marketdepname, isLeftMarketPer\
in db.execute(sqStatement):
# CheckMarketRelation()
checkedMarketPerId = CheckMarketRelation.check(marketperid, qdbm, tjrsj, marketdepid)[0]
checkedMarketDepId = CheckMarketRelation.check(marketperid, qdbm, tjrsj, marketdepid)[1]
leavedMarketPerOriginalId = CheckMarketRelation.check(marketperid, qdbm, tjrsj, marketdepid)[2]
checkedMarketDepName = None
''''
if (str(checkedMarketPerId).strip() == 'None') or (str(checkedMarketPerId).strip() == ''):
# 如果marketperid经过修正以后仍然为空,说明这个人是互联网拉新开户
if (str(checkedMarketPerId).strip() != '395000010066' ) and (str(checkedMarketPerId).strip() != '395000010065') and (str(checkedMarketPerId).strip() != '398000010900'):
self.internetReferUsers.append(str(khcode).strip())
'''
''''
if (str(checkedMarketDepId).strip() == 'None') or (str(checkedMarketDepId).strip() == '') :
print('row: ' + str(row) + str(khusrmobile) + 'can not find market person and dep')
else:
for name in db.execute('SELECT marketdep.depname FROM marketdep WHERE marketdep.depid =?', (str(checkedMarketDepId),)):
checkedMarketDepName = name[0]
'''
dst.write(row, 0, str(khdate))
dst.write(row, 1, str(khcode))
dst.write(row, 2, str(usrnameshort))
dst.write(row, 3, str(usrname))
dst.write(row, 4, str(khusrmobile))
if str(khcode).strip() in loginDaysDict:
dst.write(row, 5, loginDaysDict[str(khcode).strip()])
else:
dst.write(row, 5, 0)
dst.write(row, 6, str(lddepid))
dst.write(row, 7, str(lddepname))
'''
if str(khcode).strip() != '395000010066' and str(khcode).strip() != '395000010065' and str(khcode).strip() != '398000010900':
dst.write(row, 8, str(checkedMarketPerId))
dst.write(row, 9, str(marketpername))
dst.write(row, 10, str(marketpertype))
dst.write(row, 11, str(marketpermobile))
dst.write(row, 12, str(checkedMarketDepId))
dst.write(row, 13, str(checkedMarketDepName))
#leavedMarketPeriOriginalId 要不就是8位要不就是None
if len(str(leavedMarketPerOriginalId)) == 8:
# 说明这个营销人员已经离职了
dst.write(row, 14, str(leavedMarketPerOriginalId) + ' 离职')
else:
# None
dst.write(row, 14, '')
else:
if str(khcode).strip() == '395000010066':
dst.write(row, 8, "39708036")
dst.write(row, 9, "陈凌")
dst.write(row, 10, "经纪人")
dst.write(row, 11, "15659100118")
dst.write(row, 12, "3970")
dst.write(row, 13, "3970 南平解放路证券营业部")
if str(khcode).strip() == '395000010065':
dst.write(row, 8, "31901042")
dst.write(row, 9, "李靖")
dst.write(row, 10, "财富管理师")
dst.write(row, 11, "13072940875")
dst.write(row, 12, "3190")
dst.write(row, 13, "3190 西安分公司")
if str(khcode).strip() == '398000010900':
dst.write(row, 8, "37809097")
dst.write(row, 9, "张多佳")
dst.write(row, 10,"财富管理师")
dst.write(row, 11, "18247130746")
dst.write(row, 12, "3780")
dst.write(row, 13,"3780 呼和浩特中山西路证券营业部")
row = row + 1
'''
if str(khcode).strip() != '395000010066' and str(khcode).strip() != '395000010065' and str(
khcode).strip() != '398000010900':
dst.write(row, 8, str(marketperid))
dst.write(row, 9, str(marketpername))
dst.write(row, 10, str(marketpertype))
dst.write(row, 11, str(marketpermobile))
dst.write(row, 12, str(marketdepid))
dst.write(row, 13, str(marketdepname))
# leavedMarketPeriOriginalId 要不就是8位要不就是None
if isLeftMarketPer == '1.0' or isLeftMarketPer == '2.0':
# 说明这个营销人员已经离职了
dst.write(row, 14, '离职' + str(marketperid))
self.leftMarketPerIds.append(str(marketperid))
else:
# None
dst.write(row, 14, '')
else:
if str(khcode).strip() == '395000010066':
dst.write(row, 8, "39708036")
dst.write(row, 9, "陈凌")
dst.write(row, 10, "经纪人")
dst.write(row, 11, "15659100118")
dst.write(row, 12, "3970")
dst.write(row, 13, "3970 南平解放路证券营业部")
if str(khcode).strip() == '395000010065':
dst.write(row, 8, "31901042")
dst.write(row, 9, "李靖")
dst.write(row, 10, "财富管理师")
dst.write(row, 11, "13072940875")
dst.write(row, 12, "3190")
dst.write(row, 13, "3190 西安分公司")
if str(khcode).strip() == '398000010900':
dst.write(row, 8, "37809097")
dst.write(row, 9, "张多佳")
dst.write(row, 10, "财富管理师")
dst.write(row, 11, "18247130746")
dst.write(row, 12, "3780")
dst.write(row, 13, "3780 呼和浩特中山西路证券营业部")
row = row + 1
workbookdes.save('../output/sheet2.xls')
"""
return dataframe read from sheet2
"""
dfreturn = pd.read_excel('../output/sheet2.xls', sheetname='sheet2')
print('return')
print(dfreturn.columns)
print(dfreturn)
self.dataframe = dfreturn
#update newaccount based on modified sheet2
#updateNewAccount.update(self.dataframe)
# 这里的逻辑不是修改newaccount里面的值,而是删除以前的数据,然后把sheet2跑出来的结果完全存进去
updateNewAccount.reimport(self.dataframe)
updateLeftMarketPer.update(self.leftMarketPerIds)
    def getSheet2DataFrame(self):
        """Regenerate the sheet2 workbook (including its DB side effects) and
        return the resulting DataFrame stored on self.dataframe."""
        self.generateSheet2ExcelFromSQLite()
        return self.dataframe
    def getLeftMarketPerIDs(self):
        """Regenerate sheet2, then return the de-duplicated ids of marketers
        flagged as having left (collected during generation)."""
        self.generateSheet2ExcelFromSQLite()
        return list(set(self.leftMarketPerIds))
    def getInternetReferUsers(self):
        """Regenerate sheet2, then return the internet-referred customer list
        collected during generation."""
        self.generateSheet2ExcelFromSQLite()
        return self.internetReferUsers
# generate sheet2 excel
# Module entry point: building the workbook also rewrites the newaccount
# table and updates departed-marketer records (see generateSheet2ExcelFromSQLite).
sheet2 = getSheet2FromSQLite()
sheet2.generateSheet2ExcelFromSQLite()
#print(len(sheet2.getInternetReferUsers()))
# leftmarketpers = sheet2.getLeftMarketPerIDs()
# print(leftmarketpers)
# updateLeftMarketPer.update(leftmarketpers)
|
# Direction codes for a blank: ACROSS grows along x, DOWN grows along y.
ACROSS = 0
DOWN = 1
# blanks = [(start_x, start_y, direction, length), ...]
def zip_sol(answer, blanks, vocab):
    """Record the solved assignment: for each word, append a
    (start_x, start_y, direction, word) tuple to *answer* in place.

    Returns the same *answer* list for convenience.
    """
    for i, word in enumerate(vocab):
        start_x, start_y, direction, _length = blanks[i]
        answer.append((start_x, start_y, direction, word))
    return answer
def check(blanks, vocab):
    """Write vocab[i] into blanks[i] on a scratch grid and verify crossings.

    blanks: (start_x, start_y, direction, length) tuples; direction 0 is
    across (x grows), 1 is down (y grows).  Returns True when no two words
    disagree on a shared cell, False at the first conflict.
    """
    # Size the grid from the farthest cell any blank can reach.  The
    # original used max(length) + 1, which raises IndexError whenever a
    # blank's start offset pushes its word past that bound.
    # (Assumes each word fits its blank, which the solver guarantees.)
    size = max(max(b[0], b[1]) + b[3] for b in blanks)
    grid = [[' '] * size for _ in range(size)]
    for blank, word in zip(blanks, vocab):
        start_x, start_y, direction, _length = blank
        for offset, letter in enumerate(word):
            if direction == 0:
                x, y = start_x + offset, start_y
            else:
                x, y = start_x, start_y + offset
            cell = grid[x][y]
            if cell != ' ' and cell != letter:
                return False
            grid[x][y] = letter
    return True
def crossword_helper(answer,blanks, vocab, vocab_index):
    """Backtracking search assigning words to blanks[vocab_index:].

    Invariant: vocab is reordered IN PLACE by swapping so that vocab[i] is
    the word tentatively placed in blanks[i] for i < vocab_index; each
    recursive call undoes its swap on the way out.  The first complete,
    consistent assignment is copied into *answer* (a list shared by every
    call), and the `if answer` guard short-circuits the remaining search.
    """
    if answer:
        # A solution was already recorded by a deeper call — stop searching.
        return answer
    if len(vocab) == vocab_index:
        # Every blank has a candidate word; verify all crossings agree.
        if check(blanks, vocab):
            zip_sol(answer,blanks,vocab)
    for i in range(vocab_index, len(vocab)):
        # Only try words that physically fit the current blank's length.
        if blanks[vocab_index][3] >= len(vocab[i]):
            vocab[i], vocab[vocab_index] = vocab[vocab_index], vocab[i]
            crossword_helper(answer, blanks, vocab, vocab_index + 1)
            vocab[i], vocab[vocab_index] = vocab[vocab_index], vocab[i]
    return None
def solve_crossword(vocab, blanks):
    """Assign every word in *vocab* to a blank so that all crossings agree.

    vocab: list of words.
    blanks: list of (start_x, start_y, direction, length) tuples.
    Returns a list of (start_x, start_y, direction, word) tuples, or None
    when no consistent assignment exists.
    """
    # Quick infeasibility check: there must be at least as many blanks as
    # words, and the i-th shortest blank must hold the i-th shortest word.
    # (Replaces a bare `except:` that silently relied on IndexError to
    # detect running out of blanks — it would also have hidden any other
    # error.)
    vocab_lens = sorted(len(v) for v in vocab)
    blanks_lens = sorted(b[3] for b in blanks)
    if len(vocab_lens) > len(blanks_lens):
        return None
    for word_len, blank_len in zip(vocab_lens, blanks_lens):
        if blank_len < word_len:
            return None
    # Backtracking: try shortest words in shortest blanks first.
    vocab = sorted(vocab, key=len)
    blanks = sorted(blanks, key=(lambda x: x[3]))
    answer = []
    crossword_helper(answer, blanks, vocab, 0)
    # crossword_helper fills `answer` in place; empty means no solution.
    return answer or None
# soln = solve_crossword(vocab=['next', 'time', 'expect', 'electric'],
# blanks=[(0, 0, ACROSS, 4),(1, 0, DOWN, 6),(3, 0, DOWN, 4),(1, 3, ACROSS, 8)])
# print(soln)
|
# Build several calculators, each keeping its own value separately
# (totals must accumulate across calls) — one module-level accumulator
# per calculator.
result1 = 0
result2 = 0
result3 = 0
def calc1(num):
    """Add *num* onto calculator 1's running total (module global result1)."""
    global result1
    result1 = result1 + num
def calc2(num):
    """Add *num* onto calculator 2's running total (module global result2)."""
    global result2
    result2 = result2 + num
def calc3(num):
    """Add *num* onto calculator 3's running total (module global result3)."""
    global result3
    result3 = result3 + num
calc1(3)
calc1(3)
print(result1)  # 6 — calc1's total accumulated across the two calls
calc2(5)
calc3(7)
|
import copy
# Get data, and save it
# Read the comma-separated intcode program (single line) from the puzzle
# input, parse it to ints, and keep a pristine copy to reset from between runs.
with open("input1.txt","r") as f:
    data = f.readlines()[0].replace("\n","").split(',')
data = list(map(int, data))
originalMemory = copy.deepcopy(data)
def compute(data):
    """Execute the intcode program in *data*, mutating it in place.

    Opcodes: 1 = add, 2 = multiply, 99 = halt.  The three values after an
    opcode are positional addresses (two operands, one destination).
    Returns the final value at address 0.
    """
    pc = 0
    while data[pc] != 99:
        op = data[pc]
        if op == 1:
            data[data[pc + 3]] = data[data[pc + 1]] + data[data[pc + 2]]
        elif op == 2:
            data[data[pc + 3]] = data[data[pc + 1]] * data[data[pc + 2]]
        else:
            print("ERROR, Invalid opcode!")
        pc += 4
    return data[0]
# Loop through inputs from 0-100 for both
# Brute-force search for the pair of inputs (addresses 1 and 2) that makes
# the program produce 19690720; prints the combined answer 100*i + j.
loop = True
for i in range(100):
    for j in range(100):
        data = copy.deepcopy(originalMemory)  # fresh memory for each run
        data[1] = i
        data[2] = j
        # Check if it's found
        if(compute(data) == 19690720):
            print(100*i + j)
            loop = False
        if(loop==False):
            break
    if(loop==False):
        break
|
from django.contrib import admin
from post.models import Post, PostComment
# Expose the blog models in the Django admin site.
admin.site.register(Post)
admin.site.register(PostComment) |
import torch
import torch.nn as nn
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
kaiming_init)
import torch.utils.model_zoo as model_zoo
from torch.nn.modules.batchnorm import _BatchNorm
from collections import OrderedDict
import torch
from ..builder import BACKBONES
class ShuffleV2Block(nn.Module):
    """ShuffleNet V2 basic unit.

    stride 1: the input is split channel-wise; half passes through the main
    branch, half is passed through untouched, and the two are concatenated.
    stride 2: both a depthwise projection branch and the main branch process
    the full input, doubling the spatial downsampling.
    """
    def __init__(self, inp, oup, mid_channels, *, ksize, stride):
        super(ShuffleV2Block, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        self.mid_channels = mid_channels
        self.ksize = ksize
        pad = ksize // 2  # "same" padding for the depthwise conv
        self.pad = pad
        self.inp = inp
        # Main branch produces oup - inp channels so that, after
        # concatenation with the inp passthrough/projection channels,
        # the block outputs oup channels in total.
        outputs = oup - inp
        branch_main = [
            # pw
            nn.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            # dw
            nn.Conv2d(mid_channels, mid_channels, ksize, stride, pad, groups=mid_channels, bias=False),
            nn.BatchNorm2d(mid_channels),
            # pw-linear
            nn.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
            nn.BatchNorm2d(outputs),
            nn.ReLU(inplace=True),
        ]
        self.branch_main = nn.Sequential(*branch_main)
        if stride == 2:
            branch_proj = [
                # dw
                nn.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                # pw-linear
                nn.Conv2d(inp, inp, 1, 1, 0, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU(inplace=True),
            ]
            self.branch_proj = nn.Sequential(*branch_proj)
        else:
            self.branch_proj = None
    def forward(self, old_x):
        # NOTE: if stride were neither 1 nor 2 this would return None, but
        # __init__ asserts stride in [1, 2].
        if self.stride==1:
            x_proj, x = self.channel_shuffle(old_x)
            return torch.cat((x_proj, self.branch_main(x)), 1)
        elif self.stride==2:
            x_proj = old_x
            x = old_x
            return torch.cat((self.branch_proj(x_proj), self.branch_main(x)), 1)
    def channel_shuffle(self, x):
        """Interleave channels and split the batch into two half-channel tensors.

        Implements the shuffle-and-split via reshape/permute; returns
        (passthrough_half, main_branch_half), each with num_channels // 2
        channels.  Requires num_channels divisible by 4.
        """
        batchsize, num_channels, height, width = x.data.size()
        assert (num_channels % 4 == 0)
        x = x.reshape(batchsize * num_channels // 2, 2, height * width)
        x = x.permute(1, 0, 2)
        x = x.reshape(2, -1, num_channels // 2, height, width)
        return x[0], x[1]
    #
    # def channel_shuffle(self, x):
    #     batchsize, num_channels, height, width = x.data.size()
    #     # assert (num_channels % 4 == 0)
    #     # x = x.reshape(batchsize * num_channels // 2, 2, height * width)
    #     # x = x.permute(1, 0, 2)
    #     # x = x.reshape(2, -1, num_channels // 2, height, width)
    #     x = x.view(batchsize, 2, num_channels // 2, height, width)
    #     x = torch.transpose(x, 1, 2).contiguous()
    #     x = x.view(batchsize, -1, height, width)
    #     x1, x2 = x.chunk(2, dim=1)
    #     return x1, x2
@BACKBONES.register_module
class ShuffleNetV2(nn.Module):
    """ShuffleNet V2 backbone.

    Args:
        model_size: width-multiplier key ('0.5x', '1.0x', '1.5x', '2.0x').
        out_indices: indices into [stem, stage1, stage2, stage3/conv_last]
            selecting which feature maps forward() returns.
        frozen_stages: number of leading feature stages to freeze in train().
        norm_eval: keep all BatchNorm layers in eval mode during training.
    """
    def __init__(self, model_size='1.5x', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_eval=True):
        super(ShuffleNetV2, self).__init__()
        print('model size is ', model_size)
        self.stage_repeats = [4, 8, 4]
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.norm_eval = norm_eval
        self.model_size = model_size
        # Channel plan per width multiplier; index 0 is a placeholder.
        if model_size == '0.5x':
            self.stage_out_channels = [-1, 24, 48, 96, 192, 1024]
        elif model_size == '1.0x':
            self.stage_out_channels = [-1, 24, 116, 232, 464, 1024]
        elif model_size == '1.5x':
            self.stage_out_channels = [-1, 24, 176, 352, 704, 1024]
        elif model_size == '2.0x':
            self.stage_out_channels = [-1, 24, 244, 488, 976, 2048]
        else:
            raise NotImplementedError
        # building first layer (stem): 3x3 stride-2 conv + maxpool.
        input_channel = self.stage_out_channels[1]
        self.first_conv = nn.Sequential(
            nn.Conv2d(3, input_channel, 3, 2, 1, bias=False),
            nn.BatchNorm2d(input_channel),
            nn.ReLU(inplace=True),
        )
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.feat_channel = []
        features = []
        for idxstage in range(len(self.stage_repeats)):
            numrepeat = self.stage_repeats[idxstage]
            output_channel = self.stage_out_channels[idxstage + 2]
            for i in range(numrepeat):
                if i == 0:
                    # first block of each stage downsamples (stride 2)
                    features.append(ShuffleV2Block(input_channel, output_channel,
                                                   mid_channels=output_channel // 2, ksize=3, stride=2))
                else:
                    features.append(ShuffleV2Block(input_channel // 2, output_channel,
                                                   mid_channels=output_channel // 2, ksize=3, stride=1))
                input_channel = output_channel
            # Stages are registered as feature_1 .. feature_N.
            self.__setattr__("feature_%d" % (idxstage + 1), nn.Sequential(*features))
            self.feat_channel.append(input_channel)
            features = []
        self.conv_last = nn.Sequential(
            nn.Conv2d(input_channel, self.stage_out_channels[-1], 1, 1, 0, bias=False),
            nn.BatchNorm2d(self.stage_out_channels[-1]),
            nn.ReLU(inplace=True)
        )
    def _freeze_stages(self):
        """Freeze the first `frozen_stages` stages: eval mode, no gradients."""
        # BUG FIX: the original indexed self.feat_id, an attribute that is
        # never defined anywhere in the class, so the first train() call
        # raised AttributeError.  Stages are registered as feature_1..N
        # (see __init__), so freeze them by position instead.
        for i in range(self.frozen_stages):
            m = getattr(self, 'feature_{}'.format(i + 1))
            m.eval()
            for param in m.parameters():
                param.requires_grad = False
    def load_model(self, state_dict):
        """Copy checkpoint values into this model by parameter position.

        NOTE(review): keys are matched positionally, not by name — this
        assumes the checkpoint lists parameters in exactly this model's
        order; verify per checkpoint.
        """
        new_model = self.state_dict()
        new_keys = list(new_model.keys())
        old_keys = list(state_dict.keys())
        restore_dict = OrderedDict()
        for id in range(len(new_keys)):
            restore_dict[new_keys[id]] = state_dict[old_keys[id]]
        res = self.load_state_dict(restore_dict)
        print(res)
    def init_weights(self, pretrained=None):
        """Load pretrained weights from a URL or local path, or random-init.

        Raises TypeError when *pretrained* is neither a str nor None.
        """
        if isinstance(pretrained, str):
            if pretrained.startswith("https"):
                state_dict = model_zoo.load_url(pretrained,
                                                progress=True)
            else:
                state_dict = torch.load(pretrained, map_location=lambda storage, loc: storage)["state_dict"]
            # Strip DataParallel's "module." prefix before loading.
            state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
            self.load_model(state_dict)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        """Return a tuple of the feature maps selected by self.out_indices."""
        x = self.first_conv(x)
        x = self.maxpool(x)
        y = [x]
        for idxstage in range(len(self.stage_repeats)):
            x = self.__getattr__("feature_%d" % (idxstage + 1))(x)
            y.append(x)
        x = self.conv_last(x)
        y[-1] = x  # the last stage output is replaced by the 1x1-conv head
        out = []
        for idx in self.out_indices:
            out.append(y[idx])
        return tuple(out)
    def train(self, mode=True):
        """Switch train/eval, re-apply stage freezing and optional BN eval."""
        super(ShuffleNetV2, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
if __name__ == "__main__":
model = ShuffleNetV2()
# print(model)
test_data = torch.rand(5, 3, 224, 224)
test_outputs = model(test_data)
print(test_outputs.size()) |
"""
Compute Categorical Cluster Distance
"""
#Author: Yukun Chen
#Contact; cykustc@gmail.com
#Copyright: Penn State University,
import csv
import numpy as np
import sys
from scipy.spatial import distance
from cvxopt import matrix, solvers
def read_tokens(cluster_result_file):
    """Read clustering labels (first column of each csv row) into a list.

    Python 2 script: csv files are opened in binary mode per the py2 csv
    module convention.
    """
    # `with` guarantees the handle is closed; the original opened the file
    # and never closed it.
    result = []
    with open(cluster_result_file, 'rb') as csvfile:
        filereader = csv.reader(csvfile, delimiter=',')
        for row in filereader:
            result.append(row[0])
    return result
def cluster_info(clustering_result):
    """Return (cluster_num, cluster_dict): the number of distinct labels and
    a mapping from each label to a small integer index.

    Note: which label receives which index depends on set iteration order.
    """
    distinct_labels = list(set(clustering_result))
    label_to_index = {}
    for index, label in enumerate(distinct_labels):
        label_to_index[label] = index
    return (len(distinct_labels), label_to_index)
def token_to_mat(tokens):
    """One-hot encode cluster labels: row i gets a 1 in the column that
    cluster_info assigned to tokens[i]."""
    cluster_num, cluster_dict = cluster_info(tokens)
    clus_mat = np.zeros((len(tokens), cluster_num))
    for row, token in enumerate(tokens):
        clus_mat[row][cluster_dict[token]] = 1
    return clus_mat
def categorical_clust_dist(clus_mat_A,clus_mat_B,method='even'):
    """
    Compute Clustering distance from [clusters clus_mat_A with weights w_A] to [clusters clus_mat_B with weights w_B]
    More details please refer to Section 4.1 of "A New Mallows Distance Based
    Metric for Comparing Clusterings", Ding Zhou, Jia Li, Hongyuan Zha.
    Return a dictionary contains the Categorical Cluster Distance and matching weights {"dist":,"matching"}

    Solved as a transportation LP: move weight w_A between A's clusters to
    w_B over B's clusters at per-unit cost D (cityblock distance between
    cluster indicator columns).
    """
    n = clus_mat_A.shape[1];
    m = clus_mat_B.shape[1];
    # Cluster weights: uniform, or proportional to cluster sizes.
    if method=='even':
        w_A=1.0/n*np.ones(n)
        w_B=1.0/m*np.ones(m)
    elif method=='instance_count':
        w_A=np.sum(clus_mat_A,axis=0)
        w_A=w_A/np.sum(w_A)
        w_B=np.sum(clus_mat_B,axis=0)
        w_B=w_B/np.sum(w_B)
    # Equality constraints on the n*m flow variables (column-major layout):
    # rows 0..n-1 tie total outflow of each A-cluster to w_A, rows n..n+m-1
    # tie total inflow of each B-cluster to w_B.
    A = np.zeros((n+m,n*m));
    for k in xrange(n):
        A[k][np.arange(k,n*m,n)]=1;
    for k in xrange(m):
        A[n+k][np.arange(k*n,k*n+n)]=1;
    # Drop the last (redundant, since weights sum to 1) constraint so the
    # equality system has full row rank for the LP solver.
    A = A[:-1,:]
    D = distance.cdist(clus_mat_A.T,clus_mat_B.T,'cityblock'); #Computes the city block or Manhattan distance
    f = D.reshape((1,n*m));
    b = np.concatenate((w_A.T,w_B.T),axis=0)
    b = b[:-1]
    c = matrix(f.T);
    beq = matrix(b);
    Aeq = matrix(A);
    # Inequality G x <= h encodes non-negativity of the flows.
    G = matrix(-1.0*np.eye(m*n),(m*n,m*n))
    h = matrix(0,(m*n,1),'d')
    solvers.options['show_progress'] = False
    sol = solvers.lp(c,G,h,A=Aeq,b=beq);
    x=sol['x'];
    #print sol
    x=np.array(x);
    # Reshape the flat solution back to an n x m matching matrix
    # (column-major order matches the constraint layout above).
    x=x.reshape((n,m), order='F')
    return {"dist":sol['primal objective'],"matching":x}
if __name__ == '__main__':
    # Python 2 entry point: compare two clusterings of the same instances.
    clus_mat_A = token_to_mat(read_tokens('cluster_resultsA.txt'))
    clus_mat_B = token_to_mat(read_tokens('cluster_resultsB.txt'))
    # print clus_mat_A, clus_mat_B
    # Both files must label the same number of instances (rows).
    if clus_mat_A.shape[0]!=clus_mat_B.shape[0]:
        print "number of instances in two clustering result are not the same!";
        sys.exit(0)
    result=categorical_clust_dist(clus_mat_A,clus_mat_B,method='instance_count')
    # result=categorical_clust_dist(clus_mat_A,clus_mat_A,method='even')
    print result['dist']
    print result['matching']
|
import pymysql
# Database connection parameters: host, user, password, port (the schema is
# selected later by initdata()).
# NOTE(review): credentials are hard-coded — move them to config/env vars.
db_connect = pymysql.connect(host="152.136.89.241", user="root", password="Huawei123", port=3306)
cursor = db_connect.cursor()
def initdata():
    """Create the db_student database and its t_student table.

    :return: None
    """
    statements = (
        "create database db_student default charset=utf8;",
        "use db_student;",
        "create table t_student(name varchar(30),stu_num varchar(10)"
        ",class_num varchar(10),age varchar(3), sex enum('man', 'woman'));",
    )
    for sql in statements:
        cursor.execute(sql)
def destroy():
    """Drop the db_student database entirely.

    :return: None
    """
    sql = "drop database db_student;"
    cursor.execute(sql)
def findstubynum(stu_num):
    """Look up students by student number.

    :param stu_num: student number string
    :return: tuple of matching rows (empty when there is no match)
    """
    # Parameterized query: the original interpolated stu_num directly into
    # the SQL (and unquoted), which is an injection risk and fails for any
    # non-numeric student number.  addstu() already uses this style.
    cursor.execute("select * from t_student where stu_num = %s;", (stu_num,))
    return cursor.fetchall()
def addstu(stu_dict):
    """Insert one student row.

    :param stu_dict: dict with keys name, stu_num, class_num, age, sex
    :return: None
    """
    fields = ('name', 'stu_num', 'class_num', 'age', 'sex')
    stu_data = tuple(stu_dict[field] for field in fields)
    cursor.execute("insert into t_student(name,stu_num,class_num,age,sex) values (%s, %s, %s, %s, %s);", stu_data)
    db_connect.commit()
def searchall():
    """Return every student row, ordered by name.

    :return: tuple of rows
    """
    cursor.execute("select * from t_student order by name;")
    return cursor.fetchall()
def findbyname(name):
    """Look up students by name.

    :param name: student name
    :return: tuple of matching rows (empty when there is no match)
    """
    # Parameterized query: the original formatted the name into a quoted
    # string ('%s'), which is SQL-injectable and breaks on names containing
    # quotes.  The driver handles quoting when given args separately.
    cursor.execute("select * from t_student where name = %s;", (name,))
    return cursor.fetchall()
|
#! python3
import pyautogui
import time
import sys
# Global pacing multiplier: raise to slow every simulated action down.
scale = 2
type_duration = 0.02 * scale          # delay between typed characters
tab_duration = 0.2 * scale            # delay between Tab presses
mouse_duration = 0.5 * scale          # mouse move duration (unused in this file)
double_click_duration = 0.25 * scale  # gap between the two clicks of a double-click
click_button_delay = 0.5 * scale      # wait between on-screen button searches
schedule_delay = 2 * scale            # wait after opening a schedule (unused in this file)
def click_button(button_label):
    """Poll the screen for <button_label>.png and click its center.

    Retries up to 20 times (pausing click_button_delay between attempts)
    and exits the process when the button never appears.
    """
    print("Clicking on " + button_label + " button")
    attempts = 0
    button = None
    while attempts < 20 and button is None:
        time.sleep(click_button_delay)
        button = pyautogui.locateOnScreen(button_label + '.png')
        attempts += 1
    if not button:
        print("ERROR: Unable to find " + button_label + " button")
        sys.exit()
    pyautogui.click(button.left + button.width / 2, button.top + button.height / 2)
    time.sleep(click_button_delay)
def setup(schedule):
    """Focus the form, scroll to the top, open the named schedule, refocus."""
    def focus_form():
        pyautogui.doubleClick(10, 200, interval=double_click_duration)
    focus_form()
    pyautogui.typewrite(["pageup"] * 20, interval=type_duration)
    click_button(schedule)
    focus_form()
def drop_select(index):
    """Select the *index*-th entry (counting from the top) of the focused
    dropdown; *index* arrives as a string."""
    print("Selecting dropdown " + index)
    keys = ["pageup"] * 20 + ["down"] * int(index)
    pyautogui.typewrite(keys, interval=type_duration)
def press_tab(count):
    """Press Tab *count* times; a negative count shift-tabs backwards."""
    if count == 0:
        return
    # BUG FIX: the original printed tokens[0], reaching into a global that
    # only happens to hold the tab count when called from enter_data();
    # report this function's own parameter instead.
    print("Tabbing " + str(abs(count)) + " times")
    if count < 0:
        pyautogui.keyDown('shift')
    pyautogui.typewrite('\t' * abs(count), interval=tab_duration)
    if count < 0:
        pyautogui.keyUp('shift')
    time.sleep(tab_duration)
def enter_data(tokens):
    """Process one (tab_count, payload) instruction.

    The payload is either a ">command arg" directive (click/drop/press/
    sleep/type) or literal text to type into the focused field.
    """
    press_tab(int(tokens[0]))
    data = tokens[1]
    if not data.startswith(">"):
        print("Typing " + data)
        pyautogui.typewrite(data)
        return
    arg = data.split(' ', 1)[1]
    if data.startswith(">click "):
        click_button(arg)
    elif data.startswith(">drop "):
        drop_select(arg)
    elif data.startswith(">press "):
        print("Pressing " + arg)
        pyautogui.typewrite([arg], interval=type_duration)
    elif data.startswith(">sleep "):
        print("Sleeping for " + arg + " seconds")
        time.sleep(float(arg))
    elif data.startswith(">type "):
        print("Typing " + arg)
        pyautogui.typewrite(arg, interval=tab_duration)
# Entry point: replay the instruction file given on the command line.
if len(sys.argv) != 2:
    print('Usage: ' + sys.argv[0] + ' <data file>')
    sys.exit(1)
file = open(sys.argv[1])
for line in file:
    line = line.strip()
    # ">schedule <name>" lines open a schedule before any field entry.
    if line.startswith(">schedule"):
        setup(line[1:])
    # Field lines are "tab_count,payload"; anything else is skipped.
    tokens = line.split(",", 1)
    if len(tokens) != 2:
        continue
    enter_data(tokens)
file.close()
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
    """LeetCode 1013: partition an array into three non-empty parts with
    equal sums."""
    def canThreePartsEqualSum(self, A: List[int]) -> bool:
        """Return True when A splits into three contiguous non-empty parts
        of equal sum.

        Single O(n) scan counting prefix segments that reach total/3.
        Fixes two defects in the original quadratic version: it accepted
        totals not divisible by 3 (floor division made e.g. [1, 1, 2] pass)
        and allowed the third part to be empty (e.g. [0, 0] passed).
        """
        total = sum(A)
        if total % 3 != 0:
            return False
        target = total // 3
        running = 0
        parts_found = 0
        for i, el in enumerate(A):
            running += el
            if running == target:
                parts_found += 1
                running = 0
                # Two full parts found with at least one element left over
                # guarantees the remainder also sums to target.
                if parts_found == 2 and i < len(A) - 1:
                    return True
        return False
if __name__ == "__main__":
solution = Solution()
assert solution.canThreePartsEqualSum([0, 2, 1, -6, 6, -7, 9, 1, 2, 0, 1])
assert not solution.canThreePartsEqualSum([0, 2, 1, -6, 6, 7, 9, -1, 2, 0, 1])
assert solution.canThreePartsEqualSum([3, 3, 6, 5, -2, 2, 5, 1, -9, 4])
|
# import sys
# sys.path.insert(0,'/usr/lib/chromium-browser/chromedriver')
import random
import time
import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
# from selenium import webdriver
# from selenium.webdriver.common.keys import Keys
# from selenium.webdriver.support.ui import WebDriverWait
# Pool of browser user-agent strings to rotate through (currently one entry).
user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0',
]
header = {}  # request headers, populated by randomUserAgents()
def randomUserAgents():
    """Pick a random user-agent string and populate the shared *header* dict.

    The chosen string is split at ") ": the first part (with ')' restored)
    becomes User-Agent.
    NOTE(review): the remainder is stored under "Accept", which is an odd
    value for that header — confirm this is intentional.
    """
    chosen = random.choice(user_agents)
    parts = chosen.split(") ")
    header["User-Agent"] = parts[0] + ')'
    header["Accept"] = parts[1]
    return header
# url = 'https://www.glassdoor.com/Reviews/NIKE-Reviews-E1699.htm'
head = randomUserAgents()  # headers reused for every page fetch
start = time.time()
def soup(url, headers):
    """Fetch *url* with the given headers and return the parsed page.

    url = full glassdoor.com/reviews url
    """
    session = requests.Session()
    response = session.get(url, headers=headers)
    return BeautifulSoup(response.text, 'lxml')
startTime = start - time.time()  # NOTE(review): this is <= 0; likely meant time.time() - start
# One accumulator list per scraped column; all are appended in lockstep per
# review and later become DataFrame columns.
a = []
date = []
revNo = []
employee = []
position = []
summ = []
pro = []
con = []
advice = []
review = []
subReviews = []
workLife = []
culture = []
helpful = []
careerOpp = []
compBenefits = []
srManagement = []
authorlocation = []
recommend = []
outlook = []
ceo = []
link = []
# "NIKE-Reviews-E1699", "adidas-Reviews-E10692"
url_list = {'Nike':"NIKE-Reviews-E1699", 'Adidas':"adidas-Reviews-E10692", 'Walmart':"Walmart-Reviews-E715", 'Macys':"Macy-s-Reviews-E1079", 'Target':"Target-Reviews-E194", 'Gap':"Gap-Reviews-E114118",'HomeDepot':"The-Home-Depot-Reviews-E655",'Apple':"Apple-Reviews-E1138",'McDonalds':"McDonald-s-Reviews-E432",'Starbucks':"Starbucks-Reviews-E2202",'Chipotle':"Chipotle-Reviews-E15228",'BestBuy':"Best-Buy-Reviews-E97"}
# url_list = ["Target-Reviews-E194"]
# for url_str_key,url_str_value in url_list.items():
# m=2
# while m == 2:
print("walmart")
count = 1
# url_prefix = "https://www.glassdoor.com/Reviews/" + url_str_value
# bs = soup(url_prefix + ".htm", head)
i = 1
k = 1
# Page loop: fetch review pages P1, P2, ... until the hard-coded last page
# (i == 211) calls exit(); the CSV below is rewritten after every page.
while k == 1:
    # print('hello')
    # z = bs.find('li',{'class','pagination__PaginationStyle__next'}).find('a')['class'][0].find('disabled')
    # print(z)
    # chrome_options = webdriver.ChromeOptions()
    # chrome_options.add_argument('--window-size=1420,1080')
    # chrome_options.add_argument('--headless')
    # chrome_options.add_argument('--no-sandbox')
    # chrome_options.add_argument('--disable-dev-shm-usage')
    # driver = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
    #Here Search for the company url and write it here, we have to do this beacause for every company there's a random number associated to it
    url = "https://www.glassdoor.com/Reviews/adidas-Reviews-E10692_P" + str(i) + ".htm"
    # driver.get(url)
    # driver = webdriver.Firefox()
    # driver.get(url)
    print(url)
    i = i + 1
    # content = driver.page_source
    # time.sleep(1)
    # element = WebDriverWait(driver, 10)
    # bs = BeautifulSoup(content)
    bs = soup(url, head)
    print(" ")
    # bs.find('li', {'class', 'pagination__PaginationStyle__next'}) == None or 'disabled' in ('\t'.join(bs.find('li', {'class', 'pagination__PaginationStyle__next'}).find('a')['class'])):
    if bs.find('li', {'class', 'pagination__PaginationStyle__next'}) == None:
        print('None in this page')
    elif i == 211:
        # here change the value to the last page number of the particular company
        # this is being done because glassdoor is changing the identification of last page for a company frequently
        # print(bs)
        k = 2
        exit()
    else:
        print(i)
        # One <li class="empReview cf"> per review; every column list gets
        # exactly one append per review (with 'None'/-1 placeholders).
        for x in bs.findAll('li', {'class', 'empReview cf'}):
            a.append(count)
            count += 1
            ## Rev Number
            try:
                revNo.append(x['id'])
            except:
                revNo.append('None')
            ## overall rating
            try:
                review.append(x.find('span', {'class': 'rating'}).find('span', {'class': 'value-title'})['title'])
            except:
                review.append('None')
            ## subRatings list
            try:
                subclasspresent = {}
                subratingarrays = [workLife, culture, careerOpp, compBenefits, srManagement]
                subratingclasses = ['Work/Life Balance', 'Culture & Values', 'Career Opportunities',
                                    'Compensation and Benefits', 'Senior Management']
                z = 0
                # Mark sub-ratings missing from this review with -1.
                for subclass in range(len(subratingclasses)):
                    if subratingclasses[subclass] in (x.find('ul', {'class': 'undecorated'}).text):
                        z = 1
                    else:
                        subratingarrays[subclass].append(-1)
                for rate in x.find('ul', {'class': 'undecorated'}).findAll('li'):
                    subratingarrays[subratingclasses.index(rate.find('div', {'class': 'minor'}).text)].append(
                        rate.find('span', {'class': 'gdBars gdRatings med'})['title'])
            except:
                # NOTE(review): categories already marked -1 above get a
                # second -1 appended here, skewing list lengths — verify.
                for subratingarr in subratingarrays:
                    subratingarr.append(-1)
                # print('Error in subratings list')
            ## Date
            try:
                # print(x.find('time', {'class': 'date subtle small'}).text)
                date.append(x.find('time', {'class': 'date subtle small'}).text)
            except:
                date.append('None')
            ## Employee Type
            try:
                employee.append(x.find('span', {'class': "authorJobTitle"}).text)
                # print(x.find('span',{'class':"authorJobTitle"}).text)
            except:
                employee.append('None')
            ##Location
            try:
                position.append(x.find('span', {'class': 'authorLocation'}).text)
            except:
                position.append('None')
            ##Recommendoutlookceo
            try:
                subarraying = [recommend, outlook, ceo]
                counti = [0, 0, 0]
                # Map the textual verdicts to numeric scores.
                indices = {'Recommends':5,'Doesn\'t Recommend':0,'None':-1,'Positive Outlook':5,'Neutral Outlook':2.5,'Negative Outlook':0,'Approves of CEO':5,'No opinion of CEO':2.5,'Disapproves of CEO':0}
                for subreview in x.find('div', {'class': 'row reviewBodyCell recommends'}).findAll('div', {
                        'class': 'col-sm-4'}):
                    if 'Recommend' in subreview.find('span').text:
                        counti[0] = 1
                        recommend.append(indices[subreview.find('span').text])
                    elif 'Outlook' in subreview.find('span').text:
                        counti[1] = 1
                        outlook.append(indices[subreview.find('span').text])
                    elif 'CEO' in subreview.find('span').text:
                        counti[2] = 1
                        ceo.append(indices[subreview.find('span').text])
                # Backfill 'None' for verdict types absent from this review.
                for indi in range(len(counti)):
                    if counti[indi] == 0:
                        subarraying[indi].append('None')
            except:
                recommend.append('None')
                outlook.append('None')
                ceo.append('None')
                # print('Error in recommendoutlook try block')
            ## Helpful votes
            try:
                helpful.append(
                    re.findall('\d+', x.find('div', {'class': 'helpfulReviews helpfulCount small subtle'}).text)[0])
            except:
                # print('dfs')
                helpful.append('None')
    # driver.quit()
    # time.sleep(1)
    # Rebuild and dump the accumulated DataFrame after each page so partial
    # results survive an interrupted crawl.
    df = pd.DataFrame(index=a)
    df['date'] = date
    df['reviewNo'] = revNo
    df['overallStar'] = review
    df['workLifeStar'] = workLife
    df['cultureStar'] = culture
    df['careerOppStar'] = careerOpp
    df['comBenefitsStar'] = compBenefits
    df['srManagementStar'] = srManagement
    df['employeeType'] = employee
    df['location'] = position
    df['recommend'] = recommend
    df['outlook'] = outlook
    df['ceo'] = ceo
    df['helpful'] = helpful
    print(df)
    df.tail()
df.to_csv('Glassdor_Adidas.csv', sep=',') |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the load_forwarding_rules_pipeline."""
from tests.unittest_utils import ForsetiTestCase
import mock
import MySQLdb
import unittest
# pylint: disable=line-too-long
from google.cloud.security.common.data_access import errors as data_access_errors
from google.cloud.security.common.data_access import forwarding_rules_dao as frdao
from google.cloud.security.common.data_access import project_dao
from google.cloud.security.common.gcp_api import compute
from google.cloud.security.common.gcp_api import errors as api_errors
from google.cloud.security.inventory import errors as inventory_errors
from google.cloud.security.inventory.pipelines import base_pipeline
from google.cloud.security.inventory.pipelines import load_forwarding_rules_pipeline
from tests.inventory.pipelines.test_data import fake_configs
from tests.inventory.pipelines.test_data import fake_forwarding_rules
from tests.inventory.pipelines.test_data import fake_projects
# pylint: enable=line-too-long
def _set_count(*args, **kwargs):
    """Set the pipeline count."""
    # Intentionally a no-op: placeholder side-effect callable for mocks.
class LoadForwardingRulesPipelineTest(ForsetiTestCase):
    """Tests for the load_forwarding_rules_pipeline."""
    def setUp(self):
        """Set up."""
        # Build the pipeline under test with autospec'd API client and DAO.
        self.cycle_timestamp = '20001225T120000Z'
        self.configs = fake_configs.FAKE_CONFIGS
        self.resource_name = 'forwarding_rules'
        self.mock_compute_client = mock.create_autospec(compute.ComputeClient)
        self.mock_dao = mock.create_autospec(frdao.ForwardingRulesDao)
        self.pipeline = (
            load_forwarding_rules_pipeline.LoadForwardingRulesPipeline(
                self.cycle_timestamp,
                self.configs,
                self.mock_compute_client,
                self.mock_dao))
        self.project_ids = fake_forwarding_rules \
            .FAKE_PROJECT_FWD_RULES_MAP.keys()
        # Only the fake projects whose ids appear in the fake rules map.
        self.projects = [project_dao.ProjectDao.map_row_to_object(p)
                         for p in fake_projects.EXPECTED_LOADABLE_PROJECTS
                         if p['project_id'] in self.project_ids]
    def test_can_transform_forwarding_rules(self):
        """Test transform function works."""
        actual = self.pipeline._transform(
            fake_forwarding_rules.FAKE_PROJECT_FWD_RULES_MAP)
        self.assertEquals(
            fake_forwarding_rules.EXPECTED_LOADABLE_FWD_RULES,
            list(actual))
    @mock.patch.object(MySQLdb, 'connect')
    @mock.patch('google.cloud.security.common.data_access.project_dao.ProjectDao.get_projects')
    def test_api_is_called_to_retrieve_forwarding_rules(
            self, mock_get_projects, mock_conn):
        """Test that API is called to retrieve forwarding rules."""
        mock_get_projects.return_value = self.projects
        self.pipeline._retrieve()
        # One API call per project id.
        self.assertEqual(
            len(self.project_ids),
            self.pipeline.api_client.get_forwarding_rules.call_count)
    @mock.patch.object(MySQLdb, 'connect')
    @mock.patch('google.cloud.security.common.data_access.project_dao.ProjectDao.get_projects')
    def test_retrieve_data_is_correct(
            self, mock_get_projects, mock_conn):
        """Test _retrieve() data is correct."""
        mock_get_projects.return_value = self.projects
        self.pipeline.api_client.get_forwarding_rules = mock.MagicMock(
            side_effect=[fake_forwarding_rules.FAKE_API_RESPONSE1,
                         fake_forwarding_rules.FAKE_API_RESPONSE2])
        actual = self.pipeline._retrieve()
        self.assertEquals(
            fake_forwarding_rules.FAKE_PROJECT_FWD_RULES_MAP,
            actual)
    @mock.patch.object(MySQLdb, 'connect')
    @mock.patch(
        'google.cloud.security.common.data_access.project_dao.ProjectDao'
        '.get_projects')
    @mock.patch(
        'google.cloud.security.inventory.pipelines.base_pipeline.LOGGER')
    def test_retrieve_error_logged_when_api_error(
            self, mock_logger, mock_get_projects, mock_conn):
        """Test that LOGGER.error() is called when there is an API error."""
        mock_get_projects.return_value = self.projects
        self.pipeline.api_client.get_forwarding_rules.side_effect = (
            api_errors.ApiExecutionError(self.resource_name, mock.MagicMock()))
        results = self.pipeline._retrieve()
        # All projects fail, so the result is empty and each failure logged.
        self.assertEqual({}, results)
        self.assertEqual(
            len(self.project_ids),
            mock_logger.error.call_count)
    @mock.patch.object(MySQLdb, 'connect')
    @mock.patch('google.cloud.security.common.data_access.project_dao.ProjectDao.get_projects')
    def test_pipeline_no_rules_loads_nothing(
            self, mock_get_projects, mock_conn):
        """Test the pipeline with no forwarding rules."""
        mock_get_projects.return_value = self.projects
        base_pipeline.LOGGER = mock.MagicMock()
        self.pipeline.api_client.get_forwarding_rules = mock.MagicMock(
            side_effect=[[], []])
        self.pipeline.dao.select_record_count = mock.MagicMock(
            side_effect=data_access_errors.MySQLError(
                'forwarding_rules', mock.MagicMock()))
        self.pipeline.run()
        self.assertEquals(None, self.pipeline.count)
    @mock.patch.object(
        load_forwarding_rules_pipeline.LoadForwardingRulesPipeline,
        '_get_loaded_count')
    @mock.patch.object(
        load_forwarding_rules_pipeline.LoadForwardingRulesPipeline,
        '_load')
    @mock.patch.object(
        load_forwarding_rules_pipeline.LoadForwardingRulesPipeline,
        '_transform')
    @mock.patch.object(
        load_forwarding_rules_pipeline.LoadForwardingRulesPipeline,
        '_retrieve')
    def test_subroutines_are_called_by_run(
            self,
            mock_retrieve,
            mock_transform,
            mock_load,
            mock_get_loaded_count):
        """Test that the subroutines are called by run."""
        mock_retrieve.return_value = \
            fake_forwarding_rules.FAKE_PROJECT_FWD_RULES_MAP
        mock_transform.return_value = (
            fake_forwarding_rules.EXPECTED_LOADABLE_FWD_RULES)
        self.pipeline.run()
        mock_transform.assert_called_once_with(
            fake_forwarding_rules.FAKE_PROJECT_FWD_RULES_MAP)
        self.assertEquals(1, mock_load.call_count)
        # The regular data is loaded.
        called_args, called_kwargs = mock_load.call_args_list[0]
        expected_args = (
            self.pipeline.RESOURCE_NAME,
            fake_forwarding_rules.EXPECTED_LOADABLE_FWD_RULES)
        self.assertEquals(expected_args, called_args)
if __name__ == '__main__':
    unittest.main()
|
"""
Your task in this exercise has two steps:
- audit the OSMFILE and change the variable 'mapping' to reflect the changes needed to fix
the unexpected street types to the appropriate ones in the expected list.
You have to add mappings only for the actual problems you find in this OSMFILE,
not a generalized solution, since that may and will depend on the particular area you are auditing.
- write the update_name function, to actually fix the street name.
The function takes a string with street name as an argument and should return the fixed name
We have provided a simple test so that you see what exactly is expected
"""
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
OSMFILE = "c:/Temp/tucson.osm"
dash = "-"
def is_postcode(elem):
    """Return True when *elem* is a tag element whose 'k' attribute marks a
    postcode."""
    return elem.attrib['k'] == "addr:postcode"
def audit(osmfile):
    """Collect addr:postcode values from an OSM XML file.

    Returns a defaultdict(set) keyed by the postcode tag's position within
    its parent node/way element, mapping to the set of postcode strings.
    """
    postcode_types = defaultdict(set)
    # `with` closes the file handle (the original leaked it).
    with open(osmfile, "r") as osm_file:
        for event, elem in ET.iterparse(osm_file, events=("start",)):
            i = 1
            if elem.tag == "node" or elem.tag == "way":
                for tag in elem.iter("tag"):
                    if is_postcode(tag):
                        #print(tag.attrib['v'])
                        postcode_types[i].add(tag.attrib['v'])
                        # BUG FIX: was `i=+1`, which just re-assigned 1 and
                        # never advanced the per-element tag counter.
                        i += 1
    return postcode_types
def postcode_format(postcode):
    """Return the 5-digit ZIP, dropping any ZIP+4 extension."""
    if dash not in postcode:
        return postcode
    zip_part, ext = re.split('-', postcode)
    return zip_part
def test():
    """Audit OSMFILE and print each postcode with its normalized form.

    Python 2 script (print statements, dict.iteritems).
    """
    post_types = audit(OSMFILE)
    pprint.pprint(dict(post_types))
    for post_type, ways in post_types.iteritems():
        for pcode in ways:
            newPostcode=postcode_format(pcode)
            print pcode,"=>",newPostcode
if __name__ == '__main__':
    test()
|
import scrapy
class QuestionsSpider(scrapy.Spider):
    """Crawl stackoverflow.com/questions, yielding each question's text,
    title and tags."""
    name = "questions"
    allowed_domains = ["stackoverflow.com"]
    start_urls = ["https://stackoverflow.com/questions"]
    def parse(self, response):
        """Follow every question link on a listing page, then paginate."""
        questions = response.xpath(
            '//div[@class="summary"]/h3/a'
        )  # extracting the question title link for moving to another page where question text is asked
        for q in questions:
            link = q.xpath(".//@href").get()
            absolute_url = response.urljoin(link)
            yield response.follow(url=absolute_url, callback=self.parse_question)
        next_page = response.xpath(
            "//*[@rel='next']/@href"
        ).extract_first()  # next page link; None on the last page
        # BUG FIX: the original None-checked the result of urljoin(), but
        # urljoin(None) returns the current page URL (never None), so the
        # guard could not stop pagination.  Check the extracted href itself.
        if next_page is not None:
            absolute_next_page = response.urljoin(next_page)
            yield response.follow(url=absolute_next_page, callback=self.parse)
    def parse_question(self, response):
        """Scrape one question page into a text/title/tags item."""
        text = response.xpath(
            "//div[@class='s-prose js-post-body']/child::node()"
        ).extract()  # Questiin text
        title = response.xpath(
            "//*[@id='question-header']/h1/a/text()"
        ).extract()  # Question title
        tags = response.xpath(
            '//*[@id="question"]/div/div[2]/div[2]/div/div/a/text()'
        ).extract()  # Question tags
        yield {
            "text": text,
            "title": title,
            "tags": tags,
            "user-agent": response.request.headers.get("User-Agent").decode(
                "utf-8"
            ),  # look the middleware.py file for more
        }
|
from django.shortcuts import render , get_object_or_404
from operator import itemgetter
import requests
import json
import adal
from django.http import JsonResponse
from django.core.paginator import Paginator
#Function to extract patient data based on id,all patients
# Module-level state shared by the view functions below; populated by home()
# during authentication / patient selection.
pid=""  # currently selected patient id ('' until a POST selects one)
TOKEN=""  # OAuth access token for the FHIR server
encounter_id=""  # last Encounter id rendered
observation_id=""  # last Observation id rendered
url=""  # NOTE(review): shadowed by the url() view below; appears unused
Resource=""  # FHIR server base URL read from credentials.json
def _field(getter, default="No data available"):
    """Best-effort field access: run *getter*, fall back to *default* if the
    FHIR resource lacks the field (missing key, empty list, or None)."""
    try:
        return getter()
    except (KeyError, IndexError, TypeError):
        return default
def _patient_row(resource):
    """Build the [type, id, birthDate, gender, family name, city] display row
    for one FHIR Patient resource.  resourceType/id are mandatory; the other
    fields degrade to a placeholder when absent."""
    return [
        resource['resourceType'],
        resource['id'],
        _field(lambda: resource['birthDate']),
        _field(lambda: resource['gender']),
        _field(lambda: resource['name'][0]['family']),
        _field(lambda: resource['address'][0]['city']),
    ]
def home(request):
    """Authenticate against the FHIR server, then render one patient (POST
    with an ID) or the first 20 patients (GET)."""
    # OAuth 2.0 client-credentials flow (Azure AD) for the FHIR server.
    # 'with' closes the credentials file; the original leaked the handle.
    with open('app/credentials.json') as f:
        creds = json.load(f)
    TENANT_ID = creds.get('TENANT_ID')
    CLIENT = creds.get('CLIENT_ID')
    KEY = creds.get('CLIENT_SECRET')
    authority_url = 'https://login.microsoftonline.com/' + TENANT_ID
    global Resource
    Resource = creds.get('RESOURCE_ID')
    context = adal.AuthenticationContext(authority_url)
    global token
    token = context.acquire_token_with_client_credentials(
        resource=Resource,
        client_id=CLIENT,
        client_secret=KEY)
    global TOKEN
    TOKEN = token["accessToken"]
    newHeaders = {'Content-type': 'application/json', "Authorization": "Bearer %s" % TOKEN}
    # Get Patient by id
    if request.method == "POST":
        global pid
        pid = request.POST.get('ID', '')
        response = requests.get(Resource + "/Patient/{}".format(pid),
                                headers=newHeaders, verify=False)
        data = response.json()
        lst = []
        if response.ok:
            lst.append(_patient_row(data))
        return render(request, 'app/home.html', {'param': lst, "id": pid})
    # All patients (first 20)
    elif request.method == 'GET':
        response = requests.get(Resource + "/Patient?_count=20",
                                headers=newHeaders, verify=False)
        data = response.json()
        lst = []
        if 'entry' in data:
            for all_data in data['entry']:
                lst.append(_patient_row(all_data['resource']))
        else:
            # No 'entry' key in the bundle: show a single placeholder row.
            lst.append(["No data found"] * 6)
        return render(request, 'app/home.html', {'param': lst})
def observation(request):
    """Render the Observation resources of the currently selected patient.

    Relies on module-level state: ``pid`` (set by home()), ``Resource`` and
    ``TOKEN``.  Each observation becomes a 6-element display row; missing
    fields fall back to "No data available".
    """
    global observation_id
    lst = []
    # pid is initialised to '' (never None), so test truthiness; the
    # original 'is None' check could never fire.
    if not pid:
        return render(request, 'app/home.html', {'obsparam': "No patient id found"})
    url = Resource + "/Observation?patient={}".format(pid)
    newHeaders = {'Content-type': 'application/json', "Authorization": "Bearer %s" % TOKEN}
    response = requests.get(url, headers=newHeaders, verify=False)
    if response.ok:
        data = response.json()
        if 'entry' not in data:
            print("NO Observation available")
        else:
            for all_data in data['entry']:
                resource = all_data['resource']
                l = [resource['resourceType']]
                observation_id = resource['id']
                l.append(resource['id'])
                try:
                    reference = resource['subject'].get('reference')
                except (KeyError, IndexError, TypeError):
                    reference = "No data available"
                l.append(reference)
                try:
                    display = resource['code']['coding'][0].get('display')
                except (KeyError, IndexError, TypeError):
                    display = "No data available"
                l.append(display)
                try:
                    res = list(map(itemgetter('coding'), resource['category']))
                    category = res[0][0]['display']
                except (KeyError, IndexError, TypeError):
                    category = "No data available"
                l.append(category)
                try:
                    v1 = resource['valueQuantity'].get('value')
                    v2 = resource['valueQuantity'].get('unit')
                    value = str(v1) + " " + str(v2)
                except (KeyError, IndexError, TypeError):
                    value = "No data available"
                l.append(value)
                lst.append(l)
    obsparam = {'obsparam': lst, 'observation_id': observation_id}
    return render(request, 'app/home.html', obsparam)
def encounter(request):
    """Render the Encounter resources of the currently selected patient.

    Uses module-level ``pid`` / ``Resource`` / ``TOKEN``.  Each encounter
    becomes a 6-element display row with per-field fallbacks.
    """
    global encounter_id
    lst = []
    # pid starts life as '' rather than None, so check truthiness (the
    # original 'is None' test was dead code).
    if not pid:
        return render(request, 'app/home.html', {'obsparam': "No patient id found"})
    url = Resource + "/Encounter?patient={}".format(pid)
    newHeaders = {'Content-type': 'application/json', "Authorization": "Bearer %s" % TOKEN}
    response = requests.get(url, headers=newHeaders, verify=False)
    if response.ok:
        data = response.json()
        if 'entry' not in data:
            print("NO Encounter available")
        else:
            for all_data in data['entry']:
                resource = all_data['resource']
                l = []
                try:
                    resourceType = resource['resourceType']
                except (KeyError, TypeError):
                    resourceType = "No data available"
                l.append(resourceType)
                try:
                    encounter_id = resource['id']
                    id_value = encounter_id
                except (KeyError, TypeError):
                    id_value = "No data available"
                l.append(id_value)
                try:
                    priority = resource['priority']['coding'][0]['display']
                except (KeyError, IndexError, TypeError):
                    priority = "No data available"
                l.append(priority)
                try:
                    reason = list(map(itemgetter('coding'), resource['reasonCode']))
                    reasonCode = reason[0][0]['display']
                except (KeyError, IndexError, TypeError):
                    reasonCode = "No data available"
                l.append(reasonCode)
                try:
                    admitSource = resource['hospitalization']['admitSource']['coding'][0]['display']
                except (KeyError, IndexError, TypeError):
                    admitSource = "No data available"
                l.append(admitSource)
                try:
                    serviceProvider = resource['serviceProvider'].get('reference')
                except (KeyError, TypeError):
                    serviceProvider = "No data available"
                l.append(serviceProvider)
                lst.append(l)
    encounter_param = {'encounter_param': lst, 'encounter_id': encounter_id}
    return render(request, 'app/home.html', encounter_param)
def jsonviewPatient(request, id):
    """Proxy the raw FHIR Patient resource for *id* as JSON."""
    url = Resource + "/Patient/{}".format(str(id))
    newHeaders = {'Content-type': 'application/json', "Authorization": "Bearer %s" % TOKEN}
    response = requests.get(url, headers=newHeaders, verify=False)
    if response.ok:
        return JsonResponse(response.json(), content_type='application/json')
    # The original fell off the end and returned None, which makes Django
    # raise "view didn't return an HttpResponse"; report the failure instead.
    return JsonResponse({'error': 'FHIR request failed'}, status=response.status_code)
def jsonviewObservation(request, id):
    """Proxy the raw FHIR Observation resource for *id* as JSON."""
    url = Resource + "/Observation/{}".format(str(id))
    newHeaders = {'Content-type': 'application/json', "Authorization": "Bearer %s" % TOKEN}
    response = requests.get(url, headers=newHeaders, verify=False)
    if response.ok:
        return JsonResponse(response.json(), content_type='application/json')
    # Returning None on failure was a Django error in the original.
    return JsonResponse({'error': 'FHIR request failed'}, status=response.status_code)
def jsonviewEncounter(request, id):
    """Proxy the raw FHIR Encounter resource for *id* as JSON."""
    url = Resource + "/Encounter/{}".format(str(id))
    newHeaders = {'Content-type': 'application/json', "Authorization": "Bearer %s" % TOKEN}
    response = requests.get(url, headers=newHeaders, verify=False)
    if response.ok:
        return JsonResponse(response.json(), content_type='application/json')
    # Returning None on failure was a Django error in the original.
    return JsonResponse({'error': 'FHIR request failed'}, status=response.status_code)
def url(request):
    """Pass-through endpoint: POST a relative FHIR URL and get the raw JSON back.

    Non-POST requests (and failed upstream calls) now return an empty JSON
    payload; the original returned None for GET, which is an error in Django.
    """
    json_data = ''
    if request.method == "POST":
        rel_url = request.POST.get('URL', '')
        newHeaders = {'Content-type': 'application/json', "Authorization": "Bearer %s" % TOKEN}
        response = requests.get(Resource + "/" + rel_url, headers=newHeaders, verify=False)
        if response.ok:
            json_data = response.json()
    return JsonResponse(json_data, content_type='application/json', safe=False)
def error_404_view(request,exception):
    """Custom 404 handler (wired up via handler404); *exception* is required
    by Django's handler signature but intentionally unused."""
    return render(request,'app/404.html')
|
"""
Edanur Demir
07 Nov 2019
"""
import argparse
import csv
def init():
    """Parse command-line arguments (--filename: path of the CSV to analyze)."""
    parser = argparse.ArgumentParser(description='EENets experiments')
    parser.add_argument('--filename', type=str, help='file to be analyzed')
    return parser.parse_args()
def read(args):
    """Load args.filename as CSV and return its rows as dicts keyed by header.

    Equivalent to the original manual header/row bookkeeping, but uses
    csv.DictReader and avoids shadowing the built-in ``id``.
    """
    # newline='' is the documented way to open files for the csv module.
    with open(args.filename, newline='') as csv_file:
        return [dict(row) for row in csv.DictReader(csv_file, delimiter=',')]
def analyze(data):
    """Print statistics comparing early-exit predictions with the final exit.

    NOTE(review): the rows come straight from csv.reader, so every field is a
    string - the '<'/'>' comparisons on confidences and exit indices below are
    lexicographic, not numeric.  Confirm whether the fields should be cast to
    float/int first.  The divisor 10000. is presumably the test-set size -
    verify against the dataset.
    """
    # Some samples can be classified differently from resnet. How much are they better?
    compare_resnet = {"True":0, "False":0}
    increasing_conf = 0
    early_confident = 0
    late_confident = 0
    for sample in data:
        # Samples classified differently from the last exit
        if sample["actual_pred"] != sample["start_pred_seq"]:
            if sample["actual_pred"] == sample["target"]:
                compare_resnet["True"] += 1
                early_confident += 1
            elif sample["start_pred_seq"] == sample["target"]:
                compare_resnet["False"] += 1
            # bool is added as 0/1
            increasing_conf += sample["start_conf_seq"] < sample["actual_conf"]
        if sample["actual_exit"] > sample["start_exit_seq"]:
            if sample["actual_pred"] == sample["target"]:
                late_confident += 1
            increasing_conf += sample["start_conf_seq"] > sample["actual_conf"]
    print("Samples classified differently from the last exit")
    print(compare_resnet)
    print()
    print("The rate of early true confident samples: {:.2f}".format(early_confident/10000.))
    print("The rate of late true confident samples: {:.2f}".format(late_confident/10000.))
    print()
    print("The rate of samples whose confidence is increasing: {:.2f}".format(increasing_conf/10000.))
def main():
    """Entry point: parse arguments, load the CSV rows, and print the analysis.

    (The previous docstring mentioned training/validation, which this script
    does not do.)
    """
    args = init()
    data = read(args)
    analyze(data)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
* http://blog.moertel.com/posts/2013-05-11-recursive-to-iterative.html
"""
ff = []
def factorial_0(n):
    """Starting point: plain recursion, n! = n * (n-1)!, base case n < 2."""
    if n < 2:
        return 1
    return n * factorial_0(n - 1)
pass
ff.append(factorial_0)
def factorial_1(n, acc=1):
    """
    Tail call, last line just does the recursive call
    (step 1 of the recursive-to-iterative transformation: thread an
    accumulator so no work remains after the recursive call returns).
    """
    if n < 2:
        return 1 * acc
    return factorial_1(n - 1, acc * n)
pass
ff.append(factorial_1)
def factorial_2(n, acc=1):
    """
    Stuff in loop
    (step 2: wrap the body in 'while True'; the tail call is still present,
    so the loop runs at most once and the trailing 'break' is unreachable -
    intentional scaffolding for the next step).
    """
    while True:
        if n < 2:
            return 1 * acc
        return factorial_2(n - 1, acc * n)
        break
ff.append(factorial_2)
def factorial_3(n, acc=1):
    """
    Replace all recursive tail calls f(x=x1, y=y1, ...) with (x, y, ...) = (x1, y1, ...); continue
    (step 3: the recursion is gone; 'continue'/'break' remain from the
    mechanical rewrite and are cleaned up in factorial_5).
    """
    while True:
        if n < 2:
            return 1 * acc
        (n, acc) = (n - 1, acc * n)
        continue
        break
ff.append(factorial_3)
def factorial_5(n, acc=1):
    """Final iterative form: multiply acc by n, n-1, ..., 2."""
    for factor in range(n, 1, -1):
        acc *= factor
    return acc
ff.append(factorial_5)
# Sanity check: every registered variant must compute 5! = 120.
if __name__ == '__main__':
    for f in ff:
        v = f(5)
        print(f.__name__, v)
        assert v == 120
|
from datetime import datetime
import openpyxl
from .models import Products
from collections import Counter
from django.db.models import Min, Max, Sum
def print_t(ob):
    """Debug helper: print an object's type next to its value."""
    message = "{} {}".format(type(ob), ob)
    print(message)
def first_item_dict_queryset(ob):
    """Return the first value found in an iterable of mappings (e.g. a
    .values() queryset); None when there is nothing to return."""
    for mapping in ob:
        for value in mapping.values():
            return value
def first_item_dict(dc):
    """Return the first value of *dc* (insertion order), or None when empty."""
    return next(iter(dc.values()), None)
def logic(date):
    """Return titles of products predicted to have run out by *date*.

    A product qualifies when it was purchased at least ``n`` times and its
    average daily consumption, projected from the last purchase to *date*,
    exceeds the quantity bought last time.  Returns a dict mapping each such
    title to itself (used as an ordered set).
    """
    n = 2
    dc = {}
    for product in Products.objects.all():
        # Reuse one filtered queryset; the original rebuilt it for every aggregate.
        qs = Products.objects.filter(title=product.title)
        if qs.count() < n or product.title in dc:
            continue
        date_min_val = first_item_dict(qs.aggregate(Min('purchase_date')))
        date_max_val = first_item_dict(qs.aggregate(Max('purchase_date')))
        sum_count_val = first_item_dict(qs.aggregate(Sum('count')))
        count_last = Products.objects.values('count').filter(
            title=product.title, purchase_date=date_max_val)
        count_last_val = first_item_dict_queryset(count_last)
        span_days = (date_max_val - date_min_val).days
        if span_days == 0:
            # All purchases happened on one day: daily usage is undefined.
            # The original raised ZeroDivisionError here; skip instead.
            continue
        # sum/span_days = average consumption per day
        daily_usage = sum_count_val / span_days
        if daily_usage * (date - date_max_val).days > count_last_val:
            dc[product.title] = product.title
    return dc
def write_db(excel_data):
    """Persist spreadsheet rows as Products records.

    Each row is [title, count, 'YYYY-MM-DD HH:MM:SS' purchase timestamp].
    NOTE(review): the hard-coded project_dir and in-function django.setup()
    tie this to one developer machine; confirm whether this standalone
    bootstrap is still needed when called from within the Django app.
    """
    import sys
    import os
    project_dir = r'C:\Users\Alex\PycharmProjects\mobile\mobile\mobileapp'
    sys.path.append(project_dir)
    os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
    import django
    django.setup()
    from mobileapp.models import Products
    for i in excel_data:
        product = Products()
        product.title = i[0]
        product.count = i[1]
        # Only the date part of the timestamp is stored.
        dt = datetime.strptime(i[2], '%Y-%m-%d %H:%M:%S').date()
        product.purchase_date = dt
        product.save()
def open_data(excel_file):
    """Read every cell of sheet 'Лист1' from an uploaded workbook and hand
    the rows (lists of stringified cell values) to write_db()."""
    # you may put validations here to check extension or file size
    wb = openpyxl.load_workbook(excel_file)
    # The original also fetched wb.sheetnames and wb.active but never used
    # them; only the named sheet matters here.
    worksheet = wb["Лист1"]
    excel_data = list()
    # iterating over the rows and getting value from each cell in row
    for row in worksheet.iter_rows():
        excel_data.append([str(cell.value) for cell in row])
    write_db(excel_data)
# Select the minimum or maximum of a dict, returning a dict or a list.
def min_max_dc(dc, min_max, key_item, return_type):
    """Pick the extreme entries of *dc*.

    min_max:     'min' or 'max' - which extreme to select.
    key_item:    'item' selects by value, 'key' selects by key; any other
                 value falls through to the by-value fallback below.
    return_type: 'dict' -> {key: value}, 'list' -> [key].
    """
    if min_max == 'min':
        val = min(dc.values())
    elif min_max == 'max':
        val = max(dc.values())
    if key_item == 'item':
        if return_type == 'dict':
            return {k: v for k, v in dc.items() if v == val}
        elif return_type == 'list':
            return [k for k, v in dc.items() if v == val]
    elif key_item == 'key':
        keys = list(dc.keys())
        if min_max == 'min' and return_type == 'list':
            return [min(keys)]
        elif min_max == 'max' and return_type == 'list':
            return [max(keys)]
        elif min_max == 'min' and return_type == 'dict':
            return {min(keys): dc[min(keys)]}
        elif min_max == 'max' and return_type == 'dict':
            return {max(keys): dc[max(keys)]}
    # Fallback: same by-value selection as 'item'.
    # Bug fix: the original called dict.iteritems() here, which no longer
    # exists in Python 3 and raised AttributeError.
    if return_type == 'dict':
        return {k: v for k, v in dc.items() if v == val}
    elif return_type == 'list':
        return [k for k, v in dc.items() if v == val]
def min_date():
    """Return a purchase date from the Products table, or None when empty.

    NOTE(review): despite the name, this asks min_max_dc for 'max', i.e. it
    returns the LATEST purchase date - confirm which one callers expect.
    """
    if Products.objects.count() > 0:
        ar_purchase_date = []
        product = Products.objects.all()
        for _ in product:
            ar_purchase_date.append(_.purchase_date)
        dc_purchase_date = Counter(ar_purchase_date)
        _date = min_max_dc(dc_purchase_date, 'max', 'key', 'list')
        return _date[0]
    else:
        return None
def prediction_main():
    """Work-in-progress prediction driver.

    Tallies product titles and purchase dates, then computes the span in days
    between the first and last purchase.  Currently only prints that span and
    returns None; dc_title / min-max selection by title is not yet used.
    """
    # n - required number of repeat purchases for a product to qualify
    n = 4
    ar_title, ar_purchase_date = [], []
    product = Products.objects.all()
    for _ in product:
        ar_title.append(_.title)
        ar_purchase_date.append(_.purchase_date)
    dc_title = Counter(ar_title)
    dc_purchase_date = Counter(ar_purchase_date)
    min_date = min_max_dc(dc_purchase_date, 'min', 'item', 'list')
    max_date = min_max_dc(dc_purchase_date, 'max', 'item', 'list')
    min_date, max_date = min_date[0], max_date[0]
    delta_day = (max_date - min_date).days
    print(delta_day)
    # for key, item in dc_title.items():
    #     if item > n:
    #         print(key, item)
    return None
|
import dash_bootstrap_components as dbc
from dash import Input, Output, html
# Static layout: a ListGroup demonstrating internal/external/disabled links
# plus a clickable button item, and a paragraph echoing the click count.
list_group = html.Div(
    [
        dbc.ListGroup(
            [
                dbc.ListGroupItem(
                    "Internal link", href="/l/components/list_group"
                ),
                dbc.ListGroupItem("External link", href="https://google.com"),
                dbc.ListGroupItem(
                    "Disabled link", href="https://google.com", disabled=True
                ),
                dbc.ListGroupItem(
                    "Button", id="button-item", n_clicks=0, action=True
                ),
            ]
        ),
        html.P(id="counter"),
    ]
)
# NOTE(review): `app` is not defined in this file - this snippet assumes a
# surrounding Dash application object (e.g. app = dash.Dash(...)); confirm.
@app.callback(
    Output("counter", "children"), [Input("button-item", "n_clicks")]
)
def count_clicks(n):
    """Echo how many times the 'Button' list-group item has been clicked."""
    return f"Button clicked {n} times"
|
# tf.constant_initializer()
# tf.zeros_initializer()
# tf.random_uniform_initializer()
# tf.random_normal_initializer()
import tensorflow as tf
import matplotlib.pyplot as plt
import random
tf.set_random_seed(777)  # graph-level seed for reproducibility (TF 1.x API)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
## (translated) Write the code: x, y, w, b, hypothesis, cost, train
# Inputs: flattened 28x28 grayscale images and one-hot digit labels.
x = tf.placeholder("float", [None, 28*28])
y = tf.placeholder("float", [None, 10])
nb_classes = 10
# 3-layer MLP: 784 -> 256 -> 64 -> 10, Xavier init, ReLU activations.
w1 = tf.get_variable("W1", shape=[784, 256], initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([256]), name='bias1')
layer1 = tf.nn.relu(tf.matmul(x, w1) + b1)
# layer1 = tf.nn.dropout(layer1, keep_prob = 0.5)
w2 = tf.get_variable("W2", shape=[256, 64], initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([64]), name='bias2')
layer2 = tf.nn.relu(tf.matmul(layer1, w2) + b2)
# layer2 = tf.nn.dropout(layer2, keep_prob = 0.5)
w3= tf.get_variable("W3", shape=[64, nb_classes], initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([nb_classes]), name='bias3')
hypothesis = tf.nn.softmax(tf.matmul(layer2, w3) + b3)
# Cross-entropy loss; learning rate hand-tuned (accuracy log below).
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(hypothesis), axis = 1))
train = tf.train.AdamOptimizer(learning_rate=0.00027, beta1=0.9, beta2=0.999).minimize(cost)
#.0001 = 71
#.00005 = 51
#.0002 = 82
#.0003 = 0.098
#.00025 = 0.8434
#.00027 = 0.8485
is_correct = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
num_epochs = 15
batch_size = 100
num_iterations = int(mnist.train.num_examples / batch_size)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Mini-batch training; avg_cost accumulates the mean batch cost per epoch.
    for epoch in range(num_epochs):
        avg_cost = 0
        for i in range(num_iterations):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _, cost_val = sess.run([train, cost], feed_dict = {x: batch_xs, y:batch_ys})
            avg_cost += cost_val / num_iterations
        print("Epoch: {:04d}, Cost: {:.9f}".format(epoch + 1, avg_cost))
    print("Learning finished")
    print(
        "Accuracy: ",
        accuracy.eval(
            session=sess, feed_dict={x:mnist.test.images, y:mnist.test.labels}
        ),
    )
    # Spot check: classify one random test image and show it.
    r = random.randint(0, mnist.test.num_examples - 1)
    print("Label: ", sess.run(tf.argmax(mnist.test.labels[r : r + 1], 1)))
    print(
        "Prediction: ",
        sess.run(tf.argmax(hypothesis, 1), feed_dict={x: mnist.test.images[r : r + 1]}),
    )
    plt.imshow(
        mnist.test.images[r : r + 1].reshape(28, 28),
        cmap='Greys',
        interpolation='nearest',
    )
    plt.show()
|
#!/usr/bin/env python
# ----------------------------------------------------------
# aircraft_data MODULE for GlassCockpit procject RJGlass
# ----------------------------------------------------------
# This module handels and stores all aircraft data, and communicated via Simconnect to FSX
#
# Copyright 2007 Michael LaBrie
#
# This file is part of RJGlass.
#
# RJGlass is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# RJGlass is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# test mod for CVS
# ---------------------------------------------------------------
import time
class airspeed_c(object):
    """Airspeed-tape model: holds IAS/Mach/GS values read from the simulator
    (via the ``variable`` registry) plus display state for the speed tape.

    NOTE(review): cycle_Vspeed_input and the V-speed helpers reference
    self.V1/VR/V2/VT and self.Vspeed_input, whose initialisation is commented
    out in __init__ - confirm whether those controls are currently disabled.
    """
    def set_disp(self, Vspeed):
        #This sets what is displayed below speed tape. (Goes blank after a few seconds)
        self.Vspeed_disp = Vspeed
    def cycle_Vspeed_input(self):
        """Rotate the knob selection through V1 -> VR -> V2 -> V1."""
        temp = self.Vspeed_input
        if temp == self.V1:
            out = self.VR
        elif temp == self.VR:
            out = self.V2
        else:
            out = self.V1
        self.Vspeed_input = out
        self.set_disp(out)
    def inc_Vspeed_input(self):
        self.Vspeed_input.inc()
        self.set_disp(self.Vspeed_input)
    def dec_Vspeed_input(self):
        self.Vspeed_input.dec()
        self.set_disp(self.Vspeed_input)
    def visible_Vspeed_input(self):
        self.Vspeed_input.onoff()
        self.set_disp(self.Vspeed_input)
    def inc_VT(self):
        self.VT.inc()
        self.set_disp(self.VT)
    def dec_VT(self):
        self.VT.dec()
        self.set_disp(self.VT)
    def visible_VT(self):
        self.VT.onoff()
        self.set_disp(self.VT)
    def set_bug(self, value):
        # Move the airspeed bug marker on the tape.
        self.bug.set_value(value)
    def __init__(self,variable):
        self.IAS = variable.byName('IAS').data #self.IAS.value is value read from FSX
        self.IAS_guage = 40.0
        self.IAS_diff = 10.0 #Pink Line to show accel or decell
        self.trend_visible = False #Speed trend turns on H> 20ft, turns off speed <105kts
        self.IAS_prev = self.IAS.value
        self.IAS_list = [0] * 40 # This is used to compute IAS accelertation for airspped tape
        self.TAS = 0.0
        self.Mach = variable.byName('Mach').data
        self.Mach.active = False
        self.GS = variable.byName('GS').data
        #self.V1 = V_speed_c("V1 ", 135)
        #self.V2 = V_speed_c("V2 ", 144)
        #self.VR = V_speed_c("VR ", 137)
        #self.VT = V_speed_c("VT ", 110)
        #self.Vspeed_input = self.V1 #Currently selected one to be changed by knob
        #self.Vspeed_disp = self.V1 #The one that is displayed below speed tape
        #self.Vspeed_disp_timer = 0 #Used for delay of timer
        self.bug = variable.byName('IAS_Bug').data
        self.maxspeed = 260 #Never Exceed speed Red line
        self.minspeed = 220 #Stall speed
        self.lowspeed = 140
    def test(self):
        # Test hook: creep the gauge to exercise the tape drawing.
        self.IAS_guage += 0.2
    def comp(self):
        """Per-frame update: clamp the displayed gauge value at 40 kts."""
        #Comput the data for airspeed
        #self.IAS.
        if self.IAS.value <=40:
            self.IAS_guage = 40
        else:
            self.IAS_guage = self.IAS.value
    def comp_IAS_accel(self, airspeed, frame_rate):
        """Forecast IAS 10 seconds ahead for the trend (pink) line.

        Keeps a rolling window of the last 40 per-frame IAS deltas and scales
        their mean by frame_rate to get a per-10-second projection.
        NOTE(review): frame_rate == 0 would divide by zero - confirm callers
        guarantee a positive frame rate.
        """
        #Computes forcastes IAS in 10 seconds for the IAS tape IAS_diff
        #Find difference between new_IAS and last reading
        diff = self.IAS.value - self.IAS_prev
        self.IAS_prev = self.IAS.value
        #Add diff reading to list pop oldest one
        self.IAS_list.append(diff)
        self.IAS_list.pop(0)
        a= self.IAS_list
        self.IAS_diff = (sum(a) / len(a)) / frame_rate * 10
class data(object):
    """Top-level aircraft data container: owns the instrument sub-models
    (currently only airspeed) and drives their per-frame computation."""
    def __init__(self, variable):
        self.variable = variable
        self.airspeed = airspeed_c(variable)
    def comp(self):
        """Per-frame computation pass over all instrument models."""
        #Client is true, if RJGlass is in client or test mode.
        #global_time = globaltime.value
        #Computer delta_t = Time between last comp and this one
        self.airspeed.comp()
    def comp_second(self):
        # Hook for once-per-second work; nothing to do yet.
        pass
    def test(self):
        # Test hook: drive the IAS upward to animate the tape.
        #time.sleep(0.01)
        self.airspeed.IAS.value += 1
|
# Read three lengths from one space-separated input line.
values = input().split(' ')
a, b, c = (float(v) for v in values)
# Triangle inequality |b - c| < a < b + c: the sides form a triangle.
if abs(b - c) < a < b + c:
    perimeter = a + b + c
    print('Perimetro = {:.1f}'.format(perimeter))
else:
    # Otherwise treat a and b as parallel sides and c as the height of a trapezoid.
    area = (a + b) * c / 2
    print('Area = {:.1f}'.format(area))
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import sys

import pyhepmc_ng
import ROOT
import tqdm
ROOT.gROOT.SetBatch(True)
from pyjetty.mputils import *
# jit improves execution time by 18% - tested with jetty pythia8 events
# BUT produces invalid root file
# from numba import jit
# @jit
def fill_event(run_number, ev_id, event_hepmc, tw_e, tw_p, pdg):
    """Write one row to the event tree and one row per final-state charged
    particle to the particle tree.

    Final state here means status == 1 with no end vertex; charge is looked
    up in ROOT's TDatabasePDG.  z_vtx_reco / is_ev_rej are filled with 0 to
    mimic the ALICE data layout.
    """
    tw_e.fill_branch('run_number', run_number)
    tw_e.fill_branch('ev_id', ev_id)
    tw_e.fill_branch('z_vtx_reco', 0)
    tw_e.fill_branch('is_ev_rej', 0)
    tw_e.fill_tree()
    for part in event_hepmc.particles:
        if part.status == 1 and not part.end_vertex and pdg.GetParticle(part.pid).Charge() != 0:
            # print(pdg.GetParticle(p.pid).GetName())
            # tlv = ROOT.TLorentzVector()
            # tlv.SetPxPyPzE(p.momentum.px, p.momentum.py, p.momentum.pz, p.momentum.e)
            tw_p.fill_branch('run_number', run_number)
            tw_p.fill_branch('ev_id', ev_id)
            tw_p.fill_branch('ParticlePt', part.momentum.pt())
            tw_p.fill_branch('ParticleEta', part.momentum.eta())
            tw_p.fill_branch('ParticlePhi', part.momentum.phi())
            tw_p.fill_branch('ParticlePID', part.pid)
            tw_p.fill_tree()
def main():
    """Convert a HepMC2/3 event file into the ALICE PWGHF ntuple layout.

    Writes <output>.root containing PWGHF_TreeCreator/{tree_event_char,
    tree_Particle or tree_Particle_gen}.
    """
    parser = argparse.ArgumentParser(description='hepmc to ALICE Ntuple format', prog=os.path.basename(__file__))
    parser.add_argument('-i', '--input', help='input file', default='', type=str, required=True)
    parser.add_argument('-o', '--output', help='output root file', default='', type=str, required=True)
    parser.add_argument('--as-data', help='write as data - tree naming convention', action='store_true', default=False)
    parser.add_argument('--hepmc', help='what format 2 or 3', default=2, type=int)
    parser.add_argument('--nev', help='number of events', default=-1, type=int)
    args = parser.parse_args()
    if args.hepmc == 3:
        input_hepmc = pyhepmc_ng.ReaderAscii(args.input)
    elif args.hepmc == 2:
        input_hepmc = pyhepmc_ng.ReaderAsciiHepMC2(args.input)
    else:
        # The original left input_hepmc undefined for any other value and
        # crashed later with a NameError.
        print("[error] unsupported --hepmc value: {}".format(args.hepmc))
        sys.exit(1)
    if input_hepmc.failed():
        print("[error] unable to read from {}".format(args.input))
        sys.exit(1)  # needs 'import sys' (missing from the original file header)
    outf = ROOT.TFile(args.output, 'recreate')
    outf.cd()
    tdf = ROOT.TDirectoryFile('PWGHF_TreeCreator', 'PWGHF_TreeCreator')
    tdf.cd()
    if args.as_data:
        t_p = ROOT.TTree('tree_Particle', 'tree_Particle')
    else:
        t_p = ROOT.TTree('tree_Particle_gen', 'tree_Particle_gen')
    t_e = ROOT.TTree('tree_event_char', 'tree_event_char')
    tw_p = RTreeWriter(tree=t_p)
    tw_e = RTreeWriter(tree=t_e)
    # run number will be a double - file size in MB
    run_number = os.path.getsize(args.input) / 1.e6
    ev_id = 0
    # pyhepmc_ng does not provide a particle data table; use ROOT's instead.
    pdg = ROOT.TDatabasePDG()
    event_hepmc = pyhepmc_ng.GenEvent()
    pbar = tqdm.tqdm(range(args.nev)) if args.nev > 0 else tqdm.tqdm()
    while not input_hepmc.failed():
        input_hepmc.read_event(event_hepmc)
        if input_hepmc.failed():
            break
        fill_event(run_number, ev_id, event_hepmc, tw_e, tw_p, pdg)
        ev_id = ev_id + 1
        pbar.update()
        if args.nev > 0 and ev_id > args.nev:
            break
    outf.Write()
    outf.Close()
import json
from pathlib import Path
from utils.logger import get_logger
from utils.ipa_utils import get_ipa, IPAError
# Sequence delimiter tokens and their fixed vocabulary ids.
SOS = '<sos>'
EOS = '<eos>'
SOS_ID = 0
EOS_ID = 1
# Phones that signal an upstream transliteration problem; encoding aborts on these.
SUSPICIOUS_PHONES = ['??', 'p̪f', 'əl']
class IPAEncoder:
    """Maps IPA phone sequences to integer ids, growing and persisting its
    vocabulary (data_dir/vocab.json) as new phones are seen."""
    def __init__(self, data_dir, logger=None):
        self.vocab_path = Path(data_dir) / 'vocab.json'
        self.logger = logger if logger is not None else get_logger('root')
        # Seed with the delimiter tokens, then overwrite from disk if present.
        self.vocab = {SOS: SOS_ID, EOS: EOS_ID}
        self.reverse_vocab = None
        self.load_vocab()
    def load_vocab(self):
        """Replace the in-memory vocabulary with the on-disk one, when present."""
        if not Path(self.vocab_path).exists():
            self.logger.debug(f'No vocabulary found at path {self.vocab_path}')
            return
        self.logger.info(f'Reading vocabulary from {self.vocab_path}')
        with open(self.vocab_path) as fid:
            self.vocab = json.load(fid)
    def save_vocab(self):
        """Write the current vocabulary to disk as UTF-8 JSON."""
        self.logger.info(f'Saving vocabulary to {self.vocab_path}')
        with open(self.vocab_path, 'w', encoding='utf8') as fid:
            json.dump(self.vocab, fid, ensure_ascii=False)
    def encode(self, s, lang='en', plain_text=False,
               skip_lang_tags=False, **kwargs):
        """Encode *s* as a list of phone ids, wrapped in SOS/[lang]/EOS tags.

        New phones are assigned fresh ids on the fly; suspicious phones
        raise IPAError.
        """
        symbols = list(s) if plain_text else get_ipa(s, lang, **kwargs)
        prefix = [SOS] if skip_lang_tags else [SOS, f'<{lang}>']
        encoded = []
        for phone in prefix + symbols + [EOS]:
            if not phone:
                continue  # drop empty strings
            if phone in SUSPICIOUS_PHONES:
                raise IPAError(f'Suspicious phoneme found {phone}')
            if phone not in self.vocab:
                self.vocab[phone] = len(self.vocab)
            encoded.append(self.vocab[phone])
        return encoded
    def get_reverse_vocab(self):
        """Lazily build and return the id -> phone mapping."""
        if self.reverse_vocab is None and self.vocab is not None:
            self.reverse_vocab = {idx: phone for phone, idx in self.vocab.items()}
        return self.reverse_vocab
    def decode(self, ids):
        """Translate ids back to phones, stopping after the first EOS."""
        reverse_vocab = self.get_reverse_vocab()
        phones = []
        for phone_id in ids:
            phones.append(reverse_vocab[phone_id])
            if phone_id == EOS_ID:
                break
        return phones
|
from .base import FunctionalTest
from selenium.webdriver.common.keys import Keys
class HomePageTest(FunctionalTest):
    """Functional (Selenium) tests for home-page navigation.

    NOTE(review): uses the Selenium 3 ``find_element_by_*`` helpers, which
    were removed in Selenium 4 - confirm the pinned selenium version.
    """
    def test_home_page_displays_menu(self):
        """The home page title and the three main menu links are present."""
        # Marfalo the vet tech is sick of calculating dosages by hand and is too lazy to use a spreadsheet,
        # so he decides to use a web app.
        # Marfalo finds VeTeCalc on the search engine of his choice and clicks on the link.
        self.browser.get(self.server_url)
        self.assertIn('VetCalc', self.browser.title)
        # Marfalo sees that there is a page to calculate dosages, get information about commonly used drugs,
        # and a page to create a treatment sheet
        # These should not raise #
        self.browser.find_element_by_link_text('Calculate Dosages')
        self.browser.find_element_by_link_text('Rx Info')
        self.browser.find_element_by_link_text('Create A Treatment Sheet')
    def test_navbar_updates_active_class(self):
        """Navigating marks the corresponding navbar item with class 'active'."""
        # Marfalo clicks on a link and sees that the navigation bar updates to show what page he is on
        self.browser.get(self.server_url)
        self.browser.find_element_by_link_text('Calc').send_keys(Keys.RETURN)
        active = self.browser.find_element_by_xpath("//li[@class='active']/a[1]")
        self.assertIn('Calc', active.text)
        # Marfalo is done calculating dosages and returns to the home page
        # When he does, the navbar updates to show that he is back on the home page
        navbar_home = self.browser.find_element_by_link_text('Home')
        navbar_home.send_keys(Keys.RETURN)
        active = self.browser.find_element_by_xpath("//li[@class='active']/a[1]")
        self.assertIn('Home', active.text)
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from dataclasses import FrozenInstanceError
import numpy as np
import pytest
from numpy.polynomial import Polynomial
from pulser import Pulse
from pulser.json.coders import PulserDecoder, PulserEncoder
from pulser.parametrized import ParamObj, Variable
from pulser.waveforms import BlackmanWaveform, CompositeWaveform
@pytest.fixture
def a():
    """Unassigned scalar float variable."""
    return Variable("a", float)
@pytest.fixture
def b():
    """Size-2 int variable; the float inputs are truncated toward zero
    (test_var checks build() == [-1, 1])."""
    b = Variable("b", int, size=2)
    b._assign([-1.5, 1.5])
    return b
@pytest.fixture
def d():
    """Size-1 float variable assigned [0.5]."""
    d = Variable("d", float, size=1)
    d._assign([0.5])
    return d
@pytest.fixture
def t():
    """Unassigned scalar int variable (used as a waveform duration)."""
    return Variable("t", int)
@pytest.fixture
def bwf(t, a):
    """Parametrized Blackman waveform built from the t and a variables."""
    return BlackmanWaveform(t[0], a[0])
def test_var(a, b):
    """Variable construction validation, assignment/build behaviour, and
    indexing errors."""
    # Constructor argument validation.
    with pytest.raises(TypeError, match="'name' has to be of type 'str'"):
        Variable(1, dtype=int)
    with pytest.raises(TypeError, match="Invalid data type"):
        Variable("x", dtype=list, size=4)
    with pytest.raises(TypeError, match="'size' is not of type 'int'"):
        Variable("x", dtype=float, size=(2, 2))
    with pytest.raises(ValueError, match="size 1 or larger"):
        Variable("x", dtype=int, size=0)
    x = Variable("x", dtype=float)
    assert x.value is None
    assert x._count == 0
    # Variables are frozen dataclasses: direct mutation is forbidden.
    with pytest.raises(FrozenInstanceError):
        x.value = 0.5
    assert a.variables == {"a": a}
    assert b.size == 2
    with pytest.raises(ValueError, match="to variable of size 2"):
        b._assign([1, 4, 5])
    # Ints truncate toward zero and build() counts invocations.
    assert np.all(b.build() == np.array([-1, 1]))
    assert b._count == 1
    with pytest.raises(ValueError, match="string to float"):
        a._assign("something")
    with pytest.raises(ValueError, match="No value"):
        a.build()
    var_ = Variable("var_", int, size=2)
    var_._assign([1, 2])
    assert np.all(var_.build() == np.array([1, 2]))
    # Indexing accepts ints/slices only, within bounds.
    with pytest.raises(TypeError, match="Invalid key type"):
        b[[0, 1]]
    with pytest.raises(IndexError):
        b[2]
def test_varitem(a, b, d):
    """Indexing/slicing of Variables and the resulting item objects."""
    a0 = a[0]
    b1 = b[1]
    b01 = b[100::-1]
    d0 = d[0]
    assert b01.variables == {"b": b}
    # str() of an item reproduces the indexing expression verbatim.
    assert str(a0) == "a[0]"
    assert str(b1) == "b[1]"
    assert str(b01) == "b[100::-1]"
    assert str(d0) == "d[0]"
    assert b1.build() == 1
    # Out-of-range slice bounds are clipped like regular sequence slicing.
    assert np.all(b01.build() == np.array([1, -1]))
    assert d0.build() == 0.5
    # Items are frozen as well.
    with pytest.raises(FrozenInstanceError):
        b1.key = 0
def test_paramobj(bwf, t, a, b):
    """Variable propagation through ParamObj and building of results."""
    assert set(bwf.variables.keys()) == {"t", "a"}
    pulse = Pulse.ConstantDetuning(bwf, b[0], b[1])
    # The pulse accumulates the variables of all of its arguments.
    assert set(pulse.variables.keys()) == {"t", "a", "b"}
    assert str(bwf) == "BlackmanWaveform(t[0], a[0])"
    assert str(pulse) == f"Pulse.ConstantDetuning({str(bwf)}, b[0], b[1])"
    pulse2 = Pulse(bwf, bwf, 1)
    assert str(pulse2) == f"Pulse({str(bwf)}, {str(bwf)}, 1)"
    # Unknown (private) attributes are not forwarded by the parametrized proxy.
    with pytest.raises(AttributeError):
        bwf._duration
    cwf = CompositeWaveform(bwf, bwf)
    t._assign(1000)
    a._assign(np.pi)
    assert len(cwf.build().samples) == len(bwf.build().samples) * 2
    assert bwf.build().duration == 1000
    # Calling a method on a parametrized object is allowed but warns.
    param_poly = ParamObj(Polynomial, b)
    with pytest.warns(
        UserWarning, match="Calls to methods of parametrized objects"
    ):
        origin = param_poly(0)
    b._assign((0, 1))
    assert origin.build() == 0.0
def test_opsupport(a, b):
    """Arithmetic, numpy-ufunc and JSON serialization support on
    parametrized objects (Variables and the expressions built from them)."""
    a._assign(-2.0)
    # Binary operators, including reflected and in-place-free chaining.
    u = 5 + a
    u = b - u  # u = [-4, -2]
    u = u / 2
    u = 8 * u  # u = [-16, -8]
    u = -u // 3  # u = [5, 2]
    assert np.all(u.build() == [5.0, 2.0])
    v = a**a
    assert v.build() == 0.25
    v = abs(-v * 8)
    assert v.build() == 2.0
    v = 3 % v
    assert v.build() == 1.0
    v = -v
    assert v.build() == -1.0
    # Scalar right-hand and left-hand operands.
    x = a + 11
    assert x.build() == 9
    x = x % 6
    assert x.build() == 3
    x = 2 - x
    assert x.build() == -1
    x = 4 / x
    assert x.build() == -4
    x = 9 // x
    assert x.build() == -3
    x = 2**x
    assert x.build() == 0.125
    x = np.log2(x)
    assert x.build() == -3.0
    # Trigonometric functions
    pi = -a * np.pi / 2
    x = np.sin(pi)
    np.testing.assert_almost_equal(x.build(), 0.0)
    x = np.cos(pi)
    np.testing.assert_almost_equal(x.build(), -1.0)
    x = np.tan(pi / 4)
    np.testing.assert_almost_equal(x.build(), 1.0)
    # Other transcendentals
    y = np.exp(b)
    np.testing.assert_almost_equal(y.build(), [1 / np.e, np.e])
    y = np.log(y)
    np.testing.assert_almost_equal(y.build(), b.build())
    y_ = y + 0.4  # y_ = [-0.6, 1.4]
    y = np.round(y_, 1)
    np.testing.assert_array_equal(y.build(), np.round(y_.build(), 1))
    np.testing.assert_array_equal(round(y_).build(), np.round(y_).build())
    np.testing.assert_array_equal(round(y_, 1).build(), y.build())
    y = round(y)
    np.testing.assert_array_equal(y.build(), [-1.0, 1.0])
    y = np.floor(y + 0.1)
    np.testing.assert_array_equal(y.build(), [-1.0, 1.0])
    y = np.ceil(y + 0.1)
    np.testing.assert_array_equal(y.build(), [0.0, 2.0])
    y = np.sqrt((y - 1) ** 2)
    np.testing.assert_array_equal(y.build(), [1.0, 1.0])
    # Test serialization support for operations
    def encode_decode(obj):
        # Round-trip through the Pulser JSON coders.
        return json.loads(
            json.dumps(obj, cls=PulserEncoder), cls=PulserDecoder
        )
    # Will raise a SerializationError if they fail
    u2 = encode_decode(u)
    assert set(u2.variables) == {"a", "b"}
    u2.variables["a"]._assign(a.value)
    u2.variables["b"]._assign(b.value)
    np.testing.assert_array_equal(u2.build(), u.build())
    v2 = encode_decode(v)
    assert list(v2.variables) == ["a"]
    v2.variables["a"]._assign(a.value)
    assert v2.build() == v.build()
    x2 = encode_decode(x)
    assert list(x2.variables) == ["a"]
    x2.variables["a"]._assign(a.value)
    assert x2.build() == x.build()
    y2 = encode_decode(y)
    assert list(y2.variables) == ["b"]
    y2.variables["b"]._assign(b.value)
    np.testing.assert_array_equal(y2.build(), y.build())
|
from random import random
import numpy as np
import math
import matplotlib.pyplot as plt
# Load the training set: each whitespace-separated row is (input, target).
# NOTE(review): the file handle is never closed; consider a with-statement.
trainingdata = 'hw3trainingdata.csv'
raw_data = open(trainingdata, 'rt', encoding="utf-8")
data = np.genfromtxt(raw_data, delimiter = ' ', dtype = 'float')
def initialize_weights(h):
    """Return an h x 2 matrix of uniform-random [0, 1) weights.

    Each of the h hidden neurons gets two input weights
    (index 0 = bias, index 1 = data input).
    """
    num_inputs = 2
    return [[random() for _ in range(num_inputs)] for _ in range(h)]
def activation(f):
    """Logistic sigmoid 1 / (1 + e^-f), computed in a numerically
    stable way.

    The original form ``1 / (1 + math.exp(-f))`` raises OverflowError for
    large negative ``f`` (e.g. f = -1000 makes exp overflow); branching on
    the sign keeps the exponent non-positive in both cases.
    """
    if f >= 0:
        return 1.0 / (1.0 + math.exp(-f))
    ef = math.exp(f)  # f < 0, so ef is in (0, 1) and cannot overflow
    return ef / (1.0 + ef)
def forward_pass_hidden(n, k):
    """Forward pass from input to hidden layer.

    :param n: input->hidden weight matrix, one row per hidden neuron;
              column 0 is the bias weight, column 1 the input weight
    :param k: number of samples to process
    :return:  k x len(n) matrix of hidden activations

    Reads the module-level ``data`` array (column 0 = network input).
    """
    n_hidden = len(n)
    n_weights = len(n[0])
    outputs = [[0 for _ in range(n_hidden)] for _ in range(k)]
    for sample in range(k):
        for neuron in range(n_hidden):
            # Weighted sum: bias weight times 1, then the input weight
            # times the sample's input value.
            total = 1 * n[neuron][0]
            for w in range(1, n_weights):
                total = total + data[sample][0] * n[neuron][w]
            outputs[sample][neuron] = activation(total)
    return outputs
# Network size configuration.
hidden_layer = 20
# hidden_layer = int(input("Number of Hidden Neurons: "))  # interactive variant
r = hidden_layer + 1  # hidden->output weights, +1 for the output bias
data_length = len(data)
# Random initial hidden->output weights (index 0 is the bias weight).
weights_hidden_output = [random() for _ in range(r)]
print("Initial Weights_IJ: ", weights_hidden_output)
def forward_pass_output(n):
    """Forward pass from hidden layer to the single output neuron.

    :param n: matrix of hidden activations, one row per sample
    :return:  list with the sigmoid output for every sample

    Reads the module-level ``hidden_layer`` count, the global
    ``weights_hidden_output`` vector (index 0 = bias) and ``activation``.
    """
    n_samples = len(n)
    n_weights = hidden_layer + 1
    outputs = [0 for _ in range(n_samples)]
    for sample in range(n_samples):
        total = weights_hidden_output[0]  # bias contribution (times 1)
        for j in range(1, n_weights):
            total = total + n[sample][j - 1] * weights_hidden_output[j]
        outputs[sample] = activation(total)
    return outputs
W_I = initialize_weights(hidden_layer)
print("Initial Weights_JK: ", W_I)
# Per-sample squared errors, output deltas, hidden deltas and activations.
J = [0 for i in range(data_length)]
Total_Error = 4  # any value above the threshold, so the training loop starts
Delta_I = [0 for i in range(data_length)]
Delta_J = [[0 for i in range(hidden_layer)] for j in range(data_length)]
Y = [0 for i in range(data_length)]
H = [[0 for i in range(hidden_layer)] for j in range(data_length)]
# Initial forward pass: hidden activations H and predictions Y.
H = forward_pass_hidden(W_I, data_length)
print("H_J: ", H)
Y = forward_pass_output(H)
print("Y_Hat: ", Y)
# Output-layer delta: (target - prediction) * sigmoid'(net).
for k in range(data_length):
    Delta_I[k] = (data[k][1] - Y[k]) * (Y[k]) * (1 - Y[k])
print("Delta_I: ", Delta_I)
# Hidden-layer delta, back-propagated through the output weights.
# (The original assigned the identical expression twice in a row; the
# redundant duplicate statement was removed.)
for k in range(data_length):
    for i in range(hidden_layer):
        Delta_J[k][i] = (Delta_I[k] * weights_hidden_output[i + 1]) * (H[k][i]) * (1 - H[k][i])
print("Delta_J: ", Delta_J)
Loss = []
E = []
epoch = 0
threshold = 0.05
# Online (per-sample) backpropagation until the summed squared error over
# one epoch drops below the threshold.
while(Total_Error > threshold):
    Total_Error = 0
    for k in range(data_length):
        # Updating Weights
        step = 0.1  # learning rate
        for i in range(hidden_layer + 1):
            if(i == 0):
                # Index 0 is the output neuron's bias weight.
                weights_hidden_output[i] = weights_hidden_output[i] + (step)*(Delta_I[k])
            else:
                weights_hidden_output[i] = weights_hidden_output[i] + (step)*(Delta_I[k])*((H[k][i-1]))
        for i in range(hidden_layer):
            for j in range(2):
                # BUG FIX: the hidden delta belongs to hidden neuron i,
                # not to input index j. The original read Delta_J[k][j],
                # which only ever used the first two hidden deltas for
                # every row of W_I.
                if(j == 0):
                    W_I[i][j] = W_I[i][j] + step*Delta_J[k][i]*1
                else:
                    W_I[i][j] = W_I[i][j] + step*Delta_J[k][i]*data[k][0]
        # Computing Total Error
        J[k] = (Y[k] - data[k][1]) ** 2
        Total_Error += J[k]
        # Forward Pass for Input to Hidden
        H = forward_pass_hidden(W_I, data_length)
        # Forward Pass for Hidden to Output
        Y = forward_pass_output(H)
        # Delta_I Computation
        Delta_I[k] = (data[k][1] - Y[k]) * (Y[k]) * (1 - Y[k])
        # Delta_J Computation (duplicate assignment removed)
        for i in range(hidden_layer):
            Delta_J[k][i] = (Delta_I[k] * weights_hidden_output[i + 1]) * (H[k][i]) * (1 - H[k][i])
    epoch += 1
    E.append(epoch)
    Loss.append(Total_Error)
    print("Total Error: ", Total_Error)
print("Final Total Error: ", Total_Error)
print("Number of Epochs: ", epoch)
print("W_JK: ", W_I)
print("W_IJ: ", weights_hidden_output)
plt.plot(E, Loss)
plt.xlabel("Number of Epochs")
plt.ylabel("Loss")
plt.show()
def prediction(h, w1, w2):
    """Evaluate the trained network on the held-out test set and print
    the total squared error.

    :param h:  number of hidden neurons (kept for interface compatibility)
    :param w1: input->hidden weight matrix
    :param w2: hidden->output weight vector (kept for interface
               compatibility; the forward passes read the module-level
               weights directly)
    """
    testingdata = "hw3testingdata.csv"
    # BUG FIX: the original opened the *training* file here, so the
    # network was never evaluated on the test set at all.
    raw_data = open(testingdata, 'rt', encoding="utf-8")
    testing_data = np.genfromtxt(raw_data, delimiter=' ', dtype='float')
    test_data_length = len(testing_data)
    test_J = [0 for i in range(test_data_length)]
    Testing_Error = 0
    # One forward pass over the whole set, hoisted out of the loop (the
    # passes do not depend on k, so re-running them per sample was wasted
    # work).
    # NOTE(review): forward_pass_hidden reads the module-level training
    # ``data`` for its inputs; feeding it the test inputs would require
    # changing its interface — confirm intended behavior.
    test_Forward_input_hidden = forward_pass_hidden(w1, test_data_length)
    test_Forward_hidden_output = forward_pass_output(test_Forward_input_hidden)
    for k in range(test_data_length):
        # BUG FIX: compare the *test* predictions against the test
        # targets; the original used the training predictions Y[k].
        test_J[k] = (test_Forward_hidden_output[k] - testing_data[k][1]) ** 2
        Testing_Error += test_J[k]
    print("Testing_Error: ", Testing_Error)


prediction(hidden_layer, W_I, weights_hidden_output)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-24 12:38
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the ``type`` field to ``Event``.

    ``type`` is a small-integer enum distinguishing create/update/delete
    events; existing rows default to 1 ("create").
    """

    dependencies = [
        ('events', '0006_fill_object_name_and_user_for_events'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='type',
            field=models.PositiveSmallIntegerField(choices=[(1, 'create'), (2, 'update'), (3, 'delete')], default=1),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 6 16:46:24 2020
@author: MERT
"""
import nltk
import pickle
import numpy as np
import re
from nltk.corpus import stopwords
from sklearn.datasets import load_files
nltk.download("stopwords")
# Import the dataset (movie reviews in pos/neg subfolders).
reviews = load_files("txt_sentoken/")
X,y =reviews.data,reviews.target
fakeX,fakey = X,y
# load_files is fast enough for ~2000 texts, but for a dataset of e.g. a
# million documents reloading would be slow; pickle files make reloads
# cheap. (Not strictly needed here — kept as a pickle usage exercise:
# X and y could later be re-read from the pickle files instead.)
# STORING as Pickle Files
with open("X.pickles","wb") as f: # wb = write bytes
    pickle.dump(X,f)
with open("y.pickles","wb") as f:
    pickle.dump(y,f)
# X and y are now stored as pickle files; reading them back is shown below.
# NOTE(review): the files are written as "*.pickles" but the commented
# reader below opens "*.pickle" — the names do not match.
# Unpickling
#with open("X.pickle","rb") as f:
#    X = pickle.load(f)
#
#with open("y.pickle","rb") as f:
#    y = pickle.load(f)
# Creating The Corpus: normalize every review to plain lower-case words.
corpus = []
for i in range (0,len(X)):
    review = re.sub(r"\W"," ",str(X[i]))  # replace non-word characters
    review = review.lower()
    review = re.sub(r"\s+[a-z]\s+"," ",review)  # drop single-letter tokens
    review = re.sub(r"^[a-z]\s+"," ",review)  # ...also at the string start
    review = re.sub(r"\s+"," ",review)  # collapse runs of whitespace
    corpus.append(review)
# Build the bag-of-words histogram with CountVectorizer, then convert the
# raw counts to tf-idf weights.
from sklearn.feature_extraction.text import CountVectorizer
cv=CountVectorizer(max_features=2000,min_df=3 , max_df=0.6, stop_words=stopwords.words("english"))
X= cv.fit_transform(corpus).toarray()
from sklearn.feature_extraction.text import TfidfTransformer
transformer = TfidfTransformer()
X = transformer.fit_transform(X).toarray()
print(X.shape)
# NOTE(review): TfidfVectorizer below recomputes X from the corpus in one
# step (count + tf-idf combined), overwriting the two-step result above.
from sklearn.feature_extraction.text import TfidfVectorizer
cv=TfidfVectorizer(max_features=2000,min_df=3 , max_df=0.6, stop_words=stopwords.words("english"))
X= cv.fit_transform(corpus).toarray()
# 70/30 train/test split with a fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
X_train ,X_test , y_train , y_test = train_test_split(X,y,test_size = 0.3, random_state = 0)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train,y_train)
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix
# NOTE(review): confusion_matrix expects (y_true, y_pred); the arguments
# here are reversed, which transposes the off-diagonal counts.
cm = confusion_matrix(y_pred,y_test)
print("From Logistic Regression:")
print(cm)
from sklearn.metrics import accuracy_score, precision_score, recall_score
print("Accuracy score: ", accuracy_score(y_test, y_pred))
print("Precision score: ", precision_score(y_test, y_pred))
print("Recall score: ", recall_score(y_test, y_pred))
# A Naive Bayes model as well, though it reaches a lower accuracy here.
from sklearn.naive_bayes import MultinomialNB
MNB = MultinomialNB()
MNB.fit(X_train , y_train)
y_pred2 =MNB.predict(X_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_pred2 , y_test)
print("from naive bayes")
print(cm)
#
#from sklearn.metrics import accuracy_score, precision_score, recall_score
#print("Accuracy score: ", accuracy_score(y_test, y_pred2))
#print("Precision score: ", precision_score(y_test, y_pred2))
#print("Recall score: ", recall_score(y_test, y_pred2))
# Visualize etmek için bunu
#from sklearn.metrics import confusion_matrix
#import matplotlib.pyplot as plt
#import seaborn as sns
#sns.heatmap(cm, square=True, annot=True, cmap="RdBu", cbar=False,xticklabels=["mert" , "seven"], yticklabels=["esesdfsdf","sevdigim"])
#plt.xlabel("true label")
#plt.ylabel("predicted label")
#
# Below: persist the trained classifier and the fitted vectorizer so they
# can be imported elsewhere (e.g. to classify scraped Twitter data).
# Pickling The Classifier
with open ("classifier.pickle","wb") as f:
    pickle.dump(classifier,f)
# Pickling the vectorizer (maps raw text to the tf-idf feature vector)
with open ("tfidfmodel.pickle" , "wb") as f:
    pickle.dump(cv , f)
# Unpickling the classifier and vectorizer
with open ("classifier.pickle" , "rb") as f:
    clf = pickle.load(f)
with open ("tfidfmodel.pickle" , "rb") as f:
    tfidf = pickle.load(f)
print("after the pickle")
sample = ["Hello i really hate you ,you are a bad person i really say it"]
print(sample)
sample = tfidf.transform(sample).toarray()
print("calculating")
a =clf.predict(sample)
# NOTE(review): ``sample`` now holds the transformed feature array, so the
# prints below show the vector rather than the original sentence.
if a> 0.5:
    print("your Sentence is ",sample)
    print("And i classified your sentence as POSİTİVE :) ")
else :
    print("your Sentence is ",sample)
    print("And i classified your sentence as negative :( ")
#
#import matplotlib.pyplot as plt
#import numpy
#
#objects = ["Positive", "Negative"]
#y_pos = np.arange(len(objects))
#
#plt.bar(y_pos , [total_pos ,total_neg],alpha = 0.5)
#plt.xticks(y_pos,objects)
#plt.ylabel("Number")
#plt.title("Number of Positive and Negative Tweets")
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.trabajo import Trabajo # noqa: F401,E501
from swagger_server import util
class InlineResponse200(Model):
    """Swagger model wrapping an array of Trabajo objects.

    Originally produced by the swagger code generator; the shape of
    ``swagger_types`` / ``attribute_map`` is required by the generated
    (de)serialization machinery and must be kept as-is.
    """

    def __init__(self, trabajos: List[Trabajo]=None):  # noqa: E501
        """Build an InlineResponse200.

        :param trabajos: The trabajos of this InlineResponse200.  # noqa: E501
        :type trabajos: List[Trabajo]
        """
        self.swagger_types = {'trabajos': List[Trabajo]}
        self.attribute_map = {'trabajos': 'trabajos'}
        self._trabajos = trabajos

    @classmethod
    def from_dict(cls, dikt) -> 'InlineResponse200':
        """Deserialize a plain dict into an InlineResponse200.

        :param dikt: A dict.
        :type: dict
        :rtype: InlineResponse200
        """
        return util.deserialize_model(dikt, cls)

    @property
    def trabajos(self) -> List[Trabajo]:
        """The wrapped array of trabajos.  # noqa: E501

        :rtype: List[Trabajo]
        """
        return self._trabajos

    @trabajos.setter
    def trabajos(self, trabajos: List[Trabajo]):
        """Replace the wrapped array of trabajos.  # noqa: E501

        :type trabajos: List[Trabajo]
        """
        self._trabajos = trabajos
|
# define the string true if panagram
import string
def ispanagram(text):
    """Return True if *text* is a pangram, i.e. contains every letter of
    the English alphabet at least once (case-insensitive).

    The parameter was renamed from ``str``, which shadowed the builtin;
    the per-letter membership scan is replaced by a single set lookup.
    """
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    letters = set(text.lower())
    return all(char in letters for char in alphabet)
# Read a sentence and report whether it is a pangram.
# NOTE(review): this rebinds the name of the imported ``string`` module;
# harmless here, but rename it if the module is ever needed afterwards.
string = input ( 'write your sentence: ' )
if(ispanagram(string)== True) :
    print('yes')
else :
    print ('no')
#This essence of this algorithm is very simple, you define the value of k and for every data points, calculate the eucleadian distance
#of that point to all the other points and select the k nearest neighbors, classify the data point of interest to the group occupy the
# most proportation with the k nearest neighbors.
# Associate with each point, there is also a degree of confidance. Eg. in a binary classification problem:
# if for a specific data points, the 3 nearest neighbors are - - and +, then we can say this point is negative with 66% confidance
# the disadvantage is 1.this algorithm is very slow since you have to calculate the distance between everything
#2. there is no training and testing set since you are essentially not training anything
import numpy as np
from sklearn import preprocessing,neighbors
from sklearn.model_selection import train_test_split
import pandas as pd
# Breast-cancer dataset: features + a 'class' column (2 = benign, 4 = malignant).
df=pd.read_csv('breast-cancer-wisconsin.data')
# in the dataset all missing data is represented by a ?
# most algorithm can recognize -99999 means missing data
df.replace('?',-99999,inplace=True)
# since id doesn't have any effect on if a tumor is benign or malignent, we drop this column
# NOTE(review): the positional axis argument (``1``) is deprecated in newer
# pandas — ``axis=1`` is the explicit spelling.
df.drop(['id'],1,inplace=True)
X=np.array(df.drop(['class'],1))
y=np.array(df['class'])
# 80/20 random train/test split.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2)
# setting n_jobs=-1 will allow the algorithm to use as many threads as possible
# Then it can calculate the distances of the point of interest to many data points at the same time
clf = neighbors.KNeighborsClassifier(n_jobs=-1)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test,y_test)
print(accuracy)
#here we are making up an example to pass to the k-nearest neighbor classifier
example_measures = np.array([4,2,1,1,1,2,3,2,1])
# since sklearn algorithms require a list of lists input
# the following line just change our input list example_measures from a list to a list of one list
# better look up what np.array.reshape actually does and how to use it
example_measures = example_measures.reshape(1,-1)
prediction = clf.predict(example_measures)
# the prediction returns a 2, which means it predits it as benign, since in the original dataset
# benign tumors are labels as 2 in the class col and malignent ones are labeled as 4
print(prediction)
from datetime import datetime
from json import JSONEncoder
from flasgger import Swagger, LazyString, LazyJSONEncoder
from flask import Flask
from flask_cdn import CDN
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_principal import Principal
from os import path
import os
import stat
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from flask_seasurf import SeaSurf
from sqlalchemy import MetaData
from waitlist.utility import config
from waitlist.utility.babili import BabiliFilter
from flask_htmlmin import HTMLMIN
from flask_assets import Environment
from webassets.filter import register_filter
from flask_limiter.extension import Limiter
from flask_limiter.util import get_ipaddr
from flask.globals import request
from waitlist.utility.assets import register_asset_bundles
from flask_babel import Babel
from waitlist.utility.i18n.locale import get_locale, get_langcode_from_locale
from waitlist.utility.webassets.filter.cssoptimizer import CSSOptimizerFilter
# Flask application object; static files and templates live one level up.
app = Flask(import_name=__name__, static_url_path="/static",
            static_folder="../static", template_folder=path.join("..", "templates"))
app.secret_key = config.secret_key
# set jinja2 options
app.jinja_env.lstrip_blocks = True
app.jinja_env.trim_blocks = True
# flask config
app.config['SESSION_TYPE'] = 'filesystem'
# Cookies are HTTP-only; the "secure" flag follows the deployment config.
app.config['REMEMBER_COOKIE_HTTPONLY'] = True
app.config['REMEMBER_COOKIE_SECURE'] = config.secure_cookies
app.config['SESSION_COOKIE_SECURE'] = config.secure_cookies
app.config['UPLOAD_FOLDER'] = path.join(".", "sde")
# make sure the upload folder actually exists
# give owner read, write, list(execute)
os.makedirs(app.config['UPLOAD_FOLDER'], mode=(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR), exist_ok=True)
# sqlalchemy config
app.config['SQLALCHEMY_DATABASE_URI'] = config.connection_uri
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_POOL_RECYCLE'] = config.sqlalchemy_pool_recycle
# flask cdn config
app.config['CDN_DOMAIN'] = config.cdn_domain
app.config['CDN_HTTPS'] = config.cdn_https
app.config['CDN_TIMESTAMP'] = False
# flask assets config
app.config['FLASK_ASSETS_USE_CDN'] = config.cdn_assets
app.config['ASSETS_DEBUG'] = config.assets_debug
# flask HTMLMIN config
app.config['MINIFY_PAGE'] = config.html_min
# language config: supported UI languages and where translations live
app.config['LANGUAGES'] = ['en', 'de']
app.config['BABEL_TRANSLATION_DIRECTORIES'] = '../translations'
babel = Babel(app)
@babel.localeselector
def babel_localeselection():
    """Pick the response language code from the current user's locale."""
    return get_langcode_from_locale(get_locale(app))
# naming conventions for sql (stable constraint/index names for migrations)
convention = {
    "ix": "ix_%(column_0_label)s",
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "ck": "ck_%(table_name)s_%(constraint_name)s",
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s"
}
metadata = MetaData(naming_convention=convention)
# init SQLAlchemy
db = SQLAlchemy(app, metadata=metadata)
# init login manager
login_manager = LoginManager()
login_manager.init_app(app)
# init flask principal
principals = Principal(app)
# init sqlmigration manager
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command("db", MigrateCommand)
# init SeaSurf (CSRF protection)
seasurf = SeaSurf(app)
# init flask CDN (only when assets are actually served from a CDN)
if config.cdn_assets:
    CDN(app)
# init flask HTMLMIN
HTMLMIN(app)
# init assets environment
assets = Environment(app)
assets.auto_build = (config.debug_enabled or config.auto_build)
register_filter(BabiliFilter)
register_filter(CSSOptimizerFilter)
register_asset_bundles(assets)
# With auto-build disabled, build every bundle once up front.
if not assets.auto_build:
    for bundle in assets:
        bundle.build()
class MiniJSONEncoder(LazyJSONEncoder):
    """Minify JSON output."""
    # No spaces after separators -> smallest possible payload.
    item_separator = ','
    key_separator = ':'

    def default(self, obj):
        """Serialize datetimes as ISO-8601 with a trailing "Z"; defer
        everything else to the base encoder."""
        if isinstance(obj, datetime):
            # NOTE(review): appends "Z" without checking tzinfo — assumes
            # naive datetimes are UTC; confirm upstream.
            return obj.isoformat()+"Z"
        # Let the base class default method raise the TypeError
        return super(MiniJSONEncoder, self).default(obj)


app.json_encoder = MiniJSONEncoder
# init rate limiting (in-memory store, moving-window strategy, keyed by IP)
limiter = Limiter(key_func=get_ipaddr, storage_uri="memory://",
                  strategy="moving-window")
limiter.init_app(app)
# Swagger/OpenAPI configuration for the versioned API documentation.
app.config['SWAGGER'] = {
    'swagger_version': '2.0',
    'title': 'Swagger Waitlist API',
    'headers': [],
    'specs': [
        {
            'version': '0.0.1',
            'title': 'Api v1',
            'endpoint': 'spec/',
            'description': 'Version 1 of the Swagger Waitlist API',
            'route': '/spec/v1/swagger.json',
            # rule_filter is optional
            # it is a callable to filter the views to extract
            'rule_filter': lambda rule: ('_v1' in rule.endpoint),
            # definition_filter is optional
            # it is a callable to filter the definition models to include
            'definition_filter': lambda definition: (
                'v1_model' in definition.tags)
        }
    ],
    # host/scheme are resolved lazily, per request.
    'host': LazyString(lambda: request.host),
    'basePath': '',
    'uiversion': 3,
}
template = {
    "schemes": [LazyString(lambda: request.scheme)]
}
swag = Swagger(app, template=template)
|
# -*- coding: utf-8 -*-
from collections import deque
class MyStack:
    """LIFO stack built on a deque used queue-style: elements are only
    ever inserted on the left and removed from the right, so the stack
    top always sits at the right end."""

    def __init__(self):
        # Oldest element at the left end, newest (top) at the right end.
        self.queue = deque()

    def push(self, x):
        """Insert x so it becomes the top (rightmost) element."""
        self.queue.appendleft(x)
        # Rotating len-1 steps cycles every older element behind x —
        # the same effect as the pop/appendleft loop of the classic
        # "stack from a queue" construction.
        self.queue.rotate(len(self.queue) - 1)

    def pop(self):
        """Remove and return the top element."""
        return self.queue.pop()

    def top(self):
        """Return the top element without removing it."""
        return self.queue[-1]

    def empty(self):
        """Return True when the stack holds no elements."""
        return len(self.queue) == 0
if __name__ == "__main__":
    # Quick smoke test of the push/top/pop/empty operations.
    obj = MyStack()
    obj.push(1)
    obj.push(2)
    assert 2 == obj.top()
    assert 2 == obj.pop()
    assert not obj.empty()
|
from flask import Flask, request
# Minimal demo application showing Flask's URL routing features.
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Handle GET / with a static greeting."""
    greeting = 'Hello, World!'
    return greeting
# /user/test_taro
@app.route('/user/<username>')
def show_user_profile(username):
    """Echo the username captured from the URL path."""
    return f'User {username}'
# /post/123
@app.route('/post/<int:post_id>')
def show_post(post_id):
    """Echo the integer post id captured from the URL path."""
    return f'Post {post_id:d}'
# /path/aaa/bbb
@app.route('/path/<path:subpath>')
def show_subpath(subpath):
    """Echo a multi-segment path captured by the path converter."""
    return f'Subpath {subpath}'
@app.route('/login', methods=['GET', 'POST'])
def login():
    """POST performs the login; any other method shows the login page."""
    if request.method != 'POST':
        return 'show_login_page'
    return 'login'
if __name__ == '__main__':
    # Start the development server (not for production use).
    app.run()
|
# Read two numbers and print the result of every comparison operator.
x = float(input ("first number : "))
y = float(input ("second number : "))
print (x == y)
print(x != y)
print(x > y)
print(x < y)
print(x >= y)
print(x <= y)
#! /usr/bin/env python
"""
Code to do theory folding in order to compare to the measured distributions
The class 'TheoryFolding' below inherits from the 'TheoryFolding' class in:
pyjetty/alice_analysis/analysis/user/substructure/run_fold_theory.py
reynier@lbl.gov
"""
import sys
import os
import argparse
from array import *
import numpy as np
import ROOT
# RooUnfold must be loaded from the HEPPY installation before importing
# the pyjetty folding machinery that relies on it.
ROOT.gSystem.Load("$HEPPY_DIR/external/roounfold/roounfold-current/lib/libRooUnfold.so")
import yaml
from pyjetty.alice_analysis.analysis.user.substructure import run_fold_theory
# Load pyjetty ROOT utils
ROOT.gSystem.Load('libpyjetty_rutil')
# Prevent ROOT from stealing focus when plotting
ROOT.gROOT.SetBatch(True)
################################################################
################################################################
################################################################
class TheoryFolding(run_fold_theory.TheoryFolding):
    """Specializes the generic run_fold_theory.TheoryFolding: loads
    SD-groomed SCET theory predictions from .dat files so they can be
    folded and compared to the measured distributions."""

    def load_theory_curves(self):
        """Read the theory .dat files (one per jet R / grooming setting /
        pT bin; column 0 is the observable value, the remaining columns
        are one prediction per scale variation), bin them into TH2Ds,
        and write per-pT-bin projections plus scale-variation envelope
        graphs to ``self.outfile``.

        Side effects: fills ``self.theory_scale_vars[jetR]`` with the
        number of scale variations per subconfiguration, stores the
        filled TH2Ds as attributes, and emits control-plot PDFs under
        ``self.output_dir``.
        """
        self.theory_scale_vars = {}
        # Loop over each jet R specified in the config file
        for jetR in self.jetR_list:
            scale_var = []
            # Loop through subconfigurations to fold (e.g. in the jet-axis analysis there Standard_WTA, Standard_SD_1, ...)
            for i, obs_setting in enumerate(self.obs_settings):
                grooming_setting = self.grooming_settings[i]  # grooming parameters
                zcut = None; beta = None
                if grooming_setting and type(obs_setting) == dict and 'zcut' in obs_setting.keys() \
                   and 'beta' in obs_setting.keys():
                    zcut = obs_setting["zcut"]
                    beta = obs_setting["beta"]
                    #label = ("zcut%s_B%s" % (str(zcut), str(beta))).replace('.','')
                    label = self.create_label(jetR, obs_setting, grooming_setting)
                else:  # Not SD grooming — only SD-groomed settings are handled here
                    continue
                pt_bins = array('d', self.theory_pt_bins)
                if self.theory_obs_bins:
                    obs_bins = array('d', self.theory_obs_bins)  # bins which we want to have in the result
                else:
                    obs_bins = array('d', getattr(self, 'binning_' + label))
                # Add bin for underflow value (tagging fraction)
                if grooming_setting and self.use_tagging_fraction:
                    obs_bins = np.insert(obs_bins, 0, -0.001)
                obs_width = np.subtract(obs_bins[1:], obs_bins[:-1])
                # -----------------------------------------------------
                # Create histograms where theory curves will be stored
                th_hists_no_scaling = []  # Basically a copy of the theory calculations, but binned
                th_hists = []  # Histograms that will actually be used in the folding
                hist_names = []
                # -----------------------------------------------------
                # opening theory file by file and fill histograms
                th_sub_dir = "tg_bt%s_zc%s" % (str(beta), str(zcut).replace('.',''))
                th_path = os.path.join(self.theory_dir, th_sub_dir)
                print('reading from files in:', th_path)
                # loop over pT bins
                for p, pt in enumerate(pt_bins[:-1]):
                    pt_min = self.theory_pt_bins[p]
                    pt_max = self.theory_pt_bins[p+1]
                    # Get scale factor for this pT bin.
                    # This reverses the self-normalization of 1/sigma for correct
                    # pT scaling when doing projections onto the y-axis.
                    scale_f = self.pt_scale_factor_jetR(pt, pt_bins[p+1], jetR)
                    # load theory file, grab the data, and fill histograms with it
                    th_file = 'R_%s_pT_%i-%i.dat' % (str(jetR).replace('.',''), int(pt_min), int(pt_max))
                    th_file = os.path.join(th_path, th_file)
                    # ------------------------------------------------------------------------------------------------------------
                    # Load data from theory file (skip blank and '#'-comment lines)
                    with open(th_file) as f:
                        lines = [line for line in f.read().split('\n') if line and line[0] != '#']
                        x_val = [float(line.split()[0]) for line in lines]
                        n_scale_variations = len(lines[0].split())-1  # number of scale variations
                        # loop over scale variations and fill histograms
                        for sv in range(0, n_scale_variations):
                            y_val_n = [float(line.split()[sv+1]) for line in lines]
                            # Interpolate the given values and return the value at the requested bin center
                            y_val_bin_ctr = self.interpolate_values_linear(x_val, y_val_n, obs_bins)
                            # Remove negative numbers
                            y_val_bin_ctr = [0 if val < 0 else val for val in y_val_bin_ctr]
                            # Histograms are created once, on the first pT bin.
                            if p == 0:
                                hist_name = 'h2_input_%s_obs_pT_%s_sv%i' % (self.observable, label, sv)
                                hist_name_no_scaling = hist_name + '_no_scaling'
                                th_hist = ROOT.TH2D(hist_name, ';p_{T}^{jet};%s' % (self.observable),
                                                    len(pt_bins)-1, pt_bins, len(obs_bins)-1, obs_bins)
                                th_hist_no_scaling = ROOT.TH2D(hist_name_no_scaling, ';p_{T}^{jet};%s' % (self.observable),
                                                               len(pt_bins)-1, pt_bins, len(obs_bins)-1, obs_bins)
                                th_hists.append(th_hist)
                                hist_names.append(hist_name)
                                th_hists_no_scaling.append(th_hist_no_scaling)
                            # Save content into histogram before any scaling has been applied
                            # (to compare to the theory curves and make sure everything went fine)
                            for ob in range(0, len(obs_bins)-1):
                                th_hists_no_scaling[sv].SetBinContent(p+1, ob+1, y_val_bin_ctr[ob])
                            # Multiply by bin width and scale with pT-dependent factor
                            y_val_bin_ctr = np.multiply(y_val_bin_ctr, obs_width)
                            integral_y_val_bin_ctr = sum(y_val_bin_ctr)
                            y_val_bin_ctr = [ val * scale_f / integral_y_val_bin_ctr for val in y_val_bin_ctr ]
                            # Save scaled content into the histograms
                            for ob in range(0, len(obs_bins)-1):
                                th_hists[sv].SetBinContent(p+1, ob+1, y_val_bin_ctr[ob])
                        f.close()  # redundant: the with-statement already closes the file
                # ------------------------------------------------------------------------------------------------------------
                new_obs_lab = ("zcut%s_B%s" % (str(zcut), str(beta))).replace('.','')
                # ------------------------------------------------------------------------------------------------------------
                # Project each final pT bin onto the observable axis, once per
                # scale variation, and build min/max envelopes over variations.
                for n_pt in range(0, len(self.final_pt_bins)-1):
                    histo_list = []
                    for sv in range(0, n_scale_variations):
                        projection_name = 'h1_input_%s_R%s_%s_sv%i_pT_%i_%i' % ( self.observable,(str)(jetR).replace('.',''),obs_setting,sv,(int)(self.final_pt_bins[n_pt]),(int)(self.final_pt_bins[n_pt+1]))
                        # Determine the bin number that corresponds to the pT edges given
                        min_bin, max_bin = self.bin_position( self.theory_pt_bins, self.final_pt_bins[n_pt], self.final_pt_bins[n_pt+1] )
                        h1_input_hist = th_hists[sv].ProjectionY(projection_name, min_bin, max_bin)
                        h1_input_hist.SetTitle(projection_name)
                        h1_input_hist.SetDirectory(0)
                        # Undo the bin width scaling and set correct normalization
                        norm_factor = h1_input_hist.Integral()
                        if norm_factor == 0: norm_factor = 1
                        h1_input_hist.Scale(1./norm_factor, "width")
                        # Theory curves carry no statistical uncertainty.
                        for b in range(0, h1_input_hist.GetNbinsX()):
                            h1_input_hist.SetBinError(b+1, 0)
                        histo_list.append(h1_input_hist)
                    # Create envelope histograms
                    hist_min, hist_max = self.min_max( histo_list )
                    # Rename some objects
                    name_h_cent = 'h1_input_%s_R%s_%s_pT_%i_%i' % ( self.observable,(str)(jetR).replace('.',''),new_obs_lab,(int)(self.final_pt_bins[n_pt]),(int)(self.final_pt_bins[n_pt+1]))
                    name_h_min = 'h1_min_input_%s_R%s_%s_pT_%i_%i' % ( self.observable,(str)(jetR).replace('.',''),new_obs_lab,(int)(self.final_pt_bins[n_pt]),(int)(self.final_pt_bins[n_pt+1]))
                    name_h_max = 'h1_max_input_%s_R%s_%s_pT_%i_%i' % ( self.observable,(str)(jetR).replace('.',''),new_obs_lab,(int)(self.final_pt_bins[n_pt]),(int)(self.final_pt_bins[n_pt+1]))
                    h_central = histo_list[0]  # variation 0 is the central scale choice
                    h_central.SetName(name_h_cent)
                    hist_min .SetName(name_h_min )
                    hist_max .SetName(name_h_max )
                    # Create a graph out of these histograms
                    graph_cent = self.histo_to_graph(h_central,hist_min,hist_max)
                    graph_min = ROOT.TGraph(hist_min)
                    graph_max = ROOT.TGraph(hist_max)
                    graph_frac = self.fractional_error(h_central,hist_min,hist_max)
                    graph_cent.SetName('g_input_%s_R%s_%s_pT_%i_%i' % ( self.observable,(str)(jetR).replace('.',''),new_obs_lab,(int)(self.final_pt_bins[n_pt]),(int)(self.final_pt_bins[n_pt+1])))
                    graph_min .SetName('g_min_input_%s_R%s_%s_pT_%i_%i' % ( self.observable,(str)(jetR).replace('.',''),new_obs_lab,(int)(self.final_pt_bins[n_pt]),(int)(self.final_pt_bins[n_pt+1])))
                    graph_max .SetName('g_max_input_%s_R%s_%s_pT_%i_%i' % ( self.observable,(str)(jetR).replace('.',''),new_obs_lab,(int)(self.final_pt_bins[n_pt]),(int)(self.final_pt_bins[n_pt+1])))
                    graph_frac.SetName('g_frac_input_%s_R%s_%s_pT_%i_%i'% ( self.observable,(str)(jetR).replace('.',''),new_obs_lab,(int)(self.final_pt_bins[n_pt]),(int)(self.final_pt_bins[n_pt+1])))
                    xtit = self.obs_label
                    ytit = '#frac{1}{#sigma} #frac{d#sigma}{d'+xtit+'}'
                    tit = 'input (hadron-level, no MPI) %i < #it{p}_{T}^{jet} < %i GeV/#it{c}'%((int)(self.final_pt_bins[n_pt]),(int)(self.final_pt_bins[n_pt+1]))
                    self.pretty_1D_object(graph_cent,2,2,1,tit, xtit, ytit, True)
                    self.pretty_1D_object(graph_min ,1,1,2,tit, xtit, ytit)
                    self.pretty_1D_object(graph_max ,1,1,2,tit, xtit, ytit)
                    self.pretty_1D_object(graph_frac,2,2,1,tit, xtit, ytit, True)
                    outpdfname = os.path.join(self.output_dir, 'control_plots' , 'processed_plots' )
                    if not os.path.exists(outpdfname):
                        os.makedirs(outpdfname)
                    outpdfname_1 = os.path.join(outpdfname, 'theory_%s_pT_%i_%i_GeVc_input.pdf'%(self.create_label(jetR,obs_setting,grooming_setting),(int)(self.final_pt_bins[n_pt]),(int)(self.final_pt_bins[n_pt+1])) )
                    self.plot_processed_functions( graph_cent, graph_min, graph_max, outpdfname_1)
                    # loop over response files (e.g. Pythia, Herwig, ...)
                    for ri, response in enumerate(self.theory_response_files):
                        for lev in self.response_levels:
                            outpdfname_2 = os.path.join(outpdfname, 'comp_gen_input_theory_%s_pT_%i_%i_GeVc_' % \
                                (self.create_label(jetR, obs_setting, grooming_setting), int(self.final_pt_bins[n_pt]), int(self.final_pt_bins[n_pt+1])) )
                            outpdfname_2 += lev[0]+"_"+lev[1]+"_MPI"+lev[2]+"_"+self.theory_response_labels[ri]+".pdf"
                            self.plot_comparison_SCET_gen_input(graph_cent, jetR, obs_setting, grooming_setting, lev[0], lev[1], lev[2], \
                                self.theory_response_labels[ri], self.final_pt_bins[n_pt], self.final_pt_bins[n_pt+1], outpdfname_2)
                    # Persist the central result and its envelope to the output file.
                    self.outfile.cd()
                    h_central .Write()
                    hist_min .Write()
                    hist_max .Write()
                    graph_cent.Write()
                    graph_min .Write()
                    graph_max .Write()
                    graph_frac.Write()
                # -----------------------------------------------------
                # Setting the filled histograms as attributes
                self.outfile.cd()
                for sv in range(0,n_scale_variations):
                    setattr(self,hist_names[sv],th_hists[sv])
                # Only save the 2D histograms for the central scale case
                #th_hists_no_scaling[0].Write()
                #th_hists[0].Write()
                outpdfname = os.path.join(self.output_dir, 'control_plots', 'input')
                if not os.path.exists(outpdfname):
                    os.makedirs(outpdfname)
                outpdfname = os.path.join(outpdfname, 'theory_input_%s.pdf' % label)
                self.plot_input_theory(th_hists_no_scaling, th_hists, outpdfname)
                scale_var.append(n_scale_variations)
            self.theory_scale_vars[jetR] = scale_var
#----------------------------------------------------------------------
# Plot input theory curves both as histograms and curves
#----------------------------------------------------------------------
def plot_input_theory(self, h_list_no_scaling, h_list, outpdfname):
for i in range(0,len(h_list_no_scaling)):
c1 = ROOT.TCanvas('c1','c1',1000,800)
c1.Divide(2,2)
for j in range(0,4):
c1.cd(j+1).SetLogz()
c1.cd(j+1).SetLeftMargin(0.20)
if j > 1:
c1.cd(j+1).SetTheta(50)
c1.cd(j+1).SetPhi(220)
else:
c1.cd(j+1).SetBottomMargin(0.20)
c1.cd(j+1).SetRightMargin(0.24)
self.pretty_TH2D(h_list_no_scaling[i],'input theory curves, scale variation %i'%(i),'#it{p}_{T}^{jet} [GeV/#it{c}]',self.obs_label,'#frac{1}{#sigma} #frac{d#sigma}{d'+self.obs_label+'}')
self.pretty_TH2D(h_list [i],'scaled input, scale variation %i'%(i) ,'#it{p}_{T}^{jet} [GeV/#it{c}]',self.obs_label,'~ #frac{d#sigma}{d'+self.obs_label+'}')
h_list_no_scaling[i].GetXaxis().SetTitleOffset(1.6)
h_list [i].GetXaxis().SetTitleOffset(1.6)
h_list_no_scaling[i].GetYaxis().SetTitleOffset(1.5)
h_list [i].GetYaxis().SetTitleOffset(1.5)
h_list_no_scaling[i].GetZaxis().SetTitleOffset(1.4)
h_list [i].GetZaxis().SetTitleOffset(1.4)
c1.cd(1)
h_list_no_scaling[i].Draw('COLZ')
c1.cd(2)
h_list[i].Draw('COLZ')
c1.cd(3)
h_list_no_scaling[i].Draw('LEGO1')
c1.cd(4)
h_list[i].Draw('LEGO1')
c1.Draw()
if len(h_list_no_scaling)==1:
c1.Print(outpdfname)
else:
if i == 0: c1.Print(outpdfname+'(')
elif i == len(h_list_no_scaling)-1: c1.Print(outpdfname+')')
else: c1.Print(outpdfname)
del c1
#----------------------------------------------------------------------
if __name__ == '__main__':
  # Command-line entry point: parse the config path, sanity-check it, then
  # hand off to the theory-folding driver.
  parser = argparse.ArgumentParser(description='Folding theory predictions')
  parser.add_argument('-c', '--configFile', action='store',
                      type=str, metavar='configFile',
                      default='analysis_config.yaml',
                      help='Path of config file for analysis')
  args = parser.parse_args()
  print('Configuring...')
  print(f"configFile: '{args.configFile}'")
  # Abort early when the configuration file cannot be found.
  if not os.path.exists(args.configFile):
    print(f'File "{args.configFile}" does not exist! Exiting!')
    sys.exit(0)
  analysis = TheoryFolding(config_file=args.configFile)
  analysis.run_theory_folding()
|
"""豆瓣FM
"""
from urllib.request import urlopen, HTTPError
from urllib import parse
from json import JSONDecoder
class Fm():
    """Minimal Douban FM client: logs in and fetches the channel list."""
    # Authentication endpoint
    login_url = 'http://www.douban.com/j/app/login'
    # Channel-list endpoint
    channel_list_url = 'http://www.douban.com/j/app/radio/channels'
    # Song-list endpoint
    music_list = 'http://www.douban.com/j/app/radio/people'
    # Login form defaults (template only — copied per instance in __init__)
    data = {
        'app_name': 'radio_desktop_win',
        'version': 100,
        'email': '',
        'password': '',
    }
    def __init__(self, email, password):
        """Store credentials, authenticate, and load the channel list."""
        # BUG FIX: work on a per-instance copy. The original wrote the
        # credentials straight into the shared class-level dict, so every
        # instance (and the class itself) kept the last login's email and
        # password.
        self.data = dict(self.data)
        self.data['email'] = email
        self.data['password'] = password
        self.data = parse.urlencode(self.data).encode()
        self.oauth = ''
        self.channel_list = ''
        self.get_token()
        self.get_channel_list()
    def get_token(self):
        """POST the login form and keep the raw response in ``self.oauth``."""
        try:
            r = urlopen(self.login_url, self.data)
            self.oauth = r.read().decode()
        except HTTPError as err:
            print(err)
    def get_channel_list(self):
        """Fetch and JSON-decode the channel list into ``self.channel_list``."""
        try:
            r = urlopen(self.channel_list_url)
            json = JSONDecoder()
            self.channel_list = json.decode(r.read().decode())
        except HTTPError as err:
            print(err)
if __name__ == '__main__':
    # Demo: log in (password intentionally blank here) and dump the channels.
    fm = Fm('imaguowei@gmail.com', '')
    channels = fm.channel_list['channels']
    for channel in channels:
        print(channel)
'''
Created on Sep 6, 2015
@author: hugosenari
'''
from os.path import dirname
from inspect import getfile
def app_plugins_path():
    """Return the directory that contains this module (the plugin root)."""
    return dirname(getfile(app_plugins_path))
|
from app import app
from flask import render_template
@app.route ('/')
@app.route ('/index')
def index():
    """Render the home page with a mock user and two sample posts."""
    user = {'name': 'Balor'}
    first_post = {
        'author': 'Person 1',
        'message': 'Iron Man is the best Marvel movie.'
    }
    second_post = {
        'author': 'Person 2',
        'message': 'Avengers is the best Marvel movie.'
    }
    posts = [first_post, second_post]
    return render_template ('index.html', posts = posts, user = user)
|
from pathlib import Path
from shutil import copyfile
from openpyxl import load_workbook
"""Importante la data se obtiene via SQL de RiesgoCol Tabla= CFC_SQT_RIESGOCO
COLUMNAS: riesgoco_noprestamo, riesgoco_fechasubida, riesgoco_producto,
riesgoco_mtoprestorg,riesgoco_saldoprest,riesgoco_fechadesem
riesgoco_fechacancela,riesgoco_estatusmora1,riesgoco_estatusmora2,
riesgoco_estatusmora3,riesgoco_portafolio1,riesgoco_portaname1
riesgoco_portafolio2,riesgoco_portaname2,riesgoco_tipocartera,
riesgoco_prestamotipo,riesgoco_clasificacion,riesgoco_restructurado
riesgoco_fechareestru ORDER BY 1,2 La misma se exporta via csv y se graba
luego como excel que es el archivo que lee este proceso adicional, si cambia
el nombre del archivo debe cambiar la variable sourcefilename el excel debe
tener una pestaña nombrada datatape """
# Make a copy of a original source data tape file because
# for some reason openpyxl some times corrupt file
if __name__ == "__main__":
    # Build per-loan cohort rows ("Cosechas") from the RiesgoCol data tape.
    # Works on a copy of the workbook because openpyxl sometimes corrupts
    # the file it saves.
    path = Path("C:/Users/apalmad/Documents/")
    sourcefilename = "datatapecolfitch102021.xlsx"
    workfilename = "cop_" + sourcefilename
    sourcefilename = path / sourcefilename
    workfilename = path / workfilename
    copyfile(sourcefilename, workfilename)
    # BUG FIX: predefine wb so the finally block cannot raise NameError when
    # load_workbook itself fails.
    wb = None
    try:
        wb = load_workbook(workfilename)
        wb.create_sheet("Cosechas", 2)
        wsdatatape = wb["datatape"]
        wscosecha = wb["Cosechas"]
        deemdefault = 0
        desemcosecha = balancedefaulted = 0.00
        prestamoant = prestamo = ""
        maxrows = wsdatatape.max_row + 1
        # Rows are ordered by loan id (col A): aggregate one cohort row per loan.
        for row in range(2, maxrows):
            prestamo = wsdatatape["A" + str(row)].value
            if prestamo != prestamoant:
                if prestamoant == "":
                    # First loan of the sheet: start accumulating.
                    desemcosecha = wsdatatape["D" + str(row)].value
                    prestamoant = prestamo
                    aniodesem = wsdatatape["F" + str(row)].value
                    tipo = wsdatatape["C" + str(row)].value
                else:
                    # New loan: flush the previous loan's cohort row, then reset.
                    wscosecha.append(
                        [prestamoant, aniodesem, desemcosecha,
                         balancedefaulted,
                         tipo
                         ]
                    )
                    desemcosecha = wsdatatape["D" + str(row)].value
                    aniodesem = wsdatatape["F" + str(row)].value
                    tipo = wsdatatape["C" + str(row)].value
                    prestamoant = prestamo
                    deemdefault = 0
                    balancedefaulted = 0.0
            else:
                # Same loan, later snapshot: record the balance at the first
                # time the loan is deemed defaulted (181-210 days past due).
                delincuency = wsdatatape["H" + str(row)].value
                if delincuency == "181-210 DAYS":
                    if deemdefault == 0:
                        balancedefaulted = wsdatatape["E" + str(row)].value
                        deemdefault = 1
        # BUG FIX: the original dropped the final loan's cohort row because
        # rows were only appended when a *new* loan id appeared.
        if prestamoant != "":
            wscosecha.append(
                [prestamoant, aniodesem, desemcosecha, balancedefaulted, tipo]
            )
    finally:
        if wb is not None:
            wb.save(workfilename)
            wb.close()
            print(f'Proceso Concluido, Revise el archivo {workfilename} ')
|
from optparse import make_option
from django.core.management.base import BaseCommand
from cms.models import Page
def find_pages_without_publisher_draft():
    """Return all pages lacking a ``publisher_draft`` reverse relation."""
    broken = []
    for page in Page.objects.all():
        if not hasattr(page, 'publisher_draft'):
            broken.append(page)
    return broken
def find_duplicates_pages():
    """Return pages whose absolute URL is shared with at least one other page.

    BUG FIX: the original tested ``count == 2``, silently missing URLs that
    occur three or more times; ``> 1`` catches every duplicate. A Counter
    also avoids the original's O(n^2) repeated ``list.count`` scans.
    """
    from collections import Counter
    url_counts = Counter(p.get_absolute_url() for p in Page.objects.all())
    return [p for p in Page.objects.all()
            if url_counts[p.get_absolute_url()] > 1]
def print_pages(pages):
    """Print each page's URL together with its draft/public state."""
    for page in pages:
        state = 'Draft' if page.publisher_is_draft else 'Public'
        print('{} ({})'.format(page.get_absolute_url(), state))
def delete_pages(pages):
    """Delete every page, logging its URL and draft/public state first."""
    for page in pages:
        state = 'Draft' if page.publisher_is_draft else 'Public'
        print('Deleting page: {} ({})'.format(page.get_absolute_url(), state))
        page.delete()
class Command(BaseCommand):
    help = "Make sure the CMS database is consistent."
    option_list = BaseCommand.option_list + (
        make_option(
            '--delete', action='store_true', dest='delete',
            # BUG FIX: a destructive store_true flag must default to False;
            # with default=True the flag was meaningless and deletion was
            # always enabled.
            default=False, help='Delete broken and inconsistent entries.'),
    )
    def handle(self, *args, **options):
        """Report inconsistent CMS pages; delete them only with --delete."""
        pages = find_pages_without_publisher_draft()
        print('Pages without .publisher_draft: {}'.format(len(pages)))
        if pages:
            # BUG FIX: the original branches were inverted — it deleted when
            # --delete was absent and only listed when it was given.
            if options['delete']:
                delete_pages(pages)
            else:
                print_pages(pages)
        pages = find_duplicates_pages()
        print('Duplicate pages: {}'.format(len(pages)))
        if pages:
            print_pages(pages)
            if options['delete']:
                # Duplicates cannot be resolved automatically.
                print('Please remove them manually.')
        if not options['delete']:
            print('If you want any inconsistencies fixed, please re-run this '
                  'command with the --delete option.')
|
import base64
import io
# https://palletsprojects.com/p/flask/
from flask import Flask
from flask import render_template
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
app = Flask(__name__)
app.debug = False
@app.route("/")
def plot_flowers():
    """Serve a petal length/width scatter plot of the iris data set as a
    base64-embedded PNG rendered through ``temp.html``."""
    flowers = pd.read_csv('http://danielykim.me/data/iris.csv')
    colormap = {
        'setosa' : '#4C72B0',
        'versicolor' : '#DD8452',
        'virginica' : '#55A868'
    }
    colors = [ colormap[x] for x in flowers['species'] ]
    x_label, y_label = 'petal_length', 'petal_width'
    # BUG FIX: draw on a dedicated figure and close it after saving. The
    # original drew on pyplot's implicit global figure, so in a long-running
    # server every request piled more points onto the same figure and the
    # figures were never released.
    fig = plt.figure()
    plt.scatter(
        flowers[x_label], flowers[y_label],
        c = colors, alpha = 0.33)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    b_io = io.BytesIO()
    plt.savefig(b_io)
    plt.close(fig)
    img_buf = b_io.getbuffer()
    img_b64 = base64.b64encode(img_buf)
    base64_image = img_b64.decode('utf-8')
    # See also:
    # http://flask.pocoo.org/docs/1.0/quickstart/#rendering-templates
    return render_template('temp.html', base64_image=base64_image)
if __name__ == "__main__":
    # Based on https://gist.github.com/ianschenck/977379a91154fe264897
    # Extra kwargs forwarded to reactor.run(); only populated in debug mode.
    reactor_args = {}
    def run_twisted_wsgi():
        # Serve the Flask WSGI app on port 5000 through Twisted's reactor.
        from twisted.internet import reactor
        from twisted.web.server import Site
        from twisted.web.wsgi import WSGIResource
        resource = WSGIResource(reactor, reactor.getThreadPool(), app)
        site = Site(resource)
        reactor.listenTCP(5000, site)
        reactor.run(**reactor_args)
    if app.debug:
        # Disable twisted signal handlers in development only.
        reactor_args['installSignalHandlers'] = 0
        # Turn on auto reload.
        # NOTE: rebinds the function so the reloader wraps the server start.
        from werkzeug.serving import run_with_reloader
        run_twisted_wsgi = run_with_reloader(run_twisted_wsgi)
    run_twisted_wsgi()
|
from sklearn.datasets import *
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
# Load the iris data set into a DataFrame with the class label as 'target'.
data = load_iris()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['target'] = data.target
print(df.keys())
# Standardize every column to zero mean / unit variance.
# NOTE(review): the scaler is fitted on ALL columns, including 'target', so
# the class label is standardized too — confirm this is intentional.
ss = StandardScaler(copy=True, with_mean=True, with_std=True)
ss.fit(df)
# Per-feature means (the trailing entry, dropped here, is the target's mean).
print((ss.mean_[:-1]))
scaled_df = pd.DataFrame(ss.transform(df), columns=data.feature_names + ['target'])
print(df.head())
print(scaled_df.head())
# scatter, hist, bar, barh, hexbin, pie, kde
scaled_df.plot(x='petal width (cm)', kind='kde')
scaled_df.plot(x='petal width (cm)', y='petal length (cm)', kind='scatter')
plt.show()
|
from django.conf import settings
from django_hosts import patterns, host
# django-hosts routing: 'www' uses the project's root urlconf; any other
# subdomain (negative lookahead on 'www') is routed to fes.hostsconf.urls.
host_patterns = patterns('',
    host(r'www', settings.ROOT_URLCONF, name='www'),
    #host(r'blog', 'abcd.hostsconf.urls', name='blog'),
    host(r'(?!www).*', 'fes.hostsconf.urls', name='wildcard'),
)
# from abcd.hostsconf import urls as redirect_urls
# host_patterns=[
# host(r'www', settings.ROOT_URLCONF, name='www'),
# #host(r'blog', 'abcd.hostsconf.urls', name='blog'),
# host(r'(?!www).*', redirect_urls, name='wildcard'),
# ] |
import pandas as pd
import numpy as np
import os
import json
import re
import sys
# Matches the '{"version":"NNN"},' header prepended to the product feed.
# Compiled once instead of once per province file.
_VERSION_HEADER = re.compile('{"version":"[0-9]+"},')
def make_weekly_products(argv):
    """Strip the version header from each province's product feed.

    For province ids 1..20, reads the first line of
    ``<argv>/product<id>_fr.json``; when it contains a version header, writes
    the line without that header to ``<argv>/weekly_products_<id>.json``.
    Missing/unreadable files are reported and skipped.
    """
    root_dir = argv
    for province_id in range(1, 21):
        try:
            # Open read-only ('r+' in the original was never needed) and let
            # the context manager close the handle.
            with open(root_dir + '/product' + str(province_id) + '_fr.json', 'r') as src:
                first_line = src.readline()
        except OSError:
            # Narrowed from the original bare except: only missing/unreadable
            # files are treated as "no file"; real bugs now surface.
            print('No file for province id ' + str(province_id) + '. ')
            continue
        match = _VERSION_HEADER.search(first_line)
        if match:
            cleaned = first_line.replace(match.group(), '')
            with open(root_dir + '/weekly_products_' + str(province_id) + '.json', 'w') as dst:
                dst.write(cleaned)
if __name__ == "__main__":
    # CLI: first argument is the root directory holding the product JSON files.
    make_weekly_products(sys.argv[1])
# -*- coding:utf-8 -*-
# author: will
import redis
from config.db_config import DBConfig
class RedisClient(object):
    """Provides a single shared Redis client for the application."""
    # Connection is created once, at import time, from DBConfig's host/port.
    # NOTE(review): db=9 is hard-coded here rather than taken from DBConfig —
    # confirm that is intentional.
    __redis_cli = redis.StrictRedis(host=DBConfig.get_redis_host(), port=DBConfig.get_redis_port(), db=9)
    @classmethod
    def create_redis_cli(cls):
        # Return the shared client (no new connection is created per call).
        return cls.__redis_cli
|
'''给定一个整数数组 nums 和一个目标值 target,请你在该数组中找出和为目标值的
那两个整数,并返回他们的数组下标。
你可以假设每种输入只会对应一个答案。但是,你不能重复利用这个数组中同样的元素。
'''
def twoSum( nums, target):
    """
    Return the indices [i, j] (i < j) of the two entries of nums that sum
    to target, or None when no such pair exists. Per the problem statement,
    each input has at most one answer and an element is not reused.

    :type nums: List[int]
    :type target: int
    :rtype: List[int]

    Single pass with a value->index map: O(n) time / O(n) space instead of
    the original O(n^2) double loop.
    """
    seen = {}  # value -> index of its first occurrence
    for j, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return [seen[complement], j]
        # Record after the lookup so an element never pairs with itself;
        # keep the first occurrence so the smaller index is returned.
        if value not in seen:
            seen[value] = j
    return None
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 17 12:57:50 2017
@author: harryholt
Plot.py
Purpose:
- Almost stand alone module which plots the results to the rest of the program
- Loads the data form the stored files
"""
import numpy as np
import time
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import pickle
from scipy.optimize import curve_fit
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import Print
# Module-load timestamp used by plotWeights' runtime printout.
# NOTE(review): time.clock() was removed in Python 3.8 — this module only
# runs on older interpreters; time.perf_counter() is the replacement.
start_time = time.clock()
def plotMapCircular(address, address_fronts, run, n_comp, plotFronts=True):
    """Scatter the class labels on a South Polar Stereographic map.

    address        -- run output directory (plot saved under address+"Plots/")
    address_fronts -- directory holding the front position text files
    run            -- run identifier forwarded to Print.readLabels
    n_comp         -- number of GMM classes (sets the discrete colormap)
    plotFronts     -- overlay the SAF/SACCF/SBDY/PF fronts when True
    """
    print("Plot.plotMapCircular")
    # Load lat, lon and labels
    lon, lat, varTime, labels = None, None, None, None
    lon, lat, varTime, labels = Print.readLabels(address, run)
    # Plot the data in map form - individual
    colorname = 'RdYlBu'
    # Discretize the colormap so each class gets one colour bin.
    colormap = plt.get_cmap(colorname,n_comp)
    # proj = ccrs.Orthographic(central_longitude=0.0, central_latitude=-90.0, globe=None)
    proj = ccrs.SouthPolarStereo()
    proj_trans = ccrs.PlateCarree()
    ax1 = plt.axes(projection=proj)
    # vmin/vmax of +-0.5 around the integer labels center each class on a bin.
    CS = ax1.scatter(lon, lat, s = 0.5, lw = 0, c = labels, cmap=colormap, \
                     vmin = -0.5, vmax = n_comp-0.5, transform = proj_trans)
    if plotFronts:
        SAF, SACCF, SBDY, PF = None, None, None, None
        SAF, SACCF, SBDY, PF = loadFronts(address_fronts) # Format is Lon col = 0 and Lat col = 1
        ax1.plot(SAF[:,0], SAF[:,1], lw = 1, ls='-', label='SAF', color='black', transform=proj_trans)
        ax1.plot(SACCF[:,0], SACCF[:,1], lw = 1,ls='-', label='SACCF', color='green', transform=proj_trans)
        ax1.plot(SBDY[:,0], SBDY[:,1], lw = 1,ls='-', label='SBDY', color='blue', transform=proj_trans)
        ax1.plot(PF[:,0], PF[:,1], lw = 1,ls='-', label='PF', color='grey', transform=proj_trans)
        #ax1.legend(loc='upper left')
        ax1.legend(bbox_to_anchor=( 1.25,1.2), ncol=4, columnspacing = 0.8)
    # Compute a circle in axes coordinates, which we can use as a boundary for the map.
    theta = np.linspace(0, 2*np.pi, 100)
    center = [0.5, 0.5]
    radius = 0.46 # 0.46 corresponds to roughly 30S Latitude
    verts = np.vstack([np.sin(theta), np.cos(theta)]).T
    circle = mpath.Path(verts * radius + center)
    ax1.set_boundary(circle, transform=ax1.transAxes)
    # Add features
    ax1.gridlines()
    # ax1.add_feature(cfeature.LAND)
    ax1.coastlines()
    colorbar = plt.colorbar(CS)
    colorbar.set_label('Class', rotation=270, labelpad=10)
    plt.savefig(address+"Plots/Labels_Map_n"+str(n_comp)+".pdf",bbox_inches="tight",transparent=True)
    plt.show()
def loadFronts(address_fronts):
    """Load the four Southern Ocean front tracks (SAF, SACCF, SBDY, PF).

    address_fronts must end with a path separator; each returned array has
    longitude in column 0 and latitude in column 1.
    """
    fronts = [np.loadtxt(address_fronts + name + '.txt')
              for name in ('saf', 'saccf', 'sbdy', 'pf')]
    return tuple(fronts)
###############################################################################
###############################################################################
def plotPosterior(address, address_fronts, run, n_comp, plotFronts=True):
    """For each class, plot its members on a polar map coloured by a
    banded posterior probability (>=0.99, >=0.9, >=0.66, >=1/n_comp).

    One figure per class is saved to address+"Plots/PostProb_Class<k>_n<n>.pdf".
    """
    print("Plot.plotPosterior")
    # Load lat, lon and labels
    lon, lat, varTime, labels = None, None, None, None
    lon, lat, varTime, labels = Print.readLabels(address, run)
    # Load the posterior probabilities for each class
    class_number_array = None
    class_number_array = np.arange(0,n_comp).reshape(-1,1)
    lon_pp, lat_pp, varTime_pp, post_prob = Print.readPosteriorProb(address, run, class_number_array)
    for k in class_number_array:
        lon_k, lat_k, post_k, indices_k = None, None, None, None
        # Select only the profiles actually assigned to class k.
        indices_k = (np.where(labels == (1.0*k)))
        lon_k, lat_k = lon[indices_k], lat[indices_k]
        post_k = post_prob[:,k][indices_k]
        # Idea is to take one class, n, and select all indices_k that are actaully assigned to that class.
        # Quantize the continuous posterior into four confidence bands.
        likelihood = np.zeros(len(post_k))
        for i in range(len(post_k)):
            if post_k[i] >= 0.99:
                likelihood[i] = 0.99
            elif post_k[i] >= 0.9 and post_k[i] < 0.99 :
                likelihood[i] = 0.9
            elif post_k[i] >= 0.66 and post_k[i] < 0.9:
                likelihood[i] = 0.66
            elif post_k[i] >= 1/(n_comp) and post_k[i] < 0.66:
                likelihood[i] = 1/(n_comp)
            else:
                # Should be impossible for the assigned class (argmax >= 1/k).
                print("WARNING : Posterior Value less than 1/k")
        # Plot the posterior probabilites
        ax = plt.subplot(111, polar=True)
        colorname = 'RdYlBu'
        colormap = plt.get_cmap(colorname, 4)
        # Polar coordinates: angle = longitude, radius = co-latitude.
        theta = np.pi*(lon_k)/180.0
        rho = 90 - abs(lat_k)
        CS = ax.scatter(theta,rho, 0.5, lw = 0, c = likelihood, cmap=colormap, vmin = 0, vmax = 1)
        if plotFronts:
            SAF, SACCF, SBDY, PF = None, None, None, None
            SAF, SACCF, SBDY, PF = loadFronts(address_fronts) # Format is Lon col = 0 and Lat col = 1
            theta_saf = np.pi*(SAF[:,0])/180.0
            rho_saf = 90 - abs(SAF[:,1])
            ax.plot(theta_saf, rho_saf, lw = 1, ls='-', label='SAF', color='black')
            theta_saccf = np.pi*(SACCF[:,0])/180.0
            rho_saccf = 90 - abs(SACCF[:,1])
            ax.plot(theta_saccf, rho_saccf, lw = 1,ls='-', label='SACCF', color='green')
            theta_sbdy = np.pi*(SBDY[:,0])/180.0
            rho_sbdy = 90 - abs(SBDY[:,1])
            ax.plot(theta_sbdy, rho_sbdy, lw = 1,ls='-', label='SBDY', color='blue')
            theta_pf = np.pi*(PF[:,0])/180.0
            rho_pf = 90 - abs(PF[:,1])
            ax.plot(theta_pf, rho_pf, lw = 1,ls='-', label='PF', color='grey')
            #ax1.legend(loc='upper left')
            ax.legend(bbox_to_anchor=( 1.25,1.25), ncol=4, columnspacing = 0.8)
        # Orient like a map: 0 deg at the top, angles increasing clockwise.
        ax.set_theta_zero_location("N")
        ax.set_theta_direction(-1)
        ax.set_ylim(0,60)
        #ax.set_title("Class "+str(k[0]), horizontalalignment='left')
        plt.text(0, 1, "Class "+str(k[0]), transform = ax.transAxes)
        ax.set_yticklabels([])
        colorbar = plt.colorbar(CS)
        colorbar.set_label('Probability of belonging to Class', rotation=270, labelpad=10)
        plt.savefig(address+"Plots/PostProb_Class"+str(k[0])+"_n"+str(n_comp)+".pdf",bbox_inches="tight",transparent=True)
        plt.show()
###############################################################################
###############################################################################
def plotProfileClass(address, run, n_comp, space):
    """Plot each GMM class's mean profile against (reduced) depth.

    space selects the representation: 'depth' (normalized anomaly vs depth),
    'uncentred' (temperature vs depth) or 'reduced' (PCA space).
    """
    # space will be 'depth', 'reduced' or 'uncentred'
    print("Plot.plotProfileClass "+str(space))
    # Load depth
    depth = None
    depth = Print.readDepth(address, run)
    # Load reduced depth
    col_reduced = None
    col_reduced = Print.readColreduced(address, run)
    col_reduced_array = np.arange(col_reduced)
    #
    # Vertical axis: actual depths, or PCA component index in 'reduced' space.
    depth_array = None
    depth_array = depth
    if space == 'reduced':
        depth_array = col_reduced_array
    # Load class properties
    gmm_weights, gmm_means, gmm_covariances = None, None, None
    gmm_weights, gmm_means, gmm_covariances = Print.readGMMclasses(address,\
                        run, depth_array, space)
    fig, ax1 = plt.subplots()
    # One curve per class.
    for d in range(n_comp):
        ax1.plot(gmm_means[d,:], depth_array, lw = 1, label = "Class "+str(d))
    if space == 'depth':
        ax1.set_xlabel("Normalized Temperature Anomaly /degree")
        ax1.set_ylabel("Depth")
        ax1.set_xlim(-3,3)
    elif space == 'uncentred':
        ax1.set_xlabel("Temperature /degrees")
        ax1.set_ylabel("Depth")
    elif space == 'reduced':
        ax1.set_xlabel("Normalized Anomaly")
        ax1.set_ylabel("Reduced Depth")
    # Depth increases downwards.
    ax1.invert_yaxis()
    ax1.grid(True)
    ax1.legend(loc='best')
    #ax1.set_title("Class Profiles with Depth in SO - "+space)
    filename = address+"Plots/Class_Profiles_"+space+"_n"+str(n_comp)+".pdf"
    if run != None:
        filename = address+"Plots/Class_Profiles_"+space+"_run"+str(int(run))+"_n"+str(n_comp)+".pdf"
    plt.savefig(filename,bbox_inches="tight",transparent=True)
    plt.show()
###############################################################################
###############################################################################
def plotProfile(address, run, space): # Uses traing profiles at the moment
    """Overlay every training profile (faint grey) against depth.

    space selects the representation: 'depth' (centred anomalies),
    'uncentred' (reconstructed temperatures) or 'original' (raw training
    temperatures).
    """
    # space will be 'depth', 'original' or 'uncentred'
    print("Plot.plotProfileClass "+str(space))
    # Load depth
    depth = None
    depth = Print.readDepth(address, run)
    #
    depth_array = None
    depth_array = depth
    X_profiles = None
    if space == 'uncentred' or space == 'depth':
        # Load profiles
        lon_train, lat_train, X_train, X_train_centred, varTime_train = None, None, None, None, None
        lon_train, lat_train, X_train, X_train_centred, varTime_train = \
                Print.readReconstruction(address, run, depth, True)
        """
        lon_train, lat_train, Tint_train_array, X_train_array, \
            Sint_train_array, varTime_train = None, None, None, None, None, None
        lon_train,lat_train, Tint_train_array, X_train_array, \
            Sint_train_array, varTime_train = Print.readLoadFromFile_Train(address, run, depth)
        X_train_centred = X_train_array
        """
        if space == 'uncentred':
            X_profiles = X_train
        if space == 'depth':
            X_profiles = X_train_centred
    elif space == 'original':
        lon_train, lat_train, Tint_train_array, X_train_array, \
            Sint_train_array, varTime_train = None, None, None, None, None, None
        lon_train, lat_train, Tint_train_array, X_train_array, \
            Sint_train_array, varTime_train = Print.readLoadFromFile_Train(address, run, depth)
        X_profiles = Tint_train_array
    fig, ax1 = plt.subplots()
    # Very low alpha so the density of overlapping profiles shows up.
    for d in range(np.ma.size(X_profiles, axis=0)):
        ax1.plot(X_profiles[d,:], depth_array, lw = 1, alpha = 0.01, color = 'grey')
    if space == 'depth':
        ax1.set_xlabel("Normalized Temperature Anomaly /degree")
        ax1.set_ylabel("Depth")
    elif space == 'uncentred':
        ax1.set_xlabel("Temperature /degrees")
        ax1.set_ylabel("Depth")
    ax1.invert_yaxis()
    ax1.grid(True)
    ax1.legend(loc='best')
    #ax1.set_title("Profiles with Depth in SO - "+space)
    # NOTE(review): these two calls override the space-specific labels set
    # above, so every variant is labelled in degrees/dbar — confirm intended.
    ax1.set_xlabel("Temperature /degrees")
    ax1.set_ylabel("Depth /dbar")
    filename = address+"Plots/Profiles_"+space+".pdf"
    if run != None:
        filename = address+"Plots/Profiles_"+space+"_run"+str(int(run))+".pdf"
    plt.savefig(filename,bbox_inches="tight",transparent=True)
    plt.show()
###############################################################################
###############################################################################
def plotGaussiansIndividual(address, run, n_comp, space, Nbins=1000):
    """At a few selected depth levels, overlay the per-class 1D Gaussians
    (and their weighted sum) on a histogram of the training data.

    space selects the representation ('depth', 'reduced' or 'uncentred');
    Nbins is the histogram bin count.
    """
    # space will be 'depth', 'reduced' or 'uncentred'
    print("Plot.plotGaussiansIndividual "+str(space))
    if space == 'depth' or space == 'uncentred':
        # Load depth
        depth = None
        depth = Print.readDepth(address, run)
        depth_array = depth
        print("depth.shape = ", depth.shape)
        # Indices of the depth levels to plot (-1 = deepest level).
        depth_array_mod = np.array([0,50,100,150,-1])
        print("depth_array_mod.shape = ", depth_array_mod.shape)
        # Load X_train array and X_train_centred array
        lon_train, lat_train, X_train, X_train_centred, varTime_train = None, None, None, None, None
        lon_train, lat_train, X_train, X_train_centred, varTime_train = \
                Print.readReconstruction(address, run, depth, True)
        """
        lon_train, lat_train, Tint_train_array, X_train_array, \
            Sint_train_array, varTime_train = None, None, None, None, None, None
        lon_train,lat_train, Tint_train_array, X_train_array, \
            Sint_train_array, varTime_train = Print.readLoadFromFile_Train(address, run, depth)
        X_train_centred = X_train_array
        """
        print("VALUE = ", X_train_centred[10,0])
    if space == 'reduced':
        # Load reduced depth
        col_reduced = None
        col_reduced = Print.readColreduced(address, run)
        depth_array = np.arange(col_reduced)
        # In reduced space, plot every PCA component.
        depth_array_mod = depth_array
        lon_train, lat_train, X_train_centred, varTime_train = None, None, None, None
        lon_train, lat_train, X_train_centred, varTime_train = \
                Print.readPCAFromFile_Train(address, run, col_reduced)
        print("VALUE = ", X_train_centred[10,0])
    # Load class properties
    gmm_weights, gmm_means, gmm_covariances = None, None, None
    gmm_weights, gmm_means, gmm_covariances = Print.readGMMclasses(address,\
                        run, depth_array, space)
    if space == 'uncentred':
        # Undo the standardization so means/covariances are in degrees.
        # NOTE: 'input' here shadows the builtin of the same name.
        stand = None
        with open(address+"Objects/Scale_object.pkl", 'rb') as input:
            stand = pickle.load(input)
        gmm_means = stand.inverse_transform(gmm_means)
        gmm_covariances = stand.inverse_transform(gmm_covariances)
    print("Shapes: ", gmm_weights.shape, gmm_means.shape, gmm_covariances.shape)
    print("depth_array_mod.shape = ", depth_array_mod.shape)
    # Define the gaussian function
    def gaussianFunc(x, mu, cov):
        # Normal PDF with mean mu and variance cov.
        return (np.exp(-np.power(x - mu, 2.) / (2 * cov)))/(np.sqrt(cov*np.pi*2))
    for i in range(len(depth_array_mod)):
        print("About to plot")
        # Data values at this depth level (un-centred values when requested).
        X_row = None
        X_row = X_train_centred[:,int(depth_array_mod[i])]
        if space == 'uncentred':
            X_row = None
            X_row = X_train[:,int(depth_array_mod[i])]
        means_row, cov_row = None, None
        means_row = gmm_means[:,int(depth_array_mod[i])]
        # abs() guards against tiny negative variances from the inverse transform.
        cov_row = abs(gmm_covariances[:,int(depth_array_mod[i])])
        print("Covariance = ", cov_row)
        xmax, xmin = None, None
        xmax = np.max(X_row)*1.1
        xmin = np.min(X_row)*1.1
        print("Xmin = ", xmin, "Xmax = ", xmax)
        fig, ax1 = plt.subplots()
        x_values = None
        x_values = np.linspace(xmin, xmax, 120)
        print(x_values.shape, min(x_values), max(x_values))
        # One weighted Gaussian per class, accumulated for the overall mixture.
        y_total = np.zeros(n_comp*120).reshape(n_comp,120)
        for n in range(n_comp):
            y_gaussian = None
            # y_gaussian = gmm_weights[n]*gaussianFunc(x_values, gmm_means[n,int(depth_array_mod[i])] , gmm_covariances[n,int(depth_array_mod[i])]) # Use if diag
            y_gaussian = gmm_weights[n]*gaussianFunc(x_values, means_row[n] , cov_row[n]) # Use if diag
            y_total[n,:] = y_gaussian
            ax1.plot(x_values, y_gaussian, label=str(n))
        ax1.plot(x_values, np.sum(y_total,axis=0), lw = 2, color = 'black', label="Overall") # Use if diag
        # NOTE(review): hist(normed=True) was removed in Matplotlib 3.x; on
        # newer Matplotlib this must become density=True.
        ax1.hist(X_row, bins=Nbins, normed=True, facecolor='grey', lw = 0)
        ax1.set_ylabel("Probability density")
        ax1.set_xlabel("Normalized Temperature Anomaly")
        if space == 'reduced':
            ax1.set_xlabel("Normalized Anomaly")
        if space == 'uncentred':
            ax1.set_xlabel("Temperature /degrees")
        ax1.set_title("GMM n = "+str(n_comp)+", "+space+" = "+str(int(depth_array[depth_array_mod[i]])))
        ax1.grid(True)
        ax1.set_xlim(xmin,xmax)
        ax1.legend(loc='best')
        plt.savefig(address+"Plots/TrainHisto_Gaussians_n"+str(n_comp)+"_"+space+str(int((depth_array[depth_array_mod[i]])))+".pdf",bbox_inches="tight",transparent=True)
        plt.show()
###############################################################################
def plotBIC(address, repeat_bic, max_groups, trend=True):
    """Plot mean BIC vs number of GMM classes, with per-run curves and,
    optionally, a double-exponential trend fit whose minimum is marked.

    repeat_bic -- number of repeated BIC runs loaded via Print.readBIC
    max_groups -- largest class count tested (exclusive upper bound)
    trend      -- fit and draw the exponential trend line when True
    """
    # Load the data and define variables first
    bic_many, bic_mean, bic_stdev, n_mean, n_stdev, n_min = None, None, None, None, None, None
    bic_many, bic_mean, bic_stdev, n_mean, n_stdev, n_min = Print.readBIC(address, repeat_bic)
    n_comp_array = None
    n_comp_array = np.arange(1, max_groups)
    print("Calculating n and then averaging across runs, n = ", n_mean, "+-", n_stdev)
    print("Averaging BIC scores and then calculating, n = ", n_min)
    # Plot the results
    fig, ax1 = plt.subplots()
    ax1.errorbar(n_comp_array, bic_mean, yerr = bic_stdev, lw = 2, ecolor = 'black', label = 'Mean BIC Score')
    if trend:
        # Plot the trendline
        #initial_guess = [20000, 1, 20000, 0.001]
        initial_guess = [47030, 1.553, 23080, 0.0004652]
        # Falling + rising exponential: captures the BIC's dip-then-rise shape.
        def expfunc(x, a, b, c, d):
            return (a * np.exp(-b * x)) + (c * np.exp(d * x))
        popt, pcov, x, y = None, None, None, None
        popt, pcov = curve_fit(expfunc, n_comp_array, bic_mean, p0 = initial_guess, maxfev=10000)
        print("Exponential Parameters = ", *popt)
        x = np.linspace(1, max_groups, 100)
        y = expfunc(x, *popt)
        ax1.plot(x, y, 'r-', label="Exponential Fit")
        # Mark the fitted curve's minimum as the suggested class count.
        y_min_index = np.where(y==y.min())[0]
        x_min = (x[y_min_index])[0]
        ax1.axvline(x=x_min, linestyle=':', color='black', label = 'Exponential Fit min = '+str(np.round_(x_min, decimals=1)))
    # Plot the individual and minimum values
    ax1.axvline(x=n_mean, linestyle='--', color='black', label = 'n_mean_min = '+str(n_mean))
    ax1.axvline(x=n_min, linestyle='-.', color='black', label = 'n_bic_min = '+str(n_min))
    for r in range(repeat_bic):
        ax1.plot(n_comp_array, bic_many[r,:], alpha = 0.3, color = 'grey')
    ax1.set_ylabel("BIC value")
    ax1.set_xlabel("Number of classes in GMM")
    ax1.grid(True)
    ax1.set_title("BIC values for GMM with different number of components")
    ax1.set_ylim(min(bic_mean)*0.97, min(bic_mean)*1.07)
    ax1.legend(loc='best')
    if trend:
        plt.savefig(address+"Plots/BIC_trend.pdf",bbox_inches="tight",transparent=True)
    else:
        plt.savefig(address+"Plots/BIC.pdf",bbox_inches="tight",transparent=True)
    plt.show()
###############################################################################
# Use the VBGMM to determine how many classes we should use in the GMM
def plotWeights(address, run):
    """Scatter the VBGMM class weights in descending order, with two
    reference thresholds (1/(n+1) and 1/(n+5)) to judge negligible classes."""
    # Load depth
    depth = None
    depth = Print.readDepth(address, run)
    # Load Weights
    gmm_weights, gmm_means, gmm_covariances = None, None, None
    gmm_weights, gmm_means, gmm_covariances = Print.readGMMclasses(address, run, depth, 'depth')
    n_comp = len(gmm_weights)
    class_array = np.arange(0,n_comp,1)
    # Plot weights against class number
    fig, ax1 = plt.subplots()
    # Sort descending so the weight spectrum is easy to read.
    ax1.scatter(class_array, np.sort(gmm_weights)[::-1], s = 20, marker = '+', color = 'blue', label = 'Class Weights')
    ax1.axhline(y=1/(n_comp+1), linestyle='-.', color='black', label = str(np.round_(1/(n_comp+1), decimals=3))+' threshold')
    ax1.axhline(y=1/(n_comp+5), linestyle='--', color='black', label = str(np.round_(1/(n_comp+5), decimals=3))+' threshold')
    ax1.set_xlabel("Class")
    ax1.set_xlim(-1,n_comp)
    ax1.set_ylabel("Weight")
    ax1.grid(True)
    ax1.set_title("VBGMM Class Weights")
    ax1.legend(loc='best')
    plt.savefig(address+"Plots/Weights_VBGMM.pdf", bbox_inches="tight",transparent=True)
    # NOTE(review): time.clock() was removed in Python 3.8; start_time is set
    # at module load, so this prints time since import, not since this call.
    print('Plot runtime = ', time.clock() - start_time,' s')
#import packages
from sklearn.externals import joblib
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix, classification_report, r2_score
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn import metrics
import time
import joblib
import pickle
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import KFold
# k fold cross validation
def calc_metrics(X_train, y_train, X_val, y_val, model):
    """Fit *model* on the training split and return (train RMSE, val RMSE).

    BUG FIX: this helper was called but never defined in the original
    script, which crashed with NameError on the first fold.
    """
    model.fit(X_train, y_train)
    train_rmse = np.sqrt(metrics.mean_squared_error(y_train, model.predict(X_train)))
    val_rmse = np.sqrt(metrics.mean_squared_error(y_val, model.predict(X_val)))
    return train_rmse, val_rmse

tic = time.perf_counter()
# different alpha levels (L2 regularization strengths to compare)
alphas = [0.001, 0.01, 0.1, 1, 10]
print('All errors are RMSE')
wine = pd.read_csv('dataComplextestC1.csv', sep=',')
pd.isnull(wine).sum() > 0
# BUG FIX: the cleaned frame was computed and then discarded; keep the result
# so infinities are actually replaced and all-NaN rows dropped.
wine = wine.replace([np.inf, -np.inf], np.nan).dropna(how="all")
data = wine.drop('tau1', axis=1)
target = wine['tau1']
K = 10
# cross validation
kf = KFold(n_splits=K, shuffle=True, random_state=42)
for alpha in alphas:
    train_errors = []
    validation_errors = []
    for train_index, val_index in kf.split(data, target):
        # BUG FIX: KFold yields positional indices; indexing a DataFrame /
        # Series with them directly raises KeyError — use .iloc.
        X_train, X_val = data.iloc[train_index], data.iloc[val_index]
        y_train, y_val = target.iloc[train_index], target.iloc[val_index]
        # instantiate model (fresh per fold so folds stay independent)
        model = MLPRegressor(activation='relu', solver='lbfgs', alpha=alpha,
                             hidden_layer_sizes=(30, 30, 30, 30, 30),
                             max_iter=10000000000000000000000)
        # calculate errors
        train_error, val_error = calc_metrics(X_train, y_train, X_val, y_val, model)
        # append to appropriate list
        train_errors.append(train_error)
        validation_errors.append(val_error)
    # generate report
    print('alpha: {:6} | mean(train_error): {:7} | mean(val_error): {}'.
          format(alpha,
                 round(np.mean(train_errors),4),
                 round(np.mean(validation_errors),4)))
# kfold2 = model_selection.ShuffleSplit(n_splits=10, test_size=0.20, random_state=100)
# model_shufflecv = MLPRegressor(activation='relu', solver='lbfgs',alpha=0.001, hidden_layer_sizes=(30,30,30,30,30), max_iter=10000000000000000000000)
# sc = StandardScaler()
# X = sc.fit_transform(X)
# results_4 = model_selection.cross_val_score(model_shufflecv, X, y, cv=kfold2)
# print("Accuracy: %.2f%% (%.2f%%)" % (results_4.mean()*100.0, results_4.std()*100.0))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-18 08:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the issue_issuerecord table for IssueRecord."""

    # first migration of this app
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='IssueRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('project_name', models.CharField(max_length=100, unique=True, verbose_name='工程名')),
                ('issue_content', models.CharField(max_length=500, null=True, verbose_name='发布内容')),
                ('issue_time', models.TimeField(null=True)),
                ('dev_person', models.CharField(max_length=100, null=True, verbose_name='开发人员')),
                ('test_person', models.CharField(max_length=100, null=True, verbose_name='测试人员')),
                ('issue_person', models.CharField(max_length=100, null=True, verbose_name='发布人员')),
                ('remark', models.CharField(max_length=500, null=True, verbose_name='备注信息')),
            ],
            options={
                'db_table': 'issue_issuerecord',
                'ordering': ['id'],
            },
        ),
    ]
|
#!/usr/bin/python
"""
A program that makes recommendations, using either user-based or
item-based filtering.
Item-based filtering usually outperforms traditional user-based filtering
in sparse datasets, and the two perform about equally in dense datasets.
Traditional user-based filtering is simpler to implement and is often more
appropriate with smaller in-memory datasets that change frequently. This
method doesn't have overhead.
This program also includes an efficient version of user-based filtering
that has equal performance as that of item-based filtering.
"""
from math import sqrt
# A sample user-centric dictionary: {critic name: {movie title: rating}}.
# Ratings are floats on a roughly 1-5 scale; not every critic rated every movie.
critics = {
    'Lisa Rose': {
        'Lady in the Water': 2.5,
        'Snakes on a Plane': 3.5,
        'Just My Luck': 3.0,
        'Superman Returns': 3.5,
        'You, Me and Dupree': 2.5,
        'The Night Listener': 3.0,
    },
    'Gene Seymour': {
        'Lady in the Water': 3.0,
        'Snakes on a Plane': 3.5,
        'Just My Luck': 1.5,
        'Superman Returns': 5.0,
        'The Night Listener': 3.0,
        'You, Me and Dupree': 3.5,
    },
    'Michael Phillips': {
        'Lady in the Water': 2.5,
        'Snakes on a Plane': 3.0,
        'Superman Returns': 3.5,
        'The Night Listener': 4.0,
    },
    'Claudia Puig': {
        'Snakes on a Plane': 3.5,
        'Just My Luck': 3.0,
        'The Night Listener': 4.5,
        'Superman Returns': 4.0,
        'You, Me and Dupree': 2.5,
    },
    'Mick LaSalle': {
        'Lady in the Water': 3.0,
        'Snakes on a Plane': 4.0,
        'Just My Luck': 2.0,
        'Superman Returns': 3.0,
        'The Night Listener': 3.0,
        'You, Me and Dupree': 2.0,
    },
    'Jack Matthews': {
        'Lady in the Water': 3.0,
        'Snakes on a Plane': 4.0,
        'The Night Listener': 3.0,
        'Superman Returns': 5.0,
        'You, Me and Dupree': 3.5,
    },
    'Toby': {'Snakes on a Plane': 4.5,
             'You, Me and Dupree': 1.0,
             'Superman Returns': 4.0
             }}
def sim_distance(prefs, p1, p2):
    """
    Computes a distance-based similarity score for p1 and p2.
    :param: prefs: a user-centric or item-centric dictionary
    :param: p1: string name of a person or item
    :param: p2: string name of another person or item
    :return: a distance-based similarity score (0 < score < 1;
            higher -> more similar)
    """
    common = set(prefs[p1]) & set(prefs[p2])
    # Nothing rated in common means no basis for similarity.
    if not common:
        return 0
    squared_diffs = sum((prefs[p1][key] - prefs[p2][key]) ** 2 for key in common)
    return 1 / (1 + squared_diffs)
def sim_pearson(prefs, p1, p2):
    """
    Computes Pearson correlation coefficient for p1 and p2.
    :param: prefs: a user-centric or item-centric dictionary
    :param: p1: string name of a person or item
    :param: p2: string name of another person or item
    :return: Pearson correlation coefficient for p1 and p2
            (-1 < coefficient < 1; 1 means perfect match)
    """
    common = [key for key in prefs[p1] if key in prefs[p2]]
    n = len(common)
    if n == 0:
        return 0
    # Paired rating vectors over the shared keys.
    xs = [prefs[p1][key] for key in common]
    ys = [prefs[p2][key] for key in common]
    sum_x, sum_y = sum(xs), sum(ys)
    sum_x_sq = sum(x * x for x in xs)
    sum_y_sq = sum(y * y for y in ys)
    sum_xy = sum(x * y for x, y in zip(xs, ys))
    # Pearson score from the raw sums.
    numerator = sum_xy - sum_x * sum_y / n
    denominator = sqrt((sum_x_sq - sum_x ** 2 / n) * (sum_y_sq - sum_y ** 2 / n))
    if denominator == 0:
        return 0
    return numerator / denominator
def sim_tanimoto(prefs, p1, p2):
    """
    Computes a Tanimoto score (intersection / union) for p1 and p2.
    This score depends on the number of same items between
    p1 and p2, but doesn't consider the ratings of the items.
    Best use case: binary values (on/off; 0/1)
    :param: prefs: a user-centric or item-centric dictionary
    :param: p1: string name of a person or item
    :param: p2: string name of another person or item
    :return: a Tanimoto score (0 < score < 1;
            higher -> more similar)
    """
    # Idiom fix: the original built throwaway lists just to count them;
    # len() on the dicts and a set intersection give the same counts.
    p1_len = len(prefs[p1])
    p2_len = len(prefs[p2])
    shared_len = len(set(prefs[p1]) & set(prefs[p2]))
    # No item or no item in common
    if p1_len == 0 or p2_len == 0 or shared_len == 0:
        return 0
    return float(shared_len) / (p1_len + p2_len - shared_len)
def top_matches(prefs, person, n=5, similarity=sim_pearson):
    """
    Computes the top n matches for person from the prefs dictionary.
    :param: prefs: a user-centric or item-centric dictionary
    :param: person: string name of a person
    :param: n: number of matches (default=5)
    :param: similarity: similarity metrics (default=pearson coefficient)
    :return: a list of tuples (similarity score, name)
    """
    scored = []
    for candidate in prefs:
        if candidate == person:
            continue  # never match someone against themselves
        scored.append((similarity(prefs, person, candidate), candidate))
    scored.sort(reverse=True)
    return scored[:n]
def transform_prefs(prefs):
    """
    Transforms the format between user-centric and item-centric dictionary.
    :param: prefs: a user-centric or item-centric dictionary
    :return: an item-centric dictionary if input is user-centric;
            user-centric one otherwise
    """
    flipped = {}
    for outer_key, ratings in prefs.items():
        for inner_key, rating in ratings.items():
            # flip the nesting: inner key becomes the outer key
            flipped.setdefault(inner_key, {})[outer_key] = rating
    return flipped
def user_based_recommend(prefs, person, similarity=sim_pearson):
    """
    Recommends items to user with user-based methods.
    :param: prefs: a user-centric or item-centric dictionary
    :param: person: string name of a person
    :param: similarity: similarity metrics (default=pearson coefficient)
    :return: a list of tuples (estimated rating, recommended item)
    """
    weighted_totals = {}
    similarity_sums = {}
    for other in prefs:
        if other == person:
            continue  # don't compare to oneself
        score = similarity(prefs, person, other)
        if score <= 0:
            continue  # ignore scores of zero or lower
        for item in prefs[other]:
            # only consider items this person hasn't rated yet
            if item not in prefs[person] or prefs[person][item] == 0:
                # accumulate other's rating weighted by similarity
                weighted_totals[item] = (weighted_totals.get(item, 0)
                                         + prefs[other][item] * score)
                similarity_sums[item] = similarity_sums.get(item, 0) + score
    # normalize by the total similarity weight per item
    rankings = [(total / similarity_sums[item], item)
                for item, total in weighted_totals.items()]
    rankings.sort(reverse=True)
    return rankings
def sim_user_base(prefs, n=5):
    """
    Creates a dictionary of users and their top n similar users.
    Note: This function needs to be run more often early on when the user base
    and number of rating is small. As the user base grows, the scores will
    become more stable.
    :param: prefs: a user-centric dictionary
    :param: n: the number of most similar users (default=5)
    :return: a dictionary of users and their most similar users
    """
    user_base = {}
    c = 0
    for user in prefs:
        # Status update for large datasets (Python 2 print statement)
        c += 1
        if c % 100 == 0:
            print "{0} / {1}".format(c, len(prefs))
        # Find the n most similar users to this user
        scores = top_matches(prefs, user, n=n, similarity=sim_distance)
        user_base[user] = scores
    return user_base
def user_based_recommend_efficient(prefs, sim_user_base, user):
    """
    An efficient version of user-based recommendation that
    builds a similarity user base in advance.
    :param: prefs: a user-centric dictionary
    :param: sim_user_base: a pre-calculated similar users base
    :param: user: a string user name
    :return: a list of tuples (estimated score, recommended item)
    """
    weighted_totals = {}
    similarity_sums = {}
    # only the precomputed neighbors of this user are consulted
    for sim_score, neighbor in sim_user_base[user]:
        for item, rating in prefs[neighbor].items():
            if item in prefs[user]:
                continue  # skip items the user already rated
            weighted_totals[item] = weighted_totals.get(item, 0) + rating * sim_score
            similarity_sums[item] = similarity_sums.get(item, 0) + sim_score
    # normalize each weighted sum by the total similarity weight
    rankings = [(total / similarity_sums[item], item)
                for item, total in weighted_totals.items()]
    rankings.sort(reverse=True)
    return rankings
# Item-based filtering: comparisons between items will not change as often
# as comparison between users. This means it can be done at low-traffic
# times or on a computer separate from the main application
def sim_item_base(prefs, n=10):
    """
    Creates a dictionary of items and their top n similar items.
    Note: This function needs to be run more often early on when the user base
    and number of rating is small. As the user base grows, the scores will
    become more stable.
    :param: prefs: a user-centric dictionary
    :param: n: the number of most similar items (default=10)
    :return: a dictionary of items and their most similar items
    """
    item_base = {}
    # Invert the preference matrix to be item-centric
    item_prefs = transform_prefs(prefs)
    c = 0
    for item in item_prefs:
        # Status update for large datasets (Python 2 print statement)
        c += 1
        if c % 100 == 0:
            print "{0} / {1}".format(c, len(item_prefs))
        # Find the n most similar items to this one
        scores = top_matches(item_prefs, item, n=n, similarity=sim_distance)
        item_base[item] = scores
    return item_base
def item_based_recommend(prefs, sim_item_base, user):
    """
    Recommends items to user with item-based methods.
    (Don't have to calculate the similarities scores for all the other critics
    because the item similarity dataset was built in advance)
    :param: prefs: a user-centric dictionary
    :param: sim_item_base: a pre-calculated similar item base
    :param: user: a string user name
    :return: a list of tuples (estimated score, recommended item)
    """
    rated_by_user = prefs[user]
    weighted_totals = {}
    similarity_sums = {}
    # walk every (item, rating) this user has, then each item similar to it
    for rated_item, rating in rated_by_user.items():
        for sim_score, candidate in sim_item_base[rated_item]:
            if candidate in rated_by_user:
                continue  # ignore if user has already rated the candidate
            # weighted sum of the user's rating by item similarity
            weighted_totals[candidate] = (weighted_totals.get(candidate, 0)
                                          + rating * sim_score)
            similarity_sums[candidate] = similarity_sums.get(candidate, 0) + sim_score
    # divide each weighted sum by the total similarity weight
    rankings = [(total / similarity_sums[candidate], candidate)
                for candidate, total in weighted_totals.items()]
    rankings.sort(reverse=True)
    return rankings
# MovieLens Dataset (http://www.grouplens.org/node/12)
# The MovieLens dataset on your local disk
path = ""


def load_movielens(path=path):
    """
    Loads in the MovieLens dataset and transform it into user-centric dict.
    Fix: files are now opened with context managers so both handles are
    closed deterministically (the original leaked them).
    :param: path: the path of the MovieLens dataset on your local disk
    :return: a user-centric dictionary
    """
    # get the movie titles, keyed by movie id
    movies = {}
    with open(path + "/u.item") as item_file:
        for line in item_file:
            mov_id, mov_title = line.split("|")[0:2]
            movies[mov_id] = mov_title
    # load ratings data: one tab-separated record per (user, movie) pair
    prefs = {}
    with open(path + "/u.data") as data_file:
        for line in data_file:
            (user_id, movie_id, rating, timestamp) = line.split("\t")
            prefs.setdefault(user_id, {})
            prefs[user_id][movies[movie_id]] = float(rating)
    return prefs
|
class Solution:
    def findClosestNumber(self, nums: List[int]) -> int:
        """Return the element of nums closest to zero; ties go to the larger value."""
        # Sentinel larger in magnitude than any value in the input range.
        best = 100005
        for candidate in nums:
            # Lexicographic key: smaller magnitude first, then larger value.
            if (abs(candidate), -candidate) < (abs(best), -best):
                best = candidate
        return best
|
import sys
import re
# Echo every stdin line that does NOT match the regex given as argv[1]
# (an inverted grep). Bug fix: input() raises EOFError at end of input,
# which previously crashed the loop; exit cleanly instead.
try:
    while True:
        line = input()
        if not re.match(sys.argv[1], line):
            print(line)
except EOFError:
    pass
|
from enum import IntEnum
# Alignment constants for positioning; the functional IntEnum form keeps the
# member order explicit and numbers them 0..4 exactly as before.
Align = IntEnum("Align", ["Left", "Right", "Center", "Top", "Bottom"], start=0)
|
import inspect
import json
from collections import Callable
from functools import wraps
from pydoc import locate
import time
def debug(fn):
    """
    Decorate a function for debug purposes: print its name and arguments,
    time the call, and print the elapsed time.
    :param fn: the function that was decorated
    :return: a wrapper that logs around each call and returns fn's result
    """
    @wraps(fn)
    def timed_call(*args, **kwargs):
        print('=====================================================')
        print('fn: {0} \nargs: {1} \nkwargs: {2}'.format(fn.__name__, args, kwargs))
        started = time.time()
        outcome = fn(*args, **kwargs)
        elapsed = time.time() - started
        print("Execution time = {0:.5f}".format(elapsed))
        print('=====================================================')
        return outcome
    return timed_call
@debug
def pull_data(json_input):
    """
    Dynamically import and use a method within your code.
    :param json_input: JSON string of the form
        {"begin": {"location": <module>, "function": <name>}, "args": {...}}
    :return: the result of calling the located function
    """
    json_input = json.loads(json_input)
    location = json_input.get('begin').get('location')
    fn = json_input.get('begin').get('function')
    arguments = json_input.get('args')
    expected_variables, needed_function = [], getattr(locate(location), fn)
    # Fix: collections.Callable was removed in Python 3.10; the builtin
    # callable() performs the same check.
    # NOTE(review): the top-of-file `from collections import Callable` is now
    # unused and should be dropped (it fails to import on Python >= 3.10).
    if callable(needed_function):
        if arguments:
            # Fix: inspect.getargspec was removed in Python 3.11;
            # getfullargspec exposes the same .args list.
            for each in inspect.getfullargspec(needed_function).args:
                # the example below only works for methods but not functions
                # get_the_method.setdefault(each, fields.get(each))
                expected_variables.append(arguments.get(each))
            return needed_function(*expected_variables)
        else:
            return needed_function()
if __name__ == "__main__":
    # Demo: dispatch into the project-local "collectibles" module by name,
    # first with keyword-style args, then with a zero-argument function.
    data = '{"begin": {"location": "collectibles", "function": "defines"}, "args": {"age": "3", "name": "ChatBot"}}'
    print(pull_data(data))
    print("\n")
    data = '{"begin": {"location": "collectibles", "function": "utc"}}'
    print(pull_data(data))
|
# -*- coding: utf-8 -*-
"""
Much of this program is quoted from "Python Cookbook, 2nd Edition" p.479 (O'Reilly Japan).
Only the method Element.__getattr__ is written by me.
And changed to encode strings to UTF-8.
"""
from xml.parsers import expat
class Element(object):
    """A single XML element: tag name, attributes, character data, children."""

    def __init__(self, name, attributes):
        self.name = name              # tag name
        self.attributes = attributes  # dict: attribute name -> value
        self.cdata = u''              # accumulated character data
        self.children = []            # child Elements in document order

    def __getattr__(self, key):
        # Convenience: element.item is equivalent to element.getElements('item').
        return self.getElements(key)

    def addChild(self, element):
        """Append a child element."""
        self.children.append(element)

    def getAttribute(self, key):
        """Return the attribute value for key, or None when absent."""
        return self.attributes.get(key)

    def getData(self):
        """Return the accumulated character data."""
        return self.cdata

    def getElements(self, name=''):
        """Return children named `name`, or all children when name is empty.

        Bug fix: the original implicitly returned None when called with an
        empty name; it now returns a (copied) list of all children.
        """
        if name:
            return [child for child in self.children if child.name == name]
        return list(self.children)
class Xml2Obj(object):
    """Expat-based parser that builds an Element tree from an XML string.

    NOTE(review): the .decode("utf-8") calls assume expat hands back byte
    strings, i.e. Python 2; under Python 3 expat already yields str and
    these calls would fail at runtime.
    """

    def __init__(self):
        self.root = None       # root Element once parsing has started
        self.nodeStack = []    # stack of currently-open elements

    def StartElement(self, name, attributes):
        # Decode the tag name and attribute values to unicode, build the
        # Element, and attach it to the currently-open parent (if any).
        attributes = dict([(key, attributes[key].decode("utf-8")) for key in attributes])
        element = Element(name.decode("utf-8"), attributes)
        if self.nodeStack:
            parent = self.nodeStack[-1]
            parent.addChild(element)
        else:
            self.root = element
        self.nodeStack.append(element)

    def EndElement(self, name):
        # self.nodeStack[-1].pop()
        # I think following is exact.
        self.nodeStack.pop()

    def CharacterData(self, data):
        # Append non-whitespace character data to the innermost open element.
        if data.strip():
            data = data.decode("utf-8")
            element = self.nodeStack[-1]
            element.cdata += data

    def parse(self, string):
        """Parse the XML string and return the root Element."""
        Parser = expat.ParserCreate()
        Parser.StartElementHandler = self.StartElement
        Parser.EndElementHandler = self.EndElement
        Parser.CharacterDataHandler = self.CharacterData
        ParserStatus = Parser.Parse(string, True)
        return self.root
|
import controller
import plant
import shac
# Compose the controller and plant hybrid automata, then compile the composed
# automaton (shac, controller and plant are project-local modules).
ha = shac.comp([controller.controller, plant.plant])
shac.compile(ha, COMPOSED=True, ABOF=True)
|
from backbone import *
import re
import os
import time
'''
To check if the base job is running continuously and with similar timings
'''
# Python 2 monitoring script (backbone framework): for each configured base
# job, find its RUNNING oozie coordinator, collect per-iteration run times,
# and fail the report when any iteration deviates too far from the mean.
# NOTE(review): indentation reconstructed from control-flow semantics.
nodes = get_nodes_by_type('namenode')
jobStats = {}
for node in nodes :
    # only the master namenode talks to oozie
    if node.isMaster(log = False ):
        for job in get_var('basejobs'):
            output = node.oozieCmd('show coordinator RUNNING jobs' ).split('\n')
            output = filter(lambda x : job in x ,output )
            logger.info('AAA %s : %s' % (job,output ))
            # several matching lines: pick the one whose tokens contain the job name
            if len(output) >1 :
                for line in output:
                    logger.info(line)
                    line = re.sub('\t', ' ',line)
                    if job in line.split(' ') :
                        jobid = line.split(' ')[0]
            else :
                jobid = re.sub('\s+' , ' ',output[0]).split(' ')[0]
            logger.info('%s : Job id : %s ' % (job,jobid) )
            # job Id found now checking the job statistics
            output = node.oozieCmd('show job %s ' % jobid )
            output = filter(lambda x : jobid in x ,output.split('\n') )
            output = output[1:]
            for line in output:
                line = re.sub('\t' , ' ', line ).split(' ')
                for k,v in enumerate(line) :
                    logger.info('k,v : %s,%s ' %(k,v) )
                # column 11 carries the timezone; 9/10 and 15/16 are the
                # start and end date/time columns of the oozie listing
                os.environ['TZ']=line[11]
                startTime = int(time.mktime(time.strptime('%s %s' % (line[9],line[10] ) ,'%Y-%m-%d %H:%M:%S') ))
                endTime = int(time.mktime(time.strptime('%s %s' % (line[15],line[16] ) ,'%Y-%m-%d %H:%M:%S') ))
                # value is "<duration seconds>,<status column>"
                jobStats['%s:%s:%s' % (job,jobid,line[1])] = '%s,%s '% (endTime - startTime,line[14] )
            logger.info('Job stats for job : %s : \n %s ' % ( job,jobStats ) )
            stats = {}
            stateStats = {}
            for k,v in enumerate(jobStats ) :
                stats[v.split(':')[-1]] = jobStats[v].split(',')[0]
                stateStats[v.split(':')[-1]] = jobStats[v].split(',')[-1][3:]
            logger.info('State of iterations :\n%s ' % stateStats )
            # flag killed/failed iterations
            for k in stateStats :
                if 'KILL' in k or 'FAIL' in k :
                    logger.error('%s : Itreation number %s is getting killed please check. ' % ( job, k ) )
            # Python 2 tuple-unpacking lambda and iteritems below
            stats = dict(map(lambda (x,y) : (x,int(y)) , stats.iteritems() ) )
            logger.info('AAAAAA : %s' % stats )
            mean = reduce(lambda x,y : x+y, stats.values() )/len(stats.values())
            logger.info('Mean avg : %s' % reduce(lambda x,y : x+y, stats.values() ) )
            # convert each duration to its percentage deviation from the mean
            for (k,v) in stats.iteritems() :
                if v == 0 :
                    stats[k] = 0
                else :
                    stats[k] = (abs(v-mean)/float(v))*100
            logger.info('XXXX %s ' % stats )
            ERROR_FLAG = 0
            for v in stats.values() :
                if v > int(get_var('job_time_threshold' )) :
                    logger.error ( 'Deviation from threshold seen in job: %s. Mean : %s and percentage deviation(%%) : %s ' % (job,mean,v) )
                    ERROR_FLAG = 1
                else :
                    logger.info('No deviation seen for job : %s ' % job )
            if ERROR_FLAG :
                report.fail('Deviation seen in job run timmings. Job : %s ' % job )
|
# Generated by Django 2.1.7 on 2019-06-18 09:20
from django.db import migrations
class Migration(migrations.Migration):
    """Renames 'sweetness' to 'sweet' on both whisky-note models."""

    dependencies = [
        ('whiskydatabase', '0011_auto_20190618_1718'),
    ]

    operations = [
        migrations.RenameField(
            model_name='generalwhiskynote',
            old_name='sweetness',
            new_name='sweet',
        ),
        migrations.RenameField(
            model_name='personalwhiskynote',
            old_name='sweetness',
            new_name='sweet',
        ),
    ]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Script Parser Scope Handler Functions
This module provides the functions registered into parser under with_scope or for_scope category.
Scope handler nodes are StmtNodes with body, which are used to handle such scenarios.
1. For scope handler
When registering a for scope handler, the first 4 arguments must be parser, node, body, loop_vars
and these arguments will provided by TVM Script parser automatically
.. code-block:: python
for loop_vars in tir.xxx():
2. With scope handler
There are 4 subtypes of with scope handlers, classified by
1) with or without as
2) allow concise scoping or not
1) with as & concise
the first 2 arguments must be parser, node
Need to parse the body manually
Example : tir.alloc_with_scope
.. code-block:: python
target = tir.xxx()
with tir.xxx() as target:
2) with as & not concise
the first 2 arguments must be parser, node
Need to parse the body manually
Example : None atm
.. code-block:: python
with tir.xxx() as target:
3) without as & concise
the first 3 arguments must be parser, node, body
TVM Script parser will parse the body automatically
Example : tir.allocate()/tir.realize()/tir.attr()
.. code-block:: python
tir.xxx()
with tir.xxx():
4) without as & not concise
the first 3 arguments must be parser, node, body
TVM Script parser will parse the body automatically
Example : tir.assert()/tir.let()
.. code-block:: python
with tir.xxx():
"""
# pylint: disable=redefined-builtin, unused-argument, invalid-name
from typed_ast import ast3 as ast
import tvm.tir
from .registry import register_with_scope, register_for_scope
# With scope handler
@register_with_scope(concise=True, with_var=True)
def allocate(parser, node, extents, dtype, scope, condition=True):
    """ With scope handler function tir.alloc_with_scope(var, extents, dtype, scope, condition) """
    # defining buffer var and parse the body manually
    buffer_var = tvm.te.var(parser.target[0], "handle")
    # (TODO) Uncomment this line if we have richer type info for buffer var
    # buffer_var = tvm.te.var(parser.target[0], tvm.ir.PointerType(tvm.ir.PrimType(dtype)))
    if isinstance(node, ast.With):
        # `with tir.allocate(...) as v:` form: open a fresh scope so the
        # buffer var is only visible inside the with-block, then parse it
        parser.scope_emitter.new_scope()
        parser.scope_emitter.update_symbol(buffer_var.name, buffer_var)
        parser.scope_emitter.node_stack[-1].extend(reversed(node.body))
        body = parser.get_body()
        parser.scope_emitter.pop_scope()
    else:
        # concise form: the remainder of the current block is the body
        parser.scope_emitter.update_symbol(buffer_var.name, buffer_var)
        body = parser.get_body()
    condition = tvm.runtime.convert(condition)
    scope = tvm.runtime.convert(scope)
    # wrap the body in an Allocate node, then tag its storage scope
    body = tvm.tir.Allocate(buffer_var, dtype, extents, condition, body)
    return tvm.tir.AttrStmt(buffer_var, "storage_scope", scope, body)
@register_with_scope(concise=True)
def launch_thread(parser, node, body, env_var, extent):
    """With scope handler for tir.launch_thread(env_var, extent)."""
    extent = tvm.runtime.convert(extent)
    # Bind env_var as a ThreadIndex iteration variable for this extent.
    iter_var = tvm.tir.IterVar(
        None, env_var, tvm.tir.IterVar.ThreadIndex, parser.var_env_dict[env_var]
    )
    return tvm.tir.AttrStmt(iter_var, "thread_extent", extent, body)
@register_with_scope(concise=True)
def realize(parser, node, body, buffer_bounds, scope, condition=True):
    """ With scope handler function tir.realize(buffer_bounds, scope, condition) """
    buf, bounds = buffer_bounds
    realize_node = tvm.tir.BufferRealize(buf, bounds, condition, body)
    return tvm.tir.AttrStmt(buf, "realize_scope", tvm.runtime.convert(scope), realize_node)
@register_with_scope(concise=True)
def attr(parser, node, body, attr_node, attr_key, value):
    """ With scope handler function tir.attr(attr_node, attr_key, value) """
    return tvm.tir.AttrStmt(
        tvm.runtime.convert(attr_node), attr_key, tvm.runtime.convert(value), body
    )
@register_with_scope(concise=False)
def Assert(parser, node, body, condition, message):
    """ With scope handler function tir.Assert(condition, message) """
    converted_message = tvm.runtime.convert(message)
    return tvm.tir.AssertStmt(condition, converted_message, body)
@register_with_scope(concise=False)
def let(parser, node, body, var, value):
    """With scope handler for tir.let(var, value): binds var to value over body."""
    let_stmt = tvm.tir.LetStmt(var, value, body)
    return let_stmt
# For scope handler
@register_for_scope()
def serial(parser, node, body, loop_vars, begin, end):
    """ For scope handler function tir.serial(begin, end)"""
    if len(loop_vars) != 1:
        parser.report_error("Expect exact 1 loop var")
    if begin == 0:
        extent = end
    else:
        extent = tvm.arith.Analyzer().simplify(end - begin)
    # for-type 0 == serial
    return tvm.tir.For(loop_vars[0], begin, extent, 0, 0, body)
@register_for_scope()
def parallel(parser, node, body, loop_vars, begin, end):
    """ For scope handler function tir.parallel(begin, end)"""
    if len(loop_vars) != 1:
        parser.report_error("Expect exact 1 loop var")
    if begin == 0:
        extent = end
    else:
        extent = tvm.arith.Analyzer().simplify(end - begin)
    # for-type 1 == parallel
    return tvm.tir.For(loop_vars[0], begin, extent, 1, 0, body)
@register_for_scope()
def vectorized(parser, node, body, loop_vars, begin, end):
    """ For scope handler function tir.vectorized(begin, end)"""
    if len(loop_vars) != 1:
        parser.report_error("Expect exact 1 loop var")
    if begin == 0:
        extent = end
    else:
        extent = tvm.arith.Analyzer().simplify(end - begin)
    # for-type 2 == vectorized
    return tvm.tir.For(loop_vars[0], begin, extent, 2, 0, body)
@register_for_scope()
def unroll(parser, node, body, loop_vars, begin, end):
    """ For scope handler function tir.unroll(begin, end)"""
    if len(loop_vars) != 1:
        parser.report_error("Expect exact 1 loop var")
    if begin == 0:
        extent = end
    else:
        extent = tvm.arith.Analyzer().simplify(end - begin)
    # for-type 3 == unrolled
    return tvm.tir.For(loop_vars[0], begin, extent, 3, 0, body)
@register_for_scope(name="range")
def Range(parser, node, body, loop_vars, begin, end, annotation=None):
    """ For scope handler function range(begin, end, annotation)"""
    if len(loop_vars) != 1:
        parser.report_error("Expect exact 1 loop var")
    if begin == 0:
        extent = end
    else:
        extent = tvm.arith.Analyzer().simplify(end - begin)
    # Convert the optional annotation dict into tir.Annotation nodes,
    # converting plain strings into TVM runtime objects.
    annotations = []
    if annotation is not None:
        for key, val in annotation.items():
            converted = tvm.runtime.convert(val) if isinstance(val, str) else val
            annotations.append(tvm.tir.Annotation(key, converted))
    return tvm.tir.Loop(loop_vars[0], begin, extent, annotations, body)
@register_for_scope()
def grid(parser, node, body, loop_vars, *extents):
    """ For scope handler function tir.grid(*extents) """
    if len(loop_vars) != len(extents):
        # Fix: corrected typo in the user-facing error message
        # ("Inconsitent" -> "Inconsistent").
        parser.report_error("Inconsistent number of loop vars and extents")
    # Build the loop nest inside-out: the last extent is the innermost loop.
    for loop_var, extent in zip(reversed(loop_vars), reversed(extents)):
        body = tvm.tir.Loop(loop_var, 0, extent, [], body)
    return body
|
# Package metadata for the toy_likelihood project.
__author__ = "Thibaut Louis and Xavier Garrido"
__version__ = "0.1"
__year__ = "2019"
__url__ = "https://github.com/thibautlouis/toy_likelihood"
|
import os
import sys
import subprocess
import shutil
import time
sys.path.insert(0, 'scripts')
sys.path.insert(0, 'tools/trees')
sys.path.insert(0, 'tools/mappings')
import experiments as exp
import fam
from read_tree import read_tree
import saved_metrics
import get_dico
import run_astral_multi as astral
import run_duptree
import species_analyze
def init_gene_trees_file(datadir, gene_trees, subst_model, output_dir):
    """Create the gene-trees input file (via duptree) and return its path.

    Fix: the original contained a block of statements after this return
    (astral mapping / prepare_fastrfs_script invocation) that could never
    execute; that unreachable dead code has been removed.
    """
    return run_duptree.init_gene_trees_file(datadir, gene_trees, subst_model, output_dir)
def exec_fastmulrfs(gene_trees_file, output_prefix):
    """Preprocess the gene trees for FastMulRFS, then run FastRFS on them.

    :param gene_trees_file: path to the input gene trees file
    :param output_prefix: prefix for the FastRFS output files
    """
    preprocessed_gene_trees = gene_trees_file + ".preprocessed"
    # step 1: FastMulRFS preprocessing script
    pre_command = []
    pre_command.append("python")
    pre_command.append(exp.fastmulrfs_preprocess)
    pre_command.append("-i")
    pre_command.append(gene_trees_file)
    pre_command.append("-o")
    pre_command.append(preprocessed_gene_trees)
    subprocess.check_call(pre_command)
    # step 2: FastRFS on the preprocessed trees
    command = []
    command.append(exp.fastrfs_exec)
    command.append("-i")
    command.append(preprocessed_gene_trees)
    command.append("-o")
    command.append(output_prefix)
    subprocess.check_call(command)
    # Fix: removed the os.devnull file handle that was opened but never used
    # or closed (a file-descriptor leak). To silence FastRFS output, use:
    # subprocess.check_call(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def extract_species_trees(datadir, subst_model, output_prefix, method, sub_run_name):
    """Copy the species tree produced by `method` into its run directory and print it."""
    source_tree = "{}.{}".format(output_prefix, method)
    destination = fam.get_species_tree(datadir, subst_model, sub_run_name)
    shutil.copyfile(source_tree, destination)
    print(destination)
def run_fastmulrfs(datadir, gene_trees, subst_model):
    """Run the FastMulRFS pipeline for one dataset.

    Prepares the gene-trees input, runs FastMulRFS/FastRFS, records the
    runtime metric, and extracts the resulting species tree(s).
    :param datadir: dataset directory
    :param gene_trees: gene-trees identifier used to name the run
    :param subst_model: substitution model tag
    """
    run_name = "fastmulrfs_" + gene_trees
    output_dir = fam.get_run_dir(datadir, subst_model, run_name + "_run")
    # start from a clean run directory (ignore errors if it doesn't exist)
    shutil.rmtree(output_dir, True)
    os.makedirs(output_dir)
    prefix = os.path.join(output_dir, "output")
    gene_trees_file = init_gene_trees_file(datadir, gene_trees, subst_model, output_dir)
    # time only the FastMulRFS/FastRFS execution itself
    start = time.time()
    exec_fastmulrfs(gene_trees_file, prefix)
    time1 = (time.time() - start)
    #for method in ["greedy", "single", "strict", "majority"]:
    for method in ["single"]:
        sub_run_name = run_name.replace("fastmulrfs", "fastmulrfs-" + method)
        saved_metrics.save_metrics(datadir, fam.get_run_name(sub_run_name, subst_model), time1, "runtimes")
        extract_species_trees(datadir, subst_model, prefix, method, sub_run_name)
if (__name__ == "__main__"):
    # CLI entry point: run the pipeline, then analyze the resulting trees.
    if (len(sys.argv) != 4):
        print("Syntax python run_fastmulrfs.py datadir gene_trees subst_model")
        sys.exit(1)
    run_fastmulrfs(sys.argv[1], sys.argv[2], sys.argv[3])
    species_analyze.analyze(sys.argv[1])
|
__author__ = 'korhammer'
import numpy as np
def based_floor(x, base=10):
    """
    rounds down to the last integer of base 10 (or other)
    """
    import builtins
    # Fix: np.int / np.float were removed from NumPy (deprecated in 1.20,
    # gone in 1.24); they were plain aliases of the builtins. The builtins
    # module is needed because this module shadows `int`/`float` below.
    return builtins.int(base * np.floor(builtins.float(x) / base))
def based_ceil(x, base=10):
    """
    rounds up to the next integer of base 10 (or other)
    """
    import builtins
    # Fix: np.int / np.float were removed from NumPy (deprecated in 1.20,
    # gone in 1.24); they were plain aliases of the builtins. The builtins
    # module is needed because this module shadows `int`/`float` below.
    return builtins.int(base * np.ceil(builtins.float(x) / base))
def float(x):
    """Parse x as a float; return NaN when x is empty.

    Note: this module-level helper deliberately shadows the builtin `float`
    (tolerant parsing for possibly-empty fields), so the real builtin is
    reached via the builtins module.
    """
    import builtins
    # Fix: np.float was removed from NumPy; it was an alias of the builtin.
    return builtins.float(x) if len(x) > 0 else np.nan
def int(x):
    """Parse x as an int; return 0 when x is empty.

    Note: deliberately shadows the builtin `int` (tolerant parsing for
    possibly-empty fields), so the real builtin is reached via builtins.
    """
    import builtins
    # Fix: np.int was removed from NumPy; it was an alias of the builtin.
    return builtins.int(x) if len(x) > 0 else 0
|
class Assertion(Exception):
    """
    Assertion error raised by this package.
    """
    pass
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from .base import FunctionalTest
class NewVistorTest(FunctionalTest):
    """Selenium functional tests covering the new-visitor user journeys."""

    def test_can_start_a_list_for_one_user(self):
        """A single visitor can create a list and see both items displayed."""
        # Someone heard about a list application
        # He went to the home page of this application
        self.browser.get(self.live_server_url)
        # He noticed the title and header containing a word "To-Do"
        self.assertIn('To-Do', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', header_text)
        # The applicatio invites him to input a to-do list
        inputbox = self.get_item_input_box()
        self.assertEqual(
            inputbox.get_attribute('placeholder'),
            'Enter a to-do item'
        )
        # He type "Buy peacock feathers" in a input box
        inputbox.send_keys('Buy peacock feathers')
        # He hit enter key and the page updated
        # The table shows "1: Buy peacock feathers"
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Buy peacock feathers')
        # Then the page showed another input box which could be typed other to-do list
        # He typed "Use peacock feathers to make a fly"
        inputbox = self.get_item_input_box()
        inputbox.send_keys('Use peacock feathers to make a fly')
        inputbox.send_keys(Keys.ENTER)
        # Then the page updated again, the list show those two tasks
        self.wait_for_row_in_list_table('1: Buy peacock feathers')
        self.wait_for_row_in_list_table('2: Use peacock feathers to make a fly')
        # He wanted to know if this website could remember his lists
        # He was satisfried and went to sleep

    def test_multiple_users_can_start_lists_at_different_urls(self):
        """Two different users get distinct list URLs and never see each other's items."""
        # Edith starts a new to-do list
        self.browser.get(self.live_server_url)
        inputbox = self.get_item_input_box()
        inputbox.send_keys('Buy peacock feathers')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Buy peacock feathers')
        # He saw a unique URL generated by the website
        edith_list_url = self.browser.current_url
        self.assertRegex(edith_list_url, '/lists/.+')
        # Now a new user, Francis, comes along to the site.
        ## We use a new browser session to make sure that no information
        ## of Edith's is coming through from cookies etc
        self.browser.quit()
        self.browser = webdriver.Firefox()
        # Francis visits the home page. There is no sign of Edith's
        # list
        self.browser.get(self.live_server_url)
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', page_text)
        self.assertNotIn('make a fly', page_text)
        # Francis starts a new list by entering a new item. He
        # is less interesting than Edith...
        inputbox = self.get_item_input_box()
        inputbox.send_keys('Buy milk')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Buy milk')
        # Francis gets his own unique URL
        francis_list_url = self.browser.current_url
        self.assertRegex(francis_list_url, '/lists/.+')
        self.assertNotEqual(francis_list_url, edith_list_url)
        # Again, there is no trace of Edith's list
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', page_text)
        self.assertIn('Buy milk', page_text)
        # Satisfied, they both go back to sleep
import requests
import os
import json
from bs4 import BeautifulSoup
# Baidu image-search JSON endpoint; queryWord/word take the keyword and
# pn is the page offset (30 results per page, set by rn=30).
url1 = "http://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%s&cl=&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic=0&word=%s&s=&se=&tab=&width=0&height=0&face=&istype=&qc=&nc=&fr=&expermode=&pn=%d&rn=30&gsm=78&1542077533697="
print("下载器启动!!")
keyword = input("请输入需要下载的图片内容:")
path = "C:\\Users\\cnzhougi1\\Desktop\\"+keyword
if os.path.exists(path):
    print("该内容您已经下载过,本次下载将会覆盖")
else:
    os.mkdir(path)
# Keep prompting until the user enters a valid integer download count.
while True:
    try:
        max_str = input("请输入下载数量:")
        num = int(max_str)
        break
    except Exception:
        print("输入的数量格式有误")
        continue
filename = 0  # images downloaded so far (also used as the file name)
m = 0         # result-page offset, advanced 30 per request
while filename < num:
    m = m + 30
    url = url1 % (keyword, keyword, m)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3602.2 Safari/537.36',
        'Referer': 'http://image.baidu.com',
    }
    res = requests.get(url, headers=headers)
    # 部分response.content中存在个别字符无法以utf编码,先将其以文本将个别字符替换,再进行编码
    # NOTE(review): "\0xe4" is NUL + literal "xe4", probably intended as
    # "\xe4" — kept as-is to preserve behavior; confirm against live data.
    res = res.text.replace("\0xe4", " ").encode("utf-8")
    res = json.loads(res)
    res = res['data']
    for img in res:
        if 'thumbURL' in img and filename < num:
            filename = filename + 1
            image = requests.get(img['thumbURL'], headers=headers)
            imagepath = path + "\\" + str(filename) + '.jpeg'
            print("正在下载第%d张图片,url为%s" % (filename, img['thumbURL']))
            try:
                # 'with' closes the handle even if the write fails
                # (the original leaked the descriptor on error).
                with open(imagepath, 'wb') as f:
                    f.write(image.content)
            except Exception:
                print("第%d张图片下载失败" % filename)
        else:
            # Quota reached (or malformed entry): stop scanning this page.
            break
print("共%d个图片下载完成!!!" % filename)
|
#!/usr/bin/env python
__author__ = "Master Computer Vision. Team 02"
__license__ = "M6 Video Analysis"
# Import libraries
import os
import math
import cv2
import pymorph as pym
import numpy as np
from scipy import ndimage
#from evaluate import *
#from sklearn.metrics import confusion_matrix
#from skimage.segmentation import clear_border
#from PIL import Image
from skimage.measure import label
from skimage.measure import regionprops
#from util import preprocess_pred_gt
from morphology import *
# Define colors spaces to transform frames
# Maps a color-space name to the matching OpenCV BGR conversion code.
colorSpaceConversion={}
colorSpaceConversion['YCrCb'] = cv2.COLOR_BGR2YCR_CB
colorSpaceConversion['HSV'] = cv2.COLOR_BGR2HSV
colorSpaceConversion['gray'] = cv2.COLOR_BGR2GRAY
# Path to save images and videos
images_path = "std-mean-images/"
video_path = "background-subtraction-videos/"
# Define groundtruth labels namely
# (grey levels used by the ground-truth annotation images)
STATIC = 0
HARD_SHADOW = 50
OUTSIDE_REGION = 85
UNKNOW_MOTION = 170
MOTION = 255
def get_accumulator(path_test):
    """
    Description: get accumulator structure data
    Depends on image size to define borders
    Data are coded into 32 bits of floats
    Input: path test (dataset input directory)
    Output: accumulator, a float32 array of shape (rows, cols, frames)
            for a known dataset, or an empty (0, 0) array otherwise
    """
    # Known dataset paths mapped to (rows, cols, n_frames); the unknown-path
    # fallback (0, 0) matches the original behaviour.
    shapes = {
        "/imatge/froldan/work/highway/input/": (240, 320, 150),
        "/imatge/froldan/work/fall/input/": (480, 720, 50),
        "/imatge/froldan/work/traffic/input/": (240, 320, 50),
    }
    return np.zeros(shapes.get(path_test, (0, 0)), np.float32)
def gaussian_color(frame, mu_matrix, sigma_matrix, alpha, colorSpace, connectivity, areaPixels,ac_morphology,SE1size,SE2size):
    """Per-pixel Gaussian model segmentation: return a binary motion mask.

    frame        -- BGR input frame
    mu_matrix    -- per-pixel/channel model mean
    sigma_matrix -- per-pixel/channel model deviation
    alpha        -- threshold multiplier applied to sigma
    colorSpace   -- 'RGB' (no conversion) or a key of colorSpaceConversion
    connectivity -- '4' or '8'; selects the hole-filling structuring element
    areaPixels   -- drop labeled regions smaller than this area (0 = keep all)
    ac_morphology, SE1size, SE2size -- enable/parametrize the extra
                    dilate/fill/erode + small-dot removal pass
    """
    # Define the codec and create VideoWriter object
    # Define structuring element according to connectivity
    structuring_element = [[0,0,0],[0,0,0],[0,0,0]]
    if connectivity == '4':
        structuring_element = [[0,1,0],[1,1,1],[0,1,0]]
    if connectivity == '8':
        structuring_element = [[1,1,1],[1,1,1],[1,1,1]]
    if colorSpace != 'RGB':
        frame = cv2.cvtColor(frame, colorSpaceConversion[colorSpace])
    # A pixel is marked only when ALL channels deviate at least
    # alpha*(sigma+2) from the mean (np.prod over the channel axis).
    # NOTE(review): despite the variable name, 1s appear to mark moving
    # (non-background) pixels — confirm against the caller.
    background = np.prod(abs(frame - mu_matrix) >= alpha*(sigma_matrix+2),axis=2)
    background = background.astype(int)
    background = ndimage.binary_fill_holes(background, structure=structuring_element).astype(int)
    if ac_morphology==1:
        # Closing-like pass: dilate, re-fill holes, erode, then remove
        # isolated dots smaller than SE2size.
        background = dilation(background,SE1size)
        background = ndimage.binary_fill_holes(background, structure=structuring_element).astype(int)
        background = erosion(background,SE1size)
        background = remove_dots(background,SE2size)
    # Replace 1 by 255
    background[background == 1] = 255
    # Scales, calculates absolute values, and converts the result to 8-bit
    background = cv2.convertScaleAbs(background)
    # Area filltering, label background regions
    label_image = label(background)
    # Measure properties of labeled background regions
    if areaPixels > 0:
        for region in regionprops(label_image):
            # Remove regions smaller than fixed area
            if region.area < areaPixels:
                minr, minc, maxr, maxc = region.bbox
                background[minr:maxr,minc:maxc] = 0
    return background
|
from rest_framework import serializers
from profiles_api import models
# this is a simple serializer
class HelloSerializer(serializers.Serializer):
    """Accept a short ``name`` string for exercising our APIView.

    DRF serializers double as validators: declaring ``name`` as a
    CharField guarantees the incoming payload carries a string of at
    most ten characters before the view ever sees it.
    """

    name = serializers.CharField(max_length=10)
# this is a new serializer (this is a model serializer)
class UserProfileSerializer(serializers.ModelSerializer):
    """ModelSerializer for UserProfile objects."""

    class Meta:
        # Bind the serializer to the user profile model and expose only
        # the fields API clients may read or write.
        model = models.UserProfile
        fields = ('id', 'email', 'name', 'password')
        # Passwords must never be echoed back in responses: mark the field
        # write-only, and render it as a password input (dots/asterisks)
        # in the browsable API.
        extra_kwargs = {
            'password': {
                'write_only': True,
                'style': {'input_type': 'password'},
            }
        }

    def create(self, validated_data):
        """Create and return a new user with a properly hashed password."""
        # create_user (rather than a plain create) hashes the password.
        return models.UserProfile.objects.create_user(
            email=validated_data['email'],
            name=validated_data['name'],
            password=validated_data['password'],
        )

    def update(self, instance, validated_data):
        """Hash any supplied password before delegating the update."""
        if 'password' in validated_data:
            instance.set_password(validated_data.pop('password'))
        return super().update(instance, validated_data)
class ProfileFeedItemSerializer(serializers.ModelSerializer):
    """ModelSerializer for ProfileFeedItem objects."""

    class Meta:
        model = models.ProfileFeedItem
        # 'id' is the auto-increment primary key Django creates for the
        # table; DRF treats it as read-only automatically.
        fields = ('id', 'user_profile', 'status_text', 'created_on')
        # The owning profile is set from the authenticated request, never
        # supplied by the client, so expose it read-only.
        extra_kwargs = {'user_profile': {'read_only': True}}
|
def isUnique(s):
    """Return True if every character of *s* occurs at most once."""
    # A set drops duplicates, so the lengths differ iff s has a repeat.
    return len(set(s)) == len(s)
def main():
    # Demo driver; note the Python 2 print *statement* below.
    s1 = 'i have a dream'
    s2 = 'qwertyuiop'
    print isUnique(s1), isUnique(s2)
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
import re
# Public API of this module.
__all__ = ['nodepair_pattern', 'git_commit_pattern']

# Raw strings so '\d' / '\s' are regex escapes, not (deprecated) string
# escapes.  Matches two integers separated by '-', ',' and/or whitespace,
# e.g. "12-34" or "12, 34".
nodepair_pattern = re.compile(r'(\d+)[-,\s]+(\d+)')
# Matches a "commit <40-hex-digit sha1>" line (anchored at end of line).
git_commit_pattern = re.compile(r'commit ([0-9a-f]{40}$)')
|
# -*- coding: utf-8 -*-
class Solution:
    def numberOfArithmeticSlices(self, A):
        """Count contiguous arithmetic slices of length >= 3 in A.

        A slice [i..j] is arithmetic when consecutive differences are all
        equal; each new element extending a run of equal differences adds
        `run` additional slices ending at that element.
        """
        total = 0
        run = 0  # consecutive equal-difference steps ending at index i
        for i in range(2, len(A)):
            if A[i] - A[i - 1] == A[i - 1] - A[i - 2]:
                run += 1
                total += run
            else:
                run = 0
        return total
# Self-test when run directly.
if __name__ == "__main__":
    solution = Solution()
    assert 3 == solution.numberOfArithmeticSlices([1, 2, 3, 4])
    assert 2 == solution.numberOfArithmeticSlices([1, 2, 3, 5, 8, 9, 10])
|
# Copyright (c) 2018 Amdocs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import unittest
from multicloud_azure.pub.utils import syscomm
class SyscommTest(unittest.TestCase):
    """Unit tests for multicloud_azure.pub.utils.syscomm helpers."""

    def test_keystone_version(self):
        """keystoneVersion joins the base URL and the version segment."""
        url = "http://a.com/test"
        version = "v3"
        expected = "http://a.com/test/v3"
        # assertEqual, not assertEquals: the latter is a deprecated alias
        # (removed in Python 3.12).
        self.assertEqual(expected, syscomm.keystoneVersion(url, version))

    def test_verify_keystone(self):
        """verifyKeystoneV2 accepts a well-formed Keystone v2 auth payload."""
        param = {
            "auth": {
                "tenantName": "12345",
                "passwordCredentials": {
                    "username": "admin",
                    "password": "admin"
                }
            }
        }
        self.assertEqual(True, syscomm.verifyKeystoneV2(param))

    def test_json_response(self):
        """jsonResponse returns the payload plus a text/plain content type."""
        data = "abcde"
        res = data
        content_type = "text/plain"
        self.assertEqual((res, content_type), syscomm.jsonResponse(data))
|
# coding=utf-8
import os
import pickle
import random
import shutil
import sys
import tempfile
import unittest
from collections import namedtuple
from nose2.tools import params
from threading import Thread
from persistqueue.serializers import json as serializers_json
from persistqueue.serializers import pickle as serializers_pickle
from persistqueue.serializers import msgpack as serializers_msgpack
from persistqueue.serializers import cbor2 as serializers_cbor2
from persistqueue import Queue, Empty, Full
# map keys as params for readable errors from nose
serializer_params = {
"serializer=default": {},
"serializer=json": {"serializer": serializers_json},
"serializer=msgpack": {"serializer": serializers_msgpack},
"serializer=cbor2": {"serializer": serializers_cbor2},
"serializer=pickle": {"serializer": serializers_pickle},
}
class PersistTest(unittest.TestCase):
    """File-backed persistqueue.Queue behaviour tests.

    Methods decorated with @params(*serializer_params) run once per
    serializer configuration (default/json/msgpack/cbor2/pickle).
    """

    def setUp(self):
        # Fresh on-disk queue directory for every test.
        self.path = tempfile.mkdtemp(suffix='queue')

    def tearDown(self):
        shutil.rmtree(self.path, ignore_errors=True)

    @params(*serializer_params)
    def test_open_close_single(self, serializer):
        """Write 1 item, close, reopen checking if same item is there"""
        q = Queue(self.path, **serializer_params[serializer])
        q.put('var1')
        del q
        q = Queue(self.path, **serializer_params[serializer])
        self.assertEqual(1, q.qsize())
        self.assertEqual('var1', q.get())
        q.task_done()
        del q

    def test_empty(self):
        """empty() tracks puts and gets."""
        q = Queue(self.path)
        self.assertEqual(q.empty(), True)
        q.put('var1')
        self.assertEqual(q.empty(), False)
        q.get()
        self.assertEqual(q.empty(), True)

    def test_full(self):
        """full() flips exactly at maxsize."""
        q = Queue(self.path, maxsize=3)
        for i in range(1, q.maxsize):
            q.put('var{}'.format(i))
        self.assertEqual(q.full(), False)
        q.put('var{}'.format(q.maxsize))
        self.assertEqual(q.full(), True)
        q.get()
        self.assertEqual(q.full(), False)

    @params(*serializer_params)
    def test_open_close_1000(self, serializer):
        """Write 1000 items, close, reopen checking if all items are there"""
        q = Queue(self.path, **serializer_params[serializer])
        for i in range(1000):
            q.put('var%d' % i)
        self.assertEqual(1000, q.qsize())
        del q
        q = Queue(self.path, **serializer_params[serializer])
        self.assertEqual(1000, q.qsize())
        for i in range(1000):
            data = q.get()
            self.assertEqual('var%d' % i, data)
            q.task_done()
        with self.assertRaises(Empty):
            q.get_nowait()
        # assert adding another one still works
        q.put('foobar')
        data = q.get()
        # Fix: the fetched value was never checked before.
        self.assertEqual('foobar', data)

    @params(*serializer_params)
    def test_partial_write(self, serializer):
        """Test recovery from previous crash w/ partial write"""
        q = Queue(self.path, **serializer_params[serializer])
        for i in range(100):
            q.put('var%d' % i)
        del q
        # Append garbage to the head data file to simulate a torn write.
        with open(os.path.join(self.path, 'q00000'), 'ab') as f:
            pickle.dump('文字化け', f)
        q = Queue(self.path, **serializer_params[serializer])
        self.assertEqual(100, q.qsize())
        for i in range(100):
            self.assertEqual('var%d' % i, q.get())
            q.task_done()
        with self.assertRaises(Empty):
            q.get_nowait()

    @params(*serializer_params)
    def test_random_read_write(self, serializer):
        """Test random read/write"""
        q = Queue(self.path, **serializer_params[serializer])
        n = 0  # items currently in the queue
        for i in range(1000):
            if random.random() < 0.5:
                if n > 0:
                    q.get_nowait()
                    q.task_done()
                    n -= 1
                else:
                    with self.assertRaises(Empty):
                        q.get_nowait()
            else:
                q.put('var%d' % random.getrandbits(16))
                n += 1

    @params(*serializer_params)
    def test_multi_threaded(self, serializer):
        """Create consumer and producer threads, check parallelism"""
        q = Queue(self.path, **serializer_params[serializer])

        def producer():
            for i in range(1000):
                q.put('var%d' % i)

        def consumer():
            for i in range(1000):
                q.get()
                q.task_done()

        c = Thread(target=consumer)
        c.start()
        p = Thread(target=producer)
        p.start()
        c.join()
        p.join()
        q.join()
        with self.assertRaises(Empty):
            q.get_nowait()

    @params(*serializer_params)
    def test_garbage_on_head(self, serializer):
        """Adds garbage to the queue head and let the internal integrity
        checks fix it"""
        q = Queue(self.path, **serializer_params[serializer])
        q.put('var1')
        del q
        with open(os.path.join(self.path, 'q00000'), 'ab') as f:
            f.write(b'garbage')
        q = Queue(self.path, **serializer_params[serializer])
        q.put('var2')
        self.assertEqual(2, q.qsize())
        self.assertEqual('var1', q.get())
        q.task_done()

    @params(*serializer_params)
    def test_task_done_too_many_times(self, serializer):
        """Test too many task_done called."""
        q = Queue(self.path, **serializer_params[serializer])
        q.put('var1')
        q.get()
        q.task_done()
        with self.assertRaises(ValueError):
            q.task_done()

    @params(*serializer_params)
    def test_get_timeout_negative(self, serializer):
        """get() rejects a negative timeout."""
        q = Queue(self.path, **serializer_params[serializer])
        q.put('var1')
        with self.assertRaises(ValueError):
            q.get(timeout=-1)

    @params(*serializer_params)
    def test_get_timeout(self, serializer):
        """Test when get failed within timeout."""
        q = Queue(self.path, **serializer_params[serializer])
        q.put('var1')
        q.get()
        with self.assertRaises(Empty):
            q.get(timeout=1)

    @params(*serializer_params)
    def test_put_nowait(self, serializer):
        """Tests the put_nowait interface."""
        q = Queue(self.path, **serializer_params[serializer])
        q.put_nowait('var1')
        self.assertEqual('var1', q.get())
        q.task_done()

    @params(*serializer_params)
    def test_put_maxsize_reached(self, serializer):
        """Test that maxsize reached."""
        q = Queue(self.path, maxsize=10, **serializer_params[serializer])
        for x in range(10):
            q.put(x)
        with self.assertRaises(Full):
            q.put('full_now', block=False)

    @params(*serializer_params)
    def test_put_timeout_reached(self, serializer):
        """Test put with block and timeout."""
        q = Queue(self.path, maxsize=2, **serializer_params[serializer])
        for x in range(2):
            q.put(x)
        with self.assertRaises(Full):
            q.put('full_and_timeout', block=True, timeout=1)

    @params(*serializer_params)
    def test_put_timeout_negative(self, serializer):
        """Test and put with timeout < 0"""
        q = Queue(self.path, maxsize=1, **serializer_params[serializer])
        with self.assertRaises(ValueError):
            q.put('var1', timeout=-1)

    @params(*serializer_params)
    def test_put_block_and_wait(self, serializer):
        """Test block until queue is not full."""
        q = Queue(self.path, maxsize=10, **serializer_params[serializer])

        def consumer():
            for i in range(5):
                q.get()
                q.task_done()

        def producer():
            for j in range(16):
                q.put('var%d' % j)

        p = Thread(target=producer)
        p.start()
        c = Thread(target=consumer)
        c.start()
        c.join()
        val = q.get_nowait()
        p.join()
        self.assertEqual('var5', val)

    @params(*serializer_params)
    def test_clear_tail_file(self, serializer):
        """Test that only remove tail file when calling task_done."""
        q = Queue(self.path, chunksize=10, **serializer_params[serializer])
        for i in range(35):
            q.put('var%d' % i)
        for _ in range(15):
            q.get()
        q = Queue(self.path, chunksize=10, **serializer_params[serializer])
        self.assertEqual(q.qsize(), 35)
        for _ in range(15):
            q.get()
        # the first tail file gets removed after task_done
        q.task_done()
        for _ in range(16):
            q.get()
        # the second and third files get removed after task_done
        q.task_done()
        self.assertEqual(q.qsize(), 4)

    def test_protocol(self):
        # test that protocol is set properly
        expect_protocol = 2 if sys.version_info[0] == 2 else 4
        self.assertEqual(
            serializers_pickle.protocol,
            expect_protocol,
        )
        # test that protocol is used properly
        serializer = namedtuple("Serializer", ["dump", "load"])(
            serializers_pickle.dump, lambda fp: fp.read())
        q = Queue(path=self.path, serializer=serializer)
        q.put(b'a')
        self.assertEqual(q.get(), pickle.dumps(b'a', protocol=expect_protocol))

    @params(*serializer_params)
    def test_del(self, serializer):
        """test that __del__ can be called successfully"""
        q = Queue(self.path, **serializer_params[serializer])
        q.__del__()
        self.assertTrue(q.headf.closed)
        self.assertTrue(q.tailf.closed)

    @params(*serializer_params)
    def test_autosave_get(self, serializer):
        """test the autosave feature saves on get()"""
        q = Queue(self.path, autosave=True, **serializer_params[serializer])
        q.put('var1')
        q.put('var2')
        self.assertEqual('var1', q.get())
        del q
        # queue should save on get(), only one item should remain
        q = Queue(self.path, autosave=True, **serializer_params[serializer])
        self.assertEqual(1, q.qsize())
        self.assertEqual('var2', q.get())
        del q

    @params(*serializer_params)
    def test_autosave_join(self, serializer):
        """Enabling autosave should still allow task_done/join behavior"""
        q = Queue(self.path, autosave=True, **serializer_params[serializer])
        for i in range(10):
            q.put('var%d' % i)

        def consumer():
            for i in range(10):
                q.get()
                # this should still 'count down' properly and allow q.join()
                # to finish
                q.task_done()

        c = Thread(target=consumer)
        c.start()
        q.join()
        with self.assertRaises(Empty):
            q.get_nowait()
|
# Repeatedly read comma-separated integers until input ends (EOF or
# unparsable line terminates the loop via the bare except).
while 1:
    ans = 0
    # NOTE(review): last two values look like speeds s2, s1 and the rest
    # section distances — confirm against the problem statement.
    try: l=list(map(int,input().split(',')))
    except: break
    s2,s1=l.pop(),l.pop()
    # t: time for the combined speed s1+s2 to cover all sections;
    # x: distance covered at speed s1 in that time (the meeting point).
    t=sum(l)/(s1+s2)
    x=s1*t
    # Accumulate section lengths until the meeting point is reached.
    # NOTE(review): assumes at most 10 sections remain in l — verify.
    for i in range(10):
        ans = ans + l[i]
        if ans >= x: break
    # 1-based index of the section containing the meeting point.
    print(i+1)
import down_util
import pandas as pd
if __name__=='__main__':
    # Load and split the local Landsat scene index.
    df,_=down_util.split_collection(r"D:\DATA\GOOGLE\landsat_index.csv.gz")
    # Path/row identifiers to process (column 'PR' of cnpr.txt).
    prlist = pd.read_csv('cnpr.txt').PR
    # Output statistics table (scene counts per half-year, cloud < 50%).
    sta_table = r'C:\Users\ranyu\Desktop\half_year_count_50.csv'
    # testprlist=['121032','121033','020020']
    down_util.seasonal_count(df, sta_table, todopr=prlist, date_split=('1-1', '7-1'), year_start=1986, year_end=2018, cloudlt=50)
import struct
import random
import socket
import array
import time
import sys
import socket
# UDP port the NetFinder devices listen on.
NETFINDER_SERVER_PORT = 3040
# Message type identifiers (request/reply pairs).
NF_IDENTIFY = 0
NF_IDENTIFY_REPLY = 1
NF_ASSIGNMENT = 2
NF_ASSIGNMENT_REPLY = 3
NF_FLASH_ERASE = 4
NF_FLASH_ERASE_REPLY = 5
NF_BLOCK_SIZE = 6
NF_BLOCK_SIZE_REPLY = 7
NF_BLOCK_WRITE = 8
NF_BLOCK_WRITE_REPLY = 9
NF_VERIFY = 10
NF_VERIFY_REPLY = 11
NF_REBOOT = 12
NF_SET_ETHERNET_ADDRESS = 13
NF_SET_ETHERNET_ADDRESS_REPLY = 14
NF_TEST = 15
NF_TEST_REPLY = 16
# Reply result codes.
NF_SUCCESS = 0
NF_CRC_MISMATCH = 1
NF_INVALID_MEMORY_TYPE = 2
NF_INVALID_SIZE = 3
NF_INVALID_IP_TYPE = 4
# First byte of every message.
NF_MAGIC = 0x5A
# IP configuration / alert / mode / memory / reboot enumerations.
NF_IP_DYNAMIC = 0
NF_IP_STATIC = 1
NF_ALERT_OK = 0x00
NF_ALERT_WARN = 0x01
NF_ALERT_ERROR = 0xFF
NF_MODE_BOOTLOADER = 0
NF_MODE_APPLICATION = 1
NF_MEMORY_FLASH = 0
NF_MEMORY_EEPROM = 1
NF_REBOOT_CALL_BOOTLOADER = 0
NF_REBOOT_RESET = 1
# struct format strings (network byte order) for each message body.
HEADER_FMT = "!2cH6s2x"
IDENTIFY_FMT = HEADER_FMT
IDENTIFY_REPLY_FMT = "!H6c4s4s4s4s4s4s32s"
ASSIGNMENT_FMT = "!3xc4s4s4s32x"
ASSIGNMENT_REPLY_FMT = "!c3x"
FLASH_ERASE_FMT = HEADER_FMT
FLASH_ERASE_REPLY_FMT = HEADER_FMT
BLOCK_SIZE_FMT = HEADER_FMT
BLOCK_SIZE_REPLY_FMT = "!H2x"
BLOCK_WRITE_FMT = "!cxHI"
BLOCK_WRITE_REPLY_FMT = "!c3x"
VERIFY_FMT = HEADER_FMT
VERIFY_REPLY_FMT = "!c3x"
REBOOT_FMT = "!c3x"
SET_ETHERNET_ADDRESS_FMT = "!6s2x"
SET_ETHERNET_ADDRESS_REPLY_FMT = HEADER_FMT
TEST_FMT = HEADER_FMT
TEST_REPLY_FMT = "!32s"
# Retry policy for request/reply exchanges.
MAX_ATTEMPTS = 10
MAX_TIMEOUT = 0.5
#-----------------------------------------------------------------------------
def MkHeader(id, seq, eth_addr):
    # Common request header: magic, message id, sequence number, target
    # MAC address (Python 2 str-as-bytes throughout this module).
    return struct.pack(
        HEADER_FMT,
        chr(NF_MAGIC),
        chr(id),
        seq,
        eth_addr
    );
#-----------------------------------------------------------------------------
def MkIdentify(seq):
    # Identify is broadcast: the all-ones MAC matches every device.
    return MkHeader(NF_IDENTIFY, seq, '\xFF\xFF\xFF\xFF\xFF\xFF')
#-----------------------------------------------------------------------------
def MkAssignment(seq, eth_addr, ip_type, ip_addr, netmask, gateway):
    # IP assignment request; addresses are dotted-quad strings.
    return MkHeader(NF_ASSIGNMENT, seq, eth_addr) + \
        struct.pack(
            ASSIGNMENT_FMT,
            chr(ip_type),
            socket.inet_aton(ip_addr),
            socket.inet_aton(netmask),
            socket.inet_aton(gateway)
        )
#-----------------------------------------------------------------------------
def MkFlashErase(seq, eth_addr):
    # Header-only request.
    return MkHeader(NF_FLASH_ERASE, seq, eth_addr)
#-----------------------------------------------------------------------------
def MkBlockSize(seq, eth_addr):
    # Header-only request.
    return MkHeader(NF_BLOCK_SIZE, seq, eth_addr)
#-----------------------------------------------------------------------------
def MkBlockWrite(seq, eth_addr, memtype, addr, data):
    # Write one block of `data` at `addr` in flash or eeprom.
    return MkHeader(NF_BLOCK_WRITE, seq, eth_addr) + \
        struct.pack(
            BLOCK_WRITE_FMT,
            chr(memtype),
            len(data),
            addr,
        ) + \
        data
#-----------------------------------------------------------------------------
def MkVerify(seq, eth_addr):
    # Header-only request.
    return MkHeader(NF_VERIFY, seq, eth_addr)
#-----------------------------------------------------------------------------
def MkReboot(seq, eth_addr, reboottype):
    return MkHeader(NF_REBOOT, seq, eth_addr) + \
        struct.pack(
            REBOOT_FMT,
            chr(reboottype)
        );
#-----------------------------------------------------------------------------
def MkTest(seq, eth_addr):
    # Header-only request.
    return MkHeader(NF_TEST, seq, eth_addr)
#-----------------------------------------------------------------------------
def MkSetEthernetAddress(seq, eth_addr, new_eth_addr):
    return MkHeader(NF_SET_ETHERNET_ADDRESS, seq, eth_addr) + \
        struct.pack(
            SET_ETHERNET_ADDRESS_FMT,
            new_eth_addr
        );
#-----------------------------------------------------------------------------
def UnMkHeader(msg):
    # Decode the common header into a dict; ord() because Python 2
    # struct.unpack yields 1-char strings for 'c' fields.
    params = struct.unpack(
        HEADER_FMT,
        msg
    );
    d = {}
    d['magic'] = ord(params[0])
    d['id'] = ord(params[1])
    d['sequence'] = params[2]
    d['eth_addr'] = params[3]
    return d
#-----------------------------------------------------------------------------
def UnMkIdentifyReply(msg):
    # Header followed by device status, versions and IP configuration.
    hdrlen = struct.calcsize(HEADER_FMT)
    d = UnMkHeader(msg[0:hdrlen])
    params = struct.unpack(
        IDENTIFY_REPLY_FMT,
        msg[hdrlen:]
    );
    d['uptime_days'] = params[0]
    d['uptime_hrs'] = ord(params[1])
    d['uptime_min'] = ord(params[2])
    d['uptime_secs'] = ord(params[3])
    d['mode'] = ord(params[4])
    d['alert'] = ord(params[5])
    d['ip_type'] = ord(params[6])
    d['ip_addr'] = params[7]
    d['ip_netmask'] = params[8]
    d['ip_gw'] = params[9]
    d['app_ver'] = params[10]
    d['boot_ver'] = params[11]
    d['hw_ver'] = params[12]
    d['name'] = params[13]
    return d
#-----------------------------------------------------------------------------
def UnMkAssignmentReply(msg):
    # Header plus a single result byte.
    hdrlen = struct.calcsize(HEADER_FMT)
    d = UnMkHeader(msg[0:hdrlen])
    params = struct.unpack(
        ASSIGNMENT_REPLY_FMT,
        msg[hdrlen:]
    );
    d['result'] = ord(params[0])
    return d
#-----------------------------------------------------------------------------
def UnMkFlashEraseReply(msg):
    # Reply is header-only.
    return UnMkHeader(msg)
#-----------------------------------------------------------------------------
def UnMkBlockSizeReply(msg):
    # Header plus the device's preferred write-block size.
    hdrlen = struct.calcsize(HEADER_FMT)
    d = UnMkHeader(msg[0:hdrlen])
    params = struct.unpack(
        BLOCK_SIZE_REPLY_FMT,
        msg[hdrlen:]
    );
    d['size'] = params[0]
    return d
#-----------------------------------------------------------------------------
def UnMkBlockWriteReply(msg):
    # Header plus a single result byte.
    hdrlen = struct.calcsize(HEADER_FMT)
    d = UnMkHeader(msg[0:hdrlen])
    params = struct.unpack(
        BLOCK_WRITE_REPLY_FMT,
        msg[hdrlen:]
    );
    d['result'] = ord(params[0])
    return d
#-----------------------------------------------------------------------------
def UnMkVerifyReply(msg):
    # Header plus a single result byte.
    hdrlen = struct.calcsize(HEADER_FMT)
    d = UnMkHeader(msg[0:hdrlen])
    params = struct.unpack(
        VERIFY_REPLY_FMT,
        msg[hdrlen:]
    );
    d['result'] = ord(params[0])
    return d
#-----------------------------------------------------------------------------
def UnMkTestReply(msg):
    hdrlen = struct.calcsize(HEADER_FMT)
    d = UnMkHeader(msg[0:hdrlen])
    params = struct.unpack(
        TEST_REPLY_FMT,
        msg[hdrlen:]
    );
    # The 32-byte field is NUL-padded; keep the text up to the first NUL.
    result = ''
    for i in params[0]:
        if ord(i) == 0:
            break
        result = result + i
    d['result'] = result
    return d
#-----------------------------------------------------------------------------
def UnMkSetEthernetAddressReply(msg):
    # Reply is header-only.
    return UnMkHeader(msg)
#-----------------------------------------------------------------------------
def SendMsg(s, msg):
    # Broadcast the message on the NetFinder port; returns False on
    # socket errors (best-effort, error is printed).
    try:
        s.sendto(msg, ('<broadcast>', NETFINDER_SERVER_PORT))
    except socket.error, e:
        print e
        return False;
    return True;
#-----------------------------------------------------------------------------
def RecvMsg(s):
    # Receive one datagram (up to 256 bytes); '' on error/timeout.
    try:
        return s.recv(256)
    except socket.error: # ignore socket errors
        return ''
#-----------------------------------------------------------------------------
def Discover(s, r):
    # Broadcast identify requests and collect every valid reply, keyed by
    # the device MAC address.  Two attempts, each listening MAX_TIMEOUT s.
    devices = {}
    attempts = 2
    while attempts > 0:
        attempts = attempts - 1
        seq = random.randint(1, 65535)
        msg = MkIdentify(seq)
        if (SendMsg(s, msg)):
            exp = time.time() + MAX_TIMEOUT
            while time.time() < exp:
                sys.stdout.write('.')
                reply = RecvMsg(r)
                # Discard replies of the wrong size, magic, type or sequence.
                if len(reply) != struct.calcsize(HEADER_FMT) + struct.calcsize(IDENTIFY_REPLY_FMT):
                    continue
                d = UnMkIdentifyReply(reply)
                if d['magic'] != NF_MAGIC:
                    continue
                if d['id'] != NF_IDENTIFY_REPLY:
                    continue
                if d['sequence'] != seq:
                    continue
                devices[d['eth_addr']] = d
    return devices
#-----------------------------------------------------------------------------
def Identify(s, r, eth_addr):
    # Identify one specific device; 2 attempts with a longer listen window.
    attempts = 2
    while attempts > 0:
        attempts = attempts - 1
        seq = random.randint(1, 65535)
        msg = MkIdentify(seq)
        if (SendMsg(s, msg)):
            exp = time.time() + 2 # Longer timeout
            while time.time() < exp:
                sys.stdout.write('.')
                reply = RecvMsg(r)
                if len(reply) != struct.calcsize(HEADER_FMT) + struct.calcsize(IDENTIFY_REPLY_FMT):
                    continue
                d = UnMkIdentifyReply(reply)
                if d['magic'] != NF_MAGIC:
                    continue
                if d['id'] != NF_IDENTIFY_REPLY:
                    continue
                if d['sequence'] != seq:
                    continue
                if d['eth_addr'] != eth_addr:
                    continue
                return d
    return {}
#-----------------------------------------------------------------------------
def Assignment(s, r, eth_addr, ip_type, ip_addr, netmask, gateway):
    # Push an IP configuration to a device; returns the reply dict or {}.
    attempts = MAX_ATTEMPTS
    while attempts > 0:
        attempts = attempts - 1
        seq = random.randint(1, 65535)
        msg = MkAssignment(seq, eth_addr, ip_type, ip_addr, netmask, gateway)
        if (SendMsg(s, msg)):
            exp = time.time() + MAX_TIMEOUT
            while time.time() < exp:
                sys.stdout.write('.')
                reply = RecvMsg(r)
                if len(reply) != struct.calcsize(HEADER_FMT) + struct.calcsize(ASSIGNMENT_REPLY_FMT):
                    continue
                d = UnMkAssignmentReply(reply)
                if d['magic'] != NF_MAGIC:
                    continue
                if d['id'] != NF_ASSIGNMENT_REPLY:
                    continue
                if d['sequence'] != seq:
                    continue
                if d['eth_addr'] != eth_addr:
                    continue
                return d
    return {}
#-----------------------------------------------------------------------------
def FlashErase(s, r, eth_addr):
    # Ask the device to erase its flash; generous 10 s listen window.
    attempts = MAX_ATTEMPTS
    while attempts > 0:
        attempts = attempts - 1
        seq = random.randint(1, 65535)
        msg = MkFlashErase(seq, eth_addr)
        if (SendMsg(s, msg)):
            exp = time.time() + 10 # Flash erase could take a while
            while time.time() < exp:
                sys.stdout.write('.')
                reply = RecvMsg(r)
                if len(reply) != struct.calcsize(HEADER_FMT):
                    continue
                d = UnMkFlashEraseReply(reply)
                if d['magic'] != NF_MAGIC:
                    continue
                if d['id'] != NF_FLASH_ERASE_REPLY:
                    continue
                if d['sequence'] != seq:
                    continue
                if d['eth_addr'] != eth_addr:
                    continue
                return d
    return {}
#-----------------------------------------------------------------------------
def BlockSize(s, r, eth_addr):
    # Query the device's preferred firmware write-block size.
    attempts = MAX_ATTEMPTS
    while attempts > 0:
        attempts = attempts - 1
        seq = random.randint(1, 65535)
        msg = MkBlockSize(seq, eth_addr)
        if (SendMsg(s, msg)):
            exp = time.time() + MAX_TIMEOUT
            while time.time() < exp:
                sys.stdout.write('.')
                reply = RecvMsg(r)
                if len(reply) != struct.calcsize(HEADER_FMT) + struct.calcsize(BLOCK_SIZE_REPLY_FMT):
                    continue
                d = UnMkBlockSizeReply(reply)
                if d['magic'] != NF_MAGIC:
                    continue
                if d['id'] != NF_BLOCK_SIZE_REPLY:
                    continue
                if d['sequence'] != seq:
                    continue
                if d['eth_addr'] != eth_addr:
                    continue
                return d
    return {}
#-----------------------------------------------------------------------------
def BlockWrite(s, r, eth_addr, memtype, addr, data):
    # Write one data block to device memory; retried up to MAX_ATTEMPTS.
    attempts = MAX_ATTEMPTS
    while attempts > 0:
        attempts = attempts - 1
        seq = random.randint(1, 65535)
        msg = MkBlockWrite(seq, eth_addr, memtype, addr, data)
        if (SendMsg(s, msg)):
            exp = time.time() + MAX_TIMEOUT
            while time.time() < exp:
                #sys.stdout.write('.'),
                reply = RecvMsg(r)
                if len(reply) != struct.calcsize(HEADER_FMT) + struct.calcsize(BLOCK_WRITE_REPLY_FMT):
                    continue
                d = UnMkBlockWriteReply(reply)
                if d['magic'] != NF_MAGIC:
                    continue
                if d['id'] != NF_BLOCK_WRITE_REPLY:
                    continue
                if d['sequence'] != seq:
                    continue
                if d['eth_addr'] != eth_addr:
                    continue
                return d
    return {}
#-----------------------------------------------------------------------------
def Verify(s, r, eth_addr):
    # Ask the device to verify the written image; reply dict or {}.
    attempts = MAX_ATTEMPTS
    while attempts > 0:
        attempts = attempts - 1
        seq = random.randint(1, 65535)
        msg = MkVerify(seq, eth_addr)
        if (SendMsg(s, msg)):
            exp = time.time() + MAX_TIMEOUT
            while time.time() < exp:
                sys.stdout.write('.')
                reply = RecvMsg(r)
                if len(reply) != struct.calcsize(HEADER_FMT) + struct.calcsize(VERIFY_REPLY_FMT):
                    continue
                d = UnMkVerifyReply(reply)
                if d['magic'] != NF_MAGIC:
                    continue
                if d['id'] != NF_VERIFY_REPLY:
                    continue
                if d['sequence'] != seq:
                    continue
                if d['eth_addr'] != eth_addr:
                    continue
                return d
    return {}
#-----------------------------------------------------------------------------
def Reboot(s, eth_addr, reboottype):
    # Fire-and-forget: devices do not reply to reboot requests.
    seq = random.randint(1, 65535)
    msg = MkReboot(seq, eth_addr, reboottype)
    if (SendMsg(s, msg)):
        return
#-----------------------------------------------------------------------------
def Test(s, r, eth_addr):
    # Run the device self-test; single attempt, returns reply dict or {}.
    seq = random.randint(1, 65535)
    msg = MkTest(seq, eth_addr)
    if (SendMsg(s, msg)):
        exp = time.time() + MAX_TIMEOUT
        while time.time() < exp:
            sys.stdout.write('.')
            reply = RecvMsg(r)
            if len(reply) != struct.calcsize(HEADER_FMT) + struct.calcsize(TEST_REPLY_FMT):
                continue
            d = UnMkTestReply(reply)
            if d['magic'] != NF_MAGIC:
                continue
            if d['id'] != NF_TEST_REPLY:
                continue
            if d['sequence'] != seq:
                continue
            if d['eth_addr'] != eth_addr:
                continue
            return d
    return {}
#-----------------------------------------------------------------------------
def SetEthernetAddress(s, r, eth_addr, new_eth_addr):
    # Reprogram the device MAC address; single attempt, reply dict or {}.
    seq = random.randint(1, 65535)
    msg = MkSetEthernetAddress(seq, eth_addr, new_eth_addr)
    if (SendMsg(s, msg)):
        exp = time.time() + MAX_TIMEOUT
        while time.time() < exp:
            sys.stdout.write('.')
            reply = RecvMsg(r)
            if len(reply) != struct.calcsize(HEADER_FMT):
                continue
            d = UnMkSetEthernetAddressReply(reply)
            if d['magic'] != NF_MAGIC:
                continue
            if d['id'] != NF_SET_ETHERNET_ADDRESS_REPLY:
                continue
            if d['sequence'] != seq:
                continue
            if d['eth_addr'] != eth_addr:
                continue
            return d
    return {}
#-----------------------------------------------------------------------------
def FormatEthAddr(a):
    # Render a 6-byte MAC string as "AA-BB-CC-DD-EE-FF".
    return "%02X-%02X-%02X-%02X-%02X-%02X" % (ord(a[0]), ord(a[1]), ord(a[2]), ord(a[3]), ord(a[4]), ord(a[5]))
#-----------------------------------------------------------------------------
def PrintDetails(d):
    # Pretty-print one decoded identify-reply dict (Python 2 print syntax).
    print
    print "Ethernet Address:", FormatEthAddr(d['eth_addr'])
    print "Hardware:", socket.inet_ntoa(d['hw_ver']), "Bootloader:", socket.inet_ntoa(d['boot_ver']), "Application:", socket.inet_ntoa(d['app_ver'])
    print "Uptime:", d['uptime_days'], 'days', d['uptime_hrs'], 'hours', d['uptime_min'], 'minutes', d['uptime_secs'], 'seconds'
    if d['ip_type'] == NF_IP_STATIC:
        print "Static IP"
    elif d['ip_type'] == NF_IP_DYNAMIC:
        print "Dynamic IP"
    else:
        print "Unknown IP type"
    print "IP Address:", socket.inet_ntoa(d['ip_addr']), "Mask:", socket.inet_ntoa(d['ip_netmask']), "Gateway:", socket.inet_ntoa(d['ip_gw'])
    print "Mode:",
    if d['mode'] == NF_MODE_BOOTLOADER:
        print 'Bootloader'
    elif d['mode'] == NF_MODE_APPLICATION:
        print 'Application'
    else:
        print 'Unknown'
|
import click, urllib2, json
class Movie:
    """Holds the subset of OMDb fields that the CLI displays."""

    @classmethod
    def Load_JSON(cls, data, get_plot):
        """Build a Movie from a decoded OMDb JSON response dict.

        Returns None when the API reports no match (Response != 'True').
        get_plot -- also capture the plot summary when truthy.
        (First parameter renamed from 'Movie' to the conventional 'cls';
        it shadowed the class name.)
        """
        if data[u'Response'] != 'True':
            return None
        movie = cls()
        movie.title = data[u'Title']
        if get_plot:
            movie.plot = data[u'Plot']
        # Runtime arrives as e.g. "142 min"; keep just the minute count.
        movie.runtime = int(data[u'Runtime'].split()[0])
        movie.metascore = data[u'Metascore']
        movie.imdb_rating = data[u'imdbRating']
        movie.imdb_votes = data[u'imdbVotes']
        return movie

    def __str__(self):
        """Render the multi-line report printed by the CLI."""
        # join() instead of repeated '+=' (which is quadratic).
        parts = ['Title: {0}\n\n'.format(self.title)]
        if hasattr(self, 'plot'):
            parts.append('Plot: {0}\n\n'.format(self.plot))
        parts.append('Runtime: {0}hrs {1}mins\n'.format(self.runtime // 60, self.runtime % 60))
        parts.append('Metascore: {0}\n'.format(self.metascore))
        parts.append('imdb rating: {0}\n'.format(self.imdb_rating))
        parts.append('imdb votes: {0}\n'.format(self.imdb_votes))
        return ''.join(parts)
@click.command()
@click.argument('search', metavar='movie_name')
@click.option('--plot', is_flag=True, help='show plot')
def smibbler(search, plot):
    """ Print info fetched from omdbapi.com """
    # generate search string and fetch data
    # (spaces become '+' for the query string; Python 2 urllib2)
    query = search.replace(' ', '+')
    request = 'http://www.omdbapi.com/?t={0}&y=&plot=short&r=json'.format(query)
    res = urllib2.urlopen(request)
    data = json.load(res)
    movie = Movie.Load_JSON(data, plot)
    if movie is None:
        click.echo('Movie not found')
    else:
        click.echo(movie)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.