N, B, C = map(int, input().split())
arr = list(map(int, input().split()))
curr_b = B
curr_c = C
answer = 0
for i in range(N):
if arr[i] == 0:
if curr_b == 0:
# Must wash clothes
curr_b = B
curr_c = C
answer += 1
curr_b -= 1
else:
if curr_c == 0:
# Must wash clothes
curr_b = B
curr_c = C
answer += 1
curr_c -= 1
print(answer)
|
# Generated by Django 2.2.12 on 2020-05-12 15:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('artical', '0004_auto_20200512_1504'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='content',
field=models.TextField(max_length=6000),
),
migrations.AlterField(
model_name='blog',
name='headline',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='trap',
name='solution',
            field=models.TextField(default='我想到了一个绝妙的解决方法, 但是这里位置太小, 我写不下...', max_length=1000),  # default text: "I've thought of a marvelous solution, but this space is too small to write it down..."
),
]
|
# Hundred fowls problem: buy exactly 100 birds for 100 coins, where a rooster
# costs 5 coins, a hen costs 3 coins, and chicks sell 3 for 1 coin.
for rooster in range(0, 20):
    for hen in range(0, 33):
        chick = 100 - rooster - hen
        # chicks are bought in threes, so their count must be divisible by 3
        if chick % 3 == 0 and chick // 3 + rooster * 5 + hen * 3 == 100:
            print('Roosters: %d, hens: %d, chicks: %d.' % (rooster, hen, chick))
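# With the divisibility check in place the loop prints the four classic
# solutions: (roosters, hens, chicks) = (0, 25, 75), (4, 18, 78), (8, 11, 81), (12, 4, 84).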
|
from django.db import models
from catalog.models import Specification
from django.contrib.auth.models import User
from accounts.models import Address
from django.db import connection, transaction
def get_number():
    # assumes a database sequence named 'comm_order_number' exists (PostgreSQL nextval)
    cursor = connection.cursor()
    cursor.execute("SELECT nextval('comm_order_number')")
    return int(cursor.fetchone()[0])
class Order(models.Model):
dt=models.DateTimeField(auto_now_add=True, verbose_name=u'DateTime')
    number=models.IntegerField(verbose_name=u'Number', default=get_number, editable=True)  # pass the callable itself; get_number() here would run once at class definition and reuse the same value
item_specification=models.ManyToManyField(Specification, through='OrderSpecification')
user=models.ForeignKey(User, verbose_name=u'User')
address=models.ForeignKey(Address, verbose_name=u'Address')
def __unicode__(self):
return self.dt.strftime('%d/%m/%Y %H:%M') + ' ' + self.user.first_name + ' ' + self.user.last_name + " (" + self.user.username + ")"
class Meta:
verbose_name=u'Order'
verbose_name_plural=u'Orders'
class OrderSpecification(models.Model):
quantity=models.IntegerField(verbose_name=u'Quantity')
order=models.ForeignKey(Order, verbose_name=u'Order')
specification=models.ForeignKey(Specification, verbose_name=u'Item specification')
def __unicode__(self):
return self.specification.__unicode__()
class Basket(models.Model):
item_specification=models.ManyToManyField(Specification, through='BasketSpecification')
user=models.OneToOneField(User, verbose_name=u'User')
def __unicode__(self):
return self.user.first_name + ' ' + self.user.last_name + " (" + self.user.username + ")"
class Meta:
verbose_name=u'Basket'
verbose_name_plural=u'Baskets'
class BasketSpecification(models.Model):
quantity=models.IntegerField(verbose_name=u'Quantity')
basket=models.ForeignKey(Basket, verbose_name=u'Basket')
specification=models.ForeignKey(Specification, verbose_name=u'Item specification')
def __unicode__(self):
return self.specification.__unicode__()
|
import cv2
import integral_images as ii
from patches import Patches
import numpy as np
class FragTracker:
DEFAULT_VIDEO_PATH = "videos/times_square2.mp4"
DEFAULT_SPLIT = (10, 10)
DEFAULT_RADIUS = 20
def __init__(self, video_path=DEFAULT_VIDEO_PATH, split=DEFAULT_SPLIT, radius=DEFAULT_RADIUS):
self.video_path = video_path
self.video = cv2.VideoCapture(self.video_path)
self.split = split
self.radius = radius
        ok, frame = self.video.read()
        if not ok:
            raise RuntimeError("First frame not available")
        self.frame_width = len(frame[0])
        self.frame_height = len(frame)
self.bound_box = cv2.selectROI(frame, False)
print(self.bound_box)
self.template_width = self.bound_box[2]
self.template_height = self.bound_box[3]
self.t_half_width = int(self.template_width / 2)
self.t_half_height = int(self.template_height / 2)
end_x, end_y = self.bound_box[0] + self.template_width, self.bound_box[1] + self.template_height
template = frame[self.bound_box[1]:end_y, self.bound_box[0]:end_x]
template = cv2.cvtColor(template, cv2.COLOR_BGR2HSV)
integral_bins = ii.hue_integral_bins(template)
self.template_patches = Patches(integral_bins, 0, 0, self.template_width, self.template_height
, split= self.split)
def execute(self, step=1):
center_x = int(self.bound_box[0] + self.t_half_width)
center_y = int(self.bound_box[1] + self.t_half_height)
create_video = cv2.VideoWriter('results/times_square22.mp4', cv2.VideoWriter_fourcc('M','J','P','G')
, 30, (self.frame_width, self.frame_height))
while True:
ok, frame = self.video.read()
if not ok:
break
integral_bins, new_center = self.calculate_needed_integral_bins(frame, center_x, center_y)
c_x = new_center[0]
c_y = new_center[1]
min_dist = None
for j in range(-self.radius, self.radius + 1, step):
for i in range(-self.radius, self.radius + 1, step):
x, y = center_x + i, center_y + j
if not self.check_is_rectangle_in_bounds(x - self.t_half_width, y - self.t_half_height):
continue
start_x = c_x + i - self.t_half_width
start_y = c_y + j - self.t_half_height
new_bb = (x - self.t_half_width, y - self.t_half_height,
self.template_width, self.template_height)
new_patches = Patches(integral_bins, start_x, start_y, self.template_width, self.template_height,
split=self.split)
d = self.template_patches.distance(new_patches)
if min_dist is None:
min_dist = (d, new_bb)
if min_dist[0] > d:
min_dist = (d, new_bb)
new_bb = min_dist[1]
left_top = (new_bb[0], new_bb[1])
right_bot = (new_bb[0] + new_bb[2], new_bb[1] + new_bb[3])
cv2.rectangle(frame, left_top, right_bot, (255, 0, 0), 2, 1)
cv2.imshow("Tracking", frame)
create_video.write(frame)
center_x = int(new_bb[0] + new_bb[2] / 2)
center_y = int(new_bb[1] + new_bb[3] / 2)
k = cv2.waitKey(1) & 0xff
if k == 27: break
create_video.release()
cv2.destroyAllWindows()
def calculate_needed_integral_bins(self, frame, center_x, center_y):
start_x = center_x - self.radius - self.t_half_width
start_y = center_y - self.radius - self.t_half_height
end_x = center_x + self.radius + self.t_half_width + 1
end_y = center_y + self.radius + self.t_half_height + 1
s_x, s_y, e_x, e_y = self.calibrate_bounds(start_x, start_y, end_x, end_y)
frame = frame[s_y:e_y, s_x:e_x]
hues = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
hues = cv2.split(hues)[0]
y_pad_top, y_pad_bot = (0, 0)
x_pad_left, x_pad_right = (0, 0)
if start_y < 0:
y_pad_top = -1 * start_y
if start_y > self.frame_height - 1:
y_pad_bot = end_y - self.frame_height
if start_x < 0:
x_pad_left = -1 * start_x
if end_x > self.frame_width - 1:
x_pad_right = end_x - self.frame_width
y_padding = (y_pad_top, y_pad_bot)
x_padding = (x_pad_left, x_pad_right)
hues = np.pad(hues, [y_padding, x_padding], 'constant')
shape = hues.shape
new_center = int(shape[1] / 2), int(shape[0] / 2)
return ii.hue_integral_bins(hues), new_center
def calibrate_bounds(self, start_x, start_y, end_x, end_y):
if start_x < 0:
start_x = 0
elif start_x >= self.frame_width:
start_x = self.frame_width - 1
if end_x < 0:
end_x = 0
elif end_x >= self.frame_width:
end_x = self.frame_width - 1
if start_y < 0:
start_y = 0
elif start_y >= self.frame_height:
start_y = self.frame_height - 1
if end_y < 0:
end_y = 0
elif end_y >= self.frame_height:
end_y = self.frame_height - 1
return start_x, start_y, end_x, end_y
def is_in_bounds(self, x, y):
        if x < 0 or x >= self.frame_width or y < 0 or y >= self.frame_height:
return False
return True
def check_is_rectangle_in_bounds(self, i, j):
if not self.is_in_bounds(i, j):
return False
if not self.is_in_bounds(i + self.template_width, j + self.template_height):
return False
return True
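# A minimal usage sketch (assumptions: the default video file exists and the
# local `integral_images`/`patches` helper modules are importable; `selectROI`
# opens a window asking for the initial bounding box):
#
#     tracker = FragTracker(video_path="videos/times_square2.mp4")
#     tracker.execute(step=2)  # a larger step gives a coarser, faster search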
|
import random
import pygame
if __name__ == '__main__':
# Create a pygame window
pygame.init()
width = 640
height = 480
screen = pygame.display.set_mode([width, height])
screen.fill([255, 255, 255])
#########################
# Actual drawing here...
#########################
    for i in range(100):
        # use distinct names so the window's width/height above are not shadowed
        rect_width = random.randint(0, 250)
        rect_height = random.randint(0, 100)
        top = random.randint(0, 400)
        left = random.randint(0, 500)
        r = random.randint(0, 255)
        g = random.randint(0, 255)
        b = random.randint(0, 255)
        pygame.draw.rect(screen, [r, g, b], [left, top, rect_width, rect_height], 1)
pygame.display.flip()
# Keep the window alive...
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.quit()
|
# -*- coding: utf-8 -*-
# list, Indentation
langs = [
'Python',
'Java',
'Swift'
]
for lang in langs:
print(lang)
# tuple
company_names = (
'Google',
'Apple',
'Amazon'
)
print('len(company_names) =', len(company_names)) # 3
print('company_names[0] =', company_names[0]) # Google
print('company_names[-1] =', company_names[-1]) # Amazon
# set
fruits = {'apple', 'banana', 'cherry'}
print(fruits)
first_number_set = set([1, 1, 2, 3, 3])
print(first_number_set) # {1, 2, 3}
second_number_set = set([2, 3, 4])
print(first_number_set & second_number_set) # {2, 3}
print(first_number_set | second_number_set) # {1, 2, 3, 4}
# dictionary
person = {
'name': 'Johnny',
'age': 30
}
print('person[\'name\'] =', person['name'])
print('person.get(\'age\', -1) =', person.get('age', -1))
for x in range(10):
print(x)
|
'''
A frog wants to cross a river that is 11 feet across.
There are 10 stones in a line leading across the river, separated by 1 foot,
and the frog is only ever able to jump one foot forward to the next stone,
or two feet forward to the stone after the next.
In how many different ways can he jump exactly 11 feet to the other side of the river?
'''
'''
My notes:
First ask, how many ways can the frog jump in 11 jumps?
This is just 1,1,1,1,1,1,1,1,1,1,1 (eleven 1s), so that's 11 C 0 = 1 way.
Now in 10 jumps:
2,1,1,1,1,1,1,1,1,1 -- the single 2 can sit in any of 10 positions,
or just 10 C 1.
Now in 9 jumps (two 2s among the 9 jumps):
2,2,1,1,1,1,1,1,1
1,2,2,1,1,1,1,1,1
...
8+7+6+5+4+3+2+1 = 9 C 2
In 8 jumps: three 2s, 8 C 3.
In 7 jumps:
2,2,2,2,1,1,1 -- 7 C 4
The shortest is 6 jumps:
2,2,2,2,2,1 -- this can happen 6 ways, 6 C 5.
If we define f(n) = the number of ways to cross when the bank is n feet away,
for example:
f(1) = 1
since it's one foot away,
and f(2) = 2
since the frog can either jump 1 foot twice or 2 feet once.
From n feet out, a jump of 1 leaves n-1 feet and a jump of 2 leaves n-2, so
for n greater than 2, we can define a recursive relationship as
f(n) = f(n-1) + f(n-2)
which is just the Fibonacci sequence (shifted by one index).
'''
import math
def count_ways(n, r):
    # n choose r; integer division keeps the result an int
    num = math.factorial(n)
    den = math.factorial(r) * math.factorial(n - r)
    return num // den
print(count_ways(n=9,r=2))
def fibonacci(n):
if n < 0:
raise ValueError("invalid index!")
if n == 0:
return 0
if n == 1:
return 1
return fibonacci(n - 1) + fibonacci(n - 2)
print(fibonacci(3))
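# A quick sanity check tying the two approaches together: summing the
# combinatorial counts over k (the number of 2-foot jumps) should match the
# Fibonacci answer. With the notes' indexing f(1)=1, f(2)=2, the number of
# ways to cross 11 feet is fibonacci(12).
total_ways = sum(count_ways(n=11 - k, r=k) for k in range(6))
print(total_ways)     # 1 + 10 + 36 + 56 + 35 + 6 = 144
print(fibonacci(12))  # also 144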
# code: binary search
def binary_search(A, item):
    '''
    A is a sorted array, item is the value we want to search for
    '''
    if len(A) == 0:
        return False
    middle = len(A) // 2
    if A[middle] == item:
        return True
    if item < A[middle]:
        # recurse on the left half (slice, not a single element)
        return binary_search(A[:middle], item)
    else:
        # recurse on the right half (the original fell through and returned None)
        return binary_search(A[middle + 1:], item)
numbers = [1, 2, 3, 5, 8, 22, 34, 42, 87, 103]
print(binary_search(numbers, 4))
print(binary_search(numbers, 42))
|
import flask
import pandas as pd
import numpy as np
# Initialize the app
app = flask.Flask(__name__)
# HTTP extension
@app.route("/")
def hello():
return "Flask app works!"
@app.route("/predict", methods=["POST"])
def predict():
df = pd.read_pickle("test.pkl")
input_data = flask.request.json
country = input_data["Country"]
ingredients = list(df[country])
results = {"Ingredients" : ingredients}
return flask.jsonify(results)
if __name__ == "__main__":
    app.run(debug=True)
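# Example request against the dev server (assumes "test.pkl" pickles a
# DataFrame whose columns are country names and whose values are ingredients;
# the country key below is illustrative):
#
#   curl -X POST http://127.0.0.1:5000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"Country": "Italy"}'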
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2019-08-13 11:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0025_auto_20190711_1357'),
]
operations = [
migrations.AddField(
model_name='event',
name='location',
field=models.CharField(choices=[('Egypt', 'Egypt'), ('Nigeria', 'Nigeria'), ('Uganda', 'Uganda'), ('Kenya', 'Kenya'), ('San-Fransisco', 'San-Fransisco'), ('Kigali', 'Kigali')], default='San-Fransisco', max_length=50),
),
]
|
from __future__ import unicode_literals
import re, bcrypt
from django.db import models
from datetime import datetime, date
# email regex for use later on
# edit, not actually used in this project
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class UserManager(models.Manager):
def validate_registration(self,form_data):
# will append any errors with inputs to be shown after
errors=[]
# check name first
if form_data['name'] == '':
errors.append('Name is required.')
else:
if len(form_data['name'])<3:
errors.append('Name must have at least 3 characters.')
if not str.isalpha(str(form_data['name'].replace(' ',''))): #removes spaces when checking for non letters
errors.append('Name may not contain numbers or symbols.')
# add something to check if name is too long
# now check username
if form_data['username'] == '':
errors.append('Username is required.')
else:
if len(form_data['username'])<3:
errors.append('Username must have at least 3 characters.')
if len(self.filter(username=form_data['username']))>0:
errors.append('This username is already associated with an account.')
# finally check password
        if 'password' not in form_data:
            errors.append('Password is required.')
else:
if len(form_data['password'])<8:
errors.append('Password must be at least 8 characters.')
if form_data['password'] != form_data['pw_confirm']:
errors.append('Passwords must match.')
# if there are errors, will return the errors to be displayed
if len(errors)>0:
return {'error':errors}
return {'success':'Successful registration attempt'}
def validate_login(self,form_data):
errors = []
# check username
if len(self.filter(username=form_data['username']))<1:
errors.append('No account registered with this username.')
else:
user = self.filter(username=form_data['username'])[0]
# check to see if password is valid iff username is ok
if form_data['password'] == '':
errors.append('Please enter your password.')
elif not bcrypt.checkpw(str(form_data['password']),str(user.password)):
errors.append('Incorrect password.')
if len(errors)>0:
return {'error':errors}
return {'success':user.id}
def register_user(self, form_data):
user = User.objects.create()
user.name = form_data['name']
user.username = form_data['username']
user.password = bcrypt.hashpw(str(form_data['password']),bcrypt.gensalt())
user.save()
return User.objects.last()
class User(models.Model):
name=models.CharField(max_length=50)
username=models.CharField(max_length=20)
password=models.CharField(max_length=255)
created_at=models.DateTimeField(auto_now_add=True)
updated_at=models.DateTimeField(auto_now=True)
objects = UserManager()
def __repr__(self):
return str(self.username)
"""Trip tables and manager below"""
class TripManager(models.Manager):
def validate_trip(self,form_data):
# will append any errors with inputs to be shown after
errors=[]
# check destination first
if form_data['destination'] == '':
errors.append('Destination is required.')
else:
if len(form_data['destination'])<2:
errors.append('Destination must have at least 2 characters.')
# if not str.isalpha(str(form_data['destination'])):
# errors.append('Destination may not contain numbers or symbols.') #probably not needed for this
# now check description
if form_data['description'] == '':
errors.append('Description is required.')
else:
if len(form_data['description'])<2:
errors.append('Description must have at least 2 characters.')
# check date_from
if form_data['date_from'] == 'invalid':
errors.append('Please enter a start date.')
elif form_data['date_from'] < date.today():
errors.append('Please enter a start date in the future.')
# check date_to
if form_data['date_to'] == 'invalid':
errors.append('Please enter an end date.')
else:
if form_data['date_to'] < date.today():
errors.append('Please enter an end date in the future.')
if form_data['date_to'] < form_data['date_from']:
errors.append('Please enter an end date that is after your start date.')
# if there are errors, will return the errors to be displayed
if len(errors)>0:
return {'error':errors}
return {'success':'Successful trip attempt'}
def register_trip(self, form_data, user_id):
# create trip
# add user/creator to trip as organizer AND member
user = User.objects.get(id=user_id)
trip = Trip.objects.create(
destination = form_data['destination'],
description = form_data['description'],
date_from = form_data['date_from'],
date_to = form_data['date_to'],
organizer = user
)
trip.save()
trip.users.add(user)
return Trip.objects.last().id
def add_user(self,trip_id,user_id):
# adds a user as a part of the trip, but not the organizer
trip = Trip.objects.get(id=trip_id)
user = User.objects.get(id=user_id)
trip.users.add(user)
return trip.id
class Trip(models.Model):
destination=models.CharField(max_length=50)
description=models.CharField(max_length=255)
date_from=models.DateTimeField()
date_to=models.DateTimeField()
created_at=models.DateTimeField(auto_now_add=True)
updated_at=models.DateTimeField(auto_now=True)
users = models.ManyToManyField(User, related_name='joined_trips')
organizer = models.ForeignKey(User,related_name='organized_trips')
objects = TripManager()
def __repr__(self):
return str(self.destination)
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
from functions_script import noisy_sine
import numpy as np
def y(X, W):
    # linear model output w^T x; np.product reduces elementwise products,
    # which is not a dot product
    WT = np.transpose(W)
    return np.dot(WT, X)
def main(*args, **kwargs):
X = noisy_sine(samples=10, precision=10)
# Normally MxN where M is the rows, N is the cols
N = len(X) # Rows
D = 1 # Columns
""" # Pseudo Inverse calculation and multiplication
X = np.random.randn(2, 3)
for element in X:
print(element)
print()
PX = np.linalg.pinv(X)
for element in PX:
print(element)
print()
new = np.dot(X, PX)
for element in new:
print(element)
print()
"""
if __name__ == '__main__':
main()
|
"""
This type stub file was generated by pyright.
"""
from typing import Callable, TypeVar
from marshmallow.schema import Schema
RT = TypeVar("RT")
"""ETag feature"""
class EtagMixin:
"""Extend Blueprint to add ETag handling"""
METHODS_CHECKING_NOT_MODIFIED = ...
METHODS_NEEDING_CHECK_ETAG = ...
METHODS_ALLOWING_SET_ETAG = ...
ETAG_INCLUDE_HEADERS = ...
def etag(
self, etag_schema: Schema = ...
) -> Callable[[Callable[..., RT]], Callable[..., RT]]:
"""Decorator generating an endpoint response
:param etag_schema: :class:`Schema <marshmallow.Schema>` class
or instance. If not None, will be used to serialize etag data.
Can be used as either a decorator or a decorator factory:
Example: ::
@blp.etag
def view_func(...):
...
@blp.etag(EtagSchema)
def view_func(...):
...
The ``etag`` decorator expects the decorated view function to return a
``Response`` object. It is the case if it is decorated with the
``response`` decorator.
See :doc:`ETag <etag>`.
"""
...
def check_etag(self, etag_data, etag_schema=...):
"""Compare If-Match header with computed ETag
Raise 412 if If-Match header does not match.
Must be called from resource code to check ETag.
Unfortunately, there is no way to call it automatically. It is the
        developer's responsibility to do it. However, a warning is logged at
runtime if this function was not called.
Logs a warning if called in a method other than one of
PUT, PATCH, DELETE.
"""
...
def set_etag(self, etag_data, etag_schema=...):
"""Set ETag for this response
Raise 304 if ETag identical to If-None-Match header
Must be called from resource code, unless the view function is
decorated with the ``response`` decorator, in which case the ETag is
computed by default from response data if ``set_etag`` is not called.
Logs a warning if called in a method other than one of
GET, HEAD, POST, PUT, PATCH.
"""
...
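# A hedged usage sketch (not part of the stub): in flask-smorest these helpers
# are typically called from a Blueprint view. `blp`, `PetSchema`, and `storage`
# below are illustrative placeholders, not names from this file.
#
#     @blp.route("/<int:pet_id>", methods=["PUT"])
#     @blp.etag
#     @blp.response(200, PetSchema)
#     def update_pet(pet_id):
#         pet = storage.get(pet_id)
#         blp.check_etag(pet, PetSchema)  # raises 412 on If-Match mismatch
#         pet.update(flask.request.json)
#         return pet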
|
from collections import defaultdict
from typing import List, Tuple, DefaultDict
class SCCFinder():
"""
Kosaraju's two-pass algorithm to find strongly connected components;
probably not the most concise/efficient Python implementation of the
Kosaraju algorithm.
"""
def __init__(self, graph: List[Tuple[int, int]]) -> None:
"""
Converts List[Tuple[int, int]] graph to DefaultDict[int, List[int]]
graph, and computes the range of vertices to be looped over (backwards)
"""
self.graph = defaultdict(list)
self.graph_rev = defaultdict(list)
self.V_min = float('inf')
self.V_max = float('-inf')
for edge in graph:
(v, w) = edge
self.graph[v].append(w)
self.graph_rev[w].append(v)
if min(v, w) < self.V_min: self.V_min = min(v, w)
if max(v, w) > self.V_max: self.V_max = max(v, w)
self.V = range(self.V_max, self.V_min-1, -1)
def _dfs_first_pass(self,
graph_rev: DefaultDict[int, List[int]],
node: int
) -> None:
"""Obtains finishing times of the reversed graph"""
stack = [node]
while len(stack):
# notice no pop of stack, need to label it for finishing time
node = stack[-1]
if not self.visited[node]:
self.visited[node] = True
for node_adj in graph_rev[node]:
stack.append(node_adj)
else:
node = stack.pop()
if not self.finished[node]:
self.finished[node] = True
self.finishing_time[self.i] = node
self.i += 1
def _dfs_second_pass(self,
graph: DefaultDict[int, List[int]],
node: int
) -> None:
"""Obtains strongly connected components of graph"""
stack = [node]
source_node = stack[-1]
while len(stack):
node = stack.pop()
if not self.visited[node]:
self.visited[node] = True
self.scc[source_node].append(node)
for node_adj in graph[node]:
stack.append(node_adj)
def find_scc(self) -> DefaultDict[int, List[int]]:
# first pass
self.i = self.V_min # number of nodes processed so far
self.finishing_time = defaultdict(bool)
self.visited = defaultdict(bool)
self.finished = defaultdict(bool)
for v in self.V:
if not self.visited[v]:
self._dfs_first_pass(self.graph_rev, v)
# second pass
self.visited = defaultdict(bool)
self.scc = defaultdict(list)
for v in self.V:
v = self.finishing_time[v]
if not self.visited[v]:
self._dfs_second_pass(self.graph, v)
return self.scc
if __name__ == "__main__":
graph = [
(1, 2),
(2, 3),
(2, 4),
(2, 5),
(3, 6),
(4, 5),
(4, 7),
(5, 2),
(5, 6),
(5, 7),
(6, 3),
(6, 8),
(7, 8),
(7, 11),
(8, 7),
(9, 7),
(10, 7),
(10, 9),
(11, 10),
(11, 12),
(12, 13),
(13, 11),
]
scc_finder = SCCFinder(graph)
print("Graph: ")
for k, v in scc_finder.graph.items():
print("{:>2d} : {}".format(k, v))
print('--'*20)
scc = scc_finder.find_scc()
print("SCCs:")
for k, v in scc.items():
print(k, v)
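    # Expected components for the edge list above (up to ordering):
    #   {1}, {2, 4, 5}, {3, 6}, {7, 8, 9, 10, 11, 12, 13}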
|
import pandas as pd
url = "data_v3.csv"
insider = pd.read_csv(url, header=0)
print(insider.shape)
row_num = insider['side'].count()+1
train_num = int(row_num /3*2)
test_num = -1*int(row_num /3)
print "Training size: %d, Testing size: %d" % (train_num, test_num)
col_list = ['side', 'return_t5', "return_t30", "vol_sh_out_pct","stake_pct_chg", "tran_value","mkt_cap", "prev_tran_num","hit_rate_5d", "hit_rate_30d", "hit_rate_90d"]
X_train = insider[col_list][:train_num]
y_train_5d = insider.return_5d[:train_num]
y_train_30d = insider.return_30d[:train_num]
y_train_90d = insider.return_90d[:train_num]
# take the last third as the held-out test set; slicing [:test_num] would
# reuse most of the training rows
X_test = insider[col_list][test_num:]
y_test_5d = insider.return_5d[test_num:]
y_test_30d = insider.return_30d[test_num:]
y_test_90d = insider.return_90d[test_num:]
import numpy as np
from sklearn.svm import SVC
clf = SVC()
clf.fit(X_train, y_train_5d)
#print(clf.score(X_test, y_test_5d))
#clf.fit(X_train, y_train_30d)
#print(clf.score(X_test, y_test_30d))
#clf.fit(X_train, y_train_90d)
#print(clf.score(X_test, y_test_90d))
|
import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
from err import ExceptionMiddleware
sys.path.insert(0, '/home/bao/public_html')
os.environ['DJANGO_SETTINGS_MODULE'] = 'bao.settings'
import django.core.handlers.wsgi
# build the Django WSGI handler once at import time instead of per request
_django_handler = django.core.handlers.wsgi.WSGIHandler()
@ExceptionMiddleware
def application(environ, start_response):
    return _django_handler(environ, start_response)
|
import numpy as np
import cv2
import pickle
DxyvUxy = []  # collected per-corner samples: [dx, dy, intensity change, displacement components]
cap = cv2.VideoCapture('slow_traffic_small.mp4')
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 100,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7 )
# Parameters for lucas kanade optical flow
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors
color = np.random.randint(0,255,(100,3))
# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
count = 1
while(1):
count += 1
if count == 150:
# with open('DxyvUxy.pkl','wb') as file:
# pickle.dump(DxyvUxy,file)
break
    ret, frame = cap.read()
    if not ret:
        # the video ended before reaching frame 150
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
dx = cv2.Sobel(old_gray,cv2.CV_16S,1,0)
dy = cv2.Sobel(old_gray,cv2.CV_16S,0,1)
    VI = frame_gray.astype(np.int16) - old_gray.astype(np.int16)  # signed temporal difference; raw uint8 subtraction would wrap around
dx = cv2.resize(dx,(6400,3600))
dy = cv2.resize(dy,(6400,3600))
VI = cv2.resize(VI,(6400,3600))
# calculate optical flow
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
# Select good points
good_new = p1[st==1]
good_old = p0[st==1]
good_old_around = np.around(good_old*10).astype(np.int64)
for i in range(len(good_old)):
temp1 = []
if good_old_around[i][1] >= 3600:
good_old_around[i][1] = 3599
if good_old_around[i][0] >= 6400:
good_old_around[i][0] = 6399
x = dx[(good_old_around[i][1]),(good_old_around[i][0])]
y = dy[(good_old_around[i][1]),(good_old_around[i][0])]
vi = (VI[(good_old_around[i][1]),(good_old_around[i][0])])*4
ux = good_new[i][1] - good_old[i][1]
uy = good_new[i][0] - good_old[i][0]
temp1.append(x)
temp1.append(y)
temp1.append(vi)
temp1.append(ux)
temp1.append(uy)
DxyvUxy.append(temp1)
# draw the tracks
for i,(new,old) in enumerate(zip(good_new,good_old)):
a,b = new.ravel()
c,d = old.ravel()
        # OpenCV drawing functions expect integer pixel coordinates
        mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
        frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
img = cv2.add(frame,mask)
cv2.imshow('expected flow',img)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
# Now update the previous frame and previous points
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1,1,2)
cv2.destroyAllWindows()
cap.release()
|
# -*- coding: utf-8 -*-
"""Amazon SQS message implementation."""
from __future__ import absolute_import, unicode_literals
from .ext import (
RawMessage, Message, MHMessage, EncodedMHMessage, JSONMessage,
)
__all__ = [
'BaseAsyncMessage', 'AsyncRawMessage', 'AsyncMessage',
'AsyncMHMessage', 'AsyncEncodedMHMessage', 'AsyncJSONMessage',
]
class BaseAsyncMessage(object):
"""Base class for messages received on async client."""
def delete(self, callback=None):
if self.queue:
return self.queue.delete_message(self, callback)
def change_visibility(self, visibility_timeout, callback=None):
if self.queue:
return self.queue.connection.change_message_visibility(
self.queue, self.receipt_handle, visibility_timeout, callback,
)
class AsyncRawMessage(BaseAsyncMessage, RawMessage):
"""Raw Message."""
class AsyncMessage(BaseAsyncMessage, Message):
"""Serialized message."""
class AsyncMHMessage(BaseAsyncMessage, MHMessage):
"""MHM Message (uhm, look that up later)."""
class AsyncEncodedMHMessage(BaseAsyncMessage, EncodedMHMessage):
"""Encoded MH Message."""
class AsyncJSONMessage(BaseAsyncMessage, JSONMessage):
"""Json serialized message."""
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies *_wrapper in environment.
"""
import os
import sys
import TestGyp
test_format = ['ninja']
os.environ['CC_wrapper'] = 'distcc'
os.environ['LINK_wrapper'] = 'distlink'
os.environ['CC.host_wrapper'] = 'ccache'
test = TestGyp.TestGyp(formats=test_format)
old_env = dict(os.environ)
os.environ['GYP_CROSSCOMPILE'] = '1'
test.run_gyp('wrapper.gyp')
os.environ.clear()
os.environ.update(old_env)
if test.format == 'ninja':
cc_expected = ('cc = ' + os.path.join('..', '..', 'distcc') + ' ' +
os.path.join('..', '..', 'clang'))
cc_host_expected = ('cc_host = ' + os.path.join('..', '..', 'ccache') + ' ' +
os.path.join('..', '..', 'clang'))
ld_expected = 'ld = ../../distlink $cc'
if sys.platform != 'win32':
ldxx_expected = 'ldxx = ../../distlink $cxx'
if sys.platform == 'win32':
ld_expected = 'link.exe'
test.must_contain('out/Default/build.ninja', cc_expected)
test.must_contain('out/Default/build.ninja', cc_host_expected)
test.must_contain('out/Default/build.ninja', ld_expected)
if sys.platform != 'win32':
test.must_contain('out/Default/build.ninja', ldxx_expected)
test.pass_test()
|
from collections import defaultdict
from datetime import datetime, timedelta
import json
from django.core.exceptions import SuspiciousOperation
from django.http import HttpResponse
from django.utils.functional import cached_property
from django.views import View
from django.views.generic import TemplateView
from gim.timers import sleep
from gim.core.tasks.issue import (
IssueEditAssigneesJob,
IssueEditRequestedReviewersJob,
IssueEditLabelsJob,
IssueEditMilestoneJob,
IssueEditProjectsJob,
IssueEditStateJob,
)
from gim.front.repository.issues.forms import update_columns
from gim.front.repository.views import RepositoryViewMixin
from gim.front.mixins.views import WithAjaxRestrictionViewMixin
from gim.subscriptions.models import SUBSCRIPTION_STATES
from gim.ws import sign
class MultiSelectViewBase(WithAjaxRestrictionViewMixin, RepositoryViewMixin):
def __init__(self, *args, **kwargs):
self.issues_pks = []
super(MultiSelectViewBase, self).__init__(*args, **kwargs)
http_method_names = ['post']
ajax_only = True
allowed_rights = SUBSCRIPTION_STATES.WRITE_RIGHTS
def convert_issues_pks_from_post(self):
self.issues_pks = []
done = set()
try:
for pk in self.request.POST.getlist('issues[]'):
converted_pk = int(pk)
if converted_pk in done:
continue
self.issues_pks.append(converted_pk)
done.add(converted_pk)
except Exception:
raise SuspiciousOperation
def get_issues_from_pks(self, pks):
return self.repository.issues.filter(
id__in=set(pks)
)
@classmethod
def order_issue_from_pk_list(cls, issues, pks):
by_pk = {issue.pk: issue for issue in issues}
result = []
for pk in pks:
if pk in by_pk:
result.append(by_pk[pk])
return result
class ListViewBase(MultiSelectViewBase, TemplateView):
def post(self, request, *args, **kwargs):
self.convert_issues_pks_from_post()
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
def get_issues_info(self, issues):
issues_pks = [issue.pk for issue in self.order_issue_from_pk_list(issues, self.issues_pks)]
issues_pks_json = json.dumps(issues_pks)
return {
'issues_pks': issues_pks_json,
'issues_count': len(issues_pks),
'issues_hash': sign(issues_pks),
}
class MultiSelectListManyUsersViewBase(ListViewBase):
field_name = None
def get_issues_queryset(self):
return self.get_issues_from_pks(self.issues_pks)
def get_context_data(self, **kwargs):
context = super(MultiSelectListManyUsersViewBase, self).get_context_data(**kwargs)
issues = self.get_issues_queryset().prefetch_related(self.field_name)
collaborators = list(self.repository.collaborators.all())
users = defaultdict(int)
for issue in issues:
for collaborator in getattr(issue, self.field_name).all():
users[collaborator.pk] += 1
for collaborator in collaborators:
collaborator.multiselect_pre_count = users.get(collaborator.pk)
context.update({
'collaborators': collaborators,
'has_data': True, # always at least the current user
'data_count': len(collaborators),
})
context.update(self.get_issues_info(issues))
return context
class MultiSelectListAssigneesView(MultiSelectListManyUsersViewBase):
template_name = 'front/repository/issues/multiselect/list_assignees.html'
url_name = 'list-assignees'
field_name = 'assignees'
class MultiSelectListRequestedReviewersView(MultiSelectListManyUsersViewBase):
template_name = 'front/repository/issues/multiselect/list_requested_reviewers.html'
url_name = 'list-requested_reviewers'
field_name = 'requested_reviewers'
def get_issues_queryset(self):
return super(MultiSelectListRequestedReviewersView, self).get_issues_queryset().filter(is_pull_request=True)
def get_context_data(self, **kwargs):
context = super(MultiSelectListRequestedReviewersView, self).get_context_data(**kwargs)
if not context['issues_count']:
# for when we don't have any PR selected
context['has_data'] = False
return context
class MultiSelectListLabelsView(ListViewBase):
template_name = 'front/repository/issues/multiselect/list_labels.html'
url_name = 'list-labels'
def get_context_data(self, **kwargs):
context = super(MultiSelectListLabelsView, self).get_context_data(**kwargs)
issues = self.get_issues_from_pks(self.issues_pks).prefetch_related('labels')
label_types = list(self.label_types)
simple_labels = list(self.repository.labels.filter(label_type_id__isnull=True).order_by('lower_name'))
has_data = False
data_count = 0
if label_types or simple_labels:
has_data = True
data_count = len(simple_labels) + sum([len(label_type.labels.all()) for label_type in label_types])
issues_labels = defaultdict(int)
for issue in issues:
for label in issue.labels.all():
issues_labels[label.pk] += 1
for label_type in label_types:
for label in label_type.labels.all():
label.multiselect_pre_count = issues_labels.get(label.pk)
for label in simple_labels:
label.multiselect_pre_count = issues_labels.get(label.pk)
context.update({
'label_types': label_types,
'simple_labels': simple_labels,
'has_data': has_data,
'data_count': data_count,
})
context.update(self.get_issues_info(issues))
return context
class MultiSelectListMilestonesView(ListViewBase):
template_name = 'front/repository/issues/multiselect/list_milestones.html'
url_name = 'list-milestones'
def get_context_data(self, **kwargs):
context = super(MultiSelectListMilestonesView, self).get_context_data(**kwargs)
issues = self.get_issues_from_pks(self.issues_pks)
milestones = list(self.milestones)
milestones_by_pk = {milestone.pk: milestone for milestone in milestones}
count_without_milestones = 0
for issue in issues:
if not issue.milestone_id:
count_without_milestones += 1
continue
milestone = milestones_by_pk[issue.milestone_id]
if not hasattr(milestone, 'multiselect_pre_count'):
milestone.multiselect_pre_count = 0
milestone.multiselect_pre_count += 1
context.update({
'count_without_milestones': count_without_milestones,
'milestones': {
'open': [milestone for milestone in milestones if milestone.state == 'open'],
'closed': [milestone for milestone in milestones if milestone.state == 'closed'],
},
'has_data': True, # always the "no milestone" entry
'data_count': len(milestones),
})
context.update(self.get_issues_info(issues))
return context
class MultiSelectListProjectsView(ListViewBase):
template_name = 'front/repository/issues/multiselect/list_projects.html'
url_name = 'list-projects'
def get_context_data(self, **kwargs):
context = super(MultiSelectListProjectsView, self).get_context_data(**kwargs)
issues = self.get_issues_from_pks(self.issues_pks).prefetch_related('cards__column')
projects = list(self.projects)
has_data = False
data_count = 0
if projects:
has_data = True
data_count = sum([project.num_columns for project in projects])
nb_issues = len(issues)
count_not_in_projects = {project.pk: nb_issues for project in projects}
issues_columns = defaultdict(int)
for issue in issues:
for card in issue.cards.all():
issues_columns[card.column_id] += 1
if card.column.project_id in count_not_in_projects:
count_not_in_projects[card.column.project_id] -= 1
for project in projects:
project.multiselect_absent_count = count_not_in_projects[project.pk]
for column in project.columns.all():
column.multiselect_pre_count = issues_columns.get(column.pk)
context.update({
'projects': projects,
'has_data': has_data,
'data_count': data_count,
})
context.update(self.get_issues_info(issues))
return context
class MultiSelectListStatesView(ListViewBase):
template_name = 'front/repository/issues/multiselect/list_states.html'
url_name = 'list-state'
def get_context_data(self, **kwargs):
context = super(MultiSelectListStatesView, self).get_context_data(**kwargs)
issues = self.get_issues_from_pks(self.issues_pks)
states = {'open': {'key': 1, 'multiselect_pre_count': 0}, 'closed': {'key': 0, 'multiselect_pre_count': 0}}
for issue in issues:
states[issue.state]['multiselect_pre_count'] += 1
context.update({
'states': states,
'has_data': True,
'data_count': 2,
})
context.update(self.get_issues_info(issues))
return context
class ApplyViewBase(MultiSelectViewBase, View):
repository_relation = None
job_model = None
field_name = None
is_related_field = False
change_updated_at = 'exact'
fuzzy_delta = timedelta(seconds=120)
def post(self, request, *args, **kwargs):
self.convert_issues_pks_from_post()
issues, to_set, to_unset, front_uuid = self.get_data()
count_success, failures = self.process_data(issues, to_set, to_unset, front_uuid)
return HttpResponse(
json.dumps({
'count_success': count_success,
'failures': sorted(failures),
}),
content_type='application/json',
)
def convert_issues_pks_from_post(self):
super(ApplyViewBase, self).convert_issues_pks_from_post()
if not self.issues_pks or sign(self.issues_pks) != self.request.POST.get('hash'):
raise SuspiciousOperation
def verify_values(self, pks):
if not self.repository_relation:
raise NotImplementedError
converted_pks = []
try:
for pk in pks:
converted_pks.append(int(pk))
except Exception:
raise SuspiciousOperation
objects = getattr(self.repository, self.repository_relation).filter(pk__in=converted_pks)
if len(converted_pks) != len(objects):
raise SuspiciousOperation
return objects
def get_data(self):
try:
to_set = self.verify_values([int(value) for value in self.request.POST.getlist('set[]')])
to_unset = self.verify_values([int(value) for value in self.request.POST.getlist('unset[]')])
except (ValueError, TypeError):
raise SuspiciousOperation
return (
self.get_issues_from_pks(self.issues_pks),
to_set,
to_unset,
str(self.request.POST.get('front_uuid', '') or '')[:36]
)
def process_data(self, issues, to_set, to_unset, front_uuid):
raise NotImplementedError
@classmethod
def get_current_job_for_issue(cls, issue):
try:
job = cls.job_model.collection(identifier=issue.pk, queued=1).instances()[0]
except IndexError:
return None, None
else:
who = job.gh_args.hget('username')
return job, who
@cached_property
def user_gh(self):
return self.request.user.get_connection()
def save_value(self, issue, value):
if self.field_name is None:
raise NotImplementedError
if self.is_related_field:
getattr(issue, self.field_name).set(value)
else:
setattr(issue, self.field_name, value)
return value
def update_issue(self, issue, value, front_uuid):
iteration = 0
while True:
current_job, who = self.get_current_job_for_issue(issue)
if not current_job:
break
else:
if iteration >= 2:
return who
else:
sleep(0.1)
iteration += 1
revert_status = None
if self.is_related_field:
if issue.github_status == issue.GITHUB_STATUS_CHOICES.FETCHED:
# We'll wait to have m2m saved to run signals
issue.github_status = issue.GITHUB_STATUS_CHOICES.SAVING
revert_status = issue.GITHUB_STATUS_CHOICES.FETCHED
if self.change_updated_at is not None:
now = datetime.utcnow()
if not issue.updated_at:
issue.updated_at = now
elif self.change_updated_at == 'fuzzy':
if now > issue.updated_at + self.fuzzy_delta:
issue.updated_at = now
else: # 'exact'
if now > issue.updated_at:
issue.updated_at = now
issue.front_uuid = front_uuid
issue.skip_reset_front_uuid = True
value_for_job = self.save_value(issue, value)
issue.save()
if revert_status:
# Ok now the signals could work
issue.github_status = revert_status
issue.save()
self.job_model.add_job(issue.pk, gh=self.user_gh, value=self.format_value_for_job(value_for_job))
def format_value_for_job(self, value):
return value
class MultiSelectApplyManyUsersViewBase(ApplyViewBase):
repository_relation = 'collaborators'
is_related_field = True
change_updated_at = 'fuzzy'
def process_data(self, issues, to_set, to_unset, front_uuid):
issues = issues.prefetch_related(self.field_name)
count_success = 0
failures = []
for issue in self.order_issue_from_pk_list(issues, self.issues_pks):
users = set(getattr(issue, self.field_name).all())
touched = False
for user in to_set:
if user not in users:
users.add(user)
touched = True
for user in to_unset:
if user in users:
users.remove(user)
touched = True
if touched:
current_update_by = self.update_issue(issue, users, front_uuid)
if current_update_by:
failures.append((issue.number, current_update_by))
else:
count_success += 1
return count_success, failures
def format_value_for_job(self, value):
return json.dumps([user.username for user in value] if value else [])
class MultiSelectApplyAssigneesView(MultiSelectApplyManyUsersViewBase):
url_name = 'apply-assignees'
job_model = IssueEditAssigneesJob
field_name = 'assignees'
class MultiSelectApplyRequestedReviewersView(MultiSelectApplyManyUsersViewBase):
url_name = 'apply-requested_reviewers'
job_model = IssueEditRequestedReviewersJob
field_name = 'requested_reviewers'
@classmethod
def order_issue_from_pk_list(cls, issues, pks):
return [
issue
for issue in
super(MultiSelectApplyRequestedReviewersView, cls).order_issue_from_pk_list(issues, pks)
if issue.is_pull_request
]
class MultiSelectApplyLabelsView(ApplyViewBase):
url_name = 'apply-labels'
repository_relation = 'labels'
job_model = IssueEditLabelsJob
field_name = 'labels'
is_related_field = True
change_updated_at = 'fuzzy'
def process_data(self, issues, to_set, to_unset, front_uuid):
issues = issues.prefetch_related('labels')
count_success = 0
failures = []
for issue in self.order_issue_from_pk_list(issues, self.issues_pks):
labels = set(issue.labels.all())
touched = False
for label in to_set:
if label not in labels:
labels.add(label)
touched = True
for label in to_unset:
if label in labels:
labels.remove(label)
touched = True
if touched:
current_update_by = self.update_issue(issue, labels, front_uuid)
if current_update_by:
failures.append((issue.number, current_update_by))
else:
count_success += 1
return count_success, failures
def format_value_for_job(self, value):
return json.dumps([label.name for label in value] if value else [])
class MultiSelectApplyMilestoneView(ApplyViewBase):
url_name = 'apply-milestone'
repository_relation = 'milestones'
job_model = IssueEditMilestoneJob
field_name = 'milestone'
is_related_field = False
change_updated_at = 'fuzzy'
def process_data(self, issues, to_set, to_unset, front_uuid):
issues = issues.select_related('milestone')
count_success = 0
failures = []
new_milestone = None if not to_set else to_set[0]
for issue in self.order_issue_from_pk_list(issues, self.issues_pks):
if issue.milestone != new_milestone:
current_update_by = self.update_issue(issue, new_milestone, front_uuid)
if current_update_by:
failures.append((issue.number, current_update_by))
else:
count_success += 1
return count_success, failures
def format_value_for_job(self, value):
return value.number if value else ''
class MultiSelectApplyProjectsView(ApplyViewBase):
url_name = 'apply-projects'
repository_relation = 'project_columns'
is_related_field = True
job_model = IssueEditProjectsJob
change_updated_at = 'fuzzy'
def process_data(self, issues, to_set, to_unset, front_uuid):
issues = issues.prefetch_related('cards__column')
count_success = 0
failures = []
for issue in self.order_issue_from_pk_list(issues, self.issues_pks):
columns = set([card.column for card in issue.cards.all()])
touched = False
for column in to_set:
if column not in columns:
columns.add(column)
touched = True
for column in to_unset:
if column in columns:
columns.remove(column)
touched = True
if touched:
current_update_by = self.update_issue(issue, columns, front_uuid)
if current_update_by:
failures.append((issue.number, current_update_by))
else:
count_success += 1
return count_success, failures
def save_value(self, issue, value):
return update_columns(issue, value)
def format_value_for_job(self, value):
return json.dumps(value) # result of `update_columns` called in `save_value`
class MultiSelectApplyStateView(ApplyViewBase):
url_name = 'apply-state'
repository_relation = None
job_model = IssueEditStateJob
field_name = 'state'
is_related_field = False
change_updated_at = 'fuzzy'
def verify_values(self, pks):
if pks and pks != [1]:
raise SuspiciousOperation
return pks
def process_data(self, issues, to_set, to_unset, front_uuid):
count_success = 0
failures = []
if to_set and to_unset:
raise SuspiciousOperation
new_state = 'open' if to_set else 'closed'
for issue in self.order_issue_from_pk_list(issues, self.issues_pks):
if issue.state != new_state:
current_update_by = self.update_issue(issue, new_state, front_uuid)
if current_update_by:
failures.append((issue.number, current_update_by))
else:
count_success += 1
return count_success, failures
|
# For each input n (input ends with 0), count the primes p with n < p <= 2n
# (Bertrand's postulate guarantees at least one).
n = []
while True:
    su = int(input())
    if su == 0:
        break
    n.append(su)
big_one = max(n)
# sieve over [0, 2*big_one]: 1 marks a prime, 2 marks a composite
sosu = [0 for i in range(2*big_one+1)]
for i in range(2, 2*big_one+1):
    if sosu[i] == 0:
        sosu[i] = 1
    else:
        continue
    for j in range(2, 2*big_one+1):
        if i*j > 2*big_one:
            break
        else:
            sosu[i*j] = 2
for i in range(len(n)):
    count = 0
    for j in range(n[i]+1, 2*n[i]+1):
        if sosu[j] == 1:
            count += 1
    print(count)
|
import pandas as pd
import numpy as np
all_data = pd.read_csv('datasets/New_all_data_Pollutors_legitimate.csv')
all_data.rename(columns={'No.1': 'UserID', 'char_count': 'TweetLen'}, inplace=True)
print(all_data)
legitimate_new = pd.read_csv('datasets/Legitimate_New.csv')
print(legitimate_new.head())
polluter_new = pd.read_csv('datasets/Polluter_New12.csv')
print(polluter_new)
legitimate_Polluter_new = pd.concat([legitimate_new, polluter_new], sort=False)
print(legitimate_Polluter_new.head())
all_data = pd.merge(all_data, legitimate_Polluter_new, on='UserID')
print(all_data)
# all_data.to_csv('abcd2.csv',index=False)
Legitimate_created = pd.read_csv('datasets/legitimate_users.csv', usecols=['CreatedAt','UserID'])
# print(Legitimate_created)
Polluter_created = pd.read_csv('datasets/content_polluters.csv', usecols=['CreatedAt','UserID'])
# print(Polluter_created)
legitimate_polluter_created = pd.concat([Legitimate_created,Polluter_created],sort=False)
# print(legitimate_polluter_created)
all_data = pd.merge(all_data, legitimate_polluter_created, on='UserID')
# print(all_data)
# all_data['Followership'] = (all_data['FollowerCount']/all_data['FriendsCount'])
# all_data = pd.merge(all_data, all_data['Followership'], on='UserID')
# print(all_data)
all_data['CreatedAt'] = all_data['CreatedAt'].str.extract(r'(\d{4})', expand=True)  # keep only the 4-digit year
# print(all_data['CreatedAt'])
all_data['Date_time'] = all_data['Date_time'].str.extract(r'(\d{4})', expand=True)
# print(all_data['Date_time'])
# print(all_data)
# all_data.to_csv('abc11.csv', index=False)
all_data = all_data.dropna()
# print(all_data)
# # By Calculating Entropy
import math
import nltk
def entropy(labels):
freqdist = nltk.FreqDist(labels)
probs = [freqdist.freq(l) for l in freqdist]
return -sum(p * math.log(p, 2) for p in probs)
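# e.g. entropy('aabb') == 1.0: two equally likely symbols carry one bit each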
all_data['Entropy'] = all_data['Tweet_text'].map(entropy)
# all_data.to_csv('Honeypot-New.csv', index=False)
# print(all_data)
# # Count of each User
count_user = all_data['UserID'].value_counts()
print(count_user)
all_data = all_data.drop(columns=['UserID','Tweet_text'])
# # Correlation Heatmap
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(15, 15))
sns.heatmap(data=all_data.corr(), annot=True, linewidths=.3, fmt="1.2f")
plt.show()
print(all_data.describe())
# # Class Distribution
import seaborn as sns
import matplotlib.pyplot as plt
data = all_data
sns.countplot(data=data, x="Class")
plt.show()
data.loc[:, "Class"].value_counts()
Class_lable = all_data.Class
Text_Features_train = all_data.drop(columns=['Class'])
# print(all_data)
# all_data.to_csv('reg.csv',index=False)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(Text_Features_train, Class_lable, test_size=0.30, random_state=700)
# # RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
Ran_For_uni = RandomForestClassifier(n_estimators=200, max_depth=30, random_state=9, max_leaf_nodes=30)
Ran_For_uni = Ran_For_uni.fit(X_train, y_train)
print(Ran_For_uni)
y_pred1 = Ran_For_uni.predict(X_test)
print('Random Forest= {:.2f}'.format(Ran_For_uni.score(X_test, y_test)))
# # Precision, Recall, F1
from sklearn.metrics import classification_report
print('\n')
print("Precision, Recall, F1")
print('\n')
CR = classification_report(y_test, y_pred1)
print(CR)
print('\n')
# # ROC CURVE
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
fpr, tpr, thresholds = roc_curve(y_test, y_pred1)
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='ROC curve (area = %0.2f)' % roc_auc)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC CURVE')
plt.legend(loc="lower right")
plt.show()
# # GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingClassifier
Gr_uni = GradientBoostingClassifier()
Gr_uni = Gr_uni.fit(X_train, y_train)
print(Gr_uni)
y_pred1 = Gr_uni.predict(X_test)
print('Gradient boosting = {:.2f}'.format(Gr_uni.score(X_test, y_test)))
# # Precision, Recall, F1
from sklearn.metrics import classification_report
print('\n')
print("Precision, Recall, F1")
print('\n')
CR = classification_report(y_test, y_pred1)
print(CR)
print('\n')
# # ROC Curve
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
fpr, tpr, thresholds = roc_curve(y_test, y_pred1)
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='ROC curve (area = %0.2f)' % roc_auc)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC CURVE')
plt.legend(loc="lower right")
plt.show()
# # ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesClassifier
extra_tree = ExtraTreesClassifier(n_estimators=150)
extra_tree = extra_tree.fit(X_train, y_train)
print(extra_tree)
y_pred1 = extra_tree.predict(X_test)
print('ExtraTree = {:.2f}'.format(extra_tree.score(X_test, y_test)))
# # Precision, Recall, F1
from sklearn.metrics import classification_report
print('\n')
print("Precision, Recall, F1")
print('\n')
CR = classification_report(y_test, y_pred1)
print(CR)
print('\n')
# # ROC CURVE
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
fpr, tpr, thresholds = roc_curve(y_test, y_pred1)
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=1, label='ROC curve (area = %0.2f)' % roc_auc)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC CURVE')
plt.legend(loc="lower right")
plt.show()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(Text_Features_train, Class_lable , test_size=0.3,
stratify=Class_lable, random_state=1)
from sklearn.svm import SVC
svclassifier = SVC(kernel='linear',cache_size=4000)
svclassifier.fit(X_train, y_train)
y_pred = svclassifier.predict(X_test)
from sklearn.metrics import classification_report, confusion_matrix
print('SVM= {:.2f}'.format(svclassifier.score(X_test, y_test)))
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=3, label='SVM (area = %0.2f)' % roc_auc)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC CURVE')
plt.legend(loc="lower right")
plt.show()
# # Comparison of Results
from prettytable import PrettyTable
x = PrettyTable()
print('\n')
print("Deatiled Performance of the all models")
x.field_names = ["Model", "Accuracy"]
x.add_row(["RandomForestClassifier", 0.88])
x.add_row(["GradientBoostingClassifier", 0.88])
x.add_row(["ExtraTreesClassifier", 0.98])
print(x)
print('\n')
|
import threading
import connection
import socket
class networkThread(threading.Thread):
def __init__(self, pyccConnection, inputQueue, notifyEvent):
threading.Thread.__init__(self)
self.pyccConnection = pyccConnection
self.inputQueue = inputQueue
self.notifyEvent = notifyEvent
def run(self):
        ''' main loop of thread'''
run = True
while run:
try:
data = self.pyccConnection.parseInput()
except socket.timeout:
continue
except socket.error:
run = False
continue
            if data is False:  # connection closed
                run = False
                continue
if type(data) is not list: # no new packages
continue
for package in data:
if type(package) is not connection.PyCCPackage:
continue
# send package to logicThread
self.inputQueue.put(package)
self.notifyEvent.set() # new data for logicThread
class logicThread(threading.Thread):
def __init__(self, pyccConnection, inputQueue, todoQueue, notifyEvent):
threading.Thread.__init__(self)
self.pyccConnection = pyccConnection
self.inputQueue = inputQueue
self.todoQueue = todoQueue
self.notifyEvent = notifyEvent
self.syncRequestEvent = threading.Event()
self.callbacks = {}
self.accounts = {}
self.accountLock = threading.Lock()
def run(self):
''' main loop of thread'''
work = True
self.requestAccountList()
while work and self.notifyEvent.wait(): # wait for new input packages or console request
while not self.inputQueue.empty(): # handle all packages from net
package = self.inputQueue.get()
if package.handle in self.callbacks:
self.callbacks[package.handle](package)
continue
print('$$$${type}{handle}:{command}'.format(type=package.type,
handle=package.handle,command=package.command))
try:
print(package.data.decode('utf8'))
except AttributeError:
pass
except UnicodeError:
print(package.data)
while not self.todoQueue.empty(): # handle data from cmd
newData = self.todoQueue.get()
if 'stop' in (newData,newData[0]):
work = False
break
if newData[0] == 'status':
self.sendStatus()
continue
if newData[0] == 'connectTo':
self.connectTo(newData[1])
continue
if newData[0] == 'shutdown':
self.shutdown()
continue
if newData[0] == 'sendMessage':
self.sendMessage(newData[1], newData[2])
continue
if newData[0] == 'list':
self.sendList()
continue
self.notifyEvent.clear() # all task done
def sendPackage(self, package, callback=None):
''' send a package to backend - support for callback methods'''
if callback is not None:
self.callbacks[package.handle]=callback
self.pyccConnection.sendPackage(package)
def newRequest(self):
''' return new PyCCPackage (type is request)'''
package = connection.PyCCPackage()
package.type = package.TYPE_REQUEST
package.handle = self.pyccConnection.newRequest()
return package
def sendStatus(self):
''' send status command to backend'''
package = self.newRequest()
package.command = 'status'
self.sendPackage(package, self.recvStatus)
def recvStatus(self, package):
''' get status response'''
print(package.data.decode('utf8'))
self.syncRequestEvent.set()
def connectTo(self, args):
        ''' send connectTo command to backend'''
package = self.newRequest()
package.command = 'connectTo {0}'.format(args)
self.sendPackage(package)
self.syncRequestEvent.set()
def shutdown(self):
''' send shutdown to backend'''
package = self.newRequest()
package.command = 'shutdown'
self.sendPackage(package)
self.syncRequestEvent.set()
def requestAccountList(self):
''' ask backend for list of all accounts'''
package = self.newRequest()
package.command = 'getAccounts'
self.sendPackage(package, self.getAccountList)
    def getAccountList(self, package):
        ''' get response for account list request'''
        try:
            with self.accountLock:
                self.accounts = {user.split(":")[0]: user.split(":")[1]
                                 for user in package.data.decode('utf8').split(",")}
        except Exception:
            print("error parsing account list")
def sendMessage(self, user, message):
''' send message to other user'''
package = self.newRequest()
package.command = 'sendMessage {0}'.format(user)
package.data = message
self.sendPackage(package, self.recvStatus)
self.syncRequestEvent.set()
def sendList(self):
''' send listContactStates to backend'''
package = self.newRequest()
package.command = 'listContactStates'
self.sendPackage(package, self.recvList)
def recvList(self, package):
''' get listContactStates response'''
print(package.data.decode('utf8'))
self.syncRequestEvent.set()
|
#!/usr/bin/python3
def safe_print_list(my_list=[], x=0):
try:
if x == 0:
print()
return 0
else:
for i in range(x):
print("{}".format(my_list[i]), end='')
print()
return i + 1
    except IndexError:
        print()
        return i
|
import pygame
import time
import random
'''Game initialization with pygame'''
pygame.init()
'''Screen Display size'''
display_width = 400
display_height = 400
display = pygame.display.set_mode((display_width,display_height))
''' Background color RGB'''
white=(255,255,255)
''' Snake Color RGB'''
black=(0,0,0)
'''Text Color RGB'''
red=(255,0,0)
magenta = (255,0,255)
'''Food Color RGB'''
blue=(0,0,255)
'''Covid Color'''
green = (0, 255, 0)
'''Snake Size and characteristics'''
snake_size = 10
'''Snake Speed'''
speed = pygame.time.Clock()
snake_speed = 30
'''Message display'''
font_style = pygame.font.SysFont("comicsansms", 15)
score_style = pygame.font.SysFont("comicsansms", 25)
random_num_width = random.randint(0,display_width)
random_num_height = random.randint(0,display_height)
def snake(snake_size, snake_body,color):
for x in snake_body:
pygame.draw.rect(display, color,[x[0], x[1], snake_size, snake_size])
def score(score):
value = score_style.render("Your Score is: "+str(score), True, magenta )
display.blit(value, [ 0,0] )
def message_lost(msg, color):
message = font_style.render(msg, True, color)
display.blit(message,[display_width/12,display_height/2])
'''game loop '''
def repeat_game():
close_display = False
game_over=False
''' Starting coordinates '''
x1 = display_width/2
y1 = display_height/2
''' Coordinate Changes'''
x1_change = 0
y1_change = 0
'''Directions'''
up = False
down = False
left = False
right = False
'''Snake Body'''
snake_body = []
length_of_snake = 1
snake_speed = 20
snake_color = True
random_num_width = random.randint(0,display_width)
random_num_height = random.randint(0,display_height)
'''Food Coordinates'''
foodX = round(random.randrange(0, display_width - (snake_size*4))/10)*10
foodY = round(random.randrange(0, display_height - (snake_size*4))/10)*10
''' Covid coordinates'''
covidX = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX2 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY2 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX3 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY3 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX4 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY4 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX5 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY5 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX6 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY6 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
while not game_over:
while close_display == True:
display.fill(white)
message_lost(':( you lost , press Q to quit or P to play again', red)
score(length_of_snake-1)
pygame.display.update()
for key_pressed in pygame.event.get():
if key_pressed.type == pygame.QUIT:
game_over = True
close_display = False
if key_pressed.type == pygame.KEYDOWN:
if key_pressed.key == pygame.K_q:
game_over = True
close_display = False
if key_pressed.key == pygame.K_p:
repeat_game()
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_over=True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT and right == False:
x1_change = -snake_size
y1_change = 0
left = True
up = False
down = False
right = False
elif event.key == pygame.K_RIGHT and left == False:
x1_change = snake_size
y1_change = 0
left = False
up = False
down = False
right = True
elif event.key == pygame.K_UP and down == False:
x1_change = 0
y1_change = -snake_size
left = False
up = True
down = False
right = False
elif event.key == pygame.K_DOWN and up == False:
x1_change = 0
y1_change = snake_size
left = False
up = False
down = True
right = False
        # wall hit (or score driven below zero by covid) ends the round
        if x1 >= display_width or x1 < 0 or y1 >= display_height or y1 < 0:
            close_display = True
        if length_of_snake < 0:
            close_display = True
x1 += x1_change
y1 += y1_change
display.fill(white)
pygame.draw.rect(display,blue,[foodX,foodY,snake_size,snake_size])
pygame.draw.circle(display,green,[covidX,covidY], 5)
pygame.draw.circle(display,green,[covidX2,covidY2], 5)
pygame.draw.circle(display,green,[covidX3,covidY3], 5)
pygame.draw.circle(display,green,[covidX4,covidY4], 5)
pygame.draw.circle(display,green,[covidX5,covidY5], 5)
pygame.draw.circle(display,green,[covidX6,covidY6], 5)
snake_Head = []
snake_Head.append(x1)
snake_Head.append(y1)
snake_body.append(snake_Head)
if len(snake_body) > length_of_snake:
del snake_body[0]
for x in snake_body[:-1]:
if x == snake_Head:
close_display = True
if snake_color:
snake(snake_size,snake_body,black)
else:
snake(snake_size,snake_body,red)
score(length_of_snake-1)
pygame.display.update()
if x1 == foodX and y1 == foodY:
foodX = round(random.randrange(0, display_width - (snake_size*4))/10)*10
foodY = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX2 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY2 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX3 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY3 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX4 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY4 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX5 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY5 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX6 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY6 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
length_of_snake += 1
snake_speed += 0.8
snake_color = True
        # head landed on a covid spot (the '+ 6' on both sides of the original test cancelled out)
        if (x1 == covidX and y1 == covidY) or (x1 == covidX2 and y1 == covidY2) or \
           (x1 == covidX3 and y1 == covidY3) or (x1 == covidX4 and y1 == covidY4) or \
           (x1 == covidX5 and y1 == covidY5) or (x1 == covidX6 and y1 == covidY6):
covidX = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX2 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY2 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX3 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY3 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX4 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY4 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX5 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY5 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
covidX6 = round(random.randrange(0, display_width - (snake_size*4))/10)*10
covidY6 = round(random.randrange(0, display_height - (snake_size*4))/10)*10
foodX = round(random.randrange(0, display_width - (snake_size*4))/10)*10
foodY = round(random.randrange(0, display_height - (snake_size*4))/10)*10
length_of_snake -= 1
snake_color = False
            if snake_body:  # guard: popping an empty body would raise IndexError
                snake_body.pop()
speed.tick(snake_speed)
pygame.quit()
quit()
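# A minimal sketch (not wired into the game above): the repeated
# round(random.randrange(...)/10)*10 spawn expression and the six covid
# coordinate pairs could be factored into a helper plus a list of tuples:
#
#   def spawn_on_grid(limit, size=snake_size, cell=10):
#       # random coordinate snapped to the 10px grid, kept inside the display
#       return round(random.randrange(0, limit - size * 4) / cell) * cell
#
#   covid_spots = [(spawn_on_grid(display_width), spawn_on_grid(display_height))
#                  for _ in range(6)]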
repeat_game() |
import discord
import random
from discord.ext import commands
class Guess(commands.Cog):
def __init__(self,client):
self.client = client
@commands.command()
async def guess(self, ctx):
pick = random.randint(1,10)
await ctx.send(f'Pick a number between 1 and 10. {ctx.author.mention}')
message = await self.client.wait_for('message', check = lambda message: message.author.id == ctx.author.id and message.channel.id == ctx.message.channel.id and message.guild.id == ctx.guild.id, timeout = 10.0)
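        # NOTE: wait_for raises asyncio.TimeoutError after the 10s timeout; a
        # try/except around it would let the bot reply instead of logging a traceback.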
        def number(message):
            # Map a reply of '1'..'10' to its int; anything else yields None
            if message.content in {str(n) for n in range(1, 11)}:
                return int(message.content)
            return None
player = number(message)
if pick == player:
await ctx.send(f'<:tick:712964971079925790> You got it right! Now you have full bragging rights. {ctx.author.mention}')
else:
await ctx.send(f"<:wrong:712965175493394432> WA WAAA, thats wrong :( it's actually {pick}. {ctx.author.mention}")
def setup(client):
client.add_cog(Guess(client)) |
from django.apps import AppConfig
class MoodleAdminConfig(AppConfig):
name = 'moodle_admin'
|
import test
if __name__ == '__main__':
a=open("num_tests.txt","r")
num = a.readline().strip("\n")
test.sss(num) |
# ====== main code ====================================== #
word = input() + ' запретил букву'  # appends "banned the letter" (the game's output stays in Russian)
b = ['а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
exclu = 0
while word.strip() != '':
    # skip letters that no longer occur in the text (until the last letter of the alphabet)
    if word == word.replace(b[exclu], '') and exclu != 31:
        exclu += 1
        continue
    else:
        # announce the banned letter, then strip it out and collapse doubled spaces
        print(word, b[exclu])
        word = word.replace(b[exclu], '')
        word = word.replace('  ', ' ').strip()
        exclu += 1
# ====== end of code ==================================== #
|
import util
etfs = util.getFromHoldings()
ivv = set(util.getStocks("IVV"))
common = set()
test = set()
path = "etf_report"
with open(path, "w") as f:
for etf in etfs:
if etf == "IVV" or etf == "USRT" or etf == "VLUE":
continue
other = set(util.getStocks(etf))
f.write ("ivv - {}\n".format(etf))
f.write (", ".join(ivv - other))
f.write ("\n")
f.write ("{} - ivv\n".format(etf))
f.write (", ".join(other-ivv))
f.write ("\n")
f.write ("common {}\n".format(etf))
temp = (other&ivv)
if not common:
common = temp
test = temp
else:
test = (common & temp)
if test:
common = test
f.write (", ".join(temp))
f.write ("\n")
f.write ("COMMON\n")
f.write (", ".join(common))
|
from argparse import ArgumentParser
import penman as pp
import json, re
from penman import Graph
def main(args):
pattern = re.compile(r'''[\s()":/,\\'#]+''')
with open(args.input, encoding='utf-8') as f, open(args.output, mode='w', encoding='utf-8') as out:
        for amr_data in f:
            # skip blank lines between records; the original '' / None checks never fired on readlines output
            if not amr_data.strip():
                continue
            amr_data = json.loads(amr_data)
triples = [("c0", ":instance", "none")]
if 'tokenization' in amr_data:
id = amr_data['id']
tokenization = amr_data['tokenization']
properties = tokenization[0]['properties']
snt = json.dumps([token['label'] for token in tokenization], ensure_ascii=False)
token = json.dumps([token['label'] for token in tokenization], ensure_ascii=False)
lemma = json.dumps([token['values'][properties.index('lemma')] for token in tokenization],
ensure_ascii=False)
upos = json.dumps([token['values'][properties.index('upos')] for token in tokenization],
ensure_ascii=False)
xpos = json.dumps([token['values'][properties.index('xpos')] for token in tokenization],
ensure_ascii=False)
# ner = json.dumps([token['values'][properties.index('ner')] for token in tokenization], ensure_ascii=False)
graph = Graph(triples, metadata=dict(
id=id, snt=snt, token=token, lemma=lemma, upos=upos, xpos=xpos
))
else:
id = amr_data['id']
snt = json.dumps(amr_data['input'], ensure_ascii=False)
token = json.dumps(amr_data['token'], ensure_ascii=False)
lemma = json.dumps(amr_data['lemma'], ensure_ascii=False)
upos = json.dumps(amr_data['upos'], ensure_ascii=False)
xpos = json.dumps(amr_data['xpos'], ensure_ascii=False)
ner = json.dumps(amr_data['ner'], ensure_ascii=False)
graph = Graph(triples, metadata=dict(
id=id, snt=snt, token=token, lemma=lemma, upos=upos, xpos=xpos, ner=ner
))
graph_en = pp.encode(graph)
            graph_de = pp.decode(graph_en)  # round-trip parse as a sanity check (result unused)
out.write(graph_en + '\n\n')
if __name__ == '__main__':
argparser = ArgumentParser()
argparser.add_argument('--input', '-i', required=True)
argparser.add_argument('--output', '-o', required=True)
args = argparser.parse_args()
main(args)
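# Usage sketch (hypothetical file names):
#   python convert_amr.py --input amrs.jsonl --output amrs.penman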
|
# To find the index of a character in a string - for example, at which position
# the letter 'J' appears in the variable below - use the str.find method
a = "Khamzayev Jamshid is wonderfull Python programmer!!!"
print(a.find('J'))  # find returns the index of the first occurrence |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 30 13:44:14 2019
@author: se14
"""
import pandas as pd
import os
# script to merge results for all folds
print('Merging the results from all folds')
out_foldr = 'final_results/'
if not os.path.exists(out_foldr) and out_foldr != "":
os.makedirs(out_foldr)
out_filename = '3D_CNN_FP_reduction.csv'
all_results_df = pd.DataFrame(columns=['seriesuid','coordX','coordY','coordZ','probability'])
for fold_k in range(10):
try:
results_filename = f'test_results_fold{fold_k}.csv'
out_path = f'results_fold_{fold_k}/'
results_filename = out_path + results_filename
        tmpDf = pd.read_csv(results_filename)
        # DataFrame.append was removed in pandas 2.0; concat is the supported way to stack folds
        all_results_df = pd.concat([all_results_df, tmpDf], ignore_index=True, sort=False)
    except Exception as err:
        print('Error reading fold {}: {}'.format(fold_k, err))
        continue
all_results_df.to_csv(out_foldr + out_filename,index=False)
#%% |
# DKats 2021
#
#---Thanks to---
# PySimpleGUI module from MikeTheWatchGuy at https://pypi.org/project/PySimpleGUI/
# executable's icon downloaded from www.freeiconspng.com
import PySimpleGUI as sg
import json
import sqlite3
import datetime
from datetime import timezone, datetime
import os
import webbrowser
import shutil
import ffmpeg
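# NOTE: 'ffmpeg' here is the ffmpeg-python wrapper (pip install ffmpeg-python),
# which shells out to the ffmpeg binary, so the binary must be on PATH for run()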
#---functions definition
def concatVidAudFile(copiedFilesFolder, currentVidID, vidNum):
    '''Find the cache fragments for one stream of a video and stitch them together.
    vidNum 1-3 are video qualities, vidNum 4 is the audio stream.'''
    fileDict = {}  # maps fragment filename -> sequence number, so fragments concat in order
    for file in os.listdir(copiedFilesFolder):
        if file.startswith(f'{currentVidID}.null.{vidNum}'):
            # NOTE: assumes 'mp4.' appears in the cache filename; other containers would break this
            fpos = file.find('mp4.') + 4
            fileDict[file] = int(file[fpos:file.find('.', fpos)])
    if not fileDict:
        return 'not found'
    sList = [name for name, _ in sorted(fileDict.items(), key=lambda item: item[1])]
    prefix = 'aud' if vidNum == 4 else f'vid{vidNum}'
    final_path = f'{copiedFilesFolder}\\{prefix}_{currentVidID}_final.mp4'
    shutil.copy(f'{copiedFilesFolder}\\{sList[0]}', final_path)
    for name in sList[1:]:
        # append each subsequent fragment to the growing output file
        with open(final_path, 'ab') as dst, open(f'{copiedFilesFolder}\\{name}', 'rb') as src:
            dst.write(src.read())
    return final_path
def ffmpegFinalConcat(currentVidID, vid, aud, copiedFilesFolder, vidNum):
in1 = ffmpeg.input(vid)
in2 = ffmpeg.input(aud)
out = ffmpeg.output(in1, in2, f'{copiedFilesFolder}\\output_{currentVidID}_{vidNum}.mkv')
try:
out.run()
return f'output_{currentVidID}_{vidNum}.mkv'
except Exception as e:
print('!!!!!!!!!!!!!!!!!!!!!!!!!')
print(f'ffmpeg conversion for vid {vidNum} of video with ID: {currentVidID} failed')
print(e)
return 'video conversion failed'
#---menu definition
menu_def = [['File', ['Exit']],
['Help', ['Documentation', 'About']],]
#---layout definition
DBFrameLayout = [[sg.Text('Choose the threads_db2 database file to parse', background_color='#2a363b')],
[sg.In(key='-DB-', readonly=True, background_color='#334147'), sg.FileBrowse(file_types=(('Database', '*.sqlite'),('Database', '*.db'), ('All files', '*.*')))],
[sg.Text('Choose the videocache folder to parse', background_color='#2a363b')],
[sg.In(key='-CACHE-', readonly=True, background_color='#334147'), sg.FolderBrowse()]]
OutputSaveFrameLayout = [[sg.Text('Choose folder to save the html report file', background_color='#2a363b')],
[sg.In(key='-OUTPUT-', readonly=True, background_color='#334147'), sg.FolderBrowse()]] #key='-SAVEBTN-', disabled=True, enable_events=True
col_layout = [[sg.Frame('Input DB File and Videocache Folder', DBFrameLayout, background_color='#2a363b', pad=((0,0),(0,65)))],
# [sg.Frame('Keywords (Optional - only for Documents)', KeywordsFrameLayout, background_color='#2a363b', pad=((0,0),(0,65))) ],
[sg.Frame('Output Folder', OutputSaveFrameLayout, background_color='#2a363b')],
[sg.Button('Exit', size=(7,1)), sg.Button('Parse', size=(7,1))]]
#---GUI Definition
layout = [[sg.Menu(menu_def, key='-MENUBAR-')],
[sg.Column(col_layout, element_justification='c',background_color='#2a363b'), sg.Frame('Output Console',
[[sg.Output(size=(50,25), key='-OUT-', background_color='#334147', text_color='#fefbd8')]], background_color='#2a363b')],
[sg.Text('FB Messenger VideoCache Parser Ver. 1.0.1', background_color='#2a363b', text_color='#b2c2bf')]]
window = sg.Window('FB Messenger VideoCache Parser', layout, background_color='#2a363b')
#---run
while True:
event, values = window.read()
# print(event, values)
if event in (sg.WIN_CLOSED, 'Exit'):
break
#---menu events
if event == 'Documentation':
try:
webbrowser.open_new('https://github.com/D-Kats/FB_MESSENGER_VIDEOCACHE_PARSER/blob/main/README.md')
except:
sg.PopupOK('Visit https://github.com/D-Kats/FB_MESSENGER_VIDEOCACHE_PARSER/blob/main/README.md for documentation', title='Documentation', background_color='#2a363b')
if event == 'About':
sg.PopupOK('FB Messenger VideoCache Parser Ver. 1.0.1 \n\n --DKats 2021', title='-About-', background_color='#2a363b')
#---buttons events
if event == "Parse":
        if values['-DB-'] == '':  # no database selected for parsing
            sg.PopupOK('Please choose the threads_db2 database to parse!', title='!', background_color='#2a363b')
        elif values['-CACHE-'] == '':  # no videocache folder selected
            sg.PopupOK('Please choose the videocache folder to parse!', title='!', background_color='#2a363b')
        elif values['-OUTPUT-'] == '':  # no output folder selected
            sg.PopupOK('Please choose a folder to save the html report to!', title='!', background_color='#2a363b')
else:
now = datetime.now()
currentLocalTime = now.strftime("%H:%M:%S")
print(f'Script started executing at {currentLocalTime}')
print('Initializing...')
try: #catching db connection error
db = values['-DB-']
conn = sqlite3.connect(db)
c = conn.cursor()
c.execute("SELECT _id, timestamp_ms, sender, attachments FROM messages")
data = c.fetchall()
except Exception as e: #db conn error
print('!!!!!!!!!!!!!!!!!!!!!!!!!')
print(f'ERROR: {e}')
sg.PopupOK('Error connecting to the database!\nCheck Output Console for more details', title='!', background_color='#2a363b')
else: #db connected successfully
print('Database connection successful')
outputFolder = values['-OUTPUT-']
inputFolder = values['-CACHE-']
# try except else block for exo files copying
try: #trying to copy exo files to my report files folder
print('creating html report/files folder in output folder')
os.makedirs(f'{outputFolder}\\report\\files')
copiedFilesFolder = f'{outputFolder}\\report\\files'
print('copying exo files to html report files folder')
                    # gather every cache file into 'files' so the concats can run in one place
for root, folders, files in os.walk(inputFolder):
for file in files:
rootAbsPath = os.path.abspath(root)
shutil.copy(os.path.join(rootAbsPath, file), f'{copiedFilesFolder}\\{file}')
print('copying exo files to html report files folder finished successfully')
except Exception as e: #exo copying error
print('!!!!!!!!!!!!!!!!!!!!!!!!!')
print(f'ERROR: {e}')
sg.PopupOK('Error copying exo files to output folder!\nCheck output folder permission and\n Output Console for more details', title='!', background_color='#2a363b')
else: #exo files copied successfully, main parsing begins
print('main parsing begins')
#----html string
html_code ='''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<title> Report </title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" type="text/css" media="screen" href="style.css" />
</head>
<body>
<div class="wrapper">
<div class="header">
<H1> Report </H1>
</div>'''
html_code += '\n<table align="center"> \n <caption><h2><b>Facebook Messenger Videos</b></h2></caption>'
html_code += '\n<tr style="background-color:DarkGrey"> \n <th>Database RecordID</th> \n <th>Video Send Timestamp (UTC)</th> \n <th>Sender Facebook ID</th> \n <th>Sender Facebook Name</th> \n <th>Video</th>'
try: #trying to catch any json errors
for row in data:
if row[3] != None:
attachmentString = row[3]
attachmentJson = json.loads(attachmentString)
                            if 'video' in attachmentJson[0]['mime_type']: # the db wraps the json in [], so python parses it as a list
currentVidID = attachmentJson[0]['id']
print(f'concatenating exo files for ID:{currentVidID} video')
currentVideo1 = concatVidAudFile(copiedFilesFolder, currentVidID, 1)
currentVideo2 = concatVidAudFile(copiedFilesFolder, currentVidID, 2)
currentVideo3 = concatVidAudFile(copiedFilesFolder, currentVidID, 3)
currentAudio = concatVidAudFile(copiedFilesFolder, currentVidID, 4)
#html rest of info
timestamp = datetime.fromtimestamp(row[1]/1000, tz=timezone.utc)
senderString = row[2]
senderJson = json.loads(senderString)
FB_id = senderJson['user_key'][senderJson['user_key'].find(':')+1:]
FB_name = senderJson['name']
if currentVideo1 == 'not found' and currentVideo2 == 'not found' and currentVideo3 == 'not found' and currentAudio == 'not found':
print(f'Video with ID:{currentVidID} not found on videocache folder')
html_code += f'\n<tr> \n <td>{row[0]}</td> \n <td>{timestamp}</td> \n <td>{FB_id}</td> \n <td>{FB_name}</td> \n<td> video not found in videocache folder</td>'
else:
print(f'Fragments for video with ID:{currentVidID} found. FFmpeg conversion commencing')
html_code += f'\n<tr> \n <td>{row[0]}</td> \n <td>{timestamp}</td> \n <td>{FB_id}</td> \n <td>{FB_name}</td> \n<td>'
if currentVideo1 != 'not found':
html_vidName = ffmpegFinalConcat(currentVidID, currentVideo1, currentAudio, copiedFilesFolder, 1)
if html_vidName != 'video conversion failed':
html_code +=f' <a href=".\\files\\{html_vidName}">video </a>'
else:
html_code += 'video conversion failed '
if currentVideo2 != 'not found':
html_vidName = ffmpegFinalConcat(currentVidID, currentVideo2, currentAudio, copiedFilesFolder, 2)
if html_vidName != 'video conversion failed':
html_code +=f' <a href=".\\files\\{html_vidName}">video </a>'
else:
html_code += 'video conversion failed '
if currentVideo3 != 'not found':
html_vidName = ffmpegFinalConcat(currentVidID, currentVideo3, currentAudio, copiedFilesFolder, 3)
if html_vidName != 'video conversion failed':
html_code +=f' <a href=".\\files\\{html_vidName}">video </a>'
else:
html_code += 'video conversion failed '
html_code += '</td>'
print('main parsing completed!')
html_code += '\n</table> \n<br> \n<div class="push"></div> \n</div> \n <div class="footer">--DEE 7o</div>\n</body>\n</html>'
                        css_code = '''table{border-collapse:collapse;}
                        th{text-align:center;background-color:#4a4343;color:white;}
                        table,th,td{border:1px solid #000;}
                        tr{text-align:center;background-color:#595555; color:white;}
                        html, body {
                            height: 100%;
                            margin: 0;
                        }
                        .wrapper {
                            min-height: 100%;
                            background-color: #4a4349;
                            /* Equal to height of footer */
                            /* But also accounting for potential margin-bottom of last child */
                            margin-bottom: -50px;
                            font-family: "Courier New", sans-serif;
                            color: white;
                        }
                        .header{
                            background-color: darkgrey;
                            color: white;
                        }
                        .header h1 {
                            text-align: center;
                            font-family: "Courier New", sans-serif;
                            color: red;
                        }
                        .push {
                            height: 50px;
                            background-color: #4a4349;
                        }
                        .footer {
                            height: 50px;
                            background-color: #4a4349;
                            color: white;
                            text-align: right;
                        } '''
print('deleting exo files from output folder...')
for file in os.listdir(copiedFilesFolder):
if file.startswith('output_'):
continue
os.remove(f'{copiedFilesFolder}\\{file}')
print('exo files deleted!')
#----reporting
reportFolder = f'{outputFolder}\\report'
print('creating HTML report!')
with open(f'{reportFolder}\\report.html', 'w', encoding='utf8') as fout:
fout.write(html_code)
with open(f'{reportFolder}\\style.css', 'w', encoding= 'utf8') as cssout:
cssout.write(css_code)
print('HTML report created successfully')
now = datetime.now()
currentLocalTime = now.strftime("%H:%M:%S")
print(f'Script finished executing at {currentLocalTime}')
with open(f'{reportFolder}\\FB_MESSENGER_VIDEOCACHE_PARSER.log', 'w', encoding='utf-8') as Logfout:
Logfout.write(window['-OUT-'].Get())
print('Log file was created successfully!')
sg.PopupOK(f'Parsing completed!\nHTML report created successfully at {reportFolder}.', title=': )', background_color='#2a363b')
except Exception as e: #possible json error
print('!!!!!!!!!!!!!!!!!!!!!!!!!')
print(f'ERROR: {e}')
sg.PopupOK('Error during main parsing!\nCheck Output Console for more details', title='!', background_color='#2a363b')
window.close()
|
#!/usr/bin/env python
# coding=utf-8
try:
#assert 1==0,"1 != 0"
print("pass")
except:
import time
time.sleep(1)
print("retry")
# this assert runs unconditionally and always fails, so the final print is never reached
assert 1==0,"1 != 0"
print("succ") |
# -*- coding: utf-8 -*-
import json
import time
import logging
from uuid import uuid4
import base64
import datetime
from tornado import web
from tornado import gen
from webchat.handlers.base import BaseHandler, BaseSocketHandler
from webchat.modules.room import Rooms
from webchat.utils.pytea import str_encrypt, str_decrypt
from webchat.utils.common import md5twice
from webchat.config import CONFIG
LOG = logging.getLogger(__name__)  # was the literal string "__name__", which misnamed the logger
class ChatHandler(BaseHandler):
@gen.coroutine
def get(self):
self.render(
"chat/chat.html",
current_nav = "Chat",
scheme = "http",
locale = "en_US"
)
class ChatSocketHandler(BaseSocketHandler):
guest_prefix = "Guest_"
room_num = 100
rooms = Rooms(100)
connection_id_counter = 0
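    # connection_id_counter is class-level on purpose: every websocket connection
    # gets a unique, monotonically increasing guest number via get_id()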
@gen.coroutine
def open(self, room_id):
self.nickname = "%s%s" % (ChatSocketHandler.guest_prefix, self.get_id())
self.room_id = int(room_id)
self.password = uuid4()
ChatSocketHandler.rooms[self.room_id].add(self)
message = "* %s joined" % (self.nickname)
data = {}
data["cmd"] = "init_rooms_list"
data["msg"] = base64.b64encode(message.encode("utf-8")).decode("utf-8")
data["password"] = self.password.hex
data["rooms_list"] = []
for i in range(1, ChatSocketHandler.room_num + 1):
data["rooms_list"].append({
"room_id": ChatSocketHandler.rooms[i].room_id,
"current_members": ChatSocketHandler.rooms[i].current_members,
})
data["default_nick_name"] = base64.b64encode(self.nickname.encode("utf-8")).decode("utf-8")
self.write_message(data)
@gen.coroutine
def on_message(self, msg):
now = datetime.datetime.now()
msg = json.loads(msg)
cmd = msg["cmd"]
if cmd == "change_room":
room_id = int(msg["room_id"])
if room_id != 0:
data = {}
ChatSocketHandler.rooms[self.room_id].remove(self)
data["cmd"] = "new_msg"
message = "System (%s) : %s left." % (now.strftime("%Y-%m-%d %H:%M:%S"), self.nickname)
data["msg"] = base64.b64encode(message.encode("utf-8")).decode("utf-8")
ChatSocketHandler.rooms[self.room_id].broadcast(data)
data = {}
self.room_id = room_id
ChatSocketHandler.rooms[self.room_id].add(self)
data["cmd"] = "new_msg"
message = "System (%s) : %s joined." % (now.strftime("%Y-%m-%d %H:%M:%S"), self.nickname)
data["msg"] = base64.b64encode(message.encode("utf-8")).decode("utf-8")
ChatSocketHandler.rooms[self.room_id].broadcast(data)
else:
data = {}
self.room_id = room_id
ChatSocketHandler.rooms[self.room_id].add(self)
data["cmd"] = "new_msg"
message = "System (%s) : %s joined." % (now.strftime("%Y-%m-%d %H:%M:%S"), self.nickname)
data["msg"] = base64.b64encode(message.encode("utf-8")).decode("utf-8")
ChatSocketHandler.rooms[self.room_id].broadcast(data)
data = {}
data["cmd"] = "change_rooms_list"
data["rooms_list"] = []
data_refresh = {}
data_refresh["cmd"] = "refresh_rooms_list"
data_refresh["rooms_list"] = []
for i in range(1, ChatSocketHandler.room_num + 1):
room_info = {
"room_id": ChatSocketHandler.rooms[i].room_id,
"current_members": ChatSocketHandler.rooms[i].current_members,
}
data["rooms_list"].append(room_info)
data_refresh["rooms_list"].append(room_info)
self.broadcast_all(data_refresh)
data["room_id"] = self.room_id
data["nick_name"] = base64.b64encode(self.nickname.encode("utf-8")).decode("utf-8")
self.write_message(data)
elif cmd == "send_msg":
room_id = int(msg["room_id"])
nickname_base64 = msg["nick_name"]
default_name_base64 = msg["default_nick_name"]
msg_base64 = msg["msg"]
msg_string = self.decrypt_msg(msg_base64)
data = {}
if room_id != 0:
data["cmd"] = "new_msg"
data["date_time"] = now.strftime("%Y-%m-%d %H:%M:%S")
data["nick_name"] = nickname_base64
data["default_nick_name"] = default_name_base64
data["encrypt"] = msg_string
data["msg"] = msg_string
ChatSocketHandler.rooms[room_id].broadcast(data)
@gen.coroutine
def on_close(self):
now = datetime.datetime.now()
ChatSocketHandler.rooms[self.room_id].remove(self)
data = {}
data["cmd"] = "new_msg"
message = "System (%s) : %s left." % (now.strftime("%Y-%m-%d %H:%M:%S"), self.nickname)
data["msg"] = base64.b64encode(message.encode("utf-8")).decode("utf-8")
ChatSocketHandler.rooms[self.room_id].broadcast(data)
data = {}
data["cmd"] = "refresh_rooms_list"
data["rooms_list"] = []
for i in range(1, ChatSocketHandler.room_num + 1):
room_info = {
"room_id": ChatSocketHandler.rooms[i].room_id,
"current_members": ChatSocketHandler.rooms[i].current_members,
}
data["rooms_list"].append(room_info)
self.broadcast_all(data)
self.close()
LOG.info("close websocket")
def get_id(self):
ChatSocketHandler.connection_id_counter += 1
return ChatSocketHandler.connection_id_counter
def broadcast_all(self, msg):
for i in range(ChatSocketHandler.room_num + 1):
ChatSocketHandler.rooms[i].broadcast(msg)
def encrypt_msg(self, msg):
key = md5twice(self.password.hex)
result_string = str_encrypt(msg, key)
return base64.b64encode(result_string).decode("utf-8")
def decrypt_msg(self, msg):
key = md5twice(self.password.hex)
msg_bytes = base64.b64decode(msg)
result_string = str_decrypt(msg_bytes, key)
return result_string
|
# Generated by Django 2.2.13 on 2020-07-09 17:14
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('shop', '0025_auto_20200709_2238'),
]
operations = [
migrations.AddField(
model_name='about',
name='title',
field=models.TextField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='contact',
name='title',
field=models.TextField(default=django.utils.timezone.now),
preserve_default=False,
),
]
|
from . import _internal # usort: skip
from ._dataset import Dataset
from ._encoded import EncodedData, EncodedImage
from ._resource import GDriveResource, HttpResource, KaggleDownloadResource, ManualDownloadResource, OnlineResource
|
from django.urls import path
from rest_framework_jwt.views import obtain_jwt_token
from .views import RegisterView
urlpatterns = [
path("login/", obtain_jwt_token),
path("register/", RegisterView.as_view()),
]
|
from flask import Blueprint
from flask import jsonify
from shutil import copyfile, move
from google.cloud import storage
from google.cloud import bigquery
from google.oauth2 import service_account
from google.auth.transport.requests import AuthorizedSession
from flask import request
import dataflow_pipeline.workforce.workforce_beam as workforce_beam
import dataflow_pipeline.workforce.Iti_beam as Iti_beam
import dataflow_pipeline.workforce.Iti_detalle_beam as Iti_detalle_beam
import cloud_storage_controller.cloud_storage_controller as gcscontroller
import dataflow_pipeline.massive as pipeline
import os
import time
import socket
import _mssql
import datetime
import sys
#coding: utf-8
workforce_api = Blueprint('workforce_api', __name__)
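# Pick the media root depending on the host: local UNC share during development,
# the server path when running on the 'contentobi' machine (tuple indexed by bool)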
fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
@workforce_api.route("/workforce" , methods=['GET'] )
def workforce():
    dateini = request.args.get('dateini') or ""
    dateend = request.args.get('dateend') or ""
client = bigquery.Client()
QUERY = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "workforce"')
query_job = client.query(QUERY)
rows = query_job.result()
data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
    reload(sys)  # Python 2 only: re-exposes setdefaultencoding
    sys.setdefaultencoding('utf8')
    HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
if dateini == "":
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT documento_neg, segmento,Iter,Fecha_Malla,Hora_Inicio,Fecha_Final,Hora_Final,logueo,Deslogueo,Dif_Inicio,Dif_Final,Ausentismo,tiempo_malla,tiempo_conexion,tiempo_conexion_tiempo,Tiempo_EstAux,Tiempo_EstAux_tiempo,tiempo_estaux_out,tiempo_estaux_out_tiempo,adherencia_malla,adherencia_tiempo,Centro_Costo,rel_unico,rel_orden FROM ' + tabla_bd + ' WHERE CONVERT(DATE, FECHA_MALLA) = CONVERT(DATE,GETDATE())')
cloud_storage_rows = ""
# conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
# conn.execute_query('SELECT documento_neg, segmento,Iter,Fecha_Malla,Hora_Inicio,Fecha_Final,Hora_Final,logueo,Deslogueo,Dif_Inicio,Dif_Final,Ausentismo,tiempo_malla,tiempo_conexion,tiempo_conexion_tiempo,Tiempo_EstAux,Tiempo_EstAux_tiempo,tiempo_estaux_out,tiempo_estaux_out_tiempo,adherencia_malla,adherencia_tiempo,Centro_Costo,rel_unico,rel_orden FROM ' + tabla_bd )
# cloud_storage_rows = ""
else:
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT documento_neg, segmento,Iter,Fecha_Malla,Hora_Inicio,Fecha_Final,Hora_Final,logueo,Deslogueo,Dif_Inicio,Dif_Final,Ausentismo,tiempo_malla,tiempo_conexion,tiempo_conexion_tiempo,Tiempo_EstAux,Tiempo_EstAux_tiempo,tiempo_estaux_out,tiempo_estaux_out_tiempo,adherencia_malla,adherencia_tiempo,Centro_Costo,rel_unico,rel_orden FROM ' + tabla_bd + ' WHERE CONVERT(DATE, FECHA_MALLA)' ' between ' + "'" + dateini + "'" +" and " + "'" + dateend + "'" )
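        # NOTE: dateini/dateend are concatenated straight into the SQL text;
        # parameterized queries would be safer against injection and format errors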
cloud_storage_rows = ""
    # Rows in this table can contain embedded newlines and semicolons,
    # so each field is stringified explicitly and joined with '|'
    columns = ['documento_neg', 'segmento', 'Iter', 'Fecha_Malla', 'Hora_Inicio',
               'Fecha_Final', 'Hora_Final', 'logueo', 'Deslogueo', 'Dif_Inicio',
               'Dif_Final', 'Ausentismo', 'tiempo_malla', 'tiempo_conexion',
               'tiempo_conexion_tiempo', 'Tiempo_EstAux', 'Tiempo_EstAux_tiempo',
               'tiempo_estaux_out', 'tiempo_estaux_out_tiempo', 'adherencia_malla',
               'adherencia_tiempo', 'Centro_Costo', 'rel_unico', 'rel_orden']
    for row in conn:
        text_row = "|".join(str(row[col]).encode('utf-8') for col in columns) + "\n"
        cloud_storage_rows += text_row
conn.close()
filename = "adherencia/workforce" + ".csv"
    #Once the rows are built locally, write the file to the cloud storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-workforce")
# try:
# deleteQuery = "DELETE FROM `contento-bi.Workforce.Adherencia` WHERE FECHA = '" + mifecha + "'"
try:
if dateini == "":
deleteQuery = 'DELETE FROM `contento-bi.Workforce.Adherencia` WHERE CAST(Fecha_Malla AS DATE) = CURRENT_DATE()'
# deleteQuery = 'DELETE FROM `contento-bi.Workforce.Adherencia` WHERE 1=1
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
else:
deleteQuery2 = 'DELETE FROM `contento-bi.Workforce.Adherencia` WHERE CAST(Fecha_Malla AS DATE) between ' + "'" + dateini + "'" +" and " + "'" + dateend + "'"
client = bigquery.Client()
query_job = client.query(deleteQuery2)
query_job.result()
    except:
        print("could not delete existing rows")
    #First remove every record in that date range so the load does not duplicate
# time.sleep(60)
flowAnswer = workforce_beam.run()
# time.sleep(60)
    # Keep the cloud storage path in a variable so the file can be removed afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-workforce')
blob = bucket.blob("adherencia/workforce" + ".csv")
    # Delete the file referenced above
    # blob.delete()
    # return jsonify(flowAnswer), 200
    return "data loaded " + str(flowAnswer)
##################################### TABLA ITI #####################################
@workforce_api.route("/Iti", methods=['GET'] )
def Iti():
    dateini = request.args.get('dateini') or ""
    dateend = request.args.get('dateend') or ""
client = bigquery.Client()
QUERY = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "iti"')
query_job = client.query(QUERY)
rows = query_job.result()
data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
    reload(sys)  # Python 2 only: re-exposes setdefaultencoding
    sys.setdefaultencoding('utf8')
    HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
if dateini == "":
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT Id_Iti,Fecha,Hora,Centro_Costo,Peso,fecha_ejecucion,Estado FROM ' + tabla_bd + " WHERE CONVERT(DATE, Fecha) = CONVERT(DATE,GETDATE())")
# conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
# conn.execute_query('SELECT Id_Iti,Fecha,Hora,Centro_Costo,Peso,fecha_ejecucion,Estado FROM ' + tabla_bd)
else:
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT Id_Iti,Fecha,Hora,Centro_Costo,Peso,fecha_ejecucion,Estado FROM ' + tabla_bd + ' WHERE CONVERT(DATE, Fecha)' ' between ' + "'" + dateini + "'" +" and " + "'" + dateend + "'" )
# conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
# conn.execute_query('SELECT Id_Iti,Fecha,Hora,Centro_Costo,Peso,fecha_ejecucion,Estado FROM ' + tabla_bd)
cloud_storage_rows = ""
    # Rows in this table can contain embedded newlines and semicolons,
    # so each field is stringified explicitly and joined with '|'
    columns = ['Id_Iti', 'Fecha', 'Hora', 'Centro_Costo', 'Peso', 'fecha_ejecucion', 'Estado']
    for row in conn:
        text_row = "|".join(str(row[col]).encode('utf-8') for col in columns) + "\n"
        cloud_storage_rows += text_row
conn.close()
filename = "workforce/iti" + ".csv"
    #Once the rows are built locally, write the file to the cloud storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-workforce")
try:
if dateini == "":
deleteQuery = 'DELETE FROM `contento-bi.Workforce.Iti` WHERE CAST(Fecha AS DATE) = CURRENT_DATE()'
# deleteQuery = "DELETE FROM `contento-bi.Workforce.Iti` WHERE id_iti is not null"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
else:
deleteQuery2 = "DELETE FROM `contento-bi.Workforce.Iti` WHERE CAST(Fecha AS DATE) between " + "'" + dateini + "'" +" and " + "'" + dateend + "'"
# deleteQuery = "DELETE FROM `contento-bi.Workforce.Iti` WHERE id_iti is not null"
client = bigquery.Client()
query_job = client.query(deleteQuery2)
query_job.result()
    except:
        print("could not delete existing rows")
    #First remove every record in that date range so the load does not duplicate
flowAnswer = Iti_beam.run()
# time.sleep(60)
    # Keep the cloud storage path in a variable so the file can be removed afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-workforce')
blob = bucket.blob("workforce/iti" + ".csv")
    # Delete the file referenced above
    # blob.delete()
    # return jsonify(flowAnswer), 200
    return "data loaded " + str(flowAnswer)
################################### TII V2 #################################################
# @workforce_api.route("/Iti_detalle")
# def Iti_detalle():
# reload(sys)
# sys.setdefaultencoding('utf8')
# SERVER="BDA01\DOKIMI"
# USER="BI_Workforce"
# PASSWORD="340$Uuxwp7Mcxo7Khy.*"
# DATABASE="Workforce"
# TABLE_DB ="tb_iti_detalle"
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
# # Nos conectamos a la BD y obtenemos los registros
# conn = _mssql.connect(server=SERVER, user=USER, password=PASSWORD, database=DATABASE)
# conn.execute_query('SELECT Centro_Costos,fecha_malla,"07:30:00","07:45:00","08:00:00","08:15:00","08:30:00","08:45:00","09:00:00","09:15:00","09:30:00","09:45:00","10:00:00","10:15:00","10:30:00","10:45:00","11:00:00","11:15:00","11:30:00","11:45:00","12:00:00","12:15:00","12:30:00","12:45:00","13:00:00","13:15:00","13:30:00","13:45:00","14:00:00","14:15:00","14:30:00","14:45:00","15:00:00","15:15:00","15:30:00","15:45:00","16:00:00","16:15:00","16:30:00","16:45:00","17:00:00","17:15:00","17:30:00","17:45:00","18:00:00","18:15:00","18:30:00","18:45:00","19:00:00","19:15:00","19:30:00","19:45:00","20:00:00","20:15:00","20:30:00","20:45:00","21:00:00" FROM ' + TABLE_DB)
# # + " WHERE CONVERT(DATE, Fecha_malla) = CONVERT(DATE,GETDATE())")
# cloud_storage_rows = ""
# # Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
# for row in conn:
# text_row = ""
# text_row += str(row['Centro_Costos']).encode('utf-8') + "|"
# text_row += str(row['fecha_malla']).encode('utf-8') + "|"
# text_row += str(row['07:30:00']).encode('utf-8') + "|"
# text_row += str(row['07:45:00']).encode('utf-8') + "|"
# text_row += str(row['08:00:00']).encode('utf-8') + "|"
# text_row += str(row['08:15:00']).encode('utf-8') + "|"
# text_row += str(row['08:30:00']).encode('utf-8') + "|"
# text_row += str(row['08:45:00']).encode('utf-8') + "|"
# text_row += str(row['09:00:00']).encode('utf-8') + "|"
# text_row += str(row['09:15:00']).encode('utf-8') + "|"
# text_row += str(row['09:30:00']).encode('utf-8') + "|"
# text_row += str(row['09:45:00']).encode('utf-8') + "|"
# text_row += str(row['10:00:00']).encode('utf-8') + "|"
# text_row += str(row['10:15:00']).encode('utf-8') + "|"
# text_row += str(row['10:30:00']).encode('utf-8') + "|"
# text_row += str(row['10:45:00']).encode('utf-8') + "|"
# text_row += str(row['11:00:00']).encode('utf-8') + "|"
# text_row += str(row['11:15:00']).encode('utf-8') + "|"
# text_row += str(row['11:30:00']).encode('utf-8') + "|"
# text_row += str(row['11:45:00']).encode('utf-8') + "|"
# text_row += str(row['12:00:00']).encode('utf-8') + "|"
# text_row += str(row['12:15:00']).encode('utf-8') + "|"
# text_row += str(row['12:30:00']).encode('utf-8') + "|"
# text_row += str(row['12:45:00']).encode('utf-8') + "|"
# text_row += str(row['13:00:00']).encode('utf-8') + "|"
# text_row += str(row['13:15:00']).encode('utf-8') + "|"
# text_row += str(row['13:30:00']).encode('utf-8') + "|"
# text_row += str(row['13:45:00']).encode('utf-8') + "|"
# text_row += str(row['14:00:00']).encode('utf-8') + "|"
# text_row += str(row['14:15:00']).encode('utf-8') + "|"
# text_row += str(row['14:30:00']).encode('utf-8') + "|"
# text_row += str(row['14:45:00']).encode('utf-8') + "|"
# text_row += str(row['15:00:00']).encode('utf-8') + "|"
# text_row += str(row['15:15:00']).encode('utf-8') + "|"
# text_row += str(row['15:30:00']).encode('utf-8') + "|"
# text_row += str(row['15:45:00']).encode('utf-8') + "|"
# text_row += str(row['16:00:00']).encode('utf-8') + "|"
# text_row += str(row['16:15:00']).encode('utf-8') + "|"
# text_row += str(row['16:30:00']).encode('utf-8') + "|"
# text_row += str(row['16:45:00']).encode('utf-8') + "|"
# text_row += str(row['17:00:00']).encode('utf-8') + "|"
# text_row += str(row['17:15:00']).encode('utf-8') + "|"
# text_row += str(row['17:30:00']).encode('utf-8') + "|"
# text_row += str(row['17:45:00']).encode('utf-8') + "|"
# text_row += str(row['18:00:00']).encode('utf-8') + "|"
# text_row += str(row['18:15:00']).encode('utf-8') + "|"
# text_row += str(row['18:30:00']).encode('utf-8') + "|"
# text_row += str(row['18:45:00']).encode('utf-8') + "|"
# text_row += str(row['19:00:00']).encode('utf-8') + "|"
# text_row += str(row['19:15:00']).encode('utf-8') + "|"
# text_row += str(row['19:30:00']).encode('utf-8') + "|"
# text_row += str(row['19:45:00']).encode('utf-8') + "|"
# text_row += str(row['20:00:00']).encode('utf-8') + "|"
# text_row += str(row['20:15:00']).encode('utf-8') + "|"
# text_row += str(row['20:30:00']).encode('utf-8') + "|"
# text_row += str(row['20:45:00']).encode('utf-8') + "|"
# text_row += str(row['21:00:00']).encode('utf-8') + "|"
# text_row += "\n"
# cloud_storage_rows += text_row
# conn.close()
# filename = "workforce/iti_detalle" + ".csv"
# #Finalizada la carga en local creamos un Bucket con los datos
# gcscontroller.create_file(filename, cloud_storage_rows, "ct-workforce")
# try:
# # deleteQuery = 'DELETE FROM `contento-bi.Workforce.Iti.detalle` WHERE CAST(Fecha_malla AS DATE) = CURRENT_DATE()'
# deleteQuery = "DELETE FROM `contento-bi.Workforce.Iti_detalle` WHERE Centro_Costos is not null"
# client = bigquery.Client()
# query_job = client.query(deleteQuery)
# query_job.result()
# except:
# print("no se pudo eliminar")
# #Primero eliminamos todos los registros que contengan esa fecha
# flowAnswer = Iti_detalle_beam.run()
# # time.sleep(60)
# # Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
# storage_client = storage.Client()
# bucket = storage_client.get_bucket('ct-workforce')
# blob = bucket.blob("workforce/iti_detalle" + ".csv")
# # Eliminar el archivo en la variable
# blob.delete()
# # return jsonify(flowAnswer), 200
# return "data cargada" + "flowAnswer" |
import pandas as pd
import seaborn as sns
import numpy as np
import re
import matplotlib.pyplot as plt
raw_data = pd.read_csv('menu.csv')
menu_data = raw_data[['Category', 'Serving Size', 'Calories']].copy()  # .copy() avoids SettingWithCopyWarning when mutating below
# Parse numerical data.
for index, row in menu_data.iterrows():
weight = (re.search('\\((\\d+) g\\)', row['Serving Size']))
if weight is None:
menu_data.at[index, 'Serving Size'] = np.nan
else:
menu_data.at[index, 'Serving Size'] = float(weight.group(1))
menu_data.dropna(inplace=True)
print(menu_data)
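# A vectorized alternative to the parsing loop above (equivalent result,
# assuming every weight follows the '(N g)' serving-size format):
#   menu_data['Serving Size'] = (raw_data['Serving Size']
#                                .str.extract(r'\((\d+) g\)', expand=False)
#                                .astype(float))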
# Calculate calorie density.
menu_data['Calorie Density'] = menu_data['Calories']/menu_data['Serving Size']
# Serving size boxplot.
axes = sns.boxplot(
data=menu_data,
x='Category',
y='Serving Size',
).set(
xlabel='Kategori',
ylabel='Takaran sajian (gram)',
title='Boxplot Takaran Sajian Makanan'
)
plt.show()
# Calorie boxplot.
axes = sns.boxplot(
data=menu_data,
x='Category',
y='Calories',
).set(
xlabel='Kategori',
ylabel='Kalori (kalori)',
title='Boxplot Kalori Makanan'
)
plt.show()
# Calorie density boxplot
axes = sns.boxplot(
data=menu_data,
x='Category',
y='Calorie Density'
).set(
xlabel='Kategori',
ylabel='Densitas Kalori (kalori/gram)',
title='Boxplot Densitas Kalori Makanan'
)
plt.show()
# Calorie density histogram.
grid = sns.FacetGrid(
menu_data,
col='Category',
height=2,
col_wrap=3
)
grid.map(sns.histplot, 'Calorie Density')
plt.show()
# Menu scatterplot combined.
sns.scatterplot(
data=menu_data,
x='Serving Size',
y='Calories',
hue='Category'
)
plt.show()
# Menu scatterplot separate.
grid = sns.FacetGrid(
menu_data,
col='Category',
height=2,
col_wrap=3
)
grid.map(sns.scatterplot, 'Serving Size', 'Calories')
plt.show()
|
# book = dict()
book = {}
book["apple"] = 0.67
book["milk"] = 1.69
book["avocado"] = 1.49
print(book)
print(book["apple"]) |
class ResID:
def __init__(self, setChainID, setSeqNum, setICode):
self.chainID = setChainID
self.seqNum = setSeqNum
self.iCode = setICode
def __lt__(self, other):
return ((self.chainID, self.seqNum, self.iCode) <
(other.chainID, other.seqNum, other.iCode))
def __le__(self, other):
return not other < self
def __gt__(self, other):
return other < self
def __ge__(self, other):
return not self < other
def __eq__(self, other):
return (not self < other) and (not other < self)
def __ne__(self, other):
return (self < other) or (other < self)
def __hash__(self):
numChainIDs = 128
numICodes = 128
i = self.seqNum
i *= numChainIDs
i += ord(self.chainID)
i *= numICodes
i += ord(self.iCode)
return i
def __str__(self):
return self.chainID+str(self.seqNum)+self.iCode
def __repr__(self):
return str(self)
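# A minimal sketch: the six comparison dunders above can be derived from just
# __eq__ and __lt__ with functools.total_ordering (assuming Python >= 2.7):
#
#   from functools import total_ordering
#
#   @total_ordering
#   class ResID(object):
#       def __eq__(self, other): ...
#       def __lt__(self, other): ...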
|
# -*- coding: utf-8 -*-
import pytz
import datetime
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.utils import timezone
def get_timezone_aware(_date):
    tzinfo = pytz.timezone("Asia/Kathmandu")
    try:
        if _date.is_aware():
            return _date
    except AttributeError, err:
        pass
    try:
        if _date.tzinfo:
            return _date
    except AttributeError, err:
        pass
    # naive datetime (the original fell through and returned None here)
    return timezone.make_aware(_date, tzinfo)
def get_inspection_starting_datetime(start_date):
return get_timezone_aware(datetime.datetime.combine(start_date, datetime.time(hour=10)))
def get_inspection_ending_date(start_date, hours):
_end = start_date + datetime.timedelta(hours=hours)
return get_timezone_aware(_end)
def get_accepted_ending_date(start_date, hours):
_end = start_date + datetime.timedelta(hours=hours)
return get_timezone_aware(_end)
def get_completion_starting_date(end_date, hours):
_start = end_date - datetime.timedelta(hours=hours)
return get_timezone_aware(_start)
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'JobScheduler'
db.create_table(u'jobs_jobscheduler', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('job', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['jobs.Jobs'])),
('inspection_start_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('inspection_end_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('job_start_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('job_end_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'jobs', ['JobScheduler'])
# Create Job Scheduler for current jobs
for obj in orm['jobs.Jobs'].objects.filter().order_by('id'):
try:
job_scheduler = orm.JobScheduler.objects.get(job=obj)
except orm.JobScheduler.DoesNotExist, err:
status = False if obj.status in ['0', '4', '5'] else True
if obj.status == '1':
if obj.inspection_date:
job_scheduler = orm.JobScheduler(
job=obj,
active=status,
inspection_start_date=get_inspection_starting_datetime(obj.inspection_date),
inspection_end_date=get_inspection_ending_date(get_inspection_starting_datetime(obj.inspection_date), 1)
)
else:
job_scheduler = orm.JobScheduler(
job=obj,
active=status
)
elif obj.status == '2':
if obj.accepted_date and obj.inspection_date:
job_scheduler = orm.JobScheduler(
job=obj,
active=status,
inspection_start_date=get_inspection_starting_datetime(obj.inspection_date),
inspection_end_date=get_inspection_ending_date(get_inspection_starting_datetime(obj.inspection_date), 1),
job_start_date=get_timezone_aware(obj.accepted_date),
job_end_date=get_accepted_ending_date(obj.accepted_date, 4)
)
else:
job_scheduler = orm.JobScheduler(
job=obj,
active=status
)
elif obj.status == '3':
if obj.accepted_date and obj.inspection_date:
job_scheduler = orm.JobScheduler(
job=obj,
active=status,
inspection_start_date=get_inspection_starting_datetime(obj.inspection_date),
inspection_end_date=get_inspection_ending_date(get_inspection_starting_datetime(obj.inspection_date), 1),
job_start_date=get_completion_starting_date(obj.completion_date, 4),
job_end_date=get_timezone_aware(obj.completion_date)
)
else:
job_scheduler = orm.JobScheduler(
job=obj,
active=status
)
else:
job_scheduler = orm.JobScheduler(
job=obj,
active=status
)
job_scheduler.save()
def backwards(self, orm):
# Deleting model 'JobScheduler'
db.delete_table(u'jobs_jobscheduler')
models = {
u'jobs.jobevents': {
'Meta': {'object_name': 'JobEvents'},
'event': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '2'}),
'extrainfo': ('jsonfield.fields.JSONField', [], {'default': "'{}'", 'max_length': '9999'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['jobs.Jobs']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
u'jobs.jobs': {
'Meta': {'object_name': 'Jobs'},
'accepted_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'completion_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs_subscriber'", 'to': u"orm['subscription.Subscriber']"}),
'fee': ('djmoney.models.fields.MoneyField', [], {'default_currency': "'NPR'", 'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'fee_currency': ('djmoney.models.fields.CurrencyField', [], {'default': "'NPR'"}),
'handyman': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'orders'", 'symmetrical': 'False', 'to': u"orm['users.UserProfile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inspection_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'is_paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ishidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'jobref': ('django.db.models.fields.CharField', [], {'default': "'9cd89410c19b417493ffba7aa390a31e'", 'unique': 'True', 'max_length': '100'}),
'jobtype': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '1'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'location_landmark': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '1'})
},
u'jobs.jobscheduler': {
'Meta': {'object_name': 'JobScheduler'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inspection_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'inspection_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['jobs.Jobs']"}),
'job_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'job_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'subscription.subscriber': {
'Meta': {'object_name': 'Subscriber'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_office': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'office_number': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '7'}),
'primary_contact_person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primaryContactPerson'", 'to': u"orm['users.UserProfile']"}),
'secondary_contact_person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'secondaryContactPerson'", 'to': u"orm['users.UserProfile']"}),
'subscriber_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'})
},
u'users.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'account_status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'}),
'address': ('jsonfield.fields.JSONField', [], {'default': "{'city': 'Kathmandu', 'streetaddress': 'Tripureshwore'}", 'max_length': '9999', 'blank': 'True'}),
'address_coordinates': ('django.contrib.gis.db.models.fields.PointField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'current_address': ('django.contrib.gis.db.models.fields.PointField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'extrainfo': ('jsonfield.fields.JSONField', [], {'default': "'{}'", 'max_length': '9999'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone': ('phonenumber_field.modelfields.PhoneNumberField', [], {'unique': 'True', 'max_length': '16'}),
'phone_status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'profile_image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'user_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'userref': ('django.db.models.fields.CharField', [], {'default': "'149b12765a434ee6856cf869c297c9e5'", 'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['jobs'] |
# Generated by Django 3.2.5 on 2021-07-24 09:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0007_rename_searchedwords_searchedword'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='rating',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
|
from flask_marshmallow import Marshmallow
marsh = Marshmallow()
def init(app):
marsh.init_app(app) |
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.tools.taplo import rules as taplo_rules
def rules():
return [
*taplo_rules.rules(),
]
|
#!/usr/bin/env python
import time
import roslib; roslib.load_manifest('rtg_proje')
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from cv_bridge import CvBridge
import rospy # Python library for ROS
from sensor_msgs.msg import Image # Image is the message type
import cv2 # OpenCV library
from skimage.metrics import structural_similarity
images = []
imageNames = ["h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", "h9", "h10", "h11", "h12", "varil"]
for i in range(1, 13):
images += [cv.imread('images/h' + str(i) + '.png', cv.IMREAD_GRAYSCALE)]
#images += [cv.imread('images/varilmask.png', cv.IMREAD_GRAYSCALE)]
varil_img = cv.imread('images/varilmask.png', cv.IMREAD_GRAYSCALE)
br = CvBridge()
sift = cv.SIFT_create()
bf = cv.BFMatcher()
descriptors = []
for img in images:
kp1, des1 = sift.detectAndCompute(img, None)
descriptors.append([kp1, des1])
def callback(data):
# Convert ROS Image message to OpenCV image
current_frame = br.imgmsg_to_cv2(data)
img_real = cv.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
kp2, des2 = sift.detectAndCompute(img_real, None)
#images = [cv.imread('images/h1.png', cv.IMREAD_GRAYSCALE)]
for desc, img, i in zip(descriptors, images, imageNames):
# Initiate SIFT detector
# find the keypoints and descriptors with SIFT
#kp1, des1 = sift.detectAndCompute(img, None)
kp1, des1 = desc[0], desc[1]
# BFMatcher with default params
try:
matches = bf.knnMatch(des1, des2, k=2)
# Apply ratio test
good = []
for m, n in matches:
if m.distance < 0.50 * n.distance:
good.append([m])
# cv.drawMatchesKnn expects list of lists as matches.
except:
continue
if len(good) > 10:
#img3 = cv.drawMatchesKnn(img, kp1, img_real, kp2, good, None, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
print(i , " is found")
# Convert BGR to HSV
hsv = cv2.cvtColor(current_frame, cv2.COLOR_BGR2HSV)
            # define the target color range in HSV (this low-hue range actually selects red, used for the barrel)
lower_blue = np.array([0, 50, 20], dtype=np.uint8)
upper_blue = np.array([5, 255, 255], dtype=np.uint8)
            # Threshold the HSV image to keep only that color range
mask = cv2.inRange(hsv, lower_blue, upper_blue)
# Bitwise-AND mask and original image
#res = cv2.bitwise_and(current_frame, current_frame, mask=mask)
#cv2.imshow("camera1", hsv)
#cv2.imshow("camera2", mask)
#cv2.imshow("camera3", res)
varilConf = meanMatrix(mask)
#print(varilConf)
if(varilConf > 40):
print("Varil is found")
print("*******************************************")
def meanMatrix(matrix):
matrix = np.array(matrix)
return matrix.mean()
def receive_message():
# Tells rospy the name of the node.
# Anonymous = True makes sure the node has a unique name. Random
# numbers are added to the end of the name.
rospy.init_node('video_sub_py', anonymous=True)
# Node is subscribing to the video_frames topic
rospy.Subscriber('/camera/rgb/image_raw', Image, callback)
#rospy.Rate(1)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
# Close down the video stream when done
#cv2.destroyAllWindows()
if __name__ == '__main__':
receive_message()
|
"""
---TASK DETAILS---
--- Day 2: Bathroom Security ---
You arrive at Easter Bunny Headquarters under cover of darkness.
However, you left in such a rush that you forgot to use the bathroom!
Fancy office buildings like this one usually have keypad locks on their bathrooms, so you search the front desk for the code.
"In order to improve security," the document you find says, "bathroom codes will no longer be written down. Instead, please memorize and follow the procedure below to access the bathrooms."
The document goes on to explain that each button to be pressed can be found by starting on the previous button and moving to adjacent buttons on the keypad:
U moves up, D moves down, L moves left, and R moves right.
Each line of instructions corresponds to one button, starting at the previous button (or, for the first line, the "5" button); press whatever button you're on at the end of each line.
If a move doesn't lead to a button, ignore it.
You can't hold it much longer, so you decide to figure out the code as you walk to the bathroom.
You picture a keypad like this:
1 2 3
4 5 6
7 8 9
Suppose your instructions are:
ULL
RRDDD
LURDL
UUUUD
You start at "5" and move up (to "2"), left (to "1"), and left (you can't, and stay on "1"), so the first button is 1.
Starting from the previous button ("1"), you move right twice (to "3") and then down three times (stopping at "9" after two moves and ignoring the third), ending up with 9.
Continuing from "9", you move left, up, right, down, and left, ending with 8.
Finally, you move up four times (stopping at "2"), then down once, ending with 5.
So, in this example, the bathroom code is 1985.
Your puzzle input is the instructions from the document you found at the front desk. What is the bathroom code?
"""
import os
import sys
keypad = [["1", "2", "3"],
["4", "5", "6"],
["7", "8", "9"]]
current = (1, 1)
movements = {
"U": (-1, 0),
"D": (1, 0),
"L": (0, -1),
"R": (0, 1),
}
finalPos = ""
instructions = open(os.path.join(sys.path[0], "input.txt"))
instrLine = instructions.read().strip().split("\n")
for l in instrLine:
for i in l:
nextMovement = movements[i]
newCurrent = (current[0] + nextMovement[0], current[1] + nextMovement[1])
if newCurrent[0] < 0 or newCurrent[0] > 2 or newCurrent[1] < 0 or newCurrent[1] > 2:
continue
else:
current = newCurrent
finalPos += keypad[current[0]][current[1]]
print(finalPos)
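# Quick self-check (added sketch): replaying the sample instructions from the
# task text above through the same keypad/movement tables should yield "1985".
sample_code = ""
pos = (1, 1)
for line in ["ULL", "RRDDD", "LURDL", "UUUUD"]:
    for step in line:
        move = movements[step]
        row, col = pos[0] + move[0], pos[1] + move[1]
        if 0 <= row <= 2 and 0 <= col <= 2:
            pos = (row, col)
    sample_code += keypad[pos[0]][pos[1]]
assert sample_code == "1985"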
|
# coding: utf-8
# In[1]:
import cv2
import numpy as np
import imutils
# In[15]:
img = cv2.imread('./datasets/flower3.jpg')
logo_img = cv2.imread('./datasets/pyimagesearch_logo_github.png')
car_img = cv2.imread('./datasets/licence_plate1.jpg')
gray_car = cv2.cvtColor(car_img, cv2.COLOR_BGR2GRAY)
car_img_light = cv2.imread('./datasets/licence_plate2.jpg')
gray_car_light = cv2.cvtColor(car_img_light, cv2.COLOR_BGR2GRAY)
car_img_blue = cv2.imread('./datasets/licence_plate3.jpg')
gray_car_blue = cv2.cvtColor(car_img_blue, cv2.COLOR_BGR2GRAY)
#cv2.imshow("Original Image",img)
#cv2.waitKey(0)
# In[3]:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray_logo = cv2.cvtColor(logo_img, cv2.COLOR_BGR2GRAY)
#eroded = cv2.erode(gray, None, iterations=2)
#cv2.imshow("Eroded image", eroded)
#cv2.waitKey(0)
# In[61]:
eroded_new = cv2.erode(gray, None, iterations=30)  # the third positional argument is dst, so iterations must be passed by keyword
# In[62]:
#Eroding a grayscale image
for i in range(0,10):
    eroded = cv2.erode(gray, None, iterations = i + 1)  # i + 1 so the window title below matches
cv2.imshow("Eroded {} times".format(i+1), eroded)
cv2.waitKey(0)
# In[49]:
cv2.imshow('Eroded 30 times', eroded_new)
cv2.waitKey(0)
# In[59]:
#Dilating an image
for i in range(0,30):
dilated = cv2.dilate(img, None, iterations = i)
cv2.imshow("Dilated Image", dilated)
cv2.waitKey(0)
# In[68]:
# Opening operation - Erosion followed by dilation
kernelSizes = [(3,3),(5,5),(7,7), (9,9),(11,11),(13,13)]
for kernelSize in kernelSizes:
kernel_structuring = cv2.getStructuringElement(cv2.MORPH_RECT, kernelSize)
opened_image = cv2.morphologyEx(gray_logo, cv2.MORPH_OPEN, kernel_structuring)
cv2.imshow("Opening: ({}, {})".format(kernelSize[0], kernelSize[1]), opened_image)
cv2.waitKey(0)
# In[69]:
#Closing operation - dilation followed by erosion
kernelSizes = [(3,3),(5,5),(7,7),(9,9),(11,11),(13,13)]
for kernelSize in kernelSizes:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernelSize)
closed_image = cv2.morphologyEx(gray_logo, cv2.MORPH_CLOSE, kernel)
cv2.imshow("Closed image", closed_image)
cv2.waitKey(0)
# In[4]:
#morphological gradient - difference between dilation and erosion
#the morphological gradient reveals the outline of objects in the image
kernelSizes = [(3,3),(5,5),(7,7)]
for kernelSize in kernelSizes:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernelSize)
gradient = cv2.morphologyEx(gray_logo, cv2.MORPH_GRADIENT, kernel)
cv2.imshow("Gradient: ({}, {})".format(kernelSize[0], kernelSize[1]), gradient)
cv2.waitKey(0)
# In[10]:
#Top hat operator - detect bright objects against dark background
#Tophat is the difference between input image and the opening
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13,5)) # since a license plate is roughly 3 times wider than it is tall
# As the color of the car is dark, detecting license plate was easier.
tophat = cv2.morphologyEx(gray_car, cv2.MORPH_TOPHAT, kernel)
cv2.imshow("Tophat", tophat)
cv2.waitKey(0)
# In[11]:
#Black hat operation - difference between the closing of the input image and the input image itself
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13,5))
blackhat = cv2.morphologyEx(gray_car_light, cv2.MORPH_BLACKHAT, kernel)
cv2.imshow('Black Hat', blackhat)
cv2.waitKey(0)
# In[19]:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13,5))
blackhat = cv2.morphologyEx(gray_car_blue, cv2.MORPH_BLACKHAT, kernel)
tophat = cv2.morphologyEx(blackhat, cv2.MORPH_TOPHAT, kernel)
cv2.imshow('License Plate', tophat)
cv2.waitKey(0)
# In[ ]:
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 7 09:39:43 2019
@author: HP
"""
def swap(a,b):
    arr[a], arr[b] = arr[b], arr[a]
def InsertionSort():
    for i in range(1,len(arr)):
        j=i
        # check the lower bound first, so arr[j-1] never wraps around to arr[-1]
        while(j-1>=0 and arr[j-1]>arr[j]):
            swap(j-1,j)
            j=j-1
arr=[5,7,8,2,1,7,10,13]
InsertionSort()
print(arr) |
import os
from collections import defaultdict
from configparser import ConfigParser, _UNSET, NoSectionError, NoOptionError
from functools import partial
MY_DIR = os.path.dirname(__file__)
def absdir(path):
return os.path.abspath(os.path.join(MY_DIR, path))
# -------------------------------------------
# Subclass the config parser to be able to obtain
# options from the default config
# -------------------------------------------
def setpaths(conf, path):
secs = ['paths', 'files']
for sec in secs:
if sec in conf.sections():
for opt in conf[sec]:
v = conf[sec][opt]
conf.set(sec, opt, os.path.abspath(os.path.join(os.path.dirname(path), v)))
class PathRelativeConfigParser(ConfigParser):
def __init__(self, *args, path=None, **kwargs):
super().__init__(*args, **kwargs)
setpaths(self, path)
def read(self, filenames, encoding=None):
super().read(filenames, encoding=encoding)
if isinstance(filenames, str):
setpaths(self, filenames)
@classmethod
def load(cls, filename):
prcp = cls()
prcp.read(filename)
return prcp
def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
if section not in self.sections():
return fallback
else:
return super().get(section, option, raw=raw, vars=vars, fallback=fallback)
# -------------------------------------------
# The following options are concerned with the various
# folders used for holding temporary and debug files.
# -------------------------------------------
# The directory in which to place the human readable feature files.
def FEAT_DIR(obj):
return getattr(obj, 'feat_dir')
# Directory to the gold standard data for evaluation.
def GOLD_DIR(obj):
return getattr(obj, 'gold_dir')
# Directory in which to place output classified files
def OUT_DIR(obj):
return getattr(obj, 'classified_dir')
# Whether or not to output debugging information
def DEBUG_ON(obj):
return getattr(obj, 'debug_on')
# The directory in which to store the information about the classifier feature
# weights, and raw labels
def DEBUG_DIR(obj):
return getattr(obj, 'debug_dir')
# -------------------------------------------
# Path to various text files
# -------------------------------------------
# Large English language wordlist.
EN_WORDLIST = 'en_wordlist'
# List of gloss-line words extracted from ODIN-2.1
GLS_WORDLIST = 'gls_wordlist'
# List of meta line words extracted from ODIN-2.1
MET_WORDLIST = 'met_wordlist'
# List of language names
LNG_NAMES = 'lng_names'
thresh_dict = {}
def get_thresh(config, var):
global thresh_dict
if var not in thresh_dict:
thresh_dict[var] = config.getfloat('thresholds', var)
return thresh_dict.get(var)
def HIGH_OOV_THRESH(config): return get_thresh(config, 'high_oov')
def MED_OOV_THRESH(config): return get_thresh(config, 'med_oov')
def HIGH_ISCORE_THRESH(config): return get_thresh(config, 'high_iscore')
def MED_ISCORE_THRESH(config): return get_thresh(config, 'med_iscore')
def LOW_ISCORE_THRESH(config): return get_thresh(config, 'low_iscore')
# -------------------------------------------
# Load the Wordlist if it is defined in the config.
# -------------------------------------------
class WordlistFile(set):
def __init__(self, path):
super().__init__()
with open(path, 'r', encoding='utf-8') as f:
for line in f:
if line.strip():
self.add(line.split()[0])
USE_BI_LABELS = 'use_bi_labels'
# Some lines appear as combinations of labels, such as "L-G-T" for all
# three on a single line. If this is set to true, these types of
# combined labels are allowed. If set to false, only the first
# of the multiple labels will be used.
USE_MULTI_LABELS = 'use_multi_labels'
# "Flags" are additional information that is intended to be included in
# the information about the line, such as +AC (for Author Citation)
# or +LN (for Language Name). These are stripped out by default, as
# otherwise they would result in an explosion of labels.
STRIP_FLAGS = 'strip_flags'
# =============================================================================
# Feature selection.
#
# In this section, various features are defined and can be enabled or
# disabled by the user. Read the comments, as some definitions are constants
# and should not be edited.
# =============================================================================
# -------------------------------------------
# High-level features.
#
# Set these to True or False, depending
# on whether you want that feature set enabled
# or not.
# -------------------------------------------
# Use the freki-block based features
FREKI_FEATS_ENABLED = True
# Use the text-based features
TEXT_FEATS_ENABLED = True
# -------------------------------------------
# These three features control whether the
# features are included for the previous line,
# the line before that (prev_prev), or the next
# line.
# -------------------------------------------
true_vals = set(['t','true','1','on','enabled'])
def getbool(args, k):
val = args.get(k, False)
return str(val).lower() in true_vals
def USE_PREV_LINE(args):
return getbool(args, 'use_prev_line')
# return args.getboolean('featuresets', 'use_prev_line')
def USE_PREV_PREV_LINE(args):
return getbool(args, 'use_prev_prev_line')
# return args.getboolean('featuresets', 'use_prev_prev_line')
def USE_NEXT_LINE(args):
return getbool(args, 'use_next_line')
# return args.getboolean('featuresets', 'use_next_line')
# -------------------------------------------
# FEATURE CONSTANTS
#
# Associating a variable with the text string used in the config file.
# -------------------------------------------
F_IS_INDENTED = 'is_indented'
F_IS_FIRST_PAGE = 'is_first_page'
F_PREV_LINE_SAME_BLOCK = 'prev_line_same_block'
F_NEXT_LINE_SAME_BLOCK = 'next_line_same_block'
F_HAS_NONSTANDARD_FONT = 'has_nonstandard_font'
F_HAS_SMALLER_FONT = 'has_smaller_font'
F_HAS_LARGER_FONT = 'has_larger_font'
F_HIGH_ISCORE = 'f_high_iscore'
F_MED_ISCORE = 'f_med_iscore'
F_LOW_ISCORE = 'f_low_iscore'
# List of all the above
F_LIST = [F_IS_INDENTED, F_IS_FIRST_PAGE, F_PREV_LINE_SAME_BLOCK, F_NEXT_LINE_SAME_BLOCK, F_HAS_NONSTANDARD_FONT, F_HAS_SMALLER_FONT, F_HAS_LARGER_FONT, F_HIGH_ISCORE, F_MED_ISCORE, F_LOW_ISCORE]
T_PREV_TAG = 'prev_tag'
T_BASIC = 'words'
T_HAS_LANGNAME = 'has_langname'
T_HAS_GRAMS = 'has_grams'
T_HAS_PARENTHETICAL = 'has_parenthetical'
T_HAS_CITATION = 'has_citation'
T_HAS_ASTERISK = 'has_asterisk'
T_HAS_UNDERSCORE = 'has_underscore'
T_HAS_BRACKETING = 'has_bracketing'
T_HAS_QUOTATION = 'has_quotation'
T_HAS_NUMBERING = 'has_numbering'
T_HAS_LEADING_WHITESPACE = 'has_leading_whitespace'
T_HIGH_OOV_RATE = 'high_oov_rate'
T_MED_OOV_RATE = 'med_oov_rate'
T_HIGH_GLS_OOV_RATE = 'high_gls_oov'
T_HIGH_MET_OOV_RATE = 'high_met_oov'
T_MED_GLS_OOV_RATE = 'med_gls_oov'
T_HAS_JPN = 'has_jpn'
T_HAS_GRK = 'has_grk'
T_HAS_KOR = 'has_kor'
T_HAS_CYR = 'has_cyr'
T_HAS_ACC = 'has_acc_lat'
T_HAS_DIA = 'has_dia'
T_HAS_UNI = 'has_uni'
T_HAS_YEAR = 'has_year'
T_LIST = [T_BASIC, T_HAS_LANGNAME, T_HAS_GRAMS, T_HAS_PARENTHETICAL, T_HAS_CITATION, T_HAS_ASTERISK, T_HAS_UNDERSCORE, T_HAS_BRACKETING,
          T_HAS_QUOTATION, T_HAS_NUMBERING, T_HAS_LEADING_WHITESPACE, T_HIGH_OOV_RATE, T_MED_OOV_RATE,
          T_HIGH_GLS_OOV_RATE, T_MED_GLS_OOV_RATE, T_HIGH_MET_OOV_RATE,
          T_HAS_JPN, T_HAS_GRK, T_HAS_KOR, T_HAS_CYR, T_HAS_ACC, T_HAS_DIA, T_HAS_UNI, T_HAS_YEAR]
# =============================================================================
# EDIT THIS SECTION
# =============================================================================
# -------------------------------------------
# Now, to enable/disable a particular feature,
# just comment out the line the feature is
# contained on.
# -------------------------------------------
def enabled_feats(config: ConfigParser, section, featlist):
enabled = set([])
for feat in featlist:
if config.has_option(section, feat):
b = config.getboolean(section, feat)
if b:
enabled.add(feat)
return enabled
_enabled_freki_feats = None
_enabled_text_feats = None
def ENABLED_FREKI_FEATS(config: ConfigParser):
global _enabled_freki_feats
if _enabled_freki_feats is None:
_enabled_freki_feats = enabled_feats(config, 'freki_features', F_LIST)
return _enabled_freki_feats
def ENABLED_TEXT_FEATS(config: ConfigParser):
global _enabled_text_feats
if _enabled_text_feats is None:
_enabled_text_feats = enabled_feats(config, 'text_features', T_LIST)
return _enabled_text_feats
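# -------------------------------------------
# Usage sketch (hypothetical config file): given an INI section such as
#
#   [text_features]
#   words = true
#   has_langname = false
#
# a caller could do
#
#   config = PathRelativeConfigParser.load('config.ini')
#   ENABLED_TEXT_FEATS(config)   # -> {'words'}
#
# since enabled_feats() only returns options explicitly set to a true value.
# -------------------------------------------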
# =============================================================================
# Regular Expressions
#
# These are multiline expressions that were initially used for IGT detection.
#
# These are currently unused, but could be included to fire for lines which
# find themselves contained in such a regex.
# =============================================================================
REGEXES = '''
\s*(\()\d*\).*\n
\s*.*\n
\s*\[`'"].*\n
~
\s*(\()\d*\)\s\w\..*\n
\s*.*\n
\s\[`'"].*\n
~
\s*(\(\d)*\)\s*\(.*\n
\s.*\n
\s*.*\n
\s\[`'"].*\n
~
\s*(\()\d*\).*\n
\s*\w\..*\n
\s*.*\n
\s\[`'"].*\n
~
\s*\w.\s*.*\n
\s*.*\n
\s*\[`'"].*\n
~
\s*\w\)\s*.*\n
\s*.*\n
\s*\[`'"].*\n
~
\s*(\()\w*\).*\n
\s*.*\n
\s*\[`'"].*\n
~
//added 02-03-2005
\s*\d.*.*\n
\s*.*\n
\s*\[`'"].*\n
~
\s*(\()\d*\).*\n
.*\n
\s*.*\n
\s*\[`'"].*\n
~''' |
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView
from django.views.generic.list import ListView
from .models import ExamLibItem, ExamItem, Paper, ExamResult
from .forms import PaperForm, ExamItemForm, TestItemForm, ExamLibItemForm, ExamLibItemFormSet, ExamItemFormSet, TestItemFormSet
from django.views.generic.edit import FormMixin
from django.contrib import messages
from django.http import Http404
from django import forms
from django.core.urlresolvers import reverse
# Create your views here.
#Examhome -> ExamItem (model)
class ExamItemDetail(DetailView):
model = ExamItem
template_name = 'exam/examresultitem_detail.html'
class ExamResultDetailView(ListView): # should be DetailView (ExamResult) + ListView (ExamItem)
queryset = ExamItem.objects.all()
model = ExamItem
template_name = 'exam/examresult_detail.html'
def get_context_data(self, *args, **kwargs):
context = super(ExamResultDetailView, self).get_context_data(*args, **kwargs)
context["formset"] = ExamItemFormSet(queryset=self.get_queryset())
return context
def get_object(self, *args, **kwargs):
ExamResult_pk = self.kwargs.get("pk")
if ExamResult_pk:
exam_result = get_object_or_404(ExamResult, pk=ExamResult_pk)
return exam_result
return None
def get_queryset(self, *args, **kwargs):
ExamResult_pk = self.kwargs.get("pk")
if ExamResult_pk:
exam_result = get_object_or_404(ExamResult, pk=ExamResult_pk)
queryset = ExamItem.objects.filter(exam_result=exam_result)
return queryset
def post(self, request, *args, **kwargs):
formset = ExamItemFormSet(request.POST, request.FILES)
bValid = True
if formset.is_valid():
print "formset is valid"
instances = formset.save(commit=False)
for i,form in enumerate(formset):
if form.is_valid:
instance = form.save(commit=False)
# print form.cleaned_data, i
if form.cleaned_data['score_result'] == None:
bValid = False
else:
form.save()
else:
# print form.errors, i
bValid = False
else:
# print formset.errors
bValid = False
if bValid == True:
obj = self.get_object(*args, **kwargs)
if obj:
obj.save() # update score
return redirect("examhome")
else:
template = 'exam/examresultitem_list.html'
context = {
'formset' : formset,
}
return render(request, template, context)
# return redirect("examhome")
# Paper -> Examlib
class PaperList(ListView):
queryset = Paper.objects.all()
model = Paper
def get_context_data(self, *args, **kwargs):
context = super(PaperList, self).get_context_data(*args, **kwargs)
# context["formset"] = TestItemFormSet(queryset=self.get_queryset())
return context
class PaperDetailView(ListView): # should be DetailView (Paper) + ListView (ExamLibItem)
queryset = ExamLibItem.objects.all()
model = ExamLibItem
def get_context_data(self, *args, **kwargs):
context = super(PaperDetailView, self).get_context_data(*args, **kwargs)
queryset = self.get_queryset()
total_score=0
for object in queryset:
total_score += object.score
context["total_score"] = total_score
context["formset"] = ExamLibItemFormSet(queryset=self.get_queryset())
context["queryset"] = self.get_queryset(args, kwargs)
try:
Paper_pk = self.kwargs.get("pk")
if Paper_pk:
paper = get_object_or_404(Paper, pk=Paper_pk)
context['paperForm'] = PaperForm(instance = paper)
except:
raise Http404
return context
def get_queryset(self, *args, **kwargs):
Paper_pk = self.kwargs.get("pk")
if Paper_pk:
paper = get_object_or_404(Paper, pk=Paper_pk)
queryset = ExamLibItem.objects.filter(paper=paper)
return queryset
def post(self, request, *args, **kwargs):
formset = ExamLibItemFormSet(request.POST, request.FILES)
paperForm = PaperForm(request.POST, request.FILES)
if paperForm.is_valid():
paper = paperForm.save(commit=False)
            # paper.save() # saving here would create a new object, probably because this view holds two forms
paper.id = self.kwargs.get("pk")
paper.save()
paperForm.save_m2m()
# GET the option value, it's the model id
# <option value="1" selected="selected">How to ? </option>
# print paperForm['ExamLibItem']['select']['option']
#print request.POST['ExamLibItem']
if formset.is_valid():
instances = formset.save(commit=False)
for form in formset:
if form.is_valid():
new_item = form.save(commit=False)
if new_item.title != '': #prevent empty form
Paper_pk = self.kwargs.get("pk")
paper = get_object_or_404(Paper, pk=Paper_pk)
new_item.save()
if form in formset.deleted_forms:
new_item.paper_set.remove(paper)
else:
new_item.paper_set.add(paper)
# form.save_m2m()
# https://docs.djangoproject.com/en/dev/topics/forms/modelforms/#the-save-method
messages.success(request, "Exam lib item updated.")
return redirect("paper")
template = 'exam/examlibitem_list.html'
context = {
'formset' : formset,
'paperForm' : paperForm
}
return render(request, template, context)
#raise Http404
class ExamLibItemUpdateView(UpdateView): #(FormMixin, DetailView):
model = ExamLibItem
form_class = ExamLibItemForm
#template_name = "carts/checkout_view.html"
# def get_context_data(self, *args, **kwargs):
# context = super(ExamLibItemUpdateView, self).get_context_data(*args, **kwargs)
# context["form"] = ExamLibItemForm(instance = self.get_object())
# return context
# def get_object(self, *args, **kwargs):
# ExamLibItem_pk = self.kwargs.get("pk")
# if ExamLibItem_pk:
# examlibitem = get_object_or_404(ExamLibItem, pk=ExamLibItem_pk)
# return examlibitem
def get_success_url(self):
return reverse("paper")
# def post(self, request, *args, **kwargs):
# form = self.get_form()
# if form.is_valid():
# return self.form_valid(form)
# else:
# self.object = self.get_object()
# print form.errors
# return self.form_invalid(form)
#default context object
#Testhome -> TestItem (form)
class TestItemList(ListView): # ExamLibItem (Library) > ExamItem (Result)
queryset = ExamLibItem.objects.all()
model = ExamLibItem
template_name = 'exam/testitem_list.html'
def get_context_data(self, *args, **kwargs):
context = super(TestItemList, self).get_context_data(*args, **kwargs)
context["formset"] = TestItemFormSet(queryset=self.get_queryset(),
initial=[{'ref_answer': '',}])
paper_pk = self.kwargs.get("pk")
paper = get_object_or_404(Paper, pk=paper_pk)
bExist = False
try:
ExamResult.objects.get(paper=paper, user=self.request.user)
bExist = True
except:
bExist = False
context["bExist"] = bExist
return context
def get_queryset(self, *args, **kwargs):
paper_pk = self.kwargs.get("pk")
if paper_pk:
paper = get_object_or_404(Paper, pk=paper_pk)
queryset = ExamLibItem.objects.filter(paper=paper)
return queryset
def post(self, request, *args, **kwargs):
formset = TestItemFormSet(request.POST, request.FILES)
if formset.is_valid():
instances = formset.save(commit=False)
for form in formset:
instance = form.save(commit=False)
paper_pk = self.kwargs.get("pk")
try:
paper = get_object_or_404(Paper, pk=paper_pk)
examLibItem = instance #form.instance can be only called by ModelFormset
try:
                        exam_item, created = ExamItem.objects.get_or_create(examlibitem=examLibItem,
paper=paper, user=self.request.user)
except:
exam_item = ExamItem.objects.create(
examlibitem=examLibItem,
paper=paper,
user = self.request.user,
answer = '',
exam_result = None,
score_result = 1)
exam_item.answer = form.cleaned_data.get("answer")
try:
exam_result, created = ExamResult.objects.get_or_create(
paper=paper, user=self.request.user)
except:
exam_result = ExamResult.objects.create(
paper=paper,
user = self.request.user,
score = 1)
exam_item.exam_result = exam_result
exam_item.save()
except:
raise Http404
else:
            print(formset.errors)
return redirect("paper")
#default context object_list
def examhome(request):
exam_results = ExamResult.objects.filter(user=request.user)
if request.user.is_superuser:
exam_results = ExamResult.objects.all()
context = {
"exam_results": exam_results
}
return render(request, "exam/examresultpaper_list.html", context)
def testhome(request): #no Model to track test itself. it dynamically generate other Models
test_papers = Paper.objects.all()
context = {
"test_papers": test_papers
}
return render(request, "exam/testpaper_list.html", context) |
import json
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
SOURCE_TELEGRAM = 'telegram'
SOURCE_SAHAMYAB = 'sahamyab'
SOURCE_CHOICES = (
(SOURCE_TELEGRAM, 'Telegram'),
(SOURCE_SAHAMYAB, 'Sahamyab')
)
SENTIMENT_NEUTRAL = 'neutral'
SENTIMENT_POSITIVE = 'positive'
SENTIMENT_NEGATIVE = 'negative'
SENTIMENT_CHOICES = (
(SENTIMENT_NEUTRAL, 'Neutral'),
(SENTIMENT_NEGATIVE, 'Negative'),
(SENTIMENT_POSITIVE, 'Positive')
)
class Post(models.Model):
messageId = models.IntegerField(null=False)
content = models.TextField(null=False, blank=False)
messageDate = models.DateTimeField()
elasticPushDate = models.DateTimeField()
senderId = models.IntegerField()
senderUsername = models.CharField(max_length=200, null=True, blank=True)
senderName = models.CharField(max_length=200)
isGroup = models.BooleanField()
channelId = models.IntegerField()
channelName = models.CharField(max_length=200)
channelUsername = models.CharField(max_length=200, null=True, blank=True)
parentId = models.IntegerField(null=True, blank=True)
likeCount = models.IntegerField(null=True, blank=True)
source = models.CharField(max_length=100, choices=SOURCE_CHOICES)
stock = models.CharField(max_length=100)
sentiment = models.CharField(max_length=100, choices=SENTIMENT_CHOICES)
image = models.TextField(null=True, blank=True)
version = models.CharField(max_length=20)
class Meta(object):
ordering = ["messageId"]
def __str__(self):
return self.stock
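# A minimal query sketch (hypothetical stock symbol): fetch positive Telegram
# posts for one stock, newest first.
#
#   Post.objects.filter(
#       stock='ABC', source=SOURCE_TELEGRAM, sentiment=SENTIMENT_POSITIVE
#   ).order_by('-messageDate')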
|
import re
import sys
from typing import List
from datetime import datetime
from openpyxl import load_workbook
from openpyxl.workbook import Workbook
from openpyxl.worksheet.worksheet import Worksheet
from rich import print
from rich.prompt import Confirm
"""
This class exports all of the important information required
to keep track of a change request and to record who the
change request belongs to.
written by: jiaul_islam
"""
class Data_Export:
def __init__(self, file_path: str) -> None:
try:
self._is_used(file_path)
self._change_list_excel: Workbook = load_workbook(filename=file_path)
self._sheet: Worksheet = self._change_list_excel.active
except FileNotFoundError as error:
print(error)
sys.exit()
def change_sheet(self, sheet_name: str) -> None:
try:
self._sheet = self._change_list_excel[sheet_name]
except Exception as error:
raise error
def insert_date(self, index: int, date: str) -> None:
""" insert the date of the Change requesting """
date_to_insert = datetime.strptime(str(date), '%Y-%m-%d %H:%M:%S')
my_date = str(date_to_insert.strftime("%d-%b-%y"))
self._sheet['B' + str(index)] = my_date
def insert_project_coordinator(self, index: int, name: str) -> None:
""" insert the project coordinator name in the excel """
self._sheet['C' + str(index)] = name
def insert_project_name(self, index: int, project_name: str) -> None:
""" insert the project name in the excel """
self._sheet['D' + str(index)] = project_name
def insert_change_activity(self, index: int, activity: str) -> None:
""" insert the change activity in the excel """
self._sheet['E' + str(index)] = activity
def insert_impact_site_list(self, index: int, impact_site_list: str) -> None:
""" insert the impact site list in the excel """
_PATTERN = r'([A-Z]{5}(?:(?:[A-Z0-9][0-9])|(?:[0-9][A-Z0-9])))'
sites: List[str] = re.findall(_PATTERN, impact_site_list)
self._sheet['F' + str(index)] = ",".join(sites)
def insert_service_type(self, index: int, service_type: str) -> None:
""" insert the service type of the change request in the excel file"""
self._sheet['G' + str(index)] = service_type
def insert_downtime_duration(self, index: int, duration: str) -> None:
""" insert the downtime duration limit in the excel file """
self._sheet['H' + str(index)] = duration
def insert_commercial_zone(self, index: int, commercial_zone: str) -> None:
""" insert the commercial zone in the excel file """
self._sheet['I' + str(index)] = commercial_zone
def insert_change_number(self, index: int, change_number: str) -> None:
""" insert the change number in the excel file for respective Change request """
self._sheet['J' + str(index)] = change_number
def insert_change_manager(self, index: int, change_manager: str) -> None:
""" insert the change manager in the excel file """
self._sheet['K' + str(index)] = change_manager
def save_workbook(self, file_path: str) -> None:
""" Save the workbook """
self._change_list_excel.save(file_path)
def close_workbook(self) -> None:
""" Close the workbook """
self._change_list_excel.close()
@staticmethod
def _is_used(my_file: str) -> None:
""" Checks if the user is already kept opened the excel file trying to write """
while True:
try:
with open(my_file, "a+"):
break
except IOError:
if Confirm.ask("File Closed ?", default="(y)", show_default=True):
pass
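# Usage sketch (hypothetical file name and row index): fill one change-request
# row and save; the column letters B..K match the insert_* helpers above.
#
#   exporter = Data_Export("change_requests.xlsx")
#   exporter.insert_date(2, "2021-07-24 09:28:00")
#   exporter.insert_project_name(2, "Fiber Rollout")
#   exporter.insert_change_number(2, "CRQ000123")
#   exporter.save_workbook("change_requests.xlsx")
#   exporter.close_workbook()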
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
from web_backend.nvlserver.module import nvl_meta
from sqlalchemy import BigInteger, String, Column, Boolean, ForeignKey, DateTime, Table
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.sql.functions import func
user = Table(
'user',
nvl_meta,
Column('id', BigInteger, primary_key=True),
Column('email', String(255), nullable=False),
Column('password', String(128), nullable=False),
Column('fullname', String(70), nullable=False, default=''),
Column('locked', Boolean, default=False, nullable=False),
Column('language_id', BigInteger, ForeignKey('language.id'), nullable=False),
Column('meta_information', JSONB, default=lambda: {"timezone": "Europe/Zagreb"}, nullable=False),
Column('account_type_id', BigInteger, ForeignKey('account_type.id'), nullable=False),
Column('active', Boolean, default=True, nullable=False),
Column('deleted', Boolean, default=False, nullable=False),
Column('created_on', DateTime(timezone=True), server_default=func.now(), nullable=False),
Column('updated_on', DateTime(timezone=True),
server_default=func.now(), onupdate=func.now(), nullable=False),
Column('gendar', String(50), nullable=False),
Column('companyName', String(200), nullable=False),
Column('address', String(200), nullable=False),
Column('city', String(50), nullable=False),
Column('postalCode', String(20), nullable=False),
Column('mobileNumber', String(15), nullable=False),
Column('country', String(20), nullable=False),
Column('webPage', String(15), nullable=False),
Column('updateByMails', String(15), nullable=False),
Column('vatId', String(40), nullable=False),
Column('distance_unit', String(100), nullable=True),
)
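# A minimal query sketch (SQLAlchemy 1.4+ core style; the engine/session is
# assumed to be configured elsewhere in the application):
#
#   from sqlalchemy import select
#   stmt = select(user.c.id, user.c.email).where(user.c.deleted.is_(False))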
|
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # Merging Dataframes
#
# In[1]:
import pandas as pd
df = pd.DataFrame([{'Name': 'Chris', 'Item Purchased': 'Sponge', 'Cost': 22.50},
{'Name': 'Kevyn', 'Item Purchased': 'Kitty Litter', 'Cost': 2.50},
{'Name': 'Filip', 'Item Purchased': 'Spoon', 'Cost': 5.00}],
index=['Store 1', 'Store 1', 'Store 2'])
df
# In[ ]:
df['Date'] = ['December 1', 'January 1', 'mid-May']
df
# In[ ]:
df['Delivered'] = True
df
# In[ ]:
df['Feedback'] = ['Positive', None, 'Negative']
df
# In[ ]:
adf = df.reset_index()
adf['Date'] = pd.Series({0: 'December 1', 2: 'mid-May'})
adf
# In[ ]:
staff_df = pd.DataFrame([{'Name': 'Kelly', 'Role': 'Director of HR'},
{'Name': 'Sally', 'Role': 'Course liasion'},
{'Name': 'James', 'Role': 'Grader'}])
staff_df = staff_df.set_index('Name')
student_df = pd.DataFrame([{'Name': 'James', 'School': 'Business'},
{'Name': 'Mike', 'School': 'Law'},
{'Name': 'Sally', 'School': 'Engineering'}])
student_df = student_df.set_index('Name')
print(staff_df.head())
print()
print(student_df.head())
# In[ ]:
pd.merge(staff_df, student_df, how='outer', left_index=True, right_index=True)
# In[ ]:
pd.merge(staff_df, student_df, how='inner', left_index=True, right_index=True)
# In[ ]:
pd.merge(staff_df, student_df, how='left', left_index=True, right_index=True)
# In[ ]:
pd.merge(staff_df, student_df, how='right', left_index=True, right_index=True)
# In[ ]:
staff_df = staff_df.reset_index()
student_df = student_df.reset_index()
pd.merge(staff_df, student_df, how='left', left_on='Name', right_on='Name')
# In[ ]:
staff_df = pd.DataFrame([{'Name': 'Kelly', 'Role': 'Director of HR', 'Location': 'State Street'},
{'Name': 'Sally', 'Role': 'Course liasion', 'Location': 'Washington Avenue'},
{'Name': 'James', 'Role': 'Grader', 'Location': 'Washington Avenue'}])
student_df = pd.DataFrame([{'Name': 'James', 'School': 'Business', 'Location': '1024 Billiard Avenue'},
{'Name': 'Mike', 'School': 'Law', 'Location': 'Fraternity House #22'},
{'Name': 'Sally', 'School': 'Engineering', 'Location': '512 Wilson Crescent'}])
pd.merge(staff_df, student_df, how='left', left_on='Name', right_on='Name')
# In[ ]:
staff_df = pd.DataFrame([{'First Name': 'Kelly', 'Last Name': 'Desjardins', 'Role': 'Director of HR'},
{'First Name': 'Sally', 'Last Name': 'Brooks', 'Role': 'Course liasion'},
{'First Name': 'James', 'Last Name': 'Wilde', 'Role': 'Grader'}])
student_df = pd.DataFrame([{'First Name': 'James', 'Last Name': 'Hammond', 'School': 'Business'},
{'First Name': 'Mike', 'Last Name': 'Smith', 'School': 'Law'},
{'First Name': 'Sally', 'Last Name': 'Brooks', 'School': 'Engineering'}])
staff_df
student_df
pd.merge(staff_df, student_df, how='inner', left_on=['First Name','Last Name'], right_on=['First Name','Last Name'])
# # Idiomatic Pandas: Making Code Pandorable
# In[ ]:
import pandas as pd
df = pd.read_csv('census.csv')
df
# In[ ]:
# If the dataset is rather large or contains redundancy, we can drop the rows with missing values using dropna()
(df.where(df['SUMLEV']==50)
.dropna()
.set_index(['STNAME','CTYNAME'])
.rename(columns={'ESTIMATESBASE2010': 'Estimates Base 2010'}))
# In[ ]:
df = df[df['SUMLEV']==50]
df.set_index(['STNAME','CTYNAME'], inplace=True)
df.rename(columns={'ESTIMATESBASE2010': 'Estimates Base 2010'})
# In[ ]:
import numpy as np
def min_max(row):
data = row[['POPESTIMATE2010',
'POPESTIMATE2011',
'POPESTIMATE2012',
'POPESTIMATE2013',
'POPESTIMATE2014',
'POPESTIMATE2015']]
return pd.Series({'min': np.min(data), 'max': np.max(data)})
# In[ ]:
df.apply(min_max, axis=1)
# In[ ]:
import numpy as np
def min_max(row):
data = row[['POPESTIMATE2010',
'POPESTIMATE2011',
'POPESTIMATE2012',
'POPESTIMATE2013',
'POPESTIMATE2014',
'POPESTIMATE2015']]
row['max'] = np.max(data)
row['min'] = np.min(data)
return row
df.apply(min_max, axis=1)
# In[ ]:
rows = ['POPESTIMATE2010',
'POPESTIMATE2011',
'POPESTIMATE2012',
'POPESTIMATE2013',
'POPESTIMATE2014',
'POPESTIMATE2015']
df.apply(lambda x: np.max(x[rows]), axis=1)
# # Group by
# In[ ]:
import pandas as pd
import numpy as np
df = pd.read_csv('census.csv')
df = df[df['SUMLEV']==50]
df
# In[ ]:
get_ipython().run_cell_magic('timeit', '-n 10', "for state in df['STNAME'].unique():\n avg = np.average(df.where(df['STNAME']==state).dropna()['CENSUS2010POP'])\n print('Counties in state ' + state + ' have an average population of ' + str(avg))")
# In[ ]:
get_ipython().run_cell_magic('timeit', '-n 10', "for group, frame in df.groupby('STNAME'):\n avg = np.average(frame['CENSUS2010POP'])\n print('Counties in state ' + group + ' have an average population of ' + str(avg))")
# In[ ]:
df.head()
# In[ ]:
df = df.set_index('STNAME')
def fun(item):
if item[0]<'M':
return 0
if item[0]<'Q':
return 1
return 2
for group, frame in df.groupby(fun):
print('There are ' + str(len(frame)) + ' records in group ' + str(group) + ' for processing.')
# In[ ]:
# Note: this cell assumes a different df, with 'Category', 'Weight (oz.)' and 'Quantity' columns (not the census data loaded above)
print(df.groupby('Category').apply(lambda df,a,b: sum(df[a] * df[b]), 'Weight (oz.)', 'Quantity'))
# Or alternatively without using a lambda:
# def totalweight(df, w, q):
# return sum(df[w] * df[q])
#
# print(df.groupby('Category').apply(totalweight, 'Weight (oz.)', 'Quantity'))
# In[ ]:
df = pd.read_csv('census.csv')
df = df[df['SUMLEV']==50]
# In[ ]:
df.groupby('STNAME').agg({'CENSUS2010POP': np.average})
# In[ ]:
print(type(df.groupby(level=0)['POPESTIMATE2010','POPESTIMATE2011']))
print(type(df.groupby(level=0)['POPESTIMATE2010']))
# In[ ]:
(df.set_index('STNAME').groupby(level=0)['CENSUS2010POP']
.agg({'avg': np.average, 'sum': np.sum}))
# In[ ]:
(df.set_index('STNAME').groupby(level=0)['POPESTIMATE2010','POPESTIMATE2011']
.agg({'avg': np.average, 'sum': np.sum}))
# In[ ]:
(df.set_index('STNAME').groupby(level=0)['POPESTIMATE2010','POPESTIMATE2011']
.agg({'POPESTIMATE2010': np.average, 'POPESTIMATE2011': np.sum}))
# # Scales
# In[ ]:
df = pd.DataFrame(['A+', 'A', 'A-', 'B+', 'B', 'B-', 'C+', 'C', 'C-', 'D+', 'D'],
index=['excellent', 'excellent', 'excellent', 'good', 'good', 'good', 'ok', 'ok', 'ok', 'poor', 'poor'])
df.rename(columns={0: 'Grades'}, inplace=True)
df
# In[ ]:
df['Grades'].astype('category').head()
# In[ ]:
grades = df['Grades'].astype('category',
categories=['D', 'D+', 'C-', 'C', 'C+', 'B-', 'B', 'B+', 'A-', 'A', 'A+'],
ordered=True)
grades.head()
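# In[ ]:
# Version note (added): newer pandas releases removed the
# `astype('category', categories=..., ordered=...)` signature used above;
# on pandas >= 0.21 the equivalent is:
# grades = df['Grades'].astype(pd.CategoricalDtype(
#     categories=['D', 'D+', 'C-', 'C', 'C+', 'B-', 'B', 'B+', 'A-', 'A', 'A+'],
#     ordered=True))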
# In[ ]:
grades > 'C'
# In[ ]:
df = pd.read_csv('census.csv')
df = df[df['SUMLEV']==50]
df = df.set_index('STNAME').groupby(level=0)['CENSUS2010POP'].agg({'avg': np.average})
pd.cut(df['avg'],10)
# # Pivot Tables
# In[ ]:
#http://open.canada.ca/data/en/dataset/98f1a129-f628-4ce4-b24d-6f16bf24dd64
df = pd.read_csv('cars.csv')
# In[ ]:
df.head()
# In[ ]:
df.pivot_table(values='(kW)', index='YEAR', columns='Make', aggfunc=np.mean)
# In[ ]:
df.pivot_table(values='(kW)', index='YEAR', columns='Make', aggfunc=[np.mean,np.min], margins=True)
# # Date Functionality in Pandas
# In[2]:
import pandas as pd
import numpy as np
# ### Timestamp
# In[3]:
pd.Timestamp('9/1/2016 10:05AM')
# ### Period
# In[4]:
pd.Period('1/2016')
# In[5]:
pd.Period('3/5/2016')
# ### DatetimeIndex
# In[6]:
t1 = pd.Series(list('abc'), [pd.Timestamp('2016-09-01'), pd.Timestamp('2016-09-02'), pd.Timestamp('2016-09-03')])
t1
# In[7]:
type(t1.index)
# ### PeriodIndex
# In[8]:
t2 = pd.Series(list('def'), [pd.Period('2016-09'), pd.Period('2016-10'), pd.Period('2016-11')])
t2
# In[9]:
type(t2.index)
# ### Converting to Datetime
# In[10]:
d1 = ['2 June 2013', 'Aug 29, 2014', '2015-06-26', '7/12/16']
ts3 = pd.DataFrame(np.random.randint(10, 100, (4,2)), index=d1, columns=list('ab'))
ts3
# In[11]:
ts3.index = pd.to_datetime(ts3.index)
ts3
# In[12]:
pd.to_datetime('4.7.12', dayfirst=True)
# ### Timedeltas
# In[13]:
pd.Timestamp('9/3/2016')-pd.Timestamp('9/1/2016')
# In[14]:
pd.Timestamp('9/2/2016 8:10AM') + pd.Timedelta('12D 3H')
# ### Working with Dates in a Dataframe
# In[15]:
dates = pd.date_range('10-01-2016', periods=9, freq='2W-SUN')
dates
# In[16]:
df = pd.DataFrame({'Count 1': 100 + np.random.randint(-5, 10, 9).cumsum(),
'Count 2': 120 + np.random.randint(-5, 10, 9)}, index=dates)
df
# In[17]:
df.index.weekday_name
# In[18]:
df.diff()
# In[19]:
df.resample('M').mean()
# In[20]:
df['2017']
# In[21]:
df['2016-12']
# In[22]:
df['2016-12':]
# In[ ]:
df.asfreq('W', method='ffill')
# In[ ]:
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
df.plot()
|
import torch
from torch import nn
import torch.nn.functional as F
from utils import (
get_same_padding_conv2d,
Swish,
MemoryEfficientSwish,
round_filters,
round_repeats,
drop_connect,
efficientnet_params,
load_pretrained_weights,
get_model_params
)
class MBConvBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Arguments:
        block_args: namedtuple
        global_params: namedtuple
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._batch_norm_momentum = 1 - global_params.batch_norm_momentum
self._batch_norm_epsilon = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) \
and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
input_filters = self._block_args.input_filters # number of input channels
output_filters = self._block_args.input_filters * self._block_args.expand_ratio
if self._block_args.expand_ratio != 1:
self._expand_conv = Conv2d(input_filters, output_filters, 1, bias=False)
self._bn0 = nn.BatchNorm2d(output_filters,
eps=self._batch_norm_epsilon,
momentum=self._batch_norm_momentum)
# Depthwise separable convolution
kernel_size = self._block_args.kernel_size
stride = self._block_args.stride
self._depthwise_conv = Conv2d(output_filters, output_filters, kernel_size,
stride=stride,
groups=output_filters,
bias=False)
self._bn1 = nn.BatchNorm2d(output_filters,
eps=self._batch_norm_epsilon,
momentum=self._batch_norm_momentum)
# Squeeze and Excitation layer if desired
if self.has_se:
num_squeezed_channels = max(1,
int(self._block_args.input_filters * self._block_args.se_ratio))
self._se_reduce = Conv2d(output_filters, num_squeezed_channels, 1)
self._se_expand = Conv2d(num_squeezed_channels, output_filters, 1)
# Output phase
final_output_filters = self._block_args.output_filters
self._project_conv = Conv2d(output_filters, final_output_filters, 1, bias=False)
self._bn2 = nn.BatchNorm2d(final_output_filters,
eps=self._batch_norm_epsilon,
momentum=self._batch_norm_momentum)
self._swish = MemoryEfficientSwish()
def forward(self, inputs, drop_connect_rate=None):
# Expansion and Depthwise Convolution
if self._block_args.expand_ratio != 1:
x = self._swish(self._bn0(self._expand_conv(inputs)))
else:
x = inputs
x = self._swish(self._bn1(self._depthwise_conv(x)))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(self._swish(self._se_reduce(x_squeezed)))
            x = torch.sigmoid(x_squeezed) * x  # scale the channels by the squeeze-and-excitation gate
x = self._bn2(self._project_conv(x))
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, drop_connect_rate, self.training)
x = x + inputs
return x
def set_swish(self, memory_efficient=True):
"""Sets swish function as memory efficient (for training) or standard (for export)"""
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
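# A minimal shape-check sketch (assuming `block_args`/`global_params` come from
# utils.get_model_params, as used in the constructor above):
#
#   block = MBConvBlock(block_args, global_params)
#   out = block(torch.randn(1, block_args.input_filters, 112, 112))
#
# With stride 1 and input_filters == output_filters the skip connection is
# added, and drop connect is applied during training when a rate is given.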
class EfficientNet(nn.Module):
"""
An EfficientNet model. Most easily loaded with `.from_name` or `.from_pretrained` methods
Arguments:
block_args: List of Block Arguments to construct blocks
global_params: namedtuple, A set of GlobalParams shared between blocks
"""
def __init__(self, blocks_args=None, global_params=None):
super().__init__()
assert isinstance(blocks_args, list), 'block_args should be a list'
assert len(blocks_args) > 0, 'length of block_args must be greater than 0'
self._global_params = global_params
self._blocks_args = blocks_args
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Batch norm parameters
batch_norm_momentum = 1 - self._global_params.batch_norm_momentum
batch_norm_epsilon = self._global_params.batch_norm_epsilon
# Stem
in_channels = 3 # RGB
out_channels = round_filters(32, self._global_params)
self._conv_stem = Conv2d(in_channels, out_channels, 3, stride=2, bias=False)
# !: self._conv_stem = Conv2d(in_channels, out_channels, 3, stride=2, padding=1, bias=False)
self._bn0 = nn.BatchNorm2d(out_channels,
eps=batch_norm_epsilon,
momentum=batch_norm_momentum)
# Build blocks
self._blocks = nn.ModuleList([])
for block_args in self._blocks_args:
# Update block input and output filters based on depth multiplier
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters, self._global_params),
output_filters=round_filters(block_args.output_filters, self._global_params),
num_repeat=round_repeats(block_args.num_repeat, self._global_params)
)
# The first block needs to take care of stride and filter size increase
self._blocks.append(MBConvBlock(block_args, self._global_params))
if block_args.num_repeat > 1:
block_args = block_args._replace(
input_filters=block_args.output_filters,
stride=1)
for _ in range(block_args.num_repeat - 1):
self._blocks.append(MBConvBlock(block_args, self._global_params))
# Head
in_channels = block_args.output_filters # output of final block
out_channels = round_filters(1280, self._global_params)
self._conv_head = Conv2d(in_channels, out_channels, 1, bias=False)
self._bn1 = nn.BatchNorm2d(out_channels,
eps=batch_norm_epsilon,
momentum=batch_norm_momentum)
# Final linear layer
self._avg_pooling = nn.AdaptiveAvgPool2d(1)
self._dropout = nn.Dropout(self._global_params.dropout_rate)
self._fc = nn.Linear(out_channels, self._global_params.num_classes)
self._swish = MemoryEfficientSwish()
def set_swish(self, memory_efficient=True):
"""Sets swish function as memory efficient (for training) or standard (for export)"""
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
for block in self._blocks:
block.set_swish(memory_efficient)
def extract_features(self, inputs):
"""Returns output of the final convolutional layer"""
# Stem
        x = self._swish(self._bn0(self._conv_stem(inputs)))
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
# Head
x = self._swish(self._bn1(self._conv_head(x)))
return x
def forward(self, inputs):
"""Calls extract_features to extract features, applies final linear layer and return logits"""
# Convolutional layers
x = self.extract_features(inputs)
# Pooling and final linear layer
x = self._avg_pooling(x)
        x = x.flatten(start_dim=1)
x = self._dropout(x)
x = self._fc(x)
return x
@classmethod
def from_name(cls, model_name, override_params=None):
cls._check_model_name_is_valid(model_name)
blocks_args, global_params = get_model_params(model_name, override_params)
return cls(blocks_args=blocks_args, global_params=global_params)
@classmethod
def from_pretrained(cls, model_name, num_classes=1000, in_channels=3):
model = cls.from_name(model_name, override_params={'num_classes': num_classes})
load_pretrained_weights(model, model_name, not_load=False, load_fc=(num_classes == 1000))
if in_channels != 3:
Conv2d = get_same_padding_conv2d(image_size=model._global_params.image_size)
out_channels = round_filters(32, model._global_params)
model._conv_stem = Conv2d(in_channels, out_channels, 3, stride=2, bias=False)
return model
@staticmethod
def _check_model_name_is_valid(model_name, need_pretrained_weights=False):
"""Validate model name. Currently, Only EfficientNet-B0, 1, 2, 3 available"""
num_models = 4 if need_pretrained_weights else 8
valid_models = [f'efficientnet-b{i}' for i in range(num_models)]
if model_name not in valid_models:
raise ValueError(f"Model_name should be one of: {', '.join(valid_models)}")
if __name__ == '__main__':
model = EfficientNet.from_pretrained('efficientnet-b0') |
import tkinter as tk
from tkinter import ttk
def clickMe():
clickMe_Button.configure(text = 'Hello ' + name_Entry.get() +
' ' + number.get())
clickMe_Button.configure(state = 'disabled')
win = tk.Tk()
win.title('The GUI exercise')
clickMe_Button = ttk.Button(win, text = 'Click me', command = clickMe)
clickMe_Button.grid(column = 2, row = 1)
name_Label = ttk.Label(win, text = 'Enter your name')
name_Label.grid(column = 0, row = 0)
name_var = tk.StringVar()  # keep a separate variable so the Entry widget does not shadow it
name_Entry = ttk.Entry(win, width = 12, textvariable = name_var)
name_Entry.grid(column = 0, row = 1)
name_Entry.focus() # cursor focus
label_2 = ttk.Label(win, text = 'Choose a number:')
label_2.grid(column =1, row = 0)
number = tk.StringVar()
number_Combobox = ttk.Combobox(win, width = 12, textvariable = number,
state = 'readonly')
number_Combobox['values'] = (1, 2, 4, 42, 100)
number_Combobox.grid(column = 1, row = 1)
number_Combobox.current(0) # assign a default value to be displayed
win.mainloop()
|
import os
import pytest
from ai.backend.client.config import APIConfig, set_config
@pytest.fixture(autouse=True)
def defconfig():
endpoint = os.environ.get('BACKEND_TEST_ENDPOINT', 'http://127.0.0.1:8081')
access_key = os.environ.get('BACKEND_TEST_ADMIN_ACCESS_KEY',
'AKIAIOSFODNN7EXAMPLE')
secret_key = os.environ.get('BACKEND_TEST_ADMIN_SECRET_KEY',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY')
c = APIConfig(endpoint=endpoint, access_key=access_key, secret_key=secret_key)
set_config(c)
return c
@pytest.fixture
def userconfig():
endpoint = os.environ.get('BACKEND_TEST_ENDPOINT', 'http://127.0.0.1:8081')
access_key = os.environ.get('BACKEND_TEST_USER_ACCESS_KEY',
'AKIANABBDUSEREXAMPLE')
secret_key = os.environ.get('BACKEND_TEST_USER_SECRET_KEY',
'C8qnIo29EZvXkPK_MXcuAakYTy4NYrxwmCEyNPlf')
c = APIConfig(endpoint=endpoint, access_key=access_key, secret_key=secret_key)
set_config(c)
return c
@pytest.fixture
def example_keypair(defconfig):
return (defconfig.access_key, defconfig.secret_key)
@pytest.fixture
def user_keypair(userconfig):
return (userconfig.access_key, userconfig.secret_key)
@pytest.fixture
def dummy_endpoint(defconfig):
return str(defconfig.endpoint) + '/'
|
class calisan():
def __init__(self, isim, soyisim,maas,departman, yas = 10):
print("çalışan sınıfının yapıcı metodu çalıştı")
self.isim = isim
self.soyisim = soyisim
self.yas = yas
self.maas = maas
self.departman = departman
def __str__(self):
return "{} {} {}".format(self.isim,self.soyisim,self.yas)
def bigigoster(self):
print("çalışan sınıfına ait bilgiler gösterilmektedir")
print("-"*50)
print("isim : {}\nsoyisim : {}\ndepartman : {}\nmaaş : {}".format(
self.isim,self.soyisim,self.departman,self.maas))
def zamyap(self,zam_orani):
print("çalışanın maaşına zam yapıldı")
maas = self.maas
self.maas += zam_orani
print("{} {} personelin maaşı : {} tl den {} tl ye yükseldi".format(self.isim,self.soyisim,maas,self.maas))
    def depertmandegistir(self, departman):
        print("çalışanın departmanı değişti")
        eski_departman = self.departman  # keep the old department before overwriting it
        self.departman = departman
        print("{} {} personelin departmanı: {} departmanından {} departmanına geçişi sağlandı".format(
            self.isim, self.soyisim, eski_departman, self.departman))
#when an employee's salary is raised or their department changes, show the user the old and the new values
#e.g. employee x was moved from department a to department b
#e.g. employee x's salary rose from a TL to b TL
# if the yas (age) argument is not passed, the default value defined in the signature applies
#personel = calisan("simge","karademir")
#personel = calisan("simge ","karademir",9999999,"yazılım",100)
#print(personel)
#personel.zamyap(1000)
#personel.depertmandegistir("ogernci")
#personel.bigigoster()
class yonetici(calisan): # the yonetici (manager) class inherits from calisan (employee)
def __init__(self, isim, soyisim, maas, departman,yas, kisi_sayısı):
print("yönetici sınıfı yapıcı metodu çalıştı")
self.isim = isim
self.soyisim = soyisim
self.departman = departman
self.maas = maas
self.kisi_sayısı = int(kisi_sayısı)
self.yas = yas
def print_base(self):
for base in self.__class__.__bases__:
print("miras alınan sınıf : ", base.__name__)
def __str__(self):
return "{} {} {}".format(self.isim,self.soyisim,self.departman)
Yonetici = yonetici("ahmet","mehmet",9000,"sistem",20,35)
print(Yonetici)
Yonetici.print_base()
Yonetici.bigigoster()
Yonetici.zamyap(10)
Yonetici.depertmandegistir("ogrenci") |
from django.contrib import admin
from .models import Blog, Tags
# Register your models here.
@admin.register(Blog)
class BlogAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'blog_type', 'creat_date', 'reads', 'likes')
list_filter = ['creat_date']
search_fields = ['name']
@admin.register(Tags)
class TagsAdmin(admin.ModelAdmin):
pass
|
import pygame
from pygame.locals import *
import numpy as np
import sys
#
# X=600
# Y=600
# cell_size=15
# pygame.init()
# screen=pygame.display.set_mode((X,Y))
# pygame.display.set_caption("LIFE GAME")
# while(1):
# screen.fill((0,0,0))
# A=np.zeros((int(X/cell_size),int(Y/cell_size)))
# pygame.draw.rect(screen,(0,80,0),Rect(10,10,20,20))
# pygame.display.update()
# for event in pygame.event.get():
# if event.type == QUIT: # quit when the close button is pressed
# pygame.quit() # shut down Pygame (the window is closed)
# sys.exit()
def main():
    (w,h) = (400,400) # screen size
    (x,y) = (w/2, h/2)
    pygame.init() # initialize pygame
    pygame.display.set_mode((w, h), 0, 32) # set up the display
    screen = pygame.display.get_surface()
    while (1):
        pygame.display.update() # refresh the screen
        pygame.time.wait(30) # update interval (ms)
        screen.fill((0, 20, 0, 0)) # background color
        # clamp the circle's center when it goes off-screen
if x < 0:
x = 0
if x > w:
x = w
if y < 0:
y = 0
if y > h:
y = h
        # draw the circle
pygame.draw.circle(screen, (0, 200, 0), (int(x), int(y)), 5)
        # handle events
for event in pygame.event.get():
            # when the window close button is pressed
if event.type == QUIT:
pygame.quit()
sys.exit()
            # when a key is pressed
if event.type == KEYDOWN:
                # quit on the ESC key
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
                # arrow keys move the circle's center in that direction
if event.key == K_LEFT:
x -= 1
if event.key == K_RIGHT:
x += 1
if event.key == K_UP:
y -= 1
if event.key == K_DOWN:
y += 1
if event.key == K_SPACE:
x -= 1
if __name__ == "__main__":
main()
|
import json
import os
import sys
import warnings
import deepsecurity as api
from deepsecurity.rest import ApiException
from datetime import datetime
def format_for_csv(line_item):
"""Converts a list into a string of comma-separated values, ending with a newline character.
:param line_item: The list of lists to convert to a string of comma-spearated values
:return: A string that can be saved as a CSV file.
"""
csv_line = ""
for num, item in enumerate(line_item):
csv_line += str(item)
if num != (len(line_item) - 1):
csv_line += ";"
else:
csv_line += "\n"
return csv_line
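# A quick illustration of the helper above (values are hypothetical):
# format_for_csv(["host-1", "12.0.0-563", "active"]) -> "host-1;12.0.0-563;active\n"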
# Setup
if not sys.warnoptions:
warnings.simplefilter("ignore")
# Get the DSM URL and API key from a JSON file
property_file = os.path.dirname(os.path.abspath(__file__)) + '/../properties.json'
with open(property_file) as raw_properties:
properties = json.load(raw_properties)
secret_key = properties['secretkey']
url = properties['url']
api_version = 'v1'
# Add DSM host information to the API client configuration
configuration = api.Configuration()
configuration.host = url
configuration.api_key['api-secret-key'] = secret_key
# Initialization
# Set Any Required Values
api_instance = api.ComputersApi(api.ApiClient(configuration))
# Add AV and IPS information
expand_options = api.Expand()
expand_options.add(api.Expand.computer_status)
expand_options.add(api.Expand.security_updates)
expand_options.add(api.Expand.intrusion_prevention)
expand_options.add(api.Expand.anti_malware)
expand_options.add(api.Expand.interfaces)
expand_options.add(api.Expand.azure_arm_virtual_machine_summary)
expand = expand_options.list()
overrides = False
# Set search criteria
search_criteria = api.SearchCriteria()
search_criteria.id_value = 0
search_criteria.id_test = "greater-than"
# Create a search filter with maximum returned items
page_size = 50
search_filter = api.SearchFilter()
search_filter.max_items = page_size
search_filter.search_criteria = [search_criteria]
# Add column titles to comma-separated values string
csv = "Host Name;Displayname;DNS Name;Agent version;Platform;IP Address;Agent Status;Agent Status Message;PolicyId;GroupId;Last Communication;Last Policy Sent;Last Policy Success;Update Status;AM Module State;AM Status;AM Status Message;AM Update Status;IPS Status;IPS Status Message\n"
try:
# Perform the search and do work on the results
print("Start reading computers")
while True:
computers = api_instance.search_computers(api_version, search_filter=search_filter, expand=expand, overrides=False)
num_found = len(computers.computers)
if num_found == 0:
print("No computers found.")
break
for computer in computers.computers:
# Module information to add to the CSV string
module_info = []
module_info.append(computer.host_name)
module_info.append(computer.display_name)
if computer.azure_arm_virtual_machine_summary:
module_info.append(computer.azure_arm_virtual_machine_summary.dns_name)
else:
module_info.append("None")
module_info.append(computer.agent_version)
module_info.append(computer.platform)
ips_list = []
if computer.interfaces:
for interface in computer.interfaces.interfaces:
if type(interface.ips) is list:
ips_list.append(", ".join(interface.ips))
if computer.azure_arm_virtual_machine_summary:
ips_list.append(computer.azure_arm_virtual_machine_summary.public_ip_address)
ips_list.append(computer.azure_arm_virtual_machine_summary.private_ip_address)
module_info.append(" ".join(ips_list))
module_info.append(computer.computer_status.agent_status)
agent_status_message = ' '.join(computer.computer_status.agent_status_messages)
module_info.append(agent_status_message)
module_info.append(computer.policy_id)
module_info.append(computer.group_id)
if computer.last_agent_communication:
posix_time = int(computer.last_agent_communication)/1000
last_comm = datetime.fromtimestamp(posix_time).isoformat()
else:
last_comm = None
if computer.last_send_policy_request:
posix_time = int(computer.last_send_policy_request)/1000
last_send = datetime.fromtimestamp(posix_time).isoformat()
else:
last_send = None
if computer.last_send_policy_success:
posix_time = int(computer.last_send_policy_success)/1000
last_success = datetime.fromtimestamp(posix_time).isoformat()
else:
last_success = None
module_info.append(last_comm)
module_info.append(last_send)
module_info.append(last_success)
if computer.security_updates:
update_status = "{} ({})".format(computer.security_updates.update_status.status,computer.security_updates.update_status.status_message)
else:
update_status = "No update status available"
module_info.append(update_status)
module_info.append(computer.anti_malware.state)
            # Check that the computer has an agent status
if computer.anti_malware.module_status:
am_agent_status = computer.anti_malware.module_status.agent_status
am_agent_status_message = computer.anti_malware.module_status.agent_status_message
else:
am_agent_status = None
am_agent_status_message = None
module_info.append(am_agent_status)
module_info.append(am_agent_status_message)
update_status = []
if computer.security_updates:
for am_update in computer.security_updates.anti_malware:
update_status.append(" {} ({} {})".format(am_update.name, am_update.version, am_update.latest))
module_info.append(''.join(update_status))
            if computer.intrusion_prevention.module_status:
                ips_agent_status = computer.intrusion_prevention.module_status.agent_status
                ips_agent_status_message = computer.intrusion_prevention.module_status.agent_status_message
            else:
                ips_agent_status = None
                ips_agent_status_message = None
            module_info.append(ips_agent_status)
            module_info.append(ips_agent_status_message)
# Add the module info to the CSV string
csv += format_for_csv(module_info)
# Get the ID of the last computer in the page and return it with the
# number of computers on the page
last_id = computers.computers[-1].id
search_criteria.id_value = last_id
print("Last ID: " + str(last_id), "Computers found: " + str(num_found))
if num_found != page_size:
break
with open("../output/computers.csv", "w") as text_file:
text_file.write(csv)
except ApiException as e:
print("An exception occurred when calling ComputersApi.list_computers: %s\n" % e)
|
from django.shortcuts import render
from django.contrib.auth.forms import AuthenticationForm
from mainapp.models import Category, Board
def index(request):
return render(request, 'mainapp/index.html')
def catalog(request):
categories = Category.objects.all()
context = {
'categories': categories
}
return render(request, 'mainapp/catalog.html', context)
def basket(request):
return render(request, 'mainapp/basket.html')
def secret(request):
return render(request, 'mainapp/secret.html')
def catalog_page(request, pk):
    board = Board.objects.filter(category_id=pk)  # boards belong to a category; the original queried Category by mistake
context = {
'boards': board,
        'page_title': 'catalog page'
}
return render(request, 'mainapp/catalog_page.html', context) |
# coding: utf-8
# Use semantic versioning: MAJOR.MINOR.PATCH
__version__ = '0.5.1'
|
import time
from datetime import datetime
from utils.requests import generate_query_string, make_get_request
FACEIT_API_BASE_URL = 'https://open.faceit.com/data/v4/'
FACEIT_API_ENDPOINTS = {
'players': 'players',
'players_matches': 'players/:player_id:/history'
}
def get_player(parent, api_key, nickname):
params = {
'nickname': nickname
}
request_url = FACEIT_API_BASE_URL + FACEIT_API_ENDPOINTS['players'] + generate_query_string(params)
return make_get_request(parent, request_url, api_key)
def get_player_matches(parent, api_key, player_id, matches_from=None, matches_to=None, offset=0, limit=None):
params = {
'from': matches_from,
'to': matches_to if matches_to else int(time.mktime(datetime.now().timetuple())),
'offset': offset,
'limit': limit
}
endpoint = FACEIT_API_ENDPOINTS['players_matches'].replace(':player_id:', player_id)
request_url = FACEIT_API_BASE_URL + endpoint + generate_query_string(params)
return make_get_request(parent, request_url, api_key)
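# Minimal usage sketch (hypothetical values; `parent` and `api_key` depend on the caller):
#
# player = get_player(parent, api_key, 'some_nickname')
# matches = get_player_matches(parent, api_key, player['player_id'], limit=20)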
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
In memory key value store based on python dict with TCP interface and
basic language parsing.
"""
__author__ = "Niall O'Connor zechs dot marquie at gmail"
__version__ = "1.0"
import cPickle
from functools import wraps
from hashlib import sha1
import logging
import re
import socket
import warnings
logger = logging.getLogger(__name__)
def func2key(func, *args, **kw):
"""
Convert a function and its arguments to a cache key.
"""
kw = kw.items() # dicts have no order so converting to a list of tuples and sort
kw.sort()
return sha1('%s_%s_%s' % (func.__name__, args, kw)).hexdigest()
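# e.g. (hypothetical call) func2key(addit, 1, b=2) hashes the string
# "addit_(1,)_[('b', 2)]" and returns a 40-character sha1 hex digest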
class NCache(object):
def __init__(self, ip='127.0.0.1', port=5005, buffer=1024, key_rexp="[\w\d-]{2,}", pickled=True):
"""
        Create a new cache client connected to the TCP server.
:param str ip: Ip address of tcp server.
:param str port: Port number of tcp server.
:param int buffer: TCP buffer size.
        :param str key_rexp: Key names must match this regex.
:param bool pickled: Pickle data when recording them.
usage:
>>> my_cache = NCache()
>>> my_cache.set('Something', 'Not nothing')
>>> my_cache.get('Something')
Not nothing
"""
super(NCache, self).__init__()
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect((ip, port,))
self.buffer_size = buffer
self.pickled = pickled
self.__rexp = re.compile(key_rexp)
self.__key_rexp = key_rexp
def _execute_command(self, command):
"""
Execute the constructed command and handle the response.
"""
self.conn.send(command)
response = self.conn.recv(self.buffer_size)
if response.startswith('ERROR: '):
raise ValueError(response)
return response
def cachable(self, key_name=None, seconds=None, overwrite=True, cache_until=None):
"""
Decorate an expensive calculation to save on computing. All values are
        pickled before being stored. Keys may be hashed for some security. They may
be prefixed for easy lookups.
Note - Only cache module methods. To cache class methods, specify a key_name.
Additional fix/support is needed for caching class methods
:param str key_name: The specific name for this key. If omitted this key will be made of the calling function name and a list of its args.
:param int seconds: The expiry time of this key
:param bool overwrite: A flag to set whether a key may be overwritten
:param datetime.datetime cache_until: Datetime that this key will expire on
Usage:
>>> cache = Cache()
>>> @cache.cachable()
def addit(a, b):
return a+b
"""
def collect(f):
# using functools.wrap allows all func parametres to be passed to this decorator
# while preserving the doc strings and other meta data.
@wraps(f)
def do_caching(*args, **kw):
# key is hashed by func2key
key = func2key(f, *args, **kw) if key_name is None else key_name
                value = self.get(key)  # get() takes no 'hash' keyword; the key is already hashed by func2key
if not value: # If value is none nothing exists and we must call the decorated function
value = f(*args, **kw)
if value is None:
                        # You shouldn't decorate functions that return None with a cache decorator.
                        # The warning is helpfully printed before the raise statement.
warnings.warn("""
Decorated function must exit using the return keyword and must NOT return None.
Instead return something similar but meaningful in the context of your function eg:
[], {}, 0, False, str("None"), str("No records") etc.""")
raise TypeError('NoneType is not cachable. If required None can be cached using Cache.set()')
self.set(key, value, seconds=seconds)
return value
return do_caching
return collect
def __validate_key(self, key):
"""
        Ensures a key name matches the regular expression in self.__key_rexp.
        Raises an exception if the key does not pass the check.
:param str key: A key name.
"""
is_match = self.__rexp.match(key)
        if not is_match or is_match.group() != key:
raise KeyError('"%s" is an invalid key as it does not match with the following regular expression, %s'%(key, self.__key_rexp))
return key
def set(self, key, value, seconds=None):
"""
Set a key in the cache.
:param str key: The specific name for this key.
:param object value: The object to be cached.
:param int seconds: The expiry time of this key.
"""
key = self.__validate_key(key)
if self.pickled:
value = cPickle.dumps(value)
ttl = ""
if seconds is not None:
ttl = "TTL={0}".format(seconds)
command = "SET {0} {1} {2}".format(key, value, ttl)
return self._execute_command(command)
def get(self, key):
"""
Get a key from the cache
:param str key: The specific name for this key.
"""
key = self.__validate_key(key)
command = "GET {0}".format(key)
resp = self._execute_command(command)
if resp == 'NOT FOUND':
resp = None
elif self.pickled:
resp = cPickle.loads(resp)
return resp
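# A minimal usage sketch (assumes a cache server is listening on 127.0.0.1:5005):
#
# cache = NCache()
# cache.set('answer', 42, seconds=60)   # pickled and stored with a 60 second TTL
# print(cache.get('answer'))            # -> 42
#
# @cache.cachable(key_name='expensive-sum')
# def addit(a, b):
#     return a + b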
|
import os
from pathlib import Path
from ipaddress import IPv4Network
from urllib.request import urlretrieve
import pytest
from ips import (ServiceIPRange, parse_ipv4_service_ranges,
get_aws_service_range)
URL = "https://bites-data.s3.us-east-2.amazonaws.com/ip-ranges.json"
TMP = os.getenv("TMP", "/tmp")
PATH = Path(TMP, "ip-ranges.json")
IP = IPv4Network('192.0.2.8/29')
@pytest.fixture(scope='module')
def json_file():
"""Import data into tmp folder"""
urlretrieve(URL, PATH)
return PATH
def test_ValueError(json_file):
with pytest.raises(ValueError) as excinfo:
service_ranges = parse_ipv4_service_ranges(json_file)
get_aws_service_range("256.0.0.0", service_ranges)
assert 'Address must be a valid IPv4 address' in str(excinfo.value)
def test_valid(json_file):
service_ranges = parse_ipv4_service_ranges(json_file)
x = get_aws_service_range("35.180.0.0", service_ranges)
assert x == [ServiceIPRange(service='AMAZON', region='eu-west-3', cidr=IPv4Network('35.180.0.0/16')),
ServiceIPRange(service='EC2', region='eu-west-3', cidr=IPv4Network('35.180.0.0/16'))]
assert str(x[0]) == '35.180.0.0/16 is allocated to the AMAZON service in the eu-west-3 region'
|
class man_player:
def __init__(self, name, id, comm_module, position=None, team=None, game=None, cards=None):
self.name = name
self.id = id
self.team = team
self.cards = cards
self.position = position
self.game = game
self.comm = comm_module
def set_game(self, game):
self.game = game
def add_team(self, team):
self.team = team
def set_cards(self, cards):
self.cards = cards
def use_card(self, card):
self.cards.remove(card)
def send(self, command, data=None):
self.comm.send_client(command, self.id, data=data)
def reset(self):
self.cards = []
def set_team(self, team):
self.team = team
class man_team:
def __init__(self, name, position):
self.name = name
self.players = [None]*2
self.score = 0
self.prev_scores = []
self.game = None
self.started = False
self.cards = []
self.position = position
def set_game(self, game):
self.game = game
def increase_score(self, amount):
self.score += amount
def add_player(self, player):
# 0 and 1 should go to first place of team, 2 and 3 to second place, position of team doesn't matter
self.players[player.position//2] = player
def remove_player(self, player):
self.players.remove(player)
def reset(self):
self.cards = []
self.score = 0
self.started = False
def add_points(self, value):
self.score += value
self.prev_scores.append(self.score)
def reset_round(self):
self.cards = []
class man_game:
def __init__(self, table_name):
self.name = table_name
self.players = [None] * 4
self.teams = [None] * 2
self.troef_chooser_pos = 0
self.round_start_pos = 1
self.round_play_offset = 0
self.troef_choosen = False
self.troef = None
self.table_cards = []
self.cards_played = 0
self.viewers = []
self.started = False
self.removed_players = []
self.state_dict={"not_started": True, "rejoin_waiting": False, "playing": False, "wait_decision": False}
def add_team(self, team):
if self.teams[team.position] is None:
self.teams[team.position] = team
            # players are placed directly in their playing order: teammates sit two seats apart, interleaving the two teams
for i in range(len(team.players)):
self.players[team.position + 2*i] = team.players[i]
return True
else:
print("SHOULD NOT HAPPEN, team added when exist")
return False
def get_team(self, name):
for team in self.teams:
if team is not None and team.name == name:
return team
return None
def add_player(self, player):
if self.players[player.position] is None:
self.players[player.position] = player
return True
else:
print("SHOULD NOT HAPPEN, player added when exist")
return False
def add_viewer(self, viewer):
self.viewers.append(viewer)
def __len__(self):
amount = 0
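        # `player is not None` is a bool, so adding it counts occupied seats as 1 and empty ones as 0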
for player in self.players:
amount += player is not None
return amount
def set_default_indices(self):
self.round_play_offset = 0
self.round_start_pos = 1
def remove_viewer(self, viewer):
self.viewers.remove(viewer)
def remove_player(self, this_player):
this_team = None
for team in self.teams:
if team is not None:
for player in team.players:
if player is not None and player == this_player:
this_team = team
index_team = this_team.players.index(this_player)
this_team.players[index_team] = None
index_game = self.players.index(this_player)
self.players[index_game] = None
self.removed_players.append(this_player)
def reset(self):
self.troef_chooser_pos = 0
self.round_start_pos = 1
self.round_play_offset = 0
self.troef_choosen = False
self.troef = None
self.table_cards = []
self.cards_played = 0
self.viewers = []
self.started = False
self.removed_players = []
for team in self.teams:
if team is not None:
team.reset()
for player in team.players:
if player is not None:
player.reset()
def get_deleted_player(self, name):
for player in self.removed_players:
if player.name == name:
return player
return None
def get_state(self, name):
return self.state_dict[name]
def set_state(self, name):
for key in self.state_dict:
self.state_dict[key] = False
self.state_dict[name] = True
def reinstate_player(self, player):
this_team = player.team
this_team.add_player(player)
self.add_player(player)
class viewer:
def __init__(self, id, comm_module):
self.id = id
self.comm = comm_module
self.game = None
def send(self, command, data=None):
self.comm.send_client(command, self.id, data=data)
def set_game(self, game):
self.game = game |
class Solution(object):
def isPalindrome(self, s):
"""
https://leetcode.com/problems/valid-palindrome/
        should have used the str.isalnum() method instead of manual character range checks
"""
x = ''
for i in range(len(s)):
if (s[i] >= 'A' and s[i] <= 'Z') or (s[i] >= 'a' and s[i] <= 'z') or (s[i] >='0' and s[i]<='9'):
x += s[i]
for i in range(len(x)//2):
if x[i].lower() != x[-i-1].lower():
return False
return True
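    # Example: Solution().isPalindrome("A man, a plan, a canal: Panama") -> True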
"""
    alternate solution: O(1) extra space, a single two-pointer pass (at most N/2 comparisons).
def isPalindrome(self, s):
l, r = 0, len(s)-1
while l < r:
while l < r and not s[l].isalnum():
l += 1
while l <r and not s[r].isalnum():
r -= 1
if s[l].lower() != s[r].lower():
return False
l +=1; r -= 1
return True
""" |
#!/usr/bin/env python
from auth import get_cred, test_auth
if __name__ == '__main__':
import argh
argh.dispatch_commands([test_auth, get_cred])
|
class Car:
def __init__(self,model, regno, no_gears):
self.model = model
self.regno = regno
self.no_gears = no_gears
self.is_started = False
self.c_gear = 0
def start(self):
if self.is_started:
print(f"{self.model} with reg_no: {self.regno} is already started")
else:
print(f"{self.model} with reg_no: {self.regno} started....")
self.is_started = True
def stop(self):
if self.is_started:
print(f"{self.model} with reg_no: {self.regno} stopped....")
self.is_started = False
else:
print(f"{self.model} with reg_no: {self.regno} has already stopped....")
def change_gear(self):
if self.is_started:
if self.c_gear < self.no_gears:
self.c_gear += 1
print(f"{self.model} with reg_no: {self.regno} changed gear....")
else:
print(f"{self.model} with reg_no: {self.regno} already on top gear....")
else:
print(f"{self.model} with reg_no: {self.regno} has already stopped... can't change gear....")
def showInfo(self):
print(f"Name: {self.model} - regno: {self.regno} - Started: {self.is_started} - No of gears: {self.no_gears} - gear_count: {self.c_gear}")
if __name__ == "__main__":
bmw = Car("KA01300",4)
audi = Car("KA450067",6)
benz = Car("KA450068",5)
Nano = Car("KA450069",4)
audi.start()
audi.stop()
audi.change_gear()
lst = [bmw, audi, benz, Nano]
for car in lst:
car.showInfo()
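    # count the cars that are started but still in gear 0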
c= len(list(filter(lambda x: x.is_started and x.c_gear == 0, lst)))
print(c)
|
#!/usr/bin/python
# Copyright 2014 Symantec.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class HookCreate (object):
def pre(self, context, *args, **kwargs):
LOG.debug("HookCreate PRE start")
import inspect
orig_kwargs = inspect.stack()[1][0].f_locals['kwargs']
metadata = kwargs.get('metadata')
metadata.update({'user_name': args[0].user_name})
LOG.debug("Contrail api start")
import contrailrest
vdns=contrailrest.ContrailRest(args[0].project_id, args[0].auth_token)
domain_name = vdns.generate_tenant_dns_zone()
if domain_name:
metadata.update({'dns_domain': domain_name})
else:
metadata.update({'dns_domain': 'none'})
LOG.debug("Contrail api end")
orig_kwargs["metadata"] = metadata
LOG.debug("HookCreate PRE end")
def post(self, context, *args, **kwargs):
LOG.debug("HookCreate POST pass")
class HookDelete (object):
def pre(self, *args, **kwargs):
LOG.debug("HookDelete PRE pass")
def post(self, *args, **kwargs):
LOG.debug("HookDelete POST pass")
|
#multi dimensional list 2
import math
import random
listTable = [[0] * 10 for i in range(10)]
for i in range(10):
for j in range(10):
listTable[i][j] = "{} : {}" .format(i,j)
for i in range(10):
for j in range(10):
print(listTable[i][j], end = " || ")
print()
|
# -*- coding: utf-8 -*-
from decimal import Decimal
from django.db import models
from django.conf import settings
from django_extensions.db.fields import AutoSlugField
from model_utils import Choices
from abs_models import Abs_titulado_slugfy
from Corretor.utils import get_corretor_choices, get_corretor_por_id
from Corretor.base import CorretorException
from Corretor.models import RetornoCorrecao
from Corretor.tasks import run_corretor_validar_gabarito
from tipo_questao import TipoQuestao
from lockable import Lockable
class Questao(Abs_titulado_slugfy,Lockable):
"""
    Represents a Questao (question), that is, a problem attached to an avaliacao (exam),
    which in turn is attached to a student.
"""
CORRETORES = Choices(*get_corretor_choices())
# CORRETORES = Choices((0,'base','Base'))
enunciado = models.TextField(u"Enunciado")
respostaDiscursiva = models.TextField(u"Resposta Discursiva",blank=True, null=True)
    #:The percentage weight of the programming part in this question.
percentNotaProgramacao = models.DecimalField(u"Percentual da Nota de Programação",max_digits=10, decimal_places=2,default=Decimal("100"))
    #:The percentage weight of the multiple-choice part in this question.
percentNotaMultipla = models.DecimalField(u"Percentual da Nota das Multiplas Escolhas",max_digits=10, decimal_places=2,default=Decimal("0"))
    #:The percentage weight of the essay (discursiva) part in this question.
percentNotaDiscursiva = models.DecimalField(u"Percentual da Nota da Discursiva",max_digits=10, decimal_places=2,default=Decimal("0"))
    #:indicates whether a question is ready to be used in an exam template.
verificada = models.BooleanField(u"Verificada",default=False)
    #:the author (user) of this question
autor = models.ForeignKey('auth.User',blank=True,null=True, related_name='questoes_autor')
id_corretor = models.SmallIntegerField(u"Corretor",choices=CORRETORES)#, default=CORRETORES.c)
    #:the type of the question, used for filtering
tipo = models.ManyToManyField(TipoQuestao, related_name="questoes")
retorno_correcao = models.ForeignKey('Corretor.RetornoCorrecao',blank=True,null=True, on_delete=models.SET_NULL)
@property
def corretor(self):
"recupera um corretor dado o id_corretor"
return get_corretor_por_id(self.id_corretor)
class Meta:
verbose_name = u'Questão'
app_label = 'Questao'
def __unicode__(self):
return self.slug
def get_rand_entrada(self):
"retorna uma entrada randomica"
import random
count = self.entradasGabarito.all().count()
rand_entrada_num = 0
if count >= 1:
rand_entrada_num = random.randint(0,count -1)
return self.entradasGabarito.all()[rand_entrada_num]
else:
return None
# print rand_entrada_num
def verificar_questao(self):
"""verifica se uma questão esta pronta para ser usada em uma avaliacao
Ou seja, pode ser compilada e executada.
"""
        #skip this check if the question has no programming part
if not self.percentNotaProgramacao > 0:
self.verificada=True
return
corretor = self.corretor()
retorno = self.get_retorno_or_create
self.save(verificar=False)
corretor_task = run_corretor_validar_gabarito.delay(corretor=corretor,questao=self)
retorno = retorno.__class__.objects.get(pk=retorno.pk)
retorno.task_id = corretor_task.task_id
retorno.save()
@property
def is_programacao(self):
"retorna true se essa for uma questao de programação"
return self.percentNotaProgramacao > Decimal("0")
@property
def get_retorno_or_create(self):
retorno = self.retorno_correcao
if not self.retorno_correcao:
retorno = RetornoCorrecao()
retorno.save()
self.retorno_correcao = retorno
return retorno
def save(self, *args, **kwargs):
        #Before saving, check whether the question is fit to be used in an exam,
        #i.e. it compiles and runs without errors.
        verificar = kwargs.get('verificar',True)
        if self.slug != "" and self.slug is not None and verificar:
self.verificar_questao()
try:
kwargs.pop('verificar')
except KeyError:
pass
super(Questao, self).save(*args, **kwargs)
|
"""Defining the benchmarks for OoD generalization in time-series"""
import os
import copy
import h5py
from PIL import Image
import warnings
import scipy.io
import numpy as np
from scipy import fft
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
DATASETS = [
# 1D datasets
'Basic_Fourier',
'Spurious_Fourier',
# Small images
"TMNIST",
# Small correlation shift dataset
"TCMNIST_seq",
"TCMNIST_step",
## EEG Dataset
"CAP",
"SEDFx",
"MI",
## Financial Dataset
"StockVolatility",
## Sign Recognition
"LSA64",
## Activity Recognition
"HAR"
]
def get_dataset_class(dataset_name):
""" Return the dataset class with the given name.
Taken from : https://github.com/facebookresearch/DomainBed/
Args:
dataset_name (str): Name of the dataset to get the function of. (Must be a part of the DATASETS list)
Returns:
function: The __init__ function of the desired dataset that takes as input ( flags: parser arguments of the train.py script, training_hparams: set of training hparams from hparams.py )
Raises:
NotImplementedError: Dataset name not found in the datasets.py globals
"""
if dataset_name not in globals() or dataset_name not in DATASETS:
raise NotImplementedError("Dataset not found: {}".format(dataset_name))
return globals()[dataset_name]
def num_environments(dataset_name):
""" Returns the number of environments of a dataset
Args:
dataset_name (str): Name of the dataset to get the number of environments of. (Must be a part of the DATASETS list)
Returns:
int: Number of environments of the dataset
"""
return len(get_dataset_class(dataset_name).ENVS)
def get_environments(dataset_name):
""" Returns the environments of a dataset
Args:
dataset_name (str): Name of the dataset to get the number of environments of. (Must be a part of the DATASETS list)
Returns:
list: list of environments of the dataset
"""
return get_dataset_class(dataset_name).ENVS
def get_setup(dataset_name):
""" Returns the setup of a dataset
Args:
dataset_name (str): Name of the dataset to get the number of environments of. (Must be a part of the DATASETS list)
Returns:
        str: The setup of the dataset ('seq' or 'step')
"""
return get_dataset_class(dataset_name).SETUP
def XOR(a, b):
""" Returns a XOR b (the 'Exclusive or' gate)
Args:
        a (Tensor): First input (tensor of 0./1. values)
        b (Tensor): Second input (tensor of 0./1. values)
    Returns:
        Tensor: The element-wise output of the XOR gate
"""
return ( a - b ).abs()
def bernoulli(p, size):
""" Returns a tensor of 1. (True) or 0. (False) resulting from the outcome of a bernoulli random variable of parameter p.
Args:
p (float): Parameter p of the Bernoulli distribution
        size (int...): A sequence of integers defining the shape of the output tensor
Returns:
Tensor: Tensor of Bernoulli random variables of parameter p
"""
return ( torch.rand(size) < p ).float()
def make_split(dataset, holdout_fraction, seed=0, sort=False):
""" Split a Torch TensorDataset into (1-holdout_fraction) / holdout_fraction.
Args:
dataset (TensorDataset): Tensor dataset that has 2 tensors -> data, targets
        holdout_fraction (float): Fraction of the dataset that will be in the validation set
seed (int, optional): seed used for the shuffling of the data before splitting. Defaults to 0.
        sort (bool, optional): If ``True``, the dataset will be sorted after splitting. Defaults to False.
Returns:
TensorDataset: 1-holdout_fraction part of the split
        TensorDataset: holdout_fraction part of the split
"""
in_keys, out_keys = get_split(dataset, holdout_fraction, seed=seed, sort=sort)
in_split = dataset[in_keys]
out_split = dataset[out_keys]
return torch.utils.data.TensorDataset(*in_split), torch.utils.data.TensorDataset(*out_split)
def get_split(dataset, holdout_fraction, seed=0, sort=False):
""" Generates the keys that are used to split a Torch TensorDataset into (1-holdout_fraction) / holdout_fraction.
Args:
dataset (TensorDataset): TensorDataset to be split
        holdout_fraction (float): Fraction of the dataset that will be in the out (validation) set
seed (int, optional): seed used for the shuffling of the data before splitting. Defaults to 0.
        sort (bool, optional): If ``True``, the dataset will be sorted after splitting. Defaults to False.
Returns:
list: in (1-holdout_fraction) keys of the split
list: out (holdout_fraction) keys of the split
"""
split = int(len(dataset)*holdout_fraction)
keys = list(range(len(dataset)))
np.random.RandomState(seed).shuffle(keys)
in_keys = keys[split:]
out_keys = keys[:split]
if sort:
in_keys.sort()
out_keys.sort()
return in_keys, out_keys
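# A small hedged example of the two split helpers above:
#
# dataset = torch.utils.data.TensorDataset(torch.randn(10, 5), torch.zeros(10).long())
# train_set, val_set = make_split(dataset, holdout_fraction=0.2, seed=0)
# len(train_set), len(val_set)   # -> (8, 2)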
class InfiniteSampler(torch.utils.data.Sampler):
""" Infinite Sampler for PyTorch.
Inspired from : https://github.com/facebookresearch/DomainBed
Args:
sampler (torch.utils.data.Sampler): Sampler to be used for the infinite sampling.
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
for batch in self.sampler:
yield batch
def __len__(self):
return len(self.sampler)
class InfiniteLoader(torch.utils.data.IterableDataset):
""" InfiniteLoader is a torch.utils.data.IterableDataset that can be used to infinitely iterate over a finite dataset.
Inspired from : https://github.com/facebookresearch/DomainBed
Args:
dataset (Dataset): Dataset to be iterated over
batch_size (int): Batch size of the dataset
num_workers (int, optional): Number of workers to use for the data loading. Defaults to 0.
"""
def __init__(self, dataset, batch_size, num_workers=0):
super(InfiniteLoader, self).__init__()
self.dataset = dataset
sampler = torch.utils.data.RandomSampler(dataset, replacement=True)
batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size, drop_last=True)
self.infinite_iterator = iter(
torch.utils.data.DataLoader(dataset, batch_sampler=InfiniteSampler(batch_sampler), num_workers=num_workers)
)
def __iter__(self):
while True:
yield next(self.infinite_iterator)
def __len__(self):
return len(self.infinite_iterator)
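# Minimal usage sketch for InfiniteLoader (commented out so it does not run at import time):
#
# ds = torch.utils.data.TensorDataset(torch.randn(100, 4), torch.zeros(100).long())
# loader = InfiniteLoader(ds, batch_size=16)
# x, y = next(iter(loader))   # x: (16, 4), y: (16,); iteration never raises StopIteration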
class Multi_Domain_Dataset:
""" Abstract class of a multi domain dataset for OOD generalization.
Every multi domain dataset must redefine the important attributes: SETUP, PRED_TIME, ENVS, INPUT_SHAPE, OUTPUT_SIZE
The data dimension needs to be (batch_size, SEQ_LEN, *INPUT_SHAPE)
TODO:
* Make a package test that checks if every class has 'time_pred' and 'setup'
"""
#:int: The number of training steps taken for this dataset
N_STEPS = 5001
#:int: The frequency of results update
CHECKPOINT_FREQ = 100
#:int: The number of workers used for fast dataloaders used for validation
N_WORKERS = 4
#:string: The setup of the dataset ('seq' or 'step')
SETUP = None
#:int: The sequence length of the dataset
SEQ_LEN = None
#:list: The time steps where predictions are made
PRED_TIME = [None]
#:list: The environments of the dataset
ENVS = [None]
#:int: The shape of the input (excluding batch size and time dimension)
INPUT_SHAPE = None
#:int: The size of the output
OUTPUT_SIZE = None
def __init__(self):
pass
def get_class_weight(self):
""" Compute class weight for class balanced training
Returns:
list: list of weights of length OUTPUT_SIZE
"""
_, train_loaders = self.get_train_loaders()
n_labels = torch.zeros(self.OUTPUT_SIZE)
for env_loader in train_loaders:
labels = env_loader.dataset.tensors[1][:]
for i in range(self.OUTPUT_SIZE):
n_labels[i] += torch.eq(torch.as_tensor(labels), i).sum()
weights = n_labels.max() / n_labels
return weights
def get_train_loaders(self):
""" Fetch all training dataloaders and their ID
Returns:
list: list of string names of the data splits used for training
list: list of dataloaders of the data splits used for training
"""
return self.train_names, self.train_loaders
def get_val_loaders(self):
""" Fetch all validation/test dataloaders and their ID
Returns:
list: list of string names of the data splits used for validation and test
list: list of dataloaders of the data splits used for validation and test
"""
return self.val_names, self.val_loaders
def split_data(self, out, labels):
""" Group data and prediction by environment
Args:
out (Tensor): output from a model of shape ((n_env-1)*batch_size, len(PRED_TIME), output_size)
labels (Tensor): labels of shape ((n_env-1)*batch_size, len(PRED_TIME), output_size)
Returns:
Tensor: The reshaped output (n_train_env, batch_size, len(PRED_TIME), output_size)
Tensor: The labels (n_train_env, batch_size, len(PRED_TIME))
"""
n_train_env = len(self.ENVS)-1 if self.test_env is not None else len(self.ENVS)
out_split = torch.zeros((n_train_env, self.batch_size, *out.shape[1:])).to(out.device)
labels_split = torch.zeros((n_train_env, self.batch_size, labels.shape[-1])).long().to(labels.device)
all_logits_idx = 0
for i in range(n_train_env):
out_split[i,...] = out[all_logits_idx:all_logits_idx + self.batch_size,...]
labels_split[i,...] = labels[all_logits_idx:all_logits_idx + self.batch_size,...]
all_logits_idx += self.batch_size
return out_split, labels_split
class Basic_Fourier(Multi_Domain_Dataset):
""" Fourier_basic dataset
A dataset of 1D sinusoid signal to classify according to their Fourier spectrum.
Args:
flags (argparse.Namespace): argparse of training arguments
        training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
Note:
No download is required as it is purely synthetic
"""
SETUP = 'seq'
SEQ_LEN = 50
PRED_TIME = [49]
ENVS = ['no_spur']
INPUT_SHAPE = [1]
OUTPUT_SIZE = 2
def __init__(self, flags, training_hparams):
super().__init__()
# Make important checks
        assert flags.test_env is None, "You are using a dataset with only a single environment; there cannot be a test environment"
# Save stuff
self.test_env = flags.test_env
self.batch_size = training_hparams['batch_size']
self.class_balance = training_hparams['class_balance']
## Define label 0 and 1 Fourier spectrum
self.fourier_0 = np.zeros(1000)
self.fourier_0[900] = 1
self.fourier_1 = np.zeros(1000)
self.fourier_1[850] = 1
## Make the full time series with inverse fft
signal_0 = fft.irfft(self.fourier_0, n=10000)
signal_1 = fft.irfft(self.fourier_1, n=10000)
signal_0 /= np.max(np.abs(signal_0))
signal_1 /= np.max(np.abs(signal_1))
## Sample signals frames with a bunch of offsets
all_signal_0 = torch.zeros(0,50,1)
all_signal_1 = torch.zeros(0,50,1)
for i in range(0, 50, 2):
            offset_signal_0 = copy.deepcopy(signal_0)[i:i-50]
            offset_signal_1 = copy.deepcopy(signal_1)[i:i-50]
            # the signals are numpy arrays here, so convert them to tensors (numpy has no .clone())
            split_signal_0 = torch.tensor(offset_signal_0.reshape(-1,50,1)).float()
            split_signal_1 = torch.tensor(offset_signal_1.reshape(-1,50,1)).float()
all_signal_0 = torch.cat((all_signal_0, split_signal_0), dim=0)
all_signal_1 = torch.cat((all_signal_1, split_signal_1), dim=0)
signal = torch.cat((all_signal_0, all_signal_1), dim=0)
## Create the labels
labels_0 = torch.zeros((all_signal_0.shape[0],1)).long()
labels_1 = torch.ones((all_signal_1.shape[0],1)).long()
labels = torch.cat((labels_0, labels_1), dim=0)
## Create tensor dataset and dataloader
self.train_names, self.train_loaders = [], []
self.val_names, self.val_loaders = [], []
for i, e in enumerate(self.ENVS):
dataset = torch.utils.data.TensorDataset(signal, labels)
in_dataset, out_dataset = make_split(dataset, flags.holdout_fraction, seed=i)
in_loader = InfiniteLoader(in_dataset, batch_size=training_hparams['batch_size'])
self.train_names.append(e+'_in')
self.train_loaders.append(in_loader)
fast_in_loader = torch.utils.data.DataLoader(copy.deepcopy(in_dataset), batch_size=1028, shuffle=False)
self.val_names.append(e+'_in')
self.val_loaders.append(fast_in_loader)
fast_out_loader = torch.utils.data.DataLoader(out_dataset, batch_size=1028, shuffle=False)
self.val_names.append(e+'_out')
self.val_loaders.append(fast_out_loader)
class Spurious_Fourier(Multi_Domain_Dataset):
""" Spurious_Fourier dataset
A dataset of 1D sinusoid signal to classify according to their Fourier spectrum.
Peaks in the fourier spectrum are added to the signal that are spuriously correlated to the label.
Different environment have different correlation rates between the labels and the spurious peaks in the spectrum.
Args:
flags (argparse.Namespace): argparse of training arguments
        training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
Note:
No download is required as it is purely synthetic
"""
SETUP = 'seq'
INPUT_SHAPE = [1]
OUTPUT_SIZE = 2
SEQ_LEN = 50
PRED_TIME = [49]
#:float: Level of noise added to the labels
LABEL_NOISE = 0.25
#:list: The correlation rate between the label and the spurious peaks
ENVS = [0.1, 0.8, 0.9]
def __init__(self, flags, training_hparams):
super().__init__()
if flags.test_env is not None:
assert flags.test_env < len(self.ENVS), "Test environment chosen is not valid"
else:
warnings.warn("You don't have any test environment")
## Save stuff
self.test_env = flags.test_env
self.class_balance = training_hparams['class_balance']
self.batch_size = training_hparams['batch_size']
## Define label 0 and 1 Fourier spectrum
self.fourier_0 = np.zeros(1000)
self.fourier_0[900] = 1
self.fourier_1 = np.zeros(1000)
self.fourier_1[850] = 1
## Define the spurious Fourier spectrum (one direct and the inverse)
self.direct_fourier_0 = copy.deepcopy(self.fourier_0)
self.direct_fourier_1 = copy.deepcopy(self.fourier_1)
self.direct_fourier_0[250] = 0.5
self.direct_fourier_1[400] = 0.5
self.inverse_fourier_0 = copy.deepcopy(self.fourier_0)
self.inverse_fourier_1 = copy.deepcopy(self.fourier_1)
self.inverse_fourier_0[400] = 0.5
self.inverse_fourier_1[250] = 0.5
## Create the sequences for direct and inverse
direct_signal_0 = fft.irfft(self.direct_fourier_0, n=10000)
direct_signal_0 = torch.tensor( direct_signal_0.reshape(-1,50,1) ).float()
direct_signal_0 /= direct_signal_0.max()
direct_signal_1 = fft.irfft(self.direct_fourier_1, n=10000)
direct_signal_1 = torch.tensor( direct_signal_1.reshape(-1,50,1) ).float()
direct_signal_1 /= direct_signal_1.max()
direct_signal_0, direct_signal_1 = self.super_sample(direct_signal_0, direct_signal_1)
perm_0 = torch.randperm(direct_signal_0.shape[0])
direct_signal_0 = direct_signal_0[perm_0,:]
perm_1 = torch.randperm(direct_signal_1.shape[0])
direct_signal_1 = direct_signal_1[perm_1,:]
direct_signal = [direct_signal_0, direct_signal_1]
inverse_signal_0 = fft.irfft(self.inverse_fourier_0, n=10000)
inverse_signal_0 = torch.tensor( inverse_signal_0.reshape(-1,50,1) ).float()
inverse_signal_0 /= inverse_signal_0.max()
inverse_signal_1 = fft.irfft(self.inverse_fourier_1, n=10000)
inverse_signal_1 = torch.tensor( inverse_signal_1.reshape(-1,50,1) ).float()
inverse_signal_1 /= inverse_signal_1.max()
inverse_signal_0, inverse_signal_1 = self.super_sample(inverse_signal_0, inverse_signal_1)
perm_0 = torch.randperm(inverse_signal_0.shape[0])
inverse_signal_0 = inverse_signal_0[perm_0,:]
perm_1 = torch.randperm(inverse_signal_1.shape[0])
inverse_signal_1 = inverse_signal_1[perm_1,:]
inverse_signal = [inverse_signal_0, inverse_signal_1]
## Create the environments with different correlations
env_size = 4000
self.train_names, self.train_loaders = [], []
self.val_names, self.val_loaders = [], []
for i, e in enumerate(self.ENVS):
## Create set of labels
env_labels_0 = torch.zeros((env_size // 2, 1)).long()
env_labels_1 = torch.ones((env_size // 2, 1)).long()
env_labels = torch.cat((env_labels_0, env_labels_1))
## Fill signal
env_signal = torch.zeros((env_size, 50, 1))
for j, label in enumerate(env_labels):
# Label noise
if bool(bernoulli(self.LABEL_NOISE, 1)):
# Correlation to label
if bool(bernoulli(e, 1)):
env_signal[j,...] = inverse_signal[label][0,...]
inverse_signal[label] = inverse_signal[label][1:,...]
else:
env_signal[j,...] = direct_signal[label][0,...]
direct_signal[label] = direct_signal[label][1:,...]
# Flip the label
env_labels[j, -1] = XOR(label, 1)
else:
if bool(bernoulli(e, 1)):
env_signal[j,...] = direct_signal[label][0,...]
direct_signal[label] = direct_signal[label][1:,...]
else:
env_signal[j,...] = inverse_signal[label][0,...]
inverse_signal[label] = inverse_signal[label][1:,...]
# Make Tensor dataset
dataset = torch.utils.data.TensorDataset(env_signal, env_labels)
in_dataset, out_dataset = make_split(dataset, flags.holdout_fraction, seed=i)
if i != self.test_env:
in_loader = InfiniteLoader(in_dataset, batch_size=training_hparams['batch_size'])
self.train_names.append(str(e) + '_in')
self.train_loaders.append(in_loader)
fast_in_loader = torch.utils.data.DataLoader(copy.deepcopy(in_dataset), batch_size=4000, shuffle=False)
self.val_names.append(str(e) + '_in')
self.val_loaders.append(fast_in_loader)
fast_out_loader = torch.utils.data.DataLoader(out_dataset, batch_size=4000, shuffle=False)
self.val_names.append(str(e) + '_out')
self.val_loaders.append(fast_out_loader)
def super_sample(self, signal_0, signal_1):
""" Sample signals frames with a bunch of offsets """
all_signal_0 = torch.zeros(0,50,1)
all_signal_1 = torch.zeros(0,50,1)
for i in range(0, 50, 2):
new_signal_0 = copy.deepcopy(signal_0)[i:i-50]
new_signal_1 = copy.deepcopy(signal_1)[i:i-50]
split_signal_0 = new_signal_0.reshape(-1,50,1).clone().detach().float()
split_signal_1 = new_signal_1.reshape(-1,50,1).clone().detach().float()
all_signal_0 = torch.cat((all_signal_0, split_signal_0), dim=0)
all_signal_1 = torch.cat((all_signal_1, split_signal_1), dim=0)
return all_signal_0, all_signal_1
class TMNIST(Multi_Domain_Dataset):
""" Temporal MNIST dataset
Each sample is a sequence of 4 MNIST digits.
The task is to predict at each step if the sum of the current digit and the previous one is odd or even.
Args:
flags (argparse.Namespace): argparse of training arguments
        training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
Note:
        The MNIST dataset needs to be downloaded; this is done automatically if the dataset isn't in the given data_path
"""
N_STEPS = 5001
SETUP = 'seq'
SEQ_LEN = 4
PRED_TIME = [1, 2, 3]
INPUT_SHAPE = [28,28]
OUTPUT_SIZE = 2
ENVS = ['grey']
def __init__(self, flags, training_hparams):
super().__init__()
        assert flags.test_env is None, "You are using a dataset with only a single environment; there cannot be a test environment"
# Save stuff
self.test_env = flags.test_env
self.batch_size = training_hparams['batch_size']
## Import original MNIST data
MNIST_tfrm = transforms.Compose([ transforms.ToTensor() ])
# Get MNIST data
train_ds = datasets.MNIST(flags.data_path, train=True, download=True, transform=MNIST_tfrm)
test_ds = datasets.MNIST(flags.data_path, train=False, download=True, transform=MNIST_tfrm)
# Concatenate all data and labels
MNIST_images = torch.cat((train_ds.data.float(), test_ds.data.float()))
MNIST_labels = torch.cat((train_ds.targets, test_ds.targets))
        # Create sequences of 4 digits (SEQ_LEN)
TMNIST_images = MNIST_images.reshape(-1,self.SEQ_LEN,1,28,28)
# With their corresponding label
TMNIST_labels = MNIST_labels.reshape(-1,self.SEQ_LEN)
        # Assign the label objective: is the sum of the current digit and the previous one odd?
        # self.train_ds.targets = ( self.train_ds.targets[:,:-1] > self.train_ds.targets[:,1:] ) # Alternative task: is the previous digit bigger than the current one?
        TMNIST_labels = ( TMNIST_labels[:,:-1] + TMNIST_labels[:,1:] ) % 2 # 1 when the sum of this digit and the previous one is odd
TMNIST_labels = TMNIST_labels.long()
## Create tensor dataset and dataloader
self.train_names, self.train_loaders = [], []
self.val_names, self.val_loaders = [], []
for i, e in enumerate(self.ENVS):
# Make whole dataset and get splits
dataset = torch.utils.data.TensorDataset(TMNIST_images, TMNIST_labels)
in_dataset, out_dataset = make_split(dataset, flags.holdout_fraction, seed=i)
# Make the training loaders (No testing environment)
in_loader = InfiniteLoader(in_dataset, batch_size=training_hparams['batch_size'])
self.train_names.append(str(e) + '_in')
self.train_loaders.append(in_loader)
# Make validation loaders
fast_in_loader = torch.utils.data.DataLoader(in_dataset, batch_size=64, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
self.val_names.append(str(e) + '_in')
self.val_loaders.append(fast_in_loader)
fast_out_loader = torch.utils.data.DataLoader(out_dataset, batch_size=64, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
self.val_names.append(str(e) + '_out')
self.val_loaders.append(fast_out_loader)
    def plot_samples(self, TMNIST_images, TMNIST_labels):
fig, axs = plt.subplots(3,4)
axs[0,0].imshow(TMNIST_images[0,0,:,:], cmap='gray')
axs[0,0].set_ylabel('Sequence 1')
axs[0,1].imshow(TMNIST_images[0,1,:,:], cmap='gray')
axs[0,1].set_title('Label = '+str(TMNIST_labels[0,0].cpu().item()))
axs[0,2].imshow(TMNIST_images[0,2,:,:], cmap='gray')
axs[0,2].set_title('Label = '+str(TMNIST_labels[0,1].cpu().item()))
axs[0,3].imshow(TMNIST_images[0,3,:,:], cmap='gray')
axs[0,3].set_title('Label = '+str(TMNIST_labels[0,2].cpu().item()))
axs[1,0].imshow(TMNIST_images[1,0,:,:], cmap='gray')
axs[1,0].set_ylabel('Sequence 2')
axs[1,1].imshow(TMNIST_images[1,1,:,:], cmap='gray')
axs[1,1].set_title('Label = '+str(TMNIST_labels[1,0].cpu().item()))
axs[1,2].imshow(TMNIST_images[1,2,:,:], cmap='gray')
axs[1,2].set_title('Label = '+str(TMNIST_labels[1,1].cpu().item()))
axs[1,3].imshow(TMNIST_images[1,3,:,:], cmap='gray')
axs[1,3].set_title('Label = '+str(TMNIST_labels[1,2].cpu().item()))
axs[2,0].imshow(TMNIST_images[2,0,:,:], cmap='gray')
axs[2,0].set_ylabel('Sequence 3')
axs[2,0].set_xlabel('Time Step 1')
axs[2,1].imshow(TMNIST_images[2,1,:,:], cmap='gray')
axs[2,1].set_xlabel('Time Step 2')
axs[2,1].set_title('Label = '+str(TMNIST_labels[2,0].cpu().item()))
axs[2,2].imshow(TMNIST_images[2,2,:,:], cmap='gray')
axs[2,2].set_xlabel('Time Step 3')
axs[2,2].set_title('Label = '+str(TMNIST_labels[2,1].cpu().item()))
axs[2,3].imshow(TMNIST_images[2,3,:,:], cmap='gray')
axs[2,3].set_xlabel('Time Step 4')
axs[2,3].set_title('Label = '+str(TMNIST_labels[2,2].cpu().item()))
for row in axs:
for ax in row:
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
plt.savefig('./figure/TCMNIST_'+self.SETUP+'.pdf')
class TCMNIST(Multi_Domain_Dataset):
""" Abstract class for Temporal Colored MNIST
Each sample is a sequence of 4 MNIST digits.
The task is to predict at each step if the sum of the current digit and the previous one is odd or even.
Color is added to the digits that is correlated with the label of the current step.
    The exact formulation of the correlation is defined in the children of this class, either sequence-wise or step-wise
Args:
flags (argparse.Namespace): argparse of training arguments
Note:
        The MNIST dataset needs to be downloaded; this is done automatically if the dataset isn't in the given data_path
"""
N_STEPS = 5001
SEQ_LEN = 4
PRED_TIME = [1, 2, 3]
INPUT_SHAPE = [2,28,28]
OUTPUT_SIZE = 2
def __init__(self, flags):
super().__init__()
## Import original MNIST data
MNIST_tfrm = transforms.Compose([ transforms.ToTensor() ])
# Get MNIST data
train_ds = datasets.MNIST(flags.data_path, train=True, download=True, transform=MNIST_tfrm)
test_ds = datasets.MNIST(flags.data_path, train=False, download=True, transform=MNIST_tfrm)
# Concatenate all data and labels
MNIST_images = torch.cat((train_ds.data.float(), test_ds.data.float()))
MNIST_labels = torch.cat((train_ds.targets, test_ds.targets))
        # Create sequences of 4 digits (SEQ_LEN)
self.TCMNIST_images = MNIST_images.reshape(-1, self.SEQ_LEN, 28, 28)
# With their corresponding label
TCMNIST_labels = MNIST_labels.reshape(-1, self.SEQ_LEN)
########################
### Choose the task:
# MNIST_labels = ( MNIST_labels[:,:-1] > MNIST_labels[:,1:] ) # Is the previous one bigger than the current one?
TCMNIST_labels = ( TCMNIST_labels[:,:-1] + TCMNIST_labels[:,1:] ) % 2 # Is the sum of this one and the last one an even number?
self.TCMNIST_labels = TCMNIST_labels.long()
def plot_samples(self, images, labels, name):
show_images = torch.cat([images,torch.zeros_like(images[:,:,0:1,:,:])], dim=2)
fig, axs = plt.subplots(3,4)
axs[0,0].imshow(show_images[0,0,:,:,:].permute(1,2,0))
axs[0,0].set_ylabel('Sequence 1')
axs[0,1].imshow(show_images[0,1,:,:,:].permute(1,2,0))
axs[0,1].set_title('Label = '+str(labels[0,0].cpu().item()))
axs[0,2].imshow(show_images[0,2,:,:,:].permute(1,2,0))
axs[0,2].set_title('Label = '+str(labels[0,1].cpu().item()))
axs[0,3].imshow(show_images[0,3,:,:,:].permute(1,2,0))
axs[0,3].set_title('Label = '+str(labels[0,2].cpu().item()))
axs[1,0].imshow(show_images[1,0,:,:,:].permute(1,2,0))
axs[1,0].set_ylabel('Sequence 2')
axs[1,1].imshow(show_images[1,1,:,:,:].permute(1,2,0))
axs[1,1].set_title('Label = '+str(labels[1,0].cpu().item()))
axs[1,2].imshow(show_images[1,2,:,:,:].permute(1,2,0))
axs[1,2].set_title('Label = '+str(labels[1,1].cpu().item()))
axs[1,3].imshow(show_images[1,3,:,:,:].permute(1,2,0))
axs[1,3].set_title('Label = '+str(labels[1,2].cpu().item()))
axs[2,0].imshow(show_images[2,0,:,:,:].permute(1,2,0))
axs[2,0].set_ylabel('Sequence 3')
axs[2,0].set_xlabel('Time Step 1')
axs[2,1].imshow(show_images[2,1,:,:,:].permute(1,2,0))
axs[2,1].set_xlabel('Time Step 2')
axs[2,1].set_title('Label = '+str(labels[2,0].cpu().item()))
axs[2,2].imshow(show_images[2,2,:,:,:].permute(1,2,0))
axs[2,2].set_xlabel('Time Step 3')
axs[2,2].set_title('Label = '+str(labels[2,1].cpu().item()))
axs[2,3].imshow(show_images[2,3,:,:,:].permute(1,2,0))
axs[2,3].set_xlabel('Time Step 4')
axs[2,3].set_title('Label = '+str(labels[2,2].cpu().item()))
for row in axs:
for ax in row:
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
plt.savefig('./assets/TCMNIST_'+ self.SETUP + '_'+name+'.pdf')
# plt.show()
class TCMNIST_seq(TCMNIST):
""" Temporal Colored MNIST Sequence
Each sample is a sequence of 4 MNIST digits.
The task is to predict at each step if the sum of the current digit and the previous one is odd or even.
Color is added to the digits that is correlated with the label of the current step.
    The correlation of the color to the label is constant across sequences, and whole sequences are sampled from an environment definition
Args:
flags (argparse.Namespace): argparse of training arguments
        training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
Note:
        The MNIST dataset needs to be downloaded; this is done automatically if the dataset isn't in the given data_path
"""
SETUP = 'seq'
## Correlation shift parameters
#:list: list of different correlation values between the color and the label
ENVS = [0.1, 0.8, 0.9]
#:float: Level of noise added to the labels
LABEL_NOISE = 0.25
def __init__(self, flags, training_hparams):
super().__init__(flags)
if flags.test_env is not None:
assert flags.test_env < len(self.ENVS), "Test environment chosen is not valid"
else:
warnings.warn("You don't have any test environment")
# Save stuff
self.test_env = flags.test_env
self.class_balance = training_hparams['class_balance']
self.batch_size = training_hparams['batch_size']
# Make the color datasets
self.train_names, self.train_loaders = [], []
self.val_names, self.val_loaders = [], []
for i, e in enumerate(self.ENVS):
# Choose data subset
images = self.TCMNIST_images[i::len(self.ENVS)]
labels = self.TCMNIST_labels[i::len(self.ENVS)]
# Color subset
colored_images, colored_labels = self.color_dataset(images, labels, e, self.LABEL_NOISE)
# self.plot_samples(colored_images, colored_labels, str(e))
# Make Tensor dataset and the split
dataset = torch.utils.data.TensorDataset(colored_images, colored_labels)
in_dataset, out_dataset = make_split(dataset, flags.holdout_fraction, seed=i)
if i != self.test_env:
in_loader = InfiniteLoader(in_dataset, batch_size=training_hparams['batch_size'])
self.train_names.append(str(e) + '_in')
self.train_loaders.append(in_loader)
fast_in_loader = torch.utils.data.DataLoader(in_dataset, batch_size=64, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
self.val_names.append(str(e) + '_in')
self.val_loaders.append(fast_in_loader)
fast_out_loader = torch.utils.data.DataLoader(out_dataset, batch_size=64, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
self.val_names.append(str(e) + '_out')
self.val_loaders.append(fast_out_loader)
def color_dataset(self, images, labels, p, d):
""" Color the dataset
Args:
            images (Tensor): grayscale images to color (a second color channel is stacked inside)
labels (Tensor): labels of the images
p (float): correlation between the color and the label
d (float): level of noise added to the labels
Returns:
            colored_images (Tensor): colored images
            labels (Tensor): labels, possibly flipped by the label noise
"""
# Add label noise
labels = XOR(labels, bernoulli(d, labels.shape)).long()
# Choose colors
colors = XOR(labels, bernoulli(1-p, labels.shape))
# Stack a second color channel
images = torch.stack([images,images], dim=2)
# Apply colors
for sample in range(colors.shape[0]):
for frame in range(colors.shape[1]):
images[sample,frame+1,colors[sample,frame].long(),:,:] *= 0
return images, labels
class TCMNIST_step(TCMNIST):
""" Temporal Colored MNIST Step
Each sample is a sequence of 4 MNIST digits.
The task is to predict at each step if the sum of the current digit and the previous one is odd or even.
Color is added to the digits that is correlated with the label of the current step.
    The correlation of the color to the label varies across sequences, and time steps are sampled from an environment definition.
By definition, the test environment is always the last time step in the sequence.
Args:
flags (argparse.Namespace): argparse of training arguments
        training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
Note:
        The MNIST dataset needs to be downloaded; this is done automatically if the dataset isn't in the given data_path
"""
SETUP = 'step'
# Correlation shift parameters
#:list: list of different correlation values between the color and the label
ENVS = [0.9, 0.8, 0.1]
#:float: Level of noise added to the labels
LABEL_NOISE = 0.25
def __init__(self, flags, training_hparams):
super(TCMNIST_step, self).__init__(flags)
if flags.test_env is not None:
assert flags.test_env < len(self.ENVS), "Test environment chosen is not valid"
else:
warnings.warn("You don't have any test environment")
## Save stuff
self.test_env = flags.test_env
self.class_balance = training_hparams['class_balance']
self.batch_size = training_hparams['batch_size']
# Define array of training environment dataloaders
self.train_names, self.train_loaders = [], []
self.val_names, self.val_loaders = [], []
        # Permute envs/steps so that the test environment sits at the last time step
        if self.test_env is not None:
            self.ENVS[-1], self.ENVS[self.test_env] = self.ENVS[self.test_env], self.ENVS[-1]
## Make the color datasets
# Stack a second color channel
colored_labels = self.TCMNIST_labels
colored_images = torch.stack([self.TCMNIST_images, self.TCMNIST_images], dim=2)
for i, e in enumerate(self.ENVS):
# Color i-th frame subset
colored_images, colored_labels = self.color_dataset(colored_images, colored_labels, i, e, self.LABEL_NOISE)
# Make Tensor dataset and dataloader
dataset = torch.utils.data.TensorDataset(colored_images, colored_labels.long())
in_dataset, out_dataset = make_split(dataset, flags.holdout_fraction, seed=i)
in_loader = InfiniteLoader(in_dataset, batch_size=training_hparams['batch_size'])
self.train_names = [str(e)+'_in' for e in self.ENVS[:-1]]
self.train_loaders.append(in_loader)
fast_in_loader = torch.utils.data.DataLoader(copy.deepcopy(in_dataset), batch_size=252, shuffle=False)
self.val_names.append([str(e)+'_in' for e in self.ENVS])
self.val_loaders.append(fast_in_loader)
fast_out_loader = torch.utils.data.DataLoader(out_dataset, batch_size=252, shuffle=False)
self.val_names.append([str(e)+'_out' for e in self.ENVS])
self.val_loaders.append(fast_out_loader)
def color_dataset(self, images, labels, env_id, p, d):
""" Color a single step 'env_id' of the dataset
Args:
images (Tensor): 2 channel images to color (the color channels are already stacked)
labels (Tensor): labels of the images
env_id (int): environment id
p (float): correlation between the color and the label
d (float): level of noise added to the labels
Returns:
colored_images (Tensor): the full dataset with one more step colored
"""
# Add label noise
labels[:,env_id] = XOR(labels[:,env_id], bernoulli(d, labels[:,env_id].shape)).long()
# Choose colors
colors = XOR(labels[:,env_id], bernoulli(1-p, labels[:,env_id].shape))
# Apply colors
for sample in range(colors.shape[0]):
images[sample,env_id+1,colors[sample].long(),:,:] *= 0
return images, labels
def split_data(self, out, labels):
""" Group data and prediction by environment
Args:
out (Tensor): output data from a model (batch_size, len(PRED_TIME), n_classes)
labels (Tensor): labels of the data (batch_size, len(PRED_TIME))
Returns:
Tensor: The reshaped data (n_env-1, batch_size, 1, n_classes)
Tensor: The reshaped labels (n_env-1, batch_size, 1)
"""
n_train_env = len(self.ENVS)-1 if self.test_env is not None else len(self.ENVS)
out_split = torch.zeros((n_train_env, self.batch_size, 1, out.shape[-1])).to(out.device)
labels_split = torch.zeros((n_train_env, self.batch_size, 1)).long().to(labels.device)
for i in range(n_train_env):
# Test env is always the last one
out_split[i,...] = out[:,i,...].unsqueeze(1)
labels_split[i,...] = labels[:,i,...].unsqueeze(1)
return out_split, labels_split
class EEG_dataset(Dataset):
""" HDF5 dataset for EEG data
The HDF5 file is expected to have the following nested dict structure::
{'env0': {'data': np.array(n_samples, time_steps, input_size),
'labels': np.array(n_samples, len(PRED_TIME))},
'env1': {'data': np.array(n_samples, time_steps, input_size),
'labels': np.array(n_samples, len(PRED_TIME))},
...}
The advantage of this layout is that data is read from disk only when needed, which saves RAM. A minimal sketch showing how to produce a compatible file follows this class.
Args:
h5_path (str): absolute path to the hdf5 file
env_id (int): environment id key in the hdf5 file
split (list): list of indices of the dataset that belong to the split. If 'None', all the data is used
"""
def __init__(self, h5_path, env_id, split=None):
self.h5_path = h5_path
self.env_id = env_id
self.hdf = h5py.File(self.h5_path, 'r')
self.data = self.hdf[env_id]['data']
self.targets = self.hdf[env_id]['labels']
self.split = list(range(self.hdf[env_id]['data'].shape[0])) if split is None else split
def __len__(self):
return len(self.split)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
split_idx = self.split[idx]
seq = torch.as_tensor(self.data[split_idx, ...])
labels = torch.as_tensor(self.targets[split_idx])
return (seq, labels)
def close(self):
""" Close the hdf5 file link """
self.hdf.close()
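# A minimal sketch (assuming h5py and numpy are available) of how a file with
# the nested structure documented in EEG_dataset could be produced. It is not
# part of the original code; environment names, shapes and the label range
# below are made up for demonstration only.
def _make_example_eeg_h5(path='example_eeg.h5'):
    import h5py
    import numpy as np
    with h5py.File(path, 'w') as f:
        for env in ['env0', 'env1']:
            grp = f.create_group(env)
            # data: (n_samples, time_steps, input_size)
            grp.create_dataset('data', data=np.random.randn(10, 3000, 19))
            # labels: (n_samples, len(PRED_TIME))
            grp.create_dataset('labels', data=np.random.randint(0, 6, size=(10, 1)))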
class Sleep_DB(Multi_Domain_Dataset):
""" Class for Sleep Staging datasets with their data stored in a HDF5 file
Args:
flags (argparse.Namespace): argparse of training arguments
training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
"""
CHECKPOINT_FREQ = 500
SETUP = 'seq'
SEQ_LEN = 3000
PRED_TIME = [2999]
OUTPUT_SIZE = 6
#:str: relative path to the hdf5 file
DATA_FILE = None
def __init__(self, flags, training_hparams):
super().__init__()
if flags.test_env is not None:
assert flags.test_env < len(self.ENVS), "Test environment chosen is not valid"
else:
warnings.warn("You don't have any test environment")
## Save stuff
self.test_env = flags.test_env
self.class_balance = training_hparams['class_balance']
self.batch_size = training_hparams['batch_size']
## Create tensor dataset and dataloader
self.val_names, self.val_loaders = [], []
self.train_names, self.train_loaders = [], []
for j, e in enumerate(self.ENVS):
# Get full environment dataset and define in/out split
full_dataset = EEG_dataset(os.path.join(flags.data_path, self.DATA_FILE), e)
in_split, out_split = get_split(full_dataset, flags.holdout_fraction, seed=j, sort=True)
full_dataset.close()
# Make training dataset/loader and append it to training containers
if j != flags.test_env:
in_dataset = EEG_dataset(os.path.join(flags.data_path, self.DATA_FILE), e, split=in_split)
in_loader = InfiniteLoader(in_dataset, batch_size=training_hparams['batch_size'])
self.train_names.append(e + '_in')
self.train_loaders.append(in_loader)
# # Get in/out hdf5 dataset
# out_dataset = EEG_dataset(os.path.join(flags.data_path, self.DATA_FILE), e, split=out_split)
# Make validation loaders
fast_in_dataset = EEG_dataset(os.path.join(flags.data_path, self.DATA_FILE), e, split=in_split)
fast_in_loader = torch.utils.data.DataLoader(fast_in_dataset, batch_size=64, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
# fast_in_loader = torch.utils.data.DataLoader(fast_in_dataset, batch_size=256, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
fast_out_dataset = EEG_dataset(os.path.join(flags.data_path, self.DATA_FILE), e, split=out_split)
fast_out_loader = torch.utils.data.DataLoader(fast_out_dataset, batch_size=64, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
# fast_out_loader = torch.utils.data.DataLoader(fast_out_dataset, batch_size=256, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
# Append to val containers
self.val_names.append(e + '_in')
self.val_loaders.append(fast_in_loader)
self.val_names.append(e + '_out')
self.val_loaders.append(fast_out_loader)
def get_class_weight(self):
"""Compute class weight for class balanced training
Returns:
list: list of weights of length OUTPUT_SIZE
"""
_, train_loaders = self.get_train_loaders()
n_labels = torch.zeros(self.OUTPUT_SIZE)
for env_loader in train_loaders:
labels = env_loader.dataset.targets[:]
for i in range(self.OUTPUT_SIZE):
n_labels[i] += torch.eq(torch.as_tensor(labels), i).sum()
weights = n_labels.max() / n_labels
return weights
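# The returned weights are intended for class-balanced training; a typical
# (illustrative, not prescribed by this codebase) use with PyTorch would be:
#
#     loss_fn = torch.nn.CrossEntropyLoss(weight=dataset.get_class_weight())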
class CAP(Sleep_DB):
""" CAP Sleep stage dataset
The task is to classify the sleep stage from EEG and other modalities of signals.
This dataset only uses about half of the raw dataset because some measurements are incompatible.
We use the 5 most commonly used machines in the database to create 5 separate environments to train on.
The machines used were inferred by grouping together the recordings that had the same channels, and the
final preprocessed data only includes the channels common to those 5 machines.
You can read more on the data itself and its provenance on Physionet.org:
https://physionet.org/content/capslpdb/1.0.0/
Args:
flags (argparse.Namespace): argparse of training arguments
training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
Note:
This dataset needs to be downloaded and preprocessed. This can be done with the download.py script.
"""
DATA_FILE = 'physionet.org/CAP.h5'
ENVS = ['Machine0', 'Machine1', 'Machine2', 'Machine3', 'Machine4']
INPUT_SHAPE = [19]
def __init__(self, flags, training_hparams):
super().__init__(flags, training_hparams)
class SEDFx(Sleep_DB):
""" SEDFx Sleep stage dataset
The task is to classify the sleep stage from EEG and other modalities of signals.
This dataset only uses about half of the raw dataset because some measurements are incompatible.
We split the dataset into 4 environments to train on, each of them containing the data from a given age group.
You can read more on the data itself and its provenance on Physionet.org:
https://physionet.org/content/sleep-edfx/1.0.0/
Args:
flags (argparse.Namespace): argparse of training arguments
training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
Note:
This dataset needs to be downloaded and preprocessed. This can be done with the download.py script
"""
DATA_FILE = 'physionet.org/SEDFx.h5'
ENVS = ['Age 20-40', 'Age 40-60', 'Age 60-80','Age 80-100']
INPUT_SHAPE = [4]
def __init__(self, flags, training_hparams):
super().__init__(flags, training_hparams)
class MI(Sleep_DB):
""" MI datasets
The task is to classify the motor imagery from EEG and other modalities of signals.
The raw data comes from the three MI Databases:
['Cho2017', 'PhysionetMI', 'BNCI2014001']
You can read more on the data itself and its provenance on:
http://moabb.neurotechx.com/docs/api.html#motor-imagery-datasets
This dataset needs to be downloaded and preprocessed. This can be done with the download.py script
"""
DATA_FILE = 'MI.h5'
ENVS = ['Cho2017', 'PhysionetMI', 'BNCI2014001']
SEQ_LEN = 750
PRED_TIME = [749]
INPUT_SHAPE = [22]
OUTPUT_SIZE = 2
def __init__(self, flags, training_hparams):
""" Dataset constructor function
Args:
flags (Namespace): argparse of training arguments
training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
"""
super().__init__(flags, training_hparams)
class StockVolatility(Multi_Domain_Dataset):
""" Stock Volatility Dataset
Args:
flags (argparse.Namespace): argparse of training arguments
training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
Resources:
* https://github.com/lukaszbanasiak/yahoo-finance
* https://medium.com/analytics-vidhya/predicting-the-volatility-of-stock-data-56f8938ab99d
* https://medium.com/analytics-vidhya/univariate-forecasting-for-the-volatility-of-the-stock-data-using-deep-learning-6c8a4df7edf9
"""
N_STEPS = 5001
SETUP = 'step'
PRED_TIME = [3000]
# Choose a way to split into [3,10] environments
ENVS = ['2000-2004', '2005-2009', '2010-2014', '2015-2020']
INPUT_SHAPE = [1000000]
OUTPUT_SIZE = 1
CHECKPOINT_FREQ = 500
def __init__(self, flags, training_hparams):
super().__init__()
pass
# data = [Dataloader for e in self.ENVS]
## For every index
# Take all the data of the index
# Do some preprocessing if needed
# Split into year chunks according to self.ENVS
## For every chunk e
# env_data = split the chunks into sequences of X days
# data[e].append(env_data)
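# A minimal sketch of the pipeline outlined above, assuming a CSV of daily
# closing prices with 'date' and 'close' columns (the path, column names and
# helper name are hypothetical, not part of this codebase):
def _load_env_volatility_sequences(csv_path, envs, seq_len=30):
    import numpy as np
    import pandas as pd
    df = pd.read_csv(csv_path, parse_dates=['date']).sort_values('date')
    # Daily log returns and a 21-day rolling volatility target
    df['log_ret'] = np.log(df['close']).diff()
    df['volatility'] = df['log_ret'].rolling(21).std()
    df = df.dropna()
    data = {}
    for env in envs:  # e.g. '2000-2004'
        start, end = env.split('-')
        mask = (df['date'].dt.year >= int(start)) & (df['date'].dt.year <= int(end))
        values = df.loc[mask, 'volatility'].to_numpy()
        # Split each year-range chunk into non-overlapping seq_len-day sequences
        n_seq = len(values) // seq_len
        data[env] = values[:n_seq * seq_len].reshape(n_seq, seq_len)
    return data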
class Video_dataset(Dataset):
""" Video dataset
Folder structure::
data_path
└── 001
└─ 001
├── frame_000000.jpg
├── ...
└── frame_{n_frames-1}.jpg
└─ 002
└─ (samples) ...
└── 002
└─ 001
└─ 002
└─ (samples) ...
└── 003
└── (labels) ...
Args:
data_path (str): path to the folder containing the data
n_frames (int): number of frames in each video
transform (callable, optional): Optional transform to be applied
on a sample.
"""
def __init__(self, data_path, n_frames, transform=None, split=None):
self.data_path = data_path
self.n_frames = n_frames
self.transform = transform
self.targets = []
self.folders = []
for label in os.listdir(self.data_path):
for rep in os.listdir(os.path.join(self.data_path, label)):
self.folders.append(os.path.join(self.data_path, label, rep))
self.targets.append(int(label)-1)
self.split = list(range(len(self.folders))) if split is None else split
def __len__(self):
"Denotes the total number of samples"
return len(self.split)
def read_images(self, selected_folder, use_transform):
""" Read images from a folder (single video consisting of n_frames images)
Args:
selected_folder (str): path to the folder containing the images
use_transform (callable): transform to apply on the images
Returns:
Tensor: images tensor (n_frames, 3, 224, 224)
"""
X = []
for i in range(self.n_frames):
image = Image.open(os.path.join(selected_folder, 'frame_{:06d}.jpg'.format(i)))
if use_transform is not None:
image = use_transform(image)
X.append(image)
X = torch.stack(X, dim=0)
return X
def __getitem__(self, index):
""" Reads an image given anindex
Args:
index (int): index of the video sample to get
Returns:
Tensor: video tensor (n_frames, 3, 224, 224)
"""
# Select sample
split_index = self.split[index]
folder = self.folders[split_index]
# Load data
X = self.read_images(folder, self.transform) # (input) spatial images
y = torch.LongTensor([self.targets[split_index]]) # (label) LongTensor holds int64 class indices rather than floats
return X, y
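# Illustrative usage (the path, frame count and transform are hypothetical):
# each sample folder must contain n_frames JPEGs named frame_000000.jpg,
# frame_000001.jpg, ... as read by read_images() above.
#
#     from torchvision import transforms
#     ds = Video_dataset('data/LSA64/001', n_frames=20, transform=transforms.ToTensor())
#     clip, label = ds[0]   # clip: (20, 3, H, W), label: tensor([class_id])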
class LSA64(Multi_Domain_Dataset):
""" LSA64: A Dataset for Argentinian Sign Language dataset
This dataset is composed of videos of different signers.
You can read more on the data itself and its provenance from its source:
http://facundoq.github.io/datasets/lsa64/
Args:
flags (argparse.Namespace): argparse of training arguments
training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
Note:
This dataset needs to be downloaded and preprocessed. This can be done with the download.py script
Resources:
* http://facundoq.github.io/datasets/lsa64/
* http://facundoq.github.io/guides/sign_language_datasets/slr
* https://sci-hub.mksa.top/10.1007/978-981-10-7566-7_63
* https://github.com/hthuwal/sign-language-gesture-recognition/
"""
N_STEPS = 5001
SETUP = 'seq'
PRED_TIME = [19]
ENVS = ['001', '002', '003', '004', '005', '006', '007', '008', '009', '010']
INPUT_SHAPE = [3, 224, 224]
OUTPUT_SIZE = 64
CHECKPOINT_FREQ = 100
#:int: number of frames in each video
SEQ_LEN = 20
#:str: path to the folder containing the data
DATA_FOLDER = 'LSA64'
def __init__(self, flags, training_hparams):
super().__init__()
if flags.test_env is not None:
assert flags.test_env < len(self.ENVS), "Test environment chosen is not valid"
else:
warnings.warn("You don't have any test environment")
## Save stuff
self.test_env = flags.test_env
self.class_balance = training_hparams['class_balance']
self.batch_size = training_hparams['batch_size']
self.normalize = transforms.Compose([transforms.ToTensor(),
transforms.Normalize( mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
## Create tensor dataset and dataloader
self.val_names, self.val_loaders = [], []
self.train_names, self.train_loaders = [], []
for j, e in enumerate(self.ENVS):
env_path = os.path.join(flags.data_path, self.DATA_FOLDER, e)
# Get full environment dataset and define in/out split
full_dataset = Video_dataset(env_path, self.SEQ_LEN, transform=self.normalize)
in_split, out_split = get_split(full_dataset, flags.holdout_fraction, seed=j, sort=True)
# Make training dataset/loader and append it to training containers
if j != flags.test_env:
in_dataset = Video_dataset(env_path, self.SEQ_LEN, transform=self.normalize, split=in_split)
in_loader = InfiniteLoader(in_dataset, batch_size=training_hparams['batch_size'])
self.train_names.append(e + '_in')
self.train_loaders.append(in_loader)
# Make validation loaders
fast_in_dataset = Video_dataset(env_path, self.SEQ_LEN, transform=self.normalize, split=in_split)
fast_in_loader = torch.utils.data.DataLoader(fast_in_dataset, batch_size=64, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
# fast_in_loader = torch.utils.data.DataLoader(fast_in_dataset, batch_size=256, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
fast_out_dataset = Video_dataset(env_path, self.SEQ_LEN, transform=self.normalize, split=out_split)
fast_out_loader = torch.utils.data.DataLoader(fast_out_dataset, batch_size=64, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
# fast_out_loader = torch.utils.data.DataLoader(fast_out_dataset, batch_size=256, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
# Append to val containers
self.val_names.append(e + '_in')
self.val_loaders.append(fast_in_loader)
self.val_names.append(e + '_out')
self.val_loaders.append(fast_out_loader)
def get_class_weight(self):
"""Compute class weight for class balanced training
Returns:
list: list of weights of length OUTPUT_SIZE
"""
_, train_loaders = self.get_train_loaders()
n_labels = torch.zeros(self.OUTPUT_SIZE)
for env_loader in train_loaders:
labels = env_loader.dataset.targets[:]
for i in range(self.OUTPUT_SIZE):
n_labels[i] += torch.eq(torch.as_tensor(labels), i).sum()
weights = n_labels.max() / n_labels
return weights
class HAR(Multi_Domain_Dataset):
""" Heterogeneity Acrivity Recognition Dataset (HAR)
This dataset is composed of wearable sensor measurements taken during different activities.
The goal is to classify those activities (stand, sit, walk, bike, stairs up, stairs down).
You can read more on the data itself and its provenance from its source:
https://archive.ics.uci.edu/ml/datasets/Heterogeneity+Activity+Recognition
Args:
flags (argparse.Namespace): argparse of training arguments
training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
Note:
This dataset needs to be downloaded and preprocessed. This can be done with the download.py script
Resources:
* https://archive.ics.uci.edu/ml/datasets/Heterogeneity+Activity+Recognition
* https://dl.acm.org/doi/10.1145/2809695.2809718
"""
N_STEPS = 5001
SETUP = 'seq'
SEQ_LEN = 500
PRED_TIME = [499]
ENVS = ['nexus4', 's3', 's3mini', 'lgwatch', 'gear']
INPUT_SHAPE = [6]
OUTPUT_SIZE = 6
CHECKPOINT_FREQ = 100
#:str: Path to the file containing the data
DATA_FILE = 'HAR/HAR.h5'
def __init__(self, flags, training_hparams):
""" Dataset constructor function
Args:
flags (argparse.Namespace): argparse of training arguments
training_hparams (dict): dictionary of training hyperparameters coming from the hyperparams.py file
"""
super().__init__()
if flags.test_env is not None:
assert flags.test_env < len(self.ENVS), "Test environment chosen is not valid"
else:
warnings.warn("You don't have any test environment")
# Save stuff
self.test_env = flags.test_env
self.batch_size = training_hparams['batch_size']
# Label definition
self.label_dict = { 'stand': 0,
'sit': 1,
'walk': 2,
'bike': 3,
'stairsup': 4,
'stairsdown': 5}
## Create tensor dataset and dataloader
self.val_names, self.val_loaders = [], []
self.train_names, self.train_loaders = [], []
for j, e in enumerate(self.ENVS):
with h5py.File(os.path.join(flags.data_path, self.DATA_FILE), 'r') as f:
# Load data
data = torch.tensor(f[e]['data'][...])
labels = torch.tensor(f[e]['labels'][...])
# Get full environment dataset and define in/out split
full_dataset = torch.utils.data.TensorDataset(data, labels)
in_dataset, out_dataset = make_split(full_dataset, flags.holdout_fraction, seed=j)
# Make training dataset/loader and append it to training containers
if j != flags.test_env:
in_loader = InfiniteLoader(in_dataset, batch_size=training_hparams['batch_size'])
self.train_names.append(e + '_in')
self.train_loaders.append(in_loader)
# Make validation loaders
fast_in_loader = torch.utils.data.DataLoader(copy.deepcopy(in_dataset), batch_size=64, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
fast_out_loader = torch.utils.data.DataLoader(out_dataset, batch_size=64, shuffle=False, num_workers=self.N_WORKERS, pin_memory=True)
# Append to val containers
self.val_names.append(e + '_in')
self.val_loaders.append(fast_in_loader)
self.val_names.append(e + '_out')
self.val_loaders.append(fast_out_loader) |
# If you were going to draw a regular polygon with 18 sides, what angle would you need to turn the turtle at each corner?
# 360/18 = 20 degrees per turn (the exterior angle of a regular n-gon is 360/n)
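# A minimal sketch using the standard library turtle module (assuming a
# graphical environment is available): the turtle turns the exterior angle
# 360/n after drawing each side, so 20 degrees for an 18-gon.
import turtle

def draw_regular_polygon(t, sides, side_length):
    # Turn by the exterior angle (360/sides) after each side.
    angle = 360 / sides
    for _ in range(sides):
        t.forward(side_length)
        t.left(angle)

if __name__ == '__main__':
    t = turtle.Turtle()
    draw_regular_polygon(t, 18, 40)
    turtle.done()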
"""Contains methods related to manipulation of xarray datasets in a test
environment.
"""
def make_on_variable(dataframe, variable):
"""Returns a new dataset created on the specified variable of the dataframe.
"""
return dataframe[variable]
def get_statistic_function(statistic):
"""Returns the appropriate function for the given statistic string.
"""
statistic_dict = {"max": get_max, "min": get_min, "mean": get_mean}
return statistic_dict[statistic]
def get_max(dataset):
"""Returns the maximum value for the specified dataset.
"""
return dataset.max()
def get_min(dataset):
"""Returns the maximum value for the specified dataset.
"""
return dataset.min()
def get_mean(dataset):
"""Returns the mean value for the specified dataset.
"""
return dataset.mean()
def get_as_list(dataset):
"""Returns the dataset as a list. First converts to ndarray then python list.
This is a utility function for testing, do not use in production environment.
"""
return dataset.values.tolist()
|
# -*- coding: utf-8 -*-
age=input('age:')
if age.isdigit() == False:
print('wrong')
else:
age=int(age)
if age<4:
price=0
elif age<18:
price=5
elif age<65:
price=10
elif age>=65:
price=5
print("Your admission cost is $"+str(price)+".") |
# Functions
print("Start")
def kahihi(kka):
username = a.split(" ")
print(username)
username = "Satyen Deshpande"
#kahihi(username)
a = ("string", "number", "symbol")
b1 = ["string", "number", "symbol"]
b = [56, 32, 98712]
print(type(a))
print(type(b))
total = 0
for i in b:
total = i + total
print(total)
# Write a Python function to find the Max of three numbers.
# Write a Python function to check whether a number is even or odd
#Write a Python function to sum all the numbers in a list.
#Sample List : (8, 2, 3, 0, 7)
#Expected Output : 20
#Write a Python program to reverse a string. Go to the editor
#Sample String : "1234abcd"
#Expected Output : "dcba4321"
#Write a Python function to check whether a number is even or odd |
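# One possible set of solutions to the exercises above (function names are
# illustrative, not prescribed by the exercise text):

def max_of_three(x, y, z):
    # The built-in max already handles this; written out for clarity.
    return max(x, y, z)

def is_even(n):
    # A number is even when it is divisible by 2.
    return n % 2 == 0

def sum_list(numbers):
    result = 0
    for n in numbers:
        result += n
    return result

def reverse_string(s):
    # Slicing with a step of -1 walks the string backwards.
    return s[::-1]

print(max_of_three(8, 2, 3))       # 8
print(is_even(7))                  # False
print(sum_list([8, 2, 3, 0, 7]))   # 20
print(reverse_string("1234abcd"))  # dcba4321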
import yaml
class NomenclatureEntry(yaml.YAMLObject):
def __init__(self, label, text):
self.label = label
self.text = text
def __repr__(self):
return ('Nom(%r, %r)' % (self.label, self.text))
class Symbol(yaml.YAMLObject):
yaml_tag = u'!Symbol'
def __init__(self, symbol, tex, definition_order, tag=None, desc=None,
long=None, example=None, nargs=0, # @ReservedAssignment
where=None, nomenclature=None, other={}):
self.symbol = symbol
self.tex = tex
self.definition_order = definition_order
self.nargs = nargs
self.desc = desc
self.long = long
self.example = example
self.tag = tag
self.where = where
self.nomenclature = nomenclature
self.other = other
def __repr__(self):
return ('Symbol(%r, %r, %r, %r, %r, %r, %r)' %
(self.symbol, self.tex, self.tag, self.nargs, self.example,
self.nomenclature, self.other))
def tex_definition_short(self):
cmd = self.symbol
assert isinstance(cmd, str)
if self.nargs:
params = '{%s}[%s]{%s}' % (cmd, self.nargs, self.tex)
else:
params = '{%s}{%s}' % (cmd, self.tex)
return '\\newcommand%s' % params
def tex_definition(self, wrapper=None):
if wrapper is None:
tex = self.tex
else:
tex = wrapper(self.tex)
def single_def(cmd):
if self.nargs:
params = '{%s}[%s]{%s}' % (cmd, self.nargs, tex)
else:
params = '{%s}{%s}' % (cmd, tex)
s = ('\\ifdefined%s%%\n \\renewcommand%s%%\n\\else%%\n '
'\\newcommand%s%%\n\\fi\n' % (cmd, params, params))
return s
if isinstance(self.symbol, list):
s = "\n".join([single_def(t) for t in self.symbol])
else:
s = single_def(self.symbol)
if self.desc:
s += '%% %s' % self.desc
return s
def symbol_dependencies(self):
""" Returns all the commands used by the definition """
from latex_symbol_manager.programs.collect.find_commands import find_all_commands_in_string
return find_all_commands_in_string(self.tex)
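# Example (illustrative, not from the original file): generating a LaTeX
# definition for a simple zero-argument symbol with tex_definition_short().
#
#     >>> s = Symbol(symbol='\\reals', tex='\\mathbb{R}', definition_order=0)
#     >>> print(s.tex_definition_short())
#     \newcommand{\reals}{\mathbb{R}}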
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" BaseHandler Class
All handlers must inherit from this class
"""
__all__ = [
'BaseHandler'
]
class BaseHandler:
""" Base of all handler class
"""
@classmethod
def handler_name(cls) -> str:
""" Return handler name, used by serializer
Raises:
NotImplementedError: Abstract def
Returns:
None
"""
raise NotImplementedError
|
import logging
import re
import time
from datetime import datetime
import pytest
from django.contrib.auth import get_user_model
from django.template.loader import render_to_string
from django.utils.http import int_to_base36, base36_to_int
from django_email_verification import send_email
from django_email_verification.errors import NotAllFieldCompiled, InvalidUserModel
from django_email_verification.confirm import DJANGO_EMAIL_VERIFICATION_MORE_VIEWS_ERROR, \
DJANGO_EMAIL_VERIFICATION_NO_VIEWS_ERROR, DJANGO_EMAIL_VERIFICATION_NO_PARAMETER_WARNING
class LogHandler(logging.StreamHandler):
def __init__(self, levelname, match, callback):
super().__init__()
self.levelname = levelname
self.match = match
self.callback = callback
self.warning_found = False
self.error_found = False
def emit(self, record):
msg = self.format(record)
if record.levelname == self.levelname and msg.startswith(self.match):
self.callback()
def get_mail_params(content):
expiry = re.findall(r'\d{1,2}:\d{1,2}', content)[0]
url = re.findall(r'(http|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?',
content)[0][-1]
return url, expiry
@pytest.fixture
def test_user():
user = get_user_model()(username='test_user', password='test_passwd', email='test@test.com')
return user
@pytest.fixture
def wrong_token_template():
match = render_to_string('confirm.html', {'success': False, 'user': None})
return match
@pytest.mark.django_db
def test_missing_params(test_user, settings, client):
with pytest.raises(NotAllFieldCompiled):
settings.EMAIL_FROM_ADDRESS = None
send_email(test_user, thread=False)
with pytest.raises(InvalidUserModel):
send_email(None, thread=False)
with pytest.raises(NotAllFieldCompiled):
settings.EMAIL_PAGE_TEMPLATE = None
url = '/email/_'
client.get(url)
@pytest.mark.django_db
def test_email_content(test_user, mailoutbox, settings):
test_user.is_active = False
send_email(test_user, thread=True)
time.sleep(0.5)
email = mailoutbox[0]
email_content = email.alternatives[0][0]
url, expiry = get_mail_params(email_content)
assert email.subject == re.sub(r'({{.*}})', test_user.username, settings.EMAIL_MAIL_SUBJECT), "The subject changed"
assert email.from_email == settings.EMAIL_FROM_ADDRESS, "The from_address changed"
assert email.to == [test_user.email], "The to_address changed"
assert len(expiry) > 0, f"No expiry time detected, {email_content}"
assert len(url) > 0, "No link detected"
@pytest.mark.django_db
def test_email_custom_params(test_user, mailoutbox):
s_expiry = datetime.now()
test_user.is_active = False
send_email(test_user, thread=False, custom_salt='test_salt', expiry=s_expiry)
email = mailoutbox[0]
email_content = email.alternatives[0][0]
_, expiry = get_mail_params(email_content)
expiry = expiry.split(':')
assert s_expiry.time().hour == int(expiry[0]) or s_expiry.time().hour - 12 == int(expiry[0])
assert s_expiry.time().minute == int(expiry[1])
@pytest.mark.django_db
def test_email_link_correct(test_user, mailoutbox, client):
test_user.is_active = False
send_email(test_user, thread=False)
email = mailoutbox[0]
email_content = email.alternatives[0][0]
url, _ = get_mail_params(email_content)
response = client.get(url)
match = render_to_string('confirm.html', {'success': True, 'user': test_user})
assert response.content.decode() == match
assert get_user_model().objects.get(email='test@test.com').is_active
@pytest.mark.django_db
def test_email_link_wrong(client, wrong_token_template):
url = '/email/dGVzdEB0ZXN0LmNvbE-agax3s-00348f02fabc98235547361a0fe69129b3b750f5'
response = client.get(url)
assert response.content.decode() == wrong_token_template, "Invalid token accepted"
url = '/email/_'
response = client.get(url)
assert response.content.decode() == wrong_token_template, "Short token accepted"
url = '/email/dGVzdEB0ZXN0LmNvbE++-agax3sert-00=00348f02fabc98235547361a0fe69129b3b750f5'
response = client.get(url)
assert response.content.decode() == wrong_token_template, "Long token accepted"
@pytest.mark.django_db
def test_token_different_timestamp(test_user, mailoutbox, client, wrong_token_template):
test_user.is_active = False
send_email(test_user, thread=False)
email = mailoutbox[0]
email_content = email.alternatives[0][0]
url, _ = get_mail_params(email_content)
# Increment timestamp
token = url.split('-')
token[1] = int_to_base36(base36_to_int(token[1]) + 1)
url = '-'.join(token)
response = client.get(url)
assert response.content.decode() == wrong_token_template
@pytest.mark.django_db
def test_token_expired(test_user, mailoutbox, settings, client, wrong_token_template):
settings.EMAIL_TOKEN_LIFE = 1
test_user.is_active = False
send_email(test_user, thread=False)
email = mailoutbox[0]
email_content = email.alternatives[0][0]
url, _ = get_mail_params(email_content)
time.sleep(2)
response = client.get(url)
assert response.content.decode() == wrong_token_template
@pytest.mark.django_db
def test_multi_user(mailoutbox, settings, client):
setattr(settings, 'EMAIL_MULTI_USER', True)
test_user_1 = get_user_model().objects.create(username='test_user_1', password='test_passwd_1',
email='test@test.com')
test_user_2 = get_user_model().objects.create(username='test_user_2', password='test_passwd_2',
email='test@test.com')
test_user_1.is_active = False
test_user_2.is_active = False
test_user_1.save()
test_user_2.save()
send_email(test_user_1, thread=False)
email = mailoutbox[0]
email_content = email.alternatives[0][0]
url, _ = get_mail_params(email_content)
response = client.get(url)
match = render_to_string('confirm.html', {'success': True, 'user': test_user_1})
assert response.content.decode() == match
assert list(get_user_model().objects.filter(email='test@test.com').values_list('is_active')) == [(True,), (False,)]
@pytest.mark.urls('django_email_verification.tests.urls_test_1')
@pytest.mark.django_db
def test_too_many_verify_view(test_user):
error_raised = False
def raise_error():
nonlocal error_raised
error_raised = True
handler = LogHandler('ERROR', DJANGO_EMAIL_VERIFICATION_MORE_VIEWS_ERROR, raise_error)
logger = logging.getLogger('django_email_verification')
logger.addHandler(handler)
test_user.is_active = False
send_email(test_user, thread=False)
assert error_raised, 'No error raised if multiple views are found'
@pytest.mark.urls('django_email_verification.tests.urls_test_2')
@pytest.mark.django_db
def test_no_verify_view(test_user):
error_raised = False
def raise_error():
nonlocal error_raised
error_raised = True
handler = LogHandler('ERROR', DJANGO_EMAIL_VERIFICATION_NO_VIEWS_ERROR, raise_error)
logger = logging.getLogger('django_email_verification')
logger.addHandler(handler)
test_user.is_active = False
send_email(test_user, thread=False)
assert error_raised, 'No error raised if no views are found'
@pytest.mark.django_db
def test_incomplete_verify_view(test_user):
warning_raised = False
def raise_warning():
nonlocal warning_raised
warning_raised = True
handler = LogHandler('WARNING', DJANGO_EMAIL_VERIFICATION_NO_PARAMETER_WARNING, raise_warning)
logger = logging.getLogger('django_email_verification')
logger.addHandler(handler)
test_user.is_active = False
send_email(test_user, thread=False)
assert warning_raised, 'No warning raised if incomplete urls are found'
def test_app_config():
from .. import apps
assert apps.DjangoEmailConfirmConfig.name == 'django_email_verification', "Wrong App name"
|
import pyodbc
# conn_str = (
# "DRIVER={PostgreSQL Unicode};"
# "DATABASE=postgres;"
# "UID=postgres;"
# "PWD=whatever;"
# "SERVER=localhost;"
# "PORT=5432;"
# )
# conn = pyodbc.connect(conn_str)
conn = pyodbc.connect(dsn="my_driver")
crsr = conn.cursor()
# Open and read the file as a single buffer
script_path = '/Users/liyuan/Desktop/Docker-hw/create_banking_db.sql'
with open(script_path, 'r') as fd:
    sql_script = fd.read()
# Get all SQL statements (split on ';')
sql_statements = sql_script.split(';')
# Execute SQL statements
for statement in sql_statements:
if not statement.strip():
continue
crsr.execute(statement)
print('Statement executed: %s\n' % statement)
conn.commit()
crsr.close()
conn.close()
|
import botocore
import sys
import unittest
import urllib3
from mock import Mock, patch
from collections import namedtuple
from patroni.scripts.aws import AWSConnection, main as _main
class MockVolumes(object):
@staticmethod
def filter(*args, **kwargs):
oid = namedtuple('Volume', 'id')
return [oid(id='a'), oid(id='b')]
class MockEc2Connection(object):
volumes = MockVolumes()
@staticmethod
def create_tags(Resources, **kwargs):
if len(Resources) == 0:
raise botocore.exceptions.ClientError({'Error': {'Code': 503, 'Message': 'Request limit exceeded'}},
'create_tags')
return True
@patch('boto3.resource', Mock(return_value=MockEc2Connection()))
class TestAWSConnection(unittest.TestCase):
@patch('patroni.scripts.aws.requests_get', Mock(return_value=urllib3.HTTPResponse(
status=200, body=b'{"instanceId": "012345", "region": "eu-west-1"}')))
def setUp(self):
self.conn = AWSConnection('test')
def test_on_role_change(self):
self.assertTrue(self.conn.on_role_change('primary'))
with patch.object(MockVolumes, 'filter', Mock(return_value=[])):
self.conn._retry.max_tries = 1
self.assertFalse(self.conn.on_role_change('primary'))
@patch('patroni.scripts.aws.requests_get', Mock(side_effect=Exception('foo')))
def test_non_aws(self):
conn = AWSConnection('test')
self.assertFalse(conn.on_role_change("primary"))
@patch('patroni.scripts.aws.requests_get', Mock(return_value=urllib3.HTTPResponse(status=200, body=b'foo')))
def test_aws_bizare_response(self):
conn = AWSConnection('test')
self.assertFalse(conn.aws_available())
@patch('patroni.scripts.aws.requests_get', Mock(return_value=urllib3.HTTPResponse(status=503, body=b'Error')))
@patch('sys.exit', Mock())
def test_main(self):
self.assertIsNone(_main())
sys.argv = ['aws.py', 'on_start', 'replica', 'foo']
self.assertIsNone(_main())
|
from django.db.models.loading import get_models
import os
import xmltodict
class ModelGenerator:
def __init__(self, file):
self.file = file
def run(self):
try:
doc = xmltodict.parse(self.file.read())
except Exception:
return False
result, admin, form = self.parseXML(doc)
mfile = open('dynamic/models.py', 'w')
mfile.write(result)
mfile.close()
os.system('./manage.py schemamigration dynamic --auto')
os.system('./manage.py migrate dynamic')
mfile = open('dynamic/admin.py', 'w')
mfile.write(admin)
mfile.close()
mfile = open('dynamic/forms.py', 'w')
mfile.write(form)
mfile.close()
return True
def parseXML(self, doc):
result = "# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\n\n\n"
form = 'from django.forms import ModelForm\nfrom dynamic.models import *\n\n'
admin = "from django.contrib import admin\nfrom dynamic.models import *\n\n"
for k, v in doc['models'].items():
model = k.title()[:-1]
#models.append(model)
admin += "admin.site.register(globals().get('%s'))\n" % model
form += 'class %sForm(ModelForm):\n\tclass Meta:\n\t\tmodel = %s\n\n' \
% (model, model)
result += "class %s(models.Model):\n\tdb_table = '%s'\n\n" \
% (model, k)
for i, j in v.items():
if type(j) == list:
for ki in j:
name = ''
_type = ''
title = ''
for vi, vj in ki.items():
if vi == '@id':
name = '%s' % vj
elif vi == '@type':
if vj == 'char':
_type = 'Char'
elif vj == 'int':
_type = 'Integer'
elif vj == 'date':
_type = 'Date'
elif vi == '@title':
title = vj
if name and title and _type:
result += "\t%s = models.%sField(_('%s')%s)\n" % \
(name, _type, title, ', max_length=255' \
if _type == 'Char' else '')
result += '\n\n'
result += '\n'
form += '\n'
return result, admin, form
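# Illustrative example of the XML structure parseXML() expects (element and
# attribute names inferred from the parsing code above, not from a schema):
#
#     <models>
#         <users>
#             <field id="name" type="char" title="Name"/>
#             <field id="age" type="int" title="Age"/>
#         </users>
#     </models>
#
# xmltodict maps attributes to '@'-prefixed keys, and the key 'users' becomes
# the model name 'User' via k.title()[:-1].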
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Filename: step12_generate_country_level_regression_file
# @Date: 2020/4/16
# @Author: Mark Wang
# @Email: wangyouan@gamil.com
"""
python -m ConstructRegressionFile.Stata.step12_generate_country_level_regression_file
"""
import os
import pandas as pd
from pandas import DataFrame
from Constants import Constants as const
from Utilities.generate_stata_code import generate_foreach2_dep_ind_code
if __name__ == '__main__':
date_str = '20200416'
save_file = os.path.join(const.STATA_CODE_PATH, '{}_country_reg_code_2.do'.format(date_str))
output_path = os.path.join(const.STATA_RESULT_PATH, '{}_country_reg_code_1'.format(date_str))
if not os.path.isdir(output_path):
os.makedirs(output_path)
data_path = os.path.join(const.STATA_DATA_PATH, '20200415_country_year_reg_data.dta')
cmd_list = ['clear', 'use "{}"'.format(data_path)]
ind_list = ['Extend', 'ToUnlimit', 'Shrink', 'ToLimit']
c_ctrl_info = ['ln_GDP', 'ln_GDP_PC', 'UNEMP_RATE', 'GDP_GROWTH']
data_df: DataFrame = pd.read_stata(data_path)
dep_keys = [i for i in data_df.keys() if
i.endswith("_1") and not i.startswith('formal') and not i.startswith('real') and i not in {
'BUS_SCORE_1'}]
for pre in ['formal', 'real']:
output_file = os.path.join(output_path, 'country_result_{}.xls'.format(pre))
ind_vars = ['{}_{}_post2'.format(pre, suf) for suf in ind_list]
real_ctrl = ' '.join(c_ctrl_info)
cmd_list.extend(
generate_foreach2_dep_ind_code(' '.join(dep_keys), ' '.join(ind_vars), real_ctrl,
fe_option='{} fyear'.format(const.COUNTRY_ISO3),
cluster_option=const.COUNTRY_ISO3, output_path=output_file, condition='',
text_option='Country Dummy, Yes, Year Dummy, Yes, Cluster, Country',
data_description='tstat bdec(4) tdec(4) rdec(4)'))
with open(save_file, 'w') as f:
f.write('\n'.join(cmd_list))
print('do "{}"'.format(save_file))
|
import socket
import json
import requests
"""
client_socket = socket.socket()
client_socket.connect(('127.0.0.1', 6457))#ip of the localhost
#nb = str(input('Choose a number: '))
client_socket.send(bytes("shir","utf-8"))
data = client_socket.recv(1024)
print("The server sent: " + data)
data = data.decode("utf-8")
client_socket.close()
########## class Menu #########
class Menu():
def __init__(self):
None #do nothing
def displayMenu(self):
print("to join to existent room input 0" + "\nto ask from the server the List of existing rooms input 1" + "\nto ask from the server to create(and join to the) new room input 2" +
"\nto choose cell please input 3" +"\n")
def displayResult(self,res):
print(res)
########## class Session ############
class Session:
def __init__(self):
self.client1 = socket.socket() #open socket
def SocketsConnection(self):
self.client1.connect(("127.0.0.1", 9876)) #open connection with the server
def SocketCloser(self):
self.client1.close()
"""
"""""
########### class UserChoice ###########
class UserChoice:
def __init__(self):
None # do nothing
def joinExistentRoom_0(self,connect):
num_of_room = input("please enter number of room: ")
data = "{NumOfExistentRoom:" +num_of_room+" , NumberOfOption: 0 }" #0 Indicates to the server what the client meant to do
data_json = json.dumps(data)
connect.send(data_json, "utf-8") #encode
serverResponse = connect.recv(1024)
serverResponse = serverResponse.decode("utf-8") #decode
print(serverResponse)
if( serverResponse == 300) : #300 is not-success
print("ERROR : The room probably does not exist OR there are ander 5 players in the room")
else:
print("The game starts when you have 5 players in the room") # The game starts when you have at least 5 players in the room
def ToSeeTheListOfRooms_1(self,connect):
data = "{NumberOfOption: 1 }"
data_json = json.dumps(data)
connect.send(data_json, "utf-8") # encode
serverResponse = connect.recv(1024)
serverResponse = serverResponse.decode("utf-8") #decode
print("The lost of the rooms : " + serverResponse)
def AskForCreateNewRoom_2(self,connect):
data = "{NumberOfOption: 1 }"
data_json = json.dumps(data)
connect.send(data_json, "utf-8") # encode
########## Main ##############
def main():
S = Session()
connect = S.SocketsConnection()
m = Menu()
m.displayMenu()
while True:
str = input("please enter number between 0 to 3")
number = int(str) #casting from string to int
U=UserChoice()
if (number == 0):
U.joinExistentRoom_0(connect)
elif(number == 1):
U.ToSeeTheListOfRooms_1(connect)
elif (number == 2):
U.AskForCreateNewRoom_2(connect)
S.SocketCloser()
if __name__ == "__main__":
main()
""" |
from bst import BinarySearchTree
from vpython import *
bst = BinarySearchTree()
a=[1,2,4,5,6,7]
pos=vector(0,0,0)
leftstartteta=pi
rightstartteta=0
bst.createTree(a,pos,1)
bst.insertElement(3)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from head import *
def gene_django_frame(head, version, json_inst):
Body = json.dumps(json_inst)
Version = '{:{fill}{width}{base}}'.format(version, fill = '0', width = 2 * D_version_byte, base = 'x')
Length = '{:{fill}{width}{base}}'.format(len(Body), fill = '0', width = 2 * D_lenght_byte, base = 'x')
message = head + a2b_hex(Version) + a2b_hex(Length) + Body
return message |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
import pytest
from pants.backend.python.macros.python_artifact import _normalize_entry_points
from pants.testutil.pytest_util import no_exception
@pytest.mark.parametrize(
"entry_points, normalized, expect",
[
(
dict(console_scripts=dict(foo="bar:baz")),
dict(console_scripts=dict(foo="bar:baz")),
no_exception(),
),
(
dict(console_scripts=["foo=bar:baz", "barty=slouch:ing"]),
dict(console_scripts=dict(foo="bar:baz", barty="slouch:ing")),
no_exception(),
),
(
dict(
console_scripts=["foo=bar:baz"],
other_plugins=["plug=this.ok"],
my=dict(already="norm.alize:d"),
),
dict(
console_scripts=dict(foo="bar:baz"),
other_plugins=dict(plug="this.ok"),
my=dict(already="norm.alize:d"),
),
no_exception(),
),
(
["not=ok"],
None,
pytest.raises(
ValueError,
match=re.escape(
r"The `entry_points` in `python_artifact()` must be a dictionary, but was ['not=ok'] with type list."
),
),
),
(
dict(ep=["missing.name:here"]),
None,
pytest.raises(
ValueError,
match=re.escape(
r"Invalid `entry_point`, expected `<name> = <entry point>`, but got 'missing.name:here'."
),
),
),
(
dict(ep="whops = this.is.a:mistake"),
None,
pytest.raises(
ValueError,
match=re.escape(
r"The values of the `entry_points` dictionary in `python_artifact()` must be a list of strings "
r"or a dictionary of string to string, but got 'whops = this.is.a:mistake' of type str."
),
),
),
],
)
def test_normalize_entry_points(entry_points, normalized, expect) -> None:
with expect:
assert _normalize_entry_points(entry_points) == normalized
|
import pandas as pd
import os
import numpy as np
import statsmodels.formula.api as smf
###############################################################################
def get_data():
df = pd.read_stata("data/ReplicationDataset_ThePriceofForcedAttendance.dta")
df["grade"] = df["grade"].astype(float)
# treatment variable
df["treat"] = 0
df.loc[df["firstyeargpa"] < 7, ["treat"]] = 1
# centered running variable
df["firstyeargpa_centered"] = -1*(df["firstyeargpa"] - 7)
# pass course variable
df["passcourse"] = 0
df.loc[df["grade"] >= 5.5, ["passcourse"]] = 1
return df
########################################################################
def get_truncated_data(df,bandwidth,cohort,coursetype):
if cohort==1:
df_temp = df.loc[df["cohort"] < 6]
elif cohort==6:
df_temp = df.loc[df["cohort"] == 6]
elif cohort== "all cohorts":
pass
if bandwidth == "total range":
pass
else:
df_temp = df_temp.loc[df_temp["firstyeargpa"]<=7 + bandwidth]
df_temp = df_temp.loc[df_temp["firstyeargpa"]>=7 - bandwidth]
if coursetype == "all courses":
pass
elif coursetype in ["voluntary","encouraged","forced"]:
df_temp = df_temp.loc[df_temp["coursepolicy"]== coursetype]
df_temp.reset_index(inplace=True)
return df_temp
#################################################################################
def collect_each_student(df):
sing_id = [df["studentid"][0]] # studendid of each student
sing_gpa = [df["firstyeargpa"][0]] # firstyeargpa of each student
for i in range(len(df)-1):
if df["studentid"][i] != df["studentid"][i+1]:
sing_id.append(df["studentid"][i+1])
sing_gpa.append(df["firstyeargpa"][i+1])
df_temp = pd.DataFrame(sing_id, columns=["studentid"])
df_temp["firstyeargpa"] = sing_gpa
return df_temp
##################################################################################
def get_bins_func(df,variable,cohort,coursetype):
mean_loc = np.zeros((20,1))
numobs_loc = np.zeros((20,1))
pos_loc = np.zeros((20,1))
df_temp = get_truncated_data(df,"total range",cohort,coursetype)
#df.temp = df.loc[df["coursepolicy"] == coursetype]
for i, xlow in enumerate(np.arange(6.5,7.5,0.05)):
df_temp1 = df_temp
df_temp1 = df_temp1.loc[df_temp1["firstyeargpa"]>=xlow]
df_temp1 = df_temp1.loc[df_temp1["firstyeargpa"]< xlow+0.05]
#df.temp1 = df.temp1.loc[df.temp1["attendance"]!=0]
mean_loc[i] = df_temp1[variable].mean()
numobs_loc[i] = len(df_temp1)
return(mean_loc, numobs_loc)
####################################################################################
def get_interactionterms(df_input):
df = df_input
### treatment interaction term:
df["pol1"] = df["firstyeargpa"] - 7
df["pol1t"] = df["pol1"]*df["treat"]
### Coursetype indicator:
df["volcourse"] = 0
df.loc[df["coursepolicy"] == "voluntary", ["volcourse"]] = 1
df["forcourse"] = 0
df.loc[df["coursepolicy"] == "forced", ["forcourse"]] = 1
### Interaction terms: treatment x coursetype
df["treatmentvol"] = df["treat"]*df["volcourse"]
df["treatmentfor"] = df["treat"]*df["forcourse"]
df["pol1vol"] = df["pol1"]*df["volcourse"]
df["pol1for"] = df["pol1"]*df["forcourse"]
df["pol1tvol"] = df["pol1t"]*df["volcourse"]
df["pol1tfor"] = df["pol1t"]*df["forcourse"]
return df
########################################################################################
def get_kweights(df_input,bandwidth):
df = df_input
df["kwgt"] = (1-abs((df["firstyeargpa"]-7)/bandwidth))
return df
########################################################################################
def get_fakecutoff(df,coursetype,y_var):
rslt_temp = np.zeros((4,3))
for i,c in enumerate([6,8,8.25,9]):
### data
df_reg = get_truncated_data(df,"total range",1,coursetype)
### create running variable centered at fake cutoff and fake treatment variables
df_reg["X_fake"] = -1*(df_reg["firstyeargpa"] - c)
df_reg["treat_fake"] = 0
df_reg.loc[df_reg["firstyeargpa"] < c, ["treat_fake"]] = 1
df_reg["treat_X_fake"] = df_reg["treat_fake"] * df_reg["X_fake"]
df_reg["kwgt_fake"] = 0
df_reg.loc[abs(df_reg["X_fake"]) <= 0.365, ["kwgt_fake"]] = (1-abs((df["firstyeargpa"]-c)/0.365))
df_reg = df_reg.loc[df_reg["firstyeargpa"]<= c + 0.365]
df_reg = df_reg.loc[df_reg["firstyeargpa"]>= c - 0.365]
### locally linear regression
formula = y_var + " ~ treat_fake + X_fake + treat_X_fake"
rslt = smf.ols(formula=formula, data=df_reg, weights=df_reg["kwgt_fake"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
### save results
rslt_temp[i,0] = c
rslt_temp[i,1] = rslt.params[1]
rslt_temp[i,2] = rslt.pvalues[1]
#rslt_temp = np.round(rslt_temp,3)
return rslt_temp
###########################################################################################################################
def get_results_abolition(df,coursetype):
rslt_temp = np.zeros((4,1))
df_temp = get_truncated_data(df,0.365,6,coursetype)
df_temp1 = get_interactionterms(df_temp)
df_reg = get_kweights(df_temp1,0.365)
rslt = smf.ols(formula="stdgradeabolition ~ treat + firstyeargpa_centered + pol1t", data=df_reg, weights=df_reg["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
rslt_temp[0,0] = rslt.params[1]
rslt_temp[1,0] = rslt.bse[1]
rslt_temp[2,0] = rslt.pvalues[1]
rslt_temp[3,0] = len(df_reg)
rslt_temp = np.round(rslt_temp,3)
return rslt_temp
######################################################################################################################
def get_bandwidth_results(df,coursetype,y_var):
### empty results canvas:
rslt_temp = np.zeros((6,3))
for i,h in enumerate([0.5, 0.4, 0.365, 0.3, 0.2, 0.1]):
### data within bandwidth:
df_temp = get_truncated_data(df,h,1,coursetype)
df_temp1 = get_interactionterms(df_temp)
df_reg = get_kweights(df_temp1, h)
### locally linear regression
formula = y_var + " ~ treat + firstyeargpa_centered + pol1t"
rslt = smf.ols(formula=formula, data=df_reg, weights=df_reg["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
### save regression results
rslt_temp[i,0] = h
rslt_temp[i,1] = rslt.params[1]
rslt_temp[i,2] = rslt.pvalues[1]
return rslt_temp
###############################################################################################################
def get_bandwidth_results2(df,coursetype,y_var):
### empty results canvas:
rslt_temp = np.zeros((6,3))
for i,h in enumerate([0.5, 0.4, 0.365, 0.3, 0.2, 0.1]):
### data within bandwidth:
df_temp = get_truncated_data(df,h,1,coursetype)
df_temp1 = get_interactionterms(df_temp)
df_reg = get_kweights(df_temp1, h)
df_reg["X2"] = df_reg["firstyeargpa_centered"]**2
df_reg["pol1t2"] = df_reg["X2"]*df_reg["treat"]
### locally quadratic regression
formula = y_var + " ~ treat + firstyeargpa_centered + X2 + pol1t + pol1t2"
#formula = y_var + " ~ treat + firstyeargpa_centered + pol1t"
rslt = smf.ols(formula=formula, data=df_reg, weights=df_reg["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
### save regression results
rslt_temp[i,0] = h
rslt_temp[i,1] = rslt.params[1]
rslt_temp[i,2] = rslt.pvalues[1]
return rslt_temp
##########################################################################################################
def get_bandwidth_results3(df,coursetype,y_var):
### empty results canvas:
rslt_temp = np.zeros((6,3))
for i,h in enumerate([0.5, 0.4, 0.365, 0.3, 0.2, 0.1]):
### data within bandwidth:
df_temp = get_truncated_data(df,h,1,coursetype)
df_temp1 = get_interactionterms(df_temp)
df_reg = get_kweights(df_temp1, h)
df_reg["X2"] = df_reg["firstyeargpa_centered"]**2
df_reg["X3"] = df_reg["firstyeargpa_centered"]**3
df_reg["pol1t2"] = df_reg["X2"]*df_reg["treat"]
df_reg["pol1t3"] = df_reg["X3"]*df_reg["treat"]
### locally cubic regression
formula= y_var + " ~ treat + firstyeargpa_centered + X2 + X3 + pol1t + pol1t2 + pol1t3"
#formula = y_var + " ~ treat + firstyeargpa_centered + pol1t"
rslt = smf.ols(formula=formula, data=df_reg, weights=df_reg["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
### save regression results
rslt_temp[i,0] = h
rslt_temp[i,1] = rslt.params[1]
rslt_temp[i,2] = rslt.pvalues[1]
return rslt_temp
###################################################################################################################### |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param A : head node of linked list
# @return the head node in the linked list
def reorderList(self, A):
if not A or not A.next or not A.next.next:
return A
"""
My simple three-pointers solution was correct, but failed for not being
time-efficient enough :-( Let's try this one now ...
"""
nodes_list = []
current = A
while current:
nodes_list.append(current)
current = current.next
for i in range((len(nodes_list) - 1) // 2):
print(i, nodes_list[i].val)
temp = nodes_list[i].next
nodes_list[i].next = nodes_list[-(i + 1)]
nodes_list[-(i + 1)].next = temp
nodes_list[-(i + 2)].next = None
return A
|
#!/usr/bin/env python
"""
v0.1 Given a source-id & XML fpath (to be created),
query RDBs to form and write the XML.
NOTE: To be called by PHP script.
TODO: test this code (form VOSource.xml) for pairitel, tcptutor, sdss
"""
import sys, os
"""
sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + \
'Software/feature_extract'))
sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + \
'Software/feature_extract/Code'))
# These are only needed in: retrieve_tcptutor_vosource_and_add_features()
from Code import generators_importers
try:
from Code import *
except:
pass # lyra fails somewhere in import, probably due to Python2.4 use.
import db_importer
"""
import random
#sys.path.append("/scisoft/Library/Frameworks/Python.framework/Versions/2.4/lib/python2.4/site-packages")
#os.environ["TCP_DIR"] = "/Network/Servers/boom.cluster.private/Home/pteluser/src/TCP/"
import MySQLdb
import ingest_tools # This seems overkill, but module contains all RDB params.
import feature_extraction_interface
import db_importer
class Rdb_Form_VOsource:
def __init__(self, pars, rdbt, srcdbt, feat_db, dbi_src):
self.pars = pars
self.rdbt = rdbt
self.srcdbt = srcdbt
self.feat_db = feat_db
self.rdb_gen_vosource_urlroot = pars['rdb_gen_vosource_urlroot']
self.rdb_gen_vosource_dirpath = pars['rdb_gen_vosource_dirpath']
self.dbi_src = dbi_src
def add_object_table_data_to_sdict(self, src_id, survey_name, sdict):
""" Query RDB and add data to source's sdict{}.
"""
# SDSS:
#SELECT t, jsb_mag, jsb_mag_err FROM sdss_events_a JOIN obj_srcid_lookup USING (obj_id) WHERE obj_srcid_lookup.src_id = 106597;
# PAIRITEL:
#SELECT t, jsb_mag, jsb_mag_err,filt FROM pairitel_events_a JOIN obj_srcid_lookup USING (obj_id) WHERE obj_srcid_lookup.src_id = 18804;
# TCPTUTOR:
#SELECT obs_data.obsdata_time, obs_data.obsdata_val, obs_data.obsdata_err from obs_data JOIN observations ON observations.observation_id = obs_data.observation_id WHERE observations.source_id = 9183;
### Object table query:
if survey_name == 'tcptutor':
tcptutor_src_id = src_id - 100000000
self.db = MySQLdb.connect(host=self.pars['tcptutor_hostname'], user=self.pars['tcptutor_username'], db=self.pars['tcptutor_database'], passwd=self.pars['tcptutor_password'])
self.cursor = self.db.cursor()
select_str = "SELECT obs_data.obsdata_time, obs_data.obsdata_val, obs_data.obsdata_err, filters.filter_name from obs_data JOIN observations ON observations.observation_id = obs_data.observation_id JOIN filters ON filters.filter_id = observations.filter_id WHERE observations.source_id = %s" % (tcptutor_src_id)
self.cursor.execute(select_str)
results = self.cursor.fetchall()
elif survey_name == 'pairitel':
select_str = "SELECT t, jsb_mag, jsb_mag_err,filt FROM %s JOIN %s USING (obj_id) WHERE %s.src_id=%d" % \
(self.pars['rdb_table_names']['pairitel'], \
self.pars['obj_srcid_lookup_tablename'], \
self.pars['obj_srcid_lookup_tablename'], src_id)
self.rdbt.cursor.execute(select_str)
results = self.rdbt.cursor.fetchall()
elif survey_name == 'sdss':
select_str = "SELECT t, jsb_mag, jsb_mag_err,filt FROM %s JOIN %s USING (obj_id) WHERE %s.src_id=%d" % \
(self.pars['rdb_table_names']['sdsss'], \
self.pars['obj_srcid_lookup_tablename'], \
self.pars['obj_srcid_lookup_tablename'], src_id)
self.rdbt.cursor.execute(select_str)
results = self.rdbt.cursor.fetchall()
try:
for result in results:
if type(result[3]) == type(2):
# sdss & pairitel case
filt_name = self.feat_db.final_features.filter_list[result[3]]
else:
# (tcptutor) Explicit filtername given
filt_name = result[3]
if not sdict['ts'].has_key(filt_name):
sdict['ts'][filt_name] = {}
sdict['ts'][filt_name]['t'] = []
sdict['ts'][filt_name]['m'] = []
sdict['ts'][filt_name]['m_err'] = []
sdict['ts'][filt_name]['t'].append(result[0])
sdict['ts'][filt_name]['m'].append(result[1])
sdict['ts'][filt_name]['m_err'].append(result[2])
except:
print "Failed query:", select_str
raise
def add_source_table_data_to_sdict(self, src_id, survey_name, sdict):
""" Query RDB and add data to source's sdict{}.
"""
### Source table query:
select_str = "SELECT ra, decl, ra_rms, dec_rms, feat_gen_date FROM %s WHERE src_id=%d" % (self.pars['srcid_table_name'], src_id)
self.srcdbt.cursor.execute(select_str)
results = self.srcdbt.cursor.fetchall()
try:
sdict['ra'] = str(results[0][0])
sdict['dec'] = str(results[0][1])
sdict['ra_rms'] = str(results[0][2])
sdict['dec_rms'] = str(results[0][3])
sdict['feat_gen_date'] = str(results[0][4])
except:
print "Failed query:", select_str
raise
def add_feature_table_data_to_sdict(self, src_id, survey_name, sdict):
""" Query RDB and add data to source's sdict{}.
"""
### Features table query:
select_str = "SELECT %s.feat_val, %s.feat_name, %s.filter_id, %s.doc_str FROM %s JOIN %s USING (feat_id) WHERE %s.src_id = %d" % (self.pars['feat_values_tablename'], self.pars['feat_lookup_tablename'], self.pars['feat_lookup_tablename'], self.pars['feat_lookup_tablename'], self.pars['feat_values_tablename'], self.pars['feat_lookup_tablename'], self.pars['feat_values_tablename'], src_id)
self.feat_db.cursor.execute(select_str)
results = self.feat_db.cursor.fetchall()
sdict['feature_docs'] = {}
try:
for result in results:
filt_name = self.feat_db.final_features.filter_list[result[2]]
sdict['feature_docs'][result[1]] = result[3] # __doc__ string in TABLE
if not sdict['features'].has_key(filt_name):
sdict['features'][filt_name] = {}
sdict['features'][filt_name][result[1]] = str(result[0])
except:
print "Failed query:", select_str
raise
def determine_survey_name(self, src_id):
""" Determine which survey a source-id came from.
"""
#NOTE: survey_name : 'pairitel', 'sdss', OR 'tcptutor'
if src_id > 100000000:
return 'tcptutor'
else:
select_str = "SELECT survey_id from %s WHERE src_id=%d LIMIT 1" % \
(self.pars['obj_srcid_lookup_tablename'], src_id)
self.rdbt.cursor.execute(select_str)
results = self.rdbt.cursor.fetchall()
if len(results) != 1:
print "src_id unknown"
raise
else:
for survey_name,survey_id in self.pars['survey_id_dict'].\
iteritems():
if results[0][0] == survey_id:
return survey_name
print "src_id unknown:", type(results[0][0])
raise
print "src_id unknown" # probably dont get here
raise
def form_sdict_via_rdb_selects(self, src_id, survey_name):
""" Form db_importer.Source style sdict{} using src_id
by quering RDB tables.
"""
sdict = {}
sdict['src_id'] = src_id
sdict['features'] = {}
sdict['ts'] = {}
self.add_object_table_data_to_sdict(src_id, survey_name, sdict)
self.add_source_table_data_to_sdict(src_id, survey_name, sdict)
self.add_feature_table_data_to_sdict(src_id, survey_name, sdict)
return sdict
def retrieve_tcptutor_xml_and_merge_with_xml(self,src_id, rdb_gen_xml_str):
""" Retrieve TCPTUTOR VOSource from TCPTUTOR server and merge with
given XML, which has feature info.
"""
tcptutor_src_id = src_id - 100000000
source_url = "http://lyra.berkeley.edu/tutor/pub/vosource.php?Source_ID=%d" % (tcptutor_src_id)
wget_fpath = "/tmp/%d.wget" % (random.randint(0,100000000))
wget_str = "wget -t 1 -T 5 -O %s %s" % (wget_fpath, source_url)
os.system(wget_str)
if not os.path.exists(wget_fpath):
raise IOError("wget failed to retrieve " + source_url)
fp = open(wget_fpath)
mondo_str = fp.read()
fp.close()
lines = mondo_str.split('\n')
return_xml_str = ""
i_tcptut_votimeseries_end = 0
for line in lines:
return_xml_str += line + '\n'
if "</VOTIMESERIES>" in line:
#i_line_votimeseries_end = i
break
i_tcptut_votimeseries_end += 1
#TODO: append write_xml... to line
gen_lines = rdb_gen_xml_str.split('\n')
i = 0
for line in gen_lines:
if "<Features>" in line:
i_gen_features_begin = i
elif "</Features>" in line:
i_gen_features_end = i
i += 1
for line in gen_lines[i_gen_features_begin:i_gen_features_end+1]:
return_xml_str += line + '\n'
for line in lines[i_tcptut_votimeseries_end+1:]:
return_xml_str += line + '\n'
os.system("rm " + wget_fpath)
return return_xml_str
# TODO: read XML from file
# find </VOTIMESERIES>
# insert <Features>
# </Features>
# return resulting XML
def generate_vosource_file(self, src_id, vosource_fpath):
"""Given a srcid, retrieve from RDB and form VOSource XML.
Write XML to web local path.
"""
survey_name = self.determine_survey_name(src_id)
sdict = self.form_sdict_via_rdb_selects(src_id, survey_name)
self.dbi_src.source_dict_to_xml(sdict)
write_xml_str = self.dbi_src.xml_string
if survey_name == 'tcptutor':
merged_xml_str = self.retrieve_tcptutor_xml_and_merge_with_xml(\
src_id, write_xml_str)
write_xml_str = merged_xml_str
# KLUDGE: write xml locally, then scp to server/web host (lyra):
fpath = '/tmp/' + vosource_fpath[vosource_fpath.rfind('/')+1:]
fp = open(fpath, 'w')
fp.write(write_xml_str)
fp.close()
scp_command = "scp -q %s %s:%s" % (fpath, self.pars['rdb_gen_vosource_hostname'], vosource_fpath)
os.system(scp_command)
os.system("rm " + fpath)
def get_vosource_url_for_srcid(self, src_id):
""" Given a srcid, retrieve from RDB and form VOSource XML.
Write XML to web local path and return URL to VOSource XML.
"""
vosource_url = "%s/%d.xml" % (self.rdb_gen_vosource_urlroot, src_id)
vosource_fpath = "%s/%d.xml" % (self.rdb_gen_vosource_dirpath, src_id)
# # # # # # # # # #
# # # # I comment this out for TESTING only.
#if os.path.exists(vosource_fpath):
# return vosource_url # VOSource...xml already exists
#try:
if 1:
self.generate_vosource_file(src_id, vosource_fpath)
#except:
# return "database_query_error"
print '<A href="%s">Source ID=%d VOSource.xml</A>' % (vosource_url, \
src_id)
return '<A href="%s">Source ID=%d VOSource.xml</A>' % (vosource_url, \
src_id)
if __name__ == '__main__':
#src_id = 100013522 # 8
server_ip = "192.168.1.65"
server_user = "pteluser"
ingest_tools.pars['rdb_host_ip_2'] = server_ip
ingest_tools.pars['rdb_user'] = server_user
ingest_tools.pars['rdb_name_2'] = 'object_test_db'
ingest_tools.pars['rdb_host_ip_4'] = server_ip
ingest_tools.pars['rdb_user_4'] = server_user
ingest_tools.pars['rdb_name_4'] = 'source_test_db'
ingest_tools.pars['rdb_features_host_ip'] = server_ip
ingest_tools.pars['rdb_features_user'] = server_user
ingest_tools.pars['rdb_features_db_name'] = 'source_test_db'
ingest_tools.pars['tcptutor_hostname'] = 'lyra.berkeley.edu'
ingest_tools.pars['tcptutor_username'] = 'pteluser'
ingest_tools.pars['tcptutor_password'] = 'Edwin_Hubble71'
ingest_tools.pars['source_region_lock_host_ip'] = server_ip
ingest_tools.pars['source_region_lock_user'] = server_user
ingest_tools.pars['source_region_lock_dbname'] = 'source_test_db'
ingest_tools.pars['footprint_host_ip'] = server_ip
ingest_tools.pars['footprint_user'] = server_user
ingest_tools.pars['footprint_dbname'] = "object_test_db"
#if (len(sys.argv) != 2):
# print "invalid input"
# sys.exit()
#try:
# src_id = int(sys.argv[1])
#except:
# print "invalid src_id"
# sys.exit()
rdbt = ingest_tools.Rdb_Tools(ingest_tools.pars, None, None, \
rdb_host_ip=ingest_tools.pars['rdb_host_ip_2'], \
rdb_user=ingest_tools.pars['rdb_user'], \
rdb_name=ingest_tools.pars['rdb_name_2'])
srcdbt = ingest_tools.Source_Database_Tools(\
ingest_tools.pars, None, None, \
rdb_host_ip=ingest_tools.pars['rdb_host_ip_4'], \
rdb_user=ingest_tools.pars['rdb_user_4'],\
rdb_name=ingest_tools.pars['rdb_name_4'])
feat_db = feature_extraction_interface.Feature_database()
feat_db.initialize_mysql_connection(\
rdb_host_ip=ingest_tools.pars['rdb_features_host_ip'],\
rdb_user=ingest_tools.pars['rdb_features_user'], \
rdb_name=ingest_tools.pars['rdb_features_db_name'], \
feat_lookup_tablename=ingest_tools.pars['feat_lookup_tablename'], \
feat_values_tablename=ingest_tools.pars['feat_values_tablename'])
dbi_src = db_importer.Source(make_dict_if_given_xml=False)
#rfv = Rdb_Form_VOsource(ingest_tools.pars, rdbt, srcdbt, feat_db, dbi_src)
#rfv.get_vosource_url_for_srcid(src_id)
#sys.exit()
import SimpleXMLRPCServer
server = SimpleXMLRPCServer.SimpleXMLRPCServer(\
("lyra.berkeley.edu", \
34583))
#server = SimpleXMLRPCServer.SimpleXMLRPCServer(\
# ("192.168.1.65", \
# 34583))
server.register_instance(Rdb_Form_VOsource(ingest_tools.pars, rdbt, srcdbt, feat_db, dbi_src))
server.register_multicall_functions()
server.register_introspection_functions()
server.serve_forever()
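# A minimal client-side sketch (an assumption, not part of this module):
# the XML-RPC server above exposes Rdb_Form_VOsource methods, so a
# Python 2 client could request a VOSource URL like this:
#
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy("http://lyra.berkeley.edu:34583")
#   print proxy.get_vosource_url_for_srcid(100013522)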
|
import os
from datetime import datetime
import json
import webapp2
import jinja2
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.api import mail
class NewHandler(webapp2.RequestHandler):
def post(self):
title = self.request.get('title')
tag = self.request.get('tag')
desc = self.request.get('desc')
user = users.get_current_user()
if not title or not tag or not desc or not user:
self.error(400)
return  # without this return, the story would still be created below
story = Story(author=user.email(), title=title, tag=tag, desc=desc, audience=[user.email()])
key = story.put()
self.response.headers['Content-Type'] = 'application/json'
if key:
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
template = jinja_environment.get_template('mail_template.html')
email_body = template.render({'id': key.id(), 'title': title, 'desc': desc})
message = mail.EmailMessage(
sender=user.email(),
to=user.email(),
subject='Por favor califica esto',  # "Please rate this"
html=email_body)
try:
message.send()
self.response.out.write(json.dumps({'error': False}))
except Exception:
self.error(500)
else:
self.response.out.write(json.dumps({'error': True}))
def get(self):
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url(self.request.uri))
return
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
template = jinja_environment.get_template('nuevo.html')
now = datetime.now().date()
self.response.write(template.render({'email': user.email(), 'date': now}))
class Story(db.Model):
author = db.StringProperty(required=True)
title = db.TextProperty(required=True)
desc = db.TextProperty(required=True)
tag = db.StringProperty(required=True)
audience = db.StringListProperty(required=True)
date = db.DateProperty(auto_now_add=True)
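# A minimal wiring sketch (route path and debug flag are assumptions, not
# taken from this file): webapp2 handlers are normally exposed through a
# WSGIApplication, e.g.
#
#   app = webapp2.WSGIApplication([('/new', NewHandler)], debug=True)
|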
# app/models.py
from werkzeug.security import generate_password_hash, check_password_hash
from flask import current_app
from flask_login import UserMixin
from datetime import datetime
from app import db, login
import rq
import sys
"""
This module contains the database models (tables).
"""
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.Text, nullable=False)
password_hash = db.Column(db.Text, nullable=False)
telephone = db.Column(db.Integer, unique=True, nullable=False)
paid = db.Column(db.Boolean, nullable=False, default=False)
date_paid = db.Column(db.DateTime, nullable=True)
creation_date = db.Column(db.DateTime, default=datetime.now)  # pass the callable so it is evaluated per-row, not at import time
# Relation between users and payments
payer = db.relationship(
'Payment',
primaryjoin='Payment.source == User.telephone',
backref='payer', lazy='dynamic',
)
@property
def password(self):
"""
Prevent password from being accessed
"""
raise AttributeError('password is not a readable attribute.')
@password.setter
def password(self, password):
"""
Set password to a hashed password
"""
self.password_hash = generate_password_hash(password)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
"""
Check if hashed password matches actual password
"""
return check_password_hash(self.password_hash, password)
def save(self):
try:
db.session.add(self)
db.session.commit()
return 0
except Exception as err:
print(err)
db.session.rollback()
return 1
def __repr__(self):
return f"User('{self.username}', '{self.telephone}')"
@login.user_loader
def load_user(user_id):
"""
Flask-Login knows nothing about databases, so it needs the application's
help to load a user. The extension expects the application to register a
user loader function that returns a user given the ID.
"""
return User.query.get(int(user_id))
class Payment(db.Model):
__tablename__ = 'payments'
code = db.Column(db.Text, primary_key=True)
sender = db.Column(db.Text, default='')
creation_date = db.Column(db.DateTime, default=datetime.now)  # callable, evaluated per-row
amount = db.Column(db.Text, default='Ksh0.00')
source = db.Column('User', db.String(60),
db.ForeignKey('users.telephone', ondelete='CASCADE', onupdate='CASCADE'), )
scheduled_task_id = db.Column(db.String(36), default='')
def __repr__(self):
return f"Payment('{self.code}', 'Sender: {self.sender}', 'Phone: {self.source}')"
class ScheduledTask(db.Model):
"""
Create a Scheduled Task table.
All processes planned to run at a specified time or periodically are stored here.
Interval is saved in seconds.
"""
__tablename__ = 'scheduled_tasks'
id = db.Column(db.String(36), primary_key=True, nullable=False)
name = db.Column(db.String(128), index=True)
start = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)  # callable, evaluated per-row
interval = db.Column(db.Integer, default=0)
description = db.Column(db.Text)
cancelled = db.Column(db.Boolean, default=False)
@staticmethod
def get_scheduled_task(task_id):
try:
# Loads the Job instance from the data that exists in Redis about it
rq_job = rq.job.Job.fetch(task_id, connection=current_app.redis)
except BaseException as err:
print(err)
current_app.logger.exception(err, exc_info=sys.exc_info())
return None
return rq_job
def cancel_scheduled_task(self, job):
"""
Given a job, check if it is in scheduler and cancel it if true
:param job: RQ Job or Job ID
:return: None
"""
try:
status = 0
scheduler = current_app.scheduler
if job and isinstance(job, (rq.job.Job, str)) and job in scheduler:
scheduler.cancel(job)
self.cancelled = True
status = self.save()
return status
except BaseException as err:
print(err)
current_app.logger.exception(err, exc_info=sys.exc_info())
return 'Unable to cancel scheduled task'
@staticmethod
def retrieve_scheduled_tasks(tasks: list):
_tasks = []
for task in tasks:
_tasks.append(
{
'id': task.id,
'name': task.name,
'description': task.description,
'cancelled': task.cancelled,
'beginning': task.start.strftime("%A %b %d, %Y %I:%M %p"),
'interval': task.interval,
}
)
return _tasks
def save(self):
try:
db.session.add(self)
db.session.commit()
return 0
except Exception as err:
print(err)
db.session.rollback()
return 1
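# A minimal scheduling sketch (the task function and app wiring are
# assumptions; assumes current_app.scheduler is an rq_scheduler.Scheduler,
# which cancel_scheduled_task() above already relies on):
#
#   from datetime import timedelta
#   job = current_app.scheduler.schedule(
#       scheduled_time=datetime.utcnow() + timedelta(minutes=5),
#       func=some_task_function,  # hypothetical task
#       interval=3600,            # seconds, matching ScheduledTask.interval
#   )
#   ScheduledTask(id=job.get_id(), name='hourly-check',
#                 start=datetime.utcnow(), interval=3600).save()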
|
import datetime
import json
import websockets
from typing import Dict, Callable, List
import logging
from websockets import WebSocketClientProtocol
from bitmex_futures_arbitrage.is_running import is_running
from bitmex_futures_arbitrage.models import Quote
logger = logging.getLogger()
class BitmexQuotesTracker:
"""
Track Bitmex quotes and call `on_quotes` handler on any change
"""
URL = 'wss://www.bitmex.com/realtime'
def __init__(self, symbols: List[str]):
self._symbols = symbols
self._topics = [f'quote:{symbol}' for symbol in self._symbols]
self.symbol2quote: Dict[str, Quote] = {}
def _url(self):
topics = ",".join(self._topics)
return f'{self.URL}?subscribe={topics}'
@staticmethod
async def _ensure_first_message(ws: WebSocketClientProtocol):
""" ensure receiving correct first message """
# e.g.
# {'docs': 'https://www.bitmex.com/app/wsAPI',
# 'info': 'Welcome to the BitMEX Realtime API.',
# 'limit': {'remaining': 36},
# 'timestamp': '2020-05-04T15:37:55.326Z',
# 'version': '2020-04-30T00:58:37.000Z'}
msg_raw = await ws.recv()
first_msg = json.loads(msg_raw)
assert first_msg['info'] == "Welcome to the BitMEX Realtime API.", 'unexpected first message'
async def _ensure_subscribed_to_topics(self, ws: WebSocketClientProtocol):
subscribed_topics = []
for _ in range(len(self._topics)):
msg_raw = await ws.recv()
subscribed_topics.append(json.loads(msg_raw)['subscribe'])
assert set(subscribed_topics) == set(self._topics), 'something wrong with subscriptions'
@staticmethod
def _record2quote(record: Dict) -> Quote:
return Quote(
symbol=record['symbol'],
timestamp=datetime.datetime.strptime(record['timestamp'], '%Y-%m-%dT%H:%M:%S.%f%z').timestamp(),
bid_price=record['bidPrice'],
ask_price=record['askPrice'],
bid_size=record['bidSize'],
ask_size=record['askSize'],
)
def _on_quote_msg(self, msg):
assert msg['action'] in ['partial', 'insert'], 'incorrect quote message'
for record in msg['data']:
quote = self._record2quote(record)
# it happens that record timestamp == previous quote timestamp (because of millisecond rounding)
assert quote.symbol not in self.symbol2quote or quote.timestamp >= self.symbol2quote[quote.symbol].timestamp
self.symbol2quote[quote.symbol] = quote
async def handle_events_forever(self, on_quotes_callback: Callable[[Dict[str, Quote]], None]):
url = self._url()
logger.info(f'connect to WS {url}')
async with websockets.connect(url) as ws:
await self._ensure_first_message(ws)
await self._ensure_subscribed_to_topics(ws)
while is_running:
msg_raw = await ws.recv()
msg = json.loads(msg_raw)
if msg['table'] == 'quote':
self._on_quote_msg(msg)
on_quotes_callback(self.symbol2quote)
else:
raise ValueError(f'unknown table {msg["table"]}')
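# A minimal run sketch (symbols and the callback are illustrative
# assumptions): handle_events_forever() is a coroutine, so it needs an
# asyncio event loop:
#
#   import asyncio
#
#   def print_quotes(symbol2quote):
#       print(symbol2quote)
#
#   tracker = BitmexQuotesTracker(symbols=['XBTUSD'])
#   asyncio.get_event_loop().run_until_complete(
#       tracker.handle_events_forever(print_quotes))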
|
import os
import glob
from flask import request, Blueprint, current_app
from flask.json import jsonify
from ckanpackager import logic
actions = Blueprint('actions', __name__)
@actions.route('/clear_caches', methods=['POST'])
def clear_caches():
logic.authorize_request(request.form)
matching_files = os.path.join(
current_app.config['STORE_DIRECTORY'],
'*.zip'
)
for file_name in glob.glob(matching_files):
os.remove(file_name)
return jsonify(
status='success',
message='Done.'
)
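# A minimal usage sketch (host, port, and the form fields expected by
# logic.authorize_request are assumptions, not taken from this file):
#
#   import requests
#   requests.post('http://localhost:8765/clear_caches', data={'secret': '...'})
|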
"""
Defines the preset values in the mimic api.
"""
from __future__ import absolute_import, division, unicode_literals
get_presets = {"loadbalancers": {"lb_building": "On create load balancer, keeps the load balancer in "
"building state for given seconds",
"lb_error_state": "Puts the LB in error state, and such an LB can only"
"be deleted",
"lb_pending_update": "Changes the load balancer to PENDING-UPDATE"
"state for the given number of seconds, any action"
"other than delete is performed on the server",
"lb_pending_delete": "Changes the load balancer to PENDING-DELETE"
"state for the given seconds, when deleted"},
"servers": {"create_server_failure": "{\"message\": \"given message\","
"\"code\": given code}",
"delete_server_failure": "{\"code\": given code,"
"\"times\": returns given code that many times}",
"invalid_image_ref": ["INVALID-IMAGE-ID", "1111", "image_ends_with_Z"],
"invalid_flavor_ref": ["INVALID-FLAVOR-ID", "8888", "-4", "1"],
"server_error": "sets server state to error on create",
"server_building": "sets the server to be in building state for given time"
" in seconds"},
"identity": {
# On ``validate_token`` the tokens listed below
# result in 'monitoring-service-admin' impersonator role.
"maas_admin_roles": [
"this_is_an_impersonator_token",
"this_is_an_impersonator_token_also",
"impersonate_watson",
"impersonate_creator",
"this_is_an_impersonator_token_also_2",
"impersonate_foo_token"],
# On ``validate_token`` the tokens listed below
# result in 'racker' impersonator role.
"racker_token": ["this_is_a_racker_token"],
# Tenants with user observer role
"observer_role": ["09876"],
# Tenants with user creator role
"creator_role": ["09090"],
# Tenants with user admin role
"admin_role": ["9999"],
# Tenants with this token result in a 401 when validating the token
"token_fail_to_auth": ["never-cache-this-and-fail-to-auth"],
# Users presenting these tokens have contact IDs that correspond
# to presets in the Valkyrie plugin...
"non_dedicated_observer": ["OneTwo"],
"non_dedicated_admin": ["ThreeFour"],
"non_dedicated_impersonator": ["ThreeFourImpersonator"],
"non_dedicated_racker": ["ThreeFourRacker"],
"dedicated_full_device_permission_holder": ["HybridOneTwo"],
"dedicated_account_permission_holder": ["HybridThreeFour"],
"dedicated_impersonator": ["HybridThreeFourImpersonator"],
"dedicated_racker": ["HybridOneTwoRacker"],
"dedicated_limited_device_permission_holder": ["HybridFiveSix"],
"dedicated_non_permission_holder": ["HybridSevenEight"],
"dedicated_quasi_user_impersonator": ["HybridNineZero"]}}
|