text
stringlengths 8
6.05M
|
|---|
VOWELS = frozenset('aeiouAEIOU')

def count_vowels(s=''):
    """Count the vowel characters in *s*; return None for non-string input."""
    if not isinstance(s, str):
        return None
    total = 0
    for ch in s:
        if ch in VOWELS:
            total += 1
    return total
|
import sys
import os
import curses
from dataclasses import dataclass
from typing import Optional
@dataclass
class WindowContent:
    """Static text for one presentation screen."""
    title: str     # large heading, rendered centered and bold
    subtitle: str  # line rendered directly under the title
    content: str   # multi-line body text, rendered left-aligned
def main_menu():
    """Return the top-level menu screen listing all available slides."""
    # NOTE(review): the subtitle f-string has no placeholders
    return WindowContent("What's new in Python 3.8",
                         f"Alexander Hagerman DerbyPy November 2019",
                         """
[1] walrus := assignment_expressions
[2] from sys import audit
[3] typing protocols
[4] vectorcall
[5] release notes
[6] release schedule
""",
                         )
def window_one():
    """Slide 1: walrus-operator (PEP 572) examples."""
    return WindowContent("Assignment Expressions",
                         "Naming the result of expressions",
                         """
# Handle a matched regex
if (match := pattern.search(data)) is not None:
# Do something with match
# A loop that can't be trivially rewritten using 2-arg iter()
while chunk := file.read(8192):
process(chunk)
# Reuse a value that's expensive to compute
[y := f(x), y**2, y**3]
# Share a subexpression between a comprehension filter
# clause and its output
filtered_data = [y for x in data if (y := f(x)) is not None]
""")
def window_two():
    """Slide 2: sys.audit / audit-hook examples (PEP 578)."""
    return WindowContent("Audithooks and metadata",
                         "No more secrets",
                         """
import sys
sys.audit(django.execute_sql, sql, params)
$ manage.py migrate
$ Operations to perform: Apply all migrations:
$ Running migrations:
$ django.execute_sql(SELECT "django_migrations"."app"m "dj..., ())
$ django.execute_sql(.......)
https://mastodon.social/@freakboy3742/103019925896462510
""",
                         )
def window_three():
    """Slide 3: typing additions — Protocol, Literal, Final, TypedDict."""
    return WindowContent("Typehint Updates",
                         "Protocols, TypedDict and more",
                         """
from typing import Iterable
from typing_extensions import Protocol
class SupportsClose(Protocol):
def close(self) -> None:
... # Empty method body (explicit '...')
class Resource: # No SupportsClose base class!
# ... some methods ...
def close(self) -> None:
self.resource.release()
def close_all(items: Iterable[SupportsClose]) -> None:
for item in items:
item.close()
close_all([Resource(), open('some/file')])
Also included in 3.8: Literal, Final and TypedDict
""",
                         )
def window_four():
    """Slide 4: CPython C-API and performance improvements."""
    return WindowContent("C Updates and Python Optimizations",
                         "Gotta go fast!",
                         """
C API for Python Initialization Configuration.
C API for CPython, the “vectorcall” calling protocol allowing
faster calls to internal Python methods without temp objects.
Many shutil functions now use platform specific "fast-copy" syscalls.
Sped-up field lookups in collections.namedtuple(). They are now
the fastest form of instance variable lookup in Python.
Doubled the speed of class variable writes.
Reduced overhead of converting arguments passed to many builtin
functions. This sped up calling some simple builtins 20–50%.
LOAD_GLOBAL instruction now uses new “per opcode cache” mechanism.
It is about 40% faster now.
""",
                         )
def window_five():
    """Slide 5: links to the 3.8 release notes."""
    return WindowContent("Release Notes",
                         "",
                         """
https://docs.python.org/3.9/whatsnew/3.8.html
https://docs.python.org/3.9/whatsnew/changelog.html#changelog
"""
                         )
def window_six():
    """Slide 6: the 3.8 release schedule and support lifespan (PEP 569)."""
    return WindowContent("Release Schedule",
                         "",
                         """
Releases
--------
3.8.0 release: Monday, 2019-10-14
Subsequent bugfix releases at a bi-monthly cadence.
Expected: -
3.8.1 candidate 1: Monday, 2019-12-09
3.8.1 final: Monday, 2019-12-16
3.8 Lifespan
------------
3.8 will receive bugfix updates approximately every 1-3 months for approximately 18 months.
After the release of 3.9.0 final, a final 3.8 bugfix update will be released. After that,
it is expected that security updates (source only) will be released until 5 years after the
release of 3.8 final, so until approximately October 2024.
https://www.python.org/dev/peps/pep-0569/
"""
                         )
def render(stdscr):
    """Curses main loop: draw the current slide and react to keys until 'q'."""
    cursor_x = 0
    cursor_y = 0

    # Clear and refresh the screen for a blank canvas
    stdscr.clear()
    stdscr.refresh()

    # Start colors in curses
    curses.start_color()
    curses.use_default_colors()
    curses.init_pair(1, curses.COLOR_CYAN, -1)   # -1 keeps the terminal's default background
    curses.init_pair(2, curses.COLOR_GREEN, -1)
    curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_BLACK)

    # Don't render cursor
    curses.curs_set(0)

    # Default window content
    content = main_menu()
    status = "Press 'q' to exit | PRESENTING | Pos: {}, {} | Last key pressed: {} | Python {}"
    k = ord("-")  # sentinel "last key" shown before any real key press
    _py = f"{'-' * 4}🐍{'-' * 4}"

    # Loop where k is the last character pressed
    while (k != ord('q')):
        # Initialization
        stdscr.clear()
        height, width = stdscr.getmaxyx()

        if k == curses.KEY_DOWN:
            cursor_y = cursor_y + 1
        elif k == curses.KEY_UP:
            cursor_y = cursor_y - 1
        elif k == curses.KEY_RIGHT:
            cursor_x = cursor_x + 1
        elif k == curses.KEY_LEFT:
            cursor_x = cursor_x - 1
        elif k == 49:   # '1'
            content = window_one()
        elif k == 50:   # '2'
            content = window_two()
        elif k == 51:   # '3'
            content = window_three()
        elif k == 52:   # '4'
            content = window_four()
        elif k == 53:   # '5'
            content = window_five()
        elif k == 54:   # '6'
            content = window_six()
        elif k == 77 or k == 109:   # 'M' or 'm': back to the menu
            content = main_menu()

        # Clamp the cursor to the window bounds
        cursor_x = max(0, cursor_x)
        cursor_x = min(width-1, cursor_x)
        cursor_y = max(0, cursor_y)
        cursor_y = min(height-1, cursor_y)

        # Centering calculations
        start_x_title = int((width // 2) - (len(content.title) // 2) - len(content.title) % 2)
        start_x_py = int((width // 2) - (len(_py) // 2) - len(_py) % 2)
        start_x_subtitle = int((width // 2) - (len(content.subtitle) // 2) - len(content.subtitle) % 2)
        start_y = int((height // 8) - 2)

        # Render status bar
        stdscr.attron(curses.color_pair(3))
        stdscr.addstr(height-1, 0, status.format(cursor_x, cursor_y, chr(k), f"{sys.version_info.major}.{sys.version_info.minor}"))
        # NOTE(review): the padding offset uses the unformatted template length,
        # not the rendered string's length — confirm the bar fills as intended.
        stdscr.addstr(height-1, len(status), " " * (width - len(status) - 1))
        stdscr.attroff(curses.color_pair(3))

        # Turning on attributes for title
        stdscr.attron(curses.color_pair(2))
        stdscr.attron(curses.A_BOLD)

        # Rendering title
        stdscr.addstr(start_y, start_x_title, content.title)
        stdscr.addstr(start_y + 3, start_x_py, _py)

        # Turning off attributes for title
        stdscr.attroff(curses.color_pair(2))
        stdscr.attroff(curses.A_BOLD)

        # Print rest of text
        stdscr.addstr(start_y + 1, start_x_subtitle, content.subtitle)
        stdscr.addstr(start_y + 5, 0, content.content)
        stdscr.move(cursor_y, cursor_x)

        # Refresh the screen
        stdscr.refresh()

        # Wait for next input
        k = stdscr.getch()
def main():
    """Entry point: let curses.wrapper manage terminal setup and teardown."""
    curses.wrapper(render)
# Run the presentation only when executed as a script
if __name__ == "__main__":
    main()
|
import torch.nn as nn
import torch

# Batch size, input width, hidden width, output width
N, D_in, H, D_out = 64, 1000, 100, 10
# Create some random training data
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Two-layer fully connected network; bias terms are disabled
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H, bias=False),  # w_1 @ x (no bias term)
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out, bias=False),
)
# Re-initialise both weight matrices from a standard normal distribution
torch.nn.init.normal_(model[0].weight)
torch.nn.init.normal_(model[2].weight)
# model = model.cuda()
loss_fn = nn.MSELoss(reduction='sum')
learning_rate = 1e-6
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
for it in range(500):
    # Forward pass
    y_pred = model(x)  # model.forward()
    # compute loss
    loss = loss_fn(y_pred, y)  # computation graph
    print(it, loss.item())
    optimizer.zero_grad()
    # Backward pass
    loss.backward()
    # update model parameters
    optimizer.step()
import scipy  # NOTE(review): unused import, kept in place
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author :GG
# 给定一个整数数组 nums ,找到一个具有最大和的连续子数组(子数组最少包含一个元素),返回其最大和。
#
# 示例:
#
# 输入: [-2,1,-3,4,-1,2,1,-5,4],
# 输出: 6
# 解释: 连续子数组 [4,-1,2,1] 的和最大,为 6。
#
#
# 进阶:
#
# 如果你已经实现复杂度为 O(n) 的解法,尝试使用更为精妙的分治法求解。
# Related Topics 数组 分治算法 动态规划
# 👍 2176 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
from typing import List
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum of any non-empty contiguous subarray of `nums`.

        Kadane's algorithm, O(n) time / O(1) space. Assumes `nums` is non-empty
        (the problem's guarantee). Fix: the running-sum local was named `sum`,
        shadowing the builtin; renamed to `running`.
        """
        running = 0
        best = nums[0]
        for num in nums:
            # Extend the previous subarray only while it still helps;
            # otherwise restart the subarray at the current element.
            if running + num > num:
                running += num
            else:
                running = num
            best = max(running, best)
        return best
# leetcode submit region end(Prohibit modification and deletion)
|
from flask import Flask, request
from flask_cors import CORS
import jsonpickle
from storage import *
app = Flask(__name__)
CORS(app)
#player endpoints
#TODO: Bug fix - 500 error on baseball and softball
@app.route("/player", methods=["GET"])
def Player():
    """Return general data for the player given by `id` and `sport` query args."""
    sport = request.args.get("sport")
    # Fix: local was named `id`, shadowing the builtin id()
    player_id = request.args.get("id")
    data = GetPlayerData(player_id, sport)
    return data
@app.route("/player/stats", methods=["GET"])
def PlayerStats():
    """Return statistics for the player given by `id` and `sport` query args."""
    # Fix: local was named `id`, shadowing the builtin id()
    player_id = request.args.get("id")
    sport = request.args.get("sport")
    data = GetPlayerStats(player_id, sport)
    return data
#team endpoints
@app.route("/team/roster", methods=["GET"])
def TeamRoster():
    """Return the roster for the sport named in the `sport` query arg."""
    return GetTeamRoster(request.args.get('sport'))
@app.route("/team/stats", methods=["GET"])
def TeamStats():
    """Return team statistics for the sport named in the `sport` query arg."""
    return GetTeamStats(request.args.get('sport'))
@app.route("/team/stats/progress", methods=["GET"])
def TeamStatsProgress():
    """Return a team stat's progress for the `sport` and `stat` query args."""
    args = request.args
    return GetTeamStatsPerformance(args.get('sport'), args.get('stat'))
@app.route("/team/stats/statlist", methods=["GET"])
def Stats():
    """Return the list of available stats for the `sport` query arg."""
    return GetStats(request.args.get('sport'))
# Development server only; use a production WSGI server for deployment
if __name__ == "__main__":
    app.run()
|
from math import pi
def circleArea(r):
    """Area of a circle of radius `r`, rounded to two decimal places.

    Returns False when `r` is not a positive number.
    """
    if isinstance(r, (int, float)) and r > 0:
        return round(pi * r**2, 2)
    return False
'''
Complete the function circleArea so that it will return the area of a circle with
the given radius. Round the returned number to two decimal places (except for Haskell).
If the radius is not positive or not a number, return false.
Example:
circleArea(-1485.86) #returns false
circleArea(0) #returns false
circleArea(43.2673) #returns 5881.25
circleArea(68) #returns 14526.72
circleArea("number") #returns false
'''
|
from django.conf.urls import url
from . import views
from django.urls import path
from django.contrib.sitemaps.views import sitemap
from blog.sitemaps import PostSitemap
# Sitemap registry passed to the sitemap view
sitemaps = {
    'posts': PostSitemap,
}

urlpatterns = [
    path('', views.post_list_view, name='post_list_view'),
    # Fix: a stray ')' after <int:year> made the date-based pattern unmatchable
    path('<int:year>/<int:month>/<int:day>/<slug:post>/', views.post_detail_view,
         name='post_detail_view'),
    path('sitemap.xml', sitemap, {'sitemaps': sitemaps},
         name='django.contrib.sitemaps.views.sitemap'),
]
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly
import pandas as pd
from plotly.graph_objs import *
from dash.dependencies import Input, Output
app = dash.Dash()

# Austin, TX restaurant-inspection dataset; downloaded at import time
data_df = pd.read_csv('https://data.austintexas.gov/api/views/ecmv-9xxi/rows.csv?accessType=DOWNLOAD')
# Address embeds coordinates in parentheses; grab the text between "(" and ")"
data_df['Coordinates']=data_df['Address'].apply(lambda s: s[s.find("(")+1:s.find(")")])
# Split on the comma and reverse — assumes the source order is (lat, lon),
# so column 0 becomes longitude and column 1 latitude. TODO confirm.
new_df = data_df['Coordinates'].apply(lambda x: pd.Series([i for i in reversed(x.split(','))]))
data_df['Longitude']=new_df[0]
data_df['Latitude']=new_df[1]
data_df['Date'] = pd.to_datetime(data_df['Inspection Date'])
data_df['Year']=data_df['Date'].dt.year
# Keep only 2017 inspections and the columns used downstream
data_df= data_df[data_df['Year']==2017]
data_df = data_df[['Restaurant Name','Zip Code','Score','Latitude','Longitude','Year']]
# Normalise zip codes: last five characters, then integer
data_df['Zip Code'] = data_df['Zip Code'].str[-5:]
data_df['Zip Code'] = data_df['Zip Code'].astype(str).astype(int)
# Coerce coordinates to numbers; unparsable values become 0 and are dropped below
data_df['Latitude'] = pd.to_numeric(data_df['Latitude'], errors='coerce').fillna(0)
data_df['Longitude'] = pd.to_numeric(data_df['Longitude'], errors='coerce').fillna(0)
# Average repeat inspections per restaurant/year/zip
final_df = data_df.groupby(['Restaurant Name','Year','Zip Code'],as_index = False).mean()
# Fold the inspection score into the display name used for marker hover text
final_df['Restaurant Name'] = 'Restaurant Name:' + final_df['Restaurant Name'].astype(str) + ', Inspection Score:'+ final_df['Score'].astype(str)
final_df2 = final_df[['Restaurant Name','Zip Code','Latitude','Longitude']]
final_df2 = final_df2[final_df2.Latitude != 0]
final_df2 = final_df2[final_df2.Longitude !=0]
final_df2.rename(columns={'Restaurant Name': 'Restaurant'}, inplace=True)
final_df2.rename(columns={'Zip Code': 'Zip_Code'}, inplace=True)
available_zipcode = final_df2['Zip_Code'].unique()
# NOTE(review): hard-coded mapbox token (appears to be the public dash-example token)
mapbox_access_token = 'pk.eyJ1IjoiYWxpc2hvYmVpcmkiLCJhIjoiY2ozYnM3YTUxMDAxeDMzcGNjbmZyMmplZiJ9.ZjmQ0C2MNs1AzEBC_Syadg'
layout = dict(
    autosize=True,
    hovermode="closest",
    title='Restaurant Map',
    mapbox=dict(
        accesstoken=mapbox_access_token,
        # Initial view centered on Austin, TX
        center=dict(
            lon=-97.755996,
            lat=30.307182
        ),
        zoom=7,
    )
)
app.layout = html.Div([
    html.Div([
        html.Label('Enter Zip Code'),
        dcc.Dropdown(
            id='Available ZipCodes',
            options=[{'label': i, 'value': i} for i in available_zipcode],
            value= 78610
        )]
    ),
    html.Div(
        [
            dcc.Graph(id='main_graph')
        ]
    )
])
@app.callback(Output('main_graph', 'figure'),
              [Input('Available ZipCodes', 'value')])
def update_figure(selected_zipcode):
    """Build a scattermapbox figure with one trace per restaurant in the zip.

    Fix: the original indexed Series with positional integers
    (`filtered_df['Longitude'][rest]`), but `[]` on a Series is label-based,
    and filtering drops the 0..n-1 labels — raising KeyError for most zips.
    Iterating rows directly avoids label/position confusion.
    """
    filtered_df = final_df2[final_df2.Zip_Code == selected_zipcode]
    traces = []
    for row in filtered_df.itertuples(index=False):
        traces.append(dict(
            type='scattermapbox',
            lon=row.Longitude,
            lat=row.Latitude,
            text=row.Restaurant,
            marker=dict(
                size=10,
            )
        ))
    return dict(data=traces, layout=layout)
if __name__ == '__main__':
    # debug=True enables hot reload and the in-browser error console
    app.run_server(debug=True)
|
# Typical approach: build the list with an explicit loop
squares = list()
for x in range(10):
    squares.append(x**2)
print(squares)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]

# Same result with a list comprehension
squares = [x**2 for x in range(10)]
print(squares)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]

combs = []
for x in [1, 2, 3]:
    for y in [2, 3, 4]:
        if x != y:
            combs.append((x, y))  # append the pair as a tuple
# Fix: the expected output was pasted after print() without a leading '#',
# which subscripted print's None return value and raised TypeError.
print(combs)  # [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 2), (3, 4)]

# Same nested loops and filter as a single comprehension
combs = [(x, y) for x in [1, 2, 3] for y in [2, 3, 4] if x != y]
print(combs)  # [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 2), (3, 4)]

from math import pi
l = [str(round(pi, i)) for i in range(1, 6)]
print(l)  # ['3.1', '3.14', '3.142', '3.1416', '3.14159']

vec = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
for row in vec:
    print(row)
"""
[1, 2, 3]
[4, 5, 6]
[7, 8, 9]
"""
# Flatten and square: comprehension loops read left-to-right, outer first
l = [num ** 2 for e in vec for num in e]
print(l)  # [1, 4, 9, 16, 25, 36, 49, 64, 81] as a flat (1-D) list

# Multiplication table (even bases only)
gugudan = list("%d * %d = %d" % (i, j, i*j) for i in range(2, 10, 2) for j in range(1, 10))
print(gugudan)
"""
['2 * 1 = 2', '2 * 2 = 4', '2 * 3 = 6', '2 * 4 = 8', '2 * 5 = 10', '2 * 6 = 12', '2 * 7
= 14', '2 * 8 = 16', '2 * 9 = 18', '4 * 1 =
4', '4 * 2 = 8', '4 * 3 = 12', '4 * 4 = 16', '4 * 5 = 20', '4 * 6 = 24', '4 * 7 = 28',
'4 * 8 = 32', '4 * 9 = 36', '6 * 1 = 6', '6 * 2 = 12', '6 * 3 = 18', '6 * 4 = 24', '6 * 5 = 30',
'6 * 6 = 36', '6 * 7 = 42', '6 * 8 = 48', '6 * 9 = 54', '8 * 1 = 8', '8 * 2 = 16', '8 * 3 = 24',
'8 * 4 = 32', '8 * 5 = 40', '8 * 6 = 48', '8 * 7 = 56', '8 * 8 = 64', '8 * 9 = 72']
"""
# Tuple "comprehension": wrap a generator expression in tuple()
t = tuple(num for num in range(10))
print(t)  # (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
|
# Simple interactive area calculator: reads two numbers and a shape choice.
a = int(input("ENTER A NUMBER = "))
b = int(input("ENTER A NUMBER = "))
PI = 3.14
select = int(input("chose any one \n 1.square 2. rectangle \n 3. circle "))
# Fix: the original used three independent if-statements, so the trailing
# else printed "SORRY INVALID INPUT" for every choice except 3 — even after
# a valid square/rectangle result. A single if/elif chain handles one choice.
if select == 1:
    print("Area Of Square = ", a**2)
elif select == 2:
    print("AREA OF RECTANGLE = ", a*b)
elif select == 3:
    print("AREA OF CIRCLE = ", PI*a*a)
else:
    print("SORRY INVALID INPUT")
|
from django.db import models
class Computer(models.Model):
    """A desktop PC build and its main hardware components."""
    name = models.CharField(max_length=128)          # display name of the build
    desc = models.TextField(blank=True)              # optional free-form description
    videocard = models.CharField(max_length=128)
    ram = models.CharField(max_length=128)
    cpu = models.CharField(max_length=128)
    mother_board = models.CharField(max_length=128)

    def __str__(self):
        return self.name

    class Meta:
        # Russian admin labels: "Computer" / "Computers"
        verbose_name = 'Компьютер'
        verbose_name_plural = 'Компьютеры'
|
""" Contains facies-related functions. """
import os
from copy import copy
from textwrap import dedent
import numpy as np
import pandas as pd
from scipy.ndimage import find_objects
from skimage.measure import label
from ..plotters import plot_image
from ..utils import groupby_min, groupby_max
from ...batchflow import HistoSampler
class GeoBody:
    """ Container for a 3D object inside seismic volume.
    Described by two matrices: upper and lower boundary.
    Main storages are `matrix_1` and `matrix_2`, which are upper and lower boundary depth maps respectively, and
    `points`, which is an array of (N, 4) shape: iline, crossline, upper and lower points.
    """
    # Custom facies format: spatial point (iline and xline), upper and bottom point of a body (heights 1 and 2)
    FACIES_SPEC = ['iline', 'xline', 'height_1', 'height_2']

    # Columns that are used from the file
    COLUMNS = ['iline', 'xline', 'height_1', 'height_2']

    # Value to place into blank spaces
    FILL_VALUE = -999999

    def __init__(self, storage, geometry, name=None, **kwargs):
        # Meta information
        self.path = None
        self.name = name
        self.format = None

        # Location of the geobody inside cube spatial range
        self.i_min, self.i_max = None, None
        self.x_min, self.x_max = None, None
        self.i_length, self.x_length = None, None
        self.bbox = None
        self._len = None

        # Underlying data storages
        self.matrix_1, self.matrix_2 = None, None
        self.points = None

        # Depth stats
        self.h_min_1, self.h_min_2 = None, None
        self.h_max_1, self.h_max_2 = None, None
        self.h_mean_1, self.h_mean_2 = None, None
        self.h_mean = None

        # Attributes from geometry
        self.geometry = geometry
        self.cube_name = geometry.name
        self.cube_shape = geometry.cube_shape
        self.sampler = None

        # Check format of storage, then use it to populate attributes
        if isinstance(storage, str):
            # path to csv-like file
            self.format = 'file'
        elif isinstance(storage, np.ndarray):
            if storage.ndim == 2 and storage.shape[1] == 4:
                # array with row in (iline, xline, height) format
                self.format = 'points'
            else:
                raise NotImplementedError
        # Dispatch to `from_file` or `from_points`
        getattr(self, f'from_{self.format}')(storage, **kwargs)

    def __len__(self):
        return len(self.points)

    # Coordinate transforms
    def lines_to_cubic(self, array):
        """ Convert ilines-xlines to cubic coordinates system. """
        array[:, 0] -= self.geometry.ilines_offset
        array[:, 1] -= self.geometry.xlines_offset
        array[:, 2:] -= self.geometry.delay
        array[:, 2:] /= self.geometry.sample_rate
        return array

    def cubic_to_lines(self, array):
        """ Convert cubic coordinates to ilines-xlines system. """
        array = array.astype(float)
        array[:, 0] += self.geometry.ilines_offset
        array[:, 1] += self.geometry.xlines_offset
        array[:, 2:] *= self.geometry.sample_rate
        array[:, 2:] += self.geometry.delay
        return array

    # Initialization from different containers
    def from_points(self, points, transform=False, verify=True, **kwargs):
        """ Base initialization: from point cloud array of (N, 4) shape.

        Parameters
        ----------
        points : ndarray
            Array of points. Each row describes one point inside the cube: iline, crossline,
            upper and lower depth points.
        transform : bool
            Whether transform from line coordinates (ilines, xlines) to cubic system.
        verify : bool
            Whether to remove points outside of the cube range.
        """
        _ = kwargs

        # Transform to cubic coordinates, if needed
        if transform:
            points = self.lines_to_cubic(points)
        if verify:
            # Keep only rows entirely inside the cube range
            idx = np.where((points[:, 0] >= 0) &
                           (points[:, 1] >= 0) &
                           (points[:, 2] >= 0) &
                           (points[:, 3] >= 0) &
                           (points[:, 0] < self.cube_shape[0]) &
                           (points[:, 1] < self.cube_shape[1]) &
                           (points[:, 2] < self.cube_shape[2]) &
                           (points[:, 3] < self.cube_shape[2]))[0]
            points = points[idx]
        self.points = np.rint(points).astype(np.int32)

        # Collect stats on separate axes. Note that depth stats are properties
        self.i_min, self.x_min, self.h_min_1, self.h_min_2 = np.min(self.points, axis=0).astype(np.int32)
        self.i_max, self.x_max, self.h_max_1, self.h_max_2 = np.max(self.points, axis=0).astype(np.int32)
        self.h_mean_1, self.h_mean_2 = np.mean(self.points[:, 2:], axis=0)
        self.h_mean = np.mean(self.points[:, 2:])

        self.i_length = (self.i_max - self.i_min) + 1
        self.x_length = (self.x_max - self.x_min) + 1
        self.bbox = np.array([[self.i_min, self.i_max],
                              [self.x_min, self.x_max]],
                             dtype=np.int32)

        self.matrix_1, self.matrix_2 = self.points_to_matrix(self.points,
                                                             self.i_min, self.x_min,
                                                             self.i_length, self.x_length)

    def from_file(self, path, transform=True, **kwargs):
        """ Init from path to csv-like file. """
        _ = kwargs
        self.path = path
        self.name = os.path.basename(path)
        points = self.file_to_points(path)
        self.from_points(points, transform)

    def file_to_points(self, path):
        """ Get point cloud array from file values. """
        #pylint: disable=anomalous-backslash-in-string
        # Peek at the first line to validate the column count
        with open(path, encoding='utf-8') as file:
            line_len = len(file.readline().split(' '))
        if line_len == 4:
            names = GeoBody.FACIES_SPEC
        else:
            raise ValueError('GeoBody labels must be in FACIES_SPEC format.')

        df = pd.read_csv(path, sep=r'\s+', names=names, usecols=GeoBody.COLUMNS)
        df.sort_values(GeoBody.COLUMNS, inplace=True)
        return df.values

    @staticmethod
    def points_to_matrix(points, i_min, x_min, i_length, x_length):
        """ Convert array of (N, 4) shape to a pair of depth maps (upper and lower boundaries of geobody). """
        matrix_1 = np.full((i_length, x_length), GeoBody.FILL_VALUE, np.int32)
        matrix_1[points[:, 0] - i_min, points[:, 1] - x_min] = points[:, 2]

        matrix_2 = np.full((i_length, x_length), GeoBody.FILL_VALUE, np.int32)
        matrix_2[points[:, 0] - i_min, points[:, 1] - x_min] = points[:, 3]
        return matrix_1, matrix_2

    @staticmethod
    def from_mask(mask, grid_info=None, geometry=None, shifts=None,
                  threshold=0.5, minsize=0, prefix='predict', **kwargs):
        """ Convert mask to a list of geobodies. Returned list is sorted on length (number of points).

        Parameters
        ----------
        grid_info : dict
            Information about mask creation parameters. Required keys are `geom` and `range`
            to infer geometry and leftmost upper point, or they can be passed directly.
        threshold : float
            Parameter of mask-thresholding.
        minsize : int
            Minimum number of points in a geobody to be saved.
        prefix : str
            Name of geobody to use.
        """
        _ = kwargs
        if grid_info is not None:
            geometry = grid_info['geom']
            shifts = np.array([item[0] for item in grid_info['range']])

        if geometry is None or shifts is None:
            raise TypeError('Pass `grid_info` or `geometry` and `shifts`.')

        # Labeled connected regions with an integer
        labeled = label(mask >= threshold)
        objects = find_objects(labeled)

        # Create an instance of GeoBody for each separate region
        geobodies = []
        for i, sl in enumerate(objects):
            # Cheap upper bound on region size: volume of its bounding box
            max_possible_length = 1
            for j in range(3):
                max_possible_length *= sl[j].stop - sl[j].start

            if max_possible_length >= minsize:
                indices = np.nonzero(labeled[sl] == i + 1)
                if len(indices[0]) >= minsize:
                    coords = np.vstack([indices[i] + sl[i].start for i in range(3)]).T
                    # Upper/lower boundary per (iline, xline) column, shifted to cube coordinates
                    points_min = groupby_min(coords) + shifts
                    points_max = groupby_max(coords) + shifts
                    points = np.hstack([points_min, points_max[:, -1].reshape(-1, 1)])
                    geobodies.append(GeoBody(points, geometry, name=f'{prefix}_{i}'))

        geobodies.sort(key=len)
        return geobodies

    def filter(self, *args, **kwargs):
        """ Remove points outside of the cube data. Yet to be implemented. """
        _ = args, kwargs

    # GeoBody usage: point/mask generation
    def create_sampler(self, bins=None, **kwargs):
        """ Create sampler based on the upper boundary of a geobody.

        Parameters
        ----------
        bins : sequence
            Size of ticks along each respective axis.
        """
        _ = kwargs
        default_bins = self.cube_shape // np.array([5, 20, 20])
        bins = bins if bins is not None else default_bins
        self.sampler = HistoSampler(np.histogramdd(self.points[:, :3]/self.cube_shape, bins=bins))

    def add_to_mask(self, mask, locations=None, alpha=1, **kwargs):
        """ Add geobody to a background.
        Note that background is changed in-place.

        Parameters
        ----------
        mask : ndarray
            Background to add to.
        locations : ndarray
            Where the mask is located.
        """
        _ = kwargs
        mask_bbox = np.array([[slc.start, slc.stop] for slc in locations], dtype=np.int32)

        # Getting coordinates of overlap in cubic system
        (mask_i_min, mask_i_max), (mask_x_min, mask_x_max), (mask_h_min, mask_h_max) = mask_bbox
        i_min, i_max = max(self.i_min, mask_i_min), min(self.i_max + 1, mask_i_max)
        x_min, x_max = max(self.x_min, mask_x_min), min(self.x_max + 1, mask_x_max)

        if i_max >= i_min and x_max >= x_min:
            overlap_1 = self.matrix_1[i_min - self.i_min:i_max - self.i_min,
                                      x_min - self.x_min:x_max - self.x_min]
            overlap_2 = self.matrix_2[i_min - self.i_min:i_max - self.i_min,
                                      x_min - self.x_min:x_max - self.x_min]

            # Coordinates of points to use in overlap local system
            idx_i_1, idx_x_1 = np.asarray((overlap_1 != self.FILL_VALUE) &
                                          (overlap_1 >= mask_h_min) &
                                          (overlap_1 <= mask_h_max)).nonzero()
            idx_i_2, idx_x_2 = np.asarray((overlap_2 != self.FILL_VALUE) &
                                          (overlap_2 >= mask_h_min) &
                                          (overlap_2 <= mask_h_max)).nonzero()

            # Columns where either boundary falls inside the mask's depth window
            set_1 = set(zip(idx_i_1, idx_x_1))
            set_2 = set(zip(idx_i_2, idx_x_2))
            set_union = set_1 | set_2
            idx_union = np.array(tuple(set_union))

            if len(idx_union) > 0:
                idx_i, idx_x = idx_union[:, 0], idx_union[:, 1]
                heights_1 = overlap_1[idx_i, idx_x]
                heights_2 = overlap_2[idx_i, idx_x]

                # Convert coordinates to mask local system
                idx_i += i_min - mask_i_min
                idx_x += x_min - mask_x_min
                heights_1 -= mask_h_min
                heights_2 -= mask_h_min

                # Clip boundaries to the mask's depth extent
                max_depth = mask.shape[-1] - 1
                heights_1[heights_1 < 0] = 0
                heights_1[heights_1 > max_depth] = max_depth
                heights_2[heights_2 < 0] = 0
                heights_2[heights_2 > max_depth] = max_depth

                # Fill each column from upper to lower boundary, one depth layer
                # per iteration, dropping columns that are already complete
                n = (heights_2 - heights_1).max()
                for _ in range(n + 1):
                    mask[idx_i, idx_x, heights_1] = alpha
                    heights_1 += 1

                    mask_ = heights_1 <= heights_2
                    idx_i = idx_i[mask_]
                    idx_x = idx_x[mask_]
                    heights_1 = heights_1[mask_]
                    heights_2 = heights_2[mask_]
        return mask

    # Properties
    @property
    def full_matrix_1(self):
        """ Matrix in cubic coordinate system. """
        return self.put_on_full(self.matrix_1)

    @property
    def full_matrix_2(self):
        """ Matrix in cubic coordinate system. """
        return self.put_on_full(self.matrix_2)

    def dump(self, path, transform=None, add_height=True):
        """ Save geobody points on disk.

        Parameters
        ----------
        path : str
            Path to a file to save to.
        transform : None or callable
            If callable, then applied to points after converting to ilines/xlines coordinate system.
        add_height : bool
            Whether to concatenate average height to a file name.
        """
        values = self.cubic_to_lines(copy(self.points))
        values = values if transform is None else transform(values)

        df = pd.DataFrame(values, columns=self.COLUMNS)
        df.sort_values(['iline', 'xline'], inplace=True)

        path = path if not add_height else f'{path}_#{self.h_mean}'
        df.to_csv(path, sep=' ', columns=self.COLUMNS, index=False, header=False)

    # Methods of (visual) representation of a geobody
    def __repr__(self):
        return f"""<geobody {self.name} for {self.cube_name} at {hex(id(self))}>"""

    def __str__(self):
        msg = f"""
        GeoBody {self.name} for {self.cube_name} loaded from {self.format}
        Ilines range: {self.i_min} to {self.i_max}
        Xlines range: {self.x_min} to {self.x_max}
        Depth range: {self.h_min_1} to {self.h_max_2}
        Depth mean: {self.h_mean:.6}
        Length: {len(self)}
        """
        return dedent(msg)

    @property
    def centers(self):
        """ Midpoints between upper and lower boundaries. """
        return (self.matrix_1 + self.matrix_2) // 2

    def put_on_full(self, matrix=None, fill_value=None):
        """ Create a matrix in cubic coordinate system. """
        fill_value = fill_value or self.FILL_VALUE

        background = np.full(self.cube_shape[:-1], fill_value, dtype=np.float32)
        background[self.i_min:self.i_max+1, self.x_min:self.x_max+1] = matrix
        return background

    def show(self, src='centers', fill_value=None, on_full=True, **kwargs):
        """ Nice visualization of a depth map matrix. """
        matrix = getattr(self, src) if isinstance(src, str) else src
        fill_value = fill_value or self.FILL_VALUE

        if on_full:
            matrix = self.put_on_full(matrix=matrix, fill_value=fill_value)
        else:
            matrix = copy(matrix).astype(np.float32)

        # defaults for plotting if not supplied in kwargs
        kwargs = {
            'cmap': 'viridis_r',
            'title': f'{src if isinstance(src, str) else ""} {"on full"*on_full} '
                     f'of `{self.name}` on `{self.cube_name}`',
            'xlabel': self.geometry.index_headers[0],
            'ylabel': self.geometry.index_headers[1],
            **kwargs
        }
        matrix[matrix == fill_value] = np.nan
        return plot_image(matrix, **kwargs)

    def show_slide(self, loc, width=3, axis='i', order_axes=None, zoom_slice=None, **kwargs):
        """ Show slide with geobody on it.

        Parameters
        ----------
        loc : int
            Number of slide to load.
        axis : int
            Number of axis to load slide along.
        stable : bool
            Whether or not to use the same sorting order as in the segyfile.
        """
        # Make `locations` for slide loading
        axis = self.geometry.parse_axis(axis)
        locations = self.geometry.make_slide_locations(loc, axis=axis)
        shape = np.array([(slc.stop - slc.start) for slc in locations])

        # Load seismic and mask
        seismic_slide = self.geometry.load_slide(loc=loc, axis=axis)
        # NOTE(review): `width` is forwarded but add_to_mask only swallows it via **kwargs
        mask = np.zeros(shape)
        mask = self.add_to_mask(mask, locations=locations, width=width)
        seismic_slide, mask = np.squeeze(seismic_slide), np.squeeze(mask)
        xticks = list(range(seismic_slide.shape[0]))
        yticks = list(range(seismic_slide.shape[1]))

        if zoom_slice:
            seismic_slide = seismic_slide[zoom_slice]
            mask = mask[zoom_slice]
            xticks = xticks[zoom_slice[0]]
            yticks = yticks[zoom_slice[1]]

        # defaults for plotting if not supplied in kwargs
        if axis in [0, 1]:
            header = self.geometry.index_headers[axis]
            xlabel = self.geometry.index_headers[1 - axis]
            ylabel = 'depth'
            total = self.geometry.lens[axis]
        if axis == 2:
            header = 'Depth'
            xlabel = self.geometry.index_headers[0]
            ylabel = self.geometry.index_headers[1]
            total = self.geometry.depth

        # Thin out tick labels to roughly 8-10 per axis
        xticks = tuple(xticks[::max(1, round(len(xticks)//8/100))*100])
        yticks = tuple(yticks[::max(1, round(len(yticks)//10/100))*100][::-1])

        kwargs = {
            'alpha': 0.25,
            'title': (f'GeoBody `{self.name}` on `{self.geometry.name}`' +
                      f'\n {header} {loc} out of {total}'),
            'xlabel': xlabel,
            'ylabel': ylabel,
            'xticks': xticks,
            'yticks': yticks,
            'y': 1.02,
            **kwargs
        }
        return plot_image([seismic_slide, mask], order_axes=order_axes, **kwargs)
|
#!/usr/bin/env python3
import re
import requests
from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.parse import urlparse, urljoin
import urllib.error
import numpy as np
import colorama
# init the colorama module
colorama.init()
GREEN = colorama.Fore.GREEN
YELLOW = colorama.Fore.YELLOW
GRAY = colorama.Fore.LIGHTBLACK_EX
RESET = colorama.Fore.RESET
# initialize the set of links (unique links)
internal_urls = set()
external_urls = set()
def is_valid(url):
    """
    Checks whether `url` is a valid URL (has both a scheme and a host part).
    """
    parts = urlparse(url)
    if not parts.scheme:
        return False
    return bool(parts.netloc)
def fetch(url):
    """Scrape `url` and return (assets, links).

    assets: `src` values of <img> tags that are valid absolute URLs.
    links : `href` values containing "http" that are valid URLs.
    Returns ([], []) when the HTTP request fails, so callers that
    unpack the result keep working.
    """
    try:
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.text, 'html.parser')
        assets = []
        for img in soup.findAll('img'):
            if not is_valid(img.get('src')):
                # not a valid URL
                continue
            assets.append(img.get('src'))
        print("\n")
        links = []
        for link in soup.find_all(attrs={'href': re.compile("http")}):
            if not is_valid(link.get('href')):
                # not a valid URL
                continue
            links.append(link.get('href'))
        print("\n")
        return (assets, links)
    except requests.RequestException as e:
        # Fix: the old handler caught urllib.error.HTTPError (never raised by
        # requests) and appended to an undefined `urlList`, raising NameError.
        print(f"{GRAY}Request failed for {url}: {e}{RESET}")
        return ([], [])
def getWebsiteAssets(url):
    """
    Returns all URLs that is found on `url` in which it belongs to the same website
    """
    # URLs collected from this page
    found = set()
    # domain name of the URL without the protocol
    domain_name = urlparse(url).netloc
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    for anchor in soup.findAll("a"):
        raw_href = anchor.attrs.get("href")
        # skip empty or missing href attributes
        if raw_href == "" or raw_href is None:
            continue
        # resolve relative links, then strip query string / fragment
        parsed = urlparse(urljoin(url, raw_href))
        cleaned = parsed.scheme + "://" + parsed.netloc + parsed.path
        if not is_valid(cleaned):
            continue
        if cleaned in internal_urls:
            continue
        if domain_name not in cleaned:
            # external link: remember it and move on (set.add is idempotent)
            external_urls.add(cleaned)
            continue
        found.add(cleaned)
        internal_urls.add(cleaned)
    return found
def webassets_downloader(url):
    """Download every asset URL in `url` (an iterable) into the current directory."""
    for site_url in url:
        print('Download Starting...')
        print(f"{YELLOW}Downloading image: {site_url} {RESET} \n")
        # NOTE(review): verify=False disables TLS certificate checks; kept for
        # parity with the original behaviour but should be re-enabled if possible.
        # Fix: added a timeout so one dead host cannot hang the whole run.
        r = requests.get(site_url, verify=False, timeout=30)
        print(r)
        # Last path segment as the file name; fix: fall back when the URL ends in '/'
        filename = site_url.split('/')[-1] or 'index.html'
        with open(filename, 'wb') as output_file:
            output_file.write(r.content)
        print('Download Completed!!!')
if __name__ == "__main__":
    site = 'https://www.bgr.in'
    # Populate the module-level internal_urls / external_urls sets
    getWebsiteAssets(site)
    print("\n")
    # First pass: report the assets found on every internal page
    for site_url in internal_urls:
        assets,links = fetch(site_url)
        print(f"{GREEN}Url: {site_url} \nAssets are as follows: {RESET} \n")
        print(*assets, sep='\n')
        print("[+] Total assets:", len(assets))
        print("\n")
    # Second pass: re-fetch each page and download its assets
    for site_url in internal_urls:
        assets,links = fetch(site_url)
        webassets_downloader(assets)
        print("\n")
|
import sys
sys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')

from numpy import sin
from numpy import cos, linspace

# Sample cos(x) and sin(x) on [0, 4] with 11 points
# x = linspace(0, 7, 70)  # step = (7-0)/(70-1)
x = linspace(0, 4, 11)  # step = (4-0)/(11-1)
y = cos(x)
y1 = sin(x)

from matplotlib import pyplot as plt
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Funkcija $cos(x)$ UN $sin(x)$')
# Lines, then the same data again as dot markers — four plot calls total
plt.plot(x, y, color = "#FF0000")
plt.plot(x, y1, color = '#FFFF00')
plt.plot(x, y, 'bo')
plt.plot(x, y1, 'go')
# Fix: the legend list had missing commas, so Python's implicit string
# concatenation collapsed the four labels into two; one label per plot call.
plt.legend(['$cos(x)$', '$sin(x)$', '$cos(x)$', '$sin(x)$'])
plt.show()
|
import markovgen

# Characters stripped from every generated line.
EXCLUDE = ['"', '(', ')', ';']

newtext = []
# Context managers guarantee the handles are closed (the original leaked
# both files, risking unflushed writes to the output).
with open("remezcla.txt", encoding='utf-8') as original:
    mk = markovgen.Markov(original)
    # Generate 200 lines of Markov-chain text from the source corpus.
    for _ in range(200):
        line = mk.generate_markov_text() + '\n'
        # Drop quoting/grouping punctuation from the generated line.
        line = ''.join(ch for ch in line if ch not in EXCLUDE)
        print(line)
        newtext.append(line)

with open("mezclota.txt", "w", encoding="utf-8") as nuevo:
    nuevo.writelines(newtext)
|
# Prefixes used to namespace the login and registration forms so both can
# be rendered on the same page without field-name collisions.
# NOTE(review): presumably Django-style form `prefix` values - confirm
# against the views that instantiate these forms.
LOGIN_FORM_PREFIX = 'login-form'
REGISTER_FORM_PREFIX = 'register-form'
|
# Demo: lists are mutable and can hold mixed types, including nested
# lists and even function objects.
list1 = [1,2,3,4,5]
list2 = ['a','b','c']
list3 = [1,'a','abc',[1,2,3,4,5],['a','b','c']]
list1[0] = 6
print(list1) # prints [6, 2, 3, 4, 5]
def myfunc():
    # Prints a Korean greeting ("hello").
    print('안녕하세요')
list4=[1,2,myfunc]
list4[2]() # calls myfunc through the list element, printing the greeting
|
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from Chapter2 import MyTransform_6, downloadData
if __name__ == '__main__':
    # Download and load the housing dataset.
    downloadData.fetch_housing_data()
    data = downloadData.load_housing_data()
    data["income_cat"] = np.ceil(data["median_income"] / 1.5) # bucket median income: divide by 1.5, round up
    # cap income_cat at 5 where it is not already below 5
    data["income_cat"].where(data["income_cat"] < 5, 5.0, True) # Series.where(cond, replacement, inplace)
    split = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=42) # 1 split, 50/50 ratio, fixed seed
    for train_index, test_index in split.split(data, data["income_cat"]): # stratify on income_cat proportions
        start_train_set = data.loc[train_index] # .loc selects rows by index label; .iloc would use positions
        start_test_set = data.loc[test_index]
    for set_ in (start_train_set, start_test_set):
        set_.drop("income_cat", axis=1, inplace=True) # drop the helper income_cat column
    housing = start_train_set.drop("median_income", axis=1)
    housing_labels = start_train_set["median_income"].copy()
    # ways of handling missing values
    housing.dropna(subset=["total_bedrooms"]) # option 1: drop rows with missing total_bedrooms
    housing.drop("total_bedrooms", axis=1) # option 2: drop the whole feature
    # option 3: fill with the median
    median = housing["total_bedrooms"].median() # compute and keep the median
    housing["total_bedrooms"].fillna(median, inplace=True) # fill every missing value with the median
    # handling missing values with an Imputer instead
    from sklearn.impute import SimpleImputer
    imputer = SimpleImputer(strategy="median") # replaces missing values with the median
    housing_num = housing.drop("ocean_proximity", axis=1) # exclude the text column
    ######## pipelines ###################
    num_attribs = housing_num.columns
    cat_attribs = "ocean_proximity"
    num_pipline = Pipeline([
        ('selector',MyTransform_6.DataFrameSelector(num_attribs)),
        ('imputer', SimpleImputer(strategy="median")),
        ('attribs_adder', MyTransform_6.CombinedAttributesAdder()),
        ('stdScaler',StandardScaler()),
    ]) # Pipeline takes a list of name/estimator pairs run in sequence
    # the last step may be a transformer or an estimator; all earlier steps must be transformers
    # running a single pipeline
    housing_num_tr = num_pipline.fit_transform(housing_num) # fit_transform on a pipeline chains each step's fit_transform
    cat_pipeline = Pipeline([
        ('cat_encoder',MyTransform_6.MyCategoricalEncoder(cat_attribs,encoding="onehot-dense")),
    ])
    # running several pipelines side by side and concatenating their outputs
    from sklearn.pipeline import FeatureUnion
    full_pipeline = FeatureUnion(transformer_list=[
        ("num_pipline",num_pipline),
        ("cat_pipeline",cat_pipeline)
    ])
    housing_prepared = full_pipeline.fit_transform(housing)
    print(housing_prepared.shape)
|
import pytest
# Module-level flag read by the conditional skip below.
a = 0
@pytest.mark.skip(reason='out-of-date api')
def test_connect():
    """Unconditionally skipped: the API it exercised is out of date."""
    pass
@pytest.mark.skipif(a > 1, reason='out-of-date api')
def test_connect2():
    """Skipped only when `a` > 1 (it is 0 here, so this test runs)."""
    pass
|
#!/usr/bin/python
# coding: utf-8
# haacheuur 0.24
# port industriel de port la nouvelle - couleur - 60cm*30cm
# image source : pln.jpg
# image rendue : pln..20150910-11h59m53s.jpg
import sys
import Image
import random
import os
import ImageDraw
import ImageFont
import ImageFilter
from time import gmtime, strftime
# modifs du 30/10/2013
import ImageEnhance
# Open the source image (the 1-bit conversion variant is kept disabled).
#im1 = Image.open(str(sys.argv[1])).convert('1')
im1 = Image.open(str(sys.argv[1]))
im2 = im1.copy()
#im3 = Image.new("1", (im1.size[0], im1.size[1]))
#im4 = Image.new("1", (im1.size[0]*3, im1.size[1]))
# stretch factor of the output image relative to the source width
allongement = 1
im3 = Image.new("RGBA",(im1.size[0], im1.size[1]))
im4 = Image.new("RGBA",(im1.size[0]*allongement, im1.size[1]))
Larg = im1.size[0]
Haut = im1.size[1]
# NOTE(review): this loop rebuilds randHaut/randomCoupeHauteur 50 times;
# only the last iteration's boundary list survives for the slicing pass
# at the bottom of the script - confirm the repetition is intentional.
for i in range(50):
	# random value within the image height
	def randHaut(): return random.randint(0, im1.size[1])
	# build the list of horizontal slice boundaries
	# (like having a 16-track recorder :)
	randomCoupeHauteur = [0, \
	randHaut(),randHaut(),randHaut(),randHaut(),randHaut(), \
	randHaut(),randHaut(),randHaut(),randHaut(),randHaut(), \
	randHaut(),randHaut(),randHaut(),randHaut(),randHaut(), \
	randHaut(),randHaut(),randHaut(),randHaut(),randHaut(), \
	randHaut(),randHaut(),randHaut(),randHaut(),randHaut(), \
	im1.size[1]]
	# sort the boundaries in ascending order
	randomCoupeHauteur.sort()
# DEBUG
liste = []
# les hachures
def Hacheur(haut, bas) :
	"""Fill the horizontal band [haut, bas) of im4 by pasting randomly
	chosen vertical slices of im2, each repeated a few times, until the
	band's full width is covered (like sequencing audio samples)."""
	n=0
	while n<im4.size[0] :
		# preset list of (slice width, repeat count) pairs to draw from
		#dimen FAAAAT
		#randomListe = [(10240,1),(5120,1),(2560,1),(1280,2),(640,4),(320,8),(320,3),(160,12),(160,6),(120,8),(80,24),(40,16),(20,32),(20,16),(10,32),(10,16),(5,64)]
		#dimen FUZITU
		#PLN back
		#randomListe = [(2560,1),(1280,2),(640,4),(320,8),(320,3),(160,12),(160,6),(120,8),(80,24),(40,16),(20,24),(20,16),(10,32),(10,16),(5,64),(2,64),(1,64),(1,16)]
		#PLN recursif
		#randomListe = [(2560,1),(1280,2),(640,4),(320,8),(320,3),(160,12),(160,6),(120,8),(80,24),(40,16),(20,24),(20,16),(10,32),(10,16),(5,64)]
		randomListe = [(5120,1),(2560,1),(1280,2),(640,4),(320,8),(320,3),(160,12),(160,6),(120,8),(80,24),(40,16),(10,32),(5,64),(3,24)]
		#dimen BLOG CUMULONIMBUS
		#randomListe = [(320,8),(320,3),(160,12),(160,6),(120,8),(80,24),(40,16),(20,32),(20,16),(10,32),(10,16),(5,64)]
		# (idea: repeat what follows 2-3 times to build a whole sequence
		# rather than a single sample)
		# 8>< ------------------------------------------------------------------
		# random draw
		#randomFacteur = random.randint(0, len(randomListe)*3)
		choix = 0
		# DEBUG
		#print len(randomListe)*3
		# pick one preset at random
		randomFacteur = random.randint(0, len(randomListe)-1)
		# DEBUG
		#liste.append(choix)
		# final (width, repeat) pair chosen for this sample
		randomCopyLargFinal = randomListe[randomFacteur][0]
		repeat = randomListe[randomFacteur][1]
		# random source position, kept inside the image width
		randomCopyPosi = random.randint(0, (im1.size[0]-randomCopyLargFinal))
		cx1 = randomCopyPosi
		cx2 = randomCopyPosi + randomCopyLargFinal
		# cut the sample out of the pristine copy
		im3 = im2.crop((cx1,haut,cx2,bas))
		# 8>< ------------------------------------------------------------------
		draw = ImageDraw.Draw(im4)
		loop = 0
		# paste the sample `repeat` times side by side
		while loop<repeat:
			px1 = n
			px2 = n + randomCopyLargFinal
			draw = ImageDraw.Draw(im3)
			# 1px outline around the sample (disabled)
			#draw.line((0, 0, im3.size[0]-1, 0), fill="rgb(255,255,255)")
			#draw.line((im3.size[0]-1, 0, im3.size[0]-1, im3.size[1]-1), fill="rgb(255,255,255)")
			im4.paste(im3, (px1, haut, px2, bas))
			n = n + randomCopyLargFinal
			loop = loop + 1
# les tranches horizontales intactes soulignees de blanc
def TrancheHorizontale() :
	"""Copy one untouched horizontal strip (1/20 of the image height)
	from the pristine source back onto im4 at its original position,
	outlined in black.

	NOTE(review): this script is Python 2 ('import Image'), so
	im1.size[1]/20 is integer division; also, this function is not
	called anywhere in the visible flow - confirm it is still wanted.
	"""
	# pick the copied band at random within the image height
	pos = random.randint(0, im1.size[1]-im1.size[1]/20)
	# copy it from the untouched source copy
	im5 = im2.crop((0,pos,im1.size[0],pos+im1.size[1]/20))
	# draw black lines at the strip's top and bottom edges
	draw = ImageDraw.Draw(im5)
	draw.line((0, im5.size[1]-1, im5.size[0], im5.size[1]-1), fill="black")
	draw.line((0, 1, im5.size[0], 1), fill="black")
	# paste the strip back at its original position
	im4.paste(im5, (0,pos,im1.size[0],pos+im1.size[1]/20))
# HAACHEUUR: glitch each horizontal band between consecutive boundaries.
for i in range(len(randomCoupeHauteur)-1):
	Hacheur(randomCoupeHauteur[i], randomCoupeHauteur[i+1])
# DEBUG
#print liste
#print sorted(set(liste),key=liste.count)
# CTRL + S
# script path
#scriptpy = sys.argv[0]
# image path: str(sys.argv[1])
scriptpy = str(sys.argv[1])
# Drop the 3-char extension from the image name; the dot stays, which
# matches the 'pln..<timestamp>.jpg' naming shown in the header comment.
script = scriptpy[:-3]
im4.save(script+"."+strftime("%Y%m%d-%Hh%Mm%Ss", gmtime())+".jpg",'JPEG', quality=100)
|
### ADD BUTTON REGARDING COUNTER. 2 is the last line in counter!
from tkinter import *
import os
import json
from selenium import webdriver
from getpass import getpass
from functools import partial
creds = 'tempfile.temp'  # path of the plain-text credentials file (username, password)
store = 'storefile.json'  # path of the account store (4 lines per record: marker, site, user, password)
# NOTE(review): `global` at module level is a no-op; these statements are
# kept as-is but have no effect outside a function body.
global lines
global lines2
global editcounter
global editnum
global nye
global nyl
global nyk
# spacer strings used when composing the storage-page labels
nye=" "
nyl=" "
nyk=" "
editnum =0
editcounter =0
global listedit
listedit =[]
global linebutton
linebutton =1
global ld
ld = []  # per-record Edit buttons created by pageOne()
# Cache the store file's newline-stripped lines in two parallel lists.
with open('storefile.json') as fii:
    lines =[line.rstrip('\n') for line in fii.readlines()]
with open('storefile.json') as fii:
    lines2 =[line.rstrip('\n') for line in fii.readlines()]
global linecount
#data = json.load(open("dat.json"))
def Signup():
    """Build the first-run signup window: username/password entries plus
    a Signup button that hands the values to FSSignup()."""
    global pwordE  # entry widgets and window are read later by FSSignup()
    global nameE
    global roots
    global editB
    global deleteB
    # make button universal
    roots = Tk()  # the signup window
    roots.title('Signup')
    roots.geometry("700x700+550+100")
    canvasSign = Canvas(roots, width=700, height=700, bg="white")
    canvasSign.pack()
    img = PhotoImage(file="lock.png")
    # img =img.resize(100,100)
    canvasSign.create_image(350, 70, anchor=N, image=img)
    intruction = Label(roots,
                       text='Please Enter New Credidentials\n',font ='Helvetica 25 bold',fg="red")  # window heading
    intruction.place(x=160,y=0)
    nameL = Label(roots, text='New Username: ',font ='Helvetica 14 bold',fg="#F8A51D")  # caption for the username entry
    pwordL = Label(roots, text='New Password: ',font ='Helvetica 14 bold',fg="#F8A51D")  # caption for the password entry
    nameL.place(x=100,y=100)
    pwordL.place(x=100,y=150)
    nameE = Entry(roots)  # username input
    pwordE = Entry(roots,
                   show='*')  # password input; show='*' masks the typed characters
    nameE.place(x=230,y=100)
    pwordE.place(x=230,y=150)
    signupButton = Button(roots, text='Signup',
                          command=FSSignup)  # persists the credentials and moves to Login
    signupButton.place(x=440,y=125)
    roots.mainloop()  # keep the window open; FSSignup() destroys it
def FSSignup():
    """Persist the new username and password to the creds file, then
    close the signup window and open the login screen.

    The creds file holds two plain-text lines: username then password.
    NOTE(review): plain-text password storage - acceptable only for a
    toy app.
    """
    # The with-block closes the file automatically (the original also
    # called f.close() redundantly inside the block).
    with open(creds, 'w') as f:
        f.write(nameE.get())   # username typed in the signup window
        f.write('\n')          # keep each value on its own line
        f.write(pwordE.get())  # password typed in the signup window
    roots.destroy()  # close the signup window
    Login()          # proceed to the login screen
def Login():
    """Build the login window: username/password entries plus Login and
    Delete User buttons."""
    global nameEL
    global pwordEL  # read later by CheckLogin()
    global rootA
    rootA = Tk()  # the login window
    rootA.title('Login')
    rootA.geometry("700x700+550+100")
    canvas = Canvas(rootA, width=700, height=700,bg="#03A9F4")
    canvas.pack()
    rootA.config(bg="#03A9F4")
    img = PhotoImage(file="lock.png")
    #img =img.resize(100,100)
    canvas.create_image(350, 70, anchor=N, image=img)
    intruction = Label(rootA, text='KeyPass Login\n',font ='Helvetica 25 bold',fg="#F8A51D",bg="#03A9F4")  # window heading
    intruction.place(x=249,y=10)
    nameL = Label(rootA, text='Username ',bg="#03A9F4",fg="#F8A51D",font ='Helvetica 18')  # caption for the username entry
    #nameL['bg'] = nameL.rootA['bg']
    pwordL = Label(rootA, text='Password ',bg="#03A9F4",fg="#F8A51D",font ='Helvetica 18')  # caption for the password entry
    nameL.place(x=310, y=315)
    pwordL.place(x=310, y=380)
    nameEL = Entry(rootA)  # username input
    pwordEL = Entry(rootA, show='*')  # password input, masked
    nameEL.place(x=260, y=345)
    pwordEL.place(x=260, y=410)
    loginB = Button(rootA, text='Login',
                    command=CheckLogin)  # validates the entries against the creds file
    loginB.place(x= 330, y =510)
    rmuser = Button(rootA, text='Delete User', fg='red',
                    command=DelUser)  # wipes the creds file and restarts signup
    rmuser.place(x=312,y=530)
    rootA.mainloop()
#Adding screen
def addAccount():
    """Replace the storage window with a form for a new account's
    website, username and password; the Save button hands off to save()."""
    global accountWindow
    global webEn
    global nameEn
    global pwEn
    storageWindow.destroy()  # leave the account-list page
    accountWindow =Tk()
    accountWindow.title("Account")
    accountWindow.geometry("700x700+550+100")
    canvasadd = Canvas(accountWindow, width=700, height=700, bg="#03A9F4")
    canvasadd.pack()
    img = PhotoImage(file="add.png")
    canvasadd.create_image(350, 70, anchor=N, image=img)
    label = Label(accountWindow,text ="Please fill out the information bellow: ",font ='Helvetica 36 ',fg="#F8A51D",bg="#03A9F4")
    label.place(x=50,y=10)
    # left side: captions for website, username and password
    webaccount = Label(accountWindow, text='Website: ',fg="#F8A51D",)
    nameaccount = Label(accountWindow, text='Username:',fg="#F8A51D")
    pwordaccount = Label(accountWindow, text='Password: ',fg="#F8A51D")
    webaccount.place(x=230,y=160)
    nameaccount.place(x=230,y=200)
    pwordaccount.place(x=230,y=240)
    # entry fields, read later by save()
    webEn =Entry(accountWindow)
    nameEn =Entry(accountWindow)
    pwEn =Entry(accountWindow, show ='*')
    webEn.place(x= 330, y=160)
    nameEn.place(x= 330, y=200)
    pwEn.place(x= 330, y=240)
    saveB = Button(accountWindow, text='Save',fg="#03A9F4",command= lambda : save())
    saveB.place(x=330,y=300)
    accountWindow.mainloop()
#save button
def save():
    """Append the new account (website, username, password) to the store
    file and to both in-memory line caches, then return to the list page.

    Each record is four lines: a "Breakfrom1line000" marker followed by
    website, username and password.
    """
    global lines
    global lines2
    record = ["Breakfrom1line000", webEn.get(), nameEn.get(), pwEn.get()]
    # The with-block closes the file (the original also called fp.close()
    # redundantly inside it); each value is written on its own line.
    with open(store, 'a') as fp:
        for value in record:
            fp.write(value)
            fp.write('\n')
    # Keep both in-memory copies of the file in sync with what was written.
    lines.extend(record)
    lines2.extend(record)
    accountWindow.destroy()
    pageOne()
def pageOne():
    """Render the account-list window: one group of rows per stored
    record with Edit / Delete / Auto Login buttons, plus an Add Account
    button.

    Records in `lines` are 4 entries each: a "Breakfrom1line000" marker
    followed by website, username and password.
    """
    global storageWindow
    storageWindow = Tk()
    global editcounter
    global editnum
    global listedit
    global linebutton
    global deleteB
    storageWindow.title('Password Storage')
    storageWindow.geometry("700x700+550+100")
    storageWindow.config(bg="#03A9F4")
    lit =Label(storageWindow, text ="Your accounts:", font ='Helvetica 18 bold',fg="white",bg="#03A9F4")
    lit.grid(row =0, column = 0, sticky = W)
    Label(storageWindow, text ="_______________________________________________________________________________________________________________",fg="#F8A51D",bg="#03A9F4").grid()
    #row 2 available and onwards
    AddButton = Button(storageWindow, text='Add Account', command=lambda: addAccount())
    AddButton.grid(row =0, column = 0,sticky = E)
    # Walk the cached file lines, laying out one grid row per value.
    # h and i track the file-line index each button should act on.
    h=1
    i=1
    linecount =2
    for line in lines:
        if line == "Breakfrom1line000":
            global editB
            # Marker line: start a new record and create its Edit button.
            # NOTE(review): `counter` is first assigned here, so a store
            # file that does not begin with a marker line would raise
            # NameError below - confirm the file format guarantees it.
            counter = 0
            editnum = editnum+1
            listedit.append(editnum)
            editB = Button(storageWindow, text='Edit', command=partial(edit,i))
            editB.grid(row=linecount+1, column=0, sticky=E)
            ld.append(editB)
            linebutton = linebutton +1
            i = i+4
        if line != "Breakfrom1line000":
            if counter ==0:
                global deleteB
                # first value of the record: website row + Delete button
                Label(storageWindow, text="Website:"+nye,font="Helvetica 16 bold",fg="white",bg="#F8A51D" ).grid(row=linecount,column = 0, sticky =W)
                Label(storageWindow, text=line,font="Helvetica 14 bold",fg="white",bg="#F8A51D").grid(row=linecount,column = 0,sticky =N)
                deleteB=Button(storageWindow, text="Delete",command=partial(deleteCommand,i))
                deleteB.grid(row=linecount,column =0,sticky=E)
            elif counter ==1:
                # second value: username row
                Label(storageWindow, text="Username:"+nyl,font="Helvetica 16 bold",fg="white",bg="#F8A51D").grid(row=linecount, column=0, sticky=W)
                Label(storageWindow, text=line,font="Helvetica 16 bold",fg="white",bg="#F8A51D").grid(row=linecount,column = 0, sticky =N)
                #editB = Button(storageWindow, text='Edit' +str(editnum), command=lambda: edit(editnum))
                #editB.grid(row=linecount,column = 0, sticky =E)
            elif counter == 2:
                # third value: password row + Auto Login button, then a
                # separator line before the next record
                Label(storageWindow, text="Password:"+nyk,font="Helvetica 16 bold",fg="white",bg="#F8A51D").grid(row=linecount, column=0, sticky=W)
                Label(storageWindow, text=line,font="Helvetica 16 bold",fg="white",bg="#F8A51D").grid(row=linecount,column = 0, sticky = N)
                autologB = Button(storageWindow, text='Auto Login',bg="#03A9F4", command=partial(autoLogin,h))
                autologB.grid(row=linecount, column=0, sticky=E)
                linecount = linecount + 1
                h=h+4
                Label(storageWindow, text = "________________________________________________________________________________________________________________",fg="#F8A51D",bg="#03A9F4").grid(row=linecount,column = 0, sticky =W)# separator line
            counter = counter + 1
            linecount = linecount +1
    #######################look at add button addAccount() --> save case
    print (editcounter)
    storageWindow.mainloop()
def edit(number):
    """Open the edit form for the record whose file-line index is
    `number`; the Save button hands off to updateSave()."""
    global go
    go = number  # remembered so updateSave() knows which lines to rewrite
    print(number)
    print(ld)
    global editWindow
    global webUp
    global nameUp
    global pwUp
    storageWindow.destroy()  # leave the account-list page
    editWindow = Tk()
    editWindow.title("Edit")
    editWindow.geometry("700x700+550+100")
    editWindow.config(bg="white")
    img = PhotoImage(file="edit.png")
    canvasedit = Canvas(editWindow, width=700, height=700, bg="white")
    canvasedit.pack()
    canvasedit.create_image(350, 70, anchor=N, image=img)
    label = Label(editWindow, text="Please update the information bellow **",font="Helvetica 25 bold",fg="red")
    label.place(x=100,y=0)
    # captions for website, username and password
    webupdate = Label(editWindow, text='Website: ',font="Helvetica 18 ",fg="#03A9F4")
    nameupdate = Label(editWindow, text='Username: ',font="Helvetica 18 ",fg="#03A9F4")
    pwordupdate = Label(editWindow, text='Password: ',font="Helvetica 18 ",fg="#03A9F4")
    webupdate.place(x=5, y=100)
    nameupdate.place(x=5, y=150)
    pwordupdate.place(x=5, y=200)
    # entry fields, read later by updateSave()
    webUp = Entry(editWindow)
    nameUp = Entry(editWindow)
    pwUp = Entry(editWindow, show='*')
    webUp.place(x=100,y=100)
    nameUp.place(x=100,y=150)
    pwUp.place(x=100, y=200)
    saveB = Button(editWindow, text='Save', command=lambda: updateSave())
    saveB.place(x=90,y=250)
    editWindow.mainloop()
def deleteCommand(number):
    """Blank out the 4-line record ending at file line `number + 3`,
    both in the store file and in the in-memory `lines` cache, then
    rebuild the storage window.

    `number` is the running line index the Delete button was created
    with; the record's marker line sits at `number - 4`.
    """
    numbers = number - 4
    print("Delete " + str(numbers + 3))
    with open('storefile.json', 'r') as file:
        datas = file.readlines()
    # Blank the marker/website/username/password lines rather than
    # removing them, so the indices baked into later buttons stay valid.
    for offset in range(4):
        datas[numbers + offset] = ""
        lines[numbers + offset] = ""
    with open('storefile.json', 'w') as file:
        file.writelines(datas)
    # BUG FIX: the original called storageWindow.mainloop() here, which
    # re-entered the Tk event loop from inside a button callback and
    # stalled the destroy/refresh below until that nested loop exited.
    storageWindow.destroy()
    pageOne()
def updateSave():
    """Write the edited website/username/password back to the store file
    (lines go, go+1, go+2) and to the in-memory cache, then rebuild the
    account-list page. `go` was stored by edit()."""
    global const
    global go
    print("HI")
    with open('storefile.json', 'r') as file:
        # read the whole file, then overwrite the record's three lines
        datas = file.readlines()
        datas[go]=webUp.get() +"\n"
        datas[go+1]=nameUp.get()+"\n"
        datas[go +2]=pwUp.get()+"\n"
        # keep the in-memory cache (no trailing newlines) in sync
        lines[go]=webUp.get()
        lines[go +1]=nameUp.get()
        lines[go +2]=pwUp.get()
    with open('storefile.json', 'w') as file:
        file.writelines(datas)
        file.close()  # redundant: the with-block closes the file anyway
    editWindow.destroy()
    pageOne()
def autoLogin(number):
    """Start a Selenium Chrome session and log into the site stored at
    line `number` of the store file; the username and password sit on
    the two following lines.

    NOTE(review): the lines keep their trailing newlines (as in the
    original), so the built URLs contain '\n' - confirm this is the
    intended matching/navigation behavior.
    """
    jangweb = ""
    usr = ""
    pwd = ""
    # Read the three consecutive record lines in a single pass instead
    # of re-opening the file three times like the original did.
    with open(store) as fh:
        for i, line in enumerate(fh):
            if i == number:
                jangweb = str(line)
            elif i == number + 1:
                usr = line
            elif i == number + 2:
                pwd = line
    print(jangweb)
    print(usr)
    print(pwd)
    driver = webdriver.Chrome("/Users/cssi/Desktop/chromedriver")
    if jangweb == 'www.facebook.com\n':
        driver.get('https://'+jangweb+'/')
        username_box = driver.find_element_by_id('email')  # field id from page inspection
        username_box.send_keys(usr)  # fill in the username
        password_box = driver.find_element_by_id('pass')  # field id from page inspection
        password_box.send_keys(pwd)  # fill in the password
        login_btn = driver.find_element_by_id('u_0_3')  # login button id
        login_btn.submit()
    elif jangweb == 'www.twitter.com\n':
        driver.get('https://' + jangweb + '/login')
        username_box = driver.find_element_by_class_name('js-username-field email-input js-initial-focus')
        username_box.send_keys(usr)  # fill in the username
        password_box = driver.find_element_by_class_name('js-password-field')
        password_box.send_keys(pwd)  # fill in the password
        login_btn = driver.find_element_by_css_selector('button.submit.EdgeButton.EdgeButton--primary.EdgeButtom--medium')
        login_btn.submit()
    else:
        # Unknown site: just open it and let the user log in manually.
        driver.get('https://' + jangweb + '/')
def CheckLogin():
    """Compare the entered username/password against the two lines of
    the creds file; on a match open the account list, otherwise pop up
    a small 'Invalid Login' window."""
    with open(creds) as f:
        data = f.readlines()  # line 0 = username, line 1 = password
    uname = data[0].rstrip()  # rstrip() drops the trailing newline
    pword = data[1].rstrip()
    if nameEL.get() == uname and pwordEL.get() == pword:
        rootA.destroy()  # close the login window
        pageOne()        # show the stored accounts
    else:
        # small throwaway window reporting the failed login
        r = Tk()
        r.title('D:')
        r.geometry('150x50')
        rlbl = Label(r, text='\n[!] Invalid Login')
        rlbl.pack()
        r.mainloop()
def DelUser():
    """Delete the stored credentials file and restart the signup flow."""
    os.remove(creds)  # remove the credentials file
    rootA.destroy()   # close the login window
    Signup()          # back to first-run signup
# Entry point: if a creds file already exists go straight to Login,
# otherwise run the first-time Signup flow.
if os.path.isfile(creds):
    Login()
else:
    Signup()
#Edit needs to show text that you already posted!
# for edit 123..for edit 1, 567.. edit 2, 9,10,11.. for edit3 13,14,15... 17,18,19, skip the 4
#can edit button name so it's button1, button5, button9, button13, button17, button21
|
# integer
x = 100
print type(x)
if x >= 100:
print "That's a big number!"
else:
print "That's a small number"
# string
y = 'hello world'
print type(y)
if y >= 50:
print 'Long sentence'
else:
print 'Short setence'
z = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
print type(z)
if len(z) >= 10:
print 'Big List!'
else:
print 'Small List!'
|
import sys
# Modified from cpo example
# https://github.com/chrisvam/psana_cpo/blob/master/hexanode_save_lcls1.py
# With no arguments, print usage and bail out.
if len(sys.argv) == 1:
    print("Usage: ")
    print("source /reg/g/psdm/etc/psconda.sh")
    print("python cspad_save_lcls1.py exp run detname n_events xtc_dir")
    print("note: use n,m for run n to m, -1 for n_events for all events, leave out xtc_dir if using the experiment folder")
    print("ex: python cspad_save_lcls1.py cxid9114 95,114 CxiDs2.0:Cspad.0 -1 /reg/d/psdm/cxi/cxid9114/demo/xtc")
    exit()
from psana import *
import numpy as np
import time
# Positional arguments: experiment, run (or "n,m" range), detector name,
# event cap (-1 = all events), optional xtc directory.
exp = sys.argv[1]
run = sys.argv[2]
detname = sys.argv[3]
n_events = int(sys.argv[4])
xtc_dir = None
if len(sys.argv) == 6:
    xtc_dir = sys.argv[5]
# "n,m" selects runs n..m inclusive; a single number selects one run.
if run.find(',') > -1:
    run_range = run.split(',')
    run_st, run_en = int(run_range[0]), int(run_range[1])+1
else:
    run_st = int(run)
    run_en = run_st + 1
for run in range(run_st, run_en):
    #hit_ts = np.loadtxt('hits_r%s.txt'%(str(run).zfill(4)), dtype=np.int)
    # Open the run's data source, honoring an explicit xtc directory.
    if xtc_dir:
        dsource = MPIDataSource('exp=%s:run=%s:dir=%s:smd'%(exp, str(run), xtc_dir))
    else:
        dsource = MPIDataSource('exp=%s:run=%s:smd'%(exp, str(run)))
    det = Detector(detname)
    epics = dsource.env().epicsStore()
    # One HDF5 output file per run, e.g. cxid9114_r95.h5.
    h5fname = "%s_r%d.h5"%(exp, run)
    smldata = dsource.small_data(h5fname, gather_interval=1)
    for nevt,evt in enumerate(dsource.events()):
        raw = det.raw(evt)
        if raw is None: continue  # skip events with no detector data
        # photon energy read from an EPICS PV
        photon_energy = epics.value('SIOC:SYS0:ML00:AO541')
        evt_id = evt.get(EventId)
        sec = evt_id.time()[0]
        nsec = evt_id.time()[1]
        # pack seconds/nanoseconds into a single 64-bit timestamp
        timestamp = (sec << 32) | nsec
        t = evt_id.time()
        ms = "%03d" % (t[1]/1000000)
        # human-readable timestamp: YYYYmmddHHMMSS + milliseconds
        tstring = int(time.strftime("%Y%m%d%H%M%S", time.gmtime(t[0])) + ms)
        #found = np.searchsorted(hit_ts, tstring)
        #if hit_ts[found] == tstring:
        print(run, nevt, raw.shape, photon_energy, timestamp, tstring)
        smldata.event(raws=raw, photon_energies=photon_energy, timestamps=timestamp)
        # honor the event cap when one was given
        if n_events > -1:
            if nevt>n_events: break
    smldata.save()
    smldata.close()
    print('Done with run %d'%run)
|
''' Like stack, queue is a linear data structure that stores items in First In First Out (FIFO) manner.
With a queue the least recently added item is removed first.
A good example of queue is any queue of consumers for a resource where the consumer that came first is served first.'''
class Queue(object):
    """Fixed-capacity FIFO queue backed by a Python list.

    Items are inserted at index 0 and popped from the end, so the least
    recently added item leaves first. Failed enqueue/dequeue operations
    return the -1 sentinel that the interactive driver (main) checks;
    the original returned None, so main's error branches never fired.
    """
    def __init__(self, size):
        # queue[0] is the rear; queue[-1] is the front.
        self.queue = []
        self.size = size
    def __str__(self):
        """Space-separated items, rear to front."""
        myString = ' '.join(str(i) for i in self.queue)
        return myString
    def enqueue(self, item):
        """Add `item` at the rear. Prints a message and returns -1 when
        the queue is full; returns None on success."""
        if(self.isFull() != True):
            self.queue.insert(0, item)
        else:
            print('Queue is Full!')
            return -1  # sentinel checked by the interactive driver
    def dequeue(self):
        """Remove and return the front item, or print a message and
        return -1 when the queue is empty."""
        if(self.isEmpty() != True):
            return self.queue.pop()
        else:
            print('Queue is Empty!')
            return -1  # sentinel checked by the interactive driver
    def isEmpty(self):
        """Return True when no items are stored."""
        return self.queue == []
    def isFull(self):
        """Return True when the item count has reached the capacity."""
        return len(self.queue) == self.size
    def peek(self):
        """Return the front item without removing it, or print a message
        (and return None) when the queue is empty."""
        if(self.isEmpty() != True):
            return self.queue[-1]
        else:
            print('Queue is Empty!')
def main():
    """Interactive driver: build a Queue of user-chosen size, then loop
    over a numbered menu of queue operations until the user enters 0."""
    myQueue = Queue(int(input("Enter size of queue : ")))
    while(True):
        print(
            '------------OPERATIONS-----------\n'
            '\t1. enqueue\n'
            '\t2. dequeue\n'
            '\t3. Front of queue\n'
            '\t4. check for empty\n'
            '\t5. check for full\n'
            '\t6. display Queue\n'
            '---------------------------------\n'
        )
        #for performing certain operations make a choice
        ch = int(input('Enter your choice(0 to exit) : '))
        print('\n','-'*35)
        #breaking condition
        if ch == 0:
            break
        # enqueue operation; -1 is this driver's sentinel for "queue full"
        elif ch == 1:
            e = (input('Enter the element : '))
            msg = myQueue.enqueue(e)
            if msg == -1:
                print('Queue is full item cannot be enqueued!!')
            else:
                print('item enqueued successfully!!')
        # dequeue operation; -1 is the sentinel for "queue empty"
        elif ch == 2:
            msg = myQueue.dequeue()
            if msg == -1:
                print('Queue is empty item cannot be dequeued!!')
            else:
                print(' item dequeued successfully!! \n\n\t item dequeued : ',msg)
        #peek operation
        elif ch == 3:
            print('PEEK SUCCESSFUL! \n\n\t : ',myQueue.peek())
        #isEmpty operation
        elif ch == 4:
            print('QUEUE EMPTY ? : ',myQueue.isEmpty())
        #isFull operation
        elif ch == 5:
            print('QUEUE FULL ? : ',myQueue.isFull())
        #display operation
        elif ch == 6:
            print(myQueue)
        #default operation
        else:
            print('INVALID CHOICE!!!')
        print('-'*30,'\n')
#---------------------calling main function----------------------#
if __name__ == '__main__':
    main()
|
# Given two integer arrays of equal length target and arr.
#
# In one step, you can select any non-empty sub-array of arr and reverse it.
# You are allowed to make any number of steps.
#
# Return True if you can make arr equal to target, or False otherwise.
class Solution:
    def canBeEqual(self, target, arr):
        """Return True when `arr` can be turned into `target` by
        reversing sub-arrays any number of times.

        Any permutation is reachable through sub-array reversals, so the
        order of elements is irrelevant: the arrays match exactly when
        they are equal as multisets.
        """
        canonical_target = sorted(target)
        canonical_arr = sorted(arr)
        return canonical_target == canonical_arr
if __name__ == "__main__":
    # Quick manual check of Solution.canBeEqual.
    # FIX: instantiate Solution instead of passing the class object as
    # `self` (the original's Solution.canBeEqual(Solution, ...) works by
    # accident but is not how instance methods are meant to be called).
    testinput1 = [1, 2, 3, 4]
    testinput2 = [2, 4, 1, 3]
    print(Solution().canBeEqual(testinput1, testinput2))
|
import requests, dicttoxml, xmltodict
from xml.dom.minidom import parse
import xml.dom.minidom
from xml.dom.minidom import parseString
import glob
import os, json
import time
# xml 自动化本地测试框架 (local automated XML API test harness)
filepath = 'xml/G2/'
base_url = 'http://124.70.178.153:8082/'
xml_list = glob.glob(os.path.join(filepath, '*.xml'))

# POST up to the first 100 XML fixtures to the server, one endpoint per
# file. Iterating a slice avoids the IndexError the original hit with
# range(0, 100) when fewer than 100 files were present.
for xml_path in xml_list[:100]:
    time.sleep(0.05)  # throttle between requests
    # with-block closes the handle (the original leaked every file)
    with open(xml_path, 'r') as xml_file:
        xml_str = xml_file.read()
    # Endpoint name is embedded in the file path: chars [16:-4].
    # NOTE(review): slice offsets assume a fixed path layout under
    # 'xml/G2/' - confirm against the actual fixture file names.
    title_content = xml_path[16:-4]
    str_url = base_url + title_content
    # (The original branched on title_content == 'dish_residue' but both
    # branches issued the identical POST, so a single call suffices.)
    response = requests.post(str_url, xml_str)
    time.sleep(0.5)
    print(xml_path[7:16], 'receive successfully!')
    # Round-trip the XML response through a dict into a JSON string.
    jsonstr = xmltodict.parse(response.text)
    jsonstr = json.dumps(jsonstr)
|
Part I - What is a wrapper? (not rapper)
A wrapper function makes your code more efficient and DRY (and keeps it in one language) by wrapping reusable logic in a method we can invoke whenever we need it.
In terms of Python and APIs, we want to wrap our API calls in methods so we don't have to fill our code with repeated endpoint boilerplate.
This is similar to when we built SQL queries and wrapped them in the Python language.
APIs and wrappers
Application Program Interface - external db interfaces - opening up data to the world
API keys are used to limit who and how many requests can be made in a given time frame.
Part II - API Limitations/API Keys
API key requirement
API keys are used to limit who can make requests and how many requests can be made in a given time frame, because you don't want anyone to DDoS the system with a flood of requests.
When you get a key to an API make sure to see if there are limitations and what those limitations are
Part III - Gitignore and Environmental Variables
NEVER EVER EVER push your KEY to GitHub
You can create a gitignore file and choose which files to not push to github - it will automatically ignore that part of the file when you push to github.
`ls -a` lists all files, including hidden ones such as .gitignore.
Files listed in .gitignore will not be sent to GitHub when you push - we will use this a lot in Phase 2.
API keys can be stored in the file and not go anywhere.
You can also use Environmental Variables.
Part IV - Brief Extension of RESTful API Practices
when you build a RESTful API - it is stateless: client state does not live on the server.
only deals with HTTP methods - GET, POST, PUT, DELETE.
RESTful design position
make the code so that it is always useful, can be re-used.
|
#!/usr/bin/env python
"""
v0.1 Go through various "interesting" source resources and re-evaluate epochs, features. classes
Source resources include:
- ptf_09xxx associated sources in source_test_db.caltech_classif_summary
- list of high nobjs sources, which actually have < 2 epochs associated with them.
"""
import os, sys
import reevaluate_feat_class
if __name__ == '__main__':
    # Require the source id on the command line; fail with a usage hint
    # instead of an opaque IndexError when it is missing.
    if len(sys.argv) < 2:
        sys.exit("Usage: %s <src_id>" % sys.argv[0])
    src_id = int(sys.argv[1])
    # Re-derive epochs/features/classes for the given source id.
    reeval_fc = reevaluate_feat_class.Reevaluate_Feat_Class()
    reeval_fc.reevaluate_for_srcid(src_id=src_id)
|
import mariadb
def settingUpTables(user, password, host, port):
    """Create every table of the `steamscrape` schema (if absent) and fix
    the encoding of the name columns.

    Args are the MariaDB connection settings. Progress is printed; on any
    database error the error is printed instead of raising.
    """
    print("######### Creating tables #########")
    # Statements in dependency order: parent tables before the link tables
    # that reference them with foreign keys. Originally each statement had
    # its own copy of the execute/commit boilerplate; a single loop below
    # replaces the eleven duplicated blocks.
    ddl_statements = [
        ''' CREATE TABLE IF NOT EXISTS game_information (
                id_game INT,
                game_name VARCHAR(255) NOT NULL,
                release_date DATE,
                game_URL VARCHAR(255),
                PRIMARY KEY (id_game)
            ) ;
        ''',
        ''' CREATE TABLE IF NOT EXISTS rating (
                id_game INT,
                game_rating VARCHAR(255),
                rating_percentage INT,
                total_review INT,
                PRIMARY KEY (id_game),
                CONSTRAINT FK_rating_gameinformation
                    FOREIGN KEY (id_game) REFERENCES game_information(id_game)
                    ON UPDATE CASCADE ON DELETE CASCADE
            ) ;
        ''',
        ''' CREATE TABLE IF NOT EXISTS price (
                id_game INT,
                original_price INT,
                discount_price INT,
                PRIMARY KEY (id_game),
                CONSTRAINT FK_price_gameinformation
                    FOREIGN KEY (id_game) REFERENCES game_information(id_game)
                    ON UPDATE CASCADE ON DELETE CASCADE
            ) ;
        ''',
        ''' CREATE TABLE IF NOT EXISTS genre (
                id_genre VARCHAR(100),
                genre_name VARCHAR(255) NOT NULL,
                PRIMARY KEY (id_genre)
            ) ;
        ''',
        ''' CREATE TABLE IF NOT EXISTS game_genre (
                id_game INT,
                id_genre VARCHAR(100),
                PRIMARY KEY (id_game, id_genre),
                CONSTRAINT FK_gamegenre_genre
                    FOREIGN KEY (id_genre) REFERENCES genre(id_genre)
                    ON UPDATE CASCADE ON DELETE RESTRICT,
                CONSTRAINT FK_gamegenre_gameinformation
                    FOREIGN KEY (id_game) REFERENCES game_information(id_game)
                    ON UPDATE CASCADE ON DELETE CASCADE
            ) ;
        ''',
        ''' CREATE TABLE IF NOT EXISTS compatibility (
                id_compatibility VARCHAR(100),
                platform_name VARCHAR(255) NOT NULL,
                PRIMARY KEY (id_compatibility)
            ) ;
        ''',
        # id_compatibility was VARCHAR(199) here while the referenced column
        # in `compatibility` is VARCHAR(100); the lengths are now consistent.
        ''' CREATE TABLE IF NOT EXISTS game_compatibility (
                id_game INT,
                id_compatibility VARCHAR(100),
                PRIMARY KEY (id_game, id_compatibility),
                CONSTRAINT FK_gamecompatibility_compatibility
                    FOREIGN KEY (id_compatibility) REFERENCES compatibility(id_compatibility)
                    ON UPDATE CASCADE ON DELETE RESTRICT,
                CONSTRAINT FK_gamecompatibility_gameinformation
                    FOREIGN KEY (id_game) REFERENCES game_information(id_game)
                    ON UPDATE CASCADE ON DELETE CASCADE
            ) ;
        ''',
        ''' CREATE TABLE IF NOT EXISTS developer (
                id_developer VARCHAR (100),
                developer_name VARCHAR(255),
                PRIMARY KEY (id_developer)
            ) ;
        ''',
        ''' CREATE TABLE IF NOT EXISTS game_developer (
                id_game INT,
                id_developer VARCHAR (100),
                PRIMARY KEY (id_game, id_developer),
                CONSTRAINT FK_gamedeveloper_developer
                    FOREIGN KEY (id_developer) REFERENCES developer(id_developer)
                    ON UPDATE CASCADE ON DELETE RESTRICT,
                CONSTRAINT FK_gamedeveloper_gameinformation
                    FOREIGN KEY (id_game) REFERENCES game_information(id_game)
                    ON UPDATE CASCADE ON DELETE CASCADE
            ) ;
        ''',
        # To avoid errors when inserting 4-byte unicode characters
        # (e.g. VARCHARs holding Korean or Chinese words), change the
        # encoding of the name columns from utf8 to utf8mb4.
        '''ALTER TABLE developer MODIFY COLUMN developer_name VARCHAR(255)
           CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL''',
        '''ALTER TABLE game_information MODIFY COLUMN game_name VARCHAR(255)
           CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL;''',
    ]
    try:
        conn = mariadb.connect(
            user=user,
            password=password,
            host=host,
            port=port,
            database="steamscrape")
        # initiating Cursor
        cur = conn.cursor()
        # Run each DDL statement and commit it, mirroring the original
        # per-statement commit behaviour.
        for query in ddl_statements:
            cur.execute(query)
            conn.commit()
        conn.close()
        print("Creating Table is Done")
    except mariadb.Error as e:
        print(f"Error connecting to MariaDB Platform: {e}")
def settingUpDatabase(user, password, host, port):
    """Create the `steamscrape` database (if it does not already exist)
    and list all databases on the server.

    Args are the MariaDB connection settings. Progress is printed; on any
    database error the error is printed instead of raising.
    """
    print("######### Setting Up Database #########")
    try:
        conn = mariadb.connect(
            user=user,
            password=password,
            host=host,
            port=port)
        # initiating Cursor
        cur = conn.cursor()
        # IF NOT EXISTS makes the script re-runnable: the original
        # unconditional CREATE DATABASE raised an error (caught below)
        # whenever the database was already present. The previously
        # unused dbName constant now actually drives the statement.
        dbName = "steamscrape"
        cur.execute(f"CREATE DATABASE IF NOT EXISTS {dbName}")
        # Checking if the new database has been successfully created:
        # iterate through the list of database names that are available.
        cur.execute("SHOW DATABASES")
        print("List of databases :")
        for db in cur:
            print(db)
        conn.commit()
        conn.close()
        print("Creating Database is Done")
    except mariadb.Error as e:
        print(f"Error connecting to MariaDB Platform: {e}")
# Default MariaDB connection settings; the user may override them below.
user = "root"
password = ""
host = "localhost"
port = 3306
print("These are MariaDB settings that's currently set, do you want to reconfigure it?(Y/N)")
print("user:", user)
print("password:", password)
print("host", host)
print("port:", port)
# Renamed from the misspelled `respone`.
response = str(input("Y/N :"))
if response == "Y":
    print("Configuring settings :")
    user = str(input("user:"))
    password = str(input("password:"))
    host = str(input("host:"))
    port = int(input("port:"))
elif response == "N":
    print("configuration is retained")
else:
    print("Response is not recognizable, will pass with the existance configuration")
# Every branch originally ended with the same two calls; they are hoisted
# out of the conditional so the setup happens exactly once.
settingUpDatabase(user, password, host, port)
settingUpTables(user, password, host, port)
|
import requests
from os import getcwd
import os
from git import Repo
import aws_encryption_sdk
# Imports the necessary dependencies
ver = 1.3
# version number shown in the startup banner
keyvalue = []
# [key, ":", value] triple collected from the user (filled in by getkey())
github_dir = "https://github.com/Dithilli/kongappend.git"  # repo to clone from
working_dir = "./testdir"  # local clone destination; testfile.txt lives here
def getkey():
    """Prompt the user for a key and its value.

    Returns the pair as a three-element list: [key, ":", value].
    """
    entered_key = str(input('Enter the key: '))
    entered_value = str(input('Enter the value associated with this key: '))
    return [entered_key, ":", entered_value]
def fileappend(appendingvalue):
    """Append each element of *appendingvalue* to testfile.txt in the
    working directory.

    :param appendingvalue: iterable of strings to append (the caller
        passes the module-level ``keyvalue`` list).
    :return: concatenation of everything that was appended.

    BUG FIX: the original ignored its parameter and iterated the global
    ``keyvalue`` instead; it now honours the argument. (Behaviour for the
    existing caller, which passes ``keyvalue``, is unchanged.) The dead
    commented-out duplicate-key check was removed.
    """
    filename = os.path.join(working_dir, 'testfile.txt')
    addedkey = ""
    with open(filename, 'a') as f:
        for each in appendingvalue:
            f.write(each)
            addedkey += str(each)
    return addedkey
def getgitpy():
    """Clone the kongappend GitHub repository into the working directory
    and return the resulting Repo object."""
    return Repo.clone_from(github_dir, working_dir)
def pushgit(localrepo):
    # TODO: push the appended file in `localrepo` back to the remote;
    # currently an unimplemented stub.
    pass
"""
def filecheck(checkingvalue):
filename = "testfile.txt"
# if file.mode =/ unfinished process for checking if the file is already open in any mode
f = open(filename, "r")
test = f.readlines()
url = "https://raw.github.com/Dithilli/kongappend/master/testfile.txt"
filename = os.path.join(getcwd(), 'testfile.txt')
print(filename)
with requests.get(url) as r:
with open(filename,'w+') as f:
f.write(r.text)
print(r.text)
"""
"""
def cycle_string(key_arn, source_plaintext, botocore_session=None):
Encrypts and then decrypts a string using a KMS customer master key (CMK)
:param str key_arn: Amazon Resource Name (ARN) of the KMS CMK
(http://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html)
:param bytes source_plaintext: Data to encrypt
:param botocore_session: Existing Botocore session instance
:type botocore_session: botocore.session.Session
# Create a KMS master key provider
kms_kwargs = dict(key_ids=[key_arn])
if botocore_session is not None:
kms_kwargs['botocore_session'] = botocore_session
master_key_provider = aws_encryption_sdk.KMSMasterKeyProvider(**kms_kwargs)
# Encrypt the plaintext source data
ciphertext, encryptor_header = aws_encryption_sdk.encrypt(
source=source_plaintext,
key_provider=master_key_provider
)
print('Ciphertext: ', ciphertext)
# Decrypt the ciphertext
cycled_plaintext, decrypted_header = aws_encryption_sdk.decrypt(
source=ciphertext,
key_provider=master_key_provider
)
# Verify that the "cycled" (encrypted, then decrypted) plaintext is identical to the source
# plaintext
assert cycled_plaintext == source_plaintext
# Verify that the encryption context used in the decrypt operation includes all key pairs from
# the encrypt operation. (The SDK can add pairs, so don't require an exact match.)
#
# In production, always use a meaningful encryption context. In this sample, we omit the
# encryption context (no key pairs).
assert all(
pair in decrypted_header.encryption_context.items()
for pair in encryptor_header.encryption_context.items()
)
print('Decrypted: ', cycled_plaintext)
"""
# actual running code
# Clone the repo first so fileappend writes into a fresh checkout.
localrepo = getgitpy()
print("This is the WeWork KongConfig appending App Version {} Use this app to add your environmental variable key:values to the KongConfig file.".format(ver) )
# Collect one key/value pair interactively and append it to the file.
keyvalue = getkey()
addedkey = fileappend(keyvalue)
print("{} was added to the list of Key:Values".format(addedkey))
|
# -*- coding: utf-8 *-*
import os
import tornado
from app.helpers import DB
from tornado.options import options as opts
import routes
from app import path
class Application(tornado.web.Application):
    """Tornado application wired with separate read/write DB connections
    and a filesystem theme for its static files and templates.

    FIX: removed a leftover Python-2 debug ``print`` of the static path.
    """
    def __init__(self):
        # Separate connections for reads and writes, each fully configured
        # from the command-line/config options.
        self.r_db = DB(opts.db_r_host,opts.db_r_port,opts.db_r_name,opts.db_r_user,opts.db_r_password)
        self.w_db = DB(opts.db_w_host,opts.db_w_port,opts.db_w_name,opts.db_w_user,opts.db_w_password)
        # Resolve the selected theme directory relative to the app home.
        self.theme_path = os.path.join(opts.themes_directory,opts.selected_theme)
        self.theme_path = os.path.join(path.HOME_PATH, self.theme_path)
        settings = {
            'login_url': '/login',
            'static_path': os.path.join(self.theme_path, 'static'),
            'template_path': os.path.join(self.theme_path, 'templates'),
            'xsrf_cookies': True,
            'cookie_secret': opts.cookie_secret,
            'debug': opts.debug
        }
        if opts.static_url_prefix:
            settings['static_url_prefix'] = opts.static_url_prefix
        # Serve /favicon.ico from the theme's static directory in addition
        # to the routes declared in routes.urls.
        tornado.web.Application.__init__(self, routes.urls +
            [(r"/(favicon\.ico)", tornado.web.StaticFileHandler,{'path': settings['static_path']})], **settings)
|
import dash_bootstrap_components as dbc
from dash import html
# One heading of every level (H1-H6), each with a small "New" badge,
# built in a loop instead of six near-identical literals.
badges = html.Div(
    [
        getattr(html, f"H{level}")(
            ["Example heading", dbc.Badge("New", className="ms-1")]
        )
        for level in range(1, 7)
    ]
)
|
#!usr/bin/python
# Python 2 / Tkinter demo window.
from Tkinter import *
import Tkinter
import tkMessageBox

# Root window plus two frames: one for the colour buttons, one at the bottom.
top = Tkinter.Tk()
frame = Frame(top)
frame.pack()
bFrame = Frame(top)  # FIX: was Frame(root) - `root` is undefined; the window is `top`
bFrame.pack(side=BOTTOM)

# Colour buttons.
rbut = Button(frame, text="Red", fg="red")  # FIX: was Button(frame(text=... - syntax error
rbut.pack(side=LEFT)
gbut = Button(frame, text="Brown", fg="brown")
gbut.pack(side=LEFT)
bbut = Button(frame, text="Blue", fg="blue")
bbut.pack(side=LEFT)
blbut = Button()  # never configured or packed; left as a placeholder

# Checkboxes with their backing state variables.
CheckVar1 = IntVar()
CheckVar2 = IntVar()
C1 = Checkbutton(top, text="Music", variable=CheckVar1,
                 onvalue=1, offvalue=0, height=6,
                 width=24)
C2 = Checkbutton(top, text="Video", variable=CheckVar2,
                 onvalue=1, offvalue=0, height=6,
                 width=24)

# Name prompt and entry field.
L1 = Label(top, text="Whart be'est tho'u na'em: ")
L1.pack(side=LEFT)
E1 = Entry(top, bd=5)
E1.pack(side=RIGHT)

def ayo():
    # Popup echoing whatever is currently typed in the entry field.
    tkMessageBox.showinfo("Popeye's", "Fast " + E1.get())

B = Tkinter.Button(top, text="Louisiana", command=ayo)
B.pack()
C1.pack()
C2.pack()
top.mainloop()
|
def total_licks(env):
    """Return the tootsie-pop sentence for the given environment.

    :param env: dict mapping a condition name to a licks delta (positive
        makes it harder, negative easier).
    :return: "It took N licks ..." plus, when at least one condition adds
        licks, " The toughest challenge was <condition>." for the first
        condition with the highest delta.

    BUG FIX: the original evaluated ``env.get(max_env) > 0`` even for an
    empty dict, comparing ``None > 0`` and raising TypeError in Python 3;
    it also obscured the addition as ``total -= -env[x]``.
    """
    total = 252 + sum(env.values())
    tc = ''
    if env:
        # max() with key=env.get returns the first key with the largest
        # delta, preserving the "first one presented wins ties" rule.
        max_env = max(env, key=env.get)
        if env[max_env] > 0:
            tc = ' The toughest challenge was {}.'.format(max_env)
    return 'It took {} licks to get to the tootsie roll center of a tootsie pop.{}'.format(total, tc)
'''
How many licks does it take to get to the tootsie roll center of a tootsie pop?
A group of engineering students from Purdue University reported that its
licking machine, modeled after a human tongue, took an average of 364 licks
to get to the center of a Tootsie Pop. Twenty of the group's volunteers assumed
the licking challenge-unassisted by machinery-and averaged 252 licks each to the center.
Your task, if you choose to accept it, is to write a function that will return the
number of licks it took to get to the tootsie roll center of a tootsie pop, given
some environmental variables.
Everyone knows it's harder to lick a tootsie pop in cold weather but it's easier
if the sun is out. You will be given an object of environmental conditions for
each trial paired with a value that will increase or decrease the number of licks.
The environmental conditions all apply to the same trial.
Assuming that it would normally take 252 licks to get to the tootsie roll center
of a tootsie pop, return the new total of licks along with the condition that
proved to be most challenging (causing the most added licks) in that trial.
Example:
totalLicks({ "freezing temps": 10, "clear skies": -2 });
Should return:
"It took 260 licks to get to the tootsie roll center of a tootsie pop.
The toughest challenge was freezing temps."
Other cases: If there are no challenges, the toughest challenge sentence
should be omitted. If there are multiple challenges with the highest toughest
amount, the first one presented will be the toughest. If an environment
variable is present, it will be either a positive or negative integer.
No need to validate.
'''
|
from django.urls import include, path, re_path
from rest_framework import routers # add this
from .import views
# DRF router that auto-generates list/detail routes for the viewset.
router = routers.DefaultRouter()  # add this
# Expose TodoView under /home/ with basename 'todo'.
router.register(r'home', views.TodoView, 'todo')
urlpatterns = [
    # Mount every router-generated route at the app root.
    path('', include(router.urls))
]
|
import uuid
from datetime import datetime
from src.common.database import Database
class Plantation(object):
    """Mongo-backed record of one plantation work item.

    Field names mirror the document keys produced by :meth:`json`.
    """

    def __init__(self, typeOfPlantation, typeOfCrop, block, totalPits, workName, totalSanctionedPlants, user_name, user_id, costOfCrops, plantationStatus= 'Open', plantationDate=None, hectre=None, plotNo=None, typeOfRoad=None, KM=None, plantation_id=None, pitsToBeTaken = None, panchayat = None, overseer = None, habitation = None, survivalRateOfCrops = None, pitsTaken = None):
        if plantationDate:
            # Combine the supplied 'YYYY-MM-DD' date with the current
            # wall-clock time so the stored value is a full datetime.
            self.plantationDate = datetime.combine(datetime.strptime(plantationDate, '%Y-%m-%d').date(),
                                                   datetime.now().time())
        else:
            self.plantationDate = None
        self.typeOfPlantation = typeOfPlantation
        self.typeOfCrop = typeOfCrop
        self.block = block
        self.totalPits = totalPits
        self.workName = workName
        self.hectre = hectre
        self.typeOfRoad = typeOfRoad
        self.KM = KM
        self.totalSanctionedPlants = totalSanctionedPlants
        self.panchayat = panchayat
        self.habitation = habitation
        self.overseer = overseer
        self.pitsTaken = pitsTaken
        self.pitsToBeTaken = pitsToBeTaken
        self.plotNo = plotNo
        self.survivalRateOfCrops = survivalRateOfCrops
        self.costOfCrops = costOfCrops
        self.plantationStatus = plantationStatus
        self.user_id = user_id
        self.user_name = user_name
        # Generate a fresh hex id unless one was loaded from Mongo.
        self.plantation_id = uuid.uuid4().hex if plantation_id is None else plantation_id

    def save_to_mongo(self):
        """Insert this plantation as a new document."""
        Database.insert(collection='plantations', data=self.json())

    @classmethod
    def update_plantation(cls, plantation_id, panchayat, typeOfPlantation, workName, totalPits, overseer, habitation, totalSanctionedPlants, pitsTaken, pitsToBeTaken, typeOfCrop, block, survivalRateOfCrops, user_name, user_id, costOfCrops, plantationDate, plantationStatus, plotNo = None, hectre = None, KM = None, typeOfRoad = None):
        """Update the document matching *plantation_id* with the given fields."""
        Database.Update_Plantation(collection='plantations', query={'plantation_id': plantation_id}, typeOfCrop=typeOfCrop, KM = KM, typeOfRoad = typeOfRoad,
                                   plotNo=plotNo, workName=workName, totalPits=totalPits, hectre=hectre, panchayat = panchayat, habitation = habitation, overseer = overseer, survivalRateOfCrops=survivalRateOfCrops, block = block, totalSanctionedPlants=totalSanctionedPlants, pitsTaken=pitsTaken, pitsToBeTaken=pitsToBeTaken,
                                   costOfCrops=costOfCrops, typeOfPlantation=typeOfPlantation, plantationDate=plantationDate, plantationStatus=plantationStatus, user_id=user_id,
                                   user_name=user_name)

    def json(self):
        """Return this plantation as a Mongo-ready dict."""
        return {
            'typeOfCrop': self.typeOfCrop,
            'typeOfPlantation': self.typeOfPlantation,
            'plotNo': self.plotNo,
            'survivalRateOfCrops': self.survivalRateOfCrops,
            'totalPits': self.totalPits,
            'hectre': self.hectre,
            'typeOfRoad': self.typeOfRoad,
            'KM': self.KM,
            'workName': self.workName,
            'totalSanctionedPlants': self.totalSanctionedPlants,
            'pitsTaken': self.pitsTaken,
            'pitsToBeTaken': self.pitsToBeTaken,
            'costOfCrops': self.costOfCrops,
            'block': self.block,
            'panchayat': self.panchayat,
            'habitation': self.habitation,
            'overseer': self.overseer,
            'plantationDate': self.plantationDate,
            'plantationStatus': self.plantationStatus,
            'user_id': self.user_id,
            'user_name': self.user_name,
            'plantation_id': self.plantation_id,
        }

    @classmethod
    def from_mongo(cls, plantation_id):
        """Load one plantation by id and rebuild the object from its document."""
        document = Database.find_one(collection='plantations', query={'plantation_id': plantation_id})
        return cls(**document)

    @classmethod
    def deletefrom_mongo(cls, plantation_id):
        """Delete the document matching *plantation_id*."""
        Database.delete_from_mongo(collection='plantations', query={'plantation_id': plantation_id})

    @classmethod
    def find_by_district(cls, blocks):
        """Return all plantations whose `block` field equals *blocks*.

        BUG FIX: the query previously used the key 'blocks', which json()
        never stores (the field is 'block'), so no document could match.
        """
        documents = Database.find(collection='plantations', query={'block': blocks})
        return [cls(**document) for document in documents]
|
import vrep
import math
from collections import namedtuple
# Result record for one proximity-sensor read; `distance` is the Euclidean
# norm of `detectedPoint`, computed by VrepConnection.readProximitySensor.
VrepProximitySensorResult = namedtuple('VrepProximitySensorResult', 'detectionState distance detectedPoint detectedObjectHandle detectedSurfaceNormalVector')
class VrepObject(object):
    """Handle to a single V-REP scene object; delegates calls to its connection."""

    def __init__(self, connection, handle):
        self.connection = connection  # owning VrepConnection
        self.handle = handle          # remote-API object handle

    def readProximitySensor(self, operationMode = vrep.simx_opmode_oneshot_wait):
        """Read this object as a proximity sensor (see VrepConnection.readProximitySensor)."""
        return self.connection.readProximitySensor(self.handle, operationMode)

    def setJointTargetVelocity(self, targetVelocity, operationMode = vrep.simx_opmode_oneshot_wait):
        """Set the target velocity of this object treated as a joint."""
        return self.connection.setJointTargetVelocity(self.handle, targetVelocity, operationMode)
class VrepConnection(object):
    """Thin wrapper over the V-REP remote API for one client connection.

    Non-zero remote-API return codes are uniformly converted to exceptions
    by handleErrorCode; `raw_*` variants return the codes unchecked.
    """

    def __init__(self, host, port):
        """Open the connection; raises when the server is unreachable."""
        self.host = host
        self.port = port
        # Trailing arguments (True, True, 5000, 5) are simxStart options —
        # presumably wait-until-connected / reconnect / timeout / cycle;
        # confirm against the remote API docs.
        self.clientID = vrep.simxStart(host, port, True, True, 5000, 5)
        if (not self.connected()):
            raise Exception('Unable to connect to vrep server', host, port)

    def connected(self):
        # simxStart yields -1 when no connection could be established.
        return self.clientID != -1

    def disconnect(self):
        """Close the remote-API connection."""
        return vrep.simxFinish(self.clientID)

    def handleErrorCode(self, errorCode):
        """Raise on any non-zero remote-API return code."""
        if (errorCode != 0):
            raise Exception('Unexpected error', errorCode)

    def raw_getObjectGroupData(self, objectType, dataType, operationMode = vrep.simx_opmode_oneshot_wait):
        """Unchecked simxGetObjectGroupData passthrough."""
        return vrep.simxGetObjectGroupData(self.clientID, objectType, dataType, operationMode)

    def getObjectNames(self, objectType = vrep.sim_appobj_object_type, operationMode = vrep.simx_opmode_oneshot_wait):
        """Return names of all scene objects of the given type (dataType 0 = names)."""
        errorCode, handles, intData, floatData, stringData = self.raw_getObjectGroupData(objectType, 0, operationMode)
        self.handleErrorCode(errorCode)
        return stringData

    def getObjectInstanceNames(self, baseObjectName, objectType = vrep.sim_appobj_object_type, operationMode = vrep.simx_opmode_oneshot_wait):
        """Return names equal to baseObjectName or starting with 'baseObjectName#'
        (presumably V-REP's suffix scheme for duplicated objects — confirm)."""
        objectNames = self.getObjectNames(objectType, operationMode)
        return [objectName for objectName in objectNames
                if ((objectName == baseObjectName) or
                    objectName.startswith(baseObjectName + '#'))]

    def getObjects(self, objectType, operationMode = vrep.simx_opmode_oneshot_wait):
        """Unchecked simxGetObjects passthrough."""
        return vrep.simxGetObjects(self.clientID, objectType, operationMode)

    def raw_getObjectHandle(self, objectName, operationMode = vrep.simx_opmode_oneshot_wait):
        """Unchecked handle lookup; returns (errorCode, handle)."""
        return vrep.simxGetObjectHandle(self.clientID, objectName, operationMode)

    def getObjectHandle(self, objectName, operationMode = vrep.simx_opmode_oneshot_wait):
        """Checked handle lookup; raises when the lookup fails."""
        res, handle = self.raw_getObjectHandle(objectName, operationMode)
        self.handleErrorCode(res)
        return handle

    def getObject(self, objectName, operationMode = vrep.simx_opmode_oneshot_wait):
        """Return a VrepObject bound to this connection for *objectName*."""
        return VrepObject(self, self.getObjectHandle(objectName, operationMode))

    def objectExists(self, objectName, operationMode = vrep.simx_opmode_oneshot_wait):
        # A zero error code from the raw lookup means the object was found.
        result = self.raw_getObjectHandle(objectName, operationMode)
        return result[0] == 0

    def setJointTargetVelocity(self, jointHandle, targetVelocity, operationMode = vrep.simx_opmode_oneshot_wait):
        """Unchecked simxSetJointTargetVelocity passthrough."""
        return vrep.simxSetJointTargetVelocity(self.clientID, jointHandle, targetVelocity, operationMode)

    def raw_readProximitySensor(self, sensorHandle, operationMode = vrep.simx_opmode_oneshot_wait):
        """Unchecked simxReadProximitySensor passthrough."""
        return vrep.simxReadProximitySensor(self.clientID, sensorHandle, operationMode)

    def readProximitySensor(self, sensorHandle, operationMode = vrep.simx_opmode_oneshot_wait):
        """Read the sensor and return a VrepProximitySensorResult with the
        Euclidean distance to detectedPoint added.

        NOTE(review): the distance is computed even when detectionState is
        false; what detectedPoint holds in that case is unverified here.
        """
        res, detectionState, detectedPoint, detectedObjectHandle, detectedSurfaceNormalVector = self.raw_readProximitySensor(sensorHandle, operationMode)
        self.handleErrorCode(res)
        distance = math.sqrt(sum([x ** 2 for x in detectedPoint]))
        return VrepProximitySensorResult(detectionState, distance, detectedPoint, detectedObjectHandle, detectedSurfaceNormalVector)
class VrepWrapper(object):
    """Factory facade: creates VrepConnection instances."""

    def connect(self, host, port):
        """Open and return a new connection to the V-REP server."""
        new_connection = VrepConnection(host, port)
        return new_connection
|
from linker import Linker
from selenium import webdriver
# Module-level Chrome instance shared by all JSLinker methods.
driver = webdriver.Chrome('./chromedriver')
class JSLinker(Linker):
    """Bridge between Python and the JavaScript quoridor AI running in a
    Chrome page driven by Selenium."""

    def __init__(self):
        pass

    def setBoardState(self, board_info):
        '''
        Set the board state. The input is an iterable linear data
        structure; nothing is returned.
        '''
        # Cache the AI pawn's linear position (row * 7 + col) — used by the
        # commented-out move decoding in computeDecision.
        self.place = board_info[0] * 7 + board_info[1]
        driver.get('file:///Users/nikoyou/Desktop/quoridor-ai-7-7/display.html')
        # board_info layout (from the calls below): [0:2] AI pawn (row, col),
        # [2:4] opponent pawn, [4] AI wall count, [5] opponent wall count,
        # [6:] wall grid cells where 1 = horizontal wall, 2 = vertical wall.
        js_code = 'setAIPlace(' + str(board_info[0]) + ', ' + str(board_info[1]) + ');\n'
        js_code += 'setOpponentPlace(' + str(board_info[2]) + ', ' + str(board_info[3]) + ');\n'
        js_code += 'setAIWallNumbers(' + str(board_info[4]) + ');\n'
        js_code += 'setOpponentWallNumbers(' + str(board_info[5]) + ');\n'
        for i in range(6, len(board_info)):
            order = i - 6
            if board_info[i] == 1:  # place a horizontal wall
                js_code += 'setHorizontalWall(' + str(order // 6) + ', ' + str(order % 6) + ');\n'
            elif board_info[i] == 2:  # place a vertical wall
                js_code += 'setVerticalWall(' + str(order // 6) + ', ' + str(order % 6) + ');\n'
        # print(js_code)
        driver.execute_script(js_code)

    def computeDecision(self):
        '''
        Compute the best move.
        Returns an action identifier.
        '''
        js_code = 'return computerMove()'
        result = driver.execute_script(js_code)
        result = result.split('-')
        result = [int(r) for r in result]
        # Decode the command: two numbers denote a wall placement.
        if len(result) == 2:
            row = result[0] // 7
            col = result[0] % 7
            intersection = row * 6 + col
            # Wall placement
            if result[1] - result[0] == 1:
                # vertical wall
                result = 'v' + str(intersection)
            elif result[1] - result[0] == 7:
                # horizontal wall
                result = 'h' + str(intersection)
        # elif len(result) == 1:
        #     # pawn move
        #     diff = result[0] - self.place
        #     if diff == 1:
        #         result = 3  # right
        #     elif diff == -1:
        #         result = 2  # left
        #     elif diff == 7:
        #         result = 0
        #     elif diff == -7:
        #         result = 1
        print(result)
        return result
if __name__ == '__main__':
    # Smoke test: load a sample board, ask the JS engine for one move,
    # then wait for a keypress before closing the browser.
    linker = JSLinker()
    linker.setBoardState([
        1, 2,                   # AI pawn (row, col)
        3, 4,                   # opponent pawn (row, col)
        6, 7,                   # wall counts: AI, opponent
        0, 0, 0, 0, 0, 0,       # wall grid: 1 = horizontal, 2 = vertical
        0, 0, 0, 0, 0, 0,
        0, 0, 2, 0, 0, 0,
        0, 0, 0, 1, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 0 ])
    linker.computeDecision()
    input('Press any key to quit')
    driver.quit()
|
#!/usr/bin/python2.7
#-*- coding: utf-8 -*-
"""Draw the computational-domain sketch for the Ship07 hull section and
save it as ship07-domain.eps."""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.cm as cm
import matplotlib.image as mpimg
import numpy as np
from matplotlib import rc
from matplotlib import gridspec
# Global font / LaTeX settings for publication-style output.
plt.rcParams["legend.fontsize"]=35
plt.rcParams["font.size"]=15
rc('font',**{'family':'serif','serif':['Times']})
rc('text', usetex=True)
# set the figure style
plt.figure(figsize=(16,12),dpi=200)
ax = plt.gca()
#Info
# Domain dimensions in plot units; the annotations below label the domain
# as 6 m wide (two 3.0 m halves) and 3 m deep (1.0 m + 1.0 m + 1.5 m spans).
domain_width = 18
domain_height = 6
dx = domain_width / 2
dy = domain_height
#fig limiter
yLim = 7.5
xLim = 10
############################ pyPost ############################
# read and plot
# Offsets file: two columns (x, y); 0.007 scales the model offsets to plot units.
data = np.loadtxt('./offsetShipSectionModel2.dat',skiprows=0)
x = data[:,0]*0.007
y = data[:,1]*0.007
x1 = [-np.max(x),np.max(x)]
x2 = [np.max(y),np.max(y)]
# plot1=plt.plot(x1,y1,'bo',markersize=12,markeredgecolor='blue',markeredgewidth=2,markerfacecolor='white',label='Exp. KRISO')
# Section outline, its mirror image, and the deck line joining the two sides.
plot=plt.plot(x,y,'-k',linewidth=2)
plot=plt.plot(-x,y,'-k',linewidth=2)
plot=plt.plot(x1,x2,'-k',linewidth=2)
############################ pyPost ############################
# plot lines
# x = np.linspace(0,5,100)
# y = np.tan(np.pi/6)*x
# temp = np.tan(np.pi/6)*5
# Wedge outlines:
# x = [0,5]
# y = [0,2]
# plt.plot(x,y)
# x = [-5,0]
# y = [2,0]
# plt.plot(x,y)
# x = [-5,5]
# y = [2,2]
# plt.plot(x,y)
# domain size: left wall, right wall and bottom of the domain box.
x1 = [-dx,-dx]
x2 = [dx,dx]
x3 = [-dx,dx]
y1 = [-dy,dy/2]
y3 = [-dy,-dy]
plt.plot(x1,y1,'-k',linewidth=2.5)
plt.plot(x2,y1,'-k',linewidth=2.5)
plt.plot(x3,y3,'-k',linewidth=2.5)
# annotate size: short ticks below the domain plus the width dimensions.
y = [-dy,-dy-1]
plt.plot(x1,y,'-k',linewidth=1)
plt.plot(x2,y,'-k',linewidth=1)
plt.annotate(r'$3.0\rm m$',xy=(-dx,-dy-0.7),
             xytext=(0,-dy-0.7),size=35,
             va="center",ha="center",
             arrowprops=dict(arrowstyle="->",
                             connectionstyle="angle,rad=0.1",fc='k')
             )
plt.annotate(r'$3.0\rm m$',xy=(dx,-dy-0.7),
             xytext=(0,-dy-0.7),size=35,
             va="center",ha="center",
             arrowprops=dict(arrowstyle="->",
                             connectionstyle="angle,rad=0.1",fc='k')
             )
# Side ticks plus the vertical (depth) dimensions on both sides.
x1 = [-dx-1,-dx]
x2 = [dx,dx+1]
y1 = [-dy,-dy]
y2 = [0,0]
y3 = [dy/2,dy/2]
plt.plot(x1,y1,'-k',linewidth=1)
plt.plot(x1,y2,'-k',linewidth=1)
plt.plot(x2,y1,'-k',linewidth=1)
plt.plot(x2,y3,'-k',linewidth=1)
plt.annotate(r'$1.0\rm m$',xy=(-dx-0.9,-dy),
             xytext=(-dx-0.9,-dy/2),size=35,
             va="center",ha="center",
             arrowprops=dict(arrowstyle="->",
                             connectionstyle="angle,rad=0.1",fc='k')
             )
plt.annotate(r'$1.0\rm m$',xy=(-dx-0.9,0),
             xytext=(-dx-0.9,-dy/2),size=35,
             va="center",ha="center",
             arrowprops=dict(arrowstyle="->",
                             connectionstyle="angle,rad=0.1",fc='k')
             )
plt.annotate(r'$1.5\rm m$',xy=(dx+0.9,-dy),
             xytext=(dx+0.9,-dy/4),size=35,
             va="center",ha="center",
             arrowprops=dict(arrowstyle="->",
                             connectionstyle="angle,rad=0.1",fc='k')
             )
plt.annotate(r'$1.5\rm m$',xy=(dx+0.9,dy/2),
             xytext=(dx+0.9,-dy/4),size=35,
             va="center",ha="center",
             arrowprops=dict(arrowstyle="->",
                             connectionstyle="angle,rad=0.1",fc='k')
             )
# plt.arrow(0,0,2,2,head_width=0.05, head_length=0.1, fc='k', ec='k') # x1,y1,x2,y2
############################ pyPost ############################
# plot rectangle: filled patch marking the full computational domain.
ax.add_patch(
    patches.Rectangle(
        (-dx,-dy), # (x,y) left bottom
        domain_width, # width
        domain_height, # height
    )
)
############################ pyPost ############################
# add comments
plt.text( -1.0,4.5, r'Ship07',fontdict={'size':40, 'color':'r'})
# x0 = 1.5
# y0 = 0.2
# plt.text( x0+2.0,y0+0.7, r'Deadrise angle',fontdict={'size':16, 'color':'k'})
# plt.annotate(r'$\alpha=30^\circ$', xy=(x0, y0), xycoords='data',xytext=(+40,+5),
#              textcoords='offset points', fontsize=16,
#              arrowprops=dict(arrowstyle='->', connectionstyle="arc3,rad=.2"))
############################ pyPost ############################
plt.xlim(-xLim,xLim)
plt.ylim(-yLim,yLim)
#plt.ylim(-5.0,4)
# ax.set_xticks(np.arange(0,10,1.5)+0.4)
# ax.set_yticks(np.arange(0,10,1.5)+0.4)
# ax.spines['right'].set_color('none')
# ax.spines['bottom'].set_color('none')
# ax.spines['left'].set_color('none')
# ax.spines['top'].set_color('none')
# plt.axis('scaled')
# plt.xlim(0.5,10.5)
# plt.ylim(-1,9)
# plt.tick_params(labelbottom='off', labelleft='off', left='off', right='off',
#                 bottom='off', top='off')
# add the legend
# ax.legend(loc='lower center', handlelength=2.3, handletextpad=1, labelspacing=1,
#           ncol=2,mode="expand",borderpad=1, fancybox=True)
# plt.show()
# make ticks of x/y invisible
plt.xticks([])
plt.yticks([])
# make border lines invisible
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.tight_layout()
plt.savefig("ship07-domain.eps")
|
#!/usr/bin/python3
import unittest
from datetime import datetime, timedelta
import pandas as pd
from pandas.testing import assert_frame_equal
from model.time_series.time_series import TimeSeries
from model.time_series.time_series_row import TimeSeriesRow
class TestTimeSeries(unittest.TestCase):
    """Round-trip tests for TimeSeries <-> JSON and TimeSeries <-> pandas DataFrame."""

    def test_to_json(self):
        """Serialising a monthly series yields column names, the date format,
        and one {date, value} dict per row, with dates rendered per the format."""
        # Given
        time_series_row1 = TimeSeriesRow(datetime(1949, 1, 1), 112.0)
        time_series_row2 = TimeSeriesRow(datetime(1949, 2, 1), 118.0)
        time_series_row3 = TimeSeriesRow(datetime(1949, 3, 1), 132.0)
        time_series = TimeSeries([time_series_row1, time_series_row2, time_series_row3],
                                 "Date", "Passengers", "yyyy-MM")
        # When
        actual_json = time_series.to_json()
        # Then
        expected_json = dict(dateColumnName='Date',
                             valueColumnName='Passengers',
                             dateFormat='yyyy-MM',
                             rows=[dict(date='1949-01', value=112.0),
                                   dict(date='1949-02', value=118.0),
                                   dict(date='1949-03', value=132.0)])
        assert expected_json == actual_json

    def test_from_json(self):
        """Deserialising JSON with a second-resolution date format rebuilds
        an equal TimeSeries with parsed datetimes."""
        # Given
        json = dict(dateColumnName='Date',
                    valueColumnName='Passengers',
                    dateFormat='yyyy-MM-dd HH:mm:ss',
                    rows=[dict(date='1949-01-01 11:00:01', value=112.0),
                          dict(date='1949-01-01 11:00:02', value=118.0),
                          dict(date='1949-01-01 11:00:03', value=132.0)])
        # When
        actual_time_series = TimeSeries.from_json(json)
        # Then
        time_series_row1 = TimeSeriesRow(datetime(1949, 1, 1, 11, 0, 1), 112.0)
        time_series_row2 = TimeSeriesRow(datetime(1949, 1, 1, 11, 0, 2), 118.0)
        time_series_row3 = TimeSeriesRow(datetime(1949, 1, 1, 11, 0, 3), 132.0)
        expected_time_series = TimeSeries([time_series_row1, time_series_row2, time_series_row3],
                                          "Date", "Passengers", "yyyy-MM-dd HH:mm:ss")
        assert expected_time_series == actual_time_series

    def test_from_data_frame(self):
        """Building from a DataFrame with hourly datetimes produces one row
        per DataFrame entry, in order."""
        # Given
        initial_data = {"Date": [datetime.strptime("1960-08-01 11:00:00", "%Y-%m-%d %H:%M:%S") + timedelta(hours=i)
                                 for i in range(0, 3)],
                        "Passengers": [112.0, 118.0, 132.0]}
        data_frame = pd.DataFrame.from_dict(initial_data)
        # When
        actual_time_series = TimeSeries.from_data_frame(data_frame, "Date", "Passengers", "yyyy-MM-dd HH:mm:ss")
        # Then
        time_series_row1 = TimeSeriesRow(datetime(1960, 8, 1, 11, 0, 0), 112.0)
        time_series_row2 = TimeSeriesRow(datetime(1960, 8, 1, 12, 0, 0), 118.0)
        time_series_row3 = TimeSeriesRow(datetime(1960, 8, 1, 13, 0, 0), 132.0)
        expected_time_series = TimeSeries([time_series_row1, time_series_row2, time_series_row3],
                                          "Date", "Passengers", "yyyy-MM-dd HH:mm:ss")
        assert expected_time_series == actual_time_series

    def test_to_data_frame(self):
        """Converting to a DataFrame reproduces the original column layout
        and values (checked with assert_frame_equal)."""
        # Given
        time_series_row1 = TimeSeriesRow(datetime(1960, 8, 1, 11, 0, 0), 112.0)
        time_series_row2 = TimeSeriesRow(datetime(1960, 8, 1, 12, 0, 0), 118.0)
        time_series_row3 = TimeSeriesRow(datetime(1960, 8, 1, 13, 0, 0), 132.0)
        time_series = TimeSeries([time_series_row1, time_series_row2, time_series_row3],
                                 "Date", "Passengers", "yyyy-MM-dd HH:mm:ss")
        # When
        actual_data_frame = time_series.to_data_frame()
        # Then
        initial_data = {"Date": [datetime.strptime("1960-08-01 11:00:00", "%Y-%m-%d %H:%M:%S") + timedelta(hours=i)
                                 for i in range(0, 3)],
                        "Passengers": [112.0, 118.0, 132.0]}
        expected_data_frame = pd.DataFrame.from_dict(initial_data)
        assert_frame_equal(expected_data_frame, actual_data_frame)
# Allow running this test module directly: python test_time_series.py
if __name__ == "__main__":
    unittest.main()
|
from PIL import ImageGrab as IG
import pyautogui as pa
import sys
import os
import time
import re
pa.FAILSAFE = True
sec_between_keys = 0.25
sec_between_term = 3
sec_sleep = 0.5
# Screenshot helper (original comment was Korean for "screenshot").
def screenGrab():
    """Capture the full screen and save it as img/full_snap__<epoch>.png
    under the current working directory.

    FIX: the original passed an empty tuple as the bbox argument to
    ImageGrab.grab; calling grab() with no bbox captures the whole screen.
    """
    im = IG.grab()
    im.save(os.path.join(os.getcwd(), 'img', 'full_snap__' + str(int(time.time())) + '.png'), 'PNG')
# Wait for the target screen: scan every window title for the BMS URL.
# (Original comment was Korean: "wait until the screen comes up".)
all = pa.getWindows()
for i in all:
    if 'http://bms.ken.go.kr/?USERID=driver' in i:
        print(i, 'yes')
    else:
        continue
# Bring the KakaoTalk window to the foreground and report its position.
pa.getWindow('카카오톡').set_foreground()
te = pa.getWindow('카카오톡').get_position()
print(te)
'''
RAON K Hybrid Agent
Study_webCrawling from 2018 [D:\OneDrive - 학현초등학교\Gdrive\★작업중\SW_PyCharm\studyPython from 2018] - ...\crawling\crawler_naver news_all_180802 .py [Study_webCrawling from 2018] - PyCharm
...\pyAutogui\pyAutoGui_Neis_Gyeljae_180906.py [Study_webCrawling from 2018] - PyCharm
http://bms.ken.go.kr/?USERID=driver1&APPRIDLIST=J10CB182424951849000&APPRDEPTID=J100004848&APPR - Internet Explorer
http://bms.ken.go.kr/ - 결재대기 | 업무관리시스템 - Internet Explorer
업무포털 - 석진일/학현초등학교 - Internet Explorer
경기도교육청
카카오톡
이미지 014.png - 픽픽
WorkFlowy
Windows에서 파이썬 스크립트 실행용 exe 실행파일 구성방법 - Chrome
cli.exe - Everything
받은 쪽지 - 최종철(학현초등학교 전담)
받은 쪽지 - 김소희(학현초등학교 4학년)
Daum 지도 - Chrome
Total Commander 7.04 - University of Amsterdam
백업 및 동기화
'''
|
def celsius_to_fahrenheit(celsius):
    """Convert a Celsius temperature to Fahrenheit (F = 1.8 * C + 32)."""
    return 1.8 * celsius + 32


if __name__ == '__main__':
    print('*****CELSIUS TO FAHRENHEIT CONVERTER*****')
    # FIX: read a float and drop the int() around the result - the original
    # truncated the conversion (37 degC printed 98 instead of 98.6).
    celsius_temp = float(input('Enter the temperature in Celsius: '))
    fahrenheit_temp = celsius_to_fahrenheit(celsius_temp)
    print(f'Temperature in Fahrenheit is {fahrenheit_temp:g}˚F')
|
# -*- coding: utf-8 -*-
class Solution:
    def findOcurrences(self, text, first, second):
        """Return every word that directly follows the bigram *first second*."""
        words = text.split()
        # Slide three parallel views over the word list; w3 trails two behind.
        return [w3 for w1, w2, w3 in zip(words, words[1:], words[2:])
                if w1 == first and w2 == second]
# Smoke tests mirroring the LeetCode 1078 examples.
if __name__ == "__main__":
    solution = Solution()
    assert ["girl", "student"] == solution.findOcurrences(
        "alice is a good girl she is a good student", "a", "good"
    )
    assert ["we", "rock"] == solution.findOcurrences(
        "we will we will rock you", "we", "will"
    )
|
#!/usr/bin/python3
# -*- Mode: Python; py-indent-offset: 4 -*-
#
# Copyright (C) 2005,2007 Ray Burr
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# DISCLAIMER: I don't pretend to be a math wizard. I don't have a
# deep understanding of all of the theory behind CRCs. Part of the
# reason I originally wrote this is to help understand and verify CRC
# algorithms in practice. It is likely that some of my terminology is
# inaccurate.
# Requires at least Python 2.4; tested with 2.4 and 2.5.
"""
This module can model common CRC algorithms given the set of defining
parameters. This is intended to be easy to use for experimentation
rather than optimized for speed. It is slow even for a native Python
CRC implementation.
Several common CRC algorithms are predefined in this module.
:authors: Ray Burr
:license: MIT License
:contact: http://www.nightmare.com/~ryb/
Examples
========
>>> '%X' % CRC32.calcString('123456789')
'CBF43926'
This test function runs all of the defined algorithms on the test
input string '123456789':
>>> _printResults()
CRC-5-USB: 19
CRC-8-SMBUS: F4
CRC-15: 059E
CRC-16: BB3D
CRC-16-USB: B4C8
CRC-CCITT: 29B1
CRC-HDLC: 906E
CRC-24: 21CF02
CRC-32: CBF43926
CRC-32C: E3069283
CRC-64: 46A5A9388A5BEFFE
CRC-256: 79B96BDC0C519B239BE759EC0688C86FD25A3F4DF1E7F054AD1F923D0739DAC8
Calculating in parts:
>>> value = CRC32.calcString('1234')
>>> '%X' % CRC32.calcString('56789', value)
'CBF43926'
Or, done a different way:
>>> crc = CrcRegister(CRC32)
>>> crc.takeString('1234')
>>> crc.takeString('56789')
>>> '%X' % crc.getFinalValue()
'CBF43926'
Inversion of a CRC function:
>>> CRC_CCITT.reverse().reflect().calcWord(54321, 16, 0)
1648
>>> CRC_CCITT.calcWord(_, 16, 0)
54321
A 15-bit CRC is used in CAN protocols. The following sample CAN frame
(in binary here) is converted to hexadecimal for the calcWord call.
The bits after the 15-bit CRC are not included in the CRC::
0 11101000001 0 0 0 0001 00010010 011000010111011 1 1 1 1111111
This sample CAN frame was found in this paper:
<http://www.anthony-marino.com/documents/HDL_implementation_CAN.pdf>
>>> '%X' % CRC15.calcWord(0x3A08112, 27)
'30BB'
If the CRC is included, the remainder should always be zero:
>>> print(CRC15.calcWord(0x1D0408930BB, 42))
0
A 5-bit CRC is used some kinds of USB packets. Here is a sample
start-of-frame packet:
10100101 01100111000 01111
(found at <http://www.nital.com/corporate/usb2snooper.html>)
The first field is the PID (not included in the CRC), the next 11-bit
field is the frame number (0xE6, LSb-first order), and the final five
bits are the CRC (0x1E, LSb-first order).
>>> '%X' % CRC5_USB.calcWord(0xE6, 11)
'1E'
"""
# <http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html>
__docformat__ = "restructuredtext en"
__version__ = "20070611"
class CrcAlgorithm:
    """
    Represents the parameters of a CRC algorithm.
    """

    # FIXME: Instances are supposed to be immutable, but attributes are
    # writable.

    def __init__(self,
                 width,
                 polynomial,
                 name=None,
                 seed=0,
                 lsbFirst=False,
                 lsbFirstData=None,
                 xorMask=0):
        """
        :param width:
            The number of bits in the CRC register, or equivalently, the
            degree of the polynomial.
        :type width:
            an integer
        :param polynomial:
            The generator polynomial as a sequence of exponents
        :type polynomial:
            sequence or integer
        :param name:
            A name identifying algorithm.
        :type name:
            *str*
        :param seed:
            The initial value to load into the register.  (This is the
            value without *xorMask* applied.)
        :type seed:
            an integer
        :param lsbFirst:
            If ``true``, the register shifts toward the
            least-significant bit (sometimes called the *reflected* or
            *reversed* algorithm).  Otherwise, the register shifts
            toward the most-significant bit.
        :type lsbFirst:
            *bool*
        :param lsbFirstData:
            If ``true``, input data is taken least-significant bit
            first.  Otherwise, data is taken most-significant bit first.
            If ``None`` or not given, the value of *lsbFirst* is used.
        :type lsbFirstData:
            *bool*
        :param xorMask:
            An integer mask indicating which bits should be inverted
            when returning the final result.  This is also used for the
            input value if provided.
        :type xorMask:
            an integer
        :raises ValueError:
            if *width* is not an integer, or if the polynomial degree
            does not match *width*.
        """
        if width > 0:
            try:
                polyMask = int(polynomial)
            except TypeError:
                # Guess it is already a sequence of exponents.
                polynomial = list(polynomial)
                polynomial.sort()
                polynomial.reverse()
                polynomial = tuple(polynomial)
            else:
                # Convert a mask to a tuple of exponents.
                if lsbFirst:
                    polyMask = reflect(polyMask, width)
                polynomial = (width, )
                for i in range(width - 1, -1, -1):
                    if (polyMask >> i) & 1:
                        polynomial += (i, )
            if polynomial[:1] != (width, ):
                # BUG FIX: the original constructed this exception but never
                # raised it, silently accepting inconsistent parameters.
                raise ValueError(
                    "mismatch between width and polynomial degree")
        self.width = width
        self.polynomial = polynomial
        self.name = name
        self.seed = seed
        self.lsbFirst = lsbFirst
        self.lsbFirstData = lsbFirstData
        self.xorMask = xorMask
        if not hasattr(width, "__rlshift__"):
            raise ValueError("width must be an integer")
        # FIXME: Need more checking of parameters.

    def __repr__(self):
        info = ""
        if self.name is not None:
            info = ' "%s"' % str(self.name)
        result = "<%s.%s%s @ %#x>" % (self.__class__.__module__,
                                      self.__class__.__name__, info, id(self))
        return result

    def calcString(self, s, value=None):
        """
        Calculate the CRC of the 8-bit string *s*.
        """
        r = CrcRegister(self, value)
        r.takeString(s)
        return r.getFinalValue()

    def calcWord(self, word, width, value=None):
        """
        Calculate the CRC of the integer *word* as a sequence of
        *width* bits.
        """
        r = CrcRegister(self, value)
        r.takeWord(word, width)
        return r.getFinalValue()

    def reflect(self):
        """
        Return the algorithm with the bit-order reversed.
        """
        ca = CrcAlgorithm(0, 0)
        ca._initFromOther(self)
        ca.lsbFirst = not self.lsbFirst
        if self.lsbFirstData is not None:
            ca.lsbFirstData = not self.lsbFirstData
        if ca.name:
            ca.name += " reflected"
        return ca

    def reverse(self):
        """
        Return the algorithm with the reverse polynomial.
        """
        ca = CrcAlgorithm(0, 0)
        ca._initFromOther(self)
        ca.polynomial = [(self.width - e) for e in self.polynomial]
        ca.polynomial.sort()
        ca.polynomial.reverse()
        ca.polynomial = tuple(ca.polynomial)
        if ca.name:
            ca.name += " reversed"
        return ca

    def _initFromOther(self, other):
        # Copy every defining attribute from *other*; used by the width-0
        # placeholder instances created in reflect()/reverse().
        self.width = other.width
        self.polynomial = other.polynomial
        self.name = other.name
        self.seed = other.seed
        self.lsbFirst = other.lsbFirst
        self.lsbFirstData = other.lsbFirstData
        self.xorMask = other.xorMask
class CrcRegister:
    """
    Holds the intermediate state of a CRC calculation.
    """

    def __init__(self, crcAlgorithm, value=None):
        """
        :param crcAlgorithm:
            The CRC algorithm to use.
        :type crcAlgorithm:
            `CrcAlgorithm`
        :param value:
            Optional initial register value -- e.g. the result of a
            previous partial CRC calculation, to be continued with more
            data.  When ``None``, the algorithm's default seed is used.
        :type value:
            an integer
        """
        self.crcAlgorithm = crcAlgorithm
        alg = crcAlgorithm
        self.bitMask = (1 << alg.width) - 1
        # Fold the exponent tuple into a bit mask of the polynomial.
        poly = 0
        for exponent in alg.polynomial:
            poly |= 1 << exponent
        self.polyMask = poly & self.bitMask
        if alg.lsbFirst:
            # Reflected algorithm: mirror the polynomial, shift rightward.
            self.polyMask = reflect(self.polyMask, alg.width)
            self.inBitMask = 1 << (alg.width - 1)
            self.outBitMask = 1
        else:
            self.inBitMask = 1
            self.outBitMask = 1 << (alg.width - 1)
        # Input-data bit order defaults to the register's shift direction.
        if alg.lsbFirstData is None:
            self.lsbFirstData = alg.lsbFirst
        else:
            self.lsbFirstData = alg.lsbFirstData
        self.reset()
        if value is not None:
            self.value = value ^ alg.xorMask

    def __str__(self):
        return formatBinaryString(self.value, self.crcAlgorithm.width)

    def reset(self):
        """
        Reset the register to the algorithm's default seed value.
        """
        self.value = int(self.crcAlgorithm.seed)

    def takeBit(self, bit):
        """
        Process a single input bit.
        """
        shifted_out = bool(self.value & self.outBitMask)
        if self.crcAlgorithm.lsbFirst:
            self.value >>= 1
        else:
            self.value <<= 1
        self.value &= self.bitMask
        # XOR in the polynomial when the shifted-out bit differs from input.
        if shifted_out != bool(bit):
            self.value ^= self.polyMask

    def takeWord(self, word, width=8):
        """
        Process the low *width* bits of the integer *word* as input, in
        the bit order selected by lsbFirstData.  *word* may be
        arbitrarily large; there is no coded limit on *width*.
        """
        if self.lsbFirstData:
            positions = range(0, width)
        else:
            positions = range(width - 1, -1, -1)
        for pos in positions:
            self.takeBit((word >> pos) & 1)

    def takeString(self, s):
        """
        Process a string as a sequence of 8-bit characters.
        """
        for ch in s:
            self.takeWord(ord(ch))

    def getValue(self):
        """
        Current register value as an integer (without xorMask applied).
        """
        return self.value

    def getFinalValue(self):
        """
        Register value with the algorithm's xorMask applied -- the final
        CRC once all input data has been processed.
        """
        return self.value ^ self.crcAlgorithm.xorMask
def reflect(value, width):
    """Return *value* with its low *width* bits in reversed order."""
    out = 0
    for _ in range(width):
        out = (out << 1) | (value & 1)
        value >>= 1
    return out
def formatBinaryString(value, width):
    """Render the low *width* bits of *value*, MSb first, as a '0'/'1' string."""
    bits = []
    for i in range(width - 1, -1, -1):
        bits.append('1' if (value >> i) & 1 else '0')
    return ''.join(bits)
# Some standard algorithms are defined here. I believe I was able to
# verify the correctness of each of these in some way (against an
# existing implementation or sample data with a known result).
#: Same CRC algorithm as Python's zlib.crc32
CRC32 = CrcAlgorithm(name="CRC-32",
                     width=32,
                     polynomial=(32, 26, 23, 22, 16, 12, 11, 10, 8, 7, 5, 4, 2,
                                 1, 0),
                     seed=0xFFFFFFFF,
                     lsbFirst=True,
                     xorMask=0xFFFFFFFF)
#: CRC-16 (ARC): reflected, zero seed and zero final XOR.
CRC16 = CrcAlgorithm(name="CRC-16",
                     width=16,
                     polynomial=(16, 15, 2, 0),
                     seed=0x0000,
                     lsbFirst=True,
                     xorMask=0x0000)
#: Used in USB data packets.
CRC16_USB = CrcAlgorithm(name="CRC-16-USB",
                         width=16,
                         polynomial=(16, 15, 2, 0),
                         seed=0xFFFF,
                         lsbFirst=True,
                         xorMask=0xFFFF)
#: CCITT variant: MSb-first with 0xFFFF seed.
CRC_CCITT = CrcAlgorithm(name="CRC-CCITT",
                         width=16,
                         polynomial=(16, 12, 5, 0),
                         seed=0xFFFF,
                         lsbFirst=False,
                         xorMask=0x0000)
#: This is the algorithm used in X.25 and for the HDLC 2-byte FCS.
CRC_HDLC = CrcAlgorithm(name="CRC-HDLC",
                        width=16,
                        polynomial=(16, 12, 5, 0),
                        seed=0xFFFF,
                        lsbFirst=True,
                        xorMask=0xFFFF)
#: Used in ATM HEC and SMBus.
CRC8_SMBUS = CrcAlgorithm(name="CRC-8-SMBUS",
                          width=8,
                          polynomial=(8, 2, 1, 0),
                          seed=0,
                          lsbFirst=False,
                          xorMask=0)
#: Used in RFC-2440 and MIL STD 188-184.
CRC24 = CrcAlgorithm(name="CRC-24",
                     width=24,
                     polynomial=(24, 23, 18, 17, 14, 11, 10, 7, 6, 5, 4, 3, 1,
                                 0),
                     seed=0xB704CE,
                     lsbFirst=False,
                     xorMask=0)
#: Used in Controller Area Network frames.
CRC15 = CrcAlgorithm(name="CRC-15",
                     width=15,
                     polynomial=(15, 14, 10, 8, 7, 4, 3, 0),
                     seed=0,
                     lsbFirst=False,
                     xorMask=0)
#: Used in iSCSI (RFC-3385); usually credited to Guy Castagnoli.
CRC32C = CrcAlgorithm(name="CRC-32C",
                      width=32,
                      polynomial=(32, 28, 27, 26, 25, 23, 22, 20, 19, 18, 14,
                                  13, 11, 10, 9, 8, 6, 0),
                      seed=0xFFFFFFFF,
                      lsbFirst=True,
                      xorMask=0xFFFFFFFF)
#: CRC used in USB Token and Start-Of-Frame packets
CRC5_USB = CrcAlgorithm(name="CRC-5-USB",
                        width=5,
                        polynomial=(5, 2, 0),
                        seed=0x1F,
                        lsbFirst=True,
                        xorMask=0x1F)
#: ISO 3309
CRC64 = CrcAlgorithm(name="CRC-64",
                     width=64,
                     polynomial=(64, 4, 3, 1, 0),
                     seed=0,
                     lsbFirst=True,
                     xorMask=0)
#: This is just to show off the ability to handle a very wide CRC.
# If this is a standard, I don't know where it is from.  I found the
# polynomial on a web page of an apparent Czech "Lady Killer"
# <http://www.volny.cz/lk77/crc256mmx/>.
POLYNOM256 = 0x82E2443E6320383A20B8A2A0A1EA91A3CCA99A30C5205038349C82AAA3A8FD27
CRC256 = CrcAlgorithm(
    name="CRC-256",
    width=256,
    polynomial=POLYNOM256,
    seed=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
    lsbFirst=True,
    xorMask=0)
# For the following I haven't found complete information and/or have
# no way to verify the result. I started with the list on Wikipedia
# <http://en.wikipedia.org/wiki/Cyclic_redundancy_check>.
#
# CRC4_ITU = CrcAlgorithm(
# name = "CRC-4-ITU",
# width = 4,
# polynomial = (4, 1, 0),
# seed = ?,
# lsbFirst = ?,
# xorMask = ?)
#
# CRC5_ITU = CrcAlgorithm(
# name = "CRC-5-ITU",
# width = 5,
# polynomial = (5, 4, 2, 0),
# seed = ?,
# lsbFirst = ?,
# xorMask = ?)
#
# CRC6_ITU = CrcAlgorithm(
# name = "CRC-6-ITU",
# width = 6,
# polynomial = (6, 1, 0),
# seed = ?,
# lsbFirst = ?,
# xorMask = ?)
#
# CRC7 = CrcAlgorithm(
# name = "CRC-7",
# width = 7,
# polynomial = (7, 3, 0),
# seed = ?,
# lsbFirst = ?,
# xorMask = ?)
#
# CRC8_CCITT = CrcAlgorithm(
# name = "CRC-8-CCITT",
# width = 8,
# polynomial = (8, 7, 3, 2, 0),
# seed = ?,
# lsbFirst = ?,
# xorMask = ?)
#
# CRC8_DALLAS = CrcAlgorithm(
# name = "CRC-8-Dallas",
# width = 8,
# polynomial = (8, 5, 4, 0),
# seed = ?,
# lsbFirst = ?,
# xorMask = ?)
#
# CRC8 = CrcAlgorithm(
# name = "CRC-8",
# width = 8,
# polynomial = (8, 7, 6, 4, 2, 0),
# seed = ?,
# lsbFirst = ?,
# xorMask = ?)
#
# CRC8_J1850 = CrcAlgorithm(
# name = "CRC-8-J1850",
# width = 8,
# polynomial = (8, 4, 3, 2, 0),
# seed = ?,
# lsbFirst = ?,
# xorMask = ?)
#
# CRC10 = CrcAlgorithm(
# name = "CRC-10",
# width = 10,
# polynomial = (10, 9, 5, 4, 1, 0),
# seed = ?,
# lsbFirst = ?,
# xorMask = ?)
#
# CRC12 = CrcAlgorithm(
# name = "CRC-12",
# width = 12,
# polynomial = (12, 11, 3, 2, 1, 0),
# seed = ?,
# lsbFirst = ?,
# xorMask = ?)
#
# CRC64_ECMA182 = CrcAlgorithm(
# name = "CRC-64-ECMA-182",
# width = 64,
# polynomial = (64, 62, 57, 55, 54, 53, 52, 47, 46, 45, 40, 39, 38, 37,
# 35, 33, 32, 31, 29, 27, 24, 23, 22, 21, 19, 17, 13, 12,
# 10, 9, 7, 4, 1, 0),
# seed = ?,
# lsbFirst = ?,
# xorMask = ?)
def _callCalcString123456789(v):
return v.calcString('123456789')
def _printResults(fn=_callCalcString123456789):
    """Print *fn*'s hex result for every CrcAlgorithm defined in this module.

    Algorithms are ordered by (width, name); each value is zero-padded to
    ceil(width / 4) hex digits.
    """
    import sys
    d = sys.modules[__name__].__dict__
    algorithms = sorted(
        (v for (k, v) in d.items() if isinstance(v, CrcAlgorithm)),
        key=lambda v: (v.width, v.name))
    for a in algorithms:
        # Renamed from 'format', which shadowed the builtin of that name.
        fmt = "%%0%dX" % ((a.width + 3) // 4)
        # Single print producing the same "NAME: HEX" line as the original
        # two-print form.
        print("%s: %s" % (a.name, fmt % fn(a)))
def _test():
import doctest
import sys
return doctest.testmod(sys.modules[__name__])
# Self-test via doctest when run as a script.
if __name__ == "__main__":
    _test()
|
import pytest
@pytest.fixture
def dummy_socket():
    # Fresh fake socket per test; recv() yields b'hello', b'world', then b''.
    return DummySocket()
class DummySocket:
    """Socket stand-in whose recv() replays canned chunks, ending with b''."""

    def __init__(self):
        # Chunks returned in order; the empty bytes marks end-of-stream.
        self.data = [b'hello', b'world', b'']

    def recv(self, bufsize):
        # bufsize is accepted for API compatibility but not used.
        head, self.data = self.data[0], self.data[1:]
        return head
@pytest.fixture
def transport():
    # Placeholder fixture; no real transport is needed by these tests yet.
    return None
|
__author__ = 'SufferProgrammer'
import mysql.connector as mariaDBConnector
class DBase:
    """Thin wrapper around a MariaDB connection for the gui_trial table."""

    def __init__(self):
        # NOTE(review): host/credentials are hard-coded; move to config.
        self.conn = mariaDBConnector.connect(host = '192.168.8.101', user = 'developer', database = 'crud_trial', password='')
        self.cur = self.conn.cursor()

    def execute(self, command, params=None):
        """Run *command*; optional *params* tuple is bound by the driver."""
        if params is None:
            self.cur.execute(command)
        else:
            self.cur.execute(command, params)

    def addData(self, data):
        """Insert one row.  Parameterized: the original interpolated user
        text straight into the SQL string (injection risk)."""
        self.execute("INSERT INTO gui_trial(data) VALUES(%s)", (data,))
        self.commit()

    def delData(self, DropTarget):
        """Delete rows matching *DropTarget* (parameterized, see addData)."""
        self.execute("DELETE FROM gui_trial WHERE data=%s", (DropTarget,))
        self.commit()

    def commit(self):
        return self.conn.commit()

    def retrDataRow(self):
        """Return all rows of gui_trial."""
        self.execute("SELECT * FROM gui_trial")
        dataListRow = self.cur.fetchall()
        return dataListRow

    def retrData(self):
        """Return all rows of gui_trial (duplicate of retrDataRow, kept for
        existing callers)."""
        self.execute("SELECT * FROM gui_trial")
        dataList = self.cur.fetchall()
        return dataList

    def closeConnection(self):
        return self.conn.close()
|
# Generated by Django 2.0.7 on 2018-07-18 19:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Song.artist a many-to-many to Artist via api.By."""
    dependencies = [
        ('api', '0002_user_is_staff'),
    ]
    operations = [
        migrations.AlterField(
            model_name='song',
            name='artist',
            field=models.ManyToManyField(related_name='songs', through='api.By', to='api.Artist'),
        ),
    ]
|
from ..algorithms.tracker import Tracker
from ..algorithms.utils import Volume
def test_add_volume():
    """A single volume registers exactly one nested interval entry."""
    trk = Tracker()
    trk.add_volume(Volume(0, (5, 5, 5), (10, 10, 10)))
    outer = trk.i_dict
    assert len(outer.keys()) == 1
    middle = outer[(5, 10)]
    assert len(middle) == 1
    inner = middle[(5, 10)]
    assert len(inner) == 1
    assert list(inner.keys())[0] == (5, 10)
def test_is_complete():
    """Coverage is reported complete only once both halves are added."""
    trk = Tracker()
    bounds = ((0, 0, 0), (10, 10, 10))
    trk.add_volume(Volume(0, (0, 0, 0), (10, 10, 5)))
    trk.print()
    assert not trk.is_complete(bounds)
    trk.add_volume(Volume(0, (0, 0, 5), (10, 10, 10)))
    trk.print()
    assert trk.is_complete(bounds)
|
#!/usr/bin/python
# ---------------------------------
# 文件工具方法, 主要包含常见的文件处理方法
# 以及常见的文件存储的方法
# ---------------------------------
import glob
import math
import os
import random
import smtplib
import time
from collections import Counter
from email.header import Header
from email.mime.text import MIMEText
import cv2
import matplotlib.pyplot as plt
import numpy as np
from dtw import dtw
from scipy.fftpack import fft
from scipy.stats import wasserstein_distance
from sklearn import metrics
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); works elementwise on numpy arrays."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
def tanh(x):
    """Hyperbolic tangent of *x*.

    Delegates to math.tanh, which is numerically stable: the original
    sinh(x)/cosh(x) form raised OverflowError for |x| greater than ~710.
    """
    return math.tanh(x)
def get_all_file_path(path, suffix='fif'):  # list matching files per sub-directory
    '''
    Map each child directory of *path* to the list of its files matching
    *suffix*.

    :param path: directory whose immediate children are scanned
    :param suffix: file extension to match, without the dot
    :return: dict {child name: [matching file paths]}
    '''
    # Rewritten to drop the dead 'names = []' assignment and the local
    # 'dir' variable, which shadowed the builtin of that name.
    file_map = {}
    pattern = '*.' + suffix
    for entry in os.listdir(path):
        # glob on a non-directory entry simply yields [], matching the
        # original behaviour for stray files under *path*.
        file_map[entry] = glob.glob(os.path.join(path, entry, pattern))
    return file_map
def matrix_normalization(data, resize_shape=(130, -1)):
    '''
    Normalize the number of rows (channels) of a matrix to resize_shape[0],
    either by inserting interpolated rows or by deleting random rows; the
    column count is left untouched.
    e.g. (188, 200) -> (130, 200)
    :param data: 2-D numpy array
    :param resize_shape: target shape; only resize_shape[0] is used
    :return: the row-adjusted array
    '''
    data_shape = data.shape  # data must be a numpy array
    if data_shape[0] != resize_shape[0]:
        if resize_shape[0] > data_shape[0]:  # insertion: grow the matrix
            '''
            扩大原来的矩阵
            '''
            d = resize_shape[0] - data_shape[0]
            while d > 0:
                # Insert the average of the two neighbouring rows at a
                # random interior position, one row at a time.
                ch_no = random.randint(1, data.shape[0] - 2)
                tmp = (data[ch_no - 1] + data[ch_no + 1]) * 1.0 / 2
                data = np.insert(data, ch_no, tmp, axis=0)
                d -= 1
            # channels_add = random.sample(range(1, data_shape[0] - 1), d)
            # fake_channel = []  # values of the channels to add
            # for c in channels_add:
            #     tmp = (data[c - 1] + data[c]) * 1.0 / 2
            #     fake_channel.append(tmp)
            # data = np.insert(data, channels_add, fake_channel, axis=0)
        else:
            if resize_shape[0] < data_shape[0]:  # deletion: shrink the matrix
                '''
                删除掉原来的矩阵
                '''
                d = data_shape[0] - resize_shape[0]
                # Drop d random interior rows (never the first or last row).
                channels_del = random.sample(range(1, data_shape[0] - 1), d)
                data = np.delete(data, channels_del, axis=0)
    return data
def matrix_normalization_recorder(data, resize_shape=(130, 200)):  # recording version of matrix_normalization
    '''
    Same row-count normalization as matrix_normalization, but additionally
    returns a record of which channels were inserted or deleted.
    e.g. (188, 200) -> (130, 200)
    :param data: 2-D numpy array
    :param resize_shape: target shape; only resize_shape[0] is used
    :return: (adjusted array, recorder list)
    '''
    # recorder layout: [flag, ch, ch, ...]; flag is -1 (channels deleted),
    # +1 (channels added) or 0 (unchanged); the rest are channel indices.
    recoder = []
    recoder.append(0)
    data_shape = data.shape  # data must be a numpy array
    if data_shape[0] != resize_shape[0]:
        if resize_shape[0] > data_shape[0]:  # insertion: grow the matrix
            '''
            扩大原来的矩阵
            '''
            d = resize_shape[0] - data_shape[0]
            channels_add = random.sample(range(1, data_shape[0] - 1), d)
            recoder[0] = 1
            recoder += channels_add
            fake_channel = []  # values of the inserted channels
            for c in channels_add:
                tmp = (data[c - 1] + data[c]) * 1.0 / 2
                fake_channel.append(tmp)
            data = np.insert(data, channels_add, fake_channel, axis=0)
        else:
            if resize_shape[0] < data_shape[0]:  # deletion: shrink the matrix
                '''
                删除掉原来的矩阵
                '''
                d = data_shape[0] - resize_shape[0]
                channels_del = random.sample(range(1, data_shape[0] - 1), d)
                recoder[0] = -1
                recoder += channels_del
                data = np.delete(data, channels_del, axis=0)
    return data, recoder
def get_label_data(path):  # get data include label
    '''
    Build a {file_path: class_index} mapping from a directory-per-class
    layout under *path*.

    :param path: root directory; each sub-directory is one class
    :return: dict such as {"path": 1, "path2": 2}
    '''
    result_data_label = {}
    for label, class_dir in enumerate(os.listdir(path)):
        class_path = os.path.join(path, class_dir)
        for file_name in os.listdir(class_path):
            result_data_label[os.path.join(class_path, file_name)] = label
    return result_data_label
def get_first_dir_path(path, suffix="jpg"):
    """Return paths of files directly under *path* with the given *suffix*."""
    pattern = os.path.join(path, '*.' + suffix)
    return glob.glob(pattern)
def get_matrix_max_location(mtx_data, k, reverse=True):
    '''
    Locate the k largest (or smallest) entries of a 2-D matrix.

    :param mtx_data: 2-D numpy array
    :param k: how many extreme entries to locate
    :param reverse: True -> k largest, result ordered descending;
                    False -> k smallest, result ordered ascending
    :return: list of (row, col) tuples ordered by value,
             e.g. [(0, 0), (2, 1), (2, 2), (1, 1), (1, 2)]
    '''
    d_f = mtx_data.flatten()
    order = d_f.argsort()
    if reverse:
        index_id = order[-k:]
    else:
        # BUG FIX: the original used order[k:], which selects everything
        # EXCEPT the k smallest; the k smallest entries are order[:k].
        index_id = order[:k]
    x_index, y_index = np.unravel_index(index_id, mtx_data.shape)
    location = list(zip(x_index, y_index))  # still unordered at this point
    location_dic = {}
    for x, y in location:
        location_dic[(x, y)] = mtx_data[x][y]
    # Order by value: descending for the maxima, ascending for the minima.
    key_func = (lambda item: -item[1]) if reverse else (lambda item: item[1])
    ordered = sorted(location_dic.items(), key=key_func)
    return [loc for loc, _ in ordered]
def mtx_similarity(mtx_a, mtx_b):
    '''
    Absolute cosine similarity between two matrices, flattened to vectors
    and truncated to the shorter length.

    :param mtx_a: first matrix
    :param mtx_b: second matrix
    :return: similarity score in [0, 1]
    '''
    vec_a = mtx_a.flatten()
    vec_b = mtx_b.flatten()
    n = min(len(vec_a), len(vec_b))
    vec_a, vec_b = vec_a[:n], vec_b[:n]
    cosine = np.dot(vec_a, vec_b.T) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))
    return abs(cosine)
def clean_dir(path):
    """Interactively delete everything under *path* via `rm -r` (DANGEROUS).

    Prompts for confirmation; on anything other than y/Y the whole process
    exits.  NOTE(review): uses a shell command, so this is POSIX-only and
    unsafe for paths containing shell metacharacters.
    """
    print("this is danger operation!")
    clean_path = os.path.join(path, "*")
    print("you will clean all files in {}, do you continued?(y/n)".format(clean_path))
    key = str(input())
    if key == "y" or key == "Y":
        print("cleaning the files in {}".format(clean_path))
        os.system("rm -r {}".format(clean_path))
        print("clean finished!")
    else:
        print("cancel operation !")
        # Terminates the whole program on cancel, not just this function.
        exit(0)
def trans_numpy_cv2(data):
    """Map a numeric array to an 8-bit cv2 image: sigmoid, min-max rescale
    to [0, 255], cast to uint8.

    NOTE(review): if *data* is constant, max_data becomes 0 and the division
    below fails -- confirm inputs always vary.
    """
    data1 = sigmoid(data)
    min_data = np.min(data1)
    data1 = data1 - min_data
    max_data = np.max(data1)
    data1 = data1 / max_data * 255
    result = data1.astype(np.uint8)
    result = cv2.merge([result])  # merge into a single-channel cv2 image
    return result
def time_add(h, m, s, seconds_add):
    '''
    Advance a wall-clock time by *seconds_add* seconds.

    :param h: hours
    :param m: minutes
    :param s: seconds (may be fractional)
    :param seconds_add: seconds to add
    :return: (hour, minute, second), hours wrapped modulo 24
    '''
    total_s = s + seconds_add
    carry_m = total_s // 60
    sec = total_s % 60
    total_m = m + carry_m
    carry_h = total_m // 60
    minute = total_m % 60
    hour = (h + carry_h) % 24
    return int(hour), int(minute), sec
class IndicatorCalculation:  # binary-classification metrics
    '''
    Confusion-matrix layout:
        tp, fp
        fn, tn
    '''

    def __init__(self, prediction=None, ground_truth=None):
        if prediction is not None and ground_truth is not None:
            self.prediction = prediction      # e.g. [0, 1, 0, 1, 1, 0]
            self.ground_truth = ground_truth  # e.g. [0, 1, 0, 0, 1]

    @staticmethod
    def __division_detection(number):
        # Returns 0 for a zero divisor so callers can detect it.
        return 0 if number == 0 else number

    def __count(self, pred_value, truth_value):
        # Count positions where the pair (prediction, truth) matches.
        # Indexing over len(prediction) keeps the original IndexError
        # behaviour when ground_truth is shorter.
        return sum(1 for i in range(len(self.prediction))
                   if self.prediction[i] == pred_value
                   and self.ground_truth[i] == truth_value)

    def __tp(self):
        return self.__count(1, 1)

    def __fp(self):
        return self.__count(1, 0)

    def __fn(self):
        return self.__count(0, 1)

    def __tn(self):
        return self.__count(0, 0)

    def set_values(self, prediction, ground_truth):
        self.prediction = prediction
        self.ground_truth = ground_truth

    def __maybe_set(self, prediction, ground_truth):
        # Adopt new values only when both are supplied (original pattern).
        if prediction is not None and ground_truth is not None:
            self.prediction = prediction
            self.ground_truth = ground_truth

    def get_accuracy(self, prediction=None, ground_truth=None):
        """(tp + tn) / total."""
        self.__maybe_set(prediction, ground_truth)
        total = self.__tn() + self.__tp() + self.__fn() + self.__fp()
        return (self.__tp() + self.__tn()) / total

    def get_far(self, prediction=None, ground_truth=None):
        """False-alarm rate fp / (fp + tn)."""
        self.__maybe_set(prediction, ground_truth)
        return self.__fp() / (self.__fp() + self.__tn())

    def get_recall(self, prediction=None, ground_truth=None):
        """tp / (tp + fn); 0 when there are no positives."""
        self.__maybe_set(prediction, ground_truth)
        divisor = self.__division_detection(self.__tp() + self.__fn())
        return 0 if divisor == 0 else self.__tp() / divisor

    def get_precision(self, prediction=None, ground_truth=None):
        """tp / (tp + fp); 0 when nothing was predicted positive."""
        self.__maybe_set(prediction, ground_truth)
        divisor = self.__division_detection(self.__tp() + self.__fp())
        return 0 if divisor == 0 else self.__tp() / divisor

    def get_f1score(self, prediction=None, ground_truth=None):
        """2*tp / (2*tp + fn + fp); 0 when precision + recall is 0."""
        self.__maybe_set(prediction, ground_truth)
        if (self.get_recall() is None) or (self.get_precision() is None) or (
                (self.get_recall() + self.get_precision()) == 0):
            return 0
        return (2 * self.__tp()) / (2 * self.__tp() + self.__fn() + self.__fp())

    def get_auc(self, y_pre=None, y_real=None):
        """ROC AUC via sklearn; uses stored values when no args are given."""
        if y_real is None and y_pre is None:
            y_predict = self.prediction
            y_real = self.ground_truth
        else:
            y_predict = y_pre
        auc_score = metrics.roc_auc_score(y_real, y_predict)
        return auc_score
def dir_create_check(path_dir):
    """Create *path_dir* (and parents) when absent; report either way."""
    if os.path.exists(path_dir):
        print("{} has existed!".format(path_dir))
    else:
        os.makedirs(path_dir)
        print("{} has been created!".format(path_dir))
class Pyemail:
    """Send a plain-text notification e-mail via QQ SMTP on construction."""

    def __init__(self, tital, message_info):
        # NOTE: constructing the object connects and sends immediately.
        self.set_SMTP()
        self.set_sender()
        self.set_receivers("danyuhao@qq.com")
        self.message = MIMEText(message_info, 'plain', 'utf-8')
        self.message['From'] = Header("Lab", 'utf-8')
        self.message['To'] = Header("Alex", 'utf-8')
        self.subject = tital
        self.message['Subject'] = Header(self.subject, 'utf-8')
        self.send_info()

    def set_SMTP(self):
        # Third-party SMTP service credentials.
        self.mail_host = "smtp.qq.com"
        self.mail_user = "danyuhao@qq.com"
        self.mail_pass = "guwoxifmcgribdfj"  # app-specific password

    def set_sender(self, sender='danyuhao@qq.com'):
        self.sender = sender

    def set_receivers(self, *kwg):
        self.receivers = list(kwg)

    def send_info(self):
        try:
            session = smtplib.SMTP()
            session.connect(self.mail_host, 25)  # 25 = plain SMTP port
            session.login(self.mail_user, self.mail_pass)
            session.sendmail(self.sender, self.receivers, self.message.as_string())
            print("邮件发送成功")
        except smtplib.SMTPException:
            print("Error: 无法发送邮件")
def similarity_dtw(s1, s2):
    '''
    DTW-based similarity score in [0, 1]: 0 = completely different,
    1 = identical.

    :param s1: first sequence
    :param s2: second sequence
    '''
    scale = 10  # spreads distances so scores don't cluster together

    def scaled_abs(x, y):
        return np.abs(scale * (x - y))

    d, cost_matrix, acc_cost_matrix, path = dtw(s1, s2, dist=scaled_abs)
    return 1 - tanh(d)
def similarity_EMD(s1, s2):
    """Wasserstein(EMD)-based similarity in [0, 1]; 1 means identical distributions."""
    scale = 1e3
    distance = wasserstein_distance(s1, s2)
    return 1 - np.tanh(scale * distance)
def fft_function(data):
    '''
    Plot the single-sided amplitude spectrum of *data* via FFT.
    :param data: 1-D sample sequence
    :return: None (shows a matplotlib figure)
    '''
    fft_y = fft(data)
    N = len(data)
    x = range(int(N / 2))
    # Two-sided amplitude scaled by 2/N; keep only the first half
    # (the spectrum of a real signal is symmetric).
    y_ = np.abs(fft_y) * 2 / N
    y = y_[range(int(N / 2))]
    plt.figure()
    plt.plot(x, y, 'r')
    plt.xlabel("Frequency")
    plt.ylabel("Amplitude")
    name = "Frequency"
    plt.title(name)
    plt.show()
def histogram_spectrum(data, file_pass=10):
    """Plot a histogram of each row's dominant FFT frequency bin (<= 10).

    :param data: iterable of 1-D sample sequences
    :param file_pass: unused -- presumably the intended cutoff for the
        hard-coded 10 in the filter below; confirm before wiring it in.
    """
    frequency = []
    for i, d in enumerate(data):
        fft_y = fft(d)
        N = len(d)
        x = range(int(N / 2))
        y_ = np.abs(fft_y) * 2 / N
        y = y_[range(int(N / 2))]
        # Index of the strongest component = dominant frequency bin.
        index = np.argmax(y)
        frequency.append(index)
    print(frequency)
    plt.figure()
    frequency = list(filter(lambda x: x <= 10, frequency))
    count = len(dict(Counter(frequency)))
    plt.hist(frequency, density=True, bins=count)
    plt.xlabel("Frequency")
    plt.ylabel("Density")
    plt.title("Histogram of the spectrum")
    plt.show()
class LogRecord:
    """Append-only run log; each line is '<message>\\t<timestamp>'."""

    @staticmethod
    def write_log(log_txt, log_path="../log/Running_result.txt"):
        """Append *log_txt* plus a timestamp to *log_path*, creating the
        parent directory when needed, then echo the message."""
        base_dir = os.path.dirname(log_path)
        # Guard the empty-dirname case (bare filename) -- os.makedirs("")
        # raises; the original did not handle it either.
        if base_dir and not os.path.exists(base_dir):
            os.makedirs(base_dir)
        time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        result = "{}\t{}\n".format(log_txt, time_now)
        # 'a' creates the file when missing, so the old 'w'/'a' branch was
        # redundant; 'with' guarantees the handle is closed even on error.
        with open(log_path, 'a') as f:
            f.write(result)
        print(log_txt)
        print("generate log message!")
        return
def linear_matrix_normalization(M):
    '''
    Linearly rescale *M* into [0, 1] (min-max normalization).

    :param M: array/tensor exposing .min() and .max()
    :return: the rescaled array
    '''
    lo = M.min()
    span = M.max() - lo
    return (M - lo) / span
def calculation_result_standard_deviation(prediction, grand_truth, cal_def, epoch=5):
    '''
    Compute a metric together with its standard deviation by splitting the
    (prediction, label) pairs into *epoch* random batches and evaluating
    *cal_def* on each batch.
    :param prediction: predicted results (NOTE: shuffled IN PLACE)
    :param grand_truth: true labels (NOTE: shuffled IN PLACE)
    :param cal_def: metric function taking (prediction, ground_truth)
    :param epoch: number of batches to sample
    :return: (mean, std) of the per-batch metric values
    '''
    # Shuffling both lists with the same time-based seed applies the same
    # permutation to each, so pairs stay aligned.
    seed = time.time()
    random.seed(seed)
    random.shuffle(prediction)
    random.seed(seed)
    random.shuffle(grand_truth)
    total_size = len(prediction)
    batch_size = total_size // epoch
    res = []
    for i in range(epoch):
        tmp_prediction = prediction[i * batch_size:(i + 1) * batch_size]
        tmp_ground_truth = grand_truth[i * batch_size:(i + 1) * batch_size]
        tmp_res = cal_def(tmp_prediction, tmp_ground_truth)
        res.append(tmp_res)
    return np.mean(res), np.std(res)
|
class Solution:
    def minimumRounds(self, tasks: List[int]) -> int:
        """Minimum rounds of 2 or 3 equal-difficulty tasks; -1 if impossible."""
        counts = Counter(tasks)
        rounds = 0
        for freq in counts.values():
            if freq == 1:
                # A lone task can never fill a round of 2 or 3.
                return -1
            # ceil(freq / 3): equals freq//3 when divisible, freq//3 + 1 otherwise.
            rounds += (freq + 2) // 3
        return rounds
|
import unittest
from katas.kyu_6.stop_spinning_my_words import spin_words
class SpinWordsTestCase(unittest.TestCase):
    """spin_words: words of five or more letters are reversed in place."""
    def test_equals(self):
        self.assertEqual(spin_words('Welcome'), 'emocleW')
    def test_equals_2(self):
        self.assertEqual(spin_words('Hey fellow warriors'),
                         'Hey wollef sroirraw')
    def test_equals_3(self):
        # Words shorter than five letters are left untouched.
        self.assertEqual(spin_words('This is a test'), 'This is a test')
    def test_equals_4(self):
        self.assertEqual(spin_words('This is another test'),
                         'This is rehtona test')
|
from post_question import generateRandomId
from datetime import datetime
"""-----------------------------------------------------------------
create_answer - Creates an answer
Purpose: Based upon the body provided it creates an answer in the
posts collection
Input: questionId : The id of the question on which we are posting the answer
body : the body we would like to use for the answer
user : current user or "" to denote anonymous user
posts : our collection of posts
Output: None
-----------------------------------------------------------------"""
def create_answer(questionId, body, user, posts):
    """Insert an answer document for *questionId* into the posts collection.

    :param questionId: id of the question being answered
    :param body: body text of the answer
    :param user: owner user id, or "" for an anonymous post
    :param posts: the collection of posts (pymongo-style)
    """
    # Millisecond timestamp matching Posts.json, e.g. "2011-02-22T21:47:22.987".
    # BUG FIX: the old str(datetime.today())[:-3] slicing corrupted the value
    # whenever microseconds were 0 (str() then omits ".000000", so the slice
    # chopped off the seconds digits); strftime's %f is always six digits.
    date_time = datetime.today().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
    theId = generateRandomId(posts)  # unique id for our answer
    # Fill in all the fields as per the assignment specifications.
    answer = {
        "Id": theId,
        "PostTypeId": "2",
        "ParentId": questionId,
        "CreationDate": date_time,
        "Score": 0,
        "Body": body,
        "CommentCount": 0,
        "ContentLicense": "CC BY-SA 2.5"
    }
    # OwnerUserId is present only when the post is not anonymous.
    if user != "":
        answer["OwnerUserId"] = user
    # Insert our answer into the posts collection.
    posts.insert_one(answer)
    print('Your answer has been posted!')
"""-----------------------------------------------------------------
list_answers - List the answers for a question
Purpose: Based upon the question provided by the user list the answers
to this question, if the question has an accepted answer this will be
listed first
posts collection
Input: questionId : The id of the question on which we are listing the answers
posts: the posts collection
Output: an array with two elements
1. the array of all the id's of the answers for our question(in the same
order and with the same indexes as these answers were presented to the user)
2. The length of said list
-----------------------------------------------------------------"""
def list_answers(questionId, posts):
    """List the answers for a question, accepted answer (if any) first.

    :param questionId: id of the question whose answers are listed
    :param posts: the posts collection
    :return: [answers, count] where answers[i] is the id of the i-th answer
             exactly as displayed to the user and count is how many were shown
    """
    # Look up the question itself to find its accepted answer, if any.
    acceptedAnswerBool = False
    acceptedAnswerID = ""  # "" represents "no accepted answer"
    for result in posts.find({"Id": questionId}):
        if "AcceptedAnswerId" in result:
            acceptedAnswerID = result["AcceptedAnswerId"]
            acceptedAnswerBool = True
        else:
            acceptedAnswerID = ""
            acceptedAnswerBool = False
    # The index is very important: it keeps the answers shown to the user in
    # sync with their slots in the answers array.
    index = 0
    answers = [0] * 10000
    if acceptedAnswerBool:
        # Make sure the accepted answer appears first.
        answers[index] = acceptedAnswerID
        for result in posts.find({"Id": acceptedAnswerID}):
            score = result["Score"]
            creationDate = result["CreationDate"]
            body = result["Body"]
            first80Chars = body[0:80]  # first 80 characters (0:79 was only 79)
            # Display the fields of our answer as per the assignment specs.
            print("* Index "+ str(index) + ":\nBody: "+first80Chars + "\nCreation Date: "+str(creationDate)+"\nScore: "+str(score))
            print("======================================")
            index = index + 1
    # Accepted answer dealt with; present the rest of this question's answers
    # and fill the answers array with the same ordering.
    for result in posts.find({"ParentId": questionId}):
        if result["Id"] != acceptedAnswerID:
            answers[index] = result["Id"]
            # Documents are dicts, so .get replaces the old bare excepts
            # without swallowing unrelated errors.
            score = result.get("Score", "No Score")
            creationDate = result.get("CreationDate", "No creation date")
            body = result.get("Body", "No body")
            first80Chars = body[0:80]
            print("Index "+ str(index) + ":\nBody: "+first80Chars + "\nCreation Date: "+str(creationDate)+"\nScore: "+str(score))
            print("======================================")
            index = index + 1
    return [answers, index]
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
from .commands import MakeCoffee
from .results import MakeCoffeeResult
from .events import CoffeeStarted
# Public API of this package: the command, its result, and the event.
__all__ = [
    'CoffeeStarted',
    'MakeCoffeeResult',
    'MakeCoffee',
]
|
#!/usr/bin/python
# Python 2 CGI script: dumps every environment variable into an HTML table.
from os import environ
# CGI response headers come first, terminated by one blank line.
print "Content-Type: text/html"
print "Set-Cookie: foo=bar"
print
print """
<table>
<thead>
<tr>
<th>Name</th>
<th>Value</th>
</tr>
</thead>
<tbody>
"""
# iteritems() yields (name, value) pairs (Python 2 only).
# NOTE(review): values are interpolated without HTML escaping; fine for a
# trusted demo, but consider cgi.escape() if any value could contain markup.
for x in environ.iteritems():
    print "<tr><td>%s</td><td>%s</td></tr>" % x
print """
</tbody>
</table>
"""
|
"""
Api app views
"""
from django.http import Http404, JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.crypto import get_random_string
from rest_framework import status, viewsets
from rest_framework.decorators import action, api_view, permission_classes
from rest_framework.exceptions import APIException
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from api import models, permissions, serializers, utils
from api.constants import MAIL_FROM, Limits, Methods, PermissionCodes, Templates
from api.utils import send_mail
@api_view([Methods.POST])
@permission_classes([AllowAny])
def auth_user(request):
    """
    Authenticate a user with email and password.

    Expects POST data containing "email" and "password"; returns the
    serialized account on success (404 if the email is unknown).
    NOTE(review): the error branches rely on utils.raise_api_exc raising —
    otherwise `response` would be unbound at the return; confirm it always
    raises.
    """
    data = request.data
    if utils.has_required(data.keys(), {"email", "password"}):
        user = get_object_or_404(models.Account, email=data["email"])
        if user.check_password(data["password"]):
            response = JsonResponse(serializers.AccountSerializer(user).data)
        else:
            utils.raise_api_exc(
                APIException("invalid credentials"), status.HTTP_400_BAD_REQUEST
            )
    else:
        utils.raise_api_exc(
            APIException("incomplete information"), status.HTTP_400_BAD_REQUEST
        )
    return response
@api_view([Methods.GET, Methods.POST])
@permission_classes([AllowAny])
def auth_reset(request):
    """
    Control user account password reset.

    GET  ?email=...          : generate a reset code and email it to the user.
    POST {email, code, password}: verify the code, then set the new password.
    NOTE(review): error branches rely on utils.raise_api_exc raising,
    otherwise `response` would be unbound at the return; confirm.
    """
    if request.method == Methods.GET:
        email = request.query_params.get("email")
        if email:
            user = get_object_or_404(models.Account, email=email)
            # 128-char random single-use code stored on the account.
            reset_code = get_random_string(128)
            user.set_reset_code(reset_code, True)
            _send_reset_request_mail(request, user, reset_code)
            response = Response(
                data={"detail": "reset code has been sent to your email"},
                status=status.HTTP_200_OK,
            )
        else:
            utils.raise_api_exc(
                APIException("email is required to request a reset"),
                status.HTTP_400_BAD_REQUEST,
            )
    else:
        # POST
        data = request.data
        if utils.has_required(data.keys(), {"email", "code", "password"}):
            user = get_object_or_404(models.Account, email=data["email"])
            if user.check_reset_code(data["code"]):
                user.set_password(data["password"])
                # Codes are single-use: clear it before saving the new password.
                user.clear_reset_code()
                user.save()
                _send_reset_confirm_mail(request, user)
                response = Response(
                    data={"detail": "password reset successfully"},
                    status=status.HTTP_200_OK,
                )
            else:
                utils.raise_api_exc(
                    APIException("invalid reset code"), status.HTTP_400_BAD_REQUEST
                )
        else:
            utils.raise_api_exc(
                APIException("incomplete reset details"), status.HTTP_400_BAD_REQUEST
            )
    return response
def _send_reset_request_mail(request, user, code):
    """
    Email the password-reset link (embedding the reset code) to the user.

    :param request: request context used to build the absolute link
    :param user: user account being reset
    :param code: reset code embedded in the link
    """
    base_url = request.build_absolute_uri(reverse("auth-reset"))
    reset_link = "{}?code={}".format(base_url, code)
    mail_data = {"{email}": user.email, "{reset_confirm_link}": reset_link}
    send_mail(
        sender=MAIL_FROM,
        recievers=[user.email],
        subject="Account Password Reset",
        tmpl_file=Templates.Email.RESET_REQUEST,
        tmpl_data=mail_data,
    )
def _send_reset_confirm_mail(_, user):
    """
    Notify the user that their password was changed (no template variables).
    """
    mail_kwargs = dict(
        sender=MAIL_FROM,
        recievers=[user.email],
        subject="Account Password Changed",
        tmpl_file=Templates.Email.RESET_COMPLETE,
        tmpl_data={},
    )
    send_mail(**mail_kwargs)
class ScopeViewSet(viewsets.ModelViewSet):
    """
    CRUD endpoints for the auth Scope model.

    Uses DRF defaults; note no custom permission_classes are set here,
    unlike the other viewsets below.
    """
    queryset = models.Scope.objects.all()
    serializer_class = serializers.ScopeSerializer
class AuthViewSet(viewsets.ModelViewSet):
    """
    CRUD endpoints for the Auth model, guarded by JoggerPermissions.
    """
    queryset = models.Auth.objects.all()
    serializer_class = serializers.AuthSerializer
    permission_classes = (permissions.JoggerPermissions,)
class AccountViewSet(viewsets.ModelViewSet):
    """
    CRUD endpoints for the Account model plus custom actions:
    profile (own account), trips (managed users' jogs), managers and
    managing (the two sides of the account-manager relationship).
    """
    queryset = models.Account.objects.all()
    serializer_class = serializers.AccountSerializer
    permission_classes = (permissions.JoggerPermissions,)
    def get_serializer_class(self):
        # The "trips" action serves Trip objects, not Accounts.
        if self.action == "trips":
            return serializers.TripSerializer
        return super().get_serializer_class()
    def get_queryset(self):
        # Non-superusers can only ever see their own account.
        acc = get_object_or_404(models.Account, pk=self.request.user.id)
        if acc.is_superuser:
            return self.queryset
        return self.queryset.filter(pk=acc.id)
    @action(
        methods=[Methods.GET, Methods.PUT, Methods.PATCH, Methods.DELETE], detail=False
    )
    def profile(self, request, *_, **kwargs):
        """
        Handle showing, updating and deletion of the caller's own account.
        """
        obj = get_object_or_404(models.Account, pk=request.user.id)
        if request.method == Methods.GET:
            # NOTE(review): obj was already fetched above; this re-fetch is
            # redundant.
            obj = get_object_or_404(models.Account, pk=request.user.id)
            serializer = self.get_serializer(obj)
            response = Response(serializer.data)
        elif request.method in [Methods.PUT, Methods.PATCH]:
            # PATCH is a partial update; PUT requires the full payload.
            partial = kwargs.pop("partial", request.method == Methods.PATCH)
            serializer = self.get_serializer(obj, data=request.data, partial=partial)
            serializer.is_valid(raise_exception=True)
            self.perform_update(serializer)
            response = Response(serializer.data)
        else:
            # DELETE
            obj.delete()
            response = Response(data=None, status=status.HTTP_204_NO_CONTENT)
        return response
    @action(
        methods=[Methods.GET, Methods.POST],
        detail=False,
        url_path="(?P<user_id>[0-9]+)/trips",
    )
    def trips(self, request, user_id):
        """
        Handle viewing and adding of managed user jogging sessions.

        Visible only to the user themselves, a superuser, or one of the
        user's managers; everyone else gets a 404.
        """
        mgr = request.user
        acc = get_object_or_404(models.Account, pk=user_id)
        if all([mgr.id != acc.id, not mgr.is_superuser, mgr.id not in acc.managers]):
            raise Http404()
        # NOTE(review): literal "GET" here, Methods.GET elsewhere — confirm
        # they are equivalent.
        if request.method == "GET":
            trips = self.paginate_queryset(acc.trips.all())
            serializer = self.get_serializer(
                trips, many=True, context={"request": request}
            )
            response = self.get_paginated_response(serializer.data)
        else:
            # POST
            result = create_trip(acc, request.data.copy(), request)
            if result.errors:
                response = Response(result.errors, status=status.HTTP_400_BAD_REQUEST)
            else:
                response = Response(result.data, status=status.HTTP_201_CREATED)
        return response
    @action(methods=[Methods.GET, Methods.POST, Methods.DELETE], detail=False)
    def managers(self, request):
        """
        Handle listing (GET), requesting (POST) and revoking (DELETE) the
        accounts that can manage the calling user.
        """
        user = get_object_or_404(models.Account, pk=request.user.id)
        method = request.method
        response_data = None
        # Default status; overwritten on every handled path below.
        response_status = status.HTTP_500_INTERNAL_SERVER_ERROR
        if method == Methods.GET:
            mgrs = models.Account.objects.filter(pk__in=user.managers)
            serializer = self.get_serializer(mgrs, many=True)
            response_data = serializer.data
            response_status = status.HTTP_200_OK
        else:
            # POST & DELETE
            if utils.has_required(request.data.keys(), {"email"}):
                mgr_email = request.data["email"]
                # A user cannot be their own manager.
                if user.email == mgr_email:
                    utils.raise_api_exc(
                        APIException("you are signed with this email"),
                        status.HTTP_400_BAD_REQUEST,
                    )
                mgr = get_object_or_404(models.Account, email=mgr_email)
                if method == Methods.POST:
                    # 202: authorisation is pending until the manager confirms.
                    auth = auth_manager(user=user, mgr=mgr)
                    self._send_manage_request_mail(user, mgr_email, auth)
                    response_data = self.get_serializer(mgr).data
                    response_status = status.HTTP_202_ACCEPTED
                else:
                    # DELETE
                    deauth_manager(user=user, mgr=mgr)
                    response_status = status.HTTP_204_NO_CONTENT
            else:
                utils.raise_api_exc(
                    APIException("no email supplied"), status.HTTP_400_BAD_REQUEST
                )
        return Response(data=response_data, status=response_status)
    def _send_manage_request_mail(self, user, mgr_email, auth):
        """
        Send two mails: a confirmation link (with auth code) to the would-be
        manager, and a cancellation link to the user being managed.
        """
        confirm_link = "{}?code={}".format(
            self.request.build_absolute_uri(reverse("account-managing")), auth.code
        )
        send_mail(
            sender=MAIL_FROM,
            recievers=[mgr_email],
            subject="Account Manage Request",
            tmpl_file=Templates.Email.MANAGE_REQUEST,
            tmpl_data={
                "{username}": user.username,
                "{user_email}": user.email,
                "{manager_email}": mgr_email,
                "{manage_confirm_link}": confirm_link,
            },
        )
        cancel_link = "{}?email={}".format(
            self.request.build_absolute_uri(reverse("account-managers")), mgr_email
        )
        send_mail(
            sender=MAIL_FROM,
            recievers=[user.email],
            subject="Account Manager Request",
            tmpl_file=Templates.Email.MANAGER_REQUEST,
            tmpl_data={
                "{manager_email}": mgr_email,
                "{manage_cancel_link}": cancel_link,
            },
        )
    @action(methods=[Methods.GET, Methods.POST, Methods.DELETE], detail=False)
    def managing(self, request):
        """
        Handle listing (GET), confirming (POST, via emailed code) and
        dropping (DELETE) the accounts the caller manages.
        """
        mgr = get_object_or_404(models.Account, pk=request.user.id)
        method = request.method
        response_data = None
        # Default status; overwritten on every handled path below.
        response_status = status.HTTP_500_INTERNAL_SERVER_ERROR
        if method == Methods.GET:
            mngn = models.Account.objects.filter(pk__in=mgr.managing)
            serializer = self.get_serializer(mngn, many=True)
            response_data = serializer.data
            response_status = status.HTTP_200_OK
        elif method == Methods.POST:
            if utils.has_required(request.data.keys(), {"code"}):
                # The code was emailed by _send_manage_request_mail.
                auth_code = request.data["code"]
                auth = get_object_or_404(models.Auth, code=auth_code, owner_id=mgr.id)
                auth.activate()
                response_data = self.get_serializer(auth.user).data
                response_status = status.HTTP_200_OK
            else:
                utils.raise_api_exc(
                    APIException("no authorization code supplied"),
                    status.HTTP_400_BAD_REQUEST,
                )
        else:
            # DELETE
            if utils.has_required(request.data.keys(), {"email"}):
                usr_email = request.data["email"]
                user = get_object_or_404(models.Account, email=usr_email)
                deauth_manager(user=user, mgr=mgr)
                response_status = status.HTTP_204_NO_CONTENT
            else:
                utils.raise_api_exc(
                    APIException("no email supplied"), status.HTTP_400_BAD_REQUEST
                )
        return Response(data=response_data, status=response_status)
def auth_manager(user, mgr):
    """
    Authorise and notify manager for user account.

    Creates an inactive Auth (activated later via the emailed code) carrying
    the account-manage scope, after dropping any stale auth between the pair.
    :raises APIException: if manager already authorized
    :raises APIException: if manager managing at limit
    :raises APIException: if user manager count at limit
    """
    if mgr.id in user.managers:
        utils.raise_api_exc(
            APIException("email already authorised"), status.HTTP_400_BAD_REQUEST
        )
    if len(mgr.managing) >= Limits.ACCOUNT_MANAGED:
        utils.raise_api_exc(
            APIException("account is managing more than enough"),
            status.HTTP_406_NOT_ACCEPTABLE,
        )
    if len(user.managers) >= Limits.ACCOUNT_MANAGER:
        utils.raise_api_exc(
            APIException("account has more than enough managers"),
            status.HTTP_406_NOT_ACCEPTABLE,
        )
    mgr_scope = models.Scope.objects.get(codename=PermissionCodes.Account.MANAGE)
    # Drop any stale/pending auth between the pair before creating a new one.
    deauth_manager(user=user, mgr=mgr)
    # Inactive until the manager confirms with the emailed 128-char code.
    auth = models.Auth.objects.create(
        owner=mgr, user=user, active=False, code=get_random_string(128)
    )
    auth.scopes.set({mgr_scope})
    auth.save()
    return auth
def deauth_manager(user, mgr):
    """
    Deactivate every manage-scope auth that `mgr` holds over `user`'s
    account; returns the number of auth rows updated.
    """
    manage_scope = models.Account.get_manage_scope()
    matching_auths = manage_scope.auths.filter(user=user, owner=mgr)
    return matching_auths.update(active=False, code=None)
class TripViewSet(viewsets.ModelViewSet):
    """
    CRUD endpoints for the Trip model across all visible accounts.
    """
    queryset = models.Trip.objects.all()
    serializer_class = serializers.TripSerializer
    permission_classes = (permissions.JoggerPermissions,)
    def create(self, request, *_, **__):
        # Delegate to the shared helper so this endpoint and
        # AccountViewSet.trips create trips identically.
        result = create_trip(request.user, request.data.copy(), request)
        if result.errors:
            return Response(result.errors, status=status.HTTP_400_BAD_REQUEST)
        return Response(result.data, status=status.HTTP_201_CREATED)
    def get_queryset(self):
        # Superusers see every trip; others see their own plus those of
        # accounts they manage.
        acc = get_object_or_404(models.Account, pk=self.request.user.id)
        if acc.is_superuser:
            return self.queryset
        return self.queryset.filter(account_id__in=({acc.id} | acc.managing))
def create_trip(account, data, request=None):
    """
    Create a trip on `account` from `data`; return the bound serializer.

    The caller inspects `.errors` / `.data` on the returned serializer to
    decide the response.
    :param account: account the trip is attached to
    :param data: mutable mapping of trip fields (any account keys are stripped)
    :param request: optional request used as serializer context
    """
    # The owning account is dictated by the caller, never by the payload.
    for k in ("account", "account_id"):
        data.pop(k, None)
    data["account"] = account
    if request is not None:
        # Guard: the old unconditional setattr crashed on the default None.
        request.user = account
    serializer = serializers.TripSerializer(data=data, context={"request": request})
    if serializer.is_valid():
        serializer.save()
    return serializer
|
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.8.0b3 on Fri Feb 23 22:28:04 2018
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class MyFrame(wx.Frame):
    # Generated by wxGlade; keep the "begin/end wxGlade" markers intact so
    # the tool can regenerate the marked sections.
    def __init__(self, *args, **kwds):
        # begin wxGlade: MyFrame.__init__
        kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        self.SetSize((400, 300))
        self.Open = wx.Button(self, wx.ID_ANY, "open")
        self.__set_properties()
        self.__do_layout()
        # Clicking the button opens the file dialog.
        self.Bind(wx.EVT_BUTTON, self.onOpen, self.Open)
        # end wxGlade
    def __set_properties(self):
        # begin wxGlade: MyFrame.__set_properties
        self.SetTitle("frame")
        # end wxGlade
    def __do_layout(self):
        # begin wxGlade: MyFrame.__do_layout
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.Open, 0, wx.ALL, 5)
        self.SetSizer(sizer)
        self.Layout()
        # end wxGlade
    def onOpen(self, event): # wxGlade: MyFrame.<event_handler>
        """Prompt for an XYZ/PDF file and print the chosen path."""
        # print("Event handler 'onOpen' not implemented!")
        # event.Skip()
        with wx.FileDialog(self, "Open XYZ file", wildcard="XYZ files (*.xyz)|*.xyz|PDF files (*.pdf)|*.pdf",
                           style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
            if fileDialog.ShowModal() == wx.ID_CANCEL:
                return  # the user changed their mind
            # Proceed loading the file chosen by the user
            pathname = fileDialog.GetPath()
            print(pathname)
# end of class MyFrame
if __name__ == "__main__":
    # Standard wxPython bootstrap: create the app, show the frame, run loop.
    app = wx.App()
    frame = MyFrame(None)
    frame.Show()
    app.MainLoop()
|
import json
from flask import Flask, make_response, request
import bot
from config import CONFIG
from storage import create_tables, get_or_create_event_log, create_user_message_reaction_log
from utils import hash_data
app = Flask(__name__)
# Single bot instance shared by all routes; `slack` is its API client.
pyBot = bot.Bot()
slack = pyBot.client
def _event_handler(event_type, slack_event):
    """
    A helper function that routes events from Slack to our Bot
    by event type and subtype.
    Parameters
    ----------
    event_type : str
        type of event received from Slack
    slack_event : dict
        JSON response from a Slack reaction event
    Returns
    ----------
    obj
        Response object with 200 - ok or 500 - No Event Handler error
    """
    # ============= Reaction Added Events ============= #
    # If the user has added an emoji reaction to one message
    if event_type == "reaction_added":
        user_id = slack_event["event"]["user"]
        # Some messages aren't authored by "users," like those created by incoming webhooks.
        # reaction_added events related to these messages will not include an item_user.
        item_user_id = slack_event["event"].get("item_user")
        reaction = slack_event["event"]["reaction"]
        # only log others' poultry_leg reaction to a real user
        # (in DEBUG mode, self-reactions are logged too)
        if item_user_id and reaction == 'poultry_leg' and (CONFIG.DEBUG or item_user_id != user_id):
            message = json.dumps(slack_event["event"]["item"], separators=(',', ':'))
            print(f'{user_id} ({reaction}) > {item_user_id} @({message})')
            # Hash the item so duplicate reactions map to the same log row.
            create_user_message_reaction_log(to_user_id=item_user_id, from_user_id=user_id,
                                             message_hash=hash_data(slack_event["event"]["item"]),
                                             reaction=reaction)
            # pyBot.notify_being_added_poultry_leg(user_id=user_id, item_user_id=item_user_id)
        return make_response("reaction logged", 200, )
    # If the user has mentioned the app
    elif event_type == "app_mention":
        # text = slack_event["event"]["text"]
        channel = slack_event["event"]["channel"]
        pyBot.tell_leaderboard(channel)
        return make_response("reaction logged", 200, )
    # ============= Event Type Not Found! ============= #
    # If the event_type does not have a handler
    message = "You have not added an event handler for the %s" % event_type
    # Return a helpful error message
    return make_response(message, 200, {"X-Slack-No-Retry": 1})
@app.route("/")
def hello():
    # Simple liveness endpoint.
    return "Hello World!"
@app.route("/listening", methods=["GET", "POST"])
def hears():
    """
    This route listens for incoming events from Slack and uses the event handler helper function to route events
    to our Bot.
    """
    slack_event = json.loads(request.data)
    # ============= Slack URL Verification ============ #
    if "challenge" in slack_event:
        return make_response(slack_event["challenge"], 200, {"content_type": "application/json"})
    # ============ Slack Token Verification =========== #
    # Bind once with .get: the old slack_event['token'] in the error message
    # raised KeyError whenever the payload carried no token at all.
    token = slack_event.get("token")
    if pyBot.verification != token:
        message = f"Invalid Slack verification token: {token}\npyBot has: {pyBot.verification}\n\n"
        # By adding "X-Slack-No-Retry" : 1 to our response headers, we turn off Slack's automatic retries during
        # development.
        return make_response(message, 403, {"X-Slack-No-Retry": 1})
    # ====== Prevent Duplicate Processing ====== #
    # Slack retries deliveries; the event-log hash makes handling idempotent.
    _, created = get_or_create_event_log(hash_data(slack_event))
    if not created:
        return make_response("Got it.", 200, {"content_type": "application/json"})
    # ====== Process Incoming Events from Slack ======= #
    if "event" in slack_event:
        event_type = slack_event["event"]["type"]
        # Then handle the event by event_type and have your bot respond
        return _event_handler(event_type, slack_event)
    return make_response("[NO EVENT IN SLACK REQUEST] These are not the droids you're looking for.",
                         404, {"X-Slack-No-Retry": 1})
# @app.route("/install", methods=["GET"])
# def pre_install():
# """This route renders the installation page with 'Add to Slack' button."""
# # Since we've set the client ID and scope on our Bot object, we can change
# # them more easily while we're developing our app.
# client_id = pyBot.oauth["client_id"]
# scope = pyBot.oauth["scope"]
# # Our template is using the Jinja templating language to dynamically pass
# # our client id and scope
# return render_template("install.html", client_id=client_id, scope=scope)
#
#
# @app.route("/thanks", methods=["GET", "POST"])
# def thanks():
# """
# This route is called by Slack after the user installs our app. It will
# exchange the temporary authorization code Slack sends for an OAuth token
# which we'll save on the bot object to use later.
# To let the user know what's happened it will also render a thank you page.
# """
# # Let's grab that temporary authorization code Slack's sent us from
# # the request's parameters.
# code_arg = request.args.get('code')
# # The bot's auth method handles exchanging the code for an OAuth token
# pyBot.auth(code_arg)
# return render_template("thanks.html")
def main():
    """Create the DB tables and start the Flask app on the configured port."""
    # create_database()
    create_tables()
    app.run(debug=CONFIG.DEBUG, port=CONFIG.APP_PORT)
if __name__ == '__main__':
    main()
|
from common.run_method import RunMethod
import allure
@allure.step("极运营/系统设置/优惠设置/优惠券/查看单个学生优惠券操作记录")
def coupon_queryOperationRecordByCouponItemId_get(params=None, header=None, return_json=True, **kwargs):
    '''
    GET /service-order/coupon/queryOperationRecordByCouponItemId
    View the operation records of a single student's coupon.
    :param params: query-string parameters
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/查看单个学生优惠券操作记录"
    url = f"/service-order/coupon/queryOperationRecordByCouponItemId"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/上传学生名单")
def coupon_addCouponItemByUpload_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/addCouponItemByUpload
    Upload a student list for a coupon.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/上传学生名单"
    url = f"/service-order/coupon/addCouponItemByUpload"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/设置自动发行优惠券")
def coupon_updateAutoIssueConditionByCouponId_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/updateAutoIssueConditionByCouponId
    Configure automatic issuing for a coupon.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/设置自动发行优惠券"
    url = f"/service-order/coupon/updateAutoIssueConditionByCouponId"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/查询优惠券所属课程")
def coupon_queryCoursesByCouponId_get(params=None, header=None, return_json=True, **kwargs):
    '''
    GET /service-order/coupon/queryCoursesByCouponId
    Query the courses a coupon applies to.
    :param params: query-string parameters
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/查询优惠券所属课程"
    url = f"/service-order/coupon/queryCoursesByCouponId"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/修改备注")
def coupon_updateRemark_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/updateRemark
    Update a coupon's remark.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/修改备注"
    url = f"/service-order/coupon/updateRemark"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/删除优惠券")
def coupon_delete_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/delete
    Delete a coupon.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/删除优惠券"
    url = f"/service-order/coupon/delete"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/查询自动发行优惠券")
def coupon_queryAutoIssueConditionByCouponId_get(params=None, header=None, return_json=True, **kwargs):
    '''
    GET /service-order/coupon/queryAutoIssueConditionByCouponId
    Query a coupon's automatic-issue settings.
    :param params: query-string parameters
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/查询自动发行优惠券"
    url = f"/service-order/coupon/queryAutoIssueConditionByCouponId"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/新增优惠券")
def coupon_add_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/add
    Create a new coupon.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/新增优惠券"
    url = f"/service-order/coupon/add"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/查询优惠券所属校区")
def coupon_querySchoolsByCouponId_get(params=None, header=None, return_json=True, **kwargs):
    '''
    GET /service-order/coupon/querySchoolsByCouponId
    Query the campuses a coupon belongs to.
    :param params: query-string parameters
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/查询优惠券所属校区"
    url = f"/service-order/coupon/querySchoolsByCouponId"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/修改优惠券状态")
def coupon_updateStatus_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/updateStatus
    Update a coupon's status.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/修改优惠券状态"
    url = f"/service-order/coupon/updateStatus"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/查看单个优惠券操作记录")
def coupon_queryOperationRecordByCouponId_get(params=None, header=None, return_json=True, **kwargs):
    '''
    GET /service-order/coupon/queryOperationRecordByCouponId
    View the operation records of a single coupon.
    :param params: query-string parameters
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/查看单个优惠券操作记录"
    url = f"/service-order/coupon/queryOperationRecordByCouponId"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/查询单个优惠券")
def coupon_queryById_get(params=None, header=None, return_json=True, **kwargs):
    '''
    GET /service-order/coupon/queryById
    Query a single coupon by id.
    :param params: query-string parameters
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/查询单个优惠券"
    url = f"/service-order/coupon/queryById"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("系统设置/优惠设置/优惠券/查询优惠券列表")
def coupon_queryAll_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/queryAll
    Query the coupon list.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "系统设置/优惠设置/优惠券/查询优惠券列表"
    url = f"/service-order/coupon/queryAll"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/查看优惠券使用情况")
def coupon_queryUse_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/queryUse
    View coupon usage.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/查看优惠券使用情况"
    url = f"/service-order/coupon/queryUse"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/导出上传的学生名单")
def coupon_exportUploadData_get(params=None, header=None, return_json=True, **kwargs):
    '''
    GET /service-order/coupon/exportUploadData
    Export the uploaded student list.
    :param params: query-string parameters
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/导出上传的学生名单"
    url = f"/service-order/coupon/exportUploadData"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("通用/报名/pc端获取可使用优惠券")
def coupon_queryMatchCoupon_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/queryMatchCoupon
    Fetch usable coupons during enrollment (PC side).
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "通用/报名/pc端获取可使用优惠券"
    url = f"/service-order/coupon/queryMatchCoupon"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/查询优惠券所属班级")
def coupon_queryClassesByCouponId_get(params=None, header=None, return_json=True, **kwargs):
    '''
    GET /service-order/coupon/queryClassesByCouponId
    Query the classes a coupon belongs to.
    :param params: query-string parameters
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/查询优惠券所属班级"
    url = f"/service-order/coupon/queryClassesByCouponId"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/修改学生优惠券状态")
def coupon_updateCouponItemStatus_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/updateCouponItemStatus
    Update the status of a student's coupon.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/修改学生优惠券状态"
    url = f"/service-order/coupon/updateCouponItemStatus"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/在线添加学生")
def coupon_addCouponItemByOnline_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/addCouponItemByOnline
    Add students to a coupon online.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/在线添加学生"
    url = f"/service-order/coupon/addCouponItemByOnline"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/删除学生优惠券")
def coupon_deleteCouponItem_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/deleteCouponItem
    Delete a student's coupon.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/删除学生优惠券"
    url = f"/service-order/coupon/deleteCouponItem"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/筛选花名册添加")
def coupon_addCouponItemBySelectStudent_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/addCouponItemBySelectStudent
    Add students by filtering the roster.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/筛选花名册添加"
    url = f"/service-order/coupon/addCouponItemBySelectStudent"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/继续提交学生名单")
def coupon_addCouponItemByKey_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST /service-order/coupon/addCouponItemByKey
    Continue submitting a student list by upload key.
    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: return the JSON-parsed response when True (default)
    :param kwargs: extra options forwarded to RunMethod (e.g. host: target environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极运营/系统设置/优惠设置/优惠券/继续提交学生名单"
    url = f"/service-order/coupon/addCouponItemByKey"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/系统设置/优惠设置/优惠券/编辑优惠券")
def coupon_edit_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /service-order/coupon/edit — edit an existing coupon.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: True (default) returns the parsed JSON response;
        False returns the raw response object
    :param kwargs: forwarded to RunMethod.run_request (e.g. target host/env)
    :return: JSON response by default; raw response when return_json=False
    """
    step_name = "极运营/系统设置/优惠设置/优惠券/编辑优惠券"
    endpoint = "/service-order/coupon/edit"
    return RunMethod.run_request(
        "POST",
        endpoint,
        params=params,
        body=body,
        header=header,
        return_json=return_json,
        name=step_name,
        **kwargs,
    )
|
import numpy as np
import pandas as pd
# import datetime
def transform_cols(df, dict_col_types=None):
    """Replace ``'null'`` markers with NaN, then coerce known columns to fixed
    types, filling gaps with per-column defaults.

    :param df: frame to normalize (modified in place and also returned)
    :param dict_col_types: mapping of column name -> (target_type, fill_value);
        when None, the default mapping below is used
    :return: the same DataFrame with typed, gap-filled columns
    """
    if dict_col_types is None:
        # Extend this mapping for any column that needs explicit typing.
        dict_col_types = {
            'amount_original': (float, 0.0),
            'cdf_s_126': (str, u'null'),
            'cdf_s_138': (str, u'null'),
            'channel_indicator_desc': (str, u'null'),
            'event_description': (str, u'null'),
            'cdf_s_294': (int, 0),
            'cdf_s_140': (float, 0.0),
            'data_i_120': (int, 0),
            'cdf_s_218': (str, u'null'),
            'data_s_65': (int, 0),
            'cdf_s_127': (int, 30),
            'cdf_s_135': (int, 30),
            'cdf_s_130': (int, 30),
            'cdf_s_129': (int, 30),
            'cdf_s_134': (int, 30),
            'data_i_154': (float, -150),
            'cdf_s_133': (int, 30),
            'cdf_s_20': (str, u'null'),
            'cdf_s_299': (str, u'null'),
            'short_date': (int, 0),
        }
    if df.shape[0] > 0:
        df.replace(u'null', np.nan, inplace=True)
        for column, (target_type, fill_value) in dict_col_types.items():
            if column in df.columns:
                df[column] = df[column].fillna(fill_value).astype(target_type)
    return df
def calc_base_features(data):
    """Engineer the base fraud-scoring feature matrix from raw event rows.

    :param data: DataFrame of events; expected to carry event/user ids,
        timestamps (``event_time``, ``cdf_s_19``, ``cdf_s_124``) and the
        cdf_*/data_* telemetry columns referenced below.
    :return: DataFrame with one feature row per event; empty frame for empty
        input.

    NOTE: mutates ``data`` in place (cdf_s_140 / data_i_120 fill-ins), matching
    the original behavior.
    """
    feat_matrix = pd.DataFrame()
    if data.shape[0] == 0:
        return feat_matrix
    # Cumulative daily amount over web/mobile channels: missing means this is
    # the user's first operation that day, i.e. 0. Stored scaled, hence /1000.
    data.cdf_s_140 = data.cdf_s_140.fillna(0).astype(float)/1000
    # BUG FIX: chained in-place fillna (`data.data_i_120.fillna(..., inplace=True)`)
    # is unreliable under pandas copy-on-write; assign the filled column back.
    data.data_i_120 = data.data_i_120.fillna(1)
    feat_matrix['event_id'] = data.event_id
    feat_matrix['user_id'] = data.user_id
    feat_matrix['custom_mark'] = data.custom_mark
    feat_matrix['event_time'] = data.event_time
    feat_matrix['amount'] = data.amount_original
    # Client age in approximate years (360-day year), from cdf_s_19.
    feat_matrix['client_age'] = [x.days/360 for x in (data.event_time - data.cdf_s_19)]
    # Categorical yes/no/unknown flags: 1 = 'ДА' (yes), 0 = 'НЕТ' (no), 2 = other.
    feat_matrix['cat_new_ip'] = [1 if x == u'ДА' else 0 if x == u'НЕТ' else 2 for x in data.cdf_s_126]
    feat_matrix['cat_new_prov'] = [1 if x == u'ДА' else 0 if x == u'НЕТ' else 2 for x in data.cdf_s_138]
    feat_matrix['channel_op'] = [0 if x == u'MOBILE' else 1 if x == u'WEB' else 2 for x in data.channel_indicator_desc]
    # Operation type: 0 = transfer to an individual, 1 = service payment,
    # 2 = transfer between own accounts/cards, 3 = anything else.
    feat_matrix['op_type'] = [0 if x == u'Перевод частному лицу' else 1 if x==u'Оплата услуг' else 2 if x ==u'Перевод между своими счетами и картами' else 3 for x in data.event_description]
    # Binary flag: recipient age missing (useful for linear models; trees get
    # the same signal via age_diff below).
    feat_matrix['recip_age'] = [1 if x == 0 else 0 for x in data.cdf_s_294]
    # Sender/recipient age difference; a missing recipient age is padded with
    # the original sentinel (1000) so the difference is strongly negative.
    feat_matrix['age_diff'] = feat_matrix.client_age - [int(x) if x != 0 else 1000 for x in data.cdf_s_294]
    feat_matrix['cumulative_sum_total'] = data.cdf_s_140
    feat_matrix['data_i_120'] = data.data_i_120
    feat_matrix['relative'] = [1 if x == u'ДА' else 0 for x in data.cdf_s_218]  # transfer to a relative
    feat_matrix['know_recip_power'] = [x if x is not None else 0 for x in data.data_s_65]  # sender/recipient link strength
    feat_matrix['cdf_s_127'] = data.cdf_s_127
    feat_matrix['cdf_s_135'] = data.cdf_s_135
    feat_matrix['cdf_s_130'] = data.cdf_s_130
    feat_matrix['cdf_s_129'] = data.cdf_s_129
    feat_matrix['cdf_s_134'] = data.cdf_s_134
    feat_matrix['data_i_154'] = [x if x is not None else -150 for x in data.data_i_154]
    feat_matrix['cdf_s_133'] = data.cdf_s_133
    feat_matrix['know_recip_card_age'] = [1 if x is not None else 0 for x in data.cdf_s_124]
    # BUG FIX: pd.tslib was removed from pandas (>= 1.0); detect NaT portably
    # with pd.isnull() instead of `type(x) is not pd.tslib.NaTType`.
    feat_matrix['recip_card_age'] = [912321 if pd.isnull(x) else x.days for x in (data.event_time - data.cdf_s_124)]
    feat_matrix['one_region'] = (data.cdf_s_20 == data.cdf_s_299).astype(int)  # sender/recipient region match
    # Derived features.
    feat_matrix['krp_pow2'] = (feat_matrix['know_recip_power']) ** 2
    feat_matrix['log_amount'] = np.log(feat_matrix['amount'] + 1)
    # Region code as float; non-numeric codes become the 912321 sentinel.
    feat_matrix['ip_isp'] = np.array([x if x.isdigit() else 912321 for x in data.cdf_s_20], dtype=float)
    # Share of the current amount relative to amounts across channels.
    feat_matrix['amnt2chnls'] = (data["amount_original"].fillna(0).astype(float) /
                                 (data["cdf_s_136"].fillna(0).astype(float) + data["amount_original"].fillna(0).astype(float) +
                                  data["amount_original"].fillna(0) + 1))
    return feat_matrix
def load_data(chunk_names, fields=None, query=None, sample='train', dict_col_types=None):
    """Load one or more feather chunks, normalize column types, and concatenate.

    :param chunk_names: chunk file name or list of names under
        ``../data/raw_splits/<sample>/``
    :param fields: columns to keep; defaults to all columns of the first chunk
    :param query: optional ``DataFrame.query`` expression applied per chunk
        (before column selection)
    :param sample: sub-directory name, e.g. 'train' or 'test'
    :param dict_col_types: forwarded to ``transform_cols`` (column -> (type, fill))
    :return: single concatenated DataFrame
    """
    df = pd.DataFrame({})
    if not isinstance(chunk_names, list):
        chunk_names = [chunk_names]
    for chunk_name in chunk_names:
        chunk_df = pd.read_feather(
            '../data/raw_splits/{smpl}/{ch_nm}'.format(smpl=sample, ch_nm=chunk_name))
        if fields is None:
            # Lock the column set to the first chunk so later chunks align.
            fields = chunk_df.columns.tolist()
        # BUG FIX: dict_col_types was accepted but never forwarded to
        # transform_cols, so custom type mappings were silently ignored.
        if query is None:
            df = pd.concat([df,
                            transform_cols(
                                chunk_df[fields], dict_col_types)], ignore_index=True)
        else:
            df = pd.concat([df,
                            transform_cols(
                                chunk_df, dict_col_types).query(query)[fields]], ignore_index=True)
    return df
def features_handler(chunk_names, calc_feat, query=None, chunk_size=5000):
    """Apply *calc_feat* to every chunk loaded via load_data and stack results.

    :param chunk_names: iterable of chunk file names
    :param calc_feat: callable mapping a raw-data DataFrame to a feature frame
    :param query: optional row filter forwarded to load_data
    :param chunk_size: currently unused; kept for interface compatibility
    :return: single DataFrame of all computed features
    """
    collected = pd.DataFrame()
    for chunk_name in chunk_names:
        raw_chunk = load_data(chunk_name, query=query, dict_col_types=None)
        collected = pd.concat([collected, calc_feat(raw_chunk)], ignore_index=True)
    return collected
def cust_mark_to_class(custom_mark):
    """Map a CUSTOM_MARK value to a target class.

    :param custom_mark: raw mark value ('F'/'S'/'A'/'G', NaN, or anything else)
    :return: 1 for fraud ('F', 'S'); 0 for legitimate ('A', 'G', or a missing
        NaN mark); -1 for unknown.
    """
    if custom_mark in ('F', 'S'):
        return 1
    if custom_mark in ('A', 'G'):
        return 0
    # BUG FIX: the np.NaN alias was removed in NumPy 2.0, and membership of a
    # NaN in a list only ever matched by object identity (NaN != NaN), so
    # non-canonical NaNs fell through to -1. Detect any float NaN explicitly.
    if isinstance(custom_mark, float) and np.isnan(custom_mark):
        return 0
    return -1
|
# encoding.py
# Copyright (C) 2011-2014 Andrew Svetlov
# andrew.svetlov@gmail.com
#
# This module is part of BloggerTool and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from bloggertool.exceptions import UnknownDocType
MARKDOWN = 'Markdown'
REST = 'ReST'


def find_type(f):
    """Guess the markup type of *f* (an iterable of text lines).

    The first non-blank line decides: a line consisting entirely of '='
    characters (a ReST title underline/overline) means ReST, anything else
    means Markdown.

    Raises UnknownDocType if every line is blank.
    """
    for raw_line in f:
        stripped = raw_line.strip()
        if not stripped:
            continue
        if stripped == '=' * len(stripped):
            return REST
        return MARKDOWN
    raise UnknownDocType()
def get_engine(doctype):
    """Return a fresh rendering engine instance for *doctype*.

    :param doctype: one of the module constants MARKDOWN or REST
    :raises UnknownDocType: for any unrecognized doctype
    """
    if doctype == MARKDOWN:
        from .markdown import Engine as MarkdownEngine
        return MarkdownEngine()
    if doctype == REST:
        from .rest import Engine as RestEngine
        return RestEngine()
    raise UnknownDocType()
class Meta(object):
    """Post metadata collected while processing a document."""
    # labels/tags attached to the post; empty by default
    labels = frozenset()
    # post title; None until assigned
    title = None
    # URL slug; None until assigned
    slug = None
|
{
    'target_defaults': {
        'xcode_settings': {
            # Keep Xcode build products under the project tree (SRCROOT)
            # instead of the default DerivedData location.
            'SYMROOT': '<(DEPTH)/$SRCROOT/',
        },
    },
}
|
from math import *
# Demo input: a 2-D grid path (list of [x, y] waypoints) fed to smooth() below.
path = [[0,0],
        [0,1],
        [0,2],
        [1,2],
        [2,2],
        [3,2],
        [4,2],
        [4,3],
        [4,4]]
def smooth(path, weight_data=0.5, weight_smooth=0.1, tolerance=0.000001):
    """Gradient-descent path smoother.

    Repeatedly nudges every interior waypoint toward its original position
    (weight_data) and toward the midpoint of its neighbours (weight_smooth)
    until the largest per-point update falls within *tolerance*.

    :param path: list of [x, y] waypoints; endpoints are left untouched
    :return: new list of smoothed [x, y] waypoints (input is not modified)
    """
    smoothed = [list(point) for point in path]
    converged = False
    while not converged:
        converged = True
        for idx in range(1, len(smoothed) - 1):
            original = Point(path[idx][0], path[idx][1])
            current = Point(smoothed[idx][0], smoothed[idx][1])
            after = Point(smoothed[idx + 1][0], smoothed[idx + 1][1])
            before = Point(smoothed[idx - 1][0], smoothed[idx - 1][1])
            # pull back toward the original data point
            data_pull = weight_data * (original - current)
            # pull toward the neighbours' midpoint (smoothing term)
            smooth_pull = weight_smooth * (after + before - (2 * current))
            current = current + data_pull + smooth_pull
            smoothed[idx][0] = current[0]
            smoothed[idx][1] = current[1]
            step = data_pull + smooth_pull
            if abs(step[0]) + abs(step[1]) > tolerance:
                converged = False
    return smoothed
class Point:
    """Minimal mutable 2-D point supporting the arithmetic the smoother needs.

    Binary operators accept any indexable pair (another Point, list, or tuple).
    Indexing reads coordinates: p[0] is x, p[1] is y.
    """

    def __init__(self, x=0, y=0):
        self.x = float(x)
        self.y = float(y)

    def __add__(self, p):
        return Point(self.x + p[0], self.y + p[1])

    def __sub__(self, p):
        return Point(self.x - p[0], self.y - p[1])

    def __iadd__(self, p):
        self.x += p[0]
        self.y += p[1]
        return self

    def __isub__(self, p):
        self.x -= p[0]
        # BUG FIX: previously subtracted p[0] from y as well.
        self.y -= p[1]
        return self

    def __rmul__(self, m):
        return self.__mul__(m)

    def __mul__(self, m):
        m = float(m)
        return Point(self.x * m, self.y * m)

    def __div__(self, d):
        d = float(d)
        return Point(self.x / d, self.y / d)

    # Python 3 dispatches `/` to __truediv__; keep __div__ for Python 2 callers.
    __truediv__ = __div__

    def __imul__(self, m):
        # BUG FIX: previously did `self[0] *= m`, which raised TypeError (the
        # class has no __setitem__) and returned None, clobbering the caller's
        # variable. Augmented-assignment methods must mutate and return self.
        m = float(m)
        self.x *= m
        self.y *= m
        return self

    def __idiv__(self, d):
        # BUG FIX: same __setitem__/missing-return problem as __imul__.
        d = float(d)
        self.x /= d
        self.y /= d
        return self

    __itruediv__ = __idiv__

    def __repr__(self):
        return "[%.3f, %.3f]" % (self.x, self.y)

    def __getitem__(self, key):
        if (key==0):
            return self.x
        elif(key==1):
            return self.y
        else:
            raise Exception("invalid key")
# Demo: smooth the module-level `path` and print each waypoint before -> after.
newpath = smooth(path)
for i in range(len(path)):
    # BUG FIX: the Python-2-only `print` statement is a SyntaxError on
    # Python 3; a single-argument print() call works on both interpreters.
    before = ', '.join('%.6f' % x for x in path[i])
    after = ','.join('%.6f' % x for x in newpath[i])
    print('[' + before + '] -> [' + after + ']')
|
from flask_dance.consumer.storage.sqla import OAuthConsumerMixin
from flask_login import UserMixin
from flask_security import RoleMixin
from sqlalchemy import (
Boolean, Column,
ForeignKey, Integer, PickleType, String, Table
)
from sqlalchemy.orm import relationship, synonym
from bitcoin_acks.database.base import Base
from bitcoin_acks.webapp.database import db
class Roles(Base, RoleMixin):
    """Authorization role (Flask-Security RoleMixin) stored in `roles`."""
    __tablename__ = 'roles'
    id = Column(Integer(), primary_key=True)
    # short unique role name, e.g. 'admin'
    name = Column(String(80), unique=True)
    description = Column(String(255))
# FIXME Change db.Model to Base for auto-gen migrations
class Users(Base, db.Model, UserMixin):
    """Application user record; serves as the Flask-Login user model."""
    __tablename__ = 'users'
    # NOTE(review): primary key is a String — presumably an externally assigned
    # (GitHub) identifier; confirm against the code that populates this table.
    id = Column(String, primary_key=True)
    login = Column(String, unique=True)
    name = Column(String)
    bio = Column(String)
    url = Column(String)
    email = Column(String, unique=True)
    is_active = Column(Boolean)
    avatar_url = Column(String)
    # camelCase alias mapped to the same avatar_url column
    avatarUrl = synonym('avatar_url')
    twitter_handle = Column(String)
    # BTCPay integration settings; btcpay_client stores a pickled client object
    btcpay_host = Column(String)
    btcpay_pairing_code = Column(String)
    btcpay_client = Column(PickleType)
    @property
    def best_name(self):
        """Human-friendly display name: full name if set, else the login."""
        return self.name or self.login
class OAuth(OAuthConsumerMixin, Base):
    """Flask-Dance OAuth token storage, linked to a local Users row."""
    __tablename__ = 'oauth'
    # provider-side user identifier; one token row per provider user
    provider_user_id = Column(String(), unique=True, nullable=False)
    user_id = Column(String, ForeignKey(Users.id), nullable=False)
    user = relationship(Users)
# Many-to-many association between users and roles (used by Flask-Security).
roles_users = Table(
    'roles_users',
    Base.metadata,
    Column('user_id', String, ForeignKey('users.id')),
    Column('role_id', Integer, ForeignKey('roles.id')),
)
|
import sys
from tuntap import Packet,TunTap
import optparse
from _thread import start_new_thread
import traceback
def readtest(tap):
    """Read packets from the TUN/TAP device until it shuts down, dumping each
    IPv4 packet as hex; in Tun mode, echo pings back with addresses swapped.

    :param tap: TunTap instance exposing quitting/nic_type/read/write
    """
    while not tap.quitting:
        raw = tap.read()
        if not raw:
            continue
        if tap.nic_type == "Tap":
            packet = Packet(frame=raw)
        else:
            packet = Packet(data=raw)
        if packet.get_version() != 4:
            continue
        print(''.join('{:02x} '.format(byte) for byte in packet.data))
        if tap.nic_type == "Tun":
            # swap the IPv4 source (bytes 12-15) and destination (16-19) fields
            pingback = raw[:12] + raw[16:20] + raw[12:16] + raw[20:]
            tap.write(pingback)
def main():
    """Parse CLI options, bring up a TUN/TAP device, and run the background
    echo loop until the user presses return.

    :return: process exit code (0 on success, 1 on device setup failure)
    """
    parser = optparse.OptionParser()
    parser.add_option('--nic_type', default='Tun',dest='nic_type',
                      help='set type Tun or Tap')
    parser.add_option('--nic_name', default='',dest='nic_name',
                      help='set device name')
    parser.add_option('--tap-addr', default='192.168.33.10',dest='taddr',
                      help='set tunnel local address')
    parser.add_option('--tap-netmask', default='255.255.255.0',dest='tmask',
                      help='set tunnel netmask')
    parser.add_option('--tap-mtu', type='int', default=1500,dest='tmtu',
                      help='set tunnel MTU')
    parser.add_option('--local-addr', default='0.0.0.0', dest='laddr',
                      help='set local address [%default]')
    parser.add_option('--local-port', type='int', default=12000, dest='lport',
                      help='set local port [%default]')
    # parser.add_option('--remote-addr', dest='raddr',
    #                   help='set remote address')
    # parser.add_option('--remote-port', type='int', dest='rport',
    #                   help='set remote port')
    opt, args = parser.parse_args()
    # if not (opt.taddr and opt.raddr and opt.rport):
    #     parser.print_help()
    #     return 1
    try:
        # NOTE(review): device creation/configuration typically requires
        # administrative privileges — failures land in the except below.
        tuntap = TunTap(opt.nic_type)
        tuntap.create()
        tuntap.config(opt.taddr, opt.tmask)
        #, opt.tmtu, opt.laddr,opt.lport, opt.raddr, opt.rport)
    except Exception as e:
        print(str(e),traceback.format_exc())
        traceback.print_stack(limit=10)
        return 1
    # reader runs in a background thread; the main thread blocks on input()
    start_new_thread(readtest,(tuntap,))
    input("press return key to quit!")
    tuntap.close()
    return 0
if __name__ == '__main__':
    sys.exit(main())
|
#libraries
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
# Grid over [-4, 4) x [-4, 4) at 0.1 resolution.
R = np.arange(-4, 4, 0.1)
X, Y = np.meshgrid(R, R)
# Normalizing constant: sum of the unnormalized Gaussian over the whole grid.
Z = np.sum(np.exp(-0.5 * (X**2 + Y**2)))
# Grid-normalized standard bivariate Gaussian density.
P = (1/Z) * np.exp(-0.5 * (X**2 + Y**2))
# Zero out the density inside the unit disk (x^2 + y^2 < 1).
invalid_xy = (X**2 + Y**2) < 1
P[invalid_xy] = 0
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111, projection = '3d')
# scatter (rather than surface) of the truncated density
ax.scatter(X, Y, P, s=0.5, alpha=0.5)
plt.show()
|
import json
import boto3
def boto3_client(resource):
    """Create a Boto3 client for the given AWS service name (e.g. 's3')."""
    return boto3.client(resource)
def s3_add_tags(tags, bucket='dmansfield-dev'):
    """Apply a tag set to an S3 bucket.

    :param tags: Tagging payload in the put_bucket_tagging format
        ({'TagSet': [{'Key': ..., 'Value': ...}, ...]})
    :param bucket: target bucket name; defaults to the previously hard-coded
        dev bucket so existing callers keep working
    """
    boto3_client('s3').put_bucket_tagging(
        Bucket=bucket,
        Tagging=tags
    )
def main():
    """Load tag definitions from test.json and apply them to the S3 bucket."""
    # BUG FIX: the file was opened but never closed; the context manager
    # guarantees the handle is released even on errors.
    with open('test.json') as dev_tags_file:
        tags = json.load(dev_tags_file)
    s3_add_tags(tags)
if __name__ == '__main__':
    main()
|
def loadPassphrases():
    """Read ../inputs/day4.txt and return one word-list per line.

    :return: list of passphrases, each a list of space-separated words
    """
    # BUG FIX: the original stripped the newline with line[:-1], which chops
    # the last character of a final line that lacks a trailing newline; it
    # also never closed the file. rstrip('\n') + a context manager fix both.
    with open('../inputs/day4.txt', 'r') as phraseFile:
        return [line.rstrip('\n').split(' ') for line in phraseFile]
def validPhraseCount(phrases):
    """Count passphrases containing no repeated word."""
    return sum(1 for phrase in phrases if len(set(phrase)) == len(phrase))
def validNonAnagramPhraseCount(phrases):
    """Count passphrases in which no two words are anagrams of each other."""
    count = 0
    for phrase in phrases:
        # sorting each word's letters gives a canonical form; anagrams collide
        canonical = [''.join(sorted(word)) for word in phrase]
        if len(set(canonical)) == len(canonical):
            count += 1
    return count
def main():
    """Solve both parts of the puzzle and print the counts."""
    phrases = loadPassphrases()
    for solver in (validPhraseCount, validNonAnagramPhraseCount):
        print(solver(phrases))
if __name__ == '__main__':
    main()
|
from django.urls import path
from . import views
# URL routes for this app; names are used for reverse() / {% url %} lookups.
urlpatterns = [
    # index / landing page
    path('',views.index, name='index'),
    # profile page for a single officer
    path('profile/<int:officer_id>/',views.profile, name='profile'),
    # submit a new review
    path('add', views.add_review, name='add_review'),
    # search endpoint
    path('search', views.search, name='search'),
]
|
import logging
import fmcapi
def test__geolocations(fmc):
    """Smoke-test fmcapi.Geolocation: fetch all records, then one by name.

    :param fmc: connected FMC API handle; the FMC must already have a
        Geolocation object configured (including one named '_tmp')
    """
    logging.info("Testing Geolocation class. Requires a configured Geolocation")
    geolocations = fmcapi.Geolocation(fmc=fmc)
    logging.info("All Geolocation -- >")
    result = geolocations.get()
    logging.info(result)
    logging.info(f"Total items: {len(result['items'])}")
    del geolocations
    named_geolocation = fmcapi.Geolocation(fmc=fmc, name="_tmp")
    logging.info("One Geolocation -- >")
    logging.info(named_geolocation.get())
    logging.info("Testing Geolocation class done.\n")
|
from django.conf.urls import patterns, include, url
# from django.contrib import admin
# admin.autodiscover()
from apps.jobs import views as jobviews
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 — this module targets an older Django release.
urlpatterns = patterns('',
    url(r'^calendar/events/$', jobviews.events, name='jobCalendarEvents'),
    url(r'^calendar/$', jobviews.calendar, name='jobCalendar'),
    # the more specific scheduler/update pattern must precede the generic
    # scheduler view, since Django resolves URLs first-match
    url(r'^scheduler/update/(?P<job_scheduler_id>[\w\d]+)/$', jobviews.updateJobScheduler, name='updateJobScheduler'),
    url(r'^scheduler/(?P<job_scheduler_id>[\w\d]+)/$', jobviews.viewJobScheduler, name='jobScheduler'),
    # catch-all: a bare word is treated as a job id
    url(r'(?P<job_id>\w+)/$', jobviews.viewJob, name='viewJob'),
)
|
import numpy as np
from utils.utils import Utils
class Score(object):
    """Runs a forward pass through a stack of layers and optionally computes
    the output error against supplied target values."""

    def scoreData(self, weights, indepData,layers, depData = None, isError = False):
        """Propagate indepData through each layer in turn.

        :param weights: dict of per-layer weight matrices, keyed by layer index
        :param indepData: input matrix (independent variables)
        :param layers: dict of layer objects exposing scoreData(input, weights)
        :param depData: target values; only used when isError is True
        :param isError: when True, also compute (output - depData) with NaN
            cells zeroed out
        :return: (scores, error) where scores maps layer index -> activations
            (key 0 holds the raw input) and error is None unless isError is True

        NOTE(review): assumes weights' keys are consecutive integers starting
        at 1, so that scores[len(scores) - 1] is the final layer's output —
        confirm against the code that builds `weights`.
        """
        self.utils = Utils()
        thisInput = indepData
        scores = {}
        error = None
        self.layers = layers
        scores[0] = indepData
        self.keys = weights.keys()
        for x in self.keys:
            thisWeight = weights[x]
            thisLayer = self.layers[x]
            # each layer consumes the previous layer's output plus a bias column
            thisInput = thisLayer.scoreData(self.utils.addBias(thisInput) ,thisWeight)
            scores[x] = thisInput
        if isError:
            output = scores[len(scores) - 1]
            error = np.mat(output - depData)
            # zero out error cells where the computed error is NaN (missing targets)
            for row ,column in zip(*np.where(np.isnan(error))):
                error[row ,column] = 0.0
        return scores, error
|
"""
Generates polynomial from secret string with use of CRC encoding
"""
from bitstring import BitArray
import binascii
from Galois.Galois_Converter import GaloisConverter
from Galois.Galois_Field import GF
class PolynomialGenerator:
    """Builds a polynomial from a secret (secret bits + CRC split into integer
    coefficients) and evaluates it over GF(2**m)."""

    def __init__(self, secret_bytes, degree, crc_length, gf_exp):
        """
        :param secret_bytes: secret in bytes format
        :param degree: polynomial degree as int
        :param crc_length: CRC length as int
        :param gf_exp: exponential in GF(2**gf_exp)
        """
        self.degree = degree
        self.crc_length = crc_length
        self.secret_bit = BitArray(bytes=secret_bytes, length=len(secret_bytes) * 8)
        self.gf_exp = gf_exp
        # CRC-32 checksum of the secret, stored as a crc_length-bit unsigned int
        self.checksum_bit = BitArray(uint=binascii.crc32(self.secret_bit.bytes), length=self.crc_length)
        # join secret bitstring with CRC
        self.total_bit = self.secret_bit.copy()
        self.total_bit.append(self.checksum_bit)
        # NOTE(review): prune_secret() is defined below but never applied here,
        # so extract_coefficients() asserts the total length is already a
        # multiple of degree + 1 — confirm callers pre-size the secret.
        self.coefficients = self.extract_coefficients()
        # save polynomial in GF 2**32 form for performance reasons
        self.poly_gf_32 = GaloisConverter.convert_int_list_to_gf_2_list(self.coefficients, 32)
        # save galois field K for polynomial evaluations
        self.K = GF(2, gf_exp)
    def prune_secret(self, secret_bit):
        """ Prunes secret if secret length + CRC length is not multiple of
        polynomial degree + 1. Takes secret as BitArray
        :returns pruned secret as Bitarray """
        # check if bitstring has multiple of length of polynomial degree
        # secret is pruned if length don't match multiple of polynomial degree + 1
        remainder = (len(secret_bit) + self.crc_length) % (self.degree + 1)
        secret_len = len(secret_bit) - remainder
        if remainder == 0:
            return secret_bit
        else:
            # drop the trailing bits that would not fill a whole coefficient
            return secret_bit[0:secret_len]
    def extract_coefficients(self):
        """ extracts coefficients of polynomial from bitstring
        :returns coefficients as list """
        # split to parts, convert to uint and add to list
        coefficients = []
        assert len(self.total_bit) % (self.degree + 1) == 0
        # each coefficient takes an equal share of the total bitstring
        step = int(len(self.total_bit) / (self.degree + 1))
        for i in range(0, len(self.total_bit), step):
            # print(len(self.total_bit[i:i + step]))
            # print(self.total_bit[i:i + step].bin)
            # print(self.total_bit[i:i + step].uint)
            coefficients.append(self.total_bit[i:i + step].uint)
        return coefficients
    def evaluate_polynomial_gf_2(self, x):
        """ Evaluate polynomial of this polynomial generator at x in GF(2**m)
        :param x: int
        :returns function result as int

        m is taken from self.gf_exp. """
        m = self.gf_exp
        if m == 32:
            # reuse the precomputed GF(2**32) form instead of reconverting
            poly_gf = self.poly_gf_32
        else:
            poly_gf = GaloisConverter.convert_int_list_to_gf_2_list(self.coefficients, m)
        x_gf = GaloisConverter.convert_int_to_element_in_gf_2(x, m)
        y_gf = self.K.eval_poly(poly_gf, x_gf)
        result = GaloisConverter.convert_gf_2_element_to_int(y_gf, m)
        # Safety check
        if result > 2**m*2:
            raise ValueError('Too large number generated in polynomial GF(2**{}):{}'.format(m, result))
        return result
    def evaluate_polynomial_gf_2_array(self, array):
        """ Evaluate polynomial on list of integers
        :param array: list of integers
        :returns list of int results, one per input """
        result = []
        for x in array:
            result.append(self.evaluate_polynomial_gf_2(x))
        return result
|
# Bubble sort algorithm
# Very straightforward to implement in code.
# Merge, Quick and Heap are more complicated.
# Iterate multiple times, and initiate swaps to correct order.
# Check if the curr and curr + 1 are in correct order.
# If sorted, we move on, else we swap their position.
# Eg. [8, 5, 2, 9, 5, 6, 3]
# If any swaps were made during a pass, we need to iterate again; otherwise we are done.
# Bubble sort sorts in place, so no auxiliary storage is needed.
# Space complexity: O(1).
# Time complexity: worst case O(N^2); best case O(N), when the given array is already sorted.
#
def bubbleSort(array):
    """Sort *array* in place with bubble sort and return it.

    Each pass bubbles the largest remaining element to the end, so the scan
    shrinks by one slot per pass; the loop stops as soon as a full pass makes
    no swap. O(N^2) worst case, O(N) on sorted input, O(1) extra space.
    """
    passes = 0
    swapped = True
    while swapped:
        swapped = False
        for idx in range(len(array) - 1 - passes):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                swapped = True
        passes += 1
    return array
def swap(i, j, array):
    """Exchange the elements at indices *i* and *j* of *array* in place."""
    temp = array[i]
    array[i] = array[j]
    array[j] = temp
|
from .. import list_segments, list_segments_by_coordinates
def test_list_segments():
    """Smoke test: the full segment list should be large."""
    # As of April 2020 there were more than 900 active segments.
    assert len(list_segments()) > 900
def test_list_segments_by_coordinates():
    """Query segments around Schaarbeek and spot-check membership."""
    # As of April 2020 there are more than 30 active segments in Schaarbeek
    nearby = list_segments_by_coordinates(lat=50.867, lon=4.373, radius=2)
    assert len(nearby) > 30
    # 1003073114 should be one of them
    assert 1003073114 in nearby
    # 1003063473 should not be one of them
    assert 1003063473 not in nearby
|
import requests
from pprint import pprint
# Fetch the first page of GitHub users and append their login names to a file.
# NOTE: unauthenticated GitHub API requests are rate-limited.
url = "https://api.github.com/users?"
data = requests.get(url).json()
# BUG FIX: the original reopened the file for every user and never used a
# context manager, leaking the handle on errors; open it once instead.
with open("User_List.txt", "a") as user_file:
    for user in data:
        user_file.write(user['login'])
        user_file.write("\n")
|
# Standard Python libraries
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from collections import OrderedDict
# https://github.com/usnistgov/DataModelDict
from DataModelDict import DataModelDict as DM
# http://www.numpy.org/
import numpy as np
# https://pandas.pydata.org/
import pandas as pd
# https://github.com/usnistgov/atomman
import atomman.unitconvert as uc
# iprPy imports
from iprPy.tools import aslist
def todict(record, full=True, flat=False):
    """
    Converts the XML content of a record to a dictionary.
    Parameters
    ----------
    record : iprPy.Record
        A record of the record style associated with this function.
    full : bool, optional
        Flag used by the calculation records. A True value will include
        terms for both the calculation's input and results, while a value
        of False will only include input terms (Default is True).
    flat : bool, optional
        Flag affecting the format of the dictionary terms. If True, the
        dictionary terms are limited to having only str, int, and float
        values, which is useful for comparisons. If False, the term
        values can be of any data type, which is convenient for analysis.
        (Default is False).
    Returns
    -------
    dict
        A dictionary representation of the record's content.
    """
    model = DM(record)
    calc = model['calculation-dislocation-Peierls-Nabarro']
    params = {}
    # calculation/provenance metadata
    params['calc_key'] = calc['key']
    params['calc_script'] = calc['calculation']['script']
    params['iprPy_version'] = calc['calculation']['iprPy-version']
    params['atomman_version'] = calc['calculation']['atomman-version']
    # run parameters; stress deltas carry units, so convert to working units
    rp = calc['calculation']['run-parameter']
    params['delta_tau_xy'] = uc.value_unit(rp['delta_tau_xy'])
    params['delta_tau_yy'] = uc.value_unit(rp['delta_tau_yy'])
    params['delta_tau_yz'] = uc.value_unit(rp['delta_tau_yz'])
    params['tausteps'] = rp['tausteps']
    params['cdiffstress'] = rp['cdiffstress']
    params['fullstress'] = rp['fullstress']
    params['min_method'] = rp['minimize_style']
    params['min_options'] = rp['minimize_options']
    # input system description
    params['load_file'] = calc['system-info']['artifact']['file']
    params['load_style'] = calc['system-info']['artifact']['format']
    params['load_options'] = calc['system-info']['artifact']['load_options']
    params['family'] = calc['system-info']['family']
    symbols = aslist(calc['system-info']['symbol'])
    # keys/ids of linked records
    params['dislocation_key'] = calc['dislocation-monopole']['key']
    params['dislocation_id'] = calc['dislocation-monopole']['id']
    params['gammasurface_calc_key'] = calc['gamma-surface']['calc_key']
    params['peierlsnabarro_calc_key'] = calc['Peierls-Nabarro']['calc_key']
    if flat is True:
        # flat output keeps only str/int/float values
        params['symbols'] = ' '.join(symbols)
    else:
        params['symbols'] = symbols
    params['status'] = calc.get('status', 'finished')
    if full is True:
        if params['status'] == 'error':
            params['error'] = calc['error']
        elif params['status'] == 'not calculated':
            pass
        else:
            if flat is True:
                # no flat representation of results is emitted for finished runs
                # — presumably because they are non-scalar; confirm upstream
                pass
            else:
                params['tau_xy'] = uc.value_unit(calc['tau-xy'])
                params['tau_yy'] = uc.value_unit(calc['tau-yy'])
                params['tau_yz'] = uc.value_unit(calc['tau-yz'])
                params['total_energy'] = uc.value_unit(calc['total-energy'])
    return params
|
"""apps"""
from django.apps import AppConfig
class YbAppConfig(AppConfig):
    """Django AppConfig registering the yb_app application."""
    name = 'yb_app'
|
import unittest
from katas.kyu_7.distance_from_the_average import distances_from_average
class DistancesFromTheAverageTestCase(unittest.TestCase):
    """Unit tests for distances_from_average (mean minus each element).

    NOTE(review): the xrange-based cases below only run on Python 2 (xrange
    was removed in Python 3), and they compare a list result against a
    range — confirm the intended interpreter for this suite.
    """
    def test_equals(self):
        self.assertEqual(distances_from_average([55, 95, 62, 36, 48]),
                         [4.2, -35.8, -2.8, 23.2, 11.2])
    def test_equals_2(self):
        self.assertEqual(distances_from_average([1, 1, 1, 1, 1]),
                         [0, 0, 0, 0, 0])
    def test_equals_3(self):
        self.assertEqual(distances_from_average([1, -1, 1, -1, 1, -1]),
                         [-1.0, 1.0, -1.0, 1.0, -1.0, 1.0])
    def test_equals_4(self):
        self.assertEqual(distances_from_average([1, -1, 1, -1, 1]),
                         [-0.8, 1.2, -0.8, 1.2, -0.8])
    def test_equals_5(self):
        self.assertEqual(distances_from_average([2, -2]), [-2.0, 2.0])
    def test_equals_6(self):
        # single element: distance from its own average is zero
        self.assertEqual(distances_from_average([1]), [0])
    def test_equals_7(self):
        self.assertEqual(distances_from_average(
            [123, -65, 32432, -353, -534]
        ), [6197.6, 6385.6, -26111.4, 6673.6, 6854.6])
    def test_equals_8(self):
        self.assertEqual(distances_from_average(xrange(101)),
                         range(50, -51, -1))
    def test_equals_9(self):
        self.assertEqual(distances_from_average(xrange(1001)),
                         range(500, -501, -1))
    def test_equals_10(self):
        self.assertEqual(distances_from_average(xrange(1000001)),
                         range(500000, -500001, -1))
|
"""
steam.py
purpose: steam API integration to look up steam game information
"""
import re
import asyncio
import aiohttp
import discord
import json
from discord.ext import commands
#to check how similiar two strings are
from difflib import SequenceMatcher
#functions to make accessor functions easier and faster for list.sort() and list.sorted()
from operator import itemgetter, attrgetter
class Steam(commands.Cog):
    """Discord cog exposing Steam lookups ($search and $game) backed by the
    public ISteamApps/GetAppList endpoint."""
    def __init__(self, bot):
        self.client = bot
    # SECURITY NOTE(review): a Steam Web API key is hard-coded and committed
    # to source; move it to configuration/environment. These class attributes
    # are currently only referenced by the commented-out URL template below.
    api_key = 'AB34C196C2089915AF42DB847BC1D12F'
    interface_name = 'ISteamNews'
    method_name = 'GetNewsForApp'
    version = '0002'
    format_ = 'JSON'
    app_id = ''
    #steamAPIURL = f"http://api.steampowered.com/{interface_name}/{method_name}/v{version}/?key={api_key}&format={format_}&appid={app_id}"
    @commands.command()
    async def search(self, ctx, *game_name_input:str):
        """Look up a game by exact name; on a miss, suggest up to five
        similarly named titles (fuzzy-matched via difflib)."""
        game_name = ' '.join(game_name_input)
        print('game_name = ' + game_name)
        await ctx.trigger_typing()
        # NOTE(review): the session is closed on each exit path below but not
        # opened via `async with`, so an unexpected exception would leak it.
        session = aiohttp.ClientSession()
        async with session.get('http://api.steampowered.com/ISteamApps/GetAppList/v2') as r:
            if r.status == 200:
                response = await r.json()
                response = response['applist']['apps']
                def Similar(a,b):
                    """
                    ----- SequenceMatcher.ratio
                    return list of truples describing matching subsequences.
                    The idea is to find the longest contiguous matching subsequence that contains no "junk" elements like
                    blank lines or spaces (elements that are uninteresting in some sense). Handling junk is an extension
                    to the Ratcliff and Obershelp algorithm.
                    The same idea is then applied recursively to the pieces of the sequences to the left and right
                    of the matching sequence. This does not yield minimal edit sequences, but tends to yield matches
                    that "look right" to people.
                    Automatic junk heuristic:
                    SequenceMatcher supports a heuristic that automatically treats certain sequence items as junk. The heuristic
                    counts how many times each individual item appears in the sequence. If an item's duplicates (after the first one)
                    account for more than 1% of the sequence and the sequence is at least 200 items long, that item is marked as
                    "popular" and is treated as junk for the purpose of sequence-matching.
                    Timing:
                    SequenceMatcher is quadratic time for the worst case, linear for the best case.
                    Linear Time vs Quadratic Time:
                    A method is said to be linear (often written as O(n) in Big O notation) when the time it takes increases linearly with the number of
                    elements involved. ex. for loops
                    A method is quadratic (written as O(n^2))when the time it takes increases exponentially based on the number of
                    elements involved.
                    ----- SequenceMatcher.get_matching_blocks
                    Called by SequenceMatcher.ratio to calculate the ratio
                    returns a list of triples describing matching subsequences.
                    Each triple is of the form (i, j, n) and means that a[i:i+n] == b[j:j+n]
                    The triples are monotonically increasing in i and j.
                    The last triple is a dummy and has the value (len(a), len(b), 0). It is the only triple with
                    n == 0.
                    If (i, j, n) and (i', j', n') are adjacent triples in the list and the second is not
                    the last triple in the list, then i+n != i' or j+n != j'.
                    In other words, adjacent triples always describe non-adjacent equal blocks.
                    ratio uses SequenceMatcher.get_matching_blocks's results and sums the sizes of all matched sequences.
                    It then calculates the ratio with
                    >>> return 2.0 * matches / length
                    where length is len(a) + len(b)
                    where a is the first sequence and b is the second
                    """
                    return SequenceMatcher(None, a, b).ratio()
                match_found = False
                similar_found = False
                match_name = ''
                # parallel lists: similarity labels and bolded candidate names
                sim = []
                gam = []
                suggestions = ''
                for i in response:
                    if i["name"].lower() == game_name.lower():
                        match_found = True
                        match_name = i["name"]
                        app_id = i["appid"]
                    elif Similar(i["name"].lower(), game_name.lower()) > 0.6:
                        similar_found = True
                        sim.append(str(round((Similar(i["name"].lower(), game_name.lower())*100),1)) + '% match')
                        gam.append('**' + i["name"] + '**')
                # keep the five candidates with the highest match labels
                suggestions = sorted(zip(gam, sim), key=itemgetter(1), reverse=True)[:5]
                """
                ----- sorted()
                Python has a built in sorted() function that builds a new sorted list from an iterable.
                >>> sorted([5,2,3,1,4])
                [1,2,3,4,5]
                ----- sort()
                Python lists also have a list.sort() method that modifies the list in-place. Less convenient than
                sorted(), but if you don't need the original list, it's slightly more efficient.
                ----- zip()
                zip() makes an iterator that aggregates elements from each of the iterables.
                Returns an iterator of tuples, where the i-th tuple contains the i-th element from each of the
                argument sequences or iterables.
                The iterator stops when the shorted input iterable is exhausted.
                No arguments returns an empty iterator.
                Ex.
                >>> x = [1,2,3]
                >>> y = [4,5,6]
                >>> zipped = zip(x,y)
                >>> list(zipped)
                [(1,4), (2,5), 3,6)]
                """
                sug_join = '\n🔸 '.join(' | '.join(s) for s in suggestions)
                if not match_found:
                    if similar_found:
                        await ctx.send(f"No matches were found for `{game_name}`, did you mean: \n🔸 {sug_join}")
                    else:
                        await ctx.send(f"No exact or similar matches found for `{game_name}`")
                    await session.close()
                else:
                    # exact match: build a rich embed linking to the store page
                    print('app_id = ' + str(app_id))
                    embed1 = discord.Embed(title = match_name, url = "https://store.steampowered.com/app/" + str(app_id), colour = 0xD84234)
                    embed1.set_thumbnail(url='http://icons.iconarchive.com/icons/cornmanthe3rd/plex/256/Other-steam-red-icon.png')
                    embed1.add_field(name="Steam Game ID", value=str(app_id), inline=True)
                    embed1.set_author(name = 'Requested by: ' + ctx.author.name, icon_url=ctx.author.avatar_url_as(format='jpg'))
                    await ctx.send(embed = embed1)
                    embed1.clear_fields()
                    await session.close()
            else:
                await session.close()
    @commands.command()
    async def game(self, ctx, *game_name_input:str):
        """Convert game name to ID"""
        game_name = ' '.join(game_name_input)
        print('game_name = ' + game_name)
        #converting game name to game id...
        session = aiohttp.ClientSession()
        await ctx.trigger_typing()
        async with session.get('http://api.steampowered.com/ISteamApps/GetAppList/v2') as r:
            print('r.status. = ' + str(r.status))
            if r.status == 200:
                response = await r.json()
                response = response['applist']['apps']
                try:
                    # exact (case-insensitive) name match only — no suggestions
                    match_found = False
                    match_name = ''
                    for i in response:
                        if i["name"].lower() == game_name.lower():
                            match_found = True
                            match_name = i["name"]
                            app_id = i["appid"]
                    if not match_found:
                        await ctx.send(f"Sorry, no matches were found for ``{game_name}``. Use **$search [game name]** to search for a game.")
                    else:
                        print('app_id = ' + str(app_id))
                        embed1 = discord.Embed(title = match_name, url = "https://store.steampowered.com/app/" + str(app_id), colour = 0xD84234)
                        embed1.set_thumbnail(url='http://icons.iconarchive.com/icons/cornmanthe3rd/plex/256/Other-steam-red-icon.png')
                        embed1.add_field(name="Steam Game ID", value=str(app_id), inline=True)
                        embed1.set_author(name = 'Requested by: ' + ctx.author.name, icon_url=ctx.author.avatar_url_as(format='jpg'))
                        await ctx.send(embed = embed1)
                        embed1.clear_fields()
                except Exception as err:
                    print('something went wrong: ' + str(err))
                    await session.close()
                    return
                await session.close()
            else:
                # NOTE(review): the session is not closed on this branch — leak.
                print('r.status = ' + str(r.status))
def setup(bot):
    """discord.py extension entry point: attach the Steam cog to *bot*."""
    steam_cog = Steam(bot)
    bot.add_cog(steam_cog)
|
# 6063: read two integers, print the larger one
a, b = input().split()
a = int(a)
b = int(b)
c = (a if a >= b else b)
print(c)
# 6064
# not working well :(  (translated; original Korean note)
# 6065: read three integers, print the even ones
a, b, c = input().split()
a = int(a)
b = int(b)
c = int(c)
if a % 2 == 0:
    print(a)
if b % 2 == 0:
    print(b)
if c % 2 == 0:
    print(c)
# 6066: read three integers, label each even/odd
a, b, c = input().split()
a = int(a)
b = int(b)
c = int(c)
if a % 2 == 0:
    print("even")
else:
    print("odd")
if b % 2 == 0:
    print("even")
else:
    print("odd")
if c % 2 == 0:
    print("even")
else:
    print("odd")
# 6067: classify one integer by sign and parity (A/B/C/D)
n = int(input())
if n < 0:
    if n % 2 ==0:
        print('A')
    else:
        print('B')
else:
    if n % 2 ==0:
        print('C')
    else:
        print('D')
# 6068: letter grade from a score
n = int(input())
if n >= 90:
    print('A')
elif n >= 70:
    print('B')
elif n >= 40:
    print('C')
else:
    print('D')
# 6069: message for a grade letter
# NOTE(review): `eval` shadows the builtin of the same name.
eval = input()
if eval == 'A':
    print('best!!!')
elif eval == 'B':
    print('good!!')
elif eval == 'C':
    print('run!')
elif eval =='D':
    print('slowly~')
else:
    print('what?')
# 6070: season from month number (3-5 spring, 6-8 summer, 9-11 fall, rest winter)
n = int(input())
if n // 3 == 1:
    print('spring')
elif n // 3 == 2:
    print('summer')
elif n // 3 == 3:
    print('fall')
else:
    print('winter')
# 6071: echo integers back until 0 is entered
n = 1
while n != 0:
    n = int(input())
    if n!=0:
        print(n)
    if n==0:
        break
|
# "while" is a repetition operator: it repeats its body until the given
# condition fails.  Counting from 1 to 10 and then announcing completion is
# expressed here with range(); the printed output is exactly the same as the
# original while/else loop (1..10, then "Done").
for number in range(1, 11):
    print(number)
print("Done")
|
# -*- coding:utf-8 -*-
"Takes the info of all relay devices and the selected relay device number"
def Fairness(UES):
    # Compute the fairness state of the devices in the current state
    # (Jain's fairness index over per-device gain ratios).
    U_total = 0
    U_link_s = 0
    # Totals: sum of gains and sum of link qualities (1 - link error).
    for i in range(0,len(UES)):
        U_total += UES[i].gains
        U_link_s += (1-UES[i].link_e)
    X=[]
    for i in range(0,len(UES)):
        # Fair share for device i, proportional to its link quality.
        Ui_overline = ((1-UES[i].link_e)/U_link_s)*U_total
        # Ratio of actual gains to fair share, capped at 1.0.
        if UES[i].gains<=Ui_overline:
            X.append(UES[i].gains/Ui_overline)
        else:
            X.append(1.0)
        print "%d-th gains:%f U_over:%f link_s:%f total_s:%f total gains:%f"%(
            UES[i].num, UES[i].gains, Ui_overline, 1-UES[i].link_e, U_link_s, U_total)
    sum_of_xi = 0
    for i in range(0,len(X)):
        sum_of_xi += X[i]
    sum_of_xi2 = 0
    for i in range(0,len(X)):
        sum_of_xi2 += X[i]**2
    # Jain's index: (sum x)^2 / (n * sum x^2), in (0, 1].
    fairness = (sum_of_xi)**2/(len(X)*sum_of_xi2)
    print "in calcaulte fairness of each",X,"fairness:",fairness
    # UES[x].fairness = fairness
    return fairness
|
# Reverse Engineering
# Lab 5, script 2
# Jeremy Mlazovsky
print "Hello Lab5\n"
from idaapi import *
from idc import *
class MyDbgHook(DBG_Hooks):
    """Debug hook that patches the jz/jnz byte at 0x401241 while the target runs."""
    def dbg_process_start(self, pid, tid, ea, name, base, size):
        # On process start, patch 0x74 (jz) -> 0x75 (jnz) to invert the branch.
        print "Process started, pid=%d tid=%d name=%s" % (pid, tid, name)
        new_val = 0x75
        patch_ea = 0x401241
        print("Setting instruction to 0x75 (jnz)")
        print("BYTE @ 0x%x before patching is [0x%X]" % (patch_ea, Byte(patch_ea)))
        result = PatchByte(patch_ea, new_val)
        # Python 2 parses this as print(("...") % result).
        print("Result was %d") % result
        print("BYTE @ 0x%x after patching is [0x%X]\n" % (patch_ea, Byte(patch_ea)))
        return 0
    def dbg_process_exit(self, pid, tid, ea, code):
        # On exit, restore the original 0x74 (jz) byte.
        print "Process exited pid=%d tid=%d ea=0x%x code=%d" % (pid, tid, ea, code)
        new_val = 0x74
        patch_ea = 0x401241
        print("")
        print("Resetting instruction back to 0x74 (jz)")
        print("BYTE @ 0x%x before patching is [0x%X]" % (patch_ea, Byte(patch_ea)))
        result = PatchByte(patch_ea, new_val)
        print("Result was %d") % result
        print("BYTE @ 0x%x after patching is [0x%X]\n" % (patch_ea, Byte(patch_ea)))
        return 0
    def dbg_library_load(self, pid, tid, ea, name, base, size):
        print "Library loaded: pid=%d tid=%d name=%s base=%x" % (pid, tid, name, base)
    def dbg_bpt(self, tid, ea):
        # Breakpoint callback: at 0x0040122D dump the user-entered password
        # byte by byte from the "password" label.
        print "Break point at 0x%x pid=%d" % (ea, tid)
        password_string = ""
        # if at section where user input string is in ESI register ...
        if ea == 0x0040122D:
            password = LocByName("password")
            print("password: [0x%x] %s") % (password, password)
            password_length = get_item_size(password)
            print("password_length: %d") % password_length
            # length-1: presumably skips a trailing NUL terminator — confirm.
            for character_number in range(0, password_length-1):
                #print("")
                character = Byte(password+character_number)
                print("character_number=%d") % character_number
                print("character 0x%x %d %s") % (character, character, character)
                password_string = password_string + chr(character)
            print("user entered the password:%s\n") % password_string
        return 0
    def dbg_trace(self, tid, ea):
        print tid, ea
        return 0
    def dbg_step_into(self):
        print "Step into"
        return self.dbg_step_over()
    def dbg_step_over(self):
        # Disassemble the current instruction, then keep stepping; after 5
        # steps ask the debugger to terminate the process.
        eip = GetRegValue("EIP")
        print "0x%x %s" % (eip, GetDisasm(eip))
        self.steps += 1
        if self.steps >= 5:
            request_exit_process()
        else:
            request_step_over()
        return 0
# Remove an existing debug hook
# Remove an existing debug hook
try:
    if debughook:
        print "Removing previous hook ...\n"
        debughook.unhook()
except:
    # Bare except is intentional: `debughook` is undefined on the first run
    # of this script inside IDA (NameError).
    pass
# memory address at which the BYTE opcode 74 (jz) needs to be patched to 75 (jnz)
ea = 0x401241
# Install the debug hook
debughook = MyDbgHook()
debughook.hook()
debughook.steps = 0
# Add a breakpoint at address 0x401241 (right at the instruction we want to modify)
# Adding like this enables the breakpoint as well
AddBpt(ea)
# set breakpoint at location where was can get user-entered password
AddBpt(0x0040122D)
# Stop at the break point "ea"
request_run_to(ea)
# Start debugging
run_requests()
|
import pandas as pd
from sklearn.utils import shuffle
DATA_PATH = "./DataSet/"
REPOSITORY = "/On_Time_On_Time_Performance_"
CSV_PATH = "/On_Time_On_Time_Performance_"
# Columns of the BTS on-time performance CSVs that the models never use.
useless1 = ["Quarter","UniqueCarrier","Carrier","TailNum","FlightNum","OriginAirportSeqID","Origin","OriginCityMarketID","OriginCityName","OriginStateFips","OriginStateName","OriginWac",
"DestAirportSeqID","Dest","DestCityMarketID","DestCityName","DestStateFips","DestStateName","DestWac","DepDelay","DepDel15","DepartureDelayGroups","DepTimeBlk","TaxiOut","WheelsOff",
"WheelsOn","TaxiIn","ArrDelay","ArrDel15","ArrivalDelayGroups","ArrTimeBlk","Cancelled","CancellationCode","Diverted",
"CRSElapsedTime","ActualElapsedTime","AirTime","Flights","Distance","FirstDepTime","TotalAddGTime","LongestAddGTime",
"DivAirportLandings","DivReachedDest","DivActualElapsedTime","DivArrDelay","DivDistance","Div1Airport","Div1AirportID","Div1AirportSeqID","Div1WheelsOn","Div1TotalGTime",
"Div1LongestGTime","Div1WheelsOff","Div1TailNum","Div2Airport","Div2AirportID","Div2AirportSeqID","Div2WheelsOn","Div2TotalGTime","Div2LongestGTime","Div2WheelsOff","Div2TailNum",
"Div3Airport","Div3AirportID","Div3AirportSeqID","Div3WheelsOn","Div3TotalGTime","Div3LongestGTime","Div3WheelsOff","Div3TailNum","Div4Airport","Div4AirportID","Div4AirportSeqID",
"Div4WheelsOn","Div4TotalGTime","Div4LongestGTime","Div4WheelsOff","Div4TailNum","Div5Airport","Div5AirportID","Div5AirportSeqID","Div5WheelsOn","Div5TotalGTime","Div5LongestGTime",
"Div5WheelsOff","Div5TailNum"]
# clean_ds removes from the dataframe, in place, all features listed in vet
def clean_ds(dataframe, vet=useless1):
    """Clean *dataframe* in place: drop rows missing any core field, fill
    remaining NaNs with 0, then remove every column listed in *vet*.
    Raises KeyError if a column in *vet* is absent (same as the old loop)."""
    dataframe.dropna(subset=["Year", "Month", "DayofMonth", "DayOfWeek", "FlightDate", "AirlineID", "OriginAirportID", "OriginState",
                             "DestAirportID", "DestState", "CRSDepTime", "DepTime", "DepDelayMinutes", "CRSArrTime", "ArrTime",
                             "ArrDelayMinutes"], inplace=True)
    dataframe.fillna(0, inplace=True)
    # Single vectorised drop instead of the old `del` loop over range(len(vet)).
    dataframe.drop(columns=list(vet), inplace=True)
    return
def split_data(data_set):
    """Shuffle *data_set*, split it 80/20 into train/test, re-sort each part
    chronologically and write them to train_set.csv / test_set.csv."""
    shuffled = shuffle(data_set)
    cut = int(len(data_set) * 0.8)
    train = shuffled.iloc[0:cut, :]
    test = shuffled.iloc[cut:len(data_set), :]
    chrono = ['Year', 'Month', 'DayofMonth', 'DepTime']
    train = train.sort_values(chrono, axis=0, ascending=[True, True, True, True])
    test = test.sort_values(chrono, axis=0, ascending=[True, True, True, True])
    test.to_csv("test_set.csv", sep=';', decimal=',')
    train.to_csv("train_set.csv", sep=';', decimal=',')
    return
def split_data2(data_set):
    """Like split_data but for the time-shifted (t) columns; writes the _2
    CSV files and also returns (train, test)."""
    shuffled = shuffle(data_set)
    cut = int(len(data_set) * 0.8)
    train = shuffled.iloc[0:cut, :]
    test = shuffled.iloc[cut:len(data_set), :]
    chrono = ['Year(t)', 'Month(t)', 'DayofMonth(t)', 'DepTime(t)']
    train = train.sort_values(chrono, axis=0, ascending=[True, True, True, True])
    test = test.sort_values(chrono, axis=0, ascending=[True, True, True, True])
    test.to_csv("test_set_2.csv", sep=';', decimal=',')
    train.to_csv("train_set_2.csv", sep=';', decimal=',')
    return train, test
# create_ds merges all twelve months of the given year into one DataFrame
def create_ds(year):
    """Read the 12 monthly on-time CSVs for *year*, clean each with clean_ds,
    and return their concatenation."""
    csvs = {}
    for month in range(1,13):
        # Directory: ./DataSet/<year>/On_Time_On_Time_Performance_<year>_<month>
        DATA_PATH_i = DATA_PATH + str(year) + REPOSITORY + str(year) + "_" + str(month)
        print(DATA_PATH_i)
        CSV_PATH_i = CSV_PATH + str(year) + "_" + str(month)
        print(CSV_PATH_i)
        # dtype=object keeps every column as raw strings.
        csvs[month-1] = pd.read_csv(DATA_PATH_i + CSV_PATH_i+".csv",dtype=object,delimiter=',')
        clean_ds(csvs[month-1],useless1)
    result = pd.concat(csvs)
    return result
# unify_ds merges the years reduced to a single route into a new file || or creates one file per year
def unify_ds():
    '''#UNICA TRATTA, TUTTI GLI ANNI
    anni_puliti = {}
    for year in range(0,4):
        anni_puliti[year] = create_ds(year+2014)
        #in ris ci sono solo i voli da NY a CA
        anni_puliti[year] = anni_puliti[year][(anni_puliti[year]['OriginState'] == 'NY') & (anni_puliti[year]['DestState'] == 'CA')]
        anni_puliti[year] = anni_puliti[year].sort_values(['FlightDate','DepTime'],axis=0,ascending=[True, True])
    tutti_anni = pd.concat(anni_puliti)
    tutti_anni.to_csv("voli_unicatratta_2.csv",sep = ';',decimal = ',')
    '''
    # FULL DATASET, ONE FILE PER YEAR (2014-2017)
    for year in range(0,4):
        anno_pulito = create_ds(year+2014)
        anno_pulito= anno_pulito.sort_values(['FlightDate','DepTime'],axis=0,ascending=[True, True])
        anno_pulito.to_csv("voli_"+str(year+2014)+"_2.csv",sep = ';',decimal = ',')
    return
# obtain_index returns the positional index of the column named index_name
def obtain_index(data_frame, index_name):
    """Return the position of column *index_name* in *data_frame*, or None
    (implicitly) when the column does not exist."""
    for position, column in enumerate(data_frame.columns.values):
        if column == index_name:
            return position
from django.db import models
class Quote(models.Model):
    """A user-owned quotation; default ordering is newest first."""
    text = models.TextField()
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    # on_delete is mandatory from Django 2.0; CASCADE reproduces the implicit
    # pre-2.0 default, so behaviour is unchanged on older versions.
    owner = models.ForeignKey('auth.User', related_name='quotes', on_delete=models.CASCADE)
    class Meta:
        ordering = ('-created',)
|
#!/usr/bin/env python3
"""prelockd"""
import os
import mmap
from re import search
from sre_constants import error as invalid_re
from time import sleep, monotonic, process_time
from sys import stdout, stderr, exit, argv
from signal import signal, SIGTERM, SIGINT, SIGQUIT, SIGHUP
from ctypes import CDLL
def valid_re(reg_exp):
    """Validate a regular expression; print an error and exit(1) if invalid."""
    try:
        # Compiling/searching against the empty string is enough to trigger
        # a parse error for a malformed pattern.
        search(reg_exp, '')
    except invalid_re:
        errprint('Invalid config: invalid regexp: {}'.format(reg_exp))
        exit(1)
def errprint(*text):
    """Write *text* to stderr, flushing immediately."""
    print(*text, file=stderr, flush=True)
def string_to_float_convert_test(string):
    """Return float(string), or None when the string is not a valid float.

    Non-string, non-numeric inputs (e.g. None) still raise TypeError, as in
    the original.
    """
    try:
        result = float(string)
    except ValueError:
        return None
    return result
def mlockall():
    """
    Lock all current and future process memory via libc mlockall(MCL_FUTURE).
    """
    MCL_FUTURE = 2
    libc = CDLL('libc.so.6', use_errno=True)
    result = libc.mlockall(MCL_FUTURE)
    if result != 0:
        # NOTE(review): mlockall() returns -1 and sets errno on failure; the
        # value interpolated here is the raw return code, not errno — confirm.
        errprint('ERROR: cannot lock process memory: [Errno {}]'.format(
            result))
        errprint('Exit.')
        exit(1)
    else:
        if debug:
            print('process memory locked with MCL_FUTURE')
def signal_handler(signum, frame):
    """
    Handle termination signals: close every mmap and file descriptor held in
    the global lock_dict, then exit.
    """
    if len(lock_dict) > 0:
        for i in lock_dict:
            lock_dict[i][1].close() # close mm
            lock_dict[i][0].close() # close fd
    print('Got signal {}, exit.'.format(signum))
    exit()
def get_current_set():
    """
    Scan /proc/*/map_files and return the set of real paths of all files
    currently mapped by any process (only those that still exist).
    """
    m0 = monotonic()
    p0 = process_time()
    if debug:
        print('Looking for mapped files...')
    new_map_set = set()
    for pid in os.listdir('/proc'):
        # Only numeric entries are process directories.
        if pid[0].isdecimal() is False:
            continue
        map_dir = '/proc/' + pid + '/map_files/'
        try:
            try:
                f_list = os.listdir(map_dir)
            except PermissionError as e:
                # Reading map_files requires root; give up entirely.
                errprint(e)
                exit(1)
            for f_name in f_list:
                f_realpath = os.path.realpath(map_dir + f_name)
                new_map_set.add(f_realpath)
        except FileNotFoundError as e:
            # The process exited while we were scanning it.
            errprint(e)
            continue
    # Keep only paths that still exist (dropped for deleted/rotated files).
    current_map_set = set()
    for path in new_map_set:
        if os.path.exists(path):
            current_map_set.add(path)
        else:
            if debug:
                print('skip:', path)
    if debug:
        list_d = list(current_map_set)
        list_d.sort()
        for path in list_d:
            print('mapped:', path)
    m1 = monotonic()
    p1 = process_time()
    if debug:
        print('Found {} mapped files in {}s (process time: {}s)'.format(len(
            current_map_set), round(m1 - m0, 3), round(p1 - p0, 3)))
    return current_map_set
def lock_files(_map_set):
    """
    mmap and (via the process-wide mlockall) lock every file in _map_set that
    matches the configured $LOCK_RE, respecting $MAX_FILE_SIZE and the
    max_self_rss budget.  Handles are kept in the global lock_dict.
    """
    if debug:
        print('mapping ang locking files...')
    lock_counter = 0
    mm_read_size_counter = 0
    # Collect candidate path -> size for non-empty files matching the regexp.
    di = dict()
    for rp in _map_set:
        rp = str(rp)
        if search(r, rp) is not None:
            try:
                s = os.path.getsize(rp)
                if s > 0:
                    di[rp] = s
            except FileNotFoundError as e:
                if debug:
                    print(e)
        else:
            if debug:
                print("skip (doesn't match $LOCK_RE): " + rp)
    # Sort candidates by ascending size so small files are locked first.
    list_d = list(di.items())
    list_d.sort(key=lambda i: i[1])
    for f_realpath, _ in list_d:
        try:
            size = os.path.getsize(f_realpath)
        except FileNotFoundError as e:
            errprint(e)
            continue
        if size > MAX_SIZE:
            # Sizes are ascending: once one file is too big, the rest are
            # too.  In debug mode keep iterating so every skip is logged;
            # otherwise stop early.  (NOTE(review): nesting reconstructed —
            # confirm against upstream.)
            if debug:
                print('skip (file size {}M > $MAX_FILE_SIZE) {}'.format(round(
                    size / MIB, 1), f_realpath))
                continue
            else:
                break
        try:
            rss = get_self_rss()
        except OSError as e:
            errprint(e)
            break
        if rss + size > max_self_rss:
            # Same pattern as above: log-and-continue in debug, break otherwise.
            if debug:
                print('skip (rss ({}M) + size ({}M) > max_self_rss) {}'.format(
                    round(rss / MIB, 1), round(size / MIB, 1), f_realpath))
                continue
            else:
                break
        try:
            if debug:
                print('locking ({}M) {}'.format(
                    round(size / MIB, 1), f_realpath))
            # Keep both the fd and the mmap alive so the pages stay resident.
            f = open(f_realpath, 'rb')
            mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            lock_dict[f_realpath] = (f, mm)
            lock_counter += 1
            mm_read_size_counter += size
        except OSError as e:
            errprint(e)
            break
    print('locked {}M files'.format(round(mm_read_size_counter / MIB, 1)))
def write_snapshot():
    """
    Save the sorted list of currently mapped files to saved_path, one path
    per line, creating the state directory on first use.
    """
    map_set = get_current_set()
    map_list = sorted(list(map_set))
    print('Saving mapped files...')
    try:
        with open(saved_path, 'w') as f:
            for line in map_list:
                f.write(line + '\n')
    except FileNotFoundError:
        # First run: the state directory does not exist yet; create it with
        # owner-only permissions and retry once.
        os.mkdir(saved_dir)
        os.chmod(saved_dir, mode=0o700)
        with open(saved_path, 'w') as f:
            for line in map_list:
                f.write(line + '\n')
    print('OK, mmapped files list has been saved in ' + saved_path)
def get_saved_set():
    """
    Read the snapshot written by write_snapshot() and return the set of
    listed paths that still exist on disk.
    """
    if debug:
        print('Looking for saved files...')
    saved_set = set()
    try:
        with open(saved_path) as f:
            for line in f:
                # Strip the trailing newline added by write_snapshot().
                saved_set.add(line[:-1])
    except FileNotFoundError as e:
        errprint(e)
    # Keep only paths that still exist.
    new_saved_set = set()
    for path in saved_set:
        if os.path.exists(path):
            new_saved_set.add(path)
        else:
            if debug:
                print('skip:', path)
    if debug:
        list_d = list(new_saved_set)
        list_d.sort()
        for path in list_d:
            print('saved:', path)
    print('Found {} saved paths'.format(len(new_saved_set)))
    return new_saved_set
def get_self_rss():
    """Return this process's resident set size in bytes (statm field 2 * page size)."""
    with open('/proc/self/statm') as statm:
        fields = statm.readline().split(' ')
    return int(fields[1]) * SC_PAGESIZE
###############################################################################
MIB = 1024 * 1024
# NOTE(review): min_mem_available appears unused in this file — confirm.
min_mem_available = 20 * 1024
saved_dir = '/var/lib/prelockd'
saved_path = '/var/lib/prelockd/saved_snapshot'
# Default until the config's $DEBUG key is parsed below.
debug = True
# CLI: -p (print mapped files), -w (write snapshot), -c <config> (daemon mode).
a = argv[1:]
la = len(a)
if la == 0:
    errprint('invalid input: missing CLI options')
    exit(1)
elif la == 1:
    if a[0] == '-p':
        get_current_set()
        exit()
    elif a[0] == '-w':
        write_snapshot()
        exit()
    else:
        errprint('invalid input')
        exit(1)
elif la == 2:
    if a[0] == '-c':
        config = a[1]
    else:
        errprint('invalid input')
        exit(1)
else:
    errprint('invalid input: too many options')
    exit(1)
with open('/proc/meminfo') as f:
    mem_list = f.readlines()
# First line is "MemTotal:  NNN kB": drop the label and the trailing " kB".
mem_total = int(mem_list[0].split(':')[1][:-4])
mem_list_names = [s.split(':')[0] for s in mem_list]
try:
    mem_available_index = mem_list_names.index('MemAvailable')
except ValueError:
    errprint('Your Linux kernel is too old, Linux 3.14+ required\nExit')
    # BUG FIX: the script previously printed "Exit" but kept running.
    exit(1)
SC_PAGESIZE = os.sysconf(os.sysconf_names['SC_PAGESIZE'])
config_dict = dict()
# Parse "$KEY=value" lines from the config file; duplicate keys are fatal.
try:
    with open(config) as f:
        for line in f:
            if line[0] == '$' and '=' in line:
                key, _, value = line.partition('=')
                key = key.rstrip()
                value = value.strip()
                if key in config_dict:
                    errprint('config key {} duplication'.format(key))
                    exit(1)
                config_dict[key] = value
except (PermissionError, UnicodeDecodeError, IsADirectoryError,
        IndexError, FileNotFoundError) as e:
    errprint('Invalid config: {}. Exit.'.format(e))
    exit(1)
# $DEBUG: mandatory boolean, replaces the startup default.
if '$DEBUG' in config_dict:
    debug = config_dict['$DEBUG']
    if debug == 'True':
        debug = True
    elif debug == 'False':
        debug = False
    else:
        errprint('invalid $DEBUG value')
        exit(1)
else:
    errprint('missing $DEBUG key')
    exit(1)
# $LOCK_CURRENT_MAPPED: lock files currently mapped by running processes.
if '$LOCK_CURRENT_MAPPED' in config_dict:
    lock_current_mapped = config_dict['$LOCK_CURRENT_MAPPED']
    if lock_current_mapped == 'True':
        lock_current_mapped = True
    elif lock_current_mapped == 'False':
        lock_current_mapped = False
    else:
        errprint('invalid $LOCK_CURRENT_MAPPED value')
        exit(1)
else:
    errprint('missing $LOCK_CURRENT_MAPPED key')
    exit(1)
# $LOCK_SAVED_MAPPED: also lock the paths from the saved snapshot.
if '$LOCK_SAVED_MAPPED' in config_dict:
    lock_saved_mapped = config_dict['$LOCK_SAVED_MAPPED']
    if lock_saved_mapped == 'True':
        lock_saved_mapped = True
    elif lock_saved_mapped == 'False':
        lock_saved_mapped = False
    else:
        errprint('invalid $LOCK_SAVED_MAPPED value')
        exit(1)
else:
    errprint('missing $LOCK_SAVED_MAPPED key')
    exit(1)
# $MAX_FILE_SIZE_MIB: per-file size cap, converted MiB -> bytes.
if '$MAX_FILE_SIZE_MIB' in config_dict:
    string = config_dict['$MAX_FILE_SIZE_MIB']
    max_file_size_mib = string_to_float_convert_test(string)
    if max_file_size_mib is None:
        errprint('invalid $MAX_FILE_SIZE_MIB value')
        exit(1)
    max_file_size = int(max_file_size_mib * 1048576)
else:
    errprint('missing $MAX_FILE_SIZE_MIB key')
    exit(1)
# $LOCK_RE: regexp a path must match to be locked.
if '$LOCK_RE' in config_dict:
    lock_re = config_dict['$LOCK_RE']
    valid_re(lock_re)
else:
    errprint('missing $LOCK_RE key')
    exit(1)
# $MAX_SELF_RSS_PERCENT: RSS budget as a percentage of MemTotal.
if '$MAX_SELF_RSS_PERCENT' in config_dict:
    string = config_dict['$MAX_SELF_RSS_PERCENT']
    max_self_rss_percent = string_to_float_convert_test(string)
    if max_self_rss_percent is None:
        errprint('invalid $MAX_SELF_RSS_PERCENT value')
        exit(1)
    # mem_total is in kB; result is in bytes.
    max_self_rss_pc = int(mem_total * max_self_rss_percent / 100) * 1024
else:
    # BUG FIX: the message previously named $MAX_FILE_SIZE_MIB (copy-paste).
    errprint('missing $MAX_SELF_RSS_PERCENT key')
    exit(1)
# $MAX_SELF_RSS_MIB: absolute RSS budget, converted MiB -> bytes.
if '$MAX_SELF_RSS_MIB' in config_dict:
    string = config_dict['$MAX_SELF_RSS_MIB']
    max_self_rss_mib = string_to_float_convert_test(string)
    if max_self_rss_mib is None:
        errprint('invalid $MAX_SELF_RSS_MIB value')
        exit(1)
    max_self_rss_mib = int(max_self_rss_mib * 1048576)
else:
    errprint('missing $MAX_SELF_RSS_MIB key')
    exit(1)
# Effective budget is the stricter of the absolute and percentage limits.
if max_self_rss_mib <= max_self_rss_pc:
    max_self_rss = max_self_rss_mib
else:
    max_self_rss = max_self_rss_pc
config = os.path.abspath(config)
print('Starting prelockd with the config: {}'.format(config))
if debug:
    print('$MAX_SELF_RSS_PERCENT:', round(max_self_rss_pc / MIB, 1), '(MiB)')
    print('$MAX_SELF_RSS_MIB: ', round(max_self_rss_mib / MIB, 1))
    print('max_self_rss: ', round(max_self_rss / MIB, 1), '(MiB)')
    print('$DEBUG: ', debug)
    print('$LOCK_CURRENT_MAPPED: ', lock_current_mapped)
    print('$LOCK_SAVED_MAPPED: ', lock_saved_mapped)
    print('$MAX_FILE_SIZE_MIB: ', max_file_size / MIB)
    print('$LOCK_RE: ', lock_re)
# Lock our own memory so the mmapped pages below stay resident.
mlockall()
r = lock_re
# max file size that can be locked
MAX_SIZE = max_file_size
###############################################################################
p0 = process_time()
m0 = monotonic()
stdout.flush()
# Close handles cleanly on any termination signal.
sig_list = [SIGTERM, SIGINT, SIGQUIT, SIGHUP]
lock_dict = dict()
for i in sig_list:
    signal(i, signal_handler)
# Union of the currently mapped files and/or the saved snapshot.
final_set = set()
if lock_current_mapped:
    current_set = get_current_set()
    final_set.update(current_set)
if lock_saved_mapped:
    saved_set = get_saved_set()
    final_set.update(saved_set)
fs = len(final_set)
if fs == 0:
    print('Nothing to do!')
    exit()
lock_files(final_set)
try:
    num_fd = len(os.listdir('/proc/self/fd'))
    print('fd opened:', num_fd)
except OSError as e:
    errprint(e)
m = monotonic() - m0
p = process_time() - p0
print('self RSS: {}M, startup time: {}s, process time: {}s'.format(
    round(get_self_rss() / MIB, 1), round(m, 1), round(p, 1)))
stdout.flush()
# Sleep forever; the locked mappings keep the files resident.
while True:
    sleep(9999)
|
from django import template
register = template.Library()
@register.filter
def to_stroke_dashoffset(value):
    """Map a 0..1 progress fraction to an SVG stroke-dashoffset for a 402-unit path."""
    remaining_fraction = 1 - value
    return int(402 * remaining_fraction)
|
def odczyt_pliku(jezyk, plik):
    """Return the e-mail template text for language *jezyk* and file *plik*.

    The file is read from TrescMaila/<jezyk>/<plik> with CRLF newlines
    preserved.  BUG FIX: the handle is now closed via a context manager
    (the original leaked it), and the oddly concatenated path literal is a
    single f-string.
    """
    sciezka = f'TrescMaila/{jezyk}/{plik}'
    with open(sciezka, mode='r', encoding='utf8', newline='\r\n') as f:
        return f.read()
def odczyt_zalacznika(jezyk, plik):
    """Open the attachment TrescMaila/<jezyk>/<plik> in binary mode.

    Returns the OPEN file object; the caller is responsible for closing it
    after the attachment has been consumed.
    """
    return open(f"TrescMaila/{jezyk}/{plik}", 'rb')
|
import unittest
from types import FunctionType
from katas.beta.string_repetition_without_function import str_repeat
class StrRepeatTestCase(unittest.TestCase):
    """Tests for the 'string repetition without function' kata."""
    def test_equal_1(self):
        self.assertEqual(str_repeat('a', 4), 'aaaa')
    def test_equal_2(self):
        self.assertEqual(str_repeat('hello ', 3), 'hello hello hello ')
    def test_equal_3(self):
        self.assertEqual(str_repeat('abc', 2), 'abcabc')
    def test_is_instance_1(self):
        # The kata requires str_repeat NOT to be a plain function.
        # NOTE(review): lambdas are FunctionType too, so presumably the
        # solution is a class/partial/other callable — confirm.
        self.assertNotIsInstance(str_repeat, FunctionType)
|
# Generated by Django 2.2.4 on 2019-09-14 13:48
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the obsolete Library model."""
    dependencies = [
        ('webapp', '0006_auto_20190914_1907'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Library',
        ),
    ]
|
import os
from dotenv import load_dotenv
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
load_dotenv()
# Credentials and addresses come from the .env file, never from source.
SENDGRID_API_KEY = os.getenv("SENDGRID_API_KEY")
MY_ADDRESS = os.getenv("MY_ADDRESS")
#print(MY_ADDRESS, SENDGRID_API_KEY)
client = SendGridAPIClient(SENDGRID_API_KEY) #> <class 'sendgrid.sendgrid.SendGridAPIClient>
print("CLIENT:", type(client))
print("------------------------")
print("ASSEMBLING MESSAGE CONTENTS...")
subject = "Grade Details - 'Shopping Cart' Project"
html_content = "<p>Hello, thanks for submitting your 'Shopping Cart' Project.</p>"
html_content += "<p>I've evaluated your submission, and the final score is <strong>91.09 / 100</strong>.<p>"
html_content += "<p>Please see details below for a score break-down by rubric category.</p>"
html_content += "<p>If you have any questions about your grade, please follow-up within the next few days.</p>"
html_content += "<p>Thanks, and see you in class!</p>"
html_content += "<p>- Prof Rossetti</p>"
message = Mail(from_email=MY_ADDRESS, to_emails=MY_ADDRESS, subject=subject, html_content=html_content)
print(type(message))
print(html_content)
try:
    print("------------------------")
    print("SENDING...")
    response = client.send(message)
    print("RESPONSE:", type(response)) #> <class 'python_http_client.client.Response'>
    print(response.status_code) #> 202 indicates SUCCESS
except Exception as e:
    print("OOPS, SOMETHING WENT WRONG...", e)
    # BUG FIX: `response` is never assigned when client.send() raises, so the
    # old code hit a NameError here.  SendGrid/python_http_client errors
    # expose the failed response's body/headers on the exception when present.
    print(getattr(e, "body", None))
    print(getattr(e, "headers", None))
|
from RPi import GPIO
import spidev
import time
class MCP3008:
    """Minimal SPI driver for the MCP3008 10-bit ADC."""
    def __init__(self, spi):
        """Keep the spidev handle, open bus 0 / device 0, cap the clock at 100 kHz."""
        self.spi = spi
        spi.open(0, 0)
        spi.max_speed_hz = 10 ** 5
    def read_channel(self, channel):
        """Return the 10-bit sample (0-1023) for *channel* (0-7)."""
        # Frame: start bit, then single-ended mode + channel in the high nibble.
        command = [1, (8 + channel) << 4, 0]
        reply = self.spi.xfer2(command)
        # Low 2 bits of byte 1 are the sample's high bits; byte 2 is the rest.
        return ((reply[1] & 3) << 8) + reply[2]
|
from common.run_method import RunMethod
import allure
@allure.step("JkyAPP/查询学生")
def students_studentInfo_get(params=None, header=None, return_json=True, **kwargs):
    '''
    JkyAPP: query a student's info.

    :param params: query-string parameters appended to the URL
    :param header: request headers
    :param return_json: when True (default) return the JSON-decoded body,
        otherwise the raw response object
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. host)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "JkyAPP/查询学生"
    url = f"/api-operation-app/students/studentInfo"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("JkyApp/检查学生信息是否被完整")
def students_check_message_get(params=None, header=None, return_json=True, **kwargs):
    '''
    JkyApp: check whether a student's information is complete.

    :param params: query-string parameters appended to the URL
    :param header: request headers
    :param return_json: when True (default) return the JSON-decoded body,
        otherwise the raw response object
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. host)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "JkyApp/检查学生信息是否被完整"
    url = f"/api-operation-app/students/check/message"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("JkyApp/根据学生ID查找学生信息")
def students_queryById_get(params=None, header=None, return_json=True, **kwargs):
    '''
    JkyApp: look up a student's information by student ID.

    :param params: query-string parameters appended to the URL
    :param header: request headers
    :param return_json: when True (default) return the JSON-decoded body,
        otherwise the raw response object
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. host)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "JkyApp/根据学生ID查找学生信息"
    url = f"/api-operation-app/students/queryById"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("JkyApp/新增学生")
def students_add_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    JkyApp: create a student.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: when True (default) return the JSON-decoded body,
        otherwise the raw response object
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. host)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "JkyApp/新增学生"
    url = f"/api-operation-app/students/add"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("JkyApp/修改学生")
def students_update_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    JkyApp: update a student.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: when True (default) return the JSON-decoded body,
        otherwise the raw response object
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. host)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "JkyApp/修改学生"
    url = f"/api-operation-app/students/update"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("JkyAPP/学生信息")
def students_all_items_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    JkyAPP: list student information.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: when True (default) return the JSON-decoded body,
        otherwise the raw response object
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. host)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "JkyAPP/学生信息"
    url = f"/api-operation-app/students/all/items"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("JkyAPP/报班明细")
def students_class_detail_get(params=None, header=None, return_json=True, **kwargs):
    '''
    JkyAPP: class-enrolment details for a student.

    :param params: query-string parameters appended to the URL
    :param header: request headers
    :param return_json: when True (default) return the JSON-decoded body,
        otherwise the raw response object
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. host)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "JkyAPP/报班明细"
    url = f"/api-operation-app/students/class/detail"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("JkyAPP/编辑家长信息")
def students_parent_update_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    JkyAPP: edit a parent's information.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: when True (default) return the JSON-decoded body,
        otherwise the raw response object
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. host)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "JkyAPP/编辑家长信息"
    url = f"/api-operation-app/students/parent/update"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
|
import numpy as np
import pandas as pd
from scipy.stats import norm
from r.ts.adf import adf_r
from r.ts.arima import arima_r
from r.ts.kpss import kpss_r
from r.ts.smuce import smuce_r
from r.ts.stl import stl_r
### multiscale change-point inference
### see reference Sieling14
# x : pd.Series
# alpha : float
# -> (pd.Series, {(x.index.dtype, x.index.dtype): float})
def multiscale_change_point(x, alpha=0.05):
    """Fit a piecewise-constant signal to *x* with SMUCE at level *alpha*.

    Returns the stepwise fit and a dict mapping each confidence interval
    (the first is skipped) to the corresponding jump height.
    """
    values, change_points, confidence_intervals = smuce_r(x.values, np.array(x.index), alpha)
    fit = pd.Series(index=x.index)
    # Each change point starts a constant segment; label slicing fills from
    # the change point to the end, so later segments overwrite earlier ones.
    for v, c in zip(values, change_points):
        fit[c:] = v
    return fit, dict(zip(confidence_intervals[1:], np.diff(values)))
### maximum likelihood estimate of change point
# x : pd.Series
# -> float
def _log_lf(x):
mu = x.mean()
sigma = x.std()
if sigma > 0:
return np.log(np.prod(norm.pdf(x, loc=mu, scale=sigma)))
else:
return 1.
# x : pd.Series
# -> x.index.dtype
def change_mle(x):
    """Maximum-likelihood change point: the index label k that maximises the
    summed Gaussian log-likelihoods of the two segments x[:k] and x[k:].
    NOTE(review): slicing is label-based, so with a non-positional index the
    split semantics follow pandas label slicing — confirm intent."""
    return x.index[np.argmax([_log_lf(x[:k]) + _log_lf(x[k:]) for k in x.index])]
### KPSS test of stationarity against the presence of a unit root
### see reference Kwiatkowski92
# x : pd.Series
# null_hypothesis : 'constant' or 'trend'
# -> (float, float)
def kpss(x, null_hypothesis='constant'):
    """KPSS test of *x*; returns (statistic, p-value) from kpss_r.

    BUG FIX: string comparison now uses `==` instead of the identity
    operator `is`, which only worked by CPython string interning and raises
    a SyntaxWarning on Python 3.8+.
    """
    if null_hypothesis == 'constant':
        return kpss_r(x.values, null_hypothesis='Level')
    elif null_hypothesis == 'trend':
        return kpss_r(x.values, null_hypothesis='Trend')
    else:
        raise ValueError('invalid null hypothesis')
### augmented Dickey-Fuller test of the presence of a unit root against stationarity
### see reference Said84
# x : pd.Series
# -> (float, float)
def adf(x):
    """ADF test of *x*; returns (statistic, p-value) from the R binding."""
    return adf_r(x.values)
### STL decomposition
### see reference Cleveland90
# x : pd.Series
# window : int
# -> pd.DataFrame
def stl(x, window):
    """STL-decompose *x* (seasonal window *window*) into a DataFrame with
    'seasonal', 'trend' and 'remainder' columns on the same index."""
    dec = pd.DataFrame(index=x.index, columns=['seasonal', 'trend', 'remainder'])
    dec['seasonal'], dec['trend'], dec['remainder'] = stl_r(x.values, window)
    return dec
### ARIMA model fitting
### see references Brockwell91, Hyndman08
class ARIMAFit(pd.Series):
    """The observed series, carrying the fitted ARIMA artefacts as attributes.

    NOTE(review): pandas Series subclasses normally override _constructor;
    without it, derived objects from pandas ops come back as plain Series.
    """
    _ar_coef = None
    _ma_coef = None
    _diff_order = None
    _residuals = None
    # values : np.ndarray
    # index : pd.Index
    # ar_coef : np.ndarray
    # ma_coef : np.ndarray
    # diff_order : int
    # residuals : np.ndarray
    def __init__(self, values, index, ar_coef, ma_coef, diff_order, residuals):
        """Store the fit artefacts, then initialise the underlying Series."""
        self._ar_coef = ar_coef
        self._ma_coef = ma_coef
        self._diff_order = diff_order
        self._residuals = residuals
        super(ARIMAFit, self).__init__(values, index=index)
    # -> (np.ndarray, np.ndarray)
    def get_coefs(self):
        """Return (AR coefficients, MA coefficients)."""
        return self._ar_coef, self._ma_coef
    # -> int
    def get_diff_order(self):
        """Return the differencing order d."""
        return self._diff_order
    # -> pd.Series
    def get_fit(self):
        """Return observed values plus residuals.
        NOTE(review): fitted values are conventionally observed MINUS
        residuals — confirm the sign convention of arima_r's residuals."""
        return pd.Series(self.values + self._residuals, index=self.index)
    # -> pd.Series
    def get_resids(self):
        """Return the model residuals as a Series on the same index."""
        return pd.Series(self._residuals, index=self.index)
# x : pd.Series
# alpha : float
# max_diffs : int
# method : 'kpss' or 'adf'
# -> int
def _diff_order(x, alpha=0.05, max_diffs=2, method='kpss'):
d = 0
if len(set(x.values)) < 2:
return d
if method is 'kpss':
do_diff = (kpss_r(x.values)[1] < alpha)
elif method is 'adf':
do_diff = (adf_r(x.values)[1] > alpha)
else:
raise ValueError('invalid method')
if not do_diff:
return d
while (do_diff) and (d < max_diffs):
d += 1
x = x.diff()
if len(set(x.values)) < 2:
return d
if method is 'kpss':
do_diff = (kpss_r(x.values)[1] < alpha)
elif method is 'adf':
do_diff = (adf_r(x.values)[1] > alpha)
else:
raise ValueError('invalid method')
if not do_diff:
return d-1
return d
# aic : float
# n : int
# k : int
# -> float
def _aic_corr(aic, n, k):
return aic + (2*k*(k+1))/(n-k-1)
# x : np.ndarray
# model : (int, int)
# diff_order : int
# init_ic : float
# max_ar : int
# max_ma : int
# -> (int, int)
def _vary_model(x, model, diff_order, init_ic, max_ar=5, max_ma=5):
    """Greedy AICc-guided neighbourhood search over ARMA orders (p, q)."""
    # Six neighbours of the current model: p and/or q stepped up or down
    # (floored at 0).
    varied_orders = {0: (model[0]+1, model[1]), 1: (max(model[0]-1, 0), model[1]), 2: (model[0], model[1]+1), 3: (model[0], max(model[1]-1, 0)), 4: (model[0]+1, model[1]+1), 5: (max(model[0]-1, 0), max(model[1]-1, 0))}
    # AICc per neighbour; arima_r's last two return values are (aic, nobs).
    varied_models = [_aic_corr(*arima_r(x, *v, diff_order)[-2:], v[0]+v[1]+1) for v in varied_orders.values()]
    # Recurse while the best neighbour improves and stays under the
    # (shrinking) order caps; otherwise return the best neighbour found.
    if (np.min(varied_models) < init_ic) and (varied_orders[np.argmin(varied_models)][0] < max_ar) and (varied_orders[np.argmin(varied_models)][1] < max_ma):
        return _vary_model(x, varied_orders[np.argmin(varied_models)], diff_order, np.min(varied_models), max_ar-1, max_ma-1)
    else:
        return varied_orders[np.argmin(varied_models)]
# x : pd.Series
# alpha : float
# max_ar : int
# max_ma : int
# max_diff : int
# method : 'kpss' or 'adf'
# -> (int, int, int)
def _select_orders(x, alpha=0.05, max_ar=5, max_ma=5, max_diff=2, method='kpss'):
    """Select ARIMA orders (p, q, d): d from stationarity tests, then (p, q)
    by AICc over four seed models refined with _vary_model."""
    d = _diff_order(x, alpha, max_diff, method)
    # Seed models in the spirit of Hyndman-Khandakar: (2,2), (0,0), (1,0), (0,1).
    init_model_orders = {0: (2, 2), 1: (0, 0), 2: (1, 0), 3: (0, 1)}
    models = [_aic_corr(*arima_r(x.values, *v, d)[-2:], v[0]+v[1]+1) for v in init_model_orders.values()]
    m = init_model_orders[np.argmin(models)]
    m = _vary_model(x.values, m, d, np.min(models), max_ar, max_ma)
    return (m[0], m[1], d)
# x : pd.Series
# order : (int, int, int)
# max_ar : int
# max_ma : int
# max_diff : int
# alpha : float
# diff_count_method : 'kpss' or 'adf'
# -> ARIMAFit
def arima(x, order=None, max_ar=5, max_ma=5, max_diff=2, alpha=0.05, diff_count_method='kpss'):
    """Fit an ARIMA model to *x*; orders are auto-selected unless given.

    BUG FIX: the MA coefficients were taken as coef[-q:], which returns ALL
    coefficients when q == 0; slicing coef[p:p+q] yields the intended
    (possibly empty) MA block.  NOTE(review): assumes arima_r lays out the
    coefficient vector as [AR..., MA..., ...] — confirm.
    """
    if not order:
        order = _select_orders(x, alpha, max_ar, max_ma, max_diff, diff_count_method)
    coef, resid, aic, nobs = arima_r(x.values, *order)
    p, q = order[0], order[1]
    ar_coef = coef[:p]
    ma_coef = coef[p:p + q]
    return ARIMAFit(x.values, x.index, ar_coef, ma_coef, order[2], resid)
|
import telebot
from telebot import types
import config
class Bot:
    """ Singleton class to create bot object """
    __instance = None
    # FIX: declared as a static method — the original relied on being called
    # via the class (Bot.get_instance()), which only works without `self`
    # on Python 3 by accident of unbound-method semantics.
    @staticmethod
    def get_instance():
        """Return the shared telebot.TeleBot, creating it on first use."""
        if Bot.__instance is None:
            Bot()
        return Bot.__instance
    def __init__(self, proxy=False):
        # Constructing Bot directly a second time is an error; the first
        # construction stores the TeleBot (not the Bot wrapper) as the
        # singleton instance, matching the original behaviour.
        if Bot.__instance is not None:
            raise Exception("This class is a singleton!")
        else:
            Bot.__instance = telebot.TeleBot(config.telegram_token)
# create telegram bot instance (the shared telebot.TeleBot singleton)
bot = Bot.get_instance()
# create custom keyboard for the game; one_time_keyboard hides it after a press
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
btn0 = types.KeyboardButton('/rules')
btn1 = types.KeyboardButton('/top')
btn2 = types.KeyboardButton('/singleplayer')
btn3 = types.KeyboardButton('/multiplayer')
# layout: rules/top share the first row, then one row per game mode
markup.row(btn0, btn1)
markup.row(btn2)
markup.row(btn3)
|
# -*- coding: utf-8 -*-
"""Tests for output writers."""
import unittest
from dtformats import output_writers
from tests import test_lib
class StdoutWriterTest(test_lib.BaseTestCase):
    """Stdout output writer tests.

    These are smoke tests: each method only verifies the call completes
    without raising; no output is captured or compared.
    """

    def testClose(self):
        """Tests the Close function."""
        test_writer = output_writers.StdoutWriter()
        test_writer.Close()

    def testOpen(self):
        """Tests the Open function."""
        test_writer = output_writers.StdoutWriter()
        test_writer.Open()

    def testWriteText(self):
        """Tests the WriteText function."""
        # Writing an empty string must be accepted.
        test_writer = output_writers.StdoutWriter()
        test_writer.WriteText('')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from django.urls import path, re_path
from . import views
# Route table for this app: the banner listing endpoint.
banner_list_view = views.BannerListAPIView.as_view()

urlpatterns = [
    path("banner/", banner_list_view),
]
|
#!/usr/bin/python
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from urlparse import urlparse, parse_qs
from Controller import Controller
import logging
import json
PORT_NUMBER = 7777
#This class will handles any incoming request from
#the browser
class myHandler(BaseHTTPRequestHandler):
    """Serves search queries: GET /?query=... returns a JSON document."""

    def do_GET(self):
        """Parse the query string, run the search, and emit the result as JSON."""
        query_components = parse_qs(urlparse(self.path).query)
        if query_components:
            # FIX: use .get() -- the original indexed ["query"] and raised
            # KeyError when other parameters were present without "query".
            # NOTE: parse_qs yields a *list* of values for the key; the
            # Controller is assumed to accept that list -- TODO confirm.
            query_phrase = query_components.get("query")
            if query_phrase:
                result = self._processQuery(query_phrase)
            else:
                result = dict()
                result["status"] = "error:no_query"
        else:
            result = dict()
            result["status"] = "error:malformed_query"
        result_json = unicode(json.dumps(result, ensure_ascii=False, indent=4, separators=(',', ': ')))
        self.send_response(200)
        self.send_header('Content-type', 'text/json')
        self.send_header("Access-Control-Allow-Origin", "*")
        # BUG FIX: send_header takes (keyword, value); the original passed a
        # single "Key: value" string, which raises TypeError at runtime.
        self.send_header("Access-Control-Expose-Headers", "Access-Control-Allow-Origin")
        self.send_header("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
        self.end_headers()
        self.wfile.write(result_json)
        return

    def do_POST(self):
        """POST requests are handled identically to GET."""
        # BUG FIX: was self.doGET(), which does not exist (AttributeError).
        self.do_GET()

    def _processQuery(self, query):
        """Run the query through the Controller and return its result dict."""
        controller = Controller(4, query, logging.DEBUG)
        result = controller.getWebResultsForQuery()
        return result
try:
    # Create a web server on all interfaces and attach the request handler.
    server = HTTPServer(('', PORT_NUMBER), myHandler)
    print 'Started httpserver on port ' , PORT_NUMBER
    # Block forever, serving incoming HTTP requests.
    server.serve_forever()
except KeyboardInterrupt:
    # Ctrl-C shuts the server down cleanly.
    print '^C received, shutting down the web server'
    server.socket.close()
|
import shout
import time
import sys
class RadioConnectionException(Exception):
    """Raised when the player cannot connect to the streaming server."""
    pass
class RadioPlayer():
    """Streams local MP3 files to an Icecast/Shoutcast mount via libshout."""

    def __init__(self, host="localhost", port=8501, user='source', password='hackme', mount='/mymout'):
        self._s = shout.Shout()
        self._s.host = host
        self._s.port = port
        self._s.user = user
        self._s.password = password
        self._s.mount = mount
        # libshout connection state, updated by open_player()/close_player()
        self._status = shout.SHOUTERR_UNCONNECTED

    @staticmethod
    def format_songname(song):
        """Derive a display name from a file path: strip directories and the
        extension, turn '_' into spaces and '-' into ' - '."""
        result = song.split("/")[-1].split(".")
        result = ".".join(result[:len(result) - 1]).replace("_", " ").replace("-", " - ")
        return result

    def open_player(self):
        """Connect to the streaming server (no-op if already connected)."""
        if self._status != shout.SHOUTERR_CONNECTED:
            self._status = self._s.open()
        if self._status != shout.SHOUTERR_CONNECTED:
            # BUG FIX: the status code was never interpolated -- the original
            # passed (format, status) as two exception args instead of using %.
            raise RadioConnectionException("Unable to connect to server: %d" % self._status)

    def close_player(self):
        """Disconnect if currently connected."""
        if self._status == shout.SHOUTERR_CONNECTED:
            self._s.close()

    def play_track(self, song_name="", song_file=""):
        """Stream one MP3 file to the mount, tagging the stream with song_name.

        Optional libshout attributes (protocol, genre, url, public,
        audio_info) are left at their defaults.
        """
        self._s.format = 'mp3'
        self._s.name = 'Deep South Sounds'
        total = 0
        st = time.time()
        print("opening file %s" % song_file)
        # BUG FIX: open the MP3 in binary mode and make sure the file is
        # closed even when send()/sync() raises (original leaked the handle).
        f = open(song_file, 'rb')
        try:
            self._s.set_metadata({'song': str(song_name)})
            buf = f.read(4096)
            while buf:
                total += len(buf)
                self._s.send(buf)
                self._s.sync()
                buf = f.read(4096)
        finally:
            f.close()
        et = time.time()
        br = total * 0.008 / (et - st)
        print("Sent %d bytes in %d seconds (%f kbps)" % (total, et - st, br))
|
from pprint import pprint
# from sudoku import pretty_repr
import sys
import numpy as np
# SAT dependencies
from pysat.formula import CNF
from pysat.solvers import MinisatGH
# CSP dependencies
from ortools.sat.python import cp_model
# ILP dependencies
import gurobipy as gp
from gurobipy import GRB
# ASP dependencies
import clingo
### Propagation function to be used in the recursive sudoku solver
def propagate(sudoku_possible_values, k):
    """
    Constraint propagation: for every cell whose domain still holds more
    than one value, remove any value that is already fixed (singleton
    domain) in a peer cell -- same k*k box, same row, or same column.

    The structure is mutated in place and returned.  Returns None as soon
    as a cell with an empty domain is visited (inconsistent board).

    Observed timings from the original author:
      easy3.sudoku: 0.00 s | easy4.sudoku: 3.32 s | easy5.sudoku: endless
      hard3.sudoku: 0.00 s | hard4.sudoku: 0.12 s | hard5.sudoku: endless
    """
    size = len(sudoku_possible_values)
    for i in range(size):
        for j in range(size):
            domain = sudoku_possible_values[i][j]
            if len(domain) == 0:
                # invalid domain: no value can be placed here
                return None
            if len(domain) == 1:
                # already decided -- nothing to prune
                continue
            # Collect the coordinates of all peers of (i, j): the k*k box
            # first, then the row, then the column (matching the original
            # processing order; duplicate peers are harmless because each
            # value is removed at most once).
            box_row = (i // k) * k
            box_col = (j // k) * k
            peers = [(r, c)
                     for r in range(box_row, box_row + k)
                     for c in range(box_col, box_col + k)
                     if (r, c) != (i, j)]
            peers += [(i, g) for g in range(size) if g != j]
            peers += [(c, j) for c in range(size) if c != i]
            for (r, c) in peers:
                peer_domain = sudoku_possible_values[r][c]
                if len(peer_domain) == 1 and peer_domain[0] in domain:
                    domain.remove(peer_domain[0])
                    if len(domain) == 0:
                        # domain exhausted; the inconsistency is reported by
                        # the caller's next pass when this cell is revisited
                        break
    return sudoku_possible_values
### Solver that uses SAT encoding
def solve_sudoku_SAT(sudoku, k):
    """
    Encode the sudoku as CNF (graph-colouring style) and solve with MiniSat.

    sudoku : list of `length` rows of `length` ints, 0 marks an empty cell
    k      : box size (length == k * k)
    -> solved grid as a list of lists, or None when unsatisfiable
    """
    length = len(sudoku)
    num_vertices = length ** 2
    matrix = np.arange(num_vertices).reshape(length, length)
    givens = np.array(sudoku).reshape(num_vertices)

    # Build the set of cell pairs that must differ (same box/row/column).
    # FIX: the original tested `(left, right) not in edges` on a *list*
    # (O(E) per candidate pair) and still stored both orientations of each
    # pair, duplicating every difference clause.  A set of normalised
    # (min, max) tuples dedupes in O(1).
    edges = set()

    def add_edge(a, b):
        if a != b:
            edges.add((min(a, b), max(a, b)))

    for i in range(length):
        for j in range(length):
            left = int(matrix[i][j]) + 1
            box_row = (i // k) * k
            box_col = (j // k) * k
            for v in range(box_row, box_row + k):
                for w in range(box_col, box_col + k):
                    add_edge(left, int(matrix[v][w]) + 1)
            for g in range(length):
                add_edge(left, int(matrix[i][g]) + 1)
            for c in range(length):
                add_edge(left, int(matrix[c][j]) + 1)

    num_values = k ** 2
    formula = CNF()

    def var_number(i, c):
        # 1-based propositional variable encoding "cell i holds value c"
        return ((i - 1) * num_values) + c

    # At least one value per cell; a given clue becomes a unit clause.
    for i in range(1, num_vertices + 1):
        given = int(givens[i - 1])
        if given == 0:
            formula.append([var_number(i, c) for c in range(1, num_values + 1)])
        else:
            formula.append([var_number(i, given)])

    # At most one value per cell.
    for i in range(1, num_vertices + 1):
        for c1 in range(1, num_values + 1):
            for c2 in range(c1 + 1, num_values + 1):
                formula.append([-var_number(i, c1), -var_number(i, c2)])

    # Cells that share a unit must not share a value.
    for (i1, i2) in edges:
        for c in range(1, num_values + 1):
            formula.append([-var_number(i1, c), -var_number(i2, c)])

    solver = MinisatGH()
    solver.append_formula(formula)
    result = matrix.reshape(num_vertices)
    if solver.solve():
        print("The sudoku is solved.")
        # FIX: membership tests against the model *list* were O(|model|)
        # each; a set makes the decoding loop linear overall.
        model = set(solver.get_model())
        for i in range(1, num_vertices + 1):
            for c in range(1, num_values + 1):
                if var_number(i, c) in model:
                    result[i - 1] = c
        return result.reshape(length, length).tolist()
    print("The sudoku has no solution.")
    return None
### Solver that uses CSP encoding
def solve_sudoku_CSP(sudoku, k):
    """
    Encode the sudoku for the OR-Tools CP-SAT solver: one integer variable
    per cell (fixed-domain for clues) plus pairwise != constraints.

    sudoku : list of `length` rows of `length` ints, 0 marks an empty cell
    k      : box size (length == k * k)
    -> solved grid as a list of lists, or None when unsatisfiable
    """
    length = len(sudoku)
    num_vertices = length ** 2
    matrix = np.arange(num_vertices).reshape(length, length)
    givens = np.array(sudoku).reshape(num_vertices)

    # FIX: the original checked `(right, left) not in edges` but appended
    # (left, right), so forward duplicates slipped through, and every lookup
    # was an O(E) list scan; use a set of normalised pairs.  The large
    # blocks of commented-out experiments have also been removed.
    edges = set()

    def add_edge(a, b):
        if a != b:
            edges.add((min(a, b), max(a, b)))

    for i in range(length):
        for j in range(length):
            left = int(matrix[i][j]) + 1
            box_row = (i // k) * k
            box_col = (j // k) * k
            for v in range(box_row, box_row + k):
                for w in range(box_col, box_col + k):
                    add_edge(left, int(matrix[v][w]) + 1)
            for g in range(length):
                add_edge(left, int(matrix[i][g]) + 1)
            for c in range(length):
                add_edge(left, int(matrix[c][j]) + 1)

    model = cp_model.CpModel()
    cells = dict()
    for i in range(1, num_vertices + 1):
        given = int(givens[i - 1])
        if given == 0:
            cells[i] = model.NewIntVar(1, length, "x{}".format(i))
        else:
            # clue: a variable whose domain is the single given value
            cells[i] = model.NewIntVar(given, given, "x{}".format(i))

    # Cells that share a box/row/column must differ.
    for (a, b) in edges:
        model.Add(cells[a] != cells[b])

    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    result = matrix.reshape(num_vertices)
    # BUG FIX: CP-SAT reports OPTIMAL for fully solved satisfaction models;
    # accepting only FEASIBLE made solvable puzzles come back as None.
    if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
        for i in range(1, num_vertices + 1):
            result[i - 1] = solver.Value(cells[i])
        return result.reshape(length, length).tolist()
    return None
### Solver that uses ASP encoding
def solve_sudoku_ASP(sudoku, k):
    """
    Encode the sudoku as an answer-set program (graph-colouring style) and
    solve it with clingo.

    sudoku : list of `length` rows of `length` ints, 0 marks an empty cell
    k      : box size (length == k * k)
    -> solved grid as a list of lists, or None when unsatisfiable
    """
    length = len(sudoku)
    num_vertices = length ** 2
    matrix = np.arange(num_vertices).reshape(length, length)
    givens = np.array(sudoku).reshape(num_vertices)

    # FIX: deduplicated edge set (the original's reverse-only check let
    # forward duplicates through and scanned a list per candidate pair).
    edges = set()

    def add_edge(a, b):
        if a != b:
            edges.add((min(a, b), max(a, b)))

    for i in range(length):
        for j in range(length):
            left = int(matrix[i][j]) + 1
            box_row = (i // k) * k
            box_col = (j // k) * k
            for v in range(box_row, box_row + k):
                for w in range(box_col, box_col + k):
                    add_edge(left, int(matrix[v][w]) + 1)
            for g in range(length):
                add_edge(left, int(matrix[i][g]) + 1)
            for c in range(length):
                add_edge(left, int(matrix[c][j]) + 1)

    # Build the ASP program.  FIX: collect lines and join once instead of
    # quadratic string concatenation; dead commented-out experiments removed.
    program = []
    for i in range(1, num_vertices + 1):
        program.append('vertex(v{}).'.format(i))
    for (a, b) in edges:
        program.append('edge(v{},v{}).'.format(a, b))
    # Each vertex takes exactly one value, expressed via default negation.
    for value in range(1, length + 1):
        others = ', '.join('not value(V,{})'.format(o)
                           for o in range(1, length + 1) if o != value)
        program.append('value(V,{}) :- vertex(V), {}.'.format(value, others))
    # Clues fix the value of their vertex.
    for idx in range(num_vertices):
        if givens[idx] != 0:
            program.append('value(v{},{}) :- vertex(v{}).'.format(
                idx + 1, int(givens[idx]), idx + 1))
    # Adjacent vertices may not share a value.
    program.append(':- edge(V1,V2), value(V1,C), value(V2,C).')
    program.append('#show value/2.')
    asp_code = '\n'.join(program)

    control = clingo.Control()
    control.add("base", [], asp_code)
    control.ground([("base", [])])
    control.configuration.solve.models = 1

    # FIX: the original solved the program twice (once to test
    # satisfiability with a no-op on_model callback, once to read the
    # model); solve once and read the answer set directly.
    atoms = []
    with control.solve(yield_=True) as handle:
        for model in handle:
            atoms = [str(atom) for atom in model.symbols(shown=True)]
            break
    if not atoms:
        return None
    # Sort atoms by vertex index, e.g. 'value(v17,3)' -> 17, so position p
    # in the sorted list corresponds to flat cell index p.
    atoms.sort(key=lambda a: int(a.split(',')[0].split('(v')[-1]))
    result = np.ones(num_vertices, dtype=int)
    for pos, atom in enumerate(atoms):
        result[pos] = int(atom[:-1].split(',')[-1])
    return result.reshape(length, length).tolist()
### Solver that uses ILP encoding
def solve_sudoku_ILP(sudoku, k):
    """
    0/1 ILP encoding solved with Gurobi: binary x[(i, v)] == 1 iff flat
    cell i holds value v.

    sudoku : list of `length` rows of `length` ints, 0 marks an empty cell
    k      : box size (length == k * k)
    -> solved grid as a list of lists, or None when infeasible
    """
    model = gp.Model()
    length = len(sudoku)
    num_vertices = length ** 2
    matrix = np.arange(num_vertices).reshape(length, length)
    givens = np.array(sudoku).reshape(num_vertices)

    # FIX: deduplicated, normalised edge set (the original's reverse-only
    # check let forward duplicates through and scanned a list per pair).
    edges = set()

    def add_edge(a, b):
        if a != b:
            edges.add((min(a, b), max(a, b)))

    for i in range(length):
        for j in range(length):
            left = int(matrix[i][j]) + 1
            box_row = (i // k) * k
            box_col = (j // k) * k
            for v in range(box_row, box_row + k):
                for w in range(box_col, box_col + k):
                    add_edge(left, int(matrix[v][w]) + 1)
            for g in range(length):
                add_edge(left, int(matrix[i][g]) + 1)
            for c in range(length):
                add_edge(left, int(matrix[c][j]) + 1)

    num_values = length
    # One binary per candidate (cell, value); clue cells get a single var.
    x = dict()
    for i in range(1, num_vertices + 1):
        given = int(givens[i - 1])
        if given == 0:
            for v in range(1, num_values + 1):
                x[(i, v)] = model.addVar(vtype=GRB.BINARY, name="x({},{})".format(i, v))
        else:
            x[(i, given)] = model.addVar(vtype=GRB.BINARY, name="x({},{})".format(i, given))

    # Exactly one value per cell; clue cells force their single variable on.
    for i in range(1, num_vertices + 1):
        given = int(givens[i - 1])
        if given == 0:
            model.addConstr(gp.quicksum(x[(i, v)] for v in range(1, num_values + 1)) == 1)
        else:
            model.addConstr(x[(i, given)] == 1)

    # Conflicting cells cannot share a value (values ruled out by a clue
    # cell have no variable and are skipped).
    for (i1, i2) in edges:
        for v in range(1, num_values + 1):
            if (i1, v) in x and (i2, v) in x:
                model.addConstr(x[(i1, v)] + x[(i2, v)] <= 1)

    model.optimize()
    result = matrix.reshape(num_vertices)
    if model.status == GRB.OPTIMAL:
        for i in range(1, num_vertices + 1):
            for v in range(1, num_values + 1):
                # BUG FIX: solution values are floats; exact `== 1` can miss
                # a 0.999... answer, so compare against a 0.5 threshold.
                if (i, v) in x and x[(i, v)].x > 0.5:
                    result[i - 1] = v
        return result.reshape(length, length).tolist()
    return None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.